ext (stringclasses, 9 values) | sha (stringlengths, 40) | content (stringlengths, 3–1.04M)
---|---|---|
py | 7df8793519388cf972061755f9783efafc7c52cc | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test new Elicoin multisig prefix functionality.
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import decimal
class ScriptAddress2Test(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 3
self.setup_clean_chain = False
def setup_network(self):
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, []))
self.nodes.append(start_node(1, self.options.tmpdir, []))
self.nodes.append(start_node(2, self.options.tmpdir, []))
connect_nodes(self.nodes[1], 0)
connect_nodes(self.nodes[2], 0)
self.is_network_split = False
self.sync_all()
def run_test(self):
cnt = self.nodes[0].getblockcount()
# Mine some blocks
self.nodes[1].generate(100)
self.sync_all()
if (self.nodes[0].getblockcount() != cnt + 100):
raise AssertionError("Failed to mine 100 blocks")
addr = self.nodes[0].getnewaddress()
addr2 = self.nodes[0].getnewaddress()
multisig_addr = self.nodes[0].addmultisigaddress(2, [addr, addr2], "multisigaccount")
assert_equal(multisig_addr[0], 'Q')
# Send to a new multisig address
txid = self.nodes[1].sendtoaddress(multisig_addr, 1)
block = self.nodes[1].generate(3)
self.sync_all()
tx = self.nodes[2].getrawtransaction(txid, 1)
dest_addrs = [tx["vout"][0]['scriptPubKey']['addresses'][0],
tx["vout"][1]['scriptPubKey']['addresses'][0]]
assert(multisig_addr in dest_addrs)
# Spend from the new multisig address
addr3 = self.nodes[1].getnewaddress()
txid = self.nodes[0].sendfrom("multisigaccount", addr3, 0.8)
block = self.nodes[0].generate(2)
self.sync_all()
assert(self.nodes[0].getbalance("multisigaccount", 1) < 0.2)
assert(self.nodes[1].listtransactions()[-1]['address'] == addr3)
# Send to an old multisig address. The api addmultisigaddress
# can only generate a new address so we manually compute
# multisig_addr_old beforehand using an old client.
priv_keys = ["cU7eeLPKzXeKMeZvnEJhvZZ3tLqVF3XGeo1BbM8dnbmV7pP3Qg89",
"cTw7mRhSvTfzqCt6MFgBoTBqwBpYu2rWugisXcwjv4cAASh3iqPt"]
addrs = ["mj6gNGRXPXrD69R5ApjcsDerZGrYKSfb6v",
"mqET4JA3L7P7FoUjUP3F6m6YsLpCkyzzou"]
self.nodes[0].importprivkey(priv_keys[0])
self.nodes[0].importprivkey(priv_keys[1])
multisig_addr_new = self.nodes[0].addmultisigaddress(2, addrs, "multisigaccount2")
assert_equal(multisig_addr_new, "QZ974ZrPrmqMmm1PSVp4m8YEgo3bCQZBbe")
multisig_addr_old = "2N5nLwYz9qfnGdaFLpPn3gS6oYQbmLTWPjq"
## Let's send to the old address. We can then find it in the
## new address with the new client. So basically the old
## address and the new one are the same thing.
txid = self.nodes[1].sendtoaddress(multisig_addr_old, 1)
block = self.nodes[1].generate(1)
self.sync_all()
tx = self.nodes[2].getrawtransaction(txid, 1)
dest_addrs = [tx["vout"][0]['scriptPubKey']['addresses'][0],
tx["vout"][1]['scriptPubKey']['addresses'][0]]
assert(multisig_addr_new in dest_addrs)
assert(multisig_addr_old not in dest_addrs)
# Spend from the new multisig address
addr4 = self.nodes[1].getnewaddress()
txid = self.nodes[0].sendfrom("multisigaccount2", addr4, 0.8)
block = self.nodes[0].generate(2)
self.sync_all()
assert(self.nodes[0].getbalance("multisigaccount2", 1) < 0.2)
assert(self.nodes[1].listtransactions()[-1]['address'] == addr4)
if __name__ == '__main__':
ScriptAddress2Test().main()
|
py | 7df879a3f55c8ee387d2379e63d9e5a40d0921c7 | expected_output = {
"tunnel": {
"29": {
"flags": "0x0",
"tx_data": 0,
"tx_parity": 0,
"rx_data": 5764,
"rx_parity": 1441,
"reconstruct": 1,
"tx_rx_wins": {
"0": {
"win_flags": "0X1",
"count": 0,
"isn": 0,
"tos": 0,
"parity_len": 0,
"fec_len": 0,
"fec_data": "0x0"
}
}
},
"34": {
"flags": "0x0",
"tx_data": 0,
"tx_parity": 0,
"rx_data": 4615,
"rx_parity": 1153,
"reconstruct": 2,
"tx_rx_wins": {
"0": {
"win_flags": "0X1",
"count": 0,
"isn": 0,
"tos": 0,
"parity_len": 0,
"fec_len": 0,
"fec_data": "0x0"
},
"129": {
"win_flags": "0X2",
"count": 3,
"isn": 4612,
"tos": 0,
"parity_len": 0,
"fec_len": 56,
"fec_data": "0xC8556540"
}
}
},
"54": {
"flags": "0x0",
"tx_data": 0,
"tx_parity": 0,
"rx_data": 75,
"rx_parity": 19,
"reconstruct": 1,
"tx_rx_wins": {
"0": {
"win_flags": "0X1",
"count": 0,
"isn": 0,
"tos": 0,
"parity_len": 0,
"fec_len": 0,
"fec_data": "0x0"
},
"435": {
"win_flags": "0X6",
"count": 2,
"isn": 327372,
"tos": 0,
"parity_len": 0,
"fec_len": 0,
"fec_data": "0xC86F50B0"
},
"454": {
"win_flags": "0X2",
"count": 1,
"isn": 327448,
"tos": 0,
"parity_len": 0,
"fec_len": 578,
"fec_data": "0xC8584B00"
}
}
},
"68": {
"flags": "0x0",
"tx_data": 0,
"tx_parity": 0,
"rx_data": 6802,
"rx_parity": 1700,
"reconstruct": 0,
"tx_rx_wins": {
"0": {
"win_flags": "0X1",
"count": 0,
"isn": 0,
"tos": 0,
"parity_len": 0,
"fec_len": 0,
"fec_data": "0x0"
},
"164": {
"win_flags": "0X2",
"count": 2,
"isn": 6800,
"tos": 0,
"parity_len": 0,
"fec_len": 0,
"fec_data": "0xC84A2F00"
}
}
},
"73": {
"flags": "0x0",
"tx_data": 0,
"tx_parity": 0,
"rx_data": 11846,
"rx_parity": 2961,
"reconstruct": 1,
"tx_rx_wins": {
"0": {
"win_flags": "0X1",
"count": 0,
"isn": 0,
"tos": 0,
"parity_len": 0,
"fec_len": 0,
"fec_data": "0x0"
},
"401": {
"win_flags": "0X2",
"count": 2,
"isn": 11844,
"tos": 0,
"parity_len": 0,
"fec_len": 0,
"fec_data": "0xC8422F20"
}
}
}
}
}
|
py | 7df87a4c446187cb5d2726bb31b38674909295a8 | VERSION = (0, 2, 2)
__version__ = ".".join(map(str, VERSION))
|
py | 7df87b7d4596662320a38e0009982415fe2d6823 | from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('api/', include("accounts.urls")),
]
|
py | 7df87e03c5252256165778be4d297c7325d85603 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from airflow import DAG
from airflow.providers.cncf.kubernetes.operators import kubernetes_pod
from airflow.providers.google.cloud.transfers import gcs_to_bigquery
default_args = {
"owner": "Google",
"depends_on_past": False,
"start_date": "2021-03-01",
}
with DAG(
dag_id="epa_historical_air_quality.nonoxnoy_daily_summary",
default_args=default_args,
max_active_runs=1,
schedule_interval="0 4 * * *",
catchup=False,
default_view="graph",
) as dag:
# Run CSV transform within kubernetes pod
transform_csv = kubernetes_pod.KubernetesPodOperator(
task_id="transform_csv",
name="nonoxnoy_daily_summary",
namespace="composer",
service_account_name="datasets",
image_pull_policy="Always",
image="{{ var.json.epa_historical_air_quality.container_registry.run_csv_transform_kub }}",
env_vars={
"SOURCE_URL": "https://aqs.epa.gov/aqsweb/airdata/daily_NONOxNOy_YEAR_ITERATOR.zip",
"START_YEAR": "1990",
"SOURCE_FILE": "files/data.csv",
"TARGET_FILE": "files/data_output.csv",
"CHUNKSIZE": "750000",
"TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}",
"TARGET_GCS_PATH": "data/epa_historical_air_quality/nonoxnoy_daily_summary/files/data_output.csv",
"DATA_NAMES": '[ "state_code", "county_code", "site_num", "parameter_code", "poc",\n "latitude", "longitude", "datum", "parameter_name", "sample_duration",\n "pollutant_standard", "date_local", "units_of_measure", "event_type", "observation_count",\n "observation_percent", "arithmetic_mean", "first_max_value", "first_max_hour", "aqi",\n "method_code", "method_name", "local_site_name", "address", "state_name",\n "county_name", "city_name", "cbsa_name", "date_of_last_change" ]',
"DATA_DTYPES": '{ "state_code": "str", "county_code": "str", "site_num": "str", "parameter_code": "int32", "poc": "int32",\n "latitude": "float64", "longitude": "float64", "datum": "str", "parameter_name": "str", "sample_duration": "str",\n "pollutant_standard": "str", "date_local": "datetime64[ns]", "units_of_measure": "str", "event_type": "str", "observation_count": "int32",\n "observation_percent": "float64", "arithmetic_mean": "float64", "first_max_value": "float64", "first_max_hour": "int32", "aqi": "str",\n "method_code": "str", "method_name": "str", "local_site_name": "str", "address": "str", "state_name": "str",\n "county_name": "str", "city_name": "str", "cbsa_name": "str", "date_of_last_change": "datetime64[ns]" }',
},
resources={"limit_memory": "8G", "limit_cpu": "3"},
)
# Task to load CSV data to a BigQuery table
load_to_bq = gcs_to_bigquery.GCSToBigQueryOperator(
task_id="load_to_bq",
bucket="{{ var.value.composer_bucket }}",
source_objects=[
"data/epa_historical_air_quality/nonoxnoy_daily_summary/files/data_output.csv"
],
source_format="CSV",
destination_project_dataset_table="{{ var.json.epa_historical_air_quality.container_registry.nonoxnoy_daily_summary_destination_table }}",
skip_leading_rows=1,
allow_quoted_newlines=True,
write_disposition="WRITE_TRUNCATE",
schema_fields=[
{
"name": "state_code",
"type": "STRING",
"description": "The FIPS code of the state in which the monitor resides.",
"mode": "NULLABLE",
},
{
"name": "county_code",
"type": "STRING",
"description": "The FIPS code of the county in which the monitor resides.",
"mode": "NULLABLE",
},
{
"name": "site_num",
"type": "STRING",
"description": "A unique number within the county identifying the site.",
"mode": "NULLABLE",
},
{
"name": "parameter_code",
"type": "INTEGER",
"description": "The AQS code corresponding to the parameter measured by the monitor.",
"mode": "NULLABLE",
},
{
"name": "poc",
"type": "INTEGER",
"description": "This is the “Parameter Occurrence Code” used to distinguish different instruments that measure the same parameter at the same site.",
"mode": "NULLABLE",
},
{
"name": "latitude",
"type": "FLOAT",
"description": "The monitoring site’s angular distance north of the equator measured in decimal degrees.",
"mode": "NULLABLE",
},
{
"name": "longitude",
"type": "FLOAT",
"description": "The monitoring site’s angular distance east of the prime meridian measured in decimal degrees.",
"mode": "NULLABLE",
},
{
"name": "datum",
"type": "STRING",
"description": "The Datum associated with the Latitude and Longitude measures.",
"mode": "NULLABLE",
},
{
"name": "parameter_name",
"type": "STRING",
"description": "The name or description assigned in AQS to the parameter measured by the monitor. Parameters may be pollutants or non-pollutants.",
"mode": "NULLABLE",
},
{
"name": "sample_duration",
"type": "STRING",
"description": "The length of time that air passes through the monitoring device before it is analyzed (measured). So, it represents an averaging period in the atmosphere (for example, a 24-hour sample duration draws ambient air over a collection filter for 24 straight hours). For continuous monitors, it can represent an averaging time of many samples (for example, a 1-hour value may be the average of four one-minute samples collected during each quarter of the hour).",
"mode": "NULLABLE",
},
{
"name": "pollutant_standard",
"type": "STRING",
"description": "A description of the ambient air quality standard rules used to aggregate statistics. (See description at beginning of document.)",
"mode": "NULLABLE",
},
{
"name": "date_local",
"type": "TIMESTAMP",
"description": "The calendar date for the summary. All daily summaries are for the local standard day (midnight to midnight) at the monitor.",
"mode": "NULLABLE",
},
{
"name": "units_of_measure",
"type": "STRING",
"description": "The unit of measure for the parameter. QAD always returns data in the standard units for the parameter. Submitters are allowed to report data in any unit and EPA converts to a standard unit so that we may use the data in calculations.",
"mode": "NULLABLE",
},
{
"name": "event_type",
"type": "STRING",
"description": "Indicates whether data measured during exceptional events are included in the summary. A wildfire is an example of an exceptional event; it is something that affects air quality, but the local agency has no control over. No Events means no events occurred. Events Included means events occurred and the data from them is included in the summary. Events Excluded means that events occurred but data form them is excluded from the summary. Concurred Events Excluded means that events occurred but only EPA concurred exclusions are removed from the summary. If an event occurred for the parameter in question, the data will have multiple records for each monitor.",
"mode": "NULLABLE",
},
{
"name": "observation_count",
"type": "INTEGER",
"description": "The number of observations (samples) taken during the day.",
"mode": "NULLABLE",
},
{
"name": "observation_percent",
"type": "FLOAT",
"description": "The percent representing the number of observations taken with respect to the number scheduled to be taken during the day. This is only calculated for monitors where measurements are required (e.g., only certain parameters).",
"mode": "NULLABLE",
},
{
"name": "arithmetic_mean",
"type": "FLOAT",
"description": "The average (arithmetic mean) value for the day.",
"mode": "NULLABLE",
},
{
"name": "first_max_value",
"type": "FLOAT",
"description": "The highest value for the day.",
"mode": "NULLABLE",
},
{
"name": "first_max_hour",
"type": "INTEGER",
"description": "The hour (on a 24-hour clock) when the highest value for the day (the previous field) was taken.",
"mode": "NULLABLE",
},
{
"name": "aqi",
"type": "INTEGER",
"description": "The Air Quality Index for the day for the pollutant, if applicable.",
"mode": "NULLABLE",
},
{
"name": "method_code",
"type": "INTEGER",
"description": "An internal system code indicating the method (processes, equipment, and protocols) used in gathering and measuring the sample. The method name is in the next column.",
"mode": "NULLABLE",
},
{
"name": "method_name",
"type": "STRING",
"description": "A short description of the processes, equipment, and protocols used in gathering and measuring the sample.",
"mode": "NULLABLE",
},
{
"name": "local_site_name",
"type": "STRING",
"description": "The name of the site (if any) given by the State, local, or tribal air pollution control agency that operates it.",
"mode": "NULLABLE",
},
{
"name": "address",
"type": "STRING",
"description": "The approximate street address of the monitoring site.",
"mode": "NULLABLE",
},
{
"name": "state_name",
"type": "STRING",
"description": "The name of the state where the monitoring site is located.",
"mode": "NULLABLE",
},
{
"name": "county_name",
"type": "STRING",
"description": "The name of the county where the monitoring site is located.",
"mode": "NULLABLE",
},
{
"name": "city_name",
"type": "STRING",
"description": "The name of the city where the monitoring site is located. This represents the legal incorporated boundaries of cities and not urban areas.",
"mode": "NULLABLE",
},
{
"name": "cbsa_name",
"type": "STRING",
"description": "The name of the core bases statistical area (metropolitan area) where the monitoring site is located.",
"mode": "NULLABLE",
},
{
"name": "date_of_last_change",
"type": "TIMESTAMP",
"description": "The date the last time any numeric values in this record were updated in the AQS data system.",
"mode": "NULLABLE",
},
],
)
transform_csv >> load_to_bq
|
py | 7df87f1fa420676244e1f69c09e41d9b0421a692 | import re
import os
import tkinter
import tkinter.messagebox
def get_title(title):
num = re.search(r'[0-9]{1,4}(?=\.\ )', title)
if num == None:
return None
num = num.group(0)
title = re.search(r'(?<=\.\ ).*', title)
if title == None:
return None
title = title.group(0)
return num + '.' + title.replace(' ','-')
def save(title,code):
if not os.path.exists('./Algorithms'):
os.mkdir('./Algorithms')
if not os.path.exists('./Algorithms/'+title):
os.mkdir('./Algorithms/'+title)
f = open('Algorithms/'+title+'/solution.cpp','w')
f.write(code)
f.close()
return
def pop_up_box():
"""
Use tkinter to pop up an input box; it provides confirm (Input) and clear functions. The entered title and code are read directly inside the nested handler functions.
"""
def inputstr():
nonlocal title
nonlocal code
title = get_title(entry.get())
if title == None:
tkinter.messagebox.showinfo(title='Update failed',message='Update failed')
return
code = text.get('0.0',tkinter.END)
save(title,code)
mess = 'Paste into the readme:\n'+'['+title+'](Algorithms/'+title+'/solution.cpp)'
tkinter.messagebox.showinfo(title='Update complete',message=mess) # return ok
def clearstr():
entry.delete(0,tkinter.END)
text.delete('0.0',tkinter.END)
pass
title = ''
code = ''
root = tkinter.Tk(className='Enter code') # name of the popup window
root.geometry('500x400') # set the popup size, w x h
entry = tkinter.Entry(root)
entry.pack() # place the entry widget
text = tkinter.Text(root, height=15) # the text box where the code is entered
text.pack()
btn1 = tkinter.Button(root, text='Input', command=inputstr) # pressing this button (Input) triggers the inputstr function
btn2 = tkinter.Button(root, text='Clear', command=clearstr)
# position the buttons
btn1.pack(side='bottom')
btn2.pack(side='bottom')
# once the setup above is done, actually show the popup
root.mainloop()
pop_up_box()
|
py | 7df87f36df86be8c09dd1744094597e0f00aaf3c | from rest_framework import permissions
import jwt
from rest_framework import exceptions
from django.conf import settings
from api.models import *
from django.core.exceptions import ObjectDoesNotExist
class CustomPermissions(permissions.BasePermission):
"""
Custom permission class that checks whether the user is the creator (owner) or belongs to the company
"""
def has_permission(self, request, view):
"""
Return `True` if permission is granted, `False` otherwise.
"""
if request.method in permissions.SAFE_METHODS:
return True
token = request.META["HTTP_AUTHORIZATION"].replace("Bearer ","")
company_id = view.kwargs["company_id"]
try:
payload = jwt.decode(token, settings.SECRET_KEY)
except jwt.ExpiredSignatureError:
msg = 'Signature expired. Please log in again.'
raise exceptions.AuthenticationFailed(msg)
except jwt.InvalidTokenError:
msg = 'Invalid token. Please log in again.'
raise exceptions.AuthenticationFailed(msg)
try:
user = User.objects.get(pk=payload['user_id'])
company = Company.objects.get(pk=company_id)
except User.DoesNotExist:
msg = 'No user matching this token was found.'
raise exceptions.AuthenticationFailed(msg)
except Company.DoesNotExist:
msg = 'Company not matching this id.'
raise exceptions.AuthenticationFailed(msg)
try:
user.client
return True
except ObjectDoesNotExist:
try:
owner = user.owner
if owner == company.owner:
return True
except ObjectDoesNotExist:
employee = user.employee
if employee in company.employee:
return True
return False
class CustomPermissionsEmployee(permissions.BasePermission):
"""
Custom permission class that checks whether the user owns the data being accessed
"""
def has_permission(self, request, view):
"""
Return `True` if permission is granted, `False` otherwise.
"""
token = request.META["HTTP_AUTHORIZATION"].replace("Bearer ","")
company_id = view.kwargs["company_id"]
try:
payload = jwt.decode(token, settings.SECRET_KEY)
except jwt.ExpiredSignatureError:
msg = 'Signature expired. Please log in again.'
raise exceptions.AuthenticationFailed(msg)
except jwt.InvalidTokenError:
msg = 'Invalid token. Please log in again.'
raise exceptions.AuthenticationFailed(msg)
try:
user = User.objects.get(pk=payload['user_id'])
company = Company.objects.get(pk=company_id)
except User.DoesNotExist:
msg = 'No user matching this token was found.'
raise exceptions.AuthenticationFailed(msg)
except Company.DoesNotExist:
msg = 'Company not matching this id.'
raise exceptions.AuthenticationFailed(msg)
try:
user.client
return False
except ObjectDoesNotExist:
try:
owner = user.owner
if owner == company.owner:
return True
except ObjectDoesNotExist:
employee = user.employee
if employee in company.employee:
return True
return False
class CustomPermissionsOrder(permissions.BasePermission):
"""
Custom permission class that checks whether the user is the creator or belongs to the company that owns the orders.
If the user is a client:
listing all orders (GET) is not allowed;
PUT, GET of a single order and DELETE are only allowed if the client created that order
"""
def has_permission(self, request, view):
"""
Return `True` if permission is granted, `False` otherwise.
"""
token = request.META["HTTP_AUTHORIZATION"].replace("Bearer ","")
company_id = view.kwargs["company_id"]
try:
payload = jwt.decode(token, settings.SECRET_KEY)
except jwt.ExpiredSignatureError:
msg = 'Signature expired. Please log in again.'
raise exceptions.AuthenticationFailed(msg)
except jwt.InvalidTokenError:
msg = 'Invalid token. Please log in again.'
raise exceptions.AuthenticationFailed(msg)
try:
user = User.objects.get(pk=payload['user_id'])
company = Company.objects.get(pk=company_id)
except User.DoesNotExist:
msg = 'No user matching this token was found.'
raise exceptions.AuthenticationFailed(msg)
except Company.DoesNotExist:
msg = 'Company not matching this id.'
raise exceptions.AuthenticationFailed(msg)
try:
user.client
order_id = view.kwargs["pk"]
order = Order.objects.get(pk=order_id)
if user == order.client:
return True
except KeyError:
"""
Most likely this is the client's "get all orders" request:
'pk' is not present in the request kwargs
"""
return True
except ObjectDoesNotExist:
try:
owner = user.owner
if owner == company.owner:
return True
except ObjectDoesNotExist:
employee = user.employee
if employee in company.employee:
return True
return False
class CustomPermissionsOrderTable(permissions.BasePermission):
"""
Custom permission class that checks whether the user is the creator or belongs to the company that owns the orders for that table.
A client cannot list orders by table at this time.
"""
def has_permission(self, request, view):
"""
Return `True` if permission is granted, `False` otherwise.
"""
token = request.META["HTTP_AUTHORIZATION"].replace("Bearer ","")
company_id = view.kwargs["company_id"]
try:
payload = jwt.decode(token, settings.SECRET_KEY)
except jwt.ExpiredSignatureError:
msg = 'Signature expired. Please log in again.'
raise exceptions.AuthenticationFailed(msg)
except jwt.InvalidTokenError:
msg = 'Invalid token. Please log in again.'
raise exceptions.AuthenticationFailed(msg)
try:
user = User.objects.get(pk=payload['user_id'])
company = Company.objects.get(pk=company_id)
except User.DoesNotExist:
msg = 'No user matching this token was found.'
raise exceptions.AuthenticationFailed(msg)
except Company.DoesNotExist:
msg = 'Company not matching this id.'
raise exceptions.AuthenticationFailed(msg)
try:
user.client
return False
except ObjectDoesNotExist:
try:
owner = user.owner
if owner == company.owner:
return True
except ObjectDoesNotExist:
employee = user.employee
if employee in company.employee:
return True
return False
class CustomPermissionsClient(permissions.BasePermission):
"""
Custom permission class that allows a client to access only their own information
"""
def has_permission(self, request, view):
"""
Return `True` if permission is granted, `False` otherwise.
"""
token = request.META["HTTP_AUTHORIZATION"].replace("Bearer ","")
client_id = view.kwargs["pk"]
try:
payload = jwt.decode(token, settings.SECRET_KEY)
except jwt.ExpiredSignatureError:
msg = 'Signature expired. Please log in again.'
raise exceptions.AuthenticationFailed(msg)
except jwt.InvalidTokenError:
msg = 'Invalid token. Please log in again.'
raise exceptions.AuthenticationFailed(msg)
try:
user = User.objects.get(pk=payload['user_id'])
except User.DoesNotExist:
msg = 'No user matching this token was found.'
raise exceptions.AuthenticationFailed(msg)
try:
user.client
if user.client.id == client_id:
return True
except ObjectDoesNotExist:
return False
return False
class CustomPermissionsOwner(permissions.BasePermission):
"""
Custom permission class that allows an owner to access only their own information
"""
def has_permission(self, request, view):
"""
Return `True` if permission is granted, `False` otherwise.
"""
token = request.META["HTTP_AUTHORIZATION"].replace("Bearer ","")
owner_id = view.kwargs["pk"]
try:
payload = jwt.decode(token, settings.SECRET_KEY)
except jwt.ExpiredSignatureError:
msg = 'Signature expired. Please log in again.'
raise exceptions.AuthenticationFailed(msg)
except jwt.InvalidTokenError:
msg = 'Invalid token. Please log in again.'
raise exceptions.AuthenticationFailed(msg)
try:
user = User.objects.get(pk=payload['user_id'])
except User.DoesNotExist:
msg = 'No user matching this token was found.'
raise exceptions.AuthenticationFailed(msg)
except Company.DoesNotExist:
msg = 'Company not matching this id.'
raise exceptions.AuthenticationFailed(msg)
try:
user.owner
if user.owner.id == owner_id:
return True
except ObjectDoesNotExist:
return False
return False
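# Hedged usage sketch (an addition, not part of the original file): the
# permission classes above are wired into a DRF view through
# `permission_classes`. The URL route must expose the kwargs each class
# reads (`company_id`, and `pk` where applicable). `OrderListView` and the
# omitted serializer are hypothetical placeholders for illustration only.
from rest_framework import generics

class OrderListView(generics.ListAPIView):
    permission_classes = [CustomPermissionsOrder]
    queryset = Order.objects.all()
    # serializer_class would point at the project's Order serializer (omitted here)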
|
py | 7df8810f2bdf6d43463346be88e88746f28ca4b1 | import re
class StrayLetterFlagger:
""" Flag up lines that contain only a single letter.
Example:
"[...]and wi his faird brekkis doun bews about.
T
Furth o that steid I went, [...]"
will flag up the 'T'.
NOTE: This is a workaround for an issue apparently caused by PDFMiner.
Without apparent reason a letter will be stripped of a word and placed
somewhere else in the document.
"""
name = 'StrayLetterFlagger'
desc = 'Flag up lines containing only a single letter'
def __call__(self, text: str):
lines = text.splitlines()
for i in range(len(lines)):
match = self.find_stray_letter(lines[i])
if match:
print('Stray letter found, line {}: {}'.format((i+1), lines[i]))
@staticmethod
def find_stray_letter(line: str):
""" Return match object for stray letter in line.
A stray letter is a letter that appears on a line entirely on its own.
Args:
line (str): Line to be searched.
Returns:
Match object: Match object if a letter is found on a line on its own,
None otherwise
"""
res = None
if len(line) == 1: # We're not interested for
res = re.search(r'\b[A-Za-z]\b', line) # matches within a longer line.
return res
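# Hedged usage sketch (an addition, not part of the original file): the
# flagger is callable, so it can be run directly over a chunk of extracted
# text; the sample below reuses the snippet from the class docstring.
if __name__ == '__main__':
    sample = "and wi his faird brekkis doun bews about.\nT\nFurth o that steid I went,"
    StrayLetterFlagger()(sample)  # prints: Stray letter found, line 2: T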
|
py | 7df881f7993343ee9e697e4cb1fa125c702bd0b5 | import numpy as np
import tensorflow as tf
from tensorflow import keras
import tensorflow_hub as hub
import tensorflow_addons as tfa
import efficientnet.tfkeras as efn
import os
import config
AUTO = tf.data.experimental.AUTOTUNE
def build_mini_featvec_model(model_name="efficientnet_B0", dim=384, lr=None, image_only=True,
compile_model=True, metadata_length=9, feature_vec_size=1000,
normalize_features=True, return_feature_model=False,
inp_present=False, inp_layer=None, pretrained_model_weights=None,
effnet_pretrained_weights="imagenet", final_model=False):
if not final_model:
print("Starting to build the Mini-Featvec Model.........")
model_dict = {
"efficientnet_B0": efn.EfficientNetB0,
"efficientnet_B1": efn.EfficientNetB1,
"efficientnet_B2": efn.EfficientNetB2,
"efficientnet_B3": efn.EfficientNetB3,
"efficientnet_B4": efn.EfficientNetB4,
"efficientnet_B5": efn.EfficientNetB5,
"efficientnet_B6": efn.EfficientNetB6,
"resnet50": tf.keras.applications.ResNet50,
"vgg19": tf.keras.applications.VGG19,
"Xception": tf.keras.applications.Xception,
}
# For Image Input
if inp_present:
img_inp = inp_layer
else:
img_inp = keras.layers.Input(shape=(dim, dim, 3), name="img_input")
base = model_dict[model_name](input_shape=(dim,dim,3),
weights=effnet_pretrained_weights,
include_top=False)(img_inp)
if not final_model:
print(f"Created the {model_name} base......")
pooling_layer = keras.layers.GlobalAveragePooling2D()(base)
if not final_model:
print("Created the pooling layer.......")
if not image_only:
# For metadata input
metadata_inp = keras.layers.Input(shape=(metadata_length,), name="metadata_input")
dense_1 = keras.layers.Dense(512)(metadata_inp)
dense_2 = keras.layers.Dense(256)(dense_1)
dense_3 = keras.layers.Dense(64)(dense_2)
# Concating the pooled features and metadata
concat = keras.layers.Concatenate()([dense_3, pooling_layer])
# A dense layer which will try to find a relation between image features and metadata
feature_layer = keras.layers.Dense(feature_vec_size, activation="selu", name="featvec")(concat)
# Normalizing the features
normalized_feature = keras.layers.BatchNormalization(name="norm_featvec")(feature_layer)
# Output
output = keras.layers.Dense(1, activation="sigmoid", name="output")(normalized_feature)
else:
feature_layer = pooling_layer
normalized_feature = keras.layers.BatchNormalization(name="norm_featvec")(pooling_layer)
output = keras.layers.Dense(1,activation='sigmoid')(pooling_layer)
if not final_model:
print("Created all the layers.........")
if normalize_features:
feat_output = normalized_feature
else:
feat_output = feature_layer
if image_only:
if return_feature_model:
featext_model = keras.Model(inputs=[img_inp], outputs=[feat_output])
model = keras.Model(inputs=[img_inp], outputs=[output])
else:
if return_feature_model:
featext_model = keras.Model(inputs=[metadata_inp, img_inp], outputs=[feat_output])
model = keras.Model(inputs=[metadata_inp, img_inp], outputs=[output])
if not final_model:
print("Built the model..........")
if pretrained_model_weights:
model.load_weights(pretrained_model_weights)
print("Loaded the pretrained weights...........")
if compile_model:
if lr:
optimizer = keras.optimizers.Nadam(lr)
else:
optimizer = keras.optimizers.Nadam()
model.compile(loss=tfa.losses.SigmoidFocalCrossEntropy(),
optimizer=optimizer,
metrics=['AUC'])
if return_feature_model:
return model, featext_model, output, feat_output
else:
return model
def build_complete_ensemble_model(featvec_model_paths, inp_present=False, input_layer=None, dim=384,
lr=None, normalize_features=True, final_model=True,
compile_ensemble_model=True, concat_layer_name=None,
featvec_layer_name=None):
models = list()
if not inp_present:
input_layer = keras.layers.Input(shape=(dim, dim, 3), name="img_input")
for model_path in featvec_model_paths:
full_filename = os.path.basename(model_path)
if model_path.find("feat") == -1:
is_feat_path = False
if len(full_filename) == 28:
model_fold_name = f"{full_filename[0:15]}_{full_filename[-8]}_full"
fold = model_fold_name[-6]
else:
model_fold_name = f"{full_filename[0:15]}_{full_filename[-9:-7]}_full"
fold = model_fold_name[-7:-5]
model_name = model_fold_name[:15]
model_version = model_name[-2:]
else:
is_feat_path = True
model_fold_name = full_filename[5:-3]
model_name = model_fold_name[:-2]
model_version = model_name[-2:]
fold = model_fold_name[-1]
full_model, featvec_model, full_model_output, featvec_output = build_mini_featvec_model(model_name=model_name, dim=dim,
feature_vec_size=2000, return_feature_model=True,
image_only=config.IMG_ONLY, inp_present=True,
inp_layer=input_layer,
normalize_features=normalize_features,
effnet_pretrained_weights=None,
final_model=final_model,
compile_model=False)
if is_feat_path:
if compile_ensemble_model:
featvec_model.load_weights(model_path)
featvec_model.layers[1]._name = model_fold_name
featvec_model.layers[2]._name = f"global_avg_pooling_2d_{model_version}_{fold}"
featvec_model.layers[3]._name = f"{featvec_model.layers[3].name}_{model_version}_{fold}"
featvec_model.trainable = False
models.append(featvec_output)
else:
if compile_ensemble_model:
full_model.load_weights(model_path)
full_model.layers[1]._name = model_fold_name
##--
full_model.layers[2]._name = f"global_avg_pooling_2d_{model_version}_{fold}"
full_model.layers[3]._name = f"{full_model.layers[3].name}_{model_version}_{fold}"
##
effnet_layer = full_model.layers[1](input_layer)
global_avg_layer = full_model.layers[2](effnet_layer)
##--
#featvec_output = keras.layers.BatchNormalization()(global_avg_layer)
if normalize_features:
normalize_layer = full_model.layers[3]
featvec_output = normalize_layer(global_avg_layer)
##
else:
featvec_output = global_avg_layer
"""
if normalize_features:
featvec_output = keras.layers.BatchNormalization(
name=f"{featvec_model.layers[3].name}_{model_version}_{fold}")(global_avg_layer)
"""
new_featvec_model = keras.Model(inputs=input_layer, outputs=featvec_output)
if normalize_features:
non_trainable_layers = new_featvec_model.layers[:-1]
else:
non_trainable_layers = new_featvec_model.layers
for layer in non_trainable_layers:
layer.trainable = False
models.append(featvec_output)
#keras.backend.clear_session()
if concat_layer_name:
concat_layer = keras.layers.Concatenate(name=concat_layer_name)(models)
else:
concat_layer = keras.layers.Concatenate()(models)
if featvec_layer_name:
dense_3 = keras.layers.Dense(1024, activation="selu",
name=featvec_layer_name)(concat_layer)
else:
dense_3 = keras.layers.Dense(1024, activation="selu")(concat_layer)
output_layer = keras.layers.Dense(1, activation="sigmoid")(dense_3)
featvec_ensemble_model = keras.Model(inputs=[input_layer], outputs=[dense_3])
ensemble_model = keras.Model(inputs=[input_layer], outputs=[output_layer])
keras.utils.plot_model(ensemble_model)
#if fine_tuning:
# ensemble_model.load_weights(ensemble_weights)
if compile_ensemble_model:
if lr:
optimizer = keras.optimizers.Nadam(lr)
else:
optimizer = keras.optimizers.Nadam()
ensemble_model.compile(loss=tfa.losses.SigmoidFocalCrossEntropy(),
optimizer=optimizer,
metrics=['AUC'])
return ensemble_model, featvec_ensemble_model, output_layer, dense_3
def build_ensemble_of_ensemble(all_weights_list, dim=384, final_model=True,
lr=None, normalize_features=True):
ensemble_models_list = []
input_layer = keras.layers.Input(shape=(dim, dim, 3), name="img_input")
for i, ensemble_n_mini_featvec_weights in enumerate(all_weights_list):
ensemble_weights, mini_featvec_weights = ensemble_n_mini_featvec_weights
inner_concat_layer_name = f"concat_layer_{i}"
inner_featvec_layer_name = f"inner_featvec_layer_{i}"
ensemble_model, ensemble_featvec, ensemble_output, featvec_ensemble_output = build_complete_ensemble_model(mini_featvec_weights,
dim=384,
compile_ensemble_model=False,
inp_present=True,
input_layer=input_layer,
concat_layer_name=inner_concat_layer_name,
featvec_layer_name=inner_featvec_layer_name)
ensemble_model.load_weights(ensemble_weights)
for layer in ensemble_model.layers:
layer.trainable = False
ensemble_models_list.append(featvec_ensemble_output)
concat_layer = keras.layers.Concatenate()(ensemble_models_list)
featvec_layer = keras.layers.Dense(1024, activation="selu")(concat_layer)
output_layer = keras.layers.Dense(1, activation="sigmoid")(featvec_layer)
featvec_ens_ens_model = keras.Model(inputs=[input_layer], outputs=[featvec_layer])
complete_ens_ens_model = keras.Model(inputs=[input_layer], outputs=[output_layer])
if lr:
optimizer = keras.optimizers.Nadam(lr)
else:
optimizer = keras.optimizers.Nadam()
complete_ens_ens_model.compile(loss=tfa.losses.SigmoidFocalCrossEntropy(),
optimizer=optimizer,
metrics=['AUC'])
keras.utils.plot_model(complete_ens_ens_model)
return complete_ens_ens_model, featvec_ens_ens_model, output_layer, featvec_layer
# ---------------------------------------------------------BELOW FUNCTIONS ARE FOR IMAGE SEGMENTATION:---------------------------------------------------------
def conv2d_block(input_tensor, n_filters, kernel_size = 3, batchnorm = True):
"""Function to add 2 convolutional layers with the parameters passed to it"""
# first layer
x = keras.layers.Conv2D(filters = n_filters, kernel_size = (kernel_size, kernel_size),\
kernel_initializer = 'he_normal', padding = 'same')(input_tensor)
if batchnorm:
x = keras.layers.BatchNormalization()(x)
x = keras.layers.Activation('relu')(x)
# second layer
x = keras.layers.Conv2D(filters = n_filters, kernel_size = (kernel_size, kernel_size),\
kernel_initializer = 'he_normal', padding = 'same')(x)
if batchnorm:
x = keras.layers.BatchNormalization()(x)
x = keras.layers.Activation('relu')(x)
return x
def get_unet(input_img, n_filters = 16, dropout = 0.1, batchnorm = True, output_channels=3):
# Contracting Path
c1 = conv2d_block(input_img, n_filters * 1, kernel_size = 3, batchnorm = batchnorm)
p1 = keras.layers.MaxPooling2D((2, 2))(c1)
p1 = keras.layers.Dropout(dropout)(p1)
c2 = conv2d_block(p1, n_filters * 2, kernel_size = 3, batchnorm = batchnorm)
p2 = keras.layers.MaxPooling2D((2, 2))(c2)
p2 = keras.layers.Dropout(dropout)(p2)
c3 = conv2d_block(p2, n_filters * 4, kernel_size = 3, batchnorm = batchnorm)
p3 = keras.layers.MaxPooling2D((2, 2))(c3)
p3 = keras.layers.Dropout(dropout)(p3)
c4 = conv2d_block(p3, n_filters * 8, kernel_size = 3, batchnorm = batchnorm)
p4 = keras.layers.MaxPooling2D((2, 2))(c4)
p4 = keras.layers.Dropout(dropout)(p4)
c5 = conv2d_block(p4, n_filters = n_filters * 16, kernel_size = 3, batchnorm = batchnorm)
# Expansive Path
u6 = keras.layers.Conv2DTranspose(n_filters * 8, (3, 3), strides = (2, 2), padding = 'same')(c5)
u6 = keras.layers.Concatenate()([u6, c4])
u6 = keras.layers.Dropout(dropout)(u6)
c6 = conv2d_block(u6, n_filters * 8, kernel_size = 3, batchnorm = batchnorm)
u7 = keras.layers.Conv2DTranspose(n_filters * 4, (3, 3), strides = (2, 2), padding = 'same')(c6)
u7 = keras.layers.Concatenate()([u7, c3])
u7 = keras.layers.Dropout(dropout)(u7)
c7 = conv2d_block(u7, n_filters * 4, kernel_size = 3, batchnorm = batchnorm)
u8 = keras.layers.Conv2DTranspose(n_filters * 2, (3, 3), strides = (2, 2), padding = 'same')(c7)
u8 = keras.layers.Concatenate()([u8, c2])
u8 = keras.layers.Dropout(dropout)(u8)
c8 = conv2d_block(u8, n_filters * 2, kernel_size = 3, batchnorm = batchnorm)
u9 = keras.layers.Conv2DTranspose(n_filters * 1, (3, 3), strides = (2, 2), padding = 'same')(c8)
u9 = keras.layers.Concatenate()([u9, c1])
u9 = keras.layers.Dropout(dropout)(u9)
c9 = conv2d_block(u9, n_filters * 1, kernel_size = 3, batchnorm = batchnorm)
seg_output = keras.layers.Conv2D(output_channels, (1, 1), activation='sigmoid', name="seg_output")(c9)
model = keras.Model(inputs=[input_img], outputs=[seg_output])
return model, seg_output |
py | 7df881fe78682af31a8a4e49d6455a6438d231de | # Copyright 2018-2019 Faculty Science Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import json
import errno
from datetime import datetime
from collections import namedtuple
import pytz
from marshmallow import Schema, fields, post_load, ValidationError
AccessToken = namedtuple("AccessToken", ["token", "expires_at"])
class AccessTokenSchema(Schema):
token = fields.String(required=True)
expires_at = fields.DateTime(data_key="expiresAt", required=True)
@post_load
def make_access_token(self, data):
return AccessToken(**data)
class AccessTokenStore(object):
def __init__(self, tokens=None):
self.tokens = tokens or {}
@staticmethod
def _hash_profile(profile):
return str(hash(profile))
def __getitem__(self, profile):
return self.tokens[self._hash_profile(profile)]
def __setitem__(self, profile, access_token):
self.tokens[self._hash_profile(profile)] = access_token
def get(self, profile):
try:
return self[profile]
except KeyError:
return None
class AccessTokenStoreSchema(Schema):
tokens = fields.Dict(
keys=fields.String(),
values=fields.Nested(AccessTokenSchema),
required=True,
)
@post_load
def make_access_token_store(self, data):
return AccessTokenStore(**data)
def _is_valid_access_token(access_token_or_none):
if access_token_or_none is None:
return False
else:
return access_token_or_none.expires_at >= datetime.now(tz=pytz.utc)
class AccessTokenMemoryCache(object):
def __init__(self):
self._store = AccessTokenStore()
def get(self, profile):
access_token = self._store.get(profile)
return access_token if _is_valid_access_token(access_token) else None
def add(self, profile, access_token):
self._store[profile] = access_token
def _ensure_directory_exists(path, mode):
try:
os.makedirs(path, mode=mode)
except OSError as e:
if e.errno == errno.EEXIST:
# Directory already exists
pass
else:
raise
def _default_token_cache_path():
xdg_cache_home = os.environ.get("XDG_CACHE_HOME")
if not xdg_cache_home:
xdg_cache_home = os.path.expanduser("~/.cache")
return os.path.join(xdg_cache_home, "faculty", "token-cache.json")
class AccessTokenFileSystemCache(object):
def __init__(self, cache_path=None):
if cache_path is None:
self.cache_path = _default_token_cache_path()
else:
self.cache_path = str(cache_path)
self._store = None
def _load_from_disk(self):
try:
with open(self.cache_path, "r") as fp:
data = json.load(fp)
self._store = AccessTokenStoreSchema().load(data)
except IOError as e:
if e.errno == errno.ENOENT:
# File does not exist - initialise empty store
self._store = AccessTokenStore()
else:
raise
except (ValueError, ValidationError):
# File is of invalid format - reset with empty store
self._store = AccessTokenStore()
def _persist_to_disk(self):
dirname = os.path.dirname(self.cache_path)
_ensure_directory_exists(dirname, mode=0o700)
data = AccessTokenStoreSchema().dump(self._store)
with open(self.cache_path, "w") as fp:
json.dump(data, fp, separators=(",", ":"))
def get(self, profile):
if self._store is None:
self._load_from_disk()
access_token = self._store.get(profile)
return access_token if _is_valid_access_token(access_token) else None
def add(self, profile, access_token):
if self._store is None:
self._load_from_disk()
self._store[profile] = access_token
self._persist_to_disk()
|
py | 7df8824293c60861562d68f31844e13dfd1d05a1 | import os
import argparse
import random
import sys
import numpy, scipy, sklearn
import tensorflow as tf
import numpy as np
from misc.utils import ValidLoss, load_lr, load_valid_loss, save_codes_and_config, compute_cos_pairwise_eer
from model.trainer_multitask import Trainer
from dataset.data_loader import KaldiDataRandomQueue
from dataset.kaldi_io import FeatureReader
from six.moves import range
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--cont", action="store_true", help="Continue training from an existing model.")
parser.add_argument("--config", type=str, help="The configuration file.")
parser.add_argument("train_dir", type=str, help="The data directory of the training set.")
parser.add_argument("train_spklist", type=str, help="The spklist file maps the TRAINING speakers to the indices.")
parser.add_argument("valid_dir", type=str, help="The data directory of the validation set.")
parser.add_argument("valid_spklist", type=str, help="The spklist maps the VALID speakers to the indices.")
parser.add_argument("model", type=str, help="The output model directory.")
if __name__ == '__main__':
tf.logging.set_verbosity(tf.logging.INFO)
args = parser.parse_args()
params = save_codes_and_config(args.cont, args.model, args.config)
# The model directory always has a folder named nnet
model_dir = os.path.join(args.model, "nnet")
# Set the random seed. The random operations may appear in data input, batch forming, etc.
tf.set_random_seed(params.seed)
random.seed(params.seed)
np.random.seed(params.seed)
if args.cont:
# If we continue training, we can figure out how much steps the model has been trained,
# using the index of the checkpoint
import re
ckpt = tf.train.get_checkpoint_state(model_dir)
if ckpt and ckpt.model_checkpoint_path:
ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
step = int(next(re.finditer("(\d+)(?!.*\d)", ckpt_name)).group(0))
else:
sys.exit("Cannot load checkpoint from %s" % model_dir)
start_epoch = int(step / params.num_steps_per_epoch)
else:
start_epoch = 0
learning_rate = params.learning_rate
learning_rate_array = []
if os.path.isfile(str(learning_rate)):
with open(str(learning_rate), "r") as f:
for line in f.readlines():
learning_rate_array.append(float(line.strip()))
# The size of the file should be large enough
assert len(learning_rate_array) > params.num_epochs, "The learning rate file is shorter than the num of epochs."
tf.logging.info("Using specified learning rate decay strategy.")
else:
# The learning rate is determined by the training process. However, if we continue training,
# the code doesn't know the previous learning rate if it is tuned using the validation set.
# To solve that, just save the learning rate to an individual file.
if os.path.isfile(os.path.join(model_dir, "learning_rate")):
learning_rate_array = load_lr(os.path.join(model_dir, "learning_rate"))
assert len(learning_rate_array) == start_epoch + 1, "Not enough learning rates in the learning_rate file."
else:
learning_rate_array = [float(learning_rate)] * (start_epoch + 1)
dim = FeatureReader(args.train_dir).get_dim()
with open(os.path.join(model_dir, "feature_dim"), "w") as f:
f.write("%d\n" % dim)
num_total_train_speakers = KaldiDataRandomQueue(args.train_dir, args.train_spklist).num_total_speakers
tf.logging.info("There are %d speakers in the training set and the dim is %d" % (num_total_train_speakers, dim))
# Load the history valid loss
min_valid_loss = ValidLoss()
if os.path.isfile(os.path.join(model_dir, "valid_loss")):
min_valid_loss = load_valid_loss(os.path.join(model_dir, "valid_loss"))
# The trainer is used to control the training process
trainer = Trainer(params, args.model)
trainer.build("train",
dim=dim,
loss_type=params.loss_func,
num_speakers=num_total_train_speakers)
trainer.build("valid",
dim=dim,
loss_type=params.loss_func,
num_speakers=num_total_train_speakers)
if "early_stop_epochs" not in params.dict:
params.dict["early_stop_epochs"] = 10
if "min_learning_rate" not in params.dict:
params.dict["min_learning_rate"] = 1e-5
for epoch in range(start_epoch, params.num_epochs):
trainer.train(args.train_dir, args.train_spklist, learning_rate_array[epoch])
valid_loss, valid_embeddings, valid_labels = trainer.valid(args.valid_dir, args.valid_spklist,
batch_type=params.batch_type,
output_embeddings=True)
eer = compute_cos_pairwise_eer(valid_embeddings, valid_labels)
tf.logging.info("[INFO] Valid EER: %f" % eer)
# Tune the learning rate if necessary.
if not os.path.isfile(str(learning_rate)):
new_learning_rate = learning_rate_array[epoch]
if valid_loss < min_valid_loss.min_loss:
min_valid_loss.min_loss = valid_loss
min_valid_loss.min_loss_epoch = epoch
else:
if epoch - min_valid_loss.min_loss_epoch >= params.reduce_lr_epochs:
new_learning_rate /= 2
# If the valid loss in the next epoch still does not reduce, the learning rate will keep reducing.
tf.logging.info("After epoch %d, no improvement. Reduce the learning rate to %.8f" % (
min_valid_loss.min_loss_epoch, new_learning_rate))
min_valid_loss.min_loss_epoch += 2
learning_rate_array.append(new_learning_rate)
if epoch == 0:
# If this is the first epoch, the first learning rate should be recorded
with open(os.path.join(model_dir, "learning_rate"), "a") as f:
f.write("0 %.8f\n" % learning_rate_array[0])
# Save the learning rate and loss for each epoch.
with open(os.path.join(model_dir, "learning_rate"), "a") as f:
f.write("%d %.8f\n" % (epoch + 1, learning_rate_array[epoch + 1]))
with open(os.path.join(model_dir, "valid_loss"), "a") as f:
f.write("%d %f %f\n" % (epoch, valid_loss, eer))
if not os.path.isfile(str(learning_rate)):
# If the learning rate is too small, the training is actually get stuck.
# Also early stop is applied.
# This is only applied when the learning rate is not specified.
if learning_rate_array[epoch + 1] < (params.min_learning_rate - 1e-12) or \
epoch - min_valid_loss.min_loss_epoch >= params.early_stop_epochs:
break
# Close the session before we exit.
trainer.close()
|
py | 7df88299c87fc2a7979c24f69420e235287900fe | from django.core.management.base import BaseCommand, CommandError
from accounts.models import User, Authority
class Command(BaseCommand):
args = 'user_id to_authority_id'
help = 'change current authority of given user'
def handle(self, *args, **options):
if len(args) < 2:
raise CommandError('Please provide user_id and to_authority_id')
user_id = args[0]
to_authority_id = args[1]
current_user = User.objects.get(pk=user_id)
auths = current_user.authority_users.all()
if len(auths) > 1:
raise CommandError('Number of authorities is more than 1')
current_authority = auths[0]
target_authority = Authority.objects.get(pk=to_authority_id)
current_authority.users.remove(current_user)
target_authority.users.add(current_user) |
py | 7df882c0bb31f0032272a570a0113e2524a392bf | #!/usr/bin/python3
'''A set of convenience functions for converting among different phone codes.
Usage:
import phonecodes
print(phonecodes.CODES) # the known phone codes
print(phonecodes.LANGUAGES) # the known languages
s1 = phonecodes.convert(s0, code0, code1, language)
# s0 and s1 are strings containing individual symbols
# code0 and code1 must be members of phonecodes.CODES, of course
# language must be a member of phonecodes.LANGUAGES, of course
# (but not all languages are known for all phone codes)
l1 = phonecodes.convertlist(l0, code0, code1, language)
# l0, l1 are lists of symbols
phonecodes.vowels
phonecodes.consonants
# list known IPA symbols of vowels, consonants.
# for other tables, see phonecode_tables.py
'''
import re,sys
import phonecodes.src.phonecode_tables as phonecode_tables
CODES=set(('ipa','arpabet','xsampa','disc','callhome'))
LANGUAGES=set(('eng','deu','nld','arz','cmn','spa','yue','lao','vie'))
vowels = phonecode_tables._ipa_vowels
consonants = phonecode_tables._ipa_consonants
stressmarkers = phonecode_tables._ipa_stressmarkers
tonecharacters = phonecode_tables._ipa_tonecharacters
diacritics = phonecode_tables._ipa_diacritics
#####################################################################
def translate_string(s, d):
'''(tl,ttf)=translate_string(s,d):
Translate the string, s, using symbols from dict, d, as:
1. Min # untranslatable symbols, then 2. Min # symbols.
tl = list of translated or untranslated symbols.
ttf[n] = True if tl[n] was translated, else ttf[n]=False.
'''
N = len(s)
symcost = 1 # path cost per translated symbol
oovcost = 10 # path cost per untranslatable symbol
maxsym = max(len(k) for k in d.keys()) # max input symbol length
# (pathcost to s[(n-m):n], n-m, translation[s[(n-m):m]], True/False)
lattice = [ (0,0,'',True) ]
for n in range(1,N+1):
# Initialize on the assumption that s[n-1] is untranslatable
lattice.append((oovcost+lattice[n-1][0],n-1,s[(n-1):n],False))
# Search for translatable sequences s[(n-m):n], and keep the best
for m in range(1,min(n+1,maxsym+1)):
if s[(n-m):n] in d and symcost+lattice[n-m][0] < lattice[n][0]:
lattice[n] = (symcost+lattice[n-m][0],n-m,d[s[(n-m):n]],True)
# Back-trace
tl = []
translated = []
n = N
while n > 0:
tl.append(lattice[n][2])
translated.append(lattice[n][3])
n = lattice[n][1]
return((tl[::-1], translated[::-1]))
def attach_tones_to_vowels(il, tones, vowels, searchstep, catdir):
'''Return a copy of il, with each tone attached to nearest vowel if any.
searchstep=1 means search for next vowel, searchstep=-1 means prev vowel.
catdir>=0 means concatenate after vowel, catdir<0 means cat before vowel.
Tones are not combined, except those also included in the vowels set.
'''
ol = il.copy()
v = 0 if searchstep>0 else len(ol)-1
t = -1
while 0<=v and v<len(ol):
if (ol[v] in vowels or (len(ol[v])>1 and ol[v][0] in vowels)) and t>=0:
ol[v]= ol[v]+ol[t] if catdir>=0 else ol[t]+ol[v]
ol = ol[0:t] + ol[(t+1):] # Remove the tone
t = -1 # Done with that tone
if v<len(ol) and ol[v] in tones:
t = v
v += searchstep
return(ol)
#####################################################################
# X-SAMPA
def ipa2xsampa(x):
'''Attempt to return X-SAMPA equivalent of an IPA phone x.'''
(tl,ttf) = translate_string(x, phonecode_tables._ipa2xsampa)
return(''.join(tl))
def xsampa2ipa(x):
'''Return the IPA equivalent of X-SAMPA phone x.'''
(tl,ttf) = translate_string(x, phonecode_tables._xsampa_and_diac2ipa)
return(''.join(tl))
######################################################################
# Language-dependent lexical tones and stress markers
def tone2ipa(n, alpha3):
return(phonecode_tables._tone2ipa[alpha3][n])
#####################################################################
# DISC, the system used by CELEX
def disc2ipa(x, alpha3):
'''Convert DISC symbol x into IPA, for language L'''
if alpha3=='nld':
(tl,ttf) = translate_string(x,phonecode_tables._disc2ipa_dutch)
return(''.join(tl))
elif alpha3=='eng':
(tl,ttf) = translate_string(x,phonecode_tables._disc2ipa_english)
return(''.join(tl))
else:
(tl,ttf) = translate_string(x,phonecode_tables._disc2ipa)
return(''.join(tl))
def ipa2disc(x):
'''Convert IPA symbol x into DISC'''
(tl,ttf) = translate_string(x,phonecode_tables._ipa2disc)
return(''.join(tl))
def ipa2disc_old(x):
'''Convert IPA symbol x into DISC'''
# Convert whole thing if possible; otherwise try prefix+vowel; else quit
if x in phonecode_tables._ipa2disc:
return(phonecode_tables._ipa2disc[x])
elif x[0] in phonecode_tables._ipa2disc and x[1:] in phonecode_tables._ipa2disc:
return(phonecode_tables._ipa2disc[x[0]]+phonecode_tables._ipa2disc[x[1:]])
else:
raise KeyError('Unknown IPA symbol %s'%(x))
#######################################################################
# Callhome phone codes
def callhome2ipa(x,alpha3):
'''Convert callhome phone symbol x into IPA for language alpha3'''
(il,ttf)=translate_string(x,phonecode_tables._callhome2ipa[alpha3])
if alpha3=='cmn':
ol=attach_tones_to_vowels(il,phonecode_tables._ipa_tones,phonecode_tables._ipa_vowels,-1,1)
else:
ol=attach_tones_to_vowels(il,phonecode_tables._ipa_stressmarkers,
phonecode_tables._ipa_vowels,-1,-1)
return(''.join(ol))
def ipa2callhome(x,alpha3):
'''Convert IPA symbol x into callhome notation, for language alpha3'''
(il,ttf)=translate_string(x,phonecode_tables._ipa2callhome[alpha3])
if alpha3=='cmn':
ol=attach_tones_to_vowels(il,'012345',phonecode_tables._callhome_vowels['cmn'],-1,1)
else:
ol=attach_tones_to_vowels(il,'012',phonecode_tables._callhome_vowels[alpha3],1,1)
return(''.join(ol))
#########################################################################
# ARPABET and TIMIT
def arpabet2ipa(x):
'''Convert ARPABET symbol X to IPA'''
(il,ttf)=translate_string(x,phonecode_tables._arpabet2ipa)
ol=attach_tones_to_vowels(il,phonecode_tables._ipa_stressmarkers,
phonecode_tables._ipa_vowels,-1,-1)
return(''.join(ol))
def ipa2arpabet(x):
'''Convert IPA symbols to ARPABET'''
(il,ttf)=translate_string(x,phonecode_tables._ipa2arpabet)
ol=attach_tones_to_vowels(il,'012',phonecode_tables._arpabet_vowels,1,1)
return(''.join(ol))
def timit2ipa(x):
'''Convert TIMIT phone codes to IPA'''
x = x.upper()
(il,ttf)=translate_string(x,phonecode_tables._timit2ipa)
ol=attach_tones_to_vowels(il,phonecode_tables._ipa_stressmarkers,
phonecode_tables._ipa_vowels,-1,-1)
return(''.join(ol))
#######################################################################
# phonecodes.convert and phonecodes.convertlist
# are used to convert symbols and lists of symbols, respectively,
# to or from IPA, by calling appropriate other functions.
#
_convertfuncs = {
'arpabet': (arpabet2ipa, ipa2arpabet),
'xsampa': (xsampa2ipa, ipa2xsampa),
'disc': (disc2ipa, ipa2disc),
'callhome': (callhome2ipa,ipa2callhome)
}
def convert(s0, c0, c1, language):
if c0=='ipa' and c1!='ipa':
x=_convertfuncs[c1][1](s0, language)
return(x)
elif c0!='ipa' and c1=='ipa':
return(_convertfuncs[c0][0](s0, language))
else:
raise RuntimeError('must convert to/from ipa, not %s to %s'%(c0,c1))
def convertlist(l0, c0, c1, language):
return([ convert(s0,c0,c1,language) for s0 in l0 ])
|
py | 7df882d0879ec9dcb170e1006718f9971eb341c6 | # -*- coding: utf-8 -*-
"""TextAnaly1.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1zen1h2ly517z9HEwTFdksGymM_CPfYEC
"""
import nltk
from nltk import sent_tokenize,word_tokenize
nltk.download('punkt')
text= "Hello iam doing text analytics here"
a=sent_tokenize(text)
b=word_tokenize(text)
a
b
# Frequency Distribution
from nltk import FreqDist
f=FreqDist(b)
f
# stop words
from nltk.corpus import stopwords
nltk.download('stopwords')
stop_words=list(stopwords.words('english'))
stop_words
# all the stop words will be omitted from the word tokens in b
new_list=[]
for i in b:
if i not in stop_words:
new_list.append(i)
print(new_list)
# Lexical Normalization
# stemming and lemmatization
from nltk.stem import PorterStemmer
from nltk.stem.wordnet import WordNetLemmatizer
porter_stem=PorterStemmer()
new_text=[]
for i in b:
if i not in stop_words:
new_text.append(i)
print(new_text)
# lemmatization
lemma=WordNetLemmatizer()
nltk.download('wordnet')
word="trying"
a=lemma.lemmatize(word,'v')
b=porter_stem.stem(word)
a
b
nltk.download('averaged_perceptron_tagger')
sentence="hello, iam new to nlp, nice to see you"
token=word_tokenize(sentence)
nltk.pos_tag(token)
|
py | 7df883c5480c8438353dd879204ebd7d34e387fc | """Kullback Leibler divergence estimates"""
from collections import namedtuple
from numpy import array, sqrt
class KLEstimate(namedtuple('KLEstimate', ['estimate', 'se'])):
"""Container for return value from kullback_leibler.
`estimate`: The estimated KL divergence, mean of the sampled integrand
values.
`se`: Estimated standard deviation of the samples from which the mean was
    calculated. In general the mean and variance of log(P(x)) are not known to
    be finite, but they will be for any distribution crosscat generates at the
moment, because they all have finite entropy. Hence the Central Limit
Theorem applies at some sample size, and this can in principle be used as a
rough guide to the precision of the estimate. In tests comparing the
univariate gaussians N(0,1) and N(0,2), it tended to have a visually
obvious bias for sample sizes below 100,000.
"""
pass
def kullback_leibler(postsample, postlpdf, complpdf):
"""Estimate KL-divergence of sample (a collection of values) w.r.t. known pdf,
`complpdf`, which returns the density when passed a sample. Return value is
a `KLEstimate`. The attribute you probably care most about is
`KLEstimate.estimate`. See `KLEstimate.__doc__` for more details. The
`postsample` argument is an approximate sample from the distribution
approximately represented by `postlpdf`.
"""
klsamples = array([postlpdf(x) - complpdf(x) for x in postsample])
std = klsamples.std() / sqrt(len(klsamples))
return KLEstimate(estimate=klsamples.mean(), se=std)
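# Usage sketch (not part of the module): reproduce the comparison mentioned in
# the KLEstimate docstring, N(0,1) vs N(0,2) (read here as standard deviation 2),
# using plain numpy log-densities. The analytic divergence is log(2) - 3/8 ~ 0.318,
# so kl.estimate should land near that value for a large sample.
if __name__ == '__main__':
    from numpy import log, pi
    from numpy.random import normal
    def normal_lpdf(mu, sigma):
        return lambda x: -0.5 * log(2 * pi * sigma ** 2) - (x - mu) ** 2 / (2 * sigma ** 2)
    sample = normal(0.0, 1.0, size=100000)
    kl = kullback_leibler(sample, normal_lpdf(0.0, 1.0), normal_lpdf(0.0, 2.0))
    print(kl.estimate, kl.se)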
|
py | 7df884c298d2f23c9b2818e4ecee93251b051174 | from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager, \
PermissionsMixin
class UserManager(BaseUserManager):
def create_user(self, email, password=None, **extra_fields):
"""Creates and saves a new user"""
if not email:
            raise ValueError('Users must have an email address')
user = self.model(email=self.normalize_email(email), **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, password):
"""Create and saves a new superuser"""
user = self.create_user(email, password)
user.is_staff = True
user.is_superuser = True
user.save(using=self._db)
return user
class User(AbstractBaseUser, PermissionsMixin):
"""Custom user model that supports using email instead of username"""
email = models.EmailField(max_length=255, unique=True)
name = models.CharField(max_length=255)
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=False)
objects = UserManager()
USERNAME_FIELD = 'email'
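# Usage sketch (hypothetical credentials; assumes this app's migrations are
# applied and AUTH_USER_MODEL points at this User class):
#
#   user = User.objects.create_user(email='user@example.com', password='secret123')
#   admin = User.objects.create_superuser('admin@example.com', 'secret123')
#   User.objects.get(email='user@example.com').check_password('secret123')  # True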
|
py | 7df88547fbac565c3c3bbab2dab60bc9a000bc34 | import sqlite3
from config import Config
class DBHelper:
@staticmethod
def connect_to_db():
return sqlite3.connect(Config.DB_PATH)
@staticmethod
def disconnect_from_db(conn, cursor):
cursor.close()
conn.close()
class DBRequest:
_cursor = None
_connect = None
def __init__(self):
self.set_connect(DBHelper.connect_to_db())
self.set_cursor(self.get_connect().cursor())
def set_cursor(self, cursor):
self._cursor = cursor
def get_cursor(self):
return self._cursor
def set_connect(self, connect):
self._connect = connect
def get_connect(self):
return self._connect
def complete(self):
self.get_connect().commit()
def close(self):
DBHelper.disconnect_from_db(self.get_connect(), self.get_cursor()) |
py | 7df886986913da67fc4f247b3417a8b2a6a87479 | import sys
sys.path.append("./Preprocess/")
from nfa_preprocess import take_alphabet
from nfa_preprocess import take_nfa_final_states
from nfa_preprocess import create_nfa_transition_table
#Take input: NUMBER OF STATES
print("""
\033[1m Enter Information for NFA \033[0m
""")
while True:
statenum = input("\nEnter number of states: ")
if statenum.isdigit():
statenum = int(statenum)
if statenum > 0:
break
        else:
            print("NFA must have at least 1 state")
    else:
        print("Invalid input")
print("")
#Input NFA Start State
while(True):
nfa_start_state = input("Enter Start State: ")
if nfa_start_state.isdigit():
if int(nfa_start_state) <= 0:
print("Invalid State.")
else:
break
else:
print("Not a Number.")
print("")
nfa_final_states = take_nfa_final_states(statenum)
print("")
alphabets = take_alphabet()
print("")
nfa = create_nfa_transition_table(statenum, alphabets)
print("""
\033[1m Nondeterministic Finite Automata \033[0m
5 Tuple System
1.States 2.Alphabets 3.Start State 4.Final State 5.Transition Rules
""")
print("States: ", end=" ")
nfa_temp_states = []
for i in range(statenum):
nfa_temp_states.append(i+1)
print(nfa_temp_states)
print("Alphabets: ", end=" ")
alphabets2 = alphabets[:]
try:
alphabets2.remove('ε')
except ValueError:
pass
print(alphabets2)
print("Start State: ", end=" ")
print(nfa_start_state)
print("Final States: ", end=" ")
print(nfa_final_states)
print("Transition Table: ", end=" ")
print(nfa)
#Conversion Begins
from dfa_preprocess import make_dfa_states
from dfa_preprocess import find_dfa_start_state
from dfa_preprocess import create_dfa_transition_table
from dfa_preprocess import find_dfa_final_states
dfa_states = make_dfa_states(statenum)
del dfa_states[0]
dfa_start_state = [int(nfa_start_state)]
#dfa_start_state = find_dfa_start_state(dfa_start_state, nfa, alphabets)
dfa_final_states = find_dfa_final_states(dfa_states, nfa_final_states)
dfa = create_dfa_transition_table(dfa_states, alphabets, nfa, statenum)
print("""
\033[1m Deterministic Finite Automata \033[0m
5 Tuple System
1.States 2.Alphabets 3.Start State 4.Final State 5.Transition Rules
""")
print("States: ", end=" ")
print(dfa_states)
print("Alphabets: ", end=" ")
print(alphabets2)
print("Start States: ", end=" ")
print(dfa_start_state)
print("Final States: ", end=" ")
print(dfa_final_states)
print("Transition Table: ", end=" ")
print(dfa)
|
py | 7df887c49e28ca68806066109edd0497d4bf9500 | """
WSGI config for empty_union_34333 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'empty_union_34333.settings')
application = get_wsgi_application()
|
py | 7df8888a33342e760fe70d55da837c69157b3773 | #!/usr/bin/env python3
import os
from os.path import join, dirname
from pprint import pprint
import vonage
from dotenv import load_dotenv
dotenv_path = join(dirname(__file__), "../.env")
load_dotenv(dotenv_path)
VONAGE_APPLICATION_ID = os.environ.get("VONAGE_APPLICATION_ID")
VONAGE_APPLICATION_PRIVATE_KEY_PATH = os.environ.get("VONAGE_APPLICATION_PRIVATE_KEY_PATH")
UUID = os.environ.get("UUID")
client = vonage.Client(
application_id=VONAGE_APPLICATION_ID,
private_key=VONAGE_APPLICATION_PRIVATE_KEY_PATH,
)
voice = vonage.Voice(client)
dest = {"type": "ncco", "url": ["https://developer.nexmo.com/ncco/tts.json"]}
response = voice.update_call(UUID, action="transfer", destination=dest)
pprint(response)
|
py | 7df8898e4c70062c4e068a88ea483b7086de6b7c | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import click
import yaml
from .controllers.cluster_controller import ClusterController
from .controllers.service_controller import ServiceController
@click.group(chain=True)
def cli():
"""
Cloudcrane's command group.
"""
@cli.command('cluster')
@click.argument('command')
@click.option('--cluster-name', default='default', help='Name of the ECS cluster (default = "default")')
@click.option('--ami', help='ID of AMI to be used for the instances of the cluster')
@click.option('--instance-type', default='t2.micro', help='EC2 instance type (default = t2.micro)')
@click.option('--max-instances', default='1', help='Maximum number of EC2 instances in auto-scaling group')
def cluster(command, cluster_name, ami, instance_type, max_instances):
"""
Manage ECS clusters.
Possible commands: create, list, delete
"""
cluster_controller = ClusterController()
if command == 'create':
cluster_controller.create(
cluster_name=cluster_name,
ami=ami,
instance_type=instance_type,
max_instances=max_instances
)
elif command == 'list':
cluster_controller.list(
all=cluster_name == 'all'
)
elif command == 'delete':
cluster_controller.delete(
cluster_name=cluster_name
)
@cli.command('service')
@click.argument('command')
@click.option('--application', help='Name of the application the AWS CloudFormation stack should be created for')
@click.option('--cluster-name', default='default', help='Name of the ECS cluster (default = "default")')
@click.option('--version', help='Version of the application the AWS CloudFormation stack should be created for')
@click.option('--region', default='eu-central-1', help='AWS region to create the new stack in')
@click.option('--parameters', default='cloudcrane.yaml',
help='YAML file with parameters for deployment of service to ECS')
def service(command, cluster_name, application, version, region, parameters):
"""
Manage services in ECS cluster.
Possible commands: deploy, delete, list
"""
service_controller = ServiceController()
if version:
service_name = application + '-' + version
else:
service_name = application
if command == 'deploy':
with open(parameters, 'rb') as f:
            service_parameters = yaml.safe_load(f)
service_controller.deploy(
cluster_name=cluster_name,
service_name=service_name,
region=region,
parameters=service_parameters
)
elif command == 'delete':
try:
service_controller.delete(
cluster_name=cluster_name,
service_name=service_name
)
except Exception as e:
print('ERROR: Error deleting service [{}]: {}'.format(service_name, e))
__print_usage(service)
exit(1)
elif command == 'list':
service_controller.list(
cluster_name=cluster_name
)
def __print_usage(command):
"""
Print usage information (help text) of click command
"""
with click.Context(command) as ctx:
click.echo(command.get_help(ctx))
def main():
cli()
if __name__ == "__main__":
main()
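# Usage sketch (assumes the package exposes the `cli` group as a console script,
# named `cloudcrane` here for illustration; the AMI id and file names are
# placeholders):
#
#   cloudcrane cluster create --cluster-name demo --ami ami-0123456789abcdef0 \
#       --instance-type t2.micro --max-instances 2
#   cloudcrane service deploy --application my-app --version 1.0.0 \
#       --cluster-name demo --parameters cloudcrane.yaml
#   cloudcrane service list --cluster-name demo
#   cloudcrane cluster delete --cluster-name demo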
|
py | 7df8898f17cabdccf5618a10f4e8b31dcbe18b45 | # -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# IBM.NOS.get_version
# ---------------------------------------------------------------------
# Copyright (C) 2007-2018 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Python modules
import re
# NOC modules
from noc.core.script.base import BaseScript
from noc.sa.interfaces.igetversion import IGetVersion
class Script(BaseScript):
name = "IBM.NOS.get_version"
cache = True
interface = IGetVersion
rx_ver = re.compile(
r"Software Version\s+(?P<version>\S+)\s\(\w+\s(?P<image>\S+)\)", re.MULTILINE
)
rx_ser = re.compile(r"Serial Number\s+\:\s+(?P<serial>\S+)", re.MULTILINE)
rx_pla = re.compile(r"^IBM\s.*(?P<platform>(EN|CN|SI|G)\d{4}\w?)\s+", re.MULTILINE)
def execute_snmp(self):
try:
p = self.snmp.get("1.3.6.1.2.1.1.1.0", cached=True) # sysDescr.0
match = self.rx_pla.search(p)
platform = match.group("platform")
version = self.snmp.get(
"1.3.6.1.2.1.47.1.1.1.1.10.1", cached=True
) # entPhysicalSoftwareRev.1
image = self.snmp.get("1.3.6.1.2.1.25.4.2.1.2.1", cached=True) # hrSWRunName.1
serial = self.snmp.get(
"1.3.6.1.2.1.47.1.1.1.1.11.1", cached=True
) # entPhysicalSerialNum.1
return {
"vendor": "IBM",
"platform": platform,
"version": version,
"image": image,
"attributes": {"Serial Number": serial},
}
except self.snmp.TimeOutError:
pass
def execute_cli(self):
v = self.cli("show version | exclude Temp", cached=True)
match1 = self.rx_pla.search(v)
match2 = self.rx_ver.search(v)
match3 = self.rx_ser.search(v)
platform = match1.group("platform")
version = match2.group("version")
image = match2.group("image")
serial = match3.group("serial")
return {
"vendor": "IBM",
"platform": platform,
"version": version,
"image": image,
"attributes": {"Serial Number": serial},
}
|
py | 7df889a19e717f9832d670725e0356bb90bc247b | """
Status Progress of rack map load/refresh/load_refresh
"""
from base_progress import BaseProgress
class RackMapProgress(BaseProgress):
# progress type
TYPE_LOAD_MAP = "load_map"
TYPE_REFRESH_MAP = "refresh_map"
TYPE_LOAD_REFRESH_MAP = "load_refresh_map"
STATUS_SEND_HTTP_REQUEST = "send http request"
STATUS_READ_DATA_FROM_DB = "read data from db"
STATUS_CHECK_REFRESH_CONDITION = "check refresh condition"
STATUS_ASYNC_REFRESH = "async refresh data"
STATUS_READ_REFRESH_DATA = "read refresh data"
STATUS_PROCESS_DATA = "process map data"
STATUS_PLOT_MAP = "plot map"
STATUS_PLOT_LEGEND = "plot legend"
STATUS_LOADING_MAP = "loading map"
# var
map_progress_type = None
# all possible status of map loading progress
all_status_list = [
STATUS_SEND_HTTP_REQUEST,
STATUS_READ_DATA_FROM_DB,
STATUS_CHECK_REFRESH_CONDITION,
STATUS_ASYNC_REFRESH,
STATUS_READ_REFRESH_DATA,
STATUS_PROCESS_DATA,
STATUS_PLOT_MAP,
STATUS_PLOT_LEGEND,
STATUS_LOADING_MAP,
]
# (x, y), x means the index in all_status_list; y means the value of this status
value_list_dict = {
TYPE_LOAD_MAP: [
(0, 5),
(1, 20),
(5, 20),
(6, 25),
(7, 15),
(8, 15),
],
TYPE_REFRESH_MAP: [
(0, 5),
(2, 5),
(3, 45),
(4, 5),
(5, 10),
(6, 10),
(7, 5),
(8, 15),
],
TYPE_LOAD_REFRESH_MAP: [
(0, 5),
(1, 5),
(2, 5),
(3, 40),
(4, 5),
(5, 10),
(6, 10),
(7, 5),
(8, 15),
],
}
# status data dict
# {status_text: {"begin": value_begin, "range": value_range, "next": next_status}, ...}
status_data_dict = {}
"""
load_map including: 1) 5%, send http request,
2) 20%, read data from db,
3) 20%, process data,
4) 25%, plot map,
5) 15%, plot legend,
6) 15%, loading map, receive http response
refresh_map including: 1) 5%, send http request,
2) 5%, check refresh condition,
3) 45%, async send refresh,
4) 5%, read data from db,
5) 10%, process data,
6) 10%, plot map,
7) 5%, plot legend,
8) 15%, loading map, receive http response
load_refresh_map including:
1) 5%, send http request,
2) 5%, read data from db,
3) 5%, check refresh condition,
4) 40%, async send refresh,
5) 5%, read data from db,
6) 10%, process data,
7) 10%, plot map,
8) 5%, plot legend,
9) 15%, loading map, receive http response
"""
def __init__(self, vldist=None, status_list=None):
BaseProgress.__init__(self)
if status_list and type(status_list) is list:
self.all_status_list = status_list
if vldist and type(vldist) is dict:
for type_name in self.value_list_dict:
if type_name not in vldist:
self.init_status_data(
type_name, self.value_list_dict[type_name])
for type_name in vldist:
                self.init_status_data(type_name, vldist[type_name])
else:
for type_name in self.value_list_dict:
self.init_status_data(
type_name, self.value_list_dict[type_name])
# init status_data_dict with value_list
def init_status_data(self, type_name, value_list):
self.status_data_dict[type_name] = {}
value_begin = 0
prev_status = None
for (value_index, value_range) in value_list:
status_text = self.all_status_list[value_index]
self.status_data_dict[type_name][status_text] = {"begin": value_begin,
"range": value_range,
"next": "",
}
if prev_status:
prev_status["next"] = status_text
prev_status = self.status_data_dict[type_name][status_text]
value_begin += value_range
# load map ?
def is_load_map(self):
return self.is_load_or_refresh_map(self.TYPE_LOAD_MAP)
# refresh map ?
def is_refresh_map(self):
return self.is_load_or_refresh_map(self.TYPE_REFRESH_MAP)
# load failed then refresh map ?
def is_load_refresh_map(self):
return self.is_load_or_refresh_map(self.TYPE_LOAD_REFRESH_MAP)
def is_load_or_refresh_map(self, str_type):
result = False
if self.map_progress_type == str_type:
result = True
return result
# set status to load map.
def set_load_map(self):
self.map_progress_type = self.TYPE_LOAD_MAP
self.clear_status()
# set status to refresh map
def set_refresh_map(self):
self.map_progress_type = self.TYPE_REFRESH_MAP
self.clear_status()
# set status to load failed then refresh map
def set_load_refresh_map(self):
self.map_progress_type = self.TYPE_LOAD_REFRESH_MAP
self.set_map_progress_status(self.status_text)
def set_map_progress_status(self, status, factor=1):
# print("map_progress_type is: {0}, status is: {1}".format(self.map_progress_type, status))
curr_status = self.status_data_dict[self.map_progress_type][status]
if factor == 1:
value = curr_status["begin"] + curr_status["range"]
else:
value = curr_status["begin"] + \
int(round(curr_status["range"] * factor))
# print("process status, status: '{0}', value: '{1}'".format(status, value))
next_status = curr_status["next"] if factor == 1 else status
self.set_status(status, value, next_status)
def set_send_http_request(self, factor=1):
self.set_map_progress_status(self.STATUS_SEND_HTTP_REQUEST, factor)
def set_read_data_from_db(self, factor=1):
self.set_map_progress_status(self.STATUS_READ_DATA_FROM_DB, factor)
def set_check_refresh_condition(self, factor=1):
self.set_map_progress_status(
self.STATUS_CHECK_REFRESH_CONDITION, factor)
def set_async_refresh(self, factor=1):
self.set_map_progress_status(self.STATUS_ASYNC_REFRESH, factor)
def set_read_refresh_data(self, factor=1):
self.set_map_progress_status(self.STATUS_READ_REFRESH_DATA, factor)
def set_process_data(self, factor=1):
self.set_map_progress_status(self.STATUS_PROCESS_DATA, factor)
def set_plot_map(self, factor=1):
self.set_map_progress_status(self.STATUS_PLOT_MAP, factor)
def set_plot_legend(self, factor=1):
self.set_map_progress_status(self.STATUS_PLOT_LEGEND, factor)
def set_receive_http_response(self, factor=1):
self.set_map_progress_status(self.STATUS_LOADING_MAP, factor)
class HeatMapProgress(RackMapProgress):
def __init__(self):
RackMapProgress.__init__(self, None)
class ServiceMapProgress(RackMapProgress):
def __init__(self):
RackMapProgress.__init__(self, None)
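# Usage sketch (assumes BaseProgress supplies the set_status()/clear_status()
# methods used above). The values in the trailing comments follow the
# TYPE_LOAD_MAP breakdown documented in the class docstring (5/20/20/25/15/15).
if __name__ == "__main__":
    progress = HeatMapProgress()
    progress.set_load_map()                # plain map load
    progress.set_send_http_request()       # -> 5
    progress.set_read_data_from_db()       # -> 25
    progress.set_process_data(factor=0.5)  # halfway through "process map data" -> 35
    progress.set_process_data()            # -> 45
    progress.set_plot_map()                # -> 70
    progress.set_plot_legend()             # -> 85
    progress.set_receive_http_response()   # -> 100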
|
py | 7df88ade58a2a51bfb4272fdb2645a2680f9d456 | # Lint-as: python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
from absl.testing import parameterized
from clif.testing.python import variables
# TODO: Restore simple import after OSS setup includes pybind11.
# pylint: disable=g-import-not-at-top
try:
from clif.testing.python import variables_pybind11
except ImportError:
variables_pybind11 = None
# pylint: enable=g-import-not-at-top
@parameterized.named_parameters([
np for np in zip(('c_api', 'pybind11'), (variables, variables_pybind11))
if np[1] is not None
])
class VariablesTest(absltest.TestCase):
def test_const_int(self, wrapper_lib):
self.assertEqual(wrapper_lib.kMyConstInt, 42)
def test_const_int_renamed(self, wrapper_lib):
self.assertEqual(wrapper_lib.const_int, 123)
def test_const_float(self, wrapper_lib):
self.assertEqual(wrapper_lib.kMyConstFloat, 15.0)
def test_const_bool(self, wrapper_lib):
self.assertEqual(wrapper_lib.kMyConstBool, True)
def test_const_complex(self, wrapper_lib):
self.assertEqual(wrapper_lib.kMyConstComplex, complex(1))
def test_const_array(self, wrapper_lib):
expected_array = [0, 10, 20, 30, 40]
self.assertSequenceEqual(expected_array, wrapper_lib.kMyConstIntArray)
def test_const_pair(self, wrapper_lib):
expected_tuple = [0, 10]
self.assertSequenceEqual(expected_tuple, wrapper_lib.kMyConstPair)
def test_const_dict(self, wrapper_lib):
expected_dict = {1: 10, 2: 20, 3: 30}
self.assertDictEqual(expected_dict, wrapper_lib.kMyConstMap)
def test_const_set(self, wrapper_lib):
expected_set = {1, 2, 3}
self.assertSetEqual(expected_set, wrapper_lib.kMyConstSet)
if __name__ == '__main__':
absltest.main()
|
py | 7df88b943b86b008bd56a403bc3a0e25942e1b3f | """
SynthTIGER
Copyright (c) 2021-present NAVER Corp.
MIT license
"""
import numpy as np
from synthtiger import utils
from synthtiger.components.component import Component
class Erode(Component):
def __init__(self, k=(1, 3)):
super().__init__()
self.k = k
def sample(self, meta=None):
if meta is None:
meta = {}
k = meta.get("k", np.random.randint(self.k[0], self.k[1] + 1))
meta = {
"k": k,
}
return meta
def apply(self, layers, meta=None):
meta = self.sample(meta)
k = meta["k"]
for layer in layers:
image = utils.erode_image(layer.image, k)
layer.image = image
return meta
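# Usage sketch (not part of synthtiger): run Erode against a stand-in layer.
# Real synthtiger layers expose an `.image` ndarray (assumed RGBA float here);
# DummyLayer below is hypothetical.
if __name__ == "__main__":
    class DummyLayer:
        def __init__(self, image):
            self.image = image
    layer = DummyLayer(np.full((32, 32, 4), 255.0, dtype=np.float32))
    meta = Erode(k=(2, 4)).apply([layer])
    print(meta, layer.image.shape)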
|
py | 7df88cb117817d9f00c5d952f299db9fb3208121 | import json
from io import BytesIO
from wsgiref.util import setup_testing_defaults
from sqlalchemy import create_engine
from sqlalchemy.pool import StaticPool
import hiku.sources.sqlalchemy
from hiku.engine import Engine
from hiku.console.ui import ConsoleApplication
from hiku.executors.sync import SyncExecutor
from .test_source_sqlalchemy import SA_ENGINE_KEY, SyncQueries, setup_db
from .test_source_sqlalchemy import get_queries, get_graph
engine = Engine(SyncExecutor())
GRAPH = get_graph(get_queries(hiku.sources.sqlalchemy, SA_ENGINE_KEY,
SyncQueries))
def request(app, method, path_info, script_name='', payload=None):
env = {'REQUEST_METHOD': method, 'SCRIPT_NAME': script_name,
'PATH_INFO': path_info}
if payload is not None:
env['wsgi.input'] = BytesIO(payload)
env['CONTENT_LENGTH'] = len(payload)
meta = []
def start_response(status, headers, exc_info=None):
meta.extend((status, headers, exc_info))
setup_testing_defaults(env)
app_iter = app(env, start_response)
assert len(meta) == 3 and meta[2] is None
return meta[0], meta[1], b''.join(app_iter)
def test_ui():
app = ConsoleApplication(GRAPH, engine, debug=True)
status, headers, _ = request(app, 'GET', '/')
assert status == '200 OK'
assert ('Content-Type', 'text/html') in headers
def test_docs():
app = ConsoleApplication(GRAPH, engine, debug=True)
status, headers, content = request(app, 'GET', '/docs')
assert status == '200 OK'
assert content.startswith(b'type')
def test_query():
sa_engine = create_engine(
'sqlite://',
connect_args={'check_same_thread': False},
poolclass=StaticPool,
)
setup_db(sa_engine)
app = ConsoleApplication(GRAPH, engine, {SA_ENGINE_KEY: sa_engine},
debug=True)
query = b'[{:bar_list [:name :type {:foo_s [:name :count]}]}]'
status, headers, content = request(app, 'POST', '/', payload=query)
assert status == '200 OK'
assert ('Content-Type', 'application/json') in headers
result = json.loads(content.decode('utf-8'))
assert 'bar_list' in result
|
py | 7df88e038e93e9da1a0b80d7cc415fc6a7c6cefa | from rest_framework.routers import APIRootView
from nautobot.dcim.models import Device
from nautobot.extras.api.views import (
ConfigContextQuerySetMixin,
CustomFieldModelViewSet,
ModelViewSet,
StatusViewSetMixin,
)
from nautobot.utilities.utils import count_related
from nautobot.virtualization import filters
from nautobot.virtualization.models import (
Cluster,
ClusterGroup,
ClusterType,
VirtualMachine,
VMInterface,
)
from . import serializers
class VirtualizationRootView(APIRootView):
"""
Virtualization API root view
"""
def get_view_name(self):
return "Virtualization"
#
# Clusters
#
class ClusterTypeViewSet(CustomFieldModelViewSet):
queryset = ClusterType.objects.annotate(cluster_count=count_related(Cluster, "type"))
serializer_class = serializers.ClusterTypeSerializer
filterset_class = filters.ClusterTypeFilterSet
class ClusterGroupViewSet(CustomFieldModelViewSet):
queryset = ClusterGroup.objects.annotate(cluster_count=count_related(Cluster, "group"))
serializer_class = serializers.ClusterGroupSerializer
filterset_class = filters.ClusterGroupFilterSet
class ClusterViewSet(CustomFieldModelViewSet):
queryset = Cluster.objects.prefetch_related("type", "group", "tenant", "site", "tags").annotate(
device_count=count_related(Device, "cluster"),
virtualmachine_count=count_related(VirtualMachine, "cluster"),
)
serializer_class = serializers.ClusterSerializer
filterset_class = filters.ClusterFilterSet
#
# Virtual machines
#
class VirtualMachineViewSet(ConfigContextQuerySetMixin, StatusViewSetMixin, CustomFieldModelViewSet):
queryset = VirtualMachine.objects.prefetch_related(
"cluster__site",
"platform",
"primary_ip4",
"primary_ip6",
"status",
"role",
"tenant",
"tags",
)
filterset_class = filters.VirtualMachineFilterSet
def get_serializer_class(self):
"""
Select the specific serializer based on the request context.
If the `brief` query param equates to True, return the NestedVirtualMachineSerializer
If the `exclude` query param includes `config_context` as a value, return the VirtualMachineSerializer
Else, return the VirtualMachineWithConfigContextSerializer
"""
request = self.get_serializer_context()["request"]
if request.query_params.get("brief", False):
return serializers.NestedVirtualMachineSerializer
elif "config_context" in request.query_params.get("exclude", []):
return serializers.VirtualMachineSerializer
return serializers.VirtualMachineWithConfigContextSerializer
class VMInterfaceViewSet(ModelViewSet):
queryset = VMInterface.objects.prefetch_related("virtual_machine", "tags", "tagged_vlans")
serializer_class = serializers.VMInterfaceSerializer
filterset_class = filters.VMInterfaceFilterSet
brief_prefetch_fields = ["virtual_machine"]
|
py | 7df88e875f7e3c7cbc914dd18327bfc8eca5cc74 | __all__ = ['newaxis', 'ndarray', 'flatiter', 'ufunc',
'arange', 'array', 'zeros', 'empty', 'broadcast', 'dtype',
'fromstring', 'fromfile', 'frombuffer',
'int_asbuffer', 'where', 'argwhere',
'concatenate', 'fastCopyAndTranspose', 'lexsort',
'set_numeric_ops', 'can_cast',
'asarray', 'asanyarray', 'ascontiguousarray', 'asfortranarray',
'isfortran', 'empty_like', 'zeros_like',
'correlate', 'convolve', 'inner', 'dot', 'outer', 'vdot',
'alterdot', 'restoredot', 'roll', 'rollaxis', 'cross', 'tensordot',
'array2string', 'get_printoptions', 'set_printoptions',
'array_repr', 'array_str', 'set_string_function',
'little_endian', 'require',
'fromiter', 'array_equal', 'array_equiv',
'indices', 'fromfunction',
'load', 'loads', 'isscalar', 'binary_repr', 'base_repr',
'ones', 'identity', 'allclose', 'compare_chararrays', 'putmask',
'seterr', 'geterr', 'setbufsize', 'getbufsize',
'seterrcall', 'geterrcall', 'errstate', 'flatnonzero',
'Inf', 'inf', 'infty', 'Infinity',
'nan', 'NaN', 'False_', 'True_', 'bitwise_not',
'CLIP', 'RAISE', 'WRAP', 'MAXDIMS', 'BUFSIZE', 'ALLOW_THREADS',
'ComplexWarning']
import sys
import warnings
import multiarray
import umath
from umath import *
import numerictypes
from numerictypes import *
if sys.version_info[0] < 3:
__all__.extend(['getbuffer', 'newbuffer'])
class ComplexWarning(RuntimeWarning):
"""
The warning raised when casting a complex dtype to a real dtype.
As implemented, casting a complex number to a real discards its imaginary
part, but this behavior may not be what the user actually wants.
"""
pass
bitwise_not = invert
CLIP = multiarray.CLIP
WRAP = multiarray.WRAP
RAISE = multiarray.RAISE
MAXDIMS = multiarray.MAXDIMS
ALLOW_THREADS = multiarray.ALLOW_THREADS
BUFSIZE = multiarray.BUFSIZE
ndarray = multiarray.ndarray
flatiter = multiarray.flatiter
broadcast = multiarray.broadcast
dtype = multiarray.dtype
ufunc = type(sin)
# originally from Fernando Perez's IPython
def zeros_like(a):
"""
Return an array of zeros with the same shape and type as a given array.
Equivalent to ``a.copy().fill(0)``.
Parameters
----------
a : array_like
The shape and data-type of `a` define the parameters of
the returned array.
Returns
-------
out : ndarray
Array of zeros with same shape and type as `a`.
See Also
--------
ones_like : Return an array of ones with shape and type of input.
empty_like : Return an empty array with shape and type of input.
zeros : Return a new array setting values to zero.
ones : Return a new array setting values to one.
empty : Return a new uninitialized array.
Examples
--------
>>> x = np.arange(6)
>>> x = x.reshape((2, 3))
>>> x
array([[0, 1, 2],
[3, 4, 5]])
>>> np.zeros_like(x)
array([[0, 0, 0],
[0, 0, 0]])
>>> y = np.arange(3, dtype=np.float)
>>> y
array([ 0., 1., 2.])
>>> np.zeros_like(y)
array([ 0., 0., 0.])
"""
if isinstance(a, ndarray):
res = ndarray.__new__(type(a), a.shape, a.dtype, order=a.flags.fnc)
res.fill(0)
return res
try:
wrap = a.__array_wrap__
except AttributeError:
wrap = None
a = asarray(a)
res = zeros(a.shape, a.dtype)
if wrap:
res = wrap(res)
return res
def empty_like(a):
"""
Return a new array with the same shape and type as a given array.
Parameters
----------
a : array_like
The shape and data-type of `a` define the parameters of the
returned array.
Returns
-------
out : ndarray
Array of random data with the same shape and type as `a`.
See Also
--------
ones_like : Return an array of ones with shape and type of input.
zeros_like : Return an array of zeros with shape and type of input.
empty : Return a new uninitialized array.
ones : Return a new array setting values to one.
zeros : Return a new array setting values to zero.
Notes
-----
This function does *not* initialize the returned array; to do that use
`zeros_like` or `ones_like` instead. It may be marginally faster than the
functions that do set the array values.
Examples
--------
>>> a = ([1,2,3], [4,5,6]) # a is array-like
>>> np.empty_like(a)
array([[-1073741821, -1073741821, 3], #random
[ 0, 0, -1073741821]])
>>> a = np.array([[1., 2., 3.],[4.,5.,6.]])
>>> np.empty_like(a)
array([[ -2.00000715e+000, 1.48219694e-323, -2.00000572e+000], #random
[ 4.38791518e-305, -2.00000715e+000, 4.17269252e-309]])
"""
if isinstance(a, ndarray):
res = ndarray.__new__(type(a), a.shape, a.dtype, order=a.flags.fnc)
return res
try:
wrap = a.__array_wrap__
except AttributeError:
wrap = None
a = asarray(a)
res = empty(a.shape, a.dtype)
if wrap:
res = wrap(res)
return res
# end Fernando's utilities
def extend_all(module):
adict = {}
for a in __all__:
adict[a] = 1
try:
mall = getattr(module, '__all__')
except AttributeError:
mall = [k for k in module.__dict__.keys() if not k.startswith('_')]
for a in mall:
if a not in adict:
__all__.append(a)
extend_all(umath)
extend_all(numerictypes)
newaxis = None
arange = multiarray.arange
array = multiarray.array
zeros = multiarray.zeros
empty = multiarray.empty
fromstring = multiarray.fromstring
fromiter = multiarray.fromiter
fromfile = multiarray.fromfile
frombuffer = multiarray.frombuffer
if sys.version_info[0] < 3:
newbuffer = multiarray.newbuffer
getbuffer = multiarray.getbuffer
int_asbuffer = multiarray.int_asbuffer
where = multiarray.where
concatenate = multiarray.concatenate
fastCopyAndTranspose = multiarray._fastCopyAndTranspose
set_numeric_ops = multiarray.set_numeric_ops
can_cast = multiarray.can_cast
lexsort = multiarray.lexsort
compare_chararrays = multiarray.compare_chararrays
putmask = multiarray.putmask
def asarray(a, dtype=None, order=None):
"""
Convert the input to an array.
Parameters
----------
a : array_like
Input data, in any form that can be converted to an array. This
includes lists, lists of tuples, tuples, tuples of tuples, tuples
of lists and ndarrays.
dtype : data-type, optional
By default, the data-type is inferred from the input data.
order : {'C', 'F'}, optional
Whether to use row-major ('C') or column-major ('F' for FORTRAN)
memory representation. Defaults to 'C'.
Returns
-------
out : ndarray
Array interpretation of `a`. No copy is performed if the input
is already an ndarray. If `a` is a subclass of ndarray, a base
class ndarray is returned.
See Also
--------
asanyarray : Similar function which passes through subclasses.
ascontiguousarray : Convert input to a contiguous array.
asfarray : Convert input to a floating point ndarray.
asfortranarray : Convert input to an ndarray with column-major
memory order.
asarray_chkfinite : Similar function which checks input for NaNs and Infs.
fromiter : Create an array from an iterator.
fromfunction : Construct an array by executing a function on grid
positions.
Examples
--------
Convert a list into an array:
>>> a = [1, 2]
>>> np.asarray(a)
array([1, 2])
Existing arrays are not copied:
>>> a = np.array([1, 2])
>>> np.asarray(a) is a
True
If `dtype` is set, array is copied only if dtype does not match:
>>> a = np.array([1, 2], dtype=np.float32)
>>> np.asarray(a, dtype=np.float32) is a
True
>>> np.asarray(a, dtype=np.float64) is a
False
Contrary to `asanyarray`, ndarray subclasses are not passed through:
>>> issubclass(np.matrix, np.ndarray)
True
>>> a = np.matrix([[1, 2]])
>>> np.asarray(a) is a
False
>>> np.asanyarray(a) is a
True
"""
return array(a, dtype, copy=False, order=order)
def asanyarray(a, dtype=None, order=None):
"""
Convert the input to a ndarray, but pass ndarray subclasses through.
Parameters
----------
a : array_like
Input data, in any form that can be converted to an array. This
includes scalars, lists, lists of tuples, tuples, tuples of tuples,
tuples of lists and ndarrays.
dtype : data-type, optional
By default, the data-type is inferred from the input data.
order : {'C', 'F'}, optional
Whether to use row-major ('C') or column-major ('F') memory
representation. Defaults to 'C'.
Returns
-------
out : ndarray or an ndarray subclass
Array interpretation of `a`. If `a` is an ndarray or a subclass
of ndarray, it is returned as-is and no copy is performed.
See Also
--------
asarray : Similar function which always returns ndarrays.
ascontiguousarray : Convert input to a contiguous array.
asfarray : Convert input to a floating point ndarray.
asfortranarray : Convert input to an ndarray with column-major
memory order.
asarray_chkfinite : Similar function which checks input for NaNs and Infs.
fromiter : Create an array from an iterator.
fromfunction : Construct an array by executing a function on grid
positions.
Examples
--------
Convert a list into an array:
>>> a = [1, 2]
>>> np.asanyarray(a)
array([1, 2])
Instances of `ndarray` subclasses are passed through as-is:
>>> a = np.matrix([1, 2])
>>> np.asanyarray(a) is a
True
"""
return array(a, dtype, copy=False, order=order, subok=True)
def ascontiguousarray(a, dtype=None):
"""
Return a contiguous array in memory (C order).
Parameters
----------
a : array_like
Input array.
dtype : str or dtype object, optional
Data-type of returned array.
Returns
-------
out : ndarray
Contiguous array of same shape and content as `a`, with type `dtype`
if specified.
See Also
--------
asfortranarray : Convert input to an ndarray with column-major
memory order.
require : Return an ndarray that satisfies requirements.
ndarray.flags : Information about the memory layout of the array.
Examples
--------
>>> x = np.arange(6).reshape(2,3)
>>> np.ascontiguousarray(x, dtype=np.float32)
array([[ 0., 1., 2.],
[ 3., 4., 5.]], dtype=float32)
>>> x.flags['C_CONTIGUOUS']
True
"""
return array(a, dtype, copy=False, order='C', ndmin=1)
def asfortranarray(a, dtype=None):
"""
Return an array laid out in Fortran order in memory.
Parameters
----------
a : array_like
Input array.
dtype : str or dtype object, optional
By default, the data-type is inferred from the input data.
Returns
-------
out : ndarray
The input `a` in Fortran, or column-major, order.
See Also
--------
ascontiguousarray : Convert input to a contiguous (C order) array.
asanyarray : Convert input to an ndarray with either row or
column-major memory order.
require : Return an ndarray that satisfies requirements.
ndarray.flags : Information about the memory layout of the array.
Examples
--------
>>> x = np.arange(6).reshape(2,3)
>>> y = np.asfortranarray(x)
>>> x.flags['F_CONTIGUOUS']
False
>>> y.flags['F_CONTIGUOUS']
True
"""
return array(a, dtype, copy=False, order='F', ndmin=1)
def require(a, dtype=None, requirements=None):
"""
Return an ndarray of the provided type that satisfies requirements.
This function is useful to be sure that an array with the correct flags
is returned for passing to compiled code (perhaps through ctypes).
Parameters
----------
a : array_like
The object to be converted to a type-and-requirement-satisfying array.
dtype : data-type
The required data-type, the default data-type is float64).
requirements : str or list of str
The requirements list can be any of the following
* 'F_CONTIGUOUS' ('F') - ensure a Fortran-contiguous array
* 'C_CONTIGUOUS' ('C') - ensure a C-contiguous array
* 'ALIGNED' ('A') - ensure a data-type aligned array
* 'WRITEABLE' ('W') - ensure a writable array
* 'OWNDATA' ('O') - ensure an array that owns its own data
See Also
--------
asarray : Convert input to an ndarray.
asanyarray : Convert to an ndarray, but pass through ndarray subclasses.
ascontiguousarray : Convert input to a contiguous array.
asfortranarray : Convert input to an ndarray with column-major
memory order.
ndarray.flags : Information about the memory layout of the array.
Notes
-----
The returned array will be guaranteed to have the listed requirements
by making a copy if needed.
Examples
--------
>>> x = np.arange(6).reshape(2,3)
>>> x.flags
C_CONTIGUOUS : True
F_CONTIGUOUS : False
OWNDATA : False
WRITEABLE : True
ALIGNED : True
UPDATEIFCOPY : False
>>> y = np.require(x, dtype=np.float32, requirements=['A', 'O', 'W', 'F'])
>>> y.flags
C_CONTIGUOUS : False
F_CONTIGUOUS : True
OWNDATA : True
WRITEABLE : True
ALIGNED : True
UPDATEIFCOPY : False
"""
if requirements is None:
requirements = []
else:
requirements = [x.upper() for x in requirements]
if not requirements:
return asanyarray(a, dtype=dtype)
if 'ENSUREARRAY' in requirements or 'E' in requirements:
subok = False
else:
subok = True
arr = array(a, dtype=dtype, copy=False, subok=subok)
copychar = 'A'
if 'FORTRAN' in requirements or \
'F_CONTIGUOUS' in requirements or \
'F' in requirements:
copychar = 'F'
elif 'CONTIGUOUS' in requirements or \
'C_CONTIGUOUS' in requirements or \
'C' in requirements:
copychar = 'C'
for prop in requirements:
if not arr.flags[prop]:
arr = arr.copy(copychar)
break
return arr
def isfortran(a):
"""
Returns True if array is arranged in Fortran-order in memory
and dimension > 1.
Parameters
----------
a : ndarray
Input array.
Examples
--------
    np.array allows one to specify whether the array is written in C-contiguous
order (last index varies the fastest), or FORTRAN-contiguous order in
memory (first index varies the fastest).
>>> a = np.array([[1, 2, 3], [4, 5, 6]], order='C')
>>> a
array([[1, 2, 3],
[4, 5, 6]])
>>> np.isfortran(a)
False
>>> b = np.array([[1, 2, 3], [4, 5, 6]], order='FORTRAN')
>>> b
array([[1, 2, 3],
[4, 5, 6]])
>>> np.isfortran(b)
True
The transpose of a C-ordered array is a FORTRAN-ordered array.
>>> a = np.array([[1, 2, 3], [4, 5, 6]], order='C')
>>> a
array([[1, 2, 3],
[4, 5, 6]])
>>> np.isfortran(a)
False
>>> b = a.T
>>> b
array([[1, 4],
[2, 5],
[3, 6]])
>>> np.isfortran(b)
True
1-D arrays always evaluate as False.
>>> np.isfortran(np.array([1, 2], order='FORTRAN'))
False
"""
return a.flags.fnc
def argwhere(a):
"""
Find the indices of array elements that are non-zero, grouped by element.
Parameters
----------
a : array_like
Input data.
Returns
-------
index_array : ndarray
Indices of elements that are non-zero. Indices are grouped by element.
See Also
--------
where, nonzero
Notes
-----
``np.argwhere(a)`` is the same as ``np.transpose(np.nonzero(a))``.
The output of ``argwhere`` is not suitable for indexing arrays.
For this purpose use ``where(a)`` instead.
Examples
--------
>>> x = np.arange(6).reshape(2,3)
>>> x
array([[0, 1, 2],
[3, 4, 5]])
>>> np.argwhere(x>1)
array([[0, 2],
[1, 0],
[1, 1],
[1, 2]])
"""
return transpose(asanyarray(a).nonzero())
def flatnonzero(a):
"""
Return indices that are non-zero in the flattened version of a.
This is equivalent to a.ravel().nonzero()[0].
Parameters
----------
a : ndarray
Input array.
Returns
-------
res : ndarray
Output array, containing the indices of the elements of `a.ravel()`
that are non-zero.
See Also
--------
nonzero : Return the indices of the non-zero elements of the input array.
ravel : Return a 1-D array containing the elements of the input array.
Examples
--------
>>> x = np.arange(-2, 3)
>>> x
array([-2, -1, 0, 1, 2])
>>> np.flatnonzero(x)
array([0, 1, 3, 4])
Use the indices of the non-zero elements as an index array to extract
these elements:
>>> x.ravel()[np.flatnonzero(x)]
array([-2, -1, 1, 2])
"""
return a.ravel().nonzero()[0]
_mode_from_name_dict = {'v': 0,
's' : 1,
'f' : 2}
def _mode_from_name(mode):
if isinstance(mode, type("")):
return _mode_from_name_dict[mode.lower()[0]]
return mode
def correlate(a,v,mode='valid',old_behavior=True):
"""
Discrete, linear correlation of two 1-dimensional sequences.
This function is equivalent to
>>> np.convolve(a, v[::-1], mode=mode)
... #doctest: +SKIP
where ``v[::-1]`` is the reverse of `v`.
Parameters
----------
a, v : array_like
Input sequences.
mode : {'valid', 'same', 'full'}, optional
Refer to the `convolve` docstring. Note that the default
is `valid`, unlike `convolve`, which uses `full`.
old_behavior : bool
If True, uses the old, numeric behavior (correlate(a,v) == correlate(v,
a), and the conjugate is not taken for complex arrays). If False, uses
the conventional signal processing definition (see note).
See Also
--------
convolve : Discrete, linear convolution of two
one-dimensional sequences.
acorrelate : Discrete correlation following the usual signal processing
definition for complex arrays, and without assuming that
``correlate(a, b) == correlate(b, a)``.
Notes
-----
If `old_behavior` is False, this function computes the correlation as
generally defined in signal processing texts::
z[k] = sum_n a[n] * conj(v[n+k])
with a and v sequences being zero-padded where necessary and conj being
the conjugate.
Examples
--------
>>> np.correlate([1, 2, 3], [0, 1, 0.5])
array([ 3.5])
>>> np.correlate([1, 2, 3], [0, 1, 0.5], "same")
array([ 2. , 3.5, 3. ])
>>> np.correlate([1, 2, 3], [0, 1, 0.5], "full")
array([ 0.5, 2. , 3.5, 3. , 0. ])
"""
mode = _mode_from_name(mode)
if old_behavior:
warnings.warn("""
The current behavior of correlate is deprecated for 1.4.0, and will be removed
for NumPy 1.5.0.
The new behavior fits the conventional definition of correlation: inputs are
never swapped, and the second argument is conjugated for complex arrays.""",
DeprecationWarning)
return multiarray.correlate(a,v,mode)
else:
return multiarray.correlate2(a,v,mode)
def convolve(a,v,mode='full'):
"""
Returns the discrete, linear convolution of two one-dimensional sequences.
The convolution operator is often seen in signal processing, where it
models the effect of a linear time-invariant system on a signal [1]_. In
probability theory, the sum of two independent random variables is
distributed according to the convolution of their individual
distributions.
Parameters
----------
a : (N,) array_like
First one-dimensional input array.
v : (M,) array_like
Second one-dimensional input array.
mode : {'full', 'valid', 'same'}, optional
'full':
By default, mode is 'full'. This returns the convolution
at each point of overlap, with an output shape of (N+M-1,). At
the end-points of the convolution, the signals do not overlap
completely, and boundary effects may be seen.
'same':
Mode `same` returns output of length ``max(M, N)``. Boundary
effects are still visible.
'valid':
Mode `valid` returns output of length
``max(M, N) - min(M, N) + 1``. The convolution product is only given
for points where the signals overlap completely. Values outside
the signal boundary have no effect.
Returns
-------
out : ndarray
Discrete, linear convolution of `a` and `v`.
See Also
--------
scipy.signal.fftconvolve : Convolve two arrays using the Fast Fourier
Transform.
scipy.linalg.toeplitz : Used to construct the convolution operator.
Notes
-----
The discrete convolution operation is defined as
.. math:: (f * g)[n] = \\sum_{m = -\\infty}^{\\infty} f[m] g[n - m]
It can be shown that a convolution :math:`x(t) * y(t)` in time/space
is equivalent to the multiplication :math:`X(f) Y(f)` in the Fourier
domain, after appropriate padding (padding is necessary to prevent
circular convolution). Since multiplication is more efficient (faster)
than convolution, the function `scipy.signal.fftconvolve` exploits the
FFT to calculate the convolution of large data-sets.
References
----------
.. [1] Wikipedia, "Convolution", http://en.wikipedia.org/wiki/Convolution.
Examples
--------
Note how the convolution operator flips the second array
before "sliding" the two across one another:
>>> np.convolve([1, 2, 3], [0, 1, 0.5])
array([ 0. , 1. , 2.5, 4. , 1.5])
Only return the middle values of the convolution.
Contains boundary effects, where zeros are taken
into account:
>>> np.convolve([1,2,3],[0,1,0.5], 'same')
array([ 1. , 2.5, 4. ])
The two arrays are of the same length, so there
is only one position where they completely overlap:
>>> np.convolve([1,2,3],[0,1,0.5], 'valid')
array([ 2.5])
"""
a,v = array(a, ndmin=1),array(v, ndmin=1)
if (len(v) > len(a)):
a, v = v, a
if len(a) == 0 :
raise ValueError('a cannot be empty')
if len(v) == 0 :
raise ValueError('v cannot be empty')
mode = _mode_from_name(mode)
return multiarray.correlate(a, v[::-1], mode)
def outer(a,b):
"""
Compute the outer product of two vectors.
Given two vectors, ``a = [a0, a1, ..., aM]`` and
``b = [b0, b1, ..., bN]``,
the outer product [1]_ is::
[[a0*b0 a0*b1 ... a0*bN ]
[a1*b0 .
[ ... .
[aM*b0 aM*bN ]]
Parameters
----------
a, b : array_like, shape (M,), (N,)
First and second input vectors. Inputs are flattened if they
are not already 1-dimensional.
Returns
-------
out : ndarray, shape (M, N)
``out[i, j] = a[i] * b[j]``
References
----------
.. [1] : G. H. Golub and C. F. van Loan, *Matrix Computations*, 3rd
ed., Baltimore, MD, Johns Hopkins University Press, 1996,
pg. 8.
Examples
--------
Make a (*very* coarse) grid for computing a Mandelbrot set:
>>> rl = np.outer(np.ones((5,)), np.linspace(-2, 2, 5))
>>> rl
array([[-2., -1., 0., 1., 2.],
[-2., -1., 0., 1., 2.],
[-2., -1., 0., 1., 2.],
[-2., -1., 0., 1., 2.],
[-2., -1., 0., 1., 2.]])
>>> im = np.outer(1j*np.linspace(2, -2, 5), np.ones((5,)))
>>> im
array([[ 0.+2.j, 0.+2.j, 0.+2.j, 0.+2.j, 0.+2.j],
[ 0.+1.j, 0.+1.j, 0.+1.j, 0.+1.j, 0.+1.j],
[ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.-1.j, 0.-1.j, 0.-1.j, 0.-1.j, 0.-1.j],
[ 0.-2.j, 0.-2.j, 0.-2.j, 0.-2.j, 0.-2.j]])
>>> grid = rl + im
>>> grid
array([[-2.+2.j, -1.+2.j, 0.+2.j, 1.+2.j, 2.+2.j],
[-2.+1.j, -1.+1.j, 0.+1.j, 1.+1.j, 2.+1.j],
[-2.+0.j, -1.+0.j, 0.+0.j, 1.+0.j, 2.+0.j],
[-2.-1.j, -1.-1.j, 0.-1.j, 1.-1.j, 2.-1.j],
[-2.-2.j, -1.-2.j, 0.-2.j, 1.-2.j, 2.-2.j]])
An example using a "vector" of letters:
>>> x = np.array(['a', 'b', 'c'], dtype=object)
>>> np.outer(x, [1, 2, 3])
array([[a, aa, aaa],
[b, bb, bbb],
[c, cc, ccc]], dtype=object)
"""
a = asarray(a)
b = asarray(b)
return a.ravel()[:,newaxis]*b.ravel()[newaxis,:]
# try to import blas optimized dot if available
try:
# importing this changes the dot function for basic 4 types
# to blas-optimized versions.
from _dotblas import dot, vdot, inner, alterdot, restoredot
except ImportError:
# docstrings are in add_newdocs.py
inner = multiarray.inner
dot = multiarray.dot
def vdot(a, b):
return dot(asarray(a).ravel().conj(), asarray(b).ravel())
def alterdot():
pass
def restoredot():
pass
def tensordot(a, b, axes=2):
"""
Compute tensor dot product along specified axes for arrays >= 1-D.
Given two tensors (arrays of dimension greater than or equal to one),
``a`` and ``b``, and an array_like object containing two array_like
objects, ``(a_axes, b_axes)``, sum the products of ``a``'s and ``b``'s
elements (components) over the axes specified by ``a_axes`` and
``b_axes``. The third argument can be a single non-negative
integer_like scalar, ``N``; if it is such, then the last ``N``
dimensions of ``a`` and the first ``N`` dimensions of ``b`` are summed
over.
Parameters
----------
a, b : array_like, len(shape) >= 1
Tensors to "dot".
axes : variable type
* integer_like scalar
Number of axes to sum over (applies to both arrays); or
* array_like, shape = (2,), both elements array_like
Axes to be summed over, first sequence applying to ``a``, second
to ``b``.
See Also
--------
numpy.dot
Notes
-----
When there is more than one axis to sum over - and they are not the last
(first) axes of ``a`` (``b``) - the argument ``axes`` should consist of
two sequences of the same length, with the first axis to sum over given
first in both sequences, the second axis second, and so forth.
Examples
--------
A "traditional" example:
>>> a = np.arange(60.).reshape(3,4,5)
>>> b = np.arange(24.).reshape(4,3,2)
>>> c = np.tensordot(a,b, axes=([1,0],[0,1]))
>>> c.shape
(5, 2)
>>> c
array([[ 4400., 4730.],
[ 4532., 4874.],
[ 4664., 5018.],
[ 4796., 5162.],
[ 4928., 5306.]])
>>> # A slower but equivalent way of computing the same...
>>> d = np.zeros((5,2))
>>> for i in range(5):
... for j in range(2):
... for k in range(3):
... for n in range(4):
... d[i,j] += a[k,n,i] * b[n,k,j]
>>> c == d
array([[ True, True],
[ True, True],
[ True, True],
[ True, True],
[ True, True]], dtype=bool)
An extended example taking advantage of the overloading of + and \\*:
>>> a = np.array(range(1, 9))
>>> a.shape = (2, 2, 2)
>>> A = np.array(('a', 'b', 'c', 'd'), dtype=object)
>>> A.shape = (2, 2)
>>> a; A
array([[[1, 2],
[3, 4]],
[[5, 6],
[7, 8]]])
array([[a, b],
[c, d]], dtype=object)
>>> np.tensordot(a, A) # third argument default is 2
array([abbcccdddd, aaaaabbbbbbcccccccdddddddd], dtype=object)
>>> np.tensordot(a, A, 1)
array([[[acc, bdd],
[aaacccc, bbbdddd]],
[[aaaaacccccc, bbbbbdddddd],
[aaaaaaacccccccc, bbbbbbbdddddddd]]], dtype=object)
>>> np.tensordot(a, A, 0) # "Left for reader" (result too long to incl.)
array([[[[[a, b],
[c, d]],
...
>>> np.tensordot(a, A, (0, 1))
array([[[abbbbb, cddddd],
[aabbbbbb, ccdddddd]],
[[aaabbbbbbb, cccddddddd],
[aaaabbbbbbbb, ccccdddddddd]]], dtype=object)
>>> np.tensordot(a, A, (2, 1))
array([[[abb, cdd],
[aaabbbb, cccdddd]],
[[aaaaabbbbbb, cccccdddddd],
[aaaaaaabbbbbbbb, cccccccdddddddd]]], dtype=object)
>>> np.tensordot(a, A, ((0, 1), (0, 1)))
array([abbbcccccddddddd, aabbbbccccccdddddddd], dtype=object)
>>> np.tensordot(a, A, ((2, 1), (1, 0)))
array([acccbbdddd, aaaaacccccccbbbbbbdddddddd], dtype=object)
"""
try:
iter(axes)
except:
axes_a = range(-axes,0)
axes_b = range(0,axes)
else:
axes_a, axes_b = axes
try:
na = len(axes_a)
axes_a = list(axes_a)
except TypeError:
axes_a = [axes_a]
na = 1
try:
nb = len(axes_b)
axes_b = list(axes_b)
except TypeError:
axes_b = [axes_b]
nb = 1
a, b = asarray(a), asarray(b)
as_ = a.shape
nda = len(a.shape)
bs = b.shape
ndb = len(b.shape)
equal = True
if (na != nb): equal = False
else:
for k in xrange(na):
if as_[axes_a[k]] != bs[axes_b[k]]:
equal = False
break
if axes_a[k] < 0:
axes_a[k] += nda
if axes_b[k] < 0:
axes_b[k] += ndb
if not equal:
raise ValueError, "shape-mismatch for sum"
# Move the axes to sum over to the end of "a"
# and to the front of "b"
notin = [k for k in range(nda) if k not in axes_a]
newaxes_a = notin + axes_a
N2 = 1
for axis in axes_a:
N2 *= as_[axis]
newshape_a = (-1, N2)
olda = [as_[axis] for axis in notin]
notin = [k for k in range(ndb) if k not in axes_b]
newaxes_b = axes_b + notin
N2 = 1
for axis in axes_b:
N2 *= bs[axis]
newshape_b = (N2, -1)
oldb = [bs[axis] for axis in notin]
at = a.transpose(newaxes_a).reshape(newshape_a)
bt = b.transpose(newaxes_b).reshape(newshape_b)
res = dot(at, bt)
return res.reshape(olda + oldb)
def roll(a, shift, axis=None):
"""
Roll array elements along a given axis.
Elements that roll beyond the last position are re-introduced at
the first.
Parameters
----------
a : array_like
Input array.
shift : int
The number of places by which elements are shifted.
axis : int, optional
The axis along which elements are shifted. By default, the array
is flattened before shifting, after which the original
shape is restored.
Returns
-------
res : ndarray
Output array, with the same shape as `a`.
See Also
--------
rollaxis : Roll the specified axis backwards, until it lies in a
given position.
Examples
--------
>>> x = np.arange(10)
>>> np.roll(x, 2)
array([8, 9, 0, 1, 2, 3, 4, 5, 6, 7])
>>> x2 = np.reshape(x, (2,5))
>>> x2
array([[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9]])
>>> np.roll(x2, 1)
array([[9, 0, 1, 2, 3],
[4, 5, 6, 7, 8]])
>>> np.roll(x2, 1, axis=0)
array([[5, 6, 7, 8, 9],
[0, 1, 2, 3, 4]])
>>> np.roll(x2, 1, axis=1)
array([[4, 0, 1, 2, 3],
[9, 5, 6, 7, 8]])
"""
a = asanyarray(a)
if axis is None:
n = a.size
reshape = True
else:
n = a.shape[axis]
reshape = False
shift %= n
indexes = concatenate((arange(n-shift,n),arange(n-shift)))
res = a.take(indexes, axis)
if reshape:
return res.reshape(a.shape)
else:
return res
def rollaxis(a, axis, start=0):
"""
Roll the specified axis backwards, until it lies in a given position.
Parameters
----------
a : ndarray
Input array.
axis : int
The axis to roll backwards. The positions of the other axes do not
change relative to one another.
start : int, optional
The axis is rolled until it lies before this position.
Returns
-------
res : ndarray
Output array.
See Also
--------
roll : Roll the elements of an array by a number of positions along a
given axis.
Examples
--------
>>> a = np.ones((3,4,5,6))
>>> np.rollaxis(a, 3, 1).shape
(3, 6, 4, 5)
>>> np.rollaxis(a, 2).shape
(5, 3, 4, 6)
>>> np.rollaxis(a, 1, 4).shape
(3, 5, 6, 4)
"""
n = a.ndim
if axis < 0:
axis += n
if start < 0:
start += n
msg = 'rollaxis: %s (%d) must be >=0 and < %d'
if not (0 <= axis < n):
raise ValueError, msg % ('axis', axis, n)
if not (0 <= start < n+1):
raise ValueError, msg % ('start', start, n+1)
if (axis < start): # it's been removed
start -= 1
if axis==start:
return a
axes = range(0,n)
axes.remove(axis)
axes.insert(start, axis)
return a.transpose(axes)
# fix hack in scipy which imports this function
def _move_axis_to_0(a, axis):
return rollaxis(a, axis, 0)
def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None):
"""
Return the cross product of two (arrays of) vectors.
The cross product of `a` and `b` in :math:`R^3` is a vector perpendicular
to both `a` and `b`. If `a` and `b` are arrays of vectors, the vectors
are defined by the last axis of `a` and `b` by default, and these axes
can have dimensions 2 or 3. Where the dimension of either `a` or `b` is
2, the third component of the input vector is assumed to be zero and the
cross product calculated accordingly. In cases where both input vectors
have dimension 2, the z-component of the cross product is returned.
Parameters
----------
a : array_like
Components of the first vector(s).
b : array_like
Components of the second vector(s).
axisa : int, optional
Axis of `a` that defines the vector(s). By default, the last axis.
axisb : int, optional
Axis of `b` that defines the vector(s). By default, the last axis.
axisc : int, optional
Axis of `c` containing the cross product vector(s). By default, the
last axis.
axis : int, optional
If defined, the axis of `a`, `b` and `c` that defines the vector(s)
and cross product(s). Overrides `axisa`, `axisb` and `axisc`.
Returns
-------
c : ndarray
Vector cross product(s).
Raises
------
ValueError
When the dimension of the vector(s) in `a` and/or `b` does not
equal 2 or 3.
See Also
--------
inner : Inner product
outer : Outer product.
ix_ : Construct index arrays.
Examples
--------
Vector cross-product.
>>> x = [1, 2, 3]
>>> y = [4, 5, 6]
>>> np.cross(x, y)
array([-3, 6, -3])
One vector with dimension 2.
>>> x = [1, 2]
>>> y = [4, 5, 6]
>>> np.cross(x, y)
array([12, -6, -3])
Equivalently:
>>> x = [1, 2, 0]
>>> y = [4, 5, 6]
>>> np.cross(x, y)
array([12, -6, -3])
Both vectors with dimension 2.
>>> x = [1,2]
>>> y = [4,5]
>>> np.cross(x, y)
-3
Multiple vector cross-products. Note that the direction of the cross
product vector is defined by the `right-hand rule`.
>>> x = np.array([[1,2,3], [4,5,6]])
>>> y = np.array([[4,5,6], [1,2,3]])
>>> np.cross(x, y)
array([[-3, 6, -3],
[ 3, -6, 3]])
The orientation of `c` can be changed using the `axisc` keyword.
>>> np.cross(x, y, axisc=0)
array([[-3, 3],
[ 6, -6],
[-3, 3]])
Change the vector definition of `x` and `y` using `axisa` and `axisb`.
>>> x = np.array([[1,2,3], [4,5,6], [7, 8, 9]])
>>> y = np.array([[7, 8, 9], [4,5,6], [1,2,3]])
>>> np.cross(x, y)
array([[ -6, 12, -6],
[ 0, 0, 0],
[ 6, -12, 6]])
>>> np.cross(x, y, axisa=0, axisb=0)
array([[-24, 48, -24],
[-30, 60, -30],
[-36, 72, -36]])
"""
if axis is not None:
axisa,axisb,axisc=(axis,)*3
a = asarray(a).swapaxes(axisa, 0)
b = asarray(b).swapaxes(axisb, 0)
msg = "incompatible dimensions for cross product\n"\
"(dimension must be 2 or 3)"
if (a.shape[0] not in [2,3]) or (b.shape[0] not in [2,3]):
raise ValueError(msg)
if a.shape[0] == 2:
if (b.shape[0] == 2):
cp = a[0]*b[1] - a[1]*b[0]
if cp.ndim == 0:
return cp
else:
return cp.swapaxes(0, axisc)
else:
x = a[1]*b[2]
y = -a[0]*b[2]
z = a[0]*b[1] - a[1]*b[0]
elif a.shape[0] == 3:
if (b.shape[0] == 3):
x = a[1]*b[2] - a[2]*b[1]
y = a[2]*b[0] - a[0]*b[2]
z = a[0]*b[1] - a[1]*b[0]
else:
x = -a[2]*b[1]
y = a[2]*b[0]
z = a[0]*b[1] - a[1]*b[0]
cp = array([x,y,z])
if cp.ndim == 1:
return cp
else:
return cp.swapaxes(0,axisc)
#Use numarray's printing function
from arrayprint import array2string, get_printoptions, set_printoptions
_typelessdata = [int_, float_, complex_]
if issubclass(intc, int):
_typelessdata.append(intc)
if issubclass(longlong, int):
_typelessdata.append(longlong)
def array_repr(arr, max_line_width=None, precision=None, suppress_small=None):
"""
Return the string representation of an array.
Parameters
----------
arr : ndarray
Input array.
max_line_width : int, optional
The maximum number of columns the string should span. Newline
characters split the string appropriately after array elements.
precision : int, optional
Floating point precision. Default is the current printing precision
(usually 8), which can be altered using `set_printoptions`.
suppress_small : bool, optional
Represent very small numbers as zero, default is False. Very small
is defined by `precision`, if the precision is 8 then
numbers smaller than 5e-9 are represented as zero.
Returns
-------
string : str
The string representation of an array.
See Also
--------
array_str, array2string, set_printoptions
Examples
--------
>>> np.array_repr(np.array([1,2]))
'array([1, 2])'
>>> np.array_repr(np.ma.array([0.]))
'MaskedArray([ 0.])'
>>> np.array_repr(np.array([], np.int32))
'array([], dtype=int32)'
>>> x = np.array([1e-6, 4e-7, 2, 3])
>>> np.array_repr(x, precision=6, suppress_small=True)
'array([ 0.000001, 0. , 2. , 3. ])'
"""
if arr.size > 0 or arr.shape==(0,):
lst = array2string(arr, max_line_width, precision, suppress_small,
', ', "array(")
else: # show zero-length shape unless it is (0,)
lst = "[], shape=%s" % (repr(arr.shape),)
typeless = arr.dtype.type in _typelessdata
if arr.__class__ is not ndarray:
cName= arr.__class__.__name__
else:
cName = "array"
if typeless and arr.size:
return cName + "(%s)" % lst
else:
typename=arr.dtype.name
lf = ''
if issubclass(arr.dtype.type, flexible):
if arr.dtype.names:
typename = "%s" % str(arr.dtype)
else:
typename = "'%s'" % str(arr.dtype)
lf = '\n'+' '*len("array(")
return cName + "(%s, %sdtype=%s)" % (lst, lf, typename)
def array_str(a, max_line_width=None, precision=None, suppress_small=None):
"""
Return a string representation of the data in an array.
The data in the array is returned as a single string. This function
is similar to `array_repr`, the difference is that `array_repr` also
returns information on the type of array and data type.
Parameters
----------
a : ndarray
Input array.
max_line_width : int, optional
Inserts newlines if text is longer than `max_line_width`.
precision : int, optional
Floating point precision. Default is the current printing precision
(usually 8), which can be altered using set_printoptions.
suppress_small : bool, optional
Represent very small numbers as zero, default is False. Very small is
defined by precision, if the precision is 8 then numbers smaller than
5e-9 are represented as zero.
See Also
--------
array2string, array_repr, set_printoptions
Examples
--------
>>> np.array_str(np.arange(3))
'[0 1 2]'
"""
return array2string(a, max_line_width, precision, suppress_small, ' ', "", str)
def set_string_function(f, repr=True):
"""
Set a Python function to be used when pretty printing arrays.
Parameters
----------
f : function or None
Function to be used to pretty print arrays. The function should expect
a single array argument and return a string of the representation of
the array. If None, the function is reset to the default NumPy function
to print arrays.
repr : bool, optional
If True (default), the function for pretty printing (``__repr__``)
is set, if False the function that returns the default string
representation (``__str__``) is set.
See Also
--------
set_printoptions, get_printoptions
Examples
--------
>>> def pprint(arr):
... return 'HA! - What are you going to do now?'
...
>>> np.set_string_function(pprint)
>>> a = np.arange(10)
>>> a
HA! - What are you going to do now?
>>> print a
[0 1 2 3 4 5 6 7 8 9]
We can reset the function to the default:
>>> np.set_string_function(None)
>>> a
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
`repr` affects either pretty printing or normal string representation.
Note that ``__repr__`` is still affected by setting ``__str__``
because the width of each array element in the returned string becomes
equal to the length of the result of ``__str__()``.
>>> x = np.arange(4)
>>> np.set_string_function(lambda x:'random', repr=False)
>>> x.__str__()
'random'
>>> x.__repr__()
'array([ 0, 1, 2, 3])'
"""
if f is None:
if repr:
return multiarray.set_string_function(array_repr, 1)
else:
return multiarray.set_string_function(array_str, 0)
else:
return multiarray.set_string_function(f, repr)
set_string_function(array_str, 0)
set_string_function(array_repr, 1)
little_endian = (sys.byteorder == 'little')
def indices(dimensions, dtype=int):
"""
Return an array representing the indices of a grid.
Compute an array where the subarrays contain index values 0,1,...
varying only along the corresponding axis.
Parameters
----------
dimensions : sequence of ints
The shape of the grid.
dtype : dtype, optional
Data type of the result.
Returns
-------
grid : ndarray
The array of grid indices,
``grid.shape = (len(dimensions),) + tuple(dimensions)``.
See Also
--------
mgrid, meshgrid
Notes
-----
The output shape is obtained by prepending the number of dimensions
in front of the tuple of dimensions, i.e. if `dimensions` is a tuple
``(r0, ..., rN-1)`` of length ``N``, the output shape is
``(N,r0,...,rN-1)``.
    Each subarray ``grid[k]`` contains the N-D array of indices along the
``k-th`` axis. Explicitly::
grid[k,i0,i1,...,iN-1] = ik
Examples
--------
>>> grid = np.indices((2, 3))
>>> grid.shape
(2, 2, 3)
>>> grid[0] # row indices
array([[0, 0, 0],
[1, 1, 1]])
>>> grid[1] # column indices
array([[0, 1, 2],
[0, 1, 2]])
The indices can be used as an index into an array.
>>> x = np.arange(20).reshape(5, 4)
>>> row, col = np.indices((2, 3))
>>> x[row, col]
array([[0, 1, 2],
[4, 5, 6]])
Note that it would be more straightforward in the above example to
extract the required elements directly with ``x[:2, :3]``.
"""
dimensions = tuple(dimensions)
N = len(dimensions)
if N == 0:
return array([],dtype=dtype)
res = empty((N,)+dimensions, dtype=dtype)
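    # For each axis i, build an arange shaped so it varies only along that
    # axis, then broadcast-add it against zeros of the complementary shape,
    # writing the result into res[i].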
for i, dim in enumerate(dimensions):
tmp = arange(dim,dtype=dtype)
tmp.shape = (1,)*i + (dim,)+(1,)*(N-i-1)
newdim = dimensions[:i] + (1,)+ dimensions[i+1:]
val = zeros(newdim, dtype)
add(tmp, val, res[i])
return res
def fromfunction(function, shape, **kwargs):
"""
Construct an array by executing a function over each coordinate.
The resulting array therefore has a value ``fn(x, y, z)`` at
coordinate ``(x, y, z)``.
Parameters
----------
function : callable
The function is called with N parameters, each of which
represents the coordinates of the array varying along a
specific axis. For example, if `shape` were ``(2, 2)``, then
the parameters would be two arrays, ``[[0, 0], [1, 1]]`` and
``[[0, 1], [0, 1]]``. `function` must be capable of operating on
arrays, and should return a scalar value.
shape : (N,) tuple of ints
Shape of the output array, which also determines the shape of
the coordinate arrays passed to `function`.
dtype : data-type, optional
Data-type of the coordinate arrays passed to `function`.
By default, `dtype` is float.
Returns
-------
out : any
The result of the call to `function` is passed back directly.
Therefore the type and shape of `out` is completely determined by
`function`.
See Also
--------
indices, meshgrid
Notes
-----
Keywords other than `shape` and `dtype` are passed to `function`.
Examples
--------
>>> np.fromfunction(lambda i, j: i == j, (3, 3), dtype=int)
array([[ True, False, False],
[False, True, False],
[False, False, True]], dtype=bool)
>>> np.fromfunction(lambda i, j: i + j, (3, 3), dtype=int)
array([[0, 1, 2],
[1, 2, 3],
[2, 3, 4]])
"""
dtype = kwargs.pop('dtype', float)
args = indices(shape, dtype=dtype)
return function(*args,**kwargs)
def isscalar(num):
"""
Returns True if the type of `num` is a scalar type.
Parameters
----------
num : any
Input argument, can be of any type and shape.
Returns
-------
val : bool
True if `num` is a scalar type, False if it is not.
Examples
--------
>>> np.isscalar(3.1)
True
>>> np.isscalar([3.1])
False
>>> np.isscalar(False)
True
"""
if isinstance(num, generic):
return True
else:
return type(num) in ScalarType
_lkup = {
'0':'0000',
'1':'0001',
'2':'0010',
'3':'0011',
'4':'0100',
'5':'0101',
'6':'0110',
'7':'0111',
'8':'1000',
'9':'1001',
'a':'1010',
'b':'1011',
'c':'1100',
'd':'1101',
'e':'1110',
'f':'1111',
'A':'1010',
'B':'1011',
'C':'1100',
'D':'1101',
'E':'1110',
'F':'1111',
'L':''}
def binary_repr(num, width=None):
"""
Return the binary representation of the input number as a string.
For negative numbers, if width is not given, a minus sign is added to the
front. If width is given, the two's complement of the number is
returned, with respect to that width.
In a two's-complement system negative numbers are represented by the two's
complement of the absolute value. This is the most common method of
    representing signed integers on computers [1]_. An N-bit two's-complement
system can represent every integer in the range
:math:`-2^{N-1}` to :math:`+2^{N-1}-1`.
Parameters
----------
num : int
Only an integer decimal number can be used.
width : int, optional
The length of the returned string if `num` is positive, the length of
the two's complement if `num` is negative.
Returns
-------
bin : str
Binary representation of `num` or two's complement of `num`.
See Also
--------
base_repr: Return a string representation of a number in the given base
system.
Notes
-----
`binary_repr` is equivalent to using `base_repr` with base 2, but about 25x
faster.
References
----------
.. [1] Wikipedia, "Two's complement",
http://en.wikipedia.org/wiki/Two's_complement
Examples
--------
>>> np.binary_repr(3)
'11'
>>> np.binary_repr(-3)
'-11'
>>> np.binary_repr(3, width=4)
'0011'
The two's complement is returned when the input number is negative and
width is specified:
>>> np.binary_repr(-3, width=4)
'1101'
"""
sign = ''
if num < 0:
if width is None:
sign = '-'
num = -num
else:
# replace num with its 2-complement
num = 2**width + num
elif num == 0:
return '0'*(width or 1)
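    # Convert via hex: each hex digit expands to 4 bits through _lkup, and the
    # trailing 'L' of a Python 2 long repr maps to an empty string.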
ostr = hex(num)
bin = ''.join([_lkup[ch] for ch in ostr[2:]])
bin = bin.lstrip('0')
if width is not None:
bin = bin.zfill(width)
return sign + bin
def base_repr (number, base=2, padding=0):
"""
Return a string representation of a number in the given base system.
Parameters
----------
number : scalar
The value to convert. Only positive values are handled.
base : int, optional
Convert `number` to the `base` number system. The valid range is 2-36,
the default value is 2.
padding : int, optional
Number of zeros padded on the left. Default is 0 (no padding).
Returns
-------
out : str
String representation of `number` in `base` system.
See Also
--------
binary_repr : Faster version of `base_repr` for base 2 that also handles
negative numbers.
Examples
--------
>>> np.base_repr(5)
'101'
>>> np.base_repr(6, 5)
'11'
>>> np.base_repr(7, base=5, padding=3)
'00012'
>>> np.base_repr(10, base=16)
'A'
>>> np.base_repr(32, base=16)
'20'
"""
if number < 0:
raise ValueError("negative numbers not handled in base_repr")
if base > 36:
raise ValueError("bases greater than 36 not handled in base_repr")
chars = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
import math
lnb = math.log(base)
res = padding*chars[0]
if number == 0:
return res + chars[0]
exponent = int (math.log (number)/lnb)
while(exponent >= 0):
term = long(base)**exponent
lead_digit = int(number / term)
res += chars[lead_digit]
number -= term*lead_digit
exponent -= 1
return res
from cPickle import load, loads
_cload = load
_file = open
def load(file):
"""
Wrapper around cPickle.load which accepts either a file-like object or
a filename.
Note that the NumPy binary format is not based on pickle/cPickle anymore.
For details on the preferred way of loading and saving files, see `load`
and `save`.
See Also
--------
load, save
"""
if isinstance(file, type("")):
file = _file(file,"rb")
return _cload(file)
# These are all essentially abbreviations
# These might wind up in a special abbreviations module
def _maketup(descr, val):
dt = dtype(descr)
# Place val in all scalar tuples:
fields = dt.fields
if fields is None:
return val
else:
res = [_maketup(fields[name][0],val) for name in dt.names]
return tuple(res)
def ones(shape, dtype=None, order='C'):
"""
Return a new array of given shape and type, filled with ones.
Please refer to the documentation for `zeros`.
See Also
--------
zeros
Examples
--------
>>> np.ones(5)
array([ 1., 1., 1., 1., 1.])
>>> np.ones((5,), dtype=np.int)
array([1, 1, 1, 1, 1])
>>> np.ones((2, 1))
array([[ 1.],
[ 1.]])
>>> s = (2,2)
>>> np.ones(s)
array([[ 1., 1.],
[ 1., 1.]])
"""
a = empty(shape, dtype, order)
try:
a.fill(1)
# Above is faster now after addition of fast loops.
#a = zeros(shape, dtype, order)
#a+=1
except TypeError:
obj = _maketup(dtype, 1)
a.fill(obj)
return a
def identity(n, dtype=None):
"""
Return the identity array.
The identity array is a square array with ones on
the main diagonal.
Parameters
----------
n : int
Number of rows (and columns) in `n` x `n` output.
dtype : data-type, optional
Data-type of the output. Defaults to ``float``.
Returns
-------
out : ndarray
`n` x `n` array with its main diagonal set to one,
and all other elements 0.
Examples
--------
>>> np.identity(3)
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
"""
a = zeros((n,n), dtype=dtype)
a.flat[::n+1] = 1
return a
def allclose(a, b, rtol=1.e-5, atol=1.e-8):
"""
Returns True if two arrays are element-wise equal within a tolerance.
The tolerance values are positive, typically very small numbers. The
relative difference (`rtol` * abs(`b`)) and the absolute difference
`atol` are added together to compare against the absolute difference
between `a` and `b`.
Parameters
----------
a, b : array_like
Input arrays to compare.
rtol : float
The relative tolerance parameter (see Notes).
atol : float
The absolute tolerance parameter (see Notes).
Returns
-------
y : bool
Returns True if the two arrays are equal within the given
tolerance; False otherwise. If either array contains NaN, then
False is returned.
See Also
--------
all, any, alltrue, sometrue
Notes
-----
If the following equation is element-wise True, then allclose returns
True.
absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`))
The above equation is not symmetric in `a` and `b`, so that
`allclose(a, b)` might be different from `allclose(b, a)` in
some rare cases.
Examples
--------
>>> np.allclose([1e10,1e-7], [1.00001e10,1e-8])
False
>>> np.allclose([1e10,1e-8], [1.00001e10,1e-9])
True
>>> np.allclose([1e10,1e-8], [1.0001e10,1e-9])
False
>>> np.allclose([1.0, np.nan], [1.0, np.nan])
False
"""
x = array(a, copy=False)
y = array(b, copy=False)
xinf = isinf(x)
if not all(xinf == isinf(y)):
return False
if not any(xinf):
return all(less_equal(absolute(x-y), atol + rtol * absolute(y)))
if not all(x[xinf] == y[xinf]):
return False
x = x[~xinf]
y = y[~xinf]
return all(less_equal(absolute(x-y), atol + rtol * absolute(y)))
def array_equal(a1, a2):
"""
True if two arrays have the same shape and elements, False otherwise.
Parameters
----------
a1, a2 : array_like
Input arrays.
Returns
-------
b : bool
Returns True if the arrays are equal.
See Also
--------
allclose: Returns True if two arrays are element-wise equal within a
tolerance.
array_equiv: Returns True if input arrays are shape consistent and all
elements equal.
Examples
--------
>>> np.array_equal([1, 2], [1, 2])
True
>>> np.array_equal(np.array([1, 2]), np.array([1, 2]))
True
>>> np.array_equal([1, 2], [1, 2, 3])
False
>>> np.array_equal([1, 2], [1, 4])
False
"""
try:
a1, a2 = asarray(a1), asarray(a2)
except:
return False
if a1.shape != a2.shape:
return False
return bool(logical_and.reduce(equal(a1,a2).ravel()))
def array_equiv(a1, a2):
"""
Returns True if input arrays are shape consistent and all elements equal.
Shape consistent means they are either the same shape, or one input array
can be broadcasted to create the same shape as the other one.
Parameters
----------
a1, a2 : array_like
Input arrays.
Returns
-------
out : bool
True if equivalent, False otherwise.
Examples
--------
>>> np.array_equiv([1, 2], [1, 2])
True
>>> np.array_equiv([1, 2], [1, 3])
False
Showing the shape equivalence:
>>> np.array_equiv([1, 2], [[1, 2], [1, 2]])
True
>>> np.array_equiv([1, 2], [[1, 2, 1, 2], [1, 2, 1, 2]])
False
>>> np.array_equiv([1, 2], [[1, 2], [1, 3]])
False
"""
try:
a1, a2 = asarray(a1), asarray(a2)
except:
return False
try:
return bool(logical_and.reduce(equal(a1,a2).ravel()))
except ValueError:
return False
_errdict = {"ignore":ERR_IGNORE,
"warn":ERR_WARN,
"raise":ERR_RAISE,
"call":ERR_CALL,
"print":ERR_PRINT,
"log":ERR_LOG}
_errdict_rev = {}
for key in _errdict.keys():
_errdict_rev[_errdict[key]] = key
del key
def seterr(all=None, divide=None, over=None, under=None, invalid=None):
"""
Set how floating-point errors are handled.
Note that operations on integer scalar types (such as `int16`) are
handled like floating point, and are affected by these settings.
Parameters
----------
all : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
Set treatment for all types of floating-point errors at once:
- ignore: Take no action when the exception occurs.
- warn: Print a `RuntimeWarning` (via the Python `warnings` module).
- raise: Raise a `FloatingPointError`.
- call: Call a function specified using the `seterrcall` function.
- print: Print a warning directly to ``stdout``.
- log: Record error in a Log object specified by `seterrcall`.
The default is not to change the current behavior.
divide : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
Treatment for division by zero.
over : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
Treatment for floating-point overflow.
under : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
Treatment for floating-point underflow.
invalid : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
Treatment for invalid floating-point operation.
Returns
-------
old_settings : dict
Dictionary containing the old settings.
See also
--------
seterrcall : Set a callback function for the 'call' mode.
geterr, geterrcall
Notes
-----
The floating-point exceptions are defined in the IEEE 754 standard [1]:
- Division by zero: infinite result obtained from finite numbers.
- Overflow: result too large to be expressed.
- Underflow: result so close to zero that some precision
was lost.
- Invalid operation: result is not an expressible number, typically
indicates that a NaN was produced.
.. [1] http://en.wikipedia.org/wiki/IEEE_754
Examples
--------
>>> old_settings = np.seterr(all='ignore') #seterr to known value
>>> np.seterr(over='raise')
{'over': 'ignore', 'divide': 'ignore', 'invalid': 'ignore',
'under': 'ignore'}
>>> np.seterr(all='ignore') # reset to default
{'over': 'raise', 'divide': 'ignore', 'invalid': 'ignore', 'under': 'ignore'}
>>> np.int16(32000) * np.int16(3)
30464
>>> old_settings = np.seterr(all='warn', over='raise')
>>> np.int16(32000) * np.int16(3)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
FloatingPointError: overflow encountered in short_scalars
>>> old_settings = np.seterr(all='print')
>>> np.geterr()
{'over': 'print', 'divide': 'print', 'invalid': 'print', 'under': 'print'}
>>> np.int16(32000) * np.int16(3)
Warning: overflow encountered in short_scalars
30464
"""
pyvals = umath.geterrobj()
old = geterr()
if divide is None: divide = all or old['divide']
if over is None: over = all or old['over']
if under is None: under = all or old['under']
if invalid is None: invalid = all or old['invalid']
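    # Pack the four 3-bit error-mode codes into a single mask; slot [1] of the
    # list returned by umath.geterrobj holds this mask.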
maskvalue = ((_errdict[divide] << SHIFT_DIVIDEBYZERO) +
(_errdict[over] << SHIFT_OVERFLOW ) +
(_errdict[under] << SHIFT_UNDERFLOW) +
(_errdict[invalid] << SHIFT_INVALID))
pyvals[1] = maskvalue
umath.seterrobj(pyvals)
return old
def geterr():
"""
Get the current way of handling floating-point errors.
Returns
-------
res : dict
A dictionary with keys "divide", "over", "under", and "invalid",
whose values are from the strings "ignore", "print", "log", "warn",
"raise", and "call". The keys represent possible floating-point
exceptions, and the values define how these exceptions are handled.
See Also
--------
geterrcall, seterr, seterrcall
Notes
-----
For complete documentation of the types of floating-point exceptions and
treatment options, see `seterr`.
Examples
--------
>>> np.geterr() # default is all set to 'ignore'
{'over': 'ignore', 'divide': 'ignore', 'invalid': 'ignore',
'under': 'ignore'}
>>> np.arange(3.) / np.arange(3.)
array([ NaN, 1., 1.])
>>> oldsettings = np.seterr(all='warn', over='raise')
>>> np.geterr()
{'over': 'raise', 'divide': 'warn', 'invalid': 'warn', 'under': 'warn'}
>>> np.arange(3.) / np.arange(3.)
__main__:1: RuntimeWarning: invalid value encountered in divide
array([ NaN, 1., 1.])
"""
maskvalue = umath.geterrobj()[1]
mask = 7
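    # Each error category occupies a 3-bit field in maskvalue; shift it down
    # and mask with 0b111 to recover the _errdict code for that category.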
res = {}
val = (maskvalue >> SHIFT_DIVIDEBYZERO) & mask
res['divide'] = _errdict_rev[val]
val = (maskvalue >> SHIFT_OVERFLOW) & mask
res['over'] = _errdict_rev[val]
val = (maskvalue >> SHIFT_UNDERFLOW) & mask
res['under'] = _errdict_rev[val]
val = (maskvalue >> SHIFT_INVALID) & mask
res['invalid'] = _errdict_rev[val]
return res
def setbufsize(size):
"""
Set the size of the buffer used in ufuncs.
Parameters
----------
size : int
Size of buffer.
"""
if size > 10e6:
raise ValueError, "Buffer size, %s, is too big." % size
if size < 5:
raise ValueError, "Buffer size, %s, is too small." %size
if size % 16 != 0:
raise ValueError, "Buffer size, %s, is not a multiple of 16." %size
pyvals = umath.geterrobj()
old = getbufsize()
pyvals[0] = size
umath.seterrobj(pyvals)
return old
def getbufsize():
"""Return the size of the buffer used in ufuncs.
"""
return umath.geterrobj()[0]
def seterrcall(func):
"""
Set the floating-point error callback function or log object.
There are two ways to capture floating-point error messages. The first
is to set the error-handler to 'call', using `seterr`. Then, set
the function to call using this function.
The second is to set the error-handler to 'log', using `seterr`.
Floating-point errors then trigger a call to the 'write' method of
the provided object.
Parameters
----------
func : callable f(err, flag) or object with write method
Function to call upon floating-point errors ('call'-mode) or
object whose 'write' method is used to log such message ('log'-mode).
The call function takes two arguments. The first is the
type of error (one of "divide", "over", "under", or "invalid"),
and the second is the status flag. The flag is a byte, whose
least-significant bits indicate the status::
          [0 0 0 0 divide over under invalid]
In other words, ``flags = divide + 2*over + 4*under + 8*invalid``.
If an object is provided, its write method should take one argument,
a string.
Returns
-------
h : callable, log instance or None
The old error handler.
See Also
--------
seterr, geterr, geterrcall
Examples
--------
Callback upon error:
>>> def err_handler(type, flag):
... print "Floating point error (%s), with flag %s" % (type, flag)
...
>>> saved_handler = np.seterrcall(err_handler)
>>> save_err = np.seterr(all='call')
>>> np.array([1, 2, 3]) / 0.0
Floating point error (divide by zero), with flag 1
array([ Inf, Inf, Inf])
>>> np.seterrcall(saved_handler)
<function err_handler at 0x...>
>>> np.seterr(**save_err)
{'over': 'call', 'divide': 'call', 'invalid': 'call', 'under': 'call'}
Log error message:
>>> class Log(object):
... def write(self, msg):
... print "LOG: %s" % msg
...
>>> log = Log()
>>> saved_handler = np.seterrcall(log)
>>> save_err = np.seterr(all='log')
>>> np.array([1, 2, 3]) / 0.0
LOG: Warning: divide by zero encountered in divide
<BLANKLINE>
array([ Inf, Inf, Inf])
>>> np.seterrcall(saved_handler)
<__main__.Log object at 0x...>
>>> np.seterr(**save_err)
{'over': 'log', 'divide': 'log', 'invalid': 'log', 'under': 'log'}
"""
if func is not None and not callable(func):
if not hasattr(func, 'write') or not callable(func.write):
raise ValueError, "Only callable can be used as callback"
pyvals = umath.geterrobj()
old = geterrcall()
pyvals[2] = func
umath.seterrobj(pyvals)
return old
def geterrcall():
"""
Return the current callback function used on floating-point errors.
When the error handling for a floating-point error (one of "divide",
"over", "under", or "invalid") is set to 'call' or 'log', the function
that is called or the log instance that is written to is returned by
`geterrcall`. This function or log instance has been set with
`seterrcall`.
Returns
-------
errobj : callable, log instance or None
The current error handler. If no handler was set through `seterrcall`,
``None`` is returned.
See Also
--------
seterrcall, seterr, geterr
Notes
-----
For complete documentation of the types of floating-point exceptions and
treatment options, see `seterr`.
Examples
--------
>>> np.geterrcall() # we did not yet set a handler, returns None
>>> oldsettings = np.seterr(all='call')
>>> def err_handler(type, flag):
... print "Floating point error (%s), with flag %s" % (type, flag)
>>> oldhandler = np.seterrcall(err_handler)
>>> np.array([1, 2, 3]) / 0.0
Floating point error (divide by zero), with flag 1
array([ Inf, Inf, Inf])
>>> cur_handler = np.geterrcall()
>>> cur_handler is err_handler
True
"""
return umath.geterrobj()[2]
class _unspecified(object):
pass
_Unspecified = _unspecified()
class errstate(object):
"""
errstate(**kwargs)
Context manager for floating-point error handling.
Using an instance of `errstate` as a context manager allows statements in
that context to execute with a known error handling behavior. Upon entering
the context the error handling is set with `seterr` and `seterrcall`, and
upon exiting it is reset to what it was before.
Parameters
----------
kwargs : {divide, over, under, invalid}
Keyword arguments. The valid keywords are the possible floating-point
exceptions. Each keyword should have a string value that defines the
treatment for the particular error. Possible values are
{'ignore', 'warn', 'raise', 'call', 'print', 'log'}.
See Also
--------
seterr, geterr, seterrcall, geterrcall
Notes
-----
The ``with`` statement was introduced in Python 2.5, and can only be used
there by importing it: ``from __future__ import with_statement``. In
earlier Python versions the ``with`` statement is not available.
For complete documentation of the types of floating-point exceptions and
treatment options, see `seterr`.
Examples
--------
>>> from __future__ import with_statement # use 'with' in Python 2.5
>>> olderr = np.seterr(all='ignore') # Set error handling to known state.
>>> np.arange(3) / 0.
array([ NaN, Inf, Inf])
>>> with np.errstate(divide='warn'):
... np.arange(3) / 0.
...
__main__:2: RuntimeWarning: divide by zero encountered in divide
array([ NaN, Inf, Inf])
>>> np.sqrt(-1)
nan
>>> with np.errstate(invalid='raise'):
... np.sqrt(-1)
Traceback (most recent call last):
File "<stdin>", line 2, in <module>
FloatingPointError: invalid value encountered in sqrt
Outside the context the error handling behavior has not changed:
>>> np.geterr()
{'over': 'ignore', 'divide': 'ignore', 'invalid': 'ignore',
'under': 'ignore'}
"""
# Note that we don't want to run the above doctests because they will fail
# without a from __future__ import with_statement
def __init__(self, **kwargs):
self.call = kwargs.pop('call',_Unspecified)
self.kwargs = kwargs
def __enter__(self):
self.oldstate = seterr(**self.kwargs)
if self.call is not _Unspecified:
self.oldcall = seterrcall(self.call)
def __exit__(self, *exc_info):
seterr(**self.oldstate)
if self.call is not _Unspecified:
seterrcall(self.oldcall)
def _setdef():
defval = [UFUNC_BUFSIZE_DEFAULT, ERR_DEFAULT2, None]
umath.seterrobj(defval)
# set the default values
_setdef()
Inf = inf = infty = Infinity = PINF
nan = NaN = NAN
False_ = bool_(False)
True_ = bool_(True)
import fromnumeric
from fromnumeric import *
extend_all(fromnumeric)
|
py | 7df88f596826b65bfe76077ff78c74b27dc2f229 | #!/usr/bin/env python
__author__ = 'simonernst'
"""
This program will get 3D position of an item from get_coordinate_object node
and save it as an Interest Point
"""
import rospy
import os, time
import shutil
import math, json, random
from rospy_message_converter import message_converter, json_message_converter
import tf
from tf_broadcaster.msg import DetectionCoordinates, PointCoordinates
from geometry_msgs.msg import PointStamped, Pose
from robocup_msgs.msg import InterestPoint
import thread
from map_manager.srv import *
from tf_broadcaster.srv import CurrentArea, GetObjectsInRoom, GetObjectsInRoomResponse, GetPeopleInRoom, GetPeopleInRoomResponse, ResetObjects, ResetObjectsResponse
from ros_people_mng_msgs.msg import PeopleMetaInfoListWithoutImg
class ObjectTfBroadcaster:
TEMP_PATH=""
MAP_MANAGER_PATH=""
_sleep_time = 2
def __init__(self):
rospy.init_node('tfbroadcaster')
print(os.getcwd())
self.current_dir = os.path.dirname(os.path.realpath(__file__))
self.TEMP_PATH=rospy.get_param("~path_to_data_temp")
temp_folder = os.path.join(self.current_dir,self.TEMP_PATH)
if not os.path.exists(temp_folder):
os.makedirs(temp_folder)
self.MAP_MANAGER_PATH=rospy.get_param("~path_to_data_ipt")
itp_folder = os.path.join(self.current_dir,self.MAP_MANAGER_PATH)
if not os.path.exists(itp_folder):
os.makedirs(itp_folder)
self.configure()
def configure(self):
self.dirs = os.listdir(self.TEMP_PATH)
self.br=tf.TransformBroadcaster()
self.listener=tf.TransformListener()
variable=rospy.get_param('~result_topic')
self.tf_frame_source=rospy.get_param('~tf_frame_source')
self.tf_frame_target=rospy.get_param('~tf_frame_target')
self.sub_detection_object=rospy.Subscriber(variable,DetectionCoordinates,self.handle_message_objects)
self.sub_detection_people = rospy.Subscriber(rospy.get_param("~people_detection_topic"),PeopleMetaInfoListWithoutImg, self.handle_people_detection_topic)
self.get_objects_list_service = rospy.Service(rospy.get_param("~service_get_objects_in_room"), GetObjectsInRoom, self.handle_service_get_objects)
self.get_people_list_in_room_service = rospy.Service(rospy.get_param("~service_get_people_in_room"),GetPeopleInRoom,self.handle_service_get_people_in_room)
self.reset_objects_service = rospy.Service(rospy.get_param("~service_reset_objects"), ResetObjects, self.handle_reset_object_service)
self.update_score_available = True
#thread.start_new_thread(self.update_score,())
# spin() simply keeps python from exiting until this node is stopped
rospy.spin()
def handle_reset_object_service(self,req):
self.update_score_available = False
if req.all_objects == True:
rospy.loginfo("{class_name} : RESET OBJECTS IN TEMP FOLDER".format(class_name=self.__class__.__name__))
for filename in os.listdir(self.TEMP_PATH):
file_path = os.path.join(self.TEMP_PATH, filename)
try:
if os.path.isfile(file_path) or os.path.islink(file_path):
os.unlink(file_path)
elif os.path.isdir(file_path):
shutil.rmtree(file_path)
except Exception as e:
rospy.logerr('Failed to delete %s. Reason: %s',file_path, e)
rospy.loginfo("{class_name} : RESETTING OBJECT FILES".format(class_name=self.__class__.__name__))
for filename in os.listdir(self.MAP_MANAGER_PATH):
if "object_" in filename:
file_path = os.path.join(self.MAP_MANAGER_PATH,filename)
try:
if os.path.isfile(file_path) or os.path.islink(file_path):
os.unlink(file_path)
elif os.path.isdir(file_path):
shutil.rmtree(file_path)
except Exception as e:
rospy.logerr('Failed to delete %s. Reason: %s',file_path, e)
else:
rospy.loginfo("{class_name} : RESETTING OBJECT %s".format(class_name=self.__class__.__name__),req.object_label)
for filename in os.listdir(self.MAP_MANAGER_PATH):
if req.object_label in filename:
file_path = os.path.join(self.MAP_MANAGER_PATH,filename)
try:
if os.path.isfile(file_path) or os.path.islink(file_path):
os.unlink(file_path)
elif os.path.isdir(file_path):
shutil.rmtree(file_path)
except Exception as e:
rospy.logerr('Failed to delete %s. Reason: %s',file_path, e)
for filename in os.listdir(self.TEMP_PATH):
if req.object_label in filename:
file_path = os.path.join(self.TEMP_PATH,filename)
try:
if os.path.isfile(file_path) or os.path.islink(file_path):
os.unlink(file_path)
elif os.path.isdir(file_path):
shutil.rmtree(file_path)
except Exception as e:
rospy.logerr('Failed to delete %s. Reason: %s',file_path, e)
self.update_score_available = True
#thread.start_new_thread(self.update_score,())
return ResetObjectsResponse("OK")
def handle_service_get_objects(self,req):
room = req.room
room = room.replace(" ","")
list_objects = []
dirs = os.listdir(self.MAP_MANAGER_PATH)
for fileName in dirs:
if "object_"+room in fileName:
with open(self.MAP_MANAGER_PATH + fileName,"r") as f:
data = json.load(f)
list_objects.append(json.dumps(data))
return GetObjectsInRoomResponse(list_objects)
def handle_service_get_people_in_room(self,req):
room = req.room
room = room.replace(" ","")
list_people = []
dirs = os.listdir(self.MAP_MANAGER_PATH)
for fileName in dirs:
if "people_"+room in fileName:
with open(self.MAP_MANAGER_PATH + fileName,"r") as f:
data = json.load(f)
list_people.append(json.dumps(data))
return GetPeopleInRoomResponse(list_people)
def handle_people_detection_topic(self,req):
detection_list = req.peopleList
for detected_people in detection_list:
if detected_people.label_id == "Unknown" or detected_people.label_id == "None":
rospy.loginfo("I can't save data from unknown people")
else:
people_name = detected_people.label_id
people_score = detected_people.label_score
people_position = detected_people.pose.position
now = rospy.Time(0)
object_point = PointStamped()
object_point.header.frame_id = "palbator_arm_kinect_link"
object_point.header.stamp = now
object_point.point.x = people_position.x
object_point.point.y = people_position.y
object_point.point.z = people_position.z
rospy.loginfo("{class_name} : Object coords in palbator_arm_kinect_link : %s".format(class_name=self.__class__.__name__),str(object_point))
self.listener.waitForTransform("/map", "/palbator_arm_kinect_link", now, rospy.Duration(20))
target = self.listener.transformPoint("/map",object_point)
rospy.loginfo("{class_name} : Object coords in map : %s".format(class_name=self.__class__.__name__),str(target))
                ####### TODO: STORE THE INFO OF DETECTED KNOWN PEOPLE (similar to the objects) ######
def update_score(self):
dirs = os.listdir(self.TEMP_PATH)
itP_dirs = os.listdir(self.MAP_MANAGER_PATH)
rospy.loginfo("Updating score initiated")
for fileName in dirs:
score=0.0
if "object" in str(fileName):
json_file = open(self.TEMP_PATH + str(fileName), 'r')
#rospy.logwarn(fileName)
data = json.load(json_file)
json_file.close()
cumul_darknet = data['confidence_darknet']
count = data['count']
overlap = data['overlap']
#total counts - counts of other objects at the same location
corrected_counts = count - overlap
#No need for negative values
if corrected_counts < 0:
corrected_counts = 0
temps = time.time() - data['last_seen']
round_time=math.ceil(temps)
val = math.log(round_time+0.0000001)+1
mean_darknet = cumul_darknet / count
#Score calculation function
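                #Score = mean Darknet confidence, scaled by the fraction of
                #non-overlapping sightings and damped by 1 + log(seconds since
                #last seen), so stale detections decay towards zero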
score = 100*(float(corrected_counts)*0.01*mean_darknet/float(val))/count
#rospy.loginfo("\n Object label : %s \n Mean confidence : %f \n Time since last seen : %f \n Counts : %d \n Corrected counts : %d",
# str(data['label']), mean_darknet, temps,count,corrected_counts)
json_tmp = open(self.TEMP_PATH + str(fileName), 'w+')
data['score']=score
json_tmp.write(json.dumps(data))
json_tmp.close()
#rospy.loginfo("Object %s has a score of %f with %d counts \n", data['label'], score, count)
if os.path.exists(self.MAP_MANAGER_PATH + str(fileName)):
json_itp = open(self.MAP_MANAGER_PATH + str(fileName), 'w+')
json_itp.write(json.dumps(data))
json_itp.close()
#rospy.loginfo("Updating Interest Point %s", fileName)
#time.sleep(self._sleep_time)
rospy.loginfo("Update score completed")
def save_InterestPoint(self):
tmp_dir = os.listdir(self.TEMP_PATH)
itP_dirs = os.listdir(self.MAP_MANAGER_PATH)
for fileName in tmp_dir:
json_file = open(self.TEMP_PATH + str(fileName), 'r')
data = json.load(json_file)
json_file.close()
if data['score']>0 and data['count']>5 and not os.path.exists(self.MAP_MANAGER_PATH + str(fileName)):
rospy.loginfo("Saving an object as Interest Point")
#save object position as geometry_msgs/Pose
itp_pose = Pose()
itp_pose.position.x = data['pose']['position']['x']
itp_pose.position.y = data['pose']['position']['y']
itp_pose.position.z = data['pose']['position']['z']
itp_pose.orientation.x = data['pose']['orientation']['x']
itp_pose.orientation.y = data['pose']['orientation']['y']
itp_pose.orientation.z = data['pose']['orientation']['z']
itp_pose.orientation.w = data['pose']['orientation']['w']
#calling MapManager/save_interestPoint Service
rospy.wait_for_service('save_InterestPoint')
save_InterestPoint = rospy.ServiceProxy('save_InterestPoint', saveitP_service)
itPoint=InterestPoint()
itPoint.count = data['count']
itPoint.confidence_darknet = data['confidence_darknet']
itPoint.last_seen = data['last_seen']
itPoint.label = data['label']
itPoint.pose = itp_pose
itPoint.arm_position = 0
success = save_InterestPoint(itPoint)
#purging Interest Points too old and scoreless
elif (time.time() - data['last_seen']) > 200 and data['score']==0 and os.path.exists(self.MAP_MANAGER_PATH + str(fileName)):
file_remove=str(self.MAP_MANAGER_PATH)+str(fileName)
rospy.loginfo("Removing file %s", file_remove)
os.remove(file_remove)
itP_dirs = os.listdir(self.MAP_MANAGER_PATH)
elif (time.time() - data['last_seen']) > 10000 and os.path.exists(self.MAP_MANAGER_PATH + str(fileName)):
file_remove=str(self.MAP_MANAGER_PATH)+str(fileName)
rospy.loginfo("Removing file %s", file_remove)
os.remove(file_remove)
itP_dirs = os.listdir(self.MAP_MANAGER_PATH)
def handle_message_objects(self,req):
"""
Get the ROS message with the XYZ coordinates and publish a TF with the XYZ point as origin
"""
tic=time.time()
self.dirs = os.listdir(self.TEMP_PATH)
#os.system('clear')
#rospy.loginfo("Reading interest point directory \n")
if len(req.points)>0:
for point in req.points:
pos_x = point.x
pos_y = point.y
pos_z = point.z
darknet_score = point.score
print(pos_x, pos_y, pos_z)
#Check if 3D pose available
if math.isnan(pos_x)==False and math.isnan(pos_y)==False and math.isnan(pos_z)==False:
#Calculating object position in the map frame
points = PointStamped()
points.header.frame_id = self.tf_frame_source
points.header.stamp=rospy.Time(0)
points.point.x = point.x
points.point.y = point.y
points.point.z = point.z
self.listener.waitForTransform("map",self.tf_frame_source, rospy.Time(0), rospy.Duration(5.0))
p = self.listener.transformPoint("map",points)
pos_x = p.point.x
pos_y = p.point.y
pos_z = p.point.z
                    #Check whether this object is already in the interest point list
count=0
c=0
for fileName in self.dirs:
if "object" in str(fileName):
with open(self.TEMP_PATH + str(fileName), 'r') as json_file:
#rospy.logwarn(fileName)
data = json.load(json_file)
json_file.close()
if pos_x - 0.5 <= data['pose']['position']['x'] <= pos_x + 0.5 and pos_y - 0.5 <= data['pose']['position']['y'] <= pos_y + 0.5 and pos_z - 0.5 <= data['pose']['position']['z'] <= pos_z + 0.5:
#Object detected at the same spot
if str(point.name) in str(fileName):
c+=1
#same object -> updating last seen / increase count / darknet confidence
count += 1
json_new = open(self.TEMP_PATH + str(fileName), 'w+')
data['count'] += 1
data['confidence_darknet'] += darknet_score
data['last_seen'] = time.time()
json_new.write(json.dumps(data))
json_new.close()
else:
#diff object at the same place -> decreasing count and adding overlap info
if data['overlap'] < data['count']:
data['overlap'] += 1
json_new = open(self.TEMP_PATH + str(fileName), 'w+')
json_new.write(json.dumps(data))
json_new.close()
else:
if str(point.name) in str(fileName):
c+=1
                                #object at a different location: if it has never been seen, it will be saved below
continue
#Saving new object in Itp list
if count == 0:
#Saving to temp dir
rospy.loginfo("Object at a new location - Saving information")
#save object position as geometry_msgs/Pose
itp_pose = Pose()
itp_pose.position.x = p.point.x
itp_pose.position.y = p.point.y
itp_pose.position.z = p.point.z
itp_pose.orientation.x = 0
itp_pose.orientation.y = 0
itp_pose.orientation.z = 0
itp_pose.orientation.w = 1
#calling tf_broadcaster/getCurrentArea Service
service_name = rospy.get_param("~service_get_current_area_name")
rospy.wait_for_service(service_name)
self.proxy_areas = rospy.ServiceProxy(service_name, CurrentArea)
response = self.proxy_areas(itp_pose.position.x,itp_pose.position.y)
if response.room != '':
current_room = response.room
else:
current_room = ''
itPoint=InterestPoint()
itPoint.count = 1
itPoint.confidence_darknet = darknet_score
itPoint.last_seen = time.time()
itPoint.label = "object_" + current_room + "_" + str(point.name)+str(c)
itPoint.pose = itp_pose
itPoint.arm_position = 0
#success = save_InterestPoint(itPoint)
temp_file = open(self.TEMP_PATH + str(itPoint.label) + '.coord', 'w+')
json_str = json_message_converter.convert_ros_message_to_json(itPoint)
temp_file.write(json_str)
temp_file.close()
#self.listener.waitForTransform(self.tf_frame_target,self.tf_frame_source,rospy.Time(0),rospy.Duration(1.0))
#self.br.sendTransform((pos_x,pos_y,pos_z), (0,0,0,1), rospy.Time.now(), tf_name, self.tf_frame_target)
else:
rospy.loginfo("Impossible to calculate depth of object")
self.save_InterestPoint()
self.update_score()
tac=time.time()
process=tac-tic
rospy.loginfo("Process time %f ",process)
if __name__ == '__main__':
# try:
# map_value="../../../../data/world_mng/interest_points/"
# temp_value="../../../../data/world_mng/temp/"
# except:
# pass
    #Find a way to create those directories if non-existent
# config_directory_param=rospy.get_param("~confPath",map_value)
# config_directory_param2=rospy.get_param("~trucpath",temp_value)
tf_broadcaster_instance = ObjectTfBroadcaster()
|
py | 7df88f83386be17817a571340bcbf56a63882c55 | # coding: utf-8
from __future__ import absolute_import, unicode_literals
import logging
# https://charlesleifer.com/blog/django-patterns-pluggable-backends/
from . import PaymentProviderException, payment_providers
logger = logging.getLogger(__name__)
class BasePaymentProvider(object):
"""
The abstract base class for all payment providers.
"""
def __init__(self, name):
self.name = name
def __unicode__(self):
return 'Base payment provider'
def payment_url(self):
"""
:return: The payment URL
"""
raise NotImplementedError()
# TODO: use this instead of global urls.
def get_provider_url_patterns(self):
"""
Return the local url patterns
:return: A list of url instances
"""
raise NotImplementedError
def validate_project(self, project, db_instance=None):
"""
A provider can validate a project.
:param project: The project instance to validate
:param db_instance: The project instance in the database
:raise: ValidationError
"""
pass
def collect_pledge(self, pledge):
"""
Collects payment for the given pledge.
:param pledge: An authorized pledge.
"""
raise NotImplementedError()
def refund_pledge(self, pledge):
"""
Frees reserved funds for the given pledge.
:param pledge: An authorized pledge.
"""
raise NotImplementedError()
def collect_billable_payments(self, project):
"""
Collects billable payments for the given project.
:param project: The project to collect payments for.
:return: The amount of processed pledges.
"""
        pledges = project.authorized_pledges.filter(provider=self.name)
        processed = 0
        for pledge in pledges:
            try:
                self.collect_pledge(pledge)
                processed += 1
            except PaymentProviderException as e:
                logger.info(e.message)
        return processed
def refund_payments(self, project):
"""
Refunds reserved payments for the given project.
:param project: The project to collect payments for.
:return: The amount of processed pledges.
"""
raise NotImplementedError()
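# A hedged sketch of a concrete backend (the class below is illustrative and
# not part of this codebase): real providers subclass BasePaymentProvider,
# override the NotImplementedError hooks above, and are exposed through the
# payment_providers object imported at the top of this module (its exact
# registration API is not shown here).
#
#   class ExamplePayPalProvider(BasePaymentProvider):
#       def payment_url(self):
#           return 'https://www.example.com/checkout'
#
#       def collect_pledge(self, pledge):
#           ...  # call the gateway, then mark the pledge as billed
#
#       def refund_pledge(self, pledge):
#           ...  # release the reserved funds at the gateway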
|
py | 7df88fc746a8a95f319f4420c8ae1303adc7ce42 | # Copyright (c) 2018 Yellow Pages Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Atlas module
Core module which provides access to MongoDB Atlas Cloud Provider APIs
"""
from datetime import datetime, timezone
from dateutil.relativedelta import relativedelta
from .errors import *
from .network import Network
from .settings import Settings
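# A minimal usage sketch (hedged: the credentials, group id and cluster name
# below are placeholders, and the "name" key follows the Atlas clusters API
# payload rather than anything defined in this module):
#
#   atlas = Atlas("my_user", "my_api_key", "my_group_id")
#   if atlas.Clusters.is_existing_cluster("cluster0"):
#       for cluster in atlas.Clusters.get_all_clusters(iterable=True):
#           print(cluster["name"])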
class Atlas:
"""Atlas constructor
Args:
user (str): Atlas user
password (str): Atlas password
group (str): Atlas group
"""
def __init__(self, user, password, group):
self.group = group
        # Network calls which will handle user/password for auth
self.network = Network(user, password)
# APIs
self.Clusters = Atlas._Clusters(self)
self.Whitelist = Atlas._Whitelist(self)
self.DatabaseUsers = Atlas._DatabaseUsers(self)
self.Projects = Atlas._Projects(self)
self.Alerts = Atlas._Alerts(self)
class _Clusters:
"""Clusters API
see: https://docs.atlas.mongodb.com/reference/api/clusters/
Constructor
Args:
atlas (Atlas): Atlas instance
"""
def __init__(self, atlas):
self.atlas = atlas
def is_existing_cluster(self, cluster):
"""Check if the cluster exists
            Not part of the Atlas API, but provided to simplify calling code
Args:
cluster (str): The cluster name
Returns:
bool: The cluster exists or not
"""
try:
self.get_a_single_cluster(cluster)
return True
except ErrAtlasNotFound:
return False
def get_all_clusters(self, pageNum=Settings.pageNum, itemsPerPage=Settings.itemsPerPage, iterable=False):
"""Get All Clusters
url: https://docs.atlas.mongodb.com/reference/api/clusters-get-all/
Keyword Args:
pageNum (int): Page number
itemsPerPage (int): Number of Users per Page
iterable (bool): To return an iterable high level object instead of a low level API response
Returns:
AtlasPagination or dict: Iterable object representing this function OR Response payload
Raises:
ErrPaginationLimits: Out of limits
"""
# Check limits and raise an Exception if needed
ErrPaginationLimits.checkAndRaise(pageNum, itemsPerPage)
if iterable:
return ClustersGetAll(self.atlas, pageNum, itemsPerPage)
uri = Settings.api_resources["Clusters"]["Get All Clusters"] % (
self.atlas.group, pageNum, itemsPerPage)
return self.atlas.network.get(Settings.BASE_URL + uri)
def get_a_single_cluster(self, cluster):
"""Get a Single Cluster
url: https://docs.atlas.mongodb.com/reference/api/clusters-get-one/
Args:
cluster (str): The cluster name
Returns:
dict: Response payload
"""
uri = Settings.api_resources["Clusters"]["Get a Single Cluster"] % (
self.atlas.group, cluster)
return self.atlas.network.get(Settings.BASE_URL + uri)
def delete_a_cluster(self, cluster, areYouSure=False):
"""Delete a Cluster
url: https://docs.atlas.mongodb.com/reference/api/clusters-delete-one/
Args:
cluster (str): Cluster name
Keyword Args:
                areYouSure (bool): safety flag to avoid deleting a cluster by mistake
Returns:
dict: Response payload
Raises:
ErrConfirmationRequested: Need a confirmation to delete the cluster
"""
if areYouSure:
uri = Settings.api_resources["Clusters"]["Delete a Cluster"] % (
self.atlas.group, cluster)
return self.atlas.network.delete(Settings.BASE_URL + uri)
else:
raise ErrConfirmationRequested(
"Please set areYouSure=True on delete_a_cluster call if you really want to delete [%s]" % cluster)
class _Whitelist:
"""Whitelist API
see: https://docs.atlas.mongodb.com/reference/api/whitelist/
Constructor
Args:
atlas (Atlas): Atlas instance
"""
def __init__(self, atlas):
self.atlas = atlas
def get_all_whitelist_entries(self, pageNum=Settings.pageNum, itemsPerPage=Settings.itemsPerPage, iterable=False):
"""Get All whitelist entries
url: https://docs.atlas.mongodb.com/reference/api/whitelist-get-all/
Keyword Args:
pageNum (int): Page number
itemsPerPage (int): Number of Users per Page
iterable (bool): To return an iterable high level object instead of a low level API response
Returns:
AtlasPagination or dict: Iterable object representing this function OR Response payload
Raises:
ErrPaginationLimits: Out of limits
"""
# Check limits and raise an Exception if needed
ErrPaginationLimits.checkAndRaise(pageNum, itemsPerPage)
if iterable:
return WhitelistGetAll(self.atlas, pageNum, itemsPerPage)
uri = Settings.api_resources["Whitelist"]["Get All Whitelist Entries"] % (
self.atlas.group, pageNum, itemsPerPage)
return self.atlas.network.get(Settings.BASE_URL + uri)
def get_whitelist_entry(self, ip_address):
"""Get a whitelist entry
url: https://docs.atlas.mongodb.com/reference/api/whitelist-get-one-entry/
Args:
ip_address (str): ip address to fetch from whitelist
Returns:
dict: Response payload
"""
uri = Settings.api_resources["Whitelist"]["Get Whitelist Entry"] % (
self.atlas.group, ip_address)
return self.atlas.network.get(Settings.BASE_URL + uri)
def create_whitelist_entry(self, ip_address, comment):
"""Create a whitelist entry
url: https://docs.atlas.mongodb.com/reference/api/whitelist-add-one/
Args:
ip_address (str): ip address to add to whitelist
comment (str): comment describing the whitelist entry
Returns:
dict: Response payload
"""
uri = Settings.api_resources["Whitelist"]["Create Whitelist Entry"] % self.atlas.group
whitelist_entry = [{'ipAddress': ip_address, 'comment': comment}]
return self.atlas.network.post(Settings.BASE_URL + uri, whitelist_entry)
def delete_a_whitelist_entry(self, ip_address):
"""Delete a whitelist entry
url: https://docs.atlas.mongodb.com/reference/api/whitelist-delete-one/
Args:
ip_address (str): ip address to delete from whitelist
Returns:
dict: Response payload
"""
uri = Settings.api_resources["Whitelist"]["Delete Whitelist Entry"] % (
self.atlas.group, ip_address)
return self.atlas.network.delete(Settings.BASE_URL + uri)
class _DatabaseUsers:
"""Database Users API
see: https://docs.atlas.mongodb.com/reference/api/database-users/
Constructor
Args:
atlas (Atlas): Atlas instance
"""
def __init__(self, atlas):
self.atlas = atlas
def get_all_database_users(self, pageNum=Settings.pageNum, itemsPerPage=Settings.itemsPerPage, iterable=False):
"""Get All Database Users
url: https://docs.atlas.mongodb.com/reference/api/database-users-get-all-users/
Keyword Args:
pageNum (int): Page number
itemsPerPage (int): Number of Users per Page
iterable (bool): To return an iterable high level object instead of a low level API response
Returns:
AtlasPagination or dict: Iterable object representing this function OR Response payload
Raises:
ErrPaginationLimits: Out of limits
"""
# Check limits and raise an Exception if needed
ErrPaginationLimits.checkAndRaise(pageNum, itemsPerPage)
if iterable:
return DatabaseUsersGetAll(self.atlas, pageNum, itemsPerPage)
uri = Settings.api_resources["Database Users"]["Get All Database Users"] % (
self.atlas.group, pageNum, itemsPerPage)
return self.atlas.network.get(Settings.BASE_URL + uri)
def get_a_single_database_user(self, user):
"""Get a Database User
url: https://docs.atlas.mongodb.com/reference/api/database-users-get-single-user/
Args:
user (str): User
Returns:
dict: Response payload
"""
uri = Settings.api_resources["Database Users"]["Get a Single Database User"] % (
self.atlas.group, user)
return self.atlas.network.get(Settings.BASE_URL + uri)
def create_a_database_user(self, permissions):
"""Create a Database User
url: https://docs.atlas.mongodb.com/reference/api/database-users-create-a-user/
Args:
permissions (DatabaseUsersPermissionsSpec): Permissions to apply
Returns:
dict: Response payload
"""
uri = Settings.api_resources["Database Users"]["Create a Database User"] % self.atlas.group
return self.atlas.network.post(Settings.BASE_URL + uri, permissions.getSpecs())
def update_a_database_user(self, user, permissions):
"""Update a Database User
url: https://docs.atlas.mongodb.com/reference/api/database-users-update-a-user/
Args:
user (str): User
permissions (DatabaseUsersUpdatePermissionsSpecs): Permissions to apply
Returns:
dict: Response payload
"""
uri = Settings.api_resources["Database Users"]["Update a Database User"] % (
self.atlas.group, user)
return self.atlas.network.patch(Settings.BASE_URL + uri, permissions.getSpecs())
def delete_a_database_user(self, user):
"""Delete a Database User
url: https://docs.atlas.mongodb.com/reference/api/database-users-delete-a-user/
Args:
user (str): User to delete
Returns:
dict: Response payload
"""
uri = Settings.api_resources["Database Users"]["Delete a Database User"] % (
self.atlas.group, user)
return self.atlas.network.delete(Settings.BASE_URL + uri)
class _Projects:
"""Projects API
see: https://docs.atlas.mongodb.com/reference/api/projects/
Constructor
Args:
atlas (Atlas): Atlas instance
"""
def __init__(self, atlas):
self.atlas = atlas
def get_all_projects(self, pageNum=Settings.pageNum, itemsPerPage=Settings.itemsPerPage, iterable=False):
"""Get All Projects
url: https://docs.atlas.mongodb.com/reference/api/project-get-all/
Keyword Args:
pageNum (int): Page number
itemsPerPage (int): Number of Users per Page
iterable (bool): To return an iterable high level object instead of a low level API response
Returns:
AtlasPagination or dict: Iterable object representing this function OR Response payload
Raises:
ErrPaginationLimits: Out of limits
"""
# Check limits and raise an Exception if needed
ErrPaginationLimits.checkAndRaise(pageNum, itemsPerPage)
if iterable:
return ProjectsGetAll(self.atlas, pageNum, itemsPerPage)
uri = Settings.api_resources["Projects"]["Get All Projects"] % (
pageNum, itemsPerPage)
return self.atlas.network.get(Settings.BASE_URL + uri)
def get_one_project(self, groupid):
"""Get one Project
url: https://docs.atlas.mongodb.com/reference/api/project-get-one/
Args:
groupid (str): Group Id
Returns:
dict: Response payload
"""
uri = Settings.api_resources["Projects"]["Get One Project"] % (
groupid)
return self.atlas.network.get(Settings.BASE_URL + uri)
def create_a_project(self, name, orgId=None):
"""Create a Project
url: https://docs.atlas.mongodb.com/reference/api/project-create-one/
Args:
name (str): Project name
Keyword Args:
orgId (ObjectId): The ID of the organization you want to create the project within.
Returns:
dict: Response payload
"""
uri = Settings.api_resources["Projects"]["Create a Project"]
project = {"name": name}
if orgId:
project["orgId"] = orgId
return self.atlas.network.post(Settings.BASE_URL + uri, project)
class _Alerts:
"""Alerts API
see: https://docs.atlas.mongodb.com/reference/api/alerts/
Constructor
Args:
atlas (Atlas): Atlas instance
"""
def __init__(self, atlas):
self.atlas = atlas
def get_all_alerts(self, status=None, pageNum=Settings.pageNum, itemsPerPage=Settings.itemsPerPage, iterable=False):
"""Get All Alerts
url: https://docs.atlas.mongodb.com/reference/api/alerts-get-all-alerts/
Keyword Args:
status (AlertStatusSpec): filter on alerts status
pageNum (int): Page number
itemsPerPage (int): Number of Users per Page
iterable (bool): To return an iterable high level object instead of a low level API response
Returns:
AtlasPagination or dict: Iterable object representing this function OR Response payload
Raises:
ErrPaginationLimits: Out of limits
"""
# Check limits and raise an Exception if needed
ErrPaginationLimits.checkAndRaise(pageNum, itemsPerPage)
if iterable:
return AlertsGetAll(self.atlas, status, pageNum, itemsPerPage)
if status:
uri = Settings.api_resources["Alerts"]["Get All Alerts with status"] % (
self.atlas.group, status, pageNum, itemsPerPage)
else:
uri = Settings.api_resources["Alerts"]["Get All Alerts"] % (
self.atlas.group, pageNum, itemsPerPage)
return self.atlas.network.get(Settings.BASE_URL + uri)
def get_an_alert(self, alert):
"""Get an Alert
url: https://docs.atlas.mongodb.com/reference/api/alerts-get-alert/
Args:
alert (str): The alert id
Returns:
dict: Response payload
"""
uri = Settings.api_resources["Alerts"]["Get an Alert"] % (
self.atlas.group, alert)
return self.atlas.network.get(Settings.BASE_URL + uri)
def acknowledge_an_alert(self, alert, until, comment=None):
"""Acknowledge an Alert
url: https://docs.atlas.mongodb.com/reference/api/alerts-acknowledge-alert/
Args:
alert (str): The alert id
until (datetime): Acknowledge until
Keyword Args:
comment (str): The acknowledge comment
Returns:
dict: Response payload
"""
data = {"acknowledgedUntil": until.isoformat(timespec='seconds')}
if comment:
data["acknowledgementComment"] = comment
uri = Settings.api_resources["Alerts"]["Acknowledge an Alert"] % (
self.atlas.group, alert)
return self.atlas.network.patch(Settings.BASE_URL + uri, data)
def unacknowledge_an_alert(self, alert):
"""Acknowledge an Alert
url: https://docs.atlas.mongodb.com/reference/api/alerts-acknowledge-alert/
Args:
alert (str): The alert id
Returns:
dict: Response payload
"""
# see https://docs.atlas.mongodb.com/reference/api/alerts-acknowledge-alert/#request-body-parameters
# To unacknowledge a previously acknowledged alert, set the field value to the past.
now = datetime.now(timezone.utc)
until = now - relativedelta(days=1)
return self.acknowledge_an_alert(alert, until)
def acknowledge_an_alert_forever(self, alert, comment=None):
"""Acknowledge an Alert forever
url: https://docs.atlas.mongodb.com/reference/api/alerts-acknowledge-alert/
Args:
alert (str): The alert id
Keyword Args:
comment (str): The acknowledge comment
Returns:
dict: Response payload
"""
# see https://docs.atlas.mongodb.com/reference/api/alerts-acknowledge-alert/#request-body-parameters
# To acknowledge an alert “forever”, set the field value to 100 years in the future.
now = datetime.now(timezone.utc)
until = now + relativedelta(years=100)
return self.acknowledge_an_alert(alert, until, comment)
class AtlasPagination:
"""Atlas Pagination Generic Implementation
Constructor
Args:
atlas (Atlas): Atlas instance
fetch (function): The function "get_all" to call
pageNum (int): Page number
        itemsPerPage (int): Number of items per page
"""
def __init__(self, atlas, fetch, pageNum, itemsPerPage):
self.atlas = atlas
self.fetch = fetch
self.pageNum = pageNum
self.itemsPerPage = itemsPerPage
def __iter__(self):
"""Iterable
Yields:
str: One result
"""
        # pageNum is set with the value requested (so not necessarily 1)
pageNum = self.pageNum
        # total: a placeholder value to enter the while loop; it is replaced by the real total below
total = pageNum * self.itemsPerPage
while (pageNum * self.itemsPerPage - total < self.itemsPerPage):
# fetch the API
try:
details = self.fetch(pageNum, self.itemsPerPage)
except:
raise ErrPagination()
# set the real total
total = details["totalCount"]
            # iterate over the results of the current page
            for result in details["results"]:
                yield result
# next page
pageNum += 1
class DatabaseUsersGetAll(AtlasPagination):
"""Pagination for Database User : Get All"""
def __init__(self, atlas, pageNum, itemsPerPage):
super().__init__(atlas, atlas.DatabaseUsers.get_all_database_users, pageNum, itemsPerPage)
class WhitelistGetAll(AtlasPagination):
"""Pagination for Database User : Get All"""
def __init__(self, atlas, pageNum, itemsPerPage):
super().__init__(atlas, atlas.Whitelist.get_all_whitelist_entries, pageNum, itemsPerPage)
class ProjectsGetAll(AtlasPagination):
"""Pagination for Projects : Get All"""
def __init__(self, atlas, pageNum, itemsPerPage):
super().__init__(atlas, atlas.Projects.get_all_projects, pageNum, itemsPerPage)
class ClustersGetAll(AtlasPagination):
"""Pagination for Clusters : Get All"""
def __init__(self, atlas, pageNum, itemsPerPage):
super().__init__(atlas, atlas.Clusters.get_all_clusters, pageNum, itemsPerPage)
class AlertsGetAll(AtlasPagination):
"""Pagination for Alerts : Get All"""
def __init__(self, atlas, status, pageNum, itemsPerPage):
super().__init__(atlas, self.fetch, pageNum, itemsPerPage)
self.get_all_alerts = atlas.Alerts.get_all_alerts
self.status = status
def fetch(self, pageNum, itemsPerPage):
"""Intermediate fetching
Args:
pageNum (int): Page number
            itemsPerPage (int): Number of items per page
Returns:
dict: Response payload
"""
return self.get_all_alerts(self.status, pageNum, itemsPerPage)
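# Usage sketch (illustrative, not part of the module): iterate over every alert with the
# high-level pagination wrapper and acknowledge one of them. `atlas` is assumed to be an
# Atlas instance created elsewhere in this module, and the alert keys shown are examples
# of what the API may return, not a guarantee.
#
#   for alert in atlas.Alerts.get_all_alerts(iterable=True):
#       print(alert.get("id"), alert.get("status"))
#
#   until = datetime.now(timezone.utc) + relativedelta(days=1)
#   atlas.Alerts.acknowledge_an_alert("the-alert-id", until, comment="looking into it")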
|
py | 7df89065654f64e00a9121439330570d628a433c | from rest_framework import serializers
from core.models import Tag,Ingredient
class TagSerializer(serializers.ModelSerializer):
"""Serializer for tag objects"""
class Meta:
model = Tag
fields = ('id','name')
read_only_fields = ('id',)
class IngredientSerializer(serializers.ModelSerializer):
"""Serializer for ingredients objects"""
class Meta:
model = Ingredient
fields = ('id','name')
read_only_fields = ('id',)
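# Usage sketch (illustrative): validating input and serializing an instance. The field
# names come from the Meta classes above; any additional required model fields (for
# example an owner) would have to be supplied to save() by the calling view.
#
#   serializer = TagSerializer(data={'name': 'vegan'})
#   if serializer.is_valid():
#       tag = serializer.save()
#       TagSerializer(tag).data  # -> {'id': tag.id, 'name': 'vegan'}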
|
py | 7df89116d4bf5b79d8a52ead0406314f0922b436 | import tkinter as tk
class ToolTip(object):
def __init__(self, widget):
self.widget = widget
self.tipwindow = None
self.id = None
self.x = self.y = 0
def showtip(self, text):
"Display text in tooltip window"
self.text = text
if self.tipwindow or not self.text:
return
x, y, _cx, cy = self.widget.bbox("insert")
x = x + self.widget.winfo_rootx() + 27
y = y + cy + self.widget.winfo_rooty() +27
self.tipwindow = tw = tk.Toplevel(self.widget)
tw.wm_overrideredirect(1)
tw.wm_geometry("+%d+%d" % (x, y))
label = tk.Label(tw, text=self.text, justify=tk.LEFT,
background="#ffffe0", relief=tk.SOLID, borderwidth=1,
font=("tahoma", "8", "normal"))
label.pack(ipadx=1)
def hidetip(self):
tw = self.tipwindow
self.tipwindow = None
if tw:
tw.destroy()
def create_tooltip(widget, text):
toolTip = ToolTip(widget)
def enter(event):
toolTip.showtip(text)
def leave(event):
toolTip.hidetip()
widget.bind('<Enter>', enter)
widget.bind('<Leave>', leave)
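# Usage sketch (illustrative): attach a tooltip to any Tk widget.
#
#   root = tk.Tk()
#   button = tk.Button(root, text="Save")
#   button.pack()
#   create_tooltip(button, "Write the current settings to disk")
#   root.mainloop()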
|
py | 7df891c1f99ba8257ee1aaeaf5a2771d2b4944c3 | from ._autozi import AUTOZI
from ._condscvi import CondSCVI
from ._destvi import DestVI
from ._linear_scvi import LinearSCVI
from ._multivi import MULTIVI
from ._peakvi import PEAKVI
from ._scanvi import SCANVI
from ._scvi import SCVI
from ._totalvi import TOTALVI
__all__ = [
"SCVI",
"TOTALVI",
"LinearSCVI",
"AUTOZI",
"SCANVI",
"PEAKVI",
"CondSCVI",
"DestVI",
"MULTIVI",
]
|
py | 7df891ee3151e0f33144a79d5fbc6ede5a1d6d04 | import math
class Node():
"""A node class for A* Pathfinding"""
def __init__(self, parent=None, position=None):
self.parent = parent
self.position = position
self.g = 0
self.h = 0
self.f = 0
def __eq__(self, other):
return self.position == other.position
def astar(maze, start, end, enemies):
"""Returns a list of tuples as a path from the given start to the given end in the given maze"""
# Create start and end node
start_node = Node(None, start)
start_node.g = start_node.h = start_node.f = 0
end_node = Node(None, end)
end_node.g = end_node.h = end_node.f = 0
# Initialize both open and closed list
open_list = []
closed_list = []
# Add the start node
open_list.append(start_node)
break_loop = 0
# Loop until you find the end
while len(open_list) > 0:
if break_loop >= 500:
return []
break_loop += 1
# Get the current node
current_node = open_list[0]
current_index = 0
for index, item in enumerate(open_list):
if item.f < current_node.f:
current_node = item
current_index = index
# Pop current off open list, add to closed list
open_list.pop(current_index)
closed_list.append(current_node)
# Found the goal
if calc_distance(current_node.position, end_node.position) <= 1:
path = []
current = current_node
while current is not None:
path.append(current.position)
current = current.parent
return path[::-1] # Return reversed path
# Generate children
children = []
for new_position in [(0, -1), (0, 1), (-1, 0), (1, 0)]: # Adjacent squares
# Get node position
node_position = (current_node.position[0] + new_position[0], current_node.position[1] + new_position[1])
# Make sure within range
if node_position[0] > (len(maze) - 1) or node_position[0] < 0 or node_position[1] > (len(maze[len(maze)-1]) -1) or node_position[1] < 0:
continue
            # Make sure the terrain is walkable and not occupied by an enemy
            # (assumes `enemies` is an iterable of [x, y] positions)
            if maze[node_position[0]][node_position[1]] != 0 or list(node_position) in enemies:
                continue
# Create new node
new_node = Node(current_node, node_position)
# Append
children.append(new_node)
# Loop through children
for child in children:
# Child is on the closed list
if child in closed_list:
continue
# Create the f, g, and h values
child.g = current_node.g + 1
child.h = ((child.position[0] - end_node.position[0]) ** 2) + ((child.position[1] - end_node.position[1]) ** 2)
child.f = child.g + child.h
            # Skip this child if it is already in the open list with a lower g value
            if any(child == open_node and child.g > open_node.g for open_node in open_list):
                continue
            # Add the child to the open list
            open_list.append(child)
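# Example (illustrative): a 3x3 grid where 0 is walkable and 1 is blocked.
#
#   maze = [[0, 0, 0],
#           [1, 1, 0],
#           [0, 0, 0]]
#   path = astar(maze, (0, 0), (2, 0), enemies=[])
#   # -> a list of (row, col) tuples from the start to a cell within
#   #    distance 1 of the goal, e.g. ending at (2, 1).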
def calc_distance(pos1, pos2):
x1, y1 = pos1
x2, y2 = pos2
return math.sqrt(pow(x2 - x1, 2) + pow(y2 - y1, 2)) |
py | 7df89317e7a2eb8c46e0f10944595108c74c31ec | from datetime import datetime
from uuid import uuid4
def timestamp():
return datetime.now()
def uuid():
return str(uuid4())
def eventid():
return f'{timestamp()}-{uuid()}'.replace(' ', '-')
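# Usage sketch (illustrative): eventid() joins the current timestamp and a random UUID4,
# replacing spaces with dashes. The exact values below are made up.
#
#   eventid()  # -> '2021-06-01-12:34:56.789012-3f2b8c1e-9a4d-4a6f-8f1e-0c2d3e4f5a6b'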
|
py | 7df893726d71a1d24f972bd3acdc5108165648b5 | import tensorflow as tf
import os
import random
import numpy as np
from multiprocessing import Process, Queue, Event
from dataset.kaldi_io import FeatureReader
from six.moves import range
import time
class DataOutOfRange(Exception):
pass
def sample_with_probability_valid(rd, candidates, num_selects, regions, valid_list=None):
"""Sample speakers with their frames.
The more #frames, the higher probability to be selected.
Args:
rd: random generator
candidates: the list
num_selects: selected number
regions: how to pick the candidates
valid_list: a set. The valid feature list.
:return: the selected candidates
"""
selected = []
num_candidates = len(candidates)
while len(selected) < num_selects:
r = rd.uniform(0, regions[-1])
for k in range(num_candidates):
if regions[k] >= r:
if candidates[k] not in selected and (valid_list is None or candidates[k] in valid_list):
selected.append(candidates[k])
break
return selected
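# Example (illustrative): with candidates ["spk1", "spk2", "spk3"] owning 100, 300 and
# 600 frames, `regions` is the cumulative count [100, 400, 1000], so spk3 is roughly six
# times more likely than spk1 to be picked on each draw.
#
#   rd = random.Random(0)
#   sample_with_probability_valid(rd, ["spk1", "spk2", "spk3"], 2, [100, 400, 1000])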
def get_speaker_info(data, spklist):
"""Get speaker information from the data directory.
    This function will be used in KaldiDataReader and KaldiDataQueue, so making it a normal function rather than a
    class method is fine.
Args:
data: The kaldi data directory.
spklist: The spklist file gives the index of each speaker.
:return:
spk2features: A dict. The key is the speaker id and the value is the segments belonging to this speaker.
features2spk: A dict. The key is the segment and the value is the corresponding speaker id.
spk2index: A dict from speaker NAME to speaker ID. This is useful to get the number of speakers. Because
sometimes, the speakers are not all included in the data directory (like in the valid set).
segment format: "utt_name filename:offset"
"""
assert (os.path.isdir(data) and os.path.isfile(spklist))
spk2index = {}
with open(spklist, "r") as f:
for line in f.readlines():
spk, index = line.strip().split(" ")
spk2index[spk] = int(index)
utt2spk = {}
with open(os.path.join(data, "spk2utt"), "r") as f:
for line in f.readlines():
spk, utts = line.strip().split(" ", 1)
for utt in utts.split(" "):
utt2spk[utt] = spk2index[spk]
spk2features = {}
features2spk = {}
with open(os.path.join(data, "feats.scp"), "r") as f:
for line in f.readlines():
(key, rxfile) = line.decode().split(' ')
spk = utt2spk[key]
if spk not in spk2features:
spk2features[spk] = []
spk2features[spk].append(key + ' ' + rxfile)
features2spk[key + ' ' + rxfile] = spk
return spk2features, features2spk, spk2index
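# Example (illustrative) of the returned structures, assuming speaker "spk1" has index 0
# and a single utterance "utt1" whose features live at "raw_mfcc.ark:42":
#
#   spk2features  -> {0: ["utt1 raw_mfcc.ark:42"]}
#   features2spk  -> {"utt1 raw_mfcc.ark:42": 0}
#   spk2index     -> {"spk1": 0}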
def get_aux_speaker_info(data, aux_data, spklist):
"""Get speaker information and auxiliary features from the data directory.
This function is similar to the above one, while it also loads auxiliary features.
Args:
data: The kaldi data directory.
        aux_data: A dict containing an arbitrary number of auxiliary data directories.
spklist: The spklist file gives the index of each speaker.
:return:
spk2features: A dict. The key is the speaker id and the value is the segments
and auxiliary features belonging to this speaker.
spk2features[spk] is a list, each element a dict.
The normal feature is in spk2features[spk][n]["features"]
features2spk: A dict. The key is the segment and the value is the corresponding speaker id.
spk2index: A dict from speaker NAME to speaker ID. This is useful to get the number of speakers. Because
sometimes, the speakers are not all included in the data directory (like in the valid set).
"""
assert (os.path.isdir(data) and os.path.isfile(spklist))
spk2index = {}
with open(spklist, "r") as f:
for line in f.readlines():
spk, index = line.strip().split(" ")
spk2index[spk] = int(index)
utt2spk = {}
with open(os.path.join(data, "spk2utt"), "r") as f:
for line in f.readlines():
spk, utts = line.strip().split(" ", 1)
for utt in utts.split(" "):
utt2spk[utt] = spk2index[spk]
# Load auxiliary features.
aux_utt2features = {}
for name in aux_data:
with open(os.path.join(aux_data[name], "feats.scp"), "r") as f:
for line in f.readlines():
(key, rxfile) = line.decode().split(' ')
if key not in aux_utt2features:
aux_utt2features[key] = {}
aux_utt2features[key][name] = key + ' ' + rxfile
spk2features = {}
features2spk = {}
with open(os.path.join(data, "feats.scp"), "r") as f:
for line in f.readlines():
(key, rxfile) = line.decode().split(' ')
spk = utt2spk[key]
if spk not in spk2features:
spk2features[spk] = []
features2spk[key + ' ' + rxfile] = spk
aux_utt2features[key]["features"] = key + ' ' + rxfile
spk2features[spk].append(aux_utt2features[key])
return spk2features, features2spk, spk2index
class KaldiDataRandomReader(object):
"""Used to read data from a kaldi data directory."""
def __init__(self, data_dir, spklist, num_parallel=1, num_speakers=None, num_segments=None, min_len=None, max_len=None, shuffle=True):
""" Create a data_reader from a given directory.
Args:
data_dir: The kaldi data directory.
spklist: The spklist tells the relation between speaker and index.
num_parallel: The number of threads to read features.
num_speakers: The number of speakers per batch.
            num_segments: The number of segments per speaker.
batch_size = num_speakers * num_segments
When num_segments = 1, the batch is randomly chosen from n speakers,
which is used for softmax-like loss function. While we can sample multiple segments for each speaker,
which is used for triplet-loss or GE2E loss.
min_len: The minimum length of the sampled sequence.
max_len: The maximum length of the sampled sequence.
shuffle: Load the feature from the 0-th frame or a random frame.
"""
self.data = data_dir
self.num_speakers = num_speakers
self.num_segments = num_segments
self.min_len = min_len
self.max_len = max_len
self.shuffle = shuffle
# We process the data directory and fetch speaker information
self.dim = FeatureReader(data_dir).get_dim()
self.spk2features, self.features2spk, spk2index = get_speaker_info(data_dir, spklist)
self.speakers = list(self.spk2features.keys())
self.num_total_speakers = len(list(spk2index.keys()))
self.num_parallel_datasets = num_parallel
if self.num_parallel_datasets != 1:
raise NotImplementedError("When num_parallel_datasets != 1, we got some strange problem with the dataset. Waiting for fix.")
def set_batch(self, num_speakers, num_segments):
"""Set the batch-related parameters
Args:
num_speakers: The number of speakers per batch.
            num_segments: The number of segments per speaker.
"""
self.num_speakers = num_speakers
self.num_segments = num_segments
def set_length(self, min_len, max_len):
"""Set the length of the sequence
Args:
min_len: The minimum length of the sampled sequence.
max_len: The maximum length of the sampled sequence.
"""
self.min_len = min_len
self.max_len = max_len
def batch_random(self):
"""Randomly load features to form a batch
This function is used in the load routine to feed data to the dataset object
It can also be used as a generator to get data directly.
"""
feature_reader = FeatureReader(self.data)
speakers = self.speakers
if self.num_total_speakers < self.num_speakers:
            print(
                "[Warning] The number of available speakers is less than the required number of speakers. Some speakers will be duplicated.")
speakers = self.speakers * (int(self.num_speakers / self.num_total_speakers) + 1)
while True:
batch_length = random.randint(self.min_len, self.max_len)
batch_speakers = random.sample(speakers, self.num_speakers)
features = np.zeros((self.num_speakers * self.num_segments, batch_length, feature_reader.dim),
dtype=np.float32)
labels = np.zeros((self.num_speakers * self.num_segments), dtype=np.int32)
for i, speaker in enumerate(batch_speakers):
labels[i * self.num_segments:(i + 1) * self.num_segments] = speaker
feature_list = self.spk2features[speaker]
if len(feature_list) < self.num_segments:
feature_list *= (int(self.num_segments / len(feature_list)) + 1)
# Now the length of the list must be greater than the sample size.
speaker_features = random.sample(feature_list, self.num_segments)
for j, feat in enumerate(speaker_features):
features[i * self.num_segments + j, :, :], _ = feature_reader.read(feat, batch_length, shuffle=self.shuffle)
yield (features, labels)
def load_dataset(self):
""" Load data from Kaldi features and return tf.dataset.
The function is useful for training, since it randomly loads features and labels from N speakers,
with K segments per speaker.
The batch is sampled randomly, so there is no need to do shuffle again.
:return: A nested tensor (features, labels)
"""
batch_size = self.num_speakers * self.num_segments
if self.num_parallel_datasets == 1:
# Single thread loading
dataset = tf.data.Dataset.from_generator(self.batch_random, (tf.float32, tf.int32),
(tf.TensorShape([batch_size, None, self.dim]),
tf.TensorShape([batch_size])))
else:
# Multiple threads loading
# It is very strange that the following code doesn't work properly.
            # I guess the reason may be that py_func hurts the performance of parallel_interleave.
dataset = tf.data.Dataset.range(self.num_parallel_datasets).apply(
tf.contrib.data.parallel_interleave(
lambda x: tf.data.Dataset.from_generator(self.batch_random, (tf.float32, tf.int32),
(tf.TensorShape([batch_size, None, self.dim]),
tf.TensorShape([batch_size]))),
cycle_length=self.num_parallel_datasets,
sloppy=False))
dataset = dataset.prefetch(1)
return dataset.make_one_shot_iterator().get_next()
def batch_random(stop_event,
queue,
data,
spk2features,
spk2num_frames,
utt2num_frames,
num_total_speakers,
num_speakers=10,
num_segments=10,
min_len=200,
max_len=400,
shuffle=True,
seed=0,
sample_with_prob=False):
"""Load features and fill a queue. Used in KaldiDataRandomQueue
Args:
stop_event: An event to tell the process to stop.
queue: A queue to put the data.
data: The kaldi data directory.
spk2features: A dict from speaker index to the segments.
spk2num_frames: #frames per speaker
utt2num_frames: #frames per utt
num_total_speakers: The total number of speakers.
num_speakers: The number of speakers in the batch.
num_segments: The number of segments per speaker.
min_len: The minimum length of the features.
max_len: The maximum length of the features.
shuffle: Load the feature from the 0-th frame or a random frame.
seed: The value used to generate the random seed.
sample_with_prob: sampled using probability.
"""
# TODO: If you use numpy.random in the sub-process, it is better to use:
# local_state = np.random.RandomState(seed)
# print local_state.uniform(0, 1, 5)
#
# The re-seed is necessary if numpy.random is used
# You can use os.urandom to generate the `random` seed.
rd = random.Random(os.urandom(4))
rd.jumpahead(seed)
feature_reader = FeatureReader(data)
speakers = list(spk2features.keys())
if num_total_speakers < num_speakers:
        print(
            "[Warning] The number of available speakers is less than the required number of speakers. Some speakers will be duplicated.")
speakers = speakers * (int(num_speakers / num_total_speakers) + 1)
total_num_frames = np.sum(spk2num_frames.values())
spk_sample_region = []
current_region = 0
for spk in speakers:
current_region += spk2num_frames[spk]
spk_sample_region.append(current_region)
assert total_num_frames == current_region
spk2utt_sample_region = {}
for spk in speakers:
spk2utt_sample_region[spk] = []
current_region = 0
for utt in spk2features[spk]:
current_region += utt2num_frames[utt.split(" ")[0]]
spk2utt_sample_region[spk].append(current_region)
# Now we have enough speakers
while not stop_event.is_set():
if sample_with_prob:
batch_speakers = sample_with_probability_valid(rd, speakers, num_speakers, spk_sample_region)
else:
batch_speakers = rd.sample(speakers, num_speakers)
batch_length = rd.randint(min_len, max_len)
features = np.zeros((num_speakers * num_segments, batch_length, feature_reader.dim), dtype=np.float32)
labels = np.zeros((num_speakers * num_segments), dtype=np.int32)
for i, speaker in enumerate(batch_speakers):
spk = speaker
# The length may be larger than the utterance length. A check should be applied first.
feature_list = set()
while len(feature_list) == 0:
feature_list = set()
for feat in spk2features[spk]:
if feature_reader.utt2num_frames[feat.split(' ')[0]] > batch_length:
feature_list.add(feat)
if len(feature_list) == 0:
# The speaker is not appropriate for this batch. Resample the speaker
spk = rd.choice(list(set(speakers) - set(batch_speakers)))
batch_speakers[i] = spk
labels[i * num_segments:(i + 1) * num_segments] = spk
# If the number is not enough
feature_list = list(feature_list)
if len(feature_list) < num_segments:
feature_list *= (int(num_segments / len(feature_list)) + 1)
# Now the length of the list must be greater than the sample size.
if sample_with_prob:
speaker_features = sample_with_probability_valid(rd, spk2features[spk], num_segments,
spk2utt_sample_region[spk], valid_list=feature_list)
else:
speaker_features = rd.sample(feature_list, num_segments)
for j, feat in enumerate(speaker_features):
features[i * num_segments + j, :, :], _ = feature_reader.read_segment(feat, batch_length, shuffle=shuffle)
queue.put((features, labels))
time.sleep(3)
while not queue.empty():
try:
queue.get(block=False)
except:
pass
print("The process {} is about to exit.".format(os.getpid()))
return
class KaldiDataRandomQueue(object):
"""A queue to read features from Kaldi data directory."""
def __init__(self, data_dir, spklist, num_parallel=1, max_qsize=20, num_speakers=None, num_segments=None, min_len=None, max_len=None, shuffle=True, sample_with_prob=False):
""" Create a queue from a given directory.
        This is basically similar to KaldiDataRandomReader. The difference is that KaldiDataRandomReader uses tf.data
        to load features while KaldiDataRandomQueue uses multiprocessing, which seems to be the better choice since
        multiprocessing significantly speeds up the loading in my case. If you can make parallel_interleave work,
        KaldiDataRandomReader is definitely more convenient because it is simpler.
Args:
data_dir: The kaldi data directory.
spklist: The spklist tells the mapping from the speaker name to the speaker id.
num_parallel: The number of threads to read features.
max_qsize: The capacity of the queue
num_speakers: The number of speakers per batch.
            num_segments: The number of segments per speaker.
batch_size = num_speakers * num_segments
When num_segments = 1, the batch is randomly chosen from n speakers,
which is used for softmax-like loss function. While we can sample multiple segments for each speaker,
which is used for triplet-loss or GE2E loss.
min_len: The minimum length of the sampled sequence.
max_len: The maximum length of the sampled sequence.
shuffle: Loading data from the 0-th frame or a random frame.
sample_with_prob: Sample the speaker and utt with the probability according to the data length.
"""
self.data = data_dir
self.num_speakers = num_speakers
self.num_segments = num_segments
self.min_len = min_len
self.max_len = max_len
self.num_parallel_datasets = num_parallel
self.shuffle = shuffle
self.sample_with_prob = sample_with_prob
if self.sample_with_prob:
tf.logging.info("The training examples are sampled with probability.")
# We process the data directory and fetch speaker information.
self.spk2features, self.features2spk, spk2index = get_speaker_info(data_dir, spklist)
# We also load #frames for each speaker and #frames for each utt
self.utt2num_frames = {}
with open(os.path.join(data_dir, "utt2num_frames"), 'r') as f:
for line in f.readlines():
utt, n = line.strip().split(" ")
self.utt2num_frames[utt] = int(n)
self.spk2num_frames = {}
for spk in self.spk2features:
n = 0
for utt in self.spk2features[spk]:
n += self.utt2num_frames[utt.split(" ")[0]]
self.spk2num_frames[spk] = n
        # The total number of speakers is taken from the spklist, which may include
        # speakers that are not present in this data directory (e.g. in the valid set).
self.num_total_speakers = len(list(spk2index.keys()))
# The Queue is thread-safe and used to save the features.
self.queue = Queue(max_qsize)
self.stop_event = Event()
        # And the processes are saved
self.processes = []
def set_batch(self, num_speakers, num_segments):
"""Set the batch-related parameters
Args:
num_speakers: The number of speakers per batch.
            num_segments: The number of segments per speaker.
"""
self.num_speakers = num_speakers
self.num_segments = num_segments
def set_length(self, min_len, max_len):
"""Set the length of the sequence
Args:
min_len: The minimum length of the sampled sequence.
max_len: The maximum length of the sampled sequence.
"""
self.min_len = min_len
self.max_len = max_len
def start(self):
"""Start processes to load features
"""
self.processes = [Process(target=batch_random, args=(self.stop_event,
self.queue,
self.data,
self.spk2features,
self.spk2num_frames,
self.utt2num_frames,
self.num_total_speakers,
self.num_speakers,
self.num_segments,
self.min_len,
self.max_len,
self.shuffle,
i,
self.sample_with_prob))
for i in range(self.num_parallel_datasets)]
for process in self.processes:
process.daemon = True
process.start()
def fetch(self):
"""Fetch data from the queue"""
return self.queue.get()
def stop(self):
"""Stop the threads
After stop, the processes are terminated and the queue may become unavailable.
"""
self.stop_event.set()
print("Clean the data queue that subprocesses can detect the stop event...")
while not self.queue.empty():
# Clear the queue content before join the threads. They may wait for putting the data to the queue.
self.queue.get()
time.sleep(3)
for process in self.processes:
# TODO: fix the join problem
process.terminate()
# process.join()
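# Usage sketch (illustrative): the commented-out test code at the bottom of this file
# follows the same pattern. The paths below are placeholders.
#
#   data_loader = KaldiDataRandomQueue("data/train", "data/train/spklist",
#                                      num_parallel=4, num_speakers=64, num_segments=1,
#                                      min_len=200, max_len=400)
#   data_loader.start()
#   features, labels = data_loader.fetch()   # blocks until a batch is ready
#   data_loader.stop()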
def batch_sequence(stop_event,
queue,
data,
feature_list,
features2spk,
batch_size=128,
min_len=200,
max_len=400,
shuffle=True,
seed=0):
"""Load features and fill a queue. Used in KaldiDataSeqQueue.
Args:
stop_event: An event indicating the reading is finished.
queue: A queue to put the data.
data: The kaldi data directory.
feature_list: A list shows which features the process should read.
features2spk: A dict map features to speaker index.
batch_size: The batch_size
min_len: The minimum length of the features.
max_len: The maximum length of the features.
shuffle: Load the feature from the 0-th frame or a random frame.
seed: The number is used to generate a random seed
"""
# Read the comment in batch_random
rd = random.Random(os.urandom(4))
rd.jumpahead(seed)
feature_reader = FeatureReader(data)
num_batches = int(len(feature_list) / batch_size) + 1
batch_size = min(batch_size, len(feature_list))
if num_batches * batch_size > len(feature_list):
feature_list = feature_list * (int(num_batches * batch_size / len(feature_list)) + 1)
for i in range(num_batches):
batch_length = rd.randint(min_len, max_len)
# In some cases, the minimum length of the utterances is smaller than the batch length.
# Use the smallest length as the real batch length.
for j in range(batch_size):
if feature_reader.utt2num_frames[feature_list[i * batch_size + j].split(' ')[0]] < batch_length:
batch_length = feature_reader.utt2num_frames[feature_list[i * batch_size + j].split(' ')[0]]
features = np.zeros((batch_size, batch_length, feature_reader.dim), dtype=np.float32)
labels = np.zeros((batch_size), dtype=np.int32)
for j in range(batch_size):
features[j, :, :], _ = feature_reader.read_segment(feature_list[i * batch_size + j], batch_length, shuffle=shuffle)
labels[j] = features2spk[feature_list[i * batch_size + j]]
queue.put((features, labels))
stop_event.set()
print("The process {} is about to exit.".format(os.getpid()))
return
class KaldiDataSeqQueue(object):
"""A queue to read features from Kaldi data directory."""
def __init__(self, data_dir, spklist, num_parallel=1, max_qsize=20, batch_size=128, min_len=None, max_len=None, shuffle=True):
""" Create a queue from a given directory.
        Unlike KaldiDataRandomQueue, KaldiDataSeqQueue loads data in sequence, which means each segment appears once
        per epoch. This is usually used for validation (using softmax-like loss or EER).
Args:
data_dir: The kaldi data directory.
spklist: The spklist tells the mapping from the speaker name to the speaker id.
num_parallel: The number of threads to read features.
max_qsize: The capacity of the queue.
batch_size: The batch size.
min_len: The minimum length of the sampled sequence.
max_len: The maximum length of the sampled sequence.
shuffle: Shuffle the load sequence and loading data from a random frame.
"""
self.data = data_dir
self.batch_size = batch_size
self.min_len = min_len
self.max_len = max_len
self.num_parallel_datasets = num_parallel
self.shuffle = shuffle
# We process the data directory and fetch speaker information.
self.spk2features, self.features2spk, spk2index = get_speaker_info(data_dir, spklist)
self.num_total_speakers = len(list(spk2index.keys()))
# Arrange features in sequence
self.feature_list = []
self.sub_feature_list = []
for spk in self.spk2features:
self.feature_list += self.spk2features[spk]
if shuffle:
random.shuffle(self.feature_list)
# Split the features to N sub-list. The lists are used in each process.
num_sub_features = len(self.feature_list) / num_parallel
for i in range(num_parallel):
if i == num_parallel - 1:
self.sub_feature_list.append(self.feature_list[i * num_sub_features:])
else:
self.sub_feature_list.append(self.feature_list[i * num_sub_features:(i + 1) * num_sub_features])
# The Queue is thread-safe and used to save the features.
self.queue = Queue(max_qsize)
        # The events will be set once the processes finish their jobs
        self.stop_event = [Event() for _ in range(num_parallel)]
        # And the processes are saved
self.processes = []
def set_batch(self, batch_size):
"""Set the batch size
"""
self.batch_size = batch_size
def set_length(self, min_len, max_len):
"""Set the length of the sequence
Args:
min_len: The minimum length of the sampled sequence.
max_len: The maximum length of the sampled sequence.
"""
self.min_len = min_len
self.max_len = max_len
def start(self):
"""Start processes to load features
"""
self.processes = [Process(target=batch_sequence, args=(self.stop_event[i],
self.queue,
self.data,
self.sub_feature_list[i],
self.features2spk,
self.batch_size,
self.min_len,
self.max_len,
self.shuffle,
i))
for i in range(self.num_parallel_datasets)]
for process in self.processes:
process.daemon = True
process.start()
def fetch(self):
"""Fetch data from the queue"""
if self.queue.empty():
all_finish = [self.stop_event[i].is_set() for i in range(self.num_parallel_datasets)]
if all(all_finish):
# If the queue is empty and all processes are finished, we got nothing to read.
for process in self.processes:
# TODO: fix the join problem
process.terminate()
raise DataOutOfRange
return self.queue.get()
def stop(self):
"""Stop the threads"""
for process in self.processes:
# TODO: fix the join problem
process.terminate()
# process.join()
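# Usage sketch (illustrative): the sequential queue is exhausted after one pass over the
# data, so fetch() is wrapped in a DataOutOfRange handler. The paths are placeholders.
#
#   data_loader = KaldiDataSeqQueue("data/valid", "data/train/spklist",
#                                   num_parallel=2, batch_size=128,
#                                   min_len=200, max_len=400, shuffle=False)
#   data_loader.start()
#   while True:
#       try:
#           features, labels = data_loader.fetch()
#       except DataOutOfRange:
#           break
#   data_loader.stop()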
def multi_batch_random(stop_event,
queue,
data,
aux_data,
spk2features,
num_total_speakers,
num_speakers=10,
num_segments=10,
min_len=200,
max_len=400,
shuffle=True,
seed=0):
"""Load features and auxiliary features, fill a queue. Used in KaldiMultiDataRandomQueue
Args:
stop_event: An event to tell the process to stop.
queue: A queue to put the data.
data: The kaldi data directory.
aux_data: A dict. The auxiliary data directories.
spk2features: A dict from speaker index to the segments.
num_total_speakers: The total number of speakers.
num_speakers: The number of speakers in the batch.
num_segments: The number of segments per speaker.
min_len: The minimum length of the features.
max_len: The maximum length of the features.
shuffle: Load the feature from the 0-th frame or a random frame.
seed: The value used to generate the random seed.
"""
# TODO: If you use numpy.random in the sub-process, it is better to use:
# local_state = np.random.RandomState(seed)
# print local_state.uniform(0, 1, 5)
#
# The re-seed is necessary if numpy.random is used
# You can use os.urandom to generate the `random` seed.
rd = random.Random(os.urandom(4))
rd.jumpahead(seed)
feature_reader = {}
feature_reader["features"] = FeatureReader(data)
for name in aux_data:
feature_reader[name] = FeatureReader(aux_data[name])
speakers = list(spk2features.keys())
if num_total_speakers < num_speakers:
        print(
            "[Warning] The number of available speakers is less than the required number of speakers. Some speakers will be duplicated.")
speakers = speakers * (int(num_speakers / num_total_speakers) + 1)
while not stop_event.is_set():
batch_speakers = rd.sample(speakers, num_speakers)
batch_length = rd.randint(min_len, max_len)
features = {}
for name in feature_reader:
features[name] = np.zeros((num_speakers * num_segments, batch_length, feature_reader[name].dim),
dtype=np.float32)
labels = np.zeros((num_speakers * num_segments), dtype=np.int32)
for i, speaker in enumerate(batch_speakers):
# The length may be larger than the utterance length. A check should be applied first.
feature_list = []
spk = speaker
while len(feature_list) == 0:
feature_list = []
for feat in spk2features[spk]:
if feature_reader["features"].utt2num_frames[feat["features"].split(' ')[0]] > batch_length:
feature_list.append(feat)
if len(feature_list) == 0:
# The speaker is not appropriate for this batch. Resample the speaker
spk = rd.choice(list(set(speakers) - set(batch_speakers)))
batch_speakers[i] = spk
labels[i * num_segments:(i + 1) * num_segments] = spk
if len(feature_list) < num_segments:
feature_list *= (int(num_segments / len(feature_list)) + 1)
# Now the length of the list must be greater than the sample size.
speaker_features = rd.sample(feature_list, num_segments)
for j, feat in enumerate(speaker_features):
# Load features first.
features["features"][i * num_segments + j, :, :], start_pos = feature_reader["features"].read_segment(feat["features"],
batch_length,
shuffle=shuffle)
for name in feature_reader:
if name == "features":
continue
features[name][i * num_segments + j, :, :], _ = feature_reader[name].read_segment(feat[name], batch_length, start=start_pos)
queue.put((features, labels))
time.sleep(3)
while not queue.empty():
try:
queue.get(block=False)
except:
pass
print("The process {} is about to exit.".format(os.getpid()))
return
class KaldiMultiDataRandomQueue(KaldiDataRandomQueue):
"""A queue to read features from Kaldi data directory (with auxiliary features)."""
def __init__(self, data_dir, aux_data, spklist, num_parallel=1, max_qsize=20, num_speakers=None, num_segments=None,
min_len=None, max_len=None, shuffle=True):
"""Create a queue from a feature directory and some auxiliary feature directories.
"""
super(KaldiMultiDataRandomQueue, self).__init__(data_dir, spklist, num_parallel, max_qsize, num_speakers,
num_segments, min_len, max_len, shuffle)
self.aux_data = {}
for dirname in os.listdir(aux_data):
if dirname[0] == ".":
continue
if os.path.isdir(os.path.join(aux_data, dirname)):
self.aux_data[dirname] = os.path.join(aux_data, dirname)
# Preload the information. We should build the map from the utterance to the auxiliary feature.
self.spk2features, self.features2spk, spk2index = get_aux_speaker_info(data_dir, self.aux_data, spklist)
def start(self):
"""Start processes to load features
"""
self.processes = [Process(target=multi_batch_random, args=(self.stop_event,
self.queue,
self.data,
self.aux_data,
self.spk2features,
self.num_total_speakers,
self.num_speakers,
self.num_segments,
self.min_len,
self.max_len,
self.shuffle,
i))
for i in range(self.num_parallel_datasets)]
for process in self.processes:
process.daemon = True
process.start()
def multi_batch_sequence(stop_event,
queue,
data,
aux_data,
feature_list,
features2spk,
batch_size=128,
min_len=200,
max_len=400,
shuffle=True,
seed=0):
"""Load features, auxiliary features, fill a queue. Used in KaldiMultiDataSeqQueue.
Args:
stop_event: An event indicating the reading is finished.
queue: A queue to put the data.
data: The kaldi data directory.
aux_data: A dict. The auxiliary data directories.
        feature_list: A list of dicts showing which features the process should read (including auxiliary features).
features2spk: A dict map features to speaker index.
batch_size: The batch_size
min_len: The minimum length of the features.
max_len: The maximum length of the features.
shuffle: Load the feature from the 0-th frame or a random frame.
seed: The number is used to generate a random seed
"""
# Read the comment in batch_random
rd = random.Random(os.urandom(4))
rd.jumpahead(seed)
feature_reader = {}
feature_reader["features"] = FeatureReader(data)
for name in aux_data:
feature_reader[name] = FeatureReader(aux_data[name])
num_batches = int(len(feature_list) / batch_size)
for i in range(num_batches):
batch_length = rd.randint(min_len, max_len)
for j in range(batch_size):
if feature_reader["features"].utt2num_frames[feature_list[i * batch_size + j]["features"].split(' ')[0]] < batch_length:
batch_length = feature_reader["features"].utt2num_frames[feature_list[i * batch_size + j]["features"].split(' ')[0]]
features = {}
for name in feature_reader:
features[name] = np.zeros((batch_size, batch_length, feature_reader[name].dim), dtype=np.float32)
labels = np.zeros((batch_size), dtype=np.int32)
for j in range(batch_size):
# Load the features first
features["features"][j, :, :], start_pos = feature_reader["features"].read_segment(feature_list[i * batch_size + j]["features"],
batch_length,
shuffle=shuffle)
for name in feature_reader:
if name == "features":
continue
features[name][j, :, :], _ = feature_reader[name].read_segment(feature_list[i * batch_size + j][name], batch_length, start=start_pos)
labels[j] = features2spk[feature_list[i * batch_size + j]["features"]]
queue.put((features, labels))
stop_event.set()
print("The process {} is about to exit.".format(os.getpid()))
return
class KaldiMultiDataSeqQueue(KaldiDataSeqQueue):
"""A queue to read features from Kaldi data directory (with auxiliary features)."""
def __init__(self, data_dir, aux_data, spklist, num_parallel=1, max_qsize=20, batch_size=128,
min_len=None, max_len=None, shuffle=True):
"""Create a queue from a feature directory and some auxiliary feature directories.
"""
super(KaldiMultiDataSeqQueue, self).__init__(data_dir, spklist, num_parallel, max_qsize, batch_size,
min_len, max_len, shuffle)
self.aux_data = {}
for dirname in os.listdir(aux_data):
# Skip hidden directories (e.g. .backup/)
if dirname[0] == ".":
continue
if os.path.isdir(os.path.join(aux_data, dirname)):
self.aux_data[dirname] = os.path.join(aux_data, dirname)
# Preload the information. We should build the map from the utterance to the auxiliary feature.
self.spk2features, self.features2spk, spk2index = get_aux_speaker_info(data_dir, self.aux_data, spklist)
# Re-arrange the feature list since now we have auxiliary features.
self.feature_list = []
self.sub_feature_list = []
for spk in self.spk2features:
self.feature_list += self.spk2features[spk]
if shuffle:
random.shuffle(self.feature_list)
num_sub_features = len(self.feature_list) / num_parallel
for i in range(num_parallel):
if i == num_parallel - 1:
self.sub_feature_list.append(self.feature_list[i * num_sub_features:])
else:
self.sub_feature_list.append(self.feature_list[i * num_sub_features:(i + 1) * num_sub_features])
def start(self):
"""Start processes to load features
"""
self.processes = [Process(target=multi_batch_sequence, args=(self.stop_event[i],
self.queue,
self.data,
self.aux_data,
self.sub_feature_list[i],
self.features2spk,
self.batch_size,
self.min_len,
self.max_len,
self.shuffle,
i))
for i in range(self.num_parallel_datasets)]
for process in self.processes:
process.daemon = True
process.start()
def phone_batch_random(stop_event,
queue,
data,
spk2features,
spk2num_frames,
utt2num_frames,
num_total_speakers,
num_speakers=10,
num_segments=10,
min_len=200,
max_len=400,
shuffle=True,
prev_dim=0,
phone_class=None,
seed=0,
sample_with_prob=False):
"""Load features and fill a queue. Used in KaldiDataRandomQueue
Args:
stop_event: An event to tell the process to stop.
queue: A queue to put the data.
data: The kaldi data directory.
spk2features: A dict from speaker index to the segments.
spk2num_frames: #frames per speaker
utt2num_frames: #frames per utt
num_total_speakers: The total number of speakers.
num_speakers: The number of speakers in the batch.
num_segments: The number of segments per speaker.
min_len: The minimum length of the features.
max_len: The maximum length of the features.
shuffle: Load the feature from the 0-th frame or a random frame.
seed: The value used to generate the random seed.
sample_with_prob: sampled using probability.
"""
# TODO: If you use numpy.random in the sub-process, it is better to use:
# local_state = np.random.RandomState(seed)
# print local_state.uniform(0, 1, 5)
#
# The re-seed is necessary if numpy.random is used
# You can use os.urandom to generate the `random` seed.
rd = random.Random(os.urandom(4))
rd.jumpahead(seed)
num_classes = len(phone_class)
feature_reader = FeatureReader(data)
speakers = list(spk2features.keys())
if num_total_speakers < num_speakers:
        print(
            "[Warning] The number of available speakers is less than the required number of speakers. Some speakers will be duplicated.")
speakers = speakers * (int(num_speakers / num_total_speakers) + 1)
total_num_frames = np.sum(spk2num_frames.values())
spk_sample_region = []
current_region = 0
for spk in speakers:
current_region += spk2num_frames[spk]
spk_sample_region.append(current_region)
assert total_num_frames == current_region
spk2utt_sample_region = {}
for spk in speakers:
spk2utt_sample_region[spk] = []
current_region = 0
for utt in spk2features[spk]:
current_region += utt2num_frames[utt.split(" ")[0]]
spk2utt_sample_region[spk].append(current_region)
# Now we have enough speakers
while not stop_event.is_set():
if sample_with_prob:
batch_speakers = sample_with_probability_valid(rd, speakers, num_speakers, spk_sample_region)
else:
batch_speakers = rd.sample(speakers, num_speakers)
batch_length = rd.randint(min_len, max_len)
features = np.zeros((num_speakers * num_segments, batch_length, feature_reader.dim), dtype=np.float32)
labels = np.zeros((num_speakers * num_segments), dtype=np.int32)
for i, speaker in enumerate(batch_speakers):
spk = speaker
# The length may be larger than the utterance length. A check should be applied first.
feature_list = set()
while len(feature_list) == 0:
feature_list = set()
for feat in spk2features[spk]:
if feature_reader.utt2num_frames[feat.split(' ')[0]] > batch_length:
feature_list.add(feat)
if len(feature_list) == 0:
# The speaker is not appropriate for this batch. Resample the speaker
spk = rd.choice(list(set(speakers) - set(batch_speakers)))
batch_speakers[i] = spk
labels[i * num_segments:(i + 1) * num_segments] = spk
# # If the number is not enough
# if len(feature_list) < num_segments:
# feature_list *= (int(num_segments / len(feature_list)) + 1)
# Now the length of the list must be greater than the sample size.
if sample_with_prob:
speaker_features = sample_with_probability_valid(rd, spk2features[spk], num_segments,
spk2utt_sample_region[spk], valid_list=feature_list)
else:
speaker_features = rd.sample(feature_list, num_segments)
for j, feat in enumerate(speaker_features):
features[i * num_segments + j, :, :], _ = feature_reader.read_segment(feat, batch_length, shuffle=shuffle)
# # We need re-normalize the posteriors
# post = features[:, :, prev_dim:]
# post /= np.sum(post, axis=2, keepdims=True)
# post_new = np.zeros((post.shape[0], post.shape[1], num_classes))
#
# for index, phones in enumerate(phone_class):
# post_new[:, :, index] = np.sum(post[:, :, phones], axis=2)
# features_new = np.concatenate((features[:, :, :prev_dim], post_new), axis=2)
features = features[:, :, :(prev_dim+num_classes)]
queue.put((features, labels))
time.sleep(3)
while not queue.empty():
try:
queue.get(block=False)
except:
pass
print("The process {} is about to exit.".format(os.getpid()))
return
class KaldiPhoneDataRandomQueue(object):
"""A queue to read features from Kaldi data directory."""
def __init__(self, params, data_dir, spklist, num_parallel=1, max_qsize=20, num_speakers=None, num_segments=None, min_len=None, max_len=None, shuffle=True, sample_with_prob=False):
""" Create a queue from a given directory.
        This is basically similar to KaldiDataRandomReader. The difference is that KaldiDataRandomReader uses tf.data
        to load features while KaldiDataRandomQueue uses multiprocessing, which seems to be the better choice since
        multiprocessing significantly speeds up the loading in my case. If you can make parallel_interleave work,
        KaldiDataRandomReader is definitely more convenient because it is simpler.
Args:
data_dir: The kaldi data directory.
spklist: The spklist tells the mapping from the speaker name to the speaker id.
num_parallel: The number of threads to read features.
max_qsize: The capacity of the queue
num_speakers: The number of speakers per batch.
            num_segments: The number of segments per speaker.
batch_size = num_speakers * num_segments
When num_segments = 1, the batch is randomly chosen from n speakers,
which is used for softmax-like loss function. While we can sample multiple segments for each speaker,
which is used for triplet-loss or GE2E loss.
min_len: The minimum length of the sampled sequence.
max_len: The maximum length of the sampled sequence.
shuffle: Loading data from the 0-th frame or a random frame.
sample_with_prob: Sample the speaker and utt with the probability according to the data length.
"""
self.params = params
self.data = data_dir
self.num_speakers = num_speakers
self.num_segments = num_segments
self.min_len = min_len
self.max_len = max_len
self.num_parallel_datasets = num_parallel
self.shuffle = shuffle
self.sample_with_prob = sample_with_prob
if self.sample_with_prob:
tf.logging.info("The training examples are sampled with probability.")
# We process the data directory and fetch speaker information.
self.spk2features, self.features2spk, spk2index = get_speaker_info(data_dir, spklist)
# We also load #frames for each speaker and #frames for each utt
self.utt2num_frames = {}
with open(os.path.join(data_dir, "utt2num_frames"), 'r') as f:
for line in f.readlines():
utt, n = line.strip().split(" ")
self.utt2num_frames[utt] = int(n)
self.spk2num_frames = {}
for spk in self.spk2features:
n = 0
for utt in self.spk2features[spk]:
n += self.utt2num_frames[utt.split(" ")[0]]
self.spk2num_frames[spk] = n
        # The total number of speakers is taken from the spklist, which may include
        # speakers that are not present in this data directory (e.g. in the valid set).
self.num_total_speakers = len(list(spk2index.keys()))
# The Queue is thread-safe and used to save the features.
self.queue = Queue(max_qsize)
self.stop_event = Event()
        # And the processes are saved
self.processes = []
def set_batch(self, num_speakers, num_segments):
"""Set the batch-related parameters
Args:
num_speakers: The number of speakers per batch.
            num_segments: The number of segments per speaker.
"""
self.num_speakers = num_speakers
self.num_segments = num_segments
def set_length(self, min_len, max_len):
"""Set the length of the sequence
Args:
min_len: The minimum length of the sampled sequence.
max_len: The maximum length of the sampled sequence.
"""
self.min_len = min_len
self.max_len = max_len
def start(self):
"""Start processes to load features
"""
self.processes = [Process(target=phone_batch_random, args=(self.stop_event,
self.queue,
self.data,
self.spk2features,
self.spk2num_frames,
self.utt2num_frames,
self.num_total_speakers,
self.num_speakers,
self.num_segments,
self.min_len,
self.max_len,
self.shuffle,
self.params.feat_dim + self.params.bn_dim,
self.params.phone_class,
i,
self.sample_with_prob))
for i in range(self.num_parallel_datasets)]
for process in self.processes:
process.daemon = True
process.start()
def fetch(self):
"""Fetch data from the queue"""
return self.queue.get()
def stop(self):
"""Stop the threads
After stop, the processes are terminated and the queue may become unavailable.
"""
self.stop_event.set()
print("Clean the data queue that subprocesses can detect the stop event...")
while not self.queue.empty():
# Clear the queue content before join the threads. They may wait for putting the data to the queue.
self.queue.get()
time.sleep(3)
for process in self.processes:
# TODO: fix the join problem
process.terminate()
# process.join()
def phone_batch_sequence(stop_event,
queue,
data,
feature_list,
features2spk,
batch_size=128,
min_len=200,
max_len=400,
shuffle=True,
prev_dim=0,
phone_class=None,
seed=0):
"""Load features and fill a queue. Used in KaldiDataSeqQueue.
Args:
stop_event: An event indicating the reading is finished.
queue: A queue to put the data.
data: The kaldi data directory.
feature_list: A list shows which features the process should read.
features2spk: A dict map features to speaker index.
batch_size: The batch_size
min_len: The minimum length of the features.
max_len: The maximum length of the features.
shuffle: Load the feature from the 0-th frame or a random frame.
seed: The number is used to generate a random seed
"""
# Read the comment in batch_random
rd = random.Random(os.urandom(4))
rd.jumpahead(seed)
num_classes = len(phone_class)
feature_reader = FeatureReader(data)
num_batches = int(len(feature_list) / batch_size)
for i in range(num_batches):
batch_length = rd.randint(min_len, max_len)
# In some cases, the minimum length of the utterances is smaller than the batch length.
# Use the smallest length as the real batch length.
for j in range(batch_size):
if feature_reader.utt2num_frames[feature_list[i * batch_size + j].split(' ')[0]] < batch_length:
batch_length = feature_reader.utt2num_frames[feature_list[i * batch_size + j].split(' ')[0]]
features = np.zeros((batch_size, batch_length, feature_reader.dim), dtype=np.float32)
labels = np.zeros((batch_size), dtype=np.int32)
for j in range(batch_size):
features[j, :, :], _ = feature_reader.read_segment(feature_list[i * batch_size + j], batch_length, shuffle=shuffle)
labels[j] = features2spk[feature_list[i * batch_size + j]]
# # We need re-normalize the posteriors
# post = features[:, :, prev_dim:]
# post /= np.sum(post, axis=2, keepdims=True)
# post_new = np.zeros((post.shape[0], post.shape[1], num_classes))
#
# for index, phones in enumerate(phone_class):
# post_new[:, :, index] = np.sum(post[:, :, phones], axis=2)
# features_new = np.concatenate((features[:, :, :prev_dim], post_new), axis=2)
features = features[:, :, :(prev_dim + num_classes)]
queue.put((features, labels))
stop_event.set()
print("The process {} is about to exit.".format(os.getpid()))
return
class KaldiPhoneDataSeqQueue(object):
"""A queue to read features from Kaldi data directory."""
def __init__(self, params, data_dir, spklist, num_parallel=1, max_qsize=20, batch_size=128, min_len=None, max_len=None, shuffle=True):
""" Create a queue from a given directory.
        Unlike KaldiDataRandomQueue, KaldiDataSeqQueue loads data in sequence, which means each segment appears once
        per epoch. This is usually used for validation (using softmax-like loss or EER).
Args:
data_dir: The kaldi data directory.
spklist: The spklist tells the mapping from the speaker name to the speaker id.
num_parallel: The number of threads to read features.
max_qsize: The capacity of the queue.
batch_size: The batch size.
min_len: The minimum length of the sampled sequence.
max_len: The maximum length of the sampled sequence.
shuffle: Shuffle the load sequence and loading data from a random frame.
"""
self.params = params
self.data = data_dir
self.batch_size = batch_size
self.min_len = min_len
self.max_len = max_len
self.num_parallel_datasets = num_parallel
self.shuffle = shuffle
# We process the data directory and fetch speaker information.
self.spk2features, self.features2spk, spk2index = get_speaker_info(data_dir, spklist)
self.num_total_speakers = len(list(spk2index.keys()))
# Arrange features in sequence
self.feature_list = []
self.sub_feature_list = []
for spk in self.spk2features:
self.feature_list += self.spk2features[spk]
if shuffle:
random.shuffle(self.feature_list)
# Split the features to N sub-list. The lists are used in each process.
num_sub_features = len(self.feature_list) / num_parallel
for i in range(num_parallel):
if i == num_parallel - 1:
self.sub_feature_list.append(self.feature_list[i * num_sub_features:])
else:
self.sub_feature_list.append(self.feature_list[i * num_sub_features:(i + 1) * num_sub_features])
# The Queue is thread-safe and used to save the features.
self.queue = Queue(max_qsize)
        # The events will be set once the processes finish their jobs
        self.stop_event = [Event() for _ in range(num_parallel)]
        # And the processes are saved
self.processes = []
def set_batch(self, batch_size):
"""Set the batch size
"""
self.batch_size = batch_size
def set_length(self, min_len, max_len):
"""Set the length of the sequence
Args:
min_len: The minimum length of the sampled sequence.
max_len: The maximum length of the sampled sequence.
"""
self.min_len = min_len
self.max_len = max_len
def start(self):
"""Start processes to load features
"""
self.processes = [Process(target=phone_batch_sequence, args=(self.stop_event[i],
self.queue,
self.data,
self.sub_feature_list[i],
self.features2spk,
self.batch_size,
self.min_len,
self.max_len,
self.shuffle,
self.params.feat_dim + self.params.bn_dim,
self.params.phone_class,
i))
for i in range(self.num_parallel_datasets)]
for process in self.processes:
process.daemon = True
process.start()
def fetch(self):
"""Fetch data from the queue"""
if self.queue.empty():
all_finish = [self.stop_event[i].is_set() for i in range(self.num_parallel_datasets)]
if all(all_finish):
# If the queue is empty and all processes are finished, we got nothing to read.
for process in self.processes:
# TODO: fix the join problem
process.terminate()
raise DataOutOfRange
return self.queue.get()
def stop(self):
"""Stop the threads"""
for process in self.processes:
# TODO: fix the join problem
process.terminate()
# process.join()
if __name__ == "__main__":
# data_simple = "/home/heliang05/liuyi/voxceleb/data/voxceleb_train_combined_no_sil/softmax_valid"
# spklist_simple = "/home/heliang05/liuyi/voxceleb/data/voxceleb_train_combined_no_sil/train/spklist"
# # data = "/home/heliang05/liuyi/voxceleb/data/voxceleb_train_combined_no_sil/end2end_valid"
# # spklist = "/home/heliang05/liuyi/voxceleb/data/voxceleb_train_combined_no_sil/end2end_valid/spklist"
# aux_data_dir = "/home/heliang05/liuyi/voxceleb/data/voxceleb_train_combined_no_sil/test_aux_data"
# num_loads = 10
# import time
num_speakers = 16
num_segments = 1
min_len = 200
max_len = 400
batch_size = num_speakers * num_segments
shuffle = False
num_parallel = 1
# spk2features, features2spk, spk2index = get_speaker_info(data, spklist)
# num_total_speakers = len(list(spk2index.keys()))
#
# rd = random.Random(os.urandom(4))
# feature_reader = FeatureReader(data)
# speakers = list(spk2features.keys())
# if num_total_speakers < num_speakers:
# print(
# "[Warning] The number of available speakers are less than the required speaker. Some speakers will be duplicated.")
# speakers = speakers * (int(num_speakers / num_total_speakers) + 1)
#
# # Single input and single output.
#
# batch_speakers = rd.sample(speakers, num_speakers)
# batch_length = rd.randint(min_len, max_len)
# features = np.zeros((num_speakers * num_segments, batch_length, feature_reader.dim), dtype=np.float32)
# features_comp = np.zeros((num_speakers * num_segments, batch_length, feature_reader.dim), dtype=np.float32)
# labels = np.zeros((num_speakers * num_segments), dtype=np.int32)
# for i, speaker in enumerate(batch_speakers):
# # The length may be larger than the utterance length. A check should be applied first.
# feature_list = []
# spk = speaker
# while len(feature_list) == 0:
# feature_list = []
# for feat in spk2features[spk]:
# if feature_reader.utt2num_frames[feat.split(' ')[0]] > batch_length:
# feature_list.append(feat)
# if len(feature_list) == 0:
# # The speaker is not appropriate for this batch. Resample the speaker
# spk = rd.choice(list(set(speakers) - set(batch_speakers)))
# batch_speakers[i] = spk
#
# labels[i * num_segments:(i + 1) * num_segments] = spk
# # If the number is not enough
# if len(feature_list) < num_segments:
# feature_list *= (int(num_segments / len(feature_list)) + 1)
# # Now the length of the list must be greater than the sample size.
# speaker_features = rd.sample(feature_list, num_segments)
# for j, feat in enumerate(speaker_features):
# features[i * num_segments + j, :, :], start_pos = feature_reader.read_segment(feat, batch_length, shuffle=shuffle)
# features_comp[i * num_segments + j, :, :], _ = feature_reader.read(feat, batch_length, start=start_pos)
#
# pdb.set_trace()
# assert np.allclose(features, features_comp)
# print(labels)
#
# feature_list = []
# for spk in spk2features:
# feature_list += spk2features[spk]
# num_batches = len(feature_list) / batch_size
# for i in range(10):
# batch_length = rd.randint(min_len, max_len)
#
# # In some cases, the minimum length of the utterances is smaller than the batch length.
# # Use the smallest length as the real batch length.
# for j in range(batch_size):
# if feature_reader.utt2num_frames[feature_list[i * batch_size + j].split(' ')[0]] < batch_length:
# batch_length = feature_reader.utt2num_frames[feature_list[i * batch_size + j].split(' ')[0]]
#
# features = np.zeros((batch_size, batch_length, feature_reader.dim), dtype=np.float32)
# features_comp = np.zeros((batch_size, batch_length, feature_reader.dim), dtype=np.float32)
# labels = np.zeros((batch_size), dtype=np.int32)
# for j in range(batch_size):
# features[j, :, :], start_pos = feature_reader.read_segment(feature_list[i * batch_size + j], batch_length, shuffle=shuffle)
# features_comp[j, :, :], _ = feature_reader.read(feature_list[i * batch_size + j], batch_length, start=start_pos)
# labels[j] = features2spk[feature_list[i * batch_size + j]]
#
# pdb.set_trace()
# assert np.allclose(features, features_comp)
# print(labels)
#
# # Using KaldiDataQueue (multiprocessing)
# # Although this will introduce CPU-GPU transfer overhead, it seems to be much faster.
# data_loader = KaldiDataRandomQueue(data_simple, spklist_simple, num_parallel=8, max_qsize=10, num_speakers=64, num_segments=1, min_len=2000, max_len=2000, shuffle=True)
# with tf.Session() as sess:
# ts = time.time()
# features = tf.placeholder(tf.float32, shape=[None, None, None])
# labels = tf.placeholder(tf.int32, shape=[None])
# features += 1
# data_loader.start()
# for _ in range(num_loads):
# features_val, labels_val = data_loader.fetch()
# features_test, labels_test = sess.run([features, labels], feed_dict={features: features_val,
# labels: labels_val})
# te = time.time() - ts
# data_loader.stop()
# print("Time: %.4f s" % te)
# pdb.set_trace()
# print(labels_test)
#
# data_loader = KaldiDataSeqQueue(data, spklist, num_parallel=8, max_qsize=10, batch_size=64, min_len=200, max_len=400, shuffle=True)
# with tf.Session() as sess:
# features = tf.placeholder(tf.float32, shape=[None, None, None])
# labels = tf.placeholder(tf.int32, shape=[None])
# features += 1
# data_loader.start()
# index = 1
# while index < 10:
# try:
# features_val, labels_val = data_loader.fetch()
# features_test, labels_test = sess.run([features, labels], feed_dict={features: features_val,
# labels: labels_val})
# index += 1
# except DataOutOfRange:
# break
# data_loader.stop()
# pdb.set_trace()
# print(labels_test)
#
# # Multiple input
#
# aux_data = {}
# for dirname in os.listdir(aux_data_dir):
# if os.path.isdir(os.path.join(aux_data_dir, dirname)):
# aux_data[dirname] = os.path.join(aux_data_dir, dirname)
# spk2features, features2spk, spk2index = get_aux_speaker_info(data, aux_data, spklist)
#
# feature_reader = {}
# feature_reader["features"] = FeatureReader(data)
# for name in aux_data:
# feature_reader[name] = FeatureReader(aux_data[name])
#
# speakers = list(spk2features.keys())
# if num_total_speakers < num_speakers:
# print(
# "[Warning] The number of available speakers are less than the required speaker. Some speakers will be duplicated.")
# speakers = speakers * (int(num_speakers / num_total_speakers) + 1)
#
# batch_speakers = rd.sample(speakers, num_speakers)
# batch_length = rd.randint(min_len, max_len)
# features = {}
# for name in feature_reader:
# features[name] = np.zeros((num_speakers * num_segments, batch_length, feature_reader[name].dim),
# dtype=np.float32)
# features_comp = {}
# for name in feature_reader:
# features_comp[name] = np.zeros((num_speakers * num_segments, batch_length, feature_reader[name].dim),
# dtype=np.float32)
# labels = np.zeros((num_speakers * num_segments), dtype=np.int32)
#
# for i, speaker in enumerate(batch_speakers):
# # The length may be larger than the utterance length. A check should be applied first.
# feature_list = []
# spk = speaker
# while len(feature_list) == 0:
# feature_list = []
# for feat in spk2features[spk]:
# if feature_reader["features"].utt2num_frames[feat["features"].split(' ')[0]] > batch_length:
# feature_list.append(feat)
# if len(feature_list) == 0:
# # The speaker is not appropriate for this batch. Resample the speaker
# spk = rd.choice(list(set(speakers) - set(batch_speakers)))
# batch_speakers[i] = spk
#
# labels[i * num_segments:(i + 1) * num_segments] = spk
# if len(feature_list) < num_segments:
# feature_list *= (int(num_segments / len(feature_list)) + 1)
# # Now the length of the list must be greater than the sample size.
# speaker_features = rd.sample(feature_list, num_segments)
#
# for j, feat in enumerate(speaker_features):
# # Load features first.
# features["features"][i * num_segments + j, :, :], start_pos = feature_reader["features"].read_segment(
# feat["features"],
# batch_length,
# shuffle=shuffle)
# for name in feature_reader:
# if name == "features":
# continue
# features[name][i * num_segments + j, :, :], _ = feature_reader[name].read_segment(feat[name], batch_length,
# start=start_pos)
#
# features_comp["features"][i * num_segments + j, :, :], _ = feature_reader["features"].read(
# feat["features"],
# batch_length,
# start=start_pos)
# for name in feature_reader:
# if name == "features":
# continue
# features_comp[name][i * num_segments + j, :, :], _ = feature_reader[name].read(feat[name], batch_length,
# start=start_pos)
#
#     # Test the consistency of the features (the starting points)
# for name in feature_reader:
# assert np.allclose(features[name], features_comp[name])
# assert np.allclose(features[name], features["features"])
# print(labels)
#
# feature_list = []
# for spk in spk2features:
# feature_list += spk2features[spk]
# num_batches = int(len(feature_list) / batch_size)
# pdb.set_trace()
# for i in range(10):
# batch_length = rd.randint(min_len, max_len)
# for j in range(batch_size):
# if feature_reader["features"].utt2num_frames[feature_list[i * batch_size + j]["features"].split(' ')[0]] < batch_length:
# batch_length = feature_reader["features"].utt2num_frames[feature_list[i * batch_size + j]["features"].split(' ')[0]]
#
# features = {}
# for name in feature_reader:
# features[name] = np.zeros((batch_size, batch_length, feature_reader[name].dim), dtype=np.float32)
# features_comp = {}
# for name in feature_reader:
# features_comp[name] = np.zeros((batch_size, batch_length, feature_reader[name].dim), dtype=np.float32)
# labels = np.zeros((batch_size), dtype=np.int32)
# for j in range(batch_size):
# # Load the features first
# features["features"][j, :, :], start_pos = feature_reader["features"].read_segment(
# feature_list[i * batch_size + j]["features"],
# batch_length,
# shuffle=shuffle)
# for name in feature_reader:
# if name == "features":
# continue
# features[name][j, :, :], _ = feature_reader[name].read_segment(feature_list[i * batch_size + j][name],
# batch_length, start=start_pos)
# features_comp["features"][j, :, :], _ = feature_reader["features"].read(
# feature_list[i * batch_size + j]["features"],
# batch_length,
# start=start_pos)
# for name in feature_reader:
# if name == "features":
# continue
# features_comp[name][j, :, :], _ = feature_reader[name].read(feature_list[i * batch_size + j][name],
# batch_length, start=start_pos)
# labels[j] = features2spk[feature_list[i * batch_size + j]["features"]]
#
# pdb.set_trace()
# for name in feature_reader:
# assert np.allclose(features[name], features_comp[name])
# assert np.allclose(features[name], features["features"])
# print(labels)
#
# data_loader = KaldiMultiDataRandomQueue(data, aux_data_dir, spklist, num_parallel=10)
# data_loader.set_batch(64, 1)
# data_loader.set_length(200, 400)
# data_loader.start()
# for _ in range(num_loads):
# features_val, labels_val = data_loader.fetch()
# data_loader.stop()
# pdb.set_trace()
# print(labels_val)
# for name in features_val:
# assert np.allclose(features_val[name], features_val["features"])
#
# data_loader = KaldiMultiDataSeqQueue(data, aux_data_dir, spklist)
# data_loader.set_batch(64)
# data_loader.set_length(200, 400)
# data_loader.start()
# for _ in range(num_loads):
# features_val, labels_val = data_loader.fetch()
# data_loader.stop()
# pdb.set_trace()
# print(labels_val)
# for name in features_val:
# assert np.allclose(features_val[name], features_val["features"])
# # We process the data directory and fetch speaker information.
# spk2features, features2spk, spk2index = get_speaker_info(data_simple, spklist_simple)
# utt2num_frames = {}
# with open(os.path.join(data_simple, "utt2num_frames"), 'r') as f:
# for line in f.readlines():
# utt, n = line.strip().split(" ")
# utt2num_frames[utt] = int(n)
#
# spk2num_frames = {}
# for spk in spk2features:
# n = 0
# for utt in spk2features[spk]:
# n += utt2num_frames[utt.split(" ")[0]]
# spk2num_frames[spk] = n
#
# # The number of speakers should be
# num_total_speakers = len(list(spk2index.keys()))
#
# rd = random.Random(os.urandom(4))
# rd.jumpahead(0)
#
# feature_reader = FeatureReader(data_simple)
# speakers = list(spk2features.keys())
# if num_total_speakers < num_speakers:
# print(
# "[Warning] The number of available speakers are less than the required speaker. Some speakers will be duplicated.")
# speakers = speakers * (int(num_speakers / num_total_speakers) + 1)
#
# total_num_frames = np.sum(spk2num_frames.values())
# spk_sample_region = []
# current_region = 0
# for spk in speakers:
# current_region += spk2num_frames[spk]
# spk_sample_region.append(current_region)
# assert total_num_frames == current_region
#
# spk2utt_sample_region = {}
# for spk in speakers:
# spk2utt_sample_region[spk] = []
# current_region = 0
# for utt in spk2features[spk]:
# current_region += utt2num_frames[utt.split(" ")[0]]
# spk2utt_sample_region[spk].append(current_region)
#
# pdb.set_trace()
# # Now we have enough speakers
# for i in range(10):
# # batch_speakers = rd.sample(speakers, num_speakers)
# batch_speakers = sample_with_probability_valid(rd, speakers, num_speakers, spk_sample_region)
#
# batch_length = rd.randint(min_len, max_len)
# features = np.zeros((num_speakers * num_segments, batch_length, feature_reader.dim), dtype=np.float32)
# labels = np.zeros((num_speakers * num_segments), dtype=np.int32)
#
# for i, speaker in enumerate(batch_speakers):
# spk = speaker
#
# # The length may be larger than the utterance length. A check should be applied first.
# feature_list = set()
# while len(feature_list) == 0:
# feature_list = set()
# for feat in spk2features[spk]:
# if feature_reader.utt2num_frames[feat.split(' ')[0]] > batch_length:
# feature_list.add(feat)
# if len(feature_list) == 0:
# # The speaker is not appropriate for this batch. Resample the speaker
# spk = rd.choice(list(set(speakers) - set(batch_speakers)))
# batch_speakers[i] = spk
#
# labels[i * num_segments:(i + 1) * num_segments] = spk
#
# # # If the number is not enough
# # if len(feature_list) < num_segments:
# # feature_list *= (int(num_segments / len(feature_list)) + 1)
#
# # Now the length of the list must be greater than the sample size.
# # speaker_features = rd.sample(feature_list, num_segments)
# speaker_features = sample_with_probability_valid(rd, spk2features[spk], num_segments,
# spk2utt_sample_region[spk], valid_list=feature_list)
# for j, feat in enumerate(speaker_features):
# features[i * num_segments + j, :, :], _ = feature_reader.read_segment(feat, batch_length,
# shuffle=shuffle)
data = "/home/heliang05/liuyi/sre.full/data/swbd_sre_combined_phone_nosil_vcno/train"
spklist = "/home/heliang05/liuyi/sre.full/data/swbd_sre_combined_phone_nosil_vcno/train/spklist"
prev_dim = 23 + 40
phone_class = []
with open("misc/tuning/phone_class.txt", "r") as f:
for line in f.readlines():
phones = line.strip().split(" ")
phone_class.append([int(p) for p in phones])
# # We process the data directory and fetch speaker information.
# spk2features, features2spk, spk2index = get_speaker_info(data, spklist)
# num_total_speakers = len(list(spk2index.keys()))
#
# # Arrange features in sequence
# feature_list = []
# sub_feature_list = []
# for spk in spk2features:
# feature_list += spk2features[spk]
#
# seed = 1
# if shuffle:
# random.shuffle(feature_list)
# rd = random.Random(os.urandom(4))
# rd.jumpahead(seed)
# num_classes = len(phone_class)
# feature_reader = FeatureReader(data)
# num_batches = int(len(feature_list) / batch_size)
# for i in range(num_batches):
# batch_length = rd.randint(min_len, max_len)
#
# # In some cases, the minimum length of the utterances is smaller than the batch length.
# # Use the smallest length as the real batch length.
# for j in range(batch_size):
# if feature_reader.utt2num_frames[feature_list[i * batch_size + j].split(' ')[0]] < batch_length:
# batch_length = feature_reader.utt2num_frames[feature_list[i * batch_size + j].split(' ')[0]]
#
# features = np.zeros((batch_size, batch_length, feature_reader.dim), dtype=np.float32)
# labels = np.zeros((batch_size), dtype=np.int32)
# for j in range(batch_size):
# features[j, :, :], _ = feature_reader.read_segment(feature_list[i * batch_size + j], batch_length,
# shuffle=shuffle)
# labels[j] = features2spk[feature_list[i * batch_size + j]]
#
# import pdb
# pdb.set_trace()
# # We need re-normalize the posteriors
# post = features[:, :, prev_dim:]
# post /= np.sum(post, axis=2, keepdims=True)
# post_new = np.zeros((post.shape[0], post.shape[1], num_classes))
#
# for index, phones in enumerate(phone_class):
# post_new[:, :, index] = np.sum(post[:, :, phones], axis=2)
# features_new = np.concatenate((features[:, :, :prev_dim], post_new), axis=2)
# print(features_new)
# utt2num_frames = {}
# with open(os.path.join(data, "utt2num_frames"), 'r') as f:
# for line in f.readlines():
# utt, n = line.strip().split(" ")
# utt2num_frames[utt] = int(n)
#
# spk2num_frames = {}
# for spk in spk2features:
# n = 0
# for utt in spk2features[spk]:
# n += utt2num_frames[utt.split(" ")[0]]
# spk2num_frames[spk] = n
#
# rd = random.Random(os.urandom(4))
# rd.jumpahead(seed)
# num_classes = len(phone_class)
# feature_reader = FeatureReader(data)
# speakers = list(spk2features.keys())
# if num_total_speakers < num_speakers:
# print(
# "[Warning] The number of available speakers are less than the required speaker. Some speakers will be duplicated.")
# speakers = speakers * (int(num_speakers / num_total_speakers) + 1)
#
# total_num_frames = np.sum(spk2num_frames.values())
# spk_sample_region = []
# current_region = 0
# for spk in speakers:
# current_region += spk2num_frames[spk]
# spk_sample_region.append(current_region)
# assert total_num_frames == current_region
#
# spk2utt_sample_region = {}
# for spk in speakers:
# spk2utt_sample_region[spk] = []
# current_region = 0
# for utt in spk2features[spk]:
# current_region += utt2num_frames[utt.split(" ")[0]]
# spk2utt_sample_region[spk].append(current_region)
#
# # Now we have enough speakers
# while True:
# batch_speakers = rd.sample(speakers, num_speakers)
#
# batch_length = rd.randint(min_len, max_len)
# features = np.zeros((num_speakers * num_segments, batch_length, feature_reader.dim), dtype=np.float32)
# labels = np.zeros((num_speakers * num_segments), dtype=np.int32)
#
# for i, speaker in enumerate(batch_speakers):
# spk = speaker
#
# # The length may be larger than the utterance length. A check should be applied first.
# feature_list = set()
# while len(feature_list) == 0:
# feature_list = set()
# for feat in spk2features[spk]:
# if feature_reader.utt2num_frames[feat.split(' ')[0]] > batch_length:
# feature_list.add(feat)
# if len(feature_list) == 0:
# # The speaker is not appropriate for this batch. Resample the speaker
# spk = rd.choice(list(set(speakers) - set(batch_speakers)))
# batch_speakers[i] = spk
#
# labels[i * num_segments:(i + 1) * num_segments] = spk
#
# # # If the number is not enough
# # if len(feature_list) < num_segments:
# # feature_list *= (int(num_segments / len(feature_list)) + 1)
#
# speaker_features = rd.sample(feature_list, num_segments)
#
# for j, feat in enumerate(speaker_features):
# features[i * num_segments + j, :, :], _ = feature_reader.read_segment(feat, batch_length, shuffle=shuffle)
#
# # We need re-normalize the posteriors
# post = features[:, :, prev_dim:]
# post /= np.sum(post, axis=2, keepdims=True)
# post_new = np.zeros((post.shape[0], post.shape[1], num_classes))
#
# for index, phones in enumerate(phone_class):
# post_new[:, :, index] = np.sum(post[:, :, phones], axis=2)
# features_new = np.concatenate((features[:, :, :prev_dim], post_new), axis=2)
# print(features_new)
from misc.utils import ParamsPlain
params = ParamsPlain()
params.dict["feat_dim"] = 23
params.dict["bn_dim"] = 40
params.dict["phone_class"] = phone_class
data_loader = KaldiPhoneDataRandomQueue(params, data, spklist, num_speakers=64, num_segments=1, min_len=200, max_len=400)
# KaldiPhoneDataSeqQueue(params, data, spklist, batch_size=128, min_len=200, max_len=400)
import pdb
pdb.set_trace()
data_loader.start()
for _ in range(10):
features_val, labels_val = data_loader.fetch()
print(features_val)
data_loader.stop()
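# A sequential-queue sketch, kept commented out like the experiments above. It
# assumes KaldiPhoneDataSeqQueue follows the same DataOutOfRange protocol as
# KaldiDataSeqQueue in the commented tests earlier in this file.
#
# data_loader = KaldiPhoneDataSeqQueue(params, data, spklist, batch_size=128, min_len=200, max_len=400)
# data_loader.start()
# while True:
#     try:
#         features_val, labels_val = data_loader.fetch()
#     except DataOutOfRange:
#         break
# data_loader.stop()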
|
py | 7df894be1ca37b0306645cdc26f03d15fc9af674 | """calibration tool"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import copy
import math
import numpy as np
from caffe2.proto import caffe2_pb2
from caffe2.python import core, utils
hist = {}
hist_edges = {}
iteration_idx = 0
class algorithm(object):
"""algorithm class"""
def get_predecessor_op(self, inp_index, op_pos, predict_def):
""" get predecessor op from the net"""
if op_pos < 1 or op_pos >= len(predict_def.op):
return None, None
pos = op_pos - 1
input_name = predict_def.op[op_pos].input[inp_index]
while pos >= 0:
op = predict_def.op[pos]
for outp in op.output:
if outp == input_name:
return op, pos
pos -= 1
return None, None
def get_successor_ops(self, op_pos, predict_def):
""" get successor op from the net"""
if op_pos < 0 or op_pos >= (len(predict_def.op) - 1):
return []
successors = []
pos = op_pos + 1
output = predict_def.op[op_pos].output[0]
while pos < len(predict_def.op):
op = predict_def.op[pos]
for inp in op.input:
if inp == output:
successors.append(op)
break
for outp in op.output:
if outp == output:
return successors
pos += 1
return successors
def get_input_index(self, inp_name, op):
for i, inp in enumerate(op.input):
if inp == inp_name:
return i
return None
def insert_op(self, index, new_op, predict_def):
src_op = new_op
cur_index = index
while cur_index < len(predict_def.op):
cur_op = predict_def.op[cur_index]
buf_op = copy.deepcopy(cur_op)
cur_op.CopyFrom(src_op)
src_op = buf_op
cur_index += 1
predict_def.op.extend([src_op])
def arg_index(self, op, name):
for i in range(len(op.arg)):
if op.arg[i].name == name:
return i
return None
def get_arg(self, op, name):
for i in range(len(op.arg)):
if op.arg[i].name == name:
return op.arg[i]
return None
def remove_arg(self, op, name):
for i in range(len(op.arg)):
if op.arg[i].name == name:
del op.arg[i]
return True
return False
def remove_max(self, predict_def):
for op in predict_def.op:
for i in range(len(op.input)):
self.remove_arg(op, 'absmax_input'+ '_' + str(i))
for j in range(len(op.output)):
self.remove_arg(op, 'absmax_output'+ '_' + str(j))
def get_max(self, op, blob, max_name, tensor_idx, tensor_name):
raise Exception("Please add max value computation method!")
def gather_max(self, predict_def):
pass
def update_status(self):
pass
class KLCalib(algorithm):
"""clibrator of KL"""
def __init__(self, kl_iter_num_for_range=100):
self.kl_iter_num_for_range = kl_iter_num_for_range
def update_status(self):
global iteration_idx
iteration_idx += 1
def get_max(self, op, blob, max_name, tensor_idx, tensor_name):
global iteration_idx
name = max_name + "_" + str(tensor_idx)
op_hist_name = tensor_name + "_" + max_name + "_" + str(tensor_idx)
arg = self.get_arg(op, name)
if iteration_idx < self.kl_iter_num_for_range:
max_min = np.array([np.max(blob), np.min(blob)]).astype(np.float32)
if arg is not None:
orig_max = arg.floats[0]
orig_min = arg.floats[1]
cur_max = max(orig_max, max_min[0])
cur_min = min(orig_min, max_min[1])
max_min = np.array([cur_max, cur_min]).astype(np.float32)
self.remove_arg(op, name)
            # save max values in predict_def as operator arguments
max_arg = utils.MakeArgument(name, max_min)
op.arg.extend([max_arg])
else:
assert arg is not None
max_val = arg.floats[0]
min_val = arg.floats[1]
self.get_kl_hist(blob, min_val, max_val, op_hist_name)
def update_max(self, op, max_name, tensor_idx, tensor_name):
"""update the max data of the collected data"""
global hist
global hist_edges
global iteration_idx
name = max_name + "_" + str(tensor_idx)
hist_name = tensor_name + "_" + max_name + "_" + str(tensor_idx)
P_sum = iteration_idx - self.kl_iter_num_for_range
arg = self.get_arg(op, name)
assert arg is not None
max_val = arg.floats[0]
min_val = arg.floats[1]
hist_iter = hist[hist_name]
hist_edges_iter = hist_edges[hist_name]
layer_max = self.get_optimal_scaling_factor(hist_iter,
hist_edges_iter, P_sum, max_val, min_val)
self.remove_arg(op, name)
max_arg = utils.MakeArgument(name, np.array([layer_max]).astype(np.float32))
        # save max values in predict_def as operator arguments
op.arg.extend([max_arg])
def gather_max(self, predict_def):
for op in predict_def.op[0:]:
for j, input_name in enumerate(op.input):
max_name = 'absmax_input'
self.update_max(op, max_name, j, input_name)
for m, output_name in enumerate(op.output):
max_name = 'absmax_output'
self.update_max(op, max_name, m, output_name)
def get_kl_hist(self, data, min_val, max_val, name):
hist_iter, hist_edges_iter = np.histogram(data, bins=2048,
range=(min_val, max_val))
global hist
global hist_edges
if name not in hist:
hist[name] = np.array(hist_iter)
hist_edges[name] = np.array(hist_edges_iter)
else:
hist[name] += np.array(hist_iter)
def expand_quantized_bins(self, quantized_bins, reference_bins):
"""expand quantized bins"""
expanded_quantized_bins = [0]*len(reference_bins)
num_merged_bins = int(len(reference_bins)/len(quantized_bins))
j_start = 0
j_end = num_merged_bins
        for idx in range(len(quantized_bins)):
zero_count = reference_bins[j_start:j_end].count(0)
num_merged_bins = j_end-j_start
if zero_count == num_merged_bins:
avg_bin_ele = 0
else:
avg_bin_ele = quantized_bins[idx]/(num_merged_bins - zero_count + 0.0)
            for idx1 in range(j_start, j_end):
expanded_quantized_bins[idx1] = (0 if reference_bins[idx1] == 0
else avg_bin_ele)
j_start += num_merged_bins
j_end += num_merged_bins
if (idx+1) == len(quantized_bins) - 1:
j_end = len(reference_bins)
return expanded_quantized_bins
def safe_entropy(self, reference_distr_P, P_sum, candidate_distr_Q, Q_sum):
"""safe entropy"""
assert len(reference_distr_P) == len(candidate_distr_Q)
tmp_sum1 = 0
tmp_sum2 = 0
for idx, _ in enumerate(reference_distr_P):
p_idx = reference_distr_P[idx]
q_idx = candidate_distr_Q[idx]
if p_idx == 0:
tmp_sum1 += 0
tmp_sum2 += 0
else:
if q_idx == 0:
print("Fatal error!, idx = " + str(idx) +
" qindex = 0! p_idx = " + str(p_idx))
tmp_sum1 += p_idx * (math.log(Q_sum*p_idx))
tmp_sum2 += p_idx * (math.log(P_sum*q_idx))
return (tmp_sum1 - tmp_sum2)/P_sum
def get_optimal_scaling_factor(self, hist, hist_edges, P_sum, max_val, min_val,
num_quantized_bins=255):
"""get the optimal scaling factor"""
if min_val >= 0:
ending_iter = 2047
starting_iter = int(ending_iter * 0.7)
else:
th = max(abs(max_val), abs(min_val))
starting_iter = 0
ending_iter = 2047
if abs(max_val) > abs(min_val):
while starting_iter < ending_iter:
if hist[starting_iter] == 0:
starting_iter += 1
continue
else:
break
starting_iter += int((ending_iter - starting_iter)*0.6)
else:
while ending_iter > 0:
if hist[ending_iter] == 0:
ending_iter -= 1
continue
else:
break
starting_iter = int(0.6 * ending_iter)
bin_width = hist_edges[1]-hist_edges[0]
min_kl_divergence = 0
min_kl_index = 0
kl_inited = False
for i in range(starting_iter, ending_iter+1):
reference_distr_P = hist[0:i].tolist()
outliers_count = sum(hist[i:2048])
if reference_distr_P[i-1] == 0:
continue
reference_distr_P[i-1] += outliers_count
reference_distr_bins = reference_distr_P[:]
candidate_distr_Q = hist[0:i].tolist()
num_merged_bins = int(i/num_quantized_bins)
candidate_distr_Q_quantized = [0]*num_quantized_bins
j_start = 0
j_end = num_merged_bins
            for idx in range(num_quantized_bins):
candidate_distr_Q_quantized[idx] = sum(candidate_distr_Q[j_start:j_end])
j_start += num_merged_bins
j_end += num_merged_bins
if (idx+1) == num_quantized_bins - 1:
j_end = i
candidate_distr_Q = self.expand_quantized_bins(candidate_distr_Q_quantized,
reference_distr_bins)
Q_sum = sum(candidate_distr_Q)
kl_divergence = self.safe_entropy(reference_distr_P, P_sum,
candidate_distr_Q, Q_sum)
if not kl_inited:
min_kl_divergence = kl_divergence
min_kl_index = i
kl_inited = True
elif kl_divergence < min_kl_divergence:
min_kl_divergence = kl_divergence
min_kl_index = i
else:
pass
if min_kl_index == 0:
while starting_iter > 0:
if hist[starting_iter] == 0:
starting_iter -= 1
continue
else:
break
min_kl_index = starting_iter
return (min_kl_index+0.5)*bin_width
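    # Illustrative note (added for clarity, not part of the original routine):
    # with the 2048-bin histogram built in get_kl_hist, bin_width equals
    # (max_val - min_val) / 2048, and the calibrated threshold returned above is
    # (min_kl_index + 0.5) * bin_width. For example, assuming min_val = 0.0,
    # max_val = 10.0 and a best index of 1800, the threshold would be
    # 1800.5 * (10.0 / 2048) ~= 8.79, i.e. the sparsely populated tail of the
    # activation range is clipped before quantization.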
class AbsmaxCalib(algorithm):
"""calibrator for AbsMax"""
def get_max(self, op, blob, max_name, tensor_idx, tensor_name):
name = max_name + "_" + str(tensor_idx)
arg = self.get_arg(op, name)
absmax = np.array([np.absolute(blob).max()]).astype(np.float32)
if arg is not None:
orig_absmax = arg.floats[0]
absmax = np.array([np.absolute([orig_absmax, absmax]).max()]).astype(np.float32)
self.remove_arg(op, name)
max_arg = utils.MakeArgument(name, absmax)
        # save max values in predict_def as operator arguments
op.arg.extend([max_arg])
class EMACalib(algorithm):
"""calibrator for moving average"""
def __init__(self, ema_alpha=0.5):
self.ema_alpha = ema_alpha
def get_max(self, op, blob, max_name, tensor_idx, tensor_name):
name = max_name + "_" + str(tensor_idx)
arg = self.get_arg(op, name)
absmax = np.array([np.absolute(blob).max()]).astype(np.float32)
if arg is not None:
orig_absmax = arg.floats[0]
absmax = np.array([self.ema_alpha * absmax + (1-self.ema_alpha) * orig_absmax]).astype(np.float32)
self.remove_arg(op, name)
max_arg = utils.MakeArgument(name, absmax)
        # save max values in predict_def as operator arguments
op.arg.extend([max_arg])
class Calibrator(object):
"""main calss for calibrator"""
def __init__(self, algorithm, device_option=None):
self.algo = algorithm
self.dev_opt = device_option
def RunCalibIter(self, ws, predict_def):
"""run calibrator in iteration"""
for op in predict_def.op[0:]:
for j, input_name in enumerate(op.input):
input_blob = ws.FetchBlob(input_name)
max_name = 'absmax_input'
self.algo.get_max(op, input_blob, max_name, j, input_name)
this_op = copy.deepcopy(op)
if self.dev_opt is not None:
this_op.device_option.CopyFrom(self.dev_opt)
ws.RunOperatorOnce(this_op)
for m, output_name in enumerate(op.output):
output_blob = ws.FetchBlob(output_name)
max_name = 'absmax_output'
self.algo.get_max(op, output_blob, max_name, m, output_name)
self.algo.update_status()
return predict_def
def DepositQuantizedModule(self, ws, predict_def):
"""deposit quantized module"""
DATA_TYPE_UND = 0
DATA_TYPE_FP32 = 1
DATA_TYPE_S32 = 2
DATA_TYPE_S16 = 4
DATA_TYPE_S8 = 5
DATA_TYPE_U8 = 6
def get_zero_point(data_type):
return {
DATA_TYPE_S32 : 0,
DATA_TYPE_S8 : 128,
DATA_TYPE_U8 : 0,
}.get(data_type, None)
def get_abs_max(data_type):
return {
DATA_TYPE_S32 : 0x7FFFFFFF,
DATA_TYPE_S16 : 0x7FFF,
DATA_TYPE_S8 : 0x7F,
DATA_TYPE_U8 : 0xFF,
}.get(data_type, None)
def get_quantized_op_type(op_type):
return {
"Conv" : "Int8Conv",
"Relu" : "Int8Relu",
"Sum" : "Int8Sum",
"Add" : "Int8Add",
"MaxPool" : "Int8MaxPool",
"AveragePool" : "Int8AveragePool",
# Int8FC is not supported so far
#"FC" : "Int8FC",
}.get(op_type, None)
def get_quantized_op_type_by_fusion_type(fusion_type):
return {
1 : "Int8ConvRelu",
2 : "Int8ConvSum",
3 : "Int8ConvSumRelu",
}.get(fusion_type, None)
def get_output_format(op_type):
if op_type.startswith("Conv"):
return "NCHW"
if op_type.startswith("Int8Conv"):
return "NHWC"
if op_type.endswith("FC"):
return "NC"
return {
"NCHW2NHWC" : "NHWC",
"NHWC2NCHW" : "NCHW",
}.get(op_type, None)
def not_need_quantize(op_type):
if not op_type.startswith("Int8"):
return True
return {
"Int8Quantize" : True,
"Int8Dequantize" : True,
}.get(op_type, False)
def not_need_dequantize(op_type):
if op_type.startswith("Int8"):
return True
return {
"NCHW2NHWC" : True,
"NHWC2NCHW" : True,
}.get(op_type, False)
def not_need_output_scale(op_type):
if not op_type.startswith("Int8"):
return True
return {
"Int8Dequantize" : True
}.get(op_type, False)
def is_data_type_changed(op_type):
key_type_segment = ["Conv", "Sum", "FC", "Concat"]
for key in key_type_segment:
if op_type.find(key) != -1:
return True
return False
def has_weights(op):
key_type_segment = ["Int8Conv", "Int8FC"]
for key in key_type_segment:
if op.type.startswith(key):
return True
return False
def has_bias(op):
if op.type.startswith("Int8Conv"):
if op.type.find("Sum") != -1:
if len(op.input) == 4:
return True
elif len(op.input) == 3:
return True
return False
elif op.type.startswith("Int8FC") and len(op.input) == 3:
return True
return False
def predict_output_format(predict_def, op_pos):
if op_pos is None:
return "NCHW"
cur_pos = op_pos
op = predict_def.op[cur_pos]
while op is not None:
fmt = get_output_format(op.type)
if fmt is not None:
return fmt
op, cur_pos = self.algo.get_predecessor_op(0, cur_pos, predict_def)
return "NCHW"
def update_op_type(predict_def):
for _, op in enumerate(predict_def.op):
op_type = get_quantized_op_type(op.type)
if op_type is not None:
op.type = op_type
continue
if op.type == "ConvFusion":
arg = self.algo.get_arg(op, "fusion_type")
assert arg is not None
op_type = get_quantized_op_type_by_fusion_type(arg.i)
assert op_type is not None
op.type = op_type
def add_order_swtich(predict_def, op_pos, op_type="NCHW2NHWC", is_insert=True):
op = predict_def.op[op_pos]
if is_insert:
insert_pos = op_pos
data_in = op.input[0]
data_out = data_in + "_" + op_type + "_" + str(op_pos)
op.input[0] = data_out
else:
insert_pos = op_pos + 1
data_out = op.output[0]
data_in = data_out + "_" + op_type + "_" + str(op_pos)
op.output[0] = data_in
order_sw_op = core.CreateOperator(op_type, [data_in], [data_out])
self.algo.insert_op(insert_pos, order_sw_op, predict_def)
def insert_quantize(predict_def):
cur_pos = 0
op_len = len(predict_def.op)
while cur_pos < op_len:
op = predict_def.op[cur_pos]
if not_need_quantize(op.type):
cur_pos += 1
continue
inp_index = 0
inp_len = len(op.input)
new_pos = cur_pos
while inp_index < inp_len:
op = predict_def.op[new_pos]
pre_op, pre_pos = self.algo.get_predecessor_op(inp_index, new_pos, predict_def)
if pre_op is None:
if inp_index != 0 or new_pos != 0:
inp_index += 1
continue
elif pre_op.type.startswith("Int8"):
inp_index += 1
continue
inp = op.input[inp_index]
outp = inp + "_quantized_" + str(cur_pos) + "_" + str(inp_index)
op.input[inp_index] = outp
qua_op = core.CreateOperator("Int8Quantize", [inp], [outp])
self.algo.insert_op(new_pos, qua_op, predict_def)
if predict_output_format(predict_def, pre_pos) == "NCHW":
add_order_swtich(predict_def, new_pos)
op_len += 1
new_pos += 1
op_len += 1
new_pos += 1
inp_index += 1
cur_pos = new_pos + 1
def insert_dequantize(predict_def):
cur_pos = 0
op_len = len(predict_def.op)
while cur_pos < op_len:
op = predict_def.op[cur_pos]
if not_need_dequantize(op.type):
cur_pos += 1
continue
pre_op, pre_pos = self.algo.get_predecessor_op(0, cur_pos, predict_def)
if pre_op is None or not pre_op.type.startswith("Int8"):
cur_pos += 1
continue
inp = op.input[0]
outp = inp + "_dequantized_" + str(cur_pos)
op.input[0] = outp
deq_op = core.CreateOperator("Int8Dequantize", [inp], [outp])
self.algo.insert_op(cur_pos, deq_op, predict_def)
if predict_output_format(predict_def, pre_pos) == "NHWC":
add_order_swtich(predict_def, cur_pos, "NHWC2NCHW", False)
op_len += 1
cur_pos += 1
op_len += 1
cur_pos += 2
def refine_module_outputs(predict_def):
cur_pos = 0
op_len = len(predict_def.op)
while cur_pos < op_len:
op = predict_def.op[cur_pos]
if not_need_quantize(op.type):
cur_pos += 1
continue
successors = self.algo.get_successor_ops(cur_pos, predict_def)
if len(successors) > 0:
cur_pos += 1
continue
deq_inp = op.output[0] + "_orig_" + str(cur_pos)
deq_outp = op.output[0]
op.output[0] = deq_inp
if predict_output_format(predict_def, cur_pos) == "NHWC":
order_sw_inp = deq_outp + "_dequantized_" + str(cur_pos)
order_sw_outp = deq_outp
deq_outp = order_sw_inp
deq_op = core.CreateOperator("Int8Dequantize", [deq_inp], [deq_outp])
order_sw_op = core.CreateOperator("NHWC2NCHW", [order_sw_inp], [order_sw_outp])
predict_def.op.extend([deq_op, order_sw_op])
op_len += 2
else:
deq_op = core.CreateOperator("Int8Dequantize", [deq_inp], [deq_outp])
predict_def.op.extend([deq_op])
op_len += 1
cur_pos += 1
def add_storage_order(predict_def):
order_arg = utils.MakeArgument("order", str("NHWC"))
for op in predict_def.op:
if not op.type.startswith("Int8"):
continue
if op.type == "Int8Quantize" or op.type == "Int8Dequantize":
continue
arg = self.algo.get_arg(op, "order")
if arg is not None:
arg.s = str("NHWC")
else:
op.arg.extend([order_arg])
def predict_output_data_type(predict_def):
output_data_type = []
pos = 0
while pos < len(predict_def.op):
op = predict_def.op[pos]
if not op.type.startswith("Int8"):
output_data_type.append(DATA_TYPE_FP32)
elif op.type.endswith("Relu"):
output_data_type.append(DATA_TYPE_U8)
elif is_data_type_changed(op.type):
output_data_type.append(DATA_TYPE_S8)
else:
_, pre_pos = self.algo.get_predecessor_op(0, pos, predict_def)
if pre_pos is None:
output_data_type.append(DATA_TYPE_S8)
elif output_data_type[pre_pos] == DATA_TYPE_FP32:
output_data_type.append(DATA_TYPE_S8)
else:
output_data_type.append(output_data_type[pre_pos])
pos += 1
return output_data_type
def add_output_scale(predict_def, output_data_type):
for i, op in enumerate(predict_def.op):
if not_need_output_scale(op.type):
continue
if op.type == "Int8Quantize":
successors = self.algo.get_successor_ops(i, predict_def)
assert len(successors) > 0
successor = successors[0]
input_index = self.algo.get_input_index(op.output[0], successor)
arg_name = "absmax_input" + "_" + str(input_index)
arg = self.algo.get_arg(successor, arg_name)
else:
arg_name = "absmax_output" + "_" + str(0)
arg = self.algo.get_arg(op, arg_name)
assert arg is not None
output_scale = arg.floats[0] / get_abs_max(output_data_type[i])
self.algo.remove_arg(op, "Y_scale")
op.arg.extend([utils.MakeArgument("Y_scale", output_scale)])
self.algo.remove_arg(op, "Y_zero_point")
output_zero_point = get_zero_point(output_data_type[i])
op.arg.extend([utils.MakeArgument("Y_zero_point", output_zero_point)])
def quantize_weights(ws, op, init_def):
assert len(op.input) >= 2
weights = ws.FetchBlob(op.input[1]).astype(np.float32)
if len(weights.shape) == 4:
weights = np.transpose(weights, (0, 2, 3, 1)).astype(np.float32)
arg = self.algo.get_arg(op, "absmax_input" + "_" + str(1))
assert arg is not None
output_scale = arg.floats[0] / get_abs_max(DATA_TYPE_S8)
output_zero_point = get_zero_point(DATA_TYPE_S8)
values = np.rint((weights / output_scale)).astype(np.int8) + output_zero_point
filler = core.CreateOperator(
"Int8GivenTensorFill",
[], [op.input[1]],
arg=[
utils.MakeArgument("shape", weights.shape),
utils.MakeArgument("values", values.astype(np.uint8).tobytes()),
utils.MakeArgument("Y_zero_point", output_zero_point),
utils.MakeArgument("Y_scale", output_scale)])
init_def.op.extend([filler])
return output_scale
def quantize_bias(ws, op, init_def, input_data_type, weights_scale):
assert len(op.input) >= 3
bias = ws.FetchBlob(op.input[2]).astype(np.float32)
arg = self.algo.get_arg(op, "absmax_input" + "_" + str(0))
assert arg is not None
input_scale = arg.floats[0] / get_abs_max(input_data_type)
output_scale = input_scale * weights_scale
output_zero_point = get_zero_point(DATA_TYPE_S32)
values = np.rint(bias / output_scale).astype(np.int32)
filler = core.CreateOperator(
"Int8GivenIntTensorFill",
[], [op.input[2]],
arg=[
utils.MakeArgument("shape", bias.shape),
utils.MakeArgument("values", values),
utils.MakeArgument("Y_zero_point", output_zero_point),
utils.MakeArgument("Y_scale", output_scale)])
init_def.op.extend([filler])
def gen_quantized_init_def(ws, predict_def, output_data_type):
init_def = caffe2_pb2.NetDef()
init_def.name = predict_def.name + "_weights_bias"
for i, op in enumerate(predict_def.op):
if not op.type.startswith("Int8"):
continue
if has_weights(op):
weights_scale = quantize_weights(ws, op, init_def)
if has_bias(op):
_, pre_pos = self.algo.get_predecessor_op(0, i, predict_def)
assert pre_pos is not None
input_data_type = output_data_type[pre_pos]
quantize_bias(ws, op, init_def, input_data_type, weights_scale)
return init_def
def organize_external_input(ws, predict_def, init_def):
kTypeNameMapper = {
np.dtype('float32') : "GivenTensorFill",
np.dtype('int32') : "GivenTensorIntFill",
np.dtype('int64') : "GivenTensorInt64Fill",
np.dtype('uint8') : "GivenTensorStringFill",
}
all_existing_inputs = []
for op in init_def.op:
all_existing_inputs.append(op.output[0])
for inp in predict_def.external_input:
if inp == predict_def.op[0].input[0]:
continue
if inp in all_existing_inputs:
continue
in_data = ws.FetchBlob(inp)
shape = in_data.shape
values = in_data
# pass array of uint8 as a string to save storage
# storing uint8_t has a large overhead for now
if in_data.dtype == np.dtype('uint8'):
shape = [1]
values = [str(in_data.data)]
op = core.CreateOperator(
kTypeNameMapper[in_data.dtype],
[], [inp],
arg=[
utils.MakeArgument("shape", shape),
utils.MakeArgument("values", values),
]
)
init_def.op.extend([op])
predict_quantized = copy.deepcopy(predict_def)
self.algo.gather_max(predict_quantized)
self.algo.update_status()
update_op_type(predict_quantized)
insert_dequantize(predict_quantized)
insert_quantize(predict_quantized)
refine_module_outputs(predict_quantized)
# DO NOT change the operator order of the module after below line
output_data_type = predict_output_data_type(predict_quantized)
add_output_scale(predict_quantized, output_data_type)
add_storage_order(predict_quantized)
init_quantized = gen_quantized_init_def(ws, predict_quantized, output_data_type)
self.algo.remove_max(predict_quantized)
for op in predict_quantized.op:
if op.type.startswith("Int8"):
op.engine = str("DNNLOWP")
self.algo.remove_arg(op, "fusion_type")
op.device_option.CopyFrom(caffe2_pb2.DeviceOption())
organize_external_input(ws, predict_quantized, init_quantized)
return predict_quantized, init_quantized
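# Minimal usage sketch (illustrative only). It assumes a Caffe2 workspace `ws`
# with the FP32 nets already loaded and a `calib_batches` iterable of input
# blobs; neither is defined in this module. For KLCalib, run more iterations
# than kl_iter_num_for_range so the histograms are populated after the min/max
# pass.
#
# calibrator = Calibrator(AbsmaxCalib())   # or KLCalib(kl_iter_num_for_range=100)
# for batch in calib_batches:
#     ws.FeedBlob(predict_def.op[0].input[0], batch)
#     predict_def = calibrator.RunCalibIter(ws, predict_def)
# predict_int8, init_int8 = calibrator.DepositQuantizedModule(ws, predict_def)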
|
py | 7df896359c9c5c5b0e16e3565bcbcd1eed4225ea | # coding: utf-8
"""
Intersight REST API
This is Intersight REST API
OpenAPI spec version: 1.0.9-255
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class QataskUcsTask1Ref(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'moid': 'str',
'object_type': 'str'
}
attribute_map = {
'moid': 'Moid',
'object_type': 'ObjectType'
}
def __init__(self, moid=None, object_type=None):
"""
QataskUcsTask1Ref - a model defined in Swagger
"""
self._moid = None
self._object_type = None
if moid is not None:
self.moid = moid
if object_type is not None:
self.object_type = object_type
@property
def moid(self):
"""
Gets the moid of this QataskUcsTask1Ref.
:return: The moid of this QataskUcsTask1Ref.
:rtype: str
"""
return self._moid
@moid.setter
def moid(self, moid):
"""
Sets the moid of this QataskUcsTask1Ref.
:param moid: The moid of this QataskUcsTask1Ref.
:type: str
"""
self._moid = moid
@property
def object_type(self):
"""
Gets the object_type of this QataskUcsTask1Ref.
:return: The object_type of this QataskUcsTask1Ref.
:rtype: str
"""
return self._object_type
@object_type.setter
def object_type(self, object_type):
"""
Sets the object_type of this QataskUcsTask1Ref.
:param object_type: The object_type of this QataskUcsTask1Ref.
:type: str
"""
self._object_type = object_type
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, QataskUcsTask1Ref):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
py | 7df896dfc3066863a0da4229efefc84ed2d5c287 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2018-08-27 01:26
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('bfrs', '0013_auto_20180809_1457'),
]
operations = [
migrations.RemoveField(
model_name='bushfire',
name='fire_bombing_req',
),
migrations.RemoveField(
model_name='bushfiresnapshot',
name='fire_bombing_req',
),
]
|
py | 7df8970d3ed5c6f164eac3820576186d1423d818 | import json
import pandas as pd
import argparse
import logging
import copy
logger = logging.getLogger(__name__)
def get_tissue_mappings(mapping_filename):
"""Return a dictionary that maps tissue names to anatomical systems and organs"""
with open(mapping_filename, 'r') as mappings_file:
mapping_dict = json.load(mappings_file)
return mapping_dict['tissues']
def initialise_expression_dict(mapping_dictionary):
# Extract anatomical systems
anatomical_systems = set()
for tissues, mappings in mapping_dictionary.items():
anatomical_systems.update(mappings['anatomical_systems'])
# Initialise scoring dictionary
expression_dict={}
for anatomical_system in anatomical_systems:
# Remove white spaces
anatomical_system_clean_name = anatomical_system.strip().replace(" ", "_")
expression_dict[anatomical_system_clean_name] = {
'is_expressed' : False,
'expressed_tissue_list' : []
}
return expression_dict
def parse_baseline(baseline_filename, tissue_mapping, output_filename):
baseline_df = pd.read_csv(baseline_filename, sep='\t', header=0, index_col=0)
# Check that column names in baseline file exist in mapping file
columns_to_drop = []
for column in baseline_df.columns:
if column not in tissue_mapping:
logger.warning("{} is not a supported tissue, skipping it".format(column))
columns_to_drop.append(column)
# Drop unmapped tissues
if columns_to_drop:
baseline_df.drop(columns_to_drop, axis=1, inplace=True)
empty_expression_dict = initialise_expression_dict(tissue_mapping)
expression_per_anatomical_systems_list = []
# Iterate all genes
for gene, expression in baseline_df.to_dict('index').items():
expression_per_anatomical_systems_dict = copy.deepcopy(empty_expression_dict)
expression_per_anatomical_systems_dict['id'] = gene
for tissue in expression:
# Gene is considered expressed if > 6 tpm
if expression[tissue] > 6:
for anat_sys in tissue_mapping[tissue]['anatomical_systems']:
# Remove white spaces
anat_sys_clean_name = anat_sys.strip().replace(" ", "_")
expression_per_anatomical_systems_dict[anat_sys_clean_name]['is_expressed'] = True
expression_per_anatomical_systems_dict[anat_sys_clean_name]['expressed_tissue_list'].append(tissue)
expression_per_anatomical_systems_list.append(expression_per_anatomical_systems_dict)
expression_per_anatomical_systems_df = pd.json_normalize(expression_per_anatomical_systems_list, max_level=1, sep="_")
# Drop anatomical systems where no gene is expressed - happens for sensory system
# Find columns with single unique value - only "is_expressed" columns can be used as lists are not hashable
columns_count_unique = expression_per_anatomical_systems_df.filter(regex="is_expressed").nunique()
columns_single_unique_value = columns_count_unique[columns_count_unique==1].index
# Check that the unique values are either False or empty list
empty_columns = []
for column in columns_single_unique_value:
unique_value = expression_per_anatomical_systems_df[column].unique()[0]
if unique_value == False:
# Add both "is_expressed" column and list column to list to be removed
empty_columns.append(column)
empty_columns.append(column.replace("is_expressed", "expressed_tissue_list"))
expression_per_anatomical_systems_df.drop(columns=empty_columns, inplace=True)
# Save columns that contain lists as valid JSON strings
for column in expression_per_anatomical_systems_df.filter(regex="_list").columns:
expression_per_anatomical_systems_df[column] = expression_per_anatomical_systems_df[column].apply(lambda x: json.dumps(x) if isinstance(x, list) else x)
# Write to file
expression_per_anatomical_systems_df.to_csv(output_filename, sep='\t', index=False)
def main():
# Parse CLI parameters
parser = argparse.ArgumentParser(description='Parse baseline expression file and report the anatomical systems where each target is expressed.')
parser.add_argument('-i','--input',
help='Baseline expression tab-separated file',
type=str, default='ot_baseline.tsv')
parser.add_argument('-m','--mapping',
help='Name of file that maps tissues to anatomical systems',
type=str, default='ot_map_with_efos.json')
parser.add_argument('-o','--output',
help='Output file name',
type=str, default='baseline_expression_per_anatomical_system.tsv')
args = parser.parse_args()
# Get parameters:
input_file = args.input
mapping_file = args.mapping
output_file = args.output
# Load tissue mappings
tissue_mappings = get_tissue_mappings(mapping_file)
parse_baseline(input_file, tissue_mappings, output_file)
if __name__ == '__main__':
main()
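# Input sketch (illustrative only; the tissue and system names below are made
# up, not taken from the real mapping file). The mapping JSON is expected to
# look like:
#   {"tissues": {"liver": {"anatomical_systems": ["digestive system"]}, ...}}
# and the baseline TSV has one gene per row and one tissue per column, with TPM
# values per cell, e.g. gene_id<TAB>liver<TAB>lung. With the > 6 TPM cut-off
# used above, a gene at 12.3 TPM in liver would be reported as expressed in the
# digestive system, with "liver" added to that system's expressed tissue list.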
|
py | 7df8972e9ab9df29f757ef878c5969a4c081406e | # -*- coding: utf-8 -*-
import click, datetime, peewee
from captains_log.backend.init import init_database
from captains_log.backend.models import CaptainsLogDatabase, Category, Entry
from captains_log.renderer.history import SimpleHistoryRenderer, TabulatedHistoryRenderer, ColumnedHistoryRenderer
@click.command(short_help='Browse your logs history')
@click.argument('period', type=click.Choice(['all', 'year', 'month', 'day']), default="all", required=True, metavar='<period>')
@click.option('--search_pattern', '-s', default=None, help='Pattern to search for entries than contain it')
@click.pass_context
def entries_history_command(ctx, period, search_pattern=None):
"""
    Browse your logs history for the optional <period> keyword given.
    If given, <period> must be a valid choice, where 'year' limits results
    to the current year, 'month' to the current month and 'day' to the current day.
"""
init_database()
# Start queryset defining JOIN on entry and category
queryset = Entry.select(Entry, Category).join(Category, peewee.JOIN_LEFT_OUTER)
empty_message = "There is no entries for.."
if period != 'all':
now = datetime.datetime.now()
if period == 'year':
queryset = queryset.where(Entry.created.year == now.year)
empty_message = "There is no entries for the current year"
elif period == 'month':
queryset = queryset.where(
(Entry.created.year == now.year) &
(Entry.created.month == now.month)
)
empty_message = "There is no entries for the current month"
elif period == 'day':
queryset = queryset.where(
(Entry.created.year == now.year) &
(Entry.created.month == now.month) &
(Entry.created.day == now.day)
)
empty_message = "There is no entries for the current day"
# TODO: use calendar to find the week start and end days so we can
# limit results to the current week
# Use a pattern to seach for entries that contains it
if search_pattern:
queryset = queryset.where(Entry.content.contains(search_pattern))
# Finish with adding order and aggregating
queryset = queryset.order_by(Entry.created.asc()).aggregate_rows()
if queryset.count() > 0:
## Simple history columns are just joined with a space, no tabulated layout
#click.echo(SimpleHistoryRenderer(queryset).render())
## History tabulated with "tabulate" package, should be deprecated
#click.echo(TabulatedHistoryRenderer(queryset).render())
## History correctly tabulated
click.echo(ColumnedHistoryRenderer(queryset).render())
else:
click.echo(empty_message)
    # TODO: add an option to print a message about found results?
#print "Period keyword:", period
#print "Results:", len(list(queryset))
|
py | 7df898f5d543aa82a074aa68ade123a40c6b1877 | class PipeFactory:
"""
    Class representing a factory of pipes
    """
    def __init__(self, type):
        # `type` is a dotted path such as "package.module.PipeClass".
        parts = type.split('.')
        module = ".".join(parts[:-1])
        # __import__ returns the top-level package, so walk the remaining path
        # components (sub-modules and finally the class) to reach the pipe class.
        self.m = __import__(module)
        for comp in parts[1:]:
            self.m = getattr(self.m, comp)
def get_pipe(self, sides, x, y):
return self.m(sides, x, y) |
py | 7df899b150a45f59552fa717d9b7e228b9a92637 | ###########################################################################
#
# Copyright 2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
placementStrategy_Schema = [
{
"mode": "NULLABLE",
"type": "STRING",
"description": "",
"name": "kind"
},
{
"mode": "NULLABLE",
"type": "INT64",
"description": "",
"name": "id"
},
{
"mode": "NULLABLE",
"type": "STRING",
"description": "",
"name": "name"
},
{
"mode": "NULLABLE",
"type": "INT64",
"description": "",
"name": "accountId"
}
]
|
py | 7df899fd84bb9626f50b64f6faa25c8d85c4cd9a | from click.testing import CliRunner
from pytest import mark
from site_checker.run import cli
@mark.parametrize(
"args",
[
"producer --config-path example/example.config.ini --dry-run",
"""producer \
--name test \
--url http://www.test.org \
--regex test \
--kafka-bootstrap-servers 127.0.0.1:28177 \
--kafka-security-protocol SSL \
--kafka-ssl-cafile ./config/ca.pem \
--kafka-ssl-certfile ./config/service.cert \
--kafka-ssl-keyfile ./config/service.key \
--pooling-interval 60 \
--dry-run""",
],
)
def test_run_producer_with_config_ini_and_arguments(args):
runner = CliRunner()
args = args.split()
result = runner.invoke(cli, args)
assert result.exit_code == 0
@mark.parametrize(
"args",
[
"consumer --config-path example/example.config.ini --dry-run",
"""consumer \
--kafka-bootstrap-servers 127.0.0.1:28177 \
--kafka-security-protocol SSL \
--kafka-ssl-cafile ./config/ca.pem \
--kafka-ssl-certfile ./config/service.cert \
--kafka-ssl-keyfile ./config/service.key \
--kafka-topic test_topic \
--postgres-user user_test \
--postgres-password pass_test \
--postgres-host host_test \
--postgres-port port_test \
--postgres-database database_test \
--postgres-sslmode verify-ca \
--postgres-sslrootcert ca.pem \
--dry-run
""",
],
)
def test_run_consumer_with_config_ini_and_arguments(args):
runner = CliRunner()
args = args.split()
result = runner.invoke(cli, args)
assert result.exit_code == 0
|
py | 7df89a303eebb29110fa2226fc32f4f13aa3cb2f | from __future__ import annotations
from typing import Union
from .time import Time
class TimeRange:
def __init__(self, start: Time, end: Time):
"""TimeRange(Time, Time) -> TimeRange"""
if start >= end:
raise ValueError("Invalid TimeRange: start must be less than end")
self._start = start
self._end = end
@classmethod
def from_json(cls, j: dict) -> TimeRange:
"""TimeRange.from_json(dict) -> TimeRange"""
return TimeRange(
Time.from_json(j["start"]),
Time.from_json(j["end"])
)
def to_json(self) -> dict:
"""TimeRange.to_json() -> dict"""
return {
"start": self.start.to_json(),
"end": self.end.to_json()
}
def copy(self) -> TimeRange:
"""TimeRange.copy() -> TimeRange"""
return TimeRange(
self.start.copy(),
self.end.copy()
)
def __hash__(self) -> int:
"""hash(TimeRange) -> int"""
return hash((self.start, self.end))
def __repr__(self) -> str:
return f"TimeRange({self.start!r}, {self.end!r})"
def __str__(self) -> str:
return f"{self.start} - {self.end}"
def __len__(self) -> int:
"""len(TimeRange) -> int"""
return self.end - self.start
@property
    def start(self) -> Time:
        """TimeRange.start -> Time"""
        return self._start
    @property
    def end(self) -> Time:
        """TimeRange.end -> Time"""
        return self._end
    def __add__(self, other: int) -> TimeRange:
"""TimeRange + int -> TimeRange"""
if isinstance(other, int):
start = self.start + other
end = self.end + other
return TimeRange(start, end)
else:
return NotImplemented
    def __sub__(self, other: int) -> TimeRange:
"""TimeRange - int -> TimeRange"""
if isinstance(other, int):
return self.__add__(-other)
else:
return NotImplemented
    def __radd__(self, other: int) -> TimeRange:
"""int + TimeRange -> TimeRange"""
if isinstance(other, int):
return self.__add__(other)
else:
return NotImplemented
def __contains__(self, other: Union[Time, TimeRange]):
"""Time in TimeRange -> bool"""
"""TimeRange in TimeRange -> bool"""
if isinstance(other, Time):
return other >= self.start and other < self.end
elif isinstance(other, TimeRange):
return other.start >= self.start and other.end <= self.end
else:
return NotImplemented
    def __lt__(self, other: Union[Time, TimeRange]):
"""Time < TimeRange -> bool"""
"""TimeRange < TimeRange -> bool"""
if isinstance(other, Time):
return self.end <= other
elif isinstance(other, TimeRange):
return (self.start, self.end) < (other.start, other.end)
else:
return NotImplemented
    def __le__(self, other: Union[Time, TimeRange]):
"""Time <= TimeRange -> bool"""
"""TimeRange <= TimeRange -> bool"""
if isinstance(other, Time):
return self.start <= other
elif isinstance(other, TimeRange):
return (self.start, self.end) <= (other.start, other.end)
else:
return NotImplemented
    def __eq__(self, other: TimeRange):
"""TimeRange == TimeRange -> bool"""
if isinstance(other, TimeRange):
return (self.start, self.end) == (other.start, other.end)
else:
return NotImplemented
    def __ne__(self, other: TimeRange):
"""TimeRange != TimeRange -> bool"""
return not self.__eq__(other)
    def __gt__(self, other: Union[Time, TimeRange]):
"""Time > TimeRange -> bool"""
"""TimeRange > TimeRange -> bool"""
if isinstance(other, Time):
return self.start > other
elif isinstance(other, TimeRange):
return (self.start, self.end) > (other.start, other.end)
else:
return NotImplemented
    def __ge__(self, other: Union[Time, TimeRange]):
"""Time >= TimeRange -> bool"""
"""TimeRange >= TimeRange -> bool"""
if isinstance(other, Time):
return self.end > other
elif isinstance(other, TimeRange):
return (self.start, self.end) >= (other.start, other.end)
else:
return NotImplemented
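# Usage sketch (illustrative; the JSON layout expected by Time.from_json lives
# in the sibling `time` module, and the {"hour": ..., "minute": ...} shape below
# is only a hypothetical example of it):
#
# morning = TimeRange.from_json({"start": {"hour": 9, "minute": 0},
#                                "end": {"hour": 12, "minute": 30}})
# morning.start             # a Time instance
# len(morning)              # duration as an int (end - start)
# morning + 60              # a new TimeRange shifted forward by 60 units
# morning.start in morning  # True: start is inclusive, end is exclusive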
|
py | 7df89a43bcd9fb9a73553e06735d7477c114645a | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Copyright (c) Merchise Autrement [~º/~] and Contributors
# All rights reserved.
#
# This is free software; you can do what the LICENCE file allows you to.
#
{
"name": "test_xoeuf_models",
"author": "Merchise Autrement [~º/~] and Contributors",
"description": "Test the module xoeuf.models",
"depends": ["base"],
"data": ["data/data.xml"],
"installable": True,
"auto_install": False,
}
|
py | 7df89bf420ea0239c1118316eaef0ed73ff1919b | # Generated by Django 2.2.18 on 2022-04-27 17:29
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dmd', '0003_auto_20191008_1141'),
]
operations = [
migrations.AlterField(
model_name='dtinfo',
name='prevprice',
field=models.IntegerField(help_text='Previous price (pence)', null=True),
),
migrations.AlterField(
model_name='dtinfo',
name='price',
field=models.IntegerField(help_text='Drug Tariff price (pence)', null=True),
),
migrations.AlterField(
model_name='priceinfo',
name='price',
field=models.IntegerField(help_text='Price (pence)', null=True),
),
migrations.AlterField(
model_name='priceinfo',
name='price_prev',
field=models.IntegerField(help_text='Price prior to change date (pence)', null=True),
),
]
|
py | 7df89c22fc44fe59f0f02ba1a4ac83212e3ff1e3 | from datetime import date, timedelta
from typing import List
import os
import settings
class Task:
def __init__(self, id: str, name: str, date_done: date, num_days: int,
**kwargs):
self.id: str = id
self.name: str = name
self.date_done: date = date_done
self.num_days = num_days
self.description: str = kwargs.get('description') or self.name
self.pending: bool = kwargs.get('pending') or False
self.current_task: int = kwargs.get('current_task') or 0
self.tasks: List[str] = kwargs.get('tasks') or []
def subject(self) -> str:
return self.current_task_name()
def current_task_name(self) -> str:
if self.tasks:
return self.tasks[self.current_task - 1]
else:
return self.name
def date_due(self) -> date:
return self.date_done + timedelta(days=self.num_days)
def due(self) -> bool:
return date.today() >= self.date_due()
def num_tasks(self) -> int:
return len(self.tasks)
def __repr__(self):
return "<Task %s>" % str(self.__dict__)
def completion_url(self) -> str:
return os.environ['API_GATEWAY_BASE_URL'] + '/' + self.id + '/complete'
def email_subject(self) -> str:
return "Reminder: " + self.subject()
|
py | 7df89c8b31e9960be58cc63ccaf028e945be92c2 | # -*- coding: utf-8 -*-
"""
MacAdam (1942) Ellipses (Observer PGN)
======================================
Defines *MacAdam (1942) Ellipses (Observer PGN)* ellipses data.
References
----------
- :cite:`Macadam1942` : Macadam, D. L. (1942). Visual Sensitivities to Color
Differences in Daylight. Journal of the Optical Society of America, 32(5),
28. doi:10.1364/JOSA.32.000247
- :cite:`Wyszecki2000` : Wyszecki, Günther, & Stiles, W. S. (2000). Table
2(5.4.1) MacAdam Ellipses (Observer PGN) Observed and Calculated on the
Basis of a Normal Distribution of Color Matches about a Color Center
(Silberstein and MacAdam, 1945). In Color Science: Concepts and Methods,
Quantitative Data and Formulae (p. 309). Wiley. ISBN:978-0-471-39918-6
"""
import numpy as np
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2021 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '[email protected]'
__status__ = 'Production'
__all__ = ['DATA_MACADAM_1942_ELLIPSES']
DATA_MACADAM_1942_ELLIPSES = np.array([
[0.160, 0.057, 0.85, 0.35, 62.5, 0.94, 0.30, 62.3],
[0.187, 0.118, 2.20, 0.55, 77.0, 2.31, 0.44, 74.8],
[0.253, 0.125, 2.50, 0.50, 55.5, 2.49, 0.49, 54.8],
[0.150, 0.680, 9.60, 2.30, 105.0, 9.09, 2.21, 102.9],
[0.131, 0.521, 4.70, 2.00, 112.5, 4.67, 2.10, 110.5],
[0.212, 0.550, 5.80, 2.30, 100.0, 5.63, 2.30, 100.0],
[0.258, 0.450, 5.00, 2.00, 92.0, 4.54, 2.08, 88.5],
[0.152, 0.365, 3.80, 1.90, 110.0, 3.81, 1.86, 111.0],
[0.280, 0.385, 4.00, 1.50, 75.5, 4.26, 1.46, 74.6],
[0.380, 0.498, 4.40, 1.20, 70.0, 4.23, 1.32, 69.4],
[0.160, 0.200, 2.10, 0.95, 104.0, 2.08, 0.94, 95.4],
[0.228, 0.250, 3.10, 0.90, 72.0, 3.09, 0.82, 70.9],
[0.305, 0.323, 2.30, 0.90, 58.0, 2.55, 0.68, 57.2],
[0.385, 0.393, 3.80, 1.60, 65.5, 3.70, 1.48, 65.5],
[0.472, 0.399, 3.20, 1.40, 51.0, 3.21, 1.30, 54.0],
[0.527, 0.350, 2.60, 1.30, 20.0, 2.56, 1.27, 22.8],
[0.475, 0.300, 2.90, 1.10, 28.5, 2.89, 0.99, 29.1],
[0.510, 0.236, 2.40, 1.20, 29.5, 2.40, 1.15, 30.7],
[0.596, 0.283, 2.60, 1.30, 13.0, 2.49, 1.15, 11.1],
[0.344, 0.284, 2.30, 0.90, 60.0, 2.24, 0.97, 65.7],
[0.390, 0.237, 2.50, 1.00, 47.0, 2.43, 0.98, 44.2],
[0.441, 0.198, 2.80, 0.95, 34.5, 2.73, 0.90, 33.7],
[0.278, 0.223, 2.40, 0.55, 57.5, 2.34, 0.61, 60.3],
[0.300, 0.163, 2.90, 0.60, 54.0, 3.01, 0.60, 53.4],
[0.365, 0.153, 3.60, 0.95, 40.0, 4.12, 0.90, 38.6],
])
"""
*MacAdam (1942) Ellipses (Observer PGN)* ellipses data.
Table 2(5.4.1) data in *Wyszecki and Stiles (2000)* is as follows:
+--------------+---------------------------+---------------------------+
| Color Center | Observed | Calculated |
+--------------+---------------------------+---------------------------+
| x_0 | y_0 | 10**3 a | 10**3 b | theta | 10**3 a | 10**3 b | theta |
+-------+------+---------+---------+-------+---------+---------+-------+
where :math:`x_0` and :math:`y_0` are the coordinates of the ellipse center,
:math:`a` is the semi-major axis length, :math:`b` is the semi-minor axis
length and :math:`\\theta` is the angle from the semi-major axis :math:`a`.
The *Calculated* column should be preferred to the *Observed* one as the latter
is the result of measurements made on *MacAdam (1942)* diagrams while the
former is fitted to his observational data.
References
----------
:cite:`Wyszecki2000`, :cite:`Macadam1942`
DATA_MACADAM_1942_ELLIPSES : ndarray
"""
|
py | 7df89d58b245fffca79056922a52280596f57c68 | class GraphiteRecord(object):
def __init__(self, metric_string, default_nan_value=None, ignore_nan=False):
try:
meta, data = metric_string.split('|')
except ValueError:
peek = ((metric_string[:40] + '..')
if len(metric_string) > 40 else metric_string)
raise ValueError("Unable to parse graphite record: {}".format(peek))
self.target, start_time, end_time, step = meta.rsplit(',', 3)
self.start_time = int(start_time)
self.end_time = int(end_time)
self.step = int(step)
self.default_nan_value = default_nan_value
self.ignore_nan = ignore_nan
self.values = list(self._values(data.rsplit(',')))
self.empty = len(self.values) == 0
def _values(self, values):
for value in values:
try:
if self.ignore_nan and float(value) == self.default_nan_value:
continue
yield float(value)
except ValueError:
continue
@property
def average(self):
return self.sum / len(self.values)
@property
def last_value(self):
return self.values[-1]
@property
def sum(self):
return sum(self.values)
@property
def minimum(self):
return min(self.values)
@property
def maximum(self):
return max(self.values)
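# Minimal usage sketch (not part of the original module; the metric string below
# is made up). The parser above expects records of the form
# "<target>,<start>,<end>,<step>|<v1>,<v2>,...":
if __name__ == "__main__":
    record = GraphiteRecord("servers.web1.cpu,1493000000,1493000300,60|1.0,2.0,None,4.0")
    print(record.target)   # servers.web1.cpu
    print(record.values)   # [1.0, 2.0, 4.0] -- non-numeric samples are skipped
    print(record.average)  # 2.333...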
|
py | 7df89dc97da374b4f85cab0f46cc93e9b52a6015 | ## @file __init__.py
# @brief Tests for PbOSE - Protobuf Object Signing and Encryption.
#
# @copyright
# Copyright 2018 PbOSE <https://pbose.io>
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
##
"""Tests for PbOSE - Protobuf Object Signing and Encryption."""
|
py | 7df89ee18a39f037ea4242d00bda19e074f19188 | # coding: utf-8
"""
Pure Storage FlashBlade REST 1.2 Python SDK
Pure Storage FlashBlade REST 1.2 Python SDK, developed by [Pure Storage, Inc](http://www.purestorage.com/). Documentations can be found at [purity-fb.readthedocs.io](http://purity-fb.readthedocs.io/).
OpenAPI spec version: 1.2
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class Blade(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
#BEGIN_CUSTOM
# IR-51527: Prevent Pytest from attempting to collect this class based on name.
__test__ = False
#END_CUSTOM
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'name': 'str',
'details': 'str',
'raw_capacity': 'int',
'target': 'str',
'progress': 'float',
'status': 'str'
}
attribute_map = {
'name': 'name',
'details': 'details',
'raw_capacity': 'raw_capacity',
'target': 'target',
'progress': 'progress',
'status': 'status'
}
def __init__(self, name=None, details=None, raw_capacity=None, target=None, progress=None, status=None): # noqa: E501
"""Blade - a model defined in Swagger""" # noqa: E501
self._name = None
self._details = None
self._raw_capacity = None
self._target = None
self._progress = None
self._status = None
self.discriminator = None
if name is not None:
self.name = name
if details is not None:
self.details = details
if raw_capacity is not None:
self.raw_capacity = raw_capacity
if target is not None:
self.target = target
if progress is not None:
self.progress = progress
if status is not None:
self.status = status
@property
def name(self):
"""Gets the name of this Blade. # noqa: E501
blade name # noqa: E501
:return: The name of this Blade. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this Blade.
blade name # noqa: E501
:param name: The name of this Blade. # noqa: E501
:type: str
"""
self._name = name
@property
def details(self):
"""Gets the details of this Blade. # noqa: E501
blade details # noqa: E501
:return: The details of this Blade. # noqa: E501
:rtype: str
"""
return self._details
@details.setter
def details(self, details):
"""Sets the details of this Blade.
blade details # noqa: E501
:param details: The details of this Blade. # noqa: E501
:type: str
"""
self._details = details
@property
def raw_capacity(self):
"""Gets the raw_capacity of this Blade. # noqa: E501
blade capacity in bytes # noqa: E501
:return: The raw_capacity of this Blade. # noqa: E501
:rtype: int
"""
return self._raw_capacity
@raw_capacity.setter
def raw_capacity(self, raw_capacity):
"""Sets the raw_capacity of this Blade.
blade capacity in bytes # noqa: E501
:param raw_capacity: The raw_capacity of this Blade. # noqa: E501
:type: int
"""
self._raw_capacity = raw_capacity
@property
def target(self):
"""Gets the target of this Blade. # noqa: E501
evacuation target # noqa: E501
:return: The target of this Blade. # noqa: E501
:rtype: str
"""
return self._target
@target.setter
def target(self, target):
"""Sets the target of this Blade.
evacuation target # noqa: E501
:param target: The target of this Blade. # noqa: E501
:type: str
"""
self._target = target
@property
def progress(self):
"""Gets the progress of this Blade. # noqa: E501
current operation progress # noqa: E501
:return: The progress of this Blade. # noqa: E501
:rtype: float
"""
return self._progress
@progress.setter
def progress(self, progress):
"""Sets the progress of this Blade.
current operation progress # noqa: E501
:param progress: The progress of this Blade. # noqa: E501
:type: float
"""
self._progress = progress
@property
def status(self):
"""Gets the status of this Blade. # noqa: E501
blade status # noqa: E501
:return: The status of this Blade. # noqa: E501
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this Blade.
blade status # noqa: E501
:param status: The status of this Blade. # noqa: E501
:type: str
"""
self._status = status
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Blade, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Blade):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
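# Illustrative usage sketch (not part of the generated module; the values below
# are made up): the model is a plain attribute container, e.g.
#     blade = Blade(name="CH1.FB1", status="healthy", raw_capacity=8796093022208)
#     blade.to_dict()  # {'name': 'CH1.FB1', 'details': None, 'raw_capacity': ..., ...}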
|
py | 7df89f6a3ede70f36665a92ae5920555ace5466c | # Copyright 2017 BBVA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import requests
from requests.cookies import RequestsCookieJar
try:
import ujson as json
except ImportError:
import json
from apitest.helpers.fuzzer import *
from apitest.core.helpers import make_url_signature
class Response(object):
"""
This class is a wrapper around a requests response. It is necessary because the py.test cache needs a
JSON-serializable data type.
This class only keeps the useful parts of the requests response and makes them serializable.
"""
def __init__(self, *, status_code: int = 200, headers: dict = None, cookies: dict = None,
reason: str = None, body: str = None):
body = body or ""
status_code = status_code or 200
headers = headers or {}
cookies = cookies or {}
if isinstance(cookies, RequestsCookieJar):
cookies = dict(cookies)
reason = reason or "OK"
assert isinstance(body, str)
assert isinstance(reason, str)
assert isinstance(headers, dict)
assert isinstance(cookies, dict)
assert isinstance(status_code, int)
self.body = body
self.reason = reason
self.headers = headers
self.cookies = cookies
self.status_code = status_code
self.__content_type_cache = None
self.__content_body_cache = None
@classmethod
def build_from_json(cls, **kwargs):
o = cls(status_code=kwargs.get("status_code"),
headers=kwargs.get("headers"),
body=kwargs.get("body"),
cookies=kwargs.get("cookies"),
reason=kwargs.get("reason"))
return o
@property
def dump_json(self):
return {key: value for key, value in vars(self).items() if not key.startswith("_")}
@property
def json_body(self):
"""
:return: the body parsed as a JSON data type if the response Content-Type is JSON, otherwise the raw body
:rtype: dict
"""
if not self.__content_body_cache:
if self.content_type == "json":
self.__content_body_cache = json.loads(self.body)
else:
self.__content_body_cache = self.body
return self.__content_body_cache
@property
def content_type(self):
"""
:return: return a string with the content type. Available values are: "json", "raw"
:rtype: str
"""
if not self.__content_type_cache:
if any(True for value in self.headers.values() if "application/json" in value):
self.__content_type_cache = "json"
else:
self.__content_type_cache = "raw"
return self.__content_type_cache
@pytest.fixture(scope="module")
def request_good(request):
def _new_fn(url: str, *, method: str = "GET", headers: dict = None, body: str = None):
# Get the unique signature for the Query
url_signature = "%s_ok" % make_url_signature(url,
method=method,
headers=headers,
body=body)
response = request.config.cache.get(url_signature, None)
# If response is not cached
if not response:
# Get and store a GOOD requests
raw_response = requests.request(url=url, method=method, headers=headers, data=body)
response = Response(status_code=raw_response.status_code,
headers=dict(raw_response.headers),
cookies=raw_response.cookies,
reason=raw_response.reason,
body=raw_response.text)
request.config.cache.set(url_signature, response.dump_json)
else:
# Recover response from cached info
response = Response.build_from_json(**response)
return response
return _new_fn
@pytest.fixture(scope="module")
def request_bad(request):
def _new_fn(url: str, *, method: str = "GET", headers: dict = None, body: str = None, fuzz_selector: int = None):
url_signature = "%s_bad" % make_url_signature(url,
method=method,
headers=headers,
body=body)
# Get selectors
fuzz_opt = fuzz_selector or FUZZSelector.BODY | FUZZSelector.BODY
# Build fuzzer options
fuzzer = FUZZSelector(fuzz_opt)
response = request.config.cache.get(url_signature, None)
# If response is not cached
if not response:
# --------------------------------------------------------------------------
# Fuzz selected values
# --------------------------------------------------------------------------
fuzzed_url = build_fuzzed_url(url) if fuzzer.is_url else url
fuzzed_headers = build_fuzzed_http_header(headers) if fuzzer.is_header else headers
fuzzed_method = build_fuzzed_method() if fuzzer.is_method else method
if headers and "application/json" in headers.values():
# TODO: make for dump_json
fuzzed_body = build_fuzzed_x_form(body)
else:
fuzzed_body = build_fuzzed_x_form(body)
# Get and store a BAD requests
raw_response = requests.request(url=fuzzed_url, method=fuzzed_method, headers=fuzzed_headers, data=fuzzed_body)
response = Response(status_code=raw_response.status_code,
headers=dict(raw_response.headers),
cookies=raw_response.cookies,
reason=raw_response.reason,
body=raw_response.text)
request.config.cache.set(url_signature, response.dump_json)
else:
response = Response.build_from_json(**response)
return response
return _new_fn
@pytest.fixture(scope="module")
def make_request():
def _new_fn(url: str, *, method: str = "GET", headers: dict = None, body: str = None):
raw_response = requests.request(url=url, method=method, headers=headers, data=body)
return Response(status_code=raw_response.status_code,
headers=dict(raw_response.headers),
cookies=raw_response.cookies,
reason=raw_response.reason,
body=raw_response.text)
return _new_fn
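# Illustrative usage sketch (not part of the original module; the URL below is
# made up). The fixtures above are meant to be requested by test functions, e.g.:
#     def test_api_contract(request_good, request_bad):
#         ok = request_good("http://api.example.com/v1/items", method="GET")
#         fuzzed = request_bad("http://api.example.com/v1/items", method="GET",
#                              fuzz_selector=FUZZSelector.BODY)
#         assert ok.status_code == 200
#         assert fuzzed.status_code < 500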
|
py | 7df89fbcc64acb4db322673f49865d2d8d16ae3e | #
# This file is part of pySMT.
#
# Copyright 2014 Andrea Micheli and Marco Gario
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from pysmt.logics import QF_NRA
from pysmt.test.smtlib.parser_utils import execute_script_fname, SMTLIB_TEST_FILES, SMTLIB_DIR
def test_generator():
for (logic, f, expected_result) in SMTLIB_TEST_FILES:
smtfile = os.path.join(SMTLIB_DIR, f)
if logic == QF_NRA:
yield execute_script_fname, smtfile, logic, expected_result
|
py | 7df8a0a84afa02ac09510dc2963ab56f1a2a855b | #!/usr/bin/env python
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib.ticker import MultipleLocator
from scipy.optimize import curve_fit
from scipy.signal import savgol_filter
from scipy.integrate import simps
from math import factorial
from PyAstronomy import pyasl
from astropy import constants as const
#Constants and values to be used among all classes defined in this document.
#Speed of light in km/s
c = const.c.to('km/s').value
#Window separation (in angstroms) -- used for location feature shoulders and
#to compute the noise in the spectra (via rms).
sep = 20.
#Keyword for the features to be fitted. As in table 1 of
#http://adsabs.harvard.edu/abs/2012MNRAS.425.1819S
keys = ['6', '7', 'C']
#Boundaries of line regions. See reference above.
MD = {}
MD['rest_f1'] = [3945.28]
MD['blue_lower_f1'], MD['blue_upper_f1'] =3400., 3800.
MD['red_lower_f1'], MD['red_upper_f1'] = 3800., 4100.
MD['rest_f2'] = [4129.73]
MD['blue_lower_f2'], MD['blue_upper_f2'] = 3850., 4000.
MD['red_lower_f2'], MD['red_upper_f2'] = 4000., 4150.
#The rest wavelength is set to the upper red bound for uniform selection criteria.
MD['rest_f3'] = [4700.]
MD['blue_lower_f3'], MD['blue_upper_f3'] = 4000., 4150.
MD['red_lower_f3'], MD['red_upper_f3'] = 4350., 4700.
#The rest wavelength is set to the upper red bound for uniform selection criteria.
MD['rest_f4'] = [5550.]
MD['blue_lower_f4'], MD['blue_upper_f4'] = 4350., 4700.
MD['red_lower_f4'], MD['red_upper_f4'] = 5050., 5550.
MD['rest_f5'] = [5624.32]
MD['blue_lower_f5'], MD['blue_upper_f5'] = 5100., 5300.
MD['red_lower_f5'], MD['red_upper_f5'] = 5450., 5700.
MD['rest_f6'] = [5971.85]
MD['blue_lower_f6'], MD['blue_upper_f6'] = 5400., 5750. #5700 originally
MD['red_lower_f6'], MD['red_upper_f6'] = 5750., 6060. #6000. originally
MD['rest_f7'] = [6355.21]
MD['blue_lower_f7'], MD['blue_upper_f7'] = 5750., 6060.
MD['red_lower_f7'], MD['red_upper_f7'] = 6150., 6600. #6200. originally
MD['rest_f8'] = [7773.37]
MD['blue_lower_f8'], MD['blue_upper_f8'] = 6800., 7450.
MD['red_lower_f8'], MD['red_upper_f8'] = 7600., 8000.
MD['rest_f9'] = [8498., 8542., 8662.]
MD['blue_lower_f9'], MD['blue_upper_f9'] = 7500., 8100.
MD['red_lower_f9'], MD['red_upper_f9'] = 8200., 8900.
#Below, the line boundaries are not really given in the BSNIP paper IV;
#for the blue side, the same limits as the red side of f7 are used and
#for the red side the region was obtained by trial and error.
MD['rest_fC'] = [6580.]
MD['blue_lower_fC'], MD['blue_upper_fC'] = 6100., 6600.
MD['red_lower_fC'], MD['red_upper_fC'] = 6300., 6800.
class Analyse_Spectra(object):
"""Computes a set of spectral features.
Parameters
----------
wavelength : ~np.array
Array containing the wavelength values of the spectra.
flux : ~np.array
Array containing the flux values of the spectra. Same length of the
wavelength array.
redshift : ~float
Redshift of the host galaxy. Usually the observed spectrum is corrected
for redshift and therefore synthetic spectra should use redshift=0.
extinction : ~float
Extinction to be corrected. Usually the observed spectrum is not
corrected for extinction and the synthetic spectrum is reddened using
a negative value for extinction.
D : ~dictionary
If a dictionary already containing properties of a given spectrum (such
as phase) already exists, then it may be passed as an argument and
the features computed here will be added as new entries to the passed
dictionary. Note that if it contains the entries 'wavelength_raw',
'flux_raw', 'redshift' or 'extinction', they will be over-written by
the inputs stated above.
smoothing_window : ~float
Window to be used by the Savitzky-Golay filter to smooth the spectra.
Adopting smoothing_window=21 seems suitable for TARDIS synthetic
spectra. For objects from the BSNIP database, a smoothing_window=51 is
recommended.
deredshift_and_normalize : ~boolean
Flag to whether or not de-redshift the spectrum.
Returns
-------
self.D : ~ dictionary
Dictionary containing quantities computed by this routine, such as:
'wavelength_corr' - de-redshifted wavelength.
'flux_normalized' - flux normalized by the mean.
'Y_fX' - quantity Y of feature X, where Y is 'pEW', 'velocity' or
'depth' and X is given in keys, defined above. Uncertainties for
these quantities can be computed by calling the Compute_Uncertainty
class defined below.
"""
def __init__(self, wavelength, flux, redshift=0., extinction=0., D={},
smoothing_window=21, deredshift_and_normalize=True,
verbose=False):
self.wavelength = wavelength
self.flux = flux
self.redshift = redshift
self.extinction = extinction
self.D = D
self.smoothing_window = smoothing_window
self.deredshift_and_normalize = deredshift_and_normalize
self.verbose = verbose
#@profile
def perform_checks(self):
"""Check whether the type of the input variables is appropriated.
"""
def check_type(var, var_name, wanted_type):
if not isinstance(var, wanted_type):
raise TypeError(
'Input ' + var_name + ' must be a ' + wanted_type.__name__
+ ', not ' + type(var).__name__ + '.')
#Check whether variable types are as required.
check_type(self.wavelength, 'wavelength_raw', np.ndarray)
check_type(self.flux, 'flux_raw', np.ndarray)
check_type(self.redshift, 'host_redshift', float)
check_type(self.extinction, 'extinction', float)
#Once variables are checked to be ok, then store them in the dict.
self.D['wavelength_raw'] = self.wavelength
self.D['flux_raw'] = self.flux
self.D['host_redshift'] = self.redshift
self.D['extinction'] = self.extinction
#@profile
def deredshift_spectrum(self):
"""Correct the wavelength for redshift. Note that the data downloaded
from BSNIP is not in rest-wavelength."""
self.D['wavelength_corr'] = (self.D['wavelength_raw']
/ (1. + self.D['host_redshift']))
#@profile
def normalize_flux_and_correct_extinction(self):
""" Normalize the flux according to the mean in the wavelength from
4000 to 9000 angs. This ensures that the smoothing method works and
allows the output spectra to be plotted on the same scale.
"""
#@profile
def get_normalized_flux(w, f, e):
#Redden/Un-redden the spectra.
aux_flux = pyasl.unred(w, f, ebv=e, R_V=3.1)
#Wavelength window where the mean flux is computed.
window_condition = ((w >= 4000.) & (w <= 9000.))
flux_window = aux_flux[window_condition]
normalization_factor = np.mean(flux_window)
aux_flux /= normalization_factor
return aux_flux, normalization_factor
self.D['flux_normalized'], self.D['norm_factor'] = get_normalized_flux(
self.D['wavelength_corr'], self.D['flux_raw'], self.D['extinction'])
#@profile
def convolve_with_filters(self):
"""Use PyAStronmy TransmissionCurves to convolve the de-redshifted,
rest-frame spectrum with Johnson filters.
"""
tcs = pyasl.TransmissionCurves()
#@profile
def get_color(w, f, req_filter):
transmission = tcs.getTransCurve('Johnson ' + req_filter)(w)
conv_spec = tcs.convolveWith(w, f, 'Johnson ' + req_filter)
filter_L = simps(conv_spec, w) / simps(transmission, w)
return filter_L
for inp_filter in ['U', 'B', 'V']:
filter_L = get_color(self.D['wavelength_corr'],
self.D['flux_normalized'], inp_filter)
self.D['filter_Johnson-' + inp_filter] = filter_L
#@profile
def smooth_spectrum(self):
"""Smooth the spectrum using the savgol-golay filter.
"""
def savitzky_golay(y, window_size, order, deriv=0, rate=1):
"""This was taken from
http://scipy-cookbook.readthedocs.io/items/SavitzkyGolay.html
The package that could be imported directly was conflicting with
numpy.
"""
try:
window_size = np.abs(np.int(window_size))
order = np.abs(np.int(order))
except ValueError as msg:
raise ValueError("window_size and order have to be of type int")
if window_size % 2 != 1 or window_size < 1:
raise TypeError("window_size size must be a positive odd number")
if window_size < order + 2:
raise TypeError("window_size is too small for the polynomials order")
order_range = range(order+1)
half_window = (window_size -1) // 2
# precompute coefficients
b = np.mat([[k**i for i in order_range] for k in range(-half_window, half_window+1)])
m = np.linalg.pinv(b).A[deriv] * rate**deriv * factorial(deriv)
# pad the signal at the extremes with
# values taken from the signal itself
firstvals = y[0] - np.abs( y[1:half_window+1][::-1] - y[0] )
lastvals = y[-1] + np.abs(y[-half_window-1:-1][::-1] - y[-1])
y = np.concatenate((firstvals, y, lastvals))
return np.convolve( m[::-1], y, mode='valid')
#Smooth flux
self.D['flux_smoothed'] = savitzky_golay(
self.D['flux_normalized'], self.smoothing_window, 3)
#Smooth the derivative of the smoothed flux.
def smooth_derivative(wavelength, f_smoothed):
dw = np.diff(wavelength)
df = np.diff(f_smoothed)
der = savitzky_golay(np.divide(df, dw), self.smoothing_window, 3)
return np.append(np.array([np.nan]), der)
self.D['derivative'] = smooth_derivative(self.D['wavelength_corr'],
self.D['flux_smoothed'])
#The commented chunk below performs the exact same calculation, but
#is ~20% slower. However, it does not require reproducing the scipy code.
'''
self.D['flux_smoothed'] = savgol_filter(
self.D['flux_normalized'], self.smoothing_window, 3)
def smooth_derivative(wavelength, f_smoothed):
dw = np.diff(wavelength)
df = np.diff(f_smoothed)
der = np.append(np.array([np.nan]), savgol_filter(
np.divide(df, dw), self.smoothing_window, 3))
return der
self.D['derivative'] = smooth_derivative(self.D['wavelength_corr'],
self.D['flux_smoothed'])
'''
#@profile
def find_zeros_in_features(self):
""" Find where the deepest minimum in the feature region is. Then
selected the closest maxima to the red and blue as the boundaries of
the feature. If the deepest minimum has no maximum either to the red or
to the blue, then select the next deepest minimum. Once the 'true'
minimum is determined, if there are more than one maximum to the red
or blue, then check if the nearest maxima are not shoulders by checking
for the presence another minimum withing the sep window of the
nearest maximum. If the maximum is deemed as a shoulder and if
there is another bluer/redder minimum bounded by another maximum,
then determine this minimum as the true one.
"""
def get_zeros(wavelength, flux, derivative, key):
#Retrieve all maxima and minima that are within the feature range.
window_condition = ((wavelength >= MD['blue_lower_f'+key])
& (wavelength <= MD['red_upper_f'+key]))
w_window = wavelength[window_condition]
f_window = flux[window_condition]
der_window = derivative[window_condition]
#Find the points where the sign of the derivative changes.
#These are used as the conditions to determine maxima and
#minima candidates.
minima_cond = ((der_window[0:-3] < 0.) & (der_window[1:-2] < 0.)
& (der_window[2:-1] > 0.) & (der_window[3:] > 0.))
maxima_cond = ((der_window[0:-3] > 0.) & (der_window[1:-2] > 0.)
& (der_window[2:-1] < 0.) & (der_window[3:] < 0.))
#Condition array has len = len(w_window) - 3 as it uses consecutive
#elements. Below, w_window[1:] could be used instead; the differences in the
#computed quantities are not significant (usually < 1 ang in pEW).
w_minima_window = w_window[1:-2][minima_cond]
f_minima_window = f_window[1:-2][minima_cond]
w_maxima_window = w_window[1:-2][maxima_cond]
f_maxima_window = f_window[1:-2][maxima_cond]
def guess_minimum(potential_w, potential_f):
""" In low noise spectra, get minimum at wavelength where the
line would have been shifted due to a typical ejecta
velocity of ~ -11,000 km/s. Maybe need some improvement to also
consider the deepest minimum.
"""
if len(potential_w) <= 4:
rest_w = np.mean(MD['rest_f' + key])
typical_v = -11000.
typical_w = (rest_w * np.sqrt(1. + typical_v / c) /
np.sqrt(1. - typical_v / c))
w_diff = np.absolute(potential_w - typical_w)
w_guess = potential_w[w_diff.argmin()]
f_guess = potential_f[w_diff.argmin()]
#In noisy spectra, get the deepest minimum.
elif len(potential_w) > 4:
f_guess = min(potential_f)
w_guess = potential_w[potential_f.argmin()]
return w_guess, f_guess
copy_w_minima_window = np.copy(w_minima_window)
copy_f_minima_window = np.copy(f_minima_window)
for i in range(len(w_minima_window)):
if len(copy_w_minima_window) > 0:
#Assign a minimum.
w_min, f_min = guess_minimum(copy_w_minima_window,
copy_f_minima_window)
#Trimming minima and maxima in feature window:
#Select only minima/maxima in the left (right) side of the
#true minimum for the blue (red) window. These are bounded
#by the pre-fixed limits for the window and the position
#of the true minimum.
min_blue_condition = (w_minima_window < w_min)
min_red_condition = (w_minima_window > w_min)
max_blue_condition = (w_maxima_window < w_min)
max_red_condition = (w_maxima_window > w_min)
minima_window_blue_condition = (min_blue_condition
& (w_minima_window <= MD['blue_upper_f'+key])
& (w_minima_window >= MD['blue_lower_f'+key]))
maxima_window_blue_condition = (max_blue_condition
& (w_maxima_window <= MD['blue_upper_f'+key])
& (w_maxima_window >= MD['blue_lower_f'+key]))
minima_window_red_condition = (min_red_condition
& (w_minima_window <= MD['red_upper_f'+key])
& (w_minima_window >= MD['red_lower_f'+key]))
maxima_window_red_condition = (max_red_condition
& (w_maxima_window <= MD['red_upper_f'+key])
& (w_maxima_window >= MD['red_lower_f'+key]))
w_minima_window_blue = w_minima_window[
minima_window_blue_condition]
f_minima_window_blue = f_minima_window[
minima_window_blue_condition]
w_maxima_window_blue = w_maxima_window[
maxima_window_blue_condition]
f_maxima_window_blue = f_maxima_window[
maxima_window_blue_condition]
w_minima_window_red = w_minima_window[
minima_window_red_condition]
f_minima_window_red = f_minima_window[
minima_window_red_condition]
w_maxima_window_red = w_maxima_window[
maxima_window_red_condition]
f_maxima_window_red = f_maxima_window[
maxima_window_red_condition]
#Select the maxima to the right and to the left of the
#Minimum determined above.
try:
w_max_blue = w_maxima_window_blue[-1]
f_max_blue = f_maxima_window_blue[-1]
w_max_red = w_maxima_window_red[0]
f_max_red = f_maxima_window_red[0]
except:
w_max_blue, f_max_blue = np.nan, np.nan
w_max_red, f_max_red = np.nan, np.nan
#If there is no maximum to either the left or to the right,
#remove the minimum from the list of minima and
#try the next deepest minimum.
if not np.isnan(w_max_blue) and not np.isnan(w_max_red):
break
else:
copy_w_minima_window = np.asarray(
filter(lambda x : x != w_min, copy_w_minima_window))
copy_f_minima_window = np.asarray(
filter(lambda x : x != f_min, copy_f_minima_window))
if len(copy_w_minima_window) == 0:
w_min, f_min = np.nan, np.nan
w_max_blue, f_max_blue = np.nan, np.nan
w_max_red, f_max_red = np.nan, np.nan
#Once the true minimum is known, check whether the nearest maxima
#are just shoulders.
if not np.isnan(w_max_blue) and len(w_maxima_window_blue) > 1:
#Compute wavelength separation between minima to the maximum.
d_minima_window_blue = w_minima_window_blue - w_max_blue
#For each minimum, compute the largest relative flux
#in the window between the current maximum and the minimum.
#This assesses whether the spectrum is flat in this region.
r_minima_window_blue = []
for w_mwb in w_minima_window_blue:
try:
condition = ((wavelength <= w_max_blue)
& (wavelength >= w_mwb))
r_max = max([abs(f_step - f_max_blue) / f_max_blue for
f_step in flux[condition]])
r_minima_window_blue.append(r_max)
except:
r_minima_window_blue.append(np.nan)
#Select only the minima which are bluer than the maximum
#and within the separation window or within 1% of the maximum
#flux. This avoids tricky situations where there happens to be
#a shoulder from a neighbor feature at the same level.
d_minima_window_blue = np.asarray(
[d for (d, r) in zip(d_minima_window_blue, r_minima_window_blue)
if d < 0. and ((d > -1. * sep) or (r <= 0.01))])
#If there are shoulders, select the largest peak
#that is bluer than the shoulder as the new maximum.
if len(d_minima_window_blue) > 0:
condition = (w_maxima_window_blue <= w_max_blue)
w_maxima_window_blue = w_maxima_window_blue[condition]
f_maxima_window_blue = f_maxima_window_blue[condition]
if len(w_maxima_window_blue) >= 1:
f_max_blue = max(f_maxima_window_blue)
w_max_blue = w_maxima_window_blue[f_maxima_window_blue.argmax()]
if not np.isnan(w_max_red) and len(w_maxima_window_red) > 1:
#Compute wavelength separation between minima to the maximum.
d_minima_window_red = w_minima_window_red - w_max_red
#For each minimum, compute the largest relative flux
#in the window between the current maximum and the minimum.
#This assesses whether the spectrum is flat in this region.
r_minima_window_red = []
for w_mwr in w_minima_window_red:
try:
condition = ((wavelength >= w_max_red)
& (wavelength <= w_mwr))
r_max = max([abs(f_step - f_max_red) / f_max_red for
f_step in flux[condition]])
r_minima_window_red.append(r_max)
except:
r_minima_window_red.append(np.nan)
#Select only the minima which are redder than the maximum
#and within the separation window or within 1% of the maximum
#flux. This avoids tricky situations where there happens to be
#a shoulder from a neighbor feature at the same level.
d_minima_window_red = np.asarray(
[d for (d, r) in zip(d_minima_window_red, r_minima_window_red)
if d > 0. and ((d < 1. * sep) or (r <= 0.01))])
#If there are shoulders, select the largest peak
#that is redder than the shoulder as the new maximum.
if len(d_minima_window_red) > 0:
condition = (w_maxima_window_red >= w_max_red)
w_maxima_window_red = w_maxima_window_red[condition]
f_maxima_window_red = f_maxima_window_red[condition]
if len(w_maxima_window_red) >= 1:
f_max_red = max(f_maxima_window_red)
w_max_red = w_maxima_window_red[f_maxima_window_red.argmax()]
return float(w_min), float(f_min), float(w_max_blue), \
float(f_max_blue), float(w_max_red), float(f_max_red)
for key in keys:
v1, v2, v3, v4, v5, v6 = get_zeros(
self.D['wavelength_corr'], self.D['flux_smoothed'],
self.D['derivative'], key)
self.D['wavelength_minima_f' + key] = v1
self.D['flux_minima_f' + key] = v2
self.D['wavelength_maxima_blue_f' + key] = v3
self.D['flux_maxima_blue_f' + key] = v4
self.D['wavelength_maxima_red_f' + key] = v5
self.D['flux_maxima_red_f' + key] = v6
#@profile
def grab_feature_regions(self):
""" Store the region of the features (boundaries determined at
find_zeros_in_features) in order to facilitate computing features.
"""
def isolate_region(wavelength, flux_normalized, flux_smoothed,
blue_boundary, red_boundary):
if not np.isnan(blue_boundary) and not np.isnan(red_boundary):
region_condition = ((wavelength >= blue_boundary)
& (wavelength <= red_boundary))
wavelength_region = wavelength[region_condition]
flux_normalized_region = flux_normalized[region_condition]
flux_smoothed_region = flux_smoothed[region_condition]
else:
wavelength_region = np.array([np.nan])
flux_normalized_region = np.array([np.nan])
flux_smoothed_region = np.array([np.nan])
return wavelength_region, flux_normalized_region, \
flux_smoothed_region
for key in keys:
c1, c2, c3 = isolate_region(
self.D['wavelength_corr'], self.D['flux_normalized'],
self.D['flux_smoothed'],
self.D['wavelength_maxima_blue_f' + key],
self.D['wavelength_maxima_red_f' + key])
self.D['wavelength_region_f' + key] = c1
self.D['flux_normalized_region_f' + key] = c2
self.D['flux_smoothed_region_f' + key] = c3
#@profile
def make_pseudo_continuum(self):
""" The pseudo continuum slope is simply a line connecting the
feature region boundaries. It depends only on the wavelength array and
boundary values, the latter coming from the smoothed spectrum.
"""
def get_pseudo_continuum_flux(w, x1, y1, x2, y2, f_smoothed):
if len(f_smoothed) > 1:
slope = (y2 - y1) / (x2 - x1)
intercept = y1 - slope * x1
def pseudo_cont(x):
return slope * x + intercept
pseudo_flux = pseudo_cont(w)
#Check whether the continuum is always higher than the
#**smoothed** flux and the array contains more than one element.
boolean_check = (f_smoothed - pseudo_flux > 0.15
* (max(f_smoothed) - min(f_smoothed)))
if True in boolean_check or len(boolean_check) < 1:
pseudo_flux = np.array([np.nan])
else:
pseudo_flux = np.array([np.nan])
return pseudo_flux
for key in keys:
self.D['pseudo_cont_flux_f' + key] = get_pseudo_continuum_flux(
self.D['wavelength_region_f' + key],
self.D['wavelength_maxima_blue_f' + key],
self.D['flux_maxima_blue_f' + key],
self.D['wavelength_maxima_red_f' + key],
self.D['flux_maxima_red_f' + key],
self.D['flux_smoothed_region_f' + key])
#@profile
def compute_pEW(self):
""" Compute the pEW of features.
"""
def get_pEW(wavelength_region, flux_region, pseudo_flux):
if len(pseudo_flux) > 1:
pEW = sum(np.multiply(
np.diff(wavelength_region),
np.divide(pseudo_flux[0:-1] - flux_region[0:-1], pseudo_flux[0:-1])))
else:
pEW = np.nan
return pEW
for key in keys:
self.D['pEW_f' + key] = get_pEW(
self.D['wavelength_region_f' + key],
self.D['flux_normalized_region_f' + key],
self.D['pseudo_cont_flux_f' + key])
#@profile
def compute_smoothed_velocity_and_depth(self):
""" Compute the velocity of the features according to the rest
wavelength of the line forming the feature.
The velocity is computed by fitting a parabola to the minimum of the
feature.
"""
def make_parabola(x_ref):
def parabola(x, a, b, c):
return a * (x - x_ref)**2. + b * (x - x_ref) + c
return parabola
#@profile
def get_smoothed_velocity(wavelength_region, flux_region,
pseudo_flux, rest_wavelength):
if len(pseudo_flux) > 1:
flux_at_min = min(flux_region)
wavelength_at_min = wavelength_region[flux_region.argmin()]
pseudo_cont_at_min = pseudo_flux[flux_region.argmin()]
wavelength_par = wavelength_region[
(wavelength_region >= wavelength_at_min - sep)
& (wavelength_region <= wavelength_at_min + sep)]
flux_par = flux_region[
(wavelength_region >= wavelength_at_min - sep)
& (wavelength_region <= wavelength_at_min + sep)]
#Note that using polyfit is significantly faster than curve_fit.
popt = np.polyfit(wavelength_par, flux_par, 2)
rest_wavelength = np.mean(rest_wavelength)
wavelength_par_min = - popt[1] / (2 * popt[0])
flux_par_min = np.polyval(popt, wavelength_par_min)
#Velocity is given in units of [1000 km/s].
velocity = (c / 1.e3
* ((wavelength_par_min / rest_wavelength)**2. - 1.)
/ ((wavelength_par_min / rest_wavelength)**2. + 1.))
depth = 1. - flux_par_min / pseudo_cont_at_min
if popt[0] < 0. or velocity > 0. or velocity < -30000.:
velocity = np.nan
else:
wavelength_par_min, flux_par_min = np.nan, np.nan
velocity, depth = np.nan, np.nan
return wavelength_par_min, flux_par_min, velocity, depth
for key in keys:
a1, a2, a3, a4 = get_smoothed_velocity(
self.D['wavelength_region_f' + key],
self.D['flux_normalized_region_f' + key],
self.D['pseudo_cont_flux_f' + key],
MD['rest_f' + key])
self.D['wavelength_at_min_f' + key] = a1
self.D['flux_at_min_f' + key] = a2
self.D['velocity_f' + key] = a3
self.D['depth_f' + key] = a4
#@profile
def run_analysis(self):
"""Main routine to call the functions of this class."""
#'if' condition is useful when producing mock spectra to compute the
#uncertainty -- it prevents repeating the calculation to normalize and
#de-redshift the spectrum.
self.perform_checks()
if self.deredshift_and_normalize:
self.deredshift_spectrum()
self.normalize_flux_and_correct_extinction()
self.convolve_with_filters()
else:
self.D['wavelength_corr'] = self.D['wavelength_raw']
self.D['flux_normalized'] = self.D['flux_raw']
self.smooth_spectrum()
self.find_zeros_in_features()
self.grab_feature_regions()
self.make_pseudo_continuum()
self.compute_pEW()
self.compute_smoothed_velocity_and_depth()
return self.D
class Compute_Uncertainty(object):
"""Uses a MC approach to compute the uncertainty of spectral features.
As a guideline, this follows Liu+ 2016
[[http://adsabs.harvard.edu/abs/2016ApJ...827...90L]].
Parameters
----------
D : ~dictionary
The input dictionary needs to contain keys computed by the
Analyse_Spectra class, such as 'wavelength_corr' and the computed
features.
smoothing_window : ~float
Window to be used by the Savitzky-Golay filter to smooth the spectra.
Adopting smoothing_window=21 seems suitable for TARDIS synthetic
spectra. For objects from the BSNIP database, a smoothing_window=51 is
recommended.
N_MC_runs : ~float
Number of mock spectra (with artificial noise) used for the MC run.
Returns
-------
self.D : ~dictionary
Dictionary containing the uncertainties of the features computed by the
Analyse_Spectra class. E.g. 'pEW_unc_f7'.
"""
def __init__(self, D, smoothing_window=21, N_MC_runs=3000):
self.D = D
self.smoothing_window = smoothing_window
self.N_MC_runs = N_MC_runs
#Relatively small correction needed because the smoothed
#spectrum 'follows' the noise, leading to a smaller than expected rms noise.
#17 below to be checked.
if smoothing_window == 21 or smoothing_window == 17:
self._corr = 1. / 0.93
elif smoothing_window == 51:
self._corr = 1. / 0.96
else:
raise ValueError('Smoothing correction not defined for this '
+ 'smoothing window.')
#@profile
def compute_flux_rms(self, wave, fnor, fsmo):
""" Estimate the flux noise in each pixel using a simple rms
in a bin defined by the sep parameter.
"""
def rms(y_data, y_smot):
#Given a noisy and a smoothed data, compute an array of the
#squared differences and take the square-root of its mean.
#Used as a proxy of the noise.
rms_out = np.sqrt(((y_data - y_smot)**2.).mean())
if rms_out < 1.e-10: rms_out = 1.e-5
return rms_out
#Compute the rms as a proxy of the noise of the flux point-wise.
#Although point-wise, the noise of a given point is determined by
#computing the rms including also the nearby points -- this prevents
#funky values from being generated. In the loop below, for each point
#'w' in the wavelength array, create a mini array containing the
#nearby normalized and smoothed fluxes, which are then used as inputs
#to the rms function.
rms = np.asarray([rms(
fnor[(wave >= w - sep) & (wave <= w + sep)],
fsmo[(wave >= w - sep) & (wave <= w + sep)])
* self._corr for w in wave])
return rms
#@profile
def compute_uncertainty(self, q_MC, q_orig):
"""The MC mock spectra produce an array of values for each quantity.
These values are used to estimate the uncertainty using np.std.
"""
#Check that at least one computed value in the the MC simulations is
#not nan. Else, flag it.
if not np.isnan(q_MC).all() and not np.isnan(q_orig):
flag = False
q_MC = q_MC[~np.isnan(q_MC)]
q_MC_remout = np.copy(q_MC)
i = 0
#Iteratively remove outliers that are > 5 sigma from the original
#computed value. Uncertainty is the standard deviation of the
#'trimmed' array of MC values.
while True:
len_init = len(q_MC_remout)
unc = abs(np.std(q_MC_remout))
outlier_filter = ((q_MC_remout > q_orig - 5. * unc)
& (q_MC_remout < q_orig + 5. * unc))
q_MC_remout = q_MC_remout[outlier_filter]
if (len(q_MC_remout) == len_init) or (i == 10):
break
else:
i += 1
q_median = np.median(q_MC_remout)
q_mean = np.mean(q_MC_remout)
#If the quantity value and the median of the values from the MC
#simulations differ by more than the uncertainty, then flag it.
if abs(q_orig - q_median) > unc or i == 10:
flag = True
else:
unc, flag = np.nan, True
return unc, flag
#@profile
def run_uncertainties(self):
"""Main function to run the modules in this class to estimate the
uncertainties.
"""
#Estimate the noise by computing the rms in a wavelength window.
self.D['flux_rms'] = self.compute_flux_rms(self.D['wavelength_corr'],
self.D['flux_normalized'],
self.D['flux_smoothed'])
#Initialize dictionary to store computed quantities (such as pEW)
#for the mock runs
_store_D = {}
for key in keys:
_store_D['pEW_f' + key] = []
_store_D['velocity_f' + key] = []
_store_D['depth_f' + key] = []
#Compute quantities using mock spectra and store their values.
for i in range(self.N_MC_runs):
mock_flux = np.random.normal(self.D['flux_normalized'],
self.D['flux_rms'])
mock_D = {}
mock_D = Analyse_Spectra(
wavelength=self.D['wavelength_corr'], flux=mock_flux,
redshift=self.D['host_redshift'], extinction=self.D['extinction'],
D={}, smoothing_window=self.smoothing_window,
deredshift_and_normalize=False, verbose=False).run_analysis()
for key in keys:
_store_D['pEW_f' + key].append(mock_D['pEW_f' + key])
_store_D['velocity_f' + key].append(mock_D['velocity_f' + key])
_store_D['depth_f' + key].append(mock_D['depth_f' + key])
#Compute uncertainties.
for key in keys:
for var in ['pEW', 'velocity', 'depth']:
unc, flag = self.compute_uncertainty(
np.asarray(_store_D[var + '_f' + key]),
self.D[var + '_f' + key])
self.D[var + '_unc_f' + key] = unc
self.D[var + '_flag_f' + key] = flag
return self.D
class Plot_Spectra(object):
"""Creates a plot of the spectra where the computed feature regions are
highlighted.
Parameters
----------
D : ~dictionary
The input dictionary needs to contain keys computed by the
Analyse_Spectra class, such as 'wavelength_corr' and the computed
features.
outfile : ~str
String with the full path of the output figure. This should include
the desired format.
show_fig : ~boolean
If true, the created figure will be shown when the program is run.
save_fig : ~boolean
If true, the created figure will be saved as outfile.
Returns
-------
None
"""
def __init__(self, D, outfile, show_fig=False, save_fig=False):
self.D = D
self.outfile = outfile
self.show_fig = show_fig
self.save_fig = save_fig
self.fs_label = 26
self.fs_ticks = 26
self.fs_legend = 20
self.fs_text = 22
self.fs_as = 24
self.fs_feature = 14
mpl.rcParams['mathtext.fontset'] = 'stix'
mpl.rcParams['font.family'] = 'STIXGeneral'
self.make_plots()
def set_fig_frame(self, ax):
x_label = r'$\lambda \ \mathrm{[\AA]}$'
y_label = r'$\mathrm{f}_{\lambda}/ \langle \mathrm{f}_{\lambda} \rangle$'
ax.set_xlabel(x_label, fontsize=self.fs_label)
ax.set_ylabel(y_label, fontsize=self.fs_label)
ax.set_xlim(1500.,10000.)
ax.set_ylim(0.,5.)
ax.tick_params(axis='y', which='major', labelsize=self.fs_ticks, pad=8)
ax.tick_params(axis='x', which='major', labelsize=self.fs_ticks, pad=8)
ax.minorticks_on()
ax.tick_params('both', length=8, width=1, which='major')
ax.tick_params('both', length=4, width=1, which='minor')
ax.xaxis.set_minor_locator(MultipleLocator(500.))
ax.xaxis.set_major_locator(MultipleLocator(1000.))
ax.yaxis.set_minor_locator(MultipleLocator(0.1))
ax.yaxis.set_major_locator(MultipleLocator(0.5))
def add_feature_shade(self, ax, w, f, f_c, color, alpha):
try:
ax.plot(w, f_c, ls='--', c=color, alpha=alpha)
ax.fill_between(w, f, f_c, color=color, alpha=alpha)
except:
pass
def add_boundaries(self, ax, w_max_blue, f_max_blue, w_max_red,
f_max_red, w_min, f_min, color):
ax.plot(w_max_blue, f_max_blue, color=color, marker='+', markersize=12.)
ax.plot(w_max_red, f_max_red, color=color, marker='+', markersize=12.)
ax.plot(w_min, f_min, color=color, marker='x', markersize=12.)
def save_figure(self, dpi=360):
if self.save_fig:
extension = self.outfile.split('.')[-1]
plt.savefig(self.outfile, format=extension, dpi=dpi)
def show_figure(self):
if self.show_fig:
plt.show()
def make_plots(self):
colors = ['b', 'r', 'g']
alpha = 0.5
fig = plt.figure(figsize=(16, 12))
ax = fig.add_subplot(111)
self.set_fig_frame(ax)
ax.plot(self.D['wavelength_corr'], self.D['flux_normalized'],
color='k', alpha=alpha, lw=1.)
ax.plot(self.D['wavelength_corr'], self.D['flux_smoothed'],
color='k', alpha=1., lw=2.)
for i, key in enumerate(keys):
self.add_feature_shade(ax, self.D['wavelength_region_f' + key],
self.D['flux_normalized_region_f' + key],
self.D['pseudo_cont_flux_f' + key],
color=colors[i], alpha=alpha)
self.add_boundaries(ax, self.D['wavelength_maxima_blue_f' + key],
self.D['flux_maxima_blue_f' + key],
self.D['wavelength_maxima_red_f' + key],
self.D['flux_maxima_red_f' + key],
self.D['wavelength_minima_f' + key],
self.D['flux_minima_f' + key], color=colors[i])
ax.grid(True)
plt.tight_layout()
self.save_figure()
self.show_figure()
plt.close(fig)
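#Illustrative end-to-end sketch (not part of the original module; the input
#arrays and output path are placeholders):
#    D = Analyse_Spectra(wavelength=wave, flux=flux, redshift=0.05,
#                        extinction=0., smoothing_window=51).run_analysis()
#    D = Compute_Uncertainty(D, smoothing_window=51, N_MC_runs=1000).run_uncertainties()
#    Plot_Spectra(D, outfile='./spectra.png', show_fig=False, save_fig=True)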
|
py | 7df8a1ae632602e4c61bc6b7584371217f369346 | from deepdab.ai import *
from deepdab.util.helper_functions import *
from deepdab.util.file_helper import *
from deepdab.util.reward_util import *
from deepdab.util.rate_util import *
from deepdab.util.evaluator import *
board_size = (2, 2)
num_episodes = 1500000
learning_rate_schedule = {0: 0.005, 1000000: 0.0005}
epsilon = 0.1
batch_size = 32
decay_speed = 1.0
use_symmetries = True
base_path = get_base_path_arg()
print("initializing for (%s, %s) game..." % (board_size[0], board_size[1]))
policy = PGPolicyCNN2(board_size, batch_size=batch_size, dropout_keep_prob=0.5)
# MCTS = MCTSRootParallelPolicy(board_size, num_playouts=250, num_workers=4, default_policy=Level2HeuristicPolicy(board_size))
L2 = Level2HeuristicPolicy(board_size)
L1 = Level1HeuristicPolicy(board_size)
L0 = RandomPolicy()
reward_fn = DelayedBinaryReward()
# opponent_schedule = {0: L0, 200000: L1, 400000: L2, 600000: policy}
opponent_schedule = {0: L0, 200000: L1, 400000: L2}
# opponent_schedule = {0: L0, 200000: L1, 400000: L2, 600000: MCTS}
print_info(board_size=board_size, num_episodes=num_episodes, policy=policy, mode='self-play or watch opponents',
reward=reward_fn, updates='offline', learning_rate_schedule=learning_rate_schedule, epsilon=epsilon,
architecture=policy.get_architecture(), batch_size=batch_size)
unique_states_visited = set()
all_transitions = []
for episode_num in range(1, num_episodes + 1):
lr = gen_rate_step(episode_num, learning_rate_schedule)
eps = epsilon
opponent = gen_rate_step(episode_num, opponent_schedule)
policy.set_boltzmann_action(False)
policy.set_epsilon(eps)
policy.set_learning_rate(lr)
policy_actions = []
opponent_actions = []
policy_states = []
opponent_states = []
players = ['policy', 'opponent'] if episode_num % 2 == 0 else ['opponent', 'policy']
game = Game(board_size, players)
current_player = game.get_current_player()
while not game.is_finished():
board_state = game.get_board_state()
if current_player == 'policy':
policy_states.append(board_state)
edge = policy.select_edge(board_state)
policy_actions.append(to_one_hot_action(board_state, edge))
current_player, _ = game.select_edge(edge, current_player)
unique_states_visited.add(as_string(game.get_board_state()))
else:
opponent_states.append(board_state)
if isinstance(opponent, MCTSRootParallelPolicy):
edge = opponent.select_edge(board_state, game.get_score(current_player))
else:
edge = opponent.select_edge(board_state)
opponent_actions.append(to_one_hot_action(board_state, edge))
current_player, _ = game.select_edge(edge, current_player)
unique_states_visited.add(as_string(game.get_board_state()))
policy_reward = reward_fn.compute_reward(game, 'policy', 'opponent')
opponent_reward = reward_fn.compute_reward(game, 'opponent', 'policy')
# don't add transitions that have 0 reward as the gradient will be zero anyways
if policy_reward == 1:
policy_outcomes = len(policy_actions)*[policy_reward]
append_transitions(policy_states, policy_actions, policy_outcomes, all_transitions, use_symmetries, board_size)
elif opponent_reward == 1:
opponent_outcomes = len(opponent_actions)*[opponent_reward]
append_transitions(opponent_states, opponent_actions, opponent_outcomes, all_transitions, use_symmetries, board_size)
if episode_num % 100 == 0:
policy.update_model(all_transitions)
all_transitions = []
# analyze results
if episode_num % 1000 == 0:
# play against opponents
policy.set_boltzmann_action(False)
policy.set_epsilon(0.0)
opponents = [RandomPolicy(), Level1HeuristicPolicy(board_size), Level2HeuristicPolicy(board_size)]
results = evaluate(policy, board_size, 1000, opponents)
print("%s, %s, %s, %s, %s, %s, %s, %s" % (episode_num, results[RandomPolicy.__name__]['won'],
results[Level1HeuristicPolicy.__name__]['won'],
results[Level2HeuristicPolicy.__name__]['won'],
results, len(unique_states_visited), eps, lr))
WeightWriter.print_episode(base_path, episode_num, policy.print_params)
|
py | 7df8a3ccd86b63226269c021b28a473dedd6a34e | from typing import Tuple, FrozenSet
from collections.abc import Iterable
from mathsat import msat_term, msat_env
from mathsat import msat_make_constant, msat_declare_function
from mathsat import msat_get_integer_type, msat_get_rational_type, msat_get_bool_type
from mathsat import msat_make_and, msat_make_not, msat_make_or
from mathsat import msat_make_leq, msat_make_equal
from mathsat import msat_make_number, msat_make_plus
from pysmt.environment import Environment as PysmtEnv
import pysmt.typing as types
from ltl.ltl import TermMap, LTLEncoder
from utils import name_next, symb_to_next
from hint import Hint, Location
def msat_make_lt(menv: msat_env, arg0: msat_term, arg1: msat_term):
geq = msat_make_geq(menv, arg0, arg1)
return msat_make_not(menv, geq)
def msat_make_geq(menv: msat_env, arg0: msat_term, arg1: msat_term):
return msat_make_leq(menv, arg1, arg0)
def msat_make_gt(menv: msat_env, arg0: msat_term, arg1: msat_term):
leq = msat_make_leq(menv, arg0, arg1)
return msat_make_not(menv, leq)
def msat_make_impl(menv: msat_env, arg0: msat_term, arg1: msat_term):
n_arg0 = msat_make_not(menv, arg0)
return msat_make_or(menv, n_arg0, arg1)
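# The helpers above derive the remaining operators from the MathSAT primitives
# imported at the top: a < b as not(a >= b), a >= b as b <= a,
# a > b as not(a <= b), and a -> b as (not a) or b.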
def check_ltl(menv: msat_env, enc: LTLEncoder) -> Tuple[Iterable, msat_term,
msat_term, msat_term]:
assert menv
assert isinstance(menv, msat_env)
assert enc
assert isinstance(enc, LTLEncoder)
bool_type = msat_get_bool_type(menv)
real_type = msat_get_rational_type(menv)
i = msat_declare_function(menv, "i", real_type)
i = msat_make_constant(menv, i)
r = msat_declare_function(menv, "r", real_type)
r = msat_make_constant(menv, r)
l = msat_declare_function(menv, "l", real_type)
l = msat_make_constant(menv, l)
inc_i = msat_declare_function(menv, "inc_i", bool_type)
inc_i = msat_make_constant(menv, inc_i)
x_i = msat_declare_function(menv, name_next("i"), real_type)
x_i = msat_make_constant(menv, x_i)
x_r = msat_declare_function(menv, name_next("r"), real_type)
x_r = msat_make_constant(menv, x_r)
x_l = msat_declare_function(menv, name_next("l"), real_type)
x_l = msat_make_constant(menv, x_l)
x_inc_i = msat_declare_function(menv, name_next("inc_i"), bool_type)
x_inc_i = msat_make_constant(menv, x_inc_i)
curr2next = {i: x_i, r: x_r, l: x_l, inc_i: x_inc_i}
zero = msat_make_number(menv, "0")
one = msat_make_number(menv, "1")
r_gt_0 = msat_make_gt(menv, r, zero)
r_lt_l = msat_make_lt(menv, r, l)
i_geq_0 = msat_make_geq(menv, i, zero)
init = msat_make_and(menv, r_gt_0, r_lt_l)
init = msat_make_and(menv, init,
msat_make_and(menv, i_geq_0,
msat_make_not(menv, inc_i)))
init = msat_make_and(menv, init, msat_make_gt(menv, l, zero))
# r' = r
trans = msat_make_equal(menv, x_r, r)
# i < l -> ((inc_i' & i' = i + 1) | (!inc_i' & i' = i)) & l' = l
i_lt_l = msat_make_lt(menv, i, l)
x_i_eq_i_p_1 = msat_make_and(menv, x_inc_i,
msat_make_equal(menv, x_i,
msat_make_plus(menv, i, one)))
x_i_eq_i = msat_make_and(menv, msat_make_not(menv, x_inc_i),
msat_make_equal(menv, x_i, i))
x_i_eq_i_p_1_or_i = msat_make_or(menv, x_i_eq_i_p_1, x_i_eq_i)
x_l_eq_l = msat_make_equal(menv, x_l, l)
x_i_eq_i_p_1_or_i_and_x_l_eq_l = msat_make_and(menv, x_i_eq_i_p_1_or_i,
x_l_eq_l)
trans = msat_make_and(menv, trans,
msat_make_impl(menv, i_lt_l,
x_i_eq_i_p_1_or_i_and_x_l_eq_l))
# i >= l -> i' = 0 & l' = l + 1 & !inc_i'
i_geq_l = msat_make_geq(menv, i, l)
x_i_eq_0 = msat_make_equal(menv, x_i, zero)
x_l_eq_l_p_1 = msat_make_equal(menv, x_l, msat_make_plus(menv, l, one))
x_i_eq_0_and_x_l_eq_l_p_1 = msat_make_and(menv,
msat_make_and(menv, x_i_eq_0,
x_l_eq_l_p_1),
msat_make_not(menv, x_inc_i))
trans = msat_make_and(menv, trans,
msat_make_impl(menv, i_geq_l,
x_i_eq_0_and_x_l_eq_l_p_1))
# (G F inc_i) -> ! G F r > i
    G_F_inc_i = enc.make_G(enc.make_F(inc_i))
r_gt_i = msat_make_gt(menv, r, i)
n_G_F_r_gt_i = msat_make_not(menv, enc.make_G(enc.make_F(r_gt_i)))
    ltl = msat_make_impl(menv, G_F_inc_i, n_G_F_r_gt_i)
return TermMap(curr2next), init, trans, ltl
def hints(env: PysmtEnv) -> FrozenSet[Hint]:
assert isinstance(env, PysmtEnv)
mgr = env.formula_manager
i = mgr.Symbol("i", types.REAL)
r = mgr.Symbol("r", types.REAL)
l = mgr.Symbol("l", types.REAL)
inc_i = mgr.Symbol("inc_i", types.BOOL)
symbs = frozenset([i, r, l, inc_i])
x_i = symb_to_next(mgr, i)
x_r = symb_to_next(mgr, r)
x_l = symb_to_next(mgr, l)
x_inc_i = symb_to_next(mgr, inc_i)
res = []
n0 = mgr.Real(0)
n1 = mgr.Real(1)
loc = Location(env, mgr.GE(l, n0))
loc.set_progress(0, mgr.Equals(x_l, mgr.Plus(l, n1)))
h_l = Hint("h_l0", env, frozenset([l]), symbs)
h_l.set_locs([loc])
res.append(h_l)
loc = Location(env, inc_i)
loc.set_progress(0, x_inc_i)
h_inc = Hint("h_inc0", env, frozenset([inc_i]), symbs)
h_inc.set_locs([loc])
res.append(h_inc)
loc = Location(env, mgr.Not(inc_i))
loc.set_progress(0, mgr.Not(x_inc_i))
h_inc = Hint("h_inc1", env, frozenset([inc_i]), symbs)
h_inc.set_locs([loc])
res.append(h_inc)
loc0 = Location(env, mgr.GE(i, n0))
loc0.set_progress(1, mgr.Equals(x_i, mgr.Plus(i, n1)))
loc1 = Location(env, mgr.GE(i, n0))
loc1.set_progress(0, mgr.Equals(x_i, i))
h_i = Hint("h_i2", env, frozenset([i]), symbs)
h_i.set_locs([loc0, loc1])
res.append(h_i)
loc0 = Location(env, mgr.GE(r, n0))
loc0.set_progress(1, mgr.Equals(x_r, r))
loc1 = Location(env, mgr.GE(r, n0))
loc1.set_progress(0, mgr.Equals(x_r, mgr.Plus(r, n1)))
h_r = Hint("h_r2", env, frozenset([r]), symbs)
h_r.set_locs([loc0, loc1])
res.append(h_r)
loc0 = Location(env, mgr.GE(l, n0))
loc0.set_progress(1, mgr.Equals(x_l, mgr.Plus(l, n1)))
loc1 = Location(env, mgr.GE(l, n0))
loc1.set_progress(0, mgr.Equals(x_l, l))
h_l = Hint("h_l2", env, frozenset([l]), symbs)
h_l.set_locs([loc0, loc1])
res.append(h_l)
loc0 = Location(env, mgr.Not(inc_i))
loc0.set_progress(1, x_inc_i)
loc1 = Location(env, inc_i)
loc1.set_progress(0, mgr.Not(x_inc_i))
h_inc = Hint("h_inc2", env, frozenset([inc_i]), symbs)
h_inc.set_locs([loc0, loc1])
res.append(h_inc)
loc0 = Location(env, mgr.GE(i, n0), mgr.GE(l, n0),
stutterT=mgr.Equals(x_i, mgr.Plus(i, l)))
loc0.set_progress(1, mgr.Equals(x_i, mgr.Plus(i, n1)))
loc1 = Location(env, mgr.GE(i, n0))
loc1.set_progress(0, mgr.Equals(x_i, i))
h_i = Hint("h_i3", env, frozenset([i]), symbs)
h_i.set_locs([loc0, loc1])
res.append(h_i)
loc0 = Location(env, mgr.Not(inc_i))
loc0.set_progress(1, x_inc_i)
loc1 = Location(env, inc_i, stutterT=x_inc_i)
loc1.set_progress(0, mgr.Not(x_inc_i))
h_inc = Hint("h_inc3", env, frozenset([inc_i]), symbs)
h_inc.set_locs([loc0, loc1])
res.append(h_inc)
loc0 = Location(env, mgr.GE(r, n0))
loc0.set_progress(1, mgr.Equals(x_r, r))
loc1 = Location(env, mgr.GE(r, n0))
loc1.set_progress(2, mgr.Equals(x_r, mgr.Plus(r, n1)))
loc2 = Location(env, mgr.GE(r, n0))
loc2.set_progress(0, mgr.Equals(x_r, r))
h_r = Hint("h_r4", env, frozenset([r]), symbs)
h_r.set_locs([loc0, loc1, loc2])
res.append(h_r)
loc0 = Location(env, mgr.GE(l, n0))
loc0.set_progress(1, mgr.Equals(x_l, mgr.Plus(l, n1)))
loc1 = Location(env, mgr.GE(l, n0))
loc1.set_progress(2, mgr.Equals(x_l, l))
loc2 = Location(env, mgr.GE(l, n0))
loc2.set_progress(0, mgr.Equals(x_l, l))
h_l = Hint("h_l4", env, frozenset([l]), symbs)
h_l.set_locs([loc0, loc1, loc2])
res.append(h_l)
return frozenset(res)
|
py | 7df8a4ef20d0308fd564f095cd0761f30289c44d | from binary_tree import Tree
class Solution:
def __init__(self, tree_data) -> None:
super().__init__()
self.tree = Tree(tree_data)
self.max_path = float("-inf")
def get_max_path(self):
def max_sub_path(node):
if node is None:
return 0
else:
                # Best downward gain from each child; clamp at 0 so a negative
                # branch is dropped rather than extended.
                left_max = max(max_sub_path(node.left), 0)
                right_max = max(max_sub_path(node.right), 0)
                # The best path passing through this node may use both branches.
                self.max_path = max(self.max_path, left_max + node.val + right_max)
                # Only one branch can be extended up to the parent.
                return node.val + max(left_max, right_max)
max_sub_path(self.tree.root)
return self.max_path
tree_data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
tree = Tree(tree_data)
print(tree)
s = Solution(tree_data)
print(s.get_max_path())
|
py | 7df8a64690d99d9be947c66979911e4c7206e99a | """
Helper module for Odometry
"""
import create2
import math
class Odometry:
"""This class keeps the current state (x,y,theta) up-to-date based
on wheel encoder readings.
    Call the update function as frequently as possible with the current
encoder readings.
"""
def __init__(self):
"""Constructor.
"""
self.x = 0
self.y = 0
self.theta = 0
self.last_left_encoder_counts = None
self.last_right_encoder_counts = None
self.d_l = create2.Specs.WheelDiameterInMM / 1000
self.d_r = create2.Specs.WheelDiameterInMM / 1000
self.n_l = create2.Specs.CountsPerRev
self.n_r = create2.Specs.CountsPerRev
self.w = create2.Specs.WheelDistanceInMM / 1000
def update(self, left_encoder_counts, right_encoder_counts):
"""Update function to keep the state up-to-date
Args:
left_encoder_counts (int)
right_encoder_counts (int)
"""
if self.last_right_encoder_counts is not None:
n_l_actual = left_encoder_counts - self.last_left_encoder_counts
n_r_actual = right_encoder_counts - self.last_right_encoder_counts
# account for overflow
if n_l_actual > 32768:
n_l_actual -= 65535
if n_r_actual > 32768:
n_r_actual -= 65535
if n_l_actual < -32768:
n_l_actual += 65535
if n_r_actual < -32768:
n_r_actual += 65535
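            # Differential-drive odometry: c_l/c_r convert encoder counts to
            # meters per count, delta_d is the distance travelled by the robot
            # center and delta_theta the change in heading.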
c_l = math.pi * self.d_l / self.n_l
c_r = math.pi * self.d_r / self.n_r
delta_l = c_l * n_l_actual
delta_r = c_r * n_r_actual
delta_d = (delta_r + delta_l) / 2
delta_theta = (delta_r - delta_l) / self.w
self.x += delta_d * math.cos(self.theta)
self.y += delta_d * math.sin(self.theta)
self.theta = math.fmod(self.theta + delta_theta, 2 * math.pi)
self.last_left_encoder_counts = left_encoder_counts
self.last_right_encoder_counts = right_encoder_counts
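
# A minimal usage sketch (not part of the original module); `robot` is a
# hypothetical object exposing the Create 2 encoder counts:
#
#   odometry = Odometry()
#   while True:
#       state = robot.update()  # hypothetical sensor read
#       odometry.update(state.leftEncoderCounts, state.rightEncoderCounts)
#       print(odometry.x, odometry.y, odometry.theta)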
|
py | 7df8a70027a3b56ebfa0138536d650d3b39dd699 | #####################################################################################
# Copyright (c) 2021 Joseph Reeves, Marijn Heule, Randal E. Bryant, Carnegie Mellon University
# Last edit: Nov. 2, 2021
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
# associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute,
# sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT
# OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
########################################################################################
import sys
import getopt
def trim(s):
while len(s) > 0 and s[-1] in '\r\n':
s = s[:-1]
return s
def deletes(fname):
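    # Echo every non-empty line of the proof unchanged, then emit a
    # "d <clause>" deletion line for each clause whose leading (pivot)
    # literal reappears after the second position.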
file = open(fname, 'r')
ds = []
for line in file:
line = trim(line)
if len(line) == 0: continue
lits = line.split()
print(line) # original proof
pivot = lits[0]
pidx = 1
found = False
if len(lits) > 1:
for l in lits[1:]:
if l == pivot:
found = True
break
pidx += 1
if pidx > 1 and found:
ds.append(line)
for d in ds:
st = "d "
print(st+d)
def run(name, args):
fname = None
optlist, args = getopt.getopt(args, "f:")
for (opt, val) in optlist:
if opt == '-f':
fname = val
deletes(val)
if __name__ == "__main__":
run(sys.argv[0], sys.argv[1:])
|
py | 7df8a7a841ddb9d59423049a2b93919512cc670f | # --------------------------------------------------------
# SiamMask
# Licensed under The MIT License
# Written by Qiang Wang (wangqiang2015 at ia.ac.cn)
# --------------------------------------------------------
import numpy as np
class Meter(object):
def __init__(self, name, val, avg):
self.name = name
self.val = val
self.avg = avg
def __repr__(self):
return "{name}: {val:.6f} ({avg:.6f})".format(
name=self.name, val=self.val, avg=self.avg
)
def __format__(self, *tuples, **kwargs):
return self.__repr__()
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = {}
self.sum = {}
self.count = {}
def update(self, batch=1, **kwargs):
val = {}
for k in kwargs:
val[k] = kwargs[k] / float(batch)
self.val.update(val)
for k in kwargs:
if k not in self.sum:
self.sum[k] = 0
self.count[k] = 0
self.sum[k] += kwargs[k]
self.count[k] += batch
def __repr__(self):
s = ''
for k in self.sum:
s += self.format_str(k)
return s
def format_str(self, attr):
return "{name}: {val:.6f} ({avg:.6f}) ".format(
name=attr,
val=float(self.val[attr]),
avg=float(self.sum[attr]) / self.count[attr])
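    # __getattr__ only fires when normal attribute lookup fails, so real
    # attributes (val, sum, count, methods) resolve normally, while e.g.
    # meter.time returns a Meter snapshot built from the accumulated sums.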
def __getattr__(self, attr):
if attr in self.__dict__:
return super(AverageMeter, self).__getattr__(attr)
if attr not in self.sum:
# logger.warn("invalid key '{}'".format(attr))
print("invalid key '{}'".format(attr))
return Meter(attr, 0, 0)
return Meter(attr, self.val[attr], self.avg(attr))
def avg(self, attr):
return float(self.sum[attr]) / self.count[attr]
class IouMeter(object):
def __init__(self, thrs, sz):
self.sz = sz
self.iou = np.zeros((sz, len(thrs)), dtype=np.float32)
self.thrs = thrs
self.reset()
def reset(self):
self.iou.fill(0.)
self.n = 0
def add(self, output, target):
if self.n >= len(self.iou):
return
target, output = target.squeeze(), output.squeeze()
for i, thr in enumerate(self.thrs):
pred = output > thr
mask_sum = (pred == 1).astype(np.uint8) + (target > 0).astype(np.uint8)
intxn = np.sum(mask_sum == 2)
union = np.sum(mask_sum > 0)
if union > 0:
self.iou[self.n, i] = intxn / union
elif union == 0 and intxn == 0:
self.iou[self.n, i] = 1
self.n += 1
def value(self, s):
nb = max(int(np.sum(self.iou > 0)), 1)
iou = self.iou[:nb]
def is_number(s):
try:
float(s)
return True
except ValueError:
return False
if s == 'mean':
res = np.mean(iou, axis=0)
elif s == 'median':
res = np.median(iou, axis=0)
elif is_number(s):
res = np.sum(iou > float(s), axis=0) / float(nb)
return res
if __name__ == '__main__':
avg = AverageMeter()
avg.update(time=1.1, accuracy=.99)
avg.update(time=1.0, accuracy=.90)
print(avg)
print(avg.time)
print(avg.time.avg)
print(avg.time.val)
print(avg.SS)
|
py | 7df8a7ce3f3755b6935fdaca7421ed8be0bafbc9 |
import MySQLdb
import io
import os
import cloudstorage as gcs
import csv
import timeit
import json
from bottle import Bottle
from google.appengine.api import app_identity
from StringIO import StringIO
from bottle import route, request, response, template, get, HTTPResponse
bottle = Bottle()
#location of the file in the default bucket on Google Cloud Storage
bucket_name = os.environ.get('BUCKET_NAME', app_identity.get_default_gcs_bucket_name())
bucket = '/' + bucket_name
filename = bucket + '/earthquake.csv'
#declare cursor globally
connobj = MySQLdb.connect(unix_socket='/cloudsql/cloudcomp2-979:simple' ,user='root')
c = connobj.cursor()
#Get filename from user
@bottle.route('/uploadform')
def uploadform():
return template('upload_form')
#Upload file into bucket on google cloud storage
@bottle.route('/uploadfile', method='POST')
def uploadfile():
#Calculate start time
start = timeit.default_timer()
filecontent = request.files.get('filecontent')
rawfilecontent = filecontent.file.read()
write_retry_params = gcs.RetryParams(backoff_factor=1.1)
gcs_file = gcs.open(filename,'w',content_type='text/plain',retry_params=write_retry_params)
gcs_file.write(rawfilecontent)
gcs_file.close()
#Calculate end time
stop = timeit.default_timer()
#Calculate total time
time_taken = stop - start
return template('upload_file',time_taken=time_taken)
#Read data from bucket and Insert data into google MySQLdb
def parse(filename, delimiter,c):
with gcs.open(filename, 'r') as gcs_file:
csv_reader = csv.reader(StringIO(gcs_file.read()), delimiter=',',
quotechar='"')
# Skip the header line
csv_reader.next()
try:
start = timeit.default_timer()
for row in csv_reader:
time = timestamp(row[0])
updated = timestamp(row[12])
for i in range (0,14):
if row[i] == '':
row[i] = "''"
place = str(row[13])
place = place.replace("'","")
insert = "INSERT INTO earthquake (time, latitude, longitude, depth, mag, magType, nst, gap, dmin, rms, net, id, updated,\
place, type) values('"+time+"',"+row[1]+","+row[2]+","+row[3]+","+row[4]+",'"+row[5]+"',"+row[6]+","+row[7]+",\
"+row[8]+","+row[9]+",'"+row[10]+"','"+row[11]+"','"+updated+"','"+place+"','"+row[14]+"')"
c.execute(insert)
stop = timeit.default_timer()
insert_time = stop - start
return insert_time
except Exception as e:
print ("Data can't be inserted" + str(e))
#converting time format
def timestamp(string):
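    # e.g. '2015-03-01T12:34:56.000Z' -> '2015-03-01 12:34:56'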
ans = string[:10] + ' ' + string[11:19]
return ans
#query to get result for different magnitude for each week
def query(mag,c):
query = 'SELECT week(time) as week, count(*) as count, mag as mag FROM earthquake WHERE mag = '+str(mag)+' GROUP BY week(time), mag'
c.execute(query)
ans_query = c.fetchall()
return ans_query
#query for magnitude greater than 5
def bigquery(mag,c):
query = 'SELECT week(time) as week, count(*) as count, mag as mag FROM earthquake WHERE mag > '+str(mag)+' GROUP BY week(time), mag'
c.execute(query)
ans_query = c.fetchall()
return ans_query
#function to format generated result
def ans_format(mag):
table = "<table border='2'><tr><th>Week</th><th>Number of quakes</th><th>Magnitude</th></tr>"
ans = ""
for x in mag:
ans = ans +"<tr><td>" + str(x[0]) + "</td><td>" + str(x[1]) + "</td><td>" + str(x[2]) +"</td></tr>"
table += ans + "</table>"
return table
#Displays the webinterface for user to enter magnitude and location
@bottle.route('/webinterface')
def webinterface():
return template('webinterface')
@bottle.route('/dynamic_query', method = "POST")
def dynamic_query():
if request.headers.get('X-Requested-With') == 'XMLHttpRequest':
dict_data = request.forms.dict
print dict_data
query_final = create_query(dict_data)
connectdb = 'USE db'
c.execute(connectdb)
query_ans = c.execute(query_final)
query_result = c.fetchall()
print query_result
query_output = query_format(query_result)
print query_output
return HTTPResponse(body=str(query_output), status=200)
#function to create dynamic query
def create_query(dict_data):
q1 = "SELECT * FROM earthquake WHERE "
q2 = "mag "
param1 = ""
if dict_data["param1"][0] == "eq":
param1 = "= "
elif dict_data["param1"][0]== "gt":
param1 = "> "
elif dict_data["param1"][0] == "gte":
param1 = ">= "
elif dict_data["param1"][0] == "lt":
param1 = "< "
elif dict_data["param1"][0] == "lte":
param1 = "<= "
q3 = param1
mag = dict_data["mag"][0]
q4 = mag
param2 = ""
if dict_data["param2"][0] == "or":
param2 = " or "
elif dict_data["param2"][0] == "and":
param2 = " and "
q5 = param2
q6 = "place LIKE "
loc = dict_data["loc"][0]
q7 = loc
query_final = str(q1 + q2 + q3 + q4 + q5 + q6 + "'%" +q7+ "%'")
return query_final
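# For example, with hypothetical inputs param1 "gte", mag "5", param2 "and"
# and loc "California", the function returns:
#   SELECT * FROM earthquake WHERE mag >= 5 and place LIKE '%California%'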
def query_format(query_result):
table = "<table border='2'><tr><th>time</th><th>latitude</th><th>longitude</th><th>depth</th><th>mag</th><th>magType</th><th>nst</th>"\
"<th>gap</th><th>dmin</th><th>rms</th><th>net</th><th>id</th><th>updated</th><th>place</th><th>type</th></tr>"
ans = ""
for x in query_result:
print x
ans += "<tr>"
ans += "<td>"+x[0].strftime("%d/%m/%Y %H:%M:%S")+"</td>"
ans += "<td>"+str(x[1])+"</td>"
ans += "<td>"+str(x[2])+"</td>"
ans += "<td>"+str(x[3])+"</td>"
ans += "<td>"+str(x[4])+"</td>"
ans += "<td>"+str(x[5])+"</td>"
ans += "<td>"+str(x[6])+"</td>"
ans += "<td>"+str(x[7])+"</td>"
ans += "<td>"+str(x[8])+"</td>"
ans += "<td>"+str(x[9])+"</td>"
ans += "<td>"+str(x[10])+"</td>"
ans += "<td>"+str(x[11])+"</td>"
ans += "<td>"+x[12].strftime("%d/%m/%Y %H:%M:%S")+"</td>"
ans += "<td>"+str(x[13])+"</td>"
ans += "<td>"+str(x[14])+"</td>"
ans += "</tr>"
table += ans + "</table>"
return table
@bottle.route('/')
def main():
try:
createdb = 'CREATE DATABASE IF NOT EXISTS db'
c.execute(createdb)
connectdb = 'USE db'
c.execute(connectdb)
table = 'CREATE TABLE IF NOT EXISTS earthquake '\
'(time TIMESTAMP,'\
'latitude DOUBLE,'\
'longitude DOUBLE,'\
'depth DOUBLE,'\
'mag DOUBLE,'\
'magType varchar(500),'\
'nst DOUBLE,'\
'gap DOUBLE,'\
'dmin DOUBLE,'\
'rms DOUBLE,'\
'net varchar(500),'\
'id varchar(500),'\
'updated TIMESTAMP,'\
'place VARCHAR(500),'\
'type VARCHAR(500))'
c.execute(table)
insert_time = parse(filename,',',c)
mag2 = query(2,c)
mag3 = query(3,c)
mag4 = query(4,c)
mag5 = query(5,c)
maggt5 = bigquery(5,c)
ans_mag2 = ans_format(mag2)
ans_mag3 = ans_format(mag3)
ans_mag4 = ans_format(mag4)
ans_mag5 = ans_format(mag5)
ans_maggt5 = ans_format(maggt5)
ans = "Final Result: <br><br> Time taken to Insert data into MySQL database is: <br>" +str(insert_time)+"<br><br>" \
"Earthquake of magnitude 2: <br> "+str(ans_mag2)+"<br><br> Earthquake of magnitude 3: <br>" \
+str(ans_mag3)+ "<br><br> Earthquake of magnitude 4: <br>" +str(ans_mag4)+ "<br><br> Earthquake" \
"of magnitude 5: <br>" +str(ans_mag5)+ "<br><br> Earthquake of magnitude greater than 5: <br>" +str(ans_maggt5)
return ans
except Exception as e:
print str(e)
return e
# Define a handler for 404 errors.
@bottle.error(404)
def error_404(error):
"""Return a custom error 404."""
return 'Sorry, nothing at this URL.'
# [END all]
|
py | 7df8a7d62a193fb97e453cfdb15b40cc15b1d0c6 | #!/usr/bin/env python2
import sys
import os
import time
# Generate the master out.grd
#
# make out.grd
cmd="./makegrid.sh"
os.system(cmd)
print cmd
#
# Call each of the installed crustal models and time how
# long it takes to populate the models
#
#
# model bbp1d
#
start = time.time()
model_string = "bbp1d"
cmd="../bin/ucvm_query -f ../conf/ucvm.conf -m %s < ../utilities/out.grd > mesh_%s.out"%(model_string,model_string)
print cmd
os.system(cmd)
end = time.time()
print "Mesh extraction for model %s : %d seconds"%(model_string,(end-start))
#
#
# model 1d
#
start = time.time()
model_string = "1d"
cmd="../bin/ucvm_query -f ../conf/ucvm.conf -m %s < ../utilities/out.grd > mesh_%s.out"%(model_string,model_string)
print cmd
os.system(cmd)
end = time.time()
print "Mesh extraction for model %s : %d seconds"%(model_string,(end-start))
#
# model CVM-S4
#
start = time.time()
model_string = "cvms"
cmd="../bin/ucvm_query -f ../conf/ucvm.conf -m %s < ../utilities/out.grd > mesh_%s.out"%(model_string,model_string)
print cmd
os.system(cmd)
end = time.time()
print "Mesh extraction for model %s : %d seconds"%(model_string,(end-start))
#
# model CVM-S4.26
#
start = time.time()
model_string = "cvms5"
cmd="../bin/ucvm_query -f ../conf/ucvm.conf -m %s < ../utilities/out.grd > mesh_%s.out"%(model_string,model_string)
print cmd
os.system(cmd)
end = time.time()
print "Mesh extraction for model %s : %d seconds"%(model_string,(end-start))
#
# model CVM-S4.26.M01
#
start = time.time()
model_string = "cvmsi"
cmd="../bin/ucvm_query -f ../conf/ucvm.conf -m %s < ../utilities/out.grd > mesh_%s.out"%(model_string,model_string)
print cmd
os.system(cmd)
end = time.time()
print "Mesh extraction for model %s : %d seconds"%(model_string,(end-start))
#
# model CVM-H v15.1
#
start = time.time()
model_string = "cvmh"
cmd="../bin/ucvm_query -f ../conf/ucvm.conf -m %s < ../utilities/out.grd > mesh_%s.out"%(model_string,model_string)
print cmd
os.system(cmd)
end = time.time()
print "Mesh extraction for model %s : %d seconds"%(model_string,(end-start))
#
# model cencal
#
start = time.time()
model_string = "cencal"
cmd="../bin/ucvm_query -f ../conf/ucvm.conf -m %s < ../utilities/out.grd > mesh_%s.out"%(model_string,model_string)
print cmd
os.system(cmd)
end = time.time()
print "Mesh extraction for model %s : %d seconds"%(model_string,(end-start))
sys.exit(0)
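# NOTE: the sys.exit(0) above terminates the script here, so the cca
# extraction below is effectively disabled.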
#
# model cca
#
start = time.time()
model_string = "cca"
cmd="../bin/ucvm_query -f ../conf/ucvm.conf -m %s < ../utilities/out.grd > mesh_%s.out"%(model_string,model_string)
print cmd
os.system(cmd)
end = time.time()
print "Mesh extraction for model %s : %d seconds"%(model_string,(end-start))
sys.exit(0)
|
py | 7df8a8b4ec9cc4f83271278607d4370146a72e24 | '''
Training part
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import Queue
import datetime
import logging
import os
import threading
import time
import numpy as np
import tensorflow as tf
from maze import MazeGenerator
from predictron import Predictron
FLAGS = tf.app.flags.FLAGS
tf.flags.DEFINE_string('train_dir', './ckpts/predictron_train',
'dir to save checkpoints and TB logs')
tf.flags.DEFINE_integer('max_steps', 10000000, 'num of batches')
tf.flags.DEFINE_float('learning_rate', 1e-3, 'learning rate')
tf.flags.DEFINE_integer('batch_size', 128, 'batch size')
tf.flags.DEFINE_integer('maze_size', 20, 'size of maze (square)')
tf.flags.DEFINE_float('maze_density', 0.3, 'Maze density')
tf.flags.DEFINE_integer('max_depth', 16, 'maximum model depth')
tf.flags.DEFINE_float('max_grad_norm', 10., 'clip grad norm into this value')
tf.flags.DEFINE_boolean('log_device_placement', False,
"""Whether to log device placement.""")
tf.flags.DEFINE_integer('num_threads', 10, 'num of threads used to generate mazes.')
logging.basicConfig()
logger = logging.getLogger('training')
logger.setLevel(logging.INFO)
def train():
config = FLAGS
global_step = tf.get_variable(
'global_step', [],
initializer=tf.constant_initializer(0), trainable=False)
maze_ims_ph = tf.placeholder(tf.float32, [None, FLAGS.maze_size, FLAGS.maze_size, 1])
maze_labels_ph = tf.placeholder(tf.float32, [None, FLAGS.maze_size])
model = Predictron(maze_ims_ph, maze_labels_ph, config)
model.build()
loss = model.total_loss
loss_preturns = model.loss_preturns
loss_lambda_preturns = model.loss_lambda_preturns
opt = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate)
grad_vars = opt.compute_gradients(loss, tf.trainable_variables())
grads, vars = zip(*grad_vars)
grads_clipped, _ = tf.clip_by_global_norm(grads, FLAGS.max_grad_norm)
grad_vars = zip(grads_clipped, vars)
apply_gradient_op = opt.apply_gradients(grad_vars, global_step=global_step)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
update_op = tf.group(*update_ops)
  # Group all updates into a single train op.
train_op = tf.group(apply_gradient_op, update_op)
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
saver = tf.train.Saver(tf.global_variables())
tf.train.start_queue_runners(sess=sess)
train_dir = os.path.join(FLAGS.train_dir, 'max_steps_{}'.format(FLAGS.max_depth))
summary_merged = tf.summary.merge_all()
summary_writer = tf.summary.FileWriter(train_dir, sess.graph)
maze_queue = Queue.Queue(100)
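  # Producer/consumer setup: the threads started below keep generating
  # labelled maze batches and push them onto this bounded queue, while the
  # training loop pops one batch per step.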
def maze_generator():
maze_gen = MazeGenerator(
height=FLAGS.maze_size,
width=FLAGS.maze_size,
density=FLAGS.maze_density)
while True:
maze_ims, maze_labels = maze_gen.generate_labelled_mazes(FLAGS.batch_size)
maze_queue.put((maze_ims, maze_labels))
for thread_i in xrange(FLAGS.num_threads):
t = threading.Thread(target=maze_generator)
t.start()
for step in xrange(FLAGS.max_steps):
start_time = time.time()
maze_ims_np, maze_labels_np = maze_queue.get()
_, loss_value, loss_preturns_val, loss_lambda_preturns_val, summary_str = sess.run(
[train_op, loss, loss_preturns, loss_lambda_preturns, summary_merged],
feed_dict={
maze_ims_ph: maze_ims_np,
maze_labels_ph: maze_labels_np
})
duration = time.time() - start_time
assert not np.isnan(loss_value), 'Model diverged with loss = NaN'
if step % 10 == 0:
num_examples_per_step = FLAGS.batch_size
examples_per_sec = num_examples_per_step / duration
sec_per_batch = duration
format_str = (
'%s: step %d, loss = %.4f, loss_preturns = %.4f, loss_lambda_preturns = %.4f (%.1f examples/sec; %.3f '
'sec/batch)')
logger.info(format_str % (datetime.datetime.now(), step, loss_value, loss_preturns_val, loss_lambda_preturns_val,
examples_per_sec, sec_per_batch))
if step % 100 == 0:
summary_writer.add_summary(summary_str, step)
# Save the model checkpoint periodically.
if step % 1000 == 0 or (step + 1) == FLAGS.max_steps:
checkpoint_path = os.path.join(train_dir, 'model.ckpt')
saver.save(sess, checkpoint_path, global_step=step)
def main(argv=None):
train()
if __name__ == '__main__':
tf.app.run()
|
py | 7df8ab01ecfc2896e4de48c5351998c7ca5df719 | # -*- coding: utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
csv = 'result'
cfg_dict = {
'aux_no_0':
{
'dataset': 'aux_no_0',
'p': 3,
'd': 1,
'q': 1,
'taus': [1533, 4],
'Rs': [5, 4],
'k': 10,
'tol': 0.001,
'testsize': 0.1,
'loop_time': 5,
'info': 'dataset',
'Us_mode': 4,
'filename': csv
},
'aux_raw':
{
'dataset': 'aux_raw',
'p': 3,
'd': 1,
'q': 1,
'taus': [2246, 4],
'Rs': [5, 4],
'k': 10,
'tol': 0.001,
'testsize': 0.1,
'loop_time': 5,
'info': 'dataset',
'Us_mode': 4,
'filename': csv
},
'PC_W':
{
'dataset': 'PC_W',
'p': 3,
'd': 1,
'q': 1,
'taus': [9, 4],
'Rs': [5, 4],
'k': 10,
'tol': 0.001,
'testsize': 0.1,
'loop_time': 5,
'info': 'dataset',
'Us_mode': 4,
'filename': csv
},
'ele40':
{
'dataset': 'ele40',
'p': 3,
'd': 2,
'q': 1,
'taus': [321, 4],
'Rs': [20, 4],
'k': 10,
'tol': 0.001,
'testsize': 0.1,
'loop_time': 5,
'info': 'dataset',
'Us_mode': 4,
'filename': csv
},
'ele_big':
{
'dataset': 'ele_big',
'p': 3,
'd': 2,
'q': 1,
'taus': [321, 4],
'Rs': [20, 4],
'k': 10,
'tol': 0.001,
'testsize': 0.1,
'loop_time': 5,
'info': 'dataset',
'Us_mode': 4,
'filename': csv
},
'traffic_40':
{
'dataset': 'traffic_40',
'p': 3,
'd': 2,
'q': 1,
'taus': [228, 4],
'Rs': [20, 4],
'k': 10,
'tol': 0.001,
'testsize': 0.1,
'loop_time': 5,
'info': 'dataset',
'Us_mode': 4,
'filename': csv
},
'traffic_80':
{
'dataset': 'traffic_small',
'p': 3,
'd': 2,
'q': 1,
'taus': [228, 4],
'Rs': [20, 4],
'k': 10,
'tol': 0.001,
'testsize': 0.1,
'loop_time': 5,
'info': 'dataset',
'Us_mode': 4,
'filename': csv
},
'stock':
{
'dataset': 'stock',
'p': 8,
'd': 1,
'q': 1,
'taus': [4, 5],
'Rs': [4, 5],
'k': 10,
'tol': 0.001,
'testsize': 0.1,
'loop_time': 5,
'info': 'dataset',
'Us_mode': 4,
'filename': csv
}
}
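# A minimal usage sketch (assumption, not part of the original file): a
# driver script would typically pick one entry and unpack it, e.g.
#
#   cfg = cfg_dict['stock']
#   run_experiment(**cfg)  # `run_experiment` is a hypothetical entry point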
|
py | 7df8ab2381da76399ac75f325810a2f0b3340541 | import os
import numpy as np
from bbox_functions.convex import (
SphereFunction,
)
from bbox_functions.non_convex import (
RastriginFunction,
AckleyFunction,
RosenbrockFunction,
BealeFunction,
HimmelblausFunction,
HölderTableFunction,
CrossInTrayFunction,
)
from bbox_functions.visualize import (
matplotlib_heatmap,
matplotlib_surface,
)
path = os.path.realpath(__file__).rsplit("/", 1)[0]
sphere_function = SphereFunction(2, metric="loss")
rastrigin_function = RastriginFunction(2, metric="loss")
ackley_function = AckleyFunction(metric="loss")
rosenbrock_function = RosenbrockFunction(metric="loss")
beale_function = BealeFunction(metric="loss")
himmelblaus_function = HimmelblausFunction(metric="loss")
hölder_table_function = HölderTableFunction(metric="loss")
cross_in_tray_function = CrossInTrayFunction(metric="score")
resolution = 0.05
search_space1 = {
"x0": np.arange(-5, 5, resolution),
"x1": np.arange(-5, 5, resolution),
}
search_space2 = {
"x0": np.arange(-10, 10, resolution),
"x1": np.arange(-10, 10, resolution),
}
search_space2 = {
"x0": np.arange(-7, 7, resolution),
"x1": np.arange(-7, 7, resolution),
}
objective_function_infos = {
sphere_function: {
"search_space": search_space1,
"norm": None,
},
rastrigin_function: {
"search_space": search_space1,
"norm": None,
},
ackley_function: {
"search_space": search_space1,
"norm": None,
},
rosenbrock_function: {
"search_space": search_space1,
"norm": None,
},
beale_function: {
"search_space": search_space1,
"norm": "color_log",
},
himmelblaus_function: {
"search_space": search_space1,
"norm": "color_log",
},
hölder_table_function: {
"search_space": search_space2,
"norm": None,
},
cross_in_tray_function: {
"search_space": search_space2,
"norm": None,
},
}
for objective_function in objective_function_infos.keys():
objective_function_info = objective_function_infos[objective_function]
name = objective_function.__name__
search_space = objective_function_info["search_space"]
norm = objective_function_info["norm"]
print(name, "\n")
matplotlib_heatmap(objective_function, search_space, norm=norm).savefig(
path + "/images/" + name + "_heatmap.jpg", dpi=100
)
matplotlib_surface(objective_function, search_space, norm=norm).savefig(
path + "/images/" + name + "_surface.jpg", dpi=100
)
|
py | 7df8ab9c41abe8fd0c3848462d0a9fc649bf44a2 | from CSIKit.reader.reader import Reader
from CSIKit.reader.readers.read_atheros import ATHBeamformReader
from CSIKit.reader.readers.read_bfee import IWLBeamformReader
from CSIKit.reader.readers.read_csv import CSVBeamformReader
from CSIKit.reader.readers.read_pcap import NEXBeamformReader
from CSIKit.reader.reader_selector import get_reader |
py | 7df8ad7a21ce95ad9c4084e662c8bc4363583abf | # coding=utf-8
# Copyright 2018 The Google AI Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
"""Run masked LM/next sentence masked_lm pre-training for ALBERT."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
from albert import modeling
from albert import optimization
from six.moves import range
import tensorflow.compat.v1 as tf
from tensorflow.contrib import cluster_resolver as contrib_cluster_resolver
from tensorflow.contrib import data as contrib_data
from tensorflow.contrib import tpu as contrib_tpu
flags = tf.flags
FLAGS = flags.FLAGS
## Required parameters
flags.DEFINE_string(
"albert_config_file",
None,
"The config json file corresponding to the pre-trained ALBERT model. "
"This specifies the model architecture.",
)
flags.DEFINE_string(
"input_file", None, "Input TF example files (can be a glob or comma separated)."
)
flags.DEFINE_string(
"output_dir",
None,
"The output directory where the model checkpoints will be written.",
)
## Other parameters
flags.DEFINE_string(
"init_checkpoint",
None,
"Initial checkpoint (usually from a pre-trained ALBERT model).",
)
flags.DEFINE_integer(
"max_seq_length",
512,
"The maximum total input sequence length after WordPiece tokenization. "
"Sequences longer than this will be truncated, and sequences shorter "
"than this will be padded. Must match data generation.",
)
flags.DEFINE_integer(
"max_predictions_per_seq",
20,
"Maximum number of masked LM predictions per sequence. "
"Must match data generation.",
)
flags.DEFINE_bool("do_train", True, "Whether to run training.")
flags.DEFINE_bool("do_eval", False, "Whether to run eval on the dev set.")
flags.DEFINE_integer("train_batch_size", 4096, "Total batch size for training.")
flags.DEFINE_integer("eval_batch_size", 64, "Total batch size for eval.")
flags.DEFINE_enum("optimizer", "lamb", ["adamw", "lamb"], "The optimizer for training.")
flags.DEFINE_float("learning_rate", 0.00176, "The initial learning rate.")
flags.DEFINE_float("poly_power", 1.0, "The power of poly decay.")
flags.DEFINE_integer("num_train_steps", 125000, "Number of training steps.")
flags.DEFINE_integer("num_warmup_steps", 3125, "Number of warmup steps.")
flags.DEFINE_integer("start_warmup_step", 0, "The starting step of warmup.")
flags.DEFINE_integer(
"save_checkpoints_steps", 5000, "How often to save the model checkpoint."
)
flags.DEFINE_integer("keep_checkpoint_max", 5, "How many checkpoints to keep.")
flags.DEFINE_integer(
"iterations_per_loop", 1000, "How many steps to make in each estimator call."
)
flags.DEFINE_integer("max_eval_steps", 100, "Maximum number of eval steps.")
flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
flags.DEFINE_bool(
"init_from_group0",
False,
"Whether to initialize" "parameters of other groups from group 0",
)
tf.flags.DEFINE_string(
"tpu_name",
None,
"The Cloud TPU to use for training. This should be either the name "
"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
"url.",
)
tf.flags.DEFINE_string(
"tpu_zone",
None,
"[Optional] GCE zone where the Cloud TPU is located in. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.",
)
tf.flags.DEFINE_string(
"gcp_project",
None,
"[Optional] Project name for the Cloud TPU-enabled project. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.",
)
tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
flags.DEFINE_integer(
"num_tpu_cores",
8,
"Only used if `use_tpu` is True. Total number of TPU cores to use.",
)
flags.DEFINE_float(
"masked_lm_budget",
0,
"If >0, the ratio of masked ngrams to unmasked ngrams. Default 0,"
"for offline masking",
)
def model_fn_builder(
albert_config,
init_checkpoint,
learning_rate,
num_train_steps,
num_warmup_steps,
use_tpu,
use_one_hot_embeddings,
optimizer,
poly_power,
start_warmup_step,
):
"""Returns `model_fn` closure for TPUEstimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for TPUEstimator."""
tf.logging.info("*** Features ***")
for name in sorted(features.keys()):
tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape))
input_ids = features["input_ids"]
input_mask = features["input_mask"]
segment_ids = features["segment_ids"]
masked_lm_positions = features["masked_lm_positions"]
masked_lm_ids = features["masked_lm_ids"]
masked_lm_weights = features["masked_lm_weights"]
# Note: We keep this feature name `next_sentence_labels` to be compatible
# with the original data created by lanzhzh@. However, in the ALBERT case
# it does represent sentence_order_labels.
sentence_order_labels = features["next_sentence_labels"]
is_training = mode == tf.estimator.ModeKeys.TRAIN
model = modeling.AlbertModel(
config=albert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=segment_ids,
use_one_hot_embeddings=use_one_hot_embeddings,
)
(
masked_lm_loss,
masked_lm_example_loss,
masked_lm_log_probs,
) = get_masked_lm_output(
albert_config,
model.get_sequence_output(),
model.get_embedding_table(),
masked_lm_positions,
masked_lm_ids,
masked_lm_weights,
)
(
sentence_order_loss,
sentence_order_example_loss,
sentence_order_log_probs,
) = get_sentence_order_output(
albert_config, model.get_pooled_output(), sentence_order_labels
)
total_loss = masked_lm_loss + sentence_order_loss
tvars = tf.trainable_variables()
initialized_variable_names = {}
scaffold_fn = None
if init_checkpoint:
tf.logging.info(
"number of hidden group %d to initialize",
albert_config.num_hidden_groups,
)
num_of_initialize_group = 1
if FLAGS.init_from_group0:
num_of_initialize_group = albert_config.num_hidden_groups
if albert_config.net_structure_type > 0:
num_of_initialize_group = albert_config.num_hidden_layers
(
assignment_map,
initialized_variable_names,
) = modeling.get_assignment_map_from_checkpoint(
tvars, init_checkpoint, num_of_initialize_group
)
if use_tpu:
def tpu_scaffold():
for gid in range(num_of_initialize_group):
tf.logging.info("initialize the %dth layer", gid)
tf.logging.info(assignment_map[gid])
tf.train.init_from_checkpoint(
init_checkpoint, assignment_map[gid]
)
return tf.train.Scaffold()
scaffold_fn = tpu_scaffold
else:
for gid in range(num_of_initialize_group):
tf.logging.info("initialize the %dth layer", gid)
tf.logging.info(assignment_map[gid])
tf.train.init_from_checkpoint(init_checkpoint, assignment_map[gid])
tf.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
tf.logging.info(
" name = %s, shape = %s%s", var.name, var.shape, init_string
)
output_spec = None
if mode == tf.estimator.ModeKeys.TRAIN:
train_op = optimization.create_optimizer(
total_loss,
learning_rate,
num_train_steps,
num_warmup_steps,
use_tpu,
optimizer,
poly_power,
start_warmup_step,
)
output_spec = contrib_tpu.TPUEstimatorSpec(
mode=mode, loss=total_loss, train_op=train_op, scaffold_fn=scaffold_fn
)
elif mode == tf.estimator.ModeKeys.EVAL:
def metric_fn(*args):
"""Computes the loss and accuracy of the model."""
(
masked_lm_example_loss,
masked_lm_log_probs,
masked_lm_ids,
masked_lm_weights,
sentence_order_example_loss,
sentence_order_log_probs,
sentence_order_labels,
) = args[:7]
masked_lm_log_probs = tf.reshape(
masked_lm_log_probs, [-1, masked_lm_log_probs.shape[-1]]
)
masked_lm_predictions = tf.argmax(
masked_lm_log_probs, axis=-1, output_type=tf.int32
)
masked_lm_example_loss = tf.reshape(masked_lm_example_loss, [-1])
masked_lm_ids = tf.reshape(masked_lm_ids, [-1])
masked_lm_weights = tf.reshape(masked_lm_weights, [-1])
masked_lm_accuracy = tf.metrics.accuracy(
labels=masked_lm_ids,
predictions=masked_lm_predictions,
weights=masked_lm_weights,
)
masked_lm_mean_loss = tf.metrics.mean(
values=masked_lm_example_loss, weights=masked_lm_weights
)
metrics = {
"masked_lm_accuracy": masked_lm_accuracy,
"masked_lm_loss": masked_lm_mean_loss,
}
sentence_order_log_probs = tf.reshape(
sentence_order_log_probs, [-1, sentence_order_log_probs.shape[-1]]
)
sentence_order_predictions = tf.argmax(
sentence_order_log_probs, axis=-1, output_type=tf.int32
)
sentence_order_labels = tf.reshape(sentence_order_labels, [-1])
sentence_order_accuracy = tf.metrics.accuracy(
labels=sentence_order_labels, predictions=sentence_order_predictions
)
sentence_order_mean_loss = tf.metrics.mean(
values=sentence_order_example_loss
)
metrics.update(
{
"sentence_order_accuracy": sentence_order_accuracy,
"sentence_order_loss": sentence_order_mean_loss,
}
)
return metrics
metric_values = [
masked_lm_example_loss,
masked_lm_log_probs,
masked_lm_ids,
masked_lm_weights,
sentence_order_example_loss,
sentence_order_log_probs,
sentence_order_labels,
]
eval_metrics = (metric_fn, metric_values)
output_spec = contrib_tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
eval_metrics=eval_metrics,
scaffold_fn=scaffold_fn,
)
else:
raise ValueError("Only TRAIN and EVAL modes are supported: %s" % (mode))
return output_spec
return model_fn
def get_masked_lm_output(
albert_config, input_tensor, output_weights, positions, label_ids, label_weights
):
"""Get loss and log probs for the masked LM."""
input_tensor = gather_indexes(input_tensor, positions)
with tf.variable_scope("cls/predictions"):
# We apply one more non-linear transformation before the output layer.
# This matrix is not used after pre-training.
with tf.variable_scope("transform"):
input_tensor = tf.layers.dense(
input_tensor,
units=albert_config.embedding_size,
activation=modeling.get_activation(albert_config.hidden_act),
kernel_initializer=modeling.create_initializer(
albert_config.initializer_range
),
)
input_tensor = modeling.layer_norm(input_tensor)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
output_bias = tf.get_variable(
"output_bias",
shape=[albert_config.vocab_size],
initializer=tf.zeros_initializer(),
)
logits = tf.matmul(input_tensor, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
log_probs = tf.nn.log_softmax(logits, axis=-1)
label_ids = tf.reshape(label_ids, [-1])
label_weights = tf.reshape(label_weights, [-1])
one_hot_labels = tf.one_hot(
label_ids, depth=albert_config.vocab_size, dtype=tf.float32
)
# The `positions` tensor might be zero-padded (if the sequence is too
# short to have the maximum number of predictions). The `label_weights`
# tensor has a value of 1.0 for every real prediction and 0.0 for the
# padding predictions.
per_example_loss = -tf.reduce_sum(log_probs * one_hot_labels, axis=[-1])
numerator = tf.reduce_sum(label_weights * per_example_loss)
denominator = tf.reduce_sum(label_weights) + 1e-5
loss = numerator / denominator
return (loss, per_example_loss, log_probs)
def get_sentence_order_output(albert_config, input_tensor, labels):
"""Get loss and log probs for the next sentence prediction."""
# Simple binary classification. Note that 0 is "next sentence" and 1 is
# "random sentence". This weight matrix is not used after pre-training.
with tf.variable_scope("cls/seq_relationship"):
output_weights = tf.get_variable(
"output_weights",
shape=[2, albert_config.hidden_size],
initializer=modeling.create_initializer(albert_config.initializer_range),
)
output_bias = tf.get_variable(
"output_bias", shape=[2], initializer=tf.zeros_initializer()
)
logits = tf.matmul(input_tensor, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
log_probs = tf.nn.log_softmax(logits, axis=-1)
labels = tf.reshape(labels, [-1])
one_hot_labels = tf.one_hot(labels, depth=2, dtype=tf.float32)
per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
loss = tf.reduce_mean(per_example_loss)
return (loss, per_example_loss, log_probs)
def gather_indexes(sequence_tensor, positions):
"""Gathers the vectors at the specific positions over a minibatch."""
sequence_shape = modeling.get_shape_list(sequence_tensor, expected_rank=3)
batch_size = sequence_shape[0]
seq_length = sequence_shape[1]
width = sequence_shape[2]
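    # Offset the positions of example b by b * seq_length so that they index
    # directly into the flattened (batch_size * seq_length, width) tensor.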
flat_offsets = tf.reshape(
tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1]
)
flat_positions = tf.reshape(positions + flat_offsets, [-1])
flat_sequence_tensor = tf.reshape(sequence_tensor, [batch_size * seq_length, width])
output_tensor = tf.gather(flat_sequence_tensor, flat_positions)
return output_tensor
def input_fn_builder(
input_files, max_seq_length, max_predictions_per_seq, is_training, num_cpu_threads=4
):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
name_to_features = {
"input_ids": tf.FixedLenFeature([max_seq_length], tf.int64),
"input_mask": tf.FixedLenFeature([max_seq_length], tf.int64),
"segment_ids": tf.FixedLenFeature([max_seq_length], tf.int64),
# Note: We keep this feature name `next_sentence_labels` to be
# compatible with the original data created by lanzhzh@. However, in
# the ALBERT case it does represent sentence_order_labels.
"next_sentence_labels": tf.FixedLenFeature([1], tf.int64),
}
if FLAGS.masked_lm_budget:
name_to_features.update(
{"token_boundary": tf.FixedLenFeature([max_seq_length], tf.int64)}
)
else:
name_to_features.update(
{
"masked_lm_positions": tf.FixedLenFeature(
[max_predictions_per_seq], tf.int64
),
"masked_lm_ids": tf.FixedLenFeature(
[max_predictions_per_seq], tf.int64
),
"masked_lm_weights": tf.FixedLenFeature(
[max_predictions_per_seq], tf.float32
),
}
)
# For training, we want a lot of parallel reading and shuffling.
# For eval, we want no shuffling and parallel reading doesn't matter.
if is_training:
d = tf.data.Dataset.from_tensor_slices(tf.constant(input_files))
d = d.repeat()
d = d.shuffle(buffer_size=len(input_files))
# `cycle_length` is the number of parallel files that get read.
cycle_length = min(num_cpu_threads, len(input_files))
# `sloppy` mode means that the interleaving is not exact. This adds
# even more randomness to the training pipeline.
d = d.apply(
contrib_data.parallel_interleave(
tf.data.TFRecordDataset,
sloppy=is_training,
cycle_length=cycle_length,
)
)
d = d.shuffle(buffer_size=100)
else:
d = tf.data.TFRecordDataset(input_files)
# Since we evaluate for a fixed number of steps we don't want to encounter
# out-of-range exceptions.
d = d.repeat()
# We must `drop_remainder` on training because the TPU requires fixed
# size dimensions. For eval, we assume we are evaluating on the CPU or GPU
        # and we *don't* want to drop the remainder, otherwise we won't cover
# every sample.
d = d.apply(
tf.data.experimental.map_and_batch_with_legacy_function(
lambda record: _decode_record(record, name_to_features),
batch_size=batch_size,
num_parallel_batches=num_cpu_threads,
drop_remainder=True,
)
)
tf.logging.info(d)
return d
return input_fn
def _decode_record(record, name_to_features):
"""Decodes a record to a TensorFlow example."""
example = tf.parse_single_example(record, name_to_features)
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.to_int32(t)
example[name] = t
return example
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
if not FLAGS.do_train and not FLAGS.do_eval:
raise ValueError("At least one of `do_train` or `do_eval` must be True.")
albert_config = modeling.AlbertConfig.from_json_file(FLAGS.albert_config_file)
tf.gfile.MakeDirs(FLAGS.output_dir)
input_files = []
for input_pattern in FLAGS.input_file.split(","):
input_files.extend(tf.gfile.Glob(input_pattern))
tf.logging.info("*** Input Files ***")
for input_file in input_files:
tf.logging.info(" %s" % input_file)
tpu_cluster_resolver = None
if FLAGS.use_tpu and FLAGS.tpu_name:
tpu_cluster_resolver = contrib_cluster_resolver.TPUClusterResolver(
FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project
)
is_per_host = contrib_tpu.InputPipelineConfig.PER_HOST_V2
run_config = contrib_tpu.RunConfig(
cluster=tpu_cluster_resolver,
master=FLAGS.master,
model_dir=FLAGS.output_dir,
save_checkpoints_steps=FLAGS.save_checkpoints_steps,
keep_checkpoint_max=FLAGS.keep_checkpoint_max,
tpu_config=contrib_tpu.TPUConfig(
iterations_per_loop=FLAGS.iterations_per_loop,
num_shards=FLAGS.num_tpu_cores,
per_host_input_for_training=is_per_host,
),
)
model_fn = model_fn_builder(
albert_config=albert_config,
init_checkpoint=FLAGS.init_checkpoint,
learning_rate=FLAGS.learning_rate,
num_train_steps=FLAGS.num_train_steps,
num_warmup_steps=FLAGS.num_warmup_steps,
use_tpu=FLAGS.use_tpu,
use_one_hot_embeddings=FLAGS.use_tpu,
optimizer=FLAGS.optimizer,
poly_power=FLAGS.poly_power,
start_warmup_step=FLAGS.start_warmup_step,
)
# If TPU is not available, this will fall back to normal Estimator on CPU
# or GPU.
estimator = contrib_tpu.TPUEstimator(
use_tpu=FLAGS.use_tpu,
model_fn=model_fn,
config=run_config,
train_batch_size=FLAGS.train_batch_size,
eval_batch_size=FLAGS.eval_batch_size,
)
if FLAGS.do_train:
tf.logging.info("***** Running training *****")
tf.logging.info(" Batch size = %d", FLAGS.train_batch_size)
train_input_fn = input_fn_builder(
input_files=input_files,
max_seq_length=FLAGS.max_seq_length,
max_predictions_per_seq=FLAGS.max_predictions_per_seq,
is_training=True,
)
estimator.train(input_fn=train_input_fn, max_steps=FLAGS.num_train_steps)
if FLAGS.do_eval:
tf.logging.info("***** Running evaluation *****")
tf.logging.info(" Batch size = %d", FLAGS.eval_batch_size)
global_step = -1
output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt")
writer = tf.gfile.GFile(output_eval_file, "w")
eval_input_fn = input_fn_builder(
input_files=input_files,
max_seq_length=FLAGS.max_seq_length,
max_predictions_per_seq=FLAGS.max_predictions_per_seq,
is_training=False,
)
best_perf = 0
key_name = "masked_lm_accuracy"
while global_step < FLAGS.num_train_steps:
if estimator.latest_checkpoint() is None:
tf.logging.info("No checkpoint found yet. Sleeping.")
time.sleep(1)
else:
result = estimator.evaluate(
input_fn=eval_input_fn, steps=FLAGS.max_eval_steps
)
global_step = result["global_step"]
tf.logging.info("***** Eval results *****")
checkpoint_path = estimator.latest_checkpoint()
for key in sorted(result.keys()):
tf.logging.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
if result[key_name] > best_perf:
best_perf = result[key_name]
for ext in ["meta", "data-00000-of-00001", "index"]:
src_ckpt = checkpoint_path + ".{}".format(ext)
tgt_ckpt = checkpoint_path.rsplit("-", 1)[
0
] + "-best.{}".format(ext)
tf.logging.info(
"saving {} to {}".format(src_ckpt, tgt_ckpt)
)
tf.gfile.Copy(src_ckpt, tgt_ckpt, overwrite=True)
writer.write("saved {} to {}\n".format(src_ckpt, tgt_ckpt))
if __name__ == "__main__":
flags.mark_flag_as_required("input_file")
flags.mark_flag_as_required("albert_config_file")
flags.mark_flag_as_required("output_dir")
tf.app.run()
|
py | 7df8aff69bee5563c571738a20929d225f27529a | from openTSNE import TSNE
from ._transformer import SklearnTransformer
class OpenTsne(SklearnTransformer):
"""
    This transformer transforms all vectors in an [EmbeddingSet][whatlies.embeddingset.EmbeddingSet]
    by means of TSNE. This implementation uses
    [open-tsne](https://opentsne.readthedocs.io/en/latest/tsne_algorithm.html).
Important:
        OpenTSNE is a faster variant of TSNE but it only allows for at most 2 components.
You may also notice that it is relatively slow. This unfortunately is a fact of TSNE.
This embedding transformation might require you to manually install extra dependencies
unless you installed via either;
```
pip install whatlies[opentsne]
pip install whatlies[all]
```
Arguments:
n_components: the number of compoments to create/add
kwargs: keyword arguments passed to the OpenTsne implementation, includes things like `perplexity` [link](https://opentsne.readthedocs.io/en/latest/api/index.html)
Usage:
```python
from whatlies.language import SpacyLanguage
from whatlies.transformers import OpenTsne
words = ["prince", "princess", "nurse", "doctor", "banker", "man", "woman",
"cousin", "neice", "king", "queen", "dude", "guy", "gal", "fire",
"dog", "cat", "mouse", "red", "blue", "green", "yellow", "water",
"person", "family", "brother", "sister"]
lang = SpacyLanguage("en_core_web_md")
emb = lang[words]
emb.transform(OpenTsne(2)).plot_interactive_matrix()
```
"""
def __init__(self, n_components=2, **kwargs):
super().__init__(
TSNE, f"opentsne_{n_components}", n_components=n_components, **kwargs
)
|
py | 7df8b16fac9fd6d8ba341ee27d97c2799bf8ecf1 | """
Common functions for processing datasets
Includes extracting of FCGF features, pose computation using LMPR/RANSAC
"""
import glob
import itertools
import logging
import multiprocessing
import os
import sys
import cv2
import MinkowskiEngine as ME
import numpy as np
import open3d as o3d
from functools import partial
from sklearn.neighbors import NearestNeighbors
import torch
from tqdm import tqdm
from common.lie.numpy import SE3
import data_processing.lmpr.config
from data_processing.lmpr.descriptor.fcgf import FCGFNet
from data_processing.lmpr.checkpoints import CheckpointIO
from data_processing.lmpr.utils import load_config
_DIR = os.path.dirname(os.path.abspath(__file__))
VOXEL_SIZE = 0.025
FCGF_CHECKPOINT = os.path.join(_DIR, 'lmpr/pretrained/fcgf32_voxel25.pth')
REGBLOCK_CONFIG_PATH = os.path.join(_DIR, 'lmpr/config.yaml') # Config of LMPR pairwise registration block
REGBLOCK_CHECKPOINT = os.path.join(_DIR, 'lmpr/pretrained/pairwise_reg.pt')
# Parameters for FastGR matching
FGR_VOXEL_SIZE = 0.02
# Subfolders
CLOUDS = 'raw_data'
FEATURES = 'features'
MATCHES = 'correspondences'
PAIRWISE_POSES = 'pairwise_poses'
# Filenames
FNAME_PLY_FORMAT = 'cloud_bin_{}.ply'
FNAME_POSE_FORMAT = 'cloud_bin_{}.info.txt'
FNAME_FEAT_FORMAT = '{}_{:03d}.npz' # features file name
FNAME_CORR_FORMAT = '{}_{:03d}_{:03d}.npz' # correspondences file name
FNAME_RELPOSE_FORMAT = 'relpose_{:03d}_{:03d}.npy' # Computed pairwise pose
NUM_PROCESSES = 10 # number of threads to use for matching
METHOD_TO_FOLDER = { # Maps method, use_mutuals to folder name
('FCGF_LMPR', True): 'RegBlock',
('FCGF_RANSAC', True): 'RANSAC',
('FGR', True): 'FastGR',
}
_logger = logging.getLogger(__name__)
def create_cloud(xyz: np.ndarray):
cloud = o3d.geometry.PointCloud()
cloud.points = o3d.utility.Vector3dVector(xyz.astype(np.float64))
return cloud
def load_depth_and_convert_to_cloud(depth_fname: str, intrinsics):
depth_array = cv2.imread(depth_fname, cv2.IMREAD_ANYDEPTH)
depth_im = o3d.geometry.Image(depth_array)
pcd = o3d.geometry.PointCloud.create_from_depth_image(depth_im, intrinsics)
return pcd
def save_info(out_fname, scene, frame_no, pose):
with open(out_fname, 'w') as fid:
fid.write('{}\t{}\n'.format(scene, frame_no))
for i in range(4):
fid.write('\t'.join(map(str, pose[i, :])) + '\n')
def load_info(fname):
    with open(fname, 'r') as fid:
first_line = fid.readline()
tokens = first_line.split()
pose = np.loadtxt(fid)
info = {'scene': tokens[0],
'orig_frame': tokens[1],
'pose': pose}
return info
def create_fcgf_model():
device = torch.device('cuda')
model = FCGFNet(in_channels=1, out_channels=32,
bn_momentum=0.05,
normalize_feature=True,
conv1_kernel_size=7,
D=3)
_logger.info('Created model of type {}'.format(type(model)))
_logger.info('Loading pretrained weights from {}'.format(FCGF_CHECKPOINT))
state = torch.load(FCGF_CHECKPOINT)
model.load_state_dict(state['state_dict'])
model.to(device)
model.eval()
return model, device
def extract_fcgf(model, xyz, device):
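    # Voxel-downsample the cloud at VOXEL_SIZE, build a sparse tensor with
    # dummy all-ones features and return (downsampled xyz, per-point FCGF
    # descriptors).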
sel = ME.utils.sparse_quantize(xyz / VOXEL_SIZE, return_index=True)
xyz_down = xyz[sel, :] # Selected coordinates
feats = np.ones([xyz_down.shape[0], 1]) # dummy: just contains ones
coords = np.floor(xyz_down / VOXEL_SIZE)
coordsC, featsC = ME.utils.sparse_collate(
[torch.from_numpy(coords)],
[torch.from_numpy(feats).float()])
sinput = ME.SparseTensor(featsC, coords=coordsC).to(device)
return xyz_down, model(sinput).F
def extract_features_batch(data_path, scenes):
source_path = os.path.join(data_path, CLOUDS)
target_path = os.path.join(data_path, FEATURES)
os.makedirs(target_path, exist_ok=True)
list_file = os.path.join(target_path, 'list.txt')
f = open(list_file, 'w')
model, device = create_fcgf_model()
model.eval()
for scene in scenes:
num_ply_files = len(glob.glob(os.path.join(source_path, scene, '*.ply')))
os.makedirs(os.path.join(target_path, scene), exist_ok=True)
f.write('%s %d\n' % (scene, num_ply_files))
for i in tqdm(range(num_ply_files), leave=False):
save_fn = FNAME_FEAT_FORMAT.format(scene, i)
in_fname = os.path.join(source_path, scene, FNAME_PLY_FORMAT.format(i))
if os.path.exists(os.path.join(target_path, scene, save_fn)):
                _logger.debug('Features file already exists, moving to the next example: {} - {}'.format(
                    scene, save_fn))
else:
# Extract features from a file
pcd = o3d.io.read_point_cloud(in_fname)
xyz_down, feature = extract_fcgf(model,
xyz=np.array(pcd.points),
device=device)
np.savez_compressed(os.path.join(target_path, scene, save_fn),
points=np.array(pcd.points),
xyz=xyz_down,
feature=feature.detach().cpu().numpy())
f.close()
def extract_correspondences(data_path: str, num_correspondences: int, max_frames_apart: int, scene: str):
logging.info('Matching keypoints for {}'.format(scene))
src_folder = os.path.join(data_path, FEATURES, scene)
dst_folder = os.path.join(data_path, MATCHES, scene)
os.makedirs(os.path.join(dst_folder), exist_ok=True)
# Read all features
fnames = [f for f in os.listdir(src_folder) if f.endswith('.npz')]
num_clouds = len(fnames)
pairs = list(itertools.combinations(range(num_clouds), 2))
np.random.seed(0)
for (idx0, idx1) in pairs:
if max_frames_apart > 0 and idx1 - idx0 >= max_frames_apart:
            # We only match frames that are fewer than max_frames_apart frames apart,
# since we won't be considering graphs above this size
continue
out_path = os.path.join(dst_folder, FNAME_CORR_FORMAT.format(scene, idx0, idx1))
if os.path.exists(out_path):
logging.debug('Skipping feature matching as already exists for ' + out_path)
continue
pc_0_data = np.load(os.path.join(src_folder, FNAME_FEAT_FORMAT.format(scene, idx0)))
feat0, kp0 = pc_0_data['feature'], pc_0_data['xyz']
pc_1_data = np.load(os.path.join(src_folder, FNAME_FEAT_FORMAT.format(scene, idx1)))
feat1, kp1 = pc_1_data['feature'], pc_1_data['xyz']
        # Sample num_correspondences points (if not enough, sample with replacement as in [1])
inds0 = np.random.choice(len(kp0), num_correspondences,
replace=False if len(kp0) >= num_correspondences else True)
inds1 = np.random.choice(len(kp1), num_correspondences,
replace=False if len(kp1) >= num_correspondences else True)
kp0, feat0 = kp0[inds0], feat0[inds0]
kp1, feat1 = kp1[inds1], feat1[inds1]
# find the correspondence using nearest neighbor search in the feature space (two way)
# For every point in cloud0, find best point in cloud1
nn_search = NearestNeighbors(n_neighbors=1, metric='euclidean')
nn_search.fit(feat1)
nn_dists0, nn_indices0 = nn_search.kneighbors(X=feat0, n_neighbors=2, return_distance=True)
# For every point in cloud1, find best point in cloud0
nn_search.fit(feat0)
nn_dists1, nn_indices1 = nn_search.kneighbors(X=feat1, n_neighbors=2, return_distance=True)
# Compute mutual match
mutuals = (nn_indices0[nn_indices1[:, 0], 0] == np.arange(len(kp1))) # size = (n1,)
ratios = nn_dists0[:, 0] / nn_dists0[:, 1]
# Concatenate the correspondence coordinates
xs = np.concatenate([kp0[nn_indices1[:, 0]], kp1], axis=1) # (n0, 6)
np.savez_compressed(out_path, x=xs, mutuals=mutuals, ratios=ratios)
logging.info('Finished matching for {}'.format(scene))
def extract_correspondences_batch(data_path, num_correspondences, max_frames_apart, scenes):
pool = multiprocessing.Pool(processes=NUM_PROCESSES)
func = partial(extract_correspondences, data_path, num_correspondences, max_frames_apart)
pool.map(func, scenes)
pool.close()
pool.join()
def compute_pose_regblock(data_path, model, use_mutuals, scene):
_logger.info('Computing poses using Regblock for {}'.format(scene))
matches_folder = os.path.join(data_path, MATCHES, scene)
dst_folder = os.path.join(data_path, PAIRWISE_POSES, METHOD_TO_FOLDER[('FCGF_LMPR', use_mutuals)], scene)
os.makedirs(dst_folder, exist_ok=True)
# Compute relative poses
matches_fpaths = glob.glob(os.path.join(matches_folder, '*.npz'))
for matches_fpath in tqdm(matches_fpaths, ncols=80):
idx0 = int(matches_fpath.split('_')[-2])
idx1 = int(matches_fpath.split('_')[-1].split('.')[0])
out_fname = os.path.join(dst_folder, FNAME_RELPOSE_FORMAT.format(idx0, idx1))
if os.path.exists(out_fname):
continue
# Load correspondence file
matches_data = np.load(matches_fpath)
pts01 = matches_data['x'] if 'x' in matches_data else matches_data['correspondences']
        mutuals = matches_data['mutuals'].flatten().astype(bool)
# Forward pass through the network to compute pose
xs = torch.from_numpy(pts01).float().to(model.device)
data = {'xs': xs[None, None, mutuals, :]} if use_mutuals else \
{'xs': xs[None, None, :, :]} # use only mutuals if desired
est_data = model.filter_correspondences(data)
rot = est_data['rot_est'][-1][0, ...].cpu().numpy()
trans = est_data['trans_est'][-1][0, ...].cpu().numpy()
rel_pose = np.eye(4) # transforms from xyz0 to xyz1
rel_pose[0:3, 0:3] = rot
rel_pose[0:3, 3:4] = trans
rel_pose = SE3.from_matrix(rel_pose, normalize=True)
# Save out transformation matrix
np.save(out_fname, rel_pose.as_matrix())
def compute_pose_ransac(data_path, use_mutuals, scene):
_logger.info('Computing poses using RANSAC for {}'.format(scene))
matches_folder = os.path.join(data_path, MATCHES, scene)
dst_folder = os.path.join(data_path, PAIRWISE_POSES, METHOD_TO_FOLDER[('FCGF_RANSAC', use_mutuals)], scene)
os.makedirs(dst_folder, exist_ok=True)
# Compute relative poses
matches_fpaths = glob.glob(os.path.join(matches_folder, '*.npz'))
for matches_fpath in tqdm(matches_fpaths, ncols=80):
idx0 = int(matches_fpath.split('_')[-2])
idx1 = int(matches_fpath.split('_')[-1].split('.')[0])
out_fname = os.path.join(dst_folder, FNAME_RELPOSE_FORMAT.format(idx0, idx1))
if os.path.exists(out_fname):
continue
# Load correspondence file
matches_data = np.load(matches_fpath)
pts01 = matches_data['x'] if 'x' in matches_data else matches_data['correspondences']
        mutuals = matches_data['mutuals'].flatten().astype(bool)
# Forward pass through the network to compute pose
if use_mutuals:
pts01 = pts01[mutuals, :]
# Use Open3d's RANSAC function to compute transformation
matches = np.tile(np.arange(len(pts01))[:, None], (1, 2))
result_ransac = o3d.registration.registration_ransac_based_on_correspondence(
source=create_cloud(pts01[:, 0:3]),
target=create_cloud(pts01[:, 3:6]),
corres=o3d.utility.Vector2iVector(matches),
max_correspondence_distance=VOXEL_SIZE * 2,
estimation_method=o3d.registration.TransformationEstimationPointToPoint(False),
ransac_n=4)
rel_pose = result_ransac.transformation
# Save out transformation matrix
np.save(out_fname, rel_pose)
def compute_pose_fastgr(data_path, scene):
"""Computes relative pose using FastGR. Parameters follow that of
"Learning Transformation Synchronization"
"""
logging.info('Starting FastGR matching for {}'.format(scene))
src_folder = os.path.join(data_path, CLOUDS, scene)
dst_folder = os.path.join(data_path, PAIRWISE_POSES, METHOD_TO_FOLDER[('FGR', True)], scene)
os.makedirs(dst_folder, exist_ok=True)
voxel_size = FGR_VOXEL_SIZE
fnames = [f for f in os.listdir(src_folder) if f.endswith('.ply')]
num_clouds = len(fnames)
max_frames_apart = 30
# Load point clouds and compute normals
pcds, pcds_down, pcds_fpfh = [], [], []
for i in range(num_clouds):
pcd = o3d.io.read_point_cloud(os.path.join(src_folder, FNAME_PLY_FORMAT.format(i)))
pcd_down = pcd.voxel_down_sample(voxel_size)
pcd.estimate_normals(o3d.geometry.KDTreeSearchParamHybrid(radius=0.2, max_nn=60))
pcd_down.estimate_normals(o3d.geometry.KDTreeSearchParamHybrid(radius=voxel_size * 2, max_nn=30))
pcd_fpfh = o3d.registration.compute_fpfh_feature(
pcd_down,
o3d.geometry.KDTreeSearchParamHybrid(radius=voxel_size * 5, max_nn=100))
pcds.append(pcd)
pcds_down.append(pcd_down)
pcds_fpfh.append(pcd_fpfh)
# Perform fast global registration
pairs = list(itertools.combinations(range(num_clouds), 2))
for (idx0, idx1) in pairs:
if max_frames_apart > 0 and idx1 - idx0 >= max_frames_apart:
# We only match frames which are within a certain time apart,
# since we won't be considering graphs above this size
continue
out_fname = os.path.join(dst_folder, FNAME_RELPOSE_FORMAT.format(idx0, idx1))
if os.path.exists(out_fname):
continue
result_fast = o3d.registration.registration_fast_based_on_feature_matching(
pcds_down[idx0], pcds_down[idx1], pcds_fpfh[idx0], pcds_fpfh[idx1],
o3d.registration.FastGlobalRegistrationOption(
maximum_correspondence_distance=voxel_size * 5))
# Refine using ICP
result_icp = o3d.registration.registration_icp(pcds[idx0], pcds[idx1], voxel_size * 1.5,
result_fast.transformation,
o3d.registration.TransformationEstimationPointToPlane())
rel_pose = result_icp.transformation
# Save out transformation matrix
np.save(out_fname, rel_pose)
logging.info('Finished FastGR matching for {}'.format(scene))
def compute_pose_batch(data_path, scenes, method, use_mutuals: bool):
if method == 'FCGF_LMPR':
# Get model
cfg = load_config(REGBLOCK_CONFIG_PATH)
model = data_processing.lmpr.config.get_model(cfg)
model.eval()
# Load checkpoints
checkpoint_io = CheckpointIO('', model=model)
checkpoint_io.load(REGBLOCK_CHECKPOINT)
os.makedirs(os.path.join(data_path, PAIRWISE_POSES), exist_ok=True)
with torch.no_grad():
for scene in scenes:
compute_pose_regblock(data_path, model, use_mutuals, scene)
elif method == 'FCGF_RANSAC':
for scene in scenes:
compute_pose_ransac(data_path, use_mutuals, scene)
elif method == 'FGR':
pool = multiprocessing.Pool(processes=NUM_PROCESSES//2)
func = partial(compute_pose_fastgr, data_path)
pool.map(func, scenes)
pool.close()
pool.join()
else:
raise NotImplementedError('Invalid pose estimation method')
def generate_traj(data_path, method, use_mutuals, scene):
data_folder = os.path.join(data_path, PAIRWISE_POSES, METHOD_TO_FOLDER[(method, use_mutuals)], scene)
pose_fpaths = glob.glob(os.path.join(data_folder, '*.npy'))
out_fname = os.path.join(data_folder, 'traj.txt')
if os.path.exists(out_fname):
_logger.info('Skipping {} as already generated'.format(scene))
return
out_file = open(out_fname, 'w')
for pose_fpath in pose_fpaths:
idx0 = int(pose_fpath.split('_')[-2])
idx1 = int(pose_fpath.split('_')[-1].split('.')[0])
pose = np.load(pose_fpath)
inv_pose = SE3.from_matrix(pose).inv().as_matrix()
out_file.write('{}\t{}\tTrue\n'.format(idx0, idx1)) # We don't compute overlap, so add dummy
for row in inv_pose:
out_file.write('\t'.join(map(str, row)) + '\n')
out_file.close()
def generate_traj_batch(data_path, scenes, method, use_mutuals):
"""Generates traj.txt to compare with LMPR evaluation code"""
_logger.info('Generating traj.txt')
for scene in tqdm(scenes, ncols=80, leave=False):
generate_traj(data_path, method, use_mutuals, scene)
def compute_median_point_distance(points_i, points_j, transform_ij: SE3) -> float:
"""Computes median point distance in overlapping region. This is used in
Learn2Sync and LMPR as a heuristic of the registration quality
"""
points_i_transformed = transform_ij.transform(points_i)
tree_j = NearestNeighbors(n_neighbors=1, algorithm='kd_tree').fit(np.asarray(points_j))
distances, indices = tree_j.kneighbors(points_i_transformed) # [np, 1], [np, 1]
idx = np.where(distances < 0.2)[0]
sigma = np.inf if len(idx) == 0 else np.median(distances[idx])
return sigma
def generate_trainval_data(data_path, method, use_mutuals, scene):
_logger.info('Generating trainval data for {}'.format(scene))
cloud_folder = os.path.join(data_path, CLOUDS, scene)
meas_poses_folder = os.path.join(data_path, PAIRWISE_POSES, METHOD_TO_FOLDER[(method, use_mutuals)], scene)
result_fname = os.path.join(data_path, PAIRWISE_POSES, METHOD_TO_FOLDER[method, use_mutuals],
'{}.npz'.format(scene))
if os.path.exists(result_fname):
        _logger.info('Skipping generation of npz file for {} as it already exists.'.format(scene))
return
num_clouds = len(glob.glob(os.path.join(cloud_folder, '*.ply')))
clouds = [np.zeros([])] * num_clouds
Tstar = np.zeros((num_clouds, 4, 4))
Tij_meas = np.zeros((num_clouds, num_clouds, 4, 4))
Tij_gt = np.zeros((num_clouds, num_clouds, 4, 4))
aerr = np.full((num_clouds, num_clouds), np.inf)
terr = np.full((num_clouds, num_clouds), np.inf)
sigma = np.full((num_clouds, num_clouds), np.inf)
# Collates the groundtruth absolute poses
for i in range(num_clouds):
cloud_meta = load_info(os.path.join(cloud_folder, FNAME_POSE_FORMAT.format(i)))
Tstar[i, :, :] = cloud_meta['pose']
clouds[i] = np.asarray(
o3d.io.read_point_cloud(os.path.join(cloud_folder, FNAME_PLY_FORMAT.format(i))).points)
Tstar_se3 = SE3.from_matrix(Tstar, normalize=True)
Tstar = Tstar_se3.as_matrix()
# Collates pairwise measured poses
poses_files = glob.glob(os.path.join(meas_poses_folder, '*.npy'))
for poses_fpath in poses_files:
i = int(poses_fpath.split('_')[-2])
j = int(poses_fpath.split('_')[-1].split('.')[0])
pose_data = np.load(poses_fpath)
rel_meas = SE3.from_matrix(pose_data)
rel_gt = Tstar_se3[j].inv() * Tstar_se3[i]
meas_err = rel_gt.compare(rel_meas)
median_pt_dist = compute_median_point_distance(clouds[i], clouds[j], rel_meas)
Tij_meas[i, j] = rel_meas.as_matrix()
Tij_gt[i, j] = rel_gt.as_matrix()
aerr[i, j] = meas_err['rot_deg']
terr[i, j] = meas_err['trans']
sigma[i, j] = median_pt_dist
Tij_meas[j, i] = rel_meas.inv().as_matrix()
Tij_gt[j, i] = rel_gt.inv().as_matrix()
aerr[j, i] = meas_err['rot_deg']
terr[j, i] = meas_err['trans']
sigma[j, i] = median_pt_dist
# Output to file
np.savez(result_fname,
Tstar=Tstar, Tij_meas=Tij_meas, Tij_gt=Tij_gt, aerr=aerr, terr=terr,
sigma=sigma)
_logger.info('Done generating trainval data for {}'.format(scene))
def generate_trainval_data_batch(data_path, scenes, method, use_mutuals):
_logger.info('Generating train/val data...')
pool = multiprocessing.Pool(processes=NUM_PROCESSES)
func = partial(generate_trainval_data, data_path, method, use_mutuals)
pool.map(func, scenes)
pool.close()
pool.join()
|
py | 7df8b20a36d994aff14a8d868927d9377d4ec4ff | def kFrameUnion(input):
"""
    Calculate the union of the kDataFrames. The resulting kDataFrame will contain all the kmers in the input list of kDataFrames.
    The count of each kmer equals the sum of its counts across the input list.
.. warning:: This function works only with :class:`kProcessor.kDataFrameMQF`.
:param input: List of kDataFrames
:type input: list of :class:`kProcessor.kDataFrameMQF`
:return: New kDataFrame object holding the union of kmers in the kDataFrames list.
:rtype: :class:`kProcessor.kDataFrame`
"""
def kFrameIntersect(input):
"""
    Calculate the intersection of the kDataFrames. The resulting kDataFrame will contain only kmers that exist in all the input kDataFrames.
    The count of each kmer equals the minimum of its counts across the input list.
.. warning:: This function works only with :class:`kProcessor.kDataFrameMQF`.
:param input: List of kDataFrames
:type input: list of :class:`kProcessor.kDataFrameMQF`
:return: New kDataFrame object holding the intersection of kmers in the kDataFrames list.
:rtype: :class:`kDataFrame`
"""
def kFrameDiff(input):
"""
    Calculate the difference of the kDataFrames.
    The resulting kDataFrame will contain only kmers that exist in the first kDataFrame and in none of the remaining input kDataFrames.
    The count of each kmer equals its count in the first kDataFrame.
.. warning:: This function works only with :class:`kProcessor.kDataFrameMQF`.
:param input: List of kDataFrames
:type input: list of :class:`kProcessor.kDataFrameMQF`
:return: New kDataFrame object holding the difference of kmers in the kDataFrames list.
:rtype: :class:`kDataFrame`
"""
|
py | 7df8b212706eb83ce4ee2c09d90d392ac6f6b60b | from django.apps import AppConfig
class ProductConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'product'
verbose_name = 'محصولات'
|
py | 7df8b299bc069dc90be5a98909a0291d8e1a3831 | import pymongo
import logging
LOGGER = logging.getLogger(__name__)
class DatabaseImp(object):
#URI = "mongodb://root:password@bb:27017/"
DATABASE = None
@staticmethod
def initialize(uri):
try:
client = pymongo.MongoClient(uri)
DatabaseImp.DATABASE = client['blocks']
DatabaseImp.DATABASE["blkTxns"].insert({"block_num": 0, "transactions": []})
DatabaseImp.DATABASE["height"].insert({"height": 0})
except Exception as ex:
LOGGER.warning(ex)
@staticmethod
def insert(collection, data):
try:
DatabaseImp.DATABASE[collection].insert(data)
except Exception as ex:
LOGGER.warning(ex)
@staticmethod
def find(collection, query):
return DatabaseImp.DATABASE[collection].find(query)
@staticmethod
def find_one(collection, query):
return DatabaseImp.DATABASE[collection].find_one(query)
@staticmethod
def find_last_record(collection):
        try:
            record = DatabaseImp.DATABASE[collection].find({}).sort("_id", -1).limit(1)
            # Consume the cursor inside the try block so a failed query cannot
            # leave `record` unbound
            return record.next()
        except Exception as ex:
            LOGGER.warning(ex)
            return None
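# Usage sketch (illustrative; the URI is an assumption, the collection names come
# from initialize() above):
#   DatabaseImp.initialize("mongodb://root:password@localhost:27017/")
#   DatabaseImp.insert("blkTxns", {"block_num": 1, "transactions": []})
#   latest_height = DatabaseImp.find_last_record("height")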
|
py | 7df8b2eb29601cb85dec5196aceb6195c406c3aa | #
# @lc app=leetcode id=326 lang=python3
#
# [326] Power of Three
#
# https://leetcode.com/problems/power-of-three/description/
#
# algorithms
# Easy (41.87%)
# Likes: 367
# Dislikes: 1244
# Total Accepted: 217.1K
# Total Submissions: 518K
# Testcase Example: '27'
#
# Given an integer, write a function to determine if it is a power of three.
#
# Example 1:
#
#
# Input: 27
# Output: true
#
#
# Example 2:
#
#
# Input: 0
# Output: false
#
# Example 3:
#
#
# Input: 9
# Output: true
#
# Example 4:
#
#
# Input: 45
# Output: false
#
# Follow up:
# Could you do it without using any loop / recursion?
#
# @lc code=start
class Solution:
def isPowerOfThree(self, n: int) -> bool:
if n==0:
return False
if n==1:
return True
while n%3==0:
n = n//3
if n==1:
return True
else:
return False
# @lc code=end
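# Follow-up sketch (alternative to the loop above, not the submitted solution):
# 3**19 == 1162261467 is the largest power of three that fits in a signed 32-bit
# integer, so a positive n is a power of three exactly when it divides it evenly:
#     return n > 0 and 1162261467 % n == 0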
|
py | 7df8b6cea29416f42725598540054fec2cf40481 | from collections.abc import Iterable
from .inspection import InspectionMixin
class SerializeMixin(InspectionMixin):
"""Mixin to make model serializable."""
__abstract__ = True
    def to_dict(self, nested=False, hybrid_attributes=False, exclude=None):
        """Return dict object with model's data.
        :param nested: flag to return nested relationships' data if true
        :type nested: bool
        :param hybrid_attributes: flag to include hybrid attributes if true
        :param exclude: optional iterable of column names to omit from the result
        :return: dict
        """
        result = dict()
        if exclude is None:
            view_cols = self.columns
        else:
            view_cols = filter(lambda e: e not in exclude, self.columns)
        for key in view_cols:
result[key] = getattr(self, key)
if hybrid_attributes:
for key in self.hybrid_properties:
result[key] = getattr(self, key)
if nested:
for key in self.relations:
obj = getattr(self, key)
if isinstance(obj, SerializeMixin):
result[key] = obj.to_dict(hybrid_attributes=hybrid_attributes)
elif isinstance(obj, Iterable):
result[key] = [o.to_dict(hybrid_attributes=hybrid_attributes) for o in obj]
return result
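# Usage sketch (illustrative; `User` is a hypothetical model mixing in SerializeMixin):
#   user = session.query(User).first()
#   user.to_dict()                                     # plain column values only
#   user.to_dict(nested=True, hybrid_attributes=True)  # also serialize relations and hybrid properties
#   user.to_dict(exclude=['password'])                 # omit selected columns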
|
py | 7df8b7d4345a163ddd1a416c750450e598f2f0a7 | # Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from neutron_lib import constants
from oslo_config import cfg
from neutron.agent import securitygroups_rpc
from neutron.callbacks import events
from neutron.callbacks import registry
from neutron.extensions import portbindings
from neutron.plugins.common import constants as p_constants
from neutron.plugins.ml2 import driver_api as api
from neutron.plugins.ml2.drivers import mech_agent
from neutron.plugins.ml2.drivers.openvswitch.agent.common \
import constants as a_const
from neutron.services.qos.drivers.openvswitch import driver as ovs_qos_driver
IPTABLES_FW_DRIVER_FULL = ("neutron.agent.linux.iptables_firewall."
"OVSHybridIptablesFirewallDriver")
class OpenvswitchMechanismDriver(mech_agent.SimpleAgentMechanismDriverBase):
"""Attach to networks using openvswitch L2 agent.
The OpenvswitchMechanismDriver integrates the ml2 plugin with the
openvswitch L2 agent. Port binding with this driver requires the
openvswitch agent to be running on the port's host, and that agent
to have connectivity to at least one segment of the port's
network.
"""
def __init__(self):
sg_enabled = securitygroups_rpc.is_firewall_enabled()
hybrid_plug_required = (not cfg.CONF.SECURITYGROUP.firewall_driver or
cfg.CONF.SECURITYGROUP.firewall_driver in (
IPTABLES_FW_DRIVER_FULL, 'iptables_hybrid')) and sg_enabled
vif_details = {portbindings.CAP_PORT_FILTER: sg_enabled,
portbindings.OVS_HYBRID_PLUG: hybrid_plug_required}
super(OpenvswitchMechanismDriver, self).__init__(
constants.AGENT_TYPE_OVS,
portbindings.VIF_TYPE_OVS,
vif_details)
ovs_qos_driver.register()
def get_allowed_network_types(self, agent):
return (agent['configurations'].get('tunnel_types', []) +
[p_constants.TYPE_LOCAL, p_constants.TYPE_FLAT,
p_constants.TYPE_VLAN])
def get_mappings(self, agent):
return agent['configurations'].get('bridge_mappings', {})
def check_vlan_transparency(self, context):
"""Currently Openvswitch driver doesn't support vlan transparency."""
return False
def try_to_bind_segment_for_agent(self, context, segment, agent):
if self.check_segment_for_agent(segment, agent):
context.set_binding(segment[api.ID],
self.get_vif_type(agent, context),
self.get_vif_details(agent, context))
return True
else:
return False
def get_vif_type(self, agent, context):
caps = agent['configurations'].get('ovs_capabilities', {})
if (any(x in caps.get('iface_types', []) for x
in [a_const.OVS_DPDK_VHOST_USER,
a_const.OVS_DPDK_VHOST_USER_CLIENT]) and
agent['configurations'].get('datapath_type') ==
a_const.OVS_DATAPATH_NETDEV):
return portbindings.VIF_TYPE_VHOST_USER
return self.vif_type
def get_vhost_mode(self, iface_types):
# NOTE(sean-k-mooney): this function converts the ovs vhost user
# driver mode into the qemu vhost user mode. If OVS is the server,
# qemu is the client and vice-versa.
if (a_const.OVS_DPDK_VHOST_USER_CLIENT in iface_types):
return portbindings.VHOST_USER_MODE_SERVER
return portbindings.VHOST_USER_MODE_CLIENT
def get_vif_details(self, agent, context):
vif_details = self._pre_get_vif_details(agent, context)
self._set_bridge_name(context.current, vif_details)
return vif_details
@staticmethod
def _set_bridge_name(port, vif_details):
# REVISIT(rawlin): add BridgeName as a nullable column to the Port
# model and simply check here if it's set and insert it into the
# vif_details.
def set_bridge_name_inner(bridge_name):
vif_details[portbindings.VIF_DETAILS_BRIDGE_NAME] = bridge_name
registry.notify(
a_const.OVS_BRIDGE_NAME, events.BEFORE_READ,
set_bridge_name_inner, port=port)
def _pre_get_vif_details(self, agent, context):
a_config = agent['configurations']
vif_type = self.get_vif_type(agent, context)
if vif_type != portbindings.VIF_TYPE_VHOST_USER:
details = dict(self.vif_details)
hybrid = portbindings.OVS_HYBRID_PLUG
if hybrid in a_config:
# we only override the vif_details for hybrid plugging set
# in the constructor if the agent specifically requests it
details[hybrid] = a_config[hybrid]
return details
else:
sock_path = self.agent_vhu_sockpath(agent, context.current['id'])
caps = a_config.get('ovs_capabilities', {})
mode = self.get_vhost_mode(caps.get('iface_types', []))
return {
portbindings.CAP_PORT_FILTER: False,
portbindings.OVS_HYBRID_PLUG: False,
portbindings.VHOST_USER_MODE: mode,
portbindings.VHOST_USER_OVS_PLUG: True,
portbindings.VHOST_USER_SOCKET: sock_path
}
return self.vif_details
@staticmethod
def agent_vhu_sockpath(agent, port_id):
"""Return the agent's vhost-user socket path for a given port"""
sockdir = agent['configurations'].get('vhostuser_socket_dir',
a_const.VHOST_USER_SOCKET_DIR)
sock_name = (constants.VHOST_USER_DEVICE_PREFIX + port_id)[:14]
return os.path.join(sockdir, sock_name)
|
py | 7df8b7f336315e43a109315b63ae5b10e71fedcd | import logging
import re
from streamlink.plugin import Plugin, pluginmatcher
from streamlink.plugin.plugin import LOW_PRIORITY, parse_params
from streamlink.stream import HDSStream
from streamlink.utils import update_scheme
log = logging.getLogger(__name__)
@pluginmatcher(re.compile(
r"hds://(?P<url>\S+)(?:\s(?P<params>.+))?"
))
@pluginmatcher(priority=LOW_PRIORITY, pattern=re.compile(
r"(?P<url>\S+\.f4m(?:\?\S*)?)(?:\s(?P<params>.+))?"
))
class HDSPlugin(Plugin):
def _get_streams(self):
data = self.match.groupdict()
url = update_scheme("http://", data.get("url"))
params = parse_params(data.get("params"))
log.debug(f"URL={url}; params={params}")
return HDSStream.parse_manifest(self.session, url, **params)
__plugin__ = HDSPlugin
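# Usage sketch (URL is illustrative): the matchers above let the plugin be selected
# either explicitly via the hds:// prefix or implicitly for any .f4m manifest URL,
# with optional space-separated key=value parameters forwarded to HDSStream.parse_manifest, e.g.
#   streamlink "hds://example.com/stream/manifest.f4m" best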
|
py | 7df8b866502f4829f9da74a7e5fc06ebd62397cb | """Event loop using a selector and related classes.
A selector is a "notify-when-ready" multiplexer. For a subclass which
also includes support for signal handling, see the unix_events sub-module.
"""
__all__ = ['BaseSelectorEventLoop']
import collections
import errno
import functools
import socket
try:
import ssl
except ImportError: # pragma: no cover
ssl = None
from . import base_events
from . import constants
from . import events
from . import futures
from . import selectors
from . import transports
from .log import logger
def _test_selector_event(selector, fd, event):
# Test if the selector is monitoring 'event' events
# for the file descriptor 'fd'.
try:
key = selector.get_key(fd)
except KeyError:
return False
else:
return bool(key.events & event)
class BaseSelectorEventLoop(base_events.BaseEventLoop):
"""Selector event loop.
See events.EventLoop for API specification.
"""
def __init__(self, selector=None):
super().__init__()
if selector is None:
selector = selectors.DefaultSelector()
logger.debug('Using selector: %s', selector.__class__.__name__)
self._selector = selector
self._make_self_pipe()
def _make_socket_transport(self, sock, protocol, waiter=None, *,
extra=None, server=None):
return _SelectorSocketTransport(self, sock, protocol, waiter,
extra, server)
def _make_ssl_transport(self, rawsock, protocol, sslcontext, waiter, *,
server_side=False, server_hostname=None,
extra=None, server=None):
return _SelectorSslTransport(
self, rawsock, protocol, sslcontext, waiter,
server_side, server_hostname, extra, server)
def _make_datagram_transport(self, sock, protocol,
address=None, waiter=None, extra=None):
return _SelectorDatagramTransport(self, sock, protocol,
address, waiter, extra)
def close(self):
if self._running:
raise RuntimeError("Cannot close a running event loop")
if self.is_closed():
return
self._close_self_pipe()
super().close()
if self._selector is not None:
self._selector.close()
self._selector = None
def _socketpair(self):
raise NotImplementedError
def _close_self_pipe(self):
self.remove_reader(self._ssock.fileno())
self._ssock.close()
self._ssock = None
self._csock.close()
self._csock = None
self._internal_fds -= 1
def _make_self_pipe(self):
# A self-socket, really. :-)
self._ssock, self._csock = self._socketpair()
self._ssock.setblocking(False)
self._csock.setblocking(False)
self._internal_fds += 1
self.add_reader(self._ssock.fileno(), self._read_from_self)
def _process_self_data(self, data):
pass
def _read_from_self(self):
while True:
try:
data = self._ssock.recv(4096)
if not data:
break
self._process_self_data(data)
except InterruptedError:
continue
except BlockingIOError:
break
def _write_to_self(self):
# This may be called from a different thread, possibly after
# _close_self_pipe() has been called or even while it is
# running. Guard for self._csock being None or closed. When
# a socket is closed, send() raises OSError (with errno set to
# EBADF, but let's not rely on the exact error code).
csock = self._csock
if csock is not None:
try:
csock.send(b'\0')
except OSError:
if self._debug:
logger.debug("Fail to write a null byte into the "
"self-pipe socket",
exc_info=True)
def _start_serving(self, protocol_factory, sock,
sslcontext=None, server=None):
self.add_reader(sock.fileno(), self._accept_connection,
protocol_factory, sock, sslcontext, server)
def _accept_connection(self, protocol_factory, sock,
sslcontext=None, server=None):
try:
conn, addr = sock.accept()
if self._debug:
logger.debug("%r got a new connection from %r: %r",
server, addr, conn)
conn.setblocking(False)
except (BlockingIOError, InterruptedError, ConnectionAbortedError):
pass # False alarm.
except OSError as exc:
# There's nowhere to send the error, so just log it.
# TODO: Someone will want an error handler for this.
if exc.errno in (errno.EMFILE, errno.ENFILE,
errno.ENOBUFS, errno.ENOMEM):
                # Some platforms (e.g. Linux) keep reporting the FD as
# ready, so we remove the read handler temporarily.
# We'll try again in a while.
self.call_exception_handler({
'message': 'socket.accept() out of system resource',
'exception': exc,
'socket': sock,
})
self.remove_reader(sock.fileno())
self.call_later(constants.ACCEPT_RETRY_DELAY,
self._start_serving,
protocol_factory, sock, sslcontext, server)
else:
raise # The event loop will catch, log and ignore it.
else:
if sslcontext:
self._make_ssl_transport(
conn, protocol_factory(), sslcontext, None,
server_side=True, extra={'peername': addr}, server=server)
else:
self._make_socket_transport(
conn, protocol_factory(), extra={'peername': addr},
server=server)
# It's now up to the protocol to handle the connection.
def add_reader(self, fd, callback, *args):
"""Add a reader callback."""
self._check_closed()
handle = events.Handle(callback, args, self)
try:
key = self._selector.get_key(fd)
except KeyError:
self._selector.register(fd, selectors.EVENT_READ,
(handle, None))
else:
mask, (reader, writer) = key.events, key.data
self._selector.modify(fd, mask | selectors.EVENT_READ,
(handle, writer))
if reader is not None:
reader.cancel()
def remove_reader(self, fd):
"""Remove a reader callback."""
if self.is_closed():
return False
try:
key = self._selector.get_key(fd)
except KeyError:
return False
else:
mask, (reader, writer) = key.events, key.data
mask &= ~selectors.EVENT_READ
if not mask:
self._selector.unregister(fd)
else:
self._selector.modify(fd, mask, (None, writer))
if reader is not None:
reader.cancel()
return True
else:
return False
def add_writer(self, fd, callback, *args):
"""Add a writer callback.."""
self._check_closed()
handle = events.Handle(callback, args, self)
try:
key = self._selector.get_key(fd)
except KeyError:
self._selector.register(fd, selectors.EVENT_WRITE,
(None, handle))
else:
mask, (reader, writer) = key.events, key.data
self._selector.modify(fd, mask | selectors.EVENT_WRITE,
(reader, handle))
if writer is not None:
writer.cancel()
def remove_writer(self, fd):
"""Remove a writer callback."""
if self.is_closed():
return False
try:
key = self._selector.get_key(fd)
except KeyError:
return False
else:
mask, (reader, writer) = key.events, key.data
# Remove both writer and connector.
mask &= ~selectors.EVENT_WRITE
if not mask:
self._selector.unregister(fd)
else:
self._selector.modify(fd, mask, (reader, None))
if writer is not None:
writer.cancel()
return True
else:
return False
def sock_recv(self, sock, n):
"""Receive data from the socket.
The return value is a bytes object representing the data received.
The maximum amount of data to be received at once is specified by
nbytes.
This method is a coroutine.
"""
if self.get_debug() and sock.gettimeout() != 0:
raise ValueError("the socket must be non-blocking")
fut = futures.Future(loop=self)
self._sock_recv(fut, False, sock, n)
return fut
def _sock_recv(self, fut, registered, sock, n):
# _sock_recv() can add itself as an I/O callback if the operation can't
# be done immediately. Don't use it directly, call sock_recv().
fd = sock.fileno()
if registered:
# Remove the callback early. It should be rare that the
# selector says the fd is ready but the call still returns
# EAGAIN, and I am willing to take a hit in that case in
# order to simplify the common case.
self.remove_reader(fd)
if fut.cancelled():
return
try:
data = sock.recv(n)
except (BlockingIOError, InterruptedError):
self.add_reader(fd, self._sock_recv, fut, True, sock, n)
except Exception as exc:
fut.set_exception(exc)
else:
fut.set_result(data)
def sock_sendall(self, sock, data):
"""Send data to the socket.
The socket must be connected to a remote socket. This method continues
to send data from data until either all data has been sent or an
error occurs. None is returned on success. On error, an exception is
raised, and there is no way to determine how much data, if any, was
successfully processed by the receiving end of the connection.
This method is a coroutine.
"""
if self.get_debug() and sock.gettimeout() != 0:
raise ValueError("the socket must be non-blocking")
fut = futures.Future(loop=self)
if data:
self._sock_sendall(fut, False, sock, data)
else:
fut.set_result(None)
return fut
def _sock_sendall(self, fut, registered, sock, data):
fd = sock.fileno()
if registered:
self.remove_writer(fd)
if fut.cancelled():
return
try:
n = sock.send(data)
except (BlockingIOError, InterruptedError):
n = 0
except Exception as exc:
fut.set_exception(exc)
return
if n == len(data):
fut.set_result(None)
else:
if n:
data = data[n:]
self.add_writer(fd, self._sock_sendall, fut, True, sock, data)
def sock_connect(self, sock, address):
"""Connect to a remote socket at address.
The address must be already resolved to avoid the trap of hanging the
entire event loop when the address requires doing a DNS lookup. For
example, it must be an IP address, not an hostname, for AF_INET and
AF_INET6 address families. Use getaddrinfo() to resolve the hostname
asynchronously.
This method is a coroutine.
"""
if self.get_debug() and sock.gettimeout() != 0:
raise ValueError("the socket must be non-blocking")
fut = futures.Future(loop=self)
try:
base_events._check_resolved_address(sock, address)
except ValueError as err:
fut.set_exception(err)
else:
self._sock_connect(fut, sock, address)
return fut
def _sock_connect(self, fut, sock, address):
fd = sock.fileno()
try:
while True:
try:
sock.connect(address)
except InterruptedError:
continue
else:
break
except BlockingIOError:
fut.add_done_callback(functools.partial(self._sock_connect_done,
sock))
self.add_writer(fd, self._sock_connect_cb, fut, sock, address)
except Exception as exc:
fut.set_exception(exc)
else:
fut.set_result(None)
def _sock_connect_done(self, sock, fut):
self.remove_writer(sock.fileno())
def _sock_connect_cb(self, fut, sock, address):
if fut.cancelled():
return
try:
err = sock.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
if err != 0:
# Jump to any except clause below.
raise OSError(err, 'Connect call failed %s' % (address,))
except (BlockingIOError, InterruptedError):
# socket is still registered, the callback will be retried later
pass
except Exception as exc:
fut.set_exception(exc)
else:
fut.set_result(None)
def sock_accept(self, sock):
"""Accept a connection.
The socket must be bound to an address and listening for connections.
The return value is a pair (conn, address) where conn is a new socket
object usable to send and receive data on the connection, and address
is the address bound to the socket on the other end of the connection.
This method is a coroutine.
"""
if self.get_debug() and sock.gettimeout() != 0:
raise ValueError("the socket must be non-blocking")
fut = futures.Future(loop=self)
self._sock_accept(fut, False, sock)
return fut
def _sock_accept(self, fut, registered, sock):
fd = sock.fileno()
if registered:
self.remove_reader(fd)
if fut.cancelled():
return
try:
conn, address = sock.accept()
conn.setblocking(False)
except (BlockingIOError, InterruptedError):
self.add_reader(fd, self._sock_accept, fut, True, sock)
except Exception as exc:
fut.set_exception(exc)
else:
fut.set_result((conn, address))
def _process_events(self, event_list):
for key, mask in event_list:
fileobj, (reader, writer) = key.fileobj, key.data
if mask & selectors.EVENT_READ and reader is not None:
if reader._cancelled:
self.remove_reader(fileobj)
else:
self._add_callback(reader)
if mask & selectors.EVENT_WRITE and writer is not None:
if writer._cancelled:
self.remove_writer(fileobj)
else:
self._add_callback(writer)
def _stop_serving(self, sock):
self.remove_reader(sock.fileno())
sock.close()
class _SelectorTransport(transports._FlowControlMixin,
transports.Transport):
max_size = 256 * 1024 # Buffer size passed to recv().
_buffer_factory = bytearray # Constructs initial value for self._buffer.
def __init__(self, loop, sock, protocol, extra, server=None):
super().__init__(extra, loop)
self._extra['socket'] = sock
self._extra['sockname'] = sock.getsockname()
if 'peername' not in self._extra:
try:
self._extra['peername'] = sock.getpeername()
except socket.error:
self._extra['peername'] = None
self._sock = sock
self._sock_fd = sock.fileno()
self._protocol = protocol
self._server = server
self._buffer = self._buffer_factory()
self._conn_lost = 0 # Set when call to connection_lost scheduled.
self._closing = False # Set when close() called.
if self._server is not None:
self._server._attach()
def __repr__(self):
info = [self.__class__.__name__]
if self._sock is None:
info.append('closed')
elif self._closing:
info.append('closing')
info.append('fd=%s' % self._sock_fd)
# test if the transport was closed
if self._loop is not None:
polling = _test_selector_event(self._loop._selector,
self._sock_fd, selectors.EVENT_READ)
if polling:
info.append('read=polling')
else:
info.append('read=idle')
polling = _test_selector_event(self._loop._selector,
self._sock_fd, selectors.EVENT_WRITE)
if polling:
state = 'polling'
else:
state = 'idle'
bufsize = self.get_write_buffer_size()
info.append('write=<%s, bufsize=%s>' % (state, bufsize))
return '<%s>' % ' '.join(info)
def abort(self):
self._force_close(None)
def close(self):
if self._closing:
return
self._closing = True
self._loop.remove_reader(self._sock_fd)
if not self._buffer:
self._conn_lost += 1
self._loop.call_soon(self._call_connection_lost, None)
def _fatal_error(self, exc, message='Fatal error on transport'):
# Should be called from exception handler only.
if isinstance(exc, (BrokenPipeError, ConnectionResetError)):
if self._loop.get_debug():
logger.debug("%r: %s", self, message, exc_info=True)
else:
self._loop.call_exception_handler({
'message': message,
'exception': exc,
'transport': self,
'protocol': self._protocol,
})
self._force_close(exc)
def _force_close(self, exc):
if self._conn_lost:
return
if self._buffer:
self._buffer.clear()
self._loop.remove_writer(self._sock_fd)
if not self._closing:
self._closing = True
self._loop.remove_reader(self._sock_fd)
self._conn_lost += 1
self._loop.call_soon(self._call_connection_lost, exc)
def _call_connection_lost(self, exc):
try:
self._protocol.connection_lost(exc)
finally:
self._sock.close()
self._sock = None
self._protocol = None
self._loop = None
server = self._server
if server is not None:
server._detach()
self._server = None
def get_write_buffer_size(self):
return len(self._buffer)
class _SelectorSocketTransport(_SelectorTransport):
def __init__(self, loop, sock, protocol, waiter=None,
extra=None, server=None):
super().__init__(loop, sock, protocol, extra, server)
self._eof = False
self._paused = False
self._loop.add_reader(self._sock_fd, self._read_ready)
self._loop.call_soon(self._protocol.connection_made, self)
if waiter is not None:
# wait until protocol.connection_made() has been called
self._loop.call_soon(waiter._set_result_unless_cancelled, None)
def pause_reading(self):
if self._closing:
raise RuntimeError('Cannot pause_reading() when closing')
if self._paused:
raise RuntimeError('Already paused')
self._paused = True
self._loop.remove_reader(self._sock_fd)
if self._loop.get_debug():
logger.debug("%r pauses reading", self)
def resume_reading(self):
if not self._paused:
raise RuntimeError('Not paused')
self._paused = False
if self._closing:
return
self._loop.add_reader(self._sock_fd, self._read_ready)
if self._loop.get_debug():
logger.debug("%r resumes reading", self)
def _read_ready(self):
try:
data = self._sock.recv(self.max_size)
except (BlockingIOError, InterruptedError):
pass
except Exception as exc:
self._fatal_error(exc, 'Fatal read error on socket transport')
else:
if data:
self._protocol.data_received(data)
else:
if self._loop.get_debug():
logger.debug("%r received EOF", self)
keep_open = self._protocol.eof_received()
if keep_open:
# We're keeping the connection open so the
# protocol can write more, but we still can't
# receive more, so remove the reader callback.
self._loop.remove_reader(self._sock_fd)
else:
self.close()
def write(self, data):
if not isinstance(data, (bytes, bytearray, memoryview)):
raise TypeError('data argument must be byte-ish (%r)',
type(data))
if self._eof:
raise RuntimeError('Cannot call write() after write_eof()')
if not data:
return
if self._conn_lost:
if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES:
logger.warning('socket.send() raised exception.')
self._conn_lost += 1
return
if not self._buffer:
# Optimization: try to send now.
try:
n = self._sock.send(data)
except (BlockingIOError, InterruptedError):
pass
except Exception as exc:
self._fatal_error(exc, 'Fatal write error on socket transport')
return
else:
data = data[n:]
if not data:
return
# Not all was written; register write handler.
self._loop.add_writer(self._sock_fd, self._write_ready)
# Add it to the buffer.
self._buffer.extend(data)
self._maybe_pause_protocol()
def _write_ready(self):
assert self._buffer, 'Data should not be empty'
try:
n = self._sock.send(self._buffer)
except (BlockingIOError, InterruptedError):
pass
except Exception as exc:
self._loop.remove_writer(self._sock_fd)
self._buffer.clear()
self._fatal_error(exc, 'Fatal write error on socket transport')
else:
if n:
del self._buffer[:n]
self._maybe_resume_protocol() # May append to buffer.
if not self._buffer:
self._loop.remove_writer(self._sock_fd)
if self._closing:
self._call_connection_lost(None)
elif self._eof:
self._sock.shutdown(socket.SHUT_WR)
def write_eof(self):
if self._eof:
return
self._eof = True
if not self._buffer:
self._sock.shutdown(socket.SHUT_WR)
def can_write_eof(self):
return True
class _SelectorSslTransport(_SelectorTransport):
_buffer_factory = bytearray
def __init__(self, loop, rawsock, protocol, sslcontext, waiter=None,
server_side=False, server_hostname=None,
extra=None, server=None):
if ssl is None:
raise RuntimeError('stdlib ssl module not available')
if server_side:
if not sslcontext:
raise ValueError('Server side ssl needs a valid SSLContext')
else:
if not sslcontext:
# Client side may pass ssl=True to use a default
# context; in that case the sslcontext passed is None.
# The default is secure for client connections.
if hasattr(ssl, 'create_default_context'):
# Python 3.4+: use up-to-date strong settings.
sslcontext = ssl.create_default_context()
if not server_hostname:
sslcontext.check_hostname = False
else:
# Fallback for Python 3.3.
sslcontext = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
sslcontext.options |= ssl.OP_NO_SSLv2
sslcontext.options |= ssl.OP_NO_SSLv3
sslcontext.set_default_verify_paths()
sslcontext.verify_mode = ssl.CERT_REQUIRED
wrap_kwargs = {
'server_side': server_side,
'do_handshake_on_connect': False,
}
if server_hostname and not server_side:
wrap_kwargs['server_hostname'] = server_hostname
sslsock = sslcontext.wrap_socket(rawsock, **wrap_kwargs)
super().__init__(loop, sslsock, protocol, extra, server)
self._server_hostname = server_hostname
self._waiter = waiter
self._sslcontext = sslcontext
self._paused = False
# SSL-specific extra info. (peercert is set later)
self._extra.update(sslcontext=sslcontext)
if self._loop.get_debug():
logger.debug("%r starts SSL handshake", self)
start_time = self._loop.time()
else:
start_time = None
self._on_handshake(start_time)
def _on_handshake(self, start_time):
try:
self._sock.do_handshake()
except ssl.SSLWantReadError:
self._loop.add_reader(self._sock_fd,
self._on_handshake, start_time)
return
except ssl.SSLWantWriteError:
self._loop.add_writer(self._sock_fd,
self._on_handshake, start_time)
return
except BaseException as exc:
if self._loop.get_debug():
logger.warning("%r: SSL handshake failed",
self, exc_info=True)
self._loop.remove_reader(self._sock_fd)
self._loop.remove_writer(self._sock_fd)
self._sock.close()
if self._waiter is not None:
self._waiter.set_exception(exc)
if isinstance(exc, Exception):
return
else:
raise
self._loop.remove_reader(self._sock_fd)
self._loop.remove_writer(self._sock_fd)
peercert = self._sock.getpeercert()
if not hasattr(self._sslcontext, 'check_hostname'):
# Verify hostname if requested, Python 3.4+ uses check_hostname
# and checks the hostname in do_handshake()
if (self._server_hostname and
self._sslcontext.verify_mode != ssl.CERT_NONE):
try:
ssl.match_hostname(peercert, self._server_hostname)
except Exception as exc:
if self._loop.get_debug():
logger.warning("%r: SSL handshake failed "
"on matching the hostname",
self, exc_info=True)
self._sock.close()
if self._waiter is not None:
self._waiter.set_exception(exc)
return
# Add extra info that becomes available after handshake.
self._extra.update(peercert=peercert,
cipher=self._sock.cipher(),
compression=self._sock.compression(),
)
self._read_wants_write = False
self._write_wants_read = False
self._loop.add_reader(self._sock_fd, self._read_ready)
self._loop.call_soon(self._protocol.connection_made, self)
if self._waiter is not None:
# wait until protocol.connection_made() has been called
self._loop.call_soon(self._waiter._set_result_unless_cancelled,
None)
if self._loop.get_debug():
dt = self._loop.time() - start_time
logger.debug("%r: SSL handshake took %.1f ms", self, dt * 1e3)
def pause_reading(self):
# XXX This is a bit icky, given the comment at the top of
# _read_ready(). Is it possible to evoke a deadlock? I don't
# know, although it doesn't look like it; write() will still
# accept more data for the buffer and eventually the app will
# call resume_reading() again, and things will flow again.
if self._closing:
raise RuntimeError('Cannot pause_reading() when closing')
if self._paused:
raise RuntimeError('Already paused')
self._paused = True
self._loop.remove_reader(self._sock_fd)
if self._loop.get_debug():
logger.debug("%r pauses reading", self)
def resume_reading(self):
if not self._paused:
raise RuntimeError('Not paused')
self._paused = False
if self._closing:
return
self._loop.add_reader(self._sock_fd, self._read_ready)
if self._loop.get_debug():
logger.debug("%r resumes reading", self)
def _read_ready(self):
if self._write_wants_read:
self._write_wants_read = False
self._write_ready()
if self._buffer:
self._loop.add_writer(self._sock_fd, self._write_ready)
try:
data = self._sock.recv(self.max_size)
except (BlockingIOError, InterruptedError, ssl.SSLWantReadError):
pass
except ssl.SSLWantWriteError:
self._read_wants_write = True
self._loop.remove_reader(self._sock_fd)
self._loop.add_writer(self._sock_fd, self._write_ready)
except Exception as exc:
self._fatal_error(exc, 'Fatal read error on SSL transport')
else:
if data:
self._protocol.data_received(data)
else:
try:
if self._loop.get_debug():
logger.debug("%r received EOF", self)
keep_open = self._protocol.eof_received()
if keep_open:
logger.warning('returning true from eof_received() '
'has no effect when using ssl')
finally:
self.close()
def _write_ready(self):
if self._read_wants_write:
self._read_wants_write = False
self._read_ready()
if not (self._paused or self._closing):
self._loop.add_reader(self._sock_fd, self._read_ready)
if self._buffer:
try:
n = self._sock.send(self._buffer)
except (BlockingIOError, InterruptedError, ssl.SSLWantWriteError):
n = 0
except ssl.SSLWantReadError:
n = 0
self._loop.remove_writer(self._sock_fd)
self._write_wants_read = True
except Exception as exc:
self._loop.remove_writer(self._sock_fd)
self._buffer.clear()
self._fatal_error(exc, 'Fatal write error on SSL transport')
return
if n:
del self._buffer[:n]
self._maybe_resume_protocol() # May append to buffer.
if not self._buffer:
self._loop.remove_writer(self._sock_fd)
if self._closing:
self._call_connection_lost(None)
def write(self, data):
if not isinstance(data, (bytes, bytearray, memoryview)):
raise TypeError('data argument must be byte-ish (%r)',
type(data))
if not data:
return
if self._conn_lost:
if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES:
logger.warning('socket.send() raised exception.')
self._conn_lost += 1
return
if not self._buffer:
self._loop.add_writer(self._sock_fd, self._write_ready)
# Add it to the buffer.
self._buffer.extend(data)
self._maybe_pause_protocol()
def can_write_eof(self):
return False
class _SelectorDatagramTransport(_SelectorTransport):
_buffer_factory = collections.deque
def __init__(self, loop, sock, protocol, address=None,
waiter=None, extra=None):
super().__init__(loop, sock, protocol, extra)
self._address = address
self._loop.add_reader(self._sock_fd, self._read_ready)
self._loop.call_soon(self._protocol.connection_made, self)
if waiter is not None:
# wait until protocol.connection_made() has been called
self._loop.call_soon(waiter._set_result_unless_cancelled, None)
def get_write_buffer_size(self):
return sum(len(data) for data, _ in self._buffer)
def _read_ready(self):
try:
data, addr = self._sock.recvfrom(self.max_size)
except (BlockingIOError, InterruptedError):
pass
except OSError as exc:
self._protocol.error_received(exc)
except Exception as exc:
self._fatal_error(exc, 'Fatal read error on datagram transport')
else:
self._protocol.datagram_received(data, addr)
def sendto(self, data, addr=None):
if not isinstance(data, (bytes, bytearray, memoryview)):
raise TypeError('data argument must be byte-ish (%r)',
type(data))
if not data:
return
if self._address and addr not in (None, self._address):
raise ValueError('Invalid address: must be None or %s' %
(self._address,))
if self._conn_lost and self._address:
if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES:
logger.warning('socket.send() raised exception.')
self._conn_lost += 1
return
if not self._buffer:
# Attempt to send it right away first.
try:
if self._address:
self._sock.send(data)
else:
self._sock.sendto(data, addr)
return
except (BlockingIOError, InterruptedError):
self._loop.add_writer(self._sock_fd, self._sendto_ready)
except OSError as exc:
self._protocol.error_received(exc)
return
except Exception as exc:
self._fatal_error(exc,
'Fatal write error on datagram transport')
return
# Ensure that what we buffer is immutable.
self._buffer.append((bytes(data), addr))
self._maybe_pause_protocol()
def _sendto_ready(self):
while self._buffer:
data, addr = self._buffer.popleft()
try:
if self._address:
self._sock.send(data)
else:
self._sock.sendto(data, addr)
except (BlockingIOError, InterruptedError):
self._buffer.appendleft((data, addr)) # Try again later.
break
except OSError as exc:
self._protocol.error_received(exc)
return
except Exception as exc:
self._fatal_error(exc,
'Fatal write error on datagram transport')
return
self._maybe_resume_protocol() # May append to buffer.
if not self._buffer:
self._loop.remove_writer(self._sock_fd)
if self._closing:
self._call_connection_lost(None)
|
py | 7df8b8b7c2f2af4408fb484848b3ce3a87d4683c | """
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type : Separation_Auditing
Case Name   : Disable auditing of CREATE, DROP and ALTER operations on
              TEXT SEARCH database objects, audit_system_object=65535
Description :
    1. Set gs_guc reload -N all -I all -c "audit_system_object=65535"
    2. Log in to the database and create a TEXT SEARCH object
    3. Alter the TEXT SEARCH object
    4. Drop the TEXT SEARCH object
    5. Log in to the database and check the audit log with
       SELECT * FROM pg_query_audit('$start_time', '$end_time');
       choosing a time window as close as possible to the database login time
Expect      :
    1. The parameter is set successfully
    2. Creation succeeds
    3. Alteration succeeds
    4. Drop succeeds
    5. No CREATE/DROP TEXT SEARCH audit records are found
History :
"""
import time
import unittest
from yat.test import Node
from yat.test import macro
from testcase.utils.Common import Common
from testcase.utils.CommonSH import CommonSH
from testcase.utils.Logger import Logger
class Security(unittest.TestCase):
def setUp(self):
self.logger = Logger()
self.logger.info(
'====Opengauss_Function_Security_Auditing_Case0066 start====')
self.common = Common()
self.sh_primy = CommonSH('PrimaryDbUser')
self.userNode = Node('PrimaryDbUser')
self.DB_ENV_PATH = macro.DB_ENV_PATH
def test_policy(self):
        self.logger.info('---------Set audit_system_object=65535-------')
excute_cmd0 = f'source {self.DB_ENV_PATH};' \
f'gs_guc reload -N all -I all -c ' \
f'"audit_system_object=65535"'
msg0 = self.userNode.sh(excute_cmd0).result()
self.logger.info(msg0)
        self.logger.info('--------------Create TEXT SEARCH object---------------')
start_time_msg = self.sh_primy.execut_db_sql("SELECT sysdate;")
start_time = start_time_msg.splitlines()[2].strip()
time.sleep(3)
sql_cmd1 = 'CREATE TEXT SEARCH CONFIGURATION ngram2 (parser=ngram) ' \
'WITH (gram_size = 2,grapsymbol_ignore = false);' \
'ALTER TEXT SEARCH CONFIGURATION ngram2 ADD MAPPING FOR ' \
'multisymbol WITH simple;' \
'DROP TEXT SEARCH CONFIGURATION ngram2;'
msg2 = self.sh_primy.execut_db_sql(sql_cmd1)
self.logger.info(msg2)
self.common.equal_sql_mdg(msg2, 'CREATE TEXT SEARCH CONFIGURATION',
'ALTER TEXT SEARCH CONFIGURATION',
'DROP TEXT SEARCH CONFIGURATION')
time.sleep(3)
end_time_msg = self.sh_primy.execut_db_sql('SELECT sysdate;')
end_time = end_time_msg.splitlines()[2].strip()
sql_cmd2 = f'select * from pg_query_audit(\'{start_time}\',\
\'{end_time}\');'
msg2 = self.sh_primy.execut_db_sql(sql_cmd2)
self.logger.info(msg2)
self.assertFalse(msg2.find('CREATE TEXT SEARCH CONFIGURATION ngram2 '
'(parser=ngram) WITH (gram_size = 2, '
'grapsymbol_ignore = false)') > -1)
self.assertFalse(msg2.find('ALTER TEXT SEARCH CONFIGURATION ngram2 '
'ADD MAPPING FOR multisymbol WITH') > -1)
self.assertFalse(
msg2.find('DROP TEXT SEARCH CONFIGURATION ngram2') > -1)
def tearDown(self):
        self.logger.info('-----------Restore configuration-----------')
excute_cmd1 = f'source {self.DB_ENV_PATH};' \
f'gs_guc reload -N all -I all -c ' \
f'"audit_system_object=12295"'
msg1 = self.userNode.sh(excute_cmd1).result()
self.logger.info(msg1)
self.logger.info(
'====Opengauss_Function_Security_Auditing_Case0066 finish====')
|
py | 7df8b9108bd79ddf0b53ec9f7ba8bf4bb6e5bdee | # -*- coding: utf-8 -*-
from __future__ import print_function
import io
import random
import sys
try:
import cPickle as pickle
except ImportError:
import pickle
from passage.preprocessing import Tokenizer
from passage.layers import Embedding, GatedRecurrent, Dense
from passage.models import RNN
from passage.utils import save, load
random.seed(0)
textfile, labelfile, embedding_size, gru_size, num_epochs = sys.argv[1:]
#textfile = 'cwi_inputs.txt'
#labelfile = 'cwi_labels.txt'
train_text, train_labels = [], []
with io.open(textfile, 'r', encoding='utf8') as txtfin, \
io.open(labelfile, 'r') as labelfin:
for text, label in zip(txtfin, labelfin):
train_text.append(text.strip())
train_labels.append(int(label.strip()))
tokenizer = Tokenizer()
train_tokens = tokenizer.fit_transform(train_text)
layers = [Embedding(size=int(embedding_size), n_features=tokenizer.n_features),
          GatedRecurrent(size=int(gru_size)),
Dense(size=1, activation='sigmoid')]
model = RNN(layers=layers, cost='BinaryCrossEntropy')
model.fit(train_tokens, train_labels, n_epochs=int(num_epochs))
modelfile_name = 'stubborn_model.gridsearch.embedding{}.gru{}.epoch{}'.format(embedding_size, gru_size, num_epochs)
save(model, modelfile_name+ '.pkl')
pickle.dump(tokenizer, open(textfile + '-tokenizer.pkl', 'wb'))
|
py | 7df8b91dad762b43f7d4074c20df58b78cc4113e | from django.contrib import admin
from pradnya import models
# Register your models here.
admin.site.register(models.Questions)
admin.site.register(models.user)
admin.site.register(models.submissions)
|
py | 7df8b96c01043d85f90be9e6a694f4843dc52709 | """Feature engineers the NYC taxi dataset."""
import glob
import logging
import os
import subprocess
import sys
subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'sagemaker'])
from zipfile import ZipFile
# from time import gmtime, strftime
import socket
import shutil
import json
import time
import argparse
import boto3
import uuid
# Install geopandas dependency before including pandas
subprocess.check_call([sys.executable, "-m", "pip", "install", "geopandas==0.9.0"])
import pandas as pd # noqa: E402
import geopandas as gpd # noqa: E402
from sklearn.model_selection import train_test_split # noqa: E402
import sagemaker
logger = logging.getLogger()
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler())
def parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser()
parser.add_argument('--base_dir', type=str, default="/opt/ml/processing")
args, _ = parser.parse_known_args()
return args
def extract_zones(zones_file: str, zones_dir: str):
logger.info(f"Extracting zone file: {zones_file}")
with ZipFile(zones_file, "r") as zip:
zip.extractall(zones_dir)
def load_zones(zones_dir: str):
logging.info(f"Loading zones from {zones_dir}")
# Load the shape file and get the geometry and lat/lon
zone_df = gpd.read_file(os.path.join(zones_dir, "taxi_zones.shp"))
    # Project centroids to EPSG:3310 (a metric CRS) so distances can be measured in metres
    zone_df["centroid"] = zone_df.geometry.centroid.to_crs(epsg=3310)
    # Convert coordinates to the WGS84 lat/long CRS, which has EPSG code 4326 (x is longitude, y is latitude)
    zone_df["longitude"] = zone_df.centroid.to_crs(epsg=4326).x
    zone_df["latitude"] = zone_df.centroid.to_crs(epsg=4326).y
return zone_df
def load_data(file_list: list):
# Define dates, and columns to use
use_cols = [
"fare_amount",
"lpep_pickup_datetime",
"lpep_dropoff_datetime",
"passenger_count",
"PULocationID",
"DOLocationID",
]
# Concat input files with select columns
dfs = []
for file in file_list:
dfs.append(pd.read_csv(file, usecols=use_cols))
return pd.concat(dfs, ignore_index=True)
def enrich_data(trip_df: pd.DataFrame, zone_df: pd.DataFrame):
# Join trip DF to zones for poth pickup and drop off locations
trip_df = gpd.GeoDataFrame(
trip_df.join(zone_df, on="PULocationID").join(
zone_df, on="DOLocationID", rsuffix="_DO", lsuffix="_PU"
)
)
trip_df["geo_distance"] = (
trip_df["centroid_PU"].distance(trip_df["centroid_DO"]) / 1000
)
# Add date parts
trip_df["lpep_pickup_datetime"] = pd.to_datetime(trip_df["lpep_pickup_datetime"])
trip_df["hour"] = trip_df["lpep_pickup_datetime"].dt.hour
trip_df["weekday"] = trip_df["lpep_pickup_datetime"].dt.weekday
trip_df["month"] = trip_df["lpep_pickup_datetime"].dt.month
# Get calculated duration in minutes
trip_df["lpep_dropoff_datetime"] = pd.to_datetime(trip_df["lpep_dropoff_datetime"])
trip_df["duration_minutes"] = (
trip_df["lpep_dropoff_datetime"] - trip_df["lpep_pickup_datetime"]
).dt.seconds / 60
# Rename and filter cols
trip_df = trip_df.rename(
columns={
"latitude_PU": "pickup_latitude",
"longitude_PU": "pickup_longitude",
"latitude_DO": "dropoff_latitude",
"longitude_DO": "dropoff_longitude",
}
)
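    # Add a record identifier and event-time column, presumably for use as a
    # SageMaker Feature Group (FS_ID as the record id, FS_time as the event time).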
trip_df['FS_ID'] = trip_df.index + 1000
current_time_sec = int(round(time.time()))
trip_df["FS_time"] = pd.Series([current_time_sec]*len(trip_df), dtype="float64")
return trip_df
def clean_data(trip_df: pd.DataFrame):
# Remove outliers
trip_df = trip_df[
(trip_df.fare_amount > 0)
& (trip_df.fare_amount < 200)
& (trip_df.passenger_count > 0)
& (trip_df.duration_minutes > 0)
& (trip_df.duration_minutes < 120)
& (trip_df.geo_distance > 0)
& (trip_df.geo_distance < 121)
].dropna()
# Filter columns
cols = [
"fare_amount",
"passenger_count",
"pickup_latitude",
"pickup_longitude",
"dropoff_latitude",
"dropoff_longitude",
"geo_distance",
"hour",
"weekday",
"month",
]
cols_fg = [
"fare_amount",
"passenger_count",
"pickup_latitude",
"pickup_longitude",
"dropoff_latitude",
"dropoff_longitude",
"geo_distance",
"hour",
"weekday",
"month",
"FS_ID",
"FS_time"
]
return trip_df[cols], trip_df[cols_fg]
def save_files(base_dir: str, data_df: pd.DataFrame, data_fg: pd.DataFrame,
val_size=0.2, test_size=0.05, current_host=None):
logger.info(f"Splitting {len(data_df)} rows of data into train, val, test.")
train_df, val_df = train_test_split(data_df, test_size=val_size, random_state=42)
val_df, test_df = train_test_split(val_df, test_size=test_size, random_state=42)
logger.info(f"Writing out datasets to {base_dir}")
tmp_id = uuid.uuid4().hex[:8]
train_df.to_csv(f"{base_dir}/train/train_{current_host}_{tmp_id}.csv", header=False, index=False)
val_df.to_csv(f"{base_dir}/validation/validation_{current_host}_{tmp_id}.csv", header=False, index=False)
# Save test data without header
test_df.to_csv(f"{base_dir}/test/test_{current_host}_{tmp_id}.csv", header=False, index=False)
return
def _read_json(path): # type: (str) -> dict
"""Read a JSON file.
Args:
path (str): Path to the file.
Returns:
(dict[object, object]): A dictionary representation of the JSON file.
"""
with open(path, "r") as f:
return json.load(f)
def main(base_dir: str, args: argparse.Namespace):
# Input data files
input_dir = os.path.join(base_dir, "input/data")
input_file_list = glob.glob(f"{input_dir}/*.csv")
logger.info(f"Input file list: {input_file_list}")
hosts = _read_json("/opt/ml/config/resourceconfig.json")
logger.info(hosts)
current_host = hosts["current_host"]
logger.info(current_host)
if len(input_file_list) == 0:
raise Exception(f"No input files found in {input_dir}")
# Input zones file
zones_dir = os.path.join(base_dir, "input/zones")
zones_file = os.path.join(zones_dir, "taxi_zones.zip")
if not os.path.exists(zones_file):
raise Exception(f"Zones file {zones_file} does not exist")
# Extract and load taxi zones geopandas dataframe
extract_zones(zones_file, zones_dir)
zone_df = load_zones(zones_dir)
# Load input files
data_df = load_data(input_file_list)
data_df = enrich_data(data_df, zone_df)
data_df, data_fg = clean_data(data_df)
return save_files(base_dir, data_df, data_fg, current_host=current_host)
if __name__ == "__main__":
logger.info("Starting preprocessing.")
args = parse_args()
base_dir = args.base_dir
main(base_dir, args)
logger.info("Done") |
py | 7df8b976dc539772638ff8e29fe4b30167f3ac7b | #!/usr/bin/python
# -*- coding: utf-8 -*-
##############################################################################
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this
# software and associated documentation files (the "Software"), to deal in the Software
# without restriction, including without limitation the rights to use, copy, modify,
# merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
##############################################################################
import calendar
import json
import logging
import os
import sys
import time
import traceback
from datetime import datetime
import boto3
def lambda_handler(event, context):
"""
    This function is triggered by an S3 event when a manifest file
    for a new product update is put in the ManifestBucket.
"""
try:
global log_level
log_level = str(os.environ.get("LOG_LEVEL")).upper()
valid_log_levels = ["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"]
if log_level not in valid_log_levels:
log_level = "ERROR"
logging.getLogger().setLevel(log_level)
STATE_MACHINE_ARN = os.environ["STATE_MACHINE_ARN"]
assets_per_revision = int(os.environ.get("ASSETS_PER_REVISION", "10000"))
logging.debug(f"{event=}")
bucket = event["Records"][0]["s3"]["bucket"]["name"]
key = event["Records"][0]["s3"]["object"]["key"]
logging.info(f"validating the manifest file from s3://{bucket}/{key}")
s3 = boto3.client("s3")
obj = s3.get_object(Bucket=bucket, Key=key)
manifest_dict_flat = json.loads(obj["Body"].read())
product_id = manifest_dict_flat["product_id"]
dataset_id = manifest_dict_flat["dataset_id"]
        initial_asset_list = manifest_dict_flat["asset_list"]
        asset_list = []
        try:
            for entry in initial_asset_list:
asset_bucket = entry["Bucket"]
prefix = entry["Key"]
if prefix.endswith("/"):
paginator = s3.get_paginator("list_objects_v2")
response_iterator = paginator.paginate(
Bucket=asset_bucket,
Prefix=prefix,
PaginationConfig={"PageSize": 1000},
)
for page in response_iterator:
logging.info(f"Finding keys in prefix={prefix} and page={page}")
if "Contents" not in page:
raise ValueError(
"Failed - no resources found in the prefix"
)
files = page["Contents"]
for file in files:
if file["Size"] != 0:
logging.info(file["Key"])
asset_list.append(
{"Bucket": asset_bucket, "Key": file["Key"]}
)
logging.info(f"Adding key to manifest: {file['Key']}")
else:
asset_list.append({"Bucket": asset_bucket, "Key": prefix})
except Exception as error:
logging.error(f"lambda_handler error: {error}")
logging.error(f"lambda_handler trace: {traceback.format_exc()}")
result = {"Error": f"{error=}"}
return json.dumps(result)
# Update ends
num_assets = len(asset_list)
if not product_id or not dataset_id or not asset_list:
error_message = (
"Invalid manifest file; missing required fields from manifest file: product_id, "
"dataset_id, asset_list "
)
logging.error(error_message)
sys.exit(error_message)
logging.debug(
f"{bucket=}\n{key=}\n{product_id=}\n{dataset_id=}\n{num_assets=}\n{assets_per_revision=}"
)
asset_list_nested = []
logging.info(
"chunk into lists of 10k assets to account for ADX limit of 10k assets per revision"
)
asset_lists_10k = [
asset_list[i: i + assets_per_revision]
for i in range(0, len(asset_list), assets_per_revision)
]
for revision_index, assets_10k in enumerate(asset_lists_10k):
logging.info(
"chunk into lists of 100 assets to account for ADX limit of 100 assets per job"
)
asset_lists_100 = [
assets_10k[i: i + 100] for i in range(0, len(assets_10k), 100)
]
asset_list_nested.append(asset_lists_100)
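        # Example: with assets_per_revision=10000, 25,050 assets become 3 revisions;
        # the first two each hold 100 job-chunks of 100 assets, and the last holds
        # 50 chunks of 100 plus one chunk of 50.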
nested_manifest_file_key = key.split(".")[0] + ".manifest"
manifest_dict = {
"product_id": product_id,
"dataset_id": dataset_id,
"asset_list_nested": asset_list_nested,
}
s3 = boto3.client("s3")
data = json.dumps(manifest_dict).encode("utf-8")
response = s3.put_object(Body=data, Bucket=bucket, Key=nested_manifest_file_key)
EXECUTION_NAME = f"Execution-ADX-PublishingWorkflow-SFN@{str(calendar.timegm(time.gmtime()))}"
INPUT = json.dumps({"Bucket": bucket, "Key": nested_manifest_file_key})
sfn = boto3.client("stepfunctions")
logging.debug(f"{EXECUTION_NAME=}")
sfn_response = sfn.start_execution(
stateMachineArn=STATE_MACHINE_ARN, name=EXECUTION_NAME, input=INPUT
)
logging.debug(f"{INPUT=}")
logging.debug(f"{sfn_response=}")
metrics = {
"Version": os.getenv("Version"),
"TimeStamp": datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S.%f"),
"Bucket": bucket,
"Key": nested_manifest_file_key,
"StateMachineARN": STATE_MACHINE_ARN,
"ExecutionName": EXECUTION_NAME,
}
logging.info(f"Metrics:{metrics}")
except Exception as error:
logging.error(f"lambda_handler error: {error}")
logging.error(f"lambda_handler trace: {traceback.format_exc()}")
result = {"Error": f"{error=}"}
return json.dumps(result)
return {"Message": "State machine started"}
|
py | 7df8ba4e1eef98cfdb58f46fb906185bffc31ab6 | #!/usr/bin/env python2.7
import sys
from lib import shellhelpers as shell
def _locate_ohai():
return 'ohai'
if __name__ == '__main__':
# this is a workaround since we use run-remote and it
# passes missing command as None in argv.
command = ([_locate_ohai()] + [i for i in sys.argv[1:] if i != 'None'])
sys.exit(shell.shell_out(command))
|
py | 7df8bc1468c8f0001f34371d7de067cb65693189 | #!/usr/bin/env python3
import argparse
import hashlib
import os
import shutil
import sys
import tempfile
sys.path.append(
os.path.abspath(os.path.dirname(os.path.abspath(__file__)) + "/../.."))
from lib.util import download, rm_rf, store_artifact, safe_mkdir
DIST_URL = 'https://electronjs.org/headers/'
def main():
args = parse_args()
dist_url = args.dist_url
if dist_url[-1] != "/":
dist_url += "/"
url = dist_url + args.version + '/'
directory, files = download_files(url, get_files_list(args.version))
checksums = [
create_checksum('sha1', directory, 'SHASUMS.txt', files),
create_checksum('sha256', directory, 'SHASUMS256.txt', files)
]
if args.target_dir is None:
store_artifact(directory, 'atom-shell/dist/{0}'.format(args.version),
checksums)
else:
copy_files(checksums, args.target_dir)
rm_rf(directory)
def parse_args():
parser = argparse.ArgumentParser(description='upload sumsha file')
parser.add_argument('-v', '--version', help='Specify the version',
required=True)
parser.add_argument('-u', '--dist-url',
help='Specify the dist url for downloading',
required=False, default=DIST_URL)
parser.add_argument('-t', '--target-dir',
help='Specify target dir of checksums',
required=False)
return parser.parse_args()
def get_files_list(version):
return [
{ "filename": 'node-{0}.tar.gz'.format(version), "required": True },
{ "filename": 'node-{0}-headers.tar.gz'.format(version), "required": True },
{ "filename": 'iojs-{0}.tar.gz'.format(version), "required": True },
{ "filename": 'iojs-{0}-headers.tar.gz'.format(version), "required": True },
{ "filename": 'node.lib', "required": False },
{ "filename": 'x64/node.lib', "required": False },
{ "filename": 'win-x86/iojs.lib', "required": False },
{ "filename": 'win-x64/iojs.lib', "required": False },
{ "filename": 'win-x86/node.lib', "required": False },
{ "filename": 'win-x64/node.lib', "required": False },
{ "filename": 'arm64/node.lib', "required": False },
{ "filename": 'win-arm64/iojs.lib', "required": False },
{ "filename": 'win-arm64/node.lib', "required": False }
]
def download_files(url, files):
directory = tempfile.mkdtemp(prefix='electron-tmp')
result = []
for optional_f in files:
required = optional_f['required']
f = optional_f['filename']
try:
result.append(download(f, url + f, os.path.join(directory, f)))
except Exception:
if required:
raise
return directory, result
def create_checksum(algorithm, directory, filename, files):
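    # Hash every downloaded file and write a SHASUMS-style text file containing one
    # "<hexdigest> <path relative to the download directory>" line per file.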
lines = []
for path in files:
h = hashlib.new(algorithm)
with open(path, 'rb') as f:
h.update(f.read())
lines.append(h.hexdigest() + ' ' + os.path.relpath(path, directory))
checksum_file = os.path.join(directory, filename)
with open(checksum_file, 'w') as f:
f.write('\n'.join(lines) + '\n')
return checksum_file
def copy_files(source_files, output_dir):
for source_file in source_files:
output_path = os.path.join(output_dir, os.path.basename(source_file))
safe_mkdir(os.path.dirname(output_path))
shutil.copy2(source_file, output_path)
if __name__ == '__main__':
sys.exit(main())
|
py | 7df8bc54cd8a4094c7d8be869cc5a75987336a11 | """Tests the xonfig command.
Actually, just a down payment on a full test.
Currently exercises only these options:
- xonfig info
- xonfig jupyter_kernel
"""
import os
import re
import sys
import json
import pytest # noqa F401
import io
from xonsh.tools import ON_WINDOWS
from xonsh.xonfig import xonfig_main
from xonsh.webconfig import main as web_main
def test_xonfig_help(capsys, xession):
"""verify can invoke it, and usage knows about all the options"""
with pytest.raises(SystemExit):
xonfig_main(["-h"])
capout = capsys.readouterr().out
pat = re.compile(r"^usage:\s*xonfig[^\n]*{([\w,-]+)}", re.MULTILINE)
m = pat.match(capout)
assert m[1]
verbs = {v.strip().lower() for v in m[1].split(",")}
assert verbs == {
"jupyter-kernel",
"info",
"styles",
"wizard",
"web",
"colors",
"tutorial",
}
@pytest.mark.parametrize(
"args",
[
([]),
(
[
"info",
]
),
], # NOQA E231
)
def test_xonfig_info(args, xession):
"""info works, and reports no jupyter if none in environment"""
capout = xonfig_main(args)
assert capout.startswith("+---")
assert capout.endswith("---+\n")
pat = re.compile(r".*on jupyter\s+\|\s+false", re.MULTILINE | re.IGNORECASE)
m = pat.search(capout)
assert m
def strip_sep(path: str) -> str:
"""remove all path separators from argument"""
retval = path.replace(os.sep, "")
if ON_WINDOWS:
retval = retval.replace(os.altsep, "")
return retval
@pytest.fixture
def fake_lib(monkeypatch):
"""insulate sys.modules from hacking test modules may do with it.
    Apparently, monkeypatch.syspath_prepend() doesn't flush
imported modules, so they're still visible in other test cases.
"""
    # get absolute path to fake_lib, assuming this test file itself is in the same folder.
fake_lib_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "fake_lib"))
monkeypatch.syspath_prepend(fake_lib_path)
yield
# monkeypatch will have restored sys.path, but it's up to us to purge the imported modules
fake_packages = tuple(f.name for f in os.scandir(fake_lib_path) if os.path.isdir(f))
modules_to_delete = []
for (m, mod) in sys.modules.items():
if m.startswith(fake_packages):
if mod.__file__.startswith(fake_lib_path):
modules_to_delete.append(m) # can't modify collection while iterating
for m in modules_to_delete:
del sys.modules[m]
def test_xonfig_kernel_with_jupyter(monkeypatch, capsys, fake_lib, xession):
cap_args = None
cap_spec = None
import jupyter_client.kernelspec # from fake_lib, hopefully.
def mock_install_kernel_spec(*args, **kwargs): # arg[0] is self
nonlocal cap_args
nonlocal cap_spec
cap_args = dict(args=args, kw=kwargs)
spec_file = os.path.join(args[1], "kernel.json")
cap_spec = json.load(open(spec_file))
def mock_get_kernel_spec(*args, **kwargs):
raise jupyter_client.kernelspec.NoSuchKernel
monkeypatch.setattr(
jupyter_client.kernelspec.KernelSpecManager,
"install_kernel_spec",
value=mock_install_kernel_spec,
raising=False,
)
monkeypatch.setattr(
jupyter_client.kernelspec.KernelSpecManager,
"get_kernel_spec",
value=mock_get_kernel_spec,
raising=False,
)
rc = xonfig_main(["jupyter-kernel"])
assert rc == 0
capout = capsys.readouterr().out
assert "Jupyter" in capout
assert "xonsh" == cap_args["args"][2]
assert cap_spec
assert cap_spec["language"] == "xonsh"
assert strip_sep(cap_spec["argv"][0]) == strip_sep(sys.executable)
assert cap_spec["argv"][2] == "xonsh.jupyter_kernel"
def test_xonfig_kernel_no_jupyter(capsys, xession):
with pytest.raises(ImportError):
rc = xonfig_main(["jupyter-kernel"]) # noqa F841
@pytest.fixture
def request_factory():
class MockSocket:
def getsockname(self):
return ("sockname",)
def sendall(self, data):
self.data = data
class MockRequest:
_sock = MockSocket()
def __init__(self, path: str, method: str):
self._path = path
self.data = b""
self.method = method.upper()
def makefile(self, *args, **kwargs):
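            # BaseHTTPRequestHandler reads the request line from the "rb" file
            # object and writes the response to the "wb" one, so faking both is
            # enough to drive the handler without a real socket.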
if args[0] == "rb":
return io.BytesIO(f"{self.method} {self._path} HTTP/1.0".encode())
elif args[0] == "wb":
return io.BytesIO(b"")
else:
raise ValueError("Unknown file type to make", args, kwargs)
def sendall(self, data):
self.data = data
return MockRequest
@pytest.fixture
def get_req(request_factory):
from urllib import parse
def factory(path, data: "dict[str, str]|None" = None):
if data:
path = path + "?" + parse.urlencode(data)
request = request_factory(path, "get")
handle = web_main.XonshConfigHTTPRequestHandler(request, (0, 0), None)
return request, handle, request.data.decode()
return factory
class TestXonfigWeb:
def test_colors_get(self, get_req):
_, _, resp = get_req("/")
assert "Colors" in resp
def test_xontribs_get(self, get_req):
_, _, resp = get_req("/xontribs")
assert "Xontribs" in resp
def test_prompts_get(self, get_req):
_, _, resp = get_req("/prompts")
assert "Prompts" in resp
|
py | 7df8bca6a48dc214d836972a250cf227008dc066 | # -*- coding: utf-8 -*-
from py2cytoscape.data.base_view import BaseView
import requests
import json
from . import HEADERS
class NodeView(BaseView):
# Utility Methods to access node position
def get_x(self):
return self.get_value('NODE_X_LOCATION')
def get_y(self):
return self.get_value('NODE_Y_LOCATION')
def set_x(self, x):
self.set_value('NODE_X_LOCATION', x)
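    # Symmetric setter for the y coordinate (mirrors set_x above)
    def set_y(self, y):
        self.set_value('NODE_Y_LOCATION', y)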
|
py | 7df8bd6e8bcfccb08c71a537d11e666f1ca5cf59 | # -*- coding: utf-8 -*-
"""
sphinx.setup_command
~~~~~~~~~~~~~~~~~~~~
Setuptools/distutils commands to assist the building of sphinx
documentation.
:author: Sebastian Wiesner
:contact: [email protected]
:copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import print_function
import sys
import os
import traceback
from distutils.cmd import Command
from distutils.errors import DistutilsOptionError, DistutilsExecError
from six import StringIO, string_types
from sphinx.application import Sphinx
from sphinx.util.console import darkred, nocolor, color_terminal
from sphinx.util.osutil import abspath
class BuildDoc(Command):
"""
Distutils command to build Sphinx documentation.
The Sphinx build can then be triggered from distutils, and some Sphinx
options can be set in ``setup.py`` or ``setup.cfg`` instead of Sphinx own
configuration file.
For instance, from `setup.py`::
# this is only necessary when not using setuptools/distribute
from sphinx.setup_command import BuildDoc
cmdclass = {'build_sphinx': BuildDoc}
name = 'My project'
version = '1.2'
release = '1.2.0'
setup(
name=name,
author='Bernard Montgomery',
version=release,
cmdclass=cmdclass,
# these are optional and override conf.py settings
command_options={
'build_sphinx': {
'project': ('setup.py', name),
'version': ('setup.py', version),
'release': ('setup.py', release)}},
)
Or add this section in ``setup.cfg``::
[build_sphinx]
project = 'My project'
version = 1.2
release = 1.2.0
"""
description = 'Build Sphinx documentation'
user_options = [
('fresh-env', 'E', 'discard saved environment'),
('all-files', 'a', 'build all files'),
('source-dir=', 's', 'Source directory'),
('build-dir=', None, 'Build directory'),
('config-dir=', 'c', 'Location of the configuration directory'),
('builder=', 'b', 'The builder to use. Defaults to "html"'),
('warning-is-error', 'W', 'Turn warning into errors'),
('project=', None, 'The documented project\'s name'),
('version=', None, 'The short X.Y version'),
('release=', None, 'The full version, including alpha/beta/rc tags'),
('today=', None, 'How to format the current date, used as the '
'replacement for |today|'),
('link-index', 'i', 'Link index.html to the master doc'),
('copyright', None, 'The copyright string'),
('pdb', None, 'Start pdb on exception'),
]
boolean_options = ['fresh-env', 'all-files', 'warning-is-error',
'link-index']
def initialize_options(self):
self.fresh_env = self.all_files = False
self.pdb = False
self.source_dir = self.build_dir = None
self.builder = 'html'
self.warning_is_error = False
self.project = ''
self.version = ''
self.release = ''
self.today = ''
self.config_dir = None
self.link_index = False
self.copyright = ''
def _guess_source_dir(self):
for guess in ('doc', 'docs'):
if not os.path.isdir(guess):
continue
for root, dirnames, filenames in os.walk(guess):
if 'conf.py' in filenames:
return root
return None
# Overriding distutils' Command._ensure_stringlike which doesn't support
# unicode, causing finalize_options to fail if invoked again. Workaround
# for http://bugs.python.org/issue19570
def _ensure_stringlike(self, option, what, default=None):
val = getattr(self, option)
if val is None:
setattr(self, option, default)
return default
elif not isinstance(val, string_types):
raise DistutilsOptionError("'%s' must be a %s (got `%s`)"
% (option, what, val))
return val
def finalize_options(self):
if self.source_dir is None:
self.source_dir = self._guess_source_dir()
self.announce('Using source directory %s' % self.source_dir)
self.ensure_dirname('source_dir')
if self.source_dir is None:
self.source_dir = os.curdir
self.source_dir = abspath(self.source_dir)
if self.config_dir is None:
self.config_dir = self.source_dir
self.config_dir = abspath(self.config_dir)
if self.build_dir is None:
build = self.get_finalized_command('build')
self.build_dir = os.path.join(abspath(build.build_base), 'sphinx')
self.mkpath(self.build_dir)
self.build_dir = abspath(self.build_dir)
self.doctree_dir = os.path.join(self.build_dir, 'doctrees')
self.mkpath(self.doctree_dir)
self.builder_target_dir = os.path.join(self.build_dir, self.builder)
self.mkpath(self.builder_target_dir)
def run(self):
if not color_terminal():
nocolor()
if not self.verbose:
status_stream = StringIO()
else:
status_stream = sys.stdout
confoverrides = {}
if self.project:
confoverrides['project'] = self.project
if self.version:
confoverrides['version'] = self.version
if self.release:
confoverrides['release'] = self.release
if self.today:
confoverrides['today'] = self.today
if self.copyright:
confoverrides['copyright'] = self.copyright
app = Sphinx(self.source_dir, self.config_dir,
self.builder_target_dir, self.doctree_dir,
self.builder, confoverrides, status_stream,
freshenv=self.fresh_env,
warningiserror=self.warning_is_error)
try:
app.build(force_all=self.all_files)
if app.statuscode:
raise DistutilsExecError(
'caused by %s builder.' % app.builder.name)
except Exception as err:
if self.pdb:
import pdb
print(darkred('Exception occurred while building, starting debugger:'),
file=sys.stderr)
traceback.print_exc()
pdb.post_mortem(sys.exc_info()[2])
else:
from docutils.utils import SystemMessage
if isinstance(err, SystemMessage):
print(darkred('reST markup error:'), file=sys.stderr)
print(err.args[0].encode('ascii', 'backslashreplace'),
file=sys.stderr)
else:
raise
if self.link_index:
src = app.config.master_doc + app.builder.out_suffix
dst = app.builder.get_outfilename('index')
os.symlink(src, dst)
|
py | 7df8be202e40c33894da94a3042043438c55412c | from load import load
data = load()
height = len(data)
width = len(data[0])
def descend_slope(right: int, down: int) -> int:
x = y = 0
trees = ""
while y < height:
        # Record the map character at this point (it may be open ground, not just a tree)
trees += data[y][x]
x = (x + right) % width
y += down
return trees.count("#") # count the trees (#)
print(descend_slope(3, 1))
slopes = [(1, 1), (3, 1), (5, 1), (7, 1), (1, 2)]
product = 1
for slope in slopes:
product *= descend_slope(*slope)
print(product)
|
py | 7df8be50bfbd7c85e72d663ea69eb2d17ad9b343 | # ----------------------------------------------
# Creator : Naimish Mani B
# Date : 27th July 2021
# ----------------------------------------------
# Automate marking attendance for sixphrase
# It makes use of Selenium and Chromedriver
# ----------------------------------------------
# Download the chromedriver that matches your Chrome version from
# https://chromedriver.chromium.org/downloads
# to ensure this runs smoothly.
# ----------------------------------------------
from selenium import webdriver
from time import sleep
from datetime import datetime
from tqdm import tqdm
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument("--mute-audio")
BASE_URL = "http://3.12.107.113/"
EMAIL_ID = ""
PASSWORD = ""
def openBrowser():
# Initialize the webdriver object
driver = webdriver.Chrome('Driver/chromedriver', options=chrome_options)
# Navigates to the website with chrome
driver.get(BASE_URL + 'users/sign_in')
# Wait 5 seconds, for the website to load
sleep(5)
print("Opened Browser")
return driver
def login(driver):
name = driver.find_element_by_xpath(
'//*[@id="user_email"]'
)
name.send_keys(EMAIL_ID)
pwd = driver.find_element_by_xpath(
'//*[@id="user_password"]'
)
pwd.send_keys(PASSWORD)
submitButton = driver.find_element_by_xpath(
'//*[@id="new_user"]/input[3]'
)
submitButton.click()
print("Logged In")
sleep(5)
def attendanceWindow(driver):
# Load Attendance Page
driver.get(
BASE_URL + 'course/60e1fa251d41c80b44389b30'
)
sleep(5)
# Load all dates
dropDownArrow = driver.find_element_by_xpath(
'//*[@id="ld-lesson-list-56"]/div/div[1]/span/span[1]'
)
dropDownArrow.click()
sleep(5)
# Find today's attendance link
datesTable = driver.find_element_by_xpath('//*[@id="ld-nav-content-list-68"]/div/div')
today = datesTable.find_elements_by_xpath('./*')[-1]
todayDropDown = today.find_elements_by_xpath('./*')
sleep(1)
print("Selecting Date: ", todayDropDown[0].text)
# Opening today's dropdown
todayDownArrow = todayDropDown[0].find_elements_by_xpath('./*')[1]
todayDownArrow.click()
sleep(1)
# Get Current Hour
hour = int(datetime.now().strftime("%H"))
sessions = todayDropDown[1].find_elements_by_xpath('./*')[0].find_elements_by_xpath('./*')[0]
# Morning Session
if hour in [10, 11, 12]:
morning = sessions.find_elements_by_xpath('./*')[0]
morning.click()
# Afternoon Session
else:
evening = sessions.find_elements_by_xpath('./*')[1]
evening.click()
sleep(5)
# Start Quiz
testButton = driver.find_element_by_xpath(
'//*[@id="ld-table-list-item-66"]/a[1]/button'
)
testButton.click()
print("Opened Quiz")
goToQuiz(driver)
def goToQuiz(driver):
# Wait for quiz to load
with tqdm(total=35) as t:
for i in range(35):
sleep(1)
t.update(1)
# Mark presence
yesButton = driver.find_element_by_xpath('//*[@id="options_quiz"]/div/label')
yesButton.click()
sleep(2)
# Submit Result
submitButton = driver.find_element_by_xpath('//*[@id="end_test"]')
submitButton.click()
sleep(3)
# Confirm Submission
endTestButton = driver.find_element_by_xpath('//*[@id="end_test_button"]')
endTestButton.click()
print("Submitted Attendance")
sleep(5)
def quit(driver):
sleep(10)
driver.close()
driver.quit()
print("Exit Program")
if __name__ == '__main__':
driver = openBrowser()
login(driver)
attendanceWindow(driver)
quit(driver)
|
py | 7df8be9b8e66c969db0973b3c4c7842539022f31 | from __future__ import print_function, absolute_import, division
import math
import numbers
import numpy as np
import operator
from llvmlite import ir
from llvmlite.llvmpy.core import Type, Constant
import llvmlite.llvmpy.core as lc
from .imputils import (lower_builtin, lower_getattr, lower_getattr_generic,
lower_cast, lower_constant,
impl_ret_borrowed, impl_ret_untracked)
from . import optional
from .. import typing, types, cgutils, utils, errors
from ..extending import intrinsic, overload_method
from ..unsafe.numbers import viewer
def _int_arith_flags(rettype):
"""
Return the modifier flags for integer arithmetic.
"""
if rettype.signed:
# Ignore the effects of signed overflow. This is important for
# optimization of some indexing operations. For example
# array[i+1] could see `i+1` trigger a signed overflow and
# give a negative number. With Python's indexing, a negative
# index is treated differently: its resolution has a runtime cost.
# Telling LLVM to ignore signed overflows allows it to optimize
# away the check for a negative `i+1` if it knows `i` is positive.
return ['nsw']
else:
return []
def int_add_impl(context, builder, sig, args):
[va, vb] = args
[ta, tb] = sig.args
a = context.cast(builder, va, ta, sig.return_type)
b = context.cast(builder, vb, tb, sig.return_type)
res = builder.add(a, b, flags=_int_arith_flags(sig.return_type))
return impl_ret_untracked(context, builder, sig.return_type, res)
def int_sub_impl(context, builder, sig, args):
[va, vb] = args
[ta, tb] = sig.args
a = context.cast(builder, va, ta, sig.return_type)
b = context.cast(builder, vb, tb, sig.return_type)
res = builder.sub(a, b, flags=_int_arith_flags(sig.return_type))
return impl_ret_untracked(context, builder, sig.return_type, res)
def int_mul_impl(context, builder, sig, args):
[va, vb] = args
[ta, tb] = sig.args
a = context.cast(builder, va, ta, sig.return_type)
b = context.cast(builder, vb, tb, sig.return_type)
res = builder.mul(a, b, flags=_int_arith_flags(sig.return_type))
return impl_ret_untracked(context, builder, sig.return_type, res)
def int_divmod_signed(context, builder, ty, x, y):
"""
Reference Objects/intobject.c
xdivy = x / y;
xmody = (long)(x - (unsigned long)xdivy * y);
/* If the signs of x and y differ, and the remainder is non-0,
* C89 doesn't define whether xdivy is now the floor or the
* ceiling of the infinitely precise quotient. We want the floor,
* and we have it iff the remainder's sign matches y's.
*/
if (xmody && ((y ^ xmody) < 0) /* i.e. and signs differ */) {
xmody += y;
--xdivy;
assert(xmody && ((y ^ xmody) >= 0));
}
*p_xdivy = xdivy;
*p_xmody = xmody;
"""
assert x.type == y.type
ZERO = y.type(0)
ONE = y.type(1)
# NOTE: On x86 at least, dividing the lowest representable integer
    # (e.g. 0x80000000 for int32) by -1 causes a SIGFPE (division overflow),
# causing the process to crash.
# We return 0, 0 instead (more or less like Numpy).
resdiv = cgutils.alloca_once_value(builder, ZERO)
resmod = cgutils.alloca_once_value(builder, ZERO)
is_overflow = builder.and_(
builder.icmp_signed('==', x, x.type(ty.minval)),
builder.icmp_signed('==', y, y.type(-1)))
with builder.if_then(builder.not_(is_overflow), likely=True):
# Note LLVM will optimize this to a single divmod instruction,
# if available on the target CPU (e.g. x86).
xdivy = builder.sdiv(x, y)
xmody = builder.srem(x, y)
y_xor_xmody_ltz = builder.icmp_signed('<', builder.xor(y, xmody), ZERO)
xmody_istrue = builder.icmp_signed('!=', xmody, ZERO)
cond = builder.and_(xmody_istrue, y_xor_xmody_ltz)
with builder.if_else(cond) as (if_different_signs, if_same_signs):
with if_same_signs:
builder.store(xdivy, resdiv)
builder.store(xmody, resmod)
with if_different_signs:
builder.store(builder.sub(xdivy, ONE), resdiv)
builder.store(builder.add(xmody, y), resmod)
return builder.load(resdiv), builder.load(resmod)
def int_divmod(context, builder, ty, x, y):
"""
Integer divmod(x, y). The caller must ensure that y != 0.
"""
if ty.signed:
return int_divmod_signed(context, builder, ty, x, y)
else:
return builder.udiv(x, y), builder.urem(x, y)
def _int_divmod_impl(context, builder, sig, args, zerodiv_message):
va, vb = args
ta, tb = sig.args
ty = sig.return_type
if isinstance(ty, types.UniTuple):
ty = ty.dtype
a = context.cast(builder, va, ta, ty)
b = context.cast(builder, vb, tb, ty)
quot = cgutils.alloca_once(builder, a.type, name="quot")
rem = cgutils.alloca_once(builder, a.type, name="rem")
with builder.if_else(cgutils.is_scalar_zero(builder, b), likely=False
) as (if_zero, if_non_zero):
with if_zero:
if not context.error_model.fp_zero_division(
builder, (zerodiv_message,)):
# No exception raised => return 0
# XXX We should also set the FPU exception status, but
# there's no easy way to do that from LLVM.
builder.store(b, quot)
builder.store(b, rem)
with if_non_zero:
q, r = int_divmod(context, builder, ty, a, b)
builder.store(q, quot)
builder.store(r, rem)
return quot, rem
@lower_builtin(divmod, types.Integer, types.Integer)
def int_divmod_impl(context, builder, sig, args):
quot, rem = _int_divmod_impl(context, builder, sig, args,
"integer divmod by zero")
return cgutils.pack_array(builder,
(builder.load(quot), builder.load(rem)))
@lower_builtin(operator.floordiv, types.Integer, types.Integer)
@lower_builtin(operator.ifloordiv, types.Integer, types.Integer)
def int_floordiv_impl(context, builder, sig, args):
quot, rem = _int_divmod_impl(context, builder, sig, args,
"integer division by zero")
return builder.load(quot)
if not utils.IS_PY3:
lower_builtin(operator.div, types.Integer, types.Integer)(int_floordiv_impl)
lower_builtin(operator.idiv, types.Integer, types.Integer)(int_floordiv_impl)
@lower_builtin(operator.truediv, types.Integer, types.Integer)
@lower_builtin(operator.itruediv, types.Integer, types.Integer)
def int_truediv_impl(context, builder, sig, args):
[va, vb] = args
[ta, tb] = sig.args
a = context.cast(builder, va, ta, sig.return_type)
b = context.cast(builder, vb, tb, sig.return_type)
with cgutils.if_zero(builder, b):
context.error_model.fp_zero_division(builder, ("division by zero",))
res = builder.fdiv(a, b)
return impl_ret_untracked(context, builder, sig.return_type, res)
@lower_builtin(operator.mod, types.Integer, types.Integer)
@lower_builtin(operator.imod, types.Integer, types.Integer)
def int_rem_impl(context, builder, sig, args):
quot, rem = _int_divmod_impl(context, builder, sig, args,
"integer modulo by zero")
return builder.load(rem)
def _get_power_zerodiv_return(context, return_type):
if (isinstance(return_type, types.Integer)
and not context.error_model.raise_on_fp_zero_division):
# If not raising, return 0x8000... when computing 0 ** <negative number>
return -1 << (return_type.bitwidth - 1)
else:
return False
def int_power_impl(context, builder, sig, args):
"""
a ^ b, where a is an integer or real, and b an integer
"""
is_integer = isinstance(sig.args[0], types.Integer)
tp = sig.return_type
zerodiv_return = _get_power_zerodiv_return(context, tp)
def int_power(a, b):
# Ensure computations are done with a large enough width
r = tp(1)
a = tp(a)
if b < 0:
invert = True
exp = -b
if exp < 0:
raise OverflowError
if is_integer:
if a == 0:
if zerodiv_return:
return zerodiv_return
else:
raise ZeroDivisionError("0 cannot be raised to a negative power")
if a != 1 and a != -1:
return 0
else:
invert = False
exp = b
if exp > 0x10000:
# Optimization cutoff: fallback on the generic algorithm
return math.pow(a, float(b))
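        # Square-and-multiply: consume the exponent one bit at a time, squaring
        # the base at each step and multiplying it into the result whenever the
        # current low bit is set, e.g. a**13 = a**8 * a**4 * a**1.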
while exp != 0:
if exp & 1:
r *= a
exp >>= 1
a *= a
return 1.0 / r if invert else r
res = context.compile_internal(builder, int_power, sig, args)
return impl_ret_untracked(context, builder, sig.return_type, res)
@lower_builtin(operator.pow, types.Integer, types.IntegerLiteral)
@lower_builtin(operator.ipow, types.Integer, types.IntegerLiteral)
@lower_builtin(operator.pow, types.Float, types.IntegerLiteral)
@lower_builtin(operator.ipow, types.Float, types.IntegerLiteral)
def static_power_impl(context, builder, sig, args):
"""
a ^ b, where a is an integer or real, and b a constant integer
"""
exp = sig.args[1].value
if not isinstance(exp, numbers.Integral):
raise NotImplementedError
if abs(exp) > 0x10000:
# Optimization cutoff: fallback on the generic algorithm above
raise NotImplementedError
invert = exp < 0
exp = abs(exp)
tp = sig.return_type
is_integer = isinstance(tp, types.Integer)
zerodiv_return = _get_power_zerodiv_return(context, tp)
val = context.cast(builder, args[0], sig.args[0], tp)
lty = val.type
def mul(a, b):
if is_integer:
return builder.mul(a, b)
else:
return builder.fmul(a, b)
# Unroll the exponentiation loop
res = lty(1)
a = val
while exp != 0:
if exp & 1:
res = mul(res, val)
exp >>= 1
val = mul(val, val)
if invert:
# If the exponent was negative, fix the result by inverting it
if is_integer:
# Integer inversion
def invert_impl(a):
if a == 0:
if zerodiv_return:
return zerodiv_return
else:
raise ZeroDivisionError("0 cannot be raised to a negative power")
if a != 1 and a != -1:
return 0
else:
return a
else:
# Real inversion
def invert_impl(a):
return 1.0 / a
res = context.compile_internal(builder, invert_impl,
typing.signature(tp, tp), (res,))
return res
def int_slt_impl(context, builder, sig, args):
res = builder.icmp(lc.ICMP_SLT, *args)
return impl_ret_untracked(context, builder, sig.return_type, res)
def int_sle_impl(context, builder, sig, args):
res = builder.icmp(lc.ICMP_SLE, *args)
return impl_ret_untracked(context, builder, sig.return_type, res)
def int_sgt_impl(context, builder, sig, args):
res = builder.icmp(lc.ICMP_SGT, *args)
return impl_ret_untracked(context, builder, sig.return_type, res)
def int_sge_impl(context, builder, sig, args):
res = builder.icmp(lc.ICMP_SGE, *args)
return impl_ret_untracked(context, builder, sig.return_type, res)
def int_ult_impl(context, builder, sig, args):
res = builder.icmp(lc.ICMP_ULT, *args)
return impl_ret_untracked(context, builder, sig.return_type, res)
def int_ule_impl(context, builder, sig, args):
res = builder.icmp(lc.ICMP_ULE, *args)
return impl_ret_untracked(context, builder, sig.return_type, res)
def int_ugt_impl(context, builder, sig, args):
res = builder.icmp(lc.ICMP_UGT, *args)
return impl_ret_untracked(context, builder, sig.return_type, res)
def int_uge_impl(context, builder, sig, args):
res = builder.icmp(lc.ICMP_UGE, *args)
return impl_ret_untracked(context, builder, sig.return_type, res)
def int_eq_impl(context, builder, sig, args):
res = builder.icmp(lc.ICMP_EQ, *args)
return impl_ret_untracked(context, builder, sig.return_type, res)
def int_ne_impl(context, builder, sig, args):
res = builder.icmp(lc.ICMP_NE, *args)
return impl_ret_untracked(context, builder, sig.return_type, res)
def int_abs_impl(context, builder, sig, args):
[x] = args
ZERO = Constant.null(x.type)
ltz = builder.icmp(lc.ICMP_SLT, x, ZERO)
negated = builder.neg(x)
res = builder.select(ltz, negated, x)
return impl_ret_untracked(context, builder, sig.return_type, res)
def uint_abs_impl(context, builder, sig, args):
[x] = args
return impl_ret_untracked(context, builder, sig.return_type, x)
def int_shl_impl(context, builder, sig, args):
[valty, amtty] = sig.args
[val, amt] = args
val = context.cast(builder, val, valty, sig.return_type)
amt = context.cast(builder, amt, amtty, sig.return_type)
res = builder.shl(val, amt)
return impl_ret_untracked(context, builder, sig.return_type, res)
def int_shr_impl(context, builder, sig, args):
[valty, amtty] = sig.args
[val, amt] = args
val = context.cast(builder, val, valty, sig.return_type)
amt = context.cast(builder, amt, amtty, sig.return_type)
if sig.return_type.signed:
res = builder.ashr(val, amt)
else:
res = builder.lshr(val, amt)
return impl_ret_untracked(context, builder, sig.return_type, res)
def int_and_impl(context, builder, sig, args):
[at, bt] = sig.args
[av, bv] = args
cav = context.cast(builder, av, at, sig.return_type)
cbc = context.cast(builder, bv, bt, sig.return_type)
res = builder.and_(cav, cbc)
return impl_ret_untracked(context, builder, sig.return_type, res)
def int_or_impl(context, builder, sig, args):
[at, bt] = sig.args
[av, bv] = args
cav = context.cast(builder, av, at, sig.return_type)
cbc = context.cast(builder, bv, bt, sig.return_type)
res = builder.or_(cav, cbc)
return impl_ret_untracked(context, builder, sig.return_type, res)
def int_xor_impl(context, builder, sig, args):
[at, bt] = sig.args
[av, bv] = args
cav = context.cast(builder, av, at, sig.return_type)
cbc = context.cast(builder, bv, bt, sig.return_type)
res = builder.xor(cav, cbc)
return impl_ret_untracked(context, builder, sig.return_type, res)
def int_negate_impl(context, builder, sig, args):
[typ] = sig.args
[val] = args
# Negate before upcasting, for unsigned numbers
res = builder.neg(val)
res = context.cast(builder, res, typ, sig.return_type)
return impl_ret_untracked(context, builder, sig.return_type, res)
def int_positive_impl(context, builder, sig, args):
[typ] = sig.args
[val] = args
res = context.cast(builder, val, typ, sig.return_type)
return impl_ret_untracked(context, builder, sig.return_type, res)
def int_invert_impl(context, builder, sig, args):
[typ] = sig.args
[val] = args
# Invert before upcasting, for unsigned numbers
res = builder.xor(val, Constant.all_ones(val.type))
res = context.cast(builder, res, typ, sig.return_type)
return impl_ret_untracked(context, builder, sig.return_type, res)
def int_sign_impl(context, builder, sig, args):
"""
np.sign(int)
"""
[x] = args
POS = Constant.int(x.type, 1)
NEG = Constant.int(x.type, -1)
ZERO = Constant.int(x.type, 0)
cmp_zero = builder.icmp(lc.ICMP_EQ, x, ZERO)
cmp_pos = builder.icmp(lc.ICMP_SGT, x, ZERO)
presult = cgutils.alloca_once(builder, x.type)
bb_zero = builder.append_basic_block(".zero")
bb_postest = builder.append_basic_block(".postest")
bb_pos = builder.append_basic_block(".pos")
bb_neg = builder.append_basic_block(".neg")
bb_exit = builder.append_basic_block(".exit")
builder.cbranch(cmp_zero, bb_zero, bb_postest)
with builder.goto_block(bb_zero):
builder.store(ZERO, presult)
builder.branch(bb_exit)
with builder.goto_block(bb_postest):
builder.cbranch(cmp_pos, bb_pos, bb_neg)
with builder.goto_block(bb_pos):
builder.store(POS, presult)
builder.branch(bb_exit)
with builder.goto_block(bb_neg):
builder.store(NEG, presult)
builder.branch(bb_exit)
builder.position_at_end(bb_exit)
res = builder.load(presult)
return impl_ret_untracked(context, builder, sig.return_type, res)
def bool_negate_impl(context, builder, sig, args):
[typ] = sig.args
[val] = args
res = context.cast(builder, val, typ, sig.return_type)
res = builder.neg(res)
return impl_ret_untracked(context, builder, sig.return_type, res)
def bool_unary_positive_impl(context, builder, sig, args):
[typ] = sig.args
[val] = args
res = context.cast(builder, val, typ, sig.return_type)
return impl_ret_untracked(context, builder, sig.return_type, res)
lower_builtin(operator.eq, types.boolean, types.boolean)(int_eq_impl)
lower_builtin(operator.ne, types.boolean, types.boolean)(int_ne_impl)
lower_builtin(operator.lt, types.boolean, types.boolean)(int_ult_impl)
lower_builtin(operator.le, types.boolean, types.boolean)(int_ule_impl)
lower_builtin(operator.gt, types.boolean, types.boolean)(int_ugt_impl)
lower_builtin(operator.ge, types.boolean, types.boolean)(int_uge_impl)
lower_builtin(operator.neg, types.boolean)(bool_negate_impl)
lower_builtin(operator.pos, types.boolean)(bool_unary_positive_impl)
def _implement_integer_operators():
ty = types.Integer
lower_builtin(operator.add, ty, ty)(int_add_impl)
lower_builtin(operator.iadd, ty, ty)(int_add_impl)
lower_builtin(operator.sub, ty, ty)(int_sub_impl)
lower_builtin(operator.isub, ty, ty)(int_sub_impl)
lower_builtin(operator.mul, ty, ty)(int_mul_impl)
lower_builtin(operator.imul, ty, ty)(int_mul_impl)
lower_builtin(operator.eq, ty, ty)(int_eq_impl)
lower_builtin(operator.ne, ty, ty)(int_ne_impl)
lower_builtin(operator.lshift, ty, ty)(int_shl_impl)
lower_builtin(operator.ilshift, ty, ty)(int_shl_impl)
lower_builtin(operator.rshift, ty, ty)(int_shr_impl)
lower_builtin(operator.irshift, ty, ty)(int_shr_impl)
lower_builtin(operator.neg, ty)(int_negate_impl)
lower_builtin(operator.pos, ty)(int_positive_impl)
lower_builtin(operator.pow, ty, ty)(int_power_impl)
lower_builtin(operator.ipow, ty, ty)(int_power_impl)
lower_builtin(pow, ty, ty)(int_power_impl)
for ty in types.unsigned_domain:
lower_builtin(operator.lt, ty, ty)(int_ult_impl)
lower_builtin(operator.le, ty, ty)(int_ule_impl)
lower_builtin(operator.gt, ty, ty)(int_ugt_impl)
lower_builtin(operator.ge, ty, ty)(int_uge_impl)
lower_builtin(operator.pow, types.Float, ty)(int_power_impl)
lower_builtin(operator.ipow, types.Float, ty)(int_power_impl)
lower_builtin(pow, types.Float, ty)(int_power_impl)
lower_builtin(abs, ty)(uint_abs_impl)
    lower_builtin(operator.lt, types.IntegerLiteral, types.IntegerLiteral)(int_slt_impl)
    lower_builtin(operator.gt, types.IntegerLiteral, types.IntegerLiteral)(int_sgt_impl)
    lower_builtin(operator.le, types.IntegerLiteral, types.IntegerLiteral)(int_sle_impl)
    lower_builtin(operator.ge, types.IntegerLiteral, types.IntegerLiteral)(int_sge_impl)
for ty in types.signed_domain:
lower_builtin(operator.lt, ty, ty)(int_slt_impl)
lower_builtin(operator.le, ty, ty)(int_sle_impl)
lower_builtin(operator.gt, ty, ty)(int_sgt_impl)
lower_builtin(operator.ge, ty, ty)(int_sge_impl)
lower_builtin(operator.pow, types.Float, ty)(int_power_impl)
lower_builtin(operator.ipow, types.Float, ty)(int_power_impl)
lower_builtin(pow, types.Float, ty)(int_power_impl)
lower_builtin(abs, ty)(int_abs_impl)
def _implement_bitwise_operators():
for ty in (types.Boolean, types.Integer):
lower_builtin(operator.and_, ty, ty)(int_and_impl)
lower_builtin(operator.iand, ty, ty)(int_and_impl)
lower_builtin(operator.or_, ty, ty)(int_or_impl)
lower_builtin(operator.ior, ty, ty)(int_or_impl)
lower_builtin(operator.xor, ty, ty)(int_xor_impl)
lower_builtin(operator.ixor, ty, ty)(int_xor_impl)
lower_builtin(operator.invert, ty)(int_invert_impl)
_implement_integer_operators()
_implement_bitwise_operators()
def real_add_impl(context, builder, sig, args):
res = builder.fadd(*args)
return impl_ret_untracked(context, builder, sig.return_type, res)
def real_sub_impl(context, builder, sig, args):
res = builder.fsub(*args)
return impl_ret_untracked(context, builder, sig.return_type, res)
def real_mul_impl(context, builder, sig, args):
res = builder.fmul(*args)
return impl_ret_untracked(context, builder, sig.return_type, res)
def real_div_impl(context, builder, sig, args):
with cgutils.if_zero(builder, args[1]):
context.error_model.fp_zero_division(builder, ("division by zero",))
res = builder.fdiv(*args)
return impl_ret_untracked(context, builder, sig.return_type, res)
def real_divmod(context, builder, x, y):
assert x.type == y.type
floatty = x.type
module = builder.module
fname = context.mangler(".numba.python.rem", [x.type])
fnty = Type.function(floatty, (floatty, floatty, Type.pointer(floatty)))
fn = module.get_or_insert_function(fnty, fname)
if fn.is_declaration:
fn.linkage = lc.LINKAGE_LINKONCE_ODR
fnbuilder = lc.Builder(fn.append_basic_block('entry'))
fx, fy, pmod = fn.args
div, mod = real_divmod_func_body(context, fnbuilder, fx, fy)
fnbuilder.store(mod, pmod)
fnbuilder.ret(div)
pmod = cgutils.alloca_once(builder, floatty)
quotient = builder.call(fn, (x, y, pmod))
return quotient, builder.load(pmod)
def real_divmod_func_body(context, builder, vx, wx):
# Reference Objects/floatobject.c
#
# float_divmod(PyObject *v, PyObject *w)
# {
# double vx, wx;
# double div, mod, floordiv;
# CONVERT_TO_DOUBLE(v, vx);
# CONVERT_TO_DOUBLE(w, wx);
# mod = fmod(vx, wx);
# /* fmod is typically exact, so vx-mod is *mathematically* an
# exact multiple of wx. But this is fp arithmetic, and fp
# vx - mod is an approximation; the result is that div may
# not be an exact integral value after the division, although
# it will always be very close to one.
# */
# div = (vx - mod) / wx;
# if (mod) {
# /* ensure the remainder has the same sign as the denominator */
# if ((wx < 0) != (mod < 0)) {
# mod += wx;
# div -= 1.0;
# }
# }
# else {
# /* the remainder is zero, and in the presence of signed zeroes
# fmod returns different results across platforms; ensure
# it has the same sign as the denominator; we'd like to do
# "mod = wx * 0.0", but that may get optimized away */
# mod *= mod; /* hide "mod = +0" from optimizer */
# if (wx < 0.0)
# mod = -mod;
# }
# /* snap quotient to nearest integral value */
# if (div) {
# floordiv = floor(div);
# if (div - floordiv > 0.5)
# floordiv += 1.0;
# }
# else {
# /* div is zero - get the same sign as the true quotient */
# div *= div; /* hide "div = +0" from optimizers */
# floordiv = div * vx / wx; /* zero w/ sign of vx/wx */
# }
# return Py_BuildValue("(dd)", floordiv, mod);
# }
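    # Numerical example of the algorithm above (illustration only): for vx = -7.0,
    # wx = 2.0, fmod gives mod = -1.0 and div = (vx - mod) / wx = -3.0; the signs of
    # wx and mod differ, so mod becomes 1.0 and div becomes -4.0, which matches
    # Python's divmod(-7.0, 2.0) == (-4.0, 1.0).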
pmod = cgutils.alloca_once(builder, vx.type)
pdiv = cgutils.alloca_once(builder, vx.type)
pfloordiv = cgutils.alloca_once(builder, vx.type)
mod = builder.frem(vx, wx)
div = builder.fdiv(builder.fsub(vx, mod), wx)
builder.store(mod, pmod)
builder.store(div, pdiv)
# Note the use of negative zero for proper negating with `ZERO - x`
ZERO = vx.type(0.0)
NZERO = vx.type(-0.0)
ONE = vx.type(1.0)
mod_istrue = builder.fcmp_unordered('!=', mod, ZERO)
wx_ltz = builder.fcmp_ordered('<', wx, ZERO)
mod_ltz = builder.fcmp_ordered('<', mod, ZERO)
with builder.if_else(mod_istrue, likely=True) as (if_nonzero_mod, if_zero_mod):
with if_nonzero_mod:
# `mod` is non-zero or NaN
# Ensure the remainder has the same sign as the denominator
wx_ltz_ne_mod_ltz = builder.icmp(lc.ICMP_NE, wx_ltz, mod_ltz)
with builder.if_then(wx_ltz_ne_mod_ltz):
builder.store(builder.fsub(div, ONE), pdiv)
builder.store(builder.fadd(mod, wx), pmod)
with if_zero_mod:
# `mod` is zero, select the proper sign depending on
# the denominator's sign
mod = builder.select(wx_ltz, NZERO, ZERO)
builder.store(mod, pmod)
del mod, div
div = builder.load(pdiv)
div_istrue = builder.fcmp(lc.FCMP_ONE, div, ZERO)
with builder.if_then(div_istrue):
realtypemap = {'float': types.float32,
'double': types.float64}
realtype = realtypemap[str(wx.type)]
floorfn = context.get_function(math.floor,
typing.signature(realtype, realtype))
floordiv = floorfn(builder, [div])
floordivdiff = builder.fsub(div, floordiv)
floordivincr = builder.fadd(floordiv, ONE)
HALF = Constant.real(wx.type, 0.5)
pred = builder.fcmp(lc.FCMP_OGT, floordivdiff, HALF)
floordiv = builder.select(pred, floordivincr, floordiv)
builder.store(floordiv, pfloordiv)
with cgutils.ifnot(builder, div_istrue):
div = builder.fmul(div, div)
builder.store(div, pdiv)
floordiv = builder.fdiv(builder.fmul(div, vx), wx)
builder.store(floordiv, pfloordiv)
return builder.load(pfloordiv), builder.load(pmod)
@lower_builtin(divmod, types.Float, types.Float)
def real_divmod_impl(context, builder, sig, args, loc=None):
x, y = args
quot = cgutils.alloca_once(builder, x.type, name="quot")
rem = cgutils.alloca_once(builder, x.type, name="rem")
with builder.if_else(cgutils.is_scalar_zero(builder, y), likely=False
) as (if_zero, if_non_zero):
with if_zero:
if not context.error_model.fp_zero_division(
builder, ("modulo by zero",), loc):
# No exception raised => compute the nan result,
# and set the FP exception word for Numpy warnings.
q = builder.fdiv(x, y)
r = builder.frem(x, y)
builder.store(q, quot)
builder.store(r, rem)
with if_non_zero:
q, r = real_divmod(context, builder, x, y)
builder.store(q, quot)
builder.store(r, rem)
return cgutils.pack_array(builder,
(builder.load(quot), builder.load(rem)))
def real_mod_impl(context, builder, sig, args, loc=None):
x, y = args
res = cgutils.alloca_once(builder, x.type)
with builder.if_else(cgutils.is_scalar_zero(builder, y), likely=False
) as (if_zero, if_non_zero):
with if_zero:
if not context.error_model.fp_zero_division(
builder, ("modulo by zero",), loc):
# No exception raised => compute the nan result,
# and set the FP exception word for Numpy warnings.
rem = builder.frem(x, y)
builder.store(rem, res)
with if_non_zero:
_, rem = real_divmod(context, builder, x, y)
builder.store(rem, res)
return impl_ret_untracked(context, builder, sig.return_type,
builder.load(res))
def real_floordiv_impl(context, builder, sig, args, loc=None):
x, y = args
res = cgutils.alloca_once(builder, x.type)
with builder.if_else(cgutils.is_scalar_zero(builder, y), likely=False
) as (if_zero, if_non_zero):
with if_zero:
if not context.error_model.fp_zero_division(
builder, ("division by zero",), loc):
# No exception raised => compute the +/-inf or nan result,
# and set the FP exception word for Numpy warnings.
quot = builder.fdiv(x, y)
builder.store(quot, res)
with if_non_zero:
quot, _ = real_divmod(context, builder, x, y)
builder.store(quot, res)
return impl_ret_untracked(context, builder, sig.return_type,
builder.load(res))
def real_power_impl(context, builder, sig, args):
x, y = args
module = builder.module
if context.implement_powi_as_math_call:
imp = context.get_function(math.pow, sig)
res = imp(builder, args)
else:
fn = lc.Function.intrinsic(module, lc.INTR_POW, [y.type])
res = builder.call(fn, (x, y))
return impl_ret_untracked(context, builder, sig.return_type, res)
def real_lt_impl(context, builder, sig, args):
res = builder.fcmp(lc.FCMP_OLT, *args)
return impl_ret_untracked(context, builder, sig.return_type, res)
def real_le_impl(context, builder, sig, args):
res = builder.fcmp(lc.FCMP_OLE, *args)
return impl_ret_untracked(context, builder, sig.return_type, res)
def real_gt_impl(context, builder, sig, args):
res = builder.fcmp(lc.FCMP_OGT, *args)
return impl_ret_untracked(context, builder, sig.return_type, res)
def real_ge_impl(context, builder, sig, args):
res = builder.fcmp(lc.FCMP_OGE, *args)
return impl_ret_untracked(context, builder, sig.return_type, res)
def real_eq_impl(context, builder, sig, args):
res = builder.fcmp(lc.FCMP_OEQ, *args)
return impl_ret_untracked(context, builder, sig.return_type, res)
def real_ne_impl(context, builder, sig, args):
res = builder.fcmp(lc.FCMP_UNE, *args)
return impl_ret_untracked(context, builder, sig.return_type, res)
def real_abs_impl(context, builder, sig, args):
[ty] = sig.args
sig = typing.signature(ty, ty)
impl = context.get_function(math.fabs, sig)
return impl(builder, args)
def real_negate_impl(context, builder, sig, args):
from . import mathimpl
res = mathimpl.negate_real(builder, args[0])
return impl_ret_untracked(context, builder, sig.return_type, res)
def real_positive_impl(context, builder, sig, args):
[typ] = sig.args
[val] = args
res = context.cast(builder, val, typ, sig.return_type)
return impl_ret_untracked(context, builder, sig.return_type, res)
def real_sign_impl(context, builder, sig, args):
"""
np.sign(float)
"""
[x] = args
POS = Constant.real(x.type, 1)
NEG = Constant.real(x.type, -1)
ZERO = Constant.real(x.type, 0)
presult = cgutils.alloca_once(builder, x.type)
is_pos = builder.fcmp(lc.FCMP_OGT, x, ZERO)
is_neg = builder.fcmp(lc.FCMP_OLT, x, ZERO)
with builder.if_else(is_pos) as (gt_zero, not_gt_zero):
with gt_zero:
builder.store(POS, presult)
with not_gt_zero:
with builder.if_else(is_neg) as (lt_zero, not_lt_zero):
with lt_zero:
builder.store(NEG, presult)
with not_lt_zero:
# For both NaN and 0, the result of sign() is simply
# the input value.
builder.store(x, presult)
res = builder.load(presult)
return impl_ret_untracked(context, builder, sig.return_type, res)
ty = types.Float
lower_builtin(operator.add, ty, ty)(real_add_impl)
lower_builtin(operator.iadd, ty, ty)(real_add_impl)
lower_builtin(operator.sub, ty, ty)(real_sub_impl)
lower_builtin(operator.isub, ty, ty)(real_sub_impl)
lower_builtin(operator.mul, ty, ty)(real_mul_impl)
lower_builtin(operator.imul, ty, ty)(real_mul_impl)
lower_builtin(operator.floordiv, ty, ty)(real_floordiv_impl)
lower_builtin(operator.ifloordiv, ty, ty)(real_floordiv_impl)
lower_builtin(operator.truediv, ty, ty)(real_div_impl)
lower_builtin(operator.itruediv, ty, ty)(real_div_impl)
if not utils.IS_PY3:
lower_builtin(operator.div, ty, ty)(real_div_impl)
lower_builtin(operator.idiv, ty, ty)(real_div_impl)
lower_builtin(operator.mod, ty, ty)(real_mod_impl)
lower_builtin(operator.imod, ty, ty)(real_mod_impl)
lower_builtin(operator.pow, ty, ty)(real_power_impl)
lower_builtin(operator.ipow, ty, ty)(real_power_impl)
lower_builtin(pow, ty, ty)(real_power_impl)
lower_builtin(operator.eq, ty, ty)(real_eq_impl)
lower_builtin(operator.ne, ty, ty)(real_ne_impl)
lower_builtin(operator.lt, ty, ty)(real_lt_impl)
lower_builtin(operator.le, ty, ty)(real_le_impl)
lower_builtin(operator.gt, ty, ty)(real_gt_impl)
lower_builtin(operator.ge, ty, ty)(real_ge_impl)
lower_builtin(abs, ty)(real_abs_impl)
lower_builtin(operator.neg, ty)(real_negate_impl)
lower_builtin(operator.pos, ty)(real_positive_impl)
del ty
@lower_getattr(types.Complex, "real")
def complex_real_impl(context, builder, typ, value):
cplx = context.make_complex(builder, typ, value=value)
res = cplx.real
return impl_ret_untracked(context, builder, typ, res)
@lower_getattr(types.Complex, "imag")
def complex_imag_impl(context, builder, typ, value):
cplx = context.make_complex(builder, typ, value=value)
res = cplx.imag
return impl_ret_untracked(context, builder, typ, res)
@lower_builtin("complex.conjugate", types.Complex)
def complex_conjugate_impl(context, builder, sig, args):
from . import mathimpl
z = context.make_complex(builder, sig.args[0], args[0])
z.imag = mathimpl.negate_real(builder, z.imag)
res = z._getvalue()
return impl_ret_untracked(context, builder, sig.return_type, res)
def real_real_impl(context, builder, typ, value):
return impl_ret_untracked(context, builder, typ, value)
def real_imag_impl(context, builder, typ, value):
res = cgutils.get_null_value(value.type)
return impl_ret_untracked(context, builder, typ, res)
def real_conjugate_impl(context, builder, sig, args):
return impl_ret_untracked(context, builder, sig.return_type, args[0])
for cls in (types.Float, types.Integer):
lower_getattr(cls, "real")(real_real_impl)
lower_getattr(cls, "imag")(real_imag_impl)
lower_builtin("complex.conjugate", cls)(real_conjugate_impl)
@lower_builtin(operator.pow, types.Complex, types.Complex)
@lower_builtin(operator.ipow, types.Complex, types.Complex)
@lower_builtin(pow, types.Complex, types.Complex)
def complex_power_impl(context, builder, sig, args):
[ca, cb] = args
ty = sig.args[0]
fty = ty.underlying_float
a = context.make_helper(builder, ty, value=ca)
b = context.make_helper(builder, ty, value=cb)
c = context.make_helper(builder, ty)
module = builder.module
pa = a._getpointer()
pb = b._getpointer()
pc = c._getpointer()
# Optimize for square because cpow loses a lot of precision
TWO = context.get_constant(fty, 2)
ZERO = context.get_constant(fty, 0)
b_real_is_two = builder.fcmp_ordered('==', b.real, TWO)
b_imag_is_zero = builder.fcmp_ordered('==', b.imag, ZERO)
b_is_two = builder.and_(b_real_is_two, b_imag_is_zero)
with builder.if_else(b_is_two) as (then, otherwise):
with then:
# Lower as multiplication
res = complex_mul_impl(context, builder, sig, (ca, ca))
cres = context.make_helper(builder, ty, value=res)
c.real = cres.real
c.imag = cres.imag
with otherwise:
# Lower with call to external function
func_name = {
types.complex64: "numba_cpowf",
types.complex128: "numba_cpow",
}[ty]
fnty = Type.function(Type.void(), [pa.type] * 3)
cpow = module.get_or_insert_function(fnty, name=func_name)
builder.call(cpow, (pa, pb, pc))
res = builder.load(pc)
return impl_ret_untracked(context, builder, sig.return_type, res)
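# Why the b == 2+0j special case above exists (illustrative note; the external
# numba_cpow/numba_cpowf helpers are assumed to implement a generic complex
# pow, typically via exp(b*log(a)), which loses precision for exact squares).
# Routing z**2 through complex_mul_impl instead uses the exact identity
#
#   (a + b*i)**2 = (a*a - b*b) + (2*a*b)*i
#
# at the cost of only one rounding per component.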
def complex_add_impl(context, builder, sig, args):
[cx, cy] = args
ty = sig.args[0]
x = context.make_complex(builder, ty, value=cx)
y = context.make_complex(builder, ty, value=cy)
z = context.make_complex(builder, ty)
a = x.real
b = x.imag
c = y.real
d = y.imag
z.real = builder.fadd(a, c)
z.imag = builder.fadd(b, d)
res = z._getvalue()
return impl_ret_untracked(context, builder, sig.return_type, res)
def complex_sub_impl(context, builder, sig, args):
[cx, cy] = args
ty = sig.args[0]
x = context.make_complex(builder, ty, value=cx)
y = context.make_complex(builder, ty, value=cy)
z = context.make_complex(builder, ty)
a = x.real
b = x.imag
c = y.real
d = y.imag
z.real = builder.fsub(a, c)
z.imag = builder.fsub(b, d)
res = z._getvalue()
return impl_ret_untracked(context, builder, sig.return_type, res)
def complex_mul_impl(context, builder, sig, args):
"""
(a+bi)(c+di)=(ac-bd)+i(ad+bc)
"""
[cx, cy] = args
ty = sig.args[0]
x = context.make_complex(builder, ty, value=cx)
y = context.make_complex(builder, ty, value=cy)
z = context.make_complex(builder, ty)
a = x.real
b = x.imag
c = y.real
d = y.imag
ac = builder.fmul(a, c)
bd = builder.fmul(b, d)
ad = builder.fmul(a, d)
bc = builder.fmul(b, c)
z.real = builder.fsub(ac, bd)
z.imag = builder.fadd(ad, bc)
res = z._getvalue()
return impl_ret_untracked(context, builder, sig.return_type, res)
NAN = float('nan')
def complex_div_impl(context, builder, sig, args):
def complex_div(a, b):
# This is CPython's algorithm (in _Py_c_quot()).
areal = a.real
aimag = a.imag
breal = b.real
bimag = b.imag
if not breal and not bimag:
raise ZeroDivisionError("complex division by zero")
if abs(breal) >= abs(bimag):
# Divide tops and bottom by b.real
if not breal:
return complex(NAN, NAN)
ratio = bimag / breal
denom = breal + bimag * ratio
return complex(
(areal + aimag * ratio) / denom,
(aimag - areal * ratio) / denom)
else:
# Divide tops and bottom by b.imag
if not bimag:
return complex(NAN, NAN)
ratio = breal / bimag
denom = breal * ratio + bimag
return complex(
(a.real * ratio + a.imag) / denom,
(a.imag * ratio - a.real) / denom)
res = context.compile_internal(builder, complex_div, sig, args)
return impl_ret_untracked(context, builder, sig.return_type, res)
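# The ratio/denom scaling in complex_div above follows CPython's _Py_c_quot
# (essentially Smith's method): rather than forming c*c + d*d, which can
# overflow or underflow, numerator and denominator are divided by the
# larger-magnitude component of b. For |c| >= |d| the identity used is
#
#   (a + b*i) / (c + d*i) = ((a + b*r) + (b - a*r)*i) / (c + d*r),  where r = d/c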
def complex_negate_impl(context, builder, sig, args):
from . import mathimpl
[typ] = sig.args
[val] = args
cmplx = context.make_complex(builder, typ, value=val)
res = context.make_complex(builder, typ)
res.real = mathimpl.negate_real(builder, cmplx.real)
res.imag = mathimpl.negate_real(builder, cmplx.imag)
res = res._getvalue()
return impl_ret_untracked(context, builder, sig.return_type, res)
def complex_positive_impl(context, builder, sig, args):
[val] = args
return impl_ret_untracked(context, builder, sig.return_type, val)
def complex_eq_impl(context, builder, sig, args):
[cx, cy] = args
typ = sig.args[0]
x = context.make_complex(builder, typ, value=cx)
y = context.make_complex(builder, typ, value=cy)
reals_are_eq = builder.fcmp(lc.FCMP_OEQ, x.real, y.real)
imags_are_eq = builder.fcmp(lc.FCMP_OEQ, x.imag, y.imag)
res = builder.and_(reals_are_eq, imags_are_eq)
return impl_ret_untracked(context, builder, sig.return_type, res)
def complex_ne_impl(context, builder, sig, args):
[cx, cy] = args
typ = sig.args[0]
x = context.make_complex(builder, typ, value=cx)
y = context.make_complex(builder, typ, value=cy)
reals_are_ne = builder.fcmp(lc.FCMP_UNE, x.real, y.real)
imags_are_ne = builder.fcmp(lc.FCMP_UNE, x.imag, y.imag)
res = builder.or_(reals_are_ne, imags_are_ne)
return impl_ret_untracked(context, builder, sig.return_type, res)
def complex_abs_impl(context, builder, sig, args):
"""
abs(z) := hypot(z.real, z.imag)
"""
def complex_abs(z):
return math.hypot(z.real, z.imag)
res = context.compile_internal(builder, complex_abs, sig, args)
return impl_ret_untracked(context, builder, sig.return_type, res)
ty = types.Complex
lower_builtin(operator.add, ty, ty)(complex_add_impl)
lower_builtin(operator.iadd, ty, ty)(complex_add_impl)
lower_builtin(operator.sub, ty, ty)(complex_sub_impl)
lower_builtin(operator.isub, ty, ty)(complex_sub_impl)
lower_builtin(operator.mul, ty, ty)(complex_mul_impl)
lower_builtin(operator.imul, ty, ty)(complex_mul_impl)
lower_builtin(operator.truediv, ty, ty)(complex_div_impl)
lower_builtin(operator.itruediv, ty, ty)(complex_div_impl)
if not utils.IS_PY3:
lower_builtin(operator.div, ty, ty)(complex_div_impl)
lower_builtin(operator.idiv, ty, ty)(complex_div_impl)
lower_builtin(operator.neg, ty)(complex_negate_impl)
lower_builtin(operator.pos, ty)(complex_positive_impl)
# Complex modulo is deprecated in python3
lower_builtin(operator.eq, ty, ty)(complex_eq_impl)
lower_builtin(operator.ne, ty, ty)(complex_ne_impl)
lower_builtin(abs, ty)(complex_abs_impl)
del ty
@lower_builtin("number.item", types.Boolean)
@lower_builtin("number.item", types.Number)
def number_item_impl(context, builder, sig, args):
"""
The no-op .item() method on booleans and numbers.
"""
return args[0]
#------------------------------------------------------------------------------
def number_not_impl(context, builder, sig, args):
[typ] = sig.args
[val] = args
istrue = context.cast(builder, val, typ, sig.return_type)
res = builder.not_(istrue)
return impl_ret_untracked(context, builder, sig.return_type, res)
@lower_builtin(bool, types.boolean)
def bool_as_bool(context, builder, sig, args):
[val] = args
return val
@lower_builtin(bool, types.Integer)
def int_as_bool(context, builder, sig, args):
[val] = args
return builder.icmp_unsigned('!=', val, ir.Constant(val.type, 0))
@lower_builtin(bool, types.Float)
def float_as_bool(context, builder, sig, args):
[val] = args
return builder.fcmp(lc.FCMP_UNE, val, ir.Constant(val.type, 0.0))
@lower_builtin(bool, types.Complex)
def complex_as_bool(context, builder, sig, args):
[typ] = sig.args
[val] = args
cmplx = context.make_complex(builder, typ, val)
real, imag = cmplx.real, cmplx.imag
zero = ir.Constant(real.type, 0.0)
real_istrue = builder.fcmp(lc.FCMP_UNE, real, zero)
imag_istrue = builder.fcmp(lc.FCMP_UNE, imag, zero)
return builder.or_(real_istrue, imag_istrue)
for ty in (types.Integer, types.Float, types.Complex):
lower_builtin(operator.not_, ty)(number_not_impl)
lower_builtin(operator.not_, types.boolean)(number_not_impl)
#------------------------------------------------------------------------------
# Hashing numbers, see hashing.py
#-------------------------------------------------------------------------------
# Implicit casts between numerics
@lower_cast(types.IntegerLiteral, types.Integer)
@lower_cast(types.IntegerLiteral, types.Float)
@lower_cast(types.IntegerLiteral, types.Complex)
def literal_int_to_number(context, builder, fromty, toty, val):
lit = context.get_constant_generic(
builder,
fromty.literal_type,
fromty.literal_value,
)
return context.cast(builder, lit, fromty.literal_type, toty)
@lower_cast(types.Integer, types.Integer)
def integer_to_integer(context, builder, fromty, toty, val):
if toty.bitwidth == fromty.bitwidth:
# Just a change of signedness
return val
elif toty.bitwidth < fromty.bitwidth:
# Downcast
return builder.trunc(val, context.get_value_type(toty))
elif fromty.signed:
# Signed upcast
return builder.sext(val, context.get_value_type(toty))
else:
# Unsigned upcast
return builder.zext(val, context.get_value_type(toty))
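# Illustrative mapping of the branches above onto LLVM casts (standard LLVM
# semantics assumed; the widths are examples only):
#
#   int64  -> int32  : trunc (keep the low 32 bits)
#   int32  -> int64  : sext  (replicate the sign bit, preserving negatives)
#   uint32 -> uint64 : zext  (pad the high bits with zeros)
#   int32  -> uint32 : returned unchanged (same width, only signedness changes)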
@lower_cast(types.Integer, types.voidptr)
def integer_to_voidptr(context, builder, fromty, toty, val):
return builder.inttoptr(val, context.get_value_type(toty))
@lower_cast(types.Float, types.Float)
def float_to_float(context, builder, fromty, toty, val):
lty = context.get_value_type(toty)
if fromty.bitwidth < toty.bitwidth:
return builder.fpext(val, lty)
else:
return builder.fptrunc(val, lty)
@lower_cast(types.Integer, types.Float)
def integer_to_float(context, builder, fromty, toty, val):
lty = context.get_value_type(toty)
if fromty.signed:
return builder.sitofp(val, lty)
else:
return builder.uitofp(val, lty)
@lower_cast(types.Float, types.Integer)
def float_to_integer(context, builder, fromty, toty, val):
lty = context.get_value_type(toty)
if toty.signed:
return builder.fptosi(val, lty)
else:
return builder.fptoui(val, lty)
@lower_cast(types.Float, types.Complex)
@lower_cast(types.Integer, types.Complex)
def non_complex_to_complex(context, builder, fromty, toty, val):
real = context.cast(builder, val, fromty, toty.underlying_float)
imag = context.get_constant(toty.underlying_float, 0)
cmplx = context.make_complex(builder, toty)
cmplx.real = real
cmplx.imag = imag
return cmplx._getvalue()
@lower_cast(types.Complex, types.Complex)
def complex_to_complex(context, builder, fromty, toty, val):
srcty = fromty.underlying_float
dstty = toty.underlying_float
src = context.make_complex(builder, fromty, value=val)
dst = context.make_complex(builder, toty)
dst.real = context.cast(builder, src.real, srcty, dstty)
dst.imag = context.cast(builder, src.imag, srcty, dstty)
return dst._getvalue()
@lower_cast(types.Any, types.Boolean)
def any_to_boolean(context, builder, fromty, toty, val):
return context.is_true(builder, fromty, val)
@lower_cast(types.Boolean, types.Number)
def boolean_to_any(context, builder, fromty, toty, val):
# Casting from boolean to anything first casts to int32
asint = builder.zext(val, Type.int())
return context.cast(builder, asint, types.int32, toty)
@lower_cast(types.IntegerLiteral, types.Boolean)
def literal_int_to_boolean(context, builder, fromty, toty, val):
lit = context.get_constant_generic(
builder,
fromty.literal_type,
fromty.literal_value,
)
return context.is_true(builder, fromty.literal_type, lit)
#-------------------------------------------------------------------------------
# Constants
@lower_constant(types.Complex)
def constant_complex(context, builder, ty, pyval):
fty = ty.underlying_float
real = context.get_constant_generic(builder, fty, pyval.real)
imag = context.get_constant_generic(builder, fty, pyval.imag)
return ir.Constant.literal_struct((real, imag))
@lower_constant(types.Integer)
@lower_constant(types.Float)
@lower_constant(types.Boolean)
def constant_integer(context, builder, ty, pyval):
lty = context.get_value_type(ty)
return lty(pyval)
#-------------------------------------------------------------------------------
# View
def scalar_view(scalar, viewty):
""" Typing for the np scalar 'view' method. """
if (isinstance(scalar, (types.Float, types.Integer))
and isinstance(viewty, types.abstract.DTypeSpec)):
if scalar.bitwidth != viewty.dtype.bitwidth:
raise errors.TypingError(
"Changing the dtype of a 0d array is only supported if the "
"itemsize is unchanged")
def impl(scalar, viewty):
return viewer(scalar, viewty)
return impl
overload_method(types.Float, 'view')(scalar_view)
overload_method(types.Integer, 'view')(scalar_view)
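# Behavior implied by the guard above (a sketch assuming NumPy-compatible
# scalar .view() semantics): a same-width view reinterprets the bits, while a
# width-changing view is rejected at typing time.
#
#   np.float64(1.0).view(np.int64)   # ok: the 64-bit pattern is reinterpreted
#   np.float64(1.0).view(np.int32)   # rejected by the bitwidth check above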
|
py | 7df8bf1787ff95f0551c3f0ec475aeee05e07a48 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
DocBook output support for Lore.
"""
import os, cgi
from xml.dom import minidom as dom
from twisted.lore import latex
class DocbookSpitter(latex.BaseLatexSpitter):
currentLevel = 1
def writeNodeData(self, node):
self.writer(node.data)
def visitNode_body(self, node):
self.visitNodeDefault(node)
self.writer('</section>'*self.currentLevel)
def visitNodeHeader(self, node):
level = int(node.tagName[1])
difference, self.currentLevel = level-self.currentLevel, level
self.writer('<section>'*difference+'</section>'*-difference)
if difference<=0:
self.writer('</section>\n<section>')
self.writer('<title>')
self.visitNodeDefault(node)
def visitNode_a_listing(self, node):
fileName = os.path.join(self.currDir, node.getAttribute('href'))
self.writer('<programlisting>\n')
self.writer(cgi.escape(open(fileName).read()))
self.writer('</programlisting>\n')
def visitNode_a_href(self, node):
self.visitNodeDefault(node)
def visitNode_a_name(self, node):
self.visitNodeDefault(node)
def visitNode_li(self, node):
for child in node.childNodes:
if getattr(child, 'tagName', None) != 'p':
new = dom.Element('p')
new.childNodes = [child]
node.replaceChild(new, child)
self.visitNodeDefault(node)
visitNode_h2 = visitNode_h3 = visitNode_h4 = visitNodeHeader
end_h2 = end_h3 = end_h4 = '</title><para />'
start_title, end_title = '<section><title>', '</title><para />'
start_p, end_p = '<para>', '</para>'
start_strong, end_strong = start_em, end_em = '<emphasis>', '</emphasis>'
start_span_footnote, end_span_footnote = '<footnote><para>', '</para></footnote>'
start_q = end_q = '"'
start_pre, end_pre = '<programlisting>', '</programlisting>'
start_div_note, end_div_note = '<note>', '</note>'
start_li, end_li = '<listitem>', '</listitem>'
start_ul, end_ul = '<itemizedlist>', '</itemizedlist>'
start_ol, end_ol = '<orderedlist>', '</orderedlist>'
start_dl, end_dl = '<variablelist>', '</variablelist>'
start_dt, end_dt = '<varlistentry><term>', '</term>'
start_dd, end_dd = '<listitem><para>', '</para></listitem></varlistentry>'
|
py | 7df8c00ab40f18dedf72d386b23c22cd66197216 | # -*- coding: utf-8 -*-
# Copyright 2021 Cohesity Inc.
class ConnectionStateEnum(object):
"""Implementation of the 'ConnectionState' enum.
Specifies the connection state of the Object and are only valid for
ESXi hosts ('kHostSystem') or Virtual Machines ('kVirtualMachine').
These enums are equivalent to the connection states documented in
VMware's reference documentation.
    Examples of Cohesity connection states include 'kConnected',
    'kDisconnected', 'kInaccessible', etc.
    'kConnected' indicates that the server has access to the virtual machine.
    'kDisconnected' indicates that the server is currently disconnected from
    the virtual machine.
    'kInaccessible' indicates that one or more configuration files are
    inaccessible.
    'kInvalid' indicates that the virtual machine configuration is invalid.
    'kOrphaned' indicates that the virtual machine is no longer registered on
    the host it is associated with.
    'kNotResponding' indicates that the virtual machine failed to respond due
    to external issues such as network connectivity or hostd not running.
Attributes:
KCONNECTED: TODO: type description here.
KDISCONNECTED: TODO: type description here.
KINACCESSIBLE: TODO: type description here.
KINVALID: TODO: type description here.
KORPHANED: TODO: type description here.
KNOTRESPONDING: TODO: type description here.
"""
KCONNECTED = 'kConnected'
KDISCONNECTED = 'kDisconnected'
KINACCESSIBLE = 'kInaccessible'
KINVALID = 'kInvalid'
KORPHANED = 'kOrphaned'
KNOTRESPONDING = 'kNotResponding'
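# Minimal usage sketch (the helper below is hypothetical and not part of the
# generated SDK): API responses carry the connection state as a raw string,
# so callers can compare it against these class attributes directly.
def _example_is_reachable(connection_state):
    """Illustrative only: True when the server reports the object as connected."""
    return connection_state == ConnectionStateEnum.KCONNECTED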
|
py | 7df8c03adf5200f5b1151e5c58f0e87277fdea90 | import numpy as np
import h5py
import os
import wobble
import learningRates
if __name__ == "__main__":
# change these keywords:
starname = '101501_expres'
datafile = '../data/{}.hdf5'.format(starname)
R = 86 # the number of echelle orders total in the data set
orders = np.arange(35,80) # list of indices for the echelle orders to be tuned
K_star = 0 # number of variable components for stellar spectrum
K_t = 2 # number of variable components for telluric spectrum
reg_star_file = f'../regularization/55cnc_expres_star_K{K_star}.hdf5'
reg_t_file = f'../regularization/55cnc_expres_t_K{K_t}.hdf5'
lr_star_file = '../learning_rates/{0}_star_K{1}.hdf5'.format(starname, K_star)
lr_t_file = '../learning_rates/{0}_t_K{1}.hdf5'.format(starname, K_t)
get_fine = False # Whether to try learning rates between orders of magnitude
plot = True
verbose = True # warning: this will print a lot of info & progress bars!
# create directory for plots if it doesn't exist:
if plot:
plot_dir = f'../learning_rates/{starname}/'
if not os.path.exists(plot_dir):
os.makedirs(plot_dir)
# create learning rate parameter files if they don't exist:
star_filename = lr_star_file
if not os.path.isfile(star_filename):
learningRates.generate_learningRate_file(star_filename, R, type='star')
tellurics_filename = lr_t_file
if not os.path.isfile(tellurics_filename):
learningRates.generate_learningRate_file(tellurics_filename, R, type='telluric')
# load up the data we'll use for training:
data = wobble.Data(datafile, orders=orders) # to get N_epochs
#data.trim_bad_edges()
# improve each order's regularization:
results = wobble.Results(data=data)
for r,o in enumerate(orders):
if verbose:
print('---- STARTING ORDER {0} ----'.format(o))
print("starting values:")
print("star:")
with h5py.File(star_filename, 'r') as f:
for key in list(f.keys()):
print("{0}: {1:.0e}".format(key, f[key][o]))
print("tellurics:")
with h5py.File(tellurics_filename, 'r') as f:
for key in list(f.keys()):
print("{0}: {1:.0e}".format(key, f[key][o]))
model = learningRates.modelSetup(data, results, r,
reg_star_file, reg_t_file,
K_star=K_star, K_t=K_t)
lr_t, lr_s = learningRates.improve_learningRates(model, finer_grid=get_fine,
plot=plot, plot_dir=plot_dir)
with h5py.File(star_filename, 'r+') as f:
f['learning_rate_template'][o] = np.copy(lr_s)
with h5py.File(tellurics_filename, 'r+') as f:
f['learning_rate_template'][o] = np.copy(lr_t)
if verbose:
print('---- ORDER {0} COMPLETE ({1}/{2}) ----'.format(o,r,len(orders)-1))
print("best values:")
print("star:")
with h5py.File(star_filename, 'r') as f:
for key in list(f.keys()):
print("{0}: {1:.0e}".format(key, f[key][o]))
print("tellurics:")
with h5py.File(tellurics_filename, 'r') as f:
for key in list(f.keys()):
print("{0}: {1:.0e}".format(key, f[key][o])) |
py | 7df8c0af09e2a9cc36d4312ed0d104c5e52b55c6 | #############################################################################
#
# Author: Ruth HUEY, Michel F. SANNER
#
# Copyright: M. Sanner TSRI 2008
#
#############################################################################
# $Header: /opt/cvs/python/packages/share1.5/AutoDockTools/autoflex41Commands.py,v 1.4 2013/05/09 17:01:27 rhuey Exp $
#
# $Id: autoflex41Commands.py,v 1.4 2013/05/09 17:01:27 rhuey Exp $
#
#
#
#
#
"""
This Module facilitates producing a formatted flexible residue file for AutoDock. The steps in this process are:
* Set the macromolecule:
o Read a PDBQT Macromolecule
o Choose Macromol...
* Select which residues are to be flexible in macromolecule using Pmv selection tools:
o ICOM Select
o SelectFromString
o Select Spherical Region
* Set which torsions in the sidechains of those residues are to be flexible interactively
* The results of the previous steps are written to two files:
o one containing the sidechains of the flexible residues with special keywords
o a second containing the rigid portion of the macromolecule
"""
from ViewerFramework.VFCommand import CommandGUI
from AutoDockTools.autoflexCommands import AF_MacroReader,\
AF_MacroChooser, AF_SelectResidues, AF_ProcessResidues,\
AF_ProcessHingeResidues, AF_EditHinge, AF_SetHinge,\
AF_SetBondRotatableFlag, AF_StepBack, AF_FlexFileWriter,\
AF_RigidFileWriter, AF_LigandDirectoryWriter, AF_SetupCovalentFlexibleResidue,\
menuText
AF_MacroReaderGUI=CommandGUI()
AF_MacroReaderGUI.addMenuCommand('AutoTools41Bar', menuText['AutoFlexMB'], \
menuText['Read Macro'], cascadeName = menuText['InputMB'])
AF_MacroChooserGUI=CommandGUI()
AF_MacroChooserGUI.addMenuCommand('AutoTools41Bar', menuText['AutoFlexMB'],
menuText['Choose Macro'], cascadeName = menuText['InputMB'])
AF_SelectResiduesGUI = CommandGUI()
AF_SelectResiduesGUI.addMenuCommand('AutoTools41Bar', menuText['AutoFlexMB'],menuText['Set Residues'])
AF_ProcessResiduesGUI = CommandGUI()
AF_ProcessHingeResiduesGUI = CommandGUI()
AF_EditHingeGUI = CommandGUI()
AF_EditHingeGUI.addMenuCommand('AutoTools41Bar', menuText['AutoFlexMB'],\
menuText['Edit Hinge'])
AF_SetHingeGUI = CommandGUI()
AF_SetHingeGUI.addMenuCommand('AutoTools41Bar', menuText['AutoFlexMB'],\
menuText['Set Hinge'])
AF_SetupCovalentFlexibleResidueGUI = CommandGUI()
AF_SetupCovalentFlexibleResidueGUI.addMenuCommand('AutoTools41Bar', menuText['AutoFlexMB'],\
menuText['Setup Covalent Residue'])
AF_StepBackGUI = CommandGUI()
AF_StepBackGUI.addMenuCommand('AutoTools41Bar', menuText['AutoFlexMB'], menuText['Step Back'])
AF_FlexFileWriterGUI = CommandGUI()
AF_FlexFileWriterGUI.addMenuCommand('AutoTools41Bar', menuText['AutoFlexMB'], \
menuText['writeFlexible'], cascadeName = menuText['WriteMB'])
AF_RigidFileWriterGUI = CommandGUI()
AF_RigidFileWriterGUI.addMenuCommand('AutoTools41Bar', menuText['AutoFlexMB'], \
menuText['writeRigid'], cascadeName = menuText['WriteMB'])
AF_LigandDirectoryWriterGUI = CommandGUI()
AF_LigandDirectoryWriterGUI.addMenuCommand('AutoTools41Bar', menuText['AutoFlexMB'], \
menuText['writeDir'], cascadeName = menuText['WriteMB'])
commandList = [
{'name':'AD41flex_readMacro','cmd':AF_MacroReader(),'gui':AF_MacroReaderGUI},
{'name':'AD41flex_chooseMacro','cmd':AF_MacroChooser(),'gui':AF_MacroChooserGUI},
{'name':'AD41flex_setResidues','cmd':AF_SelectResidues(),'gui':AF_SelectResiduesGUI},
#{'name':'AD41flex_processResidues','cmd':AF_ProcessResidues(),'gui':None},
#{'name':'AD41flex_processHingeResidues','cmd':AF_ProcessHingeResidues(),'gui':None},
{'name':'AD41flex_setupCovalentResidue', 'cmd':AF_SetupCovalentFlexibleResidue(), 'gui':AF_SetupCovalentFlexibleResidueGUI},
#{'name':'AD41flex_setBondRotatableFlag','cmd':AF_SetBondRotatableFlag(),'gui':None},
#{'name':'AD41flex_setHinge','cmd':AF_SetHinge(),'gui':AF_SetHingeGUI},
#{'name':'AD41flex_editHinge','cmd':AF_EditHinge(),'gui':None},
{'name':'AD41flex_stepBack','cmd':AF_StepBack(),'gui':AF_StepBackGUI},
{'name':'AD41flex_writeFlexFile','cmd':AF_FlexFileWriter(),'gui':AF_FlexFileWriterGUI},
{'name':'AD41flex_writeRigidFile','cmd':AF_RigidFileWriter(),'gui':AF_RigidFileWriterGUI},
#{'name':'AD41flex_writeFlexDir','cmd':AF_LigandDirectoryWriter(),'gui':AF_LigandDirectoryWriterGUI}
]
def initModule(vf):
for dict in commandList:
vf.addCommand(dict['cmd'], dict['name'], dict['gui'])
if not hasattr(vf, 'ADflex_processResidues'):
vf.addCommand(AF_ProcessResidues(), 'ADflex_processResidues', None)
if not hasattr(vf, 'ADflex_setBondRotatableFlag'):
vf.addCommand(AF_SetBondRotatableFlag(), 'ADflex_setBondRotatableFlag', None)
vf.ADflex_setResidues = vf.AD41flex_setResidues
if vf.hasGui and 'AutoTools41Bar' in vf.GUI.menuBars.keys():
vf.GUI.menuBars['AutoTools41Bar'].menubuttons[menuText['AutoFlexMB']].config(bg='tan',underline='-1')
if not hasattr(vf.GUI, 'adtBar'):
vf.GUI.adtBar = vf.GUI.menuBars['AutoTools41Bar']
vf.GUI.adtFrame = vf.GUI.adtBar.menubuttons.values()[0].master
|
py | 7df8c0f545b9efd3e00dc14b49c012d6ecb8f73e | from typing import List, Optional
from collections import defaultdict
from uuid import uuid4
from fastapi import APIRouter, Depends, Header, Body, BackgroundTasks
from sqlalchemy.orm import Session
from sqlalchemy import insert, and_, or_
from starlette.requests import Request
from starlette.responses import JSONResponse
from app.database.conn import db
from app.database.schema import Users, Characters, CharacterHates, CharacterLikes, Follows, CharacterBlocks, UserBlocks
from app.routes.auth import create_access_token
from app.models import CharacterMe, IDWithToken, UserToken, Message, CharacterCard, CharacterInfo, UserMini, \
UserCharacters, CharacterUpdate, ID, Token, CharacterName
from app.utils.examples import update_character_requests
from app.middlewares.token_validator import token_decode
from app.utils.notification_utils import send_notification
from app.errors.exceptions import APIException
router = APIRouter(prefix='/character')
@router.post('/change', status_code=200, response_model=Token, responses={
400: dict(description="Given character doesn't belong to you", model=Message),
404: dict(description="Given character doesn't exist", model=Message)
})
async def change_my_character(request: Request, character_id: int, session: Session = Depends(db.session)):
user = request.state.user
character = Characters.get(session, id=character_id)
if not character:
return JSONResponse(status_code=404, content=dict(msg="NO_MATCH_CHARACTER"))
elif character.user_id != user.id:
return JSONResponse(status_code=400, content=dict(msg="WRONG_CHARACTER_ID"))
else:
user.default_character_id = character_id
token = f"Bearer {create_access_token(data=dict(user))}"
return JSONResponse(status_code=201, content=dict(Authorization=token))
@router.post('', status_code=201, response_model=IDWithToken, responses={
202: dict(description="Given character name already exists", model=Message),
500: dict(description="Something went wrong with the database", model=Message)
})
async def create_my_character(request: Request, character: CharacterMe, session: Session = Depends(db.session)):
user = request.state.user
is_exist = bool(Characters.get(session, name=character.name))
if is_exist:
return JSONResponse(status_code=202, content=dict(msg="CHARACTER_NAME_EXISTS"))
character = dict(character)
character["user_id"] = user.id
character["num_follows"] = 0
character["num_followers"] = 0
likes = character.pop('likes')
hates = character.pop('hates')
character = Characters(**character)
try:
session.add(character)
session.flush()
character_id = character.id
session.bulk_insert_mappings(CharacterLikes, [{'like': like, 'character_id': character_id} for like in likes])
session.bulk_insert_mappings(CharacterHates, [{'hate': hate, 'character_id': character_id} for hate in hates])
session.query(Users).filter_by(id=user.id).update({'default_character_id': character_id})
session.flush()
session.commit()
except:
session.rollback()
return JSONResponse(status_code=500, content=dict(msg="DB_PROBLEM"))
user.default_character_id = character_id
token = f"Bearer {create_access_token(data=UserToken.from_orm(user).dict(exclude={'pw', 'marketing_agree'}))}"
return JSONResponse(status_code=201, content=dict(id=character_id, Authorization=token))
@router.get('/user/{user_name}', status_code=200, response_model=UserCharacters, responses={
400: dict(description="Blocked user", model=Message),
404: dict(description="No such user", model=Message)
})
async def get_user_characters(user_name: str, token: Optional[str] = Header(None),
session: Session = Depends(db.session)):
target = Users.get(session, name=user_name)
if not target:
return JSONResponse(status_code=404, content=dict(msg="NO_MATCH_USER"))
if token:
user = await token_decode(access_token=token)
user_blocks = session.query(UserBlocks).filter(
or_(and_(UserBlocks.user_id == user['id'], UserBlocks.blocked_id == target.id),
and_(UserBlocks.user_id == target.id, UserBlocks.blocked_id == user['id']))).all()
if user_blocks:
return JSONResponse(status_code=400, content=dict(msg="BLOCKED_USER"))
character_blocks = session.query(CharacterBlocks).filter(
or_(CharacterBlocks.character_id == user['default_character_id'],
CharacterBlocks.blocked_id == user['default_character_id'])).all()
blocked_characters = []
for block in character_blocks:
if block.character_id == user['default_character_id']:
blocked_characters.append(block.blocked_id)
else:
blocked_characters.append(block.character_id)
        user_characters = session.query(Users, Characters).filter(Users.name == user_name,
                                                                   Characters.id.notin_(blocked_characters)).join(
            Users.character).all()
else:
user_characters = session.query(Users, Characters).filter(Users.name == user_name).join(Users.character).all()
if not user_characters:
user_info = UserMini.from_orm(target).dict()
user_info['num_followers'] = 0
user_info['num_characters'] = 0
result = dict(user_info=user_info, characters=[])
else:
user_info = UserMini.from_orm(user_characters[0][0]).dict()
user_info['num_followers'] = sum(character[1].num_followers for character in user_characters)
user_info['num_characters'] = len(user_characters)
result = dict(user_info=user_info,
characters=[CharacterCard.from_orm(character[1]).dict() for character in user_characters])
return JSONResponse(status_code=200, content=result)
@router.post('/me', status_code=200, response_model=ID, responses={
404: dict(description="No such character", model=Message)
})
async def who_am_i(request: Request, session: Session = Depends(db.session)):
user = request.state.user
character = Characters.get(session, id=user.default_character_id)
if not character:
return JSONResponse(status_code=404, content=dict(msg="WRONG_CHARACTER_ID"))
return JSONResponse(status_code=200, content=dict(id=character.id))
@router.post('/block', status_code=201, description="Successfully blocked character", responses={
400: dict(description="You can't block yourself", model=Message),
404: dict(description="Given character doesn't exist", model=Message),
500: dict(description="Something went wrong with the database", model=Message)
})
async def block(request: Request, block_name: CharacterName, session: Session = Depends(db.session)):
user = request.state.user
my_character = Characters.get(session, id=user.default_character_id)
block_character = Characters.get(session, name=block_name.character_name)
if not block_character:
return JSONResponse(status_code=404, content=dict(msg="NO_MATCH_CHARACTER"))
elif block_character.name == my_character.name:
return JSONResponse(status_code=400, content=dict(msg="WRONG_CHARACTER_NAME"))
try:
CharacterBlocks.create(session, False, character_id=user.default_character_id, blocked_id=block_character.id)
session.commit()
return JSONResponse(status_code=201)
except:
session.rollback()
return JSONResponse(status_code=500, content=dict(msg="DB_PROBLEM"))
@router.get('/{character_name}', status_code=200, response_model=CharacterInfo, responses={
400: dict(description="Blocked", model=Message),
404: dict(description="No such character", model=Message)
})
async def get_character(character_name: str, token: Optional[str] = Header(None),
session: Session = Depends(db.session)):
target = Characters.get(session, name=character_name)
if not target:
return JSONResponse(status_code=404, content=dict(msg="WRONG_CHARACTER_NAME"))
if token:
user = await token_decode(access_token=token)
user_block = session.query(UserBlocks).filter(and_(UserBlocks.user_id == user['id'],
UserBlocks.blocked_id == target.user_id) |
and_(UserBlocks.user_id == target.user_id,
UserBlocks.blocked_id == user['id'])).all()
if user_block:
return JSONResponse(status_code=400, content=dict(msg="BLOCKED_USER"))
character_block = session.query(CharacterBlocks).filter(
and_(CharacterBlocks.character_id == user['default_character_id'],
CharacterBlocks.blocked_id == target.id) | and_(
CharacterBlocks.blocked_id == user['default_character_id'],
CharacterBlocks.character_id == target.id)).all()
if character_block:
return JSONResponse(status_code=400, content=dict(msg="BLOCKED_CHARACTER"))
setattr(target, 'user_info', UserMini.from_orm(Users.get(session, id=target.user_id)).dict())
character = target
likes = CharacterLikes.filter(session, character_id=character.id).all()
hates = CharacterHates.filter(session, character_id=character.id).all()
setattr(character, 'likes', [like.like for like in likes])
setattr(character, 'hates', [hate.hate for hate in hates])
if token is None:
setattr(character, 'followed', False)
else:
follower_id = user['default_character_id']
setattr(character, 'followed', bool(Follows.get(session, character_id=character.id, follower_id=follower_id)))
character = CharacterInfo.from_orm(character).dict()
return JSONResponse(status_code=200, content=character)
@router.patch('', status_code=204, responses={
400: dict(description="Given character doesn't belong to you", model=Message),
500: dict(description="Something went wrong with the database", model=Message)
})
async def update_my_character(request: Request,
character: CharacterUpdate = Body(..., examples=update_character_requests),
session: Session = Depends(db.session)):
user = request.state.user
old_character = Characters.get(session, id=user.default_character_id)
if old_character.user_id != user.id:
return JSONResponse(status_code=400, content=dict(msg="WRONG_USER"))
character = dict(character)
character = {key: character[key] for key in character if character[key] is not None}
character_row = {k: character[k] for k in character.keys() - ['likes', 'hates']}
Characters.filter(session, id=character['id']).update(True, **character_row)
try:
session.query(Characters).filter_by(id=character['id']).update(character_row)
if 'likes' in character:
session.query(CharacterLikes).filter_by(character_id=character['id']).delete()
session.bulk_insert_mappings(CharacterLikes, [{'like': like, 'character_id': character['id']} for like in
character['likes']])
if 'hates' in character:
session.query(CharacterHates).filter_by(character_id=character['id']).delete()
session.bulk_insert_mappings(CharacterHates, [{'hate': hate, 'character_id': character['id']} for hate in
character['hates']])
session.flush()
session.commit()
return JSONResponse(status_code=204)
except:
session.rollback()
return JSONResponse(status_code=500, content=dict(msg="DB_PROBLEM"))
@router.delete('/{character_name}', status_code=204, responses={
400: dict(description="Given character doesn't belong to you", model=Message),
404: dict(description="No such character", model=Message),
500: dict(description="Something went wrong with the database", model=Message)
})
async def delete_my_character(request: Request, character_name: str, session: Session = Depends(db.session)):
user = request.state.user
character = Characters.get(session, name=character_name)
if not character:
return JSONResponse(status_code=404, content=dict(msg="WRONG_CHARACTER_NAME"))
if character.user_id != user.id:
return JSONResponse(status_code=400, content=dict(msg="WRONG_USER"))
try:
Characters.filter(session, id=character.id, user_id=user.id).delete(auto_commit=True)
return JSONResponse(status_code=204)
except:
session.rollback()
return JSONResponse(status_code=500, content=dict(msg="DB_PROBLEM"))
@router.post('/follow', status_code=200, response_model=Message, responses={
400: dict(description="Given character doesn't belong to you", model=Message),
404: dict(description="No such character", model=Message),
500: dict(description="Something went wrong with the database", model=Message)
})
async def follow(request: Request, character_id: ID, background_tasks: BackgroundTasks,
session: Session = Depends(db.session)):
user = request.state.user
follower = Characters.get(session, id=user.default_character_id)
followee = Characters.get(session, id=character_id.id)
if not follower or not followee:
return JSONResponse(status_code=404, content=dict(msg="WRONG_CHARACTER_ID"))
if follower.user_id != user.id:
return JSONResponse(status_code=400, content=dict(msg="WRONG_USER"))
follow_exists = Follows.get(session, character_id=followee.id, follower_id=follower.id)
try:
if follow_exists:
session.query(Characters).filter_by(id=followee.id) \
.update({Characters.num_followers: Characters.num_followers - 1})
session.query(Characters).filter_by(id=follower.id) \
.update({Characters.num_follows: Characters.num_follows - 1})
session.query(Follows).filter_by(character_id=followee.id, follower_id=follower.id).delete()
session.flush()
session.commit()
return JSONResponse(status_code=200, content=dict(msg="UNFOLLOW_SUCCESS"))
else:
session.query(Characters).filter_by(id=followee.id) \
.update({Characters.num_followers: Characters.num_followers + 1})
session.query(Characters).filter_by(id=follower.id) \
.update({Characters.num_follows: Characters.num_follows + 1})
session.execute(insert(Follows).values(character_id=followee.id, follower_id=follower.id))
session.flush()
session.commit()
background_tasks.add_task(send_notification, follower.id, followee.id, 'Follow', session=session)
return JSONResponse(status_code=200, content=dict(msg="FOLLOW_SUCCESS"))
except:
session.rollback()
return JSONResponse(status_code=500, content=dict(msg="DB_PROBLEM"))
|
py | 7df8c17a2646d329a2db48e7213b0bf424a41fba | import komand
from .schema import ConnectionSchema
# Custom imports below
from komand.exceptions import ConnectionTestException
import re
import requests
from ..investigate import *
class Connection(komand.Connection):
def __init__(self):
super(self.__class__, self).__init__(input=ConnectionSchema())
self.test_url = 'https://investigate.api.umbrella.com/domains/score/example.com'
#self.test_url = 'https://investigate.api.umbrella.com/domains/categorization'
def connect(self, params={}):
self.logger.info("Connect: Connecting..")
self.key = params.get('api_key').get('secretKey')
self.investigate = investigate.Investigate(self.key)
def test(self):
# Check key format
pattern = re.compile("^[A-Za-z0-9]{8}-[A-Za-z0-9]{4}-[A-Za-z0-9]{4}-[A-Za-z0-9]{4}-[A-Za-z0-9]{12}$")
if not pattern.match(self.key):
raise ConnectionTestException(
cause='Invalid API key.',
assistance='The API key is a UUID-v4 key. For more information, see: https://docs.umbrella.com/developer/enforcement-api/authentication-and-versioning/'
)
# Authenticate to API
# Modified from https://github.com/opendns/investigate-examples/blob/master/scripts.py
headers = {
'Authorization': 'Bearer ' + self.key
}
response = requests.get(
self.test_url,
headers=headers
)
# https://docs.umbrella.com/investigate-api/docs/error-handling-1
if response.status_code == 200:
return True
elif response.status_code == 403:
raise ConnectionTestException(preset=ConnectionTestException.Preset.API_KEY)
elif response.status_code == 404:
raise ConnectionTestException(preset=ConnectionTestException.Preset.NOT_FOUND)
elif response.status_code == 429:
raise ConnectionTestException(preset=ConnectionTestException.Preset.RATE_LIMIT)
else:
raise ConnectionTestException(preset=ConnectionTestException.Preset.UNKNOWN, data=response.text)
return False
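# Hedged usage sketch (hypothetical values; in practice the komand runtime
# drives this lifecycle and injects the logger used by connect()): connect()
# stores the key and builds the Investigate client, then test() validates the
# key format and probes the scores endpoint, raising ConnectionTestException
# on failure.
#
#   conn = Connection()
#   conn.connect({'api_key': {'secretKey': '01234567-89ab-cdef-0123-456789abcdef'}})
#   conn.test()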
|
py | 7df8c19595381c9ce7426a800071779751694e42 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=24
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
def make_circuit(n: int, input_qubit):
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=1
c.append(cirq.H.on(input_qubit[1])) # number=2
c.append(cirq.H.on(input_qubit[1])) # number=7
c.append(cirq.H.on(input_qubit[2])) # number=3
c.append(cirq.H.on(input_qubit[3])) # number=4
c.append(cirq.H.on(input_qubit[0])) # number=16
c.append(cirq.CZ.on(input_qubit[3],input_qubit[0])) # number=17
c.append(cirq.H.on(input_qubit[0])) # number=18
c.append(cirq.H.on(input_qubit[0])) # number=19
c.append(cirq.CZ.on(input_qubit[3],input_qubit[0])) # number=20
c.append(cirq.H.on(input_qubit[0])) # number=21
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=8
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=9
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=10
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=11
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=12
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=13
c.append(cirq.SWAP.on(input_qubit[3],input_qubit[0])) # number=14
c.append(cirq.SWAP.on(input_qubit[3],input_qubit[0])) # number=15
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=22
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=23
# circuit end
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 4
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =2820
info = cirq.final_state_vector(circuit)
qubits = round(log2(len(info)))
frequencies = {
np.binary_repr(i, qubits): round((info[i]*(info[i].conjugate())).real,3)
for i in range(2 ** qubits)
}
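    # Descriptive note on the dict above: each amplitude of the final state
    # vector becomes an outcome probability, p(i) = |info[i]|**2, computed as
    # info[i] * conj(info[i]) and rounded to three decimals; the keys are the
    # binary representations of the basis-state indices.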
writefile = open("../data/startCirq_Class922.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close() |
py | 7df8c22723f3d82249ddddb8891304c22e36f27c | from flask_restful import Resource, reqparse, request
from app.helpers.rest import response
from app.helpers import cmd_parser as cmd
from app import psycopg2,db
from app.libs import util as utils
from app.models import model
class CompanyProducts(Resource):
def get(self):
command = utils.get_command(request.path)
command = "dt_"+command
try:
results = model.get_all(command)
obj_userdata = list()
for i in results:
data = {
"id_company_product": str(i['id_company_product']),
"id_company": str(i['id_company']),
"id_product": str(i['id_product']),
"id_worker": str(i['id_worker']),
"nm_company_product": str(i['nm_company_product'])
}
obj_userdata.append(data)
        except Exception as e:
            respons = {
                "status": False,
                "error": str(e)
            }
            return response(200, data=[], message=respons)
        else:
            return response(200, data=obj_userdata)
def post(self):
json_req = request.get_json(force=True)
command = utils.get_command(request.path)
command = 'dt_'+command
init_data = cmd.parser(json_req, command)
respons = {}
if init_data['action'] == 'insert':
table = init_data['data'][0]['table']
fields = init_data['data'][0]['fields']
try:
result = model.insert(table, fields)
except Exception as e:
respons = {
"status": False,
"error": str(e)
}
else:
respons = {
"status": True,
"messages": "Success",
"id": result
}
finally:
return response(200, data=fields , message=respons)
if init_data['action'] == 'remove':
table = ""
tags = dict()
fields = ""
for i in init_data['data']:
table = i['table']
tags = i['tags']
fields = str(list(tags.keys())[0])
try:
result = model.delete(table, fields, tags[fields])
except Exception as e:
respons = {
"status": False,
"messages": str(e)
}
else:
respons = {
"status": result,
"messages": "Success!"
}
finally:
return response(200, data=tags, message=respons)
if init_data['action'] == 'where':
obj_userdata = list()
table = ""
fields = ""
tags = dict()
for i in init_data['data']:
table = i['table']
tags = i['tags']
for a in tags:
if tags[a] is not None:
fields = a
try:
result = model.get_by_id(table,fields,tags[fields])
except Exception as e:
respons = {
"status": False,
"messages": str(e)
}
else:
for i in result :
data = {
"id_company_product": str(i['id_company_product']),
"id_company": str(i['id_company']),
"id_product": str(i['id_product']),
"id_worker": str(i['id_worker']),
"nm_company_product": i['nm_company_product']
}
obj_userdata.append(data)
respons = {
"status": True,
"messages": "Fine!"
}
finally:
return response(200, data=obj_userdata , message=respons) |