{
"source": "jof2jc/custom1",
"score": 2
}
#### File: doctype/testdoctype/asset.py
```python
from __future__ import unicode_literals
import frappe
from frappe import _
# test_records = frappe.get_test_records('testdoctype')
def set_vehicle_status(self, method):
#batch_doc = frappe.get_doc("Batch", batch_id)
#pi_doc = frappe.get_doc("Purchase Invoice", pi_name)
#frappe.throw(_("hai {0}").format(self.name))
if self.vehicles is not None:
for d in self.vehicles:
#has_batch_no = frappe.db.get_value('Item', d.item_code, 'has_batch_no')
if d.vehicle_no:
#frappe.throw(_("hai {0}").format(d.vehicle_no))
vehicle_doc = frappe.get_doc("Asset", d.vehicle_no)
if self.docstatus == 1:
vehicle_doc.vehicle_status = "In Use"
vehicle_doc.reference_doc_name = self.name
if d.is_finish:
#frappe.msgprint(_("hai finish"))
vehicle_doc.vehicle_status = "Available"
vehicle_doc.reference_doc_name = ""
else:
#frappe.msgprint(_("hai cancel"))
vehicle_doc.vehicle_status = "Available"
vehicle_doc.reference_doc_name = ""
vehicle_doc.save()
```
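The `(self, method)` signature above is the shape Frappe expects for a controller hook registered through `doc_events`: `self` is the document being submitted or cancelled and `method` is the event name. A minimal sketch of how such a hook is typically wired up in the app's `hooks.py`; the `"Trip"` doctype name and the dotted module path are assumptions for illustration, not taken from this repository:
```python
# hooks.py (sketch) -- doctype name and module path are assumed, not confirmed.
doc_events = {
    "Trip": {
        "on_submit": "custom1.doctype.testdoctype.asset.set_vehicle_status",
        "on_cancel": "custom1.doctype.testdoctype.asset.set_vehicle_status",
    }
}
```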
#### File: report/scan_print_sheet/scan_print_sheet.py
```python
from __future__ import unicode_literals
import frappe
from frappe import msgprint, _
from frappe.utils import flt,cstr, nowdate, date_diff, getdate, format_datetime, format_time,formatdate, get_datetime, get_time
def execute(filters=None):
if not filters: filters = {}
data_map = {}
columns = []
columns = get_columns(filters)
data = []
data_map = get_data(filters)
for d in data_map:
data.append([d.name, d.marketplace_courier, d.sales_team, d.pack or 1.0, d.printed, getdate(get_datetime(d.transaction_date)),
format_time(get_datetime(d.transaction_date)),
d.delivered, getdate(get_datetime(d.delivery_date)) if d.delivery_date else '',
format_time(get_datetime(d.delivery_date)) if d.delivery_date else '',
d.territory, d.cancelled, d.company, d.scan_print_ref, d.scan_out_ref
])
return columns, data
def get_columns(filters):
"""return columns based on filters"""
columns = [_("AWB No") + ":Link/AWB Invoice:150", _("Courier") + ":Link/Marketplace Courier:120", _("Sales Team") + ":Link/Sales Person:100",
_("Pack") + ":Float:50", _("Printed") + ":Int:80",
_("Date") + ":Date:80", _("Time") + ":Time:80", _("Delivered") + ":Int:80",
_("Delivery Date") + ":Date:100", _("Delivery Time") + ":Time:100", _("Territory") + ":Link/Territory:100", _("Cancelled") + "::80",
_("Company") + ":Link/Company:100", _("Scan Print Ref") + ":Link/Scan Print:120",
_("Scan Out Ref") + ":Link/Scan Print:120"
]
return columns
def get_conditions(filters):
conditions = []
if filters.get("company"):
conditions.append("si.company=%(company)s")
if not filters.get("cancelled"):
conditions.append("si.cancelled <> 1")
if filters.get("from_date"):
conditions.append("date(si.transaction_date) between %(from_date)s and %(to_date)s")
if filters.get("territory"):
conditions.append("si.territory=%(territory)s")
return "and {}".format(" and ".join(conditions)) if conditions else ""
def get_data(filters):
"""returns all data details"""
data_map = {}
data_map = frappe.db.sql("""select name, marketplace_courier, sales_team, pack, printed, transaction_date, delivered, delivery_date, territory, cancelled, scan_print_ref, scan_out_ref, company
from `tabAWB Invoice` si
where printed=1 {conditions} order by si.transaction_date desc"""\
.format(conditions=get_conditions(filters)), filters, as_dict=1)
#print data_map
return data_map
def get_pl_conditions(filters):
conditions = []
if filters.get("item_code"):
conditions.append("ip.item_code=%(item_code)s")
return "and {}".format(" and ".join(conditions)) if conditions else ""
```
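This follows the standard script-report contract: `execute(filters)` returns `(columns, data)`. A minimal sketch of invoking it from a `bench console` session; the dotted import path and the filter values are placeholders (the real path depends on which module the report lives under), and note that `get_conditions` only adds `%(to_date)s` when `from_date` is given, so the two dates should be passed together:
```python
# Inside `bench --site <site> console`; import path and values are placeholders.
from custom1.custom1.report.scan_print_sheet.scan_print_sheet import execute

columns, data = execute({
    "company": "My Company",
    "from_date": "2024-01-01",
    "to_date": "2024-01-31",
})
print(len(data), "rows")
```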
#### File: report/sts_item_summary/sts_item_summary.py
```python
from __future__ import unicode_literals
import frappe
from frappe import msgprint, _
from frappe.utils import flt
def execute(filters=None):
if not filters: filters = {}
columns = get_columns(filters)
item_map = get_item_details()
#pl = get_price_list()
pl = get_sales_price_list()
last_purchase_rate = get_last_purchase_rate()
#bom_rate = get_item_bom_rate()
val_rate_map = get_bin_details()
from erpnext.accounts.utils import get_currency_precision
precision = get_currency_precision() or 2
data = []
for item in sorted(item_map):
#price = pl[item.name]
if ("Accounts Manager" in frappe.get_roles(frappe.session.user)):
data.append([item.name, item.get("item_name"), item.actual_qty, item.stock_uom, item.warehouse,
pl.get(item.name, {}).get("base"),
pl.get(item.name, {}).get("agen1"),pl.get(item.name, {}).get("agen2"),pl.get(item.name, {}).get("partai"),
pl.get(item.name, {}).get("tk1"),pl.get(item.name, {}).get("tk2"),
pl.get(item.name, {}).get("tb1"),pl.get(item.name, {}).get("tb2"),
#pl.price1 if price else 0, pl.price2 if price else 0,
#item.price1, item.price2,
item.valuation_rate, #val_rate_map[item]["val_rate"], #flt(val_rate_map.get(item, 0), precision),
flt(last_purchase_rate.get(item.name, 0), precision), item.item_group, item.description
#pl.get(item, {}).get("Buying"),
#flt(bom_rate.get(item, 0), precision)
])
else:
data.append([item.name, item.get("item_name"), item.actual_qty, item.stock_uom, item.warehouse,
pl.get(item.name, {}).get("base"),
pl.get(item.name, {}).get("agen1"),pl.get(item.name, {}).get("agen2"),pl.get(item.name, {}).get("partai"),
pl.get(item.name, {}).get("tk1"),pl.get(item.name, {}).get("tk2"),
pl.get(item.name, {}).get("tb1"),pl.get(item.name, {}).get("tb2"),
#pl.price1 if price else 0, pl.price2 if price else 0,
#item.price1, item.price2,
item.item_group, item.description
])
return columns, data
def get_columns(filters):
"""return columns based on filters"""
if ("Accounts Manager" in frappe.get_roles(frappe.session.user)):
columns = [_("Item") + ":Link/Item:120", _("Item Name") + "::200", _("Actual Qty") + ":Float:75", _("UOM") + ":Link/UOM:80",
_("Warehouse") + ":Link/Warehouse:125",
_("Base") + ":Currency:90", _("Agen1") + ":Currency:90", _("Agen2") + ":Currency:90",
_("Partai") + ":Currency:90", _("TK1") + ":Currency:90", _("TK2") + ":Currency:90",
_("TB1") + ":Currency:90", _("TB2") + ":Currency:90",
_("Valuation Rate") + ":Currency:80", _("Last Purchase Rate") + ":Currency:90",
_("Item Group") + ":Link/Item Group:125", _("Description") + "::150"]
#_("Purchase Price List") + "::180", _("BOM Rate") + ":Currency:90"]
else:
columns = [_("Item") + ":Link/Item:120", _("Item Name") + "::200", _("Actual Qty") + ":Float:75", _("UOM") + ":Link/UOM:80",
_("Warehouse") + ":Link/Warehouse:125",
_("Base") + ":Currency:90", _("Agen1") + ":Currency:90", _("Agen2") + ":Currency:90",
_("Partai") + ":Currency:90", _("TK1") + ":Currency:90", _("TK2") + ":Currency:90",
_("TB1") + ":Currency:90", _("TB2") + ":Currency:90",
_("Item Group") + ":Link/Item Group:125", _("Description") + "::150"]
#_("Purchase Price List") + "::180", _("BOM Rate") + ":Currency:90"]
return columns
def get_item_details():
"""returns all items details"""
#item_map = {}
item_map = frappe.db.sql("select it.name, it.item_group, it.item_name, it.description, bin.actual_qty, bin.warehouse, \
it.stock_uom, bin.valuation_rate from tabItem it left join tabBin bin on (it.name=bin.item_code and it.stock_uom = bin.stock_uom) \
order by it.item_code, it.item_group", as_dict=1)
#left join `tabItem Price` ip on (it.item_code=ip.item_code) where ip.price_list='Standard Selling'
#print item_map
return item_map
def get_sales_price_list():
"""Get selling price list of every item"""
rate = {}
price_list = frappe.db.sql("""select ip.item_code, #ip.buying, ip.selling,
round(ip.price_list_rate,2) as base,
round(ip.agen1,2) as agen1, round(ip.agen2,2) as agen2, round(ip.partai,2) as partai,
round(ip.tk1,2) as tk1, round(ip.tk2,2) as tk2,
round(ip.tb1,2) as tb1, round(ip.tb2,2) as tb2
from `tabItem Price` ip where ip.price_list='Standard Selling'""", as_dict=1)
#from `tabItem` it left join `tabItem Price` ip on (it.item_code=ip.item_code) where ip.price_list='Price 1'""", as_dict=1)
for j in price_list:
rate.setdefault(j.item_code, j)
#print price_list
return rate
def get_price_list():
"""Get selling & buying price list of every item"""
rate = {}
price_list = frappe.db.sql("""select ip.item_code, ip.buying, ip.selling,
concat(ifnull(cu.symbol,ip.currency), " ", round(ip.price_list_rate,2), " - ", ip.price_list) as price
from `tabItem Price` ip, `tabPrice List` pl, `tabCurrency` cu
where ip.price_list=pl.name and pl.currency=cu.name and pl.enabled=1""", as_dict=1)
for j in price_list:
if j.price:
rate.setdefault(j.item_code, {}).setdefault("Buying" if j.buying else "Selling", []).append(j.price)
item_rate_map = {}
print(rate)
for item in rate:
for buying_or_selling in rate[item]:
item_rate_map.setdefault(item, {}).setdefault(buying_or_selling,
", ".join(rate[item].get(buying_or_selling, [])))
print(item_rate_map)
return item_rate_map
def get_last_purchase_rate():
item_last_purchase_rate_map = {}
query = """select * from (select
result.item_code,
result.base_rate
from (
(select
po_item.item_code,
po_item.item_name,
po.transaction_date as posting_date,
po_item.base_price_list_rate,
po_item.discount_percentage,
po_item.base_rate
from `tabPurchase Order` po, `tabPurchase Order Item` po_item
where po.name = po_item.parent and po.docstatus = 1)
union
(select
pr_item.item_code,
pr_item.item_name,
pr.posting_date,
pr_item.base_price_list_rate,
pr_item.discount_percentage,
pr_item.base_rate
from `tabPurchase Receipt` pr, `tabPurchase Receipt Item` pr_item
where pr.name = pr_item.parent and pr.docstatus = 1)
union
(select
pi_item.item_code,
pi_item.item_name,
pi.posting_date,
pi_item.base_price_list_rate,
pi_item.discount_percentage,
pi_item.base_rate
from `tabPurchase Invoice` pi, `tabPurchase Invoice Item` pi_item
where pi.name = pi_item.parent and pi.docstatus = 1)
) result
order by result.item_code asc, result.posting_date desc) result_wrapper
group by item_code"""
for d in frappe.db.sql(query, as_dict=1):
item_last_purchase_rate_map.setdefault(d.item_code, d.base_rate)
print(item_last_purchase_rate_map)
return item_last_purchase_rate_map
def get_item_bom_rate():
"""Get BOM rate of an item from BOM"""
item_bom_map = {}
for b in frappe.db.sql("""select item, (total_cost/quantity) as bom_rate
from `tabBOM` where is_active=1 and is_default=1""", as_dict=1):
item_bom_map.setdefault(b.item, flt(b.bom_rate))
return item_bom_map
def get_bin_details():
"""Get bin details from all warehouses"""
bin_details = frappe.db.sql("""select name, item_code, actual_qty, valuation_rate as val_rate, warehouse
from `tabBin` order by item_code""", as_dict=1)
#print bin_details
bin_map = frappe._dict()
for i in bin_details:
bin_map.setdefault(i.item_code, i)
print(bin_map)
return bin_map
```
#### File: patches/v10/reset_default_icons.py
```python
from __future__ import unicode_literals
import frappe
from frappe import _, scrub
from frappe.utils import nowdate, nowtime, now_datetime, flt, cstr, formatdate, get_datetime, add_days, getdate, get_time
from frappe.utils.dateutils import parse_date
from frappe.model.naming import make_autoname
import json
import datetime
from erpnext.setup.utils import get_exchange_rate
from erpnext.accounts.utils import get_account_currency, get_balance_on
import erpnext.accounts.utils
from re import sub
from decimal import Decimal
def execute():
frappe.db.sql("""DELETE from `tabDesktop Icon` where standard=0 and module_name not in ('Accounts','Selling','Buying','Stock','Setup')""")
#frappe.db.sql ("""Update `tabDesktop Icon` set hidden=0, blocked=0 where standard=1""")
#frappe.db.sql ("""Update `tabDesktop Icon` set standard=1, hidden=1, blocked=1 where module_name in ('Accounts','Selling','Buying','Stock','Setup')""")
frappe.db.sql ("""Update `tabDesktop Icon` set hidden=1, blocked=1 where
module_name in ('File Manager','Tools','ptdun','Report','Website','CRM','Integrations',
'Email Inbox','Issue','Lead','Profit and Loss Statement','Profit and Loss Statment','Human Resources','Manufacturing', 'POS',
'Leaderboard','Support','Learn','Maintenance','Account Receivable','Account Payable',
'Student','Student Group', 'Course Schedule','Sales Register', 'Student Attendance', 'Course', 'Student Attendance Tool',
'Program', 'Student Applicant', 'Examination', 'Assessment', 'Fees', 'Instructor', 'Room', 'Schools', 'Healthcare',
'Education', 'Hub', 'Data Import Tool', 'Restaurant', 'Agriculture', 'Crop', 'Crop Cycle', 'Fertilizer',
'Land Unit', 'Disease', 'Plant Analysis', 'Soil Analysis', 'Water Analysis', 'Soil Texture', 'Weather', 'Grant Application',
'Donor', 'Volunteer', 'Member','Chapter', 'Delivery Note Trends', 'Non Profit')""")
#frappe.db.sql ("""Update `tabDesktop Icon` set hidden=1, blocked=1 where
# module_name in ('Custom1','Item','Customer','Supplier','Sales Order','Delivery Note','Sales Invoice',
# 'Purchase Order','Purchase Receipt','Purchase Invoice','Payment Entry','Journal Entry','Note','ToDo', 'Task',
# 'Stock Entry','Stock Reconciliation','Item Summary','Item Price','Chart of Accounts','Item wise Sales Register',
# 'Stock Reorder Projection','IMEI', 'Project','Projects', 'Product Bundle', 'Warehouse', 'Terms and Conditions',
# 'Sales Overview By Period', 'Adresses and Contacts', 'Material Request', 'Quotation', 'Supplier Quotation')""")
#frappe.db.sql ("""Update `tabDesktop Icon` set hidden=1 where standard=1 and
# module_name in ('Accounts','Selling','Buying','Stock','Setup')""")
if frappe.db.exists("DocType", "Payment Term"):
frappe.db.sql ("""Update `tabCustomer Group` set payment_terms=''""")
frappe.db.sql ("""Update `tabSupplier Type` set payment_terms=''""")
frappe.db.sql ("""Update `tabCompany` set payment_terms=''""")
doc = frappe.get_doc("Stock Settings")
doc.show_barcode_field = 0
doc.save()
doc = frappe.get_doc("Website Settings")
doc.home_page = "desk"
doc.save()
```
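A module under `patches/v10/` only runs if its dotted path is listed in the app's `patches.txt` (Frappe executes each entry's `execute()` once during `bench migrate`). A sketch of the entry and of running the patch by hand; the `custom1` app name in the path is an assumption:
```python
# patches.txt entry (single line, dotted path; app name assumed):
#   custom1.patches.v10.reset_default_icons
#
# Or run it once from `bench --site <site> console`:
import frappe
from custom1.patches.v10.reset_default_icons import execute

execute()
frappe.db.commit()
```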
{
"source": "jof2jc/customamd",
"score": 2
}
#### File: report/stock_ledger_detail/stock_ledger_detail.py
```python
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe import msgprint, _
def execute(filters=None):
columns = get_columns()
sl_entries = get_stock_ledger_entries(filters)
item_details = get_item_details(filters)
opening_row = get_opening_balance(filters, columns)
party_details = {}
party = []
data = []
if opening_row:
data.append(opening_row)
for sle in sl_entries:
item_detail = item_details[sle.item_code]
party_details = get_party_details(sle.item_code, sle.voucher_type, sle.voucher_no)
if sle.voucher_type in ("Sales Invoice", "Purchase Invoice", "Purchase Receipt", "Delivery Note"):
party = party_details[sle.voucher_no]
data.append([sle.date, sle.item_code, item_detail.item_name, item_detail.item_group,
item_detail.brand, item_detail.description, sle.warehouse,
item_detail.stock_uom, sle.actual_qty, sle.qty_after_transaction,
(sle.incoming_rate if sle.actual_qty > 0 else 0.0), # party.outgoing_rate,
sle.valuation_rate, sle.stock_value, party.party,
sle.voucher_type, sle.voucher_no, party.party_type,
sle.batch_no, sle.serial_no, sle.company])
else:
data.append([sle.date, sle.item_code, item_detail.item_name, item_detail.item_group,
item_detail.brand, item_detail.description, sle.warehouse,
item_detail.stock_uom, sle.actual_qty, sle.qty_after_transaction,
(sle.incoming_rate if sle.actual_qty > 0 else 0.0), # 0.0,
sle.valuation_rate, sle.stock_value, "",
sle.voucher_type, sle.voucher_no, "",
sle.batch_no, sle.serial_no, sle.company])
return columns, data
def get_columns():
return [_("Date") + ":Datetime:95", _("Item") + ":Link/Item:130", _("Item Name") + "::100", _("Item Group") + ":Link/Item Group:100",
_("Brand") + ":Link/Brand:100", _("Description") + "::200", _("Warehouse") + ":Link/Warehouse:100",
_("Stock UOM") + ":Link/UOM:100", _("Qty") + ":Float:50", _("Balance Qty") + ":Float:100",
_("Incoming Rate") + ":Currency:110", # _("Outgoing Rate") + ":Currency:110",
_("Valuation Rate") + ":Currency:110", _("Balance Value") + ":Currency:110",
_("Customer/Supplier") + ":Dynamic Link/Party Type:150",
_("Voucher Type") + "::110", _("Voucher #") + ":Dynamic Link/Voucher Type:100",
_("Party Type") + "::100",
_("Batch") + ":Link/Batch:100", _("Serial #") + ":Link/Serial No:100", _("Company") + ":Link/Company:100"
]
def get_stock_ledger_entries(filters):
return frappe.db.sql("""select concat_ws(" ", posting_date, posting_time) as date,
item_code, warehouse, actual_qty, qty_after_transaction, incoming_rate, valuation_rate,
stock_value, voucher_type, voucher_no, batch_no, serial_no, company
from `tabStock Ledger Entry` sle
where company = %(company)s and
posting_date between %(from_date)s and %(to_date)s
{sle_conditions}
order by posting_date asc, posting_time asc, name asc"""\
.format(sle_conditions=get_sle_conditions(filters)), filters, as_dict=1)
def get_item_details(filters):
item_details = {}
for item in frappe.db.sql("""select name, item_name, description, item_group,
brand, stock_uom from `tabItem` {item_conditions}"""\
.format(item_conditions=get_item_conditions(filters)), filters, as_dict=1):
item_details.setdefault(item.name, item)
return item_details
def get_party_details(item_code,voucher_type,voucher_no):
party_details = {}
params = {
'item_code' : item_code,
'voucher_no' : voucher_no,
'voucher_type' : voucher_type
}
if voucher_type == "Sales Invoice":
for party in frappe.db.sql("""select dt_item.parent, dt_item.item_code, dt.customer as party, 'Customer' as party_type, 0.0 as outgoing_rate
from `tabSales Invoice` dt, `tabSales Invoice Item` dt_item
where dt.name = dt_item.parent and dt_item.parent = %(voucher_no)s and dt_item.item_code = %(item_code)s""", params, as_dict=1):
party_details.setdefault(party.parent, party)
elif voucher_type == "Delivery Note":
for party in frappe.db.sql("""select dt_item.parent, dt_item.item_code, dt.customer as party, 'Customer' as party_type, 0.0 as outgoing_rate
from `tabDelivery Note` dt, `tabDelivery Note Item` dt_item
where dt.name = dt_item.parent and dt_item.parent = %(voucher_no)s and dt_item.item_code = %(item_code)s""", params, as_dict=1):
party_details.setdefault(party.parent, party)
elif voucher_type == "Purchase Invoice":
for party in frappe.db.sql("""select dt_item.parent, dt_item.item_code, dt.supplier as party, 'Supplier' as party_type, 0.0 as outgoing_rate
from `tabPurchase Invoice` dt, `tabPurchase Invoice Item` dt_item
where dt.name = dt_item.parent and dt_item.parent = %(voucher_no)s and dt_item.item_code = %(item_code)s""", params, as_dict=1):
party_details.setdefault(party.parent, party)
elif voucher_type == "Purchase Receipt":
for party in frappe.db.sql("""select dt_item.parent, dt_item.item_code, dt.supplier as party, 'Supplier' as party_type, 0.0 as outgoing_rate
from `tabPurchase Receipt` dt, `tabPurchase Receipt Item` dt_item
where dt.name = dt_item.parent and dt_item.parent = %(voucher_no)s and dt_item.item_code = %(item_code)s""", params, as_dict=1):
party_details.setdefault(party.parent, party)
return party_details
def get_party_conditions(item_code, voucher_no):
conditions = []
if item_code:
conditions.append("dt_item.item_code=''")
if voucher_no:
conditions.append("dt_item.parent=''")
return "and {}".format(" and ".join(conditions)) if conditions else ""
def get_item_conditions(filters):
conditions = []
if filters.get("item_code"):
conditions.append("name=%(item_code)s")
if filters.get("brand"):
conditions.append("brand=%(brand)s")
return "where {}".format(" and ".join(conditions)) if conditions else ""
def get_sle_conditions(filters):
conditions = []
item_conditions=get_item_conditions(filters)
if item_conditions:
conditions.append("""item_code in (select name from tabItem
{item_conditions})""".format(item_conditions=item_conditions))
if filters.get("warehouse"):
conditions.append(get_warehouse_condition(filters.get("warehouse"))) #conditions.append("warehouse=%(warehouse)s")
if filters.get("voucher_no"):
conditions.append("voucher_no=%(voucher_no)s")
return "and {}".format(" and ".join(conditions)) if conditions else ""
def get_opening_balance(filters, columns):
if not (filters.item_code and filters.warehouse and filters.from_date):
return
from erpnext.stock.stock_ledger import get_previous_sle
last_entry = get_previous_sle({
"item_code": filters.item_code,
"warehouse": get_warehouse_condition(filters.warehouse),
"posting_date": filters.from_date,
"posting_time": "00:00:00"
})
row = [""]*len(columns)
row[1] = _("'Opening'")
for i, v in ((9, 'qty_after_transaction'), (11, 'valuation_rate'), (12, 'stock_value')):
row[i] = last_entry.get(v, 0)
return row
def get_warehouse_condition(warehouse):
warehouse_details = frappe.db.get_value("Warehouse", warehouse, ["lft", "rgt"], as_dict=1)
if warehouse_details:
return " exists (select name from `tabWarehouse` wh \
where wh.lft >= %s and wh.rgt <= %s and sle.warehouse = wh.name)"%(warehouse_details.lft,
warehouse_details.rgt)
return ''
```
{
"source": "jof2jc/jd",
"score": 2
}
#### File: api/rest/ComJdQlBasicWsGlscGlscBasicSecondaryWSGetAssortByFidRequest.py
```python
from jd.api.base import RestApi
class ComJdQlBasicWsGlscGlscBasicSecondaryWSGetAssortByFidRequest(RestApi):
def __init__(self,domain,port=80):
RestApi.__init__(self,domain, port)
self.assFid = None
def getapiname(self):
return 'jingdong.com.jd.ql.basic.ws.glsc.GlscBasicSecondaryWS.getAssortByFid'
```
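The generated request classes below all follow the same pattern: construct with a domain (and port), assign the request fields as plain attributes, then call `getResponse()` with an access token, as `jdapi.py` and `http_report_client.py` do later in this dump. A minimal usage sketch; the endpoint, credentials, token and `assFid` value are placeholders, and the import path assumes the file sits under `jd/api/rest/` as the header suggests:
```python
import jd
from jd.api.rest.ComJdQlBasicWsGlscGlscBasicSecondaryWSGetAssortByFidRequest import (
    ComJdQlBasicWsGlscGlscBasicSecondaryWSGetAssortByFidRequest,
)

jd.setDefaultAppInfo("app_key", "app_secret")      # placeholder credentials
req = ComJdQlBasicWsGlscGlscBasicSecondaryWSGetAssortByFidRequest("api.jd.com", 443)
req.assFid = 1                                     # placeholder assortment id
response = req.getResponse(access_token="token", ssl=True)
print(response)
```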
#### File: api/rest/SellerOrderPrintOrderRequest.py
```python
from jd.api.base import RestApi
class SellerOrderPrintOrderRequest(RestApi):
def __init__(self,domain,port=80):
RestApi.__init__(self,domain, port)
self.printType = None
self.printNum = None
self.orderId = None
def getapiname(self):
return 'jingdong.seller.order.printOrder'
```
#### File: api/rest/SellerOrderSendGoodsOpenApiRequest.py
```python
from jd.api.base import RestApi
class SellerOrderSendGoodsOpenApiRequest(RestApi):
def __init__(self,domain,port=80):
RestApi.__init__(self,domain, port)
self.orderId = None
self.expressNo = None
self.expressCompany = None
def getapiname(self):
return 'jingdong.seller.order.sendGoodsOpenApi'
```
#### File: api/rest/SellerPromoQueryPlummetedInfoByConditionRequest.py
```python
from jd.api.base import RestApi
class SellerPromoQueryPlummetedInfoByConditionRequest(RestApi):
def __init__(self,domain,port=80):
RestApi.__init__(self,domain, port)
self.thirdArea = None
self.pageSize = None
self.ltCreateTime = None
self.secondArea = None
self.promoId = None
self.skuId = None
self.childType = None
self.userGrade = None
self.ltBeginTime = None
self.promoState = None
self.firstArea = None
self.page = None
self.gtCreateTime = None
self.riskLevel = None
self.gtBeginTime = None
self.gtEndTime = None
self.promoName = None
self.ltEndTime = None
self.spuId = None
self.storeIds = None
def getapiname(self):
return 'jingdong.seller.promo.queryPlummetedInfoByCondition'
```
#### File: api/rest/SellerPromoSingleCreatePlummetedPromoRequest.py
```python
from jd.api.base import RestApi
class SellerPromoSingleCreatePlummetedPromoRequest(RestApi):
def __init__(self,domain,port=80):
RestApi.__init__(self,domain, port)
self.riskLevel = None
self.promoChannel = None
self.bindToken = None
self.promoNum = None
self.limitBuyType = None
self.quota = None
self.promoAdword = None
self.beginTime = None
self.areaId = None
self.pId = None
self.areaTag = None
self.skuId = None
self.childType = None
self.userGrade = None
self.promoType = None
self.promoName = None
self.activityUrl = None
self.limitBuyMaxNum = None
self.limitBuyMinNum = None
self.endTime = None
self.mobileActivityUrl = None
self.promoReason = None
self.storeId = None
def getapiname(self):
return 'jingdong.seller.promo.singleCreatePlummetedPromo'
```
#### File: jd/controllers/jdapi.py
```python
from urllib.parse import urlparse
import jd
from jd.api.base import RestApi
import frappe, json
from jd.controllers.jdauth import AuthClient, AuthRequest
from frappe.utils import logger, cstr, cint, convert_utc_to_user_timezone, now, flt, time_diff_in_hours, now_datetime, nowdate, getdate, get_weekdays, add_days, add_to_date, today, get_time, get_datetime
class JdRequest(RestApi):
def __init__(self, api , endpoint):
parsed_uri = urlparse(endpoint)
scheme = parsed_uri.scheme
domain = parsed_uri.netloc
port = 443 if scheme == "https" else 80
self.ssl = True if port == 443 else False
self.api_name = api
RestApi.__init__(self, domain, port)
def getapiname(self):
return self.api_name
def add_api_param(self, name, value):
setattr(self, name, value)
def get_response(self, access_token=None):
return self.getResponse(access_token=access_token,ssl=self.ssl)
class JdClient(object):
def __init__(self, key, secret):
self.app_key = key
self.app_secret = secret
self.__setup_client()
def __setup_client(self):
jd.setDefaultAppInfo(self.app_key, self.app_secret)
def execute(self,request, access_token=None):
try:
print("access_token:", access_token)
print("JdClient execute >>>", request.__dict__)
response = request.get_response(access_token=access_token)
print(cstr(response))
return response
except Exception as e:
error_trace = frappe.get_traceback()
if error_trace:
frappe.log_error(cstr(request.__dict__) + "\n\n" + error_trace,title="Error JdClient execute")
def get_access_token_jd(api_doc=None,headers=None,body="", commit_flag=False):
access_token = ""
for t in api_doc.get("api_end_point",{"disabled":0,"type":("in",["Refresh Token","Access Token"])}):
if api_doc.marketplace_type == 'JDID' and t.type == "Refresh Token":
body={}
hour_factor=0
import ast
access_token = t.access_token or "" #use previous token first
if t.body:
body = ast.literal_eval(t.body) or {}
if body.get("expires_in"):
hour_factor = flt(body.get("expires_in")/3600.0)-4
if hour_factor <=0: hour_factor = 20
if (not t.last_token_created or not t.access_token) or (time_diff_in_hours(now_datetime(),t.last_token_created or t.modified) >= hour_factor):
response = get_jd_response(t, api_doc) or {}
if response.get("access_token"):
#print("access_token: ", response.get("access_token"))
access_token = cstr(response.get("access_token")) or ""
if access_token:
t.db_set("last_token_created",now_datetime())
t.db_set("body",cstr(response))
t.db_set("access_token", access_token)
t.db_set("refresh_token", cstr(response.get("refresh_token")) or "")
if commit_flag: frappe.db.commit()
return access_token or ""
def get_jd_response(t, api_doc, access_token='', d=None, item=None):
r = t
request, response, created_before = None, None, None
headers = json.loads(r.header.strip())
client = eval(headers.get("client"))
request = eval(headers.get("request"))
for k,v in headers.items():
if "param" in k: eval(v)
#else: k = eval(v)
if client and request:
try:
#print(vars(request))
response = client.execute(request=request,access_token=access_token)
#print(api_doc.name, "JdRequest execute >>>", cstr(response))
except Exception as e:
error_trace = frappe.get_traceback()
if error_trace:
frappe.log_error(cstr(vars(request)) + "\n\n" + error_trace,title="%s, %s: Error get_jd_response" % (api_doc.name, t.type))
return response
```
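`JdClient` sets the default app credentials and calls the request's `get_response`, while `JdRequest` lets any API be addressed by name and endpoint without a generated class. A minimal sketch with placeholder credentials, endpoint and order id; the API name is inferred from the response key used in `order.py` below and should be treated as an assumption:
```python
from jd.controllers.jdapi import JdClient, JdRequest

client = JdClient("app_key", "app_secret")                      # placeholders
request = JdRequest(
    api="jingdong.seller.order.getOrderInfoByOrderId",          # assumed API name
    endpoint="https://api.jd.com/routerjson",                   # placeholder endpoint
)
request.add_api_param("orderId", "1234567890")                  # placeholder order id
response = client.execute(request, access_token="token")
```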
#### File: jd/controllers/order.py
```python
from __future__ import unicode_literals
import frappe
import requests
import json
from frappe.model.document import Document
from frappe.utils import logger, cstr, cint, convert_utc_to_user_timezone, now, flt, time_diff_in_hours, now_datetime, nowdate, getdate, get_weekdays, add_days, add_to_date, today, get_time, get_datetime
import datetime
from frappe.desk.doctype.tag.tag import add_tag
from jd.controllers.jdapi import JdClient, JdRequest, get_jd_response, get_access_token_jd
from jd.controllers.jdauth import AuthClient, AuthRequest
from custom1.marketplace_flow.marketplace_integration import insert_new_sales_order
def run_get_marketplace_order_jd():
if "is_marketplace_shop" not in frappe.db.get_table_columns("Customer"): return
shop_list = frappe.get_list("Customer",fields=["name"],filters=[["marketplace_type","=","JDID"],["is_marketplace_shop","=",1],["api_key","!=",""], ["run_order_job","=","1"]])
if not shop_list: return
for shop in shop_list:
print(shop.name, 'processing get_order_list JDID')
access_token=""
jd_doc = None
api_doc = frappe.get_doc("Customer", shop.name)
if api_doc:
if api_doc.marketplace_type == "JDID": jd_doc = frappe.get_doc("Customer", shop.name)
api_template = frappe.get_doc(api_doc.doctype,{"marketplace_type": api_doc.marketplace_type,"is_template":1})
if api_template:
api_doc.api_end_point = api_template.api_end_point
if jd_doc:
access_token = get_access_token_jd(jd_doc, commit_flag=True) or ""
order_list = []
result = {}
order_detail_json = order_item_json = None
if api_doc and api_doc.get("marketplace_type") and api_doc.get("api_end_point"):
for r in api_doc.get("api_end_point",{"disabled":0,"type":("in",["Update Order Status","Get Order List","Get Order Detail"])}):
if api_doc.marketplace_type == "JDID":
if r.type == "Get Order List" and access_token:
body={}
header = r.header
for k,v in json.loads(r.body.strip()).items():
body.update({k:eval(v)})
if k != "[page]":
header = header.replace(k,cstr(body.get(k)))
r.header = header.replace("[page]","1") #first time first page
n=1
while n >= 1:
response = get_jd_response(r, api_doc, access_token)
if response:
if response.get("jingdong_seller_order_getOrderIdListByCondition_response"):
result = response.get("jingdong_seller_order_getOrderIdListByCondition_response").get("result") or {}
if result.get("model"):
order_list += result.get("model") or [] #concat array list of orders
if len(result.get("model")) >= cint(body.get("[page_size]") or "100"): #continue api_call for next page
n = 1
body["[page]"] += 1
r.header = header.replace("[page]",cstr(body.get("[page]")))
else: n = 0
elif cint(result.get("code")) == 0: #failed
n= 0
frappe.log_error(cstr(result),title="%s: Error get_jd_response: %s" % (shop.name, r.type))
else: n = 0
else: n = 0
else: n = 0
print(shop.name, 'order_list:', order_list)
result={}
elif r.type == "Get Order Detail" and order_list and access_token:
for d in order_list:
print(shop.name, "Get Order Detail:", d)
response = get_jd_response(r, api_doc, access_token, d)
if response:
if response.get("jingdong_seller_order_getOrderInfoByOrderId_response"):
result = response.get("jingdong_seller_order_getOrderInfoByOrderId_response").get("result")
if result:
order_data = result.get("model")
order_items_data = order_data.get("orderSkuinfos")
insert_new_sales_order(api_doc, order_data, order_items_data, r)
elif cint(result.get("code"))==0 or not result.get("model"): #failed
frappe.log_error(cstr(result),title="%s: Error get_jd_response: %s" % (shop.name, r.type))
```
#### File: security/tde_client/http_report_client.py
```python
from socket import AddressFamily
import psutil
import platform
from threading import RLock
import os
from jd.api.base import RestApi
from jd import appinfo
import random
import time
import json
ALPHABET = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz=+-*/_|<>^~@?%&"
class HttpReportClient(object):
def __init__(self, tde_client, server_url, access_token, app_key, app_secret):
self.reports = dict()
self.host_info = HttpReportClient.__get_host_info()
self.parent = tde_client
self.server_url = server_url
self.access_token = access_token
self.app_key = app_key
self.app_secret = app_secret
self.env_label = platform.system() + "|" + platform.version() + "|" + platform.python_version()
self.type = {"init": 1, "exception": 2, "statistic": 3, "event": 4}
self.level = {"info": 1, "warn": 2, "error": 3, "severe": 4}
self.__lock = RLock()
self.__add_cup_info()
# statistic record present in long array
# index: enccnt(0) deccnt(1) encerrcnt(2) decerrcnt(3)
self.statistic = [0, 0, 0, 0]
@staticmethod
def __get_host_info():
for name, info in psutil.net_if_addrs().items():
for ip_address in info:
if AddressFamily.AF_INET == ip_address.family:
return ip_address.address
return "Unknown host"
def __add_cup_info(self):
lower_env_label = self.env_label.lower()
cpu_info = 'Unknown'
if lower_env_label.find('linux') != -1:
cpu_info = os.popen('cat /proc/cpuinfo | grep "model name" | uniq').read().split(':')[1].rstrip(
'\n').strip()
elif lower_env_label.find('mac') != -1:
cpu_info = os.popen('sysctl -n machdep.cpu.brand_string').read().rstrip('\n').strip()
elif lower_env_label.find('windows') != -1:
cmd_result = os.popen('wmic cpu get name')
cpu_info = cmd_result.read().replace('Name', '').replace('\n', '', -1).strip()
cmd_result.read()
self.env_label = self.env_label + '|' + cpu_info
def flush(self):
self.insert_statistic_report()
self.send_all_reports()
def send_all_reports(self):
with self.__lock:
for key in list(self.reports.keys()):  # copy keys since entries may be deleted below
val = self.reports[key]
request = JosSecretApiReportRequest(self.server_url, 80)
request.businessId = val['businessId']
request.text = val['text']
request.attribute = json.dumps(val['attributes'])
request.set_app_info(appinfo(self.app_key, self.app_secret))
res = request.getResponse(self.access_token)
if res is not None and res.get('serviceCode') == 0:
del self.reports[key]
def insert_init_report(self):
with self.__lock:
init_msg = {
'businessId': ''.join(random.sample(ALPHABET, 40)),
'text': 'INIT',
'attributes': {
'type': self.type['init'],
'host': self.host_info,
'level': self.level['info'],
'service': self.parent.token['service'],
'sdk_ver': self.parent.sdk_version,
'env': self.env_label,
'ts': round(time.time() * 1000)
}
}
self.reports[self.type['init']] = init_msg
def insert_statistic_report(self):
with self.__lock:
statistic_msg = {
'businessId': ''.join(random.sample(ALPHABET, 40)),
'text': 'STATISTIC',
'attributes': {
'enccnt': str(self.statistic[0]),
'deccnt': str(self.statistic[1]),
'encerrcnt': str(self.statistic[2]),
'decerrcnt': str(self.statistic[3]),
'type': self.type['statistic'],
'host': self.host_info,
'level': self.level['info'],
'service': self.parent.token['service'],
'sdk_ver': self.parent.sdk_version,
'env': self.env_label,
'ts': round(time.time() * 1000)
}
}
self.reports[self.type['statistic']] = statistic_msg
self.statistic = [0, 0, 0, 0]
def insert_event_report(self, event_code, event_detail):
with self.__lock:
event_msg = {
'businessId': ''.join(random.sample(ALPHABET, 40)),
'text': 'EVENT',
'attributes': {
'code': event_code,
'event': event_detail,
'type': self.type['event'],
'host': self.host_info,
'level': self.level['info'],
'service': self.parent.token['service'],
'sdk_ver': self.parent.sdk_version,
'env': self.env_label,
'ts': round(time.time() * 1000)
}
}
request = JosSecretApiReportRequest(self.server_url, 80)
request.businessId = event_msg['businessId']
request.text = event_msg['text']
request.attribute = json.dumps(event_msg['attributes'])  # attach attributes like the other report senders
request.set_app_info(appinfo(self.app_key, self.app_secret))
request.getResponse(self.access_token)
def insert_key_update_event_report(self, event_code, event_detail, major_key_ver, key_list):
with self.__lock:
key_update_event_msg = {
'businessId': ''.join(random.sample(ALPHABET, 40)),
'text': 'EVENT',
'attributes': {
'cur_key': major_key_ver,
'keylist': key_list,
'type': self.type['event'],
'host': self.host_info,
'level': self.level['info'],
'service': self.parent.token['service'],
'sdk_ver': self.parent.sdk_version,
'env': self.env_label,
'ts': round(time.time() * 1000)
}
}
request = JosSecretApiReportRequest(self.server_url, 80)
request.businessId = key_update_event_msg['businessId']
request.text = key_update_event_msg['text']
request.attribute = json.dumps(key_update_event_msg['attributes'])
request.set_app_info(appinfo(self.app_key, self.app_secret))
request.getResponse(self.access_token)
def insert_err_report(self, code, detail, stack_trace, level):
with self.__lock:
err_msg = {
'businessId': ''.join(random.sample(ALPHABET, 40)),
'text': 'EXCEPTION',
'attributes': {
'type': self.type['exception'],
'host': self.host_info,
'level': level,
'service': self.parent.token['service'],
'sdk_ver': self.parent.sdk_version,
'env': self.env_label,
'ts': round(time.time() * 1000),
'code': code,
'msg': detail,
'heap': stack_trace
}
}
if level == self.level['error'] or level == self.level['severe']:
request = JosSecretApiReportRequest(self.server_url, 80)
request.businessId = err_msg['businessId']
request.text = err_msg['text']
request.attribute = json.dumps(err_msg['attributes'])
request.set_app_info(appinfo(self.app_key, self.app_secret))
request.getResponse(self.access_token)
else:
self.reports[code] = err_msg
class JosSecretApiReportRequest(RestApi):
def __init__(self, domain, port=80):
RestApi.__init__(self, domain, port)
self.serverUrl = None
self.businessId = None
self.text = None
self.attribute = None
def process_with_url_before_request(self, url):
self.serverUrl = url
def getapiname(self):
return 'jingdong.jos.secret.api.report.get'
```
#### File: tde/util/key_encryption.py
```python
from Crypto.Cipher import AES
import os
IV_SIZE = 16
def aes_encrypt(m_key, pt):
"""
:param m_key: ref to class Mkey in tde_client.py
:param pt: plain text to encrypt
:return: encrypted value in byte form
"""
iv = os.urandom(IV_SIZE)
add = add_bytes_count(pt)
data = pt + chr(IV_SIZE - len(pt.encode('utf-8')) % IV_SIZE) * add
crypto = AES.new(m_key.s_key, AES.MODE_CBC, iv)
encrypt_aes = crypto.encrypt(data.encode('utf-8'))
return iv + encrypt_aes
def aes_decrypt(m_key, ct):
"""
:param m_key: ref to class Mkey in tde_client.py
:param ct: encrypted value in byte form
:return: plain text
"""
iv = ct[0:IV_SIZE]
crypto = AES.new(m_key.s_key, AES.MODE_CBC, iv)
decrypt_text = crypto.decrypt(ct[IV_SIZE:])
str_text_decrypted = bytes.decode(decrypt_text, encoding='utf-8')
return str_text_decrypted[:-ord(str_text_decrypted[-1])]
def wrap(m_key, d_key):
"""
this wrap method will do AES.CBC directly, without padding operation,
so if the byte length of d_key not times to 16, None will be returned
:param m_key: ref to class Mkey in tde_client.py
:param d_key: plain text, byte length must be times of 16
:return: None or wrap result in byte form
"""
if len(d_key.encode('utf-8')) % IV_SIZE != 0:
return None
crypto = AES.new(m_key.s_key, AES.MODE_CBC, bytes(16))
return crypto.encrypt(d_key.encode('utf-8'))
def unwrap(m_key, ct):
"""
did the reverse operation of wrap
:param m_key: ref to class Mkey in tde_client.py
:param ct: data to be unwraped in byte form
:return:plain text
"""
crypto = AES.new(m_key.s_key, AES.MODE_CBC, bytes(16))
text_decrypted = crypto.decrypt(ct)
str_text_decrypted = bytes.decode(text_decrypted, encoding='utf-8')
return str_text_decrypted
def add_bytes_count(data):
count = len(data.encode('utf-8'))
if count % IV_SIZE != 0:
add = IV_SIZE - (count % IV_SIZE)
else:
add = 16
return add
```
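`aes_encrypt` prepends a random 16-byte IV and pads the UTF-8 text up to the block size; `aes_decrypt` reverses both steps. A minimal round-trip sketch: the `SimpleNamespace` stands in for the `Mkey` object mentioned in the docstrings (only an `s_key` attribute is needed here), the 16-byte key length is an assumption, and the import path assumes the module lives under `tde/util/` as the header suggests:
```python
import os
from types import SimpleNamespace

from tde.util.key_encryption import aes_encrypt, aes_decrypt

m_key = SimpleNamespace(s_key=os.urandom(16))      # stand-in for Mkey; 16-byte key assumed
cipher = aes_encrypt(m_key, "plain text to protect")
assert aes_decrypt(m_key, cipher) == "plain text to protect"
```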
#### File: tde/util/tde_status.py
```python
class TDEStatus(object):
def __init__(self, code, message):
self.code = code
self.message = message
SUCCESS = TDEStatus(0, "Success")
# Generic error codes across the whole TDE system. Range 1 to 99
INTERNAL_ERROR = TDEStatus(1, "Internal system error.")
DB_ERROR = TDEStatus(2, "Internal database error.")
INVALID_JSON = TDEStatus(3, "Invalid json input.")
# Request json is well - formed but some data field is not valid
INVALID_REQUEST_DATA = TDEStatus(4, "Invalid data in the request json.")
REQUEST_SIG_VERFIFY_ERROR = TDEStatus(5, "Validation of the request signature failed.")
# KMS specific errors.Range 100 to 199
KMS_INTERNAL_ERROR = TDEStatus(100, "KMS internal system error.")
KMS_NO_KEY_FOUND = TDEStatus(101, "No key found on KMS.")
KMS_KEY_CREATED_ALREADY = TDEStatus(102, "MK already created for this service.")
KMS_KEY_REGISTRATION_FAILED = TDEStatus(103, "Failed to register key by RKS.")
KMS_SERVICE_ALREADY_REGISTERED = TDEStatus(104, "The service is already registered")
KMS_TMS_CONNECTION_ERROR = TDEStatus(105, "Failed to connect to TMS server")
KMS_RKS_CONNECTION_ERROR = TDEStatus(106, "Failed to connect to RKS server")
KMS_KEY_ALREADY_ACTIVATED = TDEStatus(107, "Latest key already activated")
KMS_FAIL_TO_REMOVE_REDIS_CACHE = TDEStatus(108, "KMS fail to remove redis cache.")
# SDK specific errors.Range 200 to 299
SDK_INTERNAL_ERROR = TDEStatus(200, "SDK generic exception error.")
# token related
SDK_USE_INEFFECTIVE_TOKEN = TDEStatus(201, "SDK uses an ineffective token.")
SDK_USE_HARD_EXPIRED_TOKEN = TDEStatus(202, "SDK uses an expired token with hard deadline.")
SDK_USE_SOFT_EXPIRED_TOKEN = TDEStatus(203, "SDK uses an expired token with soft deadline.")
# recovery procedure related
SDK_FAIL_TO_READ_BACKUP = TDEStatus(204, "SDK cannot fetch any function keys from backup file.")
# key request / response related
SDK_RECEIVED_WRONG_KEYRESPONSE1 = TDEStatus(205, "SDK received key response with unmatched service name.")
SDK_RECEIVED_WRONG_KEYRESPONSE2 = TDEStatus(206, "SDK received key response with unmatched token id.")
SDK_CANNOT_REACH_KMS = TDEStatus(207, "SDK cannot reach KMS server.")
# Encrypt / Decrypt related
SDK_HAS_NO_AVAILABLE_ENC_KEYS = TDEStatus(208, "SDK holds a decrypt-only token or has no key to encrypt data.")
SDK_HAS_NO_CORRESPONDING_DEC_KEYS = TDEStatus(209, "SDK has no corresponding key to decrypt data.")
SDK_OPERATE_WITH_EXPIRED_KEYS = TDEStatus(210, "SDK uses old keys to encrypt/decrypt data.")
SDK_OPERATE_WITH_INACTIVE_KEYS = TDEStatus(211, "SDK uses suspended/revoked keys to encrypt/decrypt data.")
SDK_THROW_JDK_EXCEPTION = TDEStatus(212, "SDK threw generic JDK exception.")
SDK_USE_INVALID_TOKEN = TDEStatus(213, "SDK uses an invalid token.")
SDK_HAS_NO_AVAILABLE_KEYS = TDEStatus(214, "SDK has no keys in internal cache.")
SDK_HAS_CORRUPTED_KEYS = TDEStatus(215, "SDK has corrupted keys in internal cache.")
SDK_HAS_CORRUPTED_CIPHER = TDEStatus(216, "SDK tries to decrypt corrupted cipher.")
SDK_DIDNOT_SETUP_RPATH = TDEStatus(217, "SDK did not set resource path correctly.")
SDK_FAIL_TO_WRITE_KEYCACHE = TDEStatus(218, "SDK cannot write key cache file to the given resource path.")
SDK_FAIL_TO_DELETE_KEYCACHE = TDEStatus(219, "SDK fails to delete all key cache files.")
SDK_FAIL_TO_READ_KEYCACHE = TDEStatus(220, "SDK cannot fetch any function keys from cache file.")
SDK_FAIL_TO_DELETE_KEYBACKUP = TDEStatus(221, "SDK fails to delete backup file.")
# Event related
SDK_FAILS_TO_FETCH_UPDATED_KEYS = TDEStatus(227, "SDK failed to fetch rotated keys from KMS.")
SDK_TRIGGER_ROTATED_KEY_FETCH = TDEStatus(228, "SDK trigger key fetching because ciphertext is encrypted with newer keys.")
SDK_REPORT_CUR_KEYVER = TDEStatus(229, "CurKeyVer=")
# ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** TMSabout ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** **
# TMS specific errors.Range 300 to 399
TMS_INTERNAL_ERROR = TDEStatus(300, "TMS internal system error.")
TMS_DB_DATA_NOTFOUND_ERROR = TDEStatus(301, "TMS-db's data not found.")
TMS_REQUEST_ARGS_ERROR = TDEStatus(302, "Request argument error.")
TMS_DB_DATA_ERROR = TDEStatus(303, "Tms db data error.")
TMS_KMS_REQUEST_EXPIRE = TDEStatus(304, " KMS request timeout.")
TMS_REQUEST_VERIFY_FAILED = TDEStatus(305, "Request signature validation failed.")
TMS_TOKEN_EXPIRE = TDEStatus(306, "The request token is expired.")
TMS_TOKEN_IS_FROZEN = TDEStatus(307, "The request token is frozen.")
TMS_TOKEN_IS_REVOKE = TDEStatus(308, "The request token is revoked.")
TMS_TOKEN_IS_NOT_IN_THE_EFFECT_TIME_RANGE = TDEStatus(309, "The token is ineffective.")
TMS_TOKEN_IN_DB_IS_NULL = TDEStatus(310, "The token in the db is null.")
TMS_NO_AVAILABLE_GRANTS_FOR_SERVICE = TDEStatus(311, "The token has no granted service.")
# RKS specific errors.Range 400 to 499
RKS_INTERNAL_ERROR = TDEStatus(400, "RKS internal system error.")
RKS_REQUEST_FORMAT_ERROR = TDEStatus(401, "Registration request format error.")
RKS_SIG_VERIFY_ERROR = TDEStatus(402, "Registration request signature validation failed.")
RKS_BACKUP_CLOSE = TDEStatus(403, "Backup service is not available.")
```
{
"source": "jof2jc/ptmac",
"score": 2
}
#### File: ptmac/ptmac/custom_ptmac.py
```python
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import flt, cstr
from frappe.desk.reportview import get_match_cond, get_filters_cond
from frappe.utils import nowdate
from collections import defaultdict
from frappe.model.naming import make_autoname
# test_records = frappe.get_test_records('testdoctype')
def autoname(self, method):
# concat first and last name
_name = " ".join(filter(None,
[cstr(self.get(f)).strip().upper() for f in ["first_name", "last_name"]]))
#self.name = cstr(self.first_name).strip() + cstr(self.last_name).strip()
#frappe.throw(_("{0}").format(_name))
if self.phone:
_name = _name + ' - ' + cstr(self.phone).strip()
elif self.mobile_no:
_name = _name + " - " + cstr(self.mobile_no).strip()
if frappe.db.exists("Contact", _name):
self.name = make_autoname(_name + '/.##')
else:
self.name = _name
#frappe.throw(_("{0}").format(self.name))
def item_autoname(self, method):
if frappe.db.get_default("item_naming_by")=="Naming Series":
if self.variant_of:
if not self.item_code:
template_item_name = frappe.db.get_value("Item", self.variant_of, "item_name")
self.item_code = make_variant_item_code(self.variant_of, template_item_name, self)
else:
from frappe.model.naming import make_autoname
self.item_code = make_autoname(self.naming_series+'.#####')
elif not self.item_code:
frappe.msgprint(_("Item Code is mandatory because Item is not automatically numbered"), raise_exception=1)
self.item_code = self.item_code.strip().upper()
self.item_name = self.item_code
self.name = self.item_code
def item_query(doctype, txt, searchfield, start, page_len, filters, as_dict=False):
conditions = []
txt = txt.replace(" ","%")
return frappe.db.sql("""select tabItem.name, tabItem.item_group, tabItem.image,
if(length(tabItem.item_name) > 40,
concat(substr(tabItem.item_name, 1, 40), "..."), item_name) as item_name,
if(length(tabItem.description) > 40, \
concat(substr(tabItem.description, 1, 40), "..."), description) as description
from tabItem
where tabItem.docstatus < 2
and tabItem.has_variants=0
and tabItem.disabled=0
and (tabItem.end_of_life > %(today)s or ifnull(tabItem.end_of_life, '0000-00-00')='0000-00-00')
and (tabItem.`{key}` LIKE %(txt)s
or tabItem.item_group LIKE %(txt)s
or tabItem.item_name LIKE %(txt)s
or tabItem.description LIKE %(txt)s)
{fcond} {mcond}
order by
if(locate(%(_txt)s, name), locate(%(_txt)s, name), 99999),
if(locate(%(_txt)s, item_name), locate(%(_txt)s, item_name), 99999),
idx desc,
name, item_name
limit %(start)s, %(page_len)s """.format(key=searchfield,
fcond=get_filters_cond(doctype, filters, conditions).replace('%', '%%'),
mcond=get_match_cond(doctype).replace('%', '%%')),
{
"today": nowdate(),
"txt": "%%%s%%" % txt,
"_txt": txt.replace("%", ""),
"start": start,
"page_len": 50
}, as_dict=as_dict)
def set_delivery_status_per_billed(self, method):
if self.docstatus == 1 or self.docstatus == 2:
for d in self.items:
if d.delivery_note:
ref_doc_qty = flt(frappe.db.sql("""select ifnull(sum(qty), 0) from `tabDelivery Note Item`
where parent=%s""", (d.delivery_note))[0][0])
print('ref_doc_qty=' + cstr(ref_doc_qty))
billed_qty = flt(frappe.db.sql("""select ifnull(sum(qty), 0) from `tabSales Invoice Item`
where delivery_note=%s and docstatus=1""", (d.delivery_note))[0][0])
print('billed_qty=' + cstr(billed_qty))
per_billed = ((ref_doc_qty if billed_qty > ref_doc_qty else billed_qty)\
/ ref_doc_qty)*100
print('per_billed=' + cstr(per_billed))
doc = frappe.get_doc("Delivery Note", d.delivery_note)
#frappe.throw(_("doc.per_billed = {0} per_billed = {1}").format(doc.per_billed, per_billed))
if self.docstatus == 1 and doc.per_billed < 100.00:
doc.db_set("per_billed", per_billed)
else:
doc.db_set("per_billed", "0")
doc.set_status(update=True)
def patch_delivery_status_per_billed():
_list = frappe.db.sql ("""SELECT it.delivery_note, ifnull(sum(qty), 0) as billed_qty FROM `tabSales Invoice` si INNER JOIN `tabSales Invoice Item` it
ON si.name=it.parent where si.docstatus=1 and it.delivery_note <> '' group by it.delivery_note""", as_dict=1)
print(_list)
for d in _list:
print('d.delivery_note=' + cstr(d.delivery_note))
ref_doc_qty = flt(frappe.db.sql("""select ifnull(sum(qty), 0) from `tabDelivery Note Item`
where parent=%s""", (d.delivery_note))[0][0])
print('ref_doc_qty=' + cstr(ref_doc_qty))
#billed_qty = flt(frappe.db.sql("""select ifnull(sum(qty), 0) from `tabSales Invoice Item`
#where delivery_note=%s and docstatus=1""", (d.delivery_note))[0][0])
print('d.billed_qty=' + cstr(d.billed_qty))
per_billed = ((ref_doc_qty if d.billed_qty > ref_doc_qty else d.billed_qty)\
/ ref_doc_qty)*100
print('per_billed=' + cstr(per_billed))
doc = frappe.get_doc("Delivery Note", d.delivery_note)
if doc.per_billed < 100:
doc.db_set("per_billed", per_billed)
doc.set_status(update=True)
```
{
"source": "jof2jc/ptmsa",
"score": 2
}
#### File: ptmsa/config/ptmsa.py
```python
from __future__ import unicode_literals
from frappe import _
def get_data():
return [
{
"label": _("Main Reports"),
"icon": "icon-table",
"items": [
{
"type": "report",
"name": "Batch-Wise Balance History",
"doctype": "Batch",
"is_query_report": True
},
{
"type": "report",
"name": "Stock Ledger Detail",
"doctype": "testdoctype",
"is_query_report": True
},
{
"type": "report",
"name": "Stock Balance Summary",
"doctype": "testdoctype",
"is_query_report": True
},
{
"type": "report",
"name": "Gross Profit",
"doctype": "Sales Invoice",
"is_query_report": True
},
{
"type": "report",
"name": "Accounts Receivable",
"doctype": "Sales Invoice",
"is_query_report": True
},
{
"type": "report",
"name": "Accounts Payable",
"doctype": "Purchase Invoice",
"is_query_report": True
},
{
"type": "report",
"name": "Sales Register",
"doctype": "Sales Invoice",
"is_query_report": True
},
{
"type": "report",
"name": "Sales Summary",
"doctype": "Sales Invoice",
"is_query_report": True
},
{
"type": "report",
"name": "Tanda Terima",
"label": _("Tanda Terima Faktur"),
"doctype": "Sales Invoice",
"is_query_report": True
},
{
"type": "report",
"name": "Purchase Register",
"doctype": "Purchase Invoice",
"is_query_report": True
},
{
"type": "report",
"name": "Purchase Summary",
"doctype": "Purchase Invoice",
"is_query_report": True
},
{
"type": "report",
"name": "General Ledger",
"doctype": "GL Entry",
"is_query_report": True
}
]
},
{
"label": _("Standard Reports"),
"icon": "icon-list",
"items": [
{
"type": "report",
"name": "Bank Clearance Summary",
"is_query_report": True,
"doctype": "Journal Entry"
},
{
"type": "report",
"name": "Payment Period Based On Invoice Date",
"is_query_report": True,
"doctype": "Journal Entry"
},
{
"type": "report",
"name": "Delivered Items To Be Billed",
"is_query_report": True,
"doctype": "Sales Invoice"
},
{
"type": "report",
"name": "Received Items To Be Billed",
"is_query_report": True,
"doctype": "Purchase Invoice"
},
{
"type": "report",
"name": "Item-wise Sales Register",
"is_query_report": True,
"doctype": "Sales Invoice"
},
{
"type": "report",
"name": "Item-wise Purchase Register",
"is_query_report": True,
"doctype": "Purchase Invoice"
},
{
"type": "report",
"name": "Purchase Invoice Trends",
"is_query_report": True,
"doctype": "Purchase Invoice"
},
{
"type": "report",
"name": "Sales Invoice Trends",
"is_query_report": True,
"doctype": "Sales Invoice"
},
{
"type": "report",
"name": "Accounts Receivable Summary",
"doctype": "Sales Invoice",
"is_query_report": True
},
{
"type": "report",
"name": "Accounts Payable Summary",
"doctype": "Purchase Invoice",
"is_query_report": True
},
{
"type": "report",
"is_query_report": True,
"name": "Customer Credit Balance",
"doctype": "Customer"
},
]
},
{
"label": _("Financial Reports"),
"icon": "icon-list",
"items": [
{
"type": "report",
"name": "Trial Balance",
"doctype": "GL Entry",
"is_query_report": True
},
{
"type": "report",
"name": "Trial Balance for Party",
"doctype": "GL Entry",
"is_query_report": True
},
{
"type": "report",
"name": "Balance Sheet",
"doctype": "GL Entry",
"is_query_report": True
},
{
"type": "report",
"name": "Profit and Loss Statement",
"doctype": "GL Entry",
"is_query_report": True
}
]
},
{
"label": _("Analytics"),
"icon": "icon-list",
"items": [
{
"type": "report",
"is_query_report": True,
"name": "Profitability Analysis",
"doctype": "GL Entry"
},
{
"type": "page",
"name": "sales-analytics",
"label": _("Sales Analytics"),
"icon": "icon-bar-chart"
},
{
"type": "page",
"name": "purchase-analytics",
"label": _("Purchase Analytics"),
"icon": "icon-bar-chart"
},
{
"type": "report",
"is_query_report": True,
"name": "Supplier-Wise Sales Analytics",
"label": _("Sales Analytics By Supplier"),
"doctype": "Stock Ledger Entry"
}
]
}
]
```
{
"source": "jofandaj0f/pythonhttpserver",
"score": 2
}
#### File: jofandaj0f/pythonhttpserver/helloworld.py
```python
def helloworld():
print('Hello World!')
helloworld()
```
{
"source": "jofas/bachelor_thesis",
"score": 2
}
#### File: jofas/bachelor_thesis/experiment.py
```python
import numpy as np
import os
import math
from abstain import AbstainClassifier
import matplotlib.pyplot as plt
plt.style.use("ggplot")
class SingleExperiment:
def __init__(self, label, label_rf):
self.label = label
self.label_rf = label_rf
self.result = ([], [], [])
self.points = []
def add_points(self, pts, label):
self.points.append((pts, label))
class Experiment:
def __init__(
self, label, X, y, clfs, regs, rw_fns, ratio=0.1
):
self.label = label
idxs = np.arange(len(X))
np.random.shuffle(idxs)
self.X, self.y = X[idxs], y[idxs]
self.ratio = ratio
self.clfs = clfs
self.regs = regs
self.rw_fns = rw_fns
self.exps = []
def start(self):
train = int(len(self.X) * (1 - self.ratio))
for clf_proto in self.clfs:
for rw_fn in self.rw_fns:
clf_instance = clf_proto()
abstain = AbstainClassifier(
clf_instance, rw_fn, self.regs
)
self.exps.append(abstain.experiment)
abstain.train_kfold(
self.X[:train], self.y[:train]
)
print(clf_instance.label, rw_fn.label)
abstain.score(
self.X[train:], self.y[train:]
).stdout()
def export(self, path):
path += self.label
self._mkdir(path)
for exp in self.exps:
path_exp = path + "/" + exp.label
self._mkdir(path_exp)
path_rf = path_exp + "/" + exp.label_rf
self._mkdir(path_rf)
for pts, label in exp.points:
path_ds = path_rf + "/" + label + ".csv"
with open(path_ds, "w") as f:
for row in pts:
f.write("{};{}\n".format(*row))
with open(path_rf + "/result.csv", "w") as f:
f.write("Regressor;Reward;Rejected\n")
for rew, rej, label in zip(*exp.result):
f.write("{};{};{}\n".format(
label, rew, rej
))
def _mkdir(self, path):
try: os.mkdir(path)
except FileExistsError: pass
def plot(self):
rows, cols = len(self.clfs), len(self.rw_fns)
fig, axs = plt.subplots(
rows, cols, figsize=(rows*6.4, cols*4.8)
)
if rows == 1 and cols == 1:
self._plot(axs, self.exps[0])
else:
axs = np.reshape(axs, (-1,))
for i, ax in enumerate(axs):
self._plot(ax, self.exps[i])
plt.show()
def _plot(self, ax, exp):
ax.set_title(exp.label + " " + exp.label_rf)
for pts, label in exp.points:
ax.plot(pts[:,0], pts[:,1], label=label)
ax.legend()
```
#### File: jofas/bachelor_thesis/main.py
```python
from data import *
from clfs import CLFS
from regs import REGS
from reward_fns import RFNS
from experiment import Experiment
def main():
print("\nusps")
X, y = load_usps()
e = Experiment("usps_01", X, y, CLFS, REGS, RFNS)
e.start()
e.export("experiments/")
print("\ncredit card")
X, y = load_credit_card()
e = Experiment("credit_card_01", X, y, CLFS, REGS, RFNS)
e.start()
e.export("experiments/")
print("\nwine quality")
X, y = load_wine_quality()
e = Experiment("wine_quality_01", X, y, CLFS, REGS, RFNS)
e.start()
e.export("experiments/")
print("\ncar")
X, y = load_car()
e = Experiment("car_01", X, y, CLFS, REGS, RFNS)
e.start()
e.export("experiments/")
print("\nbank")
X, y = load_bank()
e = Experiment("bank_01", X, y, CLFS, REGS, RFNS)
e.start()
e.export("experiments/")
print("\nbank additional")
X, y = load_bank_additional()
e = Experiment("bank_additional_01", X, y, CLFS, REGS, RFNS)
e.start()
e.export("experiments/")
if __name__ == "__main__":
main()
```
{
"source": "jofas/conform",
"score": 2
}
#### File: libconform/vtx/base.py
```python
class VTXBase:
def train(self, X, y):
pass
def category(self, x, y, contains_x):
pass
```
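`VTXBase` only fixes the interface a Venn taxonomy has to provide. A hypothetical subclass sketch; the single-category behaviour is invented for illustration and is not part of libconform:
```python
from libconform.vtx.base import VTXBase


class SingleCategoryVTX(VTXBase):
    """Trivial taxonomy: every example is assigned category 0."""

    def train(self, X, y):
        pass  # nothing to fit for a constant taxonomy

    def category(self, x, y, contains_x):
        return 0
```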
{
"source": "jofaval/search",
"score": 3
}
#### File: search/search/utils.py
```python
import os
import psutil
def get_memory_usage() -> int:
process = psutil.Process(os.getpid())
mem_usage_in_bytes = process.memory_info().rss
return mem_usage_in_bytes // (1024 * 1024)
```
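`get_memory_usage` returns the resident set size of the current process, rounded down to whole MiB. A quick usage sketch, assuming the module is importable as `search.utils` (mirroring how the test below imports `search.engine`):
```python
from search.utils import get_memory_usage

print(f"current process RSS: {get_memory_usage()} MiB")
```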
#### File: search/tests/test_engine.py
```python
import json
import tempfile
import unittest
from pathlib import Path
import numpy as np
from search.engine import Document, Engine, Result, Sentence
def _round(results):
for r in results:
r.score = round(r.score, 2)
return results
class EngineTest(unittest.TestCase):
def test_engine(self):
with tempfile.TemporaryDirectory() as temp_dir_str:
temp_dir = Path(temp_dir_str)
with temp_dir.joinpath("doc_0.json").open("tw") as writer:
json.dump(
[
{
"pageid": 0,
"title": "Person A",
"sentences": [
"sent A 0",
"sent A 1",
],
},
{
"pageid": 1,
"title": "Person B",
"sentences": [
"sent B 0",
"sent B 1",
"sent B 2",
],
},
],
writer,
)
with temp_dir.joinpath("title_embedding_0.npy").open(
"bw"
) as writer:
np.save(writer, np.array([[0, 0, 1], [0, 0, -1]], dtype=float))
with temp_dir.joinpath("sentence_embedding_0.npy").open(
"bw"
) as writer:
np.save(
writer,
np.array(
[
[1, 0, 0],
[0, 1, 0],
[1, 0, 0],
[0, 1, 0],
[0, -1, 0],
],
dtype=float,
),
)
engine = Engine(Path(temp_dir))
results = _round(
engine.search(np.array([1, 0, 1], dtype=float), limit=1)
)
self.assertEqual(
results,
[
Result(
doc=Document(
pageid="0",
title="Person A",
sentences=["sent A 0", "sent A 1"],
),
sentence=Sentence(text="sent A 0", doc_index=0),
score=1.41,
),
],
)
results = _round(
engine.search(np.array([0, -1, -1], dtype=float), limit=1)
)
self.assertEqual(
results,
[
Result(
doc=Document(
pageid="1",
title="Person B",
sentences=["sent B 0", "sent B 1", "sent B 2"],
),
sentence=Sentence(text="sent B 2", doc_index=1),
score=1.41,
),
],
)
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "jof/drf-url-filters",
"score": 3
} |
#### File: drf-url-filters/filters/metaclasses.py
```python
from .decorators import decorate_get_queryset
class MetaFiltersMixin(type):
def __new__(cls, name, bases, dct):
if 'get_queryset' in dct:
dct['get_queryset'] = decorate_get_queryset(dct['get_queryset'])
return super(MetaFiltersMixin, cls).__new__(cls, name, bases, dct)
def __setattr__(self, attr, val):
if attr == 'get_queryset':
val = decorate_get_queryset(val)
return super(MetaFiltersMixin, self).__setattr__(attr, val)
```
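A minimal, self-contained sketch of what the metaclass above does at class creation (illustrative only: the stand-in decorator replaces `filters.decorators.decorate_get_queryset`, and the example view class is not part of drf-url-filters).
```python
# Stand-in decorator so the sketch runs on its own (the real project imports
# decorate_get_queryset from filters.decorators).
def decorate_get_queryset(func):
    def wrapper(self, *args, **kwargs):
        print("query-param validation would run here")
        return func(self, *args, **kwargs)
    return wrapper

class MetaFiltersMixin(type):
    def __new__(cls, name, bases, dct):
        if 'get_queryset' in dct:
            dct['get_queryset'] = decorate_get_queryset(dct['get_queryset'])
        return super(MetaFiltersMixin, cls).__new__(cls, name, bases, dct)

# Python 3 syntax; under Python 2 the class would set __metaclass__ instead.
class FakeViewSet(metaclass=MetaFiltersMixin):
    def get_queryset(self):
        return ["row-1", "row-2"]

print(FakeViewSet().get_queryset())  # prints the wrapper message, then the rows
```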
#### File: filters/tests/tests.py
```python
import unittest
class MyTest(unittest.TestCase):
def test(self):
self.assertEqual(4, 4)
```
#### File: filters/tests/test_validations.py
```python
import unittest
from nose2.tools.such import helper
from voluptuous import Invalid
from django.core.exceptions import ImproperlyConfigured
from filters import validations
INT = 1
LONG = long(INT)
INT_FLOAT = float(INT)
INT_STR = str(INT)
INT_UNICODE = unicode(INT)
ALNUM_STR = "hello123"
ALNUM_UNICODE = unicode(ALNUM_STR)
NON_INT_FLOAT = 1.5
NON_INT_STR = str(NON_INT_FLOAT)
NON_INT_UNICODE = unicode(NON_INT_FLOAT)
NON_ALNUM_STR = "hello 123"
NON_ALNUM_UNICODE = unicode(NON_ALNUM_STR)
INT_CSV = "1,2,3"
NON_INT_CSV = "a,b,c"
class BaseValidationTestCase(object):
def f(self, v):
return self.base_function()(v)
def transform_val(self, v):
return v
def test_valid_values(self):
for v in self.valid_values:
self.assertEqual(self.f(v), self.transform_val(v))
def test_invalid_values(self):
for v in self.invalid_values:
self.assertRaises(Invalid, self.f, v)
class IntegerLikeTestCase(BaseValidationTestCase, unittest.TestCase):
base_function = validations.IntegerLike
valid_values = [
INT, LONG, INT_FLOAT, INT_STR, INT_UNICODE
]
invalid_values = [
NON_INT_FLOAT, NON_INT_STR, ALNUM_STR, ALNUM_UNICODE,
NON_INT_UNICODE, NON_ALNUM_STR, NON_ALNUM_UNICODE
]
class AlphanumericTestCase(BaseValidationTestCase, unittest.TestCase):
base_function = validations.Alphanumeric
valid_values = [
INT, LONG, INT_FLOAT, INT_STR, INT_UNICODE, ALNUM_STR, ALNUM_UNICODE
]
invalid_values = [
NON_INT_FLOAT, NON_INT_STR, NON_INT_UNICODE, NON_ALNUM_STR,
NON_ALNUM_UNICODE
]
class StrictlyAlphanumericTestCase(BaseValidationTestCase, unittest.TestCase):
base_function = validations.StrictlyAlphanumeric
valid_values = [ALNUM_STR, ALNUM_UNICODE]
invalid_values = [
INT, LONG, INT_FLOAT, NON_INT_FLOAT, NON_INT_STR, NON_INT_UNICODE,
NON_ALNUM_STR, NON_ALNUM_UNICODE, INT_STR, INT_UNICODE
]
class CSVofIntegersTestCase(BaseValidationTestCase, unittest.TestCase):
base_function = validations.CSVofIntegers
transform_val = lambda self, v: map(int, v.split(","))
valid_values = [INT_CSV, INT_STR, INT_UNICODE]
invalid_values = [
INT_FLOAT, ALNUM_STR, ALNUM_UNICODE, NON_INT_FLOAT, NON_INT_STR,
NON_INT_UNICODE, NON_ALNUM_STR, NON_ALNUM_UNICODE, NON_INT_CSV,
INT, LONG
]
class GenericSeparatedValidatorTestCase(unittest.TestCase):
def test_default_separator(self):
validator = validations.GenericSeparatedValidator(int)
self.assertEqual(validator('1,2,3'), [1,2,3])
self.assertRaises(Invalid, validator, 'a,b,c')
        self.assertEqual(validator('1'), [1])
def test_custom_separator(self):
validator = validations.GenericSeparatedValidator(int, 'mm')
self.assertEqual(validator('1mm2mm3'), [1,2,3])
self.assertRaises(Invalid, validator, 'ammbmmc')
        self.assertEqual(validator('1'), [1])
def test_custom_type(self):
validator = validations.GenericSeparatedValidator(
validations.IntegerLike())
self.assertEqual(validator('1,2,3'), ['1','2','3'])
self.assertRaises(Invalid, validator, 'a,b,c')
        self.assertEqual(validator('1'), ['1'])
def test_invalid_separator(self):
self.assertRaises(ImproperlyConfigured,
validations.GenericSeparatedValidator, 12)
``` |
{
"source": "Jofemago/Administrador_vehiculos",
"score": 3
} |
#### File: projectoAdmVeh/modelo/BD_tipoVehiculo.py
```python
import sqlalchemy
from sqlalchemy import Column, Integer, String
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class _TipoVehiculo(Base):
__tablename__ = "TipoVehiculo"
id = Column(Integer, primary_key = True)
tipo = Column(String(50))
def makeTable(eng):
Base.metadata.create_all(bind =eng)
```
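A small standalone sketch (not from the repository) exercising the model and `makeTable` above against an in-memory SQLite engine instead of the project's own engine/session helpers; the import path is assumed from the file layout.
```python
# Hypothetical smoke test for the model above using in-memory SQLite.
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

from modelo.BD_tipoVehiculo import _TipoVehiculo, makeTable  # assumed importable

eng = create_engine("sqlite:///:memory:")
makeTable(eng)  # creates the TipoVehiculo table

Session = sessionmaker(bind=eng)
ses = Session()
ses.add(_TipoVehiculo(tipo="Moto"))
ses.commit()
print([t.tipo for t in ses.query(_TipoVehiculo).all()])  # ['Moto']
ses.close()
```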
#### File: projectoAdmVeh/negocio/tipoVehiculo.py
```python
from modelo.BD_tipoVehiculo import _TipoVehiculo
from modelo.createDatabase import makeEngine
from modelo.createDatabase import makeBase
from sqlalchemy.orm import sessionmaker
from sqlalchemy import create_engine
class TipoVehiculo:
base = makeBase()
eng = makeEngine()
def __init__(self,id,tipo):
self.id = id
self.tipo = tipo
def __str__(self):
return self.tipo
def getAllTipos(self):
Session = sessionmaker(bind=self.eng)
ses = Session()
        query = []
        try:
            query = ses.query(_TipoVehiculo).all()
        except Exception:
            print("could not run the query")
        ses.close()
        return [TipoVehiculo(int(i.id), str(i.tipo)) for i in query]
def selectTipo(self, tipo, listoftipos):
#print(tipo, listoftipos)
for t in listoftipos:
if t.tipo == tipo:
return t
if __name__ == "__main__":
#tv = TipoVehiculo()
TipoVehiculo.getAllTipos(TipoVehiculo)
```
#### File: projectoAdmVeh/vistas/ingresarCorreo.py
```python
from kivy.uix.screenmanager import ScreenManager, Screen
from kivy.uix.gridlayout import GridLayout
from kivy.uix.popup import Popup
from kivy.properties import StringProperty
from kivy.lang import Builder
from db.creator import Creator
Builder.load_string("""
#If I load the popup together with all the other views its content gets duplicated; putting the kv code here works around that
<ConfirmPopup>: #Used to confirm that this is the e-mail to be saved
cols:1
Label:
text: root.text # Obtiene el texto Desea guardar el correo
GridLayout:
cols: 2
size_hint_y: None
height: '44sp'
Button:
text: 'Si'
on_release: root.dispatch('on_answer','si')
Button:
text: 'No'
on_release: root.dispatch('on_answer', 'no')
"""
)
class ConfirmPopup(GridLayout): #Builds the content for the confirmation popup
    text = StringProperty() #As I understand it, this captures the "Desea guardar el correo?" text
def __init__(self,**kwargs):
self.register_event_type('on_answer')
super(ConfirmPopup,self).__init__(**kwargs)
    def on_answer(self, *args): #Lets me receive the dialog's answer, yes or no
pass
class MainWindow(Screen): #For now this is the initial window, used to add the e-mail; its layout lives in the kv
def __init__(self, **kwargs):
super(MainWindow, self).__init__(**kwargs)
def direccionadoBotonGuardarCorreo(self):
informacionDelPopup="Desea guardar el correo?\n"+str(self.ids.correo.text)
        content = ConfirmPopup(text=informacionDelPopup) #The text passed here is captured by the StringProperty
        content.bind(on_answer=self._on_answer) #as far as I can tell, this hands on_answer over to _on_answer
self.popup = Popup(title="Confirmacion",
content=content,
size_hint=(0.5, 0.5),
auto_dismiss= False)
self.popup.open()
    def _on_answer(self, instance, answer): #pressing the yes/no button triggers this function
self.popup.dismiss()
if answer=='si':
#write the mail in the config.json
mail = str(self.ids.correo.text)
creator = Creator()
config = creator.makeConfigJSON()
config["mail"] = mail
config["mailvalidate"] = True
creator.writeConfigJson(config)
            self.manager.current="segunda" #Switch windows here because it depends on what was chosen in the popup (yes/no).
elif answer=='no':
pass
``` |
{
"source": "Jofemago/Computaci-n-Grafica",
"score": 3
} |
#### File: Computaci-n-Grafica/ALGEBRA LINEAL/EjCap01.py
```python
from cap01Lineal import *
def isPerpendicular(A1, A2, B1, B2):
    ''' Checks whether the given points form perpendicular vectors
    Parameters:
        A1 point 1 of vector 1
        A2 point 2 of vector 1
        B1 point 1 of vector 2
        B2 point 2 of vector 2
    '''
v = Vector(A1, A2)
u = Vector(B1, B2)
ppunto = Punto(v, u)
if ppunto == 0:
return True
else:
return False
def IsParalelo(A1, A2, B1, B2):
    ''' Checks whether the given points form parallel vectors
    Parameters:
        A1 point 1 of vector 1
        A2 point 2 of vector 1
        B1 point 1 of vector 2
        B2 point 2 of vector 2
    '''
v = Vector(A1, A2)
u = Vector(B1, B2)
if __name__ == "__main__":
print(isPerpendicular([2,1],[5,1],[4,0],[4,4]))
```
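For comparison, a self-contained NumPy version of the two checks above (illustrative only; it does not use the `cap01Lineal` helpers, and treating `Vector` as a point difference and `Punto` as the dot product is an assumption about that module).
```python
# Standalone perpendicular / parallel checks (illustrative only).
import numpy as np

def is_perpendicular(A1, A2, B1, B2, tol=1e-9):
    v = np.subtract(A2, A1)
    u = np.subtract(B2, B1)
    return abs(float(np.dot(v, u))) < tol          # zero dot product

def is_parallel(A1, A2, B1, B2, tol=1e-9):
    v = np.subtract(A2, A1)
    u = np.subtract(B2, B1)
    return abs(v[0] * u[1] - v[1] * u[0]) < tol    # zero 2D cross product

print(is_perpendicular([2, 1], [5, 1], [4, 0], [4, 4]))  # True
print(is_parallel([0, 0], [1, 1], [2, 2], [4, 4]))       # True
```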
#### File: Disparos/TareaDisparos/Enemys.py
```python
import random
import pygame
from configuraciones import *
class EnemyDos(pygame.sprite.Sprite):
def __init__(self,timeAct,col = ROJO):
pygame.sprite.Sprite.__init__(self)
self.image = pygame.Surface([25, 25])
self.image.fill(col)
self.rect = self.image.get_rect()
self.rect.x = ANCHO
self.dir = 1
self.var_x = 3
self.timeAc = timeAct
self.temporizador = timeAct
self.disparo = False
def update(self):
if self.rect.x < ANCHO /2:
self.var_x = 0
#self.var_x+= 2
if self.temporizador < 0:
self.temporizador = self.timeAc
self.disparo = True
self.temporizador -= 1
self.rect.x -= self.var_x
class Enemy(pygame.sprite.Sprite):
def __init__(self,timeAct,col = ROJO):
pygame.sprite.Sprite.__init__(self)
self.image = pygame.Surface([25, 25])
self.image.fill(col)
self.rect = self.image.get_rect()
self.rect.x = ANCHO
self.dir = 1
self.var_x = 3
self.timeAc = timeAct
self.temporizador = timeAct
self.disparo = False
def update(self):
if self.rect.x < -25:
self.kill()
#self.var_x+= 2
if self.temporizador < 0:
self.temporizador = self.timeAc
self.disparo = True
self.temporizador -= 1
self.rect.x -= self.var_x
```
#### File: Disparos/TareaDisparos/Player.py
```python
import pygame
from configuraciones import *
class Vida(pygame.sprite.Sprite):
def __init__(self):
pygame.sprite.Sprite.__init__(self)
self.image = pygame.Surface([30,30])
self.image.fill(VERDE)
self.rect= self.image.get_rect()
class Player(pygame.sprite.Sprite):
def __init__(self, ancho, alto,vidas, col = AZUL):
pygame.sprite.Sprite.__init__(self)
self.image = pygame.Surface([ancho, alto])
self.image.fill(col)
self.rect = self.image.get_rect()
self.setPos(100,400)
#variables de movimiento
self.var_x = 0
self.var_y = 0
self.vidas = vidas
def setPos(self, x, y):
self.rect.x = x
self.rect.y = y
def setX(self, x):
self.rect.x = x
    def setY(self, y):
        self.rect.y = y
def movX(self):
if self.rect.x >= ANCHO -50 and self.var_x >= 0:
self.var_x = 0
if self.rect.x <= 0 and self.var_x <= 0:
self.var_x = 0
self.rect.x += self.var_x
def movY(self):
if self.rect.y >= ALTO -50 and self.var_y >= 0:
self.var_y = 0
if self.rect.y <= 100 and self.var_y <= 0:
self.var_y = 0
self.rect.y += self.var_y
def update(self):
self.movX()
self.movY()
```
#### File: Computaci-n-Grafica/Fondos/animacion.py
```python
import pygame
#from Plano import *
#from LibPolares import *
#from Images import *
#from Algebralineal import *
import random
ANCHO=640
ALTO=480
BLANCO=(255,255,255)
NEGRO=(0,0,0)
ROJO=(255,0,0)
AZUL=(0,0,255)
VERDE=(0,255,0)
def recortar(archivo, an , al):
fondo = pygame.image.load(archivo).convert_alpha()
info = fondo.get_size()
img_ancho = info[0] #alto y ancho de cada sprite
img_alto = info[1]
corte_x = img_ancho /an
corte_y = img_alto/al
m = []
for i in range(an):
fila = []
for j in range(al):
cuadro = [i*corte_x,j*corte_y,corte_x,corte_y]
recorte = fondo.subsurface(cuadro)
fila.append(recorte)
m.append(fila)
return m
class Jugador(pygame.sprite.Sprite):
def __init__(self, img_sprite):
pygame.sprite.Sprite.__init__(self)
self.m = img_sprite
self.image = self.m[0][0]
self.rect = self.image.get_rect()
self.dir = 0
self.i = 0
self.var_x = 0
self.var_y = 0
def update(self):
if self.var_y != 0 or self.var_x != 0:
if self.i < 2 :
self.i += 1
else:
self.i = 0
self.image = self.m[self.i][self.dir]
#self.rect = self.image.get_rect()
self.rect.x += self.var_x
self.rect.y += self.var_y
if __name__ =='__main__':
pygame.init()
pantalla=pygame.display.set_mode([ANCHO, ALTO])
anm = recortar('animales.png',12,8)
'''
pantalla.blit(anm[0][0],[0,0])
pygame.display.flip()'''
jp = Jugador(anm)
general = pygame.sprite.Group()
general.add(jp)
reloj = pygame.time.Clock()
fin = False
puntos = 0
while not fin:
for event in pygame.event.get():
if event.type == pygame.QUIT:
fin=True
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
jp.dir = 1
jp.var_x = -5
if event.key == pygame.K_RIGHT:
jp.dir = 2
jp.var_x = 5
if event.key == pygame.K_UP:
jp.dir = 3
jp.var_y = -5
if event.key == pygame.K_DOWN:
jp.dir = 0
jp.var_y = 5
if event.key == pygame.K_SPACE:
jp.var_y = 0
jp.var_x = 0
general.update()
pantalla.fill(NEGRO)
general.draw(pantalla)
pygame.display.flip()
reloj.tick(30)
```
#### File: Computaci-n-Grafica/JuegoFrostRana/Player.py
```python
import pygame
BLANCO=(255,255,255)
NEGRO=(0,0,0)
ROJO=(255,0,0)
AZUL=(0,0,255)
VERDE=(0,255,0)
class Player(pygame.sprite.Sprite):
def __init__(self, archivos):
# Call the parent class (Sprite) constructor
pygame.sprite.Sprite.__init__(self)
#Crear la imagen que va ser un bloque que representa el Jugador
self.sprites = archivos
self.image = None
self.SetImage(0)
#posiciion del objeto, importante para poder ubicarlo en patanlla
self.rect = self.image.get_rect()
#defino variable de movimiento que define la direccion va de 1 a 4
self.dir = 0
self.pos = 0 # que pos del sprite se usara
self.timeSalto = 20
#defino la velociad con la que se movera
self.var_x =0
self.var_y = 0
self.salto = False
#defino el numero de vidas que tiene el Jugadores
self.Vidas = 5
def SetImage(self,pos):
self.image = pygame.image.load(self.sprites[pos]).convert_alpha()
def setPos(self, x, y):
self.rect.x = x
self.rect.y = y
def movDer(self):
self.rect.x += self.vel
def movIzq(self):
self.rect.x -= self.vel
def update(self):
'''if self.dir == 1:
if self.rect.x > 0:
self.movIzq()
#self.dir = 0
elif self.dir == 2:
if self.rect.x < 350:
self.movDer()
#self.dir = 0'''
if self.salto:
self.timeSalto = 20
self.rect.x += self.var_x
self.rect.y += self.var_y
self.salto=False
self.SetImage(self.dir + 4)
self.timeSalto -= 1
if self.timeSalto < 0:
self.timeSalto = 10
self.SetImage(self.dir)
```
#### File: Librery/colisionesJugEnm/Rival.py
```python
import pygame
ANCHO=640
ALTO=480
BLANCO=(255,255,255)
NEGRO=(0,0,0)
ROJO=(255,0,0)
AZUL=(0,0,255)
VERDE=(0,255,0)
class Rival(pygame.sprite.Sprite):
def __init__(self, alto, ancho, col ):
pygame.sprite.Sprite.__init__(self)
self.image = pygame.Surface([ancho, alto])
self.image.fill(col)
self.rect = self.image.get_rect()
```
#### File: plataforma/arrastarysoltar/bloque.py
```python
import pygame
from configuraciones import *
class bloque(pygame.sprite.Sprite):
def __init__(self, ancho,alto, col = AZUL):
pygame.sprite.Sprite.__init__(self)
self.image = pygame.Surface([ancho, alto])
self.image.fill(col)
self.rect = self.image.get_rect()
#self.setPos(100,400)
self.rect.x = 200
self.rect.y = 200
self.click = False #indicarle si se ha dado click sobre el
def update(self):
self.rect.y += 1
if self.click:
self.rect.center = pygame.mouse.get_pos() #cada que doy click actulizo
```
#### File: plataforma/fondos/montar.py
```python
import pygame
import ConfigParser
#from Plano import *
#from LibPolares import *
#from Images import *
#from Algebralineal import *
import random
ANCHO=700
ALTO=700
BLANCO=(255,255,255)
NEGRO=(0,0,0)
ROJO=(255,0,0)
AZUL=(0,0,255)
VERDE=(0,255,0)
class Pantalla:
def __init__(self,p, recortes,inte):
self.p = pantalla
self.recortes = recortes
self.interprete = inte
#obtengo el mapa y lo divido en listas por filas desde el interprete
self.mapa = self.interprete.get('nivel1','mapa').split('\n')
#self.dict( interprete.items('.'))
def dibujarMapa(self):
anal = 32 #ancho y alto de cada sprite que hay en recortes
i = 0
j = 0
for lista in self.mapa:
for e in lista:
x = int(self.interprete.get(e,'x')) #a cada tipo de elemento que existe en el mapa busco su pos en x
y = int(self.interprete.get(e,'y'))#a cada tipo de elemento que existe en el mapa busco su pos en y
self.p.blit(recortes[x][y],[i * anal,j * anal])#lo dibujo sobre la pantalla
i+= 1
j += 1
i = 0
pygame.display.flip()#redibujo la pantalla
def recortar(archivo, an , al):
fondo = pygame.image.load(archivo).convert()
info = fondo.get_size()
img_ancho = info[0] #alto y ancho de cada sprite
img_alto = info[1]
corte_x = img_ancho /an
corte_y = img_alto/al
m = []
for i in range(an):
fila = []
for j in range(al):
cuadro = [i*corte_x,j*corte_y,corte_x,corte_y]
recorte = fondo.subsurface(cuadro)
fila.append(recorte)
m.append(fila)
return m
if __name__ =='__main__':
pygame.init()
pantalla=pygame.display.set_mode([ANCHO, ALTO])
#fondo = pygame.image.load('terrenogen.png').convert()
#info = fondo.get_size() #puedo optener ancho y alto de la imagen
#img_ancho = info[0]
#img_alto = info[1]
#cuadro= [20*32,0,32,32]
#recorte = fondo.subsurface(cuadro) #recorte de la superficie
recortes = recortar('terrenogen.png',32,12)
'''for i in range(32):
for j in range(12):
pantalla.blit(recortes[i][j],[0,0])'''
#cargo el mapa
archivo = 'map.map'
interprete = ConfigParser.ConfigParser()
interprete.read(archivo)
#pantalla.blit(recortes[10][0],[0,0])
#pygame.display.flip()
pan = Pantalla(pantalla,recortes,interprete)
pan.dibujarMapa()
fin = False
puntos = 0
while not fin:
for event in pygame.event.get():
if event.type == pygame.QUIT:
fin=True
``` |
{
"source": "Jofemago/K-MEANS",
"score": 3
} |
#### File: distribuite/proxy/broker.py
```python
import zmq
import sys
import math
import numpy
class Broker:
context = zmq.Context()
router = context.socket(zmq.ROUTER)
#poller = zmq.Poller()
p = 0
def __init__(self, n):
self.op = {"WorkDone":self.serverResponse, "serverFREE":self.serverFree}
self.router.bind("tcp://*:5000")
#elementos para realizar la operacion
self.n = n
self.bestK = None
self.obtenido = {}
#self.colaK = [1, self.n//2, self.n]
#self.colaK = [1, numpy.random.randint(2,self.n/2),numpy.random.randint(self.n/2,self.n + 1)]
self.colaK = [1,int(self.n/8), int(self.n/4 + 1)]
self.cantKCalculate = 0
def serverResponse(self, data):
print("el servidor acabo de evaluar un k")
print(data[1])
print(data)
#guardar el resultado en una estructura de datos, evaluar estructura de datos
kObtenido = int(data[2].decode())
ssdobtenido = float(data[3].decode())
if kObtenido in self.obtenido:
print("el k ya habia sido calculado")
else:
self.obtenido[kObtenido] = ssdobtenido
print("obtenido k: " , kObtenido, "su ssd:", ssdobtenido)
self.cantKCalculate += 1
def serverFree(self, data):
print("un servidor esta libre se le va asignar un k para que trabaje")
print(data[1])
#validar si no tengo que conseguir mas k
#enviar un mensaje diciendole al sever que termino para que no mande mas trabajo
#sacar k que necesito pedir de alguna estructura de datos
#msj = None
if self.cantKCalculate <= math.sqrt(self.n):
if len(self.colaK): #hay elementos para enviar
ktocalc = self.colaK.pop(0)
msj = [data[0], b'KMEANS', str(ktocalc).encode()]#, b"probando"]
self.router.send_multipart(msj)
#self.p += 1#tengo un k adicional
else:#espere que no hay trabajo
msj = [data[0], b'WAIT', b"0"]#, b"probando"]
self.router.send_multipart(msj)
#self.p += 1#tengo un k adicional
else:
print("ha finalizado el proceso no puedo enviar mas")
msj = [data[0], b'Finish', b"0"]#, b"probando"]
self.router.send_multipart(msj)
#self.p += 1
def run(self):
print("Running the server Broker....")
while True:#cambiar esto hasta que el numero que K que haya pedido sea raiz de n
#print("revisando si un server ha solicitado")
if self.router.poll(100):
print("-----------un servidor ha hecho una solicitud--------------")
msj = self.router.recv_multipart()
#print("lo que me envia el server:", msj[1])
self.op[msj[1].decode()](msj)
#validar los k que tengo si son suficientes
#validar el k apropiado hasta este momento
#agregar a una cola que K's voy a pedir
if len(list(self.obtenido.keys())) >= 3:
print("calculando elbow")
a,b,c = self.elbow2()
print("k a buscar", a,b,c)
try:
self.colaK.append(numpy.random.randint(a,b+1))
self.colaK.append(numpy.random.randint(b, c+1))
#self.colaK.append(numpy.random.randint(1, 3000))
#self.colaK.append(numpy.random.randint(1, 3000))
except Exception as e:
print("hubo un erro y no se peuden agregar k a la cola")
#self.colaK.append(numpy.random.randint(l,m+1))
#self.colaK.append(numpy.random.randint(m, r+1))
#distribuciones y agregar a la cola de k
print("el mejor k hasta el momento:" , self.bestK)
def dist(self, x, y):
return math.sqrt(x*x + y*y)
def calculoTheta(self, x1, y1, x2, y2) :
        var = (x1*x2 + y1*y2)/(self.dist(x1, y1)*self.dist(x2, y2))
print("el valor a calcular en el acos", var)
if var > 1:
var = 1
if var < -1:
var = -1
res = math.acos(var)
print("el valor del theta calculado es:", res)
return res
def elbow2(self):
listaOrdenada = list(self.obtenido.keys())#los value represetan los y
listaOrdenada.sort()#tomo las llaves que representan los x
l = 0
r = len(listaOrdenada) - 1
k = (l+r)>>1#dividir entre dos
theta = self.calculoTheta(listaOrdenada[l]-listaOrdenada[k],
self.obtenido[listaOrdenada[l]] - self.obtenido[listaOrdenada[k]],
listaOrdenada[r]-listaOrdenada[k],
self.obtenido[listaOrdenada[r]] - self.obtenido[listaOrdenada[k]])
flag = True
while flag:
flag = False
midI = math.ceil((k+l)/2)#techo
midD = math.floor((k+r)/2)
thetaD = 4
thetaI = 4
orientation = 0
if midI < k:
thetaI = self.calculoTheta(listaOrdenada[l]-listaOrdenada[midI],
self.obtenido[listaOrdenada[l]] - self.obtenido[listaOrdenada[midI]],
listaOrdenada[k]-listaOrdenada[midI],
self.obtenido[listaOrdenada[k]] - self.obtenido[listaOrdenada[midI]])
if midD > k:
thetaD = self.calculoTheta(listaOrdenada[k]-listaOrdenada[midD],
self.obtenido[listaOrdenada[k]] - self.obtenido[listaOrdenada[midD]],
listaOrdenada[r]-listaOrdenada[midD],
self.obtenido[listaOrdenada[r]] - self.obtenido[listaOrdenada[midD]])
#validar primero si los id son validos
if (thetaD < theta) or (thetaI < theta):
#tanteo las thetas xD
print("posiciones")
print(l)
print(k)
print(r)
if thetaD < thetaI:
print("derecha")
print("mid", midD)
flag = True
theta = thetaD
l = k
k = midD
self.bestK = listaOrdenada[k]
orientation = 0
else:
print("izquierda")
print("mid", midI)
flag = True
theta = thetaI
r = k
k = midI
self.bestK = listaOrdenada[k]
orientation = 1
print("posiciones actualizadas")
print(l)
print(k)
print(r)
"""if orientation:
return listaOrdenada[k], listaOrdenada[r]
else:
return listaOrdenada[l], listaOrdenada[k]"""
print(listaOrdenada)
return listaOrdenada[l], listaOrdenada[k], listaOrdenada[r]
def elbow(self):
listaOrdenada = list(self.obtenido.keys())#los value represetan los y
listaOrdenada.sort()#tomo las llaves que representan los x
l = 0
r = len(listaOrdenada) - 1
k = (l+r)>>1#dividir entre dos
self.bestK = k
# En la posicion 0 esta el 'x' y en la posicion 1 esta el 'y'
# calculamos el theta inicial
#theta = calculoTheta(listaOrdenada[l][0]-listaOrdenada[k][0], listaOrdenada[l][1]-listaOrdenada[k][1],listaOrdenada[r][0]-listaOrdenada[k][0], listaOrdenada[r][1]-listaOrdenada[k][1])
theta = self.calculoTheta(listaOrdenada[l]-listaOrdenada[k],
self.obtenido[listaOrdenada[l]] - self.obtenido[listaOrdenada[k]],
listaOrdenada[r]-listaOrdenada[k],
self.obtenido[listaOrdenada[r]] - self.obtenido[listaOrdenada[k]])
print("valor de thetha", theta)
flag = True
while(flag) :
flag = False
#mid = (k+r)>>1#piso
mid = math.floor((k+r)/2)
print("el valor de mid", mid)
print("el valor de r", r)
print("el valor de k", k)
print("el valor de l", l)
print(listaOrdenada)
print(list(self.obtenido.items()))
#auxmid = 0
#k mid r
# calculamos el theta temp por el lado derecho
temp = self.calculoTheta(listaOrdenada[k]-listaOrdenada[mid],
self.obtenido[listaOrdenada[k]] - self.obtenido[listaOrdenada[mid]],
listaOrdenada[r]-listaOrdenada[mid],
self.obtenido[listaOrdenada[r]] - self.obtenido[listaOrdenada[mid]])
# Comprobamos si el theta temp es menor que el tetha actual
if(theta > temp) :
flag = True
theta = temp
l = k
k = mid
self.bestK = k
mid = math.ceil((k+l)/2)#techo
# calculamos el theta temp por el lado izquierdo
#temp = calculoTheta(listaOrdenada[l][0]-listaOrdenada[mid][0], listaOrdenada[l][1]-listaOrdenada[mid][1],
#listaOrdenada[k][0]-listaOrdenada[mid][0], listaOrdenada[k][1]-listaOrdenada[mid][1])
temp = self.calculoTheta(listaOrdenada[l]-listaOrdenada[mid],
self.obtenido[listaOrdenada[l]] - self.obtenido[listaOrdenada[mid]],
listaOrdenada[k]-listaOrdenada[mid],
self.obtenido[listaOrdenada[k]] - self.obtenido[listaOrdenada[mid]])
# comprobamos si el theta es menor
if(theta > temp) :
flag = True
theta = temp
r = k
k = mid
self.bestK = k
#l2,k5,r9
return l,k,r
if __name__ == '__main__':
cantPoints = int(sys.argv[1])
print("cantidad de puntos:", cantPoints)
b = Broker(cantPoints)
b.run()
``` |
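The angle test in `calculoTheta`/`elbow2` above can be tried on its own; the sketch below (illustrative, not part of the repository) scores every interior k of a made-up SSD curve and keeps the one with the sharpest bend, which is what the broker's refinement loop converges toward.
```python
# Standalone version of the broker's angle criterion on a toy SSD curve.
import math

def interior_angle(p_left, p_mid, p_right):
    ax, ay = p_left[0] - p_mid[0], p_left[1] - p_mid[1]
    bx, by = p_right[0] - p_mid[0], p_right[1] - p_mid[1]
    cos_t = (ax * bx + ay * by) / (math.hypot(ax, ay) * math.hypot(bx, by))
    return math.acos(max(-1.0, min(1.0, cos_t)))  # clamp, as the broker does

ssd = {1: 100.0, 2: 50.0, 3: 5.0, 4: 3.0, 5: 1.5, 6: 0.5}  # made-up curve
ks = sorted(ssd)
pts = [(k, ssd[k]) for k in ks]
angles = {ks[i]: interior_angle(pts[i - 1], pts[i], pts[i + 1])
          for i in range(1, len(ks) - 1)}
print(min(angles, key=angles.get))  # 3: the sharpest elbow of this curve
```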
{
"source": "Jofemago/La-deshonra-del-ninja",
"score": 3
} |
#### File: Jofemago/La-deshonra-del-ninja/EntreNiveles1.py
```python
import pygame
from configuraciones import *
def EntreNiveles1(pantalla,bonus,nivel):
pygame.init()
pygame.display.flip()
fin = False
background = pygame.image.load('Imagenes/Menu/background.jpg')
title = "FELICIDADES"
t1 = "Ganaste el nivel " + str(nivel)
t2 = "Bonus: " + str(bonus)
t3 = "Presiona una tecla para continuar"
fTitle = pygame.font.SysFont("monospace",40)
fT = pygame.font.SysFont("monospace",30)
imgTitle = fTitle.render(title,1,BLANCO)
imgT1 = fT.render(t1,1,BLANCO)
imgT2 = fT.render(t2,1,BLANCO)
imgT3 = fT.render(t3,1,BLANCO)
sonidoGanar = pygame.mixer.Sound('Sonidos/Mario/ganar.wav')
sonidoGanar.play()
while not fin:
for event in pygame.event.get():
if event.type == pygame.QUIT:
fin = True
return True
if event.type == pygame.KEYDOWN:
return False
if nivel == 1:
pantalla.blit(background,[0,-250])
pantalla.blit(imgTitle,[CENTRO[0] - 200,10])
pantalla.blit(imgT1,[CENTRO[0] - 200,100])
pantalla.blit(imgT2,[CENTRO[0] - 200,200])
pantalla.blit(imgT3,[CENTRO[0] - 200,300])
pygame.display.flip()
```
#### File: La-deshonra-del-ninja/Jugadores/Mario.py
```python
import pygame
from configuraciones import *
from Objetos.Fireball import *
#from MapaNivel1 import *
from Mapa.MapaNivel1 import *
class Mario(pygame.sprite.Sprite):
def __init__(self,imgpeque,imggrande, imgfuego = None, col = AZUL):
pygame.sprite.Sprite.__init__(self)
self.m = imgfuego
self.vidas = 3
self.imgpeque = imgpeque
self.imggrande = imggrande
self.imgfuego = imgfuego
self.image = self.m[0][0]
self.rect = self.image.get_rect()
self.setPos(100,0)
self.bonus = 0
#estado para saber que mario es, peque fuego o grande
self.estado = 1
#variables para los movimientos de sprites
self.dir = 0
self.i = 0
#valor de la gravedad
self.g = -15
#controla la velocidad de movimiento en X
self.vel_x = 0.07
#Velocidad de actualiza
self.sprite = 4
self.var_basesprite = self.sprite
self.var_sprite = self.var_basesprite
#variables de movimiento
self.var_x = 0
self.var_y = 0
#variable que calcula el salto de el mario significa que en faso
self.saltar = False
#self.vidas = vidas
#self.plataformas = None
self.suelos = pygame.sprite.Group()
#self.Enemigos = pygame.sprite.Group()
self.col = False
#control de disparo
self.disparo = False
self.tiempodis = 20 # tiempo que debe esperar despues de dispara para seguir disparando
self.controldis = self.tiempodis#tiempo que se reducria
self.inmune = False
self.inmunebase = 30
self.tiempoinmune = self.inmunebase
self.morir = False
def inmunidad(self):
if self.inmune == True:
if self.tiempoinmune <= 0:
self.inmune = False
self.tiempoinmune -= 1
else:
self.tiempoinmune = self.inmunebase
def disparar(self):
if self.disparo:
if self.controldis <=0:
self.disparo = False
self.controldis = self.tiempodis
self.controldis -= 1
def setPos(self, x, y):
self.rect.x = x
self.rect.y = y
def setX(self, x):
self.rect.x = x
    def setY(self, y):
        self.rect.y = y
def movX(self):
#VALIDA QUE ESTE DENTRO DE LOS BORDES
if self.rect.x >= ANCHO -self.rect.width and self.var_x >= 0:
self.var_x = 0
if self.rect.x <= 0 and self.var_x <= 0:
self.var_x = 0
#self.rect.x += self.var_x
def movY(self):
if self.rect.y >= ALTO - self.rect.height and self.var_y >= 0:
self.var_y = 0
self.saltar = False
if self.rect.y <= 100 and self.var_y <= 0:
self.var_y = 0
#self.saltar = False
#self.rect.y += self.var_y
def gravedad(self):
#si no hya colision que empiece a bajar haciendo efecto de gravedad
if not self.col:
if self.var_y == 0:
self.var_y = 1
else:
self.var_y += 1
#bordes
#if self.rect.y >= ALTO - self.rect.height and self.var_y >= 0:
# self.var_y = 1
# self.saltar = False
# self.rect.y = ALTO - self.rect.height
def caer(self):
self.rect.y += self.var_y #para que siempre juegue la gravedad
#self.validarColY()
def salto(self):
if not self.saltar:
if self.var_x <= 0:
self.var_y = self.g + self.var_x/2*2 #haga la variable de salto y mueva elsprite hasta la posicion indicada
#self.var_x =-1
else:
self.var_y = self.g - self.var_x/2*2
#self.var_x =1
#else:
# self.var_y = -7
self.saltar = True
#este mientras sube ira perdiendo la altura con la gravedad activa
def validarColision(self):
ls_bl = pygame.sprite.spritecollide(self,self.suelos, False)
if len(ls_bl) > 0:# si de verdad hay colision
for m in ls_bl:#otra solucion es que cuando toque por la parte de ariba del objeto la variacion en y sea 0
if self.var_x > 0:
self.rect.right = m.rect.left
self.col = True # haga colision true para que no afecte la gravedad
#self.gravedad()
elif self.var_x < 0:
self.rect.left = m.rect.right
self.col = True
#self.gravedad()
else:
self.col = False# si no hay colision active de nuevo la gravedad
self.rect.y += self.var_y #para que siempre juegue la gravedad
ls_bl = pygame.sprite.spritecollide(self,self.suelos, False)
if len(ls_bl) > 0:
for m in ls_bl:
if self.var_y > 0:
self.rect.bottom = m.rect.top
#como choca no puede estar en sprite de salto, esta montado en una platafoma
self.saltar = False
self.col = True
elif self.var_y < 0:
self.rect.top = m.rect.bottom
self.ChoqueCabezaMario(m)
self.var_y = 0
self.col = True
else:
self.col = False
#como actualizar determiandos suelos cuando la cabeza del mario los toque
def ChoqueCabezaMario(self, m):
#QUE TIENE QUE HACER CUANDO CHOQUE LOS DISTINTOS TIPOS DE CUADROS POR DEBAJO
if m.getTipo() == 'bonus':
m.modificarEstado(self.estado)
#SE ENCARGA DEL MOVIMIENTO TANTO COMO DEL SPRITE COMO DE INCREMETAR EL MOVMINETO DEL JUGADOR
def movSprite(self):
if self.var_sprite <= 0:
if self.var_x != 0:
if self.i < 3 :
self.i += 1
else:
self.i = 0
else:
self.i = 0
self.var_sprite = self.var_basesprite
self.var_sprite -= 1
if self.saltar:
self.image = self.m[self.i][self.dir+2]
else:
self.image = self.m[self.i][self.dir]
#self.rect = self.image.get_rect()
self.rect.x += self.var_x
#self.validarColX()
#Se encarga de controlar la velocidad con la que se mueve mario en el X va ir aumentando a medida que este se mueva
def aumentoVelocidad(self):
if self.var_x >= -5 and self.var_x <= 5:
if self.var_x != 0:
if self.var_x > 0:
self.var_x += self.vel_x
#self.validarColX()
self.var_basesprite -= self.vel_x/3
else:
self.var_x -= self.vel_x
#self.validarColX()
self.var_basesprite -= self.vel_x/3
#hace crecer a mario
def crecer(self):
if self.estado < 3:
if self.estado == 1:
self.rect.y -= 60
self.estado += 1
#hacer enchiquetecer a mario
def enano(self):
self.estado = 1
#validar que juego de sprites debe tomar mario dependiendo de su estado
def validarImagen(self):
if self.estado == 3:
self.m = self.imgfuego
elif self.estado == 2:
self.m = self.imggrande
else:
self.m = self.imgpeque
if self.saltar:
self.image = self.m[self.i][self.dir+2]
else:
self.image = self.m[self.i][self.dir]
x = self.rect.x
y = self.rect.y
self.rect = self.image.get_rect()
self.rect.x = x
self.rect.y = y
def update(self):
if not self.morir:
self.inmunidad()#valida si acaba de chocar contra algun enemigo
self.movX()
#self.rect.y += self.var_y
self.aumentoVelocidad()
#movimiento de los sprites
self.movSprite()
#self.validarColX()
self.gravedad()
#colisiones
self.validarColision()
#self.caer()
#validamos siempre de que tamano es mario
self.validarImagen()
# si se ha disparado espera un momento corto para volverlo a hacer
self.disparar()
else:
#self.caer()
self.var_y -= 1
self.rect.y -= self.var_y
self.image = self.m[1][4]
``` |
{
"source": "joferkington/euler_pole",
"score": 3
} |
#### File: euler_pole/euler_pole/euler_pole.py
```python
import numpy as np
class EulerPole(object):
earth_radius = 6371.009 # Km (Mean radius from IUGG)
def __init__(self, lat, lon, rate):
"""
Parameters:
-----------
lat: The latitude of the pole in degrees
lon: The longitude of the pole in degrees
rate: The rotation velocity of the pole in deg/myr
(counterclockwise-positive)
"""
self.lat = lat
self.lon = lon
self.rot_velocity = -rate
def __sub__(self, other):
lat, lon, vel = cart2sph(*(self.omega - other.omega))
return EulerPole(lat, lon, np.degrees(vel))
def __add__(self, other):
lat, lon, vel = cart2sph(*(self.omega + other.omega))
return EulerPole(lat, lon, np.degrees(vel))
def __neg__(self):
lat, lon, vel = cart2sph(*-self.omega)
return EulerPole(lat, lon, np.degrees(vel))
def __repr__(self):
template = 'EulerPole(lat={0}, lon={1}, rot_velocity={2})'
return template.format(self.lat, self.lon, -self.rot_velocity)
@property
def coord_basis(self):
"""The coordinate basis for the pole's reference frame.
(a 3x3 numpy array)"""
x, y, z = sph2cart(self.lat, self.lon)
pole = np.array([x,y,z])
vec1 = np.cross(pole, [0, 0, 1])
vec2 = np.cross(pole, vec1)
basis = np.vstack([pole, vec1, vec2]).T
return basis
@property
def pole_transform(self):
"""Transformation matrix to go between earth reference coordinates
and the euler pole reference coordinates (a 3x3 numpy array)"""
return np.linalg.inv(self.coord_basis)
@property
def inv_pole_transform(self):
"""Transformation matrix to go between the euler pole's reference
coordinates and the earth's reference coordinates (a 3x3 numpy
array)"""
return self.coord_basis
def rotate(self, lat, lon, angle):
"""Rotates a feature (given by arrays of "lat" and "lon") around
the Euler pole by the given angle. Returns 2 arrays of lat and lon
with the same length as the input arrays."""
# Convert the input into Euler pole basis coordinates (with the
# "north pole" at the euler pole)
x, y, z = sph2cart(lat, lon)
inp_xyz = np.vstack([x,y,z]).T
xx, yy, zz = np.dot(inp_xyz, self.pole_transform).T
lat_prime, lon_prime, r_prime = cart2sph(xx, yy, zz)
# Add the rotation angle to the longitude...
lon_prime += angle
xx, yy, zz = sph2cart(lat_prime, lon_prime, r_prime)
# ...And convert back into lat, long.
xyz = np.vstack([xx,yy,zz]).T
xx, yy, zz = np.dot(xyz, self.inv_pole_transform).T
new_lat, new_lon, _ = cart2sph(xx, yy, zz)
return new_lat, new_lon
def move(self, lat, lon, time):
"""Moves a feature _back_ in time by "time" million years.
Use a negative time to move the feature "into the future"."""
angle = time * self.rot_velocity
new_lat, new_lon = self.rotate(lat, lon, angle)
return new_lat, new_lon
@property
def omega(self):
"""The Euler vector for the pole in geocentric Cartesian Coordinates."""
vec = sph2cart(self.lat, self.lon, np.radians(self.rot_velocity))
return np.array(vec)
def velocity(self, lat, lon):
"""
Calculates the azimuth (in degrees) and rate of plate motion
(in millimeters per year) at a given point.
Parameters:
-----------
lat : The latitude of the point in degrees
lon : The longitude of the point in degrees
Returns:
azimuth : The azimuth in degrees clockwise from north
rate : The rate in mm/yr
"""
east, north, down = self.velocity_components(lat, lon)
azi = azimuth(east, north)
rate = np.sqrt(north**2 + east**2 + down**2)
return azi, rate
def velocity_components(self, lat, lon):
"""
Calculates the eastward, northward, and downward componenents (in mm/yr)
of plate velocity at a given point.
Parameters:
-----------
lat : The latitude of the point in degrees
lon : The longitude of the point in degrees
Returns:
--------
east, north : The eastward and northward components of the plate
velocity in millimeters per year at the given point.
"""
# Convert position (from lat, lon) into geocentric cartesian coords
r = sph2cart(lat, lon, self.earth_radius)
# Velocity at the earth's surface is then just omega x r
v = np.cross(self.omega, r)
# We can then convert this back to local (north, east, down) coordinates
east, north, down = local_coords(lat, lon, v[0], v[1], v[2])
return east, north, down
#-- Utility Functions --------------------------------------------------------
def sph2cart(lat, lon, r=1):
"""Convert spherical coordinates to cartesian. Default raduis is 1 (unit
length). Input is in degrees, output is in km."""
lat, lon = np.radians(lat), np.radians(lon)
x = r * np.cos(lat) * np.cos(lon)
y = r * np.cos(lat) * np.sin(lon)
z = r * np.sin(lat)
return x,y,z
def cart2sph(x,y,z):
"""Convert cartesian geocentric coordinates to spherical coordinates.
Output is in degrees (for lat & lon) and whatever input units are for the
radius."""
r = np.sqrt(x**2 + y**2 + z**2)
lat = np.arcsin(z / r)
lon = np.arctan2(y, x)
return np.degrees(lat), np.degrees(lon), r
def local_coords(lat, lon, x,y,z):
"""Calculate local east,north,down components of x,y,z at lat,lon"""
lat, lon = np.radians(lat), np.radians(lon)
north = - np.sin(lat) * np.cos(lon) * x \
- np.sin(lat) * np.sin(lon) * y \
+ np.cos(lat) * z
east = - np.sin(lon) * x + np.cos(lon) * y
down = - np.cos(lat) * np.cos(lon) * x \
           - np.cos(lat) * np.sin(lon) * y \
- np.sin(lat) * z
return east, north, down
def azimuth(east, north):
"""Returns azimuth in degrees counterclockwise from North given north and
east components"""
azi = np.degrees(np.arctan2(north, east))
azi = 90 - azi
if azi <= 0:
azi +=360
return azi
```
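A short usage sketch (not from the repository): the pole parameters below are made up for illustration, so the printed azimuth and rate are not a published plate-motion estimate, and the import assumes `EulerPole` is exposed at package level in the same way the helper functions are used in the tests below.
```python
# Hypothetical pole purely for illustration (lat, lon in degrees; rate in deg/Myr).
from euler_pole import EulerPole

pole = EulerPole(lat=50.0, lon=-75.0, rate=0.3)

# Predicted plate motion at a point: azimuth (deg clockwise from N) and rate (mm/yr).
azi, rate = pole.velocity(lat=35.0, lon=140.0)
print("azimuth {:.1f} deg, rate {:.1f} mm/yr".format(azi, rate))

# Restore the same point 10 Myr back along the pole's rotation.
print(pole.move(35.0, 140.0, time=10))
```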
#### File: euler_pole/tests/test_euler_pole.py
```python
import itertools
import numpy as np
import euler_pole
class TestCartesianSphericalConversions:
def setup_method(self, method):
self.cart_sph = [[(0, 0, 1), (90, 0, 1)],
[(0, 1, 0), (0, 90, 1)],
[(1, 0, 0), (0, 0, 1)],
[(0, 0, -1), (-90, 0, 1)],
[(0, -1, 0), (0, -90, 1)],
[(0, -1, 0), (0, 270, 1)],
[(-1, 0, 0), (0, 180, 1)],
[(-1, 0, 0), (0, -180, 1)],
[(1, 0, 0), (0, 360, 1)],
[(3, 4, 0), (0, 53.13, 5)],
]
def positive_longitude(self, lon):
if lon < 0:
lon += 360
while lon >= 360:
lon -= 360
return lon
def equal_sph(self, coord1, coord2):
coord1, coord2 = list(coord1), list(coord2)
for coord in [coord1, coord2]:
coord[1] = self.positive_longitude(coord[1])
        return np.allclose(coord1, coord2)
def test_cart2sph(self):
for cart, sph in self.cart_sph:
assert self.equal_sph(sph, euler_pole.cart2sph(*cart))
def test_sph2cart(self):
for cart, sph in self.cart_sph:
assert np.allclose(cart, euler_pole.sph2cart(*sph))
def test_chain_cart2sph(self):
for cart, sph in self.cart_sph:
trans_cart = euler_pole.sph2cart(*euler_pole.cart2sph(*cart))
trans_sph = euler_pole.cart2sph(*euler_pole.sph2cart(*sph))
assert np.allclose(cart, trans_cart)
assert self.equal_sph(sph, trans_sph)
def test_magnitude(self):
x, y, z = euler_pole.sph2cart(0, 0, 1)
assert np.allclose(np.sqrt(x**2 + y**2 + z**2), 1)
class TestLocalCoords:
def test_simple(self):
lat, lon = 0, 0
x, y, z = 0, 1, 0
assert np.allclose([0, 1, 0], euler_pole.local_coords(lat, lon, x,y,z))
def test_zero_down(self):
positions = itertools.product([45, -45], [45, -45, 135, 215])
for lat, lon in positions:
north_pole = [0, 0, 1]
x, y, z = np.cross(north_pole, euler_pole.sph2cart(lat, lon))
assert np.allclose(z, 0)
assert np.allclose(np.hypot(x, y), np.sqrt(2) / 2)
class TestEulerPole:
def test_velocity(self):
pass
``` |
{
"source": "joferkington/fault_kinematics",
"score": 3
} |
#### File: fault_kinematics/fault_kinematics/homogeneous_simple_shear.py
```python
import numpy as np
import scipy.interpolate as interpolate
import scipy.optimize
def invert_slip(faultxyz, horizonxyz, alpha=None, guess=(0,0),
return_metric=False, verbose=False, overlap_thresh=0.3,
**kwargs):
"""
Given a fault, horizon, and optionally, a shear angle, and starting guess,
find the offset vector that best flattens the horizon using Powell's method.
If the shear angle (`alpha`) is not specified, invert for an offset vector
and a shear angle.
Uses the variance as a metric of "flatness".
Parameters:
-----------
faultxyz : An Nx3 array of points making up the fault surface
horxyz : An Mx3 array of points along the horizon surface
alpha : A shear angle (in degrees) measured from vertical (i.e.
vertical shear corresponds to alpha=0) through which the hanging
wall deforms. This is constrained to lie in the vertical plane
defined by the slip vector. If alpha is None, it will be solved for.
guess : An initial displacement vector of (dx, dy) or (dx, dy, alpha)
if alpha is not specified.
return_metric : If True, return the minimized "roughness".
overlap_thresh : If less than `overlap_thresh*100` percent of the
"moved" horizon's points are within the bounds of the fault, the
result is penalized.
Additional keyword arguments are passed on to scipy.optimize.fmin_powell
Returns:
--------
slip : A sequence of `(dx, dy)` or `(dx, dy, alpha)` if alpha is not
manually specified, defining the offset vector (and/or shear angle)
that best flattens the horizon
metric : (Only if return_metric is True) The resulting "flattness".
"""
if (alpha is None) and (len(guess) == 2):
guess = guess + (0,)
func = _Shear(faultxyz, horizonxyz, alpha, overlap_thresh)
# Set a few defaults...
kwargs['disp'] = kwargs.get('disp', False)
kwargs['full_output'] = True
# Powell's method appears more reliable than CG for this problem...
items = scipy.optimize.fmin_powell(func, guess, **kwargs)
slip = items[0]
if return_metric:
return slip, items[1]
else:
return slip
def vertical_shear(faultxyz, horxyz, slip, remove_invalid=True):
"""
Models vertical shear along a fault. Uses Piecewise linear interpolation
to define surfaces from the given, unordered, points.
Parameters:
-----------
faultxyz : An Nx3 array of points making up the fault surface
horxyz : An Mx3 array of points along the horizon surface
slip : A displacement vector in 2 or 3 dimensions. If 2D, the
last element is assumed to be 0. (3D is allowed so that this
function can be used easily within the inclined_shear function.)
remove_invalid : A boolean indicating whether points that have been
moved off the fault's surface and have undefined values should be
removed from the results. If True, only valid points will be
returned, if False, the result may have NaN's.
Returns:
--------
movedxyz : An Mx3 array of points representing the "moved" horizon.
"""
try:
dx, dy = slip
dz = 0
except ValueError:
dx, dy, dz = slip
# Interpolate the fault's elevation values at the starting and ending
# positions for the horizon's xy values.
interp = interpolate.LinearNDInterpolator(faultxyz[:,:2], faultxyz[:,-1])
zorig = interp(horxyz[:,:2])
zfinal = interp(horxyz[:,:2] + [dx, dy])
# Calculate the z-offset for the horizon by the difference in the _fault's_
# elevation at the starting and ending horizon xy positions.
dz = (zfinal - zorig) + dz
# Remove points that have been moved off the fault, as their values are
# undefined.
if remove_invalid:
mask = np.isfinite(dz)
horxyz = horxyz[mask]
else:
        mask = np.ones(dz.shape, dtype=bool)
# Update the horizon's position
horxyz[:,:2] += [dx, dy]
horxyz[:,-1] += dz[mask]
return horxyz
def inclined_shear(faultxyz, horxyz, slip, alpha, remove_invalid=True):
"""
    Models homogeneous inclined shear along a fault. This assumes that the
shear angle lies in the plane of slip. Uses Piecewise linear interpolation
to define surfaces from the given, unordered, points.
Parameters:
-----------
faultxyz : An Nx3 array of points making up the fault surface
horxyz : An Mx3 array of points along the horizon surface
slip : A displacement vector in 2 dimensions.
alpha : A shear angle (in degrees) measured from vertical (i.e.
vertical shear corresponds to alpha=0) through which the hanging
wall deforms. This is constrained to lie in the vertical plane
defined by the slip vector. Alternately, a sequence of three euler
angles (representing rotations about the z, y, & x axes in degrees)
may be specified.
remove_invalid : A boolean indicating whether points that have been
moved off the fault's surface and have undefined values should be
removed from the results. If True, only valid points will be
returned, if False, the result may have NaN's.
Returns:
--------
movedxyz : An Mx3 array of points representing the "moved" horizon.
"""
dx, dy = slip
try:
theta, alpha, phi = [np.radians(item) for item in alpha]
except TypeError:
theta = np.arctan2(dy, dx)
alpha = np.radians(alpha)
phi = 0
# Rotate slip vector, horizon, and fault into a new reference frame such
# that "down" is parallel to alpha.
dx, dy, ignored = rotate([dx, dy, 0], theta, alpha, phi).ravel()
rotated_horxyz = rotate(horxyz, theta, alpha)
rotated_faultxyz = rotate(faultxyz, theta, alpha)
# In the new reference frame, we can just use vertical shear...
moved_xyz = vertical_shear(rotated_faultxyz, rotated_horxyz, (dx, dy),
remove_invalid)
# Then we rotate things back to the original reference frame.
return rotate(moved_xyz, theta, alpha, phi, inverse=True)
def rotate(xyz, theta, alpha, phi=0, inverse=False):
"""
Rotates a point cloud `xyz` by the three Euler angles `theta`, `alpha`, and
    `phi` given in radians. Performs the inverse rotation if `inverse` is True.
(Intended for internal use. Subject to "gimbal lock".)
    Rotations are performed first around the "z" axis (by theta), then around
the "y" axis (by alpha), then around the "x" axis (by phi).
All angles are in radians
Parameters:
-----------
xyz : An Nx3 array of points.
theta : The rotation angle about the z axis (in the xy plane).
alpha : The rotation angle about the y axis. (After being rotated about
the z axis by theta.)
phi : The rotation angle a about the x axis. (After being rotated by
theta and alpha.)
    inverse : (boolean) If True, perform the inverse rotation.
Returns:
--------
rotated_xyz : An Nx3 array of points rotated into the new coordinate
system.
"""
xyz = np.atleast_2d(xyz)
Rz = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
Ry = np.array([[np.cos(phi), 0, np.sin(phi)],
[0, 1, 0],
[-np.sin(phi), 0, np.cos(phi)]])
Rx = np.array([[1, 0, 0],
[0, np.cos(alpha), -np.sin(alpha)],
[0, np.sin(alpha), np.cos(alpha)]])
rot = Rz.dot(Ry).dot(Rx)
if inverse:
rot = np.linalg.inv(rot)
return rot.dot(xyz.T).T
class _Shear(object):
"""
    A convenience class to calculate "roughness" (deviation from horizontal) for
multiple offsets and/or shear angles using a given fault and horizon.
"""
def __init__(self, fault, horizon, alpha=None, overlap_thresh=0.3):
"""
Parameters:
-----------
fault : An Nx3 array of points making up the fault surface
hor : An Mx3 array of points along the horizon surface
alpha : A shear angle (in degrees) measured from vertical (i.e.
vertical shear corresponds to alpha=0) through which the
hanging wall deforms. This is constrained to lie in the
vertical plane defined by the slip vector. If alpha is None, it
is assumed to be given when the _Shear instance is called.
overlap_thresh : If less than `overlap_thresh*100` percent of the
"moved" horizon's points are within the bounds of the fault,
the result is penalized.
"""
self.fault, self.horizon = fault, horizon
self.alpha = alpha
# Tracking these for non-overlap penalty
self.starting_metric = self.horizon[:,-1].var()
self.overlap_thresh = overlap_thresh
self.numpoints = horizon.shape[0]
def __call__(self, model):
"""
Return the misfit ("roughness") metric for a given slip and/or shear
angle.
Parameters:
-----------
model : A displacement vector of (dx, dy) or (dx, dy, alpha)
if alpha was not specified during initialization.
"""
if self.alpha is None:
slip = model[:2]
alpha = model[-1]
else:
slip = model
alpha = self.alpha
hor = inclined_shear(self.fault, self.horizon, slip, alpha)
metric = self.metric(hor, slip)
return metric
def metric(self, result, slip):
"""The "roughness" of the result."""
if len(result) > 1:
# Variance of the elevation values
roughness = result[:,-1].var()
else:
roughness = self.starting_metric
if result.shape[0] < self.overlap_thresh * self.numpoints:
# If we're mostly off of the fault, penalize the result.
# We want to make sure it's never better than the roughness
# of the "unmoved" horizon. (Also trying to avoid "hard" edges
# here... If we just make it 1e10, it leads to instabilities.
var = max(self.starting_metric, roughness)
roughness = var * (1 + np.hypot(*slip))
return roughness
``` |
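A synthetic sanity check (illustration only, not part of the paper's workflow; the import path follows the one used elsewhere in the repository): over a planar fault with slope -0.5 in x, sliding a flat horizon 100 units in +x with vertical shear should drop it by exactly 50.
```python
import numpy as np
from fault_kinematics.homogeneous_simple_shear import vertical_shear

# Planar fault dipping in +x: z = -0.5 * x, sampled on a grid.
gx, gy = np.meshgrid(np.linspace(0, 1000, 40), np.linspace(0, 1000, 40))
fault = np.column_stack([gx.ravel(), gy.ravel(), -0.5 * gx.ravel()])

# Flat horizon patch at z = -200 over the middle of the fault.
hx, hy = np.meshgrid(np.linspace(300, 700, 20), np.linspace(300, 700, 20))
hor = np.column_stack([hx.ravel(), hy.ravel(), np.full(hx.size, -200.0)])

moved = vertical_shear(fault, hor.copy(), slip=(100, 0))
print(np.allclose(moved[:, -1], -250.0))  # True: dropped by the fault relief, 50
```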
{
"source": "joferkington/joanns_mba_plots",
"score": 3
} |
#### File: joferkington/joanns_mba_plots/plots.py
```python
import numpy as np
import matplotlib.pyplot as plt
import data
def style_setup():
# There are better ways to do this, but I'm being lazy.
plt.rcParams['font.size'] = 14.0
plt.rcParams['legend.fontsize'] = 'medium'
plt.rcParams['axes.labelsize'] = 'large'
def cluster_plot(cluster):
style_setup()
linear_position = position_factory(cluster)
height = 0.2
fig, ax = plt.subplots(figsize=(12, 10))
for year in data.years:
offset = -(0.5 + (len(cluster) // 2)) * height
for key in cluster:
size, performance = data.centers.ix[year][[key + ' X', key + ' Y']]
x0 = linear_position(size, performance)
ax.bar(x0 - data.outer_radius, height, 2 * data.outer_radius, year + offset,
align='edge',
color=data.colors[key], alpha=0.5)
ax.bar(x0 - data.inner_radius, height, 2 * data.inner_radius, year + offset,
align='edge',
color=data.colors[key], label=key if year == 0 else None)
x1 = linear_position(*data.ideals.ix[year][[key + ' X', key + ' Y']])
ax.plot(x1, year + offset + 0.5 * height, marker='.', color='k')
offset += height
ax.set(yticks=data.years, xticks=[], xlabel='Position', ylabel='Year')
ax.legend(loc='upper right')
return fig, ax, linear_position
def plot_products(ax, products, reproject):
names = sorted(set([x.rstrip('XY ') for x in products.columns]))
for name in names:
cols = [name + ' X', name + ' Y']
data = products[cols].dropna()
x, y = [], []
for year, xy in data.iterrows():
x0 = reproject(*xy)
x.extend([x0, x0, x0])
y.extend([year - 0.25, year, year + 0.25])
ax.annotate(name, xy=(x[0], -0.25), xytext=(0, 5), xycoords='data',
textcoords='offset points', ha='center', va='bottom')
ax.plot(x, y, color='gray', lw=2)
ax.set_ylim([-.7, 8.5])
ax.invert_yaxis()
def position_factory(keys):
"""
Define basis vectors based on a market cluster and return a function that
projects size/performance data into a 1D space along the market cluster's
trajectory over time.
"""
# Define new basis vectors for center cluster
cluster = []
for key in keys:
cluster.append(data.centers[[key + ' X', key + ' Y']].values)
cluster = np.vstack(cluster)
vals, vecs = np.linalg.eigh(np.cov(cluster.T))
_idx = np.argmax(vals)
direc = -1 if vecs[0,1] < 0 else 1
def linear_position(x, y):
return direc * vecs.dot([[x], [y]])[_idx][0]
return linear_position
``` |
{
"source": "joferkington/oost_paper_code",
"score": 2
} |
#### File: joferkington/oost_paper_code/basic.py
```python
import numpy as np
import matplotlib.pyplot as plt
import geoprobe
from fault_kinematics.homogeneous_simple_shear import invert_slip
import utilities
import data
def main():
slips, heaves, _, _ = restore_horizons()
plot_restored_locations(slips, heaves)
plt.show()
def restore_horizons(func=invert_slip):
"""
Restore each of the uplifted horizons individually.
"func" just allows overriding of the specific inversion (e.g. see
"invert_slip_fixed_azimuth.py") without code duplication.
"""
# Note that we start each horizon at zero offset and restore independently
guess = (0,0)
variances, planar_variances = [], []
slips, heaves = [], []
for hor in data.horizons[::-1]:
hor_xyz = data.world_xyz(hor)
# Downsample the horizon for faster runtime
# (No need to include millions of points along the horizon's surface)
hor_xyz = hor_xyz[::50]
# Invert for the slip along the fault needed to restore the horizon
# to horizontal.
slip, metric = func(data.fault_xyz, hor_xyz, alpha=data.alpha,
guess=guess, overlap_thresh=1, return_metric=True)
heave = utilities.calculate_heave(slip, hor)
variances.append(metric)
planar_var = planar_variance(hor_xyz)
planar_variances.append(planar_var)
slips.append(slip)
heaves.append(heave)
# Note: We're plotting "metric / planar_var" to allow a direct
# comparison of the quality of the fit between different horizons.
print 'Restoring', hor.name
print ' Roughness (lower is better):', metric / planar_var
return slips, heaves, variances, planar_variances
def plot_restored_locations(slips, heaves):
"""Plot the map-view location of each horizon's restored position."""
# Prepend the present-day location of 0,0 for plotting...
slips = [(0,0)] + slips
heaves = [(0,0,0)] + heaves
slip_x, slip_y = np.array(slips).T
heave_x, heave_y, heave_z = np.array(heaves).T
fig, ax = plt.subplots()
ax.plot(slip_x, slip_y, 'bo-')
ax.plot(heave_x, heave_y, 'go-')
utilities.plot_plate_motion(time=2e5, xy=slips[3])
plt.axis('equal')
return fig, ax
def planar_variance(xyz):
"""
Effectively the "roughness" (the metric minimized during inversion) left
over after a planar fit.
"""
vecs, vals = geoprobe.utilities.principal_axes(*xyz.T, return_eigvals=True)
return vals[-1]
if __name__ == '__main__':
main()
```
#### File: joferkington/oost_paper_code/data.py
```python
import os
import geoprobe
import numpy as np
basedir = os.path.dirname(__file__)
basedir = os.path.join(basedir, 'data')
faultname = os.path.join(basedir, 'swFaults',
'jdk_oos_splay_large_area_depth-mod.swf')
fault = geoprobe.swfault(faultname)
volname = os.path.join(basedir, 'Volumes', 'example.hdf')
vol = geoprobe.volume(volname)
# These are in stratigraphic order from oldest to youngest
horizon_names = [
'jdk_forearc_horizon_7.hzn',
'jdk_forearc_horizon_6.hzn',
'jdk_forearc_horizon_5.hzn',
'jdk_forearc_horizon_4.hzn',
'jdk_forearc_horizon_3.5.hzn',
'jdk_forearc_horizon_3.hzn',
'jdk_forearc_horizon_2.5.hzn',
'jdk_forearc_horizon_2.hzn',
'jdk_forearc_horizon_1.5.hzn',
'jdk_forearc_horizon_1.hzn',
]
horizon_names = [os.path.join(basedir, 'Horizons', item) for item in horizon_names]
horizons = [geoprobe.horizon(item) for item in horizon_names]
gulick_names = ['7', '6', '5', '4', '3.5', '3', '2.5', '2', '1.5', '1']
# Best fit shear angle for inclined shear
alpha = 70
def to_xyz(hor):
return np.vstack([hor.x, hor.y, hor.z]).T
def xyz2hor(xyz):
return geoprobe.horizon(*xyz.T)
def to_world(points):
points = np.atleast_2d(points)
if points.shape[1] == 2:
return np.vstack(vol.model2world(*points.T)).T
else:
x, y, z = points.T
x, y = vol.model2world(x, y)
return np.vstack([x, y, -z]).T
def to_model(points):
points = np.atleast_2d(points)
if points.shape[1] == 2:
return np.vstack(vol.world2model(*points.T)).T
else:
x, y, z = points.T
x, y = vol.world2model(x, y)
return np.vstack([x, y, -z]).T
def world_xyz(hor):
return to_world(to_xyz(hor))
fault_xyz = world_xyz(fault)
fault_strike, fault_dip = geoprobe.utilities.points2strikeDip(*fault_xyz.T)
```
#### File: joferkington/oost_paper_code/error_ellipse.py
```python
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
def plot_point_cov(points, nstd=2, ax=None, **kwargs):
"""
Plots an `nstd` sigma ellipse based on the mean and covariance of a point
"cloud" (points, an Nx2 array).
Parameters
----------
points : An Nx2 array of the data points.
nstd : The radius of the ellipse in numbers of standard deviations.
Defaults to 2 standard deviations.
ax : The axis that the ellipse will be plotted on. Defaults to the
current axis.
Additional keyword arguments are pass on to the ellipse patch.
Returns
-------
A matplotlib ellipse artist
"""
pos = points.mean(axis=0)
cov = np.cov(points, rowvar=False)
return plot_cov_ellipse(cov, pos, nstd, ax, **kwargs)
def plot_cov_ellipse(cov, pos, nstd=2, ax=None, **kwargs):
"""
Plots an `nstd` sigma error ellipse based on the specified covariance
matrix (`cov`). Additional keyword arguments are passed on to the
ellipse patch artist.
Parameters
----------
cov : The 2x2 covariance matrix to base the ellipse on
pos : The location of the center of the ellipse. Expects a 2-element
sequence of [x0, y0].
nstd : The radius of the ellipse in numbers of standard deviations.
Defaults to 2 standard deviations.
ax : The axis that the ellipse will be plotted on. Defaults to the
current axis.
Additional keyword arguments are pass on to the ellipse patch.
Returns
-------
A matplotlib ellipse artist
"""
def eigsorted(cov):
vals, vecs = np.linalg.eigh(cov)
order = vals.argsort()[::-1]
return vals[order], vecs[:,order]
if ax is None:
ax = plt.gca()
vals, vecs = eigsorted(cov)
theta = np.degrees(np.arctan2(*vecs[:,0][::-1]))
# Width and height are "full" widths, not radii
width, height = 2 * nstd * np.sqrt(vals)
ellip = Ellipse(xy=pos, width=width, height=height, angle=theta, **kwargs)
ax.add_artist(ellip)
return ellip
if __name__ == '__main__':
#-- Example usage -----------------------
# Generate some random, correlated data
points = np.random.multivariate_normal(
mean=(1,1), cov=[[0.4, 9],[9, 10]], size=1000
)
# Plot the raw points...
x, y = points.T
plt.plot(x, y, 'ro')
# Plot a transparent 3 standard deviation covariance ellipse
plot_point_cov(points, nstd=3, alpha=0.5, color='green')
plt.show()
```
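For a diagonal covariance the ellipse geometry can be checked by hand: the eigenvalues are the per-axis variances, so each full axis length is 2 * nstd * sqrt(variance). A quick, standalone sanity check of that calculation (pure numpy; the example covariance is made up):

```python
import numpy as np

cov = np.array([[4.0, 0.0],
                [0.0, 1.0]])
nstd = 2
vals, vecs = np.linalg.eigh(cov)
vals, vecs = vals[::-1], vecs[:, ::-1]   # descending order, as eigsorted() returns
width, height = 2 * nstd * np.sqrt(vals)
theta = np.degrees(np.arctan2(*vecs[:, 0][::-1]))
print(width, height, theta)  # 8.0 4.0, theta 0 (mod 180): an axis-aligned 2-sigma ellipse
```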
#### File: joferkington/oost_paper_code/fit_shear_angle.py
```python
import matplotlib.pyplot as plt
import sys
from fault_kinematics.homogeneous_simple_shear import invert_slip
import data
def optimize_single_alpha():
"""Find the single shear angle that best flattens all horizons using a grid
search."""
fault = data.to_world(data.to_xyz(data.fault))
alphas = range(-80, 85, 5)
roughness = []
for alpha in alphas:
update('Alpha = %i: ' % alpha)
rough = 0
for i, hor in enumerate(data.horizons):
xyz = data.to_world(data.to_xyz(hor))[::50]
update('%i ' % (len(data.horizons) - i))
slip, metric = invert(fault, xyz, alpha)
rough += metric
update('\n')
roughness.append(rough)
fig, ax = plt.subplots()
ax.plot(alphas, roughness)
ax.set_title('Optimizing Shear Angle')
ax.set_ylabel('Summed misfit (m)')
ax.set_xlabel('Shear angle (degrees)')
fig.savefig('optimize_single_alpha.pdf')
plt.show()
def optimize_individual_alpha():
"""Find the best shear angle for each horizon using a grid search."""
fault = data.to_world(data.to_xyz(data.fault))
alphas = range(-80, 85, 5)
for hor in data.horizons:
update(hor.name + ': ')
xyz = data.to_world(data.to_xyz(hor))[::50]
roughness = []
for i, alpha in enumerate(alphas):
update('%i ' % (len(alphas) - i))
slip, metric = invert(fault, xyz, alpha)
roughness.append(metric)
update('\n')
fig, ax = plt.subplots()
ax.plot(alphas, roughness)
ax.set_title(hor.name)
ax.set_ylabel('Misfit (m)')
ax.set_xlabel('Shear angle (degrees)')
fig.savefig('optimize_alpha_individual_%s.pdf'%hor.name)
plt.show()
def invert(fault, xyz, alpha):
return invert_slip(fault, xyz, alpha, overlap_thresh=1, return_metric=True)
def update(text):
sys.stdout.write(str(text))
sys.stdout.flush()
optimize_single_alpha()
#optimize_individual_alpha()
```
#### File: joferkington/oost_paper_code/interactive_inclined_shear.py
```python
from traits.api import HasTraits, Range, Instance, \
on_trait_change
from traitsui.api import View, Item, Group
from mayavi.core.api import PipelineBase
from mayavi.core.ui.api import MayaviScene, SceneEditor, \
MlabSceneModel
from shapely.geometry import Polygon
import numpy as np
from scipy.interpolate import LinearNDInterpolator
import geoprobe
from fault_kinematics.homogeneous_simple_shear import inclined_shear
import data
class FaultModel(HasTraits):
azimuth = Range(-180., 180., data.fault_strike - 90, mode='slider')
slip = Range(-20., 40., 0., mode='slider')
alpha = Range(-80., 80., data.alpha, mode='slider')
scene = Instance(MlabSceneModel, ())
plot = Instance(PipelineBase)
def __init__(self, fault, horxyz, origxyz=None, ve=2, calc_fault=None,
**kwargs):
self.ve = ve
self.origxyz = origxyz
self.fault = fault
if calc_fault is None:
self.faultxyz = data.to_world(data.to_xyz(fault))
else:
self.faultxyz = calc_fault
self.horxyz = horxyz
HasTraits.__init__(self, **kwargs)
def setup_plot(self):
tri = triangles(self.fault)
fault = self.view_triangles(data.world_xyz(self.fault), tri)
# fault = self.view_xyz_surface(data.world_xyz(self.fault))
if self.origxyz is not None:
self.view_xyz_surface(self.origxyz)
self.scene.mlab.orientation_axes()
self.scene.mlab.outline(fault)
return self.view_xyz_surface(self.horxyz)
@on_trait_change('azimuth,slip,alpha,scene.activated')
def update_plot(self):
if self.plot is None:
self.plot = self.setup_plot()
azi = np.radians(90 - self.azimuth)
dx, dy = self.slip * np.cos(azi), self.slip * np.sin(azi)
dx, dy = 1000 * dx, 1000 * dy
moved = inclined_shear(self.faultxyz, self.horxyz, (dx, dy), self.alpha,
remove_invalid=False)
x, y, z = moved.T
z *= self.ve
self.plot.mlab_source.set(x=x, y=y, z=z, scalars=z)
def view_triangles(self, xyz, tri):
x, y, z = xyz.T
z = z * self.ve
return self.scene.mlab.triangular_mesh(x, y, z, tri)
def view_xyz_surface(self, xyz):
tri = LinearNDInterpolator(xyz[:,:2], xyz[:,-1])
return self.view_triangles(xyz, tri.tri.vertices)
view = View(Item('scene', editor=SceneEditor(scene_class=MayaviScene),
height=600, width=800, show_label=False),
Group(
'_', 'azimuth', 'slip', 'alpha',
),
resizable=True,
)
def triangles(fault):
if isinstance(fault, basestring):
fault = geoprobe.swfault(fault)
# Iterate through triangles in internal coords and select those inside
# the non-convex outline of the fault...
xyz = fault._internal_xyz
rotated_tri = LinearNDInterpolator(xyz[:,:2], xyz[:,-1])
rotated_xyz = fault._internal_xyz
rotated_outline = Polygon(fault._rotated_outline)
def inside_outline(tri):
return rotated_outline.contains(Polygon(rotated_xyz[tri]))
triangles = rotated_tri.tri.vertices
return np.array([tri for tri in triangles if inside_outline(tri)])
if __name__ == '__main__':
fault = geoprobe.swfault('/data/nankai/data/swFaults/jdk_oos_splay_large_area_depth.swf')
hor = data.horizons[0]
horxyz = data.to_world(data.to_xyz(hor))[::100]
plot = FaultModel(fault, horxyz)
plot.configure_traits()
```
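update_plot above converts a compass-style azimuth and a slip magnitude in kilometres into east/north offsets in metres before calling inclined_shear. That conversion in isolation (slip_vector is an illustrative helper, not from the repo):

```python
import numpy as np

def slip_vector(azimuth_deg, slip_km):
    # Compass azimuth (clockwise from north) -> math angle (counter-clockwise from east).
    azi = np.radians(90 - azimuth_deg)
    dx, dy = slip_km * np.cos(azi), slip_km * np.sin(azi)
    return 1000 * dx, 1000 * dy  # km -> m

print(slip_vector(0, 10))   # (~0, 10000): slip due north
print(slip_vector(90, 10))  # (10000, ~0): slip due east
```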
#### File: joferkington/oost_paper_code/shortening_calculations.py
```python
from uncertainties import ufloat
from utilities import min_value, max_value
def main():
print 'Plate motion rate parallel to section'
print plate_motion()
print 'Shortening (including ductile) from bed-length'
print bed_length_shortening()
print 'Estimated total shortening accommodated by OOSTS'
print oost_shortening()
print 'Shortening accommodated by seaward branch of OOSTS'
print seaward_shortening()
print 'Percentage of OOST shortening'
print total_oost_percentage()
print 'Landward Percentage'
print landward_percentage()
print 'Seaward Percentage'
print seaward_percentage()
def bed_length_balancing():
"""Summed fault heaves from bed-length balancing."""
present_length = 32
# 2km error from range in restored pin lines + 10% interpretation error
restored_length = ufloat(82, 10)
shortening = restored_length - present_length
return shortening
def bed_length_shortening():
"""Shortening estimate including volume loss."""
alpha = ufloat(0.35, 0.1)
heaves = bed_length_balancing()
return heaves * (1 + alpha)
def age():
"""
Age of the oldest in-sequence structures from Strasser, 2009.
Returns:
--------
avg_age : A ufloat with an assumed 2 sigma uncertainty
min_age : The "hard" minimum from Strasser, et al, 2009
max_age : The "hard" maximum from Strasser, et al, 2009
"""
min_age = 1.95 # Ma
max_age = 2.512 # Ma
# Strasser prefers an older age within this range, so we model this as
# 2.3 +/- 0.2, but provide mins and maxs
avg_age = ufloat(2.3, 0.2) # Ma
return avg_age, min_age, max_age
def plate_motion():
"""
Plate motion rate (forearc relative to oceanic plate) _parallel_ _to_
_section_ (Not full plate vector!) based on elastic block modeling
(Loveless&Meade, 2010).
Returns:
--------
rate : A ufloat in mm/yr with a 2 sigma error
"""
# See /data/MyCode/VariousJunk/loveless_meade_block_model_slip_vector.py
# for details of derivation... Uses block segment nearest study area instead
# of derived euler pole.
# I'm assuming that Loveless's reported errors are 2 sigma...
section_parallel_rate = ufloat(42.9, 2.1)
return section_parallel_rate
def total_convergence():
"""
Total shortening parallel to section from plate motion and ages.
Returns:
--------
shortening : A ufloat representing the plate motion integrated over the
age of deformation with a 2 sigma confidence interval.
min_shortening : A "hard" minimum using the uncertainty in the plate
motion and minimum constraints on the age.
max_shortening : A "hard" maximum using the uncertainty in the plate
motion and maximum constraints on the age.
"""
avg_age, min_age, max_age = age()
rate = plate_motion()
shortening = rate * avg_age
min_shortening = min_value(min_age * rate)
max_shortening = max_value(max_age * rate)
return shortening, min_shortening, max_shortening
def oost_shortening():
"""
Shortening on the out-of-sequence thrust system based on integrated plate
convergence minus the shortening predicted in the outer wedge from line
balancing results.
Returns:
--------
shortening : A ufloat with a 2 sigma error estimate
"""
total_shortening, min_total, max_total = total_convergence()
return total_shortening - bed_length_shortening()
def seaward_shortening():
"""Shortening accomodated on the seaward branch of the OOSTS based on
comparing the total (`oost_shortening()`) shortening with the shortening
predicted on the landward branch from forearc uplift.
Returns:
--------
shortening : a ufloat with 2 sigma error in kilometers.
"""
from process_bootstrap_results import shortening_parallel_to_section
landward_shortening = shortening_parallel_to_section() / 1000
return oost_shortening() - landward_shortening
def total_oost_percentage():
"""
Percentage of shortening accommodated by out-of-sequence thrusting during
the development of the present-day outer wedge.
Returns:
--------
percentage : A ufloat with a 2 sigma error representing a unitless
ratio (e.g. multiply by 100 to get percentage).
"""
total_shortening, min_total, max_total = total_convergence()
return oost_shortening() / total_shortening
def seaward_percentage():
"""
Percentage of total plate convergence accommodated by the seaward branch of
the OOSTS during its period of activity.
Returns:
--------
percentage : A ufloat with a 2 sigma error representing a unitless
ratio (e.g. multiply by 100 to get percentage).
"""
# Duration in myr from Strasser, 2009
duration = 1.95 - 1.24
rate = plate_motion()
total = duration * rate
return seaward_shortening() / total
def landward_percentage():
"""
Maximum percentage of total plate convergence accommodated by the landward
branch of the OOSTS during its period of activity.
Returns:
--------
percentage : A ufloat with a 2 sigma error representing a unitless
ratio (e.g. multiply by 100 to get percentage).
"""
from process_bootstrap_results import shortening_parallel_to_section
landward_shortening = shortening_parallel_to_section() / 1000
duration = ufloat(0.97, 0.07) - ufloat(0.25, 0.25)
rate = plate_motion()
total = duration * rate
return landward_shortening / total
if __name__ == '__main__':
main()
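# Illustration only (not part of the original script): the `uncertainties`
# package propagates the 2-sigma errors used above automatically, e.g.
#     from uncertainties import ufloat
#     ufloat(82, 10) - 32                    # 50.0+/-10.0 (sums keep absolute errors)
#     ufloat(42.9, 2.1) * ufloat(2.3, 0.2)   # ~98.7+/-9.8 (products add relative errors in quadrature)
# and each result exposes .nominal_value and .std_dev.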
``` |
{
"source": "joferkington/scipy2015-3d_printing",
"score": 2
} |
#### File: joferkington/scipy2015-3d_printing/make_base.py
```python
import tempfile
from mayavi import mlab
import Image
import numpy as np
import geoprobe
import scipy.ndimage
import matplotlib.colors as mcolors
import matplotlib.cm as cm
import utils
def main():
vol, top = load_data()
downsample = 3
fig = mlab.figure(bgcolor=(1,1,1))
x, y, z = hor2xyz(top, vol, downsample)
build_sides(vol, x, y, z, vol.nz)
# Build top
seafloor = top_texture(top, vol)
top_mesh = mlab.mesh(x, y, z)
texture(top_mesh, np.flipud(seafloor.T), cm.gray)
build_base(x, y, z, vol)
utils.present(fig)
def load_data():
top = geoprobe.horizon('data/seismic/Horizons/channels.hzn')
vol = geoprobe.volume('data/seismic/Volumes/example.vol')
vol.data = vol.load()
top = smooth_horizon(top)
return vol, top
def build_base(x, y, z, vol, wall_thick=5):
z0 = -vol.nz * np.ones_like(z)
sl = np.s_[wall_thick:-wall_thick, wall_thick:-wall_thick]
z0[sl] = z[sl] - wall_thick
return mlab.mesh(x, y, z0, color=(1, 1, 1))
def build_sides(vol, x, y, z, base, zbase=None):
for axis in [0, 1]:
for val in [0, -1]:
slices = [slice(None), slice(None), slice(None, base)]
slices[axis] = val
slices = tuple(slices)
build_side(vol, slices, x, y, z, zbase)
def build_side(vol, sl, x, y, z, zbase=None):
data = vol.data
full = sl
sl = sl[:2]
z0, x0, y0 = z[sl], x[sl], y[sl]
if zbase is None:
base = -np.arange(data.shape[2])[full[-1]].max()
base = base * np.ones_like(z0)
else:
base = zbase[sl]
z0 = np.vstack([z0, base])
x0 = np.vstack([x0, x0])
y0 = np.vstack([y0, y0])
mesh = mlab.mesh(x0, y0, z0)
sl = slice(-z0.max(), -z0.min(), full[-1].step)
full = tuple([full[0], full[1], sl])
dat = data[full].T
cmap = geoprobe.colormap('data/seismic/Colormaps/brown_black').as_matplotlib
texture(mesh, dat, cmap)
return mesh
def hor2xyz(hor, vol, downsample=1):
z = hor.grid
z = vol.model2index(z, axis='z', int_conversion=False)
z = -z.T
ds = downsample
y, x = np.mgrid[:z.shape[0], :z.shape[1]]
x,y,z = x[::ds, ::ds], y[::ds, ::ds], z[::ds, ::ds]
return x, y, z
def top_texture(hor, vol):
"""RMS Amplitude Extraction on Bottom Horizon."""
chan = geoprobe.utilities.extractWindow(hor, vol, 0, 4)
chan = (chan.astype(float) - 128.0)**2
chan = np.sqrt(chan.mean(axis=-1))
return chan
def texture(mesh, data, cmap, vmin=None, vmax=None):
if vmin is None:
vmin = data.min()
if vmax is None:
vmax = data.max()
dat = scipy.ndimage.zoom(data, 3)
norm = mcolors.Normalize(vmin, vmax)
rgba = cmap(norm(dat))
rgba = (255 * rgba).astype(np.uint8)
im = Image.fromarray(rgba).convert('RGB')
# Evil, ugly hack. Still don't understand why RGB texturing isn't working
# correctly without bringing in an image. Fix later!
_, fname = tempfile.mkstemp()
with open(fname, 'w') as f:
im.save(f, 'PNG')
utils.texture(mesh, fname=fname)
def smooth_horizon(hor):
z = hor.grid
z = scipy.ndimage.median_filter(z.astype(float), 4)
z = scipy.ndimage.gaussian_filter(z, 1.5)
xmin, xmax, ymin, ymax = hor.grid_extents
y, x = np.mgrid[ymin:ymax+1, xmin:xmax+1]
return geoprobe.horizon(x.flatten(), y.flatten(), z.flatten())
main()
```
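texture() above normalises a data array, pushes it through a matplotlib colormap and writes the result out as a PNG before handing it to Mayavi. The colour-mapping step on its own, with an invented array:

```python
import numpy as np
import matplotlib.cm as cm
import matplotlib.colors as mcolors
from PIL import Image  # modern equivalent of the old `import Image`

data = 200 * np.random.rand(64, 64) - 100   # arbitrary amplitude-like values
norm = mcolors.Normalize(vmin=data.min(), vmax=data.max())
rgba = cm.gray(norm(data))                  # floats in [0, 1], shape (64, 64, 4)
rgba = (255 * rgba).astype(np.uint8)
Image.fromarray(rgba).convert('RGB').save('texture.png')
```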
#### File: joferkington/scipy2015-3d_printing/shapeways_io.py
```python
import os
import binascii
import tempfile
from zipfile import ZipFile, ZIP_DEFLATED
from cStringIO import StringIO
import numpy as np
import Image
def save_vrml(fig, output_filename):
"""
Saves a Mayavi figure as shapeways-formatted VRML in a zip file.
Parameters
----------
fig : a Mayavi/mlab figure
output_filename : string
"""
_, fname = tempfile.mkstemp()
fig.scene.save_vrml(fname)
wrl_name = os.path.basename(output_filename).rstrip('.zip')
vrml2shapeways(fname, output_filename, wrl_name)
os.remove(fname)
def vrml2shapeways(filename, output_filename, wrl_name=None):
"""
Un-embededs images from a vrml file and creates a zip archive with the
images saved as .png's and the vrml file with links to the images.
Parameters
----------
filename : string
The name of the input VRML file
output_filename : string
The filename of the zip archive that will be created.
wrl_name : string or None (optional)
The name of the VRML file in the zip archive. If None, this will be
taken from *filename*.
"""
if not output_filename.endswith('.zip'):
output_filename += '.zip'
with ZipFile(output_filename, 'w', ZIP_DEFLATED) as z:
if wrl_name is None:
wrl_name = os.path.basename(filename)
if not wrl_name.endswith('.wrl'):
wrl_name += '.wrl'
outfile = StringIO()
with open(filename, 'r') as infile:
images = unembed_wrl_images(infile, outfile)
z.writestr(wrl_name, outfile.getvalue())
for fname, im in images.iteritems():
outfile = StringIO()
im.save(outfile, format='png')
z.writestr(fname, outfile.getvalue())
def unembed_wrl_images(infile, outfile):
"""
Converts embedded images in a VRML file to linked .png's.
Parameters
----------
infile : file-like object
outfile: file-like object
Returns
-------
images : a dict of filename : PIL Image pairs
Notes:
-----
Should use a proper parser instead of just iterating line-by-line...
"""
i = 1
images = {}
for line in infile:
if 'texture' in line:
data, width, height = read_texture_wrl(infile)
image_filename = 'texture_{}.png'.format(i)
im = ascii2image_wrl(data, width, height)
line = ' texture ImageTexture {{ url ["{}"]}}'
line = line.format(image_filename)
images[image_filename] = im
i += 1
outfile.write(line)
return images
def read_texture_wrl(infile):
"""
Reads hexlified image data from the current position in a VRML file.
"""
header = next(infile).strip().split()
width, height, nbands = map(int, header[1:])
data = []
for line in infile:
line = line.strip().split()
for item in line:
if item.startswith('0x'):
data.append(item)
else:
return data, width, height
def ascii2image_wrl(data, width, height):
"""
Converts hexlified data in VRML to a PIL image.
"""
if len(data[0]) == 8:
nbands = 3
elif len(data[0]) == 10:
nbands = 4
else:
raise ValueError('Unrecognized data type for image data')
results = []
for item in data:
results.append(binascii.unhexlify(item[2:]))
data = results
data = ''.join(data)
dat = np.fromstring(data, dtype=np.uint8).reshape(height, width, nbands)
dat = np.roll(dat, nbands, -1)
dat = np.flipud(dat)
im = Image.fromarray(dat)
return im
```
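ascii2image_wrl reassembles VRML's hex-encoded pixels (one 0x-prefixed value per pixel) into an image. The decoding step for a single, made-up RGB row looks like this:

```python
import binascii
import numpy as np

pixels = ['0xff0000', '0x00ff00', '0x0000ff']             # hypothetical 3-pixel RGB row
raw = b''.join(binascii.unhexlify(p[2:]) for p in pixels)
row = np.frombuffer(raw, dtype=np.uint8).reshape(1, 3, 3)
print(row[0, 0])  # [255   0   0] -> red
```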
#### File: joferkington/scipy2015-3d_printing/texture_example.py
```python
from numpy import arange, zeros, float32, float64, uint8, \
atleast_3d, exp, sqrt, pi
from tvtk.api import tvtk
from tvtk.common import configure_input_data, configure_source_data, \
is_old_pipeline
# Source for glyph. Note that you need to pick a source that has
# texture co-ords already set. If not you'll have to generate them.
# This is easily done -- it's just a 2d array of (u,v) coords each
# between [0, 1] that you can set via something like
# point_data.t_coords = <array>.
#
# In this case CubeSource already defines texture coords for us (as of
# VTK-4.4).
cs = tvtk.CubeSource(x_length=2, y_length=1.0, z_length=0.5)
# Create input for the glyph -- the sources are placed at these input
# points.
pts = [[1,1,1],[0,0,0], [-1,-1,-1]]
pd = tvtk.PolyData(points=pts, polys=[[0],[1],[2]])
# Orientation/scaling is as per the vector attribute.
vecs = [[1,0,0], [0,1,0], [0,0,1]]
pd.point_data.vectors = vecs
# Create the glyph3d and set up the pipeline.
g = tvtk.Glyph3D(scale_mode='data_scaling_off', vector_mode = 'use_vector')
configure_input_data(g, pd)
# Note that VTK's vtkGlyph.SetSource is special because it has two
# call signatures: SetSource(src) and SetSource(int N, src) (which
# sets the N'th source). In tvtk it is represented as both a property
# and as a method. Using the `source` property will work fine if all
# you want is the first `source`. OTOH if you want the N'th `source`
# use get_source(N).
# g.source = cs.output
configure_source_data(g, cs.output)
cs.update()
g.update()
m = tvtk.PolyDataMapper()
configure_input_data(m, g.output)
a = tvtk.Actor(mapper=m)
# Read the texture from image and set the texture on the actor. If
# you don't like this image, replace with your favorite -- any image
# will do (you must use a suitable reader though).
def image_from_array(ary):
""" Create a VTK image object that references the data in ary.
The array is either 2D or 3D, with the last dimension
always being the number of channels. It is only tested
with 3 (RGB) or 4 (RGBA) channel images.
Note: This works no matter what the ary type is (except
probably complex...). uint8 gives results that make sense
to me. Int32 and Float types give colors that I am not
so sure about. Need to look into this...
"""
sz = ary.shape
dims = len(sz)
# create the vtk image data
img = tvtk.ImageData()
if dims == 2:
# 1D array of pixels.
img.whole_extent = (0, sz[0]-1, 0, 0, 0, 0)
img.dimensions = sz[0], 1, 1
img.point_data.scalars = ary
elif dims == 3:
# 2D array of pixels.
if is_old_pipeline():
img.whole_extent = (0, sz[0]-1, 0, sz[1]-1, 0, 0)
else:
img.extent = (0, sz[0]-1, 0, sz[1]-1, 0, 0)
img.dimensions = sz[0], sz[1], 1
# create a 2d view of the array
ary_2d = ary[:]
ary_2d.shape = sz[0]*sz[1],sz[2]
img.point_data.scalars = ary_2d
else:
raise ValueError, "ary must be 3 dimensional."
return img
sz = (256, 256, 3)
array_3d = zeros(sz, uint8)
img = image_from_array(array_3d)
t = tvtk.Texture(interpolate = 1)
configure_input_data(t, img)
a.texture = t
# Renderwindow stuff and add actor.
rw = tvtk.RenderWindow(size=(600, 600))
ren = tvtk.Renderer(background=(0.1, 0.2, 0.4))
rw.add_renderer(ren)
rwi = tvtk.RenderWindowInteractor(render_window=rw)
ren.add_actor(a)
rwi.initialize()
# create a little wave to slide across the image.
wave = 1/sqrt(2*pi)*exp(-arange(-2, 2, .05)**2/2)*255
# have to use += here because = doesn't respect broadcasting correctly.
array_3d[:len(wave)] += wave.astype(uint8)[:,None,None]
import time
t1 = time.time()
N = 256
for i in range(N):
array_3d[1:] = array_3d[:-1]
img.modified()
rwi.render()
#print i
t2 = time.time()
print 'texture size:', array_3d.shape
print 'fps:', N/(t2-t1)
rwi.start()
```
#### File: joferkington/scipy2015-3d_printing/utils.py
```python
import numpy as np
from mayavi import mlab
import mayavi.tools
from tvtk.api import tvtk
def scale(fig, ratio, reset=False):
"""
Scales a Mayavi figure and resets the camera.
Parameters
----------
"""
for actor in fig.scene.renderer.actors:
if reset:
actor.scale = np.ones(3) * ratio
else:
actor.scale = actor.scale * ratio
mayavi.tools.camera.view(distance='auto', focalpoint='auto', figure=fig)
def present(fig):
"""Makes a mayavi scene full screen and exits when "a" is pressed."""
fig.scene.full_screen = True
Closer(fig)
mlab.show()
class Closer:
"""Close mayavi window when "a" is pressed."""
def __init__(self, fig):
self.fig = fig
fig.scene.interactor.add_observer('KeyPressEvent', self)
def __call__(self, vtk_obj, event):
if vtk_obj.GetKeyCode() == 'a':
self.fig.parent.close_scene(self.fig)
def texture(mesh, data=None, fname=None, clamp=1):
"""
Apply a texture to a mayavi module (as produced by mlab.mesh, etc).
Parameters:
-----------
mesh :
The mesh/surface to apply the texture to
"""
if data is not None:
img = image_from_array(data)
elif fname is not None:
img = tvtk.PNGReader(file_name=fname).output
else:
raise ValueError('You must specify either "data" or "fname".')
t = tvtk.Texture(input=img, interpolate=1, edge_clamp=clamp)
mesh.actor.enable_texture = True
mesh.actor.tcoord_generator_mode = 'plane'
mesh.actor.actor.texture = t
mesh.actor.mapper.scalar_visibility = False
def image_from_array(ary):
"""
Create a VTK image object that references the data in ary. The array is
either 2D or 3D, with the last dimension always being the number of channels.
It is only tested with 3 (RGB) or 4 (RGBA) channel images.
Parameters
----------
ary : 2D or 3D ndarray
The texture data
Returns
-------
img : a tvtk.ImageData instance
Notes:
------
Note: This works no matter what the ary type is (except probably
complex...). uint8 gives results that make sense to me. Int32 and Float
types give colors that I am not so sure about. Need to look into this...
Taken from the mayavi examples.
# Authors: <NAME>, <NAME>
# Copyright (c) 2006, Enthought, Inc.
# License: BSD Style.
"""
# Expects top of image as first row...
ary = np.ascontiguousarray(np.flipud(ary))
sz = ary.shape
dims = len(sz)
# create the vtk image data
img = tvtk.ImageData()
if dims == 2:
# 1D array of pixels.
img.extent = (0, sz[0]-1, 0, 0, 0, 0)
img.dimensions = sz[0], 1, 1
img.point_data.scalars = ary
elif dims == 3:
# 2D array of pixels.
img.extent = (0, sz[0]-1, 0, sz[1]-1, 0, 0)
img.dimensions = sz[1], sz[0], 1
# create a 2d view of the array
ary_2d = ary[:]
ary_2d.shape = sz[0]*sz[1],sz[2]
img.point_data.scalars = ary_2d
else:
raise ValueError, "ary must be 3 dimensional."
return img
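# Illustration only (not from the original module): texture a surface with an
# arbitrary RGB array, e.g.
#     rgb = (255 * np.random.rand(256, 256, 3)).astype(np.uint8)
#     surf = mlab.surf(np.random.rand(50, 50))
#     texture(surf, data=rgb)
# or point it at a PNG on disk with texture(surf, fname='overlay.png').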
``` |
{
"source": "joferkington/seismic_plotting",
"score": 3
} |
#### File: seismic_plotting/seismic_plotting/horizons.py
```python
import itertools
import geoprobe
class HorizonSet(object):
def __init__(self, horizons, styles=None):
self.horizons = [geoprobe.horizon(hor) for hor in horizons]
if styles is None:
styles = [dict() for _ in horizons]
self.styles = styles
def plot(self, sec):
styles = itertools.cycle(self.styles)
def plot_item(hor, style):
x, y = sec.slice_horizon(hor)
# Skip first and last points...
l, = sec.ax.plot(x, y, **style)
return l
limits = sec.ax.axis()
lines = []
for hor, style in zip(self.horizons, styles):
lines.append(plot_item(hor, style))
sec.ax.axis(limits)
return lines
``` |
{
"source": "Joffcom/PaperCutExamples",
"score": 2
} |
#### File: PaperCutExamples/Authentication/customUser.py
```python
import sys
import logging
import xmlrpc.client
from ssl import create_default_context, Purpose
auth="token"
host="https://localhost:9192/rpc/api/xmlrpc"
userDatabase = {
"john": {"fullname":"<NAME>", "email":"<EMAIL>", "dept":"Accounts", "office":"Melbourne", "cardno":"1234", "otherEmails":"<EMAIL>", "secondarycardno":"01234","password":"<PASSWORD>"},
"jane": {"fullname":"<NAME>", "email":"<EMAIL>", "dept":"Sales", "office":"Docklands", "cardno":"5678", "otherEmails":"<EMAIL>", "secondarycardno":"05678", "password":"<PASSWORD>"},
"ahmed":{"fullname":"<NAME>", "email":"<EMAIL>", "dept":"Marketing", "office":"Home Office", "cardno":"4321", "otherEmails":"<EMAIL>", "secondarycardno":"04321", "password":"<PASSWORD>"},
}
groupDatabase = {
"groupA":["john"],
"groupB":["ahmed", "jane"],
}
# logging.basicConfig(level=logging.DEBUG, filename="/tmp/logfile", filemode="a+",
# format="%(asctime)-15s %(levelname)-8s %(message)s")
# logging.info("Called with {}".format(sys.argv))
def formatUserDetails(userName):
if userName in userDatabase:
if extraData:
return '\t'.join([userName, userDatabase[userName]["fullname"], userDatabase[userName]["email"], userDatabase[userName]["dept"], userDatabase[userName]["office"],
userDatabase[userName]["cardno"], userDatabase[userName]["otherEmails"], userDatabase[userName]["secondarycardno"]])
else:
return '\t'.join([userName, userDatabase[userName]["fullname"], userDatabase[userName]["email"], userDatabase[userName]["dept"], userDatabase[userName]["office"]])
else:
print("Call to formatUserDetails error for username {}".format(userName), file=sys.stderr)
sys.exit(-1)
# Should we return short or long form user data? Let's ask PaperCut MF/NG
proxy = xmlrpc.client.ServerProxy(host, verbose=False,
context = create_default_context(Purpose.CLIENT_AUTH))
try:
extraData = "N" != proxy.api.getConfigValue(auth, "user-source.update-user-details-card-id")
except Exception:
print("Cannot use web services API. Please configure", file=sys.stderr)
sys.exit(-1)
# Being called as user auth program
if len(sys.argv) == 1:
name = input()
password = input()
if name in userDatabase and userDatabase[name]["password"] == password:
print("OK\n{}\n".format(name)) # Note: return canonical user name
sys.exit(0)
else:
print("Wrong username or password", file=sys.stderr)
print("ERROR\n")
sys.exit(-1)
if len(sys.argv) < 2 or sys.argv[1] != '-':
print("incorrect argument passed {0}".format(sys.argv), file=sys.stderr)
sys.exit(-1)
# Being called as user sync program
if sys.argv[2] == "is-valid":
print('Y')
print("long form user data record will provided" if extraData else "short form user data record will provided")
sys.exit(0)
if sys.argv[2] == "all-users":
for name in userDatabase:
print(formatUserDetails(name))
sys.exit(0)
if sys.argv[2] == "all-groups":
print('\n'.join([g for g in groupDatabase]))
sys.exit(0)
if sys.argv[2] == "get-user-details":
name = input()
if name in userDatabase:
print(formatUserDetails(name))
sys.exit(0)
else:
print("Can't find user {0}".format(name), file=sys.stderr)
sys.exit(-1)
if sys.argv[2] == "group-member-names":
if sys.argv[3] in groupDatabase:
for user in groupDatabase[sys.argv[3]]:
if user in userDatabase:
print(user)
else:
print("Invalid user name {} found in group list {}".format(user, group), file=sys.stderr)
sys.exit(-1)
sys.exit(0)
else:
print("Group name {} not found".format(sys.argv[3]), file=sys.stderr)
sys.exit(-1)
if sys.argv[2] == "group-members":
if sys.argv[3] in groupDatabase:
for user in groupDatabase[sys.argv[3]]:
if user in userDatabase:
print(formatUserDetails(user))
else:
print("Invalid user name {} found in group list {}".format(user, group), file=sys.stderr)
sys.exit(-1)
sys.exit(0)
else:
print("Group name {} not found".format(sys.argv[3]), file=sys.stderr)
sys.exit(-1)
if sys.argv[2] == "is-user-in-group":
if sys.argv[3] in groupDatabase:
if sys.argv[4] in groupDatabase[sys.argv[3]]:
print('Y')
sys.exit(0)
print('N')
sys.exit(0)
print("Invalid Group name {}".format(sys.argv[3]), file=sys.stderr)
sys.exit(-1)
print("Can't process arguments {0}".format(sys.argv), file=sys.stderr)
``` |
{
"source": "jofferdal/harbor",
"score": 2
} |
#### File: apitests/python/test_retention.py
```python
from __future__ import absolute_import
import unittest
import time
from testutils import ADMIN_CLIENT
from testutils import TEARDOWN
from testutils import harbor_server
from library.repository import push_special_image_to_project
from library.retention import Retention
from library.project import Project
from library.repository import Repository
from library.user import User
from library.system import System
class TestProjects(unittest.TestCase):
"""
Test case:
Tag Retention
Setup:
Create Project test-retention
Push image test1:1.0, test1:2.0, test1:3.0,latest, test2:1.0, test2:latest, test3:1.0, test4:1.0
Test Steps:
1. Create Retention Policy
2. Add rule "For the repositories matching **, retain always with tags matching latest*"
3. Add rule "For the repositories matching test3*, retain always with tags matching **"
4. Dry run, check execution and tasks
5. Real run, check images retained
Tear Down:
1. Delete project test-retention
"""
@classmethod
def setUpClass(self):
self.user = User()
self.system = System()
self.repo= Repository()
self.project = Project()
self.retention=Retention()
def testTagRetention(self):
user_ra_password = "<PASSWORD>"
user_ra_id, user_ra_name = self.user.create_user(user_password=<PASSWORD>, **ADMIN_CLIENT)
print("Created user: %s, id: %s" % (user_ra_name, user_ra_id))
TestProjects.USER_RA_CLIENT = dict(endpoint=ADMIN_CLIENT["endpoint"],
username=user_ra_name,
password=<PASSWORD>_ra_password)
TestProjects.user_ra_id = int(user_ra_id)
TestProjects.project_src_repo_id, project_src_repo_name = self.project.create_project(metadata = {"public": "false"}, **TestProjects.USER_RA_CLIENT)
# Push image test1:1.0, test1:2.0, test1:3.0,latest, test2:1.0, test2:latest, test3:1.0
push_special_image_to_project(project_src_repo_name, harbor_server, user_ra_name, user_ra_password, "<PASSWORD>", ['1.0'])
push_special_image_to_project(project_src_repo_name, harbor_server, user_ra_name, user_ra_password, "<PASSWORD>", ['2.0'])
push_special_image_to_project(project_src_repo_name, harbor_server, user_ra_name, user_ra_password, "<PASSWORD>", ['3.0','latest'])
push_special_image_to_project(project_src_repo_name, harbor_server, user_ra_name, user_ra_password, "<PASSWORD>", ['1.0'])
push_special_image_to_project(project_src_repo_name, harbor_server, user_ra_name, user_ra_password, "<PASSWORD>", ['latest'])
push_special_image_to_project(project_src_repo_name, harbor_server, user_ra_name, user_ra_password, "<PASSWORD>", ['1.0'])
push_special_image_to_project(project_src_repo_name, harbor_server, user_ra_name, user_ra_password, "<PASSWORD>", ['1.0'])
resp=self.repo.get_repository(TestProjects.project_src_repo_id, **TestProjects.USER_RA_CLIENT)
self.assertEqual(len(resp), 4)
# Create Retention Policy
retention_id = self.retention.create_retention_policy(TestProjects.project_src_repo_id, selector_repository="**", selector_tag="latest*", expect_status_code = 201, **TestProjects.USER_RA_CLIENT)
# Add rule
self.retention.update_retention_add_rule(retention_id,selector_repository="test3*", selector_tag="**", expect_status_code = 200, **TestProjects.USER_RA_CLIENT)
# Dry run
self.retention.trigger_retention_policy(retention_id, dry_run=True, **TestProjects.USER_RA_CLIENT)
time.sleep(10)
resp=self.retention.get_retention_executions(retention_id, **TestProjects.USER_RA_CLIENT)
self.assertTrue(len(resp)>0)
execution=resp[0]
resp=self.retention.get_retention_exec_tasks(retention_id,execution.id, **TestProjects.USER_RA_CLIENT)
self.assertEqual(len(resp), 4)
resp=self.retention.get_retention_exec_task_log(retention_id,execution.id,resp[0].id, **TestProjects.USER_RA_CLIENT)
print(resp)
# Real run
self.retention.trigger_retention_policy(retention_id, dry_run=False, **TestProjects.USER_RA_CLIENT)
time.sleep(10)
resp=self.retention.get_retention_executions(retention_id, **TestProjects.USER_RA_CLIENT)
self.assertTrue(len(resp)>1)
execution=resp[0]
resp=self.retention.get_retention_exec_tasks(retention_id,execution.id, **TestProjects.USER_RA_CLIENT)
self.assertEqual(len(resp), 4)
resp=self.retention.get_retention_exec_task_log(retention_id,execution.id,resp[0].id, **TestProjects.USER_RA_CLIENT)
print(resp)
# TODO As the repository isn't deleted when no tags left anymore
# TODO we should check the artifact/tag count here
# resp=self.repo.get_repository(TestProjects.project_src_repo_id, **TestProjects.USER_RA_CLIENT)
# self.assertEqual(len(resp), 3)
@classmethod
def tearDownClass(self):
print "Case completed"
@unittest.skipIf(TEARDOWN == False, "Test data won't be erased.")
def test_ClearData(self):
resp=self.repo.get_repository(TestProjects.project_src_repo_id, **TestProjects.USER_RA_CLIENT)
for repo in resp:
self.repo.delete_repoitory(repo.name, **TestProjects.USER_RA_CLIENT)
self.project.delete_project(TestProjects.project_src_repo_id, **TestProjects.USER_RA_CLIENT)
self.user.delete_user(TestProjects.user_ra_id, **ADMIN_CLIENT)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "joffilyfe/articles_meta",
"score": 2
} |
#### File: articles_meta/processing/load_mixedcitations.py
```python
import re
import os
import argparse
import logging
import codecs
import json
import unicodedata
from articlemeta import controller
from xylose.scielodocument import Article
logger = logging.getLogger(__name__)
SENTRY_DSN = os.environ.get('SENTRY_DSN', None)
LOGGING_LEVEL = os.environ.get('LOGGING_LEVEL', 'DEBUG')
MONGODB_HOST = os.environ.get('MONGODB_HOST', None)
DOI_REGEX = re.compile(r'[0-9][0-9]\.[0-9].*/.*\S')
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'console': {
'format': '%(asctime)s - %(name)s - %(levelname)s - %(message)s',
'datefmt': '%H:%M:%S',
},
},
'handlers': {
'console': {
'level': LOGGING_LEVEL,
'class': 'logging.StreamHandler',
'formatter': 'console'
}
},
'loggers': {
'': {
'handlers': ['console'],
'level': LOGGING_LEVEL,
'propagate': False,
},
'processing.load_doi': {
'level': LOGGING_LEVEL,
'propagate': True,
},
}
}
if SENTRY_DSN:
LOGGING['handlers']['sentry'] = {
'level': 'ERROR',
'class': 'raven.handlers.logging.SentryHandler',
'dsn': SENTRY_DSN,
}
LOGGING['loggers']['']['handlers'].append('sentry')
try:
articlemeta_db = controller.DataBroker.from_dsn(MONGODB_HOST).db
except:
logger.error('Failed to connect to (%s)', MONGODB_HOST)
def remove_control_characters(data):
return "".join(ch for ch in data if unicodedata.category(ch)[0] != "C")
def html_decode(string):
string = remove_control_characters(string)
return string
def _config_logging(logging_level='INFO', logging_file=None):
allowed_levels = {
'DEBUG': logging.DEBUG,
'INFO': logging.INFO,
'WARNING': logging.WARNING,
'ERROR': logging.ERROR,
'CRITICAL': logging.CRITICAL
}
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger.setLevel(allowed_levels.get(logging_level, 'INFO'))
if logging_file:
hl = logging.FileHandler(logging_file, mode='a')
else:
hl = logging.StreamHandler()
hl.setFormatter(formatter)
hl.setLevel(allowed_levels.get(logging_level, 'INFO'))
logger.addHandler(hl)
return logger
def audity(mixed, document):
logger.debug('Auditing mixed citation')
if int(mixed['order']) > len(document.citations or []):
return False
check = mixed['mixed'].lower()
citation_index = int(mixed['order'])-1
citation_titles = [i.lower() for i in [
document.citations[citation_index].title() or '',
document.citations[citation_index].source or '',
document.citations[citation_index].chapter_title or '',
document.citations[citation_index].article_title or '',
document.citations[citation_index].thesis_title or '',
document.citations[citation_index].link_title or '',
document.citations[citation_index].issue_title or '',
document.citations[citation_index].conference_title or ''
] if i]
citation_authors = document.citations[citation_index].authors or []
for title in citation_titles:
if title in check:
return True
for author in citation_authors:
if author.get('surname', '').lower() in check:
return True
if author.get('given_names', '').lower() in check:
return True
return False
def get_document(collection, code):
logger.debug('Loading document from database')
document = articlemeta_db['articles'].find_one({'collection': collection, 'code': code})
if not document:
return
return Article(document)
def update_document(mixed, document):
logger.debug('Updating citation in database')
citation_field = 'citations.%s.mixed' % str(int(mixed['order'])-1)
articlemeta_db['articles'].update(
{
'collection': document.collection_acronym,
'code': document.publisher_id
},
{
'$set': {
citation_field: mixed['mixed']
}
}
)
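# For example, a mixed citation with order "5" targets the sub-document
# 'citations.4.mixed', so MongoDB receives roughly:
#     {'$set': {'citations.4.mixed': '<mixed citation text>'}}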
def run(mixed_citations_file, import_data):
with codecs.open(mixed_citations_file, encoding='utf-8') as mixed_citations:
for line in mixed_citations:
mixed = json.loads(line)
mixed['mixed'] = html_decode(mixed['mixed'])
document = get_document(mixed['collection'], mixed['pid'])
logger.info('Trying to import %s %s %s', mixed['collection'], mixed['pid'], mixed['order'])
if not document:
logger.error('Document not found in Article Meta %s %s %s', mixed['collection'], mixed['pid'], mixed['order'])
continue
if not audity(mixed, document):
logger.error('Document did not pass the audit %s %s %s', mixed['collection'], mixed['pid'], mixed['order'])
continue
logger.debug('Document passed the audit %s %s %s', mixed['collection'], mixed['pid'], mixed['order'])
if import_data:
logger.debug('Importing data for %s %s %s', mixed['collection'], mixed['pid'], mixed['order'])
update_document(mixed, document)
def main():
parser = argparse.ArgumentParser(
description="Load mixed citations according to a given json file"
)
parser.add_argument(
'--csv_file',
'-f',
help='A json file with the mixed citations of each article'
)
parser.add_argument(
'--import_data',
'-i',
action='store_true',
help='Import data'
)
parser.add_argument(
'--logging_file',
'-o',
help='Full path to the log file'
)
parser.add_argument(
'--logging_level',
'-l',
default='DEBUG',
choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
help='Logging level'
)
args = parser.parse_args()
_config_logging(args.logging_level, args.logging_file)
run(args.csv_file, args.import_data)
``` |
{
"source": "joffilyfe/document-store-migracao",
"score": 3
} |
#### File: documentstore_migracao/utils/scielo_ids_generator.py
```python
from math import log2, ceil
from uuid import UUID, uuid4
DIGIT_CHARS = "bcdfghjkmnpqrstvwxyzBCDFGHJKLMNPQRSTVWXYZ3456789"
chars_map = {dig: idx for idx, dig in enumerate(DIGIT_CHARS)}
def uuid2str(value):
result = []
unevaluated = value.int
for unused in range(ceil(128 / log2(len(DIGIT_CHARS)))):
unevaluated, remainder = divmod(unevaluated, len(DIGIT_CHARS))
result.append(DIGIT_CHARS[remainder])
return "".join(result)
def str2uuid(value):
acc = 0
mul = 1
for digit in value:
acc += chars_map[digit] * mul
mul *= len(DIGIT_CHARS)
return UUID(int=acc)
def generate_scielo_pid():
"""Funções para a geração e conversão do novo PID dos documentos do SciELO
"""
return uuid2str(uuid4())
def issue_id(issn_id, year, volume=None, number=None, supplement=None):
labels = ["issn_id", "year", "volume", "number", "supplement"]
values = [issn_id, year, volume, number, supplement]
data = dict([(label, value) for label, value in zip(labels, values)])
labels = ["issn_id", "year"]
_id = []
for label in labels:
value = data.get(label)
if value:
_id.append(value)
labels = [("volume", "v"), ("number", "n"), ("supplement", "s")]
for label, prefix in labels:
value = data.get(label)
if value:
if value.isdigit():
value = str(int(value))
_id.append(prefix + value)
return "-".join(_id)
def aops_bundle_id(issn_id):
return issn_id + "-aop"
```
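A short usage sketch (the issue values mirror the ones exercised in the tests below; the import path follows this file's location and assumes the package is installed):

```python
from documentstore_migracao.utils.scielo_ids_generator import (
    generate_scielo_pid, uuid2str, str2uuid, issue_id, aops_bundle_id)
from uuid import UUID

pid = generate_scielo_pid()        # 23-character id, e.g. 'JwqGdMDrdcV3Z7MFHgtKvVk'
assert str2uuid(uuid2str(UUID(int=12345))) == UUID(int=12345)   # lossless round trip

issue_id("0036-3634", "2009", volume="45", number="04")  # -> '0036-3634-2009-v45-n4'
aops_bundle_id("0036-3634")                              # -> '0036-3634-aop'
```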
#### File: document-store-migracao/tests/test_inserting.py
```python
import unittest
from unittest.mock import patch, Mock, MagicMock, ANY, call
from copy import deepcopy
from .apptesting import Session
from . import (
SAMPLE_ISSUES_KERNEL,
SAMPLE_AOPS_KERNEL,
SAMPLE_KERNEL_JOURNAL,
SAMPLES_PATH,
SAMPLES_JOURNAL,
)
import os
import shutil
import json
from documentstore_migracao.processing import inserting
from documentstore_migracao.utils import manifest
from documentstore_migracao import config
from documentstore.domain import DocumentsBundle
from documentstore.exceptions import DoesNotExist
from documentstore_migracao.processing.inserting import (
get_document_assets_path,
put_static_assets_into_storage,
)
from documentstore_migracao.utils.xml import loadToXML
class TestLinkDocumentsBundleWithDocuments(unittest.TestCase):
def setUp(self):
self.session = Session()
manifest = inserting.ManifestDomainAdapter(SAMPLE_ISSUES_KERNEL[0])
self.session.documents_bundles.add(manifest)
self.documents_bundle = self.session.documents_bundles.fetch(manifest.id())
def test_should_link_documents_bundle_with_documents(self):
inserting.link_documents_bundles_with_documents(
self.documents_bundle,
[{"id": "doc-1", "order": "0001"}, {"id": "doc-2", "order": "0002"}],
self.session,
)
self.assertEqual(
[{"id": "doc-1", "order": "0001"}, {"id": "doc-2", "order": "0002"}],
self.documents_bundle.documents,
)
def test_should_not_insert_duplicated_documents(self):
inserting.link_documents_bundles_with_documents(
self.documents_bundle,
[{"id": "doc-1", "order": "0001"}, {"id": "doc-1", "order": "0001"}],
self.session,
)
self.assertEqual(
[{"id": "doc-1", "order": "0001"}], self.documents_bundle.documents
)
def test_should_register_changes(self):
inserting.link_documents_bundles_with_documents(
self.documents_bundle,
[{"id": "doc-1", "order": "0001"}, {"id": "doc-2", "order": "0002"}],
self.session,
)
_changes = self.session.changes.filter()
self.assertEqual(1, len(_changes))
self.assertEqual(self.documents_bundle.id(), _changes[0]["id"])
self.assertEqual("DocumentsBundle", _changes[0]["entity"])
class TestProcessingInserting(unittest.TestCase):
def setUp(self):
self.data = dict(
[
("eissn", "1234-5678"),
("pissn", "0001-3714"),
("issn", "0987-0987"),
("year", "1998"),
("volume", "29"),
("number", "3"),
("supplement", None),
]
)
self.aop_data = dict(
[("eissn", "0001-3714"), ("issn", "0001-3714"), ("year", "2019")]
)
self.bundle_id = "0001-3714-1998-v29-n3"
self.issn = "0987-0987"
if not os.path.isdir(config.get("ERRORS_PATH")):
os.makedirs(config.get("ERRORS_PATH"))
def tearDown(self):
shutil.rmtree(config.get("ERRORS_PATH"))
def test_get_documents_bundle_success(self):
session_db = Session()
session_db.documents_bundles.add(
inserting.ManifestDomainAdapter(SAMPLE_ISSUES_KERNEL[0])
)
result = inserting.get_documents_bundle(
session_db, self.bundle_id, True, self.issn
)
self.assertIsInstance(result, DocumentsBundle)
self.assertEqual(result.id(), "0001-3714-1998-v29-n3")
def test_get_documents_bundle_raises_exception_if_issue_and_not_found(self):
session_db = MagicMock()
session_db.documents_bundles.fetch.side_effect = DoesNotExist
self.assertRaises(
ValueError,
inserting.get_documents_bundle,
session_db,
self.bundle_id,
True,
self.issn,
)
@patch("documentstore_migracao.processing.inserting.create_aop_bundle")
def test_get_documents_bundle_creates_aop_bundle_is_aop_and_not_found(
self, mk_create_aop_bundle
):
session_db = MagicMock()
session_db.documents_bundles.fetch.side_effect = DoesNotExist
mk_create_aop_bundle.side_effect = DoesNotExist
self.assertRaises(
ValueError,
inserting.get_documents_bundle,
session_db,
self.bundle_id,
False,
self.issn,
)
mk_create_aop_bundle.assert_any_call(session_db, self.issn)
@patch(
"documentstore_migracao.processing.inserting.scielo_ids_generator.aops_bundle_id"
)
@patch("documentstore_migracao.processing.inserting.create_aop_bundle")
def test_get_documents_bundle_raises_exception_if_creates_aop_bundle_none(
self, mk_create_aop_bundle, mk_aops_bundle_id
):
session_db = MagicMock()
session_db.documents_bundles.fetch.side_effect = DoesNotExist
mk_create_aop_bundle.side_effect = DoesNotExist
self.assertRaises(
ValueError,
inserting.get_documents_bundle,
session_db,
self.bundle_id,
False,
self.issn,
)
@patch("documentstore_migracao.processing.inserting.create_aop_bundle")
def test_get_documents_bundle_returns_created_aop_bundle(
self, mk_create_aop_bundle
):
session_db = MagicMock()
mocked_aop_bundle = Mock()
session_db.documents_bundles.fetch.side_effect = DoesNotExist
mk_create_aop_bundle.return_value = mocked_aop_bundle
result = inserting.get_documents_bundle(
session_db, self.bundle_id, False, self.issn
)
self.assertEqual(result, mocked_aop_bundle)
def test_create_aop_bundle_gets_journal(self):
issn = "1234-0001"
session_db = MagicMock()
inserting.create_aop_bundle(session_db, issn)
session_db.journals.fetch.assert_called_once_with(issn)
def test_create_aop_bundle_raises_exception_if_journal_not_found(self):
issn = "1234-0001"
session_db = MagicMock()
session_db.journals.fetch.side_effect = DoesNotExist
self.assertRaises(DoesNotExist, inserting.create_aop_bundle, session_db, issn)
@patch(
"documentstore_migracao.processing.inserting.scielo_ids_generator.aops_bundle_id"
)
def test_create_aop_bundle_uses_scielo_ids_generator_aops_bundle_id(
self, mk_aops_bundle_id
):
session_db = MagicMock()
session_db.journals.fetch.return_value = inserting.ManifestDomainAdapter(
manifest=SAMPLE_KERNEL_JOURNAL
)
inserting.create_aop_bundle(session_db, "0001-3714")
mk_aops_bundle_id.assert_called_once_with("0001-3714")
@patch("documentstore_migracao.processing.inserting.utcnow")
@patch("documentstore_migracao.processing.inserting.ManifestDomainAdapter")
def test_create_aop_bundle_registers_aop_bundle(
self, MockManifestDomainAdapter, mk_utcnow
):
mk_utcnow.return_value = "2019-01-02T05:00:00.000000Z"
expected = {
"_id": "0001-3714-aop",
"created": "2019-01-02T05:00:00.000000Z",
"updated": "2019-01-02T05:00:00.000000Z",
"items": [],
"metadata": {},
"id": "0001-3714-aop",
}
mk_bundle_manifest = Mock()
MockManifestDomainAdapter.return_value = mk_bundle_manifest
session_db = MagicMock()
session_db.journals.fetch.return_value = inserting.ManifestDomainAdapter(
manifest=SAMPLE_KERNEL_JOURNAL
)
inserting.create_aop_bundle(session_db, SAMPLE_KERNEL_JOURNAL["id"])
MockManifestDomainAdapter.assert_any_call(manifest=expected)
session_db.documents_bundles.add.assert_called_once_with(
data=mk_bundle_manifest
)
session_db.changes.add.assert_any_call(
{
"timestamp": "2019-01-02T05:00:00.000000Z",
"entity": "DocumentsBundle",
"id": "0001-3714-aop",
}
)
@patch("documentstore_migracao.processing.inserting.utcnow")
def test_create_aop_bundle_links_aop_bundle_to_journal(self, mk_utcnow):
mk_utcnow.return_value = "2019-01-02T05:00:00.000000Z"
mocked_journal_data = inserting.ManifestDomainAdapter(
manifest=SAMPLE_KERNEL_JOURNAL
)
mk_bundle_manifest = Mock()
session_db = MagicMock()
session_db.journals.fetch.return_value = mocked_journal_data
inserting.create_aop_bundle(session_db, SAMPLE_KERNEL_JOURNAL["id"])
session_db.journals.update.assert_called()
session_db.changes.add.assert_any_call(
{
"timestamp": "2019-01-02T05:00:00.000000Z",
"entity": "Journal",
"id": SAMPLE_KERNEL_JOURNAL["id"],
}
)
self.assertEqual(mocked_journal_data.ahead_of_print_bundle, "0001-3714-aop")
def test_create_aop_bundle_returns_bundle(self):
session_db = Session()
mocked_journal_data = inserting.ManifestDomainAdapter(
manifest=SAMPLE_KERNEL_JOURNAL
)
session_db.journals.add(mocked_journal_data)
result = inserting.create_aop_bundle(session_db, SAMPLE_KERNEL_JOURNAL["id"])
self.assertIsInstance(result, DocumentsBundle)
self.assertEqual(result.id(), "0001-3714-aop")
@patch("documentstore_migracao.processing.inserting.reading.read_json_file")
@patch(
"documentstore_migracao.processing.inserting.link_documents_bundles_with_documents"
)
def test_register_documents_in_documents_bundle(
self, mk_link_documents_bundle_with_documents, mk_read_json_file
):
documents = {
"JwqGdMDrdcV3Z7MFHgtKvVk": {
"acron": "aiss",
"eissn": None,
"issn": "0036-3634",
"number": "04",
"order": "00349",
"pid": "S0021-25712009000400001",
"pissn": "0036-3634",
"supplement": None,
"volume": "45",
"year": "2009",
}
}
journals = [SAMPLES_JOURNAL]
mk_read_json_file.side_effect = [journals, documents]
err_filename = os.path.join(
config.get("ERRORS_PATH"), "insert_documents_in_bundle.err"
)
session_db = Session()
manifest = inserting.ManifestDomainAdapter(SAMPLE_ISSUES_KERNEL[0])
session_db.documents_bundles.add(manifest)
inserting.register_documents_in_documents_bundle(
session_db, "/tmp/documents.json", "/tmp/journals.json"
)
self.assertEqual(os.path.isfile(err_filename), True)
with open(err_filename) as fp:
content = fp.read()
self.assertEqual(content, "0036-3634-2009-v45-n4\n")
@patch("documentstore_migracao.processing.inserting.get_documents_bundle")
@patch("documentstore_migracao.processing.inserting.reading.read_json_file")
@patch("documentstore_migracao.processing.inserting.scielo_ids_generator")
def test_register_documents_in_documents_bundle_scielo_ids_generator(
self, mk_scielo_ids_generator, mk_read_json_file, mk_get_documents_bundle
):
documents = {
"JwqGdMDrdcV3Z7MFHgtKvVk": {
"acron": "aiss",
"eissn": None,
"issn": "0036-3634",
"number": "04",
"order": "00349",
"pid": "S0021-25712009000400001",
"pissn": "0036-3634",
"supplement": None,
"volume": "45",
"year": "2009",
},
"WCDX9F8pMhHDzy3fDYvth9x": {
"acron": "aiss",
"eissn": None,
"issn": "0036-3634",
"order": "00349",
"pid": "S0021-25712009000400007",
"pissn": "0036-3634",
"supplement": None,
"year": "2009",
},
}
journals = [SAMPLES_JOURNAL]
mk_read_json_file.side_effect = [journals, documents]
session_db = Session()
inserting.register_documents_in_documents_bundle(
session_db, "/tmp/documents.json", "/tmp/journals.json"
)
mk_scielo_ids_generator.issue_id.assert_any_call(
"0036-3634", "2009", "45", "04", None
)
mk_scielo_ids_generator.aops_bundle_id.assert_any_call("0036-3634")
@patch("documentstore_migracao.processing.inserting.reading.read_json_file")
@patch("documentstore_migracao.processing.inserting.get_documents_bundle")
def test_register_documents_in_documents_bundle_get_documents_bundle(
self, mk_get_documents_bundle, mk_read_json_file
):
documents = {
"JwqGdMDrdcV3Z7MFHgtKvVk": {
"acron": "aiss",
"eissn": None,
"issn": "0036-3634",
"number": "04",
"order": "00349",
"pid": "S0021-25712009000400001",
"pissn": "0036-3634",
"supplement": None,
"volume": "45",
"year": "2009",
},
"WCDX9F8pMhHDzy3fDYvth9x": {
"acron": "aiss",
"eissn": None,
"issn": "0036-3634",
"order": "00349",
"pid": "S0021-25712009000400007",
"pissn": "0036-3634",
"supplement": None,
"year": "2009",
},
}
journals = [SAMPLES_JOURNAL]
mk_read_json_file.side_effect = [journals, documents]
session_db = Session()
inserting.register_documents_in_documents_bundle(
session_db, "/tmp/documents.json", "/tmp/journals.json"
)
mk_get_documents_bundle.assert_any_call(
session_db, "0036-3634-2009-v45-n4", True, "0036-3634"
)
mk_get_documents_bundle.assert_any_call(
session_db, "0036-3634-aop", False, "0036-3634"
)
@patch(
"documentstore_migracao.processing.inserting.link_documents_bundles_with_documents"
)
@patch("documentstore_migracao.processing.inserting.reading.read_json_file")
@patch("documentstore_migracao.processing.inserting.get_documents_bundle")
def test_register_documents_in_documents_bundle_link_documents_bundles_with_documents(
self,
mk_get_documents_bundle,
mk_read_json_file,
mk_link_documents_bundles_with_documents,
):
documents = {
"JwqGdMDrdcV3Z7MFHgtKvVk": {
"acron": "aiss",
"eissn": None,
"issn": "0036-3634",
"number": "04",
"order": "00349",
"pid": "S0021-25712009000400001",
"pissn": "0036-3634",
"supplement": None,
"volume": "45",
"year": "2009",
"scielo_id": "JwqGdMDrdcV3Z7MFHgtKvVk",
},
"WCDX9F8pMhHDzy3fDYvth9x": {
"acron": "aiss",
"eissn": None,
"issn": "0036-3634",
"order": "00350",
"pid": "S0021-25712009000400007",
"pissn": "0036-3634",
"supplement": None,
"year": "2009",
"scielo_id": "WCDX9F8pMhHDzy3fDYvth9x",
},
}
journals = [SAMPLES_JOURNAL]
mk_read_json_file.side_effect = [journals, documents]
documents_bundle = Mock()
mk_get_documents_bundle.return_value = documents_bundle
session_db = Session()
inserting.register_documents_in_documents_bundle(
session_db, "/tmp/documents.json", "/tmp/journals.json"
)
mk_link_documents_bundles_with_documents.assert_any_call(
documents_bundle,
[{"id": "JwqGdMDrdcV3Z7MFHgtKvVk", "order": "00349"}],
session_db,
)
class TestDocumentManifest(unittest.TestCase):
@patch("documentstore_migracao.object_store.minio.MinioStorage")
def setUp(self, mock_minio_storage):
self.package_path = os.path.join(SAMPLES_PATH, "0034-8910-rsp-47-02-0231")
self.renditions_names = [
"0034-8910-rsp-47-02-0231.pdf",
"0034-8910-rsp-47-02-0231-en.pdf",
]
self.renditions_urls_mock = [
"prefix/0034-8910-rsp-47-02-0231.pdf.pdf",
"prefix/0034-8910-rsp-47-02-0231.pdf-en.pdf",
]
mock_minio_storage.register.side_effect = self.renditions_urls_mock
self.json_manifest = os.path.join(self.package_path, "manifest.json")
with open(self.json_manifest, 'w') as json_file:
json_file.write(
json.dumps({
"pt": "rsp/v47n2/0034-8910-rsp-47-02-0231.pdf",
"en": "rsp/v47n2/0034-8910-rsp-47-02-0231-en.pdf",
})
)
self.renditions = inserting.get_document_renditions(
self.package_path, self.renditions_names, "prefix", mock_minio_storage
)
def tearDown(self):
os.remove(self.json_manifest)
def test_rendition_should_contains_file_name(self):
self.assertEqual("0034-8910-rsp-47-02-0231.pdf", self.renditions[0]["filename"])
self.assertEqual(
"0034-8910-rsp-47-02-0231-en.pdf", self.renditions[1]["filename"]
)
def test_rendition_should_contains_url_link(self):
self.assertEqual(self.renditions_urls_mock[0], self.renditions[0]["url"])
self.assertEqual(self.renditions_urls_mock[1], self.renditions[1]["url"])
def test_rendition_should_contains_size_bytes(self):
self.assertEqual(110104, self.renditions[0]["size_bytes"])
self.assertEqual(106379, self.renditions[1]["size_bytes"])
def test_rendition_should_contains_mimetype(self):
self.assertEqual("application/pdf", self.renditions[0]["mimetype"])
self.assertEqual("application/pdf", self.renditions[1]["mimetype"])
def test_renditon_should_contains_language(self):
self.assertEqual("en", self.renditions[1]["lang"])
def test_rendtion_should_not_contains_language(self):
self.assertEqual("pt", self.renditions[0]["lang"])
class TestDocumentRegister(unittest.TestCase):
def setUp(self):
self.package_path = os.path.join(SAMPLES_PATH, "0034-8910-rsp-47-02-0231")
self.xml_path = os.path.join(self.package_path, "0034-8910-rsp-47-02-0231.xml")
self.xml_etree = loadToXML(self.xml_path)
self.package_files = [
"0034-8910-rsp-47-02-0231-en.pdf",
"0034-8910-rsp-47-02-0231-gf01-en.jpg",
"0034-8910-rsp-47-02-0231-gf01-en.tif",
"0034-8910-rsp-47-02-0231-gf01.jpg",
"0034-8910-rsp-47-02-0231-gf01.tif",
"0034-8910-rsp-47-02-0231.pdf",
"0034-8910-rsp-47-02-0231.xml",
]
self.second_package_path = os.path.join(
SAMPLES_PATH, "0034-8910-rsp-47-02-0403"
)
self.second_xml_path = os.path.join(
self.second_package_path, "0034-8910-rsp-47-02-0403.xml"
)
self.second_xml_etree = loadToXML(self.second_xml_path)
self.second_package_files = [
"0034-8910-rsp-47-02-0403-gf01.jpg",
"0034-8910-rsp-47-02-0403-gf01.tif",
"0034-8910-rsp-47-02-0403.pdf",
"0034-8910-rsp-47-02-0403.xml",
]
self.session = Session()
def test_get_documents_assets_should_return_assets_and_additionals(self):
assets, additionals = get_document_assets_path(
self.xml_etree, self.package_files, self.package_path
)
self.assertEqual(
["0034-8910-rsp-47-02-0231-gf01", "0034-8910-rsp-47-02-0231-gf01-en"],
list(assets.keys()),
)
for additional in additionals.values():
with self.subTest(additional=additional):
self.assertNotIn(".tif", additional)
def test_get_documents_must_prefers_tif_files_instead_jpeg(self):
assets, _ = get_document_assets_path(
self.xml_etree, self.package_files, self.package_path
)
self.assertIn(
"0034-8910-rsp-47-02-0231-gf01.tif", assets["0034-8910-rsp-47-02-0231-gf01"]
)
def test_get_documents_assets_must_contain_additional_files_with_no_prefered_files(
self
):
_, additionals = get_document_assets_path(
self.xml_etree, self.package_files, self.package_path
)
self.assertIn(
"0034-8910-rsp-47-02-0231-gf01.jpg",
additionals["0034-8910-rsp-47-02-0231-gf01"],
)
def test_get_documents_assets_must_contain_additional_files_when_references_is_not_complete(
self
):
_, additionals = get_document_assets_path(
self.second_xml_etree, self.second_package_files, self.second_package_path
)
self.assertIn(
"0034-8910-rsp-47-02-0403-gf01.jpg",
additionals["0034-8910-rsp-47-02-0403-gf01"],
)
def test_get_documents_assets_should_not_return_assets_path(self):
assets, additionals = get_document_assets_path(
self.xml_etree, [], self.package_path
)
self.assertEqual({}, additionals)
self.assertEqual([None, None], list(assets.values()))
@patch("documentstore_migracao.object_store.minio.MinioStorage")
def test_put_assets_into_storage_should_ignore_missing_assets(self, mk_store):
mk_store.register.side_effect = ["http://storage.io/mock-url.pdf"]
assets = {"first-asset": "path-to.pdf", "second-asset": None}
results = put_static_assets_into_storage(assets, "some-prefix", mk_store)
for result in results:
with self.subTest(result=result):
self.assertNotEqual("second-asset", result["asset_id"])
@patch("documentstore_migracao.object_store.minio.MinioStorage")
def test_put_assets_should_raise_exception_when_ignore_missing_is_turned_off(
self, mk_store
):
mk_store.register.side_effect = ["http://storage.io/mock-url.pdf", TypeError]
assets = {"first-asset": "path-to.pdf", "second-asset": None}
with self.assertRaises(TypeError):
put_static_assets_into_storage(
assets, "some-prefix", mk_store, ignore_missing_assets=False
)
@patch("documentstore_migracao.tools.constructor.article_xml_constructor")
@patch("documentstore_migracao.processing.inserting.register_document")
@patch("documentstore_migracao.processing.inserting.os")
@patch("documentstore_migracao.processing.inserting.DocumentsSorter")
@patch("documentstore_migracao.object_store.minio.MinioStorage")
def test_register_documents_should_import_sps_package(
self,
mk_store,
mk_documents_sorter,
mk_os,
mk_register_document,
mk_article_xml_constructor,
):
mk_article_xml_constructor.side_effect = None
mk_os.walk.side_effect = [
[("/root/path/to/folder", "", ["file1", "file2", "file3.xml"])]
]
mk_register_document.side_effect = [["/root/path/to/folder/file3.xml", None]]
inserting.register_documents(
self.session, mk_store, mk_documents_sorter, "/root"
)
mk_article_xml_constructor.assert_called()
mk_register_document.assert_called_with(
"/root/path/to/folder", self.session, mk_store
)
mk_documents_sorter.insert_document.assert_called()
@patch("documentstore_migracao.utils.files.write_file")
@patch("documentstore_migracao.processing.inserting.os")
@patch("documentstore_migracao.processing.inserting.DocumentsSorter")
@patch("documentstore_migracao.object_store.minio.MinioStorage")
def test_register_documents_should_not_find_xml_file(
self, mk_store, mk_documents_sorter, mk_os, mk_write_file
):
mk_os.walk.side_effect = [[("/root/path/to/folder", "", ["file1", "file2"])]]
with self.assertLogs(level="ERROR") as log:
inserting.register_documents(
self.session, mk_store, mk_documents_sorter, "/root"
)
self.assertIn("list index out of range", log[1][0])
mk_write_file.assert_called()
@patch("documentstore_migracao.processing.inserting.register_document")
@patch("documentstore_migracao.processing.inserting.os")
@patch("documentstore_migracao.processing.inserting.DocumentsSorter")
@patch("documentstore_migracao.object_store.minio.MinioStorage")
def test_register_documents_should_not_register_package_if_files_is_not_found(
self, mk_store, mk_documents_sorter, mk_os, mk_register_document
):
mk_os.walk.side_effect = [[("/root/path/to/folder", "", [])]]
inserting.register_documents(
self.session, mk_store, mk_documents_sorter, "/root"
)
mk_register_document.assert_not_called()
``` |
{
"source": "joffilyfe/doi_request",
"score": 2
} |
#### File: doi_request/doi_request/controller.py
```python
import logging
from tasks.celery import registry_dispatcher_document
logger = logging.getLogger(__name__)
class Depositor(object):
def deposit_by_pids(self, pids_list):
"""
        Receive a list of pids in the form "collection_pid" (e.g. "scl_<pid>")
        and enqueue the registration of their DOIs.
"""
for item in pids_list:
collection, code = item.split('_')
registry_dispatcher_document.delay(code, collection)
logger.info('enqueued deposit for "%s"', item)
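# Usage sketch (editor's illustration; the pid value below is hypothetical):
#
#   Depositor().deposit_by_pids(['scl_S0034-89102013000200231'])
#
# Each item is split on "_" into (collection, code) before being queued.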
```
#### File: doi_request/models/__init__.py
```python
import os
from contextlib import contextmanager
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy import create_engine
DBSession = scoped_session(sessionmaker())
Base = declarative_base()
def create_engine_from_env():
return create_engine(os.environ.get('SQL_ENGINE', 'sqlite:///:memory:'))
def configure_session_engine(engine):
DBSession.configure(bind=engine)
def initialize_sql(engine):
configure_session_engine(engine)
Base.metadata.bind = engine
Base.metadata.create_all(engine)
PlainSession = sessionmaker(bind=create_engine_from_env())
@contextmanager
def transactional_session():
session = PlainSession()
try:
yield session
session.commit()
except:
session.rollback()
raise
finally:
session.close()
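# Usage sketch (editor's illustration; the model instance is hypothetical):
#
#   with transactional_session() as session:
#       session.add(some_model_instance)
#
# The commit happens on normal exit; any exception triggers a rollback,
# and the session is always closed.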
``` |
{
"source": "joffilyfe/opac",
"score": 3
} |
#### File: webapp/utils/related_articles_urls.py
```python
from flask import current_app
def related_links(article_url, titles):
return [
(
"Google",
"Similares no",
get_google_results_searched_by_article_titles(titles)[0],
),
(
"Google Scholar",
"Citados e Similares no",
get_scholar_results_searched_by_article_titles(titles)[0],
),
]
def get_google_results_searched_by_article_titles(titles):
"""
    Return the Google search result links built from the article titles.
"""
return [current_app.config.get("OPAC_GOOGLE_LINK") + title for title in titles]
def get_scholar_results_searched_by_article_titles(titles):
"""
    Return the Google Scholar search result links built from the article titles.
"""
return [
current_app.config.get("OPAC_GOOGLE_SCHOLAR_LINK") + title for title in titles
]
``` |
{
"source": "joffilyfe/packtools",
"score": 2
} |
#### File: packtools/packtools/utils.py
```python
from __future__ import unicode_literals
import logging
import functools
import itertools
import os
import glob
import sys
import json
import unicodedata
import zipfile
from lxml import etree, isoschematron
try:
import pygments # NOQA
from pygments.lexers import get_lexer_for_mimetype
from pygments.formatters import TerminalFormatter
except ImportError:
pygments = False # NOQA
from packtools import catalogs
LOGGER = logging.getLogger(__name__)
PY2 = sys.version_info[0] == 2
try:
# available on lxml >= 3.4.0
NOIDS_XMLPARSER = etree.XMLParser(collect_ids=False)
except TypeError:
LOGGER.info('cannot instantiate an XML parser that avoids the collection '
'of ids from elements.')
NOIDS_XMLPARSER = etree.XMLParser()
def setdefault(object, attribute, producer):
"""
Like dict().setdefault but for object attributes.
"""
if not hasattr(object, attribute):
setattr(object, attribute, producer())
return getattr(object, attribute)
def cachedmethod(wrappee):
"""Caches method calls within known arguments.
"""
@functools.wraps(wrappee)
def wrapper(self, *args, **kwargs):
key = (args, tuple(kwargs.items()))
cache_attrname = '__' + wrappee.__name__
cache = setdefault(self, cache_attrname, lambda: {})
if key not in cache:
cache[key] = wrappee(self, *args, **kwargs)
return cache[key]
return wrapper
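# Usage sketch (editor's illustration; the class below is hypothetical):
#
#   class Validator(object):
#       @cachedmethod
#       def validate(self, level='error'):
#           ...  # expensive work, computed once per (args, kwargs) pair
#
# Repeated calls with the same arguments return the cached result, which is
# stored on the instance under a name derived from the wrapped method.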
def get_static_assets(xml_et):
"""Returns an iterable with all static assets referenced by xml_et.
"""
paths = [
'//graphic[@xlink:href]',
'//media[@xlink:href]',
'//inline-graphic[@xlink:href]',
'//supplementary-material[@xlink:href]',
'//inline-supplementary-material[@xlink:href]',
]
iterators = [xml_et.iterfind(path, namespaces={'xlink': 'http://www.w3.org/1999/xlink'})
for path in paths]
elements = itertools.chain(*iterators)
return [element.attrib['{http://www.w3.org/1999/xlink}href'] for element in elements]
def XML(file, no_network=True, load_dtd=True):
"""Parses `file` to produce an etree instance.
The XML can be retrieved given its filesystem path,
an URL or a file-object.
:param file: Path to the XML file, URL or file-object.
:param no_network: (optional) prevent network access for external DTD.
:param load_dtd: (optional) load DTD during parse-time.
"""
parser = etree.XMLParser(remove_blank_text=True,
load_dtd=load_dtd,
no_network=no_network)
xml = etree.parse(file, parser)
return xml
def get_schematron_from_buffer(buff, parser=NOIDS_XMLPARSER):
"""Returns an ``isoschematron.Schematron`` for ``buff``.
The default parser doesn't collect ids on a hash table, i.e.:
``collect_ids=False``.
"""
xmlschema_doc = etree.parse(buff, parser)
return isoschematron.Schematron(xmlschema_doc)
def get_schematron_from_filepath(filepath):
with open(filepath, mode='rb') as buff:
return get_schematron_from_buffer(buff)
def config_xml_catalog(wrapped):
"""Decorator that wraps the execution of a function, setting-up and
tearing-down the ``XML_CATALOG_FILES`` environment variable for the current
process.
.. code-block:: python
@config_xml_catalog
def main(xml_filepath):
xml = XMLValidator(xml_filepath)
# do some work here
"""
@functools.wraps(wrapped)
def wrapper(*args, **kwargs):
try:
os.environ['XML_CATALOG_FILES'] = catalogs.XML_CATALOG
_return = wrapped(*args, **kwargs)
finally:
del(os.environ['XML_CATALOG_FILES'])
return _return
return wrapper
def flatten(paths):
""" Produces absolute path for each path in paths.
Glob expansions are allowed.
:param paths: Collection of paths. A path can be relative, absolute or a
glob expression.
"""
def _inner_generator():
for path in paths:
ylock = True
if not path.startswith(('http:', 'https:')):
# try to expand wildchars and get the absolute path
for fpath in glob.iglob(path):
yield os.path.abspath(fpath).strip()
ylock = False
# args must not be suppressed, even the invalid
if ylock:
yield path.strip()
for path in _inner_generator():
if PY2:
yield path.decode(encoding=sys.getfilesystemencoding())
else:
yield path
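# Usage sketch (editor's illustration; the paths are hypothetical):
#
#   list(flatten(['articles/*.xml', 'http://example.com/a.xml']))
#
# Glob patterns expand to absolute paths; http/https values and patterns that
# match nothing are yielded unchanged.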
def prettify(jsonobj, colorize=True):
""" Serialize and prettify a Python object as JSON.
On windows, bypass pygments colorization.
Function copied from Circus process manager:
https://github.com/circus-tent/circus/blob/master/circus/circusctl.py
"""
json_str = json.dumps(jsonobj, indent=2, sort_keys=True)
if colorize and pygments and not sys.platform.startswith('win'):
LOGGER.info('using pygments to highlight the output')
try:
lexer = get_lexer_for_mimetype("application/json")
return pygments.highlight(json_str, lexer, TerminalFormatter())
except Exception as exc:
LOGGER.exception(exc)
return json_str
def normalize_string(unistr):
"""Return the NFKC form for the unicode string ``unistr``.
The normal form KD (NFKD) will apply the compatibility decomposition, i.e.
replace all compatibility characters with their equivalents, followed by
the canonical composition.
"""
return unicodedata.normalize('NFKC', unistr)
class Xray(object):
"""Zip-file introspector.
:param zip_file: instance of ``zipfile.ZipFile``.
"""
def __init__(self, zip_file):
self._zipfile = zip_file
@classmethod
def fromfile(cls, filepath):
if not zipfile.is_zipfile(filepath):
raise ValueError('cannot read "%s": not a valid zipfile' % filepath)
zip_file = zipfile.ZipFile(filepath, 'r')
return cls(zip_file)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def show_sorted_members(self):
"""Shows the package members sorted by their file extensions.
"""
sorted_members = {}
for member in self.show_members():
_, ext = member.rsplit('.', 1)
ext_node = sorted_members.setdefault(ext.lower(), [])
ext_node.append(member)
return sorted_members
def show_members(self):
"""Shows the package members.
"""
return [filename for fileinfo, filename
in zip(self._zipfile.infolist(), self._zipfile.namelist())
if fileinfo.file_size]
def get_file(self, member, mode='r'):
"""Get file object for member.
A complete list of members can be checked
calling :meth:`show_members`.
:param member: a zip member, e.g. 'foo.xml'
"""
try:
return self._zipfile.open(member, mode)
except KeyError:
raise ValueError('cannot open file "%s": file doesn\'t exist' % member)
def close(self):
"""Close the archive file.
"""
self._zipfile.close()
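# Usage sketch (editor's illustration; file and member names are hypothetical):
#
#   with Xray.fromfile('package.zip') as xray:
#       xray.show_sorted_members()         # {'xml': [...], 'pdf': [...], ...}
#       fp = xray.get_file('article.xml')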
def resolve_schematron_filepath(value):
"""Determine the filepath for ``value``.
The lookup is run against all known schemas from
:data:`packtools.catalogs.SCH_SCHEMAS`. If ``value`` is already a filepath,
than it is returned as it is.
"""
try:
lookup_builtin = value.startswith('@')
except AttributeError as exc:
# the `from` clause cannot be used due to compatibility with python 2.
raise TypeError('invalid input type for text string: "value"')
if lookup_builtin:
path = catalogs.SCH_SCHEMAS.get(value[1:])
if path:
return path
else:
raise ValueError('cannot resolve schematron "%s"' % value)
elif os.path.lexists(value):
return value
else:
raise ValueError('could not locate file "%s" (I/O failure)' % value)
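# Usage sketch (editor's illustration; '@sps-1.8' is only an example key and
# must exist in packtools.catalogs.SCH_SCHEMAS to resolve):
#
#   resolve_schematron_filepath('@sps-1.8')         # built-in schema lookup
#   resolve_schematron_filepath('/tmp/custom.sch')  # returned as-is if it exists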
``` |
{
"source": "Joffref/neutron",
"score": 2
} |
#### File: neutron/db/port_vlan_db.py
```python
from neutron_lib.api.definitions import port_vlan as pv
from neutron.objects.port.extensions import port_vlan as pv_obj
class PortVlanMixin(object):
"""Mixin class to add vlan_number (Cyborg) to a port"""
def _process_create_port(self, context, data, result):
if not data.get(pv.VLANIDNUMBER):
result[pv.VLANIDNUMBER] = None
return
obj = pv_obj.PortVlan(
context, port_id=result['id'],
vlan_number=data[pv.VLANIDNUMBER])
obj.create()
result[pv.VLANIDNUMBER] = data[pv.VLANIDNUMBER]
def _extend_port_dict(self, port_db, result):
if port_db.vlan_number:
result[pv.VLANIDNUMBER] = port_db.vlan_number.vlan_number
else:
result[pv.VLANIDNUMBER] = None
``` |
{
"source": "Joffreybvn/alan-discord",
"score": 3
} |
#### File: src/attendance/message.py
```python
from datetime import datetime
from typing import Tuple
from pytz import timezone
from typing import Union
from discord import Client, TextChannel, DMChannel, User, Embed, Message, PartialMessage
from discord.errors import Forbidden
from apscheduler.schedulers.asyncio import AsyncIOScheduler
from config import config
from src.database import Database
from src.utils import Emoji, mention_to_id
from src.attendance.enums import Periods
# Create the scheduler and set the timezone.
scheduler = AsyncIOScheduler()
tz = timezone('Europe/Brussels')
class AttendanceMessage:
def __init__(self, client: Client, database: Database, period: Periods,
display_time: tuple, hide_time: tuple, display_days: str = "mon, tue, wed, thu, fri"):
# Discord client
self.client = client
self.database = database
# Schedule values
self.time: str = period.value
self.days: str = display_days
self.display_time: Tuple[int, int] = display_time
self.hide_time: Tuple[int, int] = hide_time
# Message text
self.text = "C'est le moment de **pointer** sur My BeCode !"
# Schedule the message
self.schedule()
@staticmethod
def get_embed(hide_hour: int, hide_minute: int) -> Embed:
embed = Embed(
title="My BeCode",
description=f"""
In Attendance We Trust ! Pointez maintenant sur [my.becode.org]({config.MY_BECODE_URL}).
Ou cliquez directement sur l'une des réactions ci-dessous.""",
url=config.MY_BECODE_URL,
colour=5747135
)
embed.set_thumbnail(url="https://i.imgur.com/ixU2HdV.gif") # https://i.imgur.com/cg4xd66.png
embed.set_footer(text=f"This message will disappear @ {hide_hour}:{hide_minute:02d}")
return embed
async def __send_attendance_message(self, context: Union[TextChannel, User],
hide_hour: int, hide_minute: int) -> Union[Message, bool]:
"""
        Send an attendance message to a TextChannel or as a Direct Message
        to a user, and append the reactions. Return the sent Message, or
        False if sending failed.
"""
try:
# Send the message
message: Message = await context.send(self.text, embed=self.get_embed(hide_hour, hide_minute))
# Append the reactions
await message.add_reaction(emoji=Emoji.HOUSE.value)
await message.add_reaction(emoji=Emoji.CITY.value)
except Forbidden as error:
print(f"""[!] Forbidden - Unable to send attendance message to {context}. Maybe the user is not on the same server, or has disabled Direct Message.""")
return False
except AttributeError as error:
print(f"""[!] AttributeError - Unable to send attendance message to {context}. Maybe the bot is not connected and has no access to this server.""")
return False
else:
return message
def schedule(self):
display_hour, display_minute = self.display_time
hide_hour, hide_minute = self.hide_time
# Display
@scheduler.scheduled_job('cron', day_of_week=self.days, hour=display_hour, minute=display_minute, timezone=tz)
async def display():
nonlocal self
for channel_id in self.database.get_channels('attendance'):
channel: TextChannel = self.client.get_channel(channel_id)
# Send the message with reactions
if message := await self.__send_attendance_message(channel, hide_hour, hide_minute):
                    # Save the message to later detect clicks on its reactions
config.ATTENDANCE_MESSAGES.add(self.time, channel_id, message.id)
# Log: job triggered
print(f"[i] Display server attendance - Triggered @ {datetime.now()}")
for user_id in self.database.get_pingable_users():
user: User = await self.client.fetch_user(mention_to_id(user_id))
# Send the message with reactions
if message := await self.__send_attendance_message(user, hide_hour, hide_minute):
channel: DMChannel = message.channel
                    # Save the message to later detect clicks on its reactions
config.ATTENDANCE_MESSAGES.add(self.time, channel.id, message.id)
# Log: job triggered
print(f"[i] Display direct message attendance - Triggered @ {datetime.now()}")
# Hide
@scheduler.scheduled_job('cron', day_of_week=self.days, hour=hide_hour, minute=hide_minute, timezone=tz)
async def hide():
nonlocal self
# Get each channel and message to delete, and delete them
for channel_id, message_id in config.ATTENDANCE_MESSAGES.get(self.time).items():
channel: Union[TextChannel, DMChannel] = self.client.get_channel(channel_id)
message: PartialMessage = channel.get_partial_message(message_id)
await message.delete()
# Clean the attendance message dict
config.ATTENDANCE_MESSAGES.empty(self.time)
# Job registered
print(f"[+] Attendance jobs scheduled: {self.days} @ {display_hour}h{display_minute}")
@staticmethod
def start():
"""Start all schedulers."""
scheduler.start()
```
#### File: src/attendance/request.py
```python
import aiohttp
from typing import Tuple, Union
from src.attendance.enums import Locations, Periods
# URL = "https://postman-echo.com/post"
URL = "https://graph.becode.org/"
class AttendanceRequest:
def __init__(self, period: str, at_home: Locations, token: str):
self.period: str = Periods(period).name
self.at_home: bool = at_home.value[1]
self.token: str = token
def get_json(self) -> dict:
return {
"operationName": "record_attendance_time",
"variables": {
"period": self.period,
"atHome": self.at_home
},
"extensions": {
"persistedQuery": {
"version": 1,
"sha256Hash": "553ae433516c13a97e348d4a48dd0114d1949f791ab21d97bed27030a65e85a8"
}
}
}
async def send(self) -> Tuple[bool, Union[dict, None]]:
headers = {"Authorization": f"Bearer {self.token}"}
async with aiohttp.ClientSession(headers=headers) as session:
async with session.post(URL, json=self.get_json()) as response:
if response.status == 200:
body = await response.json()
try:
if body['data']['recordAttendanceTime']:
return True, None
else:
return False, body
except Exception as error:
return False, body
elif body := await response.json():
return False, body
else:
return False, {'status': response.status}
return False, None
```
#### File: src/cogs/attendance.py
```python
from discord import User, RawReactionActionEvent, PartialEmoji
from discord.ext import commands
from discord.ext.commands import Bot
from discord.ext.commands.context import Context
from config import config
from src.database import Database
from src.attendance.enums import Locations
from src.attendance import AttendanceRequest
from src.utils import Emoji
class AttendanceCog(commands.Cog):
def __init__(self, bot: Bot, database: Database):
self.bot: Bot = bot
self.db: Database = database
@commands.command(name="token", pass_context=True)
async def add_token(self, context: Context, token: str):
"""Add the the given token to the database."""
# Save the action to the history
self.bot.history[context.message.id] = True
# Get the author
mention: str = context.message.author.mention
# Check if the token is provided
if len(token) > 1:
# Update the database
self.db.upsert_user(user_id=mention, becode_token=token)
await context.send(f"{mention}, your token has been added")
else:
await context.send(f"{mention}, your token is not valid")
@commands.Cog.listener()
async def on_raw_reaction_add(self, payload: RawReactionActionEvent):
"""Event triggered when a user click a reaction to send an attendance to Becode."""
# Get the user object
user: User = await self.bot.fetch_user(payload.user_id)
# If the reaction is added by a human
if not user.bot:
# Get all attendance message posted
messages = config.ATTENDANCE_MESSAGES.get_by_messages()
# Get the emoji and message id
emoji: PartialEmoji = payload.emoji
message_id: int = payload.message_id
# Check when a user add a reaction
if message_id in messages.keys():
location = None
# Emoji: House
                if str(emoji) == Emoji.HOUSE.value:
location = Locations.HOME
# Emoji: City
                elif str(emoji) == Emoji.CITY.value:
location = Locations.BECODE
if location:
print("[!] User added reaction.")
# Get the mention
mention: str = user.mention
# Retrieve the token and check if it's not None
if token := self.db.get_token(mention):
# Send an attendance request to Becode
status, message = await AttendanceRequest(messages[message_id], location, token).send()
if status:
print(f"[!] Attendance was correctly send for {mention}.")
await user.send(f"{mention} J'ai bien pointé pour toi sur Becode !")
else:
print(f"[!] Attendance was NOT correctly send for {mention}.")
await user.send(f"{mention} OUPS ! Une **erreur** s'est produite: Ton token est probablement expiré. Passe par https://my.becode.org pour pointer.")
if message:
await user.send(str(message))
else:
print(f"[!] Missing token for {mention}.")
await user.send(f"{mention} OUPS ! Une **erreur** s'est produite: Je n'ai pas trouvé ton token... Ajoute un token avec la commande **!token**.")
``` |
{
"source": "Joffreybvn/backdriveb2",
"score": 3
} |
#### File: api/handlers/accounts_handler.py
```python
from b2sdk.v1 import Bucket as B2Bucket
import uuid
import json
from typing import List, Dict, Tuple
from ..objects import Account
class AccountsHandler:
def __init__(self):
self.accounts: Dict[str, Account] = {}
self._load_accounts()
self._connect_accounts()
def update(self, name: str, key: str = None, key_id: str = None) -> Tuple[bool, str]:
"""Update or create the account with the given index."""
# Update the account
try:
self.accounts[name].credentials = [key, key_id]
self._save_accounts()
# If the account doesn't exist yet, create it
except (KeyError, TypeError) as error:
name = uuid.uuid4().hex.upper()[0:6]
self.accounts[name] = Account(name, [key, key_id])
# Save to disk
self._save_accounts()
# Try to connect
return self._connect(name), name
def get_accounts(self) -> List[list]:
"""Return a list of all accounts."""
accounts = []
for account in self.accounts.values():
accounts.append([account.name] + account.credentials)
return accounts
def get_buckets(self) -> List[B2Bucket]:
"""Return a list of all buckets of all accounts."""
buckets = []
for account in self.accounts.values():
buckets += account.buckets
return buckets
# Account connection
# -------------------------------------------------------------------------
def _connect(self, account: str) -> bool:
"""Connect the given account to BackBlaze."""
return self.accounts[account].connect()
def _connect_accounts(self):
"""Connect all accounts to BackBlaze."""
for account in self.accounts.keys():
self._connect(account)
# Load / Save account
# -------------------------------------------------------------------------
def _load_accounts(self) -> None:
"""Load all accounts from the disk."""
# Load the accounts from disk
try:
with open('./accounts.json') as handler:
credentials = json.load(handler)
# If not exists, create a new dictionary
except FileNotFoundError as error:
print("ERROR: Could not load accounts.json. Create an empty one.")
credentials = self._generate_credential()
# Create Account objects
for account_name, keys in credentials["accounts"].items():
self.accounts[account_name] = Account(account_name, keys)
def _save_accounts(self) -> None:
"""Save accounts to the disk"""
credentials = self._generate_credential()
# Populate the credential dictionary
for account in self.accounts.values():
credentials["accounts"][account.name] = account.credentials
# Save it to disk as JSON
with open('./accounts.json', 'w') as handler:
json.dump(credentials, handler)
@staticmethod
def _generate_credential() -> dict:
"""Generate an empty credential dictionary."""
return {
"accounts": {}
}
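# Resulting accounts.json layout (editor's illustration; the name and key
# values below are fake):
#
#   {"accounts": {"A1B2C3": ["applicationKey", "applicationKeyId"]}}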
```
#### File: api/handlers/buckets_handler.py
```python
from b2sdk.v1 import Bucket as B2Bucket
from typing import List, Dict
from ..objects import Bucket
class BucketsHandler:
def __init__(self, buckets: List[B2Bucket]):
self.buckets: Dict[str, Bucket] = {}
self._load_buckets(buckets)
def get_buckets(self) -> List[str]:
"""Return a list of bucket names."""
return list(self.buckets.keys())
def get_files(self, bucket_name: str, folder: str = "", reload: bool = False):
"""Return the file list for the given bucket name."""
return self.buckets[bucket_name].get_files(folder, reload)
def download_file(self, bucket_name: str, file_name: str):
self.buckets[bucket_name].download_file(file_name)
def upload_file(self, bucket_name: str, local_file_name: str, bucket_directory: str):
self.buckets[bucket_name].upload_file(local_file_name, bucket_directory)
def _load_buckets(self, buckets: List[B2Bucket]):
"""Create the bucket object."""
for bucket in buckets:
self.buckets[bucket.name] = Bucket(bucket)
``` |
{
"source": "Joffreybvn/botbuilder-discord",
"score": 3
} |
#### File: client/listener/listener.py
```python
import threading
from http import HTTPStatus
from flask import Flask, request, Response
from flask.views import MethodView
from botbuilder_discord.client import response_queue
class Listener(threading.Thread):
def __init__(self, host: str = "127.0.0.1", port: int = 5789):
super().__init__()
self.host = host
self.port = port
# Create Flask server
self.app = Flask("Flask")
# Add routes
self.app.add_url_rule(
'/v3/conversations/<conversation_id>/activities/<activity_id>',
view_func=Conversation.as_view('conversation'),
methods=['POST']
)
# self.app.add_url_rule('/{tail:.*}', view_func=CatchAll.as_view('catch_all'), methods=['POST'])
def run(self):
self.app.run(host=self.host, port=self.port, debug=False, use_reloader=False)
class Conversation(MethodView):
def post(self, conversation_id: str, activity_id: str):
"""
        :param conversation_id: The id of the conversation. This id is also
            the discord id of the user involved in the conversation.
        :param activity_id: The unique id of the message, created when the
            user sends its message to the bot, and passed back to the listener.
:return:
"""
# Check if the data send is a JSON
if "application/json" in request.headers["Content-Type"]:
# Get the message
body = request.json or {}
text = body.get('text', None)
# Set the data into the queue
response_queue.append(activity_id, {
'message_id': activity_id,
'user_id': conversation_id,
'text': text
})
# If no JSON is send, return "Unsupported media type"
else:
return Response(status=HTTPStatus.UNSUPPORTED_MEDIA_TYPE)
# Return HTTP 200 OK
return Response(status=HTTPStatus.OK)
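# Request sketch (editor's illustration; ids are hypothetical, host/port are
# the defaults from Listener.__init__):
#
#   POST http://127.0.0.1:5789/v3/conversations/<user_id>/activities/<activity_id>
#   Content-Type: application/json
#
#   {"text": "Hello from the bot"}
#
# The handler queues {'message_id', 'user_id', 'text'} under the activity id.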
```
#### File: client/sender/start_conversation_message.py
```python
from . import BaseMessage
class StartConversationMessage(BaseMessage):
def create_request_body(self) -> dict:
activity_id = self.options.get('activity_id')
user_id = self.options.get('user_id')
return {
"type": "conversationUpdate",
"serviceUrl": f"http://{self.listener_host}:{self.listener_port}",
"id": activity_id,
"from": {
"id": f"bot_{user_id}",
},
"conversation": {
"id": user_id,
},
"recipient": {
"id": user_id,
},
"membersAdded": [
{
"id": f"bot_{user_id}",
},
{
"id": user_id,
}
],
"membersRemoved": []
}
``` |
{
"source": "Joffreybvn/jobgazing",
"score": 3
} |
#### File: jobgazing/jobgazing/database.py
```python
from typing import Union
from . import Job, Enterprise
class Database:
def __init__(self):
pass
def get_enterprise(self, job_type: Union[Job.AI, Job.WEB]):
return Enterprise(
name='Radix',
locations=['Brussels'],
website='https://radix.ai'
)
```
#### File: jobgazing/jobgazing/models.py
```python
import json
from enum import Enum
class Job(str, Enum):
AI = 'ai',
WEB = 'web'
class Enterprise:
"""Enterprise data model."""
# Prevent any other attributes from being created.
__slots__ = ('name', 'locations', 'website')
def __init__(self, **kwargs):
"""
Create a class to store enterprise data.
Parameters
----------
name : str
Name of the enterprise.
locations : list
List of the locations where the enterprise is situated at.
website : str
Website's URL of the enterprise.
"""
# Use super to set around __setattr__ definition.
for parameter, value in kwargs.items():
if parameter in self.__slots__:
super(Enterprise, self).__setattr__(parameter, value)
def __setattr__(self, obj, value):
"""Override setattr. Prevent user from modifying this class' attributes"""
raise AttributeError('Enterprise is an immutable object.')
def __str__(self) -> str:
"""Customize stdout when called with print()."""
locations: str = ','.join(self.locations)
return f'Enterprise: {self.name}, locations: {locations} - {self.website}'
def __dict__(self) -> dict:
"""
Implement the dictionary representation of this class.
"""
# By default, all classes implement the __dict__ class variable, but
# the custom declared __slot__ class variable remove it. Thus, we
# re-implement a custom __dict__() class method.
return {key: getattr(self, key) for key in self.__slots__}
def __iter__(self):
"""
Return this object's content as Iterator of tuple(key, value).
Allow this object to be called with dict() and list().
"""
for key in self.dict():
yield key, getattr(self, key)
def __getitem__(self, key: str):
"""
Implement the behavior of a dictionary to this object.
Return the attribute's value corresponding to the given key.
"""
if isinstance(key, str):
if key in self.__slots__:
return getattr(self, key)
# Raise KeyError when a missing attributes is passed.
raise KeyError(f'Key must be one of {self.__slots__}, not {key}')
# Raise a TypeError if the given key is not a string.
else:
raise TypeError(f'Key must be str, not {type(key).__name__}')
def __len__(self) -> int:
"""Return the amount of this object's attributes."""
return len(self.__slots__)
def dict(self) -> dict:
"""Return a dictionary representation of this object."""
return self.__dict__()
def json(self, indent: int = 4) -> str:
"""Return a JSON-formatted representation of this object."""
return json.dumps(self.dict(), indent=indent)
def resume(self) -> dict:
"""Return a dictionary of this object's name, locations and website."""
content = self.dict()
keys_to_filter = ('name', 'locations', 'website')
return {key: content[key] for key in keys_to_filter if key in content}
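# Usage sketch (editor's illustration, reusing the values from database.py above):
#
#   radix = Enterprise(name='Radix', locations=['Brussels'], website='https://radix.ai')
#   radix['name']   # 'Radix'
#   dict(radix)     # {'name': 'Radix', 'locations': ['Brussels'], 'website': 'https://radix.ai'}
#   radix.json()    # JSON string with the same content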
``` |
{
"source": "Joffreybvn/lidario",
"score": 3
} |
#### File: lidario/lidario/translator.py
```python
import numpy as np
from lidario.io import InputHandler, OutputHandler
class Translator:
"""
Instantiate a Translator object which will handle the translation between
given input and desired output type.
:param input_type: Type of raster data provided: "**geotiff**" or "**mask**".
- "geotiff": a .tif raster file.
- "mask", a *rasterio.mask.mask()* result.
    :param output_type: Type of point cloud data to return: "**csv**", "**ply**",
        "**numpy**", "**pandas**", "**dictionary**", "**list**", "**tuple**".
- "csv": a CSV file.
- "ply": a .ply file.
- "numpy": a Numpy array. Alternatives: "np", "array".
- "dataframe": A Pandas dataframe: Alternatives: "pandas", "pd", "df".
- "dictionary": A pure Python dictionary: Alternative: "dict".
- "list" a pure Python list.
- "tuple": a pure Python tuple.
:param affine_transform: If True (default), apply an affine
geo-transformation to the translated coordinates.
:param metadata: If True, the "translate" method will return a tuple
with the point cloud and the metadata. If False (default), it will
only return the point cloud.
:type input_type: str
:type output_type: str
:type affine_transform: bool, optional
:type metadata: bool, optional
"""
def __init__(self, input_type, output_type, affine_transform=True, metadata=False):
# Handle the input and output files/objects
self.input_handler = InputHandler(input_type)
self.output_handler = OutputHandler(output_type)
# True point cloud has to be geo-transformed
self.affine_transform = affine_transform
self.return_metadata = metadata
def translate(self, input_values, out_file="output1.csv", out_format="binary",
no_data=None, decimal=None, transpose=False, band=1):
"""
Translate a given "input_values" into a X, Y, Z point cloud.
:param input_values: Data values to translate. Depend on the
Translator's "input_type" parameter:
- For a "**geotiff**": Takes the path to your .tif file (string).
- For a "**mask**": Takes the np.array returned by a rasterio.mask.mask() method.
:param out_file: Name of the file to save the point cloud.
Used only if the Translator's "output_type" is a file type: "csv", "ply".
            Optional, default: "output1.csv".
:param out_format: Data format to save the file: "**binary**" (default)
or "**ascii**" (not recommended, may be slow). Used only when "ply"
is specified as "output_type". Optional.
:param no_data: Value to exclude from the translation.
- For a "**geotiff**": By default, use the nodata value stored in the tif file. If this value is missing, use -9999.
- For a "**mask**": By default, use -9999.
:param band: Band of the raster to translate. Used only if Translator's
"input_values" is "geotiff". Default: 1.
:param decimal: Round the coordinate numbers to the given decimal.
Default: None.
:param transpose: If True, transpose the coordinates. Default: False.
:type input_values: str or np.array
:type out_file: str, optional
        :type out_format: str, optional
:type no_data: int, optional
:type decimal: int, optional
:type transpose: bool, optional
        :type band: int, optional
:return: The translated point cloud, typed as specified. If
Translator's "output_type" is set to "csv", return None instead
and save the CSV file. If Translator's "metadata" is set to True,
return a tuple with the point cloud and the metadata.
"""
# Load the raster and metadata
raster, metadata = self.input_handler.load(True, input_values, band)
if no_data is None:
no_data = metadata['nodata']
# Create a (x, y, z) point cloud from raster data
x, y, z = self.__create_xyz_points(raster, no_data)
# Geo-transform the coordinates
if self.affine_transform:
x, y = self.__affine_geo_transformation(x, y, metadata['transform'])
# Round the numbers
if decimal is not None:
x, y, z = self.__round(x, y, z, decimal)
# Save the point cloud
point_cloud = self.output_handler.save(x, y, z, out_file, out_format, transpose)
# If self.return_metadata is True, return the metadata
if self.return_metadata:
return point_cloud, metadata
# If not, return only the point cloud
return point_cloud
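    # Usage sketch (editor's illustration; "dem.tif" is a hypothetical file):
    #
    #   translator = Translator("geotiff", "numpy")
    #   point_cloud = translator.translate("dem.tif")
    #
    # With metadata=True at construction time, translate() returns a
    # (point_cloud, metadata) tuple instead, as described above.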
@staticmethod
def __create_xyz_points(raster, no_data=-9999):
"""
Infer x, y, z points from raster data.
:param raster: Raster data as numpy array.
:param no_data: No data value of the raster.
:type raster: np.array
:type no_data: int
:return: Tuple of np.array containing the point cloud: (x, y, z).
:rtype tuple
"""
y, x = np.where(raster != no_data)
z = np.extract(raster != no_data, raster)
return x, y, z
@staticmethod
def __affine_geo_transformation(x, y, gtr):
"""
Create affine geo-transformed x and y.
An affine transformation preserves collinearity and ratios of
distances. It replace the point cloud into their original
space of coordinates.
:param x: X-array of coordinates.
:param y: Y-array of coordinates.
:param gtr: Affine geo-transformation data.
:return: gtr_x, gtr_y, the geo-transformed x and y, as np.array.
:rtype tuple
"""
# https://gdal.org/user/raster_data_model.html#affine-geotransform
# Affine transformation rewritten for rasterio:
gtr_x = gtr[2] + (x + 0.5) * gtr[0] + (y + 0.5) * gtr[1]
gtr_y = gtr[5] + (x + 0.5) * gtr[3] + (y + 0.5) * gtr[4]
return gtr_x, gtr_y
@staticmethod
def __round(x, y, z, decimal):
return np.around(x, decimal),\
np.around(y, decimal),\
np.around(z, decimal)
``` |
{
"source": "Joffreybvn/resa-chatbot",
"score": 3
} |
#### File: nlu/preprocessing/tokenizer.py
```python
from transformers import BertTokenizer
from config import Config
config = Config()
class Tokenizer:
def __init__(self):
self.tokenizer = BertTokenizer.from_pretrained(config.MODEL_CLASSIFIER)
def get_dataset(self, text: str):
"""Return a torch Dataset from a given text."""
# Tokenize the text and return it
return self.tokenizer(text, return_tensors="pt")
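# Usage sketch (editor's illustration; assumes config.MODEL_CLASSIFIER points
# to a valid BERT checkpoint):
#
#   inputs = Tokenizer().get_dataset("Je voudrais réserver une chambre.")
#   inputs["input_ids"]  # torch tensor of shape (1, sequence_length)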
``` |
{
"source": "Joffreybvn/snake-wars",
"score": 3
} |
#### File: Joffreybvn/snake-wars/main.py
```python
import numpy as np
from scipy.spatial import distance
import random
from snake_wars.server import Server, SynchronisedServer
from snake_wars.client import Client
from snake_wars.commons import Direction, GameState
from snake_wars.learn import Agent
def start_solo():
"""Local solo starting script"""
Server(slots=2).start()
for i in range(2):
Client(580, 580).start()
def start_online():
"""
Online starting script. Change the IP and port to connect
to your desired server.
"""
Client(580, 580, ip='172.16.31.10', port=5081).start()
class Reinforcement(Agent):
def __init__(self):
super().__init__()
def train(self):
self.new_game()
while True:
self.play_step(random.choice(list(Direction)))
state = self.get_state()
def get_state(self):
"""Return the current state of the game as Numpy array of 0 and 1."""
# Get the raw game state from the Agent
state: GameState = super().get_state()
# Find nearest food
food = None
if state.foods.any():
# Math the city-block distance between head and all foods
head = np.array([state.head], dtype=int)
distances = distance.cdist(state.foods, head, metric='cityblock')
# Save the nearest food
nearest_indexes = np.where(distances == np.amin(distances))[0]
food = state.foods[nearest_indexes[0]]
# Check if there's possible collisions with the surroundings
is_collision = {
'left': self.is_collision(state.surroundings['left']), # left cell
'right': self.is_collision(state.surroundings['right']), # right cell
'up': self.is_collision(state.surroundings['up']), # top cell
'down': self.is_collision(state.surroundings['down']) # bottom cell
}
# Return a binary array of the game state
return np.array([
# Danger straight
(state.direction['left'] and is_collision['left']) or # Goes left and danger left
(state.direction['right'] and is_collision['right']) or # Goes right and danger right
(state.direction['up'] and is_collision['up']) or # Goes up and danger up
(state.direction['down'] and is_collision['down']), # Goes down and danger down
# Danger left
(state.direction['left'] and is_collision['down']) or # Goes left and danger down
(state.direction['right'] and is_collision['up']) or # Goes right and danger up
(state.direction['up'] and is_collision['left']) or # Goes up and danger left
(state.direction['down'] and is_collision['right']), # Goes down and danger right
# Danger right
(state.direction['left'] and is_collision['up']) or # Goes left and danger up
(state.direction['right'] and is_collision['down']) or # Goes right and danger down
(state.direction['up'] and is_collision['right']) or # Goes up and danger right
(state.direction['down'] and is_collision['left']), # Goes down and danger left
# Move direction
*state.direction.values(),
# Food location
food is not None and food[0] < state.head[0], # Food is on the left
food is not None and food[0] > state.head[0], # Food is on the right
food is not None and food[1] < state.head[1], # Food is up
food is not None and food[1] > state.head[1], # Food is down
], dtype=int)
def start_train():
SynchronisedServer(slots=1).start()
Reinforcement().start()
if __name__ == '__main__':
# start_solo()
# start_online()
start_train()
```
#### File: snake_wars/client/sync_client.py
```python
from itertools import chain
from typing import Dict
import numpy as np
from snake_wars.client import Client
from snake_wars.client.entities import Snake
from snake_wars.commons import Direction, Location, GameState
class SynchronizedClient(Client):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def run(self):
while not self.lobby_loop():
pass
def play_step(self, direction: Direction):
"""Perform a game loop. Refresh the game from the server."""
# Send the direction
self.send_state(direction)
self.pump()
# Display the game
self.renderer.render(self.snakes.values(), self.foods)
def get_state(self) -> GameState:
"""
Create and return a dictionary with the game state,
from the point of view of this client's snake.
"""
# Get client's own snake
snake: Snake = self.snakes[self.id]
head: Location = snake.get_head_position()
# Get cells (x, y) location surrounding the head
surroundings: Dict[str, tuple] = {
'left': ((head.x - 1) % self.grid_size.width, head.y), # left
'right': ((head.x + 1) % self.grid_size.width, head.y), # right
'up': (head.x, (head.y - 1) % self.grid_size.height), # up
'down': (head.x, (head.y + 1) % self.grid_size.height) # down
}
# Create a boolean list with the direction of the snake
direction: Dict[str, bool] = {
'left': snake.direction == Direction.LEFT.value, # left
'right': snake.direction == Direction.RIGHT.value, # right
'up': snake.direction == Direction.UP.value, # up
'down': snake.direction == Direction.DOWN.value # down
}
# Get vectors of all foods coordinates and head coordinate
foods = np.array(list(self.foods.keys()), dtype=int)
# Return the data
return GameState(head.tuple(), surroundings, direction, foods)
def is_collision(self, location: tuple) -> bool:
"""
Return True if there's a collision between the given location
and any snake on the grid.
"""
# Iterate over all positions of all snakes
        for pos in chain.from_iterable(snake.get_all_raw_positions() for snake in self.snakes.values()):
# If there's a collision, return True
if pos == location:
return True
return False
```
#### File: snake_wars/server/player.py
```python
import random
from typing import Union
from PodSixNet.Channel import Channel
from snake_wars.server.entities import Snake
from snake_wars.commons import RandomLocation, Size
class Player(Channel):
"""Player socket representation on the server."""
def __init__(self, *args, **kwargs):
# Create a random id
self.id = random.randint(0, 10000)
self.snake: Union[Snake, None] = None
# The client step - Used to check synchronization
self.step = 0
super().__init__(*args, **kwargs)
def create_snake(self):
"""Create a Snake at a random location in the game grid."""
# Create a random spawn location for the Snake
grid_size: Size = self._server.grid_size
location = RandomLocation(grid_size.width, grid_size.height)
# Instantiate a new Snake
self.snake = Snake(location, grid_size)
def move(self):
"""Move its Snake and return its positions."""
# Move the snake
self.snake.move()
# If the snake is still alive, return its positions
if not self.snake.death:
return list(self.snake.get_all_raw_positions())
# If not, return an empty list
return []
# NETWORK related functions
# -------------------------------------------------------------------------
def Network(self, data):
pass
def Network_state(self, data):
"""
Triggered when the client send its direction inputs.
(turn left, right, top, bottom).
"""
message = data['message']
self.snake.turn(message["direction"])
self.step = message['step']
def Network_step(self, data):
"""
Triggered when the client send its step.
"""
self.step = data['message']
def Network_disconnect(self, data):
"""
Triggered when the client disconnect.
"""
# Remove the client from the server
self._server.disconnect_player(self, self.id)
``` |
{
"source": "Joffrey-Liu/Dilation-FCN",
"score": 3
} |
#### File: Joffrey-Liu/Dilation-FCN/model.py
```python
import tensorflow as tf
def dilation_model_pretrained(dataset, input_tensor, w_pretrained, trainable):
def conv(name, input, strides, padding, add_bias, apply_relu, atrous_rate=None):
"""
Helper function for loading convolution weights from weight dictionary.
"""
with tf.variable_scope(name):
# Load kernel weights and apply convolution
w_kernel = w_pretrained[name + '/kernel:0']
w_kernel = tf.Variable(initial_value=w_kernel, trainable=trainable)
if not atrous_rate:
conv_out = tf.nn.conv2d(input, w_kernel, strides, padding)
else:
conv_out = tf.nn.atrous_conv2d(input, w_kernel, atrous_rate, padding)
if add_bias:
# Load bias values and add them to conv output
w_bias = w_pretrained[name + '/bias:0']
w_bias = tf.Variable(initial_value=w_bias, trainable=trainable)
conv_out = tf.nn.bias_add(conv_out, w_bias)
if apply_relu:
# Apply ReLu nonlinearity
conv_out = tf.nn.relu(conv_out)
return conv_out
# Sanity check on dataset name
if dataset not in ['cityscapes', 'camvid']:
raise ValueError('Dataset "{}" not supported.'.format(dataset))
# Start building the model
else:
h = conv('conv1_1', input_tensor, strides=[1, 1, 1, 1], padding='VALID', add_bias=True, apply_relu=True)
h = conv('conv1_2', h, strides=[1, 1, 1, 1], padding='VALID', add_bias=True, apply_relu=True)
h = tf.layers.max_pooling2d(h, pool_size=(2, 2), strides=(2, 2), padding='valid', name='pool1')
h = conv('conv2_1', h, strides=[1, 1, 1, 1], padding='VALID', add_bias=True, apply_relu=True)
h = conv('conv2_2', h, strides=[1, 1, 1, 1], padding='VALID', add_bias=True, apply_relu=True)
h = tf.layers.max_pooling2d(h, pool_size=(2, 2), strides=(2, 2), padding='valid', name='pool2')
h = conv('conv3_1', h, strides=[1, 1, 1, 1], padding='VALID', add_bias=True, apply_relu=True)
h = conv('conv3_2', h, strides=[1, 1, 1, 1], padding='VALID', add_bias=True, apply_relu=True)
h = conv('conv3_3', h, strides=[1, 1, 1, 1], padding='VALID', add_bias=True, apply_relu=True)
h = tf.layers.max_pooling2d(h, pool_size=(2, 2), strides=(2, 2), padding='valid', name='pool3')
h = conv('conv4_1', h, strides=[1, 1, 1, 1], padding='VALID', add_bias=True, apply_relu=True)
h = conv('conv4_2', h, strides=[1, 1, 1, 1], padding='VALID', add_bias=True, apply_relu=True)
h = conv('conv4_3', h, strides=[1, 1, 1, 1], padding='VALID', add_bias=True, apply_relu=True)
h = conv('conv5_1', h, strides=[1, 1, 1, 1], padding='VALID', add_bias=True, apply_relu=True, atrous_rate=2)
h = conv('conv5_2', h, strides=[1, 1, 1, 1], padding='VALID', add_bias=True, apply_relu=True, atrous_rate=2)
h = conv('conv5_3', h, strides=[1, 1, 1, 1], padding='VALID', add_bias=True, apply_relu=True, atrous_rate=2)
h = conv('fc6', h, strides=[1, 1, 1, 1], padding='VALID', add_bias=True, apply_relu=True, atrous_rate=4)
h = tf.layers.dropout(h, rate=0.5, name='drop6')
h = conv('fc7', h, strides=[1, 1, 1, 1], padding='VALID', add_bias=True, apply_relu=True)
h = tf.layers.dropout(h, rate=0.5, name='drop7')
h = conv('final', h, strides=[1, 1, 1, 1], padding='VALID', add_bias=True, apply_relu=True)
h = tf.pad(h, [[0, 0], [1, 1], [1, 1], [0, 0]], mode='CONSTANT', name='ctx_pad1_1')
h = conv('ctx_conv1_1', h, strides=[1, 1, 1, 1], padding='VALID', add_bias=True, apply_relu=True)
h = tf.pad(h, [[0, 0], [1, 1], [1, 1], [0, 0]], mode='CONSTANT', name='ctx_pad1_2')
h = conv('ctx_conv1_2', h, strides=[1, 1, 1, 1], padding='VALID', add_bias=True, apply_relu=True)
h = tf.pad(h, [[0, 0], [2, 2], [2, 2], [0, 0]], mode='CONSTANT', name='ctx_pad2_1')
h = conv('ctx_conv2_1', h, strides=[1, 1, 1, 1], padding='VALID', add_bias=True, apply_relu=True, atrous_rate=2)
h = tf.pad(h, [[0, 0], [4, 4], [4, 4], [0, 0]], mode='CONSTANT', name='ctx_pad3_1')
h = conv('ctx_conv3_1', h, strides=[1, 1, 1, 1], padding='VALID', add_bias=True, apply_relu=True, atrous_rate=4)
h = tf.pad(h, [[0, 0], [8, 8], [8, 8], [0, 0]], mode='CONSTANT', name='ctx_pad4_1')
h = conv('ctx_conv4_1', h, strides=[1, 1, 1, 1], padding='VALID', add_bias=True, apply_relu=True, atrous_rate=8)
h = tf.pad(h, [[0, 0], [16, 16], [16, 16], [0, 0]], mode='CONSTANT', name='ctx_pad5_1')
h = conv('ctx_conv5_1', h, strides=[1, 1, 1, 1], padding='VALID', add_bias=True, apply_relu=True, atrous_rate=16)
if dataset == 'cityscapes':
h = tf.pad(h, [[0, 0], [32, 32], [32, 32], [0, 0]], mode='CONSTANT', name='ctx_pad6_1')
h = conv('ctx_conv6_1', h, strides=[1, 1, 1, 1], padding='VALID', add_bias=True, apply_relu=True, atrous_rate=32)
h = tf.pad(h, [[0, 0], [64, 64], [64, 64], [0, 0]], mode='CONSTANT', name='ctx_pad7_1')
h = conv('ctx_conv7_1', h, strides=[1, 1, 1, 1], padding='VALID', add_bias=True, apply_relu=True, atrous_rate=64)
h = tf.pad(h, [[0, 0], [1, 1], [1, 1], [0, 0]], mode='CONSTANT', name='ctx_pad_fc1')
h = conv('ctx_fc1', h, strides=[1, 1, 1, 1], padding='VALID', add_bias=True, apply_relu=True)
h = conv('ctx_final', h, strides=[1, 1, 1, 1], padding='VALID', add_bias=True, apply_relu=False)
if dataset == 'cityscapes':
h = tf.image.resize_bilinear(h, size=(1024, 1024))
logits = conv('ctx_upsample', h, strides=[1, 1, 1, 1], padding='SAME', add_bias=False, apply_relu=True)
else:
logits = h
softmax = tf.nn.softmax(logits, dim=3, name='softmax')
return softmax
``` |
{
"source": "JoffreyN/HTMLReport",
"score": 3
} |
#### File: HTMLReport/tests/HTMLReport_test.py
```python
import base64
import logging
import unittest
import HTMLReport
from HTMLReport import AddImage
from HTMLReport import logger
def parse_int(s):
return int(s)
LOG = logging.getLogger(__name__)
class test_1th(unittest.TestCase):
def test_isupper(self):
"""测试isupper"""
logger().info("测试isupper")
LOG.info("11111111111111111111111111111111111111111111111111111")
self.assertTrue('FOO'.isupper(), "真")
self.assertFalse('Foo'.isupper(), '假')
def test_split(self):
"""测试split"""
logger().info("测试split")
s = 'hello world'
self.assertEqual(s.split(), ['hello', 'world'], "相等")
with self.assertRaises(TypeError):
s.split(2)
def test_error(self):
"""测试错误"""
logger().error("测试错误")
with open("baidu.png", 'rb') as f:
image = base64.b64encode(f.read())
AddImage(image, "百度1")
AddImage(image, "百度12")
AddImage(image, "百度123")
raise ValueError
def test_fail(self):
"""测试失败"""
logger().info("测试失败")
self.assertEqual(1, 2, "相等")
@unittest.skip("这是一个跳过的测试")
def test_skip(self):
"""测试跳过"""
logger().warning("测试跳过")
pass
class test_2th(unittest.TestCase):
def test_bad_int(self):
"""测试异常类型"""
logger().info("测试异常类型")
self.assertRaises(ValueError, parse_int("1.5"), 'N/A')
def test_upper(self):
"""测试相等"""
logger().critical('测试相等')
self.assertEqual('foo'.upper(), '00')
class test_第三个测试(unittest.TestCase):
a = None
@classmethod
def setUpClass(cls):
"""公共"""
cls.a = 1
LOG.info("a : {}".format(cls.a))
@classmethod
def tearDownClass(cls):
LOG.info("a : {}".format(cls.a))
def test_True(self):
"""测试True"""
test_第三个测试.a += 1
self.assertTrue(False, self.a)
def test_False(self):
"""测试FALSE"""
test_第三个测试.a += 1
self.assertFalse(True, self.a)
if __name__ == '__main__':
    # Test suite
    suite = unittest.TestSuite()
    # Test case loader
    loader = unittest.TestLoader()
    # Load the test cases into the test suite
    suite.addTests(loader.loadTestsFromTestCase(test_1th))
    suite.addTests(loader.loadTestsFromTestCase(test_2th))
    suite.addTests(loader.loadTestsFromTestCase(test_第三个测试))
    # Test case runner
    runner = HTMLReport.TestRunner(report_file_name='test',  # report file name; defaults to "test" plus a timestamp if not set
                                   output_path='report',  # output folder name, default "report"
                                   title='一个简单的测试报告',  # report title, default "测试报告"
                                   description='随意描述',  # report description, default "无测试描述"
                                   thread_count=2,  # number of concurrent threads (tests run unordered), default 1
                                   thread_start_wait=0,  # start-up delay for each thread, default 0 s
                                   sequential_execution=True,  # whether to execute suites in addTests order:
                                   # waits for one addTests batch to finish before running the next, default False.
                                   # Recommended True if any test case defines tearDownClass,
                                   # otherwise tearDownClass only runs after all test threads have finished.
                                   # lang='en'
                                   lang='cn'  # supports Chinese and English, default Chinese
                                   )
    # Run the test suite
    runner.run(suite)
```
#### File: HTMLReport/tests/loggerTest.py
```python
import logging
import threading
from concurrent.futures import ThreadPoolExecutor
from HTMLReport import GeneralLogger
from HTMLReport.log.HandlerFactory import HandlerFactory
def worker(message):
logger = GeneralLogger().get_logger(True)
print("Start")
logger.info(message + ' info')
logger.debug(message + ' 调试')
logger.warning(message + ' 警告')
logger.error(message + ' 错误')
print("end")
print("--------------\n", str(threading.current_thread().ident),
HandlerFactory.get_stream_value(), "\n-----------------")
GeneralLogger().set_log_path('report/test.log')
GeneralLogger().set_log_by_thread_log(True)
GeneralLogger().set_log_level(logging.DEBUG)
main_logger = GeneralLogger().get_logger()
main_logger.debug('debug')
main_logger.warning('warning')
main_logger.info('info')
main_logger.error('error')
with ThreadPoolExecutor(10) as pool:
args = 'worker '
for arg in range(2):
pool.submit(worker, args + str(arg))
``` |
{
"source": "joffy/Leanplum-API-Samples",
"score": 3
} |
#### File: joffy/Leanplum-API-Samples/export_push_notification_stats.py
```python
import datetime
import json
import sys
import time
import urllib2
APP_ID = 'YOUR_APP_ID'
CONTENT_KEY = 'YOUR_CONTENT_READONLY_KEY'
EXPORT_KEY = 'YOUR_EXPORT_KEY'
# This is set to the last week. You can customize the dates, or provide specific dates like this:
# datetime.date(2015, 11, 10)
END_DATE = datetime.date.today()
START_DATE = END_DATE - datetime.timedelta(days=7)
API_ENDPOINT = 'https://www.leanplum.com/api'
def call_leanplum_api(action, client_key, request_json=None):
"""Makes a call to the Leanplum API."""
print 'Making Leanplum API call to %s...' % action
if request_json is None:
request_json = {}
request_json.update({
'action': action,
'appId': APP_ID,
'clientKey': client_key,
'apiVersion': '1.0.6',
})
request = urllib2.Request(API_ENDPOINT, json.dumps(request_json))
try:
response = urllib2.urlopen(request)
except urllib2.HTTPError, e:
response = e
response = json.loads(response.read())['response'][0]
if not response['success']:
print 'Error: %s' % response['error']['message']
sys.exit(1)
return response
def get_message_stats():
"""Returns a two-dimensional array containing the message stats."""
# Get messages.
messages = call_leanplum_api('getMessages', CONTENT_KEY)['messages']
pushes = filter(lambda message: message['messageType'] == 'Push Notification', messages)
# Format message events.
# Message event names are encoded like this:
# ".mMESSAGE_ID" for send, ".mMESSAGE_ID Open" for open.
# E.g. .m1234, .m1234 Open
event_names = []
for message in pushes:
message['sendEvent'] = '.m%d' % message['id']
message['openEvent'] = '.m%d Open' % message['id']
event_names.extend([
message['sendEvent'],
message['openEvent']
])
# Export report.
job_id = call_leanplum_api('exportReport', EXPORT_KEY, {
'startDate': START_DATE,
'endDate': END_DATE,
'dataType': 'UserActivity',
'eventNames': event_names
})['jobId']
data = None
while not data:
results = call_leanplum_api('getExportResults', EXPORT_KEY, {
'jobId': job_id,
})
if results['state']['value'] != 'RUNNING':
data = results['data']
break
time.sleep(5)
# Print CSV header.
rows = []
header = ['Date']
for message in pushes:
header.extend([
'%s Sent' % message['name'],
'%s Open' % message['name'],
'%s Open Rate' % message['name'],
])
rows.append(header)
# Print CSV rows.
for date in sorted(data.keys()):
date_data = data.get(date)
row = [date]
def get_occurrences(event_name):
return int((date_data.get(event_name, {})).get('Occurrences', 0))
for message in pushes:
sends = get_occurrences(message['sendEvent'])
opens = get_occurrences(message['openEvent'])
row.extend([str(sends), str(opens), str(100.0 * opens / sends) if sends else ''])
rows.append(row)
return rows
def print_csv(csv):
"""Prints a two-dimensional array as CSV."""
print '\n'.join([','.join([cell for cell in row]) for row in csv])
if __name__ == '__main__':
if sys.version_info < (2, 7, 10):
print 'This script requires Python 2.7.10 or higher.'
sys.exit(1)
print_csv(get_message_stats())
``` |
{
"source": "jofiedler/ansible-icinga2",
"score": 2
} |
#### File: ansible-icinga2/filter_plugins/icinga2.py
```python
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
# from ansible.errors import AnsibleError, AnsibleParserError
# from ansible.plugins.lookup import LookupBase
from ansible.utils.display import Display
# https://docs.ansible.com/ansible/latest/dev_guide/developing_plugins.html
# https://blog.oddbit.com/post/2019-04-25-writing-ansible-filter-plugins/
display = Display()
class FilterModule(object):
"""
Ansible file jinja2 tests
"""
def filters(self):
return {
'primary_master': self.filter_primary,
'reorder_master': self.filter_reorder,
'satellite_zone': self.satellite_zone
}
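    # Usage sketch (editor's illustration) from a Jinja2 template or task;
    # the variable names are assumptions:
    #
    #   {{ icinga2_masters | primary_master }}
    #   {{ icinga2_masters | reorder_master }}
    #   {{ icinga2_satellites | satellite_zone(ansible_fqdn) }}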
"""
return the primary icinga2 master
icinga2_masters:
blackbox.matrix.lan:
type: primary
ip: 192.168.0.5
second:
# overwrite: icinga.xanhaem.de
ip: 192.168.10.10
returns 'blackbox.matrix.lan'
"""
def filter_primary(self, mydict):
seen = ''
count = len(mydict.keys())
display.vv("found: {} entries in {}".format(count, mydict))
if(count == 1):
k = mydict.keys()
keys = list(k)
display.v("key: {}".format(k))
display.v("{}".format(keys))
seen = keys[0]
else:
for k, i in mydict.items():
_type = None
if(isinstance(i, dict)):
_type = i.get('type', None)
if(_type is not None and _type == 'primary'):
seen = k
break
display.vv("found primary: {}".format(seen))
if(seen == ''):
k = mydict.keys()
keys = list(k)
display.v("key: {}".format(k))
display.v("{}".format(keys))
seen = keys[0]
display.v("return primary: {}".format(seen))
return seen
"""
reorganize 'icinga2_masters' dict
icinga2_masters:
blackbox.matrix.lan:
overwrite: icinga.boone-schulz.de
to:
icinga2_masters:
icinga.boone-schulz.de:
"""
def filter_reorder(self, mydict):
seen = ''
count = len(mydict.keys())
display.vv("found: {} entries in {}".format(count, mydict))
seen = self.__transform(mydict)
display.v("return reorder: {}".format(seen))
return seen
"""
"""
def satellite_zone(self, mydict, ansible_fqdn):
seen = ansible_fqdn
count = len(mydict.keys())
display.vv("found: {} entries in {}".format(count, mydict))
display.vv("search zone for '{}'".format(ansible_fqdn))
for zone, zone_entries in mydict.items():
keys = zone_entries.keys()
key_list = list(keys)
found = self.__search(key_list, ansible_fqdn)
display.vv("zone : {} -> values {} ({})".format(zone, key_list, found))
if(found):
seen = zone
display.v("return zone '{}' for {}".format(seen, ansible_fqdn))
return seen
"""
"""
def __transform(self, multilevelDict):
new = {}
for key, value in multilevelDict.items():
display.v("key: {} == value: {}".format(key, value))
if value is None:
value = {}
if value.get('overwrite'):
new_key = value.get('overwrite')
_ = value.pop('overwrite')
new[new_key] = value
else:
new[key] = value
return new
"""
"""
def __search(self, list, fqdn):
for i in range(len(list)):
if list[i] == fqdn:
return True
return False
``` |
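For context, a short sketch of how these filters behave when exercised directly from Python; in a playbook they would be applied as Jinja2 filters (e.g. `{{ icinga2_masters | primary_master }}`). The import path and sample data below are assumptions for illustration only, and ansible itself must be installed because the plugin logs through its `Display` class:

```python
from icinga2 import FilterModule  # assumes the plugin file is importable as icinga2.py

filters = FilterModule().filters()

masters = {
    'blackbox.matrix.lan': {'type': 'primary', 'ip': '192.168.0.5'},
    'second': {'ip': '192.168.10.10'},
}
print(filters['primary_master'](masters))    # -> 'blackbox.matrix.lan'

renamed = filters['reorder_master']({'blackbox.matrix.lan': {'overwrite': 'icinga.boone-schulz.de'}})
print(renamed)                               # -> {'icinga.boone-schulz.de': {}}

zones = {'zone1': {'satellite.example.com': None}}
print(filters['satellite_zone'](zones, 'satellite.example.com'))  # -> 'zone1'
```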
{
"source": "JOFLIX/grapevines",
"score": 2
} |
#### File: openpyxl/reader/style.py
```python
from __future__ import absolute_import
# Copyright (c) 2010-2014 openpyxl
"""Read shared style definitions"""
# package imports
from openpyxl.collections import IndexedList
from openpyxl.xml.functions import fromstring, safe_iterator, localname
from openpyxl.exceptions import MissingNumberFormat
from openpyxl.styles import (
Style,
numbers,
Font,
PatternFill,
GradientFill,
Border,
Side,
Protection,
Alignment,
borders,
)
from openpyxl.styles.colors import COLOR_INDEX, Color
from openpyxl.xml.constants import SHEET_MAIN_NS
from copy import deepcopy
class SharedStylesParser(object):
def __init__(self, xml_source):
self.root = fromstring(xml_source)
self.shared_styles = IndexedList()
self.cond_styles = []
self.style_prop = {}
self.color_index = COLOR_INDEX
def parse(self):
self.parse_custom_num_formats()
self.parse_color_index()
self.style_prop['color_index'] = self.color_index
self.font_list = list(self.parse_fonts())
self.fill_list = list(self.parse_fills())
self.border_list = list(self.parse_borders())
self.parse_dxfs()
self.parse_cell_xfs()
def parse_custom_num_formats(self):
"""Read in custom numeric formatting rules from the shared style table"""
custom_formats = {}
num_fmts = self.root.find('{%s}numFmts' % SHEET_MAIN_NS)
if num_fmts is not None:
num_fmt_nodes = safe_iterator(num_fmts, '{%s}numFmt' % SHEET_MAIN_NS)
for num_fmt_node in num_fmt_nodes:
fmt_id = int(num_fmt_node.get('numFmtId'))
fmt_code = num_fmt_node.get('formatCode').lower()
custom_formats[fmt_id] = fmt_code
self.custom_num_formats = custom_formats
def parse_color_index(self):
"""Read in the list of indexed colors"""
colors = self.root.find('{%s}colors' % SHEET_MAIN_NS)
if colors is not None:
indexedColors = colors.find('{%s}indexedColors' % SHEET_MAIN_NS)
if indexedColors is not None:
color_nodes = safe_iterator(indexedColors, '{%s}rgbColor' % SHEET_MAIN_NS)
self.color_index = [node.get('rgb') for node in color_nodes]
def parse_dxfs(self):
"""Read in the dxfs effects - used by conditional formatting."""
dxf_list = []
dxfs = self.root.find('{%s}dxfs' % SHEET_MAIN_NS)
if dxfs is not None:
nodes = dxfs.findall('{%s}dxf' % SHEET_MAIN_NS)
for dxf in nodes:
dxf_item = {}
font_node = dxf.find('{%s}font' % SHEET_MAIN_NS)
if font_node is not None:
dxf_item['font'] = self.parse_font(font_node)
fill_node = dxf.find('{%s}fill' % SHEET_MAIN_NS)
if fill_node is not None:
dxf_item['fill'] = self.parse_fill(fill_node)
border_node = dxf.find('{%s}border' % SHEET_MAIN_NS)
if border_node is not None:
dxf_item['border'] = self.parse_border(border_node)
dxf_list.append(dxf_item)
self.cond_styles = dxf_list
def parse_fonts(self):
"""Read in the fonts"""
fonts = self.root.find('{%s}fonts' % SHEET_MAIN_NS)
if fonts is not None:
for node in safe_iterator(fonts, '{%s}font' % SHEET_MAIN_NS):
yield self.parse_font(node)
def parse_font(self, font_node):
"""Read individual font"""
font = {}
for child in safe_iterator(font_node):
if child is not font_node:
tag = localname(child)
font[tag] = child.get("val", True)
underline = font_node.find('{%s}u' % SHEET_MAIN_NS)
if underline is not None:
font['u'] = underline.get('val', 'single')
color = font_node.find('{%s}color' % SHEET_MAIN_NS)
if color is not None:
font['color'] = Color(**dict(color.attrib))
return Font(**font)
def parse_fills(self):
"""Read in the list of fills"""
fills = self.root.find('{%s}fills' % SHEET_MAIN_NS)
if fills is not None:
for fill_node in safe_iterator(fills, '{%s}fill' % SHEET_MAIN_NS):
yield self.parse_fill(fill_node)
def parse_fill(self, fill_node):
"""Read individual fill"""
pattern = fill_node.find('{%s}patternFill' % SHEET_MAIN_NS)
gradient = fill_node.find('{%s}gradientFill' % SHEET_MAIN_NS)
if pattern is not None:
return self.parse_pattern_fill(pattern)
if gradient is not None:
return self.parse_gradient_fill(gradient)
def parse_pattern_fill(self, node):
fill = dict(node.attrib)
for child in safe_iterator(node):
if child is not node:
tag = localname(child)
fill[tag] = Color(**dict(child.attrib))
return PatternFill(**fill)
def parse_gradient_fill(self, node):
fill = dict(node.attrib)
color_nodes = safe_iterator(node, "{%s}color" % SHEET_MAIN_NS)
fill['stop'] = tuple(Color(**dict(node.attrib)) for node in color_nodes)
return GradientFill(**fill)
def parse_borders(self):
"""Read in the boarders"""
borders = self.root.find('{%s}borders' % SHEET_MAIN_NS)
if borders is not None:
for border_node in safe_iterator(borders, '{%s}border' % SHEET_MAIN_NS):
yield self.parse_border(border_node)
def parse_border(self, border_node):
"""Read individual border"""
border = dict(border_node.attrib)
for side in ('left', 'right', 'top', 'bottom', 'diagonal'):
node = border_node.find('{%s}%s' % (SHEET_MAIN_NS, side))
if node is not None:
bside = dict(node.attrib)
color = node.find('{%s}color' % SHEET_MAIN_NS)
if color is not None:
bside['color'] = Color(**dict(color.attrib))
border[side] = Side(**bside)
return Border(**border)
def parse_cell_xfs(self):
"""Read styles from the shared style table"""
cell_xfs = self.root.find('{%s}cellXfs' % SHEET_MAIN_NS)
_styles = []
if cell_xfs is None: # can happen on bad OOXML writers (e.g. Gnumeric)
return
builtin_formats = numbers.BUILTIN_FORMATS
xfs = safe_iterator(cell_xfs, '{%s}xf' % SHEET_MAIN_NS)
for index, xf in enumerate(xfs):
_style = {}
num_fmt = xf.get('numFmtId')
if num_fmt is not None:
num_fmt = int(num_fmt)
if num_fmt < 164:
format_code = builtin_formats.get(num_fmt, 'General')
else:
fmt_code = self.custom_num_formats.get(num_fmt)
if fmt_code is not None:
format_code = fmt_code
else:
raise MissingNumberFormat('%s' % num_fmt)
_style['number_format'] = format_code
if bool_attrib(xf, 'applyAlignment'):
alignment = {}
al = xf.find('{%s}alignment' % SHEET_MAIN_NS)
if al is not None:
alignment = al.attrib
_style['alignment'] = Alignment(**alignment)
if bool_attrib(xf, 'applyFont'):
_style['font'] = self.font_list[int(xf.get('fontId'))].copy()
if bool_attrib(xf, 'applyFill'):
_style['fill'] = self.fill_list[int(xf.get('fillId'))].copy()
if bool_attrib(xf, 'applyBorder'):
_style['border'] = self.border_list[int(xf.get('borderId'))].copy()
if bool_attrib(xf, 'applyProtection'):
protection = {}
prot = xf.find('{%s}protection' % SHEET_MAIN_NS)
if prot is not None:
protection.update(prot.attrib)
_style['protection'] = Protection(**protection)
_styles.append(Style(**_style))
self.shared_styles = IndexedList(_styles)
def read_style_table(xml_source):
p = SharedStylesParser(xml_source)
p.parse()
return p.shared_styles, p.color_index, p.cond_styles
def bool_attrib(element, attr):
"""
Cast an XML attribute that should be a boolean to a Python equivalent
None, 'f', '0' and 'false' all cast to False, everything else to true
"""
value = element.get(attr)
if not value or value in ("false", "f", "0"):
return False
return True
```
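As an aside, `parse_cell_xfs` above treats `numFmtId` values below 164 as references to Excel's builtin formats, and everything from 164 up as a custom format that must have been declared in the workbook's `<numFmts>` block. A hedged sketch of that convention, assuming openpyxl is installed; the sample ids and format string are illustrative:

```python
from openpyxl.styles import numbers

# what parse_custom_num_formats() would have collected from <numFmts>
custom_num_formats = {164: 'dd/mm/yyyy hh:mm'}

def resolve_format(num_fmt_id):
    if num_fmt_id < 164:
        return numbers.BUILTIN_FORMATS.get(num_fmt_id, 'General')
    return custom_num_formats[num_fmt_id]  # the real parser raises MissingNumberFormat if absent

print(resolve_format(14))   # a builtin date format ('mm-dd-yy')
print(resolve_format(164))  # the custom format declared above
```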
#### File: stem/descriptor/extrainfo_descriptor.py
```python
import functools
import hashlib
import re
import stem.util.connection
import stem.util.enum
import stem.util.str_tools
from stem.descriptor import (
PGP_BLOCK_END,
Descriptor,
create_signing_key,
_descriptor_content,
_read_until_keywords,
_descriptor_components,
_value,
_values,
_parse_simple_line,
_parse_timestamp_line,
_parse_forty_character_hex,
_parse_key_block,
_append_router_signature,
_random_nickname,
_random_fingerprint,
_random_date,
_random_crypto_blob,
)
try:
# added in python 3.2
from functools import lru_cache
except ImportError:
from stem.util.lru_cache import lru_cache
# known statuses for dirreq-v2-resp and dirreq-v3-resp...
DirResponse = stem.util.enum.Enum(
('OK', 'ok'),
('NOT_ENOUGH_SIGS', 'not-enough-sigs'),
('UNAVAILABLE', 'unavailable'),
('NOT_FOUND', 'not-found'),
('NOT_MODIFIED', 'not-modified'),
('BUSY', 'busy'),
)
# known stats for dirreq-v2/3-direct-dl and dirreq-v2/3-tunneled-dl...
dir_stats = ['complete', 'timeout', 'running', 'min', 'max', 'q1', 'q3', 'md']
dir_stats += ['d%i' % i for i in range(1, 5)]
dir_stats += ['d%i' % i for i in range(6, 10)]
DirStat = stem.util.enum.Enum(*[(stat.upper(), stat) for stat in dir_stats])
# relay descriptors must have exactly one of the following
REQUIRED_FIELDS = (
'extra-info',
'published',
'router-signature',
)
# optional entries that can appear at most once
SINGLE_FIELDS = (
'read-history',
'write-history',
'geoip-db-digest',
'geoip6-db-digest',
'bridge-stats-end',
'bridge-ips',
'dirreq-stats-end',
'dirreq-v2-ips',
'dirreq-v3-ips',
'dirreq-v2-reqs',
'dirreq-v3-reqs',
'dirreq-v2-share',
'dirreq-v3-share',
'dirreq-v2-resp',
'dirreq-v3-resp',
'dirreq-v2-direct-dl',
'dirreq-v3-direct-dl',
'dirreq-v2-tunneled-dl',
'dirreq-v3-tunneled-dl',
'dirreq-read-history',
'dirreq-write-history',
'entry-stats-end',
'entry-ips',
'cell-stats-end',
'cell-processed-cells',
'cell-queued-cells',
'cell-time-in-queue',
'cell-circuits-per-decile',
'conn-bi-direct',
'exit-stats-end',
'exit-kibibytes-written',
'exit-kibibytes-read',
'exit-streams-opened',
)
_timestamp_re = re.compile('^(.*) \(([0-9]+) s\)( .*)?$')
_locale_re = re.compile('^[a-zA-Z0-9\?]{2}$')
def _parse_file(descriptor_file, is_bridge = False, validate = False, **kwargs):
"""
Iterates over the extra-info descriptors in a file.
:param file descriptor_file: file with descriptor content
:param bool is_bridge: parses the file as being a bridge descriptor
:param bool validate: checks the validity of the descriptor's content if
**True**, skips these checks otherwise
:param dict kwargs: additional arguments for the descriptor constructor
:returns: iterator for :class:`~stem.descriptor.extrainfo_descriptor.ExtraInfoDescriptor`
instances in the file
:raises:
* **ValueError** if the contents is malformed and validate is **True**
* **IOError** if the file can't be read
"""
while True:
if not is_bridge:
extrainfo_content = _read_until_keywords('router-signature', descriptor_file)
# we've reached the 'router-signature', now include the pgp style block
block_end_prefix = PGP_BLOCK_END.split(' ', 1)[0]
extrainfo_content += _read_until_keywords(block_end_prefix, descriptor_file, True)
else:
extrainfo_content = _read_until_keywords('router-digest', descriptor_file, True)
if extrainfo_content:
if extrainfo_content[0].startswith(b'@type'):
extrainfo_content = extrainfo_content[1:]
if is_bridge:
yield BridgeExtraInfoDescriptor(bytes.join(b'', extrainfo_content), validate, **kwargs)
else:
yield RelayExtraInfoDescriptor(bytes.join(b'', extrainfo_content), validate, **kwargs)
else:
break # done parsing file
def _parse_timestamp_and_interval(keyword, content):
"""
Parses a 'YYYY-MM-DD HH:MM:SS (NSEC s) *' entry.
:param str keyword: line's keyword
:param str content: line content to be parsed
:returns: **tuple** of the form (timestamp (**datetime**), interval
(**int**), remaining content (**str**))
:raises: **ValueError** if the content is malformed
"""
line = '%s %s' % (keyword, content)
content_match = _timestamp_re.match(content)
if not content_match:
raise ValueError('Malformed %s line: %s' % (keyword, line))
timestamp_str, interval, remainder = content_match.groups()
if remainder:
remainder = remainder[1:] # remove leading space
if not interval.isdigit():
raise ValueError("%s line's interval wasn't a number: %s" % (keyword, line))
try:
timestamp = stem.util.str_tools._parse_timestamp(timestamp_str)
return timestamp, int(interval), remainder
except ValueError:
raise ValueError("%s line's timestamp wasn't parsable: %s" % (keyword, line))
def _parse_extra_info_line(descriptor, entries):
# "extra-info" Nickname Fingerprint
value = _value('extra-info', entries)
extra_info_comp = value.split()
if len(extra_info_comp) < 2:
raise ValueError('Extra-info line must have two values: extra-info %s' % value)
elif not stem.util.tor_tools.is_valid_nickname(extra_info_comp[0]):
raise ValueError("Extra-info line entry isn't a valid nickname: %s" % extra_info_comp[0])
elif not stem.util.tor_tools.is_valid_fingerprint(extra_info_comp[1]):
raise ValueError('Tor relay fingerprints consist of forty hex digits: %s' % extra_info_comp[1])
descriptor.nickname = extra_info_comp[0]
descriptor.fingerprint = extra_info_comp[1]
def _parse_transport_line(descriptor, entries):
# "transport" transportname address:port [arglist]
# Everything after the transportname is scrubbed in published bridge
# descriptors, so we'll never see it in practice.
#
# These entries really only make sense for bridges, but have been seen
# on non-bridges in the wild when the relay operator configured it this
# way.
transports = {}
for value in _values('transport', entries):
name, address, port, args = None, None, None, None
if ' ' not in value:
# scrubbed
name = value
else:
# not scrubbed
value_comp = value.split()
if len(value_comp) < 1:
raise ValueError('Transport line is missing its transport name: transport %s' % value)
elif len(value_comp) < 2:
raise ValueError('Transport line is missing its address:port value: transport %s' % value)
elif ':' not in value_comp[1]:
raise ValueError("Transport line's address:port entry is missing a colon: transport %s" % value)
name = value_comp[0]
address, port_str = value_comp[1].rsplit(':', 1)
      if not stem.util.connection.is_valid_ipv4_address(address) and \
         not stem.util.connection.is_valid_ipv6_address(address, allow_brackets = True):
        raise ValueError('Transport line has a malformed address: transport %s' % value)
      elif not stem.util.connection.is_valid_port(port_str):
        raise ValueError('Transport line has a malformed port: transport %s' % value)
      address = address.lstrip('[').rstrip(']')  # drop brackets from ipv6 addresses
port = int(port_str)
args = value_comp[2:] if len(value_comp) >= 3 else []
transports[name] = (address, port, args)
descriptor.transport = transports
def _parse_cell_circuits_per_decline_line(descriptor, entries):
# "cell-circuits-per-decile" num
value = _value('cell-circuits-per-decile', entries)
if not value.isdigit():
raise ValueError('Non-numeric cell-circuits-per-decile value: %s' % value)
elif int(value) < 0:
raise ValueError('Negative cell-circuits-per-decile value: %s' % value)
descriptor.cell_circuits_per_decile = int(value)
def _parse_padding_counts_line(descriptor, entries):
# "padding-counts" YYYY-MM-DD HH:MM:SS (NSEC s) key=val key=val...
value = _value('padding-counts', entries)
timestamp, interval, remainder = _parse_timestamp_and_interval('padding-counts', value)
entries = {}
for entry in remainder.split(' '):
if '=' not in entry:
raise ValueError('Entries in padding-counts line should be key=value mappings: padding-counts %s' % value)
k, v = entry.split('=', 1)
if not v:
raise ValueError('Entry in padding-counts line had a blank value: padding-counts %s' % value)
entries[k] = int(v) if v.isdigit() else v
setattr(descriptor, 'padding_counts_end', timestamp)
setattr(descriptor, 'padding_counts_interval', interval)
setattr(descriptor, 'padding_counts', entries)
def _parse_dirreq_line(keyword, recognized_counts_attr, unrecognized_counts_attr, descriptor, entries):
value = _value(keyword, entries)
recognized_counts = {}
unrecognized_counts = {}
is_response_stats = keyword in ('dirreq-v2-resp', 'dirreq-v3-resp')
key_set = DirResponse if is_response_stats else DirStat
key_type = 'STATUS' if is_response_stats else 'STAT'
error_msg = '%s lines should contain %s=COUNT mappings: %s %s' % (keyword, key_type, keyword, value)
if value:
for entry in value.split(','):
if '=' not in entry:
raise ValueError(error_msg)
status, count = entry.split('=', 1)
if count.isdigit():
if status in key_set:
recognized_counts[status] = int(count)
else:
unrecognized_counts[status] = int(count)
else:
raise ValueError(error_msg)
setattr(descriptor, recognized_counts_attr, recognized_counts)
setattr(descriptor, unrecognized_counts_attr, unrecognized_counts)
def _parse_dirreq_share_line(keyword, attribute, descriptor, entries):
value = _value(keyword, entries)
if not value.endswith('%'):
raise ValueError('%s lines should be a percentage: %s %s' % (keyword, keyword, value))
elif float(value[:-1]) < 0:
raise ValueError('Negative percentage value: %s %s' % (keyword, value))
# bug means it might be above 100%: https://lists.torproject.org/pipermail/tor-dev/2012-June/003679.html
setattr(descriptor, attribute, float(value[:-1]) / 100)
def _parse_cell_line(keyword, attribute, descriptor, entries):
# "<keyword>" num,...,num
value = _value(keyword, entries)
entries, exc = [], None
if value:
for entry in value.split(','):
try:
# Values should be positive but as discussed in ticket #5849
# there was a bug around this. It was fixed in tor 0.2.2.1.
entries.append(float(entry))
except ValueError:
exc = ValueError('Non-numeric entry in %s listing: %s %s' % (keyword, keyword, value))
setattr(descriptor, attribute, entries)
if exc:
raise exc
def _parse_timestamp_and_interval_line(keyword, end_attribute, interval_attribute, descriptor, entries):
# "<keyword>" YYYY-MM-DD HH:MM:SS (NSEC s)
timestamp, interval, _ = _parse_timestamp_and_interval(keyword, _value(keyword, entries))
setattr(descriptor, end_attribute, timestamp)
setattr(descriptor, interval_attribute, interval)
def _parse_conn_bi_direct_line(descriptor, entries):
# "conn-bi-direct" YYYY-MM-DD HH:MM:SS (NSEC s) BELOW,READ,WRITE,BOTH
value = _value('conn-bi-direct', entries)
timestamp, interval, remainder = _parse_timestamp_and_interval('conn-bi-direct', value)
stats = remainder.split(',')
if len(stats) != 4 or not (stats[0].isdigit() and stats[1].isdigit() and stats[2].isdigit() and stats[3].isdigit()):
raise ValueError('conn-bi-direct line should end with four numeric values: conn-bi-direct %s' % value)
descriptor.conn_bi_direct_end = timestamp
descriptor.conn_bi_direct_interval = interval
descriptor.conn_bi_direct_below = int(stats[0])
descriptor.conn_bi_direct_read = int(stats[1])
descriptor.conn_bi_direct_write = int(stats[2])
descriptor.conn_bi_direct_both = int(stats[3])
def _parse_history_line(keyword, end_attribute, interval_attribute, values_attribute, descriptor, entries):
# "<keyword>" YYYY-MM-DD HH:MM:SS (NSEC s) NUM,NUM,NUM,NUM,NUM...
value = _value(keyword, entries)
timestamp, interval, remainder = _parse_timestamp_and_interval(keyword, value)
history_values = []
if remainder:
try:
history_values = [int(entry) for entry in remainder.split(',')]
except ValueError:
raise ValueError('%s line has non-numeric values: %s %s' % (keyword, keyword, value))
setattr(descriptor, end_attribute, timestamp)
setattr(descriptor, interval_attribute, interval)
setattr(descriptor, values_attribute, history_values)
def _parse_port_count_line(keyword, attribute, descriptor, entries):
# "<keyword>" port=N,port=N,...
value, port_mappings = _value(keyword, entries), {}
error_msg = 'Entries in %s line should only be PORT=N entries: %s %s' % (keyword, keyword, value)
if value:
for entry in value.split(','):
if '=' not in entry:
raise ValueError(error_msg)
port, stat = entry.split('=', 1)
if (port == 'other' or stem.util.connection.is_valid_port(port)) and stat.isdigit():
if port != 'other':
port = int(port)
port_mappings[port] = int(stat)
else:
raise ValueError(error_msg)
setattr(descriptor, attribute, port_mappings)
def _parse_geoip_to_count_line(keyword, attribute, descriptor, entries):
# "<keyword>" CC=N,CC=N,...
#
# The maxmind geoip (https://www.maxmind.com/app/iso3166) has numeric
# locale codes for some special values, for instance...
# A1,"Anonymous Proxy"
# A2,"Satellite Provider"
# ??,"Unknown"
value, locale_usage = _value(keyword, entries), {}
error_msg = 'Entries in %s line should only be CC=N entries: %s %s' % (keyword, keyword, value)
if value:
for entry in value.split(','):
if '=' not in entry:
raise ValueError(error_msg)
locale, count = entry.split('=', 1)
if _locale_re.match(locale) and count.isdigit():
locale_usage[locale] = int(count)
else:
raise ValueError(error_msg)
setattr(descriptor, attribute, locale_usage)
def _parse_bridge_ip_versions_line(descriptor, entries):
value, ip_versions = _value('bridge-ip-versions', entries), {}
if value:
for entry in value.split(','):
if '=' not in entry:
raise stem.ProtocolError("The bridge-ip-versions should be a comma separated listing of '<protocol>=<count>' mappings: bridge-ip-versions %s" % value)
protocol, count = entry.split('=', 1)
if not count.isdigit():
raise stem.ProtocolError('IP protocol count was non-numeric (%s): bridge-ip-versions %s' % (count, value))
ip_versions[protocol] = int(count)
descriptor.ip_versions = ip_versions
def _parse_bridge_ip_transports_line(descriptor, entries):
value, ip_transports = _value('bridge-ip-transports', entries), {}
if value:
for entry in value.split(','):
if '=' not in entry:
raise stem.ProtocolError("The bridge-ip-transports should be a comma separated listing of '<protocol>=<count>' mappings: bridge-ip-transports %s" % value)
protocol, count = entry.split('=', 1)
if not count.isdigit():
raise stem.ProtocolError('Transport count was non-numeric (%s): bridge-ip-transports %s' % (count, value))
ip_transports[protocol] = int(count)
descriptor.ip_transports = ip_transports
def _parse_hs_stats(keyword, stat_attribute, extra_attribute, descriptor, entries):
# "<keyword>" num key=val key=val...
value, stat, extra = _value(keyword, entries), None, {}
if value is not None:
value_comp = value.split()
if not value_comp:
raise ValueError("'%s' line was blank" % keyword)
try:
stat = int(value_comp[0])
except ValueError:
raise ValueError("'%s' stat was non-numeric (%s): %s %s" % (keyword, value_comp[0], keyword, value))
for entry in value_comp[1:]:
if '=' not in entry:
raise ValueError('Entries after the stat in %s lines should only be key=val entries: %s %s' % (keyword, keyword, value))
key, val = entry.split('=', 1)
extra[key] = val
setattr(descriptor, stat_attribute, stat)
setattr(descriptor, extra_attribute, extra)
_parse_identity_ed25519_line = _parse_key_block('identity-ed25519', 'ed25519_certificate', 'ED25519 CERT')
_parse_master_key_ed25519_line = _parse_simple_line('master-key-ed25519', 'ed25519_certificate_hash')
_parse_geoip_db_digest_line = _parse_forty_character_hex('geoip-db-digest', 'geoip_db_digest')
_parse_geoip6_db_digest_line = _parse_forty_character_hex('geoip6-db-digest', 'geoip6_db_digest')
_parse_dirreq_v2_resp_line = functools.partial(_parse_dirreq_line, 'dirreq-v2-resp', 'dir_v2_responses', 'dir_v2_responses_unknown')
_parse_dirreq_v3_resp_line = functools.partial(_parse_dirreq_line, 'dirreq-v3-resp', 'dir_v3_responses', 'dir_v3_responses_unknown')
_parse_dirreq_v2_direct_dl_line = functools.partial(_parse_dirreq_line, 'dirreq-v2-direct-dl', 'dir_v2_direct_dl', 'dir_v2_direct_dl_unknown')
_parse_dirreq_v3_direct_dl_line = functools.partial(_parse_dirreq_line, 'dirreq-v3-direct-dl', 'dir_v3_direct_dl', 'dir_v3_direct_dl_unknown')
_parse_dirreq_v2_tunneled_dl_line = functools.partial(_parse_dirreq_line, 'dirreq-v2-tunneled-dl', 'dir_v2_tunneled_dl', 'dir_v2_tunneled_dl_unknown')
_parse_dirreq_v3_tunneled_dl_line = functools.partial(_parse_dirreq_line, 'dirreq-v3-tunneled-dl', 'dir_v3_tunneled_dl', 'dir_v3_tunneled_dl_unknown')
_parse_dirreq_v2_share_line = functools.partial(_parse_dirreq_share_line, 'dirreq-v2-share', 'dir_v2_share')
_parse_dirreq_v3_share_line = functools.partial(_parse_dirreq_share_line, 'dirreq-v3-share', 'dir_v3_share')
_parse_cell_processed_cells_line = functools.partial(_parse_cell_line, 'cell-processed-cells', 'cell_processed_cells')
_parse_cell_queued_cells_line = functools.partial(_parse_cell_line, 'cell-queued-cells', 'cell_queued_cells')
_parse_cell_time_in_queue_line = functools.partial(_parse_cell_line, 'cell-time-in-queue', 'cell_time_in_queue')
_parse_published_line = _parse_timestamp_line('published', 'published')
_parse_geoip_start_time_line = _parse_timestamp_line('geoip-start-time', 'geoip_start_time')
_parse_cell_stats_end_line = functools.partial(_parse_timestamp_and_interval_line, 'cell-stats-end', 'cell_stats_end', 'cell_stats_interval')
_parse_entry_stats_end_line = functools.partial(_parse_timestamp_and_interval_line, 'entry-stats-end', 'entry_stats_end', 'entry_stats_interval')
_parse_exit_stats_end_line = functools.partial(_parse_timestamp_and_interval_line, 'exit-stats-end', 'exit_stats_end', 'exit_stats_interval')
_parse_bridge_stats_end_line = functools.partial(_parse_timestamp_and_interval_line, 'bridge-stats-end', 'bridge_stats_end', 'bridge_stats_interval')
_parse_dirreq_stats_end_line = functools.partial(_parse_timestamp_and_interval_line, 'dirreq-stats-end', 'dir_stats_end', 'dir_stats_interval')
_parse_read_history_line = functools.partial(_parse_history_line, 'read-history', 'read_history_end', 'read_history_interval', 'read_history_values')
_parse_write_history_line = functools.partial(_parse_history_line, 'write-history', 'write_history_end', 'write_history_interval', 'write_history_values')
_parse_dirreq_read_history_line = functools.partial(_parse_history_line, 'dirreq-read-history', 'dir_read_history_end', 'dir_read_history_interval', 'dir_read_history_values')
_parse_dirreq_write_history_line = functools.partial(_parse_history_line, 'dirreq-write-history', 'dir_write_history_end', 'dir_write_history_interval', 'dir_write_history_values')
_parse_exit_kibibytes_written_line = functools.partial(_parse_port_count_line, 'exit-kibibytes-written', 'exit_kibibytes_written')
_parse_exit_kibibytes_read_line = functools.partial(_parse_port_count_line, 'exit-kibibytes-read', 'exit_kibibytes_read')
_parse_exit_streams_opened_line = functools.partial(_parse_port_count_line, 'exit-streams-opened', 'exit_streams_opened')
_parse_hidden_service_stats_end_line = _parse_timestamp_line('hidserv-stats-end', 'hs_stats_end')
_parse_hidden_service_rend_relayed_cells_line = functools.partial(_parse_hs_stats, 'hidserv-rend-relayed-cells', 'hs_rend_cells', 'hs_rend_cells_attr')
_parse_hidden_service_dir_onions_seen_line = functools.partial(_parse_hs_stats, 'hidserv-dir-onions-seen', 'hs_dir_onions_seen', 'hs_dir_onions_seen_attr')
_parse_dirreq_v2_ips_line = functools.partial(_parse_geoip_to_count_line, 'dirreq-v2-ips', 'dir_v2_ips')
_parse_dirreq_v3_ips_line = functools.partial(_parse_geoip_to_count_line, 'dirreq-v3-ips', 'dir_v3_ips')
_parse_dirreq_v2_reqs_line = functools.partial(_parse_geoip_to_count_line, 'dirreq-v2-reqs', 'dir_v2_requests')
_parse_dirreq_v3_reqs_line = functools.partial(_parse_geoip_to_count_line, 'dirreq-v3-reqs', 'dir_v3_requests')
_parse_geoip_client_origins_line = functools.partial(_parse_geoip_to_count_line, 'geoip-client-origins', 'geoip_client_origins')
_parse_entry_ips_line = functools.partial(_parse_geoip_to_count_line, 'entry-ips', 'entry_ips')
_parse_bridge_ips_line = functools.partial(_parse_geoip_to_count_line, 'bridge-ips', 'bridge_ips')
_parse_router_sig_ed25519_line = _parse_simple_line('router-sig-ed25519', 'ed25519_signature')
_parse_router_digest_sha256_line = _parse_simple_line('router-digest-sha256', 'router_digest_sha256')
_parse_router_digest_line = _parse_forty_character_hex('router-digest', '_digest')
_parse_router_signature_line = _parse_key_block('router-signature', 'signature', 'SIGNATURE')
class ExtraInfoDescriptor(Descriptor):
"""
Extra-info descriptor document.
:var str nickname: **\*** relay's nickname
:var str fingerprint: **\*** identity key fingerprint
:var datetime published: **\*** time in UTC when this descriptor was made
:var str geoip_db_digest: sha1 of the geoIP database file for IPv4 addresses
:var str geoip6_db_digest: sha1 of the geoIP database file for IPv6 addresses
:var dict transport: **\*** mapping of transport methods to their (address,
port, args) tuple, these usually appear on bridges in which case all of
those are **None**
**Bi-directional connection usage:**
:var datetime conn_bi_direct_end: end of the sampling interval
:var int conn_bi_direct_interval: seconds per interval
:var int conn_bi_direct_below: connections that read/wrote less than 20 KiB
:var int conn_bi_direct_read: connections that read at least 10x more than wrote
:var int conn_bi_direct_write: connections that wrote at least 10x more than read
:var int conn_bi_direct_both: remaining connections
**Bytes read/written for relayed traffic:**
:var datetime read_history_end: end of the sampling interval
:var int read_history_interval: seconds per interval
:var list read_history_values: bytes read during each interval
:var datetime write_history_end: end of the sampling interval
:var int write_history_interval: seconds per interval
:var list write_history_values: bytes written during each interval
**Cell relaying statistics:**
:var datetime cell_stats_end: end of the period when stats were gathered
:var int cell_stats_interval: length in seconds of the interval
:var list cell_processed_cells: measurement of processed cells per circuit
:var list cell_queued_cells: measurement of queued cells per circuit
:var list cell_time_in_queue: mean enqueued time in milliseconds for cells
:var int cell_circuits_per_decile: mean number of circuits in a decile
**Directory Mirror Attributes:**
:var datetime dir_stats_end: end of the period when stats were gathered
:var int dir_stats_interval: length in seconds of the interval
:var dict dir_v2_ips: mapping of locales to rounded count of requester ips
:var dict dir_v3_ips: mapping of locales to rounded count of requester ips
:var float dir_v2_share: percent of total directory traffic it expects to serve
:var float dir_v3_share: percent of total directory traffic it expects to serve
:var dict dir_v2_requests: mapping of locales to rounded count of requests
:var dict dir_v3_requests: mapping of locales to rounded count of requests
:var dict dir_v2_responses: mapping of :data:`~stem.descriptor.extrainfo_descriptor.DirResponse` to their rounded count
:var dict dir_v3_responses: mapping of :data:`~stem.descriptor.extrainfo_descriptor.DirResponse` to their rounded count
:var dict dir_v2_responses_unknown: mapping of unrecognized statuses to their count
:var dict dir_v3_responses_unknown: mapping of unrecognized statuses to their count
:var dict dir_v2_direct_dl: mapping of :data:`~stem.descriptor.extrainfo_descriptor.DirStat` to measurement over DirPort
:var dict dir_v3_direct_dl: mapping of :data:`~stem.descriptor.extrainfo_descriptor.DirStat` to measurement over DirPort
:var dict dir_v2_direct_dl_unknown: mapping of unrecognized stats to their measurement
:var dict dir_v3_direct_dl_unknown: mapping of unrecognized stats to their measurement
:var dict dir_v2_tunneled_dl: mapping of :data:`~stem.descriptor.extrainfo_descriptor.DirStat` to measurement over ORPort
:var dict dir_v3_tunneled_dl: mapping of :data:`~stem.descriptor.extrainfo_descriptor.DirStat` to measurement over ORPort
:var dict dir_v2_tunneled_dl_unknown: mapping of unrecognized stats to their measurement
:var dict dir_v3_tunneled_dl_unknown: mapping of unrecognized stats to their measurement
**Bytes read/written for directory mirroring:**
:var datetime dir_read_history_end: end of the sampling interval
:var int dir_read_history_interval: seconds per interval
:var list dir_read_history_values: bytes read during each interval
:var datetime dir_write_history_end: end of the sampling interval
:var int dir_write_history_interval: seconds per interval
  :var list dir_write_history_values: bytes written during each interval
**Guard Attributes:**
:var datetime entry_stats_end: end of the period when stats were gathered
:var int entry_stats_interval: length in seconds of the interval
:var dict entry_ips: mapping of locales to rounded count of unique user ips
**Exit Attributes:**
:var datetime exit_stats_end: end of the period when stats were gathered
:var int exit_stats_interval: length in seconds of the interval
:var dict exit_kibibytes_written: traffic per port (keys are ints or 'other')
:var dict exit_kibibytes_read: traffic per port (keys are ints or 'other')
:var dict exit_streams_opened: streams per port (keys are ints or 'other')
**Hidden Service Attributes:**
:var datetime hs_stats_end: end of the sampling interval
:var int hs_rend_cells: rounded count of the RENDEZVOUS1 cells seen
  :var dict hs_rend_cells_attr: **\*** attributes provided for the hs_rend_cells
  :var int hs_dir_onions_seen: rounded count of the identities seen
  :var dict hs_dir_onions_seen_attr: **\*** attributes provided for the hs_dir_onions_seen
**Padding Count Attributes:**
:var dict padding_counts: **\*** padding parameters
:var datetime padding_counts_end: end of the period when padding data is being collected
:var int padding_counts_interval: length in seconds of the interval
**Bridge Attributes:**
:var datetime bridge_stats_end: end of the period when stats were gathered
:var int bridge_stats_interval: length in seconds of the interval
:var dict bridge_ips: mapping of locales to rounded count of unique user ips
:var datetime geoip_start_time: replaced by bridge_stats_end (deprecated)
:var dict geoip_client_origins: replaced by bridge_ips (deprecated)
:var dict ip_versions: mapping of ip protocols to a rounded count for the number of users
  :var dict ip_transports: mapping of ip transports to a count for the number of users
**\*** attribute is either required when we're parsed with validation or has
a default value, others are left as **None** if undefined
.. versionchanged:: 1.4.0
Added the hs_stats_end, hs_rend_cells, hs_rend_cells_attr,
hs_dir_onions_seen, and hs_dir_onions_seen_attr attributes.
.. versionchanged:: 1.6.0
Added the padding_counts, padding_counts_end, and padding_counts_interval
attributes.
"""
ATTRIBUTES = {
'nickname': (None, _parse_extra_info_line),
'fingerprint': (None, _parse_extra_info_line),
'published': (None, _parse_published_line),
'geoip_db_digest': (None, _parse_geoip_db_digest_line),
'geoip6_db_digest': (None, _parse_geoip6_db_digest_line),
'transport': ({}, _parse_transport_line),
'conn_bi_direct_end': (None, _parse_conn_bi_direct_line),
'conn_bi_direct_interval': (None, _parse_conn_bi_direct_line),
'conn_bi_direct_below': (None, _parse_conn_bi_direct_line),
'conn_bi_direct_read': (None, _parse_conn_bi_direct_line),
'conn_bi_direct_write': (None, _parse_conn_bi_direct_line),
'conn_bi_direct_both': (None, _parse_conn_bi_direct_line),
'read_history_end': (None, _parse_read_history_line),
'read_history_interval': (None, _parse_read_history_line),
'read_history_values': (None, _parse_read_history_line),
'write_history_end': (None, _parse_write_history_line),
'write_history_interval': (None, _parse_write_history_line),
'write_history_values': (None, _parse_write_history_line),
'cell_stats_end': (None, _parse_cell_stats_end_line),
'cell_stats_interval': (None, _parse_cell_stats_end_line),
'cell_processed_cells': (None, _parse_cell_processed_cells_line),
'cell_queued_cells': (None, _parse_cell_queued_cells_line),
'cell_time_in_queue': (None, _parse_cell_time_in_queue_line),
'cell_circuits_per_decile': (None, _parse_cell_circuits_per_decline_line),
'dir_stats_end': (None, _parse_dirreq_stats_end_line),
'dir_stats_interval': (None, _parse_dirreq_stats_end_line),
'dir_v2_ips': (None, _parse_dirreq_v2_ips_line),
'dir_v3_ips': (None, _parse_dirreq_v3_ips_line),
'dir_v2_share': (None, _parse_dirreq_v2_share_line),
'dir_v3_share': (None, _parse_dirreq_v3_share_line),
'dir_v2_requests': (None, _parse_dirreq_v2_reqs_line),
'dir_v3_requests': (None, _parse_dirreq_v3_reqs_line),
'dir_v2_responses': (None, _parse_dirreq_v2_resp_line),
'dir_v3_responses': (None, _parse_dirreq_v3_resp_line),
'dir_v2_responses_unknown': (None, _parse_dirreq_v2_resp_line),
'dir_v3_responses_unknown': (None, _parse_dirreq_v3_resp_line),
'dir_v2_direct_dl': (None, _parse_dirreq_v2_direct_dl_line),
'dir_v3_direct_dl': (None, _parse_dirreq_v3_direct_dl_line),
'dir_v2_direct_dl_unknown': (None, _parse_dirreq_v2_direct_dl_line),
'dir_v3_direct_dl_unknown': (None, _parse_dirreq_v3_direct_dl_line),
'dir_v2_tunneled_dl': (None, _parse_dirreq_v2_tunneled_dl_line),
'dir_v3_tunneled_dl': (None, _parse_dirreq_v3_tunneled_dl_line),
'dir_v2_tunneled_dl_unknown': (None, _parse_dirreq_v2_tunneled_dl_line),
'dir_v3_tunneled_dl_unknown': (None, _parse_dirreq_v3_tunneled_dl_line),
'dir_read_history_end': (None, _parse_dirreq_read_history_line),
'dir_read_history_interval': (None, _parse_dirreq_read_history_line),
'dir_read_history_values': (None, _parse_dirreq_read_history_line),
'dir_write_history_end': (None, _parse_dirreq_write_history_line),
'dir_write_history_interval': (None, _parse_dirreq_write_history_line),
'dir_write_history_values': (None, _parse_dirreq_write_history_line),
'entry_stats_end': (None, _parse_entry_stats_end_line),
'entry_stats_interval': (None, _parse_entry_stats_end_line),
'entry_ips': (None, _parse_entry_ips_line),
'exit_stats_end': (None, _parse_exit_stats_end_line),
'exit_stats_interval': (None, _parse_exit_stats_end_line),
'exit_kibibytes_written': (None, _parse_exit_kibibytes_written_line),
'exit_kibibytes_read': (None, _parse_exit_kibibytes_read_line),
'exit_streams_opened': (None, _parse_exit_streams_opened_line),
'hs_stats_end': (None, _parse_hidden_service_stats_end_line),
'hs_rend_cells': (None, _parse_hidden_service_rend_relayed_cells_line),
'hs_rend_cells_attr': ({}, _parse_hidden_service_rend_relayed_cells_line),
'hs_dir_onions_seen': (None, _parse_hidden_service_dir_onions_seen_line),
'hs_dir_onions_seen_attr': ({}, _parse_hidden_service_dir_onions_seen_line),
'padding_counts': ({}, _parse_padding_counts_line),
'padding_counts_end': (None, _parse_padding_counts_line),
'padding_counts_interval': (None, _parse_padding_counts_line),
'bridge_stats_end': (None, _parse_bridge_stats_end_line),
'bridge_stats_interval': (None, _parse_bridge_stats_end_line),
'bridge_ips': (None, _parse_bridge_ips_line),
'geoip_start_time': (None, _parse_geoip_start_time_line),
'geoip_client_origins': (None, _parse_geoip_client_origins_line),
'ip_versions': (None, _parse_bridge_ip_versions_line),
'ip_transports': (None, _parse_bridge_ip_transports_line),
}
PARSER_FOR_LINE = {
'extra-info': _parse_extra_info_line,
'geoip-db-digest': _parse_geoip_db_digest_line,
'geoip6-db-digest': _parse_geoip6_db_digest_line,
'transport': _parse_transport_line,
'cell-circuits-per-decile': _parse_cell_circuits_per_decline_line,
'dirreq-v2-resp': _parse_dirreq_v2_resp_line,
'dirreq-v3-resp': _parse_dirreq_v3_resp_line,
'dirreq-v2-direct-dl': _parse_dirreq_v2_direct_dl_line,
'dirreq-v3-direct-dl': _parse_dirreq_v3_direct_dl_line,
'dirreq-v2-tunneled-dl': _parse_dirreq_v2_tunneled_dl_line,
'dirreq-v3-tunneled-dl': _parse_dirreq_v3_tunneled_dl_line,
'dirreq-v2-share': _parse_dirreq_v2_share_line,
'dirreq-v3-share': _parse_dirreq_v3_share_line,
'cell-processed-cells': _parse_cell_processed_cells_line,
'cell-queued-cells': _parse_cell_queued_cells_line,
'cell-time-in-queue': _parse_cell_time_in_queue_line,
'published': _parse_published_line,
'geoip-start-time': _parse_geoip_start_time_line,
'cell-stats-end': _parse_cell_stats_end_line,
'entry-stats-end': _parse_entry_stats_end_line,
'exit-stats-end': _parse_exit_stats_end_line,
'bridge-stats-end': _parse_bridge_stats_end_line,
'dirreq-stats-end': _parse_dirreq_stats_end_line,
'conn-bi-direct': _parse_conn_bi_direct_line,
'read-history': _parse_read_history_line,
'write-history': _parse_write_history_line,
'dirreq-read-history': _parse_dirreq_read_history_line,
'dirreq-write-history': _parse_dirreq_write_history_line,
'exit-kibibytes-written': _parse_exit_kibibytes_written_line,
'exit-kibibytes-read': _parse_exit_kibibytes_read_line,
'exit-streams-opened': _parse_exit_streams_opened_line,
'hidserv-stats-end': _parse_hidden_service_stats_end_line,
'hidserv-rend-relayed-cells': _parse_hidden_service_rend_relayed_cells_line,
'hidserv-dir-onions-seen': _parse_hidden_service_dir_onions_seen_line,
'padding-counts': _parse_padding_counts_line,
'dirreq-v2-ips': _parse_dirreq_v2_ips_line,
'dirreq-v3-ips': _parse_dirreq_v3_ips_line,
'dirreq-v2-reqs': _parse_dirreq_v2_reqs_line,
'dirreq-v3-reqs': _parse_dirreq_v3_reqs_line,
'geoip-client-origins': _parse_geoip_client_origins_line,
'entry-ips': _parse_entry_ips_line,
'bridge-ips': _parse_bridge_ips_line,
'bridge-ip-versions': _parse_bridge_ip_versions_line,
'bridge-ip-transports': _parse_bridge_ip_transports_line,
}
def __init__(self, raw_contents, validate = False):
"""
Extra-info descriptor constructor. By default this validates the
descriptor's content as it's parsed. This validation can be disabled to
either improve performance or be accepting of malformed data.
:param str raw_contents: extra-info content provided by the relay
:param bool validate: checks the validity of the extra-info descriptor if
**True**, skips these checks otherwise
:raises: **ValueError** if the contents is malformed and validate is True
"""
super(ExtraInfoDescriptor, self).__init__(raw_contents, lazy_load = not validate)
entries = _descriptor_components(raw_contents, validate)
if validate:
for keyword in self._required_fields():
if keyword not in entries:
raise ValueError("Extra-info descriptor must have a '%s' entry" % keyword)
for keyword in self._required_fields() + SINGLE_FIELDS:
if keyword in entries and len(entries[keyword]) > 1:
raise ValueError("The '%s' entry can only appear once in an extra-info descriptor" % keyword)
expected_first_keyword = self._first_keyword()
if expected_first_keyword and expected_first_keyword != list(entries.keys())[0]:
raise ValueError("Extra-info descriptor must start with a '%s' entry" % expected_first_keyword)
expected_last_keyword = self._last_keyword()
if expected_last_keyword and expected_last_keyword != list(entries.keys())[-1]:
raise ValueError("Descriptor must end with a '%s' entry" % expected_last_keyword)
self._parse(entries, validate)
else:
self._entries = entries
def digest(self):
"""
Provides the upper-case hex encoded sha1 of our content. This value is part
of the server descriptor entry for this relay.
:returns: **str** with the upper-case hex digest value for this server
descriptor
"""
raise NotImplementedError('Unsupported Operation: this should be implemented by the ExtraInfoDescriptor subclass')
def _required_fields(self):
return REQUIRED_FIELDS
def _first_keyword(self):
return 'extra-info'
def _last_keyword(self):
return 'router-signature'
class RelayExtraInfoDescriptor(ExtraInfoDescriptor):
"""
Relay extra-info descriptor, constructed from data such as that provided by
'GETINFO extra-info/digest/\*', cached descriptors, and metrics
(`specification <https://gitweb.torproject.org/torspec.git/tree/dir-spec.txt>`_).
  :var str ed25519_certificate: base64 encoded ed25519 certificate
  :var str ed25519_signature: signature of this document using ed25519
:var str signature: **\*** signature for this extrainfo descriptor
**\*** attribute is required when we're parsed with validation
.. versionchanged:: 1.5.0
Added the ed25519_certificate and ed25519_signature attributes.
"""
ATTRIBUTES = dict(ExtraInfoDescriptor.ATTRIBUTES, **{
'ed25519_certificate': (None, _parse_identity_ed25519_line),
'ed25519_signature': (None, _parse_router_sig_ed25519_line),
'signature': (None, _parse_router_signature_line),
})
PARSER_FOR_LINE = dict(ExtraInfoDescriptor.PARSER_FOR_LINE, **{
'identity-ed25519': _parse_identity_ed25519_line,
'router-sig-ed25519': _parse_router_sig_ed25519_line,
'router-signature': _parse_router_signature_line,
})
@classmethod
def content(cls, attr = None, exclude = (), sign = False, signing_key = None):
base_header = (
('extra-info', '%s %s' % (_random_nickname(), _random_fingerprint())),
('published', _random_date()),
)
if signing_key:
sign = True
if sign:
if attr and 'router-signature' in attr:
raise ValueError('Cannot sign the descriptor if a router-signature has been provided')
if signing_key is None:
signing_key = create_signing_key()
content = _descriptor_content(attr, exclude, base_header) + b'\nrouter-signature\n'
return _append_router_signature(content, signing_key.private)
else:
return _descriptor_content(attr, exclude, base_header, (
('router-signature', _random_crypto_blob('SIGNATURE')),
))
@classmethod
def create(cls, attr = None, exclude = (), validate = True, sign = False, signing_key = None):
return cls(cls.content(attr, exclude, sign, signing_key), validate = validate)
@lru_cache()
def digest(self):
# our digest is calculated from everything except our signature
raw_content, ending = str(self), '\nrouter-signature\n'
raw_content = raw_content[:raw_content.find(ending) + len(ending)]
return hashlib.sha1(stem.util.str_tools._to_bytes(raw_content)).hexdigest().upper()
class BridgeExtraInfoDescriptor(ExtraInfoDescriptor):
"""
Bridge extra-info descriptor (`bridge descriptor specification
<https://metrics.torproject.org/collector.html#bridge-descriptors>`_)
:var str ed25519_certificate_hash: sha256 hash of the original identity-ed25519
:var str router_digest_sha256: sha256 digest of this document
.. versionchanged:: 1.5.0
Added the ed25519_certificate_hash and router_digest_sha256 attributes.
"""
ATTRIBUTES = dict(ExtraInfoDescriptor.ATTRIBUTES, **{
'ed25519_certificate_hash': (None, _parse_master_key_ed25519_line),
'router_digest_sha256': (None, _parse_router_digest_sha256_line),
'_digest': (None, _parse_router_digest_line),
})
PARSER_FOR_LINE = dict(ExtraInfoDescriptor.PARSER_FOR_LINE, **{
'master-key-ed25519': _parse_master_key_ed25519_line,
'router-digest-sha256': _parse_router_digest_sha256_line,
'router-digest': _parse_router_digest_line,
})
@classmethod
def content(cls, attr = None, exclude = (), sign = False):
if sign:
raise NotImplementedError('Signing of %s not implemented' % cls.__name__)
return _descriptor_content(attr, exclude, (
('extra-info', 'ec2bridgereaac65a3 %s' % _random_fingerprint()),
('published', _random_date()),
), (
('router-digest', _random_fingerprint()),
))
def digest(self):
return self._digest
def _required_fields(self):
excluded_fields = [
'router-signature',
]
included_fields = [
'router-digest',
]
return tuple(included_fields + [f for f in REQUIRED_FIELDS if f not in excluded_fields])
def _last_keyword(self):
return None
```
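A brief usage sketch for the classes above, assuming a reasonably recent stem release: the `create` helper shown in the source builds a descriptor with randomized placeholder fields, which is convenient for tests. The `published` value below is just an example override:

```python
from stem.descriptor.extrainfo_descriptor import RelayExtraInfoDescriptor

desc = RelayExtraInfoDescriptor.create({'published': '2012-05-03 12:07:50'})

print(desc.nickname)   # random nickname filled in by create()
print(desc.published)  # datetime.datetime(2012, 5, 3, 12, 7, 50)
print(desc.digest())   # upper-case hex sha1 of everything up to the router-signature
```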
#### File: stem/descriptor/networkstatus.py
```python
import collections
import io
import stem.descriptor.router_status_entry
import stem.util.str_tools
import stem.util.tor_tools
import stem.version
from stem.descriptor import (
PGP_BLOCK_END,
Descriptor,
DocumentHandler,
_descriptor_content,
_descriptor_components,
_read_until_keywords,
_value,
_parse_simple_line,
_parse_if_present,
_parse_timestamp_line,
_parse_forty_character_hex,
_parse_protocol_line,
_parse_key_block,
_random_nickname,
_random_fingerprint,
_random_ipv4_address,
_random_date,
_random_crypto_blob,
)
from stem.descriptor.router_status_entry import (
RouterStatusEntryV2,
RouterStatusEntryV3,
RouterStatusEntryMicroV3,
)
# Version 2 network status document fields, tuples of the form...
# (keyword, is_mandatory)
NETWORK_STATUS_V2_FIELDS = (
('network-status-version', True),
('dir-source', True),
('fingerprint', True),
('contact', True),
('dir-signing-key', True),
('client-versions', False),
('server-versions', False),
('published', True),
('dir-options', False),
('directory-signature', True),
)
# Network status documents are either a 'vote' or 'consensus', with different
# mandatory fields for each. Both though require that their fields appear in a
# specific order. This is an ordered listing of the following...
#
# (field, in_votes, in_consensus, is_mandatory)
HEADER_STATUS_DOCUMENT_FIELDS = (
('network-status-version', True, True, True),
('vote-status', True, True, True),
('consensus-methods', True, False, False),
('consensus-method', False, True, False),
('published', True, False, True),
('valid-after', True, True, True),
('fresh-until', True, True, True),
('valid-until', True, True, True),
('voting-delay', True, True, True),
('client-versions', True, True, False),
('server-versions', True, True, False),
('package', True, True, False),
('known-flags', True, True, True),
('flag-thresholds', True, False, False),
('shared-rand-participate', True, False, False),
('shared-rand-commit', True, False, False),
('shared-rand-previous-value', True, True, False),
('shared-rand-current-value', True, True, False),
('recommended-client-protocols', True, True, False),
('recommended-relay-protocols', True, True, False),
('required-client-protocols', True, True, False),
('required-relay-protocols', True, True, False),
('params', True, True, False),
)
FOOTER_STATUS_DOCUMENT_FIELDS = (
('directory-footer', True, True, False),
('bandwidth-weights', False, True, False),
('directory-signature', True, True, True),
)
AUTH_START = 'dir-source'
ROUTERS_START = 'r'
FOOTER_START = 'directory-footer'
V2_FOOTER_START = 'directory-signature'
DEFAULT_PARAMS = {
'bwweightscale': 10000,
'cbtdisabled': 0,
'cbtnummodes': 3,
'cbtrecentcount': 20,
'cbtmaxtimeouts': 18,
'cbtmincircs': 100,
'cbtquantile': 80,
'cbtclosequantile': 95,
'cbttestfreq': 60,
'cbtmintimeout': 2000,
'cbtinitialtimeout': 60000,
'UseOptimisticData': 1,
'Support022HiddenServices': 1,
'usecreatefast': 1,
'max-consensuses-age-to-cache-for-diff': 72,
'try-diff-for-consensus-newer-than': 72,
'onion-key-rotation-days': 28,
'onion-key-grace-period-days': 7,
}
# KeyCertificate fields, tuple is of the form...
# (keyword, is_mandatory)
KEY_CERTIFICATE_PARAMS = (
('dir-key-certificate-version', True),
('dir-address', False),
('fingerprint', True),
('dir-identity-key', True),
('dir-key-published', True),
('dir-key-expires', True),
('dir-signing-key', True),
('dir-key-crosscert', False),
('dir-key-certification', True),
)
# all parameters are constrained to int32 range
MIN_PARAM, MAX_PARAM = -2147483648, 2147483647
PARAM_RANGE = {
'circwindow': (100, 1000),
'CircuitPriorityHalflifeMsec': (-1, MAX_PARAM),
'perconnbwrate': (-1, MAX_PARAM),
'perconnbwburst': (-1, MAX_PARAM),
'refuseunknownexits': (0, 1),
'bwweightscale': (1, MAX_PARAM),
'cbtdisabled': (0, 1),
'cbtnummodes': (1, 20),
'cbtrecentcount': (3, 1000),
'cbtmaxtimeouts': (3, 10000),
'cbtmincircs': (1, 10000),
'cbtquantile': (10, 99),
'cbtclosequantile': (MIN_PARAM, 99),
'cbttestfreq': (1, MAX_PARAM),
'cbtmintimeout': (500, MAX_PARAM),
'UseOptimisticData': (0, 1),
'Support022HiddenServices': (0, 1),
'usecreatefast': (0, 1),
'UseNTorHandshake': (0, 1),
'FastFlagMinThreshold': (4, MAX_PARAM),
'NumDirectoryGuards': (0, 10),
'NumEntryGuards': (1, 10),
'GuardLifetime': (2592000, 157766400), # min: 30 days, max: 1826 days
'NumNTorsPerTAP': (1, 100000),
'AllowNonearlyExtend': (0, 1),
'AuthDirNumSRVAgreements': (1, MAX_PARAM),
'max-consensuses-age-to-cache-for-diff': (0, 8192),
'try-diff-for-consensus-newer-than': (0, 8192),
'onion-key-rotation-days': (1, 90),
'onion-key-grace-period-days': (1, 90), # max is the highest onion-key-rotation-days
}
class PackageVersion(collections.namedtuple('PackageVersion', ['name', 'version', 'url', 'digests'])):
"""
Latest recommended version of a package that's available.
:var str name: name of the package
:var str version: latest recommended version
:var str url: package's url
:var dict digests: mapping of digest types to their value
"""
class SharedRandomnessCommitment(collections.namedtuple('SharedRandomnessCommitment', ['version', 'algorithm', 'identity', 'commit', 'reveal'])):
"""
Directory authority's commitment for generating the next shared random value.
:var int version: shared randomness protocol version
:var str algorithm: hash algorithm used to make the commitment
:var str identity: authority's sha1 identity fingerprint
:var str commit: base64 encoded commitment hash to the shared random value
:var str reveal: base64 encoded commitment to the shared random value,
    **None** if not provided
"""
def _parse_file(document_file, document_type = None, validate = False, is_microdescriptor = False, document_handler = DocumentHandler.ENTRIES, **kwargs):
"""
Parses a network status and iterates over the RouterStatusEntry in it. The
  document that these instances reference has an empty 'routers' attribute to
allow for limited memory usage.
:param file document_file: file with network status document content
:param class document_type: NetworkStatusDocument subclass
:param bool validate: checks the validity of the document's contents if
**True**, skips these checks otherwise
:param bool is_microdescriptor: **True** if this is for a microdescriptor
consensus, **False** otherwise
:param stem.descriptor.__init__.DocumentHandler document_handler: method in
which to parse :class:`~stem.descriptor.networkstatus.NetworkStatusDocument`
:param dict kwargs: additional arguments for the descriptor constructor
:returns: :class:`stem.descriptor.networkstatus.NetworkStatusDocument` object
:raises:
* **ValueError** if the document_version is unrecognized or the contents is
malformed and validate is **True**
* **IOError** if the file can't be read
"""
# we can't properly default this since NetworkStatusDocumentV3 isn't defined yet
if document_type is None:
document_type = NetworkStatusDocumentV3
if document_type == NetworkStatusDocumentV2:
document_type, router_type = NetworkStatusDocumentV2, RouterStatusEntryV2
elif document_type == NetworkStatusDocumentV3:
router_type = RouterStatusEntryMicroV3 if is_microdescriptor else RouterStatusEntryV3
elif document_type == BridgeNetworkStatusDocument:
document_type, router_type = BridgeNetworkStatusDocument, RouterStatusEntryV2
else:
raise ValueError("Document type %i isn't recognized (only able to parse v2, v3, and bridge)" % document_type)
if document_handler == DocumentHandler.DOCUMENT:
yield document_type(document_file.read(), validate, **kwargs)
return
# getting the document without the routers section
header = _read_until_keywords((ROUTERS_START, FOOTER_START, V2_FOOTER_START), document_file)
if header and header[0].startswith(b'@type'):
header = header[1:]
routers_start = document_file.tell()
_read_until_keywords((FOOTER_START, V2_FOOTER_START), document_file, skip = True)
routers_end = document_file.tell()
footer = document_file.readlines()
document_content = bytes.join(b'', header + footer)
if document_handler == DocumentHandler.BARE_DOCUMENT:
yield document_type(document_content, validate, **kwargs)
elif document_handler == DocumentHandler.ENTRIES:
desc_iterator = stem.descriptor.router_status_entry._parse_file(
document_file,
validate,
entry_class = router_type,
entry_keyword = ROUTERS_START,
start_position = routers_start,
end_position = routers_end,
extra_args = (document_type(document_content, validate),),
**kwargs
)
for desc in desc_iterator:
yield desc
else:
raise ValueError('Unrecognized document_handler: %s' % document_handler)
def _parse_file_key_certs(certificate_file, validate = False):
"""
Parses a file containing one or more authority key certificates.
:param file certificate_file: file with key certificates
:param bool validate: checks the validity of the certificate's contents if
**True**, skips these checks otherwise
:returns: iterator for :class:`stem.descriptor.networkstatus.KeyCertificate`
instance in the file
:raises:
* **ValueError** if the key certificate content is invalid and validate is
**True**
* **IOError** if the file can't be read
"""
while True:
keycert_content = _read_until_keywords('dir-key-certification', certificate_file)
    # we've reached the 'dir-key-certification', now include the pgp style block
block_end_prefix = PGP_BLOCK_END.split(' ', 1)[0]
keycert_content += _read_until_keywords(block_end_prefix, certificate_file, True)
if keycert_content:
yield stem.descriptor.networkstatus.KeyCertificate(bytes.join(b'', keycert_content), validate = validate)
else:
break # done parsing file
class NetworkStatusDocument(Descriptor):
"""
Common parent for network status documents.
"""
def _parse_version_line(keyword, attribute, expected_version):
def _parse(descriptor, entries):
value = _value(keyword, entries)
if not value.isdigit():
raise ValueError('Document has a non-numeric version: %s %s' % (keyword, value))
setattr(descriptor, attribute, int(value))
if int(value) != expected_version:
raise ValueError("Expected a version %i document, but got version '%s' instead" % (expected_version, value))
return _parse
def _parse_dir_source_line(descriptor, entries):
value = _value('dir-source', entries)
dir_source_comp = value.split()
if len(dir_source_comp) < 3:
raise ValueError("The 'dir-source' line of a v2 network status document must have three values: dir-source %s" % value)
if not dir_source_comp[0]:
# https://trac.torproject.org/7055
raise ValueError("Authority's hostname can't be blank: dir-source %s" % value)
elif not stem.util.connection.is_valid_ipv4_address(dir_source_comp[1]):
raise ValueError("Authority's address isn't a valid IPv4 address: %s" % dir_source_comp[1])
elif not stem.util.connection.is_valid_port(dir_source_comp[2], allow_zero = True):
raise ValueError("Authority's DirPort is invalid: %s" % dir_source_comp[2])
descriptor.hostname = dir_source_comp[0]
descriptor.address = dir_source_comp[1]
descriptor.dir_port = None if dir_source_comp[2] == '0' else int(dir_source_comp[2])
_parse_network_status_version_line = _parse_version_line('network-status-version', 'version', 2)
_parse_fingerprint_line = _parse_forty_character_hex('fingerprint', 'fingerprint')
_parse_contact_line = _parse_simple_line('contact', 'contact')
_parse_dir_signing_key_line = _parse_key_block('dir-signing-key', 'signing_key', 'RSA PUBLIC KEY')
_parse_client_versions_line = _parse_simple_line('client-versions', 'client_versions', func = lambda v: v.split(','))
_parse_server_versions_line = _parse_simple_line('server-versions', 'server_versions', func = lambda v: v.split(','))
_parse_published_line = _parse_timestamp_line('published', 'published')
_parse_dir_options_line = _parse_simple_line('dir-options', 'options', func = lambda v: v.split())
_parse_directory_signature_line = _parse_key_block('directory-signature', 'signature', 'SIGNATURE', value_attribute = 'signing_authority')
class NetworkStatusDocumentV2(NetworkStatusDocument):
"""
Version 2 network status document. These have been deprecated and are no
longer generated by Tor.
:var dict routers: fingerprints to :class:`~stem.descriptor.router_status_entry.RouterStatusEntryV2`
contained in the document
:var int version: **\*** document version
:var str hostname: **\*** hostname of the authority
:var str address: **\*** authority's IP address
:var int dir_port: **\*** authority's DirPort
:var str fingerprint: **\*** authority's fingerprint
:var str contact: **\*** authority's contact information
:var str signing_key: **\*** authority's public signing key
:var list client_versions: list of recommended client tor version strings
:var list server_versions: list of recommended server tor version strings
:var datetime published: **\*** time when the document was published
:var list options: **\*** list of things that this authority decides
:var str signing_authority: **\*** name of the authority signing the document
:var str signature: **\*** authority's signature for the document
**\*** attribute is either required when we're parsed with validation or has
a default value, others are left as **None** if undefined
"""
ATTRIBUTES = {
'version': (None, _parse_network_status_version_line),
'hostname': (None, _parse_dir_source_line),
'address': (None, _parse_dir_source_line),
'dir_port': (None, _parse_dir_source_line),
'fingerprint': (None, _parse_fingerprint_line),
'contact': (None, _parse_contact_line),
'signing_key': (None, _parse_dir_signing_key_line),
'client_versions': ([], _parse_client_versions_line),
'server_versions': ([], _parse_server_versions_line),
'published': (None, _parse_published_line),
'options': ([], _parse_dir_options_line),
'signing_authority': (None, _parse_directory_signature_line),
'signatures': (None, _parse_directory_signature_line),
}
PARSER_FOR_LINE = {
'network-status-version': _parse_network_status_version_line,
'dir-source': _parse_dir_source_line,
'fingerprint': _parse_fingerprint_line,
'contact': _parse_contact_line,
'dir-signing-key': _parse_dir_signing_key_line,
'client-versions': _parse_client_versions_line,
'server-versions': _parse_server_versions_line,
'published': _parse_published_line,
'dir-options': _parse_dir_options_line,
'directory-signature': _parse_directory_signature_line,
}
@classmethod
def content(cls, attr = None, exclude = (), sign = False):
if sign:
raise NotImplementedError('Signing of %s not implemented' % cls.__name__)
return _descriptor_content(attr, exclude, (
('network-status-version', '2'),
('dir-source', '%s %s 80' % (_random_ipv4_address(), _random_ipv4_address())),
('fingerprint', _random_fingerprint()),
('contact', '<EMAIL>'),
('published', _random_date()),
('dir-signing-key', _random_crypto_blob('RSA PUBLIC KEY')),
), (
('directory-signature', 'moria2' + _random_crypto_blob('SIGNATURE')),
))
def __init__(self, raw_content, validate = False):
super(NetworkStatusDocumentV2, self).__init__(raw_content, lazy_load = not validate)
# Splitting the document from the routers. Unlike v3 documents we're not
# bending over backwards on the validation by checking the field order or
# that header/footer attributes aren't in the wrong section. This is a
# deprecated descriptor type - patches welcome if you want those checks.
document_file = io.BytesIO(raw_content)
document_content = bytes.join(b'', _read_until_keywords((ROUTERS_START, V2_FOOTER_START), document_file))
router_iter = stem.descriptor.router_status_entry._parse_file(
document_file,
validate,
entry_class = RouterStatusEntryV2,
entry_keyword = ROUTERS_START,
section_end_keywords = (V2_FOOTER_START,),
extra_args = (self,),
)
self.routers = dict((desc.fingerprint, desc) for desc in router_iter)
entries = _descriptor_components(document_content + b'\n' + document_file.read(), validate)
if validate:
self._check_constraints(entries)
self._parse(entries, validate)
# 'client-versions' and 'server-versions' are only required if 'Versions'
# is among the options
if 'Versions' in self.options and not ('client-versions' in entries and 'server-versions' in entries):
raise ValueError("Version 2 network status documents must have a 'client-versions' and 'server-versions' when 'Versions' is listed among its dir-options:\n%s" % str(self))
else:
self._entries = entries
def _check_constraints(self, entries):
required_fields = [field for (field, is_mandatory) in NETWORK_STATUS_V2_FIELDS if is_mandatory]
for keyword in required_fields:
if keyword not in entries:
raise ValueError("Network status document (v2) must have a '%s' line:\n%s" % (keyword, str(self)))
# all recognized fields can only appear once
single_fields = [field for (field, _) in NETWORK_STATUS_V2_FIELDS]
for keyword in single_fields:
if keyword in entries and len(entries[keyword]) > 1:
raise ValueError("Network status document (v2) can only have a single '%s' line, got %i:\n%s" % (keyword, len(entries[keyword]), str(self)))
if 'network-status-version' != list(entries.keys())[0]:
raise ValueError("Network status document (v2) are expected to start with a 'network-status-version' line:\n%s" % str(self))
def _parse_header_network_status_version_line(descriptor, entries):
# "network-status-version" version
value = _value('network-status-version', entries)
if ' ' in value:
version, flavor = value.split(' ', 1)
else:
version, flavor = value, None
if not version.isdigit():
raise ValueError('Network status document has a non-numeric version: network-status-version %s' % value)
descriptor.version = int(version)
descriptor.version_flavor = flavor
descriptor.is_microdescriptor = flavor == 'microdesc'
if descriptor.version != 3:
raise ValueError("Expected a version 3 network status document, got version '%s' instead" % descriptor.version)
def _parse_header_vote_status_line(descriptor, entries):
# "vote-status" type
#
# The consensus-method and consensus-methods fields are optional since
# they weren't included in version 1. Setting a default now that we
# know if we're a vote or not.
value = _value('vote-status', entries)
if value == 'consensus':
descriptor.is_consensus, descriptor.is_vote = True, False
elif value == 'vote':
descriptor.is_consensus, descriptor.is_vote = False, True
else:
raise ValueError("A network status document's vote-status line can only be 'consensus' or 'vote', got '%s' instead" % value)
def _parse_header_consensus_methods_line(descriptor, entries):
# "consensus-methods" IntegerList
if descriptor._lazy_loading and descriptor.is_vote:
descriptor.consensus_methods = [1]
value, consensus_methods = _value('consensus-methods', entries), []
for entry in value.split(' '):
if not entry.isdigit():
raise ValueError("A network status document's consensus-methods must be a list of integer values, but was '%s'" % value)
consensus_methods.append(int(entry))
descriptor.consensus_methods = consensus_methods
def _parse_header_consensus_method_line(descriptor, entries):
# "consensus-method" Integer
if descriptor._lazy_loading and descriptor.is_consensus:
descriptor.consensus_method = 1
value = _value('consensus-method', entries)
if not value.isdigit():
raise ValueError("A network status document's consensus-method must be an integer, but was '%s'" % value)
descriptor.consensus_method = int(value)
def _parse_header_voting_delay_line(descriptor, entries):
# "voting-delay" VoteSeconds DistSeconds
value = _value('voting-delay', entries)
value_comp = value.split(' ')
if len(value_comp) == 2 and value_comp[0].isdigit() and value_comp[1].isdigit():
descriptor.vote_delay = int(value_comp[0])
descriptor.dist_delay = int(value_comp[1])
else:
raise ValueError("A network status document's 'voting-delay' line must be a pair of integer values, but was '%s'" % value)
def _parse_versions_line(keyword, attribute):
def _parse(descriptor, entries):
value, entries = _value(keyword, entries), []
for entry in value.split(','):
try:
entries.append(stem.version._get_version(entry))
except ValueError:
raise ValueError("Network status document's '%s' line had '%s', which isn't a parsable tor version: %s %s" % (keyword, entry, keyword, value))
setattr(descriptor, attribute, entries)
return _parse
def _parse_header_flag_thresholds_line(descriptor, entries):
# "flag-thresholds" SP THRESHOLDS
value, thresholds = _value('flag-thresholds', entries).strip(), {}
if value:
for entry in value.split(' '):
if '=' not in entry:
raise ValueError("Network status document's 'flag-thresholds' line is expected to be space separated key=value mappings, got: flag-thresholds %s" % value)
entry_key, entry_value = entry.split('=', 1)
try:
if entry_value.endswith('%'):
# opting for string manipulation rather than just
# 'float(entry_value) / 100' because floating point arithmetic
# will lose precision
thresholds[entry_key] = float('0.' + entry_value[:-1].replace('.', '', 1))
elif '.' in entry_value:
thresholds[entry_key] = float(entry_value)
else:
thresholds[entry_key] = int(entry_value)
except ValueError:
raise ValueError("Network status document's 'flag-thresholds' line is expected to have float values, got: flag-thresholds %s" % value)
descriptor.flag_thresholds = thresholds
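# As an illustrative example, a vote line such as...
#
#   flag-thresholds stable-uptime=693369 fast-speed=40960 guard-wfu=94.669%
#
# ...would be parsed into {'stable-uptime': 693369, 'fast-speed': 40960,
# 'guard-wfu': 0.94669}, with percentage entries stored as fractions.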
def _parse_header_parameters_line(descriptor, entries):
# "params" [Parameters]
# Parameter ::= Keyword '=' Int32
# Int32 ::= A decimal integer between -2147483648 and 2147483647.
# Parameters ::= Parameter | Parameters SP Parameter
if descriptor._lazy_loading:
descriptor.params = dict(DEFAULT_PARAMS) if descriptor._default_params else {}
value = _value('params', entries)
if value != '':
descriptor.params = _parse_int_mappings('params', value, True)
descriptor._check_params_constraints()
def _parse_directory_footer_line(descriptor, entries):
# nothing to parse, simply checking that we don't have a value
value = _value('directory-footer', entries)
if value:
raise ValueError("A network status document's 'directory-footer' line shouldn't have any content, got 'directory-footer %s'" % value)
def _parse_footer_directory_signature_line(descriptor, entries):
signatures = []
for sig_value, block_type, block_contents in entries['directory-signature']:
if sig_value.count(' ') not in (1, 2):
raise ValueError("Authority signatures in a network status document are expected to be of the form 'directory-signature [METHOD] FINGERPRINT KEY_DIGEST', received: %s" % sig_value)
if not block_contents or block_type != 'SIGNATURE':
raise ValueError("'directory-signature' should be followed by a SIGNATURE block, but was a %s" % block_type)
if sig_value.count(' ') == 1:
method = 'sha1' # default if none was provided
fingerprint, key_digest = sig_value.split(' ', 1)
else:
method, fingerprint, key_digest = sig_value.split(' ', 2)
signatures.append(DocumentSignature(method, fingerprint, key_digest, block_contents, True))
descriptor.signatures = signatures
def _parse_package_line(descriptor, entries):
package_versions = []
for value, _, _ in entries['package']:
value_comp = value.split()
if len(value_comp) < 3:
raise ValueError("'package' must at least have a 'PackageName Version URL': %s" % value)
name, version, url = value_comp[:3]
digests = {}
for digest_entry in value_comp[3:]:
if '=' not in digest_entry:
raise ValueError("'package' digest entries should be 'key=value' pairs: %s" % value)
key, value = digest_entry.split('=', 1)
digests[key] = value
package_versions.append(PackageVersion(name, version, url, digests))
descriptor.packages = package_versions
def _parsed_shared_rand_commit(descriptor, entries):
# "shared-rand-commit" Version AlgName Identity Commit [Reveal]
commitments = []
for value, _, _ in entries['shared-rand-commit']:
value_comp = value.split()
if len(value_comp) < 4:
raise ValueError("'shared-rand-commit' must at least have a 'Version AlgName Identity Commit': %s" % value)
version, algorithm, identity, commit = value_comp[:4]
reveal = value_comp[4] if len(value_comp) >= 5 else None
if not version.isdigit():
raise ValueError("The version on our 'shared-rand-commit' line wasn't an integer: %s" % value)
commitments.append(SharedRandomnessCommitment(int(version), algorithm, identity, commit, reveal))
descriptor.shared_randomness_commitments = commitments
def _parse_shared_rand_previous_value(descriptor, entries):
# "shared-rand-previous-value" NumReveals Value
value = _value('shared-rand-previous-value', entries)
value_comp = value.split(' ')
if len(value_comp) == 2 and value_comp[0].isdigit():
descriptor.shared_randomness_previous_reveal_count = int(value_comp[0])
descriptor.shared_randomness_previous_value = value_comp[1]
else:
raise ValueError("A network status document's 'shared-rand-previous-value' line must be a pair of values, the first an integer but was '%s'" % value)
def _parse_shared_rand_current_value(descriptor, entries):
# "shared-rand-current-value" NumReveals Value
value = _value('shared-rand-current-value', entries)
value_comp = value.split(' ')
if len(value_comp) == 2 and value_comp[0].isdigit():
descriptor.shared_randomness_current_reveal_count = int(value_comp[0])
descriptor.shared_randomness_current_value = value_comp[1]
else:
raise ValueError("A network status document's 'shared-rand-current-value' line must be a pair of values, the first an integer but was '%s'" % value)
_parse_header_valid_after_line = _parse_timestamp_line('valid-after', 'valid_after')
_parse_header_fresh_until_line = _parse_timestamp_line('fresh-until', 'fresh_until')
_parse_header_valid_until_line = _parse_timestamp_line('valid-until', 'valid_until')
_parse_header_client_versions_line = _parse_versions_line('client-versions', 'client_versions')
_parse_header_server_versions_line = _parse_versions_line('server-versions', 'server_versions')
_parse_header_known_flags_line = _parse_simple_line('known-flags', 'known_flags', func = lambda v: [entry for entry in v.split(' ') if entry])
_parse_footer_bandwidth_weights_line = _parse_simple_line('bandwidth-weights', 'bandwidth_weights', func = lambda v: _parse_int_mappings('bandwidth-weights', v, True))
_parse_shared_rand_participate_line = _parse_if_present('shared-rand-participate', 'is_shared_randomness_participate')
_parse_recommended_client_protocols_line = _parse_protocol_line('recommended-client-protocols', 'recommended_client_protocols')
_parse_recommended_relay_protocols_line = _parse_protocol_line('recommended-relay-protocols', 'recommended_relay_protocols')
_parse_required_client_protocols_line = _parse_protocol_line('required-client-protocols', 'required_client_protocols')
_parse_required_relay_protocols_line = _parse_protocol_line('required-relay-protocols', 'required_relay_protocols')
class NetworkStatusDocumentV3(NetworkStatusDocument):
"""
Version 3 network status document. This could be either a vote or consensus.
:var dict routers: fingerprint to :class:`~stem.descriptor.router_status_entry.RouterStatusEntryV3`
mapping for relays contained in the document
:var int version: **\*** document version
:var str version_flavor: **\*** flavor associated with the document (such as 'microdesc')
:var bool is_consensus: **\*** **True** if the document is a consensus
:var bool is_vote: **\*** **True** if the document is a vote
:var bool is_microdescriptor: **\*** **True** if this is a microdescriptor
flavored document, **False** otherwise
:var datetime valid_after: **\*** time when the consensus became valid
:var datetime fresh_until: **\*** time when the next consensus should be produced
:var datetime valid_until: **\*** time when this consensus becomes obsolete
:var int vote_delay: **\*** number of seconds allowed for collecting votes
from all authorities
:var int dist_delay: **\*** number of seconds allowed for collecting
signatures from all authorities
:var list client_versions: list of recommended client tor versions
:var list server_versions: list of recommended server tor versions
:var list packages: **\*** list of :data:`~stem.descriptor.networkstatus.PackageVersion` entries
:var list known_flags: **\*** list of :data:`~stem.Flag` for the router's flags
:var dict params: **\*** dict of parameter(**str**) => value(**int**) mappings
:var list directory_authorities: **\*** list of :class:`~stem.descriptor.networkstatus.DirectoryAuthority`
objects that have generated this document
:var list signatures: **\*** :class:`~stem.descriptor.networkstatus.DocumentSignature`
of the authorities that have signed the document
**Consensus Attributes:**
:var int consensus_method: method version used to generate this consensus
:var dict bandwidth_weights: dict of weight(str) => value(int) mappings
:var str shared_randomness_current_value: base64 encoded current shared
random value
:var str shared_randomness_previous_value: base64 encoded last shared random
value
**Vote Attributes:**
:var list consensus_methods: list of ints for the supported method versions
:var datetime published: time when the document was published
:var dict flag_thresholds: **\*** mapping of internal performance thresholds used while making the vote, values are **ints** or **floats**
:var dict recommended_client_protocols: recommended protocols for clients
:var dict recommended_relay_protocols: recommended protocols for relays
:var dict required_client_protocols: required protocols for clients
:var dict required_relay_protocols: required protocols for relays
**\*** attribute is either required when we're parsed with validation or has
a default value, others are left as None if undefined
.. versionchanged:: 1.4.0
Added the packages attribute.
.. versionchanged:: 1.5.0
Added the is_shared_randomness_participate, shared_randomness_commitments,
shared_randomness_previous_reveal_count,
shared_randomness_previous_value,
shared_randomness_current_reveal_count, and
shared_randomness_current_value attributes.
.. versionchanged:: 1.6.0
Added the recommended_client_protocols, recommended_relay_protocols,
required_client_protocols, and required_relay_protocols.
.. versionchanged:: 1.6.0
The shared randomness attributes were misdocumented in the tor spec and as
such never set. They're now an attribute of **directory_authorities**.
"""
ATTRIBUTES = {
'version': (None, _parse_header_network_status_version_line),
'version_flavor': (None, _parse_header_network_status_version_line),
'is_consensus': (True, _parse_header_vote_status_line),
'is_vote': (False, _parse_header_vote_status_line),
'is_microdescriptor': (False, _parse_header_network_status_version_line),
'consensus_methods': ([], _parse_header_consensus_methods_line),
'published': (None, _parse_published_line),
'consensus_method': (None, _parse_header_consensus_method_line),
'valid_after': (None, _parse_header_valid_after_line),
'fresh_until': (None, _parse_header_fresh_until_line),
'valid_until': (None, _parse_header_valid_until_line),
'vote_delay': (None, _parse_header_voting_delay_line),
'dist_delay': (None, _parse_header_voting_delay_line),
'client_versions': ([], _parse_header_client_versions_line),
'server_versions': ([], _parse_header_server_versions_line),
'packages': ([], _parse_package_line),
'known_flags': ([], _parse_header_known_flags_line),
'flag_thresholds': ({}, _parse_header_flag_thresholds_line),
'recommended_client_protocols': ({}, _parse_recommended_client_protocols_line),
'recommended_relay_protocols': ({}, _parse_recommended_relay_protocols_line),
'required_client_protocols': ({}, _parse_required_client_protocols_line),
'required_relay_protocols': ({}, _parse_required_relay_protocols_line),
'params': ({}, _parse_header_parameters_line),
'shared_randomness_previous_value': (None, _parse_shared_rand_previous_value),
'shared_randomness_current_value': (None, _parse_shared_rand_current_value),
'signatures': ([], _parse_footer_directory_signature_line),
'bandwidth_weights': ({}, _parse_footer_bandwidth_weights_line),
}
HEADER_PARSER_FOR_LINE = {
'network-status-version': _parse_header_network_status_version_line,
'vote-status': _parse_header_vote_status_line,
'consensus-methods': _parse_header_consensus_methods_line,
'consensus-method': _parse_header_consensus_method_line,
'published': _parse_published_line,
'valid-after': _parse_header_valid_after_line,
'fresh-until': _parse_header_fresh_until_line,
'valid-until': _parse_header_valid_until_line,
'voting-delay': _parse_header_voting_delay_line,
'client-versions': _parse_header_client_versions_line,
'server-versions': _parse_header_server_versions_line,
'package': _parse_package_line,
'known-flags': _parse_header_known_flags_line,
'flag-thresholds': _parse_header_flag_thresholds_line,
'recommended-client-protocols': _parse_recommended_client_protocols_line,
'recommended-relay-protocols': _parse_recommended_relay_protocols_line,
'required-client-protocols': _parse_required_client_protocols_line,
'required-relay-protocols': _parse_required_relay_protocols_line,
'params': _parse_header_parameters_line,
'shared-rand-previous-value': _parse_shared_rand_previous_value,
'shared-rand-current-value': _parse_shared_rand_current_value,
}
FOOTER_PARSER_FOR_LINE = {
'directory-footer': _parse_directory_footer_line,
'bandwidth-weights': _parse_footer_bandwidth_weights_line,
'directory-signature': _parse_footer_directory_signature_line,
}
@classmethod
def content(cls, attr = None, exclude = (), sign = False, authorities = None, routers = None):
if sign:
raise NotImplementedError('Signing of %s not implemented' % cls.__name__)
attr = {} if attr is None else dict(attr)
is_vote = attr.get('vote-status') == 'vote'
if is_vote:
extra_defaults = {'consensus-methods': '1 9', 'published': _random_date()}
else:
extra_defaults = {'consensus-method': '9'}
if is_vote and authorities is None:
authorities = [DirectoryAuthority.create(is_vote = is_vote)]
for k, v in extra_defaults.items():
if exclude and k in exclude:
continue # explicitly excluding this field
elif k not in attr:
attr[k] = v
desc_content = _descriptor_content(attr, exclude, (
('network-status-version', '3'),
('vote-status', 'consensus'),
('consensus-methods', None),
('consensus-method', None),
('published', None),
('valid-after', _random_date()),
('fresh-until', _random_date()),
('valid-until', _random_date()),
('voting-delay', '300 300'),
('client-versions', None),
('server-versions', None),
('package', None),
('known-flags', 'Authority BadExit Exit Fast Guard HSDir Named Running Stable Unnamed V2Dir Valid'),
('params', None),
), (
('directory-footer', ''),
('bandwidth-weights', None),
('directory-signature', '%s %s%s' % (_random_fingerprint(), _random_fingerprint(), _random_crypto_blob('SIGNATURE'))),
))
# inject the authorities and/or routers between the header and footer
if authorities:
if b'directory-footer' in desc_content:
footer_div = desc_content.find(b'\ndirectory-footer') + 1
elif b'directory-signature' in desc_content:
footer_div = desc_content.find(b'\ndirectory-signature') + 1
else:
if routers:
desc_content += b'\n'
footer_div = len(desc_content) + 1
authority_content = stem.util.str_tools._to_bytes('\n'.join([str(a) for a in authorities]) + '\n')
desc_content = desc_content[:footer_div] + authority_content + desc_content[footer_div:]
if routers:
if b'directory-footer' in desc_content:
footer_div = desc_content.find(b'\ndirectory-footer') + 1
elif b'directory-signature' in desc_content:
footer_div = desc_content.find(b'\ndirectory-signature') + 1
else:
if routers:
desc_content += b'\n'
footer_div = len(desc_content) + 1
router_content = stem.util.str_tools._to_bytes('\n'.join([str(r) for r in routers]) + '\n')
desc_content = desc_content[:footer_div] + router_content + desc_content[footer_div:]
return desc_content
@classmethod
def create(cls, attr = None, exclude = (), validate = True, sign = False, authorities = None, routers = None):
return cls(cls.content(attr, exclude, sign, authorities, routers), validate = validate)
def __init__(self, raw_content, validate = False, default_params = True):
"""
Parse a v3 network status document.
:param str raw_content: raw network status document data
:param bool validate: **True** if the document is to be validated, **False** otherwise
:param bool default_params: includes defaults in our params dict, otherwise
it just contains values from the document
:raises: **ValueError** if the document is invalid
"""
super(NetworkStatusDocumentV3, self).__init__(raw_content, lazy_load = not validate)
document_file = io.BytesIO(raw_content)
# TODO: Tor misdocumented these as being in the header rather than the
# authority section. As such these have never been set but we need the
# attributes for stem 1.5 compatibility. Drop these in 2.0.
self.is_shared_randomness_participate = False
self.shared_randomness_commitments = []
self.shared_randomness_previous_reveal_count = None
self.shared_randomness_current_reveal_count = None
self._default_params = default_params
self._header(document_file, validate)
self.directory_authorities = tuple(stem.descriptor.router_status_entry._parse_file(
document_file,
validate,
entry_class = DirectoryAuthority,
entry_keyword = AUTH_START,
section_end_keywords = (ROUTERS_START, FOOTER_START, V2_FOOTER_START),
extra_args = (self.is_vote,),
))
if validate and self.is_vote and len(self.directory_authorities) != 1:
raise ValueError('Votes should only have an authority entry for the one that issued it, got %i: %s' % (len(self.directory_authorities), self.directory_authorities))
router_iter = stem.descriptor.router_status_entry._parse_file(
document_file,
validate,
entry_class = RouterStatusEntryMicroV3 if self.is_microdescriptor else RouterStatusEntryV3,
entry_keyword = ROUTERS_START,
section_end_keywords = (FOOTER_START, V2_FOOTER_START),
extra_args = (self,),
)
self.routers = dict((desc.fingerprint, desc) for desc in router_iter)
self._footer(document_file, validate)
def validate_signatures(self, key_certs):
"""
Validates we're properly signed by the signing certificates.
.. versionadded:: 1.6.0
:param list key_certs: :class:`~stem.descriptor.networkstatus.KeyCertificate`
to validate the consensus against
:raises: **ValueError** if an insufficient number of valid signatures are present.
"""
# sha1 hash of the body and header
local_digest = self._digest_for_content(b'network-status-version', b'directory-signature ')
valid_digests, total_digests = 0, 0
required_digests = len(self.signatures) / 2.0
signing_keys = dict([(cert.fingerprint, cert.signing_key) for cert in key_certs])
for sig in self.signatures:
if sig.identity not in signing_keys:
continue
signed_digest = self._digest_for_signature(signing_keys[sig.identity], sig.signature)
total_digests += 1
if signed_digest == local_digest:
valid_digests += 1
if valid_digests < required_digests:
raise ValueError('Network Status Document has %i valid signatures out of %i total, needed %i' % (valid_digests, total_digests, required_digests))
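# Usage sketch (an assumption for illustration, requires network access): fetch
# the authorities' key certificates via stem.descriptor.remote and confirm the
# consensus carries enough matching signatures...
#
#   import stem.descriptor.remote
#
#   key_certs = stem.descriptor.remote.get_key_certificates().run()
#   consensus.validate_signatures(key_certs)  # raises ValueError if too few match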
def get_unrecognized_lines(self):
if self._lazy_loading:
self._parse(self._header_entries, False, parser_for_line = self.HEADER_PARSER_FOR_LINE)
self._parse(self._footer_entries, False, parser_for_line = self.FOOTER_PARSER_FOR_LINE)
self._lazy_loading = False
return super(NetworkStatusDocumentV3, self).get_unrecognized_lines()
def meets_consensus_method(self, method):
"""
Checks if we meet the given consensus-method. This works for both votes and
consensuses, checking our 'consensus-method' and 'consensus-methods'
entries.
:param int method: consensus-method to check for
:returns: **True** if we meet the given consensus-method, and **False** otherwise
"""
if self.consensus_method is not None:
return self.consensus_method >= method
elif self.consensus_methods is not None:
return bool([x for x in self.consensus_methods if x >= method])
else:
return False # malformed document
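# Illustrative examples: a consensus parsed with 'consensus-method 28' gives
# meets_consensus_method(9) == True, while a vote whose 'consensus-methods'
# line lists '1 9' gives meets_consensus_method(12) == False.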
def _compare(self, other, method):
if not isinstance(other, NetworkStatusDocumentV3):
return False
return method(str(self).strip(), str(other).strip())
def _header(self, document_file, validate):
content = bytes.join(b'', _read_until_keywords((AUTH_START, ROUTERS_START, FOOTER_START), document_file))
entries = _descriptor_components(content, validate)
header_fields = [attr[0] for attr in HEADER_STATUS_DOCUMENT_FIELDS]
if validate:
# all known header fields can only appear once, except 'package' and 'shared-rand-commit'
for keyword, values in list(entries.items()):
if len(values) > 1 and keyword in header_fields and keyword != 'package' and keyword != 'shared-rand-commit':
raise ValueError("Network status documents can only have a single '%s' line, got %i" % (keyword, len(values)))
if self._default_params:
self.params = dict(DEFAULT_PARAMS)
self._parse(entries, validate, parser_for_line = self.HEADER_PARSER_FOR_LINE)
# should only appear in consensus-method 7 or later
if not self.meets_consensus_method(7) and 'params' in list(entries.keys()):
raise ValueError("A network status document's 'params' line should only appear in consensus-method 7 or later")
_check_for_missing_and_disallowed_fields(self, entries, HEADER_STATUS_DOCUMENT_FIELDS)
# default consensus_method and consensus_methods based on if we're a consensus or vote
if self.is_consensus and not self.consensus_method:
self.consensus_method = 1
elif self.is_vote and not self.consensus_methods:
self.consensus_methods = [1]
else:
self._header_entries = entries
self._entries.update(entries)
def _footer(self, document_file, validate):
entries = _descriptor_components(document_file.read(), validate)
footer_fields = [attr[0] for attr in FOOTER_STATUS_DOCUMENT_FIELDS]
if validate:
for keyword, values in list(entries.items()):
# all known footer fields can only appear once except...
# * 'directory-signature' in a consensus
if len(values) > 1 and keyword in footer_fields:
if not (keyword == 'directory-signature' and self.is_consensus):
raise ValueError("Network status documents can only have a single '%s' line, got %i" % (keyword, len(values)))
self._parse(entries, validate, parser_for_line = self.FOOTER_PARSER_FOR_LINE)
# Check that the footer has the right initial line. Prior to consensus
# method 9 it's a 'directory-signature' and after that footers start with
# 'directory-footer'.
if entries:
if self.meets_consensus_method(9):
if list(entries.keys())[0] != 'directory-footer':
raise ValueError("Network status document's footer should start with a 'directory-footer' line in consensus-method 9 or later")
else:
if list(entries.keys())[0] != 'directory-signature':
raise ValueError("Network status document's footer should start with a 'directory-signature' line prior to consensus-method 9")
_check_for_missing_and_disallowed_fields(self, entries, FOOTER_STATUS_DOCUMENT_FIELDS)
else:
self._footer_entries = entries
self._entries.update(entries)
def _check_params_constraints(self):
"""
Checks that the params we know about are within their documented ranges.
"""
for key, value in self.params.items():
minimum, maximum = PARAM_RANGE.get(key, (MIN_PARAM, MAX_PARAM))
# there are a few dynamic parameter ranges
if key == 'cbtclosequantile':
minimum = self.params.get('cbtquantile', minimum)
elif key == 'cbtinitialtimeout':
minimum = self.params.get('cbtmintimeout', minimum)
if value < minimum or value > maximum:
raise ValueError("'%s' value on the params line must be in the range of %i - %i, was %i" % (key, minimum, maximum, value))
def __hash__(self):
return hash(str(self).strip())
def __eq__(self, other):
return self._compare(other, lambda s, o: s == o)
def __ne__(self, other):
return not self == other
def __lt__(self, other):
return self._compare(other, lambda s, o: s < o)
def __le__(self, other):
return self._compare(other, lambda s, o: s <= o)
def _check_for_missing_and_disallowed_fields(document, entries, fields):
"""
Checks that we have mandatory fields for our type, and that we don't have
any fields exclusive to the other (ie, no vote-only fields appear in a
consensus or vice versa).
:param NetworkStatusDocumentV3 document: network status document
:param dict entries: ordered keyword/value mappings of the header or footer
:param list fields: expected field attributes (either
**HEADER_STATUS_DOCUMENT_FIELDS** or **FOOTER_STATUS_DOCUMENT_FIELDS**)
:raises: **ValueError** if we're missing mandatory fields or have fields we shouldn't
"""
missing_fields, disallowed_fields = [], []
for field, in_votes, in_consensus, mandatory in fields:
if mandatory and ((document.is_consensus and in_consensus) or (document.is_vote and in_votes)):
# mandatory field, check that we have it
if field not in entries.keys():
missing_fields.append(field)
elif (document.is_consensus and not in_consensus) or (document.is_vote and not in_votes):
# field we shouldn't have, check that we don't
if field in entries.keys():
disallowed_fields.append(field)
if missing_fields:
raise ValueError('Network status document is missing mandatory field: %s' % ', '.join(missing_fields))
if disallowed_fields:
raise ValueError("Network status document has fields that shouldn't appear in this document type or version: %s" % ', '.join(disallowed_fields))
def _parse_int_mappings(keyword, value, validate):
# Parse a series of 'key=value' entries, checking the following:
# - values are integers
# - keys are sorted in lexical order
results, seen_keys = {}, []
for entry in value.split(' '):
try:
if '=' not in entry:
raise ValueError("must only have 'key=value' entries")
entry_key, entry_value = entry.split('=', 1)
try:
# the int() function accepts things like '+123', but we don't want to
if entry_value.startswith('+'):
raise ValueError()
entry_value = int(entry_value)
except ValueError:
raise ValueError("'%s' is a non-numeric value" % entry_value)
if validate:
# parameters should be in ascending order by their key
for prior_key in seen_keys:
if prior_key > entry_key:
raise ValueError('parameters must be sorted by their key')
results[entry_key] = entry_value
seen_keys.append(entry_key)
except ValueError as exc:
if not validate:
continue
raise ValueError("Unable to parse network status document's '%s' line (%s): %s'" % (keyword, exc, value))
return results
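# For instance (illustrative values), _parse_int_mappings('params',
# 'CircuitPriorityHalflifeMsec=30000 bwweightscale=10000', True) returns
# {'CircuitPriorityHalflifeMsec': 30000, 'bwweightscale': 10000}; non-integer
# values or keys out of lexical order raise ValueError when validating.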
def _parse_dirauth_source_line(descriptor, entries):
# "dir-source" nickname identity address IP dirport orport
value = _value('dir-source', entries)
dir_source_comp = value.split(' ')
if len(dir_source_comp) < 6:
raise ValueError("Authority entry's 'dir-source' line must have six values: dir-source %s" % value)
if not stem.util.tor_tools.is_valid_nickname(dir_source_comp[0].rstrip('-legacy')):
raise ValueError("Authority's nickname is invalid: %s" % dir_source_comp[0])
elif not stem.util.tor_tools.is_valid_fingerprint(dir_source_comp[1]):
raise ValueError("Authority's v3ident is invalid: %s" % dir_source_comp[1])
elif not dir_source_comp[2]:
# https://trac.torproject.org/7055
raise ValueError("Authority's hostname can't be blank: dir-source %s" % value)
elif not stem.util.connection.is_valid_ipv4_address(dir_source_comp[3]):
raise ValueError("Authority's address isn't a valid IPv4 address: %s" % dir_source_comp[3])
elif not stem.util.connection.is_valid_port(dir_source_comp[4], allow_zero = True):
raise ValueError("Authority's DirPort is invalid: %s" % dir_source_comp[4])
elif not stem.util.connection.is_valid_port(dir_source_comp[5]):
raise ValueError("Authority's ORPort is invalid: %s" % dir_source_comp[5])
descriptor.nickname = dir_source_comp[0]
descriptor.v3ident = dir_source_comp[1]
descriptor.hostname = dir_source_comp[2]
descriptor.address = dir_source_comp[3]
descriptor.dir_port = None if dir_source_comp[4] == '0' else int(dir_source_comp[4])
descriptor.or_port = int(dir_source_comp[5])
descriptor.is_legacy = descriptor.nickname.endswith('-legacy')
_parse_legacy_dir_key_line = _parse_forty_character_hex('legacy-dir-key', 'legacy_dir_key')
_parse_vote_digest_line = _parse_forty_character_hex('vote-digest', 'vote_digest')
class DirectoryAuthority(Descriptor):
"""
Directory authority information obtained from a v3 network status document.
Authorities can optionally use a legacy format. These are no longer found in
practice, but have the following differences...
* The authority's nickname ends with '-legacy'.
* There's no **contact** or **vote_digest** attribute.
:var str nickname: **\*** authority's nickname
:var str v3ident: **\*** identity key fingerprint used to sign votes and consensus
:var str hostname: **\*** hostname of the authority
:var str address: **\*** authority's IP address
:var int dir_port: **\*** authority's DirPort
:var int or_port: **\*** authority's ORPort
:var bool is_legacy: **\*** if the authority's using the legacy format
:var str contact: contact information, this is included if is_legacy is **False**
**Consensus Attributes:**
:var str vote_digest: digest of the authority that contributed to the consensus, this is included if is_legacy is **False**
**Vote Attributes:**
:var str legacy_dir_key: fingerprint of an obsolete identity key
:var stem.descriptor.networkstatus.KeyCertificate key_certificate: **\***
authority's key certificate
:var bool is_shared_randomness_participate: **\*** **True** if this authority
participates in establishing a shared random value, **False** otherwise
:var list shared_randomness_commitments: **\*** list of
:data:`~stem.descriptor.networkstatus.SharedRandomnessCommitment` entries
:var int shared_randomness_previous_reveal_count: number of commitments
used to generate the last shared random value
:var str shared_randomness_previous_value: base64 encoded last shared random
value
:var int shared_randomness_current_reveal_count: number of commitments
used to generate the current shared random value
:var str shared_randomness_current_value: base64 encoded current shared
random value
**\*** mandatory attribute
.. versionchanged:: 1.4.0
Renamed our 'fingerprint' attribute to 'v3ident' (prior attribute exists
for backward compatibility, but is deprecated).
.. versionchanged:: 1.6.0
Added the is_shared_randomness_participate, shared_randomness_commitments,
shared_randomness_previous_reveal_count,
shared_randomness_previous_value,
shared_randomness_current_reveal_count, and
shared_randomness_current_value attributes.
"""
ATTRIBUTES = {
'nickname': (None, _parse_dirauth_source_line),
'v3ident': (None, _parse_dirauth_source_line),
'hostname': (None, _parse_dirauth_source_line),
'address': (None, _parse_dirauth_source_line),
'dir_port': (None, _parse_dirauth_source_line),
'or_port': (None, _parse_dirauth_source_line),
'is_legacy': (False, _parse_dirauth_source_line),
'contact': (None, _parse_contact_line),
'vote_digest': (None, _parse_vote_digest_line),
'legacy_dir_key': (None, _parse_legacy_dir_key_line),
'is_shared_randomness_participate': (False, _parse_shared_rand_participate_line),
'shared_randomness_commitments': ([], _parsed_shared_rand_commit),
'shared_randomness_previous_reveal_count': (None, _parse_shared_rand_previous_value),
'shared_randomness_previous_value': (None, _parse_shared_rand_previous_value),
'shared_randomness_current_reveal_count': (None, _parse_shared_rand_current_value),
'shared_randomness_current_value': (None, _parse_shared_rand_current_value),
}
PARSER_FOR_LINE = {
'dir-source': _parse_dirauth_source_line,
'contact': _parse_contact_line,
'legacy-dir-key': _parse_legacy_dir_key_line,
'vote-digest': _parse_vote_digest_line,
'shared-rand-participate': _parse_shared_rand_participate_line,
'shared-rand-commit': _parsed_shared_rand_commit,
'shared-rand-previous-value': _parse_shared_rand_previous_value,
'shared-rand-current-value': _parse_shared_rand_current_value,
}
@classmethod
def content(cls, attr = None, exclude = (), sign = False, is_vote = False):
if sign:
raise NotImplementedError('Signing of %s not implemented' % cls.__name__)
attr = {} if attr is None else dict(attr)
# include mandatory 'vote-digest' if a consensus
if not is_vote and not ('vote-digest' in attr or (exclude and 'vote-digest' in exclude)):
attr['vote-digest'] = _random_fingerprint()
content = _descriptor_content(attr, exclude, (
('dir-source', '%s %s no.place.com %s 9030 9090' % (_random_nickname(), _random_fingerprint(), _random_ipv4_address())),
('contact', '<NAME> <email>'),
))
if is_vote:
content += b'\n' + KeyCertificate.content()
return content
@classmethod
def create(cls, attr = None, exclude = (), validate = True, sign = False, is_vote = False):
return cls(cls.content(attr, exclude, sign, is_vote), validate = validate, is_vote = is_vote)
def __init__(self, raw_content, validate = False, is_vote = False):
"""
Parse a directory authority entry in a v3 network status document.
:param str raw_content: raw directory authority entry information
:param bool validate: checks the validity of the content if True, skips
these checks otherwise
:param bool is_vote: True if this is for a vote, False if it's for a consensus
:raises: ValueError if the descriptor data is invalid
"""
super(DirectoryAuthority, self).__init__(raw_content, lazy_load = not validate)
content = stem.util.str_tools._to_unicode(raw_content)
# separate the directory authority entry from its key certificate
key_div = content.find('\ndir-key-certificate-version')
if key_div != -1:
self.key_certificate = KeyCertificate(content[key_div + 1:], validate)
content = content[:key_div + 1]
else:
self.key_certificate = None
entries = _descriptor_components(content, validate)
if validate and 'dir-source' != list(entries.keys())[0]:
raise ValueError("Authority entries are expected to start with a 'dir-source' line:\n%s" % (content))
# check that we have mandatory fields
if validate:
is_legacy, dir_source_entry = False, entries.get('dir-source')
if dir_source_entry:
is_legacy = dir_source_entry[0][0].split()[0].endswith('-legacy')
required_fields, excluded_fields = ['dir-source'], []
if not is_legacy:
required_fields += ['contact']
if is_vote:
if not self.key_certificate:
raise ValueError('Authority votes must have a key certificate:\n%s' % content)
excluded_fields += ['vote-digest']
elif not is_vote:
if self.key_certificate:
raise ValueError("Authority consensus entries shouldn't have a key certificate:\n%s" % content)
if not is_legacy:
required_fields += ['vote-digest']
excluded_fields += ['legacy-dir-key']
for keyword in required_fields:
if keyword not in entries:
raise ValueError("Authority entries must have a '%s' line:\n%s" % (keyword, content))
for keyword in entries:
if keyword in excluded_fields:
type_label = 'votes' if is_vote else 'consensus entries'
raise ValueError("Authority %s shouldn't have a '%s' line:\n%s" % (type_label, keyword, content))
# all known attributes can only appear at most once
for keyword, values in list(entries.items()):
if len(values) > 1 and keyword in ('dir-source', 'contact', 'legacy-dir-key', 'vote-digest'):
raise ValueError("Authority entries can only have a single '%s' line, got %i:\n%s" % (keyword, len(values), content))
self._parse(entries, validate)
else:
self._entries = entries
# TODO: Due to a bug we had a 'fingerprint' rather than 'v3ident' attribute
# for a long while. Keeping this around for backward compatibility, but
# this will be dropped in stem's 2.0 release.
self.fingerprint = self.v3ident
def _compare(self, other, method):
if not isinstance(other, DirectoryAuthority):
return False
return method(str(self).strip(), str(other).strip())
def __hash__(self):
return hash(str(self).strip())
def __eq__(self, other):
return self._compare(other, lambda s, o: s == o)
def __ne__(self, other):
return not self == other
def __lt__(self, other):
return self._compare(other, lambda s, o: s < o)
def __le__(self, other):
return self._compare(other, lambda s, o: s <= o)
def _parse_dir_address_line(descriptor, entries):
# "dir-address" IPPort
value = _value('dir-address', entries)
if ':' not in value:
raise ValueError("Key certificate's 'dir-address' is expected to be of the form ADDRESS:PORT: dir-address %s" % value)
address, dirport = value.rsplit(':', 1)
if not stem.util.connection.is_valid_ipv4_address(address):
raise ValueError("Key certificate's address isn't a valid IPv4 address: dir-address %s" % value)
elif not stem.util.connection.is_valid_port(dirport):
raise ValueError("Key certificate's dirport is invalid: dir-address %s" % value)
descriptor.address = address
descriptor.dir_port = int(dirport)
_parse_dir_key_certificate_version_line = _parse_version_line('dir-key-certificate-version', 'version', 3)
_parse_dir_key_published_line = _parse_timestamp_line('dir-key-published', 'published')
_parse_dir_key_expires_line = _parse_timestamp_line('dir-key-expires', 'expires')
_parse_identity_key_line = _parse_key_block('dir-identity-key', 'identity_key', 'RSA PUBLIC KEY')
_parse_signing_key_line = _parse_key_block('dir-signing-key', 'signing_key', 'RSA PUBLIC KEY')
_parse_dir_key_crosscert_line = _parse_key_block('dir-key-crosscert', 'crosscert', 'ID SIGNATURE')
_parse_dir_key_certification_line = _parse_key_block('dir-key-certification', 'certification', 'SIGNATURE')
class KeyCertificate(Descriptor):
"""
Directory key certificate for a v3 network status document.
:var int version: **\*** version of the key certificate
:var str address: authority's IP address
:var int dir_port: authority's DirPort
:var str fingerprint: **\*** authority's fingerprint
:var str identity_key: **\*** long term authority identity key
:var datetime published: **\*** time when this key was generated
:var datetime expires: **\*** time after which this key becomes invalid
:var str signing_key: **\*** directory server's public signing key
:var str crosscert: signature made using certificate's signing key
:var str certification: **\*** signature of this key certificate signed with
the identity key
**\*** mandatory attribute
"""
ATTRIBUTES = {
'version': (None, _parse_dir_key_certificate_version_line),
'address': (None, _parse_dir_address_line),
'dir_port': (None, _parse_dir_address_line),
'fingerprint': (None, _parse_fingerprint_line),
'identity_key': (None, _parse_identity_key_line),
'published': (None, _parse_dir_key_published_line),
'expires': (None, _parse_dir_key_expires_line),
'signing_key': (None, _parse_signing_key_line),
'crosscert': (None, _parse_dir_key_crosscert_line),
'certification': (None, _parse_dir_key_certification_line),
}
PARSER_FOR_LINE = {
'dir-key-certificate-version': _parse_dir_key_certificate_version_line,
'dir-address': _parse_dir_address_line,
'fingerprint': _parse_fingerprint_line,
'dir-key-published': _parse_dir_key_published_line,
'dir-key-expires': _parse_dir_key_expires_line,
'dir-identity-key': _parse_identity_key_line,
'dir-signing-key': _parse_signing_key_line,
'dir-key-crosscert': _parse_dir_key_crosscert_line,
'dir-key-certification': _parse_dir_key_certification_line,
}
@classmethod
def content(cls, attr = None, exclude = (), sign = False):
if sign:
raise NotImplementedError('Signing of %s not implemented' % cls.__name__)
return _descriptor_content(attr, exclude, (
('dir-key-certificate-version', '3'),
('fingerprint', _random_fingerprint()),
('dir-key-published', _random_date()),
('dir-key-expires', _random_date()),
('dir-identity-key', _random_crypto_blob('RSA PUBLIC KEY')),
('dir-signing-key', _random_crypto_blob('RSA PUBLIC KEY')),
), (
('dir-key-certification', _random_crypto_blob('SIGNATURE')),
))
def __init__(self, raw_content, validate = False):
super(KeyCertificate, self).__init__(raw_content, lazy_load = not validate)
entries = _descriptor_components(raw_content, validate)
if validate:
if 'dir-key-certificate-version' != list(entries.keys())[0]:
raise ValueError("Key certificates must start with a 'dir-key-certificate-version' line:\n%s" % (raw_content))
elif 'dir-key-certification' != list(entries.keys())[-1]:
raise ValueError("Key certificates must end with a 'dir-key-certification' line:\n%s" % (raw_content))
# check that we have mandatory fields and that our known fields only
# appear once
for keyword, is_mandatory in KEY_CERTIFICATE_PARAMS:
if is_mandatory and keyword not in entries:
raise ValueError("Key certificates must have a '%s' line:\n%s" % (keyword, raw_content))
entry_count = len(entries.get(keyword, []))
if entry_count > 1:
raise ValueError("Key certificates can only have a single '%s' line, got %i:\n%s" % (keyword, entry_count, raw_content))
self._parse(entries, validate)
else:
self._entries = entries
def _compare(self, other, method):
if not isinstance(other, KeyCertificate):
return False
return method(str(self).strip(), str(other).strip())
def __hash__(self):
return hash(str(self).strip())
def __eq__(self, other):
return self._compare(other, lambda s, o: s == o)
def __ne__(self, other):
return not self == other
def __lt__(self, other):
return self._compare(other, lambda s, o: s < o)
def __le__(self, other):
return self._compare(other, lambda s, o: s <= o)
class DocumentSignature(object):
"""
Directory signature of a v3 network status document.
:var str method: algorithm used to make the signature
:var str identity: fingerprint of the authority that made the signature
:var str key_digest: digest of the signing key
:var str signature: document signature
:param bool validate: checks validity if **True**
:raises: **ValueError** if a validity check fails
"""
def __init__(self, method, identity, key_digest, signature, validate = False):
# Checking that these attributes are valid. Technically the key
# digest isn't a fingerprint, but it has the same characteristics.
if validate:
if not stem.util.tor_tools.is_valid_fingerprint(identity):
raise ValueError('Malformed fingerprint (%s) in the document signature' % identity)
if not stem.util.tor_tools.is_valid_fingerprint(key_digest):
raise ValueError('Malformed key digest (%s) in the document signature' % key_digest)
self.method = method
self.identity = identity
self.key_digest = key_digest
self.signature = signature
def _compare(self, other, method):
if not isinstance(other, DocumentSignature):
return False
for attr in ('method', 'identity', 'key_digest', 'signature'):
if getattr(self, attr) != getattr(other, attr):
return method(getattr(self, attr), getattr(other, attr))
return method(True, True) # we're equal
def __hash__(self):
return hash(str(self).strip())
def __eq__(self, other):
return self._compare(other, lambda s, o: s == o)
def __ne__(self, other):
return not self == other
def __lt__(self, other):
return self._compare(other, lambda s, o: s < o)
def __le__(self, other):
return self._compare(other, lambda s, o: s <= o)
class BridgeNetworkStatusDocument(NetworkStatusDocument):
"""
Network status document containing bridges. This is only available through
the metrics site.
:var dict routers: fingerprint to :class:`~stem.descriptor.router_status_entry.RouterStatusEntryV3`
mapping for relays contained in the document
:var datetime published: time when the document was published
"""
def __init__(self, raw_content, validate = False):
super(BridgeNetworkStatusDocument, self).__init__(raw_content)
self.published = None
document_file = io.BytesIO(raw_content)
published_line = stem.util.str_tools._to_unicode(document_file.readline())
if published_line.startswith('published '):
published_line = published_line.split(' ', 1)[1].strip()
try:
self.published = stem.util.str_tools._parse_timestamp(published_line)
except ValueError:
if validate:
raise ValueError("Bridge network status document's 'published' time wasn't parsable: %s" % published_line)
elif validate:
raise ValueError("Bridge network status documents must start with a 'published' line:\n%s" % stem.util.str_tools._to_unicode(raw_content))
router_iter = stem.descriptor.router_status_entry._parse_file(
document_file,
validate,
entry_class = RouterStatusEntryV2,
extra_args = (self,),
)
self.routers = dict((desc.fingerprint, desc) for desc in router_iter)
```
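A hypothetical end-to-end sketch of the parser above (the consensus path and `@type` annotation are assumptions for illustration): read a cached consensus from disk as a single `NetworkStatusDocumentV3` and inspect a few of its attributes.

```python
import stem.descriptor
from stem.descriptor import DocumentHandler

# Parse the whole document at once rather than iterating over its router entries.
consensus = next(stem.descriptor.parse_file(
  '/var/lib/tor/cached-consensus',              # assumed location of a cached consensus
  'network-status-consensus-3 1.0',             # @type annotation for a v3 consensus
  document_handler = DocumentHandler.DOCUMENT,
))

print('valid-after: %s' % consensus.valid_after)
print('known flags: %s' % ', '.join(consensus.known_flags))
print('%i relays listed' % len(consensus.routers))
```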
#### File: site-packages/stem/manual.py
```python
import os
import shutil
import sys
import tempfile
import stem.prereq
import stem.util.conf
import stem.util.enum
import stem.util.log
import stem.util.system
from stem.util import _hash_attr
try:
# added in python 2.7
from collections import OrderedDict
except ImportError:
from stem.util.ordereddict import OrderedDict
try:
# added in python 3.2
from functools import lru_cache
except ImportError:
from stem.util.lru_cache import lru_cache
try:
# account for urllib's change between python 2.x and 3.x
import urllib.request as urllib
except ImportError:
import urllib2 as urllib
Category = stem.util.enum.Enum('GENERAL', 'CLIENT', 'RELAY', 'DIRECTORY', 'AUTHORITY', 'HIDDEN_SERVICE', 'TESTING', 'UNKNOWN')
GITWEB_MANUAL_URL = 'https://gitweb.torproject.org/tor.git/plain/doc/tor.1.txt'
CACHE_PATH = os.path.join(os.path.dirname(__file__), 'cached_tor_manual.sqlite')
DATABASE = None # cache database connections
HAS_ENCODING_ARG = not stem.util.system.is_mac() and not stem.util.system.is_bsd() and not stem.util.system.is_slackware()
SCHEMA_VERSION = 1 # version of our schema, bump this if you change the following
SCHEMA = (
'CREATE TABLE schema(version INTEGER)',
'INSERT INTO schema(version) VALUES (%i)' % SCHEMA_VERSION,
'CREATE TABLE metadata(name TEXT, synopsis TEXT, description TEXT, man_commit TEXT, stem_commit TEXT)',
'CREATE TABLE commandline(name TEXT PRIMARY KEY, description TEXT)',
'CREATE TABLE signals(name TEXT PRIMARY KEY, description TEXT)',
'CREATE TABLE files(name TEXT PRIMARY KEY, description TEXT)',
'CREATE TABLE torrc(key TEXT PRIMARY KEY, name TEXT, category TEXT, usage TEXT, summary TEXT, description TEXT, position INTEGER)',
)
CATEGORY_SECTIONS = OrderedDict((
('GENERAL OPTIONS', Category.GENERAL),
('CLIENT OPTIONS', Category.CLIENT),
('SERVER OPTIONS', Category.RELAY),
('DIRECTORY SERVER OPTIONS', Category.DIRECTORY),
('DIRECTORY AUTHORITY SERVER OPTIONS', Category.AUTHORITY),
('HIDDEN SERVICE OPTIONS', Category.HIDDEN_SERVICE),
('TESTING NETWORK OPTIONS', Category.TESTING),
))
class SchemaMismatch(IOError):
"""
Database schema doesn't match what Stem supports.
.. versionadded:: 1.6.0
:var int database_schema: schema of the database
:var tuple supported_schemas: schemas library supports
"""
def __init__(self, message, database_schema, library_schema):
super(SchemaMismatch, self).__init__(message)
self.database_schema = database_schema
self.library_schema = library_schema
def query(query, *param):
"""
Performs the given query on our sqlite manual cache. This database should
be treated as being read-only. File permissions generally enforce this, and
in the future will be enforced by this function as well.
.. versionadded:: 1.6.0
:param str query: query to run on the cache
:param list param: query parameters
:returns: :class:`sqlite3.Cursor` with the query results
:raises:
* **ImportError** if the sqlite3 module is unavailable
* **sqlite3.OperationalError** if query fails
"""
if not stem.prereq.is_sqlite_available():
raise ImportError('Querying requires the sqlite3 module')
import sqlite3
# The only reason to explicitly close the sqlite connection is to ensure
# transactions are committed. Since we're only using read-only access this
# doesn't matter, and can allow interpreter shutdown to do the needful.
#
# TODO: When we only support python 3.4+ we can use sqlite's uri argument
# to enforce a read-only connection...
#
# https://docs.python.org/3/library/sqlite3.html#sqlite3.connect
global DATABASE
if DATABASE is None:
DATABASE = sqlite3.connect(CACHE_PATH)
return DATABASE.execute(query, param)
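# Read-only usage sketch against the schema above (assumes the bundled
# cached_tor_manual.sqlite exists at CACHE_PATH):
#
#   for name, summary in query('SELECT name, summary FROM torrc ORDER BY position').fetchall():
#     print('%s: %s' % (name, summary))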
class ConfigOption(object):
"""
Tor configuration attribute found in its torrc.
:var str name: name of the configuration option
:var stem.manual.Category category: category the config option was listed
under, this is Category.UNKNOWN if we didn't recognize the category
:var str usage: arguments accepted by the option
:var str summary: brief description of what the option does
:var str description: longer manual description with details
"""
def __init__(self, name, category = Category.UNKNOWN, usage = '', summary = '', description = ''):
self.name = name
self.category = category
self.usage = usage
self.summary = summary
self.description = description
def __hash__(self):
return _hash_attr(self, 'name', 'category', 'usage', 'summary', 'description')
def __eq__(self, other):
return hash(self) == hash(other) if isinstance(other, ConfigOption) else False
def __ne__(self, other):
return not self == other
@lru_cache()
def _config(lowercase = True):
"""
Provides a dictionary for our settings.cfg. This has a couple categories...
* manual.important (list) - configuration options considered to be important
* manual.summary.* (str) - summary descriptions of config options
:param bool lowercase: uses lowercase keys if **True** to allow for case
insensitive lookups
"""
config = stem.util.conf.Config()
config_path = os.path.join(os.path.dirname(__file__), 'settings.cfg')
try:
config.load(config_path)
config_dict = dict([(key.lower() if lowercase else key, config.get_value(key)) for key in config.keys() if key.startswith('manual.summary.')])
config_dict['manual.important'] = [name.lower() if lowercase else name for name in config.get_value('manual.important', [], multiple = True)]
return config_dict
except Exception as exc:
stem.util.log.warn("BUG: stem failed to load its internal manual information from '%s': %s" % (config_path, exc))
return {}
def _manual_differences(previous_manual, new_manual):
"""
Provides a description of how two manuals differ.
"""
lines = []
for attr in ('name', 'synopsis', 'description', 'commandline_options', 'signals', 'files', 'config_options'):
previous_attr = getattr(previous_manual, attr)
new_attr = getattr(new_manual, attr)
if previous_attr != new_attr:
lines.append("* Manual's %s attribute changed\n" % attr)
if attr in ('name', 'synopsis', 'description'):
lines.append(' Previously...\n\n%s\n' % previous_attr)
lines.append(' Updating to...\n\n%s' % new_attr)
elif attr == 'config_options':
for config_name, config_attr in new_attr.items():
previous = previous_attr.get(config_name)
if previous is None:
lines.append(' adding new config option => %s' % config_name)
elif config_attr != previous:
for attr in ('name', 'category', 'usage', 'summary', 'description'):
if getattr(config_attr, attr) != getattr(previous, attr):
lines.append(' modified %s (%s) => %s' % (config_name, attr, getattr(config_attr, attr)))
for config_name in set(previous_attr.keys()).difference(new_attr.keys()):
lines.append(' removing config option => %s' % config_name)
else:
added_items = set(new_attr.items()).difference(previous_attr.items())
removed_items = set(previous_attr.items()).difference(new_attr.items())
for added_item in added_items:
lines.append(' adding %s => %s' % added_item)
for removed_item in removed_items:
lines.append(' removing %s => %s' % removed_item)
lines.append('\n')
return '\n'.join(lines)
def is_important(option):
"""
Indicates if a configuration option is of particularly common importance or not.
:param str option: tor configuration option to check
:returns: **bool** that's **True** if this is an important option and
**False** otherwise
"""
return option.lower() in _config()['manual.important']
def download_man_page(path = None, file_handle = None, url = GITWEB_MANUAL_URL, timeout = 20):
"""
Downloads tor's latest man page from `gitweb.torproject.org
<https://gitweb.torproject.org/tor.git/plain/doc/tor.1.txt>`_. This method is
both slow and unreliable - please see the warnings on
:func:`~stem.manual.Manual.from_remote`.
:param str path: path to save tor's man page to
:param file file_handle: file handler to save tor's man page to
:param str url: url to download tor's asciidoc manual from
:param int timeout: seconds to wait before timing out the request
:raises: **IOError** if unable to retrieve the manual
"""
if not path and not file_handle:
raise ValueError("Either the path or file_handle we're saving to must be provided")
elif not stem.util.system.is_available('a2x'):
raise IOError('We require a2x from asciidoc to provide a man page')
dirpath = tempfile.mkdtemp()
asciidoc_path = os.path.join(dirpath, 'tor.1.txt')
manual_path = os.path.join(dirpath, 'tor.1')
try:
try:
with open(asciidoc_path, 'wb') as asciidoc_file:
request = urllib.urlopen(url, timeout = timeout)
shutil.copyfileobj(request, asciidoc_file)
except:
exc = sys.exc_info()[1]
raise IOError("Unable to download tor's manual from %s to %s: %s" % (url, asciidoc_path, exc))
try:
stem.util.system.call('a2x -f manpage %s' % asciidoc_path)
if not os.path.exists(manual_path):
raise OSError('no man page was generated')
except stem.util.system.CallError as exc:
raise IOError("Unable to run '%s': %s" % (exc.command, exc.stderr))
if path:
try:
path_dir = os.path.dirname(path)
if not os.path.exists(path_dir):
os.makedirs(path_dir)
shutil.copyfile(manual_path, path)
except OSError as exc:
raise IOError(exc)
if file_handle:
with open(manual_path, 'rb') as manual_file:
shutil.copyfileobj(manual_file, file_handle)
file_handle.flush()
finally:
shutil.rmtree(dirpath)
class Manual(object):
"""
Parsed tor man page. Tor makes no guarantees about its man page format so
this may not always be compatible. If not you can use the cached manual
information stored with Stem.
This does not include every bit of information from the tor manual. For
instance, I've excluded the 'THE CONFIGURATION FILE FORMAT' section. If
there's a part you'd find useful then `file an issue
<https://trac.torproject.org/projects/tor/wiki/doc/stem/bugs>`_ and we can
add it.
:var str name: brief description of the tor command
:var str synopsis: brief tor command usage
:var str description: general description of what tor does
:var dict commandline_options: mapping of commandline arguments to their description
:var dict signals: mapping of signals tor accepts to their description
:var dict files: mapping of file paths to their description
:var dict config_options: :class:`~stem.manual.ConfigOption` tuples for tor configuration options
:var str man_commit: latest tor commit editing the man page when this
information was cached
:var str stem_commit: stem commit to cache this manual information
"""
def __init__(self, name, synopsis, description, commandline_options, signals, files, config_options):
self.name = name
self.synopsis = synopsis
self.description = description
self.commandline_options = commandline_options
self.signals = signals
self.files = files
self.config_options = config_options
self.man_commit = None
self.stem_commit = None
self.schema = None
@staticmethod
def from_cache(path = None):
"""
Provides manual information cached with Stem. Unlike
:func:`~stem.manual.Manual.from_man` and
:func:`~stem.manual.Manual.from_remote` this doesn't have any system
requirements, and is faster too. Only drawback is that this manual
content is only as up to date as the Stem release we're using.
.. versionchanged:: 1.6.0
Added support for sqlite cache. Support for
:class:`~stem.util.conf.Config` caches will be dropped in Stem 2.x.
:param str path: cached manual content to read, if not provided this uses
the bundled manual information
:returns: :class:`~stem.manual.Manual` with our bundled manual information
:raises:
* **ImportError** if cache is sqlite and the sqlite3 module is
unavailable
* **IOError** if a **path** was provided and we were unable to read
it or the schema is out of date
"""
# TODO: drop _from_config_cache() with stem 2.x
if path is None:
path = CACHE_PATH
if path is not None and path.endswith('.sqlite'):
return Manual._from_sqlite_cache(path)
else:
return Manual._from_config_cache(path)
@staticmethod
def _from_sqlite_cache(path):
if not stem.prereq.is_sqlite_available():
raise ImportError('Reading a sqlite cache requires the sqlite3 module')
import sqlite3
if not os.path.exists(path):
raise IOError("%s doesn't exist" % path)
with sqlite3.connect(path) as conn:
try:
schema = conn.execute('SELECT version FROM schema').fetchone()[0]
if schema != SCHEMA_VERSION:
raise SchemaMismatch("Stem's current manual schema version is %s, but %s was version %s" % (SCHEMA_VERSION, path, schema), schema, (SCHEMA_VERSION,))
name, synopsis, description, man_commit, stem_commit = conn.execute('SELECT name, synopsis, description, man_commit, stem_commit FROM metadata').fetchone()
except sqlite3.OperationalError as exc:
raise IOError('Failed to read database metadata from %s: %s' % (path, exc))
commandline = dict(conn.execute('SELECT name, description FROM commandline').fetchall())
signals = dict(conn.execute('SELECT name, description FROM signals').fetchall())
files = dict(conn.execute('SELECT name, description FROM files').fetchall())
config_options = OrderedDict()
for entry in conn.execute('SELECT name, category, usage, summary, description FROM torrc ORDER BY position').fetchall():
option, category, usage, summary, option_description = entry
config_options[option] = ConfigOption(option, category, usage, summary, option_description)
manual = Manual(name, synopsis, description, commandline, signals, files, config_options)
manual.man_commit = man_commit
manual.stem_commit = stem_commit
manual.schema = schema
return manual
@staticmethod
def _from_config_cache(path):
conf = stem.util.conf.Config()
conf.load(path, commenting = False)
config_options = OrderedDict()
for key in conf.keys():
if key.startswith('config_options.'):
key = key.split('.')[1]
if key not in config_options:
config_options[key] = ConfigOption(
conf.get('config_options.%s.name' % key, ''),
conf.get('config_options.%s.category' % key, ''),
conf.get('config_options.%s.usage' % key, ''),
conf.get('config_options.%s.summary' % key, ''),
conf.get('config_options.%s.description' % key, '')
)
manual = Manual(
conf.get('name', ''),
conf.get('synopsis', ''),
conf.get('description', ''),
conf.get('commandline_options', {}),
conf.get('signals', {}),
conf.get('files', {}),
config_options,
)
manual.man_commit = conf.get('man_commit', None)
manual.stem_commit = conf.get('stem_commit', None)
return manual
@staticmethod
def from_man(man_path = 'tor'):
"""
Reads and parses a given man page.
On OSX the man command doesn't have an '--encoding' argument so its results
may not quite match other platforms. For instance, it normalizes long
dashes into '--'.
:param str man_path: path argument for 'man', for example you might want
'/path/to/tor/doc/tor.1' to read from tor's git repository
:returns: :class:`~stem.manual.Manual` for the system's man page
:raises: **IOError** if unable to retrieve the manual
"""
man_cmd = 'man %s -P cat %s' % ('--encoding=ascii' if HAS_ENCODING_ARG else '', man_path)
try:
man_output = stem.util.system.call(man_cmd, env = {'MANWIDTH': '10000000'})
except OSError as exc:
raise IOError("Unable to run '%s': %s" % (man_cmd, exc))
categories, config_options = _get_categories(man_output), OrderedDict()
for category_header, category_enum in CATEGORY_SECTIONS.items():
_add_config_options(config_options, category_enum, categories.get(category_header, []))
for category in categories:
if category.endswith(' OPTIONS') and category not in CATEGORY_SECTIONS and category not in ('COMMAND-LINE OPTIONS', 'NON-PERSISTENT OPTIONS'):
_add_config_options(config_options, Category.UNKNOWN, categories.get(category, []))
return Manual(
_join_lines(categories.get('NAME', [])),
_join_lines(categories.get('SYNOPSIS', [])),
_join_lines(categories.get('DESCRIPTION', [])),
_get_indented_descriptions(categories.get('COMMAND-LINE OPTIONS', [])),
_get_indented_descriptions(categories.get('SIGNALS', [])),
_get_indented_descriptions(categories.get('FILES', [])),
config_options,
)
@staticmethod
def from_remote(timeout = 60):
"""
Reads and parses the latest tor man page `from gitweb.torproject.org
<https://gitweb.torproject.org/tor.git/plain/doc/tor.1.txt>`_. Note that
while convenient, this reliance on GitWeb means you should always call with
a fallback, such as...
::
try:
manual = stem.manual.from_remote()
except IOError:
manual = stem.manual.from_cache()
In addition to our GitWeb dependency this requires 'a2x' which is part of
`asciidoc <http://asciidoc.org/INSTALL.html>`_ and... isn't quick.
Personally this takes ~7.41s, breaking down for me as follows...
* 1.67s to download tor.1.txt
* 5.57s to convert the asciidoc to a man page
* 0.17s for stem to read and parse the manual
:param int timeout: seconds to wait before timing out the request
:returns: latest :class:`~stem.manual.Manual` available for tor
:raises: **IOError** if unable to retrieve the manual
"""
with tempfile.NamedTemporaryFile() as tmp:
download_man_page(file_handle = tmp, timeout = timeout)
return Manual.from_man(tmp.name)
def save(self, path):
"""
Persists the manual content to a given location.
.. versionchanged:: 1.6.0
Added support for sqlite cache. Support for
:class:`~stem.util.conf.Config` caches will be dropped in Stem 2.x.
:param str path: path to save our manual content to
:raises:
* **ImportError** if saving as sqlite and the sqlite3 module is
unavailable
* **IOError** if unsuccessful
"""
# TODO: drop _save_as_config() with stem 2.x
if path.endswith('.sqlite'):
return self._save_as_sqlite(path)
else:
return self._save_as_config(path)
def _save_as_sqlite(self, path):
if not stem.prereq.is_sqlite_available():
raise ImportError('Saving a sqlite cache requires the sqlite3 module')
import sqlite3
tmp_path = path + '.new'
if os.path.exists(tmp_path):
os.remove(tmp_path)
with sqlite3.connect(tmp_path) as conn:
for cmd in SCHEMA:
conn.execute(cmd)
conn.execute('INSERT INTO metadata(name, synopsis, description, man_commit, stem_commit) VALUES (?,?,?,?,?)', (self.name, self.synopsis, self.description, self.man_commit, self.stem_commit))
for k, v in self.commandline_options.items():
conn.execute('INSERT INTO commandline(name, description) VALUES (?,?)', (k, v))
for k, v in self.signals.items():
conn.execute('INSERT INTO signals(name, description) VALUES (?,?)', (k, v))
for k, v in self.files.items():
conn.execute('INSERT INTO files(name, description) VALUES (?,?)', (k, v))
for i, v in enumerate(self.config_options.values()):
conn.execute('INSERT INTO torrc(key, name, category, usage, summary, description, position) VALUES (?,?,?,?,?,?,?)', (v.name.upper(), v.name, v.category, v.usage, v.summary, v.description, i))
if os.path.exists(path):
os.remove(path)
os.rename(tmp_path, path)
def _save_as_config(self, path):
conf = stem.util.conf.Config()
conf.set('name', self.name)
conf.set('synopsis', self.synopsis)
conf.set('description', self.description)
if self.man_commit:
conf.set('man_commit', self.man_commit)
if self.stem_commit:
conf.set('stem_commit', self.stem_commit)
for k, v in self.commandline_options.items():
conf.set('commandline_options', '%s => %s' % (k, v), overwrite = False)
for k, v in self.signals.items():
conf.set('signals', '%s => %s' % (k, v), overwrite = False)
for k, v in self.files.items():
conf.set('files', '%s => %s' % (k, v), overwrite = False)
for k, v in self.config_options.items():
conf.set('config_options.%s.category' % k, v.category)
conf.set('config_options.%s.name' % k, v.name)
conf.set('config_options.%s.usage' % k, v.usage)
conf.set('config_options.%s.summary' % k, v.summary)
conf.set('config_options.%s.description' % k, v.description)
conf.save(path)
def __hash__(self):
return _hash_attr(self, 'name', 'synopsis', 'description', 'commandline_options', 'signals', 'files', 'config_options')
def __eq__(self, other):
return hash(self) == hash(other) if isinstance(other, Manual) else False
def __ne__(self, other):
return not self == other
def _get_categories(content):
"""
The man page consists of headers followed by indented sections. The first pass
gets the mapping of category titles to their lines.
"""
# skip header and footer lines
if content and 'TOR(1)' in content[0]:
content = content[1:]
if content and content[-1].startswith('Tor'):
content = content[:-1]
categories = OrderedDict()
category, lines = None, []
for line in content:
# replace non-ascii characters
#
# \u2019 - smart single quote
# \u2014 - extra long dash
# \xb7 - centered dot
char_for = chr if stem.prereq.is_python_3() else unichr
line = line.replace(char_for(0x2019), "'").replace(char_for(0x2014), '-').replace(char_for(0xb7), '*')
if line and not line.startswith(' '):
if category:
if lines and lines[-1] == '':
lines = lines[:-1] # sections end with an extra empty line
categories[category] = lines
category, lines = line.strip(), []
else:
if line.startswith(' '):
line = line[7:] # contents of a section have a seven space indentation
lines.append(line)
if category:
categories[category] = lines
return categories
def _get_indented_descriptions(lines):
"""
Parses the commandline argument and signal sections. These are options
followed by an indented description. For example...
::
-f FILE
Specify a new configuration file to contain further Tor configuration
options OR pass - to make Tor read its configuration from standard
input. (Default: /usr/local/etc/tor/torrc, or $HOME/.torrc if that file
is not found)
There can be additional paragraphs not related to any particular argument;
those are ignored.
"""
options, last_arg = OrderedDict(), None
for line in lines:
if line and not line.startswith(' '):
options[line], last_arg = [], line
elif last_arg and line.startswith(' '):
options[last_arg].append(line[4:])
return dict([(arg, ' '.join(desc_lines)) for arg, desc_lines in options.items() if desc_lines])
def _add_config_options(config_options, category, lines):
"""
Parses a section of tor configuration options. These have usage information,
followed by an indented description. For instance...
::
ConnLimit NUM
The minimum number of file descriptors that must be available to the
Tor process before it will start. Tor will ask the OS for as many file
descriptors as the OS will allow (you can find this by "ulimit -H -n").
If this number is less than ConnLimit, then Tor will refuse to start.
You probably don't need to adjust this. It has no effect on Windows
since that platform lacks getrlimit(). (Default: 1000)
"""
last_option, usage, description = None, None, []
if lines and lines[0].startswith('The following options'):
lines = lines[lines.index(''):] # drop the initial description
for line in lines:
if line and not line.startswith(' '):
if last_option:
summary = _config().get('manual.summary.%s' % last_option.lower(), '')
config_options[last_option] = ConfigOption(last_option, category, usage, summary, _join_lines(description).strip())
if ' ' in line:
last_option, usage = line.split(' ', 1)
else:
last_option, usage = line, ''
description = []
else:
if line.startswith(' '):
line = line[4:]
description.append(line)
if last_option:
summary = _config().get('manual.summary.%s' % last_option.lower(), '')
config_options[last_option] = ConfigOption(last_option, category, usage, summary, _join_lines(description).strip())
def _join_lines(lines):
"""
Simple join, except we want empty lines to still provide a newline.
"""
result = []
for line in lines:
if not line:
if result and result[-1] != '\n':
result.append('\n')
else:
result.append(line + '\n')
return ''.join(result).strip()
```
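The module above both parses tor's man page and ships a pre-built sqlite cache. As a quick orientation, here is a minimal usage sketch, assuming the stem package is installed with its bundled cache present; every call shown (`Manual.from_cache()`, `is_important()`, the `ConfigOption` attributes) is defined in the file above.

```python
# List the torrc options stem flags as important, using the bundled cache.
import stem.manual

manual = stem.manual.Manual.from_cache()

for name, option in manual.config_options.items():
    if stem.manual.is_important(name):
        print('%s (%s): %s' % (name, option.category, option.summary))
```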
#### File: stem/util/conf.py
```python
import inspect
import os
import threading
import stem.prereq
from stem.util import log
try:
# added in python 2.7
from collections import OrderedDict
except ImportError:
from stem.util.ordereddict import OrderedDict
CONFS = {} # mapping of identifier to singleton instances of configs
class _SyncListener(object):
def __init__(self, config_dict, interceptor):
self.config_dict = config_dict
self.interceptor = interceptor
def update(self, config, key):
if key in self.config_dict:
new_value = config.get(key, self.config_dict[key])
if new_value == self.config_dict[key]:
return # no change
if self.interceptor:
interceptor_value = self.interceptor(key, new_value)
if interceptor_value:
new_value = interceptor_value
self.config_dict[key] = new_value
def config_dict(handle, conf_mappings, handler = None):
"""
Makes a dictionary that stays synchronized with a configuration.
This takes a dictionary of 'config_key => default_value' mappings and
changes the values to reflect our current configuration. This will leave
the previous values alone if...
* we don't have a value for that config_key
* we can't convert our value to be the same type as the default_value
If a handler is provided then this is called just prior to assigning new
values to the config_dict. The handler function is expected to accept the
(key, value) for the new values and return what we should actually insert
into the dictionary. If this returns None then the value is updated as
normal.
For more information about how we convert types see our
:func:`~stem.util.conf.Config.get` method.
**The dictionary you get from this is managed by the Config class and should
be treated as being read-only.**
:param str handle: unique identifier for a config instance
:param dict conf_mappings: config key/value mappings used as our defaults
:param functor handler: function referred to prior to assigning values
"""
selected_config = get_config(handle)
selected_config.add_listener(_SyncListener(conf_mappings, handler).update)
return conf_mappings
def get_config(handle):
"""
Singleton constructor for configuration file instances. If a configuration
already exists for the handle then it's returned. Otherwise a fresh instance
is constructed.
:param str handle: unique identifier used to access this config instance
"""
if handle not in CONFS:
CONFS[handle] = Config()
return CONFS[handle]
def uses_settings(handle, path, lazy_load = True):
"""
Provides a function that can be used as a decorator for other functions that
require settings to be loaded. Functions with this decorator will be provided
with the configuration as its 'config' keyword argument.
.. versionchanged:: 1.3.0
Omits the 'config' argument if the function we're decorating doesn't accept
it.
::
uses_settings = stem.util.conf.uses_settings('my_app', '/path/to/settings.cfg')
@uses_settings
def my_function(config):
print 'hello %s!' % config.get('username', '')
:param str handle: handle for the configuration
:param str path: path where the configuration should be loaded from
:param bool lazy_load: loads the configuration file when the decorator is
used if true, otherwise it's loaded right away
:returns: **function** that can be used as a decorator to provide the
configuration
:raises: **IOError** if we fail to read the configuration file, if
**lazy_load** is true then this arises when we use the decorator
"""
config = get_config(handle)
if not lazy_load and not config._settings_loaded:
config.load(path)
config._settings_loaded = True
def decorator(func):
def wrapped(*args, **kwargs):
if lazy_load and not config._settings_loaded:
config.load(path)
config._settings_loaded = True
if 'config' in inspect.getargspec(func).args:
return func(*args, config = config, **kwargs)
else:
return func(*args, **kwargs)
return wrapped
return decorator
def parse_enum(key, value, enumeration):
"""
Provides the enumeration value for a given key. This is a case insensitive
lookup and raises an exception if the enum key doesn't exist.
:param str key: configuration key being looked up
:param str value: value to be parsed
:param stem.util.enum.Enum enumeration: enumeration the values should be in
:returns: enumeration value
:raises: **ValueError** if the **value** isn't among the enumeration keys
"""
return parse_enum_csv(key, value, enumeration, 1)[0]
def parse_enum_csv(key, value, enumeration, count = None):
"""
Parses a given value as being a comma separated listing of enumeration keys,
returning the corresponding enumeration values. This is intended to be a
helper for config handlers. The checks this does are case insensitive.
The **count** attribute can be used to make assertions based on the number of
values. This can be...
* None to indicate that there are no restrictions.
* An int to indicate that we should have this many values.
* An (int, int) tuple to indicate the range that values can be in. This range
is inclusive and either can be None to indicate the lack of a lower or
upper bound.
:param str key: configuration key being looked up
:param str value: value to be parsed
:param stem.util.enum.Enum enumeration: enumeration the values should be in
:param int,tuple count: validates that we have this many items
:returns: list with the enumeration values
:raises: **ValueError** if the count assertion fails or the **value** entries
don't match the enumeration keys
"""
values = [val.upper().strip() for val in value.split(',')]
if values == ['']:
return []
if count is None:
pass # no count validation checks to do
elif isinstance(count, int):
if len(values) != count:
raise ValueError("Config entry '%s' is expected to be %i comma separated values, got '%s'" % (key, count, value))
elif isinstance(count, tuple) and len(count) == 2:
minimum, maximum = count
if minimum is not None and len(values) < minimum:
raise ValueError("Config entry '%s' must have at least %i comma separated values, got '%s'" % (key, minimum, value))
if maximum is not None and len(values) > maximum:
raise ValueError("Config entry '%s' can have at most %i comma separated values, got '%s'" % (key, maximum, value))
else:
raise ValueError("The count must be None, an int, or two value tuple. Got '%s' (%s)'" % (count, type(count)))
result = []
enum_keys = [k.upper() for k in list(enumeration.keys())]
enum_values = list(enumeration)
for val in values:
if val in enum_keys:
result.append(enum_values[enum_keys.index(val)])
else:
raise ValueError("The '%s' entry of config entry '%s' wasn't in the enumeration (expected %s)" % (val, key, ', '.join(enum_keys)))
return result
class Config(object):
"""
Handler for easily working with custom configurations, providing persistence
to and from files. All operations are thread safe.
**Example usage:**
User has a file at '/home/atagar/myConfig' with...
::
destination.ip 1.2.3.4
destination.port blarg
startup.run export PATH=$PATH:~/bin
startup.run alias l=ls
And they have a script with...
::
from stem.util import conf
# Configuration values we'll use in this file. These are mappings of
# configuration keys to the default values we'll use if the user doesn't
# have something different in their config file (or it doesn't match this
# type).
ssh_config = conf.config_dict('ssh_login', {
'login.user': 'atagar',
'login.password': '<PASSWORD>!',
'destination.ip': '127.0.0.1',
'destination.port': 22,
'startup.run': [],
})
# Makes an empty config instance with the handle of 'ssh_login'. This is
# a singleton so other classes can fetch this same configuration from
# this handle.
user_config = conf.get_config('ssh_login')
# Loads the user's configuration file, warning if this fails.
try:
user_config.load("/home/atagar/myConfig")
except IOError as exc:
print "Unable to load the user's config: %s" % exc
# This replaces the contents of ssh_config with the values from the user's
# config file if...
#
# * the key is present in the config file
# * we're able to convert the configuration file's value to the same type
# as what's in the mapping (see the Config.get() method for how these
# type inferences work)
#
# For instance in this case...
#
# * the login values are left alone because they aren't in the user's
# config file
#
# * the 'destination.port' is also left with the value of 22 because we
# can't turn "blarg" into an integer
#
# The other values are replaced, so ssh_config now becomes...
#
# {'login.user': 'atagar',
# 'login.password': '<PASSWORD>!',
# 'destination.ip': '1.2.3.4',
# 'destination.port': 22,
# 'startup.run': ['export PATH=$PATH:~/bin', 'alias l=ls']}
#
# Information for what values fail to load and why are reported to
# 'stem.util.log'.
"""
def __init__(self):
self._path = None # location we last loaded from or saved to
self._contents = OrderedDict() # configuration key/value pairs
self._listeners = [] # functors to be notified of config changes
# used for accessing _contents
self._contents_lock = threading.RLock()
# keys that have been requested (used to provide unused config contents)
self._requested_keys = set()
# flag to support lazy loading in uses_settings()
self._settings_loaded = False
def load(self, path = None, commenting = True):
"""
Reads in the contents of the given path, adding its configuration values
to our current contents. If the path is a directory then this loads each
of the files, recursively.
.. versionchanged:: 1.3.0
Added support for directories.
.. versionchanged:: 1.3.0
Added the **commenting** argument.
.. versionchanged:: 1.6.0
Avoid loading vim swap files.
:param str path: file or directory path to be loaded, this uses the last
loaded path if not provided
:param bool commenting: ignore line content after a '#' if **True**, read
otherwise
:raises:
* **IOError** if we fail to read the file (it doesn't exist, insufficient
permissions, etc)
* **ValueError** if no path was provided and we've never been provided one
"""
if path:
self._path = path
elif not self._path:
raise ValueError('Unable to load configuration: no path provided')
if os.path.isdir(self._path):
for root, dirnames, filenames in os.walk(self._path):
for filename in filenames:
if filename.endswith('.swp'):
continue # vim swap file
self.load(os.path.join(root, filename))
return
with open(self._path, 'r') as config_file:
read_contents = config_file.readlines()
with self._contents_lock:
while read_contents:
line = read_contents.pop(0)
# strips any commenting or excess whitespace
comment_start = line.find('#') if commenting else -1
if comment_start != -1:
line = line[:comment_start]
line = line.strip()
# parse the key/value pair
if line:
if ' ' in line:
key, value = line.split(' ', 1)
self.set(key, value.strip(), False)
else:
# this might be a multi-line entry, try processing it as such
multiline_buffer = []
while read_contents and read_contents[0].lstrip().startswith('|'):
content = read_contents.pop(0).lstrip()[1:] # removes '\s+|' prefix
content = content.rstrip('\n') # trailing newline
multiline_buffer.append(content)
if multiline_buffer:
self.set(line, '\n'.join(multiline_buffer), False)
else:
self.set(line, '', False) # default to a key => '' mapping
def save(self, path = None):
"""
Saves configuration contents to disk. If a path is provided then it
replaces the configuration location that we track.
:param str path: location to be saved to
:raises:
* **IOError** if we fail to save the file (insufficient permissions, etc)
* **ValueError** if no path was provided and we've never been provided one
"""
if path:
self._path = path
elif not self._path:
raise ValueError('Unable to save configuration: no path provided')
with self._contents_lock:
if not os.path.exists(os.path.dirname(self._path)):
os.makedirs(os.path.dirname(self._path))
with open(self._path, 'w') as output_file:
for entry_key in self.keys():
for entry_value in self.get_value(entry_key, multiple = True):
# check for multi line entries
if '\n' in entry_value:
entry_value = '\n|' + entry_value.replace('\n', '\n|')
output_file.write('%s %s\n' % (entry_key, entry_value))
def clear(self):
"""
Drops the configuration contents and reverts back to a blank, unloaded
state.
"""
with self._contents_lock:
self._contents.clear()
self._requested_keys = set()
def add_listener(self, listener, backfill = True):
"""
Registers the function to be notified of configuration updates. Listeners
are expected to be functors which accept (config, key).
:param functor listener: function to be notified when our configuration is changed
:param bool backfill: calls the function with our current values if **True**
"""
with self._contents_lock:
self._listeners.append(listener)
if backfill:
for key in self.keys():
listener(self, key)
def clear_listeners(self):
"""
Removes all attached listeners.
"""
self._listeners = []
def keys(self):
"""
Provides all keys in the currently loaded configuration.
:returns: **list** of strings for the configuration keys we've loaded
"""
return list(self._contents.keys())
def unused_keys(self):
"""
Provides the configuration keys that have never been provided to a caller
via :func:`~stem.util.conf.config_dict` or the
:func:`~stem.util.conf.Config.get` and
:func:`~stem.util.conf.Config.get_value` methods.
:returns: **set** of configuration keys we've loaded but have never been requested
"""
return set(self.keys()).difference(self._requested_keys)
def set(self, key, value, overwrite = True):
"""
Appends the given key/value configuration mapping, behaving the same as if
we'd loaded this from a configuration file.
.. versionchanged:: 1.5.0
Allow removal of values by overwriting with a **None** value.
:param str key: key for the configuration mapping
:param str,list value: value we're setting the mapping to
:param bool overwrite: replaces the previous value if **True**, otherwise
the values are appended
"""
with self._contents_lock:
unicode_type = str if stem.prereq.is_python_3() else unicode
if value is None:
if overwrite and key in self._contents:
del self._contents[key]
else:
pass # no value so this is a no-op
elif isinstance(value, (bytes, unicode_type)):
if not overwrite and key in self._contents:
self._contents[key].append(value)
else:
self._contents[key] = [value]
for listener in self._listeners:
listener(self, key)
elif isinstance(value, (list, tuple)):
if not overwrite and key in self._contents:
self._contents[key] += value
else:
self._contents[key] = value
for listener in self._listeners:
listener(self, key)
else:
raise ValueError("Config.set() only accepts str (bytes or unicode), list, or tuple. Provided value was a '%s'" % type(value))
def get(self, key, default = None):
"""
Fetches the given configuration, using the key and default value to
determine the type it should be. Recognized inferences are:
* **default is a boolean => boolean**
* values are case insensitive
* provides the default if the value isn't "true" or "false"
* **default is an integer => int**
* provides the default if the value can't be converted to an int
* **default is a float => float**
* provides the default if the value can't be converted to a float
* **default is a list => list**
* string contents for all configuration values with this key
* **default is a tuple => tuple**
* string contents for all configuration values with this key
* **default is a dictionary => dict**
* values without "=>" in them are ignored
* values are split into key/value pairs on "=>" with extra whitespace
stripped
:param str key: config setting to be fetched
:param object default: value provided if no such key exists or fails to be converted
:returns: given configuration value with its type inferred with the above rules
"""
is_multivalue = isinstance(default, (list, tuple, dict))
val = self.get_value(key, default, is_multivalue)
if val == default:
return val # don't try to infer undefined values
if isinstance(default, bool):
if val.lower() == 'true':
val = True
elif val.lower() == 'false':
val = False
else:
log.debug("Config entry '%s' is expected to be a boolean, defaulting to '%s'" % (key, str(default)))
val = default
elif isinstance(default, int):
try:
val = int(val)
except ValueError:
log.debug("Config entry '%s' is expected to be an integer, defaulting to '%i'" % (key, default))
val = default
elif isinstance(default, float):
try:
val = float(val)
except ValueError:
log.debug("Config entry '%s' is expected to be a float, defaulting to '%f'" % (key, default))
val = default
elif isinstance(default, list):
val = list(val) # make a shallow copy
elif isinstance(default, tuple):
val = tuple(val)
elif isinstance(default, dict):
val_map = OrderedDict()
for entry in val:
if '=>' in entry:
entry_key, entry_val = entry.split('=>', 1)
val_map[entry_key.strip()] = entry_val.strip()
else:
log.debug('Ignoring invalid %s config entry (expected a mapping, but "%s" was missing "=>")' % (key, entry))
val = val_map
return val
def get_value(self, key, default = None, multiple = False):
"""
This provides the current value associated with a given key.
:param str key: config setting to be fetched
:param object default: value provided if no such key exists
:param bool multiple: provides back a list of all values if **True**,
otherwise this returns the last loaded configuration value
:returns: **str** or **list** of string configuration values associated
with the given key, providing the default if no such key exists
"""
with self._contents_lock:
if key in self._contents:
self._requested_keys.add(key)
if multiple:
return self._contents[key]
else:
return self._contents[key][-1]
else:
message_id = 'stem.util.conf.missing_config_key_%s' % key
log.log_once(message_id, log.TRACE, "config entry '%s' not found, defaulting to '%s'" % (key, default))
return default
``` |
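The handler hook of `config_dict()` above is easiest to see with a small sketch. The 'my_app' handle and 'ui.color' key are hypothetical and this assumes stem is installed; `parse_enum()` and `get_config()` are the helpers defined in this same module.

```python
import stem.util.conf
import stem.util.enum

Color = stem.util.enum.Enum('RED', 'GREEN', 'BLUE')

def _to_color(key, value):
    # interceptor: convert the raw string into an enum value before it
    # lands in the synchronized dictionary
    if key == 'ui.color':
        return stem.util.conf.parse_enum(key, value, Color)

settings = stem.util.conf.config_dict('my_app', {'ui.color': Color.RED}, _to_color)
config = stem.util.conf.get_config('my_app')
config.set('ui.color', 'green')

print(settings['ui.color'])  # Color.GREEN
```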
{
"source": "jof/pystack",
"score": 2
} |
#### File: jof/pystack/test_pystack.py
```python
from __future__ import absolute_import
import sys
import subprocess
import platform
import time
from pytest import fixture, mark, param, raises
from distutils.spawn import find_executable
from click.testing import CliRunner
from pystack import (
cli_main, tolerate_missing_locale, find_debugger, DebuggerNotFound)
skipif_non_gdb = mark.skipif(
not find_executable('gdb'), reason='gdb not found')
skipif_non_lldb = mark.skipif(
not find_executable('lldb'), reason='lldb not found')
skipif_darwin = mark.skipif(
platform.system().lower() == 'darwin', reason='gdb on darwin is unstable')
STATEMENTS = {
'sleep': '__import__("time").sleep(360)',
}
@fixture
def process(request):
args = [sys.executable, '-c', request.param]
process = subprocess.Popen(args)
try:
time.sleep(1)
yield process
finally:
process.terminate()
process.wait()
@fixture
def cli():
tolerate_missing_locale()
return CliRunner()
def test_find_debugger():
assert find_debugger('sh') == '/bin/sh'
with raises(DebuggerNotFound) as error:
find_debugger('shhhhhhhhhhhhhhhhhhhhhhhhh')
assert error.value.args[0] == (
'Could not find "shhhhhhhhhhhhhhhhhhhhhhhhh" in your'
' PATH environment variable')
@mark.parametrize(('process', 'debugger'), [
param(STATEMENTS['sleep'], 'gdb', marks=[skipif_non_gdb, skipif_darwin]),
param(STATEMENTS['sleep'], 'lldb', marks=skipif_non_lldb),
], indirect=['process'])
def test_smoke(cli, process, debugger):
result = cli.invoke(cli_main, [str(process.pid), '--debugger', debugger])
assert not result.exception
assert result.exit_code == 0
assert ' File "<string>", line 1, in <module>\n' in result.output
@mark.parametrize('process', [STATEMENTS['sleep']], indirect=['process'])
def test_smoke_debugger_not_found(cli, mocker, process):
mocker.patch('pystack.find_debugger', side_effect=DebuggerNotFound('oops'))
result = cli.invoke(cli_main, [str(process.pid)])
assert result.exit_code == 1
assert 'DebuggerNotFound: oops' in result.output
``` |
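These tests lean on pytest's indirect parametrization to feed statements into the `process` fixture. Here is a self-contained illustration of that pattern, unrelated to pystack itself (the `number` fixture and its values are made up for the example):

```python
import pytest

@pytest.fixture
def number(request):
    # with indirect parametrization, the value from parametrize() arrives
    # here as request.param before the test sees it
    return request.param * 2

@pytest.mark.parametrize('number', [1, 2, 3], indirect=['number'])
def test_number(number):
    assert number in (2, 4, 6)
```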
{
"source": "JoFrhwld/python-acoustic-similarity",
"score": 3
} |
#### File: analysis/pitch/praat.py
```python
import sys
import os
from ..praat import run_script, read_praat_out
from ..helper import fix_time_points, ASTemporaryWavFile
def file_to_pitch_praat(file_path, praat_path=None, time_step=0.01, min_pitch=75, max_pitch=600):
script_dir = os.path.dirname(os.path.abspath(__file__))
script = os.path.join(script_dir, 'pitch.praat')
if praat_path is None:
praat_path = 'praat'
if sys.platform == 'win32':
praat_path += 'con.exe'
listing = run_script(praat_path, script, file_path, time_step, min_pitch, max_pitch)
output = read_praat_out(listing)
return output
def signal_to_pitch_praat(signal, sr, praat_path=None,
time_step=0.01, min_pitch=75, max_pitch=600, begin=None, padding=None):
with ASTemporaryWavFile(signal, sr) as wav_path:
output = file_to_pitch_praat(wav_path, praat_path, time_step, min_pitch, max_pitch)
duration = signal.shape[0] / sr
return fix_time_points(output, begin, padding, duration)
```
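A hedged call sketch for the helpers above. It assumes Praat is installed and discoverable on PATH as 'praat', that a local 'speech.wav' exists, and that the module is importable as acousticsim.analysis.pitch.praat as the package layout suggests.

```python
from acousticsim.analysis.pitch.praat import file_to_pitch_praat

# runs the bundled pitch.praat script and parses its listing
pitch_track = file_to_pitch_praat('speech.wav', time_step=0.01,
                                  min_pitch=75, max_pitch=600)
print(pitch_track)  # whatever read_praat_out() produced for the listing
```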
#### File: acousticsim/analysis/specgram.py
```python
from numpy import log10,zeros,abs,arange, hanning, pad, spacing, ceil, log, floor
from numpy.fft import fft
import librosa
from acousticsim.representations.base import Representation
from .helper import preemphasize
def signal_to_powerspec(signal, sr, win_len, time_step, alpha=0.97):
x = preemphasize(signal, alpha)
nperseg = int(win_len * sr)
if nperseg % 2 != 0:
nperseg -= 1
nperstep = int(time_step*sr)
nfft = int(2**(ceil(log(nperseg)/log(2))))
window = hanning(nperseg+2)[1:nperseg+1]
halfperseg = int(nperseg/2)
indices = arange(halfperseg, x.shape[0] - (halfperseg + 1), nperstep)
num_frames = len(indices)
pspec = {}
for i in range(num_frames):
X = x[indices[i] - halfperseg:indices[i] + halfperseg]
X = X * window
fx = fft(X, n = nfft)
power = abs(fx[:int(nfft/2)+1])**2
pspec[indices[i] / sr] = power
return pspec
def file_to_powerspec(file_path, win_len, time_step, alpha = 0.97):
signal, sr = librosa.load(file_path, sr=None, mono=False)
output = signal_to_powerspec(signal, sr, win_len, time_step, alpha)
return output
```
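A short sketch of the power-spectrum helper above; 'speech.wav' is a hypothetical mono input file.

```python
from acousticsim.analysis.specgram import file_to_powerspec

pspec = file_to_powerspec('speech.wav', win_len=0.025, time_step=0.01)
for time_point, power in sorted(pspec.items())[:3]:
    # one half-spectrum of nfft/2 + 1 bins per analysis frame
    print(time_point, power.shape)
```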
#### File: acousticsim/clustering/network.py
```python
from collections import OrderedDict
from numpy import zeros, array, abs
from numpy.random import RandomState
from sklearn import metrics
from networkx import Graph, empty_graph
from sklearn import manifold
from sklearn.decomposition import PCA
from .affinity import affinity_cluster
class ClusterNetwork(object):
def __init__(self, reps):
self.g = Graph()
self.N = len(reps.keys())
nodes = []
self.lookup = {}
self.attributes = None
for i,r in enumerate(sorted(reps.keys())):
self.lookup[r] = i
if self.attributes is None:
self.attributes = list(reps[r].attributes.keys())
nodes.append((i,{'rep':reps[r]}))
self.g.add_nodes_from(nodes)
self.clusters = None
def __iter__(self):
for i,d in self.g.nodes_iter(data=True):
yield d
def __len__(self):
return self.N
def __getitem__(self, key):
if isinstance(key,str):
return self.g.node[self.lookup[key]]
elif isinstance(key,tuple):
return self.simMat[key]
return self.g.node[key]
def cluster(self,scores,cluster_method,oneCluster):
#Clear any edges
self.g.remove_edges_from(list(self.g.edges_iter(data=False)))
if cluster_method is None:
return
if scores is not None:
self.simMat = zeros((self.N,self.N))
for k,v in scores.items():
indOne = self.lookup[k[0]]
indTwo = self.lookup[k[1]]
self.simMat[indOne,indTwo] = v
self.simMat[indTwo,indOne] = v
self.simMat = -1 * self.simMat
if cluster_method == 'affinity':
true_labels = array([ self[i]['rep']._true_label for i in range(self.N)])
self.clusters = affinity_cluster(self.simMat,true_labels,oneCluster)
edges = []
for k,v in self.clusters.items():
for v2 in v:
if v2[0] == k:
continue
edges.append((k,v2[0],v2[1]))
elif cluster_method == 'complete':
edges = []
for i in range(self.N):
for j in range(i+1,self.N):
edges.append((i,j,self.simMat[i,j]))
self.g.add_weighted_edges_from(edges)
seed = RandomState(seed=3)
mds = manifold.MDS(n_components=2, max_iter=3000, eps=1e-9, random_state=seed,
dissimilarity="precomputed", n_jobs=4)
pos = mds.fit(-1 * self.simMat).embedding_
clf = PCA(n_components=2)
pos = clf.fit_transform(pos)
for i,p in enumerate(pos):
self.g.node[i]['pos'] = p
def calc_reduction(self):
if self.clusters is None:
return
means = {}
reverse_mapping = {}
for k,v in self.clusters.items():
s = 0
for ind in v:
reverse_mapping[ind[0]] = k
s += ind[1]
means[k] = s/len(v)
for i in self.g.nodes_iter():
clust_center = reverse_mapping[i]
if i == clust_center:
self.g.node[i]['HyperHypoMeasure'] = 0
continue
dist = self.g[i][clust_center]['weight']
norm_dist = abs(dist - means[clust_center])
len_diff = self[clust_center]['rep'].shape[0] - self[i]['rep'].shape[0] # nodes store their representation under the 'rep' key
if len_diff < 0:
norm_dist *= -1
self.g.node[i]['HyperHypoMeasure'] = norm_dist
if 'HyperHypoMeasure' not in self.attributes:
self.attributes.append('HyperHypoMeasure')
def get_edges(self):
return array(self.g.edges(data=False))
def labels(self):
labels = list(range(len(self.g)))
for k,v in self.clusters.items():
for v2 in v:
labels[v2[0]] = k
true_labels = list()
for i in range(len(labels)):
true_labels.append(self[i]['rep']._true_label)
levels = {x:i for i,x in enumerate(set(true_labels))}
for i in range(len(true_labels)):
true_labels[i] = levels[true_labels[i]]
return array(labels),array(true_labels)
def silhouette_coefficient(self):
labels,true_labels = self.labels()
return metrics.silhouette_score(self.simMat, labels, metric = 'precomputed')
def homogeneity(self):
labels,true_labels = self.labels()
return metrics.homogeneity_score(true_labels, labels)
def completeness(self):
labels,true_labels = self.labels()
return metrics.completeness_score(true_labels, labels)
def v_score(self):
labels,true_labels = self.labels()
return metrics.v_measure_score(true_labels, labels)
def adjusted_mutual_information(self):
labels,true_labels = self.labels()
return metrics.adjusted_mutual_info_score(true_labels, labels)
def adjusted_rand_score(self):
labels,true_labels = self.labels()
return metrics.adjusted_rand_score(true_labels, labels)
```
#### File: python-acoustic-similarity/acousticsim/exceptions.py
```python
import sys
import traceback
class AcousticSimError(Exception):
def __init__(self, value = None):
if value is not None:
self.value = value
else:
self.value = 'There was an error with acoustic similarity.'
def __str__(self):
return self.value
class NoWavError(AcousticSimError):
def __init__(self, directory, files):
self.value = 'No wav files were found.'
self.main = self.value
self.information = 'The directory \'{}\' did not contain any wav files.'.format(directory)
self.details = 'The following files were found in {}:\n\n'.format(directory)
for f in files:
self.details += '{}\n'.format(f)
class MfccError(AcousticSimError):
pass
class AcousticSimPythonError(AcousticSimError):
"""
Exception wrapper around unanticipated exceptions to better display
them to users.
Parameters
----------
details : list of str
Formatted traceback lines for the uncaught exception, to be output in a way that can be interpreted
"""
def __init__(self, details):
self.main = 'Something went wrong that wasn\'t handled by acousticsim.'
self.information = 'Please forward to the details below to the developers.'
self.details = ''.join(details)
def __str__(self):
return '\n'.join([self.main, self.information, self.details])
class AcousticSimPraatError(AcousticSimError):
pass
```
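The AcousticSimPythonError wrapper above expects pre-formatted traceback lines. A sketch of the intended wrapping pattern, inferred from the sys/traceback imports in this file (the original call sites are not shown here):

```python
import sys
import traceback
from acousticsim.exceptions import AcousticSimPythonError

try:
    1 / 0
except Exception:
    wrapped = AcousticSimPythonError(traceback.format_exception(*sys.exc_info()))
    print(wrapped)  # main message, pointer to the developers, full traceback text
```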
#### File: python-acoustic-similarity/acousticsim/helper.py
```python
import os
from multiprocessing import Process, Manager, Queue, cpu_count, Value, Lock, JoinableQueue
import time
from queue import Empty, Full
from collections import OrderedDict
from numpy import zeros
from functools import partial
from acousticsim.representations import Envelopes, Mfcc
from acousticsim.distance import dtw_distance, xcorr_distance, dct_distance
from acousticsim.exceptions import AcousticSimError,NoWavError
from acousticsim.multiprocessing import generate_cache, calc_asim
from textgrid import TextGrid
def _build_to_rep(**kwargs):
rep = kwargs.get('rep', 'mfcc')
num_filters = kwargs.get('num_filters',None)
num_coeffs = kwargs.get('num_coeffs', 20)
min_freq = kwargs.get('min_freq', 80)
max_freq = kwargs.get('max_freq', 7800)
win_len = kwargs.get('win_len', None)
time_step = kwargs.get('time_step', None)
use_power = kwargs.get('use_power', True)
if num_filters is None:
if rep == 'envelopes':
num_filters = 8
else:
num_filters = 26
if win_len is None:
win_len = 0.025
if time_step is None:
time_step = 0.01
if rep == 'envelopes':
to_rep = partial(Envelopes,
num_bands=num_filters,
min_freq=min_freq, max_freq=max_freq)
elif rep == 'mfcc':
to_rep = partial(Mfcc, min_freq=min_freq, max_freq = max_freq,
num_coeffs=num_coeffs,
num_filters = num_filters,
win_len=win_len,
time_step=time_step,
use_power = use_power)
elif rep in ['mhec','gammatone','melbank','formants','pitch','prosody']:
raise(NotImplementedError)
else:
raise(Exception("The type of representation must be one of: 'envelopes', 'mfcc'."))
#elif rep == 'mhec':
# to_rep = partial(to_mhec, freq_lims=freq_lims,
# num_coeffs=num_coeffs,
# num_filters = num_filters,
# window_length=win_len,
# time_step=time_step,
# use_power = use_power)
#elif rep == 'gammatone':
#if use_window:
#to_rep = partial(to_gammatone_envelopes,num_bands = num_filters,
#freq_lims=freq_lims,
#window_length=win_len,
#time_step=time_step)
#else:
#to_rep = partial(to_gammatone_envelopes,num_bands = num_filters,
#freq_lims=freq_lims)
#elif rep == 'melbank':
#to_rep = partial(to_melbank,freq_lims=freq_lims,
#win_len=win_len,
#time_step=time_step,
#num_filters = num_filters)
#elif rep == 'prosody':
#to_rep = partial(to_prosody,time_step=time_step)
return to_rep
def load_attributes(path):
from csv import DictReader
outdict = OrderedDict()
with open(path,'r') as f:
reader = DictReader(f,delimiter='\t')
for line in reader:
name = line['filename']
del line['filename']
linedict = OrderedDict()
for k in reader.fieldnames:
if k == 'filename':
continue
try:
linedict[k] = float(line[k])
except ValueError:
linedict[k] = line[k]
outdict[name] = linedict
return outdict
def get_vowel_points(textgrid_path, tier_name = 'Vowel', vowel_label = 'V'):
tg = TextGrid()
tg.read(textgrid_path)
vowel_tier = tg.getFirst(tier_name)
for i in vowel_tier:
if i.mark == vowel_label:
begin = i.minTime
end = i.maxTime
break
else:
raise(AcousticSimError('No vowel label was found in \'{}\'.'.format(textgrid_path)))
return begin, end
```
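A small example of load_attributes() above. 'attributes.txt' is a hypothetical tab-delimited file with a 'filename' column; per the code, numeric fields are converted to floats and everything else stays a string.

```python
from acousticsim.helper import load_attributes

attributes = load_attributes('attributes.txt')
for filename, fields in attributes.items():
    print(filename, fields)  # fields is an OrderedDict of column -> value
```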
#### File: acousticsim/representations/amplitude_envelopes.py
```python
import numpy as np
from acousticsim.representations.base import Representation
from ..analysis.amplitude_envelopes import file_to_amplitude_envelopes
from acousticsim.exceptions import AcousticSimError
class Envelopes(Representation):
"""Generate amplitude envelopes from a full path to a .wav, following
Lewandowski (2012).
Parameters
----------
file_path : str
Full path to .wav file to process.
num_bands : int
Number of frequency bands to use.
min_freq : int
Minimum frequency in Hertz
max_freq : int
Maximum frequency in Hertz
"""
def __init__(self, file_path, num_bands, min_freq, max_freq, data=None, attributes=None):
Representation.__init__(self, file_path, data, attributes)
self.num_bands = num_bands
self.min_freq = min_freq
self.max_freq = max_freq
def window(self, win_len, time_step):
if self.is_windowed:
return
nperseg = int(win_len * self.sr)
if nperseg % 2 != 0:
nperseg -= 1
nperstep = int(time_step * self.sr)
halfperseg = int(nperseg/2)
num_samps, num_bands = self.shape
indices = np.arange(halfperseg, num_samps - halfperseg + 1, nperstep)
num_frames = len(indices)
new_rep = {}
for i in range(num_frames):
time_key = indices[i]/self.sr
rep_line = list()
array = self[indices[i] - halfperseg, indices[i] + halfperseg]
for b in range(num_bands):
rep_line.append(sum(array[:, b]))
new_rep[time_key] = rep_line
self.data = new_rep
self.is_windowed = True
def process(self, reset=False):
if reset:
self.data = {}
if self.data:
raise AcousticSimError('Data already exists for this representation, use reset=True to generate new data.')
self.data = file_to_amplitude_envelopes(self.file_path, self.num_bands, self.min_freq, self.max_freq)
```
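A brief usage sketch of the representation above, assuming a local 'speech.wav'; the 8 bands between 80 and 7800 Hz mirror the envelope defaults used by _build_to_rep in acousticsim.helper.

```python
from acousticsim.representations.amplitude_envelopes import Envelopes

env = Envelopes('speech.wav', num_bands=8, min_freq=80, max_freq=7800)
env.process()  # fills env.data via file_to_amplitude_envelopes()
```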
#### File: python-acoustic-similarity/tests/test_main_func.py
```python
import pytest
from acousticsim.main import (acoustic_similarity_mapping,
acoustic_similarity_directories,
analyze_file_segments,
analyze_directory, analyze_long_file)
slow = pytest.mark.skipif(
not pytest.config.getoption("--runslow"),
reason="need --runslow option to run"
)
# @slow
def test_analyze_directory(soundfiles_dir, call_back):
kwargs = {'rep': 'mfcc', 'win_len': 0.025,
'time_step': 0.01, 'num_coeffs': 13,
'freq_lims': (0, 8000), 'return_rep': True,
'use_multi': True}
scores, reps = analyze_directory(soundfiles_dir, call_back=call_back, **kwargs)
def test_analyze_long_file_reaper(acoustic_corpus_path, reaper_func):
segments = [(1, 2, 0)]
output = analyze_long_file(acoustic_corpus_path, segments, reaper_func)
print(sorted(output[(1, 2, 0)].keys()))
assert (all(x >= 1 for x in output[(1, 2, 0)].keys()))
assert (all(x <= 2 for x in output[(1, 2, 0)].keys()))
output = analyze_long_file(acoustic_corpus_path, segments, reaper_func, padding=0.5)
print(sorted(output[(1, 2, 0)].keys()))
assert (all(x >= 1 for x in output[(1, 2, 0)].keys()))
assert (all(x <= 2 for x in output[(1, 2, 0)].keys()))
def test_analyze_file_segments_reaper(acoustic_corpus_path, reaper_func):
seg = (acoustic_corpus_path, 1, 2, 0)
segments = [seg]
output = analyze_file_segments(segments, reaper_func)
print(sorted(output[seg].keys()))
assert (all(x >= 1 for x in output[seg].keys()))
assert (all(x <= 2 for x in output[seg].keys()))
output = analyze_file_segments(segments, reaper_func, padding=0.5)
print(sorted(output[seg].keys()))
assert (all(x >= 1 for x in output[seg].keys()))
assert (all(x <= 2 for x in output[seg].keys()))
def test_analyze_long_file_formants(acoustic_corpus_path, formants_func):
segments = [(1, 2, 0)]
output = analyze_long_file(acoustic_corpus_path, segments, formants_func)
print(sorted(output[(1, 2, 0)].keys()))
assert (all(x >= 1 for x in output[(1, 2, 0)].keys()))
assert (all(x <= 2 for x in output[(1, 2, 0)].keys()))
output = analyze_long_file(acoustic_corpus_path, segments, formants_func, padding=0.5)
print(sorted(output[(1, 2, 0)].keys()))
assert (all(x >= 1 for x in output[(1, 2, 0)].keys()))
assert (all(x <= 2 for x in output[(1, 2, 0)].keys()))
def test_analyze_long_file_pitch(acoustic_corpus_path, pitch_func):
segments = [(1, 2, 0)]
output = analyze_long_file(acoustic_corpus_path, segments, pitch_func)
print(sorted(output[(1, 2, 0)].keys()))
assert (all(x >= 1 for x in output[(1, 2, 0)].keys()))
assert (all(x <= 2 for x in output[(1, 2, 0)].keys()))
assert (all(x >= 1 for x in output[(1, 2, 0)].keys()))
assert (all(x <= 2 for x in output[(1, 2, 0)].keys()))
output = analyze_long_file(acoustic_corpus_path, segments, pitch_func, padding=0.5)
print(sorted(output[(1, 2, 0)].keys()))
assert (all(x >= 1 for x in output[(1, 2, 0)].keys()))
assert (all(x <= 2 for x in output[(1, 2, 0)].keys()))
assert (all(x >= 1 for x in output[(1, 2, 0)].keys()))
assert (all(x <= 2 for x in output[(1, 2, 0)].keys()))
```
#### File: python-acoustic-similarity/tests/test_rep_mfcc.py
```python
import os
import pytest
from acousticsim.representations.mfcc import Mfcc
from scipy.io import loadmat
from numpy.testing import assert_array_almost_equal
@pytest.mark.xfail
def test(base_filenames):
for f in base_filenames:
print(f)
if f.startswith('silence'):
continue
wavpath = f+'.wav'
matpath = f+'_mfcc.mat'
if not os.path.exists(matpath):
continue
m = loadmat(matpath)
mfcc = Mfcc(wavpath, min_freq=0, max_freq=8000, num_coeffs = 13 , win_len = 0.025,
time_step = 0.01,num_filters=20,
use_power=True
)
mfcc.process()
#assert_array_almost_equal(m['pspectrum'].T,pspec,decimal=4)
#assert_array_almost_equal(m['aspectrum'].T,aspec,decimal=4)
assert_array_almost_equal(m['cepstra'].T,mfcc.to_array())
def test_deltas(base_filenames):
for f in base_filenames:
print(f)
if f.startswith('silence'):
continue
wavpath = f+'.wav'
mfcc = Mfcc(wavpath, min_freq=0, max_freq=8000, num_coeffs = 13 , win_len = 0.025,
time_step = 0.01,num_filters=20,
use_power = False, deltas = True
)
@pytest.mark.xfail
def test_norm_amp(base_filenames):
for f in base_filenames:
print(f)
if f.startswith('silence'):
continue
wavpath = f+'.wav'
mfcc = Mfcc(wavpath,min_freq=0, max_freq=8000, num_coeffs = 1 , win_len = 0.025,
time_step = 0.01,num_filters=20,
use_power = True
)
mfcc.norm_amp([(0,1)])
``` |
{
"source": "jofrony/Neuromodulation",
"score": 3
} |
#### File: Neuromodulation/neuromodcell/experimental_data.py
```python
class Experimental:
def __init__(self):
self.experimental_data = dict()
def define_exp(self, **kwargs):
for exp_type, data in kwargs.items():
self.experimental_data.update({exp_type: data})
```
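A minimal usage sketch of the Experimental container above; the field names passed to define_exp() are hypothetical and simply get stored in the experimental_data dict.

```python
from neuromodcell.experimental_data import Experimental

exp = Experimental()
exp.define_exp(frequency_change=1.4, membrane_potential_change=2.0)
print(exp.experimental_data)  # {'frequency_change': 1.4, 'membrane_potential_change': 2.0}
```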
#### File: Neuromodulation/neuromodcell/modulation_functions.py
```python
import numpy as np
def alpha(parameter=None):
time_step_array = parameter['time_step_array']
tstart = parameter['tstart']
gmax = parameter['gmax']
tau = parameter['tau']
mag = list()
'''
Calculates and returns a "magnitude" trace using an alpha function -> used for
modulation transients.
time_step_array = simulation time points (h.t)
tstart = time at which the function is triggered
gmax = maximal amplitude of the curve (default 1; transient must lie between 0-1)
tau = time constant of the alpha function
'''
for t_step in time_step_array:
if t_step >= tstart:
t = (t_step - tstart) / tau
e = np.exp(1 - t)
mag.append(gmax * t * e)
else:
mag.append(0)
return mag
def step(parameter=None):
time_step_array = parameter['time_step_array']
tstart = parameter['tstart']
step_stop = parameter['duration'] + parameter['tstart']
gmax = parameter['gmax']
mag = list()
for t_step in time_step_array:
if t_step > tstart and t_step < step_stop:
mag.append(gmax)
else:
mag.append(0)
return mag
def bath_application(parameter=None):
time_step_array = parameter['time_step_array']
gmax = parameter['gmax']
mag = list()
for t_step in time_step_array:
mag.append(gmax)
return mag
def alpha_background(parameter=None):
time_step_array = parameter['time_step_array']
tstart = parameter['tstart']
gmax_decrease = parameter['gmax_decrease']
tau = parameter['tau']
tonic = parameter['tonic']
mag = list()
for t_step in time_step_array:
mag_intermediate = 0
if t_step >= tstart:
t = (t_step - tstart) / tau
e = np.exp(1 - t)
mag_intermediate = mag_intermediate + gmax_decrease * t * e
if mag_intermediate > 0.99:
mag_intermediate = 1
mag.append(tonic - mag_intermediate)
return mag
def time_series(parameter=None):
mag = eval(parameter['array'])
return mag
```
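A short sketch of how the modulation functions above expect to be called; only the dictionary keys are dictated by the code, while the concrete values here are assumptions chosen for illustration.
```python
import numpy as np
from neuromodcell import modulation_functions as mf

# Hypothetical time axis (ms) and parameters; the keys match what alpha(),
# step() and bath_application() read from the parameter dict.
params = {
    "time_step_array": np.arange(0, 1000, 0.1),
    "tstart": 200,   # onset of the transient
    "gmax": 1,       # peak amplitude
    "tau": 100,      # alpha-function time constant
}
alpha_trace = mf.alpha(parameter=params)                     # 0 until tstart, alpha-shaped after
step_trace = mf.step(parameter={**params, "duration": 300})  # gmax between tstart and tstart+duration
bath_trace = mf.bath_application(parameter={"time_step_array": params["time_step_array"], "gmax": 1})
```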
#### File: Neuromodulation/neuromodcell/optimisation_setup.py
```python
import neuromodcell.modulation_functions as mf
from neuromodcell.model_functions import define_mechanisms
from neuromodcell.model_functions import define_parameters
from neuromodcell.model_functions import define_morphology
from neuromodcell.model_functions import define_modulation
from neuromodcell.model_functions import files
from neuromodcell.Neuron_model_extended import NeuronModel
import numpy as np
import json
import logging
import os
class OptimisationSetup:
def __init__(self, modulation_setup):
self.modulation_setup = modulation_setup
self.cellDir = self.modulation_setup["cellDir"]
self.neurons = dict()
self.sim = None
self.mod_file = None
self.ion_channels = dict()
self.modulation_opt_setup = dict()
self.neuromodulation_name = dict()
self.t_save = None
self.v_save = dict()
self.gidlist = None
self.i_stim = list()
self.receptors = list()
self.synapses = list()
self.netcon = list()
self.unit_modulation = list()
def set_gidlist(self, gidlist):
self.gidlist = gidlist
def start_logging(self):
directory = "logfiles"
if not os.path.exists(directory):
os.makedirs(directory)
logging.basicConfig(filename="logfiles/log-file-" + str(self.gidlist[0]) + ".log", level=logging.DEBUG)
@staticmethod
def section_translation(section_name):
translation = {'dend': 'basal', 'somatic': 'soma', 'axon': 'axon'}
return translation[section_name]
def define_neuromodulation(self):
self.neuromodulation_name.update({self.modulation_setup["name"]: self.modulation_setup["key"]})
self.modulation_type()
def setup_neurons(self, unit_modulation):
self.unit_modulation = unit_modulation
param_file, morph_file, self.mod_file, mech_file = files(self.cellDir)
param = define_parameters(parameter_config=param_file, parameter_id=self.modulation_setup["parameterID"])
mech = define_mechanisms(mech_file)
morph = define_morphology(morph_file=morph_file)
logging.info('This worker : ' + str(len(self.gidlist)))
for k in range(len(self.gidlist)):
modulation = define_modulation(param_set=unit_modulation["param_set"][k])
self.neurons[k] = NeuronModel(cell_name=self.modulation_setup["cell_name"], morph=morph, mech=mech,
param=param, modulation=modulation)
def control_neuron(self):
param_file, morph_file, self.mod_file, mech_file = files(self.cellDir)
param = define_parameters(parameter_config=param_file, parameter_id=self.modulation_setup["parameterID"])
mech = define_mechanisms(mech_file)
morph = define_morphology(morph_file=morph_file)
self.neurons[-1] = NeuronModel(cell_name=self.modulation_setup["cell_name"], morph=morph, mech=mech,
param=param, modulation=[])
def modulation_type(self):
name = self.modulation_setup["name"]
parameters = self.modulation_setup["modulation_function"]
mod_type = parameters["function"]
method = getattr(mf, mod_type)
parameters.update({"ht": np.arange(0, parameters["tstop"], parameters["dt"])})
from neuron import h
vector = h.Vector(method(parameter=parameters))
self.modulation_opt_setup.update({self.neuromodulation_name[name]: {"function": method,
"parameters": parameters,
"vector": vector}})
def instantiate(self, sim):
self.sim = sim
for i, cell in self.neurons.items():
self.neurons[i].instantiate(sim=self.sim)
def define_ion_channel(self):
for i, ion_channel_infos in enumerate(self.unit_modulation["param_set"]):
ion_channel_modulation = dict()
for param in ion_channel_infos:
if param["sectionlist"] not in ion_channel_modulation.keys():
ion_channel_modulation.update({param["sectionlist"]: list()})
ion_channel_modulation[param["sectionlist"]].append(param)
else:
ion_channel_modulation[param["sectionlist"]].append(param)
for section in ion_channel_modulation.keys():
for neuron_part in getattr(self.neurons[i].icell, section):
for seg in neuron_part:
mod_factors = ion_channel_modulation[section]
for mod_factor in mod_factors:
if "mod" in mod_factor["mech_param"]:
setattr(seg, mod_factor["param_name"], 1)
if "level" in mod_factor["mech_param"]:
dt = self.modulation_setup["modulation_function"]["dt"]
self.modulation_opt_setup[mod_factor["name"]]['vector'].play(
getattr(getattr(seg, mod_factor["mech"]), "_ref_" + mod_factor["mech_param"]), dt)
def define_receptor(self):
for i, synapse_infos in enumerate(self.unit_modulation["receptor"]):
for modulation_unit in synapse_infos:
for synapse_name, modulation_parameters in modulation_unit.items():
if "syn_param" != synapse_name:
synapse = getattr(self.sim.neuron.h, synapse_name)
syn = synapse(self.neurons[i].icell.soma[0](0.5))
if "syn_param" in modulation_unit.keys():
for syn_parameter, value in modulation_unit["syn_param"].items():
setattr(syn, syn_parameter, value)
for mod_param in modulation_parameters:
setattr(syn, mod_param["param_name"], mod_param["value"])
setattr(syn, mod_param["modON"], 1)
dt = self.modulation_setup["modulation_function"]["dt"]
self.modulation_opt_setup[mod_param["name"]]['vector'].play(
getattr(syn, "_ref_" + mod_param["level_param"]), dt)
self.receptors.append(syn)
"""
Check if control neuron is on index -1
"""
if -1 in self.neurons.keys():
for modulation_unit in self.modulation_setup["receptor_modulation"]:
for synapse_name, modulation_parameters in modulation_unit.items():
if "syn_param" != synapse_name:
synapse = getattr(self.sim.neuron.h, synapse_name)
syn = synapse(self.neurons[-1].icell.soma[0](0.5))
if "syn_param" in modulation_unit.keys():
for syn_parameter, value in modulation_unit["syn_param"].items():
setattr(syn, syn_parameter, value)
self.receptors.append(syn)
def define_protocol(self):
for i, cell in self.neurons.items():
for protocol in self.modulation_setup["protocols"]:
if 'current_clamp' == protocol['type']:
cur_stim = self.sim.neuron.h.IClamp(0.5, sec=self.neurons[i].icell.soma[0])
cur_stim.delay = protocol['parameters']["start"]
cur_stim.dur = protocol['parameters']["duration"]
cur_stim.amp = protocol['parameters']["amp"]
self.i_stim.append(cur_stim)
elif 'synaptic_input' == protocol['type']:
for syn in self.receptors:
if "spiketimes" in protocol['parameters']:
vec_stim = self.sim.neuron.h.VecStim()
spike_time = self.sim.neuron.h.Vector(protocol["parameters"]["spiketimes"])
vec_stim.play(spike_time)
nc_to_synapse = self.sim.neuron.h.NetCon(vec_stim, syn)
nc_to_synapse.delay = protocol["parameters"]["delay"]
nc_to_synapse.threshold = protocol["parameters"]["threshold"]
nc_to_synapse.weight[0] = protocol["parameters"]["conductance"]
self.synapses.append(vec_stim)
self.netcon.append(nc_to_synapse)
def time_save(self):
self.t_save = self.sim.neuron.h.Vector()
self.t_save.record(self.sim.neuron.h._ref_t)
return self.t_save
def voltage_save(self):
for i, cell in self.neurons.items():
v = self.sim.neuron.h.Vector()
v.record(getattr(cell.icell.soma[0](0.5), '_ref_v'))
self.v_save[i] = v
return self.v_save
def run(self):
self.sim.neuron.h.tstop = self.modulation_setup["tstop"]
self.sim.neuron.h.run()
```
#### File: Neuromodulation/neuromodcell/previous_setup.py
```python
import pathlib
import json
class OldSetup:
def __init__(self, dir_path):
        with open(pathlib.Path(dir_path) / 'modulation_setup.json', 'r') as f:
            self.previous_setup = json.load(f)
def return_population(self):
pop_num = self.previous_setup['population']
return pop_num
```
#### File: Neuromodulation/tests/test_selection_criteria.py
```python
import neuromodcell.selection_criteria as sc
import numpy as np
def test_number_AP_decrease():
voltage_control = np.array([-100, -100, 100, -100, -100, -100])
voltage = np.array([-100, -100, -100, -100, -100, -100])
criteria = {"selection": {"mean": 1, "std": 1, "threshold": 1}, "parameters": {'dt': 0.1}}
result = sc.number_AP_decrease(criteria, [voltage_control, voltage])
boolean = result['boolean']
zscore = result['zscore']
assert boolean == True
assert zscore == 0
def test_number_AP_increase():
voltage_control = np.array([-100, -100, -100, -100, -100, -100])
voltage = np.array([-100, -100, 100, -100, -100, -100])
criteria = {"selection": {"mean": 1, "std": 1, "threshold": 1}, "parameters": {'dt': 0.1}}
result = sc.number_AP_increase(criteria, [voltage_control, voltage])
boolean = result['boolean']
zscore = result['zscore']
assert boolean == True
assert zscore == 0
def test_frequency_change():
voltage_control = np.array([-100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100])
voltage = np.array([-100, 100, -100, 100, -100, 100, -100, 100, -100, 100, -100])
criteria = {"selection": {"mean": 5, "std": 1, "threshold": 1},
"parameters": {"tstart": 0, "tstop": 1000, 'dt': 100}}
result = sc.frequency_change(criteria, [voltage_control, voltage])
boolean = result['boolean']
zscore = result['zscore']
assert boolean == True
assert zscore == 0
def test_frequency_increase():
voltage_control = np.array([-100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100])
voltage = np.array([-100, 100, -100, 100, -100, 100, -100, 100, -100, 100, -100])
criteria = {"selection": {"mean": 5, "std": 1, "threshold": 1},
"parameters": {"tstart": 0, "tstop": 1000, 'dt': 100}}
result = sc.frequency_change_increase(criteria, [voltage_control, voltage])
boolean = result['boolean']
zscore = result['zscore']
assert boolean == True
assert zscore == 0
def test_frequency_decrease():
voltage_control = np.array([-100, 100, -100, 100, -100, 100, -100, 100, -100, 100, -100])
voltage = np.array([-100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100])
criteria = {"selection": {"mean": 5, "std": 1, "threshold": 1},
"parameters": {"tstart": 0, "tstop": 1000, 'dt': 100}}
result = sc.frequency_change_decrease(criteria, [voltage_control, voltage])
boolean = result['boolean']
zscore = result['zscore']
assert boolean == True
assert zscore == 0
def test_cv_change():
voltage_control = np.array([-100, 100, -100, 100, -100, 100, -100, 100, -100, 100, -100])
voltage = np.array([-100, -100, -100, 100, -100, -100, -100, 100, -100, 100, -100])
criteria = {"selection": {"mean": 0.333, "std": 1, "threshold": 1}, "parameters": {'dt': 100}}
result = sc.cv_change(criteria, [voltage_control, voltage])
boolean = result['boolean']
zscore = result['zscore']
assert boolean == True
def test_membrane_amplitude_increase():
voltage_control = np.array([-100, -100, -100, -100, -90, -90, -90, -90, -100, -100, -100, -100])
voltage = np.array([-100, -100, -100, -100, -80, -80, -80, -80, -100, -100, -100, -100])
criteria = {"selection": {"mean": 10, "std": 1, "threshold": 1},
"parameters": {'start_base': 0, 'stop_base': 0.2, 'start_measure': 0.4, 'stop_measure': 0.8, 'dt': 0.1}}
result = sc.membrane_amplitude_increase(criteria, [voltage_control, voltage])
boolean = result['boolean']
zscore = result['zscore']
assert boolean == True
assert zscore == 0
def test_membrane_amplitude_increase_percentage():
voltage_control = np.array([-100, -100, -100, -100, -90, -90, -90, -90, -100, -100, -100, -100])
voltage = np.array([-100, -100, -100, -100, -80, -80, -80, -80, -100, -100, -100, -100])
criteria = {"selection": {"mean": 200, "std": 1, "threshold": 1},
"parameters": {'start_base': 0, 'stop_base': 0.2, 'start_measure': 0.4, 'stop_measure': 0.8, 'dt': 0.1}}
result = sc.membrane_amplitude_increase_percentage(criteria, [voltage_control, voltage])
boolean = result['boolean']
zscore = result['zscore']
assert boolean == True
assert zscore == 0
def test_membrane_amplitude_decrease_percentage():
voltage_control = np.array([-100, -100, -100, -100, -80, -80, -80, -80, -100, -100, -100, -100])
voltage = np.array([-100, -100, -100, -100, -90, -90, -90, -90, -100, -100, -100, -100])
criteria = {"selection": {"mean": 50, "std": 1, "threshold": 1},
"parameters": {'start_base': 0, 'stop_base': 0.2, 'start_measure': 0.4, 'stop_measure': 0.8, 'dt': 0.1}}
result = sc.membrane_amplitude_decrease_percentage(criteria, [voltage_control, voltage])
boolean = result['boolean']
zscore = result['zscore']
assert boolean == True
assert zscore == 0
``` |
{
"source": "jo-fu/TLC-OverviewPlugin",
"score": 3
} |
#### File: ternip/formats/tern.py
```python
import xml.dom.minidom
import timex2
class TernDocument(timex2.Timex2XmlDocument):
"""
A class which can handle TERN documents
"""
@staticmethod
def create(sents, docid, tok_offsets=None, add_S=False, add_LEX=False, pos_attr=False, dct=''):
"""
Creates a TERN document from the internal representation
sents is the [[(word, pos, timexes), ...], ...] format.
tok_offsets is used to correctly reinsert whitespace lost in
tokenisation. It's in the format of a list of lists of integers, where
each integer is the offset from the start of the sentence of that token.
If set to None (the default), then a single space is assumed between
all tokens.
If add_S is set to something other than false, then the tags to indicate
sentence boundaries are added, with the name of the tag being the value
of add_S
add_LEX is similar, but for token boundaries
pos_attr is similar but refers to the name of the attribute on the LEX
(or whatever) tag that holds the POS tag.
dct is the document creation time string
"""
# Create a blank XML document
impl = xml.dom.minidom.getDOMImplementation()
doc = impl.createDocument(None, 'DOC', None)
# Add necessary tags
docid_tag = doc.createElement('DOCNO')
docid_tag.appendChild(doc.createTextNode(docid))
doc.documentElement.appendChild(docid_tag)
if dct != '':
dct_tag = doc.createElement('DATE_TIME')
dct_tag.appendChild(doc.createTextNode(dct[4:6] + '/' + dct[6:8] + '/' + dct[:4]))
doc.documentElement.appendChild(dct_tag)
body_tag = doc.createElement('BODY')
doc.documentElement.appendChild(body_tag)
text_tag = doc.createElement('TEXT')
body_tag.appendChild(text_tag)
# Add text to document
TernDocument._add_words_to_node_from_sents(doc, text_tag, sents, tok_offsets)
# Now create the object
x = TernDocument(doc)
# Now reconcile the S, LEX and TIMEX tags
x.reconcile(sents, add_S, add_LEX, pos_attr)
return x
def __init__(self, file, nodename='TEXT', has_S=False, has_LEX=False, pos_attr=False):
timex2.Timex2XmlDocument.__init__(self, file, nodename, has_S, has_LEX, pos_attr)
def _dct_to_xml_body(self):
"""
Set the XML body to be the tag containing the document creation time
"""
dtags = self._xml_doc.documentElement.getElementsByTagName('DATE_TIME')
if len(dtags) == 1:
self._xml_body = dtags[0]
else:
dtags = self._xml_doc.documentElement.getElementsByTagName('DATE')
if len(dtags) == 1:
self._xml_body = dtags[0]
else:
return False
def get_dct_sents(self):
"""
Returns the creation time sents for this document.
"""
old_xml_body = self._xml_body
if self._dct_to_xml_body() is False:
return [[]]
s = self.get_sents()
self._xml_body = old_xml_body
return s
def reconcile_dct(self, dct, add_S=False, add_LEX=False, pos_attr=False):
"""
Adds a TIMEX to the DCT tag and return the DCT
"""
old_xml_body = self._xml_body
old_has_S = self._has_S
old_has_LEX = self._has_LEX
old_pos_attr = self._pos_attr
if self._dct_to_xml_body() is False:
return
# Set functionInDocument
for sent in dct:
for (doc, pos, ts) in sent:
for t in ts:
t.document_role = 'CREATION_TIME'
self.reconcile(dct, add_S, add_LEX, pos_attr)
self._xml_body = old_xml_body
self._has_S = old_has_S
self._has_LEX = old_has_LEX
self._pos_attr = old_pos_attr
```
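A hedged sketch of building a TERN document from the internal [[(word, pos, timexes), ...], ...] representation described in the `create` docstring above; the sentence, docid and dct value are invented, the timex sets are left empty for brevity, and the import path is assumed from the file layout shown here.
```python
from ternip.formats.tern import TernDocument  # assumed import path

# One hypothetical sentence; each token is (word, POS, set-of-timexes).
sents = [[('Prices', 'NNS', set()), ('rose', 'VBD', set()), ('today', 'NN', set()), ('.', '.', set())]]

# dct is expected as a YYYYMMDD... string (it is sliced into MM/DD/YYYY above).
doc = TernDocument.create(sents, 'DOC_001', dct='20100101')
# doc now wraps a DOM with DOCNO, DATE_TIME, BODY and TEXT elements.
```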
#### File: ternip/formats/timeml.py
```python
import xml.dom.minidom
from timex3 import Timex3XmlDocument
class TimeMlDocument(Timex3XmlDocument):
"""
A class which holds a TimeML representation of a document.
Suitable for use with the AQUAINT dataset.
"""
@staticmethod
def create(sents, tok_offsets=None, add_S=False, add_LEX=False, pos_attr=False):
"""
Creates a TimeML document from the internal representation
sents is the [[(word, pos, timexes), ...], ...] format.
tok_offsets is used to correctly reinsert whitespace lost in
tokenisation. It's in the format of a list of lists of integers, where
each integer is the offset from the start of the sentence of that token.
If set to None (the default), then a single space is assumed between
all tokens.
If add_S is set to something other than false, then the tags to indicate
sentence boundaries are added, with the name of the tag being the value
of add_S
add_LEX is similar, but for token boundaries
pos_attr is similar but refers to the name of the attribute on the LEX
(or whatever) tag that holds the POS tag.
"""
# Create a blank XML document
impl = xml.dom.minidom.getDOMImplementation()
doc = impl.createDocument('http://www.timeml.org/site/publications/timeMLdocs/timeml_1.2.1.dtd', 'TimeML', None)
# Add text to document
TimeMlDocument._add_words_to_node_from_sents(doc, doc.documentElement, sents, tok_offsets)
# Now create the object
x = TimeMlDocument(doc)
# Now reconcile the S, LEX and TIMEX tags
x.reconcile(sents, add_S, add_LEX, pos_attr)
return x
```
#### File: ternip/formats/timex3.py
```python
from ternip.formats.xml_doc import XmlDocument
from ternip.timex import Timex
class Timex3XmlDocument(XmlDocument):
"""
A class which takes any random XML document and adds TIMEX3 tags to it.
Suitable for use with Timebank, which contains many superfluous tags that
aren't in the TimeML spec, even though it claims to be TimeML.
"""
_timex_tag_name = 'TIMEX3'
def _timex_from_node(self, node):
"""
Given a node representing a TIMEX3 element, return a timex object
representing it
"""
t = Timex()
if node.hasAttribute('tid'):
t.id = int(node.getAttribute('tid')[1:])
if node.hasAttribute('value'):
t.value = node.getAttribute('value')
if node.hasAttribute('mod'):
t.mod = node.getAttribute('mod')
if node.hasAttribute('type'):
t.type = node.getAttribute('type')
if node.hasAttribute('freq'):
t.freq = node.getAttribute('freq')
if node.hasAttribute('quant'):
t.quant = node.getAttribute('quant')
if node.hasAttribute('comment'):
t.comment = node.getAttribute('comment')
if node.getAttribute('temporalFunction'):
t.temporal_function = True
if node.hasAttribute('functionInDocument'):
t.document_role = node.getAttribute('functionInDocument')
if node.hasAttribute('beginPoint'):
t.begin_timex = int(node.getAttribute('beginPoint')[1:])
if node.hasAttribute('endPoint'):
t.end_timex = int(node.getAttribute('endPoint')[1:])
if node.hasAttribute('anchorTimeID'):
t.context = int(node.getAttribute('anchorTimeID')[1:])
return t
def _annotate_node_from_timex(self, timex, node):
"""
Add attributes to this TIMEX3 node
"""
if timex.id is not None:
node.setAttribute('tid', 't' + str(timex.id))
if timex.value is not None:
node.setAttribute('value', timex.value)
if timex.mod is not None:
node.setAttribute('mod', timex.mod)
if timex.type is not None:
node.setAttribute('type', timex.type.upper())
if timex.freq is not None:
node.setAttribute('freq', timex.freq)
if timex.comment is not None:
node.setAttribute('comment', timex.comment)
if timex.quant is not None:
node.setAttribute('quant', timex.quant)
if timex.temporal_function:
node.setAttribute('temporalFunction', 'true')
if timex.document_role is not None:
node.setAttribute('functionInDocument', timex.document_role)
if timex.begin_timex is not None:
node.setAttribute('beginPoint', 't' + str(timex.begin_timex.id))
if timex.end_timex is not None:
node.setAttribute('endPoint', 't' + str(timex.end_timex.id))
if timex.context is not None:
node.setAttribute('anchorTimeID', 't' + str(timex.context.id))
```
#### File: rule_engine/normalisation_functions/string_conversions.py
```python
import re
# Mapping of month abbreviations to month index
_month_to_num = {
'jan': 1,
'feb': 2,
'mar': 3,
'apr': 4,
'may': 5,
'jun': 6,
'jul': 7,
'aug': 8,
'sep': 9,
'oct': 10,
'nov': 11,
'dec': 12
}
def month_to_num(m):
"""
Given a name of a month, get the number of that month. Invalid data gets 0.
Returned as an integer.
"""
if m[:3].lower() in _month_to_num:
return _month_to_num[m[:3].lower()]
else:
return 0
# Mapping of days to day index
_day_to_num = {
"sunday": 0,
"monday": 1,
"tuesday": 2,
"wednesday": 3,
"thursday": 4,
"friday": 5,
"saturday": 6
}
def day_to_num(day):
"""
    Given the name of a day, return the number of that day. Sunday is 0.
    Invalid data gets 7. All returned as integers.
"""
if day.lower() in _day_to_num:
return _day_to_num[day.lower()]
else:
return 7
# The decade number that a year component (-ty) refers to
_decade_nums = {
"twen": 2,
"thir": 3,
"for": 4,
"fif": 5,
"six": 6,
"seven": 7,
"eigh": 8,
"nine": 9
}
def decade_nums(dec):
"""
    Given the decade component (less the -ty suffix) of a year, return the
    number of that decade as an integer. Invalid data gets 1.
"""
if dec.lower() in _decade_nums:
return _decade_nums[dec.lower()]
else:
return 1
# Season to TIDES identifiers
#_season = {
# "spring": "SP",
# "summer": "SU",
# "autumn": "FA",
# "fall": "FA",
# "winter": "WI"
#}
# Season to TIDES identifiers
_season = {
"spring": "03",
"summer": "07",
"autumn": "10",
"fall": "10",
"winter": "12"
}
def season(s):
"""
    Transforms a season name into an identifier from TIDES. Invalid data gets
    returned as is.
"""
if s.lower() in _season:
return _season[s.lower()]
else:
return s
_season_to_month = {
'SP': 'april',
'SU': 'june',
'FA': 'september',
'WI': 'december'
}
def season_to_month(s):
"""
    Convert a season to a month (roughly); returns the month name as a string
"""
s = season(s)
if s in _season_to_month:
return _season_to_month[s]
else:
return ''
# Words (or parts of words) and then unit identifier
_units_to_gran = {
'dai': 'D',
'night': 'D',
'day': 'D',
'week': 'W',
'fortnight': 'F',
'month': 'M',
'year': 'Y',
'annual': 'Y',
'decade': 'E',
'century': 'C',
'centurie': 'C'
}
def units_to_gran(unit):
"""
Given a word, or part of a word, that represents a unit of time, return the
    single character representing the granularity of that unit of time
"""
if unit.lower() in _units_to_gran:
return _units_to_gran[unit.lower()]
else:
return unit
# Dates of holidays which are on the same date every year MMDD. Keys have spaces
# removed
_fixed_holiday_dates = {
"newyear": "0101",
"inauguration": "0120",
"valentine": "0214",
"ground": "0202",
"candlemas": "0202",
"patrick": "0317",
"fool": "0401",
"st\.george": "0423",
"saintgeorge": "0423",
"walpurgisnacht": "0430",
"mayday": "0501",
"beltane": "0501",
"cinco": "0505",
"flag": "0614",
"baptiste": "0624",
"dominion": "0701",
"canada": "0701",
"independence": "0704",
"bastille": "0714",
"halloween": "1031",
"allhallow": "1101",
"allsaints": "1101",
"allsouls": "1102",
"dayofthedead": "1102",
"fawkes": "1105",
"veteran": "1111",
"christmas": "1225",
"xmas": "1225"
}
def fixed_holiday_date(hol):
"""
Get the date string MMDD of a holiday
"""
hol = re.sub(r'<([^~]*)~[^>]*>', r'\1', hol).lower()
if hol in _fixed_holiday_dates:
return _fixed_holiday_dates[hol]
else:
return ''
# Mapping of holidays which always occur on the Nth X of some month, where X is
# day of week. Mapping is of tuples of the form (month, dow, n)
_nth_dow_holiday_date = {
"mlk": (1, 1, 3),
"king": (1, 1, 3),
"president": (2, 1, 3),
"canberra": (3, 1, 3),
"mother": (5, 7, 2),
"father": (6, 7, 3),
"labor": (9, 1, 1),
"columbus": (10, 1, 2),
"thanksgiving": (11, 4, 4)
}
def nth_dow_holiday_date(hol):
"""
Given the name of a holiday which always occur on the Nth X of some month,
where X is day of week, returns tuples of the form (month, dow, n)
representing the information about that holiday.
"""
if hol.lower() in _nth_dow_holiday_date:
return _nth_dow_holiday_date[hol.lower()]
else:
return (0, 0, 0)
# Mapping of units to multipliers, duration representations, and whether or not
# it needs to be prepended with T
_duration_values = {
'second': (1, 'S', True),
'minute': (1, 'M', True),
'hour': (1, 'H', True),
'day': (1, 'D', False),
'month': (1, 'M', False),
'year': (1, 'Y', False),
'week': (1, 'W', False),
'fortnight': (2, 'W', False),
'decade': (10, 'Y', False),
'century': (100, 'Y', False),
'centurie': (100, 'Y', False),
'millenium': (1000, 'Y', False),
'millenia': (1000, 'Y', False)
}
def build_duration_value(num, unit):
if unit.lower() in _duration_values:
du = _duration_values[unit.lower()]
if num == 'X':
return 'X' + du[1]
else:
return ('T' if du[2] else '') + str(num * du[0]) + du[1]
else:
return str(num) + 'X'
```
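A few illustrative calls to the conversion helpers above; the inputs are arbitrary, and the expected values in the comments follow directly from the lookup tables in this module.
```python
from ternip.rule_engine.normalisation_functions.string_conversions import (
    month_to_num, day_to_num, season, units_to_gran, build_duration_value)

print(month_to_num('January'))           # 1   (matched on the 'jan' prefix)
print(day_to_num('Sunday'))              # 0
print(season('fall'))                    # '10'
print(units_to_gran('week'))             # 'W'
print(build_duration_value(2, 'decade')) # '20Y'
print(build_duration_value(3, 'minute')) # 'T3M' (time units are prefixed with T)
```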
#### File: ternip/rule_engine/normalisation_rule.py
```python
import calendar
import logging
import re
from ternip.rule_engine import rule
from ternip.rule_engine.expressions import *
from ternip.rule_engine.normalisation_functions.date_functions import *
from ternip.rule_engine.normalisation_functions.relative_date_functions import *
from ternip.rule_engine.normalisation_functions.string_conversions import *
from ternip.rule_engine.normalisation_functions.words_to_num import *
LOGGER = logging.getLogger(__name__)
class NormalisationRule(rule.Rule):
""" A class that represents normalisation rules """
# If debug mode is enabled, then the comment in the TIMEX tag is set to
# the ID of the rule which normalised it
_DEBUG = False
def __init__(self, match, type=None, id='', value=None, change_type=None, freq=None, quant=None, mod=None,
guards=None, after_guards=None, before_guards=None, sent_guards=None, after=None, tokenise=True,
deliminate_numbers=False):
"""
Create a normalisation rule, with a number of optional arguments. If
tokenise is set to true, then regex's are in the form to be used with
nltk.TokenSearcher.findall (http://nltk.googlecode.com/svn/trunk/doc/api/nltk.text.TokenSearcher-class.html#findall)
however with the amendment that the body of the tokens are actually in
the form <token~POS>, e.g., <about~.+> would match about with any POS
tag.
match is a regex which the body of the timex must match to run this
rule. Subgroups of this expression are available to later
expressions.
type means the type of TIMEX which this rule applies to
id is a unique string other rules can refer to in order to express an
ordering.
value is a Python expression which returns a value (in ISO 8601 format,
as modified in TimeML). Subgroups from the match expression are
available in the form {#[0-9]+}
        guards is a list of regexes which must be satisfied for this rule to be
        applied. Defaults to an empty list. If the first character in the
        regex is a !, then it means that it's a negative guard - the guard
        must NOT match for this rule to be applied.
        after_guards are like guards, but match against the text following the
        annotation in the sentence
        before_guards are like after_guards, but match against the preceding text.
        after is a list of IDs of rules which must have run before this
        rule is applied.
        tokenise is whether or not the regular expressions to be matched against
        care about token boundaries/POS tags. If it is set to a string instead
        of True, that string is treated as the separator between tokens.
"""
if not after: after = []
if not sent_guards: sent_guards = []
if not before_guards: before_guards = []
if not after_guards: after_guards = []
if not guards: guards = []
self.id = id
self._type = type
self._match = re.compile(self._prep_re(match, tokenise), re.IGNORECASE)
self.after = after
self._tokenise = tokenise
self._deliminate_numbers = deliminate_numbers
self._value_exp = self._compile_exp(value, 'value')
self._type_exp = self._compile_exp(change_type, 'change-type')
self._freq_exp = self._compile_exp(freq, 'freq')
self._quant_exp = self._compile_exp(quant, 'quant')
self._mod_exp = self._compile_exp(mod, 'mod')
# Load guards
self._guards = self._load_guards(guards, tokenise)
self._before_guards = self._load_guards(before_guards, tokenise)
self._after_guards = self._load_guards(after_guards, tokenise)
self._sent_guards = self._load_guards(sent_guards, tokenise)
def _compile_exp(self, exp, type):
"""
Replace our group short form in value expressions, e.g., {#6} with
actual Python code so that matched regular expressions get subbed in
"""
# it would be nice to support named groups, but this'll do for now
if exp is not None:
return compile(re.sub(r'\{#(\d+)\}', r'match.group(\1)', exp), self.id + ':' + type, 'eval')
else:
return None
def apply(self, timex, cur_context, dct, body, before, after):
"""
Applies this rule to this timex, where body is the full extent covered
        by this timex, before is the preceding text in the sentence, and after
        is the following text in the sentence, in the [(token, POS), ...] form
A boolean indicating whether or not application was successful is
returned. The timex may also be modified, so should be passed in by
reference.
"""
# Check this rule type matches the timex type
if self._type is not None and timex.type.lower() != self._type.lower():
return False, cur_context
# Check before, after and whole sentence guards
if not self._check_guards(self._toks_to_str(before), self._before_guards):
return False, cur_context
if not self._check_guards(self._toks_to_str(after), self._after_guards):
return False, cur_context
if not self._check_guards(self._toks_to_str(body), self._guards):
return False, cur_context
if not self._check_guards(self._toks_to_str(before + body + after), self._sent_guards):
return False, cur_context
# Now, check if we match:
if self._tokenise is True:
senttext = self._toks_to_str(body)
if self._deliminate_numbers:
senttext = self._do_deliminate_numbers(senttext)
else:
senttext = self._tokenise.join([tok for (tok, pos, ts) in body])
match = self._match.search(senttext)
# If we do, then calculate attributes for the timex
if match:
if self._DEBUG:
timex.comment = self.id
try:
if self._value_exp is not None:
timex.value = eval(self._value_exp)
if self._type_exp is not None:
timex.type = eval(self._type_exp)
if self._freq_exp is not None:
timex.freq = eval(self._freq_exp)
if self._quant_exp is not None:
timex.quant = eval(self._quant_exp)
if self._mod_exp is not None:
timex.mod = eval(self._mod_exp)
except Exception:
LOGGER.exception('Malformed rule expression')
# Need to update current time context, if necessary
return True, cur_context
else:
# Rule did not match
return False, cur_context
```
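To make the `{#n}` shorthand in value expressions concrete, here is a small standalone sketch of the substitution that `_compile_exp` performs before handing the expression to `eval`; the rule expression and the sample text are invented for illustration.
```python
import re

# A value expression as it might appear in a normalisation rule; {#1} refers
# to subgroup 1 of the rule's match regex.
value_exp = 'str(int({#1}) + 2000)'
python_exp = re.sub(r'\{#(\d+)\}', r'match.group(\1)', value_exp)
print(python_exp)  # str(int(match.group(1)) + 2000)

# The compiled expression is later evaluated against a regex match object:
match = re.search(r'(\d{2})', "back in '09 the rule fired")
print(eval(compile(python_exp, 'example:value', 'eval')))  # 2009
```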
#### File: ternip/rule_engine/recognition_rule_engine.py
```python
import re
from ternip.rule_engine.recognition_rule import RecognitionRule
from ternip.rule_engine.recognition_rule_block import RecognitionRuleBlock
from ternip.rule_engine.rule_engine import RuleEngine, RuleLoadError
class RecognitionRuleEngine(RuleEngine):
"""
A class which does recognition using a rule engine
Complex rules must have a string member called 'id', which is used for
after ordering, a list of strings called 'after' (which can be an empty
list) which consists of IDs that must have run before this rule.
Additionally, a function called 'apply' which takes a list of
(token, pos, timexes) tuples and returns them in the same form with
potentially modified timexes.
"""
_block_type = RecognitionRuleBlock
def _load_rule(self, filename, rulelines):
"""
Load a 'simple' recognition rule
"""
# get key/value dictionaries
d = self._parse_rule(filename, rulelines)
# Set defaults
type = None
match = None
id = filename
squelch = False
guards = []
before_guards = []
after_guards = []
after = []
case_sensitive = False
deliminate_numbers = False
for key in d:
            # Only one 'Type' field allowed
if key == 'type':
if len(d[key]) != 1:
raise RuleLoadError(filename, "There must be exactly 1 'Type' field")
else:
type = d[key][0]
# Only one 'Match' field allowed
elif key == 'match':
if len(d[key]) != 1:
raise RuleLoadError(filename, "There must be exactly 1 'Match' field")
else:
match = d[key][0]
# No more than one ID key allowed
elif key == 'id':
if len(d[key]) == 1:
id = d[key][0]
elif len(d[key]) > 1:
raise RuleLoadError(filename, "Too many 'ID' fields")
# Squelch is an optional field, defaulting to False, which accepts
# either true or false (case-insensitive) as values
elif key == 'squelch':
if len(d[key]) == 1:
squelch = d[key][0].lower()
if squelch == 'true':
squelch = True
elif squelch == 'false':
squelch = False
else:
raise RuleLoadError(filename, "Squelch must be either 'True' or 'False'")
elif len(d[key]) > 1:
raise RuleLoadError(filename, "Too many 'Squelch' fields")
# Case-sensitive is an optional field, defaulting to False, which
# accepts either true or false (case-insensitive) as values
elif key == 'case-sensitive':
if len(d[key]) == 1:
case_sensitive = d[key][0].lower()
if case_sensitive == 'true':
case_sensitive = True
elif case_sensitive == 'false':
case_sensitive = False
else:
raise RuleLoadError(filename, "Case-Sensitive must be either 'True' or 'False'")
elif (len(d[key]) > 1):
raise RuleLoadError(filename, "Too many 'Case-Sensitive' fields")
# Deliminate-Numbers is an optional field, defaulting to False, which
# accepts either true or false (case-insensitive) as values
elif key == 'deliminate-numbers':
if len(d[key]) == 1:
deliminate_numbers = d[key][0].lower()
if deliminate_numbers == 'true':
deliminate_numbers = True
elif deliminate_numbers == 'false':
deliminate_numbers = False
else:
raise RuleLoadError(filename, "Deliminate-Numbers must be either 'True' or 'False'")
elif (len(d[key]) > 1):
raise RuleLoadError(filename, "Too many 'Deliminate-Numbers' fields")
# set optional fields
elif key == 'guard':
guards = d[key]
elif key == 'after':
after = d[key]
elif key == 'before-guard':
before_guards = d[key]
elif key == 'after-guard':
after_guards = d[key]
# error on unknown fields
else:
raise RuleLoadError(filename, "Unknown field '" + key + "'")
if type is None:
raise RuleLoadError(filename, "'Type' is a compulsory field")
if match is None:
raise RuleLoadError(filename, "'Match' is a compulsory field")
# Guard against any RE errors
try:
return RecognitionRule(match, type, id, guards, after_guards, before_guards, after, squelch, case_sensitive,
deliminate_numbers)
except re.error as e:
raise RuleLoadError(filename, "Malformed regular expression: " + str(e))
except (SyntaxError, ValueError) as e:
raise RuleLoadError(filename, "Malformed Python expression: " + str(e))
def tag(self, sents):
"""
        This function does the actual timex recognition. It expects content to be
        split into tokenised, POS-tagged sentences, i.e., a list of lists of
tuples ([[(token, pos-tag, timexes), ...], ...]). Rules are applied one
at a time.
What is returned is in the same form, except the token tuples contain a
third element consisting of the set of timexes associated with that
token.
"""
# Apply rules on one sentence at a time
r = []
for sent in sents:
rules_run = set()
rules_to_run = set(self._rules)
# Apply rules until all rules have been applied
while rules_to_run:
for rule in rules_to_run.copy():
# Check that if 'after' is defined, the rules we must run
# after have run
after_ok = True
for aid in rule.after:
if aid not in rules_run:
after_ok = False
# Apply this rule, and update our states of rules waiting to
# run and rules that have been run
if after_ok:
(sent, success) = rule.apply(sent)
rules_run.add(rule.id)
rules_to_run.remove(rule)
r.append(sent)
return r
```
#### File: ternip/rule_engine/recognition_rule.py
```python
import re
from ternip.rule_engine.rule import Rule
from ternip.timex import Timex
class RecognitionRule(Rule):
""" A class that represents identification rules """
# If debug mode is enabled, then the comment in the TIMEX tag is set to
# the ID of the rule which created it
_DEBUG = False
def __init__(self, match, type, id, guards=None, after_guards=None, before_guards=None, after=None, squelch=False,
case_sensitive=False, deliminate_numbers=False):
"""
Create a recognition rule, with a number of optional arguments. All
regex's are in the form to be used with nltk.TokenSearcher.findall
(http://nltk.googlecode.com/svn/trunk/doc/api/nltk.text.TokenSearcher-class.html#findall)
however with the amendment that the body of the tokens are actually in
the form <token~POS>, e.g., <about~.+> would match about with any POS
tag.
match is a regex. The text that is matched by this regex is annotated as
a timex. Compulsory.
type can be date, time or duration (TIMEX3 annotation guidelines). This
is a compulsory value.
id is a unique value other rules can refer to in order to express an
ordering.
        guards is a list of regexes which must be satisfied for this rule to be
        applied. Defaults to an empty list. If the first character in the
        regex is a !, then it means that it's a negative guard - the guard
        must NOT match for this rule to be applied.
        after_guards is a list of regexes, like normal guards, but is only
        matched against the string immediately following a match to check
        if that is satisfied
        before_guards is like after_guards, but matches against the string
        immediately preceding a match
        after is a list of IDs of rules which must have run before this
        rule is applied.
squelch is a Boolean. If true, then if the 'match' regex matches some
stuff that's already been timex annotated, those timexes are removed
and no timex is added to the match. Defaults to false.
case_sensitive is a Boolean indicating whether or not this rule should
be matched case sensitively or not.
deliminate_numbers is a Boolean indicating whether or not this rule
requires the sentence to have deliminated numbers
"""
if not after: after = []
if not before_guards: before_guards = []
if not after_guards: after_guards = []
if not guards: guards = []
self.id = id
self._type = type
if case_sensitive:
self._match = re.compile(self._prep_re(match))
else:
self._match = re.compile(self._prep_re(match), re.IGNORECASE)
self._squelch = squelch
self.after = after
self._deliminate_numbers = deliminate_numbers
# Load guards
self._guards = self._load_guards(guards)
self._before_guards = self._load_guards(before_guards)
self._after_guards = self._load_guards(after_guards)
def apply(self, sent):
"""
Applies this rule to the tokenised sentence. The 'after' ordering
must be checked by the caller to ensure correct rule application.
sent is a list of tuples (token, POS, [timexes])
A tuple is returned where the first element is a list in the same form
as sent, with additional timexes added to the 3rd element if need be,
and the second element in the tuple is whether or not this rule matched
anything
"""
senttext = self._toks_to_str(sent)
if self._deliminate_numbers:
senttext = self._do_deliminate_numbers(senttext)
success = False
# Ensure the sentence-level guards are satisfied
if not self._check_guards(senttext, self._guards):
return sent, success
# Now see if this rule actually matches anything
for match in self._match.finditer(senttext):
# Now check before guards
if not self._check_guards(senttext[:match.start()], self._before_guards):
continue
# and after guards
if not self._check_guards(senttext[match.end():], self._after_guards):
continue
# okay, first we need to find which tokens we matched, can do this
# by using our token markers
ti = senttext.count('<', 0, match.start())
tj = senttext.count('<', 0, match.end())
if not self._squelch:
t = Timex(self._type) # only create a new timex if not squelching
if self._DEBUG:
t.comment = self.id
else:
t = None
# Add TIMEX
self._set_timex_extents(t, sent, ti, tj, self._squelch)
success = True
return sent, success
``` |
{
"source": "jog1997/Capstone_1_Jose_Garcia",
"score": 3
} |
#### File: jog1997/Capstone_1_Jose_Garcia/Forms.py
```python
from flask_wtf import FlaskForm
from wtforms import StringField, IntegerField, SubmitField, validators, SelectField
class AddSaleForm(FlaskForm):
#sales_id = IntegerField('Sales Id:') #[validators.DataRequired('number must be unique'), validators.InputRequired('A number is required')])
'''def validate_sales_id(FlaskForm, field):
if sales_id ==
raise ValidationError('Sales ID must be unique. Please try another')'''
emp_id = SelectField('Employee ID: ', choices =[ ('EMP234','EMP234'),('EMP244','EMP244'),('EMP256','EMP256'),('EMP267','EMP267'), ('EMP290','EMP290')])
prod_code= SelectField('Product Code:', choices = [('PROD_001','PROD_001'),('PROD_002','PROD_002'),('PROD_003','PROD_003'),('PROD_004','PROD_004'),('PROD_005','PROD_005'),('PROD_006','PROD_006'),('PROD_007','PROD_007',),('PROD_008','PROD_008')])
prod_quantity = IntegerField('Product Quantity',[validators.NumberRange(min=0, max=100, message='Input between 0-100 required.'), validators.InputRequired()])
warr_code = SelectField('Warranty Code', choices = [('ESP_001', 'ESP_001'), ('ESP_002', 'ESP_002'), ('ESP_003', 'ESP_003'), ('ESP_004', 'ESP_004'), ('ESP_005', 'ESP_005'), ('ESP_006', 'ESP_006'), ('ESP_007', 'ESP_007'),('ESP_008', 'ESP_008')])
warr_quantity = IntegerField('Warranty Quantity',[validators.NumberRange(min=0, max=100, message='Input between 0-100 required.'), validators.InputRequired()])
current_week = IntegerField('Current Week',[validators.InputRequired(), validators.NumberRange(min=1, max=52, message='Input between 1-52 required.')])
current_year = IntegerField('Current Year',[validators.InputRequired(), validators.NumberRange(min=2021, max=None, message='Input from 2021 and beyond required.')])
submit = SubmitField('Add Sale')
class DelForm(FlaskForm):
sale_id = IntegerField('ID of Sale to remove')
#emp_id = SelectField('Employee ID: ', choices =[ ('EMP234','EMP234'),('EMP244','EMP244'),('EMP256','EMP256'),('EMP267','EMP267'), ('EMP290','EMP290')])
submit = SubmitField('Remove Sale')
``` |
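A hedged sketch of how `AddSaleForm` above might be wired into a Flask view; the app setup, secret key, route and template name are assumptions for illustration and are not taken from this repository.
```python
from flask import Flask, render_template, redirect, url_for
from Forms import AddSaleForm  # the form module above

app = Flask(__name__)
app.config['SECRET_KEY'] = 'change-me'  # required by Flask-WTF for CSRF protection

@app.route('/add_sale', methods=['GET', 'POST'])  # hypothetical route
def add_sale():
    form = AddSaleForm()
    if form.validate_on_submit():
        # form.emp_id.data, form.prod_code.data, etc. hold the validated values
        return redirect(url_for('add_sale'))
    return render_template('add_sale.html', form=form)  # hypothetical template
```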
{
"source": "jogacolhue/RETASASscrapper",
"score": 3
} |
#### File: jogacolhue/RETASASscrapper/webservice.py
```python
from flask import Flask
from flask import request
import retasasinicial
import retasasproducto
import retasascondicion
import retasas
app = Flask(__name__)
@app.route('/inicial', methods=['GET'])
def informacionInicial():
return retasasinicial.informacionInicial()
@app.route('/producto/<departamento>/<tipoProducto>', methods=['GET'])
def obtenerProductos(departamento, tipoProducto):
return retasasproducto.obtenerProducto(departamento, tipoProducto)
@app.route('/condicion/<departamento>/<tipoProducto>/<producto>', methods=['GET'])
def obtenerCondiciones(departamento, tipoProducto, producto):
return retasascondicion.obtenerCondicion(departamento, tipoProducto, producto)
@app.route('/retasas/<departamento>/<tipoProducto>/<producto>/<condicion>', methods=['GET'])
def ratasas(departamento, tipoProducto, producto, condicion):
return retasas.obtenerRetasas(departamento, tipoProducto, producto, condicion)
# Debug mode is left enabled
app.run(debug=True, use_reloader=False)  # app.run(host='0.0.0.0', port=5000)
``` |
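A short client-side sketch for exercising the endpoints defined above once the service is running on Flask's default port; the path parameter values are placeholders, since the valid departments and product codes come from RETASAS itself.
```python
import requests  # third-party HTTP client, assumed to be installed

BASE = 'http://127.0.0.1:5000'  # default host/port used by app.run()

print(requests.get(f'{BASE}/inicial').text)
# Placeholder path parameters; real values come from the /inicial response.
print(requests.get(f'{BASE}/producto/LIMA/DEPOSITOS').text)
print(requests.get(f'{BASE}/condicion/LIMA/DEPOSITOS/PLAZO').text)
print(requests.get(f'{BASE}/retasas/LIMA/DEPOSITOS/PLAZO/30DIAS').text)
```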
{
"source": "JogadorJNC/LoL-Pro-TrueSkill",
"score": 3
} |
#### File: LoL-Pro-TrueSkill/Trueskill LoL pro/GetMatchHistoryRaw.py
```python
import mwclient
import time
import datetime as dt
from datetime import date, timedelta, datetime
import json
# All the data is taken from LoL Gamepedia's API, more info on it can be found at lol.gamepedia.com/Help:API_Documentation
# This script gets the match history up to one day before the day it's run and saves it in a json file.
# The raw match history then has to be fixed so that all the players have their most recent name.
# I still need to get around to doing that with the teams as well.
# This function takes a date as the input and outputs a list of the games that happened that day.
# The output is a list of dictionaries, each with the date, the tournament, the teams, the winner, the patch,
# the players on each team along with their role, champion, summoner spells, and keystone.
def getmh(date):
site = mwclient.Site('lol.gamepedia.com', path='/')
new_query = site.api('cargoquery',
limit = "500",
tables = "ScoreboardGames=SG, ScoreboardPlayers=SP",
join_on = "SG.UniqueGame=SP.UniqueGame",
fields = "SG.Tournament, SG.DateTime_UTC, SG.Team1, SG.Team2, SG.Winner, SG.Patch, SP.Link, SP.Team, SP.Champion, SP.SummonerSpells, SP.KeystoneMastery, SP.KeystoneRune, SP.Role, SP.UniqueGame, SP.Side",
where = "SG.DateTime_UTC >= '" + str(date-dt.timedelta(days=1)) + "' AND SG.DateTime_UTC <= '" + str(date) + "'",
offset = "0"
)
ofst=500
response={}
response["cargoquery"]=[]
while len(new_query["cargoquery"])!=0:
response["cargoquery"]+=new_query["cargoquery"]
new_query = site.api('cargoquery',
limit = "500",
tables = "ScoreboardGames=SG, ScoreboardPlayers=SP",
join_on = "SG.UniqueGame=SP.UniqueGame",
fields = "SG.Tournament, SG.DateTime_UTC, SG.Team1, SG.Team2, SG.Winner, SG.Patch, SP.Link, SP.Team, SP.Champion, SP.SummonerSpells, SP.KeystoneMastery, SP.KeystoneRune, SP.Role, SP.UniqueGame, SP.Side",
where = "SG.DateTime_UTC >= '" + str(date-dt.timedelta(days=1)) + "' AND SG.DateTime_UTC <= '" + str(date) + "'",
offset = str(ofst)
)
ofst+=500
mh=[]
previousGame=""
for i in response["cargoquery"]:
if i["title"]["UniqueGame"]!=previousGame:
previousGame=i["title"]["UniqueGame"]
mh.append({})
mh[-1]["Tournament"]=i["title"]["Tournament"]
mh[-1]["Date"]=str(dt.datetime.strptime(i["title"]["DateTime UTC"], "%Y-%m-%d %H:%M:%S").date())
mh[-1]["Team1"]=i["title"]["Team1"]
mh[-1]["Team2"]=i["title"]["Team2"]
mh[-1]["Winner"]=i["title"]["Winner"]
mh[-1]["Patch"]=i["title"]["Patch"]
mh[-1]["Team1Players"]={}
mh[-1]["Team2Players"]={}
mh[-1]["Team1Players"][i["title"]["Link"]]={}
mh[-1]["Team1Players"][i["title"]["Link"]]["Role"]=i["title"]["Role"]
mh[-1]["Team1Players"][i["title"]["Link"]]["Champion"]=i["title"]["Champion"]
mh[-1]["Team1Players"][i["title"]["Link"]]["SummonerSpells"]=i["title"]["SummonerSpells"]
if i["title"]["KeystoneMastery"]!="":
mh[-1]["Team1Players"][i["title"]["Link"]]["KeystoneMastery"]=i["title"]["KeystoneMastery"]
if i["title"]["KeystoneRune"]!="":
mh[-1]["Team1Players"][i["title"]["Link"]]["KeystoneRune"]=i["title"]["KeystoneRune"]
else:
mh[-1]["Team" + i["title"]["Side"] + "Players"][i["title"]["Link"]]={}
mh[-1]["Team" + i["title"]["Side"] + "Players"][i["title"]["Link"]]["Role"]=i["title"]["Role"]
mh[-1]["Team" + i["title"]["Side"] + "Players"][i["title"]["Link"]]["Champion"]=i["title"]["Champion"]
mh[-1]["Team" + i["title"]["Side"] + "Players"][i["title"]["Link"]]["SummonerSpells"]=i["title"]["SummonerSpells"]
if i["title"]["KeystoneMastery"]!="":
mh[-1]["Team" + i["title"]["Side"] + "Players"][i["title"]["Link"]]["KeystoneMastery"]=i["title"]["KeystoneMastery"]
if i["title"]["KeystoneRune"]!="":
mh[-1]["Team" + i["title"]["Side"] + "Players"][i["title"]["Link"]]["KeystoneRune"]=i["title"]["KeystoneRune"]
return mh
games_between_saves=500
# Opens the match history it's gotten before and checks the date of the last game.
# If the match history is empty it starts at a date just before the first game recorded on gamepedia.
with open("RawMatchHistory.json", mode="r", encoding="utf8") as f:
mh=json.load(f)
if len(mh)>0:
day=dt.datetime.strptime(mh[-1]["Date"], "%Y-%m-%d")
else:
day=dt.datetime.strptime("2011-06-18", "%Y-%m-%d")
# Gets all the games between the date of the last game and the date of the previous day the program is run.
Counter=0
while day<dt.datetime.now()-dt.timedelta(days=1):
print(day.strftime("%Y-%m-%d"), ": ", len(mh))
time.sleep(1)
mh+=getmh(day)
day=day+dt.timedelta(days=1)
Counter+=1
# Every once in a while the current progress is sorted and saved.
if Counter%games_between_saves==0:
sorted_mh=sorted(mh, key = lambda i: dt.datetime.strptime(i['Date'], "%Y-%m-%d"))
with open("RawMatchHistory.json", mode="w", encoding="utf8") as f:
json.dump(mh, f)
# Sorts the match history by date and saves.
sorted_mh=sorted(mh, key = lambda i: dt.datetime.strptime(i['Date'], "%Y-%m-%d"))
with open("RawMatchHistory.json", mode="w", encoding="utf8") as f:
json.dump(sorted_mh, f)
```
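A minimal sketch of calling `getmh` directly for a single day; it assumes the function above is available in scope (for example, copied into a module without the script's top-level download loop), and the date is arbitrary.
```python
import datetime as dt

# Assumes getmh() from the script above is in scope.
day = dt.datetime(2020, 9, 26)  # arbitrary date
games = getmh(day)
print(len(games), "games found")
if games:
    g = games[0]
    print(g["Tournament"], "-", g["Team1"], "vs", g["Team2"], "winner:", g["Winner"])
```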
#### File: LoL-Pro-TrueSkill/Trueskill LoL pro/PlayerRedirects.py
```python
import mwclient
import json
import time
# Gets the player redirects and saves them.
# All the data is taken from LoL Gamepedia's API, more info on it can be found at lol.gamepedia.com/Help:API_Documentation
def getPlayerRedirects():
os=0
site = mwclient.Site('lol.gamepedia.com', path='/')
response = site.api('cargoquery',
limit = "500",
offset= os,
tables = "PlayerRedirects=PR",
fields = "PR.AllName, PR._pageName=Page",
)
ret={}
for i in response["cargoquery"]:
ret[i["title"]["AllName"]]=i["title"]["Page"]
while len(response["cargoquery"])>0:
os+=500
time.sleep(1)
site = mwclient.Site('lol.gamepedia.com', path='/')
response = site.api('cargoquery',
limit = "500",
offset= os,
tables = "PlayerRedirects=PR",
fields = "PR.AllName, PR._pageName=Page",
)
for i in response["cargoquery"]:
ret[i["title"]["AllName"]]=i["title"]["Page"]
print(os)
return ret
pr_dict=getPlayerRedirects()
with open("PlayerRedirects.json", mode="w", encoding="utf8") as f:
json.dump(pr_dict, f)
``` |
{
"source": "JoG-Dev/checker-framework",
"score": 2
} |
#### File: checker-framework/release/sanity_checks.py
```python
from release_vars import *
from release_utils import *
import urllib
import zipfile
def javac_sanity_check( checker_framework_website, release_version ):
"""
Download the release of the Checker Framework from the development website
and NullnessExampleWithWarnings.java from the Google Code repository.
Run the Nullness Checker on NullnessExampleWithWarnings and verify the output
Fails if the expected errors are not found in the output.
"""
new_checkers_release_zip = os.path.join( checker_framework_website, "releases", release_version, "checker-framework.zip" )
javac_sanity_dir = os.path.join( SANITY_DIR, "javac" )
if os.path.isdir( javac_sanity_dir ):
delete_path( javac_sanity_dir )
execute( "mkdir -p " + javac_sanity_dir )
javac_sanity_zip = os.path.join( javac_sanity_dir, "checker-framework.zip" )
print( "Attempting to download %s to %s" % ( new_checkers_release_zip, javac_sanity_zip ) )
download_binary( new_checkers_release_zip, javac_sanity_zip, MAX_DOWNLOAD_SIZE )
nullness_example_url = "https://checker-framework.googlecode.com/hg/checker/examples/NullnessExampleWithWarnings.java"
nullness_example = os.path.join( javac_sanity_dir, "NullnessExampleWithWarnings.java" )
if os.path.isfile( nullness_example ):
delete( nullness_example )
wget_file( nullness_example_url, javac_sanity_dir )
deploy_dir = os.path.join( javac_sanity_dir, "checker-framework-" + release_version )
if os.path.exists( deploy_dir ):
print( "Deleting existing path: " + deploy_dir )
delete_path(deploy_dir)
with zipfile.ZipFile(javac_sanity_zip, "r") as z:
z.extractall( javac_sanity_dir )
cmd = "chmod -R u+rwx " + deploy_dir
execute( cmd )
sanity_javac = os.path.join( deploy_dir, "checker", "bin", "javac" )
nullness_output = os.path.join( deploy_dir, "output.log" )
cmd = sanity_javac + " -processor org.checkerframework.checker.nullness.NullnessChecker " + nullness_example + " -Anomsgtext"
execute_write_to_file( cmd, nullness_output, False )
check_results( "Javac sanity check", nullness_output, [
"NullnessExampleWithWarnings.java:25: error: (assignment.type.incompatible)",
"NullnessExampleWithWarnings.java:36: error: (argument.type.incompatible)"
])
#this is a smoke test for the built-in checker shorthand feature
#http://types.cs.washington.edu/checker-framework/current/checker-framework-manual.html#shorthand-for-checkers
nullness_shorthand_output = os.path.join( deploy_dir, "output_shorthand.log")
cmd = sanity_javac + " -processor NullnessChecker " + nullness_example + " -Anomsgtext"
execute_write_to_file( cmd, nullness_shorthand_output, False )
check_results( "Javac Shorthand Sanity Check", nullness_shorthand_output, [
"NullnessExampleWithWarnings.java:25: error: (assignment.type.incompatible)",
"NullnessExampleWithWarnings.java:36: error: (argument.type.incompatible)"
])
def maven_sanity_check( sub_sanity_dir_name, repo_url, release_version ):
"""
Download the Checker Framework maven plugin from the given repository. Download the
HelloGalaxy example for the Maven plugin and run the NullnessChecker on it. If we don't
    encounter the expected errors, the check fails.
"""
checker_dir = os.path.join(CHECKER_FRAMEWORK, "checker")
maven_sanity_dir = os.path.join( SANITY_DIR, sub_sanity_dir_name )
if os.path.isdir( maven_sanity_dir ):
delete_path( maven_sanity_dir )
execute( "mkdir -p " + maven_sanity_dir )
path_to_artifacts = os.path.join( os.path.expanduser("~"), ".m2", "repository", "org", "checkerframework" )
print("This script must delete your Maven Checker Framework artifacts.\n" +
"See README-maintainers.html#Maven-Plugin dependencies. These artifacts " +
"will need to be re-downloaded the next time you need them. This will be " +
"done automatically by Maven next time you use the plugin." )
continue_check = prompt_w_suggestion("Delete your Checker Framework artifacts?", "yes")
if is_no( continue_check ):
print("Please run the Maven tutorial manually using the Maven plugin at repo %s" % ( MAVEN_DEV_REPO ) )
else:
if os.path.isdir( path_to_artifacts ):
delete_path( path_to_artifacts )
hello_galaxy_dir = os.path.join( maven_sanity_dir, "HelloGalaxy" )
output_log = os.path.join( hello_galaxy_dir, "output.log" )
ant_release_script = os.path.join( CHECKER_FRAMEWORK_RELEASE, "release.xml" )
get_example_dir_cmd = "ant -f %s update-and-copy-hello-galaxy -Dchecker=%s -Dversion=%s -Ddest.dir=%s" % ( ant_release_script, checker_dir, release_version, maven_sanity_dir )
execute( get_example_dir_cmd )
hello_galaxy_pom = os.path.join( hello_galaxy_dir, "pom.xml" )
add_repo_information( hello_galaxy_pom, repo_url )
execute_write_to_file( "mvn checkerframework:check", output_log, False, hello_galaxy_dir )
check_results( "Maven sanity check", output_log, [
"HelloGalaxy.java:[30,29] [assignment.type.incompatible] incompatible types in assignment."
])
delete_path( path_to_artifacts )
def check_results( title, output_log, expected_errors ):
found_errors = are_in_file( output_log, expected_errors )
if not found_errors:
raise Exception( title + " did not work!\n" +
"File: " + output_log + "\n" +
"should contain the following errors: [ " + ", ".join(expected_errors) )
else:
print( "%s check: passed!\n" % title )
def add_repo_information( pom, repo_url ):
"""Adds development maven repo to pom file so that the artifacts used are the development artifacts"""
to_insert = """
<repositories>
<repository>
<id>checker-framework-repo</id>
<url>%s</url>
</repository>
</repositories>
<pluginRepositories>
<pluginRepository>
<id>checker-framework-repo</id>
<url>%s</url>
</pluginRepository>
</pluginRepositories>
""" % (repo_url, repo_url)
result_str = execute( 'grep -nm 1 "<build>" %s' % pom, True, True )
line_no_str = result_str.split(":")[0]
line_no = int( line_no_str )
print(" LINE_NO: " + line_no_str )
insert_before_line( to_insert, pom, line_no )
``` |
{
"source": "JoGed/EnsembleDFT-ML",
"score": 2
} |
#### File: JoGed/EnsembleDFT-ML/XC_Model.py
```python
import os
import torch
from torch import nn
import pytorch_lightning as pl
from argparse import ArgumentParser
import KohnShamSpin
from tqdm import tqdm
from scipy import optimize
import sys
import numpy as np
class XC_Model(pl.LightningModule):
def __init__(self, **hparams):
"""
Pass in parsed HyperOptArgumentParser to the model
:param hparams:
"""
super().__init__()
self.save_hyperparameters()
# -------------------------------
# Define Layer Architecture
# -------------------------------
self.channels = 1 if not self.hparams.Spin else 2
if self.hparams.Disc: self.channels += 1
self.input_dim_Slope = 1 if not self.hparams.Spin else 2
self.Conv_Channels = [self.channels] + list(self.hparams.Conv_OutChannels)
self.Slope_Channels = [self.input_dim_Slope] + list(self.hparams.Conv_OutChannels)
self.LDA_LayerDims = [self.hparams.LDA_in_channels] + list(self.hparams.LDA_LayerOutDims)
self.padding = int((self.hparams.kernelsize - 1)/2)
self.Con0 = nn.ModuleList([nn.Conv1d(in_channels=self.Conv_Channels[i], out_channels=self.Conv_Channels[i+1],
kernel_size=self.hparams.kernelsize, padding=self.padding)
for i in range(len(self.Conv_Channels)-1)])
self.Slope = nn.ModuleList([nn.Conv1d(in_channels=self.Slope_Channels[i], out_channels=self.Slope_Channels[i+1],
kernel_size=self.hparams.kernelsize, padding=self.padding)
for i in range(len(self.Slope_Channels)-1)])
self.convPreLDA2 = nn.Conv1d(in_channels=self.channels, out_channels=self.hparams.LDA_in_channels,
kernel_size=self.hparams.kernelsize, padding=self.padding, bias=True)
self.convPreLDA1 = nn.Conv1d(in_channels=1 if not self.hparams.Spin else 2, out_channels=self.hparams.LDA_in_channels,
kernel_size=self.hparams.kernelsize, padding=self.padding, bias=True)
self.LDA1 = nn.ModuleList([nn.Linear(self.LDA_LayerDims[i], self.LDA_LayerDims[i+1])
for i in range(len(self.LDA_LayerDims)-1)])
self.LDA2 = nn.ModuleList([nn.Linear(self.LDA_LayerDims[i], self.LDA_LayerDims[i+1])
for i in range(len(self.LDA_LayerDims)-1)])
self.actsConvs = nn.ModuleList([nn.SiLU() for _ in range(len(self.Conv_Channels))])
self.actsLDA = nn.ModuleList([nn.SiLU() for _ in range(len(self.LDA_LayerDims))])
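    # Architecture sketch with the default hyperparameters (Conv_OutChannels=[16,16,16,1],
    # kernelsize=9, Spin=False, Disc=False): Con0 holds four Conv1d layers
    # 1 -> 16 -> 16 -> 16 -> 1, each with kernel size 9 and same-padding, i.e. a
    # receptive field of 4*(9-1)+1 = 33 grid points per output point.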
def DisconFun(self, pointsAndDens, eps):
# ---------------------------------------
# non-differentiable auxiliary function
# ---------------------------------------
points, x = pointsAndDens
dx = points[0][1] - points[0][0]
x_slope = x
for fc, act in zip(self.Slope, self.actsConvs):
x_slope= act(fc(x_slope))
return 1 + ((torch.sum(x_slope, (1, 2))) *
torch.abs(torch.sin((x.sum(dim=(1, 2)) * dx - eps) * np.pi)) /
(x.sum(dim=(1, 2)) * dx + 2)
).unsqueeze(-1)
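    # Note: the |sin(pi*(N - eps))| factor above vanishes whenever the integrated density
    # N = sum(x)*dx is (numerically) an integer, so DisconFun returns 1 there and deviates
    # from 1 in between -- the intended source of non-differentiability at integer N.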
def forward(self, pointsAndDens):
points, x = pointsAndDens
dx = points[0][1] - points[0][0]
if self.hparams.Disc:
if self.hparams.WindowPlusLDA:
u = self.convPreLDA1(x)
u = torch.transpose(u, 1, 2) # -> [B, dim_out , channels]
for fc, act in zip(self.LDA1, self.actsLDA):
u = act(fc(u))
u = torch.transpose(u, 1, 2)
u = 1 + ((torch.sum(u, (1, 2))) *
torch.abs(torch.sin((x.sum(dim=(1, 2)) * dx - 1e-14) * np.pi)) /
(x.sum(dim=(1, 2)) * dx + 2)
).unsqueeze(-1)
f = self.convPreLDA2(torch.cat((x, u.repeat(1, points.shape[-1]).unsqueeze(1)), dim=1))
f = torch.transpose(f, 1, 2)
for fc, act in zip(self.LDA2, self.actsLDA):
f = act(fc(f))
f = torch.transpose(f, 1, 2)
x_out = -0.5 * torch.sum(torch.multiply(x.sum(1).unsqueeze(1), f), -1) # -> [B, 1]
else:
u = self.DisconFun((points, x), eps=1e-14).repeat(1, points.shape[-1]).unsqueeze(1)
f = torch.cat((x, u), dim=1)
for fc, act in zip(self.Con0, self.actsConvs):
f = act(fc(f))
x_out = -0.5 * torch.sum(torch.multiply(x.sum(1).unsqueeze(1), f), -1)
else:
if self.hparams.WindowPlusLDA:
f = self.convPreLDA2(x)
f = torch.transpose(f, 1, 2)
for fc, act in zip(self.LDA2, self.actsLDA):
f = act(fc(f))
f = torch.transpose(f, 1, 2)
x_out = -0.5 * torch.sum(torch.multiply(x.sum(1).unsqueeze(1), f), -1)
else:
f = x
for fc, act in zip(self.Con0, self.actsConvs):
f = act(fc(f))
x_out = -0.5 * torch.sum(torch.multiply(x.sum(1).unsqueeze(1), f), -1)
return x_out
def loss(self, E_xc_out, E_xc_ref, V_xc_out, V_xc_ref):
E_xc_ref = E_xc_ref.unsqueeze(1)
MSE1 = nn.MSELoss(reduction="mean")
l_E_xc = MSE1(E_xc_out, E_xc_ref)
l_V_xc = MSE1(V_xc_out, V_xc_ref)
return l_E_xc, l_V_xc, 10*l_E_xc +100*l_V_xc
def lossJump(self, Dens_total_1, Dens_total_2, V_xc_out1, V_xc_out2,
Dens_total_mix1, Dens_total_mix2, V_xc_out_mix1, V_xc_out_mix2,
evals_Int1, evals_Int2, E_tot_Triplet):
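        # Interpretation (assuming E_tot_Triplet holds exact total energies for N = 1, 2, 3
        # and E(N=0) = 0): IONminusAFF_i = E(N_i+1) - 2*E(N_i) + E(N_i-1) is the ionisation
        # potential minus the electron affinity at N_i, so JumpXC_i = (I - A) - KS gap is the
        # xc derivative discontinuity that the shift V_xc_out_mix - V_xc_out is trained to match.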
IONminusAFF1 = E_tot_Triplet[0][1] - 2 * E_tot_Triplet[0][0] + 0
IONminusAFF2 = E_tot_Triplet[0][2] - 2 * E_tot_Triplet[0][1] + E_tot_Triplet[0][0]
        KSgap1 = evals_Int1[0][1] - evals_Int1[0][0]  # KS gap of system 1, defined analogously to KSgap2
KSgap2 = evals_Int2[0][1] - evals_Int2[0][0]
JumpXC1 = IONminusAFF1 - KSgap1
JumpXC2 = IONminusAFF2 - KSgap2
#print(JumpXC1.item(), JumpXC2.item())
MSE = nn.MSELoss(reduction="mean")
Id_fun = torch.ones(V_xc_out1.shape[-1]).view(1,1,-1)
if self.hparams.gpus_num != 0 or len(self.hparams.gpus_devices) != 0:
Id_fun = Id_fun.cuda()
JumpXC1_fun = JumpXC1 * Id_fun
JumpXC2_fun = JumpXC2 * Id_fun
JumpLoss1 = MSE((V_xc_out_mix1 - V_xc_out1), JumpXC1_fun)
JumpLoss2 = MSE((V_xc_out_mix2 - V_xc_out2), JumpXC2_fun)
return JumpLoss1, JumpLoss2
def training_step(self, batch, batch_idx):
eps=1.1e-14
if hasattr(self.hparams, "train_jump"):
if self.hparams.train_jump:
batch = {key: batch[key].squeeze(0) for key in batch.keys()}
points = batch["points"]
Dens_total = batch["Dens_total"]
Dens_up = batch["Dens_up"]
Dens_down = batch["Dens_down"]
V_ext = batch["v_ext"]
V_xc_NoSpin = batch["v_xc_NoSpin"]
V_xc_up = batch["v_xc_up"]
V_xc_down = batch["v_xc_down"]
E_xc_NoSpin = batch["E_xc_NoSpin"]
E_xc_Spin = batch["E_xc_Spin"]
E_tot = batch["E_tot"]
evals_Int1 = batch["evals_Int1"]
evals_Int2 = batch["evals_Int2"]
E_tot_Triplet = batch["E_tot_Triplet"]
dx = points[0][1] - points[0][0]
# ------------------------------------------------------------------------------------------------------------------
# Compute E_xc & V_xc
# ------------------------------------------------------------------------------------------------------------------
if self.hparams.Spin:
DensUpAndDown = torch.cat((Dens_up.unsqueeze(1), Dens_down.unsqueeze(1)), 1)
V_xcUpAndDown = torch.cat((V_xc_up.unsqueeze(1), V_xc_down.unsqueeze(1)), 1)
DensUpAndDown.requires_grad = True
E_xc_Spin_out = self((points, DensUpAndDown))
E_xc_Spin_out_deriv = \
torch.autograd.grad(inputs=DensUpAndDown, outputs=E_xc_Spin_out, create_graph=True,
retain_graph=True, grad_outputs=torch.ones_like(E_xc_Spin_out))[0] / dx
l_Exc, l_V_xc, loss = self.loss(E_xc_out=E_xc_Spin_out, E_xc_ref=E_xc_Spin, V_xc_out=E_xc_Spin_out_deriv,
V_xc_ref=V_xcUpAndDown)
if hasattr(self.hparams, "SpinMirror"):
if self.hparams.SpinMirror:
DensUpAndDown_mirr = torch.cat((Dens_down.detach().clone().unsqueeze(1), Dens_up.detach().clone().unsqueeze(1)), 1)
V_xcUpAndDown_mirr = torch.cat((V_xc_down.detach().clone().unsqueeze(1), V_xc_up.detach().clone().unsqueeze(1)), 1)
DensUpAndDown_mirr.requires_grad = True
E_xc_Spin_out_mirr = self((points, DensUpAndDown_mirr))
E_xc_Spin_out_deriv_mirr = \
torch.autograd.grad(inputs=DensUpAndDown_mirr , outputs=E_xc_Spin_out_mirr , create_graph=True,
retain_graph=True, grad_outputs=torch.ones_like(E_xc_Spin_out_mirr))[0] / dx
l_Exc_mirr , l_V_xc_mirr , loss_mirr = self.loss(E_xc_out=E_xc_Spin_out_mirr, E_xc_ref=E_xc_Spin,
V_xc_out=E_xc_Spin_out_deriv_mirr,
V_xc_ref=V_xcUpAndDown_mirr)
l_Exc = (l_Exc + l_Exc_mirr) / 2.
l_V_xc = (l_V_xc + l_V_xc_mirr) / 2.
loss = (loss + loss_mirr) / 2.
else:
Dens_total = Dens_total.unsqueeze(1)
V_xc_NoSpin = V_xc_NoSpin.unsqueeze(1)
Dens_total.requires_grad = True
E_xc_NoSpin_out = self((points, Dens_total))
E_xc_NoSpin_out_deriv = \
torch.autograd.grad(inputs=Dens_total, outputs=E_xc_NoSpin_out, create_graph=True,
retain_graph=True, grad_outputs=torch.ones_like(E_xc_NoSpin_out))[0] / dx
l_Exc, l_V_xc, loss = self.loss(E_xc_out=E_xc_NoSpin_out, E_xc_ref=E_xc_NoSpin, V_xc_out=E_xc_NoSpin_out_deriv,
V_xc_ref=V_xc_NoSpin)
if hasattr(self.hparams, "train_jump"):
if self.hparams.train_jump:
mid_idx = int(len(self.hparams.DimsToTrain) / 2)
Dens_total_mix1 = (1 - eps) * Dens_total.detach().clone()[0] + eps * Dens_total.detach().clone()[mid_idx]
Dens_total_mix2 = (1 - eps) * Dens_total.detach().clone()[mid_idx] + eps * Dens_total.detach().clone()[-1]
# ---------------------------------------------------------
Dens_total_mix1 = Dens_total_mix1.unsqueeze(0)
Dens_total_mix1.requires_grad = True
E_xc_NoSpin_out_mix1 = self((points, Dens_total_mix1))
E_xc_NoSpin_out_deriv_mix1 = \
torch.autograd.grad(inputs=Dens_total_mix1, outputs=E_xc_NoSpin_out_mix1, create_graph=True,
retain_graph=True, grad_outputs=torch.ones_like(E_xc_NoSpin_out_mix1))[0] / dx
# ---------------------------------------------------------
Dens_total_mix2 = Dens_total_mix2.unsqueeze(0)
Dens_total_mix2.requires_grad = True
E_xc_NoSpin_out_mix2 = self((points, Dens_total_mix2))
E_xc_NoSpin_out_deriv_mix2 = \
torch.autograd.grad(inputs=Dens_total_mix2, outputs=E_xc_NoSpin_out_mix2, create_graph=True,
retain_graph=True, grad_outputs=torch.ones_like(E_xc_NoSpin_out_mix2))[0] / dx
# ---------------------------------------------------------
lossJump1, lossJump2 = self.lossJump(Dens_total_1=Dens_total[0].unsqueeze(0),
Dens_total_2=Dens_total[mid_idx].unsqueeze(0),
V_xc_out1=E_xc_NoSpin_out_deriv[0].unsqueeze(0),
V_xc_out2=E_xc_NoSpin_out_deriv[mid_idx].unsqueeze(0),
Dens_total_mix1=Dens_total_mix1,
Dens_total_mix2=Dens_total_mix2,
V_xc_out_mix1=E_xc_NoSpin_out_deriv_mix1,
V_xc_out_mix2=E_xc_NoSpin_out_deriv_mix2,
evals_Int1=evals_Int1,
evals_Int2=evals_Int2,
E_tot_Triplet=E_tot_Triplet)
self.log('val Jump1', lossJump1)
self.log('val Jump2', lossJump2)
loss += 10 * (lossJump1 + lossJump2)
self.log('train_loss', loss, prog_bar=True)
self.log('train MSE EXC', l_Exc)
self.log('train MSE VXC', l_V_xc)
# ------------------------------------------------------------------------------------------------------------------
return loss
def validation_step(self, batch, batch_idx):
eps = 1.1e-14
if hasattr(self.hparams, "train_jump"):
if self.hparams.train_jump:
batch = {key: batch[key].squeeze(0) for key in batch.keys()}
points = batch["points"]
Dens_total = batch["Dens_total"]
Dens_up = batch["Dens_up"]
Dens_down = batch["Dens_down"]
V_ext = batch["v_ext"]
V_xc_NoSpin = batch["v_xc_NoSpin"]
V_xc_up = batch["v_xc_up"]
V_xc_down = batch["v_xc_down"]
E_xc_NoSpin = batch["E_xc_NoSpin"]
E_xc_Spin = batch["E_xc_Spin"]
E_tot = batch["E_tot"]
evals_Int1 = batch["evals_Int1"]
evals_Int2 = batch["evals_Int2"]
E_tot_Triplet = batch["E_tot_Triplet"]
dx = points[0][1] - points[0][0]
torch.set_grad_enabled(True)
# ----------------------------------------------------------------------------------------------------------------
# Compute E_xc & V_xc
# ----------------------------------------------------------------------------------------------------------------
if self.hparams.Spin:
DensUpAndDown = torch.cat((Dens_up.unsqueeze(1), Dens_down.unsqueeze(1)), 1)
V_xcUpAndDown = torch.cat((V_xc_up.unsqueeze(1), V_xc_down.unsqueeze(1)), 1)
DensUpAndDown.requires_grad = True
E_xc_Spin_out = self((points, DensUpAndDown))
E_xc_Spin_out_deriv = \
torch.autograd.grad(inputs=DensUpAndDown, outputs=E_xc_Spin_out, create_graph=True,
retain_graph=True, grad_outputs=torch.ones_like(E_xc_Spin_out))[0] / dx
l_Exc, l_V_xc, loss = self.loss(E_xc_out=E_xc_Spin_out, E_xc_ref=E_xc_Spin, V_xc_out=E_xc_Spin_out_deriv,
V_xc_ref=V_xcUpAndDown)
self.log('val_loss', loss, prog_bar=True)
self.log('val MSE EXC', l_Exc)
self.log('val MSE VXC', l_V_xc)
return {"val_loss": loss, "Tuple": (points, DensUpAndDown, V_ext, E_xc_Spin_out,
E_xc_Spin_out_deriv, V_xcUpAndDown, E_tot, E_xc_Spin, loss)}
else:
Dens_total = Dens_total.unsqueeze(1)
V_xc_NoSpin = V_xc_NoSpin.unsqueeze(1)
Dens_total.requires_grad = True
E_xc_NoSpin_out = self((points, Dens_total))
E_xc_NoSpin_out_deriv = \
torch.autograd.grad(inputs=Dens_total, outputs=E_xc_NoSpin_out, create_graph=False,
retain_graph=True, grad_outputs=torch.ones_like(E_xc_NoSpin_out))[0] / dx
l_Exc, l_V_xc, loss = self.loss(E_xc_out=E_xc_NoSpin_out, E_xc_ref=E_xc_NoSpin, V_xc_out=E_xc_NoSpin_out_deriv,
V_xc_ref=V_xc_NoSpin)
self.log('val MSE EXC', l_Exc)
self.log('val MSE VXC', l_V_xc)
if hasattr(self.hparams, "train_jump"):
if self.hparams.train_jump:
mid_idx = int(len(self.hparams.DimsToTrain) / 2)
Dens_total_mix1 = (1 - eps) * Dens_total.detach().clone()[0] + eps * Dens_total.detach().clone()[mid_idx]
Dens_total_mix2 = (1 - eps) * Dens_total.detach().clone()[mid_idx] + eps * Dens_total.detach().clone()[-1]
Dens_total_mix1 = Dens_total_mix1.unsqueeze(0)
Dens_total_mix1.requires_grad = True
E_xc_NoSpin_out_mix1 = self((points, Dens_total_mix1))
E_xc_NoSpin_out_deriv_mix1 = \
torch.autograd.grad(inputs=Dens_total_mix1, outputs=E_xc_NoSpin_out_mix1, create_graph=True,
retain_graph=True, grad_outputs=torch.ones_like(E_xc_NoSpin_out_mix1))[0] / dx
# ---------------------------------------------------------
Dens_total_mix2 = Dens_total_mix2.unsqueeze(0)
Dens_total_mix2.requires_grad = True
E_xc_NoSpin_out_mix2 = self((points, Dens_total_mix2))
E_xc_NoSpin_out_deriv_mix2 = \
torch.autograd.grad(inputs=Dens_total_mix2, outputs=E_xc_NoSpin_out_mix2, create_graph=True,
retain_graph=True, grad_outputs=torch.ones_like(E_xc_NoSpin_out_mix2))[0] / dx
# ---------------------------------------------------------
lossJump1, lossJump2 = self.lossJump(Dens_total_1=Dens_total[0].unsqueeze(0),
Dens_total_2=Dens_total[mid_idx].unsqueeze(0),
V_xc_out1=E_xc_NoSpin_out_deriv[0].unsqueeze(0),
V_xc_out2=E_xc_NoSpin_out_deriv[mid_idx].unsqueeze(0),
Dens_total_mix1=Dens_total_mix1,
Dens_total_mix2=Dens_total_mix2,
V_xc_out_mix1=E_xc_NoSpin_out_deriv_mix1,
V_xc_out_mix2=E_xc_NoSpin_out_deriv_mix2,
evals_Int1=evals_Int1,
evals_Int2=evals_Int2,
E_tot_Triplet=E_tot_Triplet)
self.log('val Jump1', lossJump1)
self.log('val Jump2', lossJump2)
loss += 10 * (lossJump1 + lossJump2)
# ---------------------------------------------------------
self.log('val_loss', loss, prog_bar=True)
return {"val_loss": loss, "Tuple": (points, Dens_total, V_ext, E_xc_NoSpin_out, E_xc_NoSpin_out_deriv, V_xc_NoSpin,
E_tot, E_xc_NoSpin, loss)}
def GS_dens_splitter(self, particles):
# ------------------------------------------------------
# returns occupation (array_like) of Kohn Sham orbitals
# ------------------------------------------------------
if particles < 1 - 1e-14:
raise Exception("particles < 1!")
rounded, append = int(particles), particles - int(particles)
if rounded % 2 == 0:
s = int(rounded / 2.)
up_occ = np.ones(s + 1)
up_occ[-1] = append
down_occ = np.ones(s + 1)
down_occ[-1] = 0
else:
s = int((rounded - 1) / 2.)
up_occ = np.ones(s + 1)
down_occ = np.ones(s + 1)
down_occ[-1] = append
return up_occ + down_occ, up_occ, down_occ
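    # Worked examples (follow directly from the branches above):
    #   GS_dens_splitter(2.0) -> occ=[2., 0. ], up_occ=[1., 0. ], down_occ=[1., 0.]
    #   GS_dens_splitter(2.5) -> occ=[2., 0.5], up_occ=[1., 0.5], down_occ=[1., 0.]
    #   GS_dens_splitter(3.0) -> occ=[2., 1. ], up_occ=[1., 1. ], down_occ=[1., 0.]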
def KSIterations(self, KSsystem, Dens_inp, V_xc_inp, E_xc):
# ---------------------------------------------------------------------------------------------------------
# SOLVING KOHN SHAM EQUATIONS
# ---------------------------------------------------------------------------------------------------------
Dens_KS_init = Dens_inp.detach().cpu().clone().numpy()
V_xc_in = V_xc_inp.detach().cpu().clone().numpy()
KSsystem["v_ext"] = KSsystem["v_ext"].cpu().numpy()
KSsystem['points'] = KSsystem['points'].cpu().numpy()
v_ext_diag = KSsystem['v_ext']
def selfcons(x):
x_last = x.copy()
if self.hparams.Spin:
v_H_diag = KohnShamSpin.V_Hartree(KSsystem, x[0] + x[1])
v_eff_diag_up = v_H_diag + v_ext_diag + V_xc_in[0]
v_eff_diag_down = v_H_diag + v_ext_diag + V_xc_in[1]
evals_up, selfcons.Psi_up, D_Matrix_up = KohnShamSpin.Orbitals(P=KSsystem,
v_eff_diag=v_eff_diag_up,
occ=KSsystem["up_occ"])
evals_down, selfcons.Psi_down, D_Matrix_down = KohnShamSpin.Orbitals(P=KSsystem,
v_eff_diag=v_eff_diag_down,
occ=KSsystem["down_occ"])
Dens_KS_new_up = KohnShamSpin.Density(P=KSsystem, D_arr=D_Matrix_up, occ=KSsystem["up_occ"])
Dens_KS_new_down = KohnShamSpin.Density(P=KSsystem, D_arr=D_Matrix_down, occ=KSsystem["down_occ"])
Dens_KS_new = np.stack((Dens_KS_new_up, Dens_KS_new_down))
else:
v_H_diag = KohnShamSpin.V_Hartree(KSsystem, x[0])
v_eff_diag = v_H_diag + v_ext_diag + V_xc_in[0]
evals, selfcons.Psi, D_Matrix = KohnShamSpin.Orbitals(P=KSsystem,
v_eff_diag=v_eff_diag,
occ=KSsystem["occ"])
Dens_KS_new = KohnShamSpin.Density(P=KSsystem, D_arr=D_Matrix, occ=KSsystem["occ"])
return Dens_KS_new - x_last
Dens_KS_out = torch.tensor(optimize.broyden1(selfcons, Dens_KS_init, f_tol=1e-8))
E_ext_KS, E_H_KS = KohnShamSpin.Energies(P=KSsystem,
Dens=torch.sum(Dens_KS_out, dim=0),
v_ext=KSsystem["v_ext"],
v_H=KohnShamSpin.V_Hartree(KSsystem, torch.sum(Dens_KS_out, dim=0)))
if not self.hparams.Spin:
E_kin_KS = KohnShamSpin.E_kinetic(P=KSsystem, Psi=selfcons.Psi, occ=KSsystem["occ"])
E_tot_KS = E_xc + E_kin_KS + E_ext_KS + E_H_KS
else:
E_kin_KS_up = KohnShamSpin.E_kinetic(P=KSsystem, Psi=selfcons.Psi_up, occ=KSsystem["up_occ"])
E_kin_KS_down = KohnShamSpin.E_kinetic(P=KSsystem, Psi=selfcons.Psi_down, occ=KSsystem["down_occ"])
E_tot_KS = E_xc + (E_kin_KS_up + E_kin_KS_down) + E_ext_KS + E_H_KS
return Dens_KS_out, E_tot_KS
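    # Note: optimize.broyden1 drives selfcons() to zero, i.e. it finds the density that
    # reproduces itself under the KS Hamiltonian built from the fixed (ML-predicted) V_xc_in;
    # E_tot_KS then adds the ML E_xc to the kinetic, external and Hartree energies of that
    # self-consistent solution.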
def test_step(self, batch, batch_idx):
import pprint
import gzip
import pickle
import re
batch, test_params = batch
test_params = {key: test_params[key][0] for key in test_params.keys()}
test_params["CompareWith"]=list(test_params["CompareWith"].replace('[', '').
replace(']', '').replace(',', ' ').split())
if batch_idx == 0:
print("\n\nTEST PARAMETERS:")
pprint.pprint(test_params)
print("\n")
# ---------------------------------------------------------------------------------------------------------
# COMPUTE TOTAL ENERGY
# ---------------------------------------------------------------------------------------------------------
def CalcXCData(batch):
eps = 1.1e-14
points = batch["points"]
Dens_total = batch["Dens_total"]
Dens_up = batch["Dens_up"]
Dens_down = batch["Dens_down"]
V_ext = batch["v_ext"]
V_xc_NoSpin = batch["v_xc_NoSpin"]
V_xc_up = batch["v_xc_up"]
V_xc_down = batch["v_xc_down"]
E_xc_NoSpin = batch["E_xc_NoSpin"]
E_xc_Spin = batch["E_xc_Spin"]
E_tot = batch["E_tot"]
#print(batch["dim"])
dx = points[0][1] - points[0][0]
torch.set_grad_enabled(True)
# ----------------------------------------------------------------------------------------------------------------
# Compute E_xc & V_xc
# ----------------------------------------------------------------------------------------------------------------
if self.hparams.Spin:
DensUpAndDown = torch.cat((Dens_up.unsqueeze(1), Dens_down.unsqueeze(1)), 1)
V_xcUpAndDown = torch.cat((V_xc_up.unsqueeze(1), V_xc_down.unsqueeze(1)), 1)
DensUpAndDown.requires_grad = True
E_xc_Spin_out = self((points, DensUpAndDown))
E_xc_Spin_out_deriv = \
torch.autograd.grad(inputs=DensUpAndDown, outputs=E_xc_Spin_out, create_graph=True,
retain_graph=True, grad_outputs=torch.ones_like(E_xc_Spin_out))[0] / dx
return points, DensUpAndDown, V_ext, E_xc_Spin_out, E_xc_Spin_out_deriv, V_xcUpAndDown, E_tot, E_xc_Spin
else:
Dens_total = Dens_total.unsqueeze(1)
V_xc_NoSpin = V_xc_NoSpin.unsqueeze(1)
Dens_total.requires_grad = True
E_xc_NoSpin_out = self((points, Dens_total))
E_xc_NoSpin_out_deriv = \
torch.autograd.grad(inputs=Dens_total, outputs=E_xc_NoSpin_out, create_graph=False,
retain_graph=True, grad_outputs=torch.ones_like(E_xc_NoSpin_out))[0] / dx
return points, Dens_total, V_ext, E_xc_NoSpin_out, E_xc_NoSpin_out_deriv, V_xc_NoSpin, E_tot, E_xc_NoSpin
points, Dens, V_ext, E_xc_out, E_xc_out_deriv, V_xc, E_tot_ref, E_xc_ref = CalcXCData(batch)
alpha = torch.linspace(0, 1, test_params["FracPoints"], dtype=torch.float64)
points = points[0]
dx = points[1] - points[0]
Exc_mix_arr = torch.zeros((2, len(alpha)))
Etot_mix_arr = torch.zeros((2, len(alpha)))
if self.hparams.Spin:
D_mix_up_arr = torch.zeros((2, len(alpha), len(points)))
D_mix_down_arr = torch.zeros((2, len(alpha), len(points)))
Vxc_up_mix_arr = torch.zeros((2, len(alpha), len(points)))
Vxc_down_mix_arr = torch.zeros((2, len(alpha), len(points)))
D_KS_mix_up_arr = torch.zeros((2, len(alpha), len(points)))
D_KS_mix_down_arr = torch.zeros((2, len(alpha), len(points)))
else:
D_mix_arr = torch.zeros((2, len(alpha), len(points)))
Vxc_mix_arr = torch.zeros((2, len(alpha), len(points)))
D_KS_mix_arr = torch.zeros((2, len(alpha), len(points)))
#mix_doubles = [0, int(len(self.hparams.DimsToTrain) / 2.)]
mix_doubles = [0, 1]
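        # mix_doubles indexes the integer-N reference densities in the batch (assuming the test
        # batch is ordered N = 1, 2, 3), so the two fractional segments below run N: 1 -> 2 and 2 -> 3.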
for m in range(len(mix_doubles)):
for i in tqdm(range(len(alpha))):
if i == 0: # "left" integer particle number
Dens_mix, E_xc_mix, V_xc_mix = Dens[mix_doubles[m]], E_xc_out[mix_doubles[m]], E_xc_out_deriv[
mix_doubles[m]]
elif i == len(alpha) - 1: # "right" integer particle number
Dens_mix, E_xc_mix, V_xc_mix = Dens[mix_doubles[m] + (mix_doubles[1] - mix_doubles[0])], \
E_xc_out[mix_doubles[m] + (mix_doubles[1] - mix_doubles[0])], \
E_xc_out_deriv[mix_doubles[m] + (mix_doubles[1] - mix_doubles[0])]
else: # fractional particle numbers
Dens_mix = (1 - alpha[i]) * Dens[mix_doubles[m]].detach().clone() \
+ alpha[i] * Dens[mix_doubles[m] + (mix_doubles[1] - mix_doubles[0])].detach().clone()
Dens_mix = Dens_mix.unsqueeze(0)
Dens_mix.requires_grad = True
E_xc_mix = self((points.unsqueeze(0), Dens_mix))
V_xc_mix = \
torch.autograd.grad(inputs=Dens_mix, outputs=E_xc_mix, create_graph=False,
retain_graph=True, grad_outputs=torch.ones_like(E_xc_mix))[0] / dx
Dens_mix = Dens_mix.squeeze(0)
E_xc_mix = E_xc_mix.squeeze(0)
V_xc_mix = V_xc_mix.squeeze(0)
# --------------------------------------------------------------------------------------------------------
# COMPUTE TOTAL ENERGY BY SOLVING KOHN SHAM EQUATIONS
# --------------------------------------------------------------------------------------------------------
Dens_integ = Dens_mix.sum().item() * dx
KSsystem = {
"dim": Dens_integ,
"v_ext": V_ext[mix_doubles[m]],
"points": points,
"N": len(points),
"dx": dx.item(),
'laplOrder': 4,
"occ": None,
"up_occ": None,
"down_occ": None,
}
# ---------------------------------------------------------------------------------------------------------
# ENSURE THAT DENSITIES WILL BE ASSIGNED TO CORRECT FRACTIONAL OCCUPATION
# ---------------------------------------------------------------------------------------------------------
Dims_Diff = np.abs(np.array(len([1,2,3]) * [KSsystem["dim"]]) - np.array([1,2,3]))
KSsystem['occ'], KSsystem['up_occ'], KSsystem['down_occ'] = self.GS_dens_splitter(KSsystem['dim'])
Dens_KS_out, E_tot_mix = self.KSIterations(KSsystem=KSsystem, Dens_inp=Dens_mix, V_xc_inp=V_xc_mix, E_xc=E_xc_mix)
Exc_mix_arr[m, i] = E_xc_mix
Etot_mix_arr[m, i] = E_tot_mix
if self.hparams.Spin:
D_mix_up_arr[m, i] = Dens_mix[0]
D_mix_down_arr[m, i] = Dens_mix[1]
Vxc_up_mix_arr[m, i] = V_xc_mix[0]
Vxc_down_mix_arr[m, i] = V_xc_mix[1]
D_KS_mix_up_arr[m, i] = Dens_KS_out[0]
D_KS_mix_down_arr[m, i] = Dens_KS_out[1]
else:
D_mix_arr[m, i] = Dens_mix[0]
Vxc_mix_arr[m, i] = V_xc_mix[0]
D_KS_mix_arr[m, i] = Dens_KS_out[0]
E_t_r = E_tot_ref.detach().cpu().numpy()
E_xc_r = E_xc_ref.detach().cpu().numpy()
def E_tot_exact_func(N):
#mix_doubles = [0, int(len(self.hparams.DimsToTrain) / 2.)]
mix_doubles = [0, 1]
m = 1 if N[-1] > 2.001 else 0
slope = E_t_r[mix_doubles[m] + (mix_doubles[1] - mix_doubles[0])] - E_t_r[mix_doubles[m]]
offset = (E_t_r[mix_doubles[m] + (mix_doubles[1] - mix_doubles[0])] + E_t_r[mix_doubles[m]] - slope * (
N[0] + N[-1])) * 0.5
return slope * N + offset
N_12arr = np.linspace(1, 2, test_params["FracPoints"])
N_23arr = np.linspace(2, 3, test_params["FracPoints"])
E_tot_exact12 = E_tot_exact_func(N_12arr)
E_tot_exact23 = E_tot_exact_func(N_23arr)
#quadCoeff_12 = np.polyfit(N_12arr, Etot_mix_arr[0].detach().cpu().numpy(), 2)[0]
#quadCoeff_23 = np.polyfit(N_23arr, Etot_mix_arr[1].detach().cpu().numpy(), 2)[0]
#L2Normsquared_12 = np.sum((Etot_mix_arr[0].detach().cpu().numpy() - E_tot_exact12) ** 2) * (
# alpha[1] - alpha[0]).item()
#L2Normsquared_23 = np.sum((Etot_mix_arr[1].detach().cpu().numpy() - E_tot_exact23) ** 2) * (
# alpha[1] - alpha[0]).item()
MSE_Etot = nn.MSELoss()(torch.cat((Etot_mix_arr[0].detach(),
Etot_mix_arr[1].detach())),
torch.cat((torch.from_numpy(E_tot_exact12),
torch.from_numpy(E_tot_exact23)))).item()
Var_Etot = torch.var(torch.cat((Etot_mix_arr[0].detach(),
Etot_mix_arr[1].detach()))
- torch.cat((torch.from_numpy(E_tot_exact12),
torch.from_numpy(E_tot_exact23)))).item()
#print(batch_idx, MSE_Etot, Var_Etot)
# ---------------------------------------------------------------------------------------------------------
# ERRORS INTEGER ENERGIES
# ---------------------------------------------------------------------------------------------------------
E_tot_Triple = torch.tensor(
(Etot_mix_arr[0, 0], Etot_mix_arr[0, -1], Etot_mix_arr[1, -1])) # machine output
E_tot_ref_Triple = E_tot_ref.clone()[[0, mix_doubles[1], -1]]
E_xc_ref_Triple = E_xc_ref.clone()[[0, mix_doubles[1], -1]]
E1Diff = (E_tot_Triple[0] - E_tot_ref_Triple[0]).item()
E2Diff = (E_tot_Triple[1] - E_tot_ref_Triple[1]).item()
E3Diff = (E_tot_Triple[2] - E_tot_ref_Triple[2]).item()
Exc1_exact = E_xc_ref_Triple[0].item()
Exc2_exact = E_xc_ref_Triple[1].item()
Exc3_exact = E_xc_ref_Triple[2].item()
Etot1_exact = E_tot_ref_Triple[0].item()
Etot2_exact = E_tot_ref_Triple[1].item()
Etot3_exact = E_tot_ref_Triple[2].item()
Errors1 = E1Diff
Errors2 = E2Diff
Errors3 = E3Diff
Energies_dic = {
'model_ckpt': test_params['model_ckpt'],
"plot_label": test_params["plot_label"],
"NPoints": [N_12arr, N_23arr[1::]],
"Diffs": [Etot_mix_arr[0].detach().cpu().numpy() - E_tot_exact12,
Etot_mix_arr[1].detach().cpu().numpy()[1::] - E_tot_exact23[1::]],
"Derivatives": [np.gradient(Etot_mix_arr[0].detach().cpu().numpy()),
np.gradient(Etot_mix_arr[1].detach().cpu().numpy()[1::])],
"SystemIdx": test_params["SystemIdx"].item(),
"ExtPotStartIdx": test_params["idxs_ExtPotsTest"][0].item()
}
if test_params["SystemIdx"].item()== batch_idx:
Energies_NN_file = gzip.open(re.sub('\.ckpt$', '', "".join([os.getcwd(), test_params['model_ckpt']])) +
"_ENERGIES_idx=" + str(Energies_dic["ExtPotStartIdx"])+"_" +
str(test_params["SystemIdx"].item()) +".gz", 'wb')
pickle.dump(Energies_dic, Energies_NN_file)
Energies_NN_file.close()
# ---------------------------------------------------------------------------------------------------------
# PLOT RESULTS
# ---------------------------------------------------------------------------------------------------------
import matplotlib
from matplotlib import pyplot as plt
matplotlib.rcParams['mathtext.fontset'] = 'cm'
matplotlib.rcParams['font.family'] = 'STIXGeneral'
# ------------------------------------------------
def floatArray(string):
return np.array(string.replace('[', '').replace(']', '').replace(',', ' ').split()).astype(float)
# ------------------------------------------------
if (test_params["PlotDensitiesDim"] != -1) and (test_params["SystemIdx"].item() == batch_idx):
N = test_params["PlotDensitiesDim"].item()
#idx = np.where(self.hparams.DimsToTrain == N)[0][0]
#print(np.where(np.array([1,2,3]) == N))
#sys.exit()
idx = np.where(np.array([1,2,3]) == N)[0][0]
fig_height = 4
fig_width = 5
fvxc, ax = plt.subplots(1, sharex=True, sharey=True)
fvxc.set_figheight(fig_height)
fvxc.set_figwidth(fig_width)
plt.xlabel(r"$x\;[a.u.]$", fontsize=20)
plt.tick_params(labelsize=15)
plt.ylabel(r'$\rho, v^{\mathrm{xc}} \;[a.u.]$', fontsize=20)
plt.tick_params(labelsize=13)
ax.grid(linestyle='--', linewidth=0.6)
plt.xlim(-11.5, 11.5)
s = "up" if self.hparams.Spin else ""
linew = 2.5
#print(E_xc_out_deriv[idx][0].detach().cpu(), V_xc[idx][0].detach().cpu().numpy())
#sys.exit()
ax.plot(points.detach().cpu(), E_xc_out_deriv[idx][0].detach().cpu(), alpha=0.7, color="r", linewidth=linew,
label=r"$v^{\mathrm{xc}}_{\mathrm{" + s + "}, \mathrm{ML}}$" + "$\,(N=$" + str(N) + ")")
ax.plot(points.detach().cpu(), V_xc[idx][0].detach().cpu(), alpha=1, color="k", linestyle="dashed",
linewidth=linew / 2.,
label=r"$v^{\mathrm{xc}}_{\mathrm{" + s + "}, \mathrm{Exact}}$" + "$\,(N=$" + str(N) + ")")
leg1 = ax.legend(loc=(0., 1.02), fontsize=12)
if self.hparams.Spin:
s = "down"
# pl_1, = ax.plot(self.points, Dens[idx][1].detach().cpu().numpy(), alpha=0.5, color="g",
# label=r"$\rho_{\mathrm{" + s + "}}$" + "$\,(N=$" + str(N) + ")")
pl_2, = ax.plot(points.detach().cpu(), E_xc_out_deriv[idx][1].detach().cpu(), alpha=0.7, color="g", linewidth=linew,
label=r"$v^{\mathrm{xc}}_{\mathrm{" + s + "}, \mathrm{ML}}$" + "$\,(N=$" + str(N) + ")")
pl_3, = ax.plot(points.detach().cpu(), V_xc[idx][1].detach().cpu(), alpha=1, color="k", linestyle="dashdot",
linewidth=linew / 2.,
label=r"$v^{\mathrm{xc}}_{\mathrm{" + s + "}, \mathrm{Exact}}$" + "$\,(N=$" + str(
N) + ")")
leg2 = ax.legend([pl_2, pl_3],
[
r"$v^{\mathrm{xc}}_{\mathrm{" + s + "}, \mathrm{ML}}$" + "$\,(N=$" + str(N) + ")",
r"$v^{\mathrm{xc}}_{\mathrm{" + s + "}, \mathrm{Exact}}$" + "$\,(N=$" + str(
N) + ")"],
loc=(0.50, 1.02), fontsize=12)
ax.add_artist(leg1)
plt.savefig("".join([os.getcwd(), test_params["image_dir"]]) +
'/DensityPlot_'+test_params["plot_label"]+'.pdf', dpi=900, bbox_inches='tight')
plt.show()
if test_params["PlotEnergies"] and (test_params["SystemIdx"].item() == batch_idx):
fig_height = 5
fig_width = 5
fig, axTOT = plt.subplots(figsize=(fig_width, fig_height))
axTOT.set_axisbelow(True)
axXC = axTOT.twinx()
axXC.set_axisbelow(True)
axTOT.grid(linestyle='--', linewidth=0.6)
for i in range(2):
line_tot_k = matplotlib.lines.Line2D([1 + (i + alpha[0].item()), 1 + (i + alpha[-1].item())],
[E_tot_ref[mix_doubles[i]].detach().cpu().numpy(),
E_tot_ref[mix_doubles[i] + (
mix_doubles[1] - mix_doubles[0])].detach().cpu().numpy()],
color='k', linewidth=1.55, alpha=0.5)
line_tot_y = matplotlib.lines.Line2D([1 + (i + alpha[0].item()), 1 + (i + alpha[-1].item())],
[E_tot_ref[mix_doubles[i]].detach().cpu().numpy(),
E_tot_ref[mix_doubles[i] + (
mix_doubles[1] - mix_doubles[0])].detach().cpu().numpy()],
color='y', linewidth=1.5, alpha=0.5)
axTOT.scatter(x=1 + (i + alpha.detach().cpu().numpy()),
y=Etot_mix_arr[i].detach().cpu().numpy(), color='b', s=10, alpha=0.5,
label=r'$E^{\mathrm{Tot}}_{\mathrm{ML}}$' if i == 0 else "")
axXC.scatter(x=1 + (i + alpha.detach().cpu().numpy()),
y=Exc_mix_arr[i].detach().cpu().numpy(), color='g', s=10, alpha=0.5,
label=r'$E^{\mathrm{xc}}_{\mathrm{ML}}$' if i == 0 else "")
axTOT.add_line(line_tot_k)
axTOT.add_line(line_tot_y)
#axTOT.scatter(x=self.hparams.DimsToTrain, y=E_t_r, color='orange', s=30, alpha=0.9,
# label=r'$E^{\mathrm{Tot}}_{\mathrm{Exact}}$', edgecolors='k')
#axXC.scatter(x=self.hparams.DimsToTrain, y=E_xc_r, color='r', s=30, alpha=0.9,
# label=r'$E^{\mathrm{xc}}_{\mathrm{Exact}}$', edgecolors='k')
axTOT.scatter(x=[1, 2, 3], y=E_t_r, color='y', s=30, alpha=1,
label=r'$E^{\mathrm{Tot}}_{\mathrm{Exact}}$', edgecolors='k')
axXC.scatter(x=[1, 2, 3], y=E_xc_r, color='r', s=30, alpha=0.5,
label=r'$E^{\mathrm{xc}}_{\mathrm{Exact}}$', edgecolors='k')
axXC.legend(fontsize=15, loc=1)
axTOT.legend(fontsize=15, loc=3)
axXC.set_xlabel(r'$x\;[a.u.]$', fontsize=20)
axTOT.set_xlabel(r'$N$', fontsize=20)
axXC.set_ylabel(r'$E_{xc} \;[a.u.]$', fontsize=20, color="g")
axTOT.set_ylabel(r'$E_{tot} \;[a.u.]$', fontsize=20, color="b")
axTOT.tick_params(axis='y', color='b', which="major", labelcolor="b")
axXC.tick_params(axis='y', color='g', which="major", labelcolor="g")
axTOT.set_yticks(axTOT.get_yticks()[::2])
axXC.set_yticks(axXC.get_yticks()[::2])
axXC.tick_params(labelsize=15)
axTOT.tick_params(labelsize=15)
plt.savefig("".join([os.getcwd(), test_params["image_dir"]]) + '/EnergyCurve_'+
test_params["plot_label"]+'.pdf', dpi=900, bbox_inches='tight')
plt.show()
fig_height = 6.5
fig_width = 5
fig, (axDiff, axDeriv) = plt.subplots(2, 1, figsize=(fig_width, fig_height), sharex=True,
gridspec_kw={'hspace': 0.2})
axDeriv.plot(N_12arr,
np.gradient(E_tot_exact12), color='g', alpha=0.6, linewidth=3,
label=r'$\mathrm{d}E^{\mathrm{Tot}}_{\mathrm{Exact}}/\mathrm{d}N$')
axDeriv.plot(N_23arr[1::], np.gradient(E_tot_exact23[1::]), color='g', linewidth=3, alpha=0.6)
axDiff.scatter(x=N_12arr,
y=Etot_mix_arr[0].detach().cpu().numpy() - E_tot_exact12, color='b', s=10, alpha=0.5,
label=r'$E^{\mathrm{Tot}}_{\mathrm{ML}}-E^{\mathrm{Tot}}_{\mathrm{Exact}}$')
axDiff.scatter(x=N_23arr[1::],
y=Etot_mix_arr[1].detach().cpu().numpy()[1::] - E_tot_exact23[1::], color='b', s=10, alpha=0.5)
axDeriv.scatter(x=N_12arr,
y=np.gradient(Etot_mix_arr[0].detach().cpu().numpy()), color='r', s=10, alpha=0.5,
label=r'$\mathrm{d}E^{\mathrm{Tot}}_{\mathrm{ML}}/\mathrm{d}N$')
axDeriv.scatter(x=N_23arr[1::],
y=np.gradient(Etot_mix_arr[1].detach().cpu().numpy()[1::]), color='r', s=10, alpha=0.5)
axDiff.legend(fontsize=15, loc=(0.50, 1.06), framealpha=1)
axDeriv.legend(fontsize=15, loc=(0, 2.26), framealpha=1)
shift_diff = 0.01
axDiff.set_ylim(
-np.amax(np.abs(np.concatenate((Etot_mix_arr[0].detach().cpu().numpy() - E_tot_exact12, Etot_mix_arr[
1].detach().cpu().numpy() - E_tot_exact23)))) - shift_diff,
np.amax(np.abs(np.concatenate((Etot_mix_arr[0].detach().cpu().numpy() - E_tot_exact12, Etot_mix_arr[
1].detach().cpu().numpy() - E_tot_exact23)))) + shift_diff)
axDeriv.set_xlabel(r'$N$', fontsize=20)
axDiff.set_ylabel(r'$E^{\mathrm{Tot}}_{\mathrm{ML}}-E^{\mathrm{Tot}}_{\mathrm{Exact}}\;[a.u.]$', fontsize=20,
color="b")
axDeriv.set_ylabel(r'$\mathrm{d}E^{\mathrm{Tot}}_{\mathrm{ML}}/\mathrm{d}N\;[a.u.]$',
fontsize=20, color="r")
axDiff.tick_params(axis='y', color='b', which="major", labelcolor="b")
axDeriv.tick_params(axis='y', color='r', which="major", labelcolor="r")
# axDiff.set_yticks(axDiff.get_yticks()[::2])
# axDeriv.set_yticks(axDeriv.get_yticks()[::2])
axDeriv.tick_params(labelsize=15)
axDiff.tick_params(labelsize=15)
axDiff.grid(linestyle='--', linewidth=0.6, markevery=3)
axDeriv.grid(linestyle='--', linewidth=0.6, markevery=3)
# axDeriv.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
# axDiff.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
plt.savefig("".join([os.getcwd(), test_params["image_dir"]]) + '/EnergyDerivative_'+
test_params["plot_label"]+'.pdf', dpi=900, bbox_inches='tight')
plt.show()
if (len(test_params["CompareWith"]) > 0) and (test_params["SystemIdx"].item() == batch_idx):
import gzip
import pickle
import re
markersize = 5
# ----------------------------------------------------------
# Call Energy data of the other models
# ----------------------------------------------------------
model_list = [test_params['model_ckpt']]
if len(test_params["CompareWith"]) > 0:
model_list += [NNs for NNs in test_params["CompareWith"]]
Energies_NNs = [None] * len(model_list)
for n in tqdm(range(len(model_list) - 1)):
data_file = gzip.open(re.sub('\.ckpt$', '', "".join([os.getcwd(), model_list[n + 1]])) + "_ENERGIES_idx=" +
str(Energies_dic["ExtPotStartIdx"])+"_" + str(test_params["SystemIdx"].item()) +".gz", 'rb')
Energies_NNs[n + 1] = pickle.load(data_file)
data_file.close()
Energies_NNs[0] = Energies_dic
Energies_dims = np.array([len(Energies_NNs[i]["Diffs"][0]) for i in range(len(Energies_NNs))])
if np.sum(Energies_dims - np.ones(len(Energies_dims))*Energies_dims[0]) != 0:
raise Exception("Number of fractional points of the models must be the equal!" + "\n" +
"".join(["NN " + str(Energies_NNs[i]["plot_label"]) + " FracPoints: " + str(len(Energies_NNs[i]["Diffs"][0])) + "\n"
for i in range(len(Energies_NNs))]))
E_t_r = E_tot_ref.detach().cpu().numpy()
fig_height = 6.5
fig_width = 5
fig, (axDeriv, axDiff) = plt.subplots(2, 1, figsize=(fig_width, fig_height), sharex=True,
gridspec_kw={'hspace': 0.1})
E_totExactderiv_contin = np.concatenate((np.gradient(E_tot_exact12), np.gradient(E_tot_exact23[1::])))
axDeriv.plot(N_12arr, np.gradient(E_tot_exact12),
color='k', alpha=1, linewidth=1.5, linestyle="dashed",
label=r'$\mathrm{Exact}$')
axDeriv.plot(N_23arr[1::], np.gradient(E_tot_exact23[1::]),
color='k', alpha=1, linewidth=1.5, linestyle="dashed")
axDeriv.legend(fontsize=15, loc=(0, 2.26), framealpha=1)
axDiff.set_xlabel(r'$N$', fontsize=20)
axDiff.set_ylabel(r'$E^{\mathrm{Tot}}_{\mathrm{ML}}-E^{\mathrm{Tot}}_{\mathrm{Exact}}\;[a.u.]$', fontsize=20)
axDeriv.set_ylabel(r'$\mathrm{d}E^{\mathrm{Tot}}_{\mathrm{ML}}/\mathrm{d}N\;[a.u.]$', fontsize=20)
axDiff.tick_params(axis='y', which="major")
axDeriv.tick_params(axis='y', which="major")
axDeriv.tick_params(labelsize=15)
axDiff.tick_params(labelsize=15)
axDiff.grid(linestyle='--', linewidth=0.6, markevery=3)
axDeriv.grid(linestyle='--', linewidth=0.6, markevery=3)
e_diff_abs_max = [None] * len(model_list)
e_deriv_abs_max = [None] * len(model_list)
for n in tqdm(range(len(model_list))):
axDiff.scatter(
x=np.concatenate((Energies_NNs[n]["NPoints"][0], Energies_NNs[n]["NPoints"][1]), axis=None),
y=np.concatenate((Energies_NNs[n]["Diffs"][0], Energies_NNs[n]["Diffs"][1]), axis=None),
s=markersize, alpha=0.7,
label=Energies_NNs[n]["plot_label"])
axDeriv.scatter(
x=np.concatenate((Energies_NNs[n]["NPoints"][0], Energies_NNs[n]["NPoints"][1]), axis=None),
y=np.concatenate((Energies_NNs[n]["Derivatives"][0], Energies_NNs[n]["Derivatives"][1]), axis=None),
s=markersize, alpha=0.7,
label=Energies_NNs[n]["plot_label"])
e_diff_abs_max[n] = np.amax(
np.abs(np.concatenate((Energies_NNs[n]["Diffs"][0], Energies_NNs[n]["Diffs"][1]))))
e_deriv_abs_max[n] = np.amax(
np.abs(np.concatenate((Energies_NNs[n]["Derivatives"][0], Energies_NNs[n]["Derivatives"][1]))-
(E_totExactderiv_contin[0] + E_totExactderiv_contin[-1]) / 2.))
shift_diff = 0.02
shift_deriv = 0.02
axDiff.set_ylim(-np.amax(e_diff_abs_max) - shift_diff, np.amax(e_diff_abs_max) + shift_diff)
axDeriv.legend(fontsize=15, framealpha=1)
axDeriv.set_ylim(((E_totExactderiv_contin[0] + E_totExactderiv_contin[-1]) / 2.) - np.amax(e_deriv_abs_max) - shift_deriv,
((E_totExactderiv_contin[0] + E_totExactderiv_contin[-1]) / 2.) + np.amax(e_deriv_abs_max) + shift_deriv)
plt.savefig("".join([os.getcwd(), test_params["image_dir"]]) + '/EnergyCurveCompare_idx=' +
str(Energies_dic["ExtPotStartIdx"])+"_" + str(test_params["SystemIdx"].item()) +'.pdf', dpi=900, bbox_inches='tight')
plt.show()
# ---------------------------------------------------------------------------------------------------------
return Exc1_exact, Exc2_exact, Exc3_exact,\
Etot1_exact, Etot2_exact, Etot3_exact,\
Errors1, Errors2, Errors3, \
MSE_Etot, Var_Etot, test_params
def test_epoch_end(self, outputs):
test_params = outputs[0][-1]
Errors = []
for j in range(len(outputs[0][0:-1])):
Errors.append([outputs[i][j] for i in range(len(outputs))])
Exc1_exact, Exc2_exact, Exc3_exact, \
Etot1_exact, Etot2_exact, Etot3_exact, \
Errors1, Errors2, Errors3, \
MSE_Etot, Var_Etot, = Errors
print("____________________________________________________________________________________________________")
print("ENERGY_ERRORS / INTEGER PARTICLE_NUMBERS in a.u. (" + str(len(Etot1_exact)) + " systems examined):")
print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
print("E1_exact:" + " max = " + str(np.round(np.amax(Etot1_exact), 4)) + " | " + "min = " + str(
np.round(np.amin(Etot1_exact), 4)))
print("E2_exact:" + " max = " + str(np.round(np.amax(Etot2_exact), 4)) + " | " + "min = " + str(
np.round(np.amin(Etot2_exact), 4)))
print("E3_exact:" + " max = " + str(np.round(np.amax(Etot3_exact), 4)) + " | " + "min = " + str(
np.round(np.amin(Etot3_exact), 4)))
print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
print("abs(Etot1_Error)_mean / 1 = ", np.round(np.mean(np.abs(Errors1) / 1.), 4), " | ",
"abs(Etot1_Error)_max / 1 = ", np.round(np.amax(np.abs(Errors1) / 1.), 4))
print("abs(Etot2_Error)_mean / 2 = ", np.round(np.mean(np.abs(Errors2) / 2.), 4), " | ",
"abs(Etot2_Error)_max / 2 = ", np.round(np.amax(np.abs(Errors2) / 2.), 4))
print("abs(Etot3_Error)_mean / 3 = ", np.round(np.mean(np.abs(Errors3) / 3.), 4), " | ",
"abs(Etot3_Error)_max / 3 = ", np.round(np.amax(np.abs(Errors3) / 3.), 4))
print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
print("abs(Etot1_Error / Exc1_exact)_mean / 1 = ",
np.round(np.mean(np.abs(Errors1 / np.array(Exc1_exact)) / 1.), 4), " | ",
"abs(Etot1_Error / Exc1_exact)_max / 1 = ",
np.round(np.amax(np.abs(Errors1 / np.array(Exc1_exact)) / 1.), 4))
print("abs(Etot2_Error / Exc2_exact)_mean / 2 = ",
np.round(np.mean(np.abs(Errors2 / np.array(Exc2_exact)) / 2.), 4), " | ",
"abs(Etot2_Error / Exc2_exact)_max / 2 = ",
np.round(np.amax(np.abs(Errors2 / np.array(Exc2_exact)) / 2.), 4))
print("abs(Etot3_Error / Exc3_exact)_mean / 3 = ",
np.round(np.mean(np.abs(Errors3 / np.array(Exc3_exact)) / 3.), 4), " | ",
"abs(Etot3_Error / Exc3_exact)_max / 3 = ",
np.round(np.amax(np.abs(Errors3 / np.array(Exc3_exact)) / 3.), 4))
print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
print("____________________________________________________________________________________________________")
print("ENERGY_ERRORS / FRACTIONAL PARTICLE_NUMBERS in a.u. (" + str(len(Etot1_exact)) + " systems examined):")
print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
print("Var_Etot_mean = ", np.round(np.mean(Var_Etot), 4))
print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
print("MSE_Etot_mean = ", np.round(np.mean(MSE_Etot), 4))
print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
self.log("Var_Etot_mean" , np.mean(Var_Etot))
self.log("MSE_Etot_mean", np.mean(MSE_Etot))
if len(test_params["H2Dissociation"]) > 0:
self.DissociationCurve(test_params)
if test_params["PlotVxcFrac"] != -1:
self.Vxc_Jump_Prediction(test_params)
def DissociationCurve(self, test_params):
import matplotlib
from matplotlib import pyplot as plt
import pandas as pd
import gzip
import pickle
import re
# ------------------------------------------------
# Latex Formatting
# ------------------------------------------------
matplotlib.rcParams['mathtext.fontset'] = 'cm'
matplotlib.rcParams['font.family'] = 'STIXGeneral'
# ------------------------------------------------
def floatArray(string):
return np.array(string.replace('[', '').replace(']', '')
.replace(',', ' ').replace("'", ' ').split()).astype(float)
model_list = [test_params['model_ckpt']]
if len(test_params["CompareWith"]) > 0:
model_list += [NNs for NNs in test_params["CompareWith"]]
# ----------------------------------------------------------
# Call H2 data of the other models
# ----------------------------------------------------------
H2_NNs = [None] * len(model_list)
for n in tqdm(range(len(model_list) - 1)):
data_file = gzip.open(re.sub('\.ckpt$', '', "".join([os.getcwd(), model_list[n + 1]])) + "_H2DISSCOCIATION.gz", 'rb')
H2_NNs[n + 1] = pickle.load(data_file)
data_file.close()
# ----------------------------------------------------------
# Read exact H2 data from file
# ----------------------------------------------------------
H2_Exact = pd.read_csv("".join([os.getcwd(), test_params["H2Dissociation"]]))
H2_ExactEnergies = H2_Exact["Energy"].to_numpy()
        data_length = len(H2_ExactEnergies)
min_dist = 2 * floatArray(H2_Exact["nucs_array"].iloc[0])[-1]
max_dist = 2 * floatArray(H2_Exact["nucs_array"].iloc[-1])[-1]
fig_height = 4
fig_width = 5
fH2, axH2 = plt.subplots(1, sharex=True, sharey=True)
fH2.set_figheight(fig_height)
fH2.set_figwidth(fig_width)
        plt.scatter(np.linspace(min_dist, max_dist, data_length), H2_ExactEnergies, color="k", s=10, alpha=0.7, label="Exact")
plt.xlabel("Distance [a.u.]", fontsize=20)
plt.tick_params(labelsize=15)
plt.xlim(0, 10)
plt.ylabel("Energy [a.u.]", fontsize=20)
plt.tick_params(labelsize=13)
axH2.grid(linestyle='--', linewidth=0.6)
# ______________________________________
H2_ExactEnergies = torch.from_numpy(H2_ExactEnergies)
H2_NNEnergies = [None] * len(H2_ExactEnergies)
# ----------------------------------------------------------
# Compute H2 energies (as NN prediction)
# ----------------------------------------------------------
print("\nComputing H_2 Dissociation...")
for i in tqdm(range(len(H2_ExactEnergies))):
NucNucInteraction = 1. / np.sqrt((1. + (i * 0.1) ** 2)) # [a.u.]
points = torch.from_numpy(floatArray(H2_Exact['points'][i])).double()
dx = points[1] - points[0]
v_ext_H2 = torch.from_numpy(floatArray(H2_Exact['v_ext'][i])).double()
Dens_tot_H2 = torch.from_numpy(floatArray(H2_Exact['Dens_data_total'][i])).double().view(1, 1, -1)
Dens_up_H2 = torch.from_numpy(floatArray(H2_Exact['Dens_data_up'][i])).double().view(1, 1, -1)
Dens_down_H2 = torch.from_numpy(floatArray(H2_Exact['Dens_data_down'][i])).double().view(1, 1, -1)
DensUpAndDown_H2 = torch.cat((Dens_up_H2, Dens_down_H2), 1)
Dens_H2 = DensUpAndDown_H2 if self.hparams.Spin else Dens_tot_H2
if test_params["gpus_num"] != 0 or len(test_params["gpus_devices"]) != 0:
Dens_H2 = Dens_H2.cuda()
Dens_H2.requires_grad = True
E_xc_H2 = self((points.unsqueeze(0), Dens_H2))
V_xc_H2 = \
torch.autograd.grad(inputs=Dens_H2, outputs=E_xc_H2, create_graph=False,
retain_graph=True, grad_outputs=torch.ones_like(E_xc_H2))[0] / dx
Dens_H2 = Dens_H2.squeeze(0)
E_xc_H2 = E_xc_H2.squeeze(0)
V_xc_H2 = V_xc_H2.squeeze(0)
# ---------------------------------------------------------------------------------------------------------
# COMPUTE TOTAL ENERGY
# ---------------------------------------------------------------------------------------------------------
KSsystem = {
"dim": 2,
"v_ext": v_ext_H2,
"points": points,
"N": len(points),
"dx": dx.item(),
'laplOrder': 4,
"occ": None,
"up_occ": None,
"down_occ": None,
}
KSsystem['occ'], KSsystem['up_occ'], KSsystem['down_occ'] = self.GS_dens_splitter(KSsystem['dim'])
Dens_KS, E_tot_NN = self.KSIterations(KSsystem=KSsystem, Dens_inp=Dens_H2, V_xc_inp=V_xc_H2, E_xc=E_xc_H2)
H2_NNEnergies[i] = E_tot_NN.detach().cpu().numpy().item() + NucNucInteraction
H2_NNs[0] = {
'model_ckpt': test_params['model_ckpt'],
"plot_label": test_params["plot_label"],
"Energies": H2_NNEnergies,
}
H2_NN_file = gzip.open(re.sub('\.ckpt$', '', "".join([os.getcwd(), test_params['model_ckpt']])) + "_H2DISSCOCIATION.gz", 'wb')
pickle.dump(H2_NNs[0], H2_NN_file)
H2_NN_file.close()
for n in tqdm(range(len(model_list))):
            plt.scatter(np.linspace(min_dist, max_dist, data_length), H2_NNs[n]["Energies"], s=10, alpha=0.7,
label=H2_NNs[n]["plot_label"])
plt.legend(fontsize=15)
plt.savefig("".join([os.getcwd(), test_params["image_dir"]]) + '/H2Dissociation_'+
test_params["plot_label"]+'.pdf', dpi=900, bbox_inches='tight')
plt.show()
def Vxc_Jump_Prediction(self, test_params):
import matplotlib
from matplotlib import pyplot as plt
import pandas as pd
matplotlib.rcParams['mathtext.fontset'] = 'cm'
matplotlib.rcParams['font.family'] = 'STIXGeneral'
# ------------------------------------------------
def floatArray(string):
return np.array(string.replace('[', '').replace(']', '').replace(',', ' ').split()).astype(float)
# ------------------------------------------------
N = test_params["PlotVxcFrac"].item()
frac = N - int(N)
Set_exact = pd.read_csv("".join([os.getcwd(), test_params["VxcFracFile"]]))
Set_exact_int = Set_exact[(Set_exact["dim"] == N - frac)].reset_index(drop=True)
Set_exact_frac = Set_exact[(Set_exact["dim"] == N)].reset_index(drop=True)
points = torch.from_numpy(floatArray(Set_exact_int['points'][0])).double()
dx = points[1] - points[0]
i = test_params["SystemIdx"].item()
MSE_tot_int, MSE_up_int, MSE_down_int = Set_exact_int["MSE"][i], \
Set_exact_int["MSE_up"][i], \
Set_exact_int["MSE_down"][i]
MSE_tot_frac, MSE_up_frac, MSE_down_frac = Set_exact_frac["MSE"][i], \
Set_exact_frac["MSE_up"][i], \
Set_exact_frac["MSE_down"][i]
MSEs = np.array([MSE_tot_int, MSE_up_int, MSE_down_int, MSE_tot_frac, MSE_up_frac, MSE_down_frac]).astype(float)
if len(np.where(MSEs > 1.5*1e-8)[0]) > 0:
raise Exception("MSE > 1.5*1e-8 -> ", MSEs)
Dens_tot_int = torch.from_numpy(floatArray(Set_exact_int['Dens_data_total'][i])).double().view(1, 1, -1)
Dens_tot_frac = torch.from_numpy(floatArray(Set_exact_frac['Dens_data_total'][i])).double().view(1, 1, -1)
Dens_up_int = torch.from_numpy(floatArray(Set_exact_int['Dens_data_up'][i])).double().view(1, 1, -1)
Dens_up_frac = torch.from_numpy(floatArray(Set_exact_frac['Dens_data_up'][i])).double().view(1, 1, -1)
Dens_down_int = torch.from_numpy(floatArray(Set_exact_int['Dens_data_down'][i])).double().view(1, 1, -1)
Dens_down_frac = torch.from_numpy(floatArray(Set_exact_frac['Dens_data_down'][i])).double().view(1, 1, -1)
#---fix NORMALIZATION -----
Dens_tot_int = (Dens_tot_int / (Dens_tot_int.sum() * dx)) * int(N - frac)
Dens_tot_frac = (Dens_tot_frac / (Dens_tot_frac.sum() * dx)) * N
#---------------------------
DensUpAndDown_int = torch.cat((Dens_up_int, Dens_down_int), 1)
DensUpAndDown_frac = torch.cat((Dens_up_frac, Dens_down_frac), 1)
Dens_int = DensUpAndDown_int if self.hparams.Spin else Dens_tot_int
Dens_frac = DensUpAndDown_frac if self.hparams.Spin else Dens_tot_frac
if test_params["gpus_num"] != 0 or len(test_params["gpus_devices"]) != 0:
Dens_frac = Dens_frac.cuda()
Dens_int = Dens_int.cuda()
Dens_int.requires_grad = True
Dens_frac.requires_grad = True
E_xc_int = self((points.unsqueeze(0), Dens_int))
E_xc_frac = self((points.unsqueeze(0), Dens_frac))
V_xc_int = \
torch.autograd.grad(inputs=Dens_int, outputs=E_xc_int, create_graph=False,
retain_graph=True, grad_outputs=torch.ones_like(E_xc_int))[0] / dx
V_xc_frac = \
torch.autograd.grad(inputs=Dens_frac, outputs=E_xc_frac, create_graph=False,
retain_graph=True, grad_outputs=torch.ones_like(E_xc_frac))[0] / dx
if self.hparams.Spin:
V_xc_int_exact = [floatArray(Set_exact_int['v_xc_up'][i]), floatArray(Set_exact_int['v_xc_down'][i])]
V_xc_frac_exact = [floatArray(Set_exact_frac['v_xc_up'][i]),
floatArray(Set_exact_frac['v_xc_down'][i])]
else:
V_xc_int_exact = [floatArray(Set_exact_int['v_xc'][i])]
V_xc_frac_exact = [floatArray(Set_exact_frac['v_xc'][i])]
Dens_int = Dens_int.squeeze(0);
Dens_frac = Dens_frac.squeeze(0)
V_xc_int = V_xc_int.squeeze(0);
V_xc_frac = V_xc_frac.squeeze(0)
fig_height = 4
fig_width = 5
fvxc, ax = plt.subplots(1, sharex=True, sharey=True)
fvxc.set_figheight(fig_height)
fvxc.set_figwidth(fig_width)
plt.xlabel(r"$x\;[a.u.]$", fontsize=20)
plt.tick_params(labelsize=15)
plt.ylabel(r'$\rho, v^{\mathrm{xc}} \;[a.u.]$', fontsize=20)
plt.tick_params(labelsize=13)
ax.grid(linestyle='--', linewidth=0.6)
SpinPol = 0 # 0 == up, 1 == down
s = "tot"
if self.hparams.Spin:
s = "down" if SpinPol else "up"
else:
SpinPol = 0
linew = 2.5
ax.plot(points, Dens_int[SpinPol].detach().cpu().numpy(), alpha=0.4, color="k", linewidth=1,
label=r"$\rho_{\mathrm{" + s + "}}$" + "$\,(N=$" + str(N - frac) + ")")
ax.plot(points, V_xc_int[SpinPol].detach().cpu().numpy(), alpha=0.4, color="r", linewidth=linew,
label=r"$v^{\mathrm{xc}}_{\mathrm{" + s + "}, \mathrm{ML}}$" + "$\,(N=$" + str(N - frac) + ")")
ax.plot(points, V_xc_int_exact[SpinPol], alpha=0.4, color="b", linewidth=linew / 2.,
linestyle="dashed",
label=r"$v^{\mathrm{xc}}_{\mathrm{" + s + "}, \mathrm{Exact}}$" + "$\,(N=$" + str(N - frac) + ")")
pl_1, = ax.plot(points, Dens_frac[SpinPol].detach().cpu().numpy(), linewidth=1, linestyle="dashed", color="k")
pl_2, = ax.plot(points, V_xc_frac[SpinPol].detach().cpu().numpy(), linewidth=linew, color="r", )
pl_3, = ax.plot(points, V_xc_frac_exact[SpinPol], linewidth=linew / 2., color="b", linestyle="dashed")
leg1 = ax.legend(loc=(0., 1.02), fontsize=12)
leg2 = ax.legend([pl_1, pl_2, pl_3],
[r"$\rho_{\mathrm{" + s + "}}$" + "$\,(N=$" + str(N) + ")",
r"$v^{\mathrm{xc}}_{\mathrm{" + s + "}, \mathrm{ML}}$" + "$\,(N=$" + str(N) + ")",
r"$v^{\mathrm{xc}}_{\mathrm{" + s + "}, \mathrm{Exact}}$" + "$\,(N=$" + str(N) + ")"],
loc=(0.50, 1.02), fontsize=12)
ax.add_artist(leg1)
plt.xlim(points[0], points[-1])
plt.savefig("".join([os.getcwd(), test_params["image_dir"]]) + '/VxcJump_'+test_params["plot_label"]+'.pdf',
dpi=900, bbox_inches='tight')
plt.show()
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), lr=self.hparams.learning_rate)
scheduler = torch.optim.lr_scheduler.CyclicLR(optimizer, base_lr=self.hparams.learning_rate,
max_lr=15 * self.hparams.learning_rate, step_size_up=115000,
step_size_down=115000, gamma=0.999, cycle_momentum=False)
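        # Note: only the optimizer is returned, so the CyclicLR scheduler defined above is
        # currently inactive; returning ([optimizer], [scheduler]) would activate it in Lightning.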
return optimizer
def validation_epoch_end(self, outputs):
global time_convergence
avg_loss = torch.stack(
[x["val_loss"] for x in outputs]).mean()
self.log("ptl/val_loss", avg_loss)
@staticmethod
def add_model_specific_args():
"""
Parameters you define here will be available to your model through self.hparams
"""
def strToArray(string, type):
return np.array(string.replace('[', '').replace(']', '').replace(',', ' ').split()).astype(type)
def floatArray(string): return strToArray(string, float)
def intArray(string): return strToArray(string, int)
parser = ArgumentParser(fromfile_prefix_chars='@', add_help=True)
parser.add_argument('--batch-size', '--batch_size', '--batchsize',
required=True,
type=int,
help="batch size used for the training"
)
parser.add_argument('--data_dir', '--data-dir',
type=str,
required=True,
help='Data used for the training'
)
parser.add_argument('--DimsToTrain',
type=floatArray,
required=True,
default="[1,2,3]",
                            help='Array of dimensions which will be used for the training. It must '
                                 'contain at least the integer (1, 2, 3) densities. If fractional densities .x are used, '
                                 'they must appear as 1.x and 2.x in DimsToTrain.'
)
parser.add_argument('--Spin',
action="store_true",
help="Spin densities will be used"
)
parser.add_argument('--idxs_ExtPotsTrain',
type=intArray,
required=True,
help="1D array containg start and end index of the arrangement "
"of external potential for trainings sets"
)
parser.add_argument('--idxs_ExtPotsVal',
required=True,
type=intArray,
help="1D array containg start and end index of the arrangement "
"of external potential for validation sets"
)
parser.add_argument('--idxs_ExtPotsTest',
type=intArray,
default="[0,0]",
help="1D array containg start and end index of the arrangement "
"of external potential for test sets. This Option is NOT"
"necessary for the training!"
)
parser.add_argument('--Disc',
action="store_true",
help="Non-differnetiable auxiliary function will be implemented"
)
parser.add_argument('--kernelsize',
type=int,
default=9,
help="== scan window size, if WindowPlusLDA==True, otherwise "
"len(Conv_OutChannels) * kernelsize == scan window size"
)
parser.add_argument('--WindowPlusLDA',
action="store_true",
help="Jonathan's scanning method"
)
parser.add_argument('--LDA_in_channels',
type=int,
default=16,
help="Out channels of scan window, if WindowPlusLDA used"
)
parser.add_argument('--LDA_LayerOutDims',
type=intArray,
default="[16 16 16 16 1]",
help="array containing out dimensions of each linear layer"
)
parser.add_argument('--Conv_OutChannels',
type=intArray,
default="[16 16 16 1]",
help="Array containing out dimensions of each convolutional layer",
)
parser.add_argument('--gpus_num',
type=int,
default=0,
help="Specify number of GPUs to use"
)
parser.add_argument('--gpus_devices',
type=intArray,
default="[]",
help="Specify which GPUs to use (don't use when running on cluster)"
)
parser.add_argument("--num_workers",
default=0,
type=int,
help="Number of data loading workers (default: 0), crashes on some machines if used"
)
parser.add_argument("--epochs",
default=390,
type=int,
required=True,
metavar="N",
help="Number of total epochs to run"
)
parser.add_argument("--optim",
default="AdamW",
type=str,
metavar="str",
help="Choose an optimizer; SGD, Adam or AdamW"
)
parser.add_argument("--learning_rate", "--learning-rate", "--lr",
default=3e-4,
type=float,
metavar="float",
help="Initial learning rate (default: 3e-4)"
)
parser.add_argument("--continue_ckpt",
type=str,
help="If path given, model from checkpoint will continue to be trained"
)
parser.add_argument("--continue_hparams",
type=str,
help="path to hparams.yaml used if continue_ckpt is given"
)
parser.add_argument("--train_jump",
action="store_true",
help="XC_jump will be included in loss function"
)
parser.add_argument("--SpinMirror",
action="store_true",
help="Spin channels will swapped after each training iteration "
"and used for training additionally"
)
args = parser.parse_args()
mid_idx=int(len(args.DimsToTrain) / 2.)
if not args.Spin and args.SpinMirror:
raise Exception("SpinMirror selected, but not Spin!")
for d in range(len(args.DimsToTrain)):
if args.DimsToTrain[d] + 1 not in args.DimsToTrain:
raise Exception("Wrong declaration of DimsToTrain array")
if d == mid_idx: break
if args.train_jump:
if len(args.DimsToTrain) != args.batch_size:
raise Exception("len(DimsToTrain) must be equal to batch_size!")
if args.Spin:
raise Exception("train_jump not available for Spin yet")
return parser
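    # Minimal usage sketch (hypothetical training script; names and paths are placeholders):
    #   parser = XC_Model.add_model_specific_args()
    #   args = parser.parse_args()              # e.g. via an @args.txt file (fromfile_prefix_chars)
    #   model = XC_Model(**vars(args))
    #   trainer = pl.Trainer(max_epochs=args.epochs, gpus=args.gpus_num)
    #   trainer.fit(model, train_dataloader, val_dataloader)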
``` |
{
"source": "jogehl/Stone-Soup",
"score": 2
} |
#### File: radar/tests/test_radar.py
```python
import datetime
from pytest import approx
import numpy as np
from ....functions import cart2pol
from ....types.angle import Bearing
from ....types.array import StateVector, CovarianceMatrix
from ....types.state import State
from ....types.groundtruth import GroundTruthState
from ..radar import RadarRangeBearing, RadarRotatingRangeBearing, AESARadar, \
RadarRasterScanRangeBearing
from ..beam_pattern import StationaryBeam
from ..beam_shape import Beam2DGaussian
from ....models.measurement.linear import LinearGaussian
def h2d(state_vector, translation_offset, rotation_offset):
xyz = [[state_vector[0, 0] - translation_offset[0, 0]],
[state_vector[1, 0] - translation_offset[1, 0]],
[0]]
# Get rotation matrix
theta_z = - rotation_offset[2, 0]
cos_z, sin_z = np.cos(theta_z), np.sin(theta_z)
rot_z = np.array([[cos_z, -sin_z, 0],
[sin_z, cos_z, 0],
[0, 0, 1]])
theta_y = - rotation_offset[1, 0]
cos_y, sin_y = np.cos(theta_y), np.sin(theta_y)
rot_y = np.array([[cos_y, 0, sin_y],
[0, 1, 0],
[-sin_y, 0, cos_y]])
theta_x = - rotation_offset[0, 0]
cos_x, sin_x = np.cos(theta_x), np.sin(theta_x)
rot_x = np.array([[1, 0, 0],
[0, cos_x, -sin_x],
[0, sin_x, cos_x]])
rotation_matrix = rot_z@rot_y@rot_x
xyz_rot = rotation_matrix @ xyz
x = xyz_rot[0, 0]
y = xyz_rot[1, 0]
# z = 0 # xyz_rot[2, 0]
rho = np.sqrt(x**2 + y**2)
phi = np.arctan2(y, x)
return np.array([[Bearing(phi)], [rho]])
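# h2d mirrors the radar's 2D measurement model for the tests: it shifts the target into the
# sensor frame, applies the inverse (z-y-x) rotation of the sensor orientation and returns
# the expected [bearing; range] measurement.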
def test_simple_radar():
# Input arguments
    # TODO: pytest parametrization
noise_covar = CovarianceMatrix([[0.015, 0],
[0, 0.1]])
radar_position = StateVector([1, 1])
radar_orientation = StateVector([0, 0, 0])
target_state = State(radar_position +
np.array([[1], [1]]),
timestamp=datetime.datetime.now())
measurement_mapping = np.array([0, 1])
# Create a radar object
radar = RadarRangeBearing(
position=radar_position,
orientation=radar_orientation,
ndim_state=2,
mapping=measurement_mapping,
noise_covar=noise_covar)
# Assert that the object has been correctly initialised
assert(np.equal(radar.position, radar_position).all())
# Generate a noiseless measurement for the given target
measurement = radar.measure(target_state, noise=0)
rho, phi = cart2pol(target_state.state_vector[0, 0]
- radar_position[0, 0],
target_state.state_vector[1, 0]
- radar_position[1, 0])
# Assert correction of generated measurement
assert(measurement.timestamp == target_state.timestamp)
assert(np.equal(measurement.state_vector,
StateVector([phi, rho])).all())
def test_rotating_radar():
# Input arguments
    # TODO: pytest parametrization
timestamp = datetime.datetime.now()
noise_covar = CovarianceMatrix(np.array([[0.015, 0],
[0, 0.1]]))
# The radar is positioned at (1,1)
radar_position = StateVector(
np.array(([[1], [1]])))
# The radar is facing left/east
radar_orientation = StateVector([[0], [0], [np.pi]])
# The radar antenna is facing opposite the radar orientation
dwell_center = State(StateVector([[-np.pi]]),
timestamp=timestamp)
rpm = 20 # 20 Rotations Per Minute
max_range = 100 # Max range of 100m
fov_angle = np.pi/3 # FOV angle of pi/3
target_state = State(radar_position +
np.array([[5], [5]]),
timestamp=timestamp)
measurement_mapping = np.array([0, 1])
# Create a radar object
radar = RadarRotatingRangeBearing(
position=radar_position,
orientation=radar_orientation,
ndim_state=2,
mapping=measurement_mapping,
noise_covar=noise_covar,
dwell_center=dwell_center,
rpm=rpm,
max_range=max_range,
fov_angle=fov_angle)
# Assert that the object has been correctly initialised
assert(np.equal(radar.position, radar_position).all())
# Generate a noiseless measurement for the given target
measurement = radar.measure(target_state, noise=0)
# Assert measurement is None since target is not in FOV
assert(measurement is None)
# Rotate radar such that the target is in FOV
timestamp = timestamp + datetime.timedelta(seconds=0.5)
target_state = State(radar_position +
np.array([[5], [5]]),
timestamp=timestamp)
measurement = radar.measure(target_state, noise=0)
eval_m = h2d(target_state.state_vector,
radar.position,
radar.orientation+[[0],
[0],
[radar.dwell_center.state_vector[0, 0]]])
# Assert correction of generated measurement
assert(measurement.timestamp == target_state.timestamp)
assert(np.equal(measurement.state_vector, eval_m).all())
def test_raster_scan_radar():
# Input arguments
    # TODO: pytest parametrization
timestamp = datetime.datetime.now()
noise_covar = CovarianceMatrix(np.array([[0.015, 0],
[0, 0.1]]))
# The radar is positioned at (1,1)
radar_position = StateVector(
np.array(([[1], [1]])))
# The radar is facing left/east
radar_orientation = StateVector([[0], [0], [np.pi]])
# The radar antenna is facing opposite the radar orientation
dwell_center = State(StateVector([[np.pi / 4]]),
timestamp=timestamp)
rpm = 20 # 20 Rotations Per Minute Counter-clockwise
max_range = 100 # Max range of 100m
fov_angle = np.pi / 12 # FOV angle of pi/12 (15 degrees)
for_angle = np.pi + fov_angle # FOR angle of pi*(13/12) (195 degrees)
    # This means the dwell center will reach the limits -pi/2 and pi/2,
    # as the edge of the beam will reach the full FOV
target_state = State(radar_position +
np.array([[-5], [5]]),
timestamp=timestamp)
measurement_mapping = np.array([0, 1])
# Create a radar object
radar = RadarRasterScanRangeBearing(
position=radar_position,
orientation=radar_orientation,
ndim_state=2,
mapping=measurement_mapping,
noise_covar=noise_covar,
dwell_center=dwell_center,
rpm=rpm,
max_range=max_range,
fov_angle=fov_angle,
for_angle=for_angle)
# Assert that the object has been correctly initialised
assert np.array_equal(radar.position, radar_position)
# Generate a noiseless measurement for the given target
measurement = radar.measure(target_state, noise=0)
# Assert measurement is None since target is not in FOV
assert measurement is None
# Rotate radar
timestamp = timestamp + datetime.timedelta(seconds=0.5)
target_state = State(radar_position +
np.array([[-5], [5]]),
timestamp=timestamp)
measurement = radar.measure(target_state, noise=0)
# Assert measurement is None since target is not in FOV
assert measurement is None
# Rotate radar such that the target is in FOV
timestamp = timestamp + datetime.timedelta(seconds=1.0)
target_state = State(radar_position +
np.array([[-5], [5]]),
timestamp=timestamp)
measurement = radar.measure(target_state, noise=0)
eval_m = h2d(target_state.state_vector,
radar.position,
radar.orientation + [[0],
[0],
[radar.dwell_center.state_vector[0, 0]]])
# Assert correction of generated measurement
assert measurement.timestamp == target_state.timestamp
assert np.array_equal(measurement.state_vector, eval_m)
def test_aesaradar():
target = State([75e3, 0, 10e3, 0, 20e3, 0],
timestamp=datetime.datetime.now())
radar = AESARadar(antenna_gain=30,
mapping=[0, 2, 4],
translation_offset=StateVector([0.0] * 6),
frequency=100e6,
number_pulses=5,
duty_cycle=0.1,
band_width=30e6,
beam_width=np.deg2rad(10),
probability_false_alarm=1e-6,
rcs=10,
receiver_noise=3,
swerling_on=False,
beam_shape=Beam2DGaussian(peak_power=50e3),
beam_transition_model=StationaryBeam(
centre=[np.deg2rad(15), np.deg2rad(20)]),
measurement_model=None)
[prob_detection, snr, swer_rcs, tran_power, spoil_gain,
spoil_width] = radar.gen_probability(target)
assert approx(swer_rcs, 1) == 10.0
assert approx(prob_detection, 3) == 0.688
assert approx(spoil_width, 2) == 0.19
assert approx(spoil_gain, 2) == 29.58
assert approx(tran_power, 2) == 7715.00
assert approx(snr, 2) == 16.01
def test_swer(repeats=10000):
    # initialise list of rcs (radar cross sections)
list_rcs = np.zeros(repeats)
# generic target
target = State([75e3, 0, 10e3, 0, 20e3, 0],
timestamp=datetime.datetime.now())
# define sensor
radar = AESARadar(antenna_gain=30,
frequency=100e6,
number_pulses=5,
duty_cycle=0.1,
band_width=30e6,
beam_width=np.deg2rad(10),
probability_false_alarm=1e-6,
rcs=10,
receiver_noise=3,
swerling_on=True,
beam_shape=Beam2DGaussian(peak_power=50e3),
beam_transition_model=StationaryBeam(
centre=[np.deg2rad(15), np.deg2rad(20)]),
measurement_model=None)
# populate list of random rcs
for i in range(0, repeats):
list_rcs[i] = radar.gen_probability(target)[2]
# check histogram follows the Swerling 1 case probability distribution
bin_height, bin_edge = np.histogram(list_rcs, 20, density=True)
x = (bin_edge[:-1] + bin_edge[1:]) / 2
height = 1 / (float(radar.rcs)) * np.exp(-x / float(radar.rcs))
assert np.allclose(height, bin_height, rtol=0.05,
atol=0.01 * np.max(bin_height))
def test_detection():
radar = AESARadar(antenna_gain=30,
translation_offset=StateVector([0.0] * 3),
frequency=100e6,
number_pulses=5,
duty_cycle=0.1,
band_width=30e6,
beam_width=np.deg2rad(10),
probability_false_alarm=1e-6,
rcs=10,
receiver_noise=3,
swerling_on=False,
beam_shape=Beam2DGaussian(peak_power=50e3),
beam_transition_model=StationaryBeam(
centre=[np.deg2rad(15), np.deg2rad(20)]),
measurement_model=LinearGaussian(
noise_covar=np.diag([1, 1, 1]),
mapping=[0, 1, 2],
ndim_state=3))
target = State([50e3, 10e3, 20e3], timestamp=datetime.datetime.now())
measurement = radar.measure(target)
assert np.allclose(measurement.state_vector,
StateVector([50e3, 10e3, 20e3]), atol=5)
def test_failed_detect():
target = State([75e3, 0, 10e3, 0, 20e3, 0],
timestamp=datetime.datetime.now())
radar = AESARadar(antenna_gain=30,
mapping=[0, 2, 4],
translation_offset=StateVector([0.0] * 6),
frequency=100e6,
number_pulses=5,
duty_cycle=0.1,
band_width=30e6,
beam_width=np.deg2rad(10),
probability_false_alarm=1e-6,
rcs=10,
receiver_noise=3,
swerling_on=False,
beam_shape=Beam2DGaussian(peak_power=50e3),
beam_transition_model=StationaryBeam(
centre=[np.deg2rad(30), np.deg2rad(40)]),
measurement_model=LinearGaussian(
noise_covar=np.diag([1, 1, 1]),
mapping=[0, 2, 4],
ndim_state=6))
assert radar.measure(target) is None
def test_target_rcs():
# targets with the rcs
rcs_10 = (GroundTruthState([150e3, 0.0, 0.0], timestamp=None))
rcs_10.rcs = 10
rcs_20 = (GroundTruthState([250e3, 0.0, 0.0], timestamp=None))
rcs_20.rcs = 20
radar_model = AESARadar(antenna_gain=36,
mapping=[0, 1, 2],
translation_offset=StateVector([0.0]*3),
frequency=10e9,
number_pulses=10,
duty_cycle=0.18,
band_width=24591.9,
beam_width=np.deg2rad(5),
rcs=None, # no default rcs
receiver_noise=5,
probability_false_alarm=5e-3,
beam_shape=Beam2DGaussian(peak_power=1e4),
measurement_model=None,
beam_transition_model=StationaryBeam(centre=[0,
0]))
(det_prob, snr, swer_rcs, _, _, _) = radar_model.gen_probability(rcs_10)
assert swer_rcs == 10
assert approx(snr, 3) == 8.197
(det_prob, snr, swer_rcs, _, _, _) = radar_model.gen_probability(rcs_20)
assert swer_rcs == 20
assert round(snr, 3) == 2.125
``` |
{
"source": "jogeo/rally-openstack",
"score": 2
} |
#### File: services/network/neutron.py
```python
import itertools
from rally.common import cfg
from rally.common import logging
from rally import exceptions
from rally.task import atomic
from rally.task import service
from rally_openstack.common.services.network import net_utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def _args_adapter(arguments_map):
def wrapper(func):
def decorator(*args, **kwargs):
for source, dest in arguments_map.items():
if source in kwargs:
if dest in kwargs:
raise TypeError(
f"{func.__name__}() accepts either {dest} keyword "
f"argument or {source} but both were specified.")
kwargs[dest] = kwargs.pop(source)
return func(*args, **kwargs)
return decorator
return wrapper
_NETWORK_ARGS_MAP = {
"provider:network_type": "provider_network_type",
"provider:physical_network": "provider_physical_network",
"provider:segmentation_id": "provider_segmentation_id",
"router:external": "router_external"
}
def _create_network_arg_adapter():
"""A decorator for converting neutron's create kwargs to look pythonic."""
return _args_adapter(_NETWORK_ARGS_MAP)
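# Illustrative note (not part of the original module): with _NETWORK_ARGS_MAP,
# a call such as
#   neutron.create_network(**{"provider:network_type": "vxlan"})
# is rewritten by the adapter into
#   neutron.create_network(provider_network_type="vxlan")
# so the decorated methods only need to declare the pythonic keyword names.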
class _NoneObj(object):
def __len__(self):
return 0
_NONE = _NoneObj()
def _clean_dict(**kwargs):
"""Builds a dict object from keyword arguments ignoring nullable values."""
return dict((k, v) for k, v in kwargs.items() if v != _NONE)
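# Example (sketch): _clean_dict(name="net1", mtu=_NONE) -> {"name": "net1"};
# arguments left at the _NONE sentinel are dropped and never sent to the API.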
@service.service(service_name="neutron", service_type="network", version="2.0")
class NeutronService(service.Service):
"""A helper class for Neutron API"""
def __init__(self, *args, **kwargs):
super(NeutronService, self).__init__(*args, **kwargs)
self._cached_supported_extensions = None
self._client = None
@property
def client(self):
if self._client is None:
self._client = self._clients.neutron()
return self._client
def create_network_topology(
self, network_create_args=None,
router_create_args=None, router_per_subnet=False,
subnet_create_args=None, subnets_count=1, subnets_dualstack=False
):
"""Create net infrastructure(network, router, subnets).
:param network_create_args: A dict with creation arguments for a
network. The format is equal to the create_network method
:param router_create_args: A dict with creation arguments for an
external router that will add an interface to each created subnet.
The format is equal to the create_subnet method
In case of None value (default behaviour), no router is created.
:param router_per_subnet: whether or not to create router per subnet
or use one router for all subnets.
:param subnet_create_args: A dict with creation arguments for
subnets. The format is equal to the create_subnet method.
:param subnets_count: Number of subnets to create per network.
Defaults to 1
:param subnets_dualstack: Whether subnets should be of both IPv4 and
IPv6 (i.e first subnet will be created for IPv4, the second for
IPv6, the third for IPv4,..). If subnet_create_args includes one of
('cidr', 'start_cidr', 'ip_version') keys, subnets_dualstack
parameter will be ignored.
"""
subnet_create_args = dict(subnet_create_args or {})
network = self.create_network(**(network_create_args or {}))
subnet_create_args["network_id"] = network["id"]
routers = []
if router_create_args is not None:
for i in range(subnets_count if router_per_subnet else 1):
routers.append(self.create_router(**router_create_args))
subnets = []
ip_versions = itertools.cycle([4, 6] if subnets_dualstack else [4])
use_subnets_dualstack = (
"cidr" not in subnet_create_args
and "start_cidr" not in subnet_create_args
and "ip_version" not in subnet_create_args
)
for i in range(subnets_count):
if use_subnets_dualstack:
subnet_create_args["ip_version"] = next(ip_versions)
if routers:
if router_per_subnet:
router = routers[i]
else:
router = routers[0]
subnet_create_args["router_id"] = router["id"]
subnets.append(self.create_subnet(**subnet_create_args))
network["subnets"] = [s["id"] for s in subnets]
return {
"network": network,
"subnets": subnets,
"routers": routers
}
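    # Usage sketch (illustrative, not part of the original module):
    #   topo = neutron.create_network_topology(router_create_args={},
    #                                          subnets_count=2,
    #                                          subnets_dualstack=True)
    # creates one network, one router and two subnets (IPv4 then IPv6), with
    # the router attached to both subnets; delete_network_topology(topo) below
    # tears the whole topology down again.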
def delete_network_topology(self, topo):
"""Delete network topology
This method was developed to provide a backward compatibility with old
neutron helpers. It is not recommended way and we suggest to use
cleanup manager instead.
:param topo: Network topology as create_network_topology returned
"""
for router in topo["routers"]:
self.remove_gateway_from_router(router["id"])
network_id = topo["network"]["id"]
for port in self.list_ports(network_id=network_id):
self.delete_port(port)
for subnet in self.list_subnets(network_id=network_id):
self.delete_subnet(subnet["id"])
self.delete_network(network_id)
for router in topo["routers"]:
self.delete_router(router["id"])
@atomic.action_timer("neutron.create_network")
@_create_network_arg_adapter()
def create_network(self,
project_id=_NONE,
admin_state_up=_NONE,
dns_domain=_NONE,
mtu=_NONE,
port_security_enabled=_NONE,
provider_network_type=_NONE,
provider_physical_network=_NONE,
provider_segmentation_id=_NONE,
qos_policy_id=_NONE,
router_external=_NONE,
segments=_NONE,
shared=_NONE,
vlan_transparent=_NONE,
description=_NONE,
availability_zone_hints=_NONE):
"""Create neutron network.
:param project_id: The ID of the project that owns the resource. Only
administrative and users with advsvc role can specify a project ID
other than their own. You cannot change this value through
authorization policies.
:param admin_state_up: The administrative state of the network,
which is up (true) or down (false).
:param dns_domain: A valid DNS domain.
:param mtu: The maximum transmission unit (MTU) value to address
fragmentation. Minimum value is 68 for IPv4, and 1280 for IPv6.
:param port_security_enabled: The port security status of the network.
Valid values are enabled (true) and disabled (false). This value is
used as the default value of port_security_enabled field of a
newly created port.
:param provider_network_type: The type of physical network that this
network should be mapped to. For example, flat, vlan, vxlan,
or gre. Valid values depend on a networking back-end.
:param provider_physical_network: The physical network where this
network should be implemented. The Networking API v2.0 does not
provide a way to list available physical networks.
For example, the Open vSwitch plug-in configuration file defines
a symbolic name that maps to specific bridges on each compute host.
:param provider_segmentation_id: The ID of the isolated segment on the
physical network. The network_type attribute defines the
segmentation model. For example, if the network_type value is vlan,
this ID is a vlan identifier. If the network_type value is gre,
this ID is a gre key.
:param qos_policy_id: The ID of the QoS policy associated with the
network.
:param router_external: Indicates whether the network has an external
routing facility that’s not managed by the networking service.
:param segments: A list of provider segment objects.
:param shared: Indicates whether this resource is shared across all
projects. By default, only administrative users can change
this value.
:param vlan_transparent: Indicates the VLAN transparency mode of the
network, which is VLAN transparent (true) or not VLAN
transparent (false).
:param description: A human-readable description for the resource.
Default is an empty string.
:param availability_zone_hints: The availability zone candidate for
the network.
:returns: neutron network dict
"""
body = _clean_dict(
name=self.generate_random_name(),
tenant_id=project_id,
admin_state_up=admin_state_up,
dns_domain=dns_domain,
mtu=mtu,
port_security_enabled=port_security_enabled,
qos_policy_id=qos_policy_id,
segments=segments,
shared=shared,
vlan_transparent=vlan_transparent,
description=description,
availability_zone_hints=availability_zone_hints,
**{
"provider:network_type": provider_network_type,
"provider:physical_network": provider_physical_network,
"provider:segmentation_id": provider_segmentation_id,
"router:external": router_external
}
)
resp = self.client.create_network({"network": body})
return resp["network"]
@atomic.action_timer("neutron.show_network")
def get_network(self, network_id, fields=_NONE):
"""Get network by ID
:param network_id: Network ID to fetch data for
:param fields: The fields that you want the server to return. If no
fields list is specified, the networking API returns all
attributes allowed by the policy settings. By using fields
parameter, the API returns only the requested set of attributes.
"""
body = _clean_dict(fields=fields)
resp = self.client.show_network(network_id, **body)
return resp["network"]
def find_network(self, network_id_or_name):
"""Find network by identifier (id or name)
:param network_id_or_name: Network ID or name
"""
for net in self.list_networks():
if network_id_or_name in (net["name"], net["id"]):
return net
raise exceptions.GetResourceFailure(
resource="network",
err=f"no name or id matches {network_id_or_name}")
@atomic.action_timer("neutron.update_network")
@_create_network_arg_adapter()
def update_network(self,
network_id,
name=_NONE,
admin_state_up=_NONE,
dns_domain=_NONE,
mtu=_NONE,
port_security_enabled=_NONE,
provider_network_type=_NONE,
provider_physical_network=_NONE,
provider_segmentation_id=_NONE,
qos_policy_id=_NONE,
router_external=_NONE,
segments=_NONE,
shared=_NONE,
description=_NONE,
is_default=_NONE):
"""Update neutron network.
:param network_id: ID of the network to update
:param name: Human-readable name of the network.
:param admin_state_up: The administrative state of the network,
which is up (true) or down (false).
:param dns_domain: A valid DNS domain.
:param mtu: The maximum transmission unit (MTU) value to address
fragmentation. Minimum value is 68 for IPv4, and 1280 for IPv6.
:param port_security_enabled: The port security status of the network.
Valid values are enabled (true) and disabled (false). This value is
used as the default value of port_security_enabled field of a
newly created port.
:param provider_network_type: The type of physical network that this
network should be mapped to. For example, flat, vlan, vxlan,
or gre. Valid values depend on a networking back-end.
:param provider_physical_network: The physical network where this
network should be implemented. The Networking API v2.0 does not
provide a way to list available physical networks.
For example, the Open vSwitch plug-in configuration file defines
a symbolic name that maps to specific bridges on each compute host.
:param provider_segmentation_id: The ID of the isolated segment on the
physical network. The network_type attribute defines the
segmentation model. For example, if the network_type value is vlan,
this ID is a vlan identifier. If the network_type value is gre,
this ID is a gre key.
:param qos_policy_id: The ID of the QoS policy associated with the
network.
:param router_external: Indicates whether the network has an external
routing facility that’s not managed by the networking service.
:param segments: A list of provider segment objects.
:param shared: Indicates whether this resource is shared across all
projects. By default, only administrative users can change
this value.
:param description: A human-readable description for the resource.
Default is an empty string.
:param is_default: The network is default or not.
:returns: neutron network dict
"""
body = _clean_dict(
name=name,
admin_state_up=admin_state_up,
dns_domain=dns_domain,
mtu=mtu,
port_security_enabled=port_security_enabled,
qos_policy_id=qos_policy_id,
segments=segments,
shared=shared,
description=description,
is_default=is_default,
**{
"provider:network_type": provider_network_type,
"provider:physical_network": provider_physical_network,
"provider:segmentation_id": provider_segmentation_id,
"router:external": router_external
}
)
if not body:
raise TypeError("No updates for a network.")
resp = self.client.update_network(network_id, {"network": body})
return resp["network"]
@atomic.action_timer("neutron.delete_network")
def delete_network(self, network_id):
"""Delete network
:param network_id: Network ID
"""
self.client.delete_network(network_id)
@atomic.action_timer("neutron.list_networks")
def list_networks(self, name=_NONE, router_external=_NONE, status=_NONE,
**kwargs):
"""List networks.
:param name: Filter the list result by the human-readable name of the
resource.
:param router_external: Filter the network list result based on whether
the network has an external routing facility that’s not managed by
the networking service.
:param status: Filter the network list result by network status.
Values are ACTIVE, DOWN, BUILD or ERROR.
:param kwargs: additional network list filters
"""
kwargs["router:external"] = router_external
filters = _clean_dict(name=name, status=status, **kwargs)
return self.client.list_networks(**filters)["networks"]
IPv4_DEFAULT_DNS_NAMESERVERS = ["8.8.8.8", "8.8.4.4"]
IPv6_DEFAULT_DNS_NAMESERVERS = ["fdf8:f53e:61e4::18", "fc00:db20:35b:7399::5"]
@atomic.action_timer("neutron.create_subnet")
def create_subnet(self, network_id, router_id=_NONE, project_id=_NONE,
enable_dhcp=_NONE,
dns_nameservers=_NONE, allocation_pools=_NONE,
host_routes=_NONE, ip_version=_NONE, gateway_ip=_NONE,
cidr=_NONE, start_cidr=_NONE, prefixlen=_NONE,
ipv6_address_mode=_NONE, ipv6_ra_mode=_NONE,
segment_id=_NONE, subnetpool_id=_NONE,
use_default_subnetpool=_NONE, service_types=_NONE,
dns_publish_fixed_ip=_NONE):
"""Create neutron subnet.
:param network_id: The ID of the network to which the subnet belongs.
        :param router_id: ID of an external router. If given, an interface to
            the created subnet is added to that router.
:param project_id: The ID of the project that owns the resource.
Only administrative and users with advsvc role can specify a
project ID other than their own. You cannot change this value
through authorization policies.
:param enable_dhcp: Indicates whether dhcp is enabled or disabled for
the subnet. Default is true.
:param dns_nameservers: List of dns name servers associated with the
subnet. Default is a list of Google DNS
:param allocation_pools: Allocation pools with start and end IP
addresses for this subnet. If allocation_pools are not specified,
OpenStack Networking automatically allocates pools for covering
all IP addresses in the CIDR, excluding the address reserved for
the subnet gateway by default.
:param host_routes: Additional routes for the subnet. A list of
dictionaries with destination and nexthop parameters. Default
value is an empty list.
:param gateway_ip: Gateway IP of this subnet. If the value is null that
implies no gateway is associated with the subnet. If the gateway_ip
is not specified, OpenStack Networking allocates an address from
the CIDR for the gateway for the subnet by default.
:param ip_version: The IP protocol version. Value is 4 or 6. If CIDR
is specified, the value automatically can be detected from it,
otherwise defaults to 4.
Also, check start_cidr param description.
:param cidr: The CIDR of the subnet. If not specified, it will be
auto-generated based on start_cidr and ip_version parameters.
        :param start_cidr: The start CIDR from which the subnet CIDR is
            auto-generated when the cidr parameter is not specified.
        :param prefixlen: The prefix length to use for subnet allocation from a
subnet pool. If not specified, the default_prefixlen value of the
subnet pool will be used.
:param ipv6_address_mode: The IPv6 address modes specifies mechanisms
for assigning IP addresses. Value is slaac, dhcpv6-stateful,
dhcpv6-stateless.
:param ipv6_ra_mode: The IPv6 router advertisement specifies whether
the networking service should transmit ICMPv6 packets, for a
subnet. Value is slaac, dhcpv6-stateful, dhcpv6-stateless.
:param segment_id: The ID of a network segment the subnet is
associated with. It is available when segment extension is enabled.
:param subnetpool_id: The ID of the subnet pool associated with the
subnet.
:param use_default_subnetpool: Whether to allocate this subnet from
the default subnet pool.
:param service_types: The service types associated with the subnet.
:param dns_publish_fixed_ip: Whether to publish DNS records for IPs
from this subnet. Default is false.
"""
if cidr == _NONE:
ip_version, cidr = net_utils.generate_cidr(
ip_version=ip_version, start_cidr=(start_cidr or None))
if ip_version == _NONE:
ip_version = net_utils.get_ip_version(cidr)
if dns_nameservers == _NONE:
if ip_version == 4:
dns_nameservers = self.IPv4_DEFAULT_DNS_NAMESERVERS
else:
dns_nameservers = self.IPv6_DEFAULT_DNS_NAMESERVERS
body = _clean_dict(
name=self.generate_random_name(),
network_id=network_id,
tenant_id=project_id,
enable_dhcp=enable_dhcp,
dns_nameservers=dns_nameservers,
allocation_pools=allocation_pools,
host_routes=host_routes,
ip_version=ip_version,
gateway_ip=gateway_ip,
cidr=cidr,
prefixlen=prefixlen,
ipv6_address_mode=ipv6_address_mode,
ipv6_ra_mode=ipv6_ra_mode,
segment_id=segment_id,
subnetpool_id=subnetpool_id,
use_default_subnetpool=use_default_subnetpool,
service_types=service_types,
dns_publish_fixed_ip=dns_publish_fixed_ip
)
subnet = self.client.create_subnet({"subnet": body})["subnet"]
if router_id:
self.add_interface_to_router(router_id=router_id,
subnet_id=subnet["id"])
return subnet
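    # Illustrative call (sketch): create_subnet(network_id=net["id"],
    # router_id=router["id"]) auto-generates a CIDR via net_utils.generate_cidr,
    # derives ip_version from it, applies the default DNS nameservers and then
    # plugs the new subnet into the given router.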
@atomic.action_timer("neutron.show_subnet")
def get_subnet(self, subnet_id):
"""Get subnet
:param subnet_id: Subnet ID
"""
return self.client.show_subnet(subnet_id)["subnet"]
@atomic.action_timer("neutron.update_subnet")
def update_subnet(self, subnet_id, name=_NONE, enable_dhcp=_NONE,
dns_nameservers=_NONE, allocation_pools=_NONE,
host_routes=_NONE, gateway_ip=_NONE, description=_NONE,
service_types=_NONE, segment_id=_NONE,
dns_publish_fixed_ip=_NONE):
"""Update neutron subnet.
:param subnet_id: The ID of the subnet to update.
:param name: Human-readable name of the resource.
:param description: A human-readable description for the resource.
Default is an empty string.
:param enable_dhcp: Indicates whether dhcp is enabled or disabled for
the subnet. Default is true.
:param dns_nameservers: List of dns name servers associated with the
subnet. Default is a list of Google DNS
:param allocation_pools: Allocation pools with start and end IP
addresses for this subnet. If allocation_pools are not specified,
OpenStack Networking automatically allocates pools for covering
all IP addresses in the CIDR, excluding the address reserved for
the subnet gateway by default.
:param host_routes: Additional routes for the subnet. A list of
dictionaries with destination and nexthop parameters. Default
value is an empty list.
:param gateway_ip: Gateway IP of this subnet. If the value is null that
implies no gateway is associated with the subnet. If the gateway_ip
is not specified, OpenStack Networking allocates an address from
the CIDR for the gateway for the subnet by default.
:param segment_id: The ID of a network segment the subnet is
associated with. It is available when segment extension is enabled.
:param service_types: The service types associated with the subnet.
:param dns_publish_fixed_ip: Whether to publish DNS records for IPs
from this subnet. Default is false.
"""
body = _clean_dict(
name=name,
enable_dhcp=enable_dhcp,
dns_nameservers=dns_nameservers,
allocation_pools=allocation_pools,
host_routes=host_routes,
gateway_ip=gateway_ip,
segment_id=segment_id,
service_types=service_types,
dns_publish_fixed_ip=dns_publish_fixed_ip,
description=description
)
if not body:
raise TypeError("No updates for a subnet.")
resp = self.client.update_subnet(subnet_id, {"subnet": body})["subnet"]
return resp
@atomic.action_timer("neutron.delete_subnet")
def delete_subnet(self, subnet_id):
"""Delete subnet
:param subnet_id: Subnet ID
"""
self.client.delete_subnet(subnet_id)
@atomic.action_timer("neutron.list_subnets")
def list_subnets(self, network_id=_NONE, **filters):
"""List subnets.
:param network_id: Filter the subnet list result by the ID of the
network to which the subnet belongs.
:param filters: additional subnet list filters
"""
if network_id:
filters["network_id"] = network_id
return self.client.list_subnets(**filters)["subnets"]
@atomic.action_timer("neutron.create_router")
def create_router(self, project_id=_NONE, admin_state_up=_NONE,
description=_NONE, discover_external_gw=False,
external_gateway_info=_NONE, distributed=_NONE, ha=_NONE,
availability_zone_hints=_NONE, service_type_id=_NONE,
flavor_id=_NONE):
"""Create router.
:param project_id: The ID of the project that owns the resource. Only
administrative and users with advsvc role can specify a project ID
other than their own. You cannot change this value through
authorization policies.
:param admin_state_up: The administrative state of the resource, which
is up (true) or down (false). Default is true.
:param description: A human-readable description for the resource.
        :param discover_external_gw: Take one of the available external networks
            and use it as the external gateway. This parameter cannot be used in
            combination with the external_gateway_info parameter.
:param external_gateway_info: The external gateway information of
the router. If the router has an external gateway, this would be
a dict with network_id, enable_snat and external_fixed_ips.
:param distributed: true indicates a distributed router. It is
available when dvr extension is enabled.
:param ha: true indicates a highly-available router. It is available
when l3-ha extension is enabled.
:param availability_zone_hints: The availability zone candidates for
the router. It is available when router_availability_zone extension
is enabled.
:param service_type_id: The ID of the service type associated with
the router.
:param flavor_id: The ID of the flavor associated with the router.
"""
if external_gateway_info is _NONE and discover_external_gw:
for external_network in self.list_networks(router_external=True):
external_gateway_info = {"network_id": external_network["id"]}
if self.supports_extension("ext-gw-mode", silent=True):
external_gateway_info["enable_snat"] = True
break
body = _clean_dict(
name=self.generate_random_name(),
# tenant_id should work for both new and old neutron instances
tenant_id=project_id,
external_gateway_info=external_gateway_info,
description=description,
distributed=distributed,
ha=ha,
availability_zone_hints=availability_zone_hints,
service_type_id=service_type_id,
flavor_id=flavor_id,
admin_state_up=admin_state_up
)
resp = self.client.create_router({"router": body})
return resp["router"]
@atomic.action_timer("neutron.show_router")
def get_router(self, router_id, fields=_NONE):
"""Get router details
:param router_id: Router ID
:param fields: The fields that you want the server to return. If no
fields list is specified, the networking API returns all
attributes allowed by the policy settings. By using fields
parameter, the API returns only the requested set of attributes.
"""
body = _clean_dict(fields=fields)
return self.client.show_router(router_id, **body)["router"]
@atomic.action_timer("neutron.add_interface_router")
def add_interface_to_router(self, router_id, subnet_id=_NONE,
port_id=_NONE):
"""Add interface to router.
:param router_id: The ID of the router.
:param subnet_id: The ID of the subnet. One of subnet_id or port_id
must be specified.
:param port_id: The ID of the port. One of subnet_id or port_id must
be specified.
"""
if (subnet_id and port_id) or (not subnet_id and not port_id):
raise TypeError("One of subnet_id or port_id must be specified "
"while adding interface to router.")
body = _clean_dict(subnet_id=subnet_id, port_id=port_id)
return self.client.add_interface_router(router_id, body)
@atomic.action_timer("neutron.remove_interface_router")
def remove_interface_from_router(self, router_id, subnet_id=_NONE,
port_id=_NONE):
"""Remove interface from router
:param router_id: The ID of the router.
:param subnet_id: The ID of the subnet. One of subnet_id or port_id
must be specified.
:param port_id: The ID of the port. One of subnet_id or port_id must
be specified.
"""
from neutronclient.common import exceptions as neutron_exceptions
if (subnet_id and port_id) or (not subnet_id and not port_id):
raise TypeError("One of subnet_id or port_id must be specified "
"to remove interface from router.")
body = _clean_dict(subnet_id=subnet_id, port_id=port_id)
try:
self.client.remove_interface_router(router_id, body)
except (neutron_exceptions.BadRequest,
neutron_exceptions.NotFound):
            # Some neutron plugins don't use the router as the device ID.
            # Also, some plugins don't allow updating the HA router interface
            # directly, because internal logic updates the interface/data
            # model instead.
LOG.exception("Failed to remove an interface from a router.")
@atomic.action_timer("neutron.add_gateway_router")
def add_gateway_to_router(self, router_id, network_id, enable_snat=None,
external_fixed_ips=None):
"""Adds an external network gateway to the specified router.
:param router_id: Router ID
:param enable_snat: whether SNAT should occur on the external gateway
or not
"""
gw_info = {"network_id": network_id}
if enable_snat is not None:
if self.supports_extension("ext-gw-mode", silent=True):
gw_info["enable_snat"] = enable_snat
if external_fixed_ips is not None:
gw_info["external_fixed_ips"] = external_fixed_ips
self.client.add_gateway_router(router_id, gw_info)
@atomic.action_timer("neutron.remove_gateway_router")
def remove_gateway_from_router(self, router_id):
"""Removes an external network gateway from the specified router.
:param router_id: Router ID
"""
self.client.remove_gateway_router(router_id)
@atomic.action_timer("neutron.update_router")
def update_router(self, router_id, name=_NONE, admin_state_up=_NONE,
description=_NONE, external_gateway_info=_NONE,
distributed=_NONE, ha=_NONE):
"""Update router.
:param router_id: The ID of the router to update.
:param name: Human-readable name of the resource.
:param admin_state_up: The administrative state of the resource, which
is up (true) or down (false). Default is true.
:param description: A human-readable description for the resource.
:param external_gateway_info: The external gateway information of
the router. If the router has an external gateway, this would be
a dict with network_id, enable_snat and external_fixed_ips.
:param distributed: true indicates a distributed router. It is
available when dvr extension is enabled.
:param ha: true indicates a highly-available router. It is available
when l3-ha extension is enabled.
"""
body = _clean_dict(
name=name,
external_gateway_info=external_gateway_info,
description=description,
distributed=distributed,
ha=ha,
admin_state_up=admin_state_up
)
if not body:
raise TypeError("No updates for a router.")
return self.client.update_router(router_id, {"router": body})["router"]
@atomic.action_timer("neutron.delete_router")
def delete_router(self, router_id):
"""Delete router
:param router_id: Router ID
"""
self.client.delete_router(router_id)
@staticmethod
def _filter_routers(routers, subnet_ids):
for router in routers:
gtw_info = router["external_gateway_info"]
if gtw_info is None:
continue
if any(fixed_ip["subnet_id"] in subnet_ids
for fixed_ip in gtw_info["external_fixed_ips"]):
yield router
@atomic.action_timer("neutron.list_routers")
def list_routers(self, subnet_ids=_NONE, **kwargs):
"""List routers.
:param subnet_ids: Filter routers by attached subnet(s). Can be a
            string or an array of strings.
:param kwargs: additional router list filters
"""
routers = self.client.list_routers(**kwargs)["routers"]
if subnet_ids != _NONE:
routers = list(self._filter_routers(routers,
subnet_ids=subnet_ids))
return routers
@atomic.action_timer("neutron.create_port")
def create_port(self, network_id, **kwargs):
"""Create neutron port.
:param network_id: neutron network dict
:param kwargs: other optional neutron port creation params
(name is restricted param)
:returns: neutron port dict
"""
kwargs["name"] = self.generate_random_name()
body = _clean_dict(
network_id=network_id,
**kwargs
)
return self.client.create_port({"port": body})["port"]
@atomic.action_timer("neutron.show_port")
def get_port(self, port_id, fields=_NONE):
"""Get port details
:param port_id: Port ID
:param fields: The fields that you want the server to return. If no
fields list is specified, the networking API returns all
attributes allowed by the policy settings. By using fields
parameter, the API returns only the requested set of attributes.
"""
body = _clean_dict(fields=fields)
return self.client.show_port(port_id, **body)["port"]
@atomic.action_timer("neutron.update_port")
def update_port(self, port_id, **kwargs):
"""Update neutron port.
:param port_id: The ID of the port to update.
:param kwargs: other optional neutron port creation params
(name is restricted param)
:returns: neutron port dict
"""
body = _clean_dict(**kwargs)
if not body:
raise TypeError("No updates for a port.")
return self.client.update_port(port_id, {"port": body})["port"]
ROUTER_INTERFACE_OWNERS = ("network:router_interface",
"network:router_interface_distributed",
"network:ha_router_replicated_interface")
ROUTER_GATEWAY_OWNER = "network:router_gateway"
@atomic.action_timer("neutron.delete_port")
def delete_port(self, port):
"""Delete port.
:param port: Port ID or object
:returns bool: False if neutron returns NotFound error on port delete
"""
from neutronclient.common import exceptions as neutron_exceptions
if not isinstance(port, dict):
port = {"id": port, "device_owner": False}
if (port["device_owner"] in self.ROUTER_INTERFACE_OWNERS
or port["device_owner"] == self.ROUTER_GATEWAY_OWNER):
if port["device_owner"] == self.ROUTER_GATEWAY_OWNER:
self.remove_gateway_from_router(port["device_id"])
self.remove_interface_from_router(
router_id=port["device_id"], port_id=port["id"])
else:
try:
self.client.delete_port(port["id"])
except neutron_exceptions.PortNotFoundClient:
# port is auto-removed
pass
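    # Note (sketch): when a bare port ID (string) is passed, device_owner is
    # unknown, so the router-interface/gateway handling above is skipped and a
    # plain delete is attempted; pass the full port dict when the port may
    # belong to a router.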
@atomic.action_timer("neutron.list_ports")
def list_ports(self, network_id=_NONE, device_id=_NONE, device_owner=_NONE,
status=_NONE, **kwargs):
"""List ports.
:param network_id: Filter the list result by the ID of the attached
network.
:param device_id: Filter the port list result by the ID of the device
that uses this port. For example, a server instance or a logical
router.
:param device_owner: Filter the port result list by the entity type
that uses this port. For example, compute:nova (server instance),
network:dhcp (DHCP agent) or network:router_interface
(router interface).
:param status: Filter the port list result by the port status.
Values are ACTIVE, DOWN, BUILD and ERROR.
:param kwargs: additional port list filters
"""
filters = _clean_dict(
network_id=network_id,
device_id=device_id,
device_owner=device_owner,
status=status,
**kwargs
)
return self.client.list_ports(**filters)["ports"]
@atomic.action_timer("neutron.create_floating_ip")
def create_floatingip(self, floating_network=None, project_id=_NONE,
fixed_ip_address=_NONE, floating_ip_address=_NONE,
port_id=_NONE, subnet_id=_NONE, dns_domain=_NONE,
dns_name=_NONE):
"""Create floating IP with floating_network.
:param floating_network: external network associated with floating IP.
:param project_id: The ID of the project.
:param fixed_ip_address: The fixed IP address that is associated with
the floating IP. If an internal port has multiple associated IP
addresses, the service chooses the first IP address unless you
explicitly define a fixed IP address in the fixed_ip_address
parameter.
:param floating_ip_address: The floating IP address. Default policy
settings enable only administrative users to set floating IP
addresses and some non-administrative users might require a
floating IP address. If you do not specify a floating IP address
in the request, the operation automatically allocates one.
:param port_id: The ID of a port associated with the floating IP.
To associate the floating IP with a fixed IP at creation time,
you must specify the identifier of the internal port.
:param subnet_id: The subnet ID on which you want to create the
floating IP.
:param dns_domain: A valid DNS domain.
:param dns_name: A valid DNS name.
"""
from neutronclient.common import exceptions as neutron_exceptions
if isinstance(floating_network, dict):
net_id = floating_network["id"]
elif floating_network:
network = self.find_network(floating_network)
if not network.get("router:external", False):
raise exceptions.NotFoundException(
f"Network '{network['name']} (id={network['id']})' is not "
f"external.")
net_id = network["id"]
else:
ext_networks = self.list_networks(router_external=True)
if not ext_networks:
raise exceptions.NotFoundException(
"Failed to allocate floating IP since no external "
"networks found.")
net_id = ext_networks[0]["id"]
description = None
if not CONF.openstack.pre_newton_neutron:
description = self.generate_random_name()
body = _clean_dict(
tenant_id=project_id,
description=description,
floating_network_id=net_id,
fixed_ip_address=fixed_ip_address,
floating_ip_address=floating_ip_address,
port_id=port_id,
subnet_id=subnet_id,
dns_domain=dns_domain,
dns_name=dns_name
)
try:
resp = self.client.create_floatingip({"floatingip": body})
return resp["floatingip"]
except neutron_exceptions.BadRequest as e:
error = "%s" % e
if "Unrecognized attribute" in error and "'description'" in error:
LOG.info("It looks like you have Neutron API of pre-Newton "
"OpenStack release. Setting "
"openstack.pre_newton_neutron option via Rally "
"configuration should fix an issue.")
raise
@atomic.action_timer("neutron.show_floating_ip")
def get_floatingip(self, floatingip_id, fields=_NONE):
"""Get floating IP details
:param floatingip_id: Floating IP ID
:param fields: The fields that you want the server to return. If no
fields list is specified, the networking API returns all
attributes allowed by the policy settings. By using fields
parameter, the API returns only the requested set of attributes.
"""
body = _clean_dict(fields=fields)
resp = self.client.show_floatingip(floatingip_id, **body)
return resp["floatingip"]
@atomic.action_timer("neutron.update_floating_ip")
def update_floatingip(self, floating_ip_id, fixed_ip_address=_NONE,
port_id=_NONE, description=_NONE):
"""Update floating IP.
:param floating_ip_id: The ID of the floating IP to update.
:param fixed_ip_address: The fixed IP address that is associated with
the floating IP. If an internal port has multiple associated IP
addresses, the service chooses the first IP address unless you
explicitly define a fixed IP address in the fixed_ip_address
parameter.
:param port_id: The ID of a port associated with the floating IP.
To associate the floating IP with a fixed IP at creation time,
you must specify the identifier of the internal port.
:param description: A human-readable description for the resource.
Default is an empty string.
"""
body = _clean_dict(
description=description,
fixed_ip_address=fixed_ip_address,
port_id=port_id
)
if not body:
raise TypeError("No updates for a floating ip.")
return self.client.update_floatingip(
floating_ip_id, {"floatingip": body})["floatingip"]
@atomic.action_timer("neutron.delete_floating_ip")
def delete_floatingip(self, floatingip_id):
"""Delete floating IP.
:param floatingip_id: floating IP id
"""
self.client.delete_floatingip(floatingip_id)
@atomic.action_timer("neutron.associate_floating_ip")
def associate_floatingip(self, port_id=None, device_id=None,
floatingip_id=None, floating_ip_address=None,
fixed_ip_address=None):
"""Add floating IP to an instance
:param port_id: ID of the port to associate floating IP with
:param device_id: ID of the device to find port to use
:param floatingip_id: ID of the floating IP
:param floating_ip_address: IP address to find floating IP to use
:param fixed_ip_address: The fixed IP address to associate with the
floating ip
"""
if (device_id is None and port_id is None) or (device_id and port_id):
raise TypeError("One of device_id or port_id must be specified.")
if ((floating_ip_address is None and floatingip_id is None)
or (floating_ip_address and floatingip_id)):
raise TypeError("One of floating_ip_address or floatingip_id "
"must be specified.")
if port_id is None:
ports = self.list_ports(device_id=device_id)
if not ports:
raise exceptions.GetResourceFailure(
resource="port",
err=f"device '{device_id}' have no ports associated.")
port_id = ports[0]["id"]
if floatingip_id is None:
filtered_fips = self.list_floatingips(
floating_ip_address=floating_ip_address)
if not filtered_fips:
raise exceptions.GetResourceFailure(
resource="floating ip",
err=f"There is no floating ip with '{floating_ip_address}'"
f" address.")
floatingip_id = filtered_fips[0]["id"]
additional = {}
if fixed_ip_address:
additional["fixed_ip_address"] = fixed_ip_address
return self.update_floatingip(floatingip_id, port_id=port_id,
**additional)
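    # Usage sketch (illustrative): attach a floating IP to a server by device
    # id, letting the helper resolve the first port and the floating IP id:
    #   neutron.associate_floatingip(
    #       device_id=server.id,
    #       floating_ip_address=fip["floating_ip_address"])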
@atomic.action_timer("neutron.dissociate_floating_ip")
def dissociate_floatingip(self, floatingip_id=None,
floating_ip_address=None):
"""Remove floating IP from an instance
:param floatingip_id: ID of the floating IP
:param floating_ip_address: IP address to find floating IP to use
"""
if ((floating_ip_address is None and floatingip_id is None)
or (floating_ip_address and floatingip_id)):
raise TypeError("One of floating_ip_address or floatingip_id "
"must be specified.")
if floatingip_id is None:
filtered_fips = self.list_floatingips(
floating_ip_address=floating_ip_address)
if not filtered_fips:
raise exceptions.GetResourceFailure(
resource="floating ip",
err=f"There is no floating ip with '{floating_ip_address}'"
f" address.")
floatingip_id = filtered_fips[0]["id"]
return self.update_floatingip(floatingip_id, port_id=None)
@atomic.action_timer("neutron.list_floating_ips")
def list_floatingips(self, router_id=_NONE, port_id=_NONE, status=_NONE,
description=_NONE, floating_network_id=_NONE,
floating_ip_address=_NONE, fixed_ip_address=_NONE,
**kwargs):
"""List floating IPs.
:param router_id: Filter the floating IP list result by the ID of the
router for the floating IP.
:param port_id: Filter the floating IP list result by the ID of a port
associated with the floating IP.
:param status: Filter the floating IP list result by the status of the
floating IP. Values are ACTIVE, DOWN and ERROR.
:param description: Filter the list result by the human-readable
description of the resource. (available only for OpenStack Newton+)
:param floating_network_id: Filter the floating IP list result by the
ID of the network associated with the floating IP.
:param fixed_ip_address: Filter the floating IP list result by the
fixed IP address that is associated with the floating IP address.
:param floating_ip_address: Filter the floating IP list result by the
floating IP address.
:param kwargs: additional floating IP list filters
"""
filters = _clean_dict(
router_id=router_id,
port_id=port_id,
status=status,
description=description,
floating_network_id=floating_network_id,
floating_ip_address=floating_ip_address,
fixed_ip_address=fixed_ip_address,
**kwargs
)
resp = self.client.list_floatingips(**filters)
return resp["floatingips"]
@atomic.action_timer("neutron.create_security_group")
def create_security_group(self, name=None, project_id=_NONE,
description=_NONE, stateful=_NONE):
"""Create a security group
:param name: Human-readable name of the resource.
:param project_id: The ID of the project.
:param description: A human-readable description for the resource.
Default is an empty string.
:param stateful: Indicates if the security group is stateful or
stateless.
"""
body = _clean_dict(
name=name or self.generate_random_name(),
tenant_id=project_id,
description=description,
stateful=stateful
)
resp = self.client.create_security_group({"security_group": body})
return resp["security_group"]
@atomic.action_timer("neutron.show_security_group")
def get_security_group(self, security_group_id, fields=_NONE):
"""Get security group
:param security_group_id: Security group ID
:param fields: The fields that you want the server to return. If no
fields list is specified, the networking API returns all
attributes allowed by the policy settings. By using fields
parameter, the API returns only the requested set of attributes.
"""
body = _clean_dict(fields=fields)
resp = self.client.show_security_group(security_group_id, **body)
return resp["security_group"]
@atomic.action_timer("neutron.update_security_group")
def update_security_group(self, security_group_id, name=_NONE,
description=_NONE, stateful=_NONE):
"""Update a security group
:param security_group_id: Security group ID
:param name: Human-readable name of the resource.
:param description: A human-readable description for the resource.
Default is an empty string.
:param stateful: Indicates if the security group is stateful or
stateless.
"""
body = _clean_dict(
name=name,
description=description,
stateful=stateful
)
if not body:
raise TypeError("No updates for a security group.")
resp = self.client.update_security_group(security_group_id,
{"security_group": body})
return resp["security_group"]
@atomic.action_timer("neutron.delete_security_group")
def delete_security_group(self, security_group_id):
"""Delete security group.
:param security_group_id: Security group ID
"""
return self.client.delete_security_group(security_group_id)
@atomic.action_timer("neutron.list_security_groups")
def list_security_groups(self, name=_NONE, **kwargs):
"""List security groups.
:param name: Filter the list result by the human-readable name of the
resource.
:param kwargs: additional security group list filters
"""
if name:
kwargs["name"] = name
resp = self.client.list_security_groups(**kwargs)
return resp["security_groups"]
@atomic.action_timer("neutron.create_security_group_rule")
def create_security_group_rule(self,
security_group_id,
direction="ingress",
protocol="tcp",
ethertype=_NONE,
port_range_min=_NONE,
port_range_max=_NONE,
remote_ip_prefix=_NONE,
description=_NONE):
"""Create security group rule.
:param security_group_id: The security group ID to associate with this
security group rule.
:param direction: Ingress or egress, which is the direction in which
the security group rule is applied.
:param protocol: The IP protocol can be represented by a string, an
integer, or null. Valid string or integer values are any or 0, ah
or 51, dccp or 33, egp or 8, esp or 50, gre or 47, icmp or 1,
icmpv6 or 58, igmp or 2, ipip or 4, ipv6-encap or 41,
ipv6-frag or 44, ipv6-icmp or 58, ipv6-nonxt or 59,
ipv6-opts or 60, ipv6-route or 43, ospf or 89, pgm or 113,
rsvp or 46, sctp or 132, tcp or 6, udp or 17, udplite or 136,
vrrp or 112. Additionally, any integer value between [0-255] is
also valid. The string any (or integer 0) means all IP protocols.
See the constants in neutron_lib.constants for the most
up-to-date list of supported strings.
:param ethertype: Must be IPv4 or IPv6, and addresses represented in
CIDR must match the ingress or egress rules.
:param port_range_min: The minimum port number in the range that is
matched by the security group rule. If the protocol is TCP, UDP,
DCCP, SCTP or UDP-Lite this value must be less than or equal to
the port_range_max attribute value. If the protocol is ICMP, this
value must be an ICMP type.
:param port_range_max: The maximum port number in the range that is
matched by the security group rule. If the protocol is TCP, UDP,
DCCP, SCTP or UDP-Lite this value must be greater than or equal to
the port_range_min attribute value. If the protocol is ICMP, this
value must be an ICMP code.
:param remote_ip_prefix: The remote IP prefix that is matched by this
security group rule.
:param description: A human-readable description for the resource.
Default is an empty string.
"""
body = _clean_dict(
security_group_id=security_group_id,
direction=direction,
protocol=protocol,
ethertype=ethertype,
port_range_min=port_range_min,
port_range_max=port_range_max,
remote_ip_prefix=remote_ip_prefix,
description=description
)
return self.client.create_security_group_rule(
{"security_group_rule": body})["security_group_rule"]
@atomic.action_timer("neutron.show_security_group_rule")
def get_security_group_rule(self, security_group_rule_id, verbose=_NONE,
fields=_NONE):
"""Get security group details
:param security_group_rule_id: Security group rule ID
:param verbose: Show detailed information.
:param fields: The fields that you want the server to return. If no
fields list is specified, the networking API returns all
attributes allowed by the policy settings. By using fields
parameter, the API returns only the requested set of attributes.
"""
body = _clean_dict(verbose=verbose, fields=fields)
resp = self.client.show_security_group_rule(
security_group_rule_id, **body)
return resp["security_group_rule"]
@atomic.action_timer("neutron.delete_security_group_rule")
def delete_security_group_rule(self, security_group_rule_id):
"""Delete a given security group rule.
:param security_group_rule_id: Security group rule ID
"""
self.client.delete_security_group_rule(
security_group_rule_id)
@atomic.action_timer("neutron.list_security_group_rules")
def list_security_group_rules(
self, security_group_id=_NONE, protocol=_NONE, direction=_NONE,
port_range_min=_NONE, port_range_max=_NONE, description=_NONE,
**kwargs):
"""List all security group rules.
:param security_group_id: Filter the security group rule list result
by the ID of the security group that associates with this security
group rule.
:param protocol: Filter the security group rule list result by the IP
protocol.
:param direction: Filter the security group rule list result by the
direction in which the security group rule is applied, which is
ingress or egress.
:param port_range_min: Filter the security group rule list result by
the minimum port number in the range that is matched by the
security group rule.
:param port_range_max: Filter the security group rule list result by
the maximum port number in the range that is matched by the
security group rule.
:param description: Filter the list result by the human-readable
description of the resource.
:param kwargs: additional security group rule list filters
:return: list of security group rules
"""
filters = _clean_dict(
security_group_id=security_group_id,
protocol=protocol,
direction=direction,
port_range_min=port_range_min,
port_range_max=port_range_max,
description=description,
**kwargs
)
resp = self.client.list_security_group_rules(**filters)
return resp["security_group_rules"]
@atomic.action_timer("neutron.list_agents")
def list_agents(self, **kwargs):
"""Fetches agents.
:param kwargs: filters
:returns: user agents list
"""
return self.client.list_agents(**kwargs)["agents"]
@atomic.action_timer("neutron.list_extension")
def list_extensions(self):
"""List neutron extensions."""
return self.client.list_extensions()["extensions"]
@property
def cached_supported_extensions(self):
"""Return cached list of extension if exist or fetch it if is missed"""
if self._cached_supported_extensions is None:
self._cached_supported_extensions = self.list_extensions()
return self._cached_supported_extensions
def supports_extension(self, extension, silent=False):
"""Check whether a neutron extension is supported.
:param extension: Extension to check
:param silent: Return boolean result of the search instead of raising
an exception
"""
exist = any(ext.get("alias") == extension
for ext in self.cached_supported_extensions)
if not silent and not exist:
raise exceptions.NotFoundException(
message=f"Neutron driver does not support {extension}")
return exist
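    # Usage sketch (illustrative): gate optional behaviour on an extension, e.g.
    #   if neutron.supports_extension("l3-ha", silent=True):
    #       router = neutron.create_router(ha=True)
    # With silent=False (the default), a missing extension raises
    # NotFoundException instead of returning False.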
```
#### File: services/storage/cinder_common.py
```python
import random
from rally import exceptions
from rally.task import atomic
from rally.task import utils as bench_utils
from rally_openstack.common.services.image import image
from rally_openstack.common.services.storage import block
CONF = block.CONF
class CinderMixin(object):
def _get_client(self):
return self._clients.cinder(self.version)
def _update_resource(self, resource):
try:
manager = getattr(resource, "manager", None)
if manager:
res = manager.get(resource.id)
else:
if isinstance(resource, block.Volume):
attr = "volumes"
elif isinstance(resource, block.VolumeSnapshot):
attr = "volume_snapshots"
elif isinstance(resource, block.VolumeBackup):
attr = "backups"
res = getattr(self._get_client(), attr).get(resource.id)
except Exception as e:
if getattr(e, "code", getattr(e, "http_status", 400)) == 404:
raise exceptions.GetResourceNotFound(resource=resource)
raise exceptions.GetResourceFailure(resource=resource, err=e)
return res
def _wait_available_volume(self, volume):
return bench_utils.wait_for_status(
volume,
ready_statuses=["available"],
update_resource=self._update_resource,
timeout=CONF.openstack.cinder_volume_create_timeout,
check_interval=CONF.openstack.cinder_volume_create_poll_interval
)
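    # Note (added for clarity): state-changing helpers below (e.g. extend_volume)
    # call _wait_available_volume so the measured action includes the time until
    # cinder reports the volume as "available" again.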
def get_volume(self, volume_id):
"""Get target volume information."""
aname = "cinder_v%s.get_volume" % self.version
with atomic.ActionTimer(self, aname):
return self._get_client().volumes.get(volume_id)
def delete_volume(self, volume):
"""Delete target volume."""
aname = "cinder_v%s.delete_volume" % self.version
with atomic.ActionTimer(self, aname):
self._get_client().volumes.delete(volume)
bench_utils.wait_for_status(
volume,
ready_statuses=["deleted"],
check_deletion=True,
update_resource=self._update_resource,
timeout=CONF.openstack.cinder_volume_delete_timeout,
check_interval=(CONF.openstack
.cinder_volume_delete_poll_interval)
)
def extend_volume(self, volume, new_size):
"""Extend the size of the specified volume."""
if isinstance(new_size, dict):
new_size = random.randint(new_size["min"], new_size["max"])
aname = "cinder_v%s.extend_volume" % self.version
with atomic.ActionTimer(self, aname):
self._get_client().volumes.extend(volume, new_size)
return self._wait_available_volume(volume)
def list_snapshots(self, detailed=True):
"""Get a list of all snapshots."""
aname = "cinder_v%s.list_snapshots" % self.version
with atomic.ActionTimer(self, aname):
return (self._get_client()
.volume_snapshots.list(detailed))
def set_metadata(self, volume, sets=10, set_size=3):
"""Set volume metadata.
:param volume: The volume to set metadata on
:param sets: how many operations to perform
:param set_size: number of metadata keys to set in each operation
:returns: A list of keys that were set
"""
key = "cinder_v%s.set_%s_metadatas_%s_times" % (self.version,
set_size,
sets)
with atomic.ActionTimer(self, key):
keys = []
for i in range(sets):
metadata = {}
for j in range(set_size):
key = self.generate_random_name()
keys.append(key)
metadata[key] = self.generate_random_name()
self._get_client().volumes.set_metadata(volume, metadata)
return keys
def delete_metadata(self, volume, keys, deletes=10, delete_size=3):
"""Delete volume metadata keys.
Note that ``len(keys)`` must be greater than or equal to
``deletes * delete_size``.
:param volume: The volume to delete metadata from
:param deletes: how many operations to perform
:param delete_size: number of metadata keys to delete in each operation
:param keys: a list of keys to choose deletion candidates from
"""
if len(keys) < deletes * delete_size:
raise exceptions.InvalidArgumentsException(
"Not enough metadata keys to delete: "
"%(num_keys)s keys, but asked to delete %(num_deletes)s" %
{"num_keys": len(keys),
"num_deletes": deletes * delete_size})
# make a shallow copy of the list of keys so that, when we pop
# from it later, we don't modify the original list.
keys = list(keys)
random.shuffle(keys)
action_name = ("cinder_v%s.delete_%s_metadatas_%s_times"
% (self.version, delete_size, deletes))
with atomic.ActionTimer(self, action_name):
for i in range(deletes):
to_del = keys[i * delete_size:(i + 1) * delete_size]
self._get_client().volumes.delete_metadata(volume, to_del)
def update_readonly_flag(self, volume, read_only):
"""Update the read-only access mode flag of the specified volume.
:param volume: The UUID of the volume to update.
:param read_only: The value to indicate whether to update volume to
read-only access mode.
:returns: A tuple of http Response and body
"""
aname = "cinder_v%s.update_readonly_flag" % self.version
with atomic.ActionTimer(self, aname):
return self._get_client().volumes.update_readonly_flag(
volume, read_only)
def upload_volume_to_image(self, volume, force=False,
container_format="bare", disk_format="raw"):
"""Upload the given volume to image.
Returns created image.
:param volume: volume object
:param force: flag to indicate whether to snapshot a volume even if
it's attached to an instance
:param container_format: container format of image. Acceptable
formats: ami, ari, aki, bare, and ovf
:param disk_format: disk format of image. Acceptable formats:
ami, ari, aki, vhd, vmdk, raw, qcow2, vdi and iso
:returns: Returns created image object
"""
aname = "cinder_v%s.upload_volume_to_image" % self.version
with atomic.ActionTimer(self, aname):
resp, img = self._get_client().volumes.upload_to_image(
volume, force, self.generate_random_name(), container_format,
disk_format)
# NOTE (e0ne): upload_to_image changes volume status to uploading
            # so we need to wait until it becomes available.
volume = self._wait_available_volume(volume)
image_id = img["os-volume_upload_image"]["image_id"]
glance = image.Image(self._clients)
image_inst = glance.get_image(image_id)
image_inst = bench_utils.wait_for_status(
image_inst,
ready_statuses=["active"],
update_resource=glance.get_image,
timeout=CONF.openstack.glance_image_create_timeout,
check_interval=(CONF.openstack
.glance_image_create_poll_interval)
)
return image_inst
def create_qos(self, specs):
"""Create a qos specs.
:param specs: A dict of key/value pairs to be set
:rtype: :class:'QoSSpecs'
"""
aname = "cinder_v%s.create_qos" % self.version
name = self.generate_random_name()
with atomic.ActionTimer(self, aname):
return self._get_client().qos_specs.create(name, specs)
def list_qos(self, search_opts=None):
"""Get a list of all qos specs.
:param search_opts: search options
:rtype: list of :class: 'QoSpecs'
"""
aname = "cinder_v%s.list_qos" % self.version
with atomic.ActionTimer(self, aname):
return self._get_client().qos_specs.list(search_opts)
def get_qos(self, qos_id):
"""Get a specific qos specs.
:param qos_id: The ID of the :class: 'QoSSpecs' to get
:rtype: :class: 'QoSSpecs'
"""
aname = "cinder_v%s.get_qos" % self.version
with atomic.ActionTimer(self, aname):
return self._get_client().qos_specs.get(qos_id)
def set_qos(self, qos_id, set_specs_args):
"""Add/Update keys in qos specs.
:param qos_id: The ID of the :class:`QoSSpecs` to get
:param set_specs_args: A dict of key/value pairs to be set
:rtype: class 'cinderclient.apiclient.base.DictWithMeta'
{"qos_specs": set_specs_args}
"""
aname = "cinder_v%s.set_qos" % self.version
with atomic.ActionTimer(self, aname):
return self._get_client().qos_specs.set_keys(qos_id,
set_specs_args)
def qos_associate_type(self, qos_specs, vol_type_id):
"""Associate qos specs from volume type.
:param qos_specs: The qos specs to be associated with
:param vol_type_id: The volume type id to be associated with
:returns: base on client response return True if the request
has been accepted or not
"""
aname = "cinder_v%s.qos_associate_type" % self.version
with atomic.ActionTimer(self, aname):
tuple_res = self._get_client().qos_specs.associate(qos_specs,
vol_type_id)
return (tuple_res[0].status_code == 202)
def qos_disassociate_type(self, qos_specs, vol_type_id):
"""Disassociate qos specs from volume type.
        :param qos_specs: The qos specs to disassociate
        :param vol_type_id: The volume type id to disassociate from
        :returns: True if the request was accepted (HTTP 202), False otherwise
"""
aname = "cinder_v%s.qos_disassociate_type" % self.version
with atomic.ActionTimer(self, aname):
tuple_res = self._get_client().qos_specs.disassociate(qos_specs,
vol_type_id)
return (tuple_res[0].status_code == 202)
def delete_snapshot(self, snapshot):
"""Delete the given snapshot.
Returns when the snapshot is actually deleted.
:param snapshot: snapshot object
"""
aname = "cinder_v%s.delete_snapshot" % self.version
with atomic.ActionTimer(self, aname):
self._get_client().volume_snapshots.delete(snapshot)
bench_utils.wait_for_status(
snapshot,
ready_statuses=["deleted"],
check_deletion=True,
update_resource=self._update_resource,
timeout=CONF.openstack.cinder_volume_delete_timeout,
check_interval=(CONF.openstack
.cinder_volume_delete_poll_interval)
)
def delete_backup(self, backup):
"""Delete the given backup.
Returns when the backup is actually deleted.
:param backup: backup instance
"""
aname = "cinder_v%s.delete_backup" % self.version
with atomic.ActionTimer(self, aname):
self._get_client().backups.delete(backup)
bench_utils.wait_for_status(
backup,
ready_statuses=["deleted"],
check_deletion=True,
update_resource=self._update_resource,
timeout=CONF.openstack.cinder_volume_delete_timeout,
check_interval=(CONF.openstack
.cinder_volume_delete_poll_interval)
)
def restore_backup(self, backup_id, volume_id=None):
"""Restore the given backup.
:param backup_id: The ID of the backup to restore.
:param volume_id: The ID of the volume to restore the backup to.
"""
aname = "cinder_v%s.restore_backup" % self.version
with atomic.ActionTimer(self, aname):
restore = self._get_client().restores.restore(backup_id, volume_id)
restored_volume = self._get_client().volumes.get(restore.volume_id)
return self._wait_available_volume(restored_volume)
def list_backups(self, detailed=True):
"""Return user volume backups list.
:param detailed: True if detailed information about backup
should be listed
"""
aname = "cinder_v%s.list_backups" % self.version
with atomic.ActionTimer(self, aname):
return self._get_client().backups.list(detailed)
def list_transfers(self, detailed=True, search_opts=None):
"""Get a list of all volume transfers.
:param detailed: If True, detailed information about transfer
should be listed
:param search_opts: Search options to filter out volume transfers
:returns: list of :class:`VolumeTransfer`
"""
aname = "cinder_v%s.list_transfers" % self.version
with atomic.ActionTimer(self, aname):
return self._get_client().transfers.list(detailed, search_opts)
def get_volume_type(self, volume_type):
"""get details of volume_type.
:param volume_type: The ID of the :class:`VolumeType` to get
:returns: :class:`VolumeType`
"""
aname = "cinder_v%s.get_volume_type" % self.version
with atomic.ActionTimer(self, aname):
return self._get_client().volume_types.get(volume_type)
def delete_volume_type(self, volume_type):
"""delete a volume type.
:param volume_type: Name or Id of the volume type
:returns: base on client response return True if the request
has been accepted or not
"""
aname = "cinder_v%s.delete_volume_type" % self.version
with atomic.ActionTimer(self, aname):
tuple_res = self._get_client().volume_types.delete(
volume_type)
return (tuple_res[0].status_code == 202)
def set_volume_type_keys(self, volume_type, metadata):
"""Set extra specs on a volume type.
:param volume_type: The :class:`VolumeType` to set extra spec on
:param metadata: A dict of key/value pairs to be set
:returns: extra_specs if the request has been accepted
"""
aname = "cinder_v%s.set_volume_type_keys" % self.version
with atomic.ActionTimer(self, aname):
return volume_type.set_keys(metadata)
def transfer_create(self, volume_id, name=None):
"""Create a volume transfer.
:param name: The name of created transfer
:param volume_id: The ID of the volume to transfer
:rtype: VolumeTransfer
"""
name = name or self.generate_random_name()
aname = "cinder_v%s.transfer_create" % self.version
with atomic.ActionTimer(self, aname):
return self._get_client().transfers.create(volume_id, name=name)
def transfer_accept(self, transfer_id, auth_key):
"""Accept a volume transfer.
:param transfer_id: The ID of the transfer to accept.
:param auth_key: The auth_key of the transfer.
:rtype: VolumeTransfer
"""
aname = "cinder_v%s.transfer_accept" % self.version
with atomic.ActionTimer(self, aname):
return self._get_client().transfers.accept(transfer_id, auth_key)
def create_encryption_type(self, volume_type, specs):
"""Create encryption type for a volume type. Default: admin only.
:param volume_type: the volume type on which to add an encryption type
:param specs: the encryption type specifications to add
:return: an instance of :class: VolumeEncryptionType
"""
aname = "cinder_v%s.create_encryption_type" % self.version
with atomic.ActionTimer(self, aname):
return self._get_client().volume_encryption_types.create(
volume_type, specs)
def get_encryption_type(self, volume_type):
"""Get the volume encryption type for the specified volume type.
:param volume_type: the volume type to query
:return: an instance of :class: VolumeEncryptionType
"""
aname = "cinder_v%s.get_encryption_type" % self.version
with atomic.ActionTimer(self, aname):
return self._get_client().volume_encryption_types.get(
volume_type)
def list_encryption_type(self, search_opts=None):
"""List all volume encryption types.
:param search_opts: Options used when search for encryption types
:return: a list of :class: VolumeEncryptionType instances
"""
aname = "cinder_v%s.list_encryption_type" % self.version
with atomic.ActionTimer(self, aname):
return self._get_client().volume_encryption_types.list(
search_opts)
def delete_encryption_type(self, volume_type):
"""Delete the encryption type information for the specified volume type.
:param volume_type: the volume type whose encryption type information
must be deleted
"""
aname = "cinder_v%s.delete_encryption_type" % self.version
with atomic.ActionTimer(self, aname):
resp = self._get_client().volume_encryption_types.delete(
volume_type)
if (resp[0].status_code != 202):
raise exceptions.RallyException(
"EncryptionType Deletion Failed")
def update_encryption_type(self, volume_type, specs):
"""Update the encryption type information for the specified volume type.
:param volume_type: the volume type whose encryption type information
must be updated
:param specs: the encryption type specifications to update
:return: an instance of :class: VolumeEncryptionType
"""
aname = "cinder_v%s.update_encryption_type" % self.version
with atomic.ActionTimer(self, aname):
return self._get_client().volume_encryption_types.update(
volume_type, specs)
class UnifiedCinderMixin(object):
@staticmethod
def _unify_backup(backup):
return block.VolumeBackup(id=backup.id, name=backup.name,
volume_id=backup.volume_id,
status=backup.status)
@staticmethod
def _unify_transfer(transfer):
return block.VolumeTransfer(
id=transfer.id,
name=transfer.name,
volume_id=transfer.volume_id,
# NOTE(andreykurilin): we need to access private field to avoid
# calling extra GET request when the object is not fully
# loaded.
auth_key=transfer._info.get("auth_key"))
@staticmethod
def _unify_qos(qos):
return block.QoSSpecs(id=qos.id, name=qos.name, specs=qos.specs)
@staticmethod
def _unify_encryption_type(encryption_type):
return block.VolumeEncryptionType(
id=encryption_type.encryption_id,
volume_type_id=encryption_type.volume_type_id)
def delete_volume(self, volume):
"""Delete a volume."""
self._impl.delete_volume(volume)
def set_metadata(self, volume, sets=10, set_size=3):
"""Update/Set a volume metadata.
:param volume: The updated/setted volume.
:param sets: how many operations to perform
:param set_size: number of metadata keys to set in each operation
:returns: A list of keys that were set
"""
return self._impl.set_metadata(volume, sets=sets, set_size=set_size)
def delete_metadata(self, volume, keys, deletes=10, delete_size=3):
"""Delete volume metadata keys.
Note that ``len(keys)`` must be greater than or equal to
``deletes * delete_size``.
:param volume: The volume to delete metadata from
:param deletes: how many operations to perform
:param delete_size: number of metadata keys to delete in each operation
:param keys: a list of keys to choose deletion candidates from
"""
        self._impl.delete_metadata(volume, keys=keys, deletes=deletes,
                                   delete_size=delete_size)
def update_readonly_flag(self, volume, read_only):
"""Update the read-only access mode flag of the specified volume.
:param volume: The UUID of the volume to update.
:param read_only: The value to indicate whether to update volume to
read-only access mode.
:returns: A tuple of http Response and body
"""
return self._impl.update_readonly_flag(volume, read_only=read_only)
def upload_volume_to_image(self, volume, force=False,
container_format="bare", disk_format="raw"):
"""Upload the given volume to image.
Returns created image.
:param volume: volume object
:param force: flag to indicate whether to snapshot a volume even if
it's attached to an instance
:param container_format: container format of image. Acceptable
formats: ami, ari, aki, bare, and ovf
:param disk_format: disk format of image. Acceptable formats:
ami, ari, aki, vhd, vmdk, raw, qcow2, vdi and iso
:returns: Returns created image object
"""
return self._impl.upload_volume_to_image(
volume, force=force, container_format=container_format,
disk_format=disk_format)
def create_qos(self, specs):
"""Create a qos specs.
:param specs: A dict of key/value pairs to be set
:rtype: :class:'QoSSpecs'
"""
return self._unify_qos(self._impl.create_qos(specs))
def list_qos(self, search_opts=None):
"""Get a list of all qos specs.
:param search_opts: search options
:rtype: list of :class: 'QoSpecs'
"""
return [self._unify_qos(qos)
for qos in self._impl.list_qos(search_opts)]
def get_qos(self, qos_id):
"""Get a specific qos specs.
:param qos_id: The ID of the :class: 'QoSSpecs' to get
:rtype: :class: 'QoSSpecs'
"""
return self._unify_qos(self._impl.get_qos(qos_id))
def set_qos(self, qos, set_specs_args):
"""Add/Update keys in qos specs.
:param qos: The instance of the :class:`QoSSpecs` to set
:param set_specs_args: A dict of key/value pairs to be set
:rtype: :class: 'QoSSpecs'
"""
self._impl.set_qos(qos.id, set_specs_args)
return self._unify_qos(qos)
def qos_associate_type(self, qos_specs, vol_type_id):
"""Associate qos specs from volume type.
:param qos_specs: The qos specs to be associated with
:param vol_type_id: The volume type id to be associated with
"""
self._impl.qos_associate_type(qos_specs, vol_type_id)
return self._unify_qos(qos_specs)
def qos_disassociate_type(self, qos_specs, vol_type_id):
"""Disassociate qos specs from volume type.
        :param qos_specs: The qos specs to disassociate
        :param vol_type_id: The volume type id to disassociate from
"""
self._impl.qos_disassociate_type(qos_specs, vol_type_id)
return self._unify_qos(qos_specs)
def delete_snapshot(self, snapshot):
"""Delete the given backup.
Returns when the backup is actually deleted.
:param backup: backup instance
"""
self._impl.delete_snapshot(snapshot)
def delete_backup(self, backup):
"""Delete a volume backup."""
self._impl.delete_backup(backup)
def list_backups(self, detailed=True):
"""Return user volume backups list."""
return [self._unify_backup(backup)
for backup in self._impl.list_backups(detailed=detailed)]
def list_transfers(self, detailed=True, search_opts=None):
"""Get a list of all volume transfers.
:param detailed: If True, detailed information about transfer
should be listed
:param search_opts: Search options to filter out volume transfers
:returns: list of :class:`VolumeTransfer`
"""
return [self._unify_transfer(transfer)
for transfer in self._impl.list_transfers(
detailed=detailed, search_opts=search_opts)]
def get_volume_type(self, volume_type):
"""get details of volume_type.
:param volume_type: The ID of the :class:`VolumeType` to get
:returns: :class:`VolumeType`
"""
return self._impl.get_volume_type(volume_type)
def delete_volume_type(self, volume_type):
"""delete a volume type.
:param volume_type: Name or Id of the volume type
:returns: base on client response return True if the request
has been accepted or not
"""
return self._impl.delete_volume_type(volume_type)
def set_volume_type_keys(self, volume_type, metadata):
"""Set extra specs on a volume type.
:param volume_type: The :class:`VolumeType` to set extra spec on
:param metadata: A dict of key/value pairs to be set
:returns: extra_specs if the request has been accepted
"""
return self._impl.set_volume_type_keys(volume_type, metadata)
def transfer_create(self, volume_id, name=None):
"""Creates a volume transfer.
:param name: The name of created transfer
:param volume_id: The ID of the volume to transfer.
:returns: Return the created transfer.
"""
return self._unify_transfer(
self._impl.transfer_create(volume_id, name=name))
def transfer_accept(self, transfer_id, auth_key):
"""Accept a volume transfer.
:param transfer_id: The ID of the transfer to accept.
:param auth_key: The auth_key of the transfer.
:returns: VolumeTransfer
"""
return self._unify_transfer(
self._impl.transfer_accept(transfer_id, auth_key=auth_key))
def create_encryption_type(self, volume_type, specs):
"""Create encryption type for a volume type. Default: admin only.
:param volume_type: the volume type on which to add an encryption type
:param specs: the encryption type specifications to add
:return: an instance of :class: VolumeEncryptionType
"""
return self._unify_encryption_type(
self._impl.create_encryption_type(volume_type, specs=specs))
def get_encryption_type(self, volume_type):
"""Get the volume encryption type for the specified volume type.
:param volume_type: the volume type to query
:return: an instance of :class: VolumeEncryptionType
"""
return self._unify_encryption_type(
self._impl.get_encryption_type(volume_type))
def list_encryption_type(self, search_opts=None):
"""List all volume encryption types.
:param search_opts: Options used when search for encryption types
:return: a list of :class: VolumeEncryptionType instances
"""
return [self._unify_encryption_type(encryption_type)
for encryption_type in self._impl.list_encryption_type(
search_opts=search_opts)]
def delete_encryption_type(self, volume_type):
"""Delete the encryption type information for the specified volume type.
:param volume_type: the volume type whose encryption type information
must be deleted
"""
return self._impl.delete_encryption_type(volume_type)
def update_encryption_type(self, volume_type, specs):
"""Update the encryption type information for the specified volume type.
:param volume_type: the volume type whose encryption type information
must be updated
:param specs: the encryption type specifications to update
:return: an instance of :class: VolumeEncryptionType
"""
return self._impl.update_encryption_type(volume_type, specs=specs)
```
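
The metadata helpers above are designed to be used as a pair: `set_metadata` returns the keys it created and `delete_metadata` requires at least `deletes * delete_size` of them. A minimal sketch, assuming `cinder` is a service built on `CinderMixin` and `volume` an existing volume object (both hypothetical here):
```python
def churn_volume_metadata(cinder, volume):
    # 10 set operations of 3 keys each -> 30 keys returned
    keys = cinder.set_metadata(volume, sets=10, set_size=3)
    # valid because len(keys) == 30 >= deletes * delete_size == 30
    cinder.delete_metadata(volume, keys, deletes=10, delete_size=3)
```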
#### File: rally-openstack/rally_openstack/_compat.py
```python
import importlib
import importlib.abc
import importlib.machinery
import importlib.util
import sys
import warnings
class _MoveSpec(object):
def __init__(self, deprecated, new, release):
"""init moved module info
:param deprecated: a module name that is deprecated
:param new: a module name that should be used instead
:param release: A release when the module was deprecated
"""
self.deprecated = deprecated
self.new = new
self.deprecated_path = self.deprecated.replace(".", "/")
self.new_path = self.new.replace(".", "/")
self.release = release
def get_new_name(self, fullname):
"""Get the new name for deprecated module."""
return fullname.replace(self.deprecated, self.new)
def get_deprecated_path(self, path):
"""Get a path to the deprecated module."""
return path.replace(self.new_path, self.deprecated_path)
_MOVES = [
_MoveSpec(
deprecated="rally_openstack.embedcharts",
new="rally_openstack.task.ui.charts",
release="2.0.0"
),
_MoveSpec(
deprecated="rally_openstack.cleanup",
new="rally_openstack.task.cleanup",
release="2.0.0"
),
_MoveSpec(
deprecated="rally_openstack.contexts",
new="rally_openstack.task.contexts",
release="2.0.0"
),
_MoveSpec(
deprecated="rally_openstack.hook",
new="rally_openstack.task.hooks",
release="2.0.0"
),
_MoveSpec(
deprecated="rally_openstack.scenario",
new="rally_openstack.task.scenario",
release="2.0.0"
),
_MoveSpec(
deprecated="rally_openstack.scenarios",
new="rally_openstack.task.scenarios",
release="2.0.0"
),
_MoveSpec(
deprecated="rally_openstack.types",
new="rally_openstack.task.types",
release="2.0.0"
),
_MoveSpec(
deprecated="rally_openstack.platforms",
new="rally_openstack.environment.platforms",
release="2.0.0"
),
_MoveSpec(
deprecated="rally_openstack.service",
new="rally_openstack.common.service",
release="2.0.0"
),
_MoveSpec(
deprecated="rally_openstack.services",
new="rally_openstack.common.services",
release="2.0.0"
),
_MoveSpec(
deprecated="rally_openstack.validators",
new="rally_openstack.common.validators",
release="2.0.0"
),
_MoveSpec(
deprecated="rally_openstack.wrappers",
new="rally_openstack.common.wrappers",
release="2.0.0"
),
_MoveSpec(
deprecated="rally_openstack.credential",
new="rally_openstack.common.credential",
release="2.0.0"
),
_MoveSpec(
deprecated="rally_openstack.osclients",
new="rally_openstack.common.osclients",
release="2.0.0"
),
_MoveSpec(
deprecated="rally_openstack.consts",
new="rally_openstack.common.consts",
release="2.0.0"
),
_MoveSpec(
deprecated="rally_openstack.exceptions",
new="rally_openstack.common.exceptions",
release="2.0.0"
),
_MoveSpec(
deprecated="rally_openstack.cfg",
new="rally_openstack.common.cfg",
release="2.0.0"
),
]
class ModuleLoader(object):
def __init__(self, move_spec):
self.move_spec = move_spec
def create_module(self, spec):
# Python interpreter will use the default module creator in case of
# None return value.
return None
def exec_module(self, module):
"""Module executor."""
full_name = self.move_spec.get_new_name(module.__name__)
original_module = importlib.import_module(full_name)
if original_module.__file__.endswith("__init__.py"):
# NOTE(andreykurilin): In case we need to list submodules the
# next code can be used:
#
# import pkgutil
#
# for m in pkgutil.iter_modules(original_module.__path__):
# module.__dict__[m.name] = importlib.import_module(
# f"{full_name}.{m.name}")
module.__path__ = [
self.move_spec.get_deprecated_path(original_module.__path__[0])
]
for item in dir(original_module):
if item.startswith("_"):
continue
module.__dict__[item] = original_module.__dict__[item]
module.__file__ = self.move_spec.get_deprecated_path(
original_module.__file__)
return module
class ModulesMovementsHandler(importlib.abc.MetaPathFinder):
@classmethod
def _process_spec(cls, fullname, spec):
"""Make module spec and print warning message if needed."""
if spec.deprecated == fullname:
warnings.warn(
f"Module {fullname} is deprecated since rally-openstack "
f"{spec.release}. Use {spec.get_new_name(fullname)} instead.",
stacklevel=3
)
return importlib.machinery.ModuleSpec(fullname, ModuleLoader(spec))
@classmethod
def find_spec(cls, fullname, path=None, target=None):
"""This functions is what gets executed by the loader."""
for spec in _MOVES:
if spec.deprecated in fullname:
return cls._process_spec(fullname, spec)
def init():
"""Adds our custom module loader."""
sys.meta_path.append(ModulesMovementsHandler())
```
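
A minimal sketch of the behaviour this shim provides, assuming the rally-openstack 2.x package is installed. The package itself is expected to call `init()` at import time; calling it again here is harmless, it just appends another finder:
```python
from rally_openstack import _compat

_compat.init()

# Pre-2.0 import path: resolved by ModulesMovementsHandler, which serves the
# module from rally_openstack.common.osclients and emits a warning pointing
# at the new location.
import rally_openstack.osclients
```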
#### File: rally_openstack/task/context.py
```python
import functools
from rally.task import context
configure = functools.partial(context.configure, platform="openstack")
class OpenStackContext(context.Context):
"""A base class for all OpenStack context classes."""
def _iterate_per_tenants(self, users=None):
"""Iterate of a single arbitrary user from each tenant
:type users: list of users
:return: iterator of a single user from each tenant
"""
if users is None:
users = self.context.get("users", [])
processed_tenants = set()
for user in users:
if user["tenant_id"] not in processed_tenants:
processed_tenants.add(user["tenant_id"])
yield user, user["tenant_id"]
```
#### File: contexts/network/allow_ssh.py
```python
from rally.common import logging
from rally.common import validation
from rally_openstack.common import osclients
from rally_openstack.common.wrappers import network
from rally_openstack.task import context
LOG = logging.getLogger(__name__)
# This method is simplified version to what neutron has
def _rule_to_key(rule):
def _normalize_rule_value(key, value):
# This string is used as a placeholder for str(None), but shorter.
none_char = "+"
default = {
"port_range_min": "1",
"port_range_max": "65535"
}
if key == "remote_ip_prefix":
all_address = ["0.0.0.0/0", "::/0", None]
if value in all_address:
return none_char
elif value is None:
return default.get(key, none_char)
return str(value)
# NOTE(andreykurilin): there are more actual comparison keys, but this set
# should be enough for us.
comparison_keys = [
"direction",
"port_range_max",
"port_range_min",
"protocol",
"remote_ip_prefix",
"security_group_id"
]
return "_".join([_normalize_rule_value(x, rule.get(x))
for x in comparison_keys])
def _prepare_open_secgroup(credential, secgroup_name):
"""Generate secgroup allowing all tcp/udp/icmp access.
In order to run tests on instances it is necessary to have SSH access.
This function generates a secgroup which allows all tcp/udp/icmp access.
:param credential: clients credential
:param secgroup_name: security group name
:returns: dict with security group details
"""
neutron = osclients.Clients(credential).neutron()
security_groups = neutron.list_security_groups()["security_groups"]
rally_open = [sg for sg in security_groups if sg["name"] == secgroup_name]
if not rally_open:
descr = "Allow ssh access to VMs created by Rally"
rally_open = neutron.create_security_group(
{"security_group": {"name": secgroup_name,
"description": descr}})["security_group"]
else:
rally_open = rally_open[0]
rules_to_add = [
{
"protocol": "tcp",
"port_range_max": 65535,
"port_range_min": 1,
"remote_ip_prefix": "0.0.0.0/0",
"direction": "ingress",
"security_group_id": rally_open["id"]
},
{
"protocol": "udp",
"port_range_max": 65535,
"port_range_min": 1,
"remote_ip_prefix": "0.0.0.0/0",
"direction": "ingress",
"security_group_id": rally_open["id"]
},
{
"protocol": "icmp",
"remote_ip_prefix": "0.0.0.0/0",
"direction": "ingress",
"security_group_id": rally_open["id"]
}
]
existing_rules = set(
_rule_to_key(r) for r in rally_open.get("security_group_rules", []))
for new_rule in rules_to_add:
if _rule_to_key(new_rule) not in existing_rules:
neutron.create_security_group_rule(
{"security_group_rule": new_rule})
return rally_open
@validation.add("required_platform", platform="openstack", users=True)
@context.configure(name="allow_ssh", platform="openstack", order=320)
class AllowSSH(context.OpenStackContext):
"""Sets up security groups for all users to access VM via SSH."""
def setup(self):
admin_or_user = (self.context.get("admin")
or self.context.get("users")[0])
net_wrapper = network.wrap(
osclients.Clients(admin_or_user["credential"]),
self, config=self.config)
use_sg, msg = net_wrapper.supports_extension("security-group")
if not use_sg:
LOG.info("Security group context is disabled: %s" % msg)
return
secgroup_name = self.generate_random_name()
for user in self.context["users"]:
user["secgroup"] = _prepare_open_secgroup(user["credential"],
secgroup_name)
def cleanup(self):
for user, tenant_id in self._iterate_per_tenants():
with logging.ExceptionLogger(
LOG,
"Unable to delete security group: %s."
% user["secgroup"]["name"]):
clients = osclients.Clients(user["credential"])
clients.neutron().delete_security_group(user["secgroup"]["id"])
```
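
The `_rule_to_key` normalization is what lets `_prepare_open_secgroup` skip rules that already exist, even when Neutron reports them in a slightly different form. A small illustration with hypothetical rule dicts, assuming the module above is importable so `_rule_to_key` is in scope:
```python
rule_created = {"protocol": "icmp", "remote_ip_prefix": "0.0.0.0/0",
                "direction": "ingress", "security_group_id": "sg-1"}
rule_reported = {"protocol": "icmp", "remote_ip_prefix": "::/0",
                 "direction": "ingress", "security_group_id": "sg-1"}

# Both "any address" prefixes collapse to the same placeholder and the missing
# port range falls back to 1-65535, so the keys match and no duplicate rule
# would be created.
assert _rule_to_key(rule_created) == _rule_to_key(rule_reported)
```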
#### File: scenarios/gnocchi/archive_policy_rule.py
```python
from rally.task import validation
from rally_openstack.common import consts
from rally_openstack.task import scenario
from rally_openstack.task.scenarios.gnocchi import utils as gnocchiutils
"""Scenarios for Gnocchi archive policy rule."""
@validation.add("required_services", services=[consts.Service.GNOCCHI])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(name="GnocchiArchivePolicyRule.list_archive_policy_rule")
class ListArchivePolicyRule(gnocchiutils.GnocchiBase):
def run(self):
"""List archive policy rules."""
self.gnocchi.list_archive_policy_rule()
@validation.add("required_services", services=[consts.Service.GNOCCHI])
@validation.add("required_platform", platform="openstack", admin=True)
@scenario.configure(
context={"admin_cleanup@openstack": ["gnocchi.archive_policy_rule"]},
name="GnocchiArchivePolicyRule.create_archive_policy_rule")
class CreateArchivePolicyRule(gnocchiutils.GnocchiBase):
def run(self, metric_pattern="cpu_*", archive_policy_name="low"):
"""Create archive policy rule.
:param metric_pattern: Pattern for matching metrics
:param archive_policy_name: Archive policy name
"""
name = self.generate_random_name()
self.admin_gnocchi.create_archive_policy_rule(
name,
metric_pattern=metric_pattern,
archive_policy_name=archive_policy_name)
@validation.add("required_services", services=[consts.Service.GNOCCHI])
@validation.add("required_platform", platform="openstack", admin=True)
@scenario.configure(
context={"admin_cleanup@openstack": ["gnocchi.archive_policy_rule"]},
name="GnocchiArchivePolicyRule.create_delete_archive_policy_rule")
class CreateDeleteArchivePolicyRule(gnocchiutils.GnocchiBase):
def run(self, metric_pattern="cpu_*", archive_policy_name="low"):
"""Create archive policy rule and then delete it.
:param metric_pattern: Pattern for matching metrics
:param archive_policy_name: Archive policy name
"""
name = self.generate_random_name()
self.admin_gnocchi.create_archive_policy_rule(
name,
metric_pattern=metric_pattern,
archive_policy_name=archive_policy_name)
self.admin_gnocchi.delete_archive_policy_rule(name)
```
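
A hypothetical task configuration exercising the create/delete scenario above, written as a Python dict in the shape of Rally's JSON task format; the runner numbers are illustrative only:
```python
task = {
    "GnocchiArchivePolicyRule.create_delete_archive_policy_rule": [{
        "args": {"metric_pattern": "cpu_*", "archive_policy_name": "low"},
        "runner": {"type": "constant", "times": 10, "concurrency": 2},
    }]
}
```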
#### File: scenarios/magnum/utils.py
```python
import os
import random
import string
import time
from kubernetes import client as k8s_config
from kubernetes.client.api import core_v1_api
from kubernetes.client import api_client
from kubernetes.client.rest import ApiException
from rally.common import cfg
from rally.common import utils as common_utils
from rally import exceptions
from rally.task import atomic
from rally.task import utils
from rally_openstack.task import scenario
CONF = cfg.CONF
class MagnumScenario(scenario.OpenStackScenario):
"""Base class for Magnum scenarios with basic atomic actions."""
@atomic.action_timer("magnum.list_cluster_templates")
def _list_cluster_templates(self, **kwargs):
"""Return list of cluster_templates.
:param limit: (Optional) The maximum number of results to return
per request, if:
1) limit > 0, the maximum number of cluster_templates to return.
2) limit param is NOT specified (None), the number of items
               returned respects the maximum imposed by the Magnum API
(see Magnum's api.max_limit option).
:param kwargs: Optional additional arguments for cluster_templates
listing
:returns: cluster_templates list
"""
return self.clients("magnum").cluster_templates.list(**kwargs)
@atomic.action_timer("magnum.create_cluster_template")
def _create_cluster_template(self, **kwargs):
"""Create a cluster_template
:param kwargs: optional additional arguments for cluster_template
creation
:returns: magnum cluster_template
"""
kwargs["name"] = self.generate_random_name()
return self.clients("magnum").cluster_templates.create(**kwargs)
@atomic.action_timer("magnum.get_cluster_template")
def _get_cluster_template(self, cluster_template):
"""Return details of the specify cluster template.
:param cluster_template: ID or name of the cluster template to show
:returns: clustertemplate detail
"""
return self.clients("magnum").cluster_templates.get(cluster_template)
@atomic.action_timer("magnum.list_clusters")
def _list_clusters(self, limit=None, **kwargs):
"""Return list of clusters.
:param limit: Optional, the maximum number of results to return
per request, if:
1) limit > 0, the maximum number of clusters to return.
2) limit param is NOT specified (None), the number of items
               returned respects the maximum imposed by the Magnum API
(see Magnum's api.max_limit option).
:param kwargs: Optional additional arguments for clusters listing
:returns: clusters list
"""
return self.clients("magnum").clusters.list(limit=limit, **kwargs)
@atomic.action_timer("magnum.create_cluster")
def _create_cluster(self, cluster_template, node_count, **kwargs):
"""Create a cluster
:param cluster_template: cluster_template for the cluster
:param node_count: the cluster node count
:param kwargs: optional additional arguments for cluster creation
:returns: magnum cluster
"""
name = self.generate_random_name()
cluster = self.clients("magnum").clusters.create(
name=name, cluster_template_id=cluster_template,
node_count=node_count, **kwargs)
common_utils.interruptable_sleep(
CONF.openstack.magnum_cluster_create_prepoll_delay)
cluster = utils.wait_for_status(
cluster,
ready_statuses=["CREATE_COMPLETE"],
failure_statuses=["CREATE_FAILED", "ERROR"],
update_resource=utils.get_from_manager(),
timeout=CONF.openstack.magnum_cluster_create_timeout,
check_interval=CONF.openstack.magnum_cluster_create_poll_interval,
id_attr="uuid"
)
return cluster
@atomic.action_timer("magnum.get_cluster")
def _get_cluster(self, cluster):
"""Return details of the specify cluster.
:param cluster: ID or name of the cluster to show
:returns: cluster detail
"""
return self.clients("magnum").clusters.get(cluster)
@atomic.action_timer("magnum.get_ca_certificate")
def _get_ca_certificate(self, cluster_uuid):
"""Get CA certificate for this cluster
:param cluster_uuid: uuid of the cluster
"""
return self.clients("magnum").certificates.get(cluster_uuid)
@atomic.action_timer("magnum.create_ca_certificate")
def _create_ca_certificate(self, csr_req):
"""Send csr to Magnum to have it signed
:param csr_req: {"cluster_uuid": <uuid>, "csr": <csr file content>}
"""
return self.clients("magnum").certificates.create(**csr_req)
def _get_k8s_api_client(self):
cluster_uuid = self.context["tenant"]["cluster"]
cluster = self._get_cluster(cluster_uuid)
cluster_template = self._get_cluster_template(
cluster.cluster_template_id)
key_file = None
cert_file = None
ca_certs = None
if not cluster_template.tls_disabled:
dir = self.context["ca_certs_directory"]
key_file = cluster_uuid + ".key"
key_file = os.path.join(dir, key_file)
cert_file = cluster_uuid + ".crt"
cert_file = os.path.join(dir, cert_file)
ca_certs = cluster_uuid + "_ca.crt"
ca_certs = os.path.join(dir, ca_certs)
if hasattr(k8s_config, "ConfigurationObject"):
# k8sclient < 4.0.0
config = k8s_config.ConfigurationObject()
else:
config = k8s_config.Configuration()
config.host = cluster.api_address
config.ssl_ca_cert = ca_certs
config.cert_file = cert_file
config.key_file = key_file
if hasattr(k8s_config, "ConfigurationObject"):
# k8sclient < 4.0.0
client = api_client.ApiClient(config=config)
else:
client = api_client.ApiClient(config)
return core_v1_api.CoreV1Api(client)
@atomic.action_timer("magnum.k8s_list_v1pods")
def _list_v1pods(self):
"""List all pods.
"""
k8s_api = self._get_k8s_api_client()
return k8s_api.list_node(namespace="default")
@atomic.action_timer("magnum.k8s_create_v1pod")
def _create_v1pod(self, manifest):
"""Create a pod on the specify cluster.
:param manifest: manifest use to create the pod
"""
k8s_api = self._get_k8s_api_client()
podname = manifest["metadata"]["name"] + "-"
for i in range(5):
podname = podname + random.choice(string.ascii_lowercase)
manifest["metadata"]["name"] = podname
for i in range(150):
try:
k8s_api.create_namespaced_pod(body=manifest,
namespace="default")
break
except ApiException as e:
if e.status != 403:
raise
time.sleep(2)
start = time.time()
while True:
resp = k8s_api.read_namespaced_pod(
name=podname, namespace="default")
if resp.status.conditions:
for condition in resp.status.conditions:
if condition.type.lower() == "ready" and \
condition.status.lower() == "true":
return resp
if (time.time() - start > CONF.openstack.k8s_pod_create_timeout):
raise exceptions.TimeoutException(
desired_status="Ready",
resource_name=podname,
resource_type="Pod",
resource_id=resp.metadata.uid,
resource_status=resp.status,
timeout=CONF.openstack.k8s_pod_create_timeout)
common_utils.interruptable_sleep(
CONF.openstack.k8s_pod_create_poll_interval)
@atomic.action_timer("magnum.k8s_list_v1rcs")
def _list_v1rcs(self):
"""List all rcs.
"""
k8s_api = self._get_k8s_api_client()
return k8s_api.list_namespaced_replication_controller(
namespace="default")
@atomic.action_timer("magnum.k8s_create_v1rc")
def _create_v1rc(self, manifest):
"""Create rc on the specify cluster.
:param manifest: manifest use to create the replication controller
"""
k8s_api = self._get_k8s_api_client()
suffix = "-"
for i in range(5):
suffix = suffix + random.choice(string.ascii_lowercase)
rcname = manifest["metadata"]["name"] + suffix
manifest["metadata"]["name"] = rcname
resp = k8s_api.create_namespaced_replication_controller(
body=manifest,
namespace="default")
        expected_status = resp.spec.replicas
start = time.time()
while True:
resp = k8s_api.read_namespaced_replication_controller(
name=rcname,
namespace="default")
status = resp.status.replicas
            if status == expected_status:
return resp
else:
if time.time() - start > CONF.openstack.k8s_rc_create_timeout:
raise exceptions.TimeoutException(
                        desired_status=expected_status,
resource_name=rcname,
resource_type="ReplicationController",
resource_id=resp.metadata.uid,
resource_status=status,
timeout=CONF.openstack.k8s_rc_create_timeout)
common_utils.interruptable_sleep(
CONF.openstack.k8s_rc_create_poll_interval)
```
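
`_create_v1pod` only relies on the manifest carrying a `metadata.name` (to which it appends a random suffix) before handing the body to the Kubernetes API. A minimal manifest sketch with hypothetical image and command:
```python
pod_manifest = {
    "apiVersion": "v1",
    "kind": "Pod",
    "metadata": {"name": "rally-test"},
    "spec": {
        "containers": [{
            "name": "busybox",
            "image": "busybox",
            "command": ["sleep", "3600"],
        }]
    }
}
```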
#### File: scenarios/nova/server_groups.py
```python
from rally.common import logging
from rally.task import validation
from rally_openstack.common import consts
from rally_openstack.task import scenario
from rally_openstack.task.scenarios.nova import utils
LOG = logging.getLogger(__name__)
"""Scenarios for Nova Group servers."""
@validation.add("required_services", services=[consts.Service.NOVA])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["nova"]},
name="NovaServerGroups.create_and_list_server_groups",
platform="openstack")
class CreateAndListServerGroups(utils.NovaScenario):
def run(self, policies=None, all_projects=False, kwargs=None):
"""Create a server group, then list all server groups.
Measure the "nova server-group-create" and "nova server-group-list"
command performance.
:param policies: Server group policy
:param all_projects: If True, display server groups from all
            projects (admin only)
:param kwargs: The server group specifications to add.
DEPRECATED, specify arguments explicitly.
"""
if kwargs is None:
kwargs = {
"policies": policies
}
else:
LOG.warning("The argument `kwargs` is deprecated since"
" Rally 0.10.0. Specify all arguments from it"
" explicitly.")
server_group = self._create_server_group(**kwargs)
msg = ("Server Groups isn't created")
self.assertTrue(server_group, err_msg=msg)
server_groups_list = self._list_server_groups(all_projects)
msg = ("Server Group not included into list of server groups\n"
"Created server group: {}\n"
"list of server groups: {}").format(server_group,
server_groups_list)
self.assertIn(server_group, server_groups_list, err_msg=msg)
@validation.add("required_services", services=[consts.Service.NOVA])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["nova"]},
name="NovaServerGroups.create_and_get_server_group",
platform="openstack")
class CreateAndGetServerGroup(utils.NovaScenario):
def run(self, policies=None, kwargs=None):
"""Create a server group, then get its detailed information.
Measure the "nova server-group-create" and "nova server-group-get"
command performance.
:param policies: Server group policy
:param kwargs: The server group specifications to add.
DEPRECATED, specify arguments explicitly.
"""
if kwargs is None:
kwargs = {
"policies": policies
}
else:
LOG.warning("The argument `kwargs` is deprecated since"
" Rally 0.10.0. Specify all arguments from it"
" explicitly.")
server_group = self._create_server_group(**kwargs)
msg = ("Server Groups isn't created")
self.assertTrue(server_group, err_msg=msg)
server_group_info = self._get_server_group(server_group.id)
self.assertEqual(server_group.id, server_group_info.id)
@validation.add("required_services", services=[consts.Service.NOVA])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["nova"]},
name="NovaServerGroups.create_and_delete_server_group",
platform="openstack")
class CreateAndDeleteServerGroup(utils.NovaScenario):
def run(self, policies=None, kwargs=None):
"""Create a server group, then delete it.
Measure the "nova server-group-create" and "nova server-group-delete"
command performance.
:param policies: Server group policy
:param kwargs: The server group specifications to add.
DEPRECATED, specify arguments explicitly.
"""
if kwargs is None:
kwargs = {
"policies": policies
}
else:
LOG.warning("The argument `kwargs` is deprecated since"
" Rally 0.10.0. Specify all arguments from it"
" explicitly.")
server_group = self._create_server_group(**kwargs)
msg = ("Server Group isn't created")
self.assertTrue(server_group, err_msg=msg)
self._delete_server_group(server_group.id)
```
#### File: prepare-for-rally-task/library/make_env_spec_with_existing_users.py
```python
import copy
import json
import uuid
from ansible.module_utils.basic import AnsibleModule
from rally import api
from rally.env import env_mgr
from rally import plugins
from rally_openstack.common import consts
from rally_openstack.common import credential
def fetch_parent_env_and_admin_creds(env_name):
"""Fetch parent environment spec and openstack admin creds from it."""
env_data = env_mgr.EnvManager.get(env_name).data
openstack_platform = env_data["platforms"]["openstack"]
admin_creds = credential.OpenStackCredential(
permission=consts.EndpointPermission.ADMIN,
**openstack_platform["platform_data"]["admin"])
return env_data["spec"], admin_creds
def create_projects_and_users(admin_creds, projects_count, users_per_project):
"""Create new projects and users via 'users@openstack' context.
:param admin_creds: admin credentials to use for creating new entities
:param projects_count: The number of keystone projects to create.
:param users_per_project: The number of keystone users to create per one
keystone project.
"""
    # it should be imported after calling rally.api.API, which sets up oslo_config
from rally_openstack.task.contexts.keystone import users as users_ctx
ctx = {
"env": {
"platforms": {
"openstack": {
"admin": admin_creds.to_dict(),
"users": []
}
}
},
"task": {
"uuid": str(uuid.uuid4())
},
"config": {
"users@openstack": {
"tenants": projects_count,
"users_per_tenant": users_per_project
}
}
}
users_ctx.UserGenerator(ctx).setup()
users = []
for user in ctx["users"]:
users.append({
"username": user["credential"]["username"],
"password": user["credential"]["password"],
"project_name": user["credential"]["tenant_name"]
})
for optional in ("domain_name",
"user_domain_name",
"project_domain_name"):
if user["credential"][optional]:
users[-1][optional] = user["credential"][optional]
return users
def store_a_new_spec(original_spec, users, path_for_new_spec):
new_spec = copy.deepcopy(original_spec)
del new_spec["existing@openstack"]["admin"]
new_spec["existing@openstack"]["users"] = users
with open(path_for_new_spec, "w") as f:
f.write(json.dumps(new_spec, indent=4))
@plugins.ensure_plugins_are_loaded
def ansible_main():
module = AnsibleModule(argument_spec=dict(
projects_count=dict(
type="int",
default=1,
required=False
),
users_per_project=dict(
type="int",
default=1,
required=False
),
parent_env_name=dict(
type="str",
required=True
),
path_for_new_spec=dict(
type="str",
required=True
)
))
    # init Rally API as it does all the work of initializing logging and config
api.API()
original_spec, admin_creds = fetch_parent_env_and_admin_creds(
module.params["parent_env_name"]
)
users = create_projects_and_users(
admin_creds,
projects_count=module.params["projects_count"],
users_per_project=module.params["users_per_project"]
)
store_a_new_spec(original_spec, users, module.params["path_for_new_spec"])
module.exit_json(changed=True)
if __name__ == "__main__":
ansible_main()
```
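
For reference, `create_projects_and_users()` returns entries shaped like the dict below, which `store_a_new_spec()` writes under `existing@openstack -> users` in the new spec (all values hypothetical; the domain keys are only included when the generated credential carries them):
```python
user_entry = {
    "username": "generated-user-0",
    "password": "generated-password",
    "project_name": "generated-project-0",
    # optional, copied from the credential when present:
    # "domain_name": "Default",
    # "user_domain_name": "Default",
    # "project_domain_name": "Default",
}
```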
#### File: services/barbican/test_secrets.py
```python
from unittest import mock
from rally_openstack.common.services.key_manager import barbican
from tests.unit import test
class BarbicanServiceTestCase(test.TestCase):
def setUp(self):
super(BarbicanServiceTestCase, self).setUp()
self.clients = mock.MagicMock()
self.name_generator = mock.MagicMock()
self.service = barbican.BarbicanService(
self.clients,
name_generator=self.name_generator)
def atomic_actions(self):
return self.service._atomic_actions
def test__list_secrets(self):
self.assertEqual(
self.service.list_secrets(),
self.service._clients.barbican().secrets.list.return_value
)
self._test_atomic_action_timer(self.atomic_actions(),
"barbican.list_secrets")
def test__create_secret(self):
self.assertEqual(
self.service.create_secret(),
self.service._clients.barbican().secrets.create(
name="fake_secret", payload="rally_data")
)
self._test_atomic_action_timer(self.atomic_actions(),
"barbican.create_secret")
def test__get_secret(self):
self.service.get_secret("fake_secret")
self.service._clients.barbican().secrets.get \
.assert_called_once_with("fake_secret")
self._test_atomic_action_timer(self.atomic_actions(),
"barbican.get_secret")
def test__delete_secret(self):
self.service.delete_secret("fake_secret")
self.service._clients.barbican().secrets.delete \
.assert_called_once_with("fake_secret")
self._test_atomic_action_timer(self.atomic_actions(),
"barbican.delete_secret")
def test__list_containers(self):
self.assertEqual(
self.service.list_container(),
self.service._clients.barbican().containers.list.return_value)
self._test_atomic_action_timer(
self.atomic_actions(), "barbican.list_container")
def test__container_delete(self):
self.service.container_delete("fake_container")
self.service._clients.barbican().containers.delete \
.assert_called_once_with("fake_container")
self._test_atomic_action_timer(
self.atomic_actions(), "barbican.container_delete")
def test__container_create(self):
self.service.generate_random_name = mock.MagicMock(
return_value="container")
self.service.container_create()
self.service._clients.barbican().containers.create \
.assert_called_once_with(name="container", secrets=None)
def test__create_rsa_container(self):
self.service.generate_random_name = mock.MagicMock(
return_value="container")
self.service.create_rsa_container()
self.service._clients.barbican().containers.create_rsa \
.assert_called_once_with(
name="container", private_key=None,
private_key_passphrase=None, public_key=None)
def test__create_generate_container(self):
self.service.generate_random_name = mock.MagicMock(
return_value="container")
self.service.create_certificate_container()
self.service._clients.barbican().containers \
.create_certificate.assert_called_once_with(
certificate=None, intermediates=None,
name="container", private_key=None,
private_key_passphrase=None)
def test__list_orders(self):
self.assertEqual(
self.service.orders_list(),
self.service._clients.barbican().orders.list.return_value)
self._test_atomic_action_timer(
self.atomic_actions(), "barbican.orders_list")
def test__orders_get(self):
self.service.orders_get("fake_order")
self.service._clients.barbican().orders.get \
.assert_called_once_with("fake_order")
def test__orders_delete(self):
self.service.orders_delete("fake_order")
self.service._clients.barbican().orders.delete \
.assert_called_once_with("fake_order")
self._test_atomic_action_timer(
self.atomic_actions(), "barbican.orders_delete")
def test__create_key(self):
self.service.generate_random_name = mock.MagicMock(
return_value="key")
self.service.create_key()
self.service._clients.barbican().orders.create_key \
.assert_called_once_with(
name="key", algorithm="aes", bit_length=256, mode=None,
payload_content_type=None, expiration=None)
self._test_atomic_action_timer(
self.atomic_actions(), "barbican.create_key")
def test__create_asymmetric(self):
self.service.generate_random_name = mock.MagicMock(
return_value="key")
self.service.create_asymmetric()
self.service._clients.barbican().orders.create_asymmetric \
.assert_called_once_with(
algorithm="aes", bit_length=256, expiration=None, name="key",
pass_phrase=None, payload_content_type=None)
self._test_atomic_action_timer(
self.atomic_actions(), "barbican.create_asymmetric")
def test_create_certificate(self):
self.service.generate_random_name = mock.MagicMock(
return_value="key")
self.service.create_certificate()
self.service._clients.barbican().orders.create_certificate \
.assert_called_once_with(
name="key", request_type=None, subject_dn=None,
source_container_ref=None, ca_id=None, profile=None,
request_data=None)
self._test_atomic_action_timer(
self.atomic_actions(), "barbican.create_certificate")
```
#### File: services/gnocchi/test_metric.py
```python
from unittest import mock
from rally_openstack.common.services.gnocchi import metric
from tests.unit import test
class GnocchiServiceTestCase(test.TestCase):
def setUp(self):
super(GnocchiServiceTestCase, self).setUp()
self.clients = mock.MagicMock()
self.name_generator = mock.MagicMock()
self.service = metric.GnocchiService(
self.clients,
name_generator=self.name_generator)
def atomic_actions(self):
return self.service._atomic_actions
def test__create_archive_policy(self):
definition = [{"granularity": "0:00:01", "timespan": "1:00:00"}]
aggregation_methods = [
"std", "count", "95pct", "min", "max", "sum", "median", "mean"]
archive_policy = {"name": "fake_name"}
archive_policy["definition"] = definition
archive_policy["aggregation_methods"] = aggregation_methods
self.assertEqual(
self.service.create_archive_policy(
name="fake_name",
definition=definition,
aggregation_methods=aggregation_methods),
self.service._clients.gnocchi().archive_policy.create(
archive_policy)
)
self._test_atomic_action_timer(self.atomic_actions(),
"gnocchi.create_archive_policy")
def test__delete_archive_policy(self):
self.service.delete_archive_policy("fake_name")
self.service._clients.gnocchi().archive_policy.delete \
.assert_called_once_with("fake_name")
self._test_atomic_action_timer(self.atomic_actions(),
"gnocchi.delete_archive_policy")
def test__list_archive_policy(self):
self.assertEqual(
self.service.list_archive_policy(),
self.service._clients.gnocchi().archive_policy.list.return_value
)
self._test_atomic_action_timer(self.atomic_actions(),
"gnocchi.list_archive_policy")
def test__create_archive_policy_rule(self):
archive_policy_rule = {"name": "fake_name"}
archive_policy_rule["metric_pattern"] = "cpu_*"
archive_policy_rule["archive_policy_name"] = "low"
self.assertEqual(
self.service.create_archive_policy_rule(
name="fake_name",
metric_pattern="cpu_*",
archive_policy_name="low"),
self.service._clients.gnocchi().archive_policy_rule.create(
archive_policy_rule)
)
self._test_atomic_action_timer(self.atomic_actions(),
"gnocchi.create_archive_policy_rule")
def test__delete_archive_policy_rule(self):
self.service.delete_archive_policy_rule("fake_name")
self.service._clients.gnocchi().archive_policy_rule \
.delete.assert_called_once_with("fake_name")
self._test_atomic_action_timer(self.atomic_actions(),
"gnocchi.delete_archive_policy_rule")
def test__list_archive_policy_rule(self):
self.assertEqual(
self.service.list_archive_policy_rule(),
self.service._clients.gnocchi().archive_policy_rule.list
.return_value
)
self._test_atomic_action_timer(self.atomic_actions(),
"gnocchi.list_archive_policy_rule")
def test__list_capabilities(self):
self.assertEqual(
self.service.list_capabilities(),
self.service._clients.gnocchi().capabilities.list.return_value
)
self._test_atomic_action_timer(self.atomic_actions(),
"gnocchi.list_capabilities")
def test__get_measures_aggregation(self):
self.assertEqual(
self.service.get_measures_aggregation(
metrics=[1],
aggregation="mean",
refresh=False),
self.service._clients.gnocchi().metric.aggregation(
[1], "mean", False)
)
self._test_atomic_action_timer(self.atomic_actions(),
"gnocchi.get_measures_aggregation")
def test__get_measures(self):
self.assertEqual(
self.service.get_measures(
metric=1,
aggregation="mean",
refresh=False),
self.service._clients.gnocchi().metric.get_measures(
1, "mean", False)
)
self._test_atomic_action_timer(self.atomic_actions(),
"gnocchi.get_measures")
def test__create_metric(self):
param = {"name": "fake_name"}
param["archive_policy_name"] = "fake_archive_policy"
param["unit"] = "fake_unit"
param["resource_id"] = "fake_resource_id"
self.assertEqual(
self.service.create_metric(
name="fake_name",
archive_policy_name="fake_archive_policy",
unit="fake_unit",
resource_id="fake_resource_id"),
self.service._clients.gnocchi().metric.create(param)
)
self._test_atomic_action_timer(self.atomic_actions(),
"gnocchi.create_metric")
def test__delete_metric(self):
self.service.delete_metric("fake_metric_id")
self.service._clients.gnocchi().metric.delete.assert_called_once_with(
"fake_metric_id")
self._test_atomic_action_timer(self.atomic_actions(),
"gnocchi.delete_metric")
def test__list_metric(self):
self.service.list_metric(limit=0)
self.assertEqual(
1, self.service._clients.gnocchi().metric.list.call_count)
self._test_atomic_action_timer(self.atomic_actions(),
"gnocchi.list_metric")
def test__create_resource(self):
resource = {"id": "11111"}
self.assertEqual(
self.service.create_resource("fake_type"),
self.service._clients.gnocchi().resource.create(
"fake_type", resource)
)
self._test_atomic_action_timer(self.atomic_actions(),
"gnocchi.create_resource")
def test__delete_resource(self):
self.service.delete_resource("fake_resource_id")
self.service._clients.gnocchi().resource.delete \
.assert_called_once_with("fake_resource_id")
self._test_atomic_action_timer(self.atomic_actions(),
"gnocchi.delete_resource")
def test__list_resource(self):
self.assertEqual(
self.service.list_resource(),
self.service._clients.gnocchi().resource.list.return_value
)
self._test_atomic_action_timer(self.atomic_actions(),
"gnocchi.list_resource")
def test__create_resource_type(self):
resource_type = {"name": "fake_name"}
self.assertEqual(
self.service.create_resource_type("fake_name"),
self.service._clients.gnocchi().resource_type.create(resource_type)
)
self._test_atomic_action_timer(self.atomic_actions(),
"gnocchi.create_resource_type")
def test__delete_resource_type(self):
self.service.delete_resource_type("fake_resource_name")
self.service._clients.gnocchi().resource_type.delete \
.assert_called_once_with("fake_resource_name")
self._test_atomic_action_timer(self.atomic_actions(),
"gnocchi.delete_resource_type")
def test__list_resource_type(self):
self.assertEqual(
self.service.list_resource_type(),
self.service._clients.gnocchi().resource_type.list.return_value
)
self._test_atomic_action_timer(self.atomic_actions(),
"gnocchi.list_resource_type")
    def test__get_status(self):
self.assertEqual(
self.service.get_status(),
self.service._clients.gnocchi().status.get.return_value
)
self._test_atomic_action_timer(self.atomic_actions(),
"gnocchi.get_status")
```
#### File: services/network/test_net_utils.py
```python
from unittest import mock
from rally_openstack.common.services.network import net_utils
from tests.unit import test
PATH = "rally_openstack.common.services.network.net_utils"
class FunctionsTestCase(test.TestCase):
def test_generate_cidr(self):
with mock.patch("%s._IPv4_CIDR_INCR" % PATH, iter(range(1, 4))):
self.assertEqual((4, "10.2.1.0/24"), net_utils.generate_cidr())
self.assertEqual((4, "10.2.2.0/24"), net_utils.generate_cidr())
self.assertEqual((4, "10.2.3.0/24"), net_utils.generate_cidr())
with mock.patch("%s._IPv4_CIDR_INCR" % PATH, iter(range(1, 4))):
start_cidr = "1.1.0.0/26"
self.assertEqual(
(4, "1.1.0.64/26"),
net_utils.generate_cidr(start_cidr=start_cidr))
self.assertEqual(
(4, "1.1.0.128/26"),
net_utils.generate_cidr(start_cidr=start_cidr))
self.assertEqual(
(4, "192.168.127.12/26"),
net_utils.generate_cidr(start_cidr=start_cidr))
```
#### File: common/wrappers/test_network.py
```python
from unittest import mock
import ddt
from neutronclient.common import exceptions as neutron_exceptions
from rally.common import utils
from rally_openstack.common import consts
from rally_openstack.common.wrappers import network
from tests.unit import test
SVC = "rally_openstack.common.wrappers.network."
class Owner(utils.RandomNameGeneratorMixin):
task = {"uuid": "task-uuid"}
@ddt.ddt
class NeutronWrapperTestCase(test.TestCase):
def setUp(self):
super(NeutronWrapperTestCase, self).setUp()
self.owner = Owner()
self.owner.generate_random_name = mock.Mock()
self.wrapper = network.NeutronWrapper(mock.MagicMock(),
self.owner,
config={})
self._nc = self.wrapper.neutron.client
def test_SUBNET_IP_VERSION(self):
self.assertEqual(4, network.NeutronWrapper.SUBNET_IP_VERSION)
@mock.patch(
"rally_openstack.common.services.network.net_utils.generate_cidr")
def test__generate_cidr(self, mock_generate_cidr):
cidrs = iter(range(5))
def fake_gen_cidr(ip_version=None, start_cidr=None):
return 4, 3 + next(cidrs)
mock_generate_cidr.side_effect = fake_gen_cidr
self.assertEqual(3, self.wrapper._generate_cidr())
self.assertEqual(4, self.wrapper._generate_cidr())
self.assertEqual(5, self.wrapper._generate_cidr())
self.assertEqual(6, self.wrapper._generate_cidr())
self.assertEqual(7, self.wrapper._generate_cidr())
self.assertEqual([mock.call(start_cidr=self.wrapper.start_cidr)] * 5,
mock_generate_cidr.call_args_list)
def test_external_networks(self):
self._nc.list_networks.return_value = {"networks": "foo_networks"}
self.assertEqual("foo_networks", self.wrapper.external_networks)
self._nc.list_networks.assert_called_once_with(
**{"router:external": True})
def test_get_network(self):
neutron_net = {"id": "foo_id",
"name": "foo_name",
"tenant_id": "foo_tenant",
"status": "foo_status",
"router:external": "foo_external",
"subnets": "foo_subnets"}
expected_net = {"id": "foo_id",
"name": "foo_name",
"tenant_id": "foo_tenant",
"status": "foo_status",
"external": "foo_external",
"router_id": None,
"subnets": "foo_subnets"}
self._nc.show_network.return_value = {"network": neutron_net}
net = self.wrapper.get_network(net_id="foo_id")
self.assertEqual(expected_net, net)
self._nc.show_network.assert_called_once_with("foo_id")
self._nc.show_network.side_effect = (
neutron_exceptions.NeutronClientException)
self.assertRaises(network.NetworkWrapperException,
self.wrapper.get_network,
net_id="foo_id")
self._nc.list_networks.return_value = {"networks": [neutron_net]}
net = self.wrapper.get_network(name="foo_name")
self.assertEqual(expected_net, net)
self._nc.list_networks.assert_called_once_with(name="foo_name")
self._nc.list_networks.return_value = {"networks": []}
self.assertRaises(network.NetworkWrapperException,
self.wrapper.get_network,
name="foo_name")
def test_create_v1_pool(self):
subnet = "subnet_id"
tenant = "foo_tenant"
expected_pool = {"pool": {
"id": "pool_id",
"name": self.owner.generate_random_name.return_value,
"subnet_id": subnet,
"tenant_id": tenant}}
self.wrapper.client.create_pool.return_value = expected_pool
resultant_pool = self.wrapper.create_v1_pool(tenant, subnet)
self.wrapper.client.create_pool.assert_called_once_with({
"pool": {"lb_method": "ROUND_ROBIN",
"subnet_id": subnet,
"tenant_id": tenant,
"protocol": "HTTP",
"name": self.owner.generate_random_name.return_value}})
self.assertEqual(expected_pool, resultant_pool)
def test_create_network(self):
self._nc.create_network.return_value = {
"network": {"id": "foo_id",
"name": self.owner.generate_random_name.return_value,
"status": "foo_status"}}
net = self.wrapper.create_network("foo_tenant")
self._nc.create_network.assert_called_once_with({
"network": {"tenant_id": "foo_tenant",
"name": self.owner.generate_random_name.return_value}})
self.assertEqual({"id": "foo_id",
"name": self.owner.generate_random_name.return_value,
"status": "foo_status",
"external": False,
"tenant_id": "foo_tenant",
"router_id": None,
"subnets": []}, net)
def test_create_network_with_subnets(self):
subnets_num = 4
subnets_ids = iter(range(subnets_num))
self._nc.create_subnet.side_effect = lambda i: {
"subnet": {"id": "subnet-%d" % next(subnets_ids)}}
self._nc.create_network.return_value = {
"network": {"id": "foo_id",
"name": self.owner.generate_random_name.return_value,
"status": "foo_status"}}
net = self.wrapper.create_network("foo_tenant",
subnets_num=subnets_num)
self._nc.create_network.assert_called_once_with({
"network": {"tenant_id": "foo_tenant",
"name": self.owner.generate_random_name.return_value}})
self.assertEqual({"id": "foo_id",
"name": self.owner.generate_random_name.return_value,
"status": "foo_status",
"external": False,
"router_id": None,
"tenant_id": "foo_tenant",
"subnets": ["subnet-%d" % i
for i in range(subnets_num)]}, net)
self.assertEqual(
[mock.call({"subnet":
{"name": self.owner.generate_random_name.return_value,
"network_id": "foo_id",
"tenant_id": "foo_tenant",
"ip_version": self.wrapper.SUBNET_IP_VERSION,
"dns_nameservers": ["8.8.8.8", "8.8.4.4"],
"cidr": mock.ANY}})
for i in range(subnets_num)],
self.wrapper.client.create_subnet.call_args_list
)
def test_create_network_with_router(self):
self._nc.create_router.return_value = {"router": {"id": "foo_router"}}
self._nc.create_network.return_value = {
"network": {"id": "foo_id",
"name": self.owner.generate_random_name.return_value,
"status": "foo_status"}}
net = self.wrapper.create_network("foo_tenant", add_router=True)
self.assertEqual({"id": "foo_id",
"name": self.owner.generate_random_name.return_value,
"status": "foo_status",
"external": False,
"tenant_id": "foo_tenant",
"router_id": "foo_router",
"subnets": []}, net)
self._nc.create_router.assert_called_once_with({
"router": {
"name": self.owner.generate_random_name(),
"tenant_id": "foo_tenant"
}
})
def test_create_network_with_router_and_subnets(self):
subnets_num = 4
self.wrapper._generate_cidr = mock.Mock(return_value="foo_cidr")
self._nc.create_router.return_value = {"router": {"id": "foo_router"}}
self._nc.create_subnet.return_value = {"subnet": {"id": "foo_subnet"}}
self._nc.create_network.return_value = {
"network": {"id": "foo_id",
"name": self.owner.generate_random_name.return_value,
"status": "foo_status"}}
net = self.wrapper.create_network(
"foo_tenant", add_router=True, subnets_num=subnets_num,
dns_nameservers=["foo_nameservers"])
self.assertEqual({"id": "foo_id",
"name": self.owner.generate_random_name.return_value,
"status": "foo_status",
"external": False,
"tenant_id": "foo_tenant",
"router_id": "foo_router",
"subnets": ["foo_subnet"] * subnets_num}, net)
self._nc.create_router.assert_called_once_with(
{"router": {"name": self.owner.generate_random_name.return_value,
"tenant_id": "foo_tenant"}})
self.assertEqual(
[
mock.call(
{"subnet": {
"name": self.owner.generate_random_name.return_value,
"network_id": "foo_id",
"tenant_id": "foo_tenant",
"ip_version": self.wrapper.SUBNET_IP_VERSION,
"dns_nameservers": ["foo_nameservers"],
"cidr": mock.ANY
}}
)
] * subnets_num,
self._nc.create_subnet.call_args_list,
)
self.assertEqual(self._nc.add_interface_router.call_args_list,
[mock.call("foo_router", {"subnet_id": "foo_subnet"})
for i in range(subnets_num)])
def test_delete_v1_pool(self):
pool = {"pool": {"id": "pool-id"}}
self.wrapper.delete_v1_pool(pool["pool"]["id"])
self.wrapper.client.delete_pool.assert_called_once_with("pool-id")
def test_delete_network(self):
self._nc.list_ports.return_value = {"ports": []}
self._nc.list_subnets.return_value = {"subnets": []}
self._nc.delete_network.return_value = "foo_deleted"
self.wrapper.delete_network(
{"id": "foo_id", "router_id": None, "subnets": [], "name": "x",
"status": "y", "external": False})
self.assertFalse(self._nc.remove_gateway_router.called)
self.assertFalse(self._nc.remove_interface_router.called)
self.assertFalse(self._nc.client.delete_router.called)
self.assertFalse(self._nc.client.delete_subnet.called)
self._nc.delete_network.assert_called_once_with("foo_id")
def test_delete_network_with_router_and_ports_and_subnets(self):
subnets = ["foo_subnet", "bar_subnet"]
ports = [{"id": "foo_port", "device_owner": "network:router_interface",
"device_id": "rounttter"},
{"id": "bar_port", "device_owner": "network:dhcp"}]
self._nc.list_ports.return_value = ({"ports": ports})
self._nc.list_subnets.return_value = (
{"subnets": [{"id": id_} for id_ in subnets]})
self.wrapper.delete_network(
{"id": "foo_id", "router_id": "foo_router", "subnets": subnets,
"lb_pools": [], "name": "foo", "status": "x", "external": False})
self.assertEqual(self._nc.remove_gateway_router.mock_calls,
[mock.call("foo_router")])
self._nc.delete_port.assert_called_once_with(ports[1]["id"])
self._nc.remove_interface_router.assert_called_once_with(
ports[0]["device_id"], {"port_id": ports[0]["id"]})
self.assertEqual(
[mock.call(subnet_id) for subnet_id in subnets],
self._nc.delete_subnet.call_args_list
)
self._nc.delete_network.assert_called_once_with("foo_id")
@ddt.data({"exception_type": neutron_exceptions.NotFound,
"should_raise": False},
{"exception_type": neutron_exceptions.BadRequest,
"should_raise": False},
{"exception_type": KeyError,
"should_raise": True})
@ddt.unpack
def test_delete_network_with_router_throw_exception(
self, exception_type, should_raise):
        # Ensure the cleanup context still moves forward even when
        # remove_interface_router raises a NotFound/BadRequest exception
self._nc.remove_interface_router.side_effect = exception_type
subnets = ["foo_subnet", "bar_subnet"]
ports = [{"id": "foo_port", "device_owner": "network:router_interface",
"device_id": "rounttter"},
{"id": "bar_port", "device_owner": "network:dhcp"}]
self._nc.list_ports.return_value = {"ports": ports}
self._nc.list_subnets.return_value = {"subnets": [
{"id": id_} for id_ in subnets]}
if should_raise:
self.assertRaises(
exception_type, self.wrapper.delete_network,
{"id": "foo_id", "name": "foo", "router_id": "foo_router",
"subnets": subnets, "lb_pools": [], "status": "xxx",
"external": False})
self.assertFalse(self._nc.delete_subnet.called)
self.assertFalse(self._nc.delete_network.called)
else:
self.wrapper.delete_network(
{"id": "foo_id", "name": "foo", "status": "xxx",
"router_id": "foo_router", "subnets": subnets,
"lb_pools": [], "external": False})
self._nc.delete_port.assert_called_once_with(ports[1]["id"])
self._nc.remove_interface_router.assert_called_once_with(
ports[0]["device_id"], {"port_id": ports[0]["id"]})
self.assertEqual(
[mock.call(subnet_id) for subnet_id in subnets],
self._nc.delete_subnet.call_args_list
)
self._nc.delete_network.assert_called_once_with("foo_id")
self._nc.remove_gateway_router.assert_called_once_with(
"foo_router")
def test_list_networks(self):
self._nc.list_networks.return_value = {"networks": "foo_nets"}
self.assertEqual("foo_nets", self.wrapper.list_networks())
self._nc.list_networks.assert_called_once_with()
def test_create_floating_ip(self):
self._nc.create_port.return_value = {"port": {"id": "port_id"}}
self._nc.create_floatingip.return_value = {
"floatingip": {"id": "fip_id", "floating_ip_address": "fip_ip"}}
self.assertRaises(ValueError, self.wrapper.create_floating_ip)
self._nc.list_networks.return_value = {"networks": []}
self.assertRaises(network.NetworkWrapperException,
self.wrapper.create_floating_ip,
tenant_id="foo_tenant")
self._nc.list_networks.return_value = {"networks": [{"id": "ext_id"}]}
fip = self.wrapper.create_floating_ip(
tenant_id="foo_tenant", port_id="port_id")
self.assertEqual({"id": "fip_id", "ip": "fip_ip"}, fip)
self._nc.list_networks.return_value = {"networks": [
{"id": "ext_net_id", "name": "ext_net", "router:external": True}]}
self.wrapper.create_floating_ip(
tenant_id="foo_tenant", ext_network="ext_net", port_id="port_id")
self.assertRaises(
network.NetworkWrapperException,
self.wrapper.create_floating_ip, tenant_id="foo_tenant",
ext_network="ext_net_2")
def test_delete_floating_ip(self):
self.wrapper.delete_floating_ip("fip_id")
self.wrapper.delete_floating_ip("fip_id", ignored_kwarg="bar")
self.assertEqual([mock.call("fip_id")] * 2,
self._nc.delete_floatingip.call_args_list)
def test_create_router(self):
self._nc.create_router.return_value = {"router": "foo_router"}
self._nc.list_extensions.return_value = {
"extensions": [{"alias": "ext-gw-mode"}]}
self._nc.list_networks.return_value = {"networks": [{"id": "ext_id"}]}
router = self.wrapper.create_router()
self._nc.create_router.assert_called_once_with(
{"router": {"name": self.owner.generate_random_name.return_value}})
self.assertEqual("foo_router", router)
self.wrapper.create_router(external=True, flavor_id="bar")
self._nc.create_router.assert_called_with(
{"router": {"name": self.owner.generate_random_name.return_value,
"external_gateway_info": {
"network_id": "ext_id",
"enable_snat": True},
"flavor_id": "bar"}})
def test_create_router_without_ext_gw_mode_extension(self):
self._nc.create_router.return_value = {"router": "foo_router"}
self._nc.list_extensions.return_value = {"extensions": []}
self._nc.list_networks.return_value = {"networks": [{"id": "ext_id"}]}
router = self.wrapper.create_router()
self._nc.create_router.assert_called_once_with(
{"router": {"name": self.owner.generate_random_name.return_value}})
self.assertEqual(router, "foo_router")
self.wrapper.create_router(external=True, flavor_id="bar")
self._nc.create_router.assert_called_with(
{"router": {"name": self.owner.generate_random_name.return_value,
"external_gateway_info": {"network_id": "ext_id"},
"flavor_id": "bar"}})
def test_create_port(self):
self._nc.create_port.return_value = {"port": "foo_port"}
port = self.wrapper.create_port("foo_net")
self._nc.create_port.assert_called_once_with(
{"port": {"network_id": "foo_net",
"name": self.owner.generate_random_name.return_value}})
self.assertEqual("foo_port", port)
port = self.wrapper.create_port("foo_net", foo="bar")
self.wrapper.client.create_port.assert_called_with(
{"port": {"network_id": "foo_net",
"name": self.owner.generate_random_name.return_value,
"foo": "bar"}})
def test_supports_extension(self):
self._nc.list_extensions.return_value = (
{"extensions": [{"alias": "extension"}]})
self.assertTrue(self.wrapper.supports_extension("extension")[0])
self.wrapper.neutron._cached_supported_extensions = None
self._nc.list_extensions.return_value = (
{"extensions": [{"alias": "extension"}]})
self.assertFalse(self.wrapper.supports_extension("dummy-group")[0])
self.wrapper.neutron._cached_supported_extensions = None
self._nc.list_extensions.return_value = {"extensions": []}
self.assertFalse(self.wrapper.supports_extension("extension")[0])
class FunctionsTestCase(test.TestCase):
def test_wrap(self):
mock_clients = mock.Mock()
config = {"fakearg": "fake"}
owner = Owner()
mock_clients.services.return_value = {"foo": consts.Service.NEUTRON}
wrapper = network.wrap(mock_clients, owner, config)
self.assertIsInstance(wrapper, network.NeutronWrapper)
self.assertEqual(wrapper.owner, owner)
self.assertEqual(wrapper.config, config)
```
#### File: unit/rally_jobs/test_zuul_jobs.py
```python
import os
import re
import yaml
import rally_openstack
from tests.unit import test
class RallyJobsTestCase(test.TestCase):
root_dir = os.path.dirname(os.path.dirname(rally_openstack.__file__))
zuul_jobs_path = os.path.join(root_dir, ".zuul.d")
def setUp(self):
super(RallyJobsTestCase, self).setUp()
with open(os.path.join(self.zuul_jobs_path, "zuul.yaml")) as f:
self.zuul_cfg = yaml.safe_load(f)
self.project_cfg = None
for item in self.zuul_cfg:
if "project" in item:
self.project_cfg = item["project"]
break
if self.project_cfg is None:
self.fail("Cannot detect project section from zuul config.")
@staticmethod
def _parse_job(job):
if isinstance(job, dict):
job_name = list(job)[0]
job_cfg = job[job_name]
return job_name, job_cfg
return job, None
def _check_order_of_jobs(self, pipeline):
jobs = self.project_cfg[pipeline]["jobs"]
specific_jobs = ["rally-dsvm-tox-functional",
"rally-openstack-docker-build",
"rally-task-basic-with-existing-users",
"rally-task-simple-job"]
error_message = (
f"[{pipeline} pipeline] We are trying to display jobs in a "
f"specific order to simplify search and reading. Tox jobs should "
f"go first in alphabetic order. Next several specific jobs are "
f"expected ({', '.join(specific_jobs)}). "
f"Next - all other jobs in alphabetic order."
)
error_message += "\nPlease place '%s' at the position of '%s'."
jobs_names = [self._parse_job(job)[0] for job in jobs]
tox_jobs = sorted(job for job in jobs_names
if job.startswith("rally-tox"))
for i, job in enumerate(tox_jobs):
if job != jobs[i]:
self.fail(error_message % (job, jobs[i]))
for job in specific_jobs:
if job not in jobs_names:
continue
i += 1
if job != jobs_names[i]:
self.fail(error_message % (job, jobs_names[i]))
i += 1
other_jobs = sorted(jobs_names[i: len(jobs_names)])
for j, job in enumerate(other_jobs):
if job != jobs_names[i + j]:
self.fail(error_message % (job, jobs_names[i + j]))
def test_order_of_displaying_jobs(self):
for pipeline in ("check", "gate"):
self._check_order_of_jobs(pipeline=pipeline)
JOB_FILES_PARAMS = {"files", "irrelevant-files"}
def test_job_configs(self):
file_matchers = {}
for pipeline in ("check", "gate"):
for job in self.project_cfg[pipeline]["jobs"]:
job_name, job_cfg = self._parse_job(job)
if job_cfg is None:
continue
if pipeline == "gate":
params = set(job_cfg) - self.JOB_FILES_PARAMS
if params:
self.fail(
f"Invalid parameter(s) for '{job_name}' job at "
f"gate pipeline: {', '.join(params)}.")
for param in self.JOB_FILES_PARAMS:
if param in job_cfg:
for file_matcher in job_cfg[param]:
file_matchers.setdefault(
file_matcher,
{
"matcher": re.compile(file_matcher),
"used_by": []
}
)
file_matchers[file_matcher]["used_by"].append(
{
"pipeline": pipeline,
"job": job_name,
"param": param
}
)
not_matched = set(file_matchers)
for dir_name, _, files in os.walk(self.root_dir):
dir_name = os.path.relpath(dir_name, self.root_dir)
if dir_name in (".tox", ".git"):
continue
for f in files:
full_path = os.path.join(dir_name, f)
for key in list(not_matched):
if file_matchers[key]["matcher"].match(full_path):
not_matched.remove(key)
if not not_matched:
# stop iterating files if no more matchers to check
break
if not not_matched:
# stop iterating files if no more matchers to check
break
for key in not_matched:
user = file_matchers[key]["used_by"][0]
self.fail(
f"'{user['job']}' job configuration for "
f"'{user['pipeline']}' pipeline includes wrong "
f"matcher '{key}' at '{user['param']}'."
)
```
#### File: scenarios/ceilometer/test_events.py
```python
from unittest import mock
from rally import exceptions
from rally_openstack.task.scenarios.ceilometer import events
from tests.unit import test
class CeilometerEventsTestCase(test.ScenarioTestCase):
def setUp(self):
super(CeilometerEventsTestCase, self).setUp()
patch = mock.patch(
"rally_openstack.common.services.identity.identity.Identity")
self.addCleanup(patch.stop)
self.mock_identity = patch.start()
def get_test_context(self):
context = super(CeilometerEventsTestCase, self).get_test_context()
context["admin"] = {"id": "fake_user_id",
"credential": mock.MagicMock()
}
return context
def test_list_events(self):
scenario = events.CeilometerEventsCreateUserAndListEvents(self.context)
scenario._list_events = mock.MagicMock()
scenario.run()
self.mock_identity.return_value.create_user.assert_called_once_with()
scenario._list_events.assert_called_once_with()
def test_list_events_fails(self):
scenario = events.CeilometerEventsCreateUserAndListEvents(self.context)
scenario._list_events = mock.MagicMock(return_value=[])
self.assertRaises(exceptions.RallyException, scenario.run)
self.mock_identity.return_value.create_user.assert_called_once_with()
scenario._list_events.assert_called_once_with()
def test_list_event_types(self):
scenario = events.CeilometerEventsCreateUserAndListEventTypes(
self.context)
scenario._list_event_types = mock.MagicMock()
scenario.run()
self.mock_identity.return_value.create_user.assert_called_once_with()
scenario._list_event_types.assert_called_once_with()
def test_list_event_types_fails(self):
scenario = events.CeilometerEventsCreateUserAndListEventTypes(
self.context)
scenario._list_event_types = mock.MagicMock(return_value=[])
self.assertRaises(exceptions.RallyException, scenario.run)
self.mock_identity.return_value.create_user.assert_called_once_with()
scenario._list_event_types.assert_called_once_with()
def test_get_event(self):
scenario = events.CeilometerEventsCreateUserAndGetEvent(self.context)
scenario._get_event = mock.MagicMock()
scenario._list_events = mock.MagicMock(
return_value=[mock.Mock(message_id="fake_id")])
scenario.run()
self.mock_identity.return_value.create_user.assert_called_once_with()
scenario._list_events.assert_called_with()
scenario._get_event.assert_called_with(event_id="fake_id")
def test_get_event_fails(self):
scenario = events.CeilometerEventsCreateUserAndGetEvent(self.context)
scenario._list_events = mock.MagicMock(return_value=[])
scenario._get_event = mock.MagicMock()
self.assertRaises(exceptions.RallyException, scenario.run)
self.mock_identity.return_value.create_user.assert_called_once_with()
scenario._list_events.assert_called_with()
self.assertFalse(scenario._get_event.called)
```
#### File: scenarios/ceilometer/test_stats.py
```python
from unittest import mock
from rally_openstack.task.scenarios.ceilometer import stats
from tests.unit import test
class CeilometerStatsTestCase(test.ScenarioTestCase):
def test_get_stats(self):
scenario = stats.GetStats(self.context)
scenario._get_stats = mock.MagicMock()
context = {"user": {"tenant_id": "fake", "id": "fake_id"},
"tenant": {"id": "fake_id",
"resources": ["fake_resource"]}}
metadata_query = {"a": "test"}
period = 10
groupby = "user_id"
aggregates = "sum"
scenario.context = context
scenario.run("fake_meter", True, True, True, metadata_query,
period, groupby, aggregates)
scenario._get_stats.assert_called_once_with(
"fake_meter",
[{"field": "user_id", "value": "fake_id", "op": "eq"},
{"field": "project_id", "value": "fake_id", "op": "eq"},
{"field": "resource_id", "value": "fake_resource", "op": "eq"},
{"field": "metadata.a", "value": "test", "op": "eq"}],
10,
"user_id",
"sum"
)
```
#### File: scenarios/gnocchi/test_resource.py
```python
from unittest import mock
from rally_openstack.task.scenarios.gnocchi import resource
from tests.unit import test
class GnocchiResourceTestCase(test.ScenarioTestCase):
def get_test_context(self):
context = super(GnocchiResourceTestCase, self).get_test_context()
context.update({
"admin": {
"user_id": "fake",
"credential": mock.MagicMock()
},
"user": {
"user_id": "fake",
"credential": mock.MagicMock()
},
"tenant": {"id": "fake"}
})
return context
def setUp(self):
super(GnocchiResourceTestCase, self).setUp()
patch = mock.patch(
"rally_openstack.common.services.gnocchi.metric.GnocchiService")
self.addCleanup(patch.stop)
self.mock_metric = patch.start()
def test_create_resource(self):
resource_service = self.mock_metric.return_value
scenario = resource.CreateResource(self.context)
scenario.generate_random_name = mock.MagicMock(return_value="name")
scenario.run(resource_type="foo")
resource_service.create_resource.assert_called_once_with(
"name", resource_type="foo")
def test_create_delete_resource(self):
resource_service = self.mock_metric.return_value
scenario = resource.CreateDeleteResource(self.context)
scenario.generate_random_name = mock.MagicMock(return_value="name")
scenario.run(resource_type="foo")
resource_service.create_resource.assert_called_once_with(
"name", resource_type="foo")
self.assertEqual(1, resource_service.delete_resource.call_count)
```
#### File: scenarios/mistral/test_executions.py
```python
from unittest import mock
from rally_openstack.task.scenarios.mistral import executions
from tests.unit import test
BASE = "rally_openstack.task.scenarios.mistral.executions"
MISTRAL_WBS_BASE = "rally_openstack.task.scenarios.mistral.workbooks"
WB_DEFINITION = """---
version: 2.0
name: wb
workflows:
wf1:
type: direct
tasks:
noop_task:
action: std.noop
wf2:
type: direct
tasks:
noop_task:
action: std.noop
wf3:
type: direct
tasks:
noop_task:
action: std.noop
wf4:
type: direct
tasks:
noop_task:
action: std.noop
"""
WB_DEF_ONE_WF = """---
version: 2.0
name: wb
workflows:
wf1:
type: direct
tasks:
noop_task:
action: std.noop
"""
PARAMS_EXAMPLE = {"env": {"env_param": "env_param_value"}}
INPUT_EXAMPLE = """{"input1": "value1", "some_json_input": {"a": "b"}}"""
WB = type("obj", (object,), {"name": "wb", "definition": WB_DEFINITION})()
WB_ONE_WF = (
type("obj", (object,), {"name": "wb", "definition": WB_DEF_ONE_WF})()
)
class MistralExecutionsTestCase(test.ScenarioTestCase):
@mock.patch("%s.ListExecutions._list_executions" % BASE)
def test_list_executions(self, mock__list_executions):
executions.ListExecutions(self.context).run()
self.assertEqual(1, mock__list_executions.called)
@mock.patch("%s.CreateExecutionFromWorkbook._create_execution" % BASE)
@mock.patch("%s.CreateExecutionFromWorkbook._create_workbook" % BASE,
return_value=WB)
def test_create_execution(self, mock__create_workbook,
mock__create_execution):
executions.CreateExecutionFromWorkbook(self.context).run(WB_DEFINITION)
self.assertEqual(1, mock__create_workbook.called)
self.assertEqual(1, mock__create_execution.called)
@mock.patch("%s.CreateExecutionFromWorkbook._create_execution" % BASE)
@mock.patch("%s.CreateExecutionFromWorkbook._create_workbook" % BASE,
return_value=WB)
def test_create_execution_with_input(self, mock__create_workbook,
mock__create_execution):
executions.CreateExecutionFromWorkbook(self.context).run(
WB_DEFINITION, wf_input=INPUT_EXAMPLE)
self.assertEqual(1, mock__create_workbook.called)
self.assertEqual(1, mock__create_execution.called)
@mock.patch("%s.CreateExecutionFromWorkbook._create_execution" % BASE)
@mock.patch("%s.CreateExecutionFromWorkbook._create_workbook" % BASE,
return_value=WB)
@mock.patch("json.loads", return_value=PARAMS_EXAMPLE)
def test_create_execution_with_params(self, mock_loads,
mock__create_workbook,
mock__create_execution):
executions.CreateExecutionFromWorkbook(self.context).run(
WB_DEFINITION, params=str(PARAMS_EXAMPLE))
self.assertEqual(1, mock_loads.called)
self.assertEqual(1, mock__create_workbook.called)
self.assertEqual(1, mock__create_execution.called)
@mock.patch("%s.CreateExecutionFromWorkbook._create_execution" % BASE)
@mock.patch("%s.CreateExecutionFromWorkbook._create_workbook" % BASE,
return_value=WB)
def test_create_execution_with_wf_name(self, mock__create_workbook,
mock__create_execution):
executions.CreateExecutionFromWorkbook(self.context).run(
WB_DEFINITION, "wf4")
self.assertEqual(1, mock__create_workbook.called)
self.assertEqual(1, mock__create_execution.called)
# we concatenate workbook name with the workflow name in the test
# the workbook name is not random because we mock the method that
# adds the random part
mock__create_execution.assert_called_once_with("wb.wf4", None,)
@mock.patch("%s.CreateExecutionFromWorkbook._delete_execution" % BASE)
@mock.patch("%s.CreateExecutionFromWorkbook._delete_workbook" % BASE)
@mock.patch("%s.CreateExecutionFromWorkbook._create_execution" % BASE)
@mock.patch("%s.CreateExecutionFromWorkbook._create_workbook" % BASE,
return_value=WB)
def test_create_delete_execution(
self, mock__create_workbook, mock__create_execution,
mock__delete_workbook, mock__delete_execution):
executions.CreateExecutionFromWorkbook(self.context).run(
WB_DEFINITION, do_delete=True)
self.assertEqual(1, mock__create_workbook.called)
self.assertEqual(1, mock__create_execution.called)
self.assertEqual(1, mock__delete_workbook.called)
self.assertEqual(1, mock__delete_execution.called)
@mock.patch("%s.CreateExecutionFromWorkbook._delete_execution" % BASE)
@mock.patch("%s.CreateExecutionFromWorkbook._delete_workbook" % BASE)
@mock.patch("%s.CreateExecutionFromWorkbook._create_execution" % BASE)
@mock.patch("%s.CreateExecutionFromWorkbook._create_workbook" % BASE,
return_value=WB)
def test_create_delete_execution_with_wf_name(
self, mock__create_workbook, mock__create_execution,
mock__delete_workbook, mock__delete_execution):
executions.CreateExecutionFromWorkbook(self.context).run(
WB_DEFINITION, "wf4", do_delete=True)
self.assertEqual(1, mock__create_workbook.called)
self.assertEqual(1, mock__create_execution.called)
self.assertEqual(1, mock__delete_workbook.called)
self.assertEqual(1, mock__delete_execution.called)
# we concatenate workbook name with the workflow name in the test
# the workbook name is not random because we mock the method that
# adds the random part
mock__create_execution.assert_called_once_with("wb.wf4", None)
@mock.patch("%s.CreateExecutionFromWorkbook._delete_execution" % BASE)
@mock.patch("%s.CreateExecutionFromWorkbook._delete_workbook" % BASE)
@mock.patch("%s.CreateExecutionFromWorkbook._create_execution" % BASE)
@mock.patch("%s.CreateExecutionFromWorkbook._create_workbook" % BASE,
return_value=WB_ONE_WF)
def test_create_delete_execution_without_wf_name(
self, mock__create_workbook, mock__create_execution,
mock__delete_workbook, mock__delete_execution):
executions.CreateExecutionFromWorkbook(self.context).run(
WB_DEF_ONE_WF, do_delete=True)
self.assertEqual(1, mock__create_workbook.called)
self.assertEqual(1, mock__create_execution.called)
self.assertEqual(1, mock__delete_workbook.called)
self.assertEqual(1, mock__delete_execution.called)
# we concatenate workbook name with the workflow name in the test
# the workbook name is not random because we mock the method that
# adds the random part
mock__create_execution.assert_called_once_with("wb.wf1", None)
```
#### File: scenarios/quotas/test_utils.py
```python
from unittest import mock
from rally_openstack.task.scenarios.quotas import utils
from tests.unit import test
class QuotasScenarioTestCase(test.ScenarioTestCase):
def test__update_quotas(self):
tenant_id = "fake_tenant"
quotas = {
"metadata_items": 10,
"key_pairs": 10,
"injected_file_content_bytes": 1024,
"injected_file_path_bytes": 1024,
"ram": 5120,
"instances": 10,
"injected_files": 10,
"cores": 10,
}
self.admin_clients("nova").quotas.update.return_value = quotas
scenario = utils.QuotasScenario(self.context)
scenario._generate_quota_values = mock.MagicMock(return_value=quotas)
result = scenario._update_quotas("nova", tenant_id)
self.assertEqual(quotas, result)
self.admin_clients("nova").quotas.update.assert_called_once_with(
tenant_id, **quotas)
self._test_atomic_action_timer(scenario.atomic_actions(),
"quotas.update_quotas")
def test__update_quotas_fn(self):
tenant_id = "fake_tenant"
quotas = {
"metadata_items": 10,
"key_pairs": 10,
"injected_file_content_bytes": 1024,
"injected_file_path_bytes": 1024,
"ram": 5120,
"instances": 10,
"injected_files": 10,
"cores": 10,
}
self.admin_clients("nova").quotas.update.return_value = quotas
scenario = utils.QuotasScenario(self.context)
scenario._generate_quota_values = mock.MagicMock(return_value=quotas)
mock_quota = mock.Mock(return_value=quotas)
result = scenario._update_quotas("nova", tenant_id,
quota_update_fn=mock_quota)
self.assertEqual(quotas, result)
self._test_atomic_action_timer(scenario.atomic_actions(),
"quotas.update_quotas")
def test__generate_quota_values_nova(self):
max_quota = 1024
scenario = utils.QuotasScenario(self.context)
quotas = scenario._generate_quota_values(max_quota, "nova")
for k, v in quotas.items():
self.assertGreaterEqual(v, -1)
self.assertLessEqual(v, max_quota)
def test__generate_quota_values_cinder(self):
max_quota = 1024
scenario = utils.QuotasScenario(self.context)
quotas = scenario._generate_quota_values(max_quota, "cinder")
for k, v in quotas.items():
self.assertGreaterEqual(v, -1)
self.assertLessEqual(v, max_quota)
def test__generate_quota_values_neutron(self):
max_quota = 1024
scenario = utils.QuotasScenario(self.context)
quotas = scenario._generate_quota_values(max_quota, "neutron")
for v in quotas.values():
for v1 in v.values():
for v2 in v1.values():
self.assertGreaterEqual(v2, -1)
self.assertLessEqual(v2, max_quota)
def test__delete_quotas(self):
tenant_id = "fake_tenant"
scenario = utils.QuotasScenario(self.context)
scenario._delete_quotas("nova", tenant_id)
self.admin_clients("nova").quotas.delete.assert_called_once_with(
tenant_id)
self._test_atomic_action_timer(scenario.atomic_actions(),
"quotas.delete_quotas")
def test__get_quotas(self):
tenant_id = "fake_tenant"
scenario = utils.QuotasScenario(self.context)
scenario._get_quotas("nova", tenant_id)
self.admin_clients("nova").quotas.get.assert_called_once_with(
tenant_id)
self._test_atomic_action_timer(scenario.atomic_actions(),
"quotas.get_quotas")
```
#### File: scenarios/sahara/test_jobs.py
```python
from unittest import mock
from rally.common import cfg
from rally_openstack.task.scenarios.sahara import jobs
from tests.unit import test
CONF = cfg.CONF
BASE = "rally_openstack.task.scenarios.sahara.jobs"
class SaharaJobTestCase(test.ScenarioTestCase):
def setUp(self):
super(SaharaJobTestCase, self).setUp()
self.context = test.get_test_context()
CONF.set_override("sahara_cluster_check_interval", 0, "openstack")
CONF.set_override("sahara_job_check_interval", 0, "openstack")
@mock.patch("%s.CreateLaunchJob._run_job_execution" % BASE)
def test_create_launch_job_java(self, mock_run_job):
self.clients("sahara").jobs.create.return_value = mock.MagicMock(
id="42")
self.context.update({
"tenant": {
"sahara": {
"image": "test_image",
"mains": ["main_42"],
"libs": ["lib_42"],
"cluster": "cl_42",
"input": "in_42"
}
}
})
scenario = jobs.CreateLaunchJob(self.context)
scenario.generate_random_name = mock.Mock(
return_value="job_42")
scenario.run(job_type="java",
configs={"conf_key": "conf_val"},
job_idx=0)
self.clients("sahara").jobs.create.assert_called_once_with(
name="job_42",
type="java",
description="",
mains=["main_42"],
libs=["lib_42"]
)
mock_run_job.assert_called_once_with(
job_id="42",
cluster_id="cl_42",
input_id=None,
output_id=None,
configs={"conf_key": "conf_val"},
job_idx=0
)
@mock.patch("%s.CreateLaunchJob._run_job_execution" % BASE)
@mock.patch("%s.CreateLaunchJob._create_output_ds" % BASE,
return_value=mock.MagicMock(id="out_42"))
def test_create_launch_job_pig(self,
mock_create_output,
mock_run_job):
self.clients("sahara").jobs.create.return_value = mock.MagicMock(
id="42")
self.context.update({
"tenant": {
"sahara": {
"image": "test_image",
"mains": ["main_42"],
"libs": ["lib_42"],
"cluster": "cl_42",
"input": "in_42"
}
}
})
scenario = jobs.CreateLaunchJob(self.context)
scenario.generate_random_name = mock.Mock(return_value="job_42")
scenario.run(job_type="pig",
configs={"conf_key": "conf_val"},
job_idx=0)
self.clients("sahara").jobs.create.assert_called_once_with(
name="job_42",
type="pig",
description="",
mains=["main_42"],
libs=["lib_42"]
)
mock_run_job.assert_called_once_with(
job_id="42",
cluster_id="cl_42",
input_id="in_42",
output_id="out_42",
configs={"conf_key": "conf_val"},
job_idx=0
)
@mock.patch("%s.CreateLaunchJob._run_job_execution" % BASE)
@mock.patch("%s.CreateLaunchJob.generate_random_name" % BASE,
return_value="job_42")
def test_create_launch_job_sequence(self,
mock__random_name,
mock_run_job):
self.clients("sahara").jobs.create.return_value = mock.MagicMock(
id="42")
self.context.update({
"tenant": {
"sahara": {
"image": "test_image",
"mains": ["main_42"],
"libs": ["lib_42"],
"cluster": "cl_42",
"input": "in_42"
}
}
})
scenario = jobs.CreateLaunchJobSequence(self.context)
scenario.run(
jobs=[
{
"job_type": "java",
"configs": {"conf_key": "conf_val"}
}, {
"job_type": "java",
"configs": {"conf_key2": "conf_val2"}
}])
jobs_create_call = mock.call(name="job_42",
type="java",
description="",
mains=["main_42"],
libs=["lib_42"])
self.clients("sahara").jobs.create.assert_has_calls(
[jobs_create_call, jobs_create_call])
mock_run_job.assert_has_calls([
mock.call(job_id="42",
cluster_id="cl_42",
input_id=None,
output_id=None,
configs={"conf_key": "conf_val"},
job_idx=0),
mock.call(job_id="42",
cluster_id="cl_42",
input_id=None,
output_id=None,
configs={"conf_key2": "conf_val2"},
job_idx=1)
])
@mock.patch("%s.CreateLaunchJob.generate_random_name" % BASE,
return_value="job_42")
@mock.patch("%s.CreateLaunchJobSequenceWithScaling"
"._scale_cluster" % BASE)
@mock.patch("%s.CreateLaunchJob._run_job_execution" % BASE)
def test_create_launch_job_sequence_with_scaling(
self,
mock_run_job,
mock_create_launch_job_sequence_with_scaling__scale_cluster,
mock_create_launch_job_generate_random_name
):
self.clients("sahara").jobs.create.return_value = mock.MagicMock(
id="42")
self.clients("sahara").clusters.get.return_value = mock.MagicMock(
id="cl_42", status="active")
self.context.update({
"tenant": {
"sahara": {
"image": "test_image",
"mains": ["main_42"],
"libs": ["lib_42"],
"cluster": "cl_42",
"input": "in_42"
}
}
})
scenario = jobs.CreateLaunchJobSequenceWithScaling(self.context)
scenario.run(
jobs=[
{
"job_type": "java",
"configs": {"conf_key": "conf_val"}
}, {
"job_type": "java",
"configs": {"conf_key2": "conf_val2"}
}],
deltas=[1, -1])
jobs_create_call = mock.call(name="job_42",
type="java",
description="",
mains=["main_42"],
libs=["lib_42"])
self.clients("sahara").jobs.create.assert_has_calls(
[jobs_create_call, jobs_create_call])
je_0 = mock.call(job_id="42", cluster_id="cl_42", input_id=None,
output_id=None, configs={"conf_key": "conf_val"},
job_idx=0)
je_1 = mock.call(job_id="42", cluster_id="cl_42", input_id=None,
output_id=None,
configs={"conf_key2": "conf_val2"}, job_idx=1)
mock_run_job.assert_has_calls([je_0, je_1, je_0, je_1, je_0, je_1])
``` |
{
"source": "jogeorg/python-docs-hello-world",
"score": 3
} |
#### File: jogeorg/python-docs-hello-world/app.py
```python
from flask import Flask, render_template, request
app = Flask(__name__)
@app.route("/")
def home():
return render_template("home.html")
@app.route('/form')
def form():
return render_template('form.html')
@app.route('/data/', methods = ['POST', 'GET'])
def data():
if request.method == 'GET':
return f"The URL /data is accessed directly. Try going to '/form' to submit form"
if request.method == 'POST':
form_data = request.form
return render_template('data.html',form_data = form_data)
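# --- Editor's illustrative sketch, not part of the original sample ---
# A minimal local-run entry point is assumed here so the routes above can be
# exercised with `python app.py`; hosted deployments (e.g. Azure App Service,
# per the repository name) may start the app through a WSGI server such as
# gunicorn instead, in which case this block is simply ignored.
if __name__ == '__main__':
    # Flask's built-in development server; debug=True enables auto-reload.
    app.run(host='127.0.0.1', port=5000, debug=True)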
``` |
{
"source": "jogepari/crossvalmodel",
"score": 3
} |
#### File: crossvalmodel/crossvalmodel/crossvalmodel.py
```python
import pandas as pd
import numpy as np
import sklearn
from tqdm.auto import tqdm
import copy
import datetime
import scipy
import inspect
__all__ = [
'CrossValModel',
'CrossValRegressor',
'CrossValClassifier',
]
class CrossValModel:
"""
Cross-validation wrapper preserving trained models for prediction.
    Base class, to be subclassed.
Use CrossValRegressor and CrossValClassifier instead.
"""
def __init__(self, base_estimator, cv_split, verbosity):
self.base_estimator = copy.deepcopy(base_estimator)
self.cv_split = cv_split
self.verbosity = verbosity
self._init_attributes()
def _init_attributes(self):
self.is_fit = False
self.models = []
self.oof_res_df = pd.DataFrame()
self.oof_proba_df = pd.DataFrame()
self.best_iteration_ = None
def fit(self, X, y, *data_args, data_wrapper=None,
eval_training_set=False, **base_fit_kwargs):
"""
Cross-validate: fit several models on data according to splits.
Parameters
----------
X, y: array-like, compatible with sklearn-like splitter
*data_args : array-like, compatible with sklearn-like splitter
additional fit data parameters, e.g. weights.
data_wrapper : callable, optional
applied after splitting to [X, y] + list(data_args)
e.g. for catboost:
lambda x, y, w: Pool(x, y, weight=w, cat_features = cat_feats)
If None (default), models receive data for fitting as
(X, y, *data_args)
eval_training_set : bool, optional
if True, adds train part of each split to eval_set list
**base_fit_kwargs: kwargs to pass to base_estimator's fit method
e.g. (verbose=100, plot=True)
Returns
-------
model: CrossValRegressor or CrossValClassifier
"""
self._init_attributes()
        # drop any outside eval set because it will be built from cv_split
base_fit_kwargs.pop('eval_set', None)
self._alert('base_estimator fitting kwargs:', base_fit_kwargs)
try:
cvm_splits = self.cv_split.split(X, y)
n_splits = self.cv_split.get_n_splits()
except AttributeError:
cvm_splits = self.cv_split
n_splits = len(cvm_splits)
fit_signature = inspect.signature(self.base_estimator.fit)
provide_eval_set = 'eval_set' in fit_signature.parameters
data = [X, y] + list(data_args)
for model_id, (train_ids, val_ids) in enumerate(tqdm(cvm_splits, total=n_splits)):
self._alert(f'\n{datetime.datetime.now()} Fold {model_id}, getting train and val sets')
# pandas/numpy indexing
data_tr, data_val = [], []
for d in data:
d_tr, d_v = (d.iloc[train_ids], d.iloc[val_ids]) if \
isinstance(d, pd.core.generic.NDFrame) else \
(d[train_ids], d[val_ids])
data_tr.append(d_tr)
data_val.append(d_v)
(X_tr, _), (X_v, y_v) = data_tr[:2], data_val[:2]
self._alert('train and val shapes:', X_tr.shape, X_v.shape)
if data_wrapper is not None:
data_tr = data_wrapper(*data_tr)
data_val = data_wrapper(*data_val)
else:
data_tr, data_val = map(tuple, (data_tr, data_val))
self._fit_single_split(
model_id, data_tr, data_val, val_ids, X_v, y_v,
provide_eval_set, eval_training_set,
**base_fit_kwargs)
self.oof_res_df.sort_values(by='idx_split',
ignore_index=True, inplace=True)
self.oof_proba_df.sort_values(by='idx_split',
ignore_index=True, inplace=True)
try:
self.best_iteration_ = np.mean([m.best_iteration_ for m in self.models])
except AttributeError:
pass
self.is_fit = True
return self
def _fit_single_split(self, model_id, data_tr, data_val, val_ids, X_v, y_v,
provide_eval_set, eval_training_set,
**base_fit_kwargs):
est = copy.deepcopy(self.base_estimator)
self._alert(datetime.datetime.now(), 'fitting')
fold_fit_kwargs = base_fit_kwargs.copy()
if provide_eval_set:
eval_set = [data_tr, data_val] if eval_training_set else [data_val]
fold_fit_kwargs['eval_set'] = eval_set
if isinstance(data_tr, (tuple, list)):
data_shapes = [d.shape if hasattr(d, 'shape') else '???'
for d in data_tr]
self._alert(f'fit tuple of len: {len(data_tr)}, shapes:',
*data_shapes)
est.fit(*data_tr, **fold_fit_kwargs)
else:
self._alert(f'fit {type(data_tr)}')
est.fit(data_tr, **fold_fit_kwargs)
self._alert(datetime.datetime.now(), 'fit over')
self.models.append(est)
fold_res = pd.DataFrame(data={'idx_split': val_ids})
for data_obj in (y_v, X_v):
if isinstance(data_obj, pd.core.generic.NDFrame):
fold_res['idx_orig'] = data_obj.index
break
fold_res = fold_res.assign(model_id=model_id,
true=np.array(y_v))
fold_probas = fold_res.loc[:, :'true']
try:
# classification with probability
y_v_proba = est.predict_proba(X_v)
fold_res['pred'] = self.models[0].classes_[np.argmax(y_v_proba, axis=-1)]
if y_v_proba.shape[1] <= 2:
fold_res['proba'] = y_v_proba[:, -1]
else:
fold_res['proba'] = y_v_proba.max(axis=-1)
tmp_probas_df = pd.DataFrame(
data=y_v_proba,
columns=['pr_' + str(ci) for ci in range(y_v_proba.shape[-1])],
index=fold_res.index,
)
fold_probas = pd.concat((fold_probas, tmp_probas_df), axis=1)
self._alert(datetime.datetime.now(), 'proba over')
except AttributeError:
# regression and classification w/o probability
y_v_pred = est.predict(X_v)
fold_res['pred'] = np.array(y_v_pred)
self._alert(datetime.datetime.now(), 'predict over')
if self.oof_res_df.empty:
self.oof_res_df.reindex(columns=fold_res.columns)
self.oof_proba_df.reindex(columns=fold_probas.columns)
self.oof_res_df = self.oof_res_df.append(fold_res, ignore_index=True)
self.oof_proba_df = self.oof_proba_df.append(fold_probas, ignore_index=True)
def _alert(self, *message, alert_level=1, **kwargs):
if self.verbosity >= alert_level:
print(*message, **kwargs)
def get_oof_predictions(self):
"""
Get OOF probabilities for metric calculation.
Returns
-------
Tuple (oof_true, oof_pred) to pass into sklearn metrics, e.g.:
mean_squared_error(*cvm_reg.get_oof_predictions())
"""
return (self.oof_res_df['true'], self.oof_res_df['pred'])
def get_params(self, **kwargs):
try:
return self.base_estimator.get_params(**kwargs)
except AttributeError:
self._alert('base_estimator has no "get_params" method')
def set_params(self, **params):
self.base_estimator.set_params(**params)
class CrossValRegressor(CrossValModel):
def __init__(self, base_estimator, cv_split, verbosity=0):
"""
Cross-validation wrapper preserving trained regressors for prediction.
Parameters
----------
base_estimator : model with sklearn-like API
cv_split : either sklearn-like splitter (e.g. KFold())
or iterable of indices
verbosity : bool or int
0 - silent, 1 and above - debugging alerts
"""
super().__init__(base_estimator, cv_split, verbosity)
self.__name__ = 'CrossValRegressor'
def predict(self, X):
"""
        Predict regression for X: simple mean of each model's prediction.
Parameters
----------
X : array-like, same features as X passed to fit method.
Returns
-------
y: ndarray
Predicted values - np.mean of predictions by all models.
"""
if not self.is_fit:
raise sklearn.exceptions.NotFittedError()
all_models_pred = [model.predict(X) for model in self.models]
return np.stack(all_models_pred, axis=-1).mean(axis=-1, keepdims=False)
class CrossValClassifier(CrossValModel):
def __init__(self, base_estimator, cv_split, verbosity=0):
"""
Cross-validation wrapper preserving trained classifiers for prediction.
Parameters
----------
base_estimator : model with sklearn-like API
cv_split : either sklearn-like splitter (e.g. KFold())
or iterable of indices
verbosity : bool or int
0 - silent, 1 and above - debugging alerts
"""
super().__init__(base_estimator, cv_split, verbosity)
self.__name__ = 'CrossValClassifier'
def predict_proba(self, X):
"""
Predict class probabilities for X: mean of each model's predict_proba.
Parameters
----------
X : array-like, same features as X passed to fit method.
Returns
-------
p: ndarray
Predicted values - np.mean of predictions by all models.
"""
if not self.is_fit:
raise sklearn.exceptions.NotFittedError()
all_models_proba = [model.predict_proba(X) for model in self.models]
return np.stack(all_models_proba, axis=-1).mean(axis=-1)
def predict(self, X, calc_probas=True):
"""
Predict class for X.
Parameters
----------
X : array-like, same features as X passed to fit method.
calc_probas : bool, optional
If True, predicts class with the largest average probability output
by all models.
If False, just takes mode of all predictions.
Returns
-------
y: ndarray
Predicted class.
"""
if not self.is_fit:
raise sklearn.exceptions.NotFittedError()
if calc_probas:
probas = self.predict_proba(X)
# probably might work wrong if models get different label sets
return self.models[0].classes_[np.argmax(probas, axis=-1)]
else:
all_models_preds = [model.predict(X) for model in self.models]
return scipy.stats.mode(np.stack(all_models_preds, axis=-1), axis=1)[0]
def get_oof_proba(self, squeeze_binary=True):
"""
Get OOF probabilities for metric calculation.
Parameters
----------
squeeze_binary : bool, optional
For binary classification, return proba just for positive class.
Returns
-------
Tuple (oof_true, oof_proba) to pass into sklearn metrics, e.g.:
roc_auc_score(*cvm_clf.get_oof_proba())
"""
oof_proba = self.oof_proba_df.loc[:, 'pr_0':]
if oof_proba.shape[1] == 2 and squeeze_binary:
oof_proba = oof_proba['pr_1']
return self.oof_proba_df['true'], oof_proba
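# --- Editor's hedged usage sketch, not part of the original module ---
# A minimal end-to-end example under stated assumptions: an sklearn-style
# estimator and splitter are available, and the installed pandas still
# provides DataFrame.append (pandas < 2.0), which the class above relies on.
# The names X_demo, y_demo and cvm_reg are illustrative placeholders only.
if __name__ == '__main__':
    from sklearn.datasets import make_regression
    from sklearn.linear_model import Ridge
    from sklearn.metrics import mean_squared_error
    from sklearn.model_selection import KFold

    X_arr, y_arr = make_regression(n_samples=200, n_features=5, noise=0.1,
                                   random_state=0)
    X_demo, y_demo = pd.DataFrame(X_arr), pd.Series(y_arr)

    # One Ridge model is fit per KFold split; all models are kept so that
    # predict() can average their outputs later.
    cvm_reg = CrossValRegressor(Ridge(),
                                KFold(n_splits=3, shuffle=True, random_state=0))
    cvm_reg.fit(X_demo, y_demo)

    # Out-of-fold metric, then an averaged prediction from all fold models.
    print("OOF MSE:", mean_squared_error(*cvm_reg.get_oof_predictions()))
    print("Averaged prediction on 5 rows:", cvm_reg.predict(X_demo.head()))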
``` |
{
"source": "jogerh/com_samples",
"score": 3
} |
#### File: com_samples/PyComTests/test_examples.py
```python
import unittest
import comtypes
from comtypes.client import GetModule, CreateObject
# To be able to use declared interfaces,
# we have to generate wrapper code from .tlb file, or from .dll
GetModule("..\Build\Output\Interfaces.tlb")
# Simple tests to demonstrate how COM classes can be used from python.
class Test_Examples(unittest.TestCase):
def test_example_how_to_use_managed_server_in_python(self):
# From prog id create IPetShop, this interface is declared in Interfaces\IPetShop.idl
# The implementation is in ManagedServer\PetShop.cs
shop = CreateObject("ManagedServer.PetShop.1", interface = comtypes.gen.Interfaces.IPetShop)
# Like in .Net, all COM related code is wrapped, so all
# arguments marked [out] or [out, retval] in the IDL are returned from a successful method call
# See https://pythonhosted.org/comtypes/#creating-and-accessing-com-objects for details
address = shop.GetAddress()
# Add asserts to ensure that return address is correct according to implementation
assert address.Street == "Suhms gate"
assert address.PostalCode == "0363"
        assert address.City == "Oslo"
def test_example_how_to_use_atl_server_in_python(self):
# From class id create IHen, this interface is declared in Interfaces\IHen.idl
# The implementation is in AtlServer\AtlHen.cpp
hen = CreateObject("{9eedb943-b267-4f0c-b8b6-59fe3851f239}", interface = comtypes.gen.Interfaces.IHen)
hen.Cluck()
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jogerj/reddit-purge",
"score": 2
} |
#### File: jogerj/reddit-purge/run.py
```python
import multiprocessing as mp
import praw
import prawcore.exceptions
import refresh_token
from purge_reddit import PurgeReddit
import time
#### EDIT YOUR DETAILS BELOW ####
# Your login details
username = '' # optional
password = '' # optional
user_agent = 'PurgeBot' # Bot name
client_id = '##############' # '14 char client ID'
client_secret = '##############################' # '30 char client secret'
# Purge options
## Number of recent comments/submissions to delete.
## Set to None if no limits (purge ALL comments/submissions)
## Set to 10 will purge recent 10, etc.
limitation = None
## Only purge posts with score <= this number. Set to None if no threshold
max_score = None
## Set to False to not purge comments/submissions
purge_comments = True
purge_submissions = True
## Edit comments/submissions to this before deletion. This prevents archiving.
redact_msg = "[redacted]"
## Set to True to only edit posts to `redact_msg` without deleting them.
redact_only = False
## Use multiprocessing. Set to False if problems occur
use_multiprocessing = True
## Show comment body
show_comment = False
## Show submission titles
show_title = False
## Start purge from controversial first instead of newest
controversial_first = True
## Do not prompt at all. Use with EXTRA caution!
no_prompt = False
## Debug mode
debug = False
## Whitelist e.g.`['id1', 'id2', 'id3']`
comment_whitelist = []
submissions_whitelist = []
#### DO NOT EDIT BELOW ####
options = {'controversial_first': controversial_first,
'debug': debug,
'limitation': limitation,
'redact_msg': redact_msg,
'redact_only': redact_only,
'max_score': max_score,
'show_title': show_title,
'show_comment': show_comment,
'comment_whitelist': comment_whitelist,
'submissions_whitelist': submissions_whitelist}
def save_log(log_type: str, entries: list):
filename = f"log/{log_type} {time.asctime().replace(':', '.')}.log"
try:
f = open(filename, "w")
for entry in entries:
f.write(entry + '\n')
f.close()
except IOError:
print(f"Could not write to {filename}")
if __name__ == '__main__':
# Initialize reddit
if password != '' and username != '':
# use username and password
reddit = praw.Reddit(
client_id=client_id,
client_secret=client_secret,
user_agent=user_agent,
username=username,
password=password,
redirect_uri="http://localhost:8080")
else:
# use OAuth
reddit = praw.Reddit(
client_id=client_id,
client_secret=client_secret,
user_agent=user_agent,
redirect_uri="http://localhost:8080")
# Check authentication key
print("Checking authentication...")
if client_id == '##############' \
            or client_secret == '##############################':
print("Missing client ID/secret key!")
exit()
elif len(client_id) != 14 or len(client_secret) != 30:
print("Failed to authenticate!",
"Your client ID/secret key isn't the correct length.")
print("Please check your configuration again!")
exit()
try:
# Test authentication
if reddit.user.me() is None:
refresh_token.authorize_token(reddit)
except prawcore.exceptions.ResponseException as exc:
if f'{exc}'.find('401') != -1:
# 401 error, invalid key ?
print("ERROR 401: There's a problem with your authentication key."
+ "\nPlease check your configuration again!")
else:
print("\nResponseException:", exc)
if debug:
raise exc
exit()
except prawcore.exceptions.OAuthException:
print("Failed to authenticate credentials! Possible causes:")
print("1. Wrong username/password.")
print("2. 2FA is enabled.")
print("3. Invalid client ID/secret key.")
try:
refresh_token.authorize_token(reddit)
except refresh_token.TokenException as exc:
print("TokenException:", exc)
if debug:
raise exc
exit()
except refresh_token.TokenException as exc:
print("TokenException:", exc)
print("Could not authorize token!")
exit()
# Authkey all good! Check total to purge and confirm
pr = PurgeReddit(reddit, options)
comment_count = 0
submission_count = 0
if purge_comments:
print("Calculating number of comments, please wait...")
comment_count = pr.get_comment_total()
if comment_count == 0:
print("Found no comments to delete.")
purge_comments = False
elif not no_prompt:
confirm = input(f"{comment_count} comments will be "
+ ("redacted" if redact_only else "deleted")
+ ". Are you sure? [y/N] ")
if not confirm.lower().startswith("y"):
print("Comment purge aborted.")
purge_comments = False
if purge_submissions:
print("Calculating number of submissions, please wait...")
submission_count = pr.get_submission_total()
if submission_count == 0:
print("Found no submissions to delete.")
purge_submissions = False
elif not no_prompt:
confirm = input(f"{submission_count} submissions will be "
+ ("redacted" if redact_only else "deleted")
+ ". Are you sure? [y/N] ")
if not confirm.lower().startswith("y"):
print("Submission purge aborted.")
purge_submissions = False
if not (purge_submissions or purge_comments):
print("Nothing to purge today. Have a nice day!")
exit()
# Begin purge
while True:
if use_multiprocessing:
# Init multiprocessing and start each thread
skipped_comments_queue = mp.Queue()
skipped_submissions_queue = mp.Queue()
if purge_comments:
p1 = mp.Process(target=pr.purge_comments,
args=(comment_count, skipped_comments_queue,))
p1.start()
time.sleep(2) # delay to avoid errors
if purge_submissions:
p2 = mp.Process(target=pr.purge_submissions,
args=(submission_count,
skipped_submissions_queue,))
p2.start()
# Get skipped posts
if purge_comments:
skipped_comments = skipped_comments_queue.get()
p1.join()
                if len(skipped_comments) > 0:
skipped_id = list(map(
lambda c:
f"{c.submission}/{c} in {c.subreddit}",
skipped_comments))
print(f"Comments not purged:\n", skipped_id)
save_log('skipped_comments', skipped_id)
else:
print("All comments purged!")
if purge_submissions:
skipped_submissions = skipped_submissions_queue.get()
p2.join()
if len(skipped_submissions) > 0:
skipped_id = list(map(lambda s: f'{s} in {s.subreddit}',
skipped_submissions))
print("Submissions not purged:\n", skipped_id)
save_log('skipped_submissions', skipped_id)
else:
print("All submissions purged!")
else:
# Serial method
serial_msg = ""
if purge_comments:
skipped_comments = pr.purge_comments(comment_count)
if len(skipped_comments) > 0:
skipped_id = list(map(
lambda c:
f"{c.submission}/{c} in {c.subreddit}",
skipped_comments))
serial_msg += f"Comments not purged:\n{skipped_id}"
save_log('skipped_comments', skipped_id)
else:
serial_msg += "All comments purged!"
if purge_submissions:
skipped_submissions = pr.purge_submissions(submission_count)
if len(skipped_submissions) > 0:
skipped_id = list(map(lambda s: f'{s} in {s.subreddit}',
skipped_submissions))
serial_msg += f"Submissions not purged:\n{skipped_id}"
save_log('skipped_submissions', skipped_id)
else:
serial_msg += "All submissions purged!"
print(serial_msg)
# if there were more than 1000, prompt to delete more
if (submission_count >= 1000 or comment_count >= 1000) \
and not redact_only:
if not no_prompt:
confirm = input("There were more than 1000 submissions/comments!",
"Delete more? [y/N] ")
if no_prompt or confirm.lower().startswith('y'):
if limitation is not None:
limitation -= 1000
print("Calculating remaining submissions/comments...")
if purge_comments:
comment_count = pr.get_comment_total()
print(f"{comment_count} remaining...")
if purge_submissions:
submission_count = pr.get_submission_total()
print(f"{submission_count} remaining...")
else:
break
print("Done!")
``` |
{
"source": "Jogesh-Click2Cloud/AliCloudAnsibleFootmark",
"score": 2
} |
#### File: footmark/slb/connection.py
```python
import warnings
import six
import time
import json
from footmark.connection import ACSQueryConnection
from footmark.slb.regioninfo import RegionInfo
from footmark.slb.securitygroup import SecurityGroup
from footmark.exception import SLBResponseError
class SLBConnection(ACSQueryConnection):
SDKVersion = '2014-05-15'
DefaultRegionId = 'cn-hangzhou'
DefaultRegionName = u'杭州'.encode("UTF-8")
ResponseError = SLBResponseError
def __init__(self, acs_access_key_id=None, acs_secret_access_key=None,
region=None, sdk_version=None, security_token=None):
"""
Init method to create a new connection to SLB.
"""
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionId)
self.region = region
if sdk_version:
self.SDKVersion = sdk_version
self.SLBSDK = 'aliyunsdkslb.request.v' + self.SDKVersion.replace('-', '')
super(SLBConnection, self).__init__(acs_access_key_id,
acs_secret_access_key,
self.region, self.SLBSDK, security_token)
# C2C: Method added to create server load balancer
def create_load_balancer(self, region_id, name=None, address_type=None, internet_charge_type=None, bandwidth=None,
ids=None, vswitch_id=None, zones=None, listeners=None, helth_checkup=None, stickness=None):
"""
:type region_id: str
:param region_id: The instance's Region ID
:type name: str
:param name: Name of the server load balancer
:type address_type: str
:param address_type: Address type. value: internet or intranet
:type internet_charge_type: str
:param internet_charge_type: Charging mode for the public network instance
Value: paybybandwidth or paybytraffic
:type bandwidth: int
:param bandwidth: Bandwidth peak of the public network instance charged per fixed bandwidth
:type ids: str
:param ids: A client token to ensure idempotence of the request
:type vswitch_id: str
:param vswitch_id: The vswitch id of the VPC instance. This option is invalid if address_type parameter is
provided as internet.
:return:
"""
params = {}
results = []
self.build_list_params(params, region_id, 'RegionId')
if name:
self.build_list_params(params, name, 'LoadBalancerName')
if address_type:
self.build_list_params(params, address_type, 'AddressType')
if internet_charge_type:
self.build_list_params(params, internet_charge_type, 'InternetChargeType')
if bandwidth:
self.build_list_params(params, bandwidth, 'Bandwidth')
if ids:
self.build_list_params(params, ids, 'ClientToken')
if vswitch_id:
self.build_list_params(params, vswitch_id, 'VSwitchId')
if zones:
for idx, val in enumerate(zones):
if idx == 0:
self.build_list_params(params, val, 'MasterZoneId')
else:
self.build_list_params(params, val, 'SlaveZoneId')
try:
results = self.get_status('CreateLoadBalancer', params)
except Exception as ex:
msg, stack = ex.args
results.append("Create Load Balancer Error:" + str(msg) + " " + str(stack))
else:
slb_id=str(results[u'LoadBalancerId'])
if slb_id:
for listener in listeners:
if listener:
if 'protocol' in listener:
if listener['protocol'] in ["HTTP", "http"]:
listener_result = self.create_load_balancer_http_listener(slb_id, listener,
helth_checkup, stickness)
if listener_result:
results.update({"http_listener_result": listener_result})
if listener['protocol'] in ["HTTPS", "https"]:
listener_result = self.create_load_balancer_https_listener(slb_id, listener,
helth_checkup, stickness)
if listener_result:
results.update({"https_listener_result": listener_result})
if listener['protocol'] in ["TCP", "tcp"]:
listener_result = self.create_load_balancer_tcp_listener(slb_id, listener,
helth_checkup)
if listener_result:
results.update({"tcp_listener_result": listener_result})
if listener['protocol'] in ["UDP", "udp"]:
listener_result = self.create_load_balancer_udp_listener(slb_id, listener,
helth_checkup)
if listener_result:
results.update({"udp_listener_result": listener_result})
return results
# C2C: Method added to create load balancer HTTP listener
def create_load_balancer_http_listener(self, slb_id, listener, helth_checkup, stickness):
"""
:param listener:
:param helth_checkup:
:param stickness:
:return:
"""
params = {}
results = []
if listener:
self.build_list_params(params, slb_id, 'LoadBalancerId')
if 'load_balancer_port' in listener:
self.build_list_params(params, listener['load_balancer_port'], 'ListenerPort')
if 'bandwidth' in listener:
self.build_list_params(params, listener['bandwidth'], 'Bandwidth')
#if 'instance_protocol' in listener:
# self.build_list_params(params, listener['instance_protocol'], '')
if 'instance_port' in listener:
self.build_list_params(params, listener['instance_port'], 'BackendServerPort')
#if 'proxy_protocol' in listener:
# self.build_list_params(params, listener['proxy_protocol'], '')
if helth_checkup:
if 'health_check' in helth_checkup:
self.build_list_params(params, helth_checkup['health_check'], 'HealthCheck')
if 'ping_port' in helth_checkup:
self.build_list_params(params, helth_checkup['ping_port'], 'HealthCheckConnectPort')
if 'ping_path' in helth_checkup:
self.build_list_params(params, helth_checkup['ping_path'], 'HealthCheckURI')
if 'response_timeout' in helth_checkup:
self.build_list_params(params, helth_checkup['response_timeout'], 'HealthCheckTimeout')
if 'interval' in helth_checkup:
self.build_list_params(params, helth_checkup['interval'], 'HealthCheckInterval')
if 'unhealthy_threshold' in helth_checkup:
self.build_list_params(params, helth_checkup['unhealthy_threshold'], 'UnhealthyThreshold')
if 'healthy_threshold' in helth_checkup:
self.build_list_params(params, helth_checkup['healthy_threshold'], 'HealthyThreshold')
if stickness:
if 'enabled' in stickness:
self.build_list_params(params, stickness['enabled'], 'StickySession')
if 'type' in stickness:
self.build_list_params(params, stickness['type'], 'StickySessionType')
#if 'expiration' in stickness:
# self.build_list_params(params, stickness['expiration'], '')
if 'cookie' in stickness:
self.build_list_params(params, stickness['cookie'], 'Cookie')
if 'cookie_timeout' in stickness:
self.build_list_params(params, stickness['cookie_timeout'], 'CookieTimeout')
try:
results = self.get_status('CreateLoadBalancerHTTPListener', params)
except Exception as ex:
msg, stack = ex.args
results.append("Create Load Balancer HTTP Listener Error:" + str(msg) + " " + str(stack))
return results
# C2C: Method added to create load balancer HTTPS listener
def create_load_balancer_https_listener(self, slb_id, listener, helth_checkup, stickness):
"""
:param listener:
:param helth_checkup:
:param stickness:
:return:
"""
params = {}
results = []
if listener:
self.build_list_params(params, slb_id, 'LoadBalancerId')
if 'load_balancer_port' in listener:
self.build_list_params(params, listener['load_balancer_port'], 'ListenerPort')
if 'bandwidth' in listener:
self.build_list_params(params, listener['bandwidth'], 'Bandwidth')
#if 'instance_protocol' in listener:
# self.build_list_params(params, listener['instance_protocol'], '')
if 'instance_port' in listener:
self.build_list_params(params, listener['instance_port'], 'BackendServerPort')
#if 'proxy_protocol' in listener:
# self.build_list_params(params, listener['proxy_protocol'], '')
if 'ssl_certificate_id' in listener:
self.build_list_params(params, listener['ssl_certificate_id'], 'ServerCertificateId')
if helth_checkup:
if 'health_check' in helth_checkup:
self.build_list_params(params, helth_checkup['health_check'], 'HealthCheck')
if 'ping_port' in helth_checkup:
self.build_list_params(params, helth_checkup['ping_port'], 'HealthCheckConnectPort')
if 'ping_path' in helth_checkup:
self.build_list_params(params, helth_checkup['ping_path'], 'HealthCheckURI')
if 'response_timeout' in helth_checkup:
self.build_list_params(params, helth_checkup['response_timeout'], 'HealthCheckTimeout')
if 'interval' in helth_checkup:
self.build_list_params(params, helth_checkup['interval'], 'HealthCheckInterval')
if 'unhealthy_threshold' in helth_checkup:
self.build_list_params(params, helth_checkup['unhealthy_threshold'], 'UnhealthyThreshold')
if 'healthy_threshold' in helth_checkup:
self.build_list_params(params, helth_checkup['healthy_threshold'], 'HealthyThreshold')
if stickness:
if 'enabled' in stickness:
self.build_list_params(params, stickness['enabled'], 'StickySession')
if 'type' in stickness:
self.build_list_params(params, stickness['type'], 'StickySessionType')
#if 'expiration' in stickness:
# self.build_list_params(params, stickness['expiration'], '')
if 'cookie' in stickness:
self.build_list_params(params, stickness['cookie'], 'Cookie')
if 'cookie_timeout' in stickness:
self.build_list_params(params, stickness['cookie_timeout'], 'CookieTimeout')
try:
results = self.get_status('CreateLoadBalancerHTTPSListener', params)
except Exception as ex:
msg, stack = ex.args
results.append("Create Load Balancer HTTPS Listener Error:" + str(msg) + " " + str(stack))
return results
# C2C: Method added to create load balancer TCP listener
def create_load_balancer_tcp_listener(self, slb_id, listener, helth_checkup):
"""
:param listener:
:param helth_checkup:
:return:
"""
params = {}
results = []
if listener:
self.build_list_params(params, slb_id, 'LoadBalancerId')
if 'load_balancer_port' in listener:
self.build_list_params(params, listener['load_balancer_port'], 'ListenerPort')
if 'bandwidth' in listener:
self.build_list_params(params, listener['bandwidth'], 'Bandwidth')
# if 'instance_protocol' in listener:
# self.build_list_params(params, listener['instance_protocol'], '')
if 'instance_port' in listener:
self.build_list_params(params, listener['instance_port'], 'BackendServerPort')
# if 'proxy_protocol' in listener:
# self.build_list_params(params, listener['proxy_protocol'], '')
if helth_checkup:
if 'ping_port' in helth_checkup:
self.build_list_params(params, helth_checkup['ping_port'], 'HealthCheckConnectPort')
if 'response_timeout' in helth_checkup:
self.build_list_params(params, helth_checkup['response_timeout'], 'HealthCheckConnectTimeout')
if 'interval' in helth_checkup:
self.build_list_params(params, helth_checkup['interval'], 'HealthCheckInterval')
if 'unhealthy_threshold' in helth_checkup:
self.build_list_params(params, helth_checkup['unhealthy_threshold'], 'UnhealthyThreshold')
if 'healthy_threshold' in helth_checkup:
self.build_list_params(params, helth_checkup['healthy_threshold'], 'HealthyThreshold')
try:
results = self.get_status('CreateLoadBalancerTCPListener', params)
except Exception as ex:
msg, stack = ex.args
results.append("Create Load Balancer TCP Listener Error:" + str(msg) + " " + str(stack))
return results
# C2C: Method added to create load balancer UDP listener
def create_load_balancer_udp_listener(self, slb_id, listener, helth_checkup):
"""
:param listener:
:param helth_checkup:
:return:
"""
params = {}
results = []
if listener:
self.build_list_params(params, slb_id, 'LoadBalancerId')
if 'load_balancer_port' in listener:
self.build_list_params(params, listener['load_balancer_port'], 'ListenerPort')
if 'bandwidth' in listener:
self.build_list_params(params, listener['bandwidth'], 'Bandwidth')
# if 'instance_protocol' in listener:
# self.build_list_params(params, listener['instance_protocol'], '')
if 'instance_port' in listener:
self.build_list_params(params, listener['instance_port'], 'BackendServerPort')
# if 'proxy_protocol' in listener:
# self.build_list_params(params, listener['proxy_protocol'], '')
if helth_checkup:
if 'ping_port' in helth_checkup:
self.build_list_params(params, helth_checkup['ping_port'], 'HealthCheckConnectPort')
if 'response_timeout' in helth_checkup:
self.build_list_params(params, helth_checkup['response_timeout'], 'HealthCheckConnectTimeout')
if 'interval' in helth_checkup:
self.build_list_params(params, helth_checkup['interval'], 'HealthCheckInterval')
if 'unhealthy_threshold' in helth_checkup:
self.build_list_params(params, helth_checkup['unhealthy_threshold'], 'UnhealthyThreshold')
if 'healthy_threshold' in helth_checkup:
self.build_list_params(params, helth_checkup['healthy_threshold'], 'HealthyThreshold')
try:
results = self.get_status('CreateLoadBalancerUDPListener', params)
except Exception as ex:
msg, stack = ex.args
results.append("Create Load Balancer UDP Listener Error:" + str(msg) + " " + str(stack))
return results
```
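The listener, health-check and stickiness dictionaries expected by `create_load_balancer` are only implied by the key lookups above, so the following is a minimal usage sketch. The credentials, region and port values are placeholders, and it assumes the surrounding footmark package is importable.
```python
# Hypothetical usage sketch for SLBConnection.create_load_balancer.
# All credentials, regions and ports below are placeholder values.
from footmark.slb.connection import SLBConnection

conn = SLBConnection(acs_access_key_id='<access-key>',
                     acs_secret_access_key='<secret-key>')

listeners = [{'protocol': 'http', 'load_balancer_port': 80,
              'instance_port': 8080, 'bandwidth': 1}]
helth_checkup = {'health_check': 'on', 'ping_port': 8080, 'ping_path': '/',
                 'response_timeout': 5, 'interval': 5,
                 'unhealthy_threshold': 2, 'healthy_threshold': 2}
stickness = {'enabled': 'on', 'type': 'insert', 'cookie_timeout': 300}

result = conn.create_load_balancer('cn-hangzhou', name='demo-slb',
                                   address_type='internet',
                                   internet_charge_type='paybytraffic',
                                   bandwidth=1, listeners=listeners,
                                   helth_checkup=helth_checkup,
                                   stickness=stickness)
print(result)
```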
#### File: Jogesh-Click2Cloud/AliCloudAnsibleFootmark/__main__.py
```python
import footmark
#from footmark.ecs.connection import ECSConnection
def main():
print "Hello World!"
if __name__== "__main__":
main()
print "This is main Program"
#app=ECSConnection()
#app.run()
```
#### File: unit/ecs/test_instance.py
```python
from footmark.ecs.connection import ECSConnection
from tests.unit import ACSMockServiceTestCase
import json
DESCRIBE_INSTANCE = '''
{
"Instances": {
"Instance": [
{
"CreationTime": "2016-06-20T21:37Z",
"DeviceAvailable": true,
"EipAddress": {},
"ExpiredTime": "2016-10-22T16:00Z",
"HostName": "xiaozhu_test",
"ImageId": "centos6u5_64_40G_cloudinit_20160427.raw",
"InnerIpAddress": {
"IpAddress": [
"10.170.106.80"
]
},
"InstanceChargeType": "PostPaid",
"InstanceId": "i-94dehop6n",
"InstanceNetworkType": "classic",
"InstanceType": "ecs.s2.large",
"InternetChargeType": "PayByTraffic",
"InternetMaxBandwidthIn": -1,
"InternetMaxBandwidthOut": 1,
"IoOptimized": false,
"OperationLocks": {
"LockReason": []
},
"PublicIpAddress": {
"IpAddress": [
"192.168.127.12"
]
},
"RegionId": "cn-shenzhen",
"SecurityGroupIds": {
"SecurityGroupId": [
"sg-94kd0cyg0"
]
},
"SerialNumber": "51d1353b-22bf-4567-a176-8b3e12e43135",
"Status": "Running",
"Tags":{
"Tag":[
{
"TagValue":"1.20",
"TagKey":"xz_test"
},
{
"TagValue":"1.20",
"TagKey":"xz_test_2"
}
]
},
"VpcAttributes": {
"PrivateIpAddress": {
"IpAddress": []
}
},
"ZoneId": "cn-shenzhen-a"
}
]
},
"PageNumber": 1,
"PageSize": 10,
"RequestId": "14A07460-EBE7-47CA-9757-12CC4761D47A",
"TotalCount": 1
}
'''
MANAGE_INSTANCE = '''
{
"RequestId": "14A07460-EBE7-47CA-9757-12CC4761D47A",
}
'''
CREATE_INSTANCE = '''
{
"InstanceId":"i-2zeg0900kzwn7dpo7zrb",
"RequestId":"9206E7A7-BFD5-457F-9173-91CF4525DE21"
}
'''
MODIFY_INSTANCE= '''
{
"RequestId":"0C7EFCF3-1517-44CD-B61B-60FA49FEF04E"
}
'''
QUERYING_INSTANCE='''
{
"PageNumber": 1,
"InstanceStatuses":
{"InstanceStatus": [
{"Status": "Running", "InstanceId": "i-2zehcagr3vt06iyir7hc"},
{"Status": "Running", "InstanceId": "i-2zedup3d5p01daky1622"},
{"Status": "Stopped", "InstanceId": "i-2zei2zq55lx87st85x2j"},
{"Status": "Running", "InstanceId": "i-2zeaoq67u62vmkbo71o7"},
{"Status": "Running", "InstanceId": "i-2ze5wl5aeq8kbblmjsx1"}
]},
"TotalCount": 9,
"PageSize": 5,
"RequestId": "5D464158-D291-4C69-AA9E-84839A669B9D"
}
'''
JOIN_GROUP='''
{
"RequestId": "AF3991A3-5203-4F83-8FAD-FDC1253AF15D"
}
'''
LEAVE_GROUP='''
{
"RequestId": "AF3991A3-5203-4F83-8FAD-FDC1253AF15D"
}
'''
ATTACH_DISK='''
{
"RequestId": "AF3991A3-5203-4F83-8FAD-FDC1253AF15D"
}
'''
class TestDescribeInstances(ACSMockServiceTestCase):
connection_class = ECSConnection
def default_body(self):
return DESCRIBE_INSTANCE
def test_instance_attribute(self):
self.set_http_response(status_code=200, body=DESCRIBE_INSTANCE)
filters = {}
instance_ids = ["i-94dehop6n"]
tag_key = 'xz_test'
tag_value = '1.20'
filters['tag:' + tag_key] = tag_value
instances = self.service_connection.get_all_instances(instance_ids=instance_ids, filters=filters)
self.assertEqual(len(instances), 1)
instance = instances[0]
self.assertEqual(instance.id, 'i-94dehop6n')
print 'group_id:', instance.group_id
self.assertEqual(instance.group_id, 'sg-94kd0cyg0')
self.assertEqual(instance.public_ip, '192.168.127.12')
self.assertEqual(instance.tags, {"xz_test": "1.20", "xz_test_2": "1.20"})
self.assertFalse(instance.io_optimized)
self.assertEqual(instance.status, 'running')
self.assertEqual(instance.image_id, 'centos6u5_64_40G_cloudinit_20160427.raw')
return instances
def test_manage_instances(self):
self.set_http_response(status_code=200, body=MANAGE_INSTANCE)
instances = self.test_instance_attribute()
for inst in instances:
if inst.state == 'running':
inst.stop()
elif inst.state == 'stopped':
inst.start()
else:
inst.reboot()
class TestManageInstances(ACSMockServiceTestCase):
connection_class = ECSConnection
instance_ids = ['i-94dehop6n', 'i-95dertop6m']
def default_body(self):
return MANAGE_INSTANCE
def test_start_instance(self):
self.set_http_response(status_code=200)
result = self.service_connection.start_instances(instance_ids=self.instance_ids)
self.assertEqual(len(result), len(self.instance_ids))
self.assertIn(result[0], self.instance_ids)
def test_stop_instance(self):
self.set_http_response(status_code=200)
result = self.service_connection.stop_instances(instance_ids=self.instance_ids, force=True)
self.assertEqual(len(result), len(self.instance_ids))
self.assertIn(result[0], self.instance_ids)
def test_reboot_instance(self):
self.set_http_response(status_code=200)
result = self.service_connection.reboot_instances(instance_ids=self.instance_ids, force=True)
self.assertEqual(len(result), len(self.instance_ids))
self.assertIn(result[0], self.instance_ids)
def test_terminate_instance(self):
self.set_http_response(status_code=200)
result = self.service_connection.terminate_instances(instance_ids=self.instance_ids, force=True)
self.assertEqual(len(result), len(self.instance_ids))
self.assertIn(result[0], self.instance_ids)
# C2C : Unit Test For CreateInstance Method
class TestCreateInstance(ACSMockServiceTestCase):
connection_class = ECSConnection
acs_access_key_id = "<KEY>"
acs_secret_access_key = "fqbuZIKPxOdu36yhFvaBtihNqD2qQ2"
region_id = "cn-beijing"
image_id = "ubuntu1404_64_40G_cloudinit_20160727.raw"
instance_type = "ecs.n1.small"
group_id = "sg-25y6ag32b"
zone_id = "cn-beijing-b"
io_optimized = "optimized"
instance_name = "MyInstance"
description = None
internet_data = {
'charge_type': 'PayByBandwidth',
'max_bandwidth_in': 200,
'max_bandwidth_out': 0
}
host_name = None
password = <PASSWORD>
system_disk = {"disk_category": "cloud_efficiency", "disk_size": 50 }
volumes = [
{
"device_category": "cloud_efficiency",
"device_size": 20,
"device_name": "volume1",
"device_description": "volume 1 description comes here"
},
{
"device_category": "cloud_efficiency",
"device_size": 20,
"device_name": "volume2",
"device_description": "volume 2 description comes here"
}
]
vswitch_id = None
instance_tags = [
{
"tag_key": "create_test_1",
"tag_value": "0.01"
},
{
"tag_key": "create_test_2",
"tag_value": "0.02"
}
]
allocate_public_ip = True
bind_eip = False
instance_charge_type = None
period = None
auto_renew = False
ids = None
count = 1
def default_body(self):
return CREATE_INSTANCE
def test_create_instance(self):
self.set_http_response(status_code=200)
result = self.service_connection.create_instance(region_id=self.region_id, image_id=self.image_id,
instance_type=self.instance_type, group_id=self.group_id,
zone_id=self.zone_id, instance_name=self.instance_name,
description=self.description, internet_data=self.internet_data,
host_name=self.host_name, password=self.password,
io_optimized=self.io_optimized, system_disk=self.system_disk,
volumes=self.volumes, vswitch_id=self.vswitch_id,
instance_tags=self.instance_tags,
allocate_public_ip=self.allocate_public_ip,
bind_eip=self.bind_eip, count=self.count,
instance_charge_type=self.instance_charge_type,
period=self.period, auto_renew=self.auto_renew, ids=self.ids)
self.assertEqual(len(result), self.count)
self.assertIn(result[0], "i-2zeg0900kzwn7dpo7zrb")
class TestModifyInstance(ACSMockServiceTestCase):
connection_class = ECSConnection
attributes = [
{
"id": "i-2zebgzk74po3gx1dwvuo",
"name": "new_once_again",
"description": "volumedecsription",
"password": "<PASSWORD>",
"host_name": "hostingAdmin"
},
{
"id": "i-2zeaoq67u62vmkbo71o7",
"host_name": "adminhostadmin"
}
]
def default_body(self):
return MODIFY_INSTANCE
def test_modify_instance(self):
self.set_http_response(status_code=200)
result = self.service_connection.modify_instance(attributes=self.attributes)
self.assertEqual(len(result), len(self.attributes))
self.assertIn(result[0], "success")
class TestQueryingInstance(ACSMockServiceTestCase):
connection_class = ECSConnection
region_id="cn-beijing"
page_number=1
page_size=5
def default_body(self):
return QUERYING_INSTANCE
def test_querying_instance(self):
self.set_http_response(status_code=200)
result = self.service_connection.querying_instance(region_id=self.region_id, zone_id=None,
page_number=self.page_number,
page_size=self.page_size)
self.assertEqual(result[u'PageNumber'], self.page_number)
self.assertEqual(result[u'PageSize'], self.page_size)
class TestJoinSecGrp(ACSMockServiceTestCase):
connection_class = ECSConnection
acs_access_key = '<KEY>'
acs_secret_access_key = '<KEY>'
instance_ids = ["i-j6c5txh3q0wivxt5m807"]
group_id = 'sg-j6c34iujuqbw29zpd53u'
region = 'cn-hongkong'
state = 'join'
def default_body(self):
return JOIN_GROUP
def test_join_grp(self):
self.set_http_response(status_code=200)
result = self.service_connection.join_security_group(instance_id = self.instance_ids, security_group_id = self.group_id)
###self.assertEqual(len(result), len(self.attributes))
self.assertEqual(result[0], "success")
class TestLeaveSecGrp(ACSMockServiceTestCase):
connection_class = ECSConnection
acs_access_key = '<KEY>'
acs_secret_access_key = '<KEY>'
instance_ids = ["i-j6c5txh3q0wivxt5m807"]
group_id = 'sg-j6c34iujuqbw29zpd53u'
region = 'cn-hongkong'
state = 'remove'
def default_body(self):
return LEAVE_GROUP
def test_leave_grp(self):
self.set_http_response(status_code=200)
result = self.service_connection.leave_security_group(instance_id = self.instance_ids, security_group_id = self.group_id)
###self.assertEqual(len(result), len(self.attributes))
self.assertEqual(result[0], "success")
class TestAttachDisk(ACSMockServiceTestCase):
connection_class = ECSConnection
acs_access_key = '<KEY>'
acs_secret_access_key = '<KEY>'
instance_ids = ["i-j6c5txh3q0wivxt5m807"]
disk_id = 'd-j6cc9ssgxbkjdf55w8p7'
region = 'cn-hongkong'
device = None
delete_with_instance = None
state = 'attach'
def default_body(self):
return ATTACH_DISK
def attach_disk(self):
self.set_http_response(status_code=200)
result = self.service_connection.attach_disk_to_instance(disk_id = self.disk_id, instance_id = self.instance_ids,region_id = self.region, device = self.device,delete_with_instance = self.delete_with_instance)
###self.assertEqual(len(result), len(self.attributes))
self.assertEqual(result[0], "success")
``` |
{
"source": "joggee-fr/dbus-docker",
"score": 2
} |
#### File: dbus-docker/dbus-service/service.py
```python
from gi.repository import GObject
import dbus
import dbus.service
from gi.repository import GLib
from dbus.mainloop.glib import DBusGMainLoop
DBusGMainLoop(set_as_default=True)
OPATH = "/com/example/HelloWorld"
IFACE = "com.example.HelloWorld"
BUS_NAME = "com.example.HelloWorld"
class Example(dbus.service.Object):
def __init__(self):
bus = dbus.SessionBus()
bus.request_name(BUS_NAME)
bus_name = dbus.service.BusName(BUS_NAME, bus=bus)
dbus.service.Object.__init__(self, bus_name, OPATH)
@dbus.service.method(dbus_interface=IFACE,
in_signature="", out_signature="s")
def SayHello(self):
print("SayHello method called")
return "Hello World!"
if __name__ == "__main__":
a = Example()
loop = GLib.MainLoop()
print("Dbus service started")
loop.run()
``` |
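For completeness, a small client-side sketch that would call the SayHello method of the service above over the session bus; it assumes the same dbus Python bindings used by the service are available.
```python
# Minimal client sketch for the com.example.HelloWorld service above.
import dbus

bus = dbus.SessionBus()
# Obtain a proxy for the object exported at /com/example/HelloWorld
remote = bus.get_object("com.example.HelloWorld", "/com/example/HelloWorld")
hello = dbus.Interface(remote, dbus_interface="com.example.HelloWorld")
print(hello.SayHello())  # expected to print "Hello World!"
```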
{
"source": "jogi-k/tea5767",
"score": 2
} |
#### File: jogi-k/tea5767/radio_server.py
```python
import sys
import http
from http import server
import os
import glob
import time
import datetime
import tea5767stationscanner
import websocket
import socket
import sys
class MyRequestHandler(http.server.SimpleHTTPRequestHandler):
tea = None
def __init__(self, request, address, server):
if(self.tea==None):
self.tea = rr
self.tea.on()
http.server.SimpleHTTPRequestHandler.__init__(self, request, address, server)
def do_GET(self):
if self.path == '/':
self.path = 'index.html'
if self.path == '/searchup':
self.tea.scan(1)
print('search up finished')
self.send_response(200)
self.send_header('Content-type','text/html')
#self.send_header("Content-type", "application/json")
#self.send_header("Content-length", 2)
self.end_headers()
self.wfile.write(bytes("ok","UTF-8"))
self.wfile.flush()
return
if self.path == '/searchdown':
self.tea.scan(0)
print('search down finished')
self.send_response(200)
self.send_header('Content-type','text/html')
#self.send_header("Content-type", "application/json")
#self.send_header("Content-length", 2)
self.end_headers()
self.wfile.write(bytes("ok","UTF-8"))
self.wfile.flush()
return
if self.path == '/off':
self.tea.off()
print('radio mute')
self.send_response(200)
self.send_header('Content-type','text/html')
#self.send_header("Content-type", "application/json")
#self.send_header("Content-length", 2)
self.end_headers()
self.wfile.write(bytes("ok","UTF-8"))
self.wfile.flush()
return
if self.path == '/info':
resp = self.tea.info()
resp = "{" + '"freq":' + resp['freq'] + ',"level":' + resp['level']+',"stereo":\"'+resp['stereo'] + "\"}"
print(resp)
self.send_response(200)
self.send_header("Content-type", "application/json")
self.send_header("Content-length", len(resp))
self.end_headers()
self.wfile.write(bytes(resp, 'UTF-8'))
return
return http.server.SimpleHTTPRequestHandler.do_GET(self)
rr = tea5767stationscanner.tea5767()
HandlerClass = MyRequestHandler
ServerClass = http.server.HTTPServer
Protocol = "HTTP/1.0"
if sys.argv[1:]:
port = int(sys.argv[1])
else:
port = 8888
server_address = ('0.0.0.0', port)
HandlerClass.protocol_version = Protocol
httpd = ServerClass(server_address, HandlerClass)
WS_PORT = 9876
#ws = websocket.Websocket(WS_PORT, driver)
#Thread(target=ws.serve_forever, args=(stop,)).start()
try:
sa = httpd.socket.getsockname()
print ("Serving HTTP on ", sa[0], "port", sa[1])
httpd.serve_forever()
except:
print("Program finished")
rr.off()
```
#### File: jogi-k/tea5767/tea5767_tornado_server.py
```python
import tornado.httpserver
import tornado.websocket
import tornado.ioloop
import tornado.web
import tea5767stationscanner
class IndexHandler(tornado.web.RequestHandler):
@tornado.web.asynchronous
def get(self):
self.render("tea5767_tornado.html")
class WSHandler(tornado.websocket.WebSocketHandler):
controller = None
def check_origin(self, origin):
return True
def open(self):
print ("connecting...")
try:
#self.controller = DeviceController()
#self.write_message("Hello world!")
self.controller=tea5767stationscanner.tea5767()
self.controller.on()
data=self.controller.info()
self.write_message(data)
# self.controller.prepareSocket()
except Exception as a:
print(a)
def on_message(self, message):
print("Command:", message)
data=""
try:
if(message=="up"):
self.controller.scan(1)
elif(message=="down"):
self.controller.scan(0)
elif(message=="off"):
data=self.controller.off()
elif(message=="mute"):
data=self.controller.mute()
data=self.controller.info()
if(message=="off"):
data=self.controller.off()
self.write_message(data)
except Exception as a:
print("Error: ", a)
def on_close(self):
print ("closing sockets")
self.controller =""
static_path = "/home/pi/Projects/tea5767/"
favicon_path =""
application = tornado.web.Application([
(r'/favicon.ico', tornado.web.StaticFileHandler, {'path': favicon_path}),
(r"/images/(.*)",tornado.web.StaticFileHandler, {"path": "./images"},),
(r'/static/(.*)', tornado.web.StaticFileHandler, {'path': static_path}),
(r'/', IndexHandler),
(r"/ws", WSHandler),
])
if __name__ == "__main__":
http_server = tornado.httpserver.HTTPServer(application)
print ("Waiting client connection via browser port 8888")
http_server.listen(8888)
tornado.ioloop.IOLoop.instance().start()
``` |
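A browser normally drives the /ws endpoint above; the sketch below shows the same exchange from Python using the third-party websocket-client package (an assumption, not imported by the server itself), sending the "up" command and printing the JSON status the handler writes back.
```python
# Hypothetical test client for the /ws endpoint
# (pip install websocket-client).
import websocket

ws = websocket.create_connection("ws://localhost:8888/ws")
print(ws.recv())      # initial tuner info sent from open()
ws.send("up")         # scan upwards, mirrors the "up" branch in on_message
print(ws.recv())      # updated tuner info
ws.close()
```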
{
"source": "jogipraveen/test-automation",
"score": 3
} |
#### File: jogipraveen/test-automation/fetch_testng.py
```python
import sys
import re
import argparse
from xml.dom import minidom
def getargs():
"""
Supports the following command-line arguments listed below.
testng_file - testng file name
url - bitbucket/stash url
"""
parser = argparse.ArgumentParser(description='fetch all failed functional tests')
parser.add_argument('testng_file', help='testng xml file name')
parser.add_argument('url', help='bitbucket/stash url')
args = parser.parse_args()
return args
def fetch_testng(testng_file, url):
""" create empty list """
failed_tests = []
""" create an empty list for failed config """
failed_config = []
""" parse xml file """
xmldoc = minidom.parse(testng_file)
testng = xmldoc.getElementsByTagName("testng-results")[0]
test = testng.getElementsByTagName("test")[0]
test_class = test.getElementsByTagName("class")
""" iterate through all classes """
for test_classes in test_class:
test_method=test_classes.getElementsByTagName("test-method")
for test_methods in test_method:
signature = test_methods.getAttribute("signature")
status = test_methods.getAttribute("status")
name = test_methods.getAttribute("name")
config = test_methods.getAttribute("is-config")
""" Check status of test and configuration """
if status == "FAIL" and config == "true":
""" Get all failed configs """
failed_config.append(signature)
elif status == "FAIL":
""" Get all failed tests """
failed_tests.append(signature)
""" find tests failed on retry """
list_tests = set([x for x in failed_tests if failed_tests.count(x) > 2])
for i in list_tests: # print all failed tests in the retry
"""
Apply some regular expression to find test_name and method and package
"""
lst1 = i.split('(')
test_name1 = lst1[0]
lst2 = lst1[1].split('instance:')[1].split('@')[0]
test_group1 = lst2.split('.')[-1]
package1 = re.sub(r'.[a-zA-Z]*$', "", lst2)
""" URL for the failed test """
url1 = url + "testngreports/" + package1 + "/" + test_group1 + "/" + test_name1
""" failed test """
test_case1=package1 + "." + test_group1 + "." + test_name1
"""
This is [an example](http://www.example.com/) inline link - example to insert a link for a text in stash
"""
print("[" + test_case1 + "](" + url1 + ")")
for j in failed_config: # print all failed config
"""
Apply some regular expression to find test_name and method and package
"""
lst3 = j.split('(')
test_name2 = lst3[0]
lst4 = lst3[1].split('instance:')[1].split('@')[0]
test_group2 = lst4.split('.')[-1]
package2 = re.sub(r'.[a-zA-Z]*$', "", lst4)
""" URL for the failed test """
url2 = url + "testngreports/" + package2 + "/" + test_group2 + "/" + test_name2
""" failed test config """
test_case2 = package2 + "." + test_group2 + "." + test_name2
print("[" + test_case2 + "](" + url2 + ")")
def main():
""" gather all command line arguments """
args = getargs()
testng_file = args.testng_file
url = args.url
fetch_testng(testng_file, url)
if __name__ == '__main__':
main()
``` |
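Since the script is driven entirely by its two positional arguments, a short driver showing how fetch_testng could be invoked directly; the results file name and job URL are placeholders.
```python
# Hypothetical direct invocation of fetch_testng (placeholder paths/URLs).
from fetch_testng import fetch_testng

# Equivalent to: python fetch_testng.py testng-results.xml <job-url>/
fetch_testng("testng-results.xml",
             "https://jenkins.example.com/job/functional-tests/42/")
```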
{
"source": "Jogius/ttd-server",
"score": 2
} |
#### File: src/chatbot/start.py
```python
from chatterbot import ChatBot
bot = ChatBot(
'ttd',
database_uri=f'sqlite:///data/db/chatbot.sqlite3'
)
# HTTP API initialization (aiohttp)
from aiohttp import web
import json
async def getResponse(req):
body = await req.json()
response = {
'response': bot.get_response(body['message']).__str__()
}
print(body['message'], ' --> ', response['response'])
return web.Response(body=json.dumps(response))
app = web.Application()
app.add_routes([web.post('/', getResponse)])
web.run_app(app, host='localhost', port=1215)
``` |
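The aiohttp handler above accepts a JSON body with a single message field; a sketch of a matching client call using requests (an assumption, not part of the service) follows.
```python
# Hypothetical client for the chatbot endpoint above (assumes `requests`).
import requests

reply = requests.post("http://localhost:1215/",
                      json={"message": "Hello there"})
print(reply.json()["response"])
```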
{
"source": "JogleLew/ShortURL",
"score": 3
} |
#### File: ShortURL/shorturl/models.py
```python
import sqlite3
import web
from libs import short_url
class DB(object):
def __init__(self, db_kwargs):
self.db = web.database(**db_kwargs)
def exist_expand(self, long_url):
"""检查数据库中是否已有相关记录,有则返回短 URL
"""
result = self.db.where(table='url', what='shorten',
expand=long_url)
if result:
return result[0]
def add_url(self, long_url):
"""添加 URL,返回短 URL
"""
id_ = self.db.insert(tablename='url', seqname='id', shorten='',
expand=long_url)
shorten = short_url.encode_url(id_)
self.db.update(tables='url', shorten=shorten,
where='id=$id_', vars=locals())
return web.storage(shorten=shorten)
def get_expand(self, shorten):
"""根据短 URL 返回原始 URL
"""
result = self.db.where(table='url', what='expand',
shorten=shorten)
if result:
return result[0]
``` |
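A sketch of how the DB class above might be wired up with web.py's sqlite backend; the database keyword arguments and the pre-created `url` table (id, shorten, expand) are assumptions based on the queries in the class.
```python
# Hypothetical wiring for the DB helper above.
from shorturl.models import DB

db = DB({'dbn': 'sqlite', 'db': 'shorturl.db'})

existing = db.exist_expand('http://www.example.com/')
if existing:
    print(existing.shorten)
else:
    print(db.add_url('http://www.example.com/').shorten)
```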
{
"source": "jogloran/advent-of-code-2020",
"score": 2
} |
#### File: jogloran/advent-of-code-2020/d11.py
```python
import numpy as np
from scipy.signal import convolve2d
def munge(line): return ' '.join('1.0' if c == 'L' else '0.0' for c in line.rstrip())
d = np.loadtxt(munge(line) for line in open('d11.txt')).astype(np.int)
floor = d == 0.0
kernel = np.array([[1, 1, 1], [1, 0, 1], [1, 1, 1]])
while True:
neighbours = convolve2d(d, kernel, mode='same')
d_ = np.copy(d)
d_[neighbours >= 4] = 0
d_[neighbours == 0] = 1
d_[floor] = 0
if np.all(d == d_):
print('stable')
break
d = d_
print(np.sum(d))
```
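The convolution trick above counts live neighbours for every cell at once; here is a tiny standalone check of that idea using the same SciPy call on a hand-made 3x3 grid.
```python
# Tiny check of the neighbour-count convolution used above.
import numpy as np
from scipy.signal import convolve2d

grid = np.array([[1, 0, 0],
                 [0, 1, 0],
                 [0, 0, 1]])
kernel = np.array([[1, 1, 1],
                   [1, 0, 1],
                   [1, 1, 1]])
# each entry = number of live neighbours of that cell
print(convolve2d(grid, kernel, mode='same'))
```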
#### File: jogloran/advent-of-code-2020/d15.py
```python
start = [8,13,1,0,18,9]
last_said = None
history = {}
def say(num, turn_no):
print(f'turn {i}\tsay {num}')
for i in range(30000000):
if i < len(start):
num = start[i]
else:
# print(f'turn {i} last said {last_said} {history}')
if last_said in history:
# print('in')
num = i - history[last_said] - 1
else:
num = 0
# print(history)
if last_said is not None:
history[last_said] = i - 1
# say(num, i)
if i % 1000000 == 0: print(i, num)
last_said = num
print(i, num)
```
#### File: jogloran/advent-of-code-2020/d1.py
```python
import sys
items = '''1082
1770
1104
1180
1939
1952
1330
1569
1120
1281
1144
1091
2008
1967
1863
1819
1813
1986
1099
1860
1686
1063
1620
1107
1095
951
1897
1246
1264
1562
1151
1980
1942
1416
1170
1258
1075
1882
1329
2003
66
1249
1302
1221
1828
1154
1662
1103
1879
1205
1936
1472
1816
1071
1237
1467
1919
942
74
1178
1949
1947
1613
1931
1332
24
1987
1796
1256
1981
1158
1114
2004
1696
1775
1718
1102
1998
1540
1129
1870
1841
1582
1173
1417
1604
1214
1941
1440
1381
1149
1111
1766
1747
1940
960
1449
1171
1584
1926
1065
1832
1633
1245
1889
1906
1198
1959
1340
1951
1347
1097
1660
1957
1134
1730
1105
1124
1073
1679
1397
1963
1136
1983
1806
1964
1821
1997
1254
1823
1092
1119
2000
1089
1933
1478
1923
1576
1571
415
1875
1937
1112
1831
1969
1506
1929
1960
1322
110
1141
1080
1603
1126
1036
1762
1904
1122
1988
1962
1958
1953
1068
1188
1483
1518
1471
1961
1217
1559
1789
1523
2007
1093
1745
1955
1948
1474
1628
691
1398
1876
1650
1838
1950
1088
1697
1977
1364
1966
1945
1975
1606
1974
1847
1570
1148
1599
1772
1970'''.split('\n')
items = list(map(int, items))
def find2(target):
h = {}
for i, e in enumerate(items):
if e in h:
return e * items[h[e]]
h[target - e] = i
print(find2(2020))
def find3(target):
for i, e in enumerate(items):
t = find2(target - e)
if t:
return t * e
print(find3(2020))
```
#### File: jogloran/advent-of-code-2020/d20r2.py
```python
from more_itertools import split_at, distinct_combinations
import numpy as np
np.core.arrayprint._line_width = 160
from collections import Counter
def convert(row):
return [1 if c == "#" else 0 for c in row]
def to_sig(row):
return row.dot(2**np.arange(row.size)[::-1])
signatures = {}
signatures_f = {}
all_sigs = Counter()
class Tile:
def __init__(self, tile_id, bitmap, ori):
self.tile_id = tile_id
self.bitmap = bitmap
self.ori = ori
def choices(self):
yield self.bitmap
yield np.rot90(self.bitmap)
yield np.rot90(self.bitmap, 2)
yield np.rot90(self.bitmap, 3)
yield np.fliplr(self.bitmap)
yield np.rot90(np.fliplr(self.bitmap))
yield np.rot90(np.fliplr(self.bitmap), 2)
yield np.rot90(np.fliplr(self.bitmap), 3)
def __repr__(self): return '% 4s(%d)' % (self.tile_id, self.ori)
from collections import Counter
all_sigs = Counter()
tiles = {}
for grp in split_at(map(str.rstrip, open("d20.txt")), lambda e: e == ""):
tile_id = int(grp[0][-5:-1])
bitmap = np.array([convert(row) for row in grp[1:]])
tiles[tile_id] = Tile(tile_id, bitmap, 0)
corners = [
(3539, 0), # top left
(2693, 2), # top right
(1549, 0), # bottom right
(3709, 0), # bottom left
]
UP, RIGHT = 0, 1
def compatible(a1, a2, dir):
if dir == RIGHT:
return np.all(a1[:, -1] == a2[:, 0])
elif dir == UP:
return np.all(a1[-1, :] == a2[0, :])
def find_compatible(left_tile, dir=RIGHT):
for tile in tiles.values():
if tile.tile_id == left_tile.tile_id: continue
for j, choice in enumerate(tile.choices()):
if compatible(left_tile.bitmap, choice, dir=dir):
# print(f'{left_tile.tile_id} {left_tile.bitmap[:, -1]} compatible with {tile.tile_id} {choice[:, 0]}')
yield choice, tile.tile_id, j
# return None, -1
solution = np.empty((12, 12), dtype=np.object)
solution[0, 0] = tiles[3539]
solution[-1, 0] = tiles[3709]
# solution[0, -1] = tiles[2693].rotate(2)
# solution[-1, -1] = tiles[1549]
disallowed = {3539, 3709}
i = 1
for i in range(1, 12):
for tile in tiles.values():
if tile.tile_id in disallowed: continue
compats = list(find_compatible(solution[0, i-1]))
if compats:
found_compatible, tile_id, j = compats[0]
solution[0, i] = Tile(tile_id, found_compatible, j)
disallowed.add(tile_id)
break
for j in range(1, 12):
for i in range(0, 12):
for tile in tiles.values():
if tile.tile_id in disallowed: continue
compats = list(find_compatible(solution[j-1, i], dir=UP))
if compats:
found_compatible, tile_id, k = compats[0]
solution[j, i] = Tile(tile_id, found_compatible, k)
disallowed.add(tile_id)
break
print(np.array2string(solution, max_line_width=np.inf))
solution_matrix = np.stack([e.bitmap for e in solution.ravel()]) # (144, 10, 10)
unframed = solution_matrix[:, 1:-1, 1:-1].reshape((12, 12, 8, 8))
print(unframed.shape) # (12, 12, 8, 8)
image = np.zeros((96, 96), dtype=np.int)
for row in range(12):
for col in range(12):
image[8*row:8*(row+1), 8*col:8*(col+1)] = unframed[row, col]
np.save('image.npy', image)
# import matplotlib.pyplot as plt
# plt.figure()
# f, ar = plt.subplots(2)
# ar[0].imshow(image)
# ar[1].imshow(solution[0, -1].bitmap)
# plt.show()
```
#### File: jogloran/advent-of-code-2020/d21.py
```python
import re
lines = map(str.rstrip, open("d21.txt"))
id = 0
def id_maker():
global id
id += 1
return id - 1
from collections import defaultdict, Counter
id_map = defaultdict(id_maker)
allergen_by_count = defaultdict(list)
allergen_combo_count = Counter()
all_allergens = set()
ingredient_lists = defaultdict(list)
for line in lines:
ingredients, allergens = line[:-1].split(" (contains ")
ingredients = set(id_map[i] for i in ingredients.split())
allergens = set(allergens.replace('peanuts', 'pean').split(", "))
allergen_combo_count[tuple(sorted(allergens))] += 1
all_allergens |= allergens
ingredient_lists["+".join(tuple(sorted(allergens)))].append(ingredients)
print(allergens, ingredients)
if len(allergens) == 1:
allergen_by_count[next(iter(allergens))].append(ingredients)
print(allergen_combo_count.most_common())
for allergen in all_allergens:
candidates = [l for (allergen_spec, l) in ingredient_lists.items() if allergen in allergen_spec]
all = set.intersection(*[set.intersection(*c) for c in candidates])
print(allergen, all)
# worked out manually based on:
# fish {83, 46}
# nuts {83, 44}
# eggs {9, 50}
# dairy {40, 44, 46}
# soy {44}
# pean {9, 83, 59}
# wheat {9, 83, 44}
# sesame {9, 75, 44, 59}
c = 0
for l in ingredient_lists.values():
for e in l:
s = set(e) - {44,83,50,59,75,9,40,46}
c += len(s)
print(c)
# worked out manually
rev_map = {v:k for (k,v) in id_map.items()}
print(','.join(rev_map[c] for c in [40,50,46,83,59,75,44,9]))
```
#### File: jogloran/advent-of-code-2020/d23b.py
```python
cups = list('389125467')
cups = list(map(int, cups))# + list(range(len(cups), 1000000 + 1))
max_index = max(cups)
cur_index = -1
def remove_slice(L, start, end):
result = []
N = len(L)
for i in range(start, end):
result.append(L[i % N])
if end > N:
del L[start:]
del L[:(end % N)]
else:
del L[start:end]
return result
def get_dest(L, cur):
# max_index = max(L)
cur -= 1
if cur < 0: cur = max_index
while True:
if cur in L: return L.index(cur)
cur -= 1
if cur < 0: cur = max_index
def insert_at(cups, cur, pickup):
cups[cur:cur] = pickup
for round in range(1000000):
if cur_index != -1:
cur_index = (cups.index(cur) + 1) % len(cups)
else:
cur_index = 0
cur = cups[cur_index]
pickup = remove_slice(cups, cur_index + 1, cur_index + 4)
print('after remove:', cups)
dest_idx = get_dest(cups, cur)
# print(str(cur_index) + " ", end='')
# cur_index is periodic with cycle length 242
insert_at(cups, dest_idx + 1, pickup)
print(f'{round:4} cur [{cur_index}] = {cur}, dest [{dest_idx}] {cups}')
print()
cup1_idx = cups.index(1)
print(cups[(cup1_idx - 1) % len(cups)])
print(cups[(cup1_idx - 2) % len(cups)])
```
#### File: jogloran/advent-of-code-2020/d8.py
```python
from bisect import insort, bisect_left
def index(a, x):
'Locate the leftmost value exactly equal to x'
i = bisect_left(a, x)
if i != len(a) and a[i] == x:
return i
return -1
rom = list(map(str.rstrip, open('d8.txt')))
pc = 0
a = 0
visited = []
while True:
if index(visited, pc) != -1:
print(a)
break
insort(visited, pc)
op, arg = rom[pc].split(' '); arg = int(arg)
if op == 'acc':
a += arg
elif op == 'jmp':
pc += arg
continue
pc += 1
``` |
{
"source": "jognsmith/blc.py",
"score": 2
} |
#### File: jognsmith/blc.py/setup.py
```python
from setuptools import setup
if __name__ != "__main__":
import sys
sys.exit(1)
def long_desc(ext="md"):
# Markdown because I CBF to do reST.
with open('README.{0}'.format(ext), 'rb') as f:
return f.read()
kw = {
"name": "blc.py",
"version": "1.0.0",
"description": "An easy way to interact with the BlooCoin environment.",
"long_description": long_desc(),
"url": "https://github.com/jognsmith/blc.py",
"author": "<NAME>",
"author_email": "<EMAIL>",
"license": "MIT",
"packages": ["blcpy"],
"zip_safe": False,
"keywords": "bloocoin wrapper client",
"classifiers": [
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Programming Language :: Python :: 2"
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
]
}
if __name__ == "__main__":
setup(**kw)
``` |
{
"source": "jogobom/PyGameLife",
"score": 3
} |
#### File: jogobom/PyGameLife/cell.py
```python
import pygame
class Cell:
def __init__(self, x, y, alive):
self.x = x
self.y = y
self.alive = alive
self.neighbours = []
def init_neighbours(self, x, y, width, height, population):
if y > 0:
if x > 0:
self.neighbours.append(population[(width * (self.y - 1)) + self.x - 1])
self.neighbours.append(population[(width * (self.y - 1)) + self.x])
if x < width - 1:
self.neighbours.append(population[(width * (self.y - 1)) + self.x + 1])
if y < height - 1:
if x > 0:
self.neighbours.append(population[(width * (self.y + 1)) + self.x - 1])
self.neighbours.append(population[(width * (self.y + 1)) + self.x])
if x < width - 1:
self.neighbours.append(population[(width * (self.y + 1)) + self.x + 1])
if x > 0:
self.neighbours.append(population[(width * self.y) + self.x - 1])
if x < width - 1:
self.neighbours.append(population[(width * self.y) + self.x + 1])
def update(self):
live_neighbours = len([n for n in self.neighbours if n.alive])
if self.alive:
if live_neighbours < 2 or live_neighbours > 3:
self.alive = False
else:
if live_neighbours == 3:
self.alive = True
def draw(self, screen):
if self.alive:
pygame.draw.rect(screen, (160, 255, 20), (self.x * 4, self.y * 4, 4, 4))
``` |
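The Cell class expects a flat population list indexed row-major (width * y + x); the driver below is a guess at how the rest of the repo might build and step the grid, with the pygame setup kept minimal.
```python
# Hypothetical driver for the Cell grid above (row-major flat list).
import random
import pygame
from cell import Cell

WIDTH, HEIGHT = 160, 120
population = [Cell(x, y, random.random() < 0.2)
              for y in range(HEIGHT) for x in range(WIDTH)]
for c in population:
    c.init_neighbours(c.x, c.y, WIDTH, HEIGHT, population)

pygame.init()
screen = pygame.display.set_mode((WIDTH * 4, HEIGHT * 4))
for _ in range(100):  # run a fixed number of generations
    screen.fill((0, 0, 0))
    # note: Cell.update mutates alive in place, so later cells in a pass
    # already see some updated neighbours (a quirk of the class design)
    for c in population:
        c.update()
    for c in population:
        c.draw(screen)
    pygame.display.flip()
```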
{
"source": "jogoding/pyhouse",
"score": 3
} |
#### File: jogoding/pyhouse/njhouse.py
```python
import urllib
import urllib2
import re
import thread
import time
#
class NjHouse:
def __init__(self):
self.page_index = 1
self.user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
self.headers = {'User-Agent': self.user_agent}
self.stories = [] # holds the jokes; each element is one page's jokes
self.enable = False # flag for whether the program should keep running
# fetch the page source for a given page index
def get_page(self, page_index):
try:
url = 'http://www.qiushibaike.com/hot/page/' + str(page_index)
request = urllib2.Request(url, headers=self.headers)
response = urllib2.urlopen(request)
# 存放程序是否继续运行的变量
page_code = response.read().decode('utf-8')
return page_code
except urllib2.URLError, e:
if hasattr(e, "reason"):
print u"连接糗事百科失败,错误原因", e.reason
return None
# given a page index, return that page's list of jokes
def get_page_items(self, page_index):
page_code = self.get_page(page_index)
if not page_code:
print "页面加载失败...."
return None
pattern = re.compile(
r'<div.*?article.*?<div.*?author.*?<h2>(.*?)</h2>.*?<div.*?content.*?<span>(.*?)</span>' +
r'(.*?)class="stats">.*?number">(.*?)</i>.*?class="dash".*?"number">(.*?)</i>' +
r'(.*?)?', re.S)
items = re.findall(pattern, page_code)
page_stories = []
for item in items:
# item[0] author; item[1]content; item[2] image, if any;
# item[3] ; item[4] ; item[5] comment, if any
img_url = ""
have_image = re.search("img", item[2])
if have_image:
img_url = re.search()
comment = ""
have_cmt = re.search("", item[5])
if have_cmt:
comment = re.search()
patten_br = re.compile(r'<br/>')
content = re.sub(patten_br, "\n", item[1])
page_stories.append(item[0].strip(), content.strip(), img_url, comment)
return page_stories
# load a page, extract its content and append it to the list
def load_page(self):
if self.enable:
if len(self.stories) < 2:
page_stories = self.get_page_items(self.page_index)
if page_stories:
self.stories.append(page_stories)
self.page_index += 1
# call this method; each press of Enter prints one joke
def get_one_story(self, page_stories, page):
for story in page_stories:
input_ = raw_input()
self.load_page()
if input_ == "Q":
self.enable = False
return
print u"第%d页\t发布人:%s\t发布时间:%s\t赞:%s\n%s" % (page, story[0], story[2], story[3], story[1])
# 开始方法
def start(self):
print u"正在读取糗事百科,按回车查看新段子,Q退出"
self.enable = True
self.load_page()
current = 0
#
while self.enable:
if len(self.stories) > 0:
page_stories = self.stories[0]
current += 1
del self.stories[0]
self.get_one_story(page_stories, current)
spider = NjHouse()
spider.start()
``` |
{
"source": "jogoding/search-domain",
"score": 3
} |
#### File: jogoding/search-domain/search_domain.py
```python
import requests
import xml.etree.ElementTree as ET
import time
sess = requests.Session()
sess.headers[
'User-Agent'] = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36(KHTML, like Gecko) Chrome/50.0.2661.87 Safari/537.36'
def verify(length, typ, suffix):
num = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
char = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u',
'v', 'w', 'x', 'y', 'z']
pinyin = ["a", "ai", "an", "ang", "ao", "ba", "bai", "ban", "bang", "bao", "bei", "ben", "beng", "bi", "bian",
"biao", "bie", "bin", "bing", "bo", "bu", "ca", "cai", "can", "cang", "cao", "ce", "ceng", "cha", "chai",
"chan", "chang", "chao", "che", "chen", "cheng", "chi", "chong", "chou", "chu", "chuai", "chuan",
"chuang", "chui", "chun", "chuo", "ci", "cong", "cou", "cu", "", "cuan", "cui", "cun", "cuo", "da", "dai",
"dan", "dang", "dao", "de", "deng", "di", "dian", "diao", "die", "ding", "diu", "dong", "dou", "du",
"duan", "dui", "dun", "duo", "e", "en", "er", "fa", "fan", "fang", "fei", "fen", "feng", "fo", "fou",
"fu", "ga", "gai", "gan", "gang", "gao", "ge", "gei", "gen", "geng", "gong", "gou", "gu", "gua", "guai",
"guan", "guang", "gui", "gun", "guo", "ha", "hai", "han", "hang", "hao", "he", "hei", "hen", "heng",
"hong", "hou", "hu", "hua", "huai", "huan", "huang", "hui", "hun", "huo", "ji", "jia", "jian", "jiang",
"jiao", "jie", "jin", "jing", "jiong", "jiu", "ju", "juan", "jue", "jun", "ka", "kai", "kan", "kang",
"kao", "ke", "ken", "keng", "kong", "kou", "ku", "kua", "kuai", "kuan", "kuang", "kui", "kun", "kuo",
"la", "lai", "lan", "lang", "lao", "le", "lei", "leng", "li", "lia", "lian", "liang", "liao", "lie",
"lin", "ling", "liu", "long", "lou", "lu", "lv", "luan", "lue", "lun", "luo", "ma", "mai", "man", "mang",
"mao", "me", "mei", "men", "meng", "mi", "mian", "miao", "mie", "min", "ming", "miu", "mo", "mou", "mu",
"na", "nai", "nan", "nang", "nao", "ne", "nei", "nen", "neng", "ni", "nian", "niang", "niao", "nie",
"nin", "ning", "niu", "nong", "nu", "nv", "nuan", "nue", "nuo", "o", "ou", "pa", "pai", "pan", "pang",
"pao", "pei", "pen", "peng", "pi", "pian", "piao", "pie", "pin", "ping", "po", "pu", "qi", "qia", "qian",
"qiang", "qiao", "qie", "qin", "qing", "qiong", "qiu", "qu", "quan", "que", "qun", "ran", "rang", "rao",
"re", "ren", "reng", "ri", "rong", "rou", "ru", "ruan", "rui", "run", "ruo", "sa", "sai", "san", "sang",
"sao", "se", "sen", "seng", "sha", "shai", "shan", "shang", "shao", "she", "shen", "sheng", "shi", "shou",
"shu", "shua", "shuai", "shuan", "shuang", "shui", "shun", "shuo", "si", "song", "sou", "su", "suan",
"sui", "sun", "suo", "ta", "tai", "tan", "tang", "tao", "te", "teng", "ti", "tian", "tiao", "tie", "ting",
"tong", "tou", "tu", "tuan", "tui", "tun", "tuo", "wa", "wai", "wan", "wang", "wei", "wen", "weng", "wo",
"wu", "xi", "xia", "xian", "xiang", "xiao", "xie", "xin", "xing", "xiong", "xiu", "xu", "xuan", "xue",
"xun", "ya", "yan", "yang", "yao", "ye", "yi", "yin", "ying", "yo", "yong", "you", "yu", "yuan", "yue",
"yun", "za", "zai", "zan", "zang", "zao", "ze", "zei", "zen", "zeng", "zha", "zhai", "zhan", "zhang",
"zhao", "zhe", "zhen", "zheng", "zhi", "zhong ", "zhou", "zhu", "zhua", "zhuai", "zhuan", "zhuang",
"zhui", "zhun", "zhuo", "zi", "zong", "zou", "zu", "zuan", "zui", "zun", "zuo"]
special = ["code", "tech", "cell", "work", "man", "best"]
if typ == 1:
bas = len(num)
cur = num
elif typ == 2:
bas = len(char)
cur = char
elif typ == 3:
bas = len(pinyin)
cur = pinyin
elif typ == 4:
bas = len(num + char)
cur = num + char
special_len = len(special)
compose_len = bas ** length
u = open('unregistered.txt', 'w')
r = open('registered_or_failed.txt', 'w')
for s in range(special_len):
for x in range(compose_len):
n = x
chr0 = cur[n % bas]
composed_chars = chr0
for y in range(length - 1):
n //= bas
composed_chars += cur[n % bas]
special_domain = composed_chars + special[s]
full_domain = special_domain + '.' + suffix
search(full_domain, u, r)
u.close()
r.close()
def search(domain, available, unusable):
lookup_url = 'http://panda.www.net.cn/cgi-bin/check.cgi?area_domain='
try:
time.sleep(0.1)
resp = sess.get(lookup_url + domain, timeout=30)
et = ET.fromstring(resp.content.decode())
res = et.find('./original').text[:3]
if res == '210':
print(domain + ' domain name is available')
available.write(domain + '\n')
available.flush()
elif res == '211':
print(domain + ' domain name is not available')
else:
print(domain + ' verify timeout')
unusable.write(domain + '\n')
unusable.flush()
except Exception as e:
print(domain + '\ttimeout')
unusable.write(domain + '\n')
unusable.flush()
if __name__ == '__main__':
verify(2, 2, 'com')
sess.close()
``` |
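The nested base-conversion loop in verify() enumerates every prefix of a given length; the same enumeration can be written with itertools.product. This is an alternative sketch of that design choice, not a drop-in change to the script.
```python
# Alternative enumeration sketch using itertools.product instead of the
# manual base conversion in verify(); yields the same candidate domains.
import itertools

chars = "abcdefghijklmnopqrstuvwxyz"
special = ["code", "tech", "cell", "work", "man", "best"]

for prefix in itertools.product(chars, repeat=2):
    for word in special:
        domain = "".join(prefix) + word + ".com"
        # search(domain, available, unusable) would be called here
        print(domain)
```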
{
"source": "jogo/lastcomment",
"score": 2
} |
#### File: jogo/lastcomment/lastcomment.py
```python
import argparse
import calendar
import collections
import datetime
import json
import sys
import urllib
import yaml
import requests
try:
# Disable InsecurePlatformWarning warnings as documented here
# https://github.com/kennethreitz/requests/issues/2214
from requests.packages.urllib3.exceptions import InsecurePlatformWarning
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecurePlatformWarning)
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
except ImportError:
# If there's an import error, then urllib3 may be packaged
# separately, so apply it there too
import urllib3
from urllib3.exceptions import InsecurePlatformWarning
from urllib3.exceptions import InsecureRequestWarning
urllib3.disable_warnings(InsecurePlatformWarning)
urllib3.disable_warnings(InsecureRequestWarning)
TIME_FORMAT = "%Y-%m-%d %H:%M:%S"
class Account(object):
_account_id = None
name = None
email = None
username = None
def __init__(self, account_info={}):
super(Account, self).__init__()
self._account_id = account_info.get('_account_id', 0)
self.name = account_info.get('name', None)
self.email = account_info.get('email', None)
self.username = account_info.get('username', None)
def __str__(self):
a = []
if self.name:
a.append("'%s'" % self.name)
if self.username:
a.append(self.username)
if self.email:
a.append("<%s>" % self.email)
if a:
return "ID %s (%s)" % (self._account_id, ", ".join(a))
else:
return "ID %s" % self._account_id
def __le__(self, other):
# self < other
return self._account_id < other._account_id
class Comment(object):
date = None
number = None
subject = None
now = None
gerrit_url = None
def __init__(self, date, number, subject, message, gerrit_url):
super(Comment, self).__init__()
self.date = date
self.number = number
self.subject = subject
self.message = message
self.gerrit_url = gerrit_url
self.now = datetime.datetime.utcnow().replace(microsecond=0)
def __str__(self):
return ("%s (%s old) %s/%s '%s' " % (
self.date.strftime(TIME_FORMAT),
self.age(),
self.gerrit_url,
self.number, self.subject))
def age(self):
return self.now - self.date
def __le__(self, other):
# self < other
return self.date < other.date
def __repr__(self):
# for sorting
return repr((self.date, self.number))
def get_comments(change, account):
"""Generator that returns all comments by account on a given change."""
body = None
for message in change['messages']:
if ('author' in message and
'_account_id' in message['author'] and
message['author']['_account_id'] == account._account_id):
if (message['message'].startswith("Uploaded patch set") and
len(message['message'].split()) == 4):
# comment is auto created from posting a new patch
continue
date = message['date']
body = message['message']
# https://review.openstack.org/Documentation/rest-api.html#timestamp
# drop nanoseconds
date = date.split('.')[0]
date = datetime.datetime.strptime(date, TIME_FORMAT)
yield date, body
def query_gerrit(gerrit_url, account, count, project, verify=True):
# Include review messages in query
search = "reviewer:{%s}" % account._account_id
if project:
search = search + (" AND project:{%s}" % project)
query = ("%s/changes/?q=%s&"
"o=MESSAGES&pp=0" % (gerrit_url, urllib.quote_plus(search)))
r = requests.get(query, verify=verify)
try:
changes = json.loads(r.text[4:])
except ValueError:
print "query: '%s' failed with:\n%s" % (query, r.text)
sys.exit(1)
comments = []
for change in changes:
for date, message in get_comments(change, account):
if date is None:
# no comments from reviewer yet. This can happen since
# 'Uploaded patch set X.' is considered a comment.
continue
comments.append(Comment(date, change['_number'],
change['subject'], message, gerrit_url))
return sorted(comments, key=lambda comment: comment.date,
reverse=True)[0:count]
def lookup_account(gerrit_url, account_id, verify=True):
"""Look up account information.
An account "ID" can be any uniquely identifying account information. See
the API documentation for more information:
https://review.openstack.org/Documentation/rest-api-accounts.html#account-id
"""
query = "%s/accounts/%s?pp=0" % (gerrit_url, urllib.quote_plus(account_id))
r = requests.get(query, verify=verify)
try:
return Account(json.loads(r.text[4:]))
except ValueError:
print "account lookup for '%s' failed with:\n%s" % (account_id, r.text)
sys.exit(1)
def vote(comment, success, failure, log=False):
for line in comment.message.splitlines():
if line.startswith("* ") or line.startswith("- "):
job = line.split(' ')[1]
if " : SUCCESS" in line:
success[job] += 1
if log:
print line
if " : FAILURE" in line:
failure[job] += 1
if log:
print line
def generate_report(gerrit_url, account, count, project, verify):
result = {'account': account.__dict__, 'project': project}
success = collections.defaultdict(int)
failure = collections.defaultdict(int)
comments = query_gerrit(gerrit_url, account, count, project, verify)
if len(comments) == 0:
print "didn't find anything"
return None
print "last seen: %s (%s old)" % (comments[0].date, comments[0].age())
result['last'] = epoch(comments[0].date)
for comment in comments:
vote(comment, success, failure)
total = sum(success.values()) + sum(failure.values())
if total > 0:
success_rate = str(int(sum(success.values()) /
float(total) * 100)) + "%"
result['rate'] = success_rate
print "success rate: %s" % success_rate
return result
def print_last_comments(gerrit_url, account, count, print_message, project,
votes, verify):
success = collections.defaultdict(int)
failure = collections.defaultdict(int)
comments = query_gerrit(gerrit_url, account, count, project, verify)
message = "last %s comments from '%s'" % (count, account.name)
if project:
message += " on project '%s'" % project
print message
# sort by time
for i, comment in enumerate(comments):
print "[%d] %s" % (i, comment)
if print_message:
print "message: \"%s\"" % comment.message
print
if votes:
vote(comment, success, failure, log=True)
if votes:
print "success count by job:"
for job in success.iterkeys():
print "* %s: %d" % (job, success[job])
print "failure count by job:"
for job in failure.iterkeys():
print "* %s: %d" % (job, failure[job])
def epoch(timestamp):
return int(calendar.timegm(timestamp.timetuple()))
def main():
parser = argparse.ArgumentParser(description='list most recent comment by '
'reviewer')
parser.add_argument('-n', '--name',
default="Elastic Recheck",
                        help='gerrit user (name, id, username or email) of '
'the reviewer')
parser.add_argument('-c', '--count',
default=10,
type=int,
help='Max number of results to return')
parser.add_argument('-f', '--file',
default=None,
                        help='yaml file containing list of names to search on '
'project: name'
' (overwrites -p and -n)')
parser.add_argument('-m', '--message',
action='store_true',
help='print comment message')
parser.add_argument('-v', '--votes',
action='store_true',
help=('Look in comments for CI Jobs and detect '
'SUCCESS/FAILURE'))
parser.add_argument('--json',
nargs='?',
const='lastcomment.json',
help=("Generate report to be stored in the json file "
"specified here. Ignores -v and -m "
"(default: 'lastcomment.json')"))
parser.add_argument('-p', '--project',
help='only list hits for a specific project')
parser.add_argument('-g', '--gerrit-url',
default='https://review.openstack.org',
help='Gerrit server http/https url')
parser.add_argument('--no-verify',
action='store_false',
help='Ignore gerrit server certificate validity')
args = parser.parse_args()
names = {args.project: [args.name]}
accounts = {}
if args.file:
with open(args.file) as f:
names = yaml.load(f)
for project in names:
for id in names[project]:
if id in accounts:
continue
accounts[id] = lookup_account(args.gerrit_url, id, args.no_verify)
if args.json:
print "generating report %s" % args.json
print "report is over last %s comments" % args.count
report = {}
timestamp = epoch(datetime.datetime.utcnow())
report['timestamp'] = timestamp
report['rows'] = []
for project in names:
print 'Checking project: %s' % project
for name in names[project]:
account = accounts[name]
print 'Checking account: %s' % account
try:
if args.json:
report['rows'].append(generate_report(
args.gerrit_url, account, args.count,
project, args.no_verify))
else:
print_last_comments(args.gerrit_url, account, args.count,
args.message, project, args.votes,
args.no_verify)
except Exception as e:
print e
pass
if args.json:
with open(args.json, 'w') as f:
json.dump(report, f)
if __name__ == "__main__":
main()
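# Illustrative invocations (a sketch only; the argparse flags defined above are
# authoritative, and the script file name 'lastcomment.py' is assumed):
#   python lastcomment.py -n "Elastic Recheck" -c 20 -p openstack/nova -v
#   python lastcomment.py -f names.yaml --json report.json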
``` |
{
"source": "jogomojo/rfsoc_qpsk",
"score": 2
} |
#### File: rfsoc_qpsk/drivers/qpsk_overlay.py
```python
from pynq import Overlay
import xrfclk
import xrfdc
import os
import numpy as np
import ipywidgets as ipw
from rfsoc_qpsk import dma_timer, sdr_plots, qpsk_rx, qpsk_tx
class TimerRegistry():
"""Helper class to track active timer threads.
This can be used to help safely stop any orphaned DMA timers.
Orphans appear when a cell is re-run while its DMA timer is active.
"""
def __init__(self):
self.registry = dict()
def register_timers(self, key, timers):
"""Register a list of timers with the registry.
This will safely stop any timers that were previously registered with
the same key.
key: String name for this timer group
timers: List of DmaTimer objects
"""
if key in self.registry:
[timer.stop() for timer in self.registry[key]]
self.registry[key] = timers
class QpskOverlay(Overlay):
"""Overlay subclass for rfsoc-qpsk.
Performs initialisation (including RF components) and exposes them with
more friendly names in a flatter hierarchy. Less typing for everyone.
"""
def __init__(self, bitfile_name=None, init_rf_clks=True, dark_theme=False, presentation_mode=False, **kwargs):
"""Construct a new QpskOverlay
bitfile_name: Optional. If left None, the 'rfsoc_qpsk.bit' bundled with this
rfsoc-qpsk package will be used.
init_rf_clks: If true (default), the reference clocks are configured
for all tiles. If the clocks are already configured, set
to false for faster execution.
        dark_theme: Flag to enable a dark theme for plots
presentation_mode: Flag to enable a dark theme with thick lines and
bigger font
"""
# Generate default bitfile name
if bitfile_name is None:
this_dir = os.path.dirname(__file__)
bitfile_name = os.path.join(this_dir, 'bitstream', 'rfsoc_qpsk.bit')
# Set optional theming for dark mode
if dark_theme:
from IPython.display import display, HTML
import plotly.io as pio
# Apply plotly theming
dark_template = pio.templates['plotly_dark']
dark_template.layout.paper_bgcolor = 'rgb(0,0,0,0)'
dark_template.layout.plot_bgcolor = 'rgb(0,0,0,0)'
dark_template.layout.legend.bgcolor = 'rgb(0,0,0,0)'
pio.templates['dark_plot'] = dark_template
pio.templates.default = 'dark_plot'
# Set optional theming for presentation mode
if presentation_mode:
from IPython.display import display, HTML
import plotly.io as pio
# Apply plotly theming
pio.templates.default = 'plotly_dark+presentation'
# Force dark style for ipywidget tab background
display(HTML("""
<style>
.jupyter-widgets.widget-tab > .widget-tab-contents {
background: inherit !important;
}
</style>
"""))
# Create Overlay
super().__init__(bitfile_name, **kwargs)
        # Extract in-use data converter objects with friendly names
self.rf = self.usp_rf_data_converter_0
self.adc_tile = self.rf.adc_tiles[0]
self.adc_block = self.adc_tile.blocks[0]
self.dac_tile = self.rf.dac_tiles[1]
self.dac_block = self.dac_tile.blocks[2]
# Start up LMX clock
if init_rf_clks:
xrfclk.set_all_ref_clks(409.6)
# Set sane DAC defaults
self.dac_tile.DynamicPLLConfig(1, 409.6, 1228.8)
self.dac_block.NyquistZone = 2
self.dac_block.MixerSettings = {
'CoarseMixFreq': xrfdc.COARSE_MIX_BYPASS,
'EventSource': xrfdc.EVNT_SRC_IMMEDIATE,
'FineMixerScale': xrfdc.MIXER_SCALE_1P0,
'Freq': 1000,
'MixerMode': xrfdc.MIXER_MODE_C2R,
'MixerType': xrfdc.MIXER_TYPE_FINE,
'PhaseOffset': 0.0
}
self.dac_block.UpdateEvent(xrfdc.EVENT_MIXER)
self.dac_tile.SetupFIFO(True)
# Set sane ADC defaults
self.adc_tile.DynamicPLLConfig(1, 409.6, 1228.8)
self.adc_block.NyquistZone = 2
self.adc_block.MixerSettings = {
'CoarseMixFreq': xrfdc.COARSE_MIX_BYPASS,
'EventSource': xrfdc.EVNT_SRC_TILE,
'FineMixerScale': xrfdc.MIXER_SCALE_1P0,
'Freq': 1000,
'MixerMode': xrfdc.MIXER_MODE_R2C,
'MixerType': xrfdc.MIXER_TYPE_FINE,
'PhaseOffset': 0.0
}
self.adc_block.UpdateEvent(xrfdc.EVENT_MIXER)
self.adc_tile.SetupFIFO(True)
# Touch RX and TX drivers for strict evaluation
self.qpsk_tx.qpsk_tx.enable=1
self.qpsk_rx.qpsk_rx_dec.enable=1
self.qpsk_rx.qpsk_rx_csync.enable=1
self.qpsk_rx.qpsk_rx_rrc.enable=1
self.qpsk_rx.qpsk_rx_tsync.enable=1
self.timers = TimerRegistry()
def init_i2c(self):
"""Initialize the I2C control drivers on RFSoC2x2.
This should happen after a bitstream is loaded since I2C reset
is connected to PL pins. The I2C-related drivers are made loadable
modules so they can be removed or inserted.
"""
module_list = ['i2c_dev', 'i2c_mux_pca954x', 'i2c_mux']
for module in module_list:
cmd = "if lsmod | grep {0}; then rmmod {0}; fi".format(module)
ret = os.system(cmd)
if ret:
raise RuntimeError(
'Removing kernel module {} failed.'.format(module))
module_list.reverse()
for module in module_list:
cmd = "modprobe {}".format(module)
ret = os.system(cmd)
if ret:
raise RuntimeError(
'Inserting kernel module {} failed.'.format(module))
def plot_group(self, group_name, domains, get_time_data, fs, get_freq_data=None, get_const_data=None):
"""Create a group of plots for a given set of data generators.
group_name: String name for plot group (used to register timers with
the TimerRegistry)
domains: List of plot types to generate. Select from:
['time','time-binary','frequency','constellation'].
fs: Sampling frequency. Used for time axis scaling
get_time_data: Callback function that returns a buffer of time domain
samples
get_freq_data: Optional callback that returns a buffer of frequency
domain samples. When not specified, a software FFT will
be performed on the get_time_data callback instead.
get_const_data: Optional callback that returns a buffer of time-domain
data for any constellation plots. When not specified,
the get_time_data callback will be used.
"""
plots = []
def many(f, n=4):
return np.concatenate([f() for _ in range(n)])
for domain in domains:
if domain=='frequency':
# HW accelerated FFT
if get_freq_data != None:
f_plot = sdr_plots.HWFreqPlot(
[get_freq_data() for _ in range(4)],
fs, animation_period=100, w=700)
f_dt = dma_timer.DmaTimer(f_plot.add_frame, get_freq_data, 0.3)
# SW FFT
else:
f_plot = sdr_plots.IQFreqPlot(
[many(get_time_data) for _ in range(4)],
fs, x_range=(-2000,2000), animation_period=100, w=700)
f_dt = dma_timer.DmaTimer(f_plot.add_frame, lambda:many(get_time_data), 0.3)
plots.append(dict(title='Frequency domain', plot=f_plot, control=f_dt))
elif domain=='time' or domain=='time-binary':
if domain=='time-binary':
iq_plot = sdr_plots.IQTimePlot(many(get_time_data), fs, w=700, scaling=1, ylabel='Symbol value')
iq_plot.set_line_mode(lines=True, markers=True, shape='hvh')
iq_plot.get_widget().layout.yaxis.dtick=1
else:
iq_plot = sdr_plots.IQTimePlot(many(get_time_data), fs, w=700)
iq_plot.set_line_mode(markers=False)
iq_dt = dma_timer.DmaTimer(iq_plot.add_data, get_time_data, 0.05)
plots.append(dict(title='Time domain', plot=iq_plot, control=iq_dt))
elif domain=='constellation':
c_plot = sdr_plots.IQConstellationPlot(many(get_const_data or get_time_data, n=10), h=550, fade=True)
c_dt = dma_timer.DmaTimer(c_plot.add_data, get_const_data or get_time_data, 0.05)
plots.append(dict(title='Constellation', plot=c_plot, control=c_dt,
layout=ipw.Layout(width='550px', margin='auto')))
self.timers.register_timers(group_name, list(map(lambda tab: tab['control'], plots)))
return QpskOverlay.tab_plots(plots)
@staticmethod
def tab_plots(tabs):
"""Helper function to generate a Tab widget given a list of definitions.
tabs: A list of dicts describing a single tab. Each element needs three
keys: 'plot' with a SdrPlot object, 'control' with a DmaTimer
object, and 'title' with a string.
"""
widgets = []
titles = []
for tab in tabs:
widgets.append(ipw.VBox([
tab['plot'].get_widget(),tab['control'].get_widget()
],layout=tab.get('layout',ipw.Layout())))
titles.append(tab['title'])
tab_widget = ipw.Tab(widgets)
for i, title in enumerate(titles):
tab_widget.set_title(i, title)
QpskOverlay._tab_load_resizer_callback(tab_widget)
return tab_widget
@staticmethod
def _tab_load_resizer_callback(tabs):
"""Helper function to handle relative widths for plots in hidden tabs"""
out = ipw.Output()
display(out)
@out.capture()
def callback(change):
plot = tabs.children[change['new']].children[0]
plot.layout.autosize = False
plot.layout.autosize = True
tabs.observe(callback, names='selected_index')
def _tx_display_generator(self):
tx_plot_names = ['Symbols', 'Post TX RRC']
plot_tx_symbol = self.plot_group(
'tx_symbol', ['time-binary'], self.qpsk_tx.get_symbols, fs=500
)
plot_tx_shaped = self.plot_group(
'tx_shaped', ['time', 'frequency'], self.qpsk_tx.get_shaped_time, fs=4000,
get_freq_data=self.qpsk_tx.get_shaped_fft
)
tx_display_widgets = ipw.Accordion(children=[plot_tx_symbol,
plot_tx_shaped])
for i in range(0, 2):
tx_display_widgets.set_title(i, tx_plot_names[i])
return tx_display_widgets
def _rx_display_generator(self):
def classify_bits(frame):
bit_quantise = lambda b: 1 if b>0 else 0
symbol_quantise = lambda i, q: bit_quantise(i) + 1j*bit_quantise(q)
return np.fromiter(
map(symbol_quantise, np.real(frame), np.imag(frame)),
dtype=np.complex
)
rx_domains = ['time', 'frequency', 'constellation']
rx_plot_names = ['Decimation', 'Coarse Sync', 'Post RX RRC', 'Time Sync']
plot_rx_decimated = self.plot_group(
'rx_decimated', rx_domains, self.qpsk_rx.get_decimated, fs=4000
)
plot_rx_coarse_sync = self.plot_group(
'rx_coarse_sync', rx_domains, self.qpsk_rx.get_coarse_synced, fs=4000
)
plot_rx_rrced = self.plot_group(
'rx_rrced', rx_domains, self.qpsk_rx.get_rrced, fs=16000
)
plot_rx_constellation = self.plot_group(
'rx_data', ['constellation', 'time-binary'],
lambda : classify_bits(self.qpsk_rx.get_data()), fs=500,
get_const_data=self.qpsk_rx.get_data
)
rx_display_widgets = ipw.Accordion(children=[plot_rx_decimated,
plot_rx_coarse_sync,
plot_rx_rrced,
plot_rx_constellation])
for i in range(0, 4):
rx_display_widgets.set_title(i, rx_plot_names[i])
return rx_display_widgets
def _rx_simple_display_generator(self):
plot_rx_constellation = self.plot_group(
'rx_data', ['constellation'], self.qpsk_rx.get_data, fs=500,
get_const_data=self.qpsk_rx.get_data
)
return plot_rx_constellation
def _tx_simple_display_generator(self):
plot_tx_shaped = self.plot_group(
'tx_shaped', ['time', 'frequency'], self.qpsk_tx.get_shaped_time, fs=4000,
get_freq_data=self.qpsk_tx.get_shaped_fft
)
return plot_tx_shaped
def _common_control_generator(self):
def unwrap_slider_val(callback):
return lambda slider_val : callback(slider_val['new'])
def update_nco(rf_block, nco_freq):
mixer_cfg = rf_block.MixerSettings
mixer_cfg['Freq'] = nco_freq
rf_block.MixerSettings = mixer_cfg
rf_block.UpdateEvent(xrfdc.EVENT_MIXER)
def new_nco_slider(title):
return ipw.FloatSlider(
value=1000,
min=620,
max=1220,
step=20,
description=title,
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='.1f',
style = {'description_width': 'initial'}
)
pow_slider = ipw.SelectionSlider(
options=[0.1, 0.3, 0.6, 1],
value=1,
description='Transmit Power:',
style = {'description_width': 'initial'}
)
pow_slider.observe(unwrap_slider_val(self.qpsk_tx.set_gain), names='value')
tx_nco_slider = new_nco_slider('TX Centre Frequency (MHz)')
rx_nco_slider = new_nco_slider('RX Centre Frequency (MHz)')
ipw.link((rx_nco_slider, 'value'), (tx_nco_slider, 'value'))
tx_nco_slider.observe(
unwrap_slider_val(lambda v: update_nco(self.dac_block, v)),
names='value'
)
rx_nco_slider.observe(
unwrap_slider_val(lambda v: update_nco(self.adc_block, v)),
names='value'
)
control_widgets = ipw.Accordion(children=[ipw.VBox([
pow_slider,
tx_nco_slider,
rx_nco_slider])])
control_widgets.set_title(0, 'System Control')
return control_widgets
def _qpsk_generator(self):
tx_display_widget = self._tx_simple_display_generator()
rx_display_widget = self._rx_simple_display_generator()
common_control_widget = self._common_control_generator()
control_accordion = ipw.Accordion(children=[common_control_widget])
tx_display_accordion = ipw.Accordion(children=[tx_display_widget])
control_accordion.set_title(0, 'System Control')
tx_display_accordion.set_title(0, 'Transmitter Visualisation')
side_bar = ipw.VBox([control_accordion, tx_display_accordion])
main_app = ipw.Accordion(children=[rx_display_widget])
main_app.set_title(0, 'Receiver Visualisation')
return ipw.HBox([side_bar, main_app])
def qpsk_demonstrator_application(self):
app = self._qpsk_generator()
return app
```
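A minimal usage sketch for the overlay above, assuming it runs in a Jupyter notebook on an RFSoC board with the packaged bitstream available; the import path is inferred from the file layout and may differ in the installed package.

```python
from rfsoc_qpsk.drivers.qpsk_overlay import QpskOverlay

# Load the bundled rfsoc_qpsk.bit and configure the RF reference clocks and mixers.
ol = QpskOverlay(init_rf_clks=True)

# Build the interactive demonstrator (intended to be the last expression in a notebook cell).
app = ol.qpsk_demonstrator_application()
app
```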
#### File: rfsoc_qpsk/rfsoc_qpsk/qpsk_tx.py
```python
from pynq import DefaultIP
from pynq import DefaultHierarchy
from pynq import allocate
import numpy as np
class QPSKTx(DefaultHierarchy):
def __init__(self, description, pkt_sym=16, pkt_time=128, pkt_fft=1024):
"""Driver for our QPSK TX IP hierarchy
This encompasses the qpsk tx logic and the DMAs for data
transfer of exposed signals.
"""
super().__init__(description)
self.buf_fft = allocate(shape=(pkt_fft, ), dtype=np.uint32)
self.buf_sym = allocate(shape=(pkt_sym, ), dtype=np.uint8)
self.buf_time = allocate(shape=(pkt_time * 2, ), dtype=np.int16)
# QPSK IP General Config
self.qpsk_tx.lfsr_rst = 1
self.qpsk_tx.enable = 1
self.qpsk_tx.packetsize_rf = 1024
self.set_gain(1)
self.qpsk_tx.lfsr_rst = 0
# QPSK IP Symbol Config
self.qpsk_tx.reset_symbol = 1
self.qpsk_tx.packetsize_symbol = pkt_sym - 1
self.qpsk_tx.reset_symbol = 0
self.qpsk_tx.autorestart_symbol = 0
# QPSK IP FFT Config
self.qpsk_tx.reset_fft = 1
self.qpsk_tx.packetsize_fft = pkt_fft - 1
self.qpsk_tx.reset_fft = 0
self.qpsk_tx.autorestart_fft = 0
## QPSK IP Time Config
self.qpsk_tx.reset_time = 1
self.qpsk_tx.packetsize_time = pkt_time - 1
self.qpsk_tx.reset_time = 0
self.qpsk_tx.autorestart_time = 0
def set_gain(self, normalized_gain):
scaling_factor = 0.65
gain = np.uint32( round(normalized_gain * scaling_factor * (2**32 - 1)) )
self.qpsk_tx.mmio.array[44>>2] = gain
def get_shaped_fft(self):
"""Get a single buffer of FFT data from the pulse shaped signal
"""
self.qpsk_tx.transfer_fft = 1
self.dma_tx_fft.recvchannel.transfer(self.buf_fft)
self.dma_tx_fft.recvchannel.wait()
self.qpsk_tx.transfer_fft = 0
return np.array(self.buf_fft)
def get_shaped_time(self):
"""Get a single buffer of time domain data from the pulse shaped signal
"""
self.qpsk_tx.transfer_time = 1
self.dma_tx_time.recvchannel.transfer(self.buf_time)
self.dma_tx_time.recvchannel.wait()
self.qpsk_tx.transfer_time = 0
t_data = np.array(self.buf_time)
c_data = t_data[::2] + 1j * t_data[1::2]
return c_data
def get_many_shaped_time(self, N=10):
"""Get N buffers of time domain data from the pulse shaped signal
"""
return np.concatenate([self.get_shaped_time() for i in range(N)])
def get_symbols(self):
"""Get a single buffer of raw QPSK symbols
"""
def raw_to_i(raw):
i = raw & 0b0011
if i == 3:
return -1
else:
return 1
def raw_to_q(raw):
i = (raw & 0b1100) >> 2
if i == 3:
return -1
else:
return 1
self.qpsk_tx.transfer_symbol = 1
self.dma_tx_symbol.recvchannel.transfer(self.buf_sym)
self.dma_tx_symbol.recvchannel.wait()
self.qpsk_tx.transfer_symbol = 0
raw_data = np.array(self.buf_sym)
c_data = np.array([raw_to_i(e) + 1j * raw_to_q(e) for e in raw_data])
return c_data
def get_many_symbols(self, N=10):
"""Get N buffers of raw QPSK symbols
"""
return np.concatenate([self.get_symbols() for i in range(N)])
@staticmethod
def checkhierarchy(description):
if 'dma_tx_fft' in description['ip'] \
and 'dma_tx_time' in description['ip'] \
and 'dma_tx_symbol' in description['ip'] \
and 'qpsk_tx' in description['ip']:
return True
return False
class QPSKTxCore(DefaultIP):
"""Driver for QPSK TX's core logic IP
Exposes all the configuration registers by name via data-driven properties
"""
def __init__(self, description):
super().__init__(description=description)
bindto = ['UoS:RFSoC:axi_qpsk_tx:5.3']
# LUT of property addresses for our data-driven properties
_qpsk_props = [("transfer_symbol", 0), ("transfer_fft", 4),
("transfer_time", 60), ("reset_symbol", 8), ("reset_fft", 12),
("reset_time", 48), ("packetsize_symbol", 16),
("packetsize_rf", 20), ("packetsize_fft", 24),
("packetsize_time", 52), ("autorestart_symbol", 36),
("autorestart_fft", 40), ("autorestart_time", 56),
("lfsr_rst", 28), ("enable", 32), ("output_gain", 44)]
# Func to return a MMIO getter and setter based on a relative addr
def _create_mmio_property(addr):
def _get(self):
return self.read(addr)
def _set(self, value):
self.write(addr, value)
return property(_get, _set)
# Generate getters and setters based on _qpsk_props
for (name, addr) in _qpsk_props:
setattr(QPSKTxCore, name, _create_mmio_property(addr))
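    # Illustrative sketch (not part of the original file): once this driver is bound to the
    # 'UoS:RFSoC:axi_qpsk_tx:5.3' IP, each entry of _qpsk_props becomes a register-backed
    # attribute, so control writes are plain assignments, e.g. (handle names assumed from
    # the overlay hierarchy above):
    #
    #   core = overlay.qpsk_tx.qpsk_tx   # QPSKTxCore instance inside the QPSKTx hierarchy
    #   core.enable = 1                  # writes register offset 32
    #   core.packetsize_fft = 1023       # writes register offset 24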
``` |
{
"source": "jogonba2/TWilBert",
"score": 2
} |
#### File: twilbert/optimization/lr_annealing.py
```python
from keras.callbacks import Callback
from keras import backend as K
class Noam(Callback):
def __init__(self, warmup_steps, hidden_dims,
accum_iters, initial_batch):
super().__init__()
self.batch = initial_batch
self.warmup_steps = warmup_steps
self.hidden_dims = hidden_dims
self.accum_iters = accum_iters
def on_batch_end(self, batch, logs={}):
if (self.batch + 1) % self.accum_iters == 0:
new_lr = (self.hidden_dims ** -0.5) * \
min((((self.batch+1) / self.accum_iters) ** (-0.5)),
((self.batch+1) / self.accum_iters) *
(self.warmup_steps ** (-1.5)))
K.set_value(self.model.optimizer.lr, new_lr)
self.batch += 1
```
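A minimal sketch of attaching the Noam schedule above to a Keras model; the toy model, random data and warm-up values are placeholders rather than TWilBert's real training setup, and the import path is inferred from the file layout.

```python
import numpy as np
from keras.models import Sequential
from keras.layers import Dense

from twilbert.optimization.lr_annealing import Noam

# Toy regression model and data, purely to exercise the callback.
x_train = np.random.rand(1024, 128)
y_train = np.random.rand(1024, 1)
model = Sequential([Dense(64, activation='relu', input_shape=(128,)), Dense(1)])
model.compile(optimizer='adam', loss='mse')

# lr = hidden_dims^-0.5 * min(step^-0.5, step * warmup_steps^-1.5), with step = (batch+1)/accum_iters
noam = Noam(warmup_steps=4000, hidden_dims=64, accum_iters=1, initial_batch=0)
model.fit(x_train, y_train, batch_size=32, epochs=2, callbacks=[noam])
```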
#### File: twilbert/utils/activations.py
```python
from keras import backend as K
from keras.layers import Layer
import math
class Gelu(Layer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def call(self, x, **kwargs):
cdf = 0.5 * (1.0 + K.tanh(
(math.sqrt(2 / math.pi) * (x + 0.044715 * K.pow(x, 3)))))
return x * cdf
def compute_output_shape(self, input_shape):
return input_shape
```
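A short sketch of using the Gelu layer above inside a Keras model; the surrounding model is made up, and the import path is inferred from the file layout.

```python
from keras.models import Sequential
from keras.layers import Dense

from twilbert.utils.activations import Gelu

# Gelu is stateless, so it can simply be interleaved between Dense layers.
model = Sequential([
    Dense(128, input_shape=(64,)),
    Gelu(),
    Dense(10, activation='softmax'),
])
model.compile(optimizer='adam', loss='categorical_crossentropy')
model.summary()
```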
#### File: twilbert/utils/finetuning_monitor.py
```python
from sklearn.metrics import (accuracy_score, precision_score,
recall_score, f1_score,
classification_report)
from scipy.stats import pearsonr
from .finetuning_metrics import *
import numpy as np
class FinetuningMonitor:
def __init__(self, monitor_metric="f1", average="macro",
class_metric=None, stance=False,
multi_label=False):
self.step = 0
self.best_step = -1
self.best_value = -1
self.monitor_metric = monitor_metric
self.average = average
self.class_metric = class_metric
self.stance = stance
self.multi_label = multi_label
def __step__(self, truths, preds):
best = False
if self.multi_label:
res = {"accuracy": jaccard_acc(np.array(truths),
np.array(preds))}
else:
res = {"accuracy": accuracy_score(truths, preds),
"precision": precision_score(truths,
preds,
average=self.average),
"recall": recall_score(truths, preds,
average=self.average),
"f1": f1_score(truths, preds,
average=self.average),
"pearson": pearsonr(truths, preds)[0]}
if self.stance:
res["f1"] = mf1_stance(truths, preds)
if self.class_metric:
val = res[self.monitor_metric][self.class_metric]
else:
val = res[self.monitor_metric]
if val > self.best_value:
self.best_value = val
self.best_step = self.step
self.report_classification(res, "dev")
best = True
self.step += 1
return best, res
def __test_step__(self, truths, preds):
if self.multi_label:
res = {"accuracy": jaccard_acc(np.array(truths),
np.array(preds))}
else:
res = {"accuracy": accuracy_score(truths, preds),
"precision": precision_score(truths,
preds,
average=None),
"recall": recall_score(truths, preds,
average=None),
"f1": f1_score(truths, preds, average=None),
"macro-precision": precision_score(truths,
preds,
average="macro"),
"macro-recall": recall_score(truths,
preds,
average="macro"),
"macro-f1": f1_score(truths, preds,
average="macro"),
"pearson": pearsonr(truths, preds)[0]}
if self.stance:
res["macro-f1"] = mf1_stance(truths, preds)
self.report_classification(res, "test")
return res
def report_classification(self, res, sample_set):
if sample_set == "dev":
print("\n\n", "Best at dev, epoch %d\n"
% self.step + "-" * 20 + "\n")
else:
print("\n\n", "Best model evaluated on test\n" + "-" * 20 + "\n")
if self.multi_label:
print("1) Accuracy: %f" % res["accuracy"])
print("\n" + "-" * 20 + "\n")
else:
print("1) Accuracy: %f" % res["accuracy"])
print("2) F1: %s" % (str(res["f1"])))
print("3) Precision: %s"
% (str(res["precision"])))
print("4) Recall: %s"
% (str(res["recall"])))
if sample_set == "test":
print("5) F1 (macro): %s" % (str(res["macro-f1"])))
print("6) Precision (macro): %s"
% (str(res["macro-precision"])))
print("7) Recall (macro): %s"
% (str(res["macro-recall"])))
print("8) Pearson: %f" % res["pearson"])
print("\n" + "-" * 20 + "\n")
```
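A toy sketch of driving the monitor above during fine-tuning; the label vectors are made up and the import path is inferred from the file layout.

```python
from twilbert.utils.finetuning_monitor import FinetuningMonitor

monitor = FinetuningMonitor(monitor_metric="f1", average="macro")

# One dev evaluation per epoch: __step__ reports and remembers the best macro-F1 so far.
truths = [0, 1, 1, 0, 1]
preds = [0, 1, 0, 0, 1]
improved, dev_scores = monitor.__step__(truths, preds)

# When the dev score improves, the best model would then be evaluated on test.
if improved:
    test_scores = monitor.__test_step__(truths, preds)
```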
#### File: twilbert/utils/pretraining_callbacks.py
```python
from keras.callbacks import Callback
from datetime import datetime
class TimeCheckpoint(Callback):
def __init__(self, hours_step, path):
super().__init__()
self.hours_step = hours_step
self.prev_time = datetime.utcnow()
self.act_time = datetime.utcnow()
self.path = path
self.hours = 0
def on_batch_end(self, batch, logs=None):
self.act_time = datetime.utcnow()
diff_hours = (self.act_time-self.prev_time).seconds / 3600
if diff_hours >= 1:
self.hours += 1
self.prev_time = self.act_time
self.act_time = datetime.utcnow()
            self.model.save(self.path + "/checkpoint_%d_hours.hdf5" %
self.hours)
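    # Illustrative sketch (not part of the original file): attach the callback to a Keras
    # fit() call so a checkpoint is written roughly once per hour of wall-clock training,
    # with the running hour count embedded in the file name, e.g.
    #
    #   checkpoint = TimeCheckpoint(hours_step=1, path="./checkpoints")
    #   model.fit(x, y, epochs=100, callbacks=[checkpoint])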
``` |
{
"source": "jogoodma/inspyred-dashboard",
"score": 2
} |
#### File: inspyred-dashboard/tanager/components.py
```python
import dash_core_components as dcc
import dash_html_components as html
def navbar(*args, **kwargs):
"""
"""
return html.Div(children=[
html.Div(children=[
dcc.Link(href="/", children=[
html.I(className="fab fa-earlybirds mr-3"),
html.Span(children='Tanager', className="font-semibold")
]),
], className='mt-8 text-white space-x-5 text-2xl mx-2'),
html.Div(children=[
dcc.Input(
id="experiment-filter",
name="experiment-filter",
type="text",
placeholder="Filter by name",
className="w-2/3 focus:ring-4 focus:ring-blue-300 py-2 px-4 rounded-full",
),
            html.Button(id='dir-refresh', className='text-white active:text-blue-500', title="Refresh experiment list",
children=[
html.I(className='fas fa-redo-alt')
]),
], className='flex justify-around my-4'),
html.Nav(*args, className="overflow-y-auto h-5/6", **kwargs)
], className='w-52 lg:w-64 bg-gray-900 flex flex-col flex-none text-center h-auto'
)
def navbar_item(*args, **kwargs):
"""
"""
children = kwargs.pop('children', [])
children.append(*args)
children.insert(0, html.I(className='fas fa-chart-bar mx-3'))
return dcc.Link(
className='flex items-center py-2 px-6 text-gray-500 hover:bg-gray-700 hover:bg-opacity-25 hover:text-gray-100',
children=children,
**kwargs
)
def graph_panel(*args, **kwargs):
classname = kwargs.pop('className', '') + ' flex flex-col items-center px-5 py-6 shadow-lg rounded-xl bg-white'
return html.Section(*args, className=classname, style={'min-height': '30rem'}, **kwargs)
def get_default_page(config):
return html.Div(children=[
html.H1(config['title'], className="text-6xl font-bold alert-heading"),
html.H2(
config['description'], # "Tanager allows you to visualize Inspyred. "
className='text-2xl text-gray-400 ml-10'
),
html.Hr(className='border border-black'),
html.P(
"Please select the project from the left navigation to get started",
className="mb-0",
)
], className='mt-40'
)
```
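A minimal sketch of composing these components into a Dash app; the experiment names and config values are placeholders, and the import path follows the package layout.

```python
import dash
import dash_html_components as html

from tanager.components import navbar, navbar_item, get_default_page

app = dash.Dash(__name__)

# Sidebar with one link per experiment directory, next to the default landing page.
sidebar = navbar(children=[
    navbar_item("Rastrigin", href="/Rastrigin"),
    navbar_item("Sphere", href="/Sphere"),
])
app.layout = html.Div(
    children=[sidebar, get_default_page({'title': 'Tanager',
                                         'description': 'Visualize Inspyred evolutionary runs.'})],
    className='flex')

if __name__ == '__main__':
    app.run_server(debug=True)
```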
#### File: inspyred-dashboard/tanager/networkgraph.py
```python
import pandas as pd
import plotly.graph_objects as go
def conv(s):
try:
if s:
return int(s)
except ValueError:
try:
return float(s)
except ValueError:
return -1
def readfile(data_file):
data = pd.read_csv(data_file) # , converters={'mom_hash':conv}
return data
def makeInt(item):
str_item = None
if item and item != 'None':
if isinstance(item, float):
str_item = str(format(item, '.0f'))
elif isinstance(item, int):
str_item = str(item)
return str_item
# function below sets the color based on amount
def setColor(fitness):
range = int(format(fitness, '.0f'))
clr = 'grey'
if (range > 50):
clr = "red"
elif (range >= 10 and range <= 50):
clr = "yellow"
elif (range < 10):
clr = "green"
if clr == 'grey':
print(clr)
return clr
def createNetworkGraph(data):
node_x = []
node_y = []
node_text = []
node_fitness = []
for index, row in data.iterrows():
node_x.append(row["generation"])
node_y.append(row["i"])
node_fitness.append(setColor(row['fitness']))
mom_traces, dad_traces, edge_hover_dict = getNetworkEdges(data)
for key in edge_hover_dict.keys():
node_text.append(edge_hover_dict[key])
node_trace = go.Scatter(
x=node_x, y=node_y,
mode='markers',
text=node_text,
# marker=dict(
# showscale=True,
# # colorscale options
# #'Greys' | 'YlGnBu' | 'Greens' | 'YlOrRd' | 'Bluered' | 'RdBu' |
# #'Reds' | 'Blues' | 'Picnic' | 'Rainbow' | 'Portland' | 'Jet' |
# #'Hot' | 'Blackbody' | 'Earth' | 'Electric' | 'Viridis' |
# colorscale='YlGnBu',
# reversescale=True,
# color=[],
# size=10,
# colorbar=dict(
# thickness=15,
# title='Node Connections',
# xanchor='left',
# titleside='right'
# )),
marker=dict(size=1, line_width=1, color=node_fitness),
# marker = dict(color=list(map(SetColor, y))
hoverinfo='text'
)
fig = go.Figure(data=node_trace,
layout=go.Layout(
title='Evolution Network Graph',
xaxis_title='Generation',
yaxis_title='Candidate',
titlefont_size=16,
plot_bgcolor="#FFFFFF",
legend=dict(
# Adjust click behavior
# itemclick="toggleothers",
itemdoubleclick="toggle",
),
xaxis=dict(
title="time",
linecolor="#BCCCDC",
),
yaxis=dict(
title="price",
linecolor="#BCCCDC"
),
showlegend=False,
hovermode='closest',
margin=dict(b=20, l=5, r=5, t=40)
)
)
# Add parent lines.
for trace in mom_traces:
fig.add_trace(trace)
for trace in dad_traces:
fig.add_trace(trace)
return fig
def getNetworkEdges(data):
mom_traces = []
dad_traces = []
edge_hover_dict = {}
generations = data.generation.unique()
for generation in generations:
if generation == 0:
# collect the hover text.
this_generation_rows = data[data['generation'] == generation]
for index, row in this_generation_rows.iterrows():
candidate_value = row["values"]
candidate_fitness = row["fitness"]
edge_hover_text = f"Candidate=[{candidate_value}]<br>" \
f"Fitness={candidate_fitness}"
edge_hover_dict[index] = edge_hover_text
else:
try:
edge_mom_x = []
edge_mom_y = []
edge_dad_x = []
edge_dad_y = []
prev_generation_rows = data[data['generation'] == (generation - 1)]
prev_gen_data = {}
for index, row in prev_generation_rows.iterrows():
candidate_hash = makeInt(row["hash"])
candidate_hash = f'{candidate_hash}'
prev_gen_data[candidate_hash] = row
this_generation_rows = data[data['generation'] == generation]
# Generate the edges.
for index, row in this_generation_rows.iterrows():
candidate_value = row["values"]
candidate_fitness = row["fitness"]
mom_hash = f"{makeInt(row['mom_hash'])}"
dad_hash = f"{makeInt(row['dad_hash'])}"
edge_hover_text = f"Candidate=[{candidate_value}]<br>" \
f"Fitness={candidate_fitness}"
if mom_hash and mom_hash in prev_gen_data.keys():
mom_row = prev_gen_data[mom_hash]
edge_mom_x.append(generation - 1)
edge_mom_y.append(mom_row["i"])
edge_mom_x.append(generation)
edge_mom_y.append(row["i"])
edge_hover_text = f"{edge_hover_text}<br>" \
f"Parent1=[{mom_row['values']}]"
if dad_hash and (mom_hash != dad_hash) and dad_hash in prev_gen_data.keys():
dad_row = prev_gen_data[dad_hash]
edge_dad_x.append(generation - 1)
edge_dad_y.append(dad_row["i"])
edge_dad_x.append(generation)
edge_dad_y.append(row["i"])
edge_hover_text = f"{edge_hover_text}<br>" \
f"Parent2=[{dad_row['values']}]"
edge_hover_dict[index] = edge_hover_text
edge_mom_trace = go.Scatter(
x=edge_mom_x, y=edge_mom_y,
line=dict(width=0.5, color='red'),
hoverinfo='text',
mode='lines')
edge_dad_trace = go.Scatter(
x=edge_dad_x, y=edge_dad_y,
line=dict(width=0.5, color='blue'),
hoverinfo='text',
mode='lines')
mom_traces.append(edge_mom_trace)
dad_traces.append(edge_dad_trace)
# fig.add_trace(edge_mom_trace)
# fig.add_trace(edge_dad_trace)
except Exception as e:
print(e)
# fig.update_traces(mode="markers+lines")
# fig.update_layout(hovermode="closest")
# fig.text(edge_hover_text)
# pd.reset_option.display
return mom_traces, dad_traces, edge_hover_dict
def showNetworkGraph(project_name, data):
    fig = createNetworkGraph(data)
    fig.show()
    return fig
inspyred_data_folder = "/System/Volumes/Data/Personal/Degree/Tools/Inspyred/Code/Git/inspyred/tanager_data"
if __name__ == '__main__':
projects = ['Rastrigin', 'Sphere', 'Ackley', 'Rosenbrock', 'TSM']
# chart_types = ['BestFit', 'AllGenerations', 'Network']
# choosen_problem = f'{problem_types[0]}_{chart_types[2]}'
inspyred_data_folder = "/System/Volumes/Data/Personal/Degree/Tools/Inspyred/Code/Git/inspyred/tanager_data"
for project in projects:
# data_filename = f'{inspyred_data_folder}/{project}/tanager-individuals-file.csv'
print(f"###### Generate Graph {project} ###############")
# Generate the graph.
data_filename = f'{inspyred_data_folder}/{project}/tanager-individuals-file.csv'
data = readfile(data_filename)
fig = showNetworkGraph(project, data)
# fig.show()
# break
# data_full_path = os.path.realpath(data_filename)
# print(data_full_path)
# data = readfile(data_filename)
```
#### File: inspyred-dashboard/tanager/plots.py
```python
import glob as glob
import os.path as path
import sys
import dash_core_components as dcc
import dash_html_components as html
import numpy as np
import pandas as pd
import plotly.figure_factory as ff
import plotly.graph_objects as go
import tanager.utils as tu
from . import networkgraph as gc
from . import populationplots as pp
def read_file(pathname, filename, generation_filter):
df = None
alt = None
stats_path = path.join(pathname, filename)
data_files = glob.glob(stats_path, recursive=False)
try:
if len(data_files) >= 1:
if len(data_files) > 1:
print(f"More than one statistics file found in {path.dirname(stats_path)}.", file=sys.stderr)
print("Only one file will be used.", file=sys.stderr)
file = data_files[0]
print(f"Reading in {file}")
all_df = pd.read_csv(file)
if generation_filter:
df = all_df[all_df.generation.between(generation_filter[0], generation_filter[1])]
else:
df = all_df
else:
alt = html.H3(f'No data file found with name {filename}.')
except IOError as e:
alt = html.H3(f"ERROR: Caught an IOError while reading {filename}:\n{e}")
except ValueError as e:
alt = html.H3(f"ERROR: Caught a ValueError while reading {filename}:\n{e}")
return df, alt
def get_graph_config():
config = {
'displaylogo': False,
'scrollZoom': True,
'displayModeBar': True,
'editable': True,
'modeBarButtonsToRemove': ['toggleSpikelines',
'hoverCompareCartesian'],
'sendData': False
}
return config
def fitness_vs_generation(stats_df: pd.DataFrame, plot_id: str = 'fitness_vs_generation'):
x = stats_df['generation']
y = stats_df['average_fit']
y_upper = y + stats_df['std_fit']
y_lower = y - stats_df['std_fit']
fig = go.Figure([
go.Scatter(
name='Fitness',
x=x, y=y,
mode='lines',
line=dict(color='rgb(31, 119, 180)'),
),
go.Scatter(
name='Upper Bound',
x=x, y=y_upper,
mode='lines',
marker=dict(color="#444"),
line=dict(width=0),
showlegend=False
),
go.Scatter(
name='Lower Bound',
x=x, y=y_lower,
marker=dict(color="#444"),
line=dict(width=0),
mode='lines',
fillcolor='rgba(68, 68, 68, 0.3)',
fill='tonexty',
showlegend=False
)
])
fig.update_layout(
yaxis_title='Fitness',
xaxis_title='Generation',
title='Average Fitness vs Generation',
hovermode="x"
)
return dcc.Graph(id=plot_id, figure=fig, responsive=True, className="h-full w-full",
config=get_graph_config())
def generation_distribution(stats_df: pd.DataFrame, generation: int = 0):
# Select all individuals for the given generation number.
fitness_vals = stats_df[stats_df["generation"] == generation]["fitness"]
fig = ff.create_distplot([fitness_vals], [f"Generation {generation}"], show_rug=True, show_hist=False,
curve_type="normal")
fig.update_layout(title_text='Fitness Distribution')
return fig
# def generation_distribution(pathname: str, generation: int = 0, plot_id: str = 'gen-dist-plot'):
# df, alt = read_file(pathname, 'tanager-individuals-file.csv', None)
#
# if alt:
# plot_div = alt
# else:
# # Select all individuals for the given generation number.
# fitness_vals = df[df["generation"].isin([generation])]["fitness"]
# # fig = px.histogram(generation_df, x="fitness", show_hist=False, histnorm="probability density")
# fig = ff.create_distplot([fitness_vals], [f"Generation {generation}"], show_rug=True, show_hist=False,
# curve_type="normal")
# fig.update_layout(title_text='Fitness Distribution')
# plot_div = dcc.Graph(id=plot_id, figure=fig, responsive=True, className="h-full w-full",
# config=get_graph_config())
#
# return plot_div
def generation_network_graph(df: pd.DataFrame, generation: tuple = (np.NINF, np.inf),
plot_id: str = 'gen-network-plot'):
filtered_df = tu.filter_generation(df, generation)
fig = gc.createNetworkGraph(filtered_df)
return dcc.Graph(id=plot_id, figure=fig, responsive=True, className="h-full w-full",
config=get_graph_config())
def plot_ec_population(df: pd.DataFrame, generation: tuple = (np.NINF, np.inf), plot_id: str = 'ec-population-plot'):
filtered_df = tu.filter_generation(df, generation)
    fig = pp.plot_ec_population(filtered_df)
return dcc.Graph(id=plot_id, figure=fig, responsive=True, className="h-full w-full",
config=get_graph_config())
def plot_ec_stats(df: pd.DataFrame, generation: tuple = (np.NINF, np.inf), plot_id: str = 'ec-population-plot'):
filtered_df = tu.filter_generation(df, generation)
fig = pp.plot_ec_stats(filtered_df)
return dcc.Graph(id=plot_id,
figure=fig,
responsive=True,
className="h-full w-full",
config=get_graph_config())
def plot_stats_table(pathname: str, num_rows: int = 10):
df, alt = read_file(pathname, 'tanager-statistics-file.csv', None)
table = ff.create_table(df.head(num_rows))
# py.iplot(table)
def plot_individual_table(pathname: str, num_rows: int = 10):
df, alt = read_file(pathname, 'tanager-individuals-file.csv', None)
table = ff.create_table(df.head(num_rows))
# py.iplot(table)
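# Illustrative sketch (not part of the original module): build the fitness plot for one
# experiment directory produced by the tanager file observers; the directory path below is
# a placeholder.
if __name__ == '__main__':
    stats_df, alt = read_file('tanager_data/Rastrigin', 'tanager-statistics-file.csv', None)
    if alt is None:
        graph = fitness_vs_generation(stats_df)
        print(graph)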
``` |
{
"source": "jogoon87/investments",
"score": 3
} |
#### File: jogoon87/investments/Dynamic_MeanVariance.py
```python
from math import *
from scipy import interpolate
from scipy.optimize import minimize
import numpy as np
import matplotlib.pyplot as plt
def Select_Model(Model_Name, Parameters):
if(Model_Name == "Simulation"):
return Simulation(Parameters)
elif(Model_Name == "Liu(2001)"):
return Liu2001(Parameters)
elif(Model_Name == "GaussianReturn"):
return GaussianReturn(Parameters)
elif(Model_Name == "Vasicek1977"):
return Vasicek1977(Parameters)
def InsertDB_Portfolio(errcode, dbcursor, fund_id, asset_code, spot, ttm, optStockPath, optHedgingPath):
if(errcode == 1):
dbcursor.execute("INSERT INTO PORTFOLIO (FUND_ID, ASSET_CODE, SPOT, TTM, INVESTMENT, HEDGING)" \
"VALUES (?, ?, ?, ?, ?, ?)", \
(fund_id, asset_code, spot, ttm, optStockPath, optHedgingPath))
dbcursor.commit()
class Simulation:
def __init__(self, args):
""" Arguemtns
---------
Model : General, Liu2001, GaussianReturn, Vasicek1977
"""
print("[Simulation] Warning: Coding in progress...\n")
self.underasset = args[0]
self.rf = args[1]
self.s0 = args[2]
self.x0 = args[3]
self.beta = args[4]
self.delta = args[5]
self.gamma = args[6]
self.rho = args[7]
self.nubar = args[8]
self.xbar = args[9]
self.speed = args[10]
self.Name = "Liu(2001)" # temp
    def Get_Optima(self, state, ttm):
        if(self.Name == "Liu(2001)"):
            # Implementation of Proposition 2. of Basak and Chabakauri (2010), eq (26), p. 14.
            #print("[Simulation] Warning: Check out this function <Get_Optima>.\n")
            #return -1
            print("[Simulation] Monte-Carlo simulation started...\n")
            # Reuse the Liu (2001) process/trapezoid helpers with this object's parameters.
            model = Liu2001([self.underasset, self.rf, self.s0, self.x0, self.beta,
                             self.delta, self.gamma, self.rho, self.nubar, self.xbar,
                             self.speed])
            # simulation settings (assumed discretisation; the work-in-progress original
            # referenced undefined tsteps/delt/Model/TTM)
            numSim = 100
            nSteps = 100
            delt = ttm / nSteps
            tsteps = np.linspace(0.0, ttm, nSteps + 1)
            diffeps = 0.01  # epsilon for the finite-difference bump of the initial state
            RanNumsDif = np.random.randn(numSim, len(tsteps), 2, 2)  # random number generation
            integral = np.zeros((2, numSim))
            # calculate initial solutions only
            for n in range(numSim):
                StatePathDif = np.empty((len(tsteps), 2))
                StockPathDif = np.empty((len(tsteps), 2))
                StatePathDif[0][0] = self.x0
                StatePathDif[0][1] = self.x0 * (1 + diffeps)
                StockPathDif[0][0] = self.s0
                StockPathDif[0][1] = self.s0
                for t in range(1, len(tsteps)):
                    for i in range(2):
                        StatePathDif[t][i], StockPathDif[t][i] = model.Get_Process(
                            delt,
                            StatePathDif[t-1][i],
                            StockPathDif[t-1][i],
                            RanNumsDif[n][t][i][0],
                            RanNumsDif[n][t][i][1])
                        integral[i][n] += model.Get_trapezoid(StatePathDif[t][i], StatePathDif[t-1][i], delt)
            # finite-difference sensitivity of the integral w.r.t. the initial state
            diff = (integral.mean(1)[1] - integral.mean(1)[0]) / (StatePathDif[0][1] - StatePathDif[0][0])
            Optimal_Stock, Hedging_Demand_Ratio = model.Get_Sim_Optima(diff, ttm)
        else:
            print("[Simulation] Warning: <Get_Optima> is not yet implemented for other models.\n")
            Optimal_Stock, Hedging_Demand_Ratio = 0, 0
        return 1, Optimal_Stock, Hedging_Demand_Ratio
class Liu2001:
Name = "Liu2001"
def __init__(self, args):
""" Arguemtns
---------
SV model parms of Liu (2001). See [1], eq. (45) on p. 25.
rf : risk-free interest rate
ttm : time-to-maturiy
s0 : initial stock price
x0 : initial state
beta : elasticity of the market price of risk, delta*sqrt(x_t)
                w.r.t. instantaneous stock return volatility, x_t^{0.5*beta}
delta : risk-premium scale parameters
gamma : risk-aversion
rho : correlation btn s and x
nubar : vol of vol
xbar : long-run variance
speed : mean-reverting speed
exret : excess return
"""
self.underasset = args[0]
self.rf = args[1]
self.s0 = args[2]
self.x0 = args[3]
self.beta = args[4]
self.delta = args[5]
self.gamma = args[6]
self.rho = args[7]
self.nubar = args[8]
self.xbar = args[9]
self.speed = args[10]
def Get_Optima(self, state, ttm):
""" This function returns closed-form optimal stock investment
according to Liu(2001)'s stochastic volatility model.
See p.25 of Basak and Chabakauri (2010)
Input
-----
state : X, currenct state
ttm : time-to-maturity(T-t)
Return
------
opitmal stock investment
hedging demand ratio
"""
Optimal_Stock = (self.delta/self.gamma) * state**((self.beta-1)/(2*self.beta)) * exp(-self.rf*ttm) \
* (1 - self.rho*self.nubar*self.delta * (1 - exp(-(self.speed+self.rho*self.nubar*self.delta)*ttm)) \
/ (self.speed+self.rho*self.nubar*self.delta))
Hedging_Numerator = self.rho*self.nubar*self.delta * (1 - exp(-(self.speed+self.rho*self.nubar*self.delta)*ttm)) \
/ (self.speed+self.rho*self.nubar*self.delta)
Hedging_Demand_Ratio = - Hedging_Numerator / (1.0 - Hedging_Numerator) # hedging ratio
return 1, Optimal_Stock, Hedging_Demand_Ratio
def Get_Process(self, delt, curState, curStock, rand1, rand2):
r = self.rf
beta = self.beta
delta = self.delta
gamma = self.gamma
nubar = self.nubar
speed = self.speed
Xbar = self.xbar
rho = self.rho
# [1] Euler scheme -------------------------------------------
# 1) Variance(state) process
nextState = curState + speed*(Xbar - curState)*delt + nubar*sqrt(curState*delt)*rand1
# 2) Stock process
nextStock = curStock * (1.0 + \
(r + delta*curState**((1+beta)/(2*beta)))*delt \
+ curState**(1/(2*beta))*sqrt(delt)* (rho*rand1 + sqrt(1-rho*rho)*rand2) )
# [2] Mileston -----------------------------------------------
return nextState, nextStock
def Get_Sim_Optima(self, diff, TTM):
#-----------------------------------------------------------------
x0 = self.x0
r = self.rf
beta = self.beta
delta = self.delta
gamma = self.gamma
nubar = self.nubar
speed = self.speed
Xbar = self.xbar
rho = self.rho
OptStkDiff = x0**(-0.5/beta)*exp(-r*TTM) / gamma * (delta * sqrt(x0) - rho*nubar*diff)
tmpnumer = rho*nubar*diff
OptHedDiff = - tmpnumer / (delta*sqrt(x0) - tmpnumer)
return OptStkDiff, OptHedDiff
def Get_trapezoid(self, curState, preState, delt):
#-----------------------------------------------------------------
delta = self.delta
return 0.5*delta*delta*(curState + preState)*delt
class GaussianReturn:
def __init__(self, args):
""" Arguemtns
---------
Time-varying Gaussian mean returns. See [1], eq. (49) on p. 30.
rf : risk-free interest rate
s0 : initial stock price
x0 : initial state
sigma: stock volatility
gamma : risk-aversion
rho : correlation btn s and x
nu : instantaneous variance of the state variave
xbar : long-run variance
speed : mean-reverting speed
"""
self.underasset = args[0]
self.rf = args[1]
self.s0 = args[2]
self.x0 = args[3]
self.sigma= args[4]
self.gamma = args[5]
self.rho = args[6]
self.nu = args[7]
self.xbar = args[8]
self.speed = args[9]
def Get_Optima(self, state, ttm):
""" This function returns closed-form optimal stock investment
        according to the time-varying Gaussian mean-return model.
        See eq. (49), p. 30 of Basak and Chabakauri (2010)
Input
-----
        state : X, current state
ttm : time-to-maturity(T-t)
Return
------
        (stochastic) optimal stock investment
(mean) hedging demand ratio
"""
Optimal_Stock = state/(self.gamma*self.sigma) * exp(-self.rf*ttm) \
- (self.rho*self.nu)/(self.gamma * self.sigma) * \
(self.speed * ( (1.0 - exp(-(self.speed + self.rho*self.nu)*ttm)) / (self.speed + self.rho*self.nu))**2 * self.xbar \
+ (1.0 - exp(-2.0*(self.speed + self.rho*self.nu)*ttm)) / (self.speed + self.rho*self.nu) * state ) * exp(-self.rf*ttm)
Hedging_Numerator = (self.rho*self.nu) * \
(self.speed * ( (1.0 - exp(-(self.speed + self.rho*self.nu)*ttm)) / (self.speed + self.rho*self.nu))**2 \
+ (1.0 - exp(-2.0*(self.speed + self.rho*self.nu)*ttm)) / (self.speed + self.rho*self.nu) )
Mean_Hedging_Demand_Ratio = - Hedging_Numerator / (1.0 - Hedging_Numerator) # hedging ratio
return 1, Optimal_Stock, Mean_Hedging_Demand_Ratio
def Get_Process(self, delt, curState, curStock, rand1, rand2):
r = self.rf
sigma = self.sigma
nu = self.nu
speed = self.speed
Xbar = self.xbar
rho = self.rho
# [1] Euler scheme -------------------------------------------
# 1) Variance(state) process
nextState = curState + speed*(Xbar - curState)*delt + nu*sqrt(curState*delt)*rand1
# 2) Stock process
nextStock = curStock * (1.0 + \
(r + sigma*curState)*delt \
+ sigma*sqrt(delt)*(rho*rand1 + sqrt(1-rho*rho)*rand2) )
return nextState, nextStock
class Vasicek1977:
def __init__(self, args):
""" Arguemtns
---------
Vasicek (1977) stochastic interest rate model. See [1], eq. (63) on p. 37.
ttm : time-to-maturiy
s0 : initial stock price
x0 : initial state(=r0)
sigma: stock volatility
gamma : risk-aversion
rho : correlation btn s and r
sigmar : instantaneous volatility of the interest rate
rbar : long-run interest rate
speed : mean-reverting speed
mu : (constant) stock return
"""
self.underasset = args[0]
self.mu = args[1]
self.s0 = args[2]
self.x0 = args[3]
self.sigma= args[4]
self.sigmar = args[5]
self.gamma = args[6]
self.rho = args[7]
self.rbar = args[8]
self.speed = args[9]
def Get_Optima(self, rt, ttm):
""" This function returns closed-form optimal stock investment
        according to the Vasicek (1977) stochastic interest rate model.
        See eq. (63), p. 37 of Basak and Chabakauri (2010)
Input
-----
rt : r(t)
ttm : time-to-maturity(T-t)
Return
------
        (stochastic) optimal stock investment
(mean) hedging demand ratio
"""
Optimal_Stock = (self.mu-rt)/(self.gamma*self.sigma**2) \
- (self.rho*self.sigmar)/(self.gamma * self.sigma) * \
( self.speed * \
( (1.0 - exp(-(self.speed - self.rho*self.sigmar/self.sigma)*ttm)) / \
(self.speed - self.rho*self.sigmar/self.sigma))**2 * (self.mu-self.rbar)/self.sigma \
+ (1.0 - exp(-2.0*(self.speed - self.rho*self.sigmar/self.sigma)*ttm)) / \
(self.speed - self.rho*self.sigmar/self.sigma) * (self.mu-rt)/self.sigma )
Hedging_Numerator = (self.rho*self.sigmar) * ( self.speed * \
( (1.0 - exp(-(self.speed - self.rho*self.sigmar/self.sigma)*ttm)) / \
(self.speed - self.rho*self.sigmar/self.sigma))**2 \
+ (1.0 - exp(-2.0*(self.speed - self.rho*self.sigmar/self.sigma)*ttm)) / \
(self.speed - self.rho*self.sigmar/self.sigma) )
Mean_Hedging_Demand_Ratio = - Hedging_Numerator / (1.0 - Hedging_Numerator) # hedging ratio
return 1, Optimal_Stock, Mean_Hedging_Demand_Ratio
def Get_Process(self, delt, curState, curStock, rand1, rand2):
mu = self.mu
sigma = self.sigma # stock volatility
sigmar = self.sigmar # interest-rate volatility
speed = self.speed
rbar = self.rbar
rho = self.rho
# [1] Euler scheme -------------------------------------------
# 1) Interest-rate(state) process
nextState = curState + speed*(rbar - curState)*delt + sigmar*sqrt(curState*delt)*rand1
# 2) Stock process
nextStock = curStock * (1.0 + \
mu*delt \
+ sigma*sqrt(delt)*(rho*rand1 + sqrt(1-rho*rho)*rand2) )
return nextState, nextStock
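# Illustrative usage sketch (not part of the original file): evaluate the Liu (2001)
# closed-form optimum for made-up, uncalibrated parameter values.
if __name__ == '__main__':
    params = ["KOSPI200",  # underlying asset label
              0.02,        # rf
              100.0,       # s0
              0.04,        # x0 (initial variance state)
              1.0,         # beta
              4.0,         # delta
              2.0,         # gamma (risk aversion)
              -0.5,        # rho
              0.25,        # nubar
              0.04,        # xbar
              5.0]         # speed
    model = Select_Model("Liu(2001)", params)
    _, optimal_stock, hedging_ratio = model.Get_Optima(state=0.04, ttm=1.0)
    print(optimal_stock, hedging_ratio)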
``` |
{
"source": "jogo/vulture",
"score": 3
} |
#### File: vulture/tests/test_conditions.py
```python
import ast
import sys
from vulture import utils
from . import check_unreachable
from . import v
assert v # Silence pyflakes
def check_condition(code, result):
condition = ast.parse(code, mode='eval').body
if result:
assert utils.condition_is_always_true(condition)
else:
assert utils.condition_is_always_false(condition)
def test_false():
check_condition('False', False)
check_condition('None', False)
check_condition("0", False)
# Only Python 3.0-3.6 allows addition and subtraction in ast.literal_eval.
# (see https://bugs.python.org/issue31778)
if (3, 0) <= sys.version_info < (3, 7):
check_condition("1 - 1", False)
def test_empty():
check_condition("''", False)
check_condition("[]", False)
check_condition("{}", False)
def test_true():
check_condition("True", True)
check_condition("2", True)
check_condition("['foo', 'bar']", True)
check_condition("{'a': 1, 'b': 2}", True)
def test_complex_conditions():
conditions = [
('foo and False', True, False),
('foo or False', False, False),
('foo and True', False, False),
('foo or True', False, True),
('False and foo', True, False),
('False and 1', True, False),
('not False', False, True),
('not True', True, False),
('not foo', False, False),
('foo and (False or [])', True, False),
('(foo and bar) or {"a": 1}', False, True),
]
for condition, always_false, always_true in conditions:
condition = ast.parse(condition, mode='eval').body
assert not (always_false and always_true)
assert utils.condition_is_always_false(condition) == always_false
assert utils.condition_is_always_true(condition) == always_true
def test_errors():
conditions = [
'foo',
'__name__ == "__main__"',
'chr(-1)',
'getattr(True, "foo")',
'hasattr(str, "foo")',
'isinstance(True, True)',
'globals()',
'locals()',
'().__class__',
]
for condition in conditions:
condition = ast.parse(condition, mode='eval').body
assert not utils.condition_is_always_false(condition)
assert not utils.condition_is_always_true(condition)
def test_while(v):
v.scan("""\
while False:
pass
""")
check_unreachable(v, 1, 2, 'while')
def test_while_nested(v):
v.scan("""\
while True:
while False:
pass
""")
check_unreachable(v, 2, 2, 'while')
def test_if_false(v):
v.scan("""\
if False:
pass
""")
check_unreachable(v, 1, 2, 'if')
def test_elif_false(v):
v.scan("""\
if bar():
pass
elif False:
print("Unreachable")
""")
check_unreachable(v, 3, 2, 'if')
def test_nested_if_statements_false(v):
v.scan("""\
if foo():
if bar():
pass
elif False:
print("Unreachable")
pass
elif something():
print("Reachable")
else:
pass
else:
pass
""")
check_unreachable(v, 4, 3, 'if')
def test_if_false_same_line(v):
v.scan("""\
if False: a = 1
else: c = 3
""")
check_unreachable(v, 1, 1, 'if')
def test_if_true(v):
v.scan("""\
if True:
a = 1
b = 2
else:
c = 3
d = 3
""")
# For simplicity, we don't report the "else" line as dead code.
check_unreachable(v, 5, 2, 'else')
def test_if_true_same_line(v):
v.scan("""\
if True:
a = 1
b = 2
else: c = 3
d = 3
""")
check_unreachable(v, 4, 1, 'else')
def test_nested_if_statements_true(v):
v.scan("""\
if foo():
if bar():
pass
elif True:
if something():
pass
else:
pass
elif something_else():
print("foo")
else:
print("bar")
else:
pass
""")
check_unreachable(v, 9, 4, 'else')
``` |
{
"source": "jogrundy/outlier_detection",
"score": 2
} |
#### File: jogrundy/outlier_detection/gpu_run.py
```python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from test_data import generate_test
from time import time
from sklearn.svm import OneClassSVM
from admm_graph_OP_tweak import GOP
from Simple_GOP import SGOP
from outlier_pursuit import outlier_pursuit
from ae import get_ae_losses
from vae import get_vae_losses
from sklearn.ensemble import IsolationForest
from gru import get_GRU_os, get_LSTM_os
from sklearn import metrics
from sklearn.cluster import DBSCAN
from sklearn.mixture import GaussianMixture
from var import get_VAR_OS
import os
import stopit
import datetime
plt.rcParams.update({'font.size': 18})
class TimeoutException(Exception):
def __init__(self, time):
Exception.__init__(self, 'timeout after {}s'.format(time))
def ese(pred, target):
"""
takes in predicted values and actual values, returns elementwise squared error
via (x-y)^2
"""
errs = (pred - target)**2
return errs
def OLS_err(X_train, y_train, X, y):
"""
takes in train test split returns elementwise error for whole dataset.
"""
reg = linear_model.LinearRegression()
reg.fit(X_train, y_train)
pred = reg.predict(X)
return ese(pred, y)
def ridge_err(X_train, y_train, X, y):
"""
takes in train test split returns elementwise error for whole dataset.
"""
reg = linear_model.Ridge()
reg.fit(X_train, y_train)
pred = reg.predict(X)
return ese(pred, y)
def lasso_err(X_train, y_train, X, y):
"""
takes in train test split returns elementwise error for whole dataset.
"""
reg = linear_model.Lasso()
reg.fit(X_train, y_train)
pred = reg.predict(X)
return ese(pred, y)
def get_reg_os(X):
n,p = X.shape
err_sum = np.zeros(n)
for i in range(p):
inds = np.arange(p)
inds = inds
X_x = np.delete(X, i, axis=1)
y_y = X[:,i]
X_train, X_test, y_train, y_test = train_test_split(X_x, y_y)
err = OLS_err(X_train, y_train, X_x, y_y)
err_sum +=err
return err_sum/n
def get_ridge_os(X):
n,p = X.shape
err_sum = np.zeros(n)
for i in range(p):
inds = np.arange(p)
inds = inds
X_x = np.delete(X, i, axis=1)
y_y = X[:,i]
X_train, X_test, y_train, y_test = train_test_split(X_x, y_y)
err = ridge_err(X_train, y_train, X_x, y_y)
err_sum +=err
return err_sum/n
def get_LASSO_os(X):
n,p = X.shape
err_sum = np.zeros(n)
for i in range(p):
inds = np.arange(p)
inds = inds
X_x = np.delete(X, i, axis=1)
y_y = X[:,i]
X_train, X_test, y_train, y_test = train_test_split(X_x, y_y)
err = lasso_err(X_train, y_train, X_x, y_y)
err_sum +=err
return err_sum/n
# The testing algorithms
#regression
def test_VAR(X):
os = get_VAR_OS(X)
return os
def test_OLS(X):
"""
takes in only data 'X', in samples as rows format
returns only list of outlier scores for each sample
higher score = more outlier
"""
losses = get_reg_os(X)
# print(len(losses))
#loss here is summed elementwise errors
return losses
def test_Ridge(X):
"""
takes in only data 'X', in samples as rows format
returns only list of outlier scores for each sample
higher score = more outlier
"""
losses = get_ridge_os(X)
# print(len(losses))
#loss here is summed elementwise errors
return losses
def test_LASSO(X):
"""
takes in only data 'X', in samples as rows format
returns only list of outlier scores for each sample
higher score = more outlier
"""
losses = get_LASSO_os(X)
# print(len(losses))
#loss here is summed elementwise errors
return losses
# testing algorithms
#density
def test_OCSVM(X):
"""
takes in only data 'X'
returns only list of outlier scores for each sample
higher score = more outlier
"""
clf = OneClassSVM(gamma='scale')
clf.fit(X)
dists = clf.decision_function(X)*-1
return dists #largest is now most outlier
def test_GMM(X):
"""
takes in only data 'X', in samples as rows format
returns only list of outlier scores for each sample
higher score = more outlier
"""
k = 3
# arr, pi_mu_sigs,i = em(X, k, 1000)
# log_likelihoods = log_Ls(X, pi_mu_sigs)
clf = GaussianMixture(n_components=k)
clf.fit(X)
scores = clf.score_samples(X)*-1 # returns log probs for data
return scores #to give in higher score = more outlier
def test_IF(X):
clf = IsolationForest()#contamination='auto', behaviour='new')
clf.fit(X)
os = clf.decision_function(X)
return os*-1 # average number splits to isolation. small is outlier.
def test_DBSCAN(X):
"""
takes in only data 'X', in samples as rows format
DBSCAN from sklearn returns -1 as label for outliers.
use from scartch implementaiton and get distance from nn as os
returns only list of outlier scores for each sample
higher score = more outlier
own implementation is very slow for higher N..
"""
n,p = X.shape
eps = 0.3 #normalised data
if int(n//20) < 3:
minnum = 3
elif int(n//20) > 100:
minnum = 100
else:
minnum = int(n//20)
# point_classes, cl, os = dbscan(X, eps, minnum)
clf = DBSCAN(eps=eps, min_samples=minnum)
classes = clf.fit_predict(X)
# print(classes)
#returns only in class or out of class binary classification
i = -1
n_found = 0
cl_sizes = {}
while n_found <n:
n_found_inds = len(np.where(classes == i)[0])
n_found += n_found_inds
# print(i, n_found_inds)
cl_sizes[i] = n_found_inds
i+=1
# print(cl_sizes)
cl_lst = [i[0] for i in sorted(cl_sizes.items(), key=lambda k:k[1], reverse=True)]
# print(cl_lst)
n_classes = len(cl_lst)
# most populous group get score zero, then 1, 2, etc..
os = [n_classes if x<0 else x for x in classes]
# print(os)
# raise
# os = [1 if x < 0 else 0 for x in classes]
return np.array(os)
# deep learning algorithms
def test_VAE(X):
"""
takes in only data 'X', in samples as rows format
returns only list of outlier scores for each sample
higher score = more outlier
"""
losses = get_vae_losses(X)
# print(losses[:10])
#gives reconstruciton error from AE, should be largest for outliers
return losses
def test_AE(X):
"""
takes in only data 'X', in samples as rows format
returns only list of outlier scores for each sample
higher score = more outlier
"""
losses = get_ae_losses(X)
#gives reconstruciton error from AE, should be largest for outliers
return losses
def test_GRU(X):
"""
takes in only data 'X', in samples as rows format
returns only list of outlier scores for each sample
higher score = more outlier
"""
errs = get_GRU_os(X)
#gives error from GRU, should be largest for outliers
return errs
def test_LSTM(X):
"""
takes in only data 'X', in samples as rows format
returns only list of outlier scores for each sample
higher score = more outlier
"""
errs = get_LSTM_os(X)
#gives error from LSTM, should be largest for outliers
errs = np.array(errs).reshape(-1)
return errs
# Matrix methods
def test_OP(X):
"""
takes in only data 'X', in samples as rows format
returns only list of outlier scores for each sample
higher score = more outlier
"""
lamb = 0.5
M = X.T
L_hat, C_hat, count = outlier_pursuit(M, lamb)
return np.sum(C_hat, axis=0)
def test_GOP(X):
"""
takes in only data 'X', in samples as rows format
returns only list of outlier scores for each sample
higher score = more outlier
"""
lamb = 0.5
gamma = 0.1
M = X.T
S_hat = GOP(M, lamb, gamma)
return np.sum(S_hat, axis=0)
def test_SGOP(X):
"""
takes in only data 'X', in samples as rows format
returns only list of outlier scores for each sample
higher score = more outlier
"""
lamb = 0.5
gamma = 0.1
M = X.T
S_hat = SGOP(M, lamb, gamma)
return np.sum(S_hat, axis=0)
# end of testing algorithms
def test_algo(X, outs, algo, metric):
"""
takes in algorithm 'algo', data 'X', with outlier indices 'outs'
    returns (fp rate, auc) as given by the metric functions in 'metric'
algo must have input only X
"""
outlier_scores = algo(X)
fps = metric[1](outlier_scores, outs)
aucs = metric[0](outlier_scores, outs)
return fps, aucs
def contour_fp_algo(n, p, r, ta, n_steps, n_runs, gamma, algo, metric):
"""
does 2d contour plot varying
p_frac - number of parameters changed in the outliers
p_quant - amount each parameter is varied by
ie when both are 0, there are no outliers in terms of data
"""
# step_size = 1/n_steps
pf = np.linspace(0,1,n_steps)
pq = np.linspace(0,1,n_steps)
fps = []
for p_frac in pf:
# print(p_frac)
fp_row=[]
for p_quant in pq:
# print(p_quant)
runs=[]
for i in range(n_runs):
# print('run {}'.format(i))
# print(n,p,r)
                la_err = True
                while la_err:
                    try:
                        X, outs = generate_test(n, p, r, p_frac, p_quant, gamma, ta)
                        fp, auc = test_algo(X, outs, algo, metric)
                        la_err = False
                    except np.linalg.LinAlgError as err:
                        if 'Singular matrix' in str(err):
                            la_err = True
                            print('redoing due to singular matrix err')
runs.append(fp)
# print(runs)
fp_row.append(np.mean(runs))
fps.append(fp_row)
fpz = np.array(fps)
# print(fps)
return pf, pq, fpz
def auc(est_out_scores, outs):
"""
measures how good the separation is between outliers and inliers
uses auc
uses the estimated outlier score from each algorithm.
"""
n = len(est_out_scores)
actual_os = [1 if i in outs else 0 for i in range(n)]
try:
fpr, tpr, thresholds = metrics.roc_curve(actual_os, est_out_scores)
except:
print(actual_os[:10], est_out_scores[:10])
# print(metrics.auc(fpr, tpr))
raise
return metrics.auc(fpr, tpr)
def separation_metric(est_out_scores, outs):
"""
measures how good the separation is between outliers and inliers
uses number of false positives found after finding all outliers
uses the estimated outlier score from each algorithm.
higher score = more outlier
"""
# n = len(est_out_scores)
#
# actual_os = [1 if i in outs else 0 for i in range(n)]
# fpr, tpr, thresholds = metrics.roc_curve(actual_os, est_out_scores)
# print(fpr)
# # print(fpr, tpr, thresholds)
# # print(metrics.auc(fpr, tpr))
# return fpr
inds = np.flip(np.argsort(est_out_scores)) #gives indices in size order
n = len(est_out_scores)
for i in range(n):
# print(inds[:i])
# print(outs)
if len(np.setdiff1d(outs,inds[:i]))==0: #everything in outs is also in inds
fps = len(np.setdiff1d(inds[:i], outs)) #count the things in inds not in outs
return fps/i
return 1
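# Worked example for the two metrics above (illustrative only, not part of the
# original pipeline): with est_out_scores = [1, 2, 3, 9, 8] and true outliers
# outs = [3, 4], sorting by score descending gives indices [3, 4, 2, 1, 0].
# Both outliers are recovered before any inlier, so separation_metric returns
# 0/2 = 0.0 (no false positives) and auc returns 1.0 (perfect ranking); ties
# between inlier and outlier scores push both values away from these ideals.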
def plot_each_algo_for_each_ta(n, p, r, ta_lst, n_steps, n_runs, gamma, algo_list, str_algo_lst, metric):
w = len(algo_list)
v = len(ta_lst)
t0 = time()
plt.figure(figsize=(10,10))
for j in range(v):
for i in range(w):
t1 = time()
algo = algo_list[i]
# ta = ta_lst[j]
print('{}'.format(str_algo_lst[i]))
            pf, pq, fpz = contour_fp_algo(n, p, r, j+1, n_steps, n_runs, gamma, algo, metric)
            zs = np.round(100*(fpz.size-np.count_nonzero(fpz))/fpz.size) # gives percentage of zeros
plt.subplot(v,w, (w*j + i)+1)
label = 'ta_{}'.format(j+1)
plt.title('FPs avg. {} runs for {}'.format(n_runs, label))
plt.contourf(pf, pq, fpz)
plt.colorbar()
plt.xlabel('p_frac')
plt.ylabel('p_quant')
plt.annotate(str_algo_lst[i], (0.8,0.9))
plt.annotate('{}% zero'.format(int(zs)), (0.8,0.8))
t2 = time()-t1
print('Algorithm {} with data {} took {}m and {}s to run {} times '.format(str_algo_lst[i],
label,
int(t2//60),
int(t2%60),
n_steps*n_runs*n_steps))
t3 = time()-t0
print('Took {}m {}s to run all algorithms'.format(int(t3//60),int(t3%60)))
fname = './images/test_n{}_p{}_r{}_FPta_plot.eps'.format(n,p,r)
plt.savefig(fname, bbox_inches='tight', pad_inches=0)
plt.show()
def contour_auc_pfq(pf_lst, pq_lst, r, noise, ta, n, p, n_runs, gamma,algo, algo_str,metric, timeout, outlier_type):
"""
does 2d contour plot
varying p frac and p_quant, using ceiling, so always at least 1 outlier
"""
all_name = './results/{}_pfq_all.txt'.format(timestamp)
if not os.path.isfile(all_name):
with open(all_name, 'w') as f:
info = '{}, pfq,{},runs={},n={},p={},ta={}\n'.format(timestamp, outlier_type,
n_runs,n,p,ta)
f.write(info)
fps = []
aucs = []
for p_frac in pf_lst:
# print(p_frac)
fp_row=[]
auc_row=[]
succeed = True
for p_quant in pq_lst:
Fail = False
t0 = time()
# print(p_quant)
fp_runs=[]
auc_runs=[]
# n = 10**n_pow
# p = 2**p_pow
for i in range(n_runs):
la_err = True
while la_err and succeed:
try:
X, outs = generate_test(n, p, r, p_frac, p_quant, gamma, noise, ta=ta, nz_cols=None, outlier_type=outlier_type)
with stopit.ThreadingTimeout(timeout) as ctx_mgr:
fp, auc = test_algo(X, outs, algo, metric)
if ctx_mgr.state==ctx_mgr.TIMED_OUT:
raise TimeoutException(timeout)
la_err = False
# print('got to end of try')
except np.linalg.LinAlgError as err:
if 'Singular matrix' in str(err):
la_err = True
print('redoing due to singular matrix err')
else:
# print(err)
print('some other linalg error')
raise(err)
except TimeoutException as err:
# print('timeout after {}s'.format(timeout))
succeed = False
#want it not to bother to run another run,
#and not to bother trying the next n_pow up
# raise(err)
if succeed:
fp_runs.append(fp)
auc_runs.append(auc)
else:
break
t1 = time() - t0
if Fail:
Fail = False
fp_row.append(np.nan)
auc_row.append(np.nan)
print('n={}, p={}, Failed, LinAlgError'.format(n, p))
elif not succeed:
print('n={}, p={}, Failed, Timeout after {}s'.format(n, p, timeout))
fp_row.append(np.nan)
auc_row.append(np.nan)
with open(all_name, 'a') as f:
fp_str = '{}, {}, {}, {}, {}, {}\n'.format(algo_str, ta, 'fps',n,p, np.nan)
auc_str = '{}, {}, {}, {}, {}, {}\n'.format(algo_str, ta, 'auc',n,p, np.nan)
f.write(fp_str)
f.write(auc_str)
else:
# print(runs)
fp_row.append(np.mean(fp_runs))
auc_row.append(np.mean(auc_runs))
#saving raw data to file
with open(all_name, 'a') as f:
fp_str = '{}, {}, {}, {}, {}, '.format(algo_str, ta, 'fps',p_frac,p_quant)
fp_str = fp_str+''.join(['%0.3f, '])*len(fp_runs)%tuple(fp_runs)+'\n'
auc_str = '{}, {}, {}, {}, {}, '.format(algo_str, ta, 'auc',p_frac,p_quant)
auc_str = auc_str+''.join(['%0.3f, '])*len(auc_runs)%tuple(auc_runs)+'\n'
f.write(fp_str)
f.write(auc_str)
print('p_frac={}, quant={}, runs={}, time= {}m {}s'.format(round(p_frac,3), round(p_quant,3), n_runs, int(t1//60),int(t1%60)))
fps.append(fp_row)
aucs.append(auc_row)
fpz = np.array(fps)
aucz = np.array(aucs)
# print(fps)
return fpz, aucz
def get_auc_noise(p_frac, p_quant, r, noise_list, ta, n, p, n_runs, gamma,algo, metric, timeout, outlier_type):
"""
runs each algorithm with varying amounts of noise on ta given.
"""
all_name = './results/{}_noise_all.txt'.format(timestamp) #lazy programming using global
if not os.path.isfile(all_name):
with open(all_name, 'w') as f:
info = '{}, {}, {}, {}, {}, {}, '.format('algo','ta', 'n', 'p','fps', 'noise')
# print(len(np.arange(n_runs)), n_runs)
info2 = ''.join(['%d, '])*n_runs%tuple(np.arange(n_runs)+1)
# format([np.arange(n_runs)+1])
# print(info, info2)
f.write(info+info2[:-2]+'\n')
# print(info+info2[:-2]+'\n')
# raise
fps = []
aucs = []
for noise in noise_list:
Fail = False
t0 = time()
fp_runs=[]
auc_runs=[]
succeed=True
for i in range(n_runs):
la_err = True
while la_err and succeed:
try:
X, outs = generate_test(n, p, r, p_frac, p_quant, gamma, noise, ta=ta, nz_cols=None, outlier_type=outlier_type)
with stopit.ThreadingTimeout(timeout) as ctx_mgr:
fp, auc = test_algo(X, outs, algo, metric)
if ctx_mgr.state==ctx_mgr.TIMED_OUT:
raise TimeoutException(timeout)
la_err = False
# print('got to end of try')
except np.linalg.LinAlgError as err:
if 'Singular matrix' in str(err):
la_err = True
print('redoing due to singular matrix err')
else:
# print(err)
print('some other linalg error')
raise(err)
except TimeoutException as err:
# print('timeout after {}s'.format(timeout))
succeed = False
if succeed:
fp_runs.append(fp)
auc_runs.append(auc)
else:
break
t1 = time() - t0
if Fail:
Fail = False
            fps.append(np.nan)
            aucs.append(np.nan)
            print('n={}, p={}, Failed, LinAlgError'.format(n, p))
        elif not succeed:
            print('n={}, p={}, Failed, Timeout after {}s'.format(n, p, timeout))
            fps.append(np.nan)
            aucs.append(np.nan)
            with open(all_name, 'a') as f:
                fp_str = '{}, {}, {}, {}, {}, {}\n'.format(algo, ta, 'fps', n, p, np.nan)
                auc_str = '{}, {}, {}, {}, {}, {}\n'.format(algo, ta, 'auc', n, p, np.nan)
f.write(fp_str)
f.write(auc_str)
else:
# print(runs)
fps.append(np.mean(fp_runs))
aucs.append(np.mean(auc_runs))
with open(all_name, 'a') as f:
fp_str = '{}, {}, {}, {}, {}, {}, '.format(algo, ta, n,p,'fps',noise)
fp_str = fp_str+''.join(['%0.3f, '])*len(fp_runs)%tuple(fp_runs)+'\n'
auc_str = '{}, {}, {}, {}, {}, {}, '.format(algo, ta, n,p,'auc',noise)
auc_str = auc_str+''.join(['%0.3f, '])*len(auc_runs)%tuple(auc_runs)+'\n'
f.write(fp_str)
f.write(auc_str)
# print('p_frac={}, quant={}, runs={}, time= {}m {}s'.format(round(p_frac,3), round(p_quant,3), n_runs, int(t1//60),int(t1%60)))
print('noise={}, runs={}, time= {}m {}s'.format(noise, n_runs, int(t1//60),int(t1%60)))
# fps.append(fp_row)
# aucs.append(auc_row)
fpz = np.array(fps)
aucz = np.array(aucs)
# print(fps)
return fpz, aucz
def contour_fp_np(n_lst, p_lst, r, noise, ta, p_quant, p_frac, n_runs, gamma,algo,algo_str, metric, timeout, nz_cols, outlier_type):
"""
does 2d contour plot varying
n - number of samples
p - number of features
with 0.2 p frac and p_quant, using ceiling, so always at least 1 outlier
"""
# step_size = 1/n_steps
# p_quant = 0.2
# p_frac = 0.2
all_name = './results/{}_np_all.txt'.format(timestamp)
if not os.path.isfile(all_name):
with open(all_name, 'w') as f:
info = '{}, np,{},runs={},p_frac={},p_quant={},ta={}\n'.format(timestamp, outlier_type,
n_runs,p_frac,p_quant,ta)
f.write(info)
fps = []
aucs = []
for p_pow in p_lst:
# print(p_frac)
fp_row=[]
auc_row=[]
succeed = True
for n_pow in n_lst:
Fail = False
t0 = time()
# print(p_quant)
fp_runs=[]
auc_runs=[]
n = 10**n_pow
p = 2**p_pow
for i in range(n_runs):
la_err = True
while la_err and succeed:
try:
X, outs = generate_test(n, p, r, p_frac, p_quant, gamma, noise, ta=ta, nz_cols=nz_cols, outlier_type=outlier_type)
with stopit.ThreadingTimeout(timeout) as ctx_mgr:
fp, auc = test_algo(X, outs, algo, metric)
if ctx_mgr.state==ctx_mgr.TIMED_OUT:
raise TimeoutException(timeout)
la_err = False
# print('got to end of try')
except np.linalg.LinAlgError as err:
if 'Singular matrix' in str(err):
la_err = True
print('redoing due to singular matrix err')
                        elif 'SVD did not converge' in str(err):
la_err = True
print('redoing due to SVD not converging')
else:
# print(err)
print('some other linalg error')
raise(err)
except TimeoutException as err:
# print('timeout after {}s'.format(timeout))
succeed = False
#want it not to bother to run another run,
#and not to bother trying the next n_pow up
# raise(err)
if succeed:
fp_runs.append(fp)
auc_runs.append(auc)
else:
break
t1 = time() - t0
if Fail:
Fail = False
fp_row.append(np.nan)
auc_row.append(np.nan)
print('n={}, p={}, Failed, LinAlgError'.format(n, p))
elif not succeed:
print('n={}, p={}, Failed, Timeout after {}s'.format(n, p, timeout))
fp_row.append(np.nan)
auc_row.append(np.nan)
with open(all_name, 'a') as f:
fp_str = '{}, {}, {}, {}, {}, {}\n'.format(algo_str, ta, 'fps',n,p, np.nan)
auc_str = '{}, {}, {}, {}, {}, {}\n'.format(algo_str, ta, 'auc',n,p, np.nan)
f.write(fp_str)
f.write(auc_str)
else:
# print(runs)
fp_row.append(np.mean(fp_runs))
auc_row.append(np.mean(auc_runs))
#saving raw data to file
with open(all_name, 'a') as f:
fp_str = '{}, {}, {}, {}, {}, '.format(algo_str, ta, 'fps',n,p)
fp_str = fp_str+''.join(['%0.3f, '])*len(fp_runs)%tuple(fp_runs)+'\n'
auc_str = '{}, {}, {}, {}, {}, '.format(algo_str, ta, 'auc',n,p)
auc_str = auc_str+''.join(['%0.3f, '])*len(auc_runs)%tuple(auc_runs)+'\n'
f.write(fp_str)
f.write(auc_str)
print('n={}, p={}, runs={}, time= {}m {}s'.format(n, p, n_runs, int(t1//60),int(t1%60)))
fps.append(fp_row)
aucs.append(auc_row)
fpz = np.array(fps)
aucz = np.array(aucs)
# print(fps)
return fpz, aucz
def plot_each_algo_for_pfq(pf_lst, pq_lst, r, gamma, noise, ta, n, p, n_runs,
algo_list, algo_type, metric, metric_str,
timeout, timestamp, outlier_type):
w = len(algo_list)
v = 1
t0 = time()
fig_size_x = len(algo_list)*5
fig_size_y = 5
plt.figure(figsize=(fig_size_x, fig_size_y))
ts = []
# fp_score = []
# auc_score = []
for i in range(w):
# To Do: code up keeping the colour bar max and min constant, 0 to 1 - done
t1 = time()
        algo_str = algo_list[i]
algo = algo_dict[algo_str]
print('{}'.format(algo_str))
        fpz, aucz = contour_auc_pfq(pf_lst, pq_lst, r, noise, ta, n, p, n_runs, gamma, algo, algo_str, metric, timeout, outlier_type)
plt.subplot(v,w, (w*0 + i)+1)
label = 'ta_{}'.format(ta)
plt.title('{}'.format(algo_str))
plt.contourf(pf_lst, pq_lst, aucz, np.arange(0,1+1e-8,0.05),vmin=0, vmax=1)
if i == w-1:
plt.colorbar()
plt.xlabel (r'p frac')
plt.ylabel(r'p quant')
t2 = time()-t1
# plt.annotate('{}m {}s'.format(int(t2//60),int(t2%60)), (0.8,0.8) )
ts.append(t2)
# fp_score.append(fpz)
# auc_score.append(aucz)
print('Algorithm {} with data {} took {}m and {}s to run {} times'.format(algo_str,
label,
int(t2//60),
int(t2%60),
len(pf_lst)*len(pq_lst)*n_runs))
t3 = time()-t0
print('Took {}m {}s to run all {} algorithms'.format(int(t3//60),int(t3%60), algo_type))
fname = './images/{}_pfq_{}_n_{}_p_{}_ta{}.eps'.format(timestamp,
algo_type, n, p, ta)
plt.savefig(fname, bbox_inches='tight', pad_inches=0)
# txt_fname='./results/{}_pfq_results.txt'.format(timestamp)
#
# raw_fname='./results/{}_pfq_raw.txt'.format(timestamp)
# if os.path.isfile(raw_fname):
# with open(raw_fname, 'a') as f:
# # info = '{}_np_{}_pfrac_{}_pquant_{}_ta{}\n'.format(timestamp,
# # algo_type, p_frac, p_quant, ta)
# # f.write(info)
# for i in range(len(algo_lst)):
# fps = fp_score[i].flatten()
# fp_str = algo_lst[i]+ ', ' + str(ta) +', ' + outlier_type+', fps, '+''.join(['%0.3f, '])*len(fps)%tuple(fps)
# fp_str = fp_str[:-2]+'\n'
# f.write(fp_str)
# aucs = auc_score[i].flatten()
# auc_str = algo_lst[i]+', ' + str(ta) +', ' + outlier_type+', auc, '+ ''.join(['%0.3f, '])*len(aucs)%tuple(aucs)
# auc_str = auc_str[:-2]+'\n'
# f.write(auc_str)
# else:
# with open(raw_fname, 'w') as f:
# info = '{}_pfq_{}_{}_n_{}_p_{}_ta{}\n'.format(timestamp, outlier_type,
# algo_type, n, p, ta)
# f.write(info)
# for i in range(len(algo_lst)):
# fps = fp_score[i].flatten()
# fp_str = algo_lst[i]+', ' + str(ta) +', ' + outlier_type+', fps, '+''.join(['%0.3f, '])*len(fps)%tuple(fps)
# fp_str = fp_str[:-2]+'\n'
# f.write(fp_str)
# aucs = auc_score[i].flatten()
# auc_str = algo_lst[i]+', ' + str(ta) +', ' + outlier_type+', auc, '+ ''.join(['%0.3f, '])*len(aucs)%tuple(aucs)
# auc_str = auc_str[:-2]+'\n'
# f.write(auc_str)
#
# if os.path.isfile(txt_fname):
# with open(txt_fname, 'a') as f:
# for i in range(len(algo_lst)):
# txt = '{}, {}, {}, {}, {}, {}, {},{}, {}, {}, {}, {}, {}\n'.format(timestamp,
# outlier_type, algo_lst[i], ta, n, p, n_runs,
# len(pf_lst)*len(pq_lst)*n_runs, int(pf_lst[-1]),
# int(pf_lst[-1]), int(ts[i]), np.mean(fp_score[i]),
# np.mean(auc_score[i]))
# f.write(txt)
# else:
# with open(txt_fname, 'w') as f:
# f.write('algo, ta, outlier_type, p_frac, p_quant, n_runs, total_n_runs, max_n, max_p, total_time, fp_score, auc_score\n')
# for i in range(len(algo_lst)):
# txt = '{}, {}, {}, {}, {}, {}, {}, {},{}, {}, {}, {}, {}\n'.format(timestamp,
# outlier_type, algo_lst[i], ta, n, p, n_runs,
# len(pf_lst)*len(pq_lst)*n_runs, int(pf_lst[-1]),
# int(pf_lst[-1]), int(ts[i]), np.mean(fp_score[i]),
# np.mean(auc_score[i]))
# f.write(txt)
#
#
#
# # plt.show()
plt.close()
def plot_noise(p_frac, p_quant, r, gamma, noise_list, ta, n, p, n_runs,algo_list,algo_type,
metric, metric_str, timeout, timestamp, outlier_type):
w = len(algo_list)
v = 1
t0 = time()
fig_size_x = 5
fig_size_y = 5
plt.figure(figsize=(fig_size_x, fig_size_y))
ts = []
# fp_score = [] #one row of numbers for one al
# auc_score = []
plt.title('{} avg. {} runs of {} with outlier type {}'.format(metric_str[0], n_runs, ta, outlier_type))
for i in range(w):
# To Do: code up keeping the colour bar max and min constant, 0 to 1 - done
t1 = time()
        algo_str = algo_list[i]
algo = algo_dict[algo_str] #using global naughty..
print('{}'.format(algo_str))
        fpz, aucz = get_auc_noise(p_frac, p_quant, r, noise_list, ta, n, p, n_runs, gamma, algo, metric, timeout, outlier_type)
# plt.subplot(v,w, (w*0 + i)+1)
label = ' ta_{}'.format(ta)
plt.plot(noise_list, aucz, label=algo_str)#+label)
plt.xlabel (r'noise')
plt.ylabel(r'AUC score')
t2 = time()-t1
# plt.annotate('{}m {}s'.format(int(t2//60),int(t2%60)), (0.8,0.8) )
ts.append(t2)
# fp_score.append(fpz)
# auc_score.append(aucz)
print('Algorithm {} with data {} took {}m and {}s to run {} times'.format(algo_str,
label,
int(t2//60),
int(t2%60),
len(noise_list)*n_runs))
t3 = time()-t0
plt.legend()
print('Took {}m {}s to run all algorithms on outlier type {}'.format(int(t3//60),int(t3%60), outlier_type))
fname = './images/{}_noise_{}_{}_n_{}_p_{}_ta{}.eps'.format(timestamp, outlier_type, algo_type, n, p, ta)
# print(len(fp_score))
# print(len(auc_score))
plt.savefig(fname, bbox_inches='tight', pad_inches=0)
# txt_fname='./results/{}_{}_noise_results.txt'.format(timestamp, algo_type)
#
# raw_fname='./results/{}_{}_noise_raw.txt'.format(timestamp, algo_type)
#
# if os.path.isfile(raw_fname):
# with open(raw_fname, 'a') as f:
# # info = '{}_np_{}_pfrac_{}_pquant_{}_ta{}\n'.format(timestamp,
# # algo_type, p_frac, p_quant, ta)
# # f.write(info)
# for i in range(len(algo_lst)):
# fps = fp_score[i].flatten()
# fp_str = algo_lst[i]+ ', ' + str(ta) +', ' + outlier_type+', fps, '+''.join(['%0.3f, '])*len(fps)%tuple(fps)
# fp_str = fp_str[:-2]+'\n'
# f.write(fp_str)
# aucs = auc_score[i].flatten()
# auc_str = algo_lst[i]+', ' + str(ta) +', ' + outlier_type+', auc, '+ ''.join(['%0.3f, '])*len(aucs)%tuple(aucs)
# auc_str = auc_str[:-2]+'\n'
# f.write(auc_str)
# else:
# with open(raw_fname, 'w') as f:
# info = '{}_noise_{}_n_{}_p_{}_ta{}\n'.format(timestamp, outlier_type, n, p, ta)
# f.write(info)
# for i in range(len(algo_lst)):
# fps = fp_score[i].flatten()
# fp_str = algo_lst[i]+', ' + str(ta) +', ' + outlier_type+', fps, '+''.join(['%0.3f, '])*len(fps)%tuple(fps)
# fp_str = fp_str[:-2]+'\n'
# f.write(fp_str)
# aucs = auc_score[i].flatten()
# auc_str = algo_lst[i]+', ' + str(ta) +', ' + outlier_type+', auc, '+ ''.join(['%0.3f, '])*len(aucs)%tuple(aucs)
# auc_str = auc_str[:-2]+'\n'
# f.write(auc_str)
#
# if os.path.isfile(txt_fname):
# with open(txt_fname, 'a') as f:
# for i in range(len(algo_lst)):
# # print(i)
# txt = '{}, {}, {}, {}, {}, {}, {},{}, {}, {}, {}, {}\n'.format(timestamp,
# outlier_type, algo_lst[i], ta, n, p, n_runs,
# len(noise_list)*n_runs, noise_list, int(ts[i]),
# np.mean(fp_score[i]), np.mean(auc_score[i]))
# f.write(txt)
# else:
# with open(txt_fname, 'w') as f:
# f.write('timestamp, outlier_type,algo, ta, p_frac, p_quant, n_runs, total_n_runs, noise_list, total_time, fp_score, auc_score\n')
# for i in range(len(algo_lst)):
# txt = '{}, {}, {}, {}, {}, {}, {}, {},{}, {}, {}, {}\n'.format(timestamp,
# outlier_type, algo_lst[i], ta, n, p, n_runs,
# len(noise_list)*n_runs, noise_list, int(ts[i]),
# np.mean(fp_score[i]), np.mean(auc_score[i]))
# f.write(txt)
# plt.show()
plt.close()
def plot_each_algo_for_np(n_lst, p_lst, r, gamma, noise, ta, p_quant, p_frac,n_runs,
algo_list, algo_type, metric, metric_str, timeout,
timestamp, nz_cols, outlier_type):
w = len(algo_list)
v = 1
t0 = time()
fig_size_x = len(algo_list)*5
fig_size_y = 5
plt.figure(figsize=(fig_size_x, fig_size_y))
ts = []
# fp_score = []
# auc_score = []
for i in range(w):
# To Do: code up keeping the colour bar max and min constant, 0 to 1
t1 = time()
        algo_str = algo_list[i]
algo = algo_dict[algo_str]
print('{}'.format(algo_str))
        fpz, aucz = contour_fp_np(n_lst, p_lst, r, noise, ta, p_quant, p_frac, n_runs, gamma, algo, algo_str, metric, timeout, nz_cols, outlier_type)
# zs = np.round(100*(fpz.size-np.count_nonzero(fpz))/fpz.size) # gives fraction of zeros
plt.subplot(v,w, (w*0 + i)+1)
label = 'ta_{}'.format(ta)
plt.title('{} avg. {} runs of {}'.format(metric_str[0], n_runs, algo_str))
plt.contourf(n_lst, p_lst, aucz, np.arange(0,1+1e-8,0.05),vmin=0, vmax=1)
if i == w-1:
plt.colorbar()
plt.xlabel (r'10^n samples')
plt.ylabel(r'2^p features')
t2 = time()-t1
# plt.annotate('{}m {}s'.format(int(t2//60),int(t2%60)), (0.8,0.8) )
ts.append(t2)
# fp_score.append(fpz)
# auc_score.append(aucz)
print('Algorithm {} with data {} took {}m and {}s to run {} times'.format(algo_str,
label,
int(t2//60),
int(t2%60),
len(n_lst)*len(p_lst)*n_runs))
t3 = time()-t0
print('Took {}m {}s to run all {} algorithms'.format(int(t3//60),int(t3%60), algo_type))
fname = './images/{}_np_{}_pfrac_{}_pquant_{}_ta{}.eps'.format(timestamp,
algo_type, p_frac, p_quant, ta)
plt.savefig(fname, bbox_inches='tight', pad_inches=0)
# txt_fname='./results/{}_np_results.txt'.format(timestamp)
#
# raw_fname='./results/{}_np_raw.txt'.format(timestamp)
#
#
#
# if os.path.isfile(raw_fname):
# with open(raw_fname, 'a') as f:
# # info = '{}_np_{}_pfrac_{}_pquant_{}_ta{}\n'.format(timestamp,
# # algo_type, p_frac, p_quant, ta)
# # f.write(info)
# for i in range(len(algo_lst)):
# fps = fp_score[i].flatten()
# # print(algo_lst[i])
# fp_str = algo_lst[i]+ ', ' + str(ta) + ', ' + outlier_type+', fps, '+''.join(['%0.3f, '])*len(fps)%tuple(fps)
# fp_str = fp_str[:-2]+'\n'
# f.write(fp_str)
# aucs = auc_score[i].flatten()
# auc_str = algo_lst[i]+', ' + str(ta) +', ' + outlier_type+', auc, '+ ''.join(['%0.3f, '])*len(aucs)%tuple(aucs)
# auc_str = auc_str[:-2]+'\n'
# f.write(auc_str)
# else:
# with open(raw_fname, 'w') as f:
# info = '{}_np_{}_{}_pfrac_{}_pquant_{}_ta{}\n'.format(timestamp, outlier_type,
# algo_type, p_frac, p_quant, ta)
# f.write(info)
# for i in range(len(algo_lst)):
# fps = fp_score[i].flatten()
# fp_str = algo_lst[i]+', ' + str(ta) +', ' + outlier_type+', fps, '+''.join(['%0.3f, '])*len(fps)%tuple(fps)
# fp_str = fp_str[:-2]+'\n'
# f.write(fp_str)
# aucs = auc_score[i].flatten()
# auc_str = algo_lst[i]+', ' + str(ta) +', ' + outlier_type+', auc, '+ ''.join(['%0.3f, '])*len(aucs)%tuple(aucs)
# auc_str = auc_str[:-2]+'\n'
# f.write(auc_str)
#
# if os.path.isfile(txt_fname):
# with open(txt_fname, 'a') as f:
# for i in range(len(algo_lst)):
# txt = '{}, {}, {}, {}, {}, {}, {},{}, {}, {}, {}, {}, {}\n'.format(timestamp,
# algo_lst[i], ta, outlier_type, p_frac, p_quant, n_runs,
# len(n_lst)*len(p_lst)*n_runs, int(10**n_lst[-1]),
# int(2**p_lst[-1]), int(ts[i]), np.mean(fp_score[i]),
# np.mean(auc_score[i]))
# f.write(txt)
# else:
# with open(txt_fname, 'w') as f:
# f.write('timestamp, algo, ta, outlier_type, p_frac, p_quant, n_runs, total_n_runs, max_n, max_p, total_time, fp_score, auc_score\n')
# for i in range(len(algo_lst)):
# txt = '{}, {}, {}, {}, {}, {},{}, {}, {}, {}, {}, {}, {}\n'.format(timestamp,
# algo_lst[i], ta, outlier_type, p_frac, p_quant, n_runs,
# len(n_lst)*len(p_lst)*n_runs, int(10**n_lst[-1]),
# int(2**p_lst[-1]), int(ts[i]), np.mean(fp_score[i]),
# np.mean(auc_score[i]))
# f.write(txt)
#
#
# plt.show()
plt.close()
# def record_raw_results(data, filename):
# if os.path.isfile():
def get_os_data(X, algos):
"""
X is n,p data set, algos is list of string representations of functions as
defined in dictionary built below.
"""
algo_list = [test_IF, test_OP, test_DBSCAN, test_Ridge, test_GRU, test_LSTM, test_OCSVM, test_AE]
str_algo_lst = ['IF', 'OP', 'DBSCAN', 'Ridge', 'GRU', 'LSTM', 'OC SVM', 'AE']
fn_dict = {}
    out_scores = []
    for i in range(len(algo_list)):
        fn_dict[str_algo_lst[i]] = algo_list[i]
    for algo in algos:
        out_scores.append(fn_dict[algo](X))
    return out_scores
def get_contour_plot(alg_type, algos, tas, score, plot_type):
"""
    uses data already saved to produce a plot
"""
names = ['algo', 'ta', 'score']+list(np.arange(100))
f_lst = os.listdir('./results/')
    if plot_type == 'pfq':
        pass  # relies on the module-level pf_lst and pq_lst (naughty, as noted)
    else:  # is an np plot
        pass  # relies on the module-level n_lst and p_lst
for f in f_lst:
if plot_type in f:
            date_time = datetime.datetime.strptime(f[:19], '%Y-%m-%d_%H-%M-%S')
            df = pd.read_csv(os.path.join('./results/', f), skipinitialspace=True, index_col=False, header=0)  # , names=names)
n, p = df.shape
nv = p-3
names = ['algo', 'ta', 'score']+list(np.arange(nv))
df.columns = names
def get_occ_data(algo_lst, metric_lst):
path = os.path.expanduser('~') +'/Data/occupancy/'
files = ['occ_1', 'occ_2', 'occ_3']
for file in files:
df = pd.read_csv(path+file+'.txt')
print(sum(df['Occupancy']==0)/df.shape[0])
outs = df.index[df['Occupancy']==1]
df = df.drop(['date','Occupancy'], axis=1)
X = df.values
for algo_s in algo_lst:
algo = algo_dict[algo_s]
print('Testing {}'.format(algo_s))
fp, auc = test_algo(X, outs, algo, metric_lst)
print('for algo {}, fp = {}, auc = {}'.format(algo_s, fp, auc))
# print(df.info())
def test_metrics():
test_case_1 = [1,2,3,4,5,6,7,8]
auc1 = 1
fps1 = 0
outs1 = [7,6,5]
fpst1 = separation_metric(test_case_1, outs1)
auct1 = auc(test_case_1, outs1)
print('fps={}, should be {}, auc={}, should be {}'.format(fpst1, fps1, auct1, auc1))
test_case_2 = [1,1,1,1,1,1,1,1,1,8]
auc2 = 1
fps2 = 0
outs2 = [9]
fpst2 = separation_metric(test_case_2, outs2)
auct2 = auc(test_case_2, outs2)
print('fps={}, should be {}, auc={}, should be {}'.format(fpst2, fps2, auct2, auc2))
test_case_3 = [1,1,1,1,1,1,1,1,2,1]
auc3 = 0.75
fps3 = 0
outs3 = [9,8]
fpst3 = separation_metric(test_case_3, outs3)
auct3 = auc(test_case_3, outs3)
print('fps={}, should be {}, auc={}, should be {}'.format(fpst3, fps3, auct3, auc3))
test_case_4 = [1,2,1,1,1,1,1,1,1,1]
auc4 = 0.75
fps4 = 0
outs4 = [9,1]
fpst4 = separation_metric(test_case_4, outs4)
auct4 = auc(test_case_4, outs4)
print('fps={}, should be {}, auc={}, should be {}'.format(fpst4, fps4, auct4, auc4))
if __name__ == '__main__':
r = 20
p_frac = 0.3
p_quant = 0.3
# ta = 6
# n_steps = 10
n_runs = 10
gamma = 0.05
timeout = 900
noise=.1
algo_dict = {'VAR':test_VAR, 'FRO':test_OLS, 'FRL':test_LASSO, 'FRR':test_Ridge,
'GMM': test_GMM, 'OCSVM': test_OCSVM, 'DBSCAN':test_DBSCAN,
'IF': test_IF,
'AE': test_AE, 'VAE': test_VAE, 'GRU':test_GRU, 'LSTM':test_LSTM,
'OP': test_OP, 'GOP': test_GOP, 'SGOP': test_SGOP}
timestamp = datetime.datetime.fromtimestamp(time())
timestamp = timestamp.strftime('%Y-%m-%d_%H-%M-%S')
# timestamp = '2020-11-17_14-33-46' #to continue previously broken expt.
#quick algos for testing
reg_algo_lst = ['VAR', 'FRO', 'FRL', 'FRR']
dens_algo_lst = ['OCSVM', 'GMM', 'DBSCAN', 'IF']
dl_algo_lst = ['AE', 'VAE', 'LSTM', 'GRU']
mat_algo_lst = ['OP', 'GOP', 'SGOP']
algo_type_lst = ['reg', 'dens', 'dl', 'mat']
lst_lst = [reg_algo_lst, dens_algo_lst, dl_algo_lst, mat_algo_lst]
# dl_algo_lst = ['LSTM', 'GRU']
# lst_lst = [reg_algo_lst]
#to run on occupancy data
metric_lst = [auc, separation_metric]
# algo_lst = ['VAR', 'FRO', 'FRL', 'FRR','OCSVM', 'GMM', 'DBSCAN', 'IF', 'AE',
# 'LSTM', 'GRU', 'OP', 'SGOP']
# algo_lst= ['VAE']
# get_occ_data(algo_lst, metric_lst)
#
# raise
# to run on synthetic data.
p_lst = [1, 2, 3, 4, 5, 6]
n_lst = [1, 2, 3, 4]
ta_lst = [1,2,3,4,5,6]
noise_list=np.arange(0,1.01,0.05)
# gop_lst = ['OP','GOP']#, 'SGOP']
# lst_lst = [mat_algo_lst]
# lst_lst = [['VAR', 'FRR']]
# lst_lst = [['LSTM', 'GRU']]
# algo_type_lst=['dl']
# algo_type_lst = ['reg']
# p_lst = [1, 2, 3, 4]
# n_lst = [1, 2, 3]
#
# ta_lst = [6]
#for noise plots.
pf_lst = np.arange(0.0,1.01,0.2)
pq_lst = np.arange(0.0,1.01,0.2)
n = 1000
p = 32
metric_lst = [auc, separation_metric]
metric_str_lst = ['AUC', 'FPs']
print(timestamp)
# outlier_type = 'point'
# for ta in ta_lst:
# for i in range(len(lst_lst)):
# algo_lst = lst_lst[i]
# algo_type = algo_type_lst[i]
# plot_noise(p_frac, p_quant, r, gamma, noise_list, ta, n, p, n_runs, algo_lst,algo_type,
# metric_lst, metric_str_lst, timeout, timestamp, outlier_type)
#
# for pfq or np plots
ot_lst = ['point']#, 'context', 'stutter']
nz_cols = None
for outlier_type in ot_lst:
for ta in ta_lst:
metric_lst = [auc, separation_metric]
metric_str_lst = ['AUC', 'FPs']
for i in range(len(lst_lst)): #
algo_type = algo_type_lst[i]
algo_lst = lst_lst[i]
#
# plot_each_algo_for_np(n_lst, p_lst, r, gamma, noise, ta, p_quant, p_frac,n_runs,
# algo_lst, algo_type, metric_lst, metric_str_lst,
# timeout, timestamp, nz_cols, outlier_type)
plot_each_algo_for_pfq(pf_lst, pq_lst, r, gamma, noise, ta, n, p, n_runs,
algo_lst, algo_type, metric_lst, metric_str_lst,
timeout, timestamp, outlier_type)
#
# # pf_lst, pq_lst, r, gamma, noise, ta, n, p, n_runs,
# # algo_list, algo_type, metric, metric_str_lst
# # timeout, timestamp, outlier_type
```
#### File: jogrundy/outlier_detection/vae.py
```python
import torch
from torch import nn
from torch import optim
from torch.autograd import Variable
from torch.utils.data import DataLoader
import os
import numpy as np
import matplotlib.pyplot as plt
from ae import SynDataset
class VAE(nn.Module):
def __init__(self, layers, criterion):
super(VAE, self).__init__()
ls = []
for i in range(len(layers)-2):
ls.append(nn.Linear(layers[i], layers[i+1]))
ls.append(nn.ReLU(True))
self.pre_encoder = nn.Sequential(
*ls
)
self.encode_mu = nn.Linear(layers[-2], layers[-1])
self.encode_sig = nn.Linear(layers[-2], layers[-1])
ls = []
for i in range(len(layers)-1,1, -1):
# print(layers[i])
ls.append(nn.Linear(layers[i], layers[i-1]))
ls.append(nn.ReLU(True))
ls.append(nn.Linear(layers[1], layers[0]))
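        # NB: Softmax over dim=0 normalises across the batch dimension; for a
        # reconstruction output a Sigmoid (or Softmax over the feature dim) is more typical.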
ls.append(nn.Softmax(dim=0))
self.decoder = nn.Sequential(
*ls
)
self.criterion = criterion
def encode(self, x):
h = self.pre_encoder(x)
return self.encode_mu(h), self.encode_sig(h)
def reparametrize(self, mu, logvar):
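        # Reparameterisation trick: sample eps ~ N(0, I) and return
        # z = mu + sigma * eps with sigma = exp(0.5 * logvar), keeping the
        # sampling step differentiable w.r.t. mu and logvar.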
std = logvar.mul(0.5).exp_()
if torch.cuda.is_available():
eps = torch.cuda.FloatTensor(std.size()).normal_()
else:
eps = torch.FloatTensor(std.size()).normal_()
eps = Variable(eps) #.to(self.device)
return eps.mul(std).add_(mu)
def decode(self, z):
x = self.decoder(z)
return x
def forward(self, x):
mu, logvar = self.encode(x)
z = self.reparametrize(mu, logvar)
return self.decode(z), mu, logvar
def loss_fn(self, x,output, mu, logvar):
"""
recon_x: generated images
x: original images
mu: latent mean
logvar: latent log variance
"""
BCE = self.criterion(output, x) # mse loss
# loss = 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
KLD_element = mu.pow(2).add_(logvar.exp()).mul_(-1).add_(1).add_(logvar)
KLD = torch.sum(KLD_element).mul_(-0.5)
# KL divergence
# print(KLD)
# print(BCE)
# raise
return BCE + KLD
def train_dataset(model, loader, optimizer, params, device):
n,p,dummy, dummy, dummy, dummy, dummy, num_epochs = params
model.to(device)
for epoch in range(num_epochs):
model.train()
train_loss = []
        for batch_idx, data in enumerate(loader):
            data = Variable(data[0]).to(device)
            optimizer.zero_grad()
            output, mu, logvar = model(data)
            loss = model.loss_fn(data, output, mu, logvar)
            loss.backward()
            train_loss.append(loss.data)
            optimizer.step()
return model, train_loss
def get_losses(model, dataset, params, device):
"""
calculates reconstruction loss for each datapoint
"""
n,p,r, p_frac, p_quant,gamma, ta, num_epochs = params
model.eval()
loader = DataLoader(dataset, batch_size=1)
losses = []
for i,data in enumerate(loader):
data = Variable(data).to(device)
# ===================forward=====================
output, mu , logvar = model(data)
loss = model.loss_fn(data, output, mu, logvar)
        losses.append(loss.item())
    losses = np.array(losses)
return losses
def get_vae_losses(X):
"""
trains vae on np array X, returns reconstruction loss for each data sample
"""
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
num_epochs=50
batch_size = 8
learning_rate = 0.001
dataset=SynDataset(X)
dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)
n = X.shape[0]
p = X.shape[1]
ta = -1
dummy = -1
params = (n,p,dummy, dummy, dummy, dummy, dummy, num_epochs)
loader = DataLoader(dataset, batch_size=batch_size, shuffle=True)
criterion = nn.MSELoss()
i_layer_size = p
h1_layer_size = 64
e_layer_size = 8
layers = [i_layer_size, h1_layer_size, e_layer_size]
# label = 'VAE'
model = VAE(layers, criterion)
optimizer = optim.Adam(model.parameters(), lr=1e-3)
model, train_loss = train_dataset(model, loader, optimizer, params, device)
losses = get_losses(model, dataset, params, device)
return losses
def get_VAE_os(X):
"""
takes in only data 'X', in samples as rows format
returns only list of outlier scores for each sample
higher score = more outlier
    gives reconstruction error from the VAE, should be largest for outliers
"""
losses = get_vae_losses(X)
return losses
if __name__ == '__main__':
#Testing code.
from torchvision import transforms
from torchvision.datasets import MNIST
from torchvision.utils import save_image
os.makedirs("vae_img", exist_ok=True)
num_epochs = 2
batch_size = 128
learning_rate = 1e-3
img_transform = transforms.Compose([
transforms.ToTensor()
# transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
dataset = MNIST('./data', transform=img_transform, download=True)
dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)
    criterion = nn.MSELoss(reduction='sum')  # size_average=False is deprecated
layers = [784,400,20]
model = VAE(layers, criterion)
print(model)
if torch.cuda.is_available():
model.cuda()
# reconstruction_function = nn.MSELoss(size_average=False)
optimizer = optim.Adam(model.parameters(), lr=1e-3)
for epoch in range(num_epochs):
model.train()
train_loss = 0
for batch_idx, data in enumerate(dataloader):
img, _ = data
img = img.view(img.size(0), -1)
img = Variable(img)
if torch.cuda.is_available():
img = img.cuda()
optimizer.zero_grad()
output, mu, logvar = model(img)
loss = model.loss_fn(img, output, mu, logvar)
loss.backward()
train_loss += loss.data
optimizer.step()
if batch_idx % 100 == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch,
batch_idx * len(img),
len(dataloader.dataset), 100. * batch_idx / len(dataloader),
loss.data / len(img)))
print('====> Epoch: {} Average loss: {:.4f}'.format(
epoch, train_loss / len(dataloader.dataset)))
if epoch % 10 == 0:
            save = output.cpu().data.view(output.size(0), 1, 28, 28)  # reshape flat outputs to images; to_img was not defined in this file
save_image(save, './vae_img/image_{}.png'.format(epoch))
dataloader_test = DataLoader(dataset, batch_size=1, shuffle=False)
losses = []
for i , data in enumerate(dataloader_test):
img, o = data
# print(i, o)
img = img.view(img.size(0), -1)
img = Variable(img)
# ===================forward=====================
if torch.cuda.is_available():
img = img.cuda()
optimizer.zero_grad()
recon_batch, mu, logvar = model(img)
        loss = model.loss_fn(img, recon_batch, mu, logvar)
        losses.append(loss.item())
losses = np.array(losses)
k = 5
worst = losses.argsort()[-k:][::-1]
plt.figure(figsize=(8,3))
for i in range(k):
idx = worst[i]
plt.subplot(1,k,i+1)
img = dataset[idx][0].reshape(28,28)
cls = dataset[idx][1]
plt.axis('off')
plt.imshow(img, cmap='Greys')
plt.title('loss={:.1f}, {}'.format(losses[idx], cls))
plt.savefig('./images/highest_losses_vae.eps',bbox_inches='tight')
plt.show()
``` |
{
"source": "jogubo/oc-chessmanager",
"score": 3
} |
#### File: chessmanager/utils/database.py
```python
from tinydb import TinyDB, Query
class Database:
@staticmethod
def table(table):
"""
Select table.
Parameters:
table (str): table name
Return: table
"""
db = TinyDB('db.json')
table = db.table(table)
return table
@classmethod
def get(cls, table, id=None):
"""
Get item.
Parameters:
table (str): table name
id (int): item id
Returns:
item (dict)
"""
table = cls.table(table)
if id is None:
item = table.all()
else:
item = table.get(doc_id=id)
return item
@classmethod
def add(cls, table, serialized):
"""
        Add items.
        Parameters:
            table (str): table name
            serialized (list[dict]): items to insert
"""
table = cls.table(table)
table.insert_multiple(serialized)
@classmethod
def update(cls, table, key, value, doc_ids):
"""
Update items.
Parameters:
table (str): table name
key (str)
value (str, int, float, tuple, list, dict)
doc_ids (int, list): id or list of id of items to be updated
"""
if isinstance(doc_ids, (int, str)):
doc_ids = [doc_ids]
table = cls.table(table)
table.update({key: value}, doc_ids=doc_ids)
@classmethod
def search(cls, table, search):
"""
Search item.
Parameters:
table (str): table name
search (str) name request
Returns:
results (list): id list
"""
table = cls.table(table)
q = Query()
search = table.search(q.last_name == search)
results = []
for item in search:
id = item.doc_id
results.append(int(id))
return results
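# Illustrative usage of the Database helper above (hypothetical 'players' table
# in db.json, not part of the original file):
#
#     players = Database.get('players')                # all records
#     player = Database.get('players', id=1)           # single record by doc_id
#     Database.add('players', [{'last_name': 'CARLSEN'}])
#     Database.update('players', 'rank', 2, doc_ids=1)
#     ids = Database.search('players', 'CARLSEN')      # list of matching doc_ids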
```
#### File: chessmanager/utils/functions.py
```python
import os
def clear():
"""
Clear the terminal.
"""
os.system('cls' if os.name == 'nt' else 'clear')
def prompt(message=''):
"""
Custom prompt.
"""
print(f"\n{message}")
_input = input('>> ')
return _input
```
#### File: chessmanager/views/tournaments_view.py
```python
from datetime import date
from utils.constants import APP_NAME, NUMBER_PLAYERS
from utils.functions import clear, prompt
class TournamentsView:
@classmethod
def main_display(cls):
clear()
print(f"[{APP_NAME}]\n")
print(f"{cls.title}\n")
@classmethod
def display_tournament(cls, tournament_infos, finished=False):
cls.title = f"{tournament_infos['name']}"
cls.main_display()
print(f"Description:\n{tournament_infos['description']}\n")
print(f"Contrôle du temps:\n{tournament_infos['time']}\n")
choices = ['C', 'R', 'T']
if not finished:
choices.append('P')
print(f"Tour actuel:\n{tournament_infos['current_round']}"
f"/{tournament_infos['total_rounds']}\n")
_input = prompt("[C]lassement | [P]rochains matchs | "
"[T]ours précédents (résultats) | "
"[R]etour à la liste des tournois").upper()
elif finished:
print("Tournoi terminé.\n")
_input = prompt("[C]lassement | "
"[R]etour à la liste des tournois").upper()
if _input in choices:
return _input
@classmethod
def display_ranking(cls, tournament_infos):
        cls.main_display()
        print("Joueurs participants:")
i = 1
for id, player_infos in tournament_infos['players'].items():
print(f"{i} - {player_infos['name']} | "
f"Score: {player_infos['score']} | "
f"Rang: {player_infos['rank']}")
i += 1
prompt("Appuyer sur une touche pour revenir "
"à la gestion du tournoi:").upper()
return None
@classmethod
def display_round(cls, versus_list, tournament_infos):
while True:
players_infos = tournament_infos['players']
cls.main_display()
print("Prochains matchs:")
i, choices = 1, ['E', 'R']
for players in versus_list:
print(f"{i} - {players_infos[players[0]]['name']} "
f" VS {players_infos[players[1]]['name']}")
i += 1
_input = prompt("[E]ntrer les résultats | "
"[R]etour").upper()
if _input in choices:
return _input
@classmethod
def display_list(cls, tournaments_infos, display='all'):
'''
tournments_infos = [{'id': tournament_id, 'name': tournament_name}]
'''
while True:
cls.title = "Liste des tournois\n"
cls.main_display()
i, choices = 1, ['C', 'M', 'Q']
for tournament in tournaments_infos:
print(f"[{i}] - {tournament['name']}")
choices.append(i)
i += 1
text = "Selectionnez un tournoi"
if display == 'all':
_input = prompt(f"{text} pour afficher plus d'infos\n"
"[C]réer un tournoi | [M]enu principal | "
"[Q]uitter le programme")
elif display == 'minimal':
_input = prompt(f"{text} :")
try:
user_choice = int(_input)
except ValueError:
user_choice = _input.upper()
if user_choice in choices:
if isinstance(user_choice, int):
return tournaments_infos[user_choice - 1]['id']
else:
return user_choice
else:
continue
@classmethod
def display_rounds(cls, rounds_data):
cls.main_display()
for round, matchs in rounds_data.items():
print(f"{round}")
for match, players in matchs.items():
print(f"{match}: "
f"{players['player_1']['name']} "
f"({players['player_1']['score']}) "
f"VS "
f"{players['player_2']['name']} "
f"({players['player_2']['score']}) ")
print("\n--------------------\n")
prompt("Appuyez sur une touche pour revenir au tournoi:")
return None
@classmethod
def set_name(cls):
cls.main_display()
name = prompt("Entrez le nom du tournoi :").title()
return name
@classmethod
def set_location(cls):
cls.main_display()
location = prompt("Entrez le lieu du tournoi :").upper()
return location
@classmethod
def set_description(cls):
cls.main_display()
description = prompt("Entrez la description du tournoi :").capitalize()
return description
@classmethod
def set_date(cls):
valid_date = False
while not valid_date:
cls.main_display()
_input = prompt("Entrez le date de l'évènement (JJ/MM/AAAA) :")
try:
_input = _input.split('/')
year = int(_input[2])
month = int(_input[1])
day = int(_input[0])
                event_date = str(date(year, month, day))
break
except ValueError:
continue
except IndexError:
continue
        return event_date
@classmethod
def set_time(cls):
while True:
cls.main_display()
print("[1] - Bullet\n"
"[2] - Blitz\n"
"[3] - Coup rapide\n")
_input = prompt("Choisissez le type de contrôle de temps:")
if _input == '1':
return 'Bullet'
elif _input == '2':
return 'Blitz'
elif _input == '3':
return 'Coup rapide'
@classmethod
def set_round_name(cls):
cls.main_display()
_input = prompt("Entrez un nom pour ce round ou "
"laissez vide pour un nom auto:")
if _input == '':
return None
else:
return _input
@classmethod
def set_nb_players(cls):
return NUMBER_PLAYERS
@classmethod
def add_player(cls):
cls.main_display()
_input = prompt("Entrez le nom du joueur recherché :").upper()
return _input
@classmethod
def display_matchs_list(cls, round, matchs):
if round == 1:
round = "1er"
else:
round = f"{round}ème"
print(f"Liste des matchs pour le {round} tour:\n")
for players in matchs:
player_1, player_2 = players
print(f" - {player_1.full_name} vs {player_2.full_name}")
@classmethod
def set_score_match(cls, round, players):
while True:
cls.title = f"Tour {round}:\n"
cls.main_display()
player_1, player_2 = players
print(f"[1] - {player_1}")
print(f"[2] - {player_2}")
print("[E] - Égalité")
_input = prompt("Selectionnez le joueur gagnant").upper()
if _input == "1":
return (1.0, 0.0)
elif _input == "2":
return (0.0, 1.0)
elif _input == "E":
return (0.5, 0.5)
@classmethod
def create_new_tournament(cls):
'''
Displays a form to create a new tournament
'''
cls.title = "Création d'un nouveau tournoi"
tournament = {
"name": cls.set_name(),
"description": cls.set_description(),
"date": cls.set_date(),
"location": cls.set_location(),
"time": cls.set_time(),
"nb_players": cls.set_nb_players(),
}
return tournament
``` |
{
"source": "jo-gunhee/Hack_with_Python",
"score": 3
} |
#### File: Hack_with_Python/PortScan/mutil_threads_portscanner.py
```python
import threading  # threading library
import socket  # socket library
import time  # time library
resultLock = threading.Semaphore(value=1)  # semaphore serialising result output
maxConnection = 100  # maximum number of concurrent scan threads
connection_lock = threading.BoundedSemaphore(value=maxConnection)
port_result = {}  # dict holding the scan results
# thread worker function
def scanPort(tgtHost, portNum):
    try:  # attempt the connection
with socket.socket() as s:
data = None
s.settimeout(2)
s.connect((tgtHost, portNum))
s.send("Python Connect\n".encode())
data = s.recv(1024).decode()
except Exception as e:
if str(e) == "timed out":
data = str(e)
else:
data = 'error'
finally:
if data is None:
data = "no_data"
elif data == 'error':
            connection_lock.release()  # release the thread semaphore
return
        resultLock.acquire()  # acquire the output semaphore
        print("[+] Port {} opened: {}".format(portNum, data[:20]).strip())
        resultLock.release()  # release the output semaphore
port_result[portNum] = data
    connection_lock.release()  # release the thread semaphore
# main function
def main():
    tgtHost = "172.30.1.24"  # target host to scan
    for portNum in range(1024):  # loop over ports 0-1023
connection_lock.acquire()
t = threading.Thread(target=scanPort, args=(
            tgtHost, portNum))  # create the scan thread
        t.start()  # start the thread
time.sleep(5)
print(port_result)
    # save results to CSV
    with open("portScanResult.csv", 'w') as f:  # open the CSV file for the results
        f.write("portNum, banner\n")  # write the header row
        for p in sorted(port_result.keys()):
            f.write("{}, {}\n".format(p, port_result[p].strip()))  # one row per port
    # print the results
print("\n\n\n+++++++++++ the result +++++++++++")
print('portNum' + '\t' + 'banner')
for p in sorted(port_result.keys()):
print("{} \t {}".format(p, port_result[p][:20].strip()))
print(">> the result in portScanResult.csv")
if __name__ == "__main__":
startTime = time.time()
main()
endTime = time.time()
    print("Executed time:", (endTime - startTime))
``` |
{
"source": "jogvanb/sdg-faroese-translation",
"score": 3
} |
#### File: scripts/batch/flatten_global_translations.py
```python
import os
import yaml
def export_yaml(data, filename):
with open(filename, 'w') as outfile:
yaml.dump(data, outfile, default_flow_style=False, allow_unicode=True)
languages = ['am', 'ar', 'de', 'en', 'es', 'fr', 'kz', 'ru', 'zh-Hans', 'fo']
fields = {
'global_goals': ['short', 'title'],
'global_targets': ['title'],
'global_indicators': ['title']
}
for language in languages:
for filename in fields:
filepath = os.path.join('translations', language, filename + '.yml')
flattened = {}
with open(filepath, 'r') as stream:
            yamldata = yaml.load(stream, Loader=yaml.FullLoader)
for key in yamldata:
for child in fields[filename]:
if child in yamldata[key]:
flat_key = key + '-' + child
flattened[flat_key] = yamldata[key][child]
export_yaml(flattened, filepath)
```
#### File: scripts/batch/import_global_titles_2020.py
```python
import pandas as pd
import re
import unicodedata
import urllib
import os.path
import glob
import yaml
"""
This script imports the updated 2020 titles for goals, targets, and indicators.
In keeping with past imports, the id numbers (eg, 1.1.1, 1.1, etc) are stripped
from the beginnings of the titles.
Titles: Currently the titles are translated (at the UN level) into Arabic,
Chinese, Spanish, French, English, and Russian; however the Arabic
translations are only available as a PDF, which is more difficult to parse with
a script (and so are being skipped).
"""
def sdg_number_from_text(text):
"""
This parses a string of text and pulls out the SDG number. Possible formats of
return value are: '1', '1.1', '1.1.1'
"""
if pd.isnull(text):
return None
matches = re.findall(r'(\d+)(\.\w+)?(\.\w+)?', text)
if len(matches) > 0:
match = ''.join(matches[0])
# Sanity checks.
match_parts = match.split('.')
# In these cases, a missing space causes the first word
# of the indicator title to appear as an extension of the
# third id part.
if len(match_parts) == 3 and len(match_parts[2]) > 2:
match_2_replacement = ''
for character in match_parts[2]:
if character.isnumeric() or character.islower():
match_2_replacement += character
else:
break
if match_2_replacement != '' and match_2_replacement != match_parts[2]:
match = match_parts[0] + '.' + match_parts[1] + '.' + match_2_replacement
return match
else:
return None
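# Illustrative behaviour of sdg_number_from_text (hypothetical inputs, not from
# the source spreadsheets):
#   'Goal 1. End poverty in all its forms everywhere'      -> '1'
#   '1.1 By 2030, eradicate extreme poverty ...'           -> '1.1'
#   '1.1.1 Proportion of the population living below ...'  -> '1.1.1'
#   An empty/NaN cell returns None.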
def sdg_goal_is_valid(text):
if text is None:
return False
parts = text.split('.')
if len(parts) > 1:
return False
if not text.isnumeric():
return False
if int(text) > 17:
return False
return True
def sdg_indicator_is_valid(text):
if text is None:
return False
parts = text.split('.')
if len(parts) != 3:
return False
return True
def sdg_target_is_valid(text):
if text is None:
return False
parts = text.split('.')
if len(parts) != 2:
return False
return True
def sdg_text_without_number(text, number):
"""
This simply removes a number from some text.
"""
normalized = unicodedata.normalize("NFKD", str(text))
# Remove the number and everything before it.
parts = normalized.split(number)
if len(parts) == 2:
return parts[1].lstrip('.').strip()
else:
return normalized
def clean_indicator_title(title):
last = title[-1]
if last == 'i':
return title[:-1]
if last.isnumeric():
last_word = title.split(' ')[-1]
last_word = last_word.split('-')[-1]
last_word = last_word.split('–')[-1]
last_word = last_word.split('‐')[-1]
last_word = last_word.split('+B')[-1]
if not last_word.isnumeric():
print('Found a footnote: ' + title)
return title[:-1]
return title
def clean_target_title(title):
last = title[-1]
if last.isnumeric() and last != '0':
return title[:-1]
return title
def clean_goal_title(title):
last = title[-1]
if last.isnumeric():
return title[:-1]
return title
def main():
global_goals = {}
global_targets = {}
global_indicators = {}
# First, the titles.
title_spreadsheets = {
'en': 'https://unstats.un.org/sdgs/indicators/Global%20Indicator%20Framework%20after%202020%20review_English.xlsx',
'zh-Hans': 'https://unstats.un.org/sdgs/indicators/Global%20Indicator%20Framework%20after%202020%20review_Chinese.xlsx',
'es': 'https://unstats.un.org/sdgs/indicators/Global%20Indicator%20Framework%20after%202020%20review_Spanish.xlsx',
'fr': 'https://unstats.un.org/sdgs/indicators/Global%20Indicator%20Framework%20after%202020%20review_French.xlsx',
'ru': 'https://unstats.un.org/sdgs/indicators/Global%20Indicator%20Framework%20after%202020%20review_Russian.xlsx'
}
for language in title_spreadsheets:
global_goals[language] = {}
global_targets[language] = {}
global_indicators[language] = {}
spreadsheet_url = title_spreadsheets[language]
import_options = {
'header': None,
'names': ['target', 'indicator'],
'usecols': [1, 2],
'skiprows': [0, 1, 2],
'skipfooter': 6,
#'encoding': 'utf-8',
}
df = pd.read_excel(spreadsheet_url, **import_options)
for _, row in df.iterrows():
# If the 'indicator' column in empty, this is a Goal.
if pd.isnull(row['indicator']):
# Identify the goal number.
goal_number = sdg_number_from_text(row['target'])
goal_is_valid = sdg_goal_is_valid(goal_number)
if goal_is_valid and goal_number and goal_number + '-title' not in global_goals[language]:
goal_text = sdg_text_without_number(row['target'], goal_number)
global_goals[language][goal_number + '-title'] = clean_goal_title(goal_text)
else:
# Otherwise it is a target and indicator.
target_number_dots = sdg_number_from_text(row['target'])
target_number = None if target_number_dots is None else target_number_dots.replace('.', '-')
if target_number and sdg_target_is_valid(target_number_dots) and target_number + '-title' not in global_targets[language]:
target_text = sdg_text_without_number(row['target'], target_number_dots)
global_targets[language][target_number + '-title'] = clean_target_title(target_text)
indicator_number_dots = sdg_number_from_text(row['indicator'])
indicator_number = None if indicator_number_dots is None else indicator_number_dots.replace('.', '-')
if indicator_number and sdg_indicator_is_valid(indicator_number_dots) and indicator_number + '-title' not in global_indicators[language]:
indicator_text = sdg_text_without_number(row['indicator'], indicator_number_dots)
global_indicators[language][indicator_number + '-title'] = clean_indicator_title(indicator_text)
# Finally merge the results into the YAML files.
all_results = {
'global_goals.yml': global_goals,
'global_targets.yml': global_targets,
'global_indicators.yml': global_indicators,
}
for yaml_filename in all_results:
for language in all_results[yaml_filename]:
translation_path = os.path.join('translations', language, yaml_filename)
yaml_data = None
with open(translation_path, 'r') as stream:
yaml_data = yaml.load(stream, Loader=yaml.FullLoader)
if not yaml_data:
yaml_data = {}
for item in all_results[yaml_filename][language]:
yaml_data[item] = all_results[yaml_filename][language][item]
with open(translation_path, 'w') as outfile:
yaml.dump(yaml_data, outfile, default_flow_style=False, allow_unicode=True)
if __name__ == '__main__':
main()
```
#### File: sdg-faroese-translation/scripts/export_translation_file.py
```python
import os
import yaml
import pandas as pd
import csv
import sys
# Decide if we actually want to export a particular key.
def should_we_omit_key(key, language):
# Ignore keys that start with and end with these values.
starts_with_and_ends_with = [
# No need to translate URLs.
('global_indicators', 'metadata_link'),
# No need to translate organisation names.
('global_indicators', 'custodian_agency'),
# For now let's leave off the "Definition" as well, only because it
# would be a significant translation effort, and we may want to find out
# whether the UN may eventually do this translation.
('global_indicators', 'definition'),
]
# Add some more for offical UN languages.
official_un_languages = ['es', 'fr', 'zh-Hans']
if language in official_un_languages:
starts_with_and_ends_with.extend([
# The titles for these are pulled directly from UN sources.
('global_indicators', 'title'),
('global_targets', 'title'),
('global_goals', 'title'),
])
# Ignore keys that start with these values.
starts_with = [
# This key is identical in all languages.
'languages'
]
# Now do the actual ignoring.
for item in starts_with_and_ends_with:
if key.startswith(item[0]) and key.endswith(item[1]):
return True
for item in starts_with:
if key.startswith(item):
return True
# Still here? It must be fine.
return False
# Parse and "flatten" the yaml from a translation file.
def parse_translation_data(filepath):
try:
with open(filepath, 'r') as stream:
            yamldata = yaml.load(stream, Loader=yaml.FullLoader)
# Use an unusual "sep" below so that dots can still be used in keys.
df = pd.io.json.json_normalize(yamldata, sep='---')
return df.to_dict(orient='records')[0]
except Exception as exc:
# Could not load the file, return an empty object.
return {}
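# Illustrative example of the flattening above (hypothetical YAML content):
#   disclaimer:
#     text: Some text
# becomes {'disclaimer---text': 'Some text'}; nested keys are joined with '---'
# so that real dots inside keys survive, and export_language later prefixes the
# filename, e.g. 'frontpage:disclaimer---text'.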
def export_language(language, folder):
src_language = 'en'
rows = []
src = os.path.join('translations', src_language)
dest = os.path.join('translations', language)
# A flag to remember whether a translation already exists or not.
translation_exists = os.path.isdir(dest)
# Loop through the translation files in the source language.
for filename in os.listdir(src):
file_parts = os.path.splitext(filename)
no_extension = file_parts[0]
extension = file_parts[1]
# Only operate on Yaml files.
if extension == '.yml':
src_filepath = os.path.join(src, filename)
src_data = parse_translation_data(src_filepath)
# If a translation does not exist, the third column will be blank.
# But if a translation exists, we want to populate the third column
# with the current translation.
dest_data = {}
if translation_exists:
dest_filepath = os.path.join(dest, filename)
dest_data = parse_translation_data(dest_filepath)
# Loop through the source data and append rows for the CSV output.
for key in src_data:
full_key = no_extension + ':' + key
# First make sure we shouldn't ignore this one.
if should_we_omit_key(full_key, language):
continue
rows.append({
# First column is a combination of the filename and the
# "flattened" key, separated by a colon. For example:
# frontpage:disclaimer_text
'key': full_key,
# Second column is the source language - English.
src_language: src_data[key],
# Third column is the destination language, if it exists.
language: dest_data[key] if key in dest_data else ''
})
keys = rows[0].keys()
# Write our results to a file.
if not os.path.exists(folder):
os.makedirs(folder, exist_ok=True)
csv_filename = 'sdg-translations-' + language + '.csv'
csv_filepath = os.path.join(folder, csv_filename)
with open(csv_filepath, 'w', encoding='utf-8-sig') as output_file:
dict_writer = csv.DictWriter(output_file, keys)
dict_writer.writeheader()
dict_writer.writerows(rows)
def main():
# Abort if there is no parameter provided.
if len(sys.argv) < 2:
sys.exit('Provide a 2-letter abbreviation for the target language.')
language = sys.argv[1]
export_language(language, '.')
# Boilerplate syntax for running the main function.
if __name__ == '__main__':
main()
```
#### File: sdg-faroese-translation/scripts/import_translation_file.py
```python
import os
import yaml
import pandas as pd
import csv
import sys
from unflatten import unflatten
def change_keys(obj, convert):
"""
Recursively goes through the dictionary obj and replaces keys with the convert function.
"""
if isinstance(obj, (str, int, float)):
return obj
if isinstance(obj, dict):
new = obj.__class__()
for k, v in obj.items():
new[convert(k)] = change_keys(v, convert)
elif isinstance(obj, (list, set, tuple)):
new = obj.__class__(change_keys(v, convert) for v in obj)
else:
return obj
return new
def merge_dicts(source, destination):
"""
Recursively merge the values in the source dict onto the destination dict.
"""
for key, value in source.items():
if isinstance(value, dict):
# get node or create one
node = destination.setdefault(key, {})
merge_dicts(value, node)
else:
destination[key] = value
return destination
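# Illustrative example (hypothetical values): merging
#   source      = {'global_goals': {'1-title': 'No Poverty'}}
#   destination = {'global_goals': {'2-title': 'Zero Hunger'}}
# leaves destination as {'global_goals': {'1-title': 'No Poverty',
# '2-title': 'Zero Hunger'}}; source values win on key collisions and nested
# dicts are merged recursively in place.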
def main():
if len(sys.argv) < 2:
sys.exit('Provide the path to the exported CSV file you would like to import.')
export_path = sys.argv[1]
with open(export_path, 'r') as csvfile:
data = csv.reader(csvfile)
header = next(data)
language = header[2]
if len(header) < 2:
sys.exit('The header for the third column must be a language code.')
# Make sure the folder exists.
language_folder = os.path.join('translations', language)
if not os.path.isdir(language_folder):
os.mkdir(language_folder)
yaml_files = {}
for row in data:
key_string = row[0]
key_parts = key_string.split(':')
filename = key_parts[0]
key_flat = key_parts[1]
# For now replace dots with something recognizable that we can
# replace later. This is because dots mess up the "unflatten"
# library.
key_flat = key_flat.replace('.', '^^^')
# Along the same lines, we now put dots where we actually want dots.
# The export script uses a separation string of "---" instead of
# dots, so now let's replace those, to prepare for unflattening.
key_flat = key_flat.replace('---', '.')
translation = row[2]
if filename not in yaml_files:
# Start with an empty dict.
yaml_files[filename] = {}
# But also check to see if there is existing data.
filepath = os.path.join(language_folder, filename + '.yml')
if (os.path.isfile(filepath)):
with open(filepath, 'r') as infile:
existing = yaml.load(infile)
if existing:
yaml_files[filename] = existing
            # Unflatten and merge the data into our yaml_files dict.
unflattened = unflatten({key_flat: translation})
yaml_files[filename] = merge_dicts(unflattened, yaml_files[filename])
# Put the dots back into the keys.
yaml_files = change_keys(yaml_files, lambda key: key.replace('^^^', '.'))
# Loop through the yaml_files dict and write any changes to file.
for yaml_file in yaml_files:
yaml_path = os.path.join(language_folder, yaml_file + '.yml')
with open(yaml_path, 'w') as outfile:
yaml.dump(yaml_files[yaml_file], outfile, default_flow_style=False, allow_unicode=True)
# Boilerplate syntax for running the main function.
if __name__ == '__main__':
main()
``` |
{
"source": "joh12041/chi-2016-localness",
"score": 3
} |
#### File: rq1and3_localness/happiness/compute_happiness.py
```python
import csv
import os
import argparse
import sys
from collections import OrderedDict
import numpy
from scipy.stats import spearmanr
from scipy.stats import wilcoxon
sys.path.append("./utils")
import bots
LOCALNESS_METRICS = ['nday','plurality']
HAPPINESS_EVALUATIONS_FN = "../resources/happiness_evaluations.txt"
def build_happiness_dict():
"""Return dictionary containing word : happiness."""
with open(HAPPINESS_EVALUATIONS_FN, 'r') as fin:
csvreader = csv.reader(fin, delimiter='\t')
# Clear out metadata
for i in range(0, 3):
next(csvreader)
assert next(csvreader) == ['word', 'happiness_rank', 'happiness_average', 'happiness_standard_deviation', 'twitter_rank', 'google_rank', 'nyt_rank', 'lyrics_rank']
happy_dict = {}
for line in csvreader:
word = line[0]
h_avg = float(line[2])
if h_avg > 6 or h_avg < 4:
happy_dict[word] = h_avg
return happy_dict
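# Illustrative sketch (added for clarity; values are hypothetical): only words whose
# average happiness score falls outside the neutral band [4, 6] are kept, e.g. a line
# scoring "laughter" at 8.50 would be stored as happy_dict['laughter'] = 8.5, while a
# word scored 5.0 would be dropped.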
def compute_happiness(scale='counties'):
"""Compute happiness by county based on localness-processed CSV from localness.py."""
# generate word -> happiness dictionary
happy_dict = build_happiness_dict()
bots_filter = bots.build_bots_filter()
# directory containing all of the tweets sorted by state or county depending on scale - one file for each region
tweets_dir = './{0}'.format(scale)
tweets_fns = os.listdir(tweets_dir)
output_fn = "./raw_happiness_results_{0}.csv".format(scale)
with open(output_fn, "w") as fout:
csvwriter = csv.writer(fout)
for localness in LOCALNESS_METRICS:
csvwriter.writerow(['{0}_fips'.format(scale), '{0}_med_h'.format(localness), '{0}_avg_h'.format(localness),
'nonlocal_med_h', 'nonlocal_avg_h', 'unfiltered_med_h', 'unfiltered_avg_h',
'total_local', 'total_nonlocal', 'local_excluded', 'nonlocal_excluded'])
local_filtered_out = 0
nonlocal_filtered_out = 0
for file in tweets_fns:
with open(os.path.join(tweets_dir, file), 'r') as fin:
fips = os.path.splitext(file)[0] # files named by <FIPS-CODE>.csv
csvreader = csv.reader(fin)
header = ['text','uid','nday','plurality']
txt_idx = header.index('text')
uid_idx = header.index('uid')
localness_idx = header.index(localness)
assert next(csvreader) == header
local_tweets = []
lt_no_happy_words = 0
non_local = []
nl_no_happy_words = 0
for line in csvreader:
txt = line[txt_idx]
uid = line[uid_idx]
if not line[localness_idx]:
continue
local = (line[localness_idx] == 'True')
if uid in bots_filter:
if local:
local_filtered_out += 1
else:
nonlocal_filtered_out += 1
continue
total_happ = 0.0
count_words = 0
for word in txt.split():
cleaned = word.lower().strip('?!.,;:()[]{}"\'')
if cleaned in happy_dict:
count_words += 1
total_happ += happy_dict[cleaned]
if count_words > 0:
h_avg_txt = total_happ / count_words
if local:
local_tweets.append(h_avg_txt)
else:
non_local.append(h_avg_txt)
else:
if local:
lt_no_happy_words += 1
else:
nl_no_happy_words += 1
local_med_h = numpy.median(local_tweets)
local_avg_h = numpy.average(local_tweets)
nonlocal_med_h = numpy.median(non_local)
nonlocal_avg_h = numpy.average(non_local)
unfiltered_med_h = numpy.median(local_tweets + non_local)
unfiltered_avg_h = numpy.average(local_tweets + non_local)
csvwriter.writerow([fips, local_med_h, local_avg_h, nonlocal_med_h, nonlocal_avg_h, unfiltered_med_h,
unfiltered_avg_h, len(local_tweets), len(non_local), lt_no_happy_words, nl_no_happy_words])
print("{0} 'local' tweets and {1} 'nonlocal' tweets filtered out from organizations for {2}.".format(local_filtered_out, nonlocal_filtered_out, localness))
process_happiness_results(scale, output_fn)
def process_happiness_results(scale, input_fn):
"""
Go through all counties/states happiness results and filter for counties with sufficient tweets to produce rankings
:param scale: counties or states
:return: writes rankings to CSV
"""
tweet_threshold = 3000 # minimum "happiness" tweets for county to be considered
output_fn = "happiness_rankings_{0}_min{1}tweets.csv".format(scale, tweet_threshold)
# include county/state names for easier evaluation of results
fips_to_county = {}
with open('../resources/fips_to_names.csv', 'r') as fin:
csvreader = csv.reader(fin)
assert next(csvreader) == ['FIPS','STATE','COUNTY']
for line in csvreader:
fips = line[0]
if scale == 'counties':
if len(fips) == 4:
fips = '0' + fips
fips_to_county[fips] = '{0}, {1}'.format(line[2], line[1])
else:
fips = fips[:2]
fips_to_county[fips] = line[1]
# read in raw results by county/state from analyzing all tweets - four tables in succession for each localness metric
with open(input_fn, "r") as fin:
csvreader = csv.reader(fin)
idx = 0
localness = LOCALNESS_METRICS[idx]
header = ['{0}_fips'.format(scale), '{0}_med_h'.format(localness), '{0}_avg_h'.format(localness),
'nonlocal_med_h', 'nonlocal_avg_h', 'unfiltered_med_h', 'unfiltered_avg_h', 'total_local',
'total_nonlocal', 'local_excluded', 'nonlocal_excluded']
assert next(csvreader) == header
total_local_idx = header.index('total_local')
total_nonlocal_idx = header.index('total_nonlocal')
        fips_idx = header.index('{0}_fips'.format(scale))
local_havg_idx = header.index('{0}_avg_h'.format(localness))
nonlocal_havg_idx = header.index('nonlocal_avg_h')
unfiltered_havg_idx = header.index('unfiltered_avg_h')
# aggregate unfiltered, local, and nonlocal happiness by county/state for generating rankings
data = {}
for line in csvreader:
if line[0] == header[0]: # have reached next localness metric
idx += 1
localness = LOCALNESS_METRICS[idx]
else:
total_local = float(line[total_local_idx])
total_nonlocal = float(line[total_nonlocal_idx])
fips = fips_to_county[line[fips_idx]]
local_havg = line[local_havg_idx]
nonlocal_havg = line[nonlocal_havg_idx]
unfiltered_havg = line[unfiltered_havg_idx]
if total_local + total_nonlocal >= tweet_threshold: # if sufficiently robust number of tweets for comparing to other counties/states
pct_local = total_local / (total_local + total_nonlocal)
if fips in data:
data[fips]['{0}_local'.format(localness)] = local_havg
data[fips]['{0}_nonlocal'.format(localness)] = nonlocal_havg
data[fips]['{0}_pct_local'.format(localness)] = pct_local
data[fips]['total_local_{0}'.format(localness)] = total_local
data[fips]['total_nonlocal_{0}'.format(localness)] = total_nonlocal
else:
data[fips] = {'county' : fips,
'total_tweets' : total_local + total_nonlocal,
'total_local_{0}'.format(localness) : total_local,
'total_nonlocal_{0}'.format(localness) : total_nonlocal,
'{0}_local'.format(localness) : local_havg,
'{0}_nonlocal'.format(localness) : nonlocal_havg,
'unfiltered' : unfiltered_havg,
'{0}_pct_local'.format(localness) : pct_local}
ranks = []
unfiltered = {}
for i in range(1, len(data) + 1):
ranks.append({})
# sort results by unfiltered happiest to saddest
sd = OrderedDict(sorted(data.items(), key=lambda x: x[1]['unfiltered'], reverse=True))
for i, fips in enumerate(sd):
ranks[i]['county'] = fips
ranks[i]['unfiltered'] = i + 1
ranks[i]['total_tweets'] = sd[fips]['total_tweets']
unfiltered[fips] = i
for localness in LOCALNESS_METRICS:
for property in ['local','nonlocal']:
sd = {}
for k in data:
if '{0}_{1}'.format(localness, property) in data[k]:
sd[k] = data[k]
# sort happiest to saddest for localness metric + local or nonlocal
sd = OrderedDict(sorted(sd.items(), key=lambda x: x[1]['{0}_{1}'.format(localness, property)], reverse=True))
# write ranking for that metric and (non)local to the row where the unfiltered county name is (so sorting any given column by rankings has the correct county labels to understand it)
for i, fips in enumerate(sd):
ranks[unfiltered[fips]]['{0}_{1}'.format(localness, property)] = i + 1
# write out rankings
with open(output_fn, 'w') as fout:
header = ['county', 'total_tweets', 'unfiltered']
for property in ['local','nonlocal']:
for localness in LOCALNESS_METRICS:
header.append('{0}_{1}'.format(localness, property))
csvwriter = csv.DictWriter(fout, fieldnames=header, extrasaction='ignore')
csvwriter.writeheader()
for rank in ranks:
csvwriter.writerow(rank)
# generate Spearman's rho comparing unfiltered to each localness metric and counting geographies that changed dramatically
ten_pct_threshold = int(len(ranks) * 0.1)
for localness in LOCALNESS_METRICS:
for property in ['local','nonlocal']:
metric = []
uf = []
ten_pct_diff = 0
name = '{0}_{1}'.format(localness, property)
for rank in ranks:
if name in rank:
uf.append(rank['unfiltered'])
metric.append(rank[name])
if abs(rank[name] - rank['unfiltered']) >= ten_pct_threshold:
ten_pct_diff += 1
rho, pval = spearmanr(metric,uf)
print('{0}:'.format(name))
print("Spearman's rho between {0} and unfiltered rankings is {1} with a p-value of {2}.".format(name, rho, pval))
print("{0} counties out of {1} were more than {2} rankings different than the unfiltered results.".format(ten_pct_diff, len(ranks), ten_pct_threshold))
stat, pval = wilcoxon(metric, uf, zero_method="pratt")
print("Wilcoxon statistic between {0} and unfiltered rankings is {1} with a p-value of {2}.\n".format(name, stat, pval))
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--scale", default = "counties", help = "compute happiness by either 'states' or 'counties'")
args = parser.parse_args()
compute_happiness(scale = args.scale)
if __name__ == "__main__":
main()
```
#### File: rq1and3_localness/metrics/clean_up_geocoded.py
```python
import csv
def main(vgi_repository='t51m', points=True):
filter_out = {} # dict for fast look-up
with open("location_field/state_table.csv", "r") as fin:
csvreader = csv.reader(fin)
next(csvreader)
for line in csvreader:
if line[1] != 'Washington DC':
for direction in ['', 'southern ', 'eastern ', 'northern ', 'central ', 'western ']:
filter_out[direction + line[1].lower()] = True # e.g. Alabama
filter_out[direction + line[2].lower()] = True # e.g. AL
filter_out[direction + line[1].lower() + ", usa"] = True
filter_out[direction + line[2].lower() + ", usa"] = True
filter_out[direction + line[1].lower() + ", us"] = True
filter_out[direction + line[2].lower() + ", us"] = True
# Most of these are not necessary - they come from an earlier version that was noisier.
filter_out['america'] = True
filter_out['etats-unis'] = True
filter_out['usa'] = True
filter_out['u.s.a.'] = True
filter_out['us'] = True
filter_out['u.s.'] = True
filter_out['united states'] = True
filter_out['united states of america'] = True
filter_out['estados unidos'] = True
filter_out['pacific northwest'] = True
filter_out['the mitten'] = True
filter_out['tejas'] = True
filter_out['new england'] = True
filter_out['lone star state'] = True
filter_out['earth'] = True
filter_out['nowhere'] = True
filter_out['arg'] = True
filter_out['central city'] = True
filter_out['location'] = True
filter_out['disney'] = True
filter_out['clouds'] = True
filter_out['area 51'] = True
filter_out['westside'] = True
filter_out['lol'] = True
filter_out['house'] = True
filter_out['krypton'] = True
filter_out['pandora'] = True
filter_out['cosmos'] = True
filter_out['beach'] = True
filter_out['happy'] = True
filter_out['mars'] = True
filter_out['bed'] = True
filter_out['wonderland'] = True
filter_out['south'] = True
filter_out['nirvana'] = True
filter_out['bdg'] = True
filter_out['life'] = True
filter_out['heart'] = True
filter_out['indian'] = True
filter_out['eastern'] = True
filter_out['mlk'] = True
filter_out['hope'] = True
filter_out['badlands'] = True
filter_out['dixie'] = True
filter_out['san andreas'] = True
filter_out['transylvania'] = True
filter_out['belgique'] = True
filter_out['pateros'] = True # Manila
filter_out['corsica'] = True
filter_out['wimbledon'] = True
filter_out['fsu'] = True
filter_out['scandinavia'] = True
filter_out['mhs'] = True
filter_out['queen city'] = True # likely Cincinnati but a general term too...
filter_out['ayrshire'] = True
filter_out['alberta'] = True
filter_out['newfoundland'] = True
filter_out['bromley'] = True # district in London
with open('location_field/country_codes.tsv', 'r') as fin:
csvreader = csv.reader(fin, delimiter='\t')
header = next(csvreader)
country_idx = header.index('Country')
for line in csvreader:
country = line[country_idx].lower().strip()
filter_out[country] = True
fix = {}
if points:
fix['washington dc'] = '(38.89511, -77.03637)'
        fix['twin cities, mn'] = '(44.96219, -93.178555)' # Average of Minneapolis center and St. Paul center
fix['twin cities, minnesota'] = '(44.96219, -93.178555)'
fix['city of angels'] = '(34.05223, -118.24368)'
fix['the city of angels'] = '(34.05223, -118.24368)'
fix['phil'] = "(39.95233, -75.16379)"
fix['delco'] = "(39.9168, -75.3989)"
fix['steel city'] = "(40.44062, -79.99589)"
fix['queens'] = "(40.76538, -73.81736)"
for nyc_variant in ['nyc', 'new york, new york','new york city', 'new york, ny', 'ny, ny', 'the big apple']:
fix[nyc_variant] = '(40.78343, -73.96625)'
else:
fix['washington dc'] = '11001'
fix['twin cities, mn'] = '27053;27123'
fix['twin cities, minnesota'] = '27053;27123'
fix['city of angels'] = '06037'
fix['the city of angels'] = '06037'
fix['phil'] = '42101'
fix['delco'] = '42045'
fix['steel city'] = '42003'
fix['queens'] = '36081'
for nyc_variant in ['nyc', 'new york, new york','new york city', 'new york, ny', 'ny, ny', 'the big apple']:
fix[nyc_variant] = '36047;36061;36081;36085;36005'
updated = 0
filtered_out = 0
locfields_removed = {}
if points:
input_fn = "./{0}/user_points.csv".format(vgi_repository)
else:
input_fn = "./{0}/user_counties.csv".format(vgi_repository)
with open(input_fn, "r") as fin:
csvreader = csv.reader(fin)
with open(input_fn.replace(".csv","_cleaned.csv"), "w") as fout:
csvwriter = csv.writer(fout)
for line in csvreader:
if line[1] and line[1].lower().strip() in filter_out:
csvwriter.writerow([line[0], line[1], None])
if line[1].lower().strip() in locfields_removed:
locfields_removed[line[1].lower().strip()] += 1
else:
locfields_removed[line[1].lower().strip()] = 1
filtered_out += 1
elif line[1].lower().strip() in fix:
csvwriter.writerow([line[0], line[1], fix[line[1].lower().strip()]])
updated += 1
else:
csvwriter.writerow(line)
for locfield in locfields_removed:
print(locfield, locfields_removed[locfield])
print("{0} updated and {1} filtered out.".format(updated, filtered_out))
if __name__ == "__main__":
main()
```
#### File: rq1and3_localness/metrics/count_local_vgi.py
```python
import csv
import json
import argparse
import sys
sys.path.append("./utils")
import bots
INPUT_HEADER = ['id', 'created_at', 'text', 'user_screen_name', 'user_description', 'user_lang', 'user_location',
'user_time_zone', 'geom_src', 'uid', 'tweet', 'lon', 'lat', 'gender', 'race',
'county', 'nday', 'plurality', 'geomed', 'locfield']
def main():
parser = argparse.ArgumentParser()
parser.add_argument('localness_fn', help='CSV output from localness.py script')
parser.add_argument('output_stats_fn', help="Path to CSV file output containing the localness stats by county")
parser.add_argument('--filter_bots', default=True)
args = parser.parse_args()
localness_fn = args.localness_fn
output_fn = args.output_stats_fn
county_idx = INPUT_HEADER.index('county')
uid_idx = INPUT_HEADER.index('uid')
nday_idx = INPUT_HEADER.index("nday")
plur_idx = INPUT_HEADER.index("plurality")
geomed_idx = INPUT_HEADER.index("geomed")
locfield_idx = INPUT_HEADER.index("locfield")
twitter_bots = {}
if args.filter_bots:
twitter_bots = bots.build_bots_filter()
print("Processing {0} and outputting localness results to {1}.".format(localness_fn, output_fn))
output_header = ['fips','all','none','nday','plur','geomed','locfield','npg','ngl','npl','pgl','np','ng','nl','pg','pl','gl','bots']
tracking = {'fips' : ""}
for i in range(1, len(output_header)):
tracking[output_header[i]] = 0
county_stats = {}
with open("resources/USCounties_bare.geojson",'r') as fin:
counties = json.load(fin)
for county in counties['features']:
fips = str(county['properties']["FIPS"])
county_stats[fips] = tracking.copy()
county_stats[fips]['fips'] = fips
with open(localness_fn, 'r') as fin:
csvreader = csv.reader(fin)
assert next(csvreader) == INPUT_HEADER
line_no = 0
for line in csvreader:
line_no += 1
fips = line[county_idx]
uid = line[uid_idx]
n, p, g, l = False, False, False, False
if fips:
if uid in twitter_bots:
county_stats[fips]['bots'] += 1
continue
if line[nday_idx] == 'True':
n = True
if line[plur_idx] == "True":
p = True
if line[geomed_idx] == "True":
g = True
if line[locfield_idx] == "True":
l = True
if n and p and g and l:
county_stats[fips]['all'] += 1
elif not n and not p and not g and not l:
county_stats[fips]['none'] += 1
elif n and p and g:
county_stats[fips]['npg'] += 1
elif n and g and l:
county_stats[fips]['ngl'] += 1
elif n and p and l:
county_stats[fips]['npl'] += 1
elif p and g and l:
county_stats[fips]['pgl'] += 1
elif n and p:
county_stats[fips]['np'] += 1
elif n and g:
county_stats[fips]['ng'] += 1
elif n and l:
county_stats[fips]['nl'] += 1
elif p and g:
county_stats[fips]['pg'] += 1
elif p and l:
county_stats[fips]['pl'] += 1
elif g and l:
county_stats[fips]['gl'] += 1
elif n:
county_stats[fips]['nday'] += 1
elif p:
county_stats[fips]['plur'] += 1
elif g:
county_stats[fips]['geomed'] += 1
elif l:
county_stats[fips]['locfield'] += 1
if line_no % 100000 == 0:
print('{0} lines processed.'.format(line_no))
print('{0} total lines processed.'.format(line_no))
with open(output_fn, "w") as fout:
csvwriter = csv.DictWriter(fout, fieldnames=output_header)
csvwriter.writeheader()
for county in county_stats.values():
csvwriter.writerow(county)
if __name__ == "__main__":
main()
```
#### File: chi-2016-localness/rq2_spatial_regression/compute_effect_size_statements.py
```python
import csv
import argparse
import math
METADATA = [["pct_urban", {"stdev": 0.314, # 31.4% Urban Pop
"transformation": "scaled",
"header":"PCT_URBAN_POP_MULTIVARIATE_BETA",
"abbr":"UP"}],
["hmi", {"stdev": 0.2405, # originally $11791, 0.2405 post-log-transformation
"transformation": "logged-scaled",
"header":"HMI_MULTIVARIATE_BETA",
"abbr":"HMI"}],
["med_age", {"stdev": 5, # 5 years
"transformation": "scaled",
"header":"MED_AGE_MULTIVARIATE_BETA",
"abbr":"MA"}],
["wnl", {"stdev": 0.195, # 19.5% White, Non-Latino
"transformation": "scaled",
"header":"WNL_MULTIVARIATE_BETA",
"abbr":"WNL"}],
["mbsa", {"stdev": 0.198, # originally 19.8% Management/Business/Science/Art
"transformation": "logged-scaled",
"header":"MBSA_MULTIVARIATE_BETA",
"abbr":"MBSA"}]]
PLUS_TEN_PERCENT_CONSTANT = (110/100.0)
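# Worked note (added for clarity, not part of the original script): for a DV and IV that
# are both log-transformed and scaled, a standardized beta is converted into a
# "+10% IV" statement below via
#   relative change = 1.1 ** (beta * sd_ln_DV / sd_ln_IV) - 1
# because multiplying the IV by 1.1 adds log(1.1) / sd_ln_IV to the standardized
# predictor, which changes ln(DV) by beta * sd_ln_DV * log(1.1) / sd_ln_IV.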
def main():
"""Compute effect-size statements for R spatial regression results."""
parser = argparse.ArgumentParser()
parser.add_argument("regression_file", help="file path of the the csv file containing the R spatial regression results.")
parser.add_argument("--output_file", default="regression_effect_size_statements.csv",
help="file path of the csv file that will contain the regression results with effect size statements appended.")
args = parser.parse_args()
with open(args.regression_file, 'r') as fin:
with open(args.output_file, 'w') as fout:
csvreader = csv.reader(fin)
csvwriter = csv.writer(fout)
# Expected header = [REPOSITORY, FILTER, DEPENDENT_VARIABLE, MODEL_TYPE, DV_TRANSFORMATION,
# SD_BEFORE_SCALING, PCT_URBAN_POP_MULTIVARIATE_BETA, HMI_MULTIVARIATE_BETA,
# MED_AGE_MULTIVARIATE_BETA, WNL_MULTIVARIATE_BETA, MBSA_MULTIVARIATE_BETA]
header = next(csvreader)
sd_idx = header.index("SD_BEFORE_SCALING")
transformation_idx = header.index("DV_TRANSFORMATION")
for iv in METADATA:
iv[1]["header"] = header.index(iv[1]["header"])
if iv[1]['transformation'] == "logged-scaled":
header.append('{0}_PlusTenPercent'.format(iv[1]["abbr"]))
elif iv[1]['transformation'] == 'scaled':
header.append('{0}_PlusOneSD'.format(iv[1]["abbr"]))
else:
header.append(iv[1]["abbr"])
csvwriter.writerow(header)
for line in csvreader:
dv_sd = float(line[sd_idx])
for iv in METADATA:
beta = line[iv[1]["header"]]
if "*" in beta:
dv_transformation = line[transformation_idx]
iv_transformation = iv[1]['transformation']
value = float(beta.replace("*", ""))
if dv_transformation == "Logged, Scaled":
if iv_transformation == 'logged-scaled':
effect_size = "{0}% Relative Change".format(round((math.pow((110/100), value * dv_sd / iv[1]["stdev"]) - 1) * 100, 1))
elif iv_transformation == 'scaled':
effect_size = "{0}% Relative Change".format(round((math.pow(math.e, value * dv_sd) - 1) * 100, 1))
else:
effect_size = "IV Transformation Not Supported"
elif dv_transformation == "Scaled":
if iv_transformation == "logged-scaled":
effect_size = "{0}% Absolute Change".format(round(math.log(1.1) * value * dv_sd / iv[1]["stdev"] * 100, 1))
elif iv_transformation == "scaled":
effect_size = "{0}% Absolute Change".format(round(value * dv_sd * 100, 1))
else:
effect_size = "IV Transformation Not Supported"
else:
effect_size = "DV Transformation Not Supported"
else:
effect_size = "Not Significant"
line.append(effect_size)
csvwriter.writerow(line)
if __name__ == "__main__":
main()
``` |
{
"source": "joh90/iot",
"score": 2
} |
#### File: iot/rooms/__init__.py
```python
import logging
from iot.constants import ROOM_LIST_MESSAGE
from iot.utils import return_mac
from iot.devices import DeviceType
from iot.devices.broadlink import (
BroadlinkDeviceFactory,
BroadlinkDeviceTypes
)
from iot.devices.errors import (
DeviceTypeNotFound, BrandNotFound,
SendCommandError
)
from iot.devices.factory import DeviceFactory
logger = logging.getLogger(__name__)
d_factory = DeviceFactory()
bl_d_factory = BroadlinkDeviceFactory()
# We assume one RM3 RM per room for now
# Supports multiple Broadlink devices
# eg. Smart Plug, Multi Plugs
class Room:
__slots__ = (
"name",
"rm",
"DEVICES",
"BL_DEVICES",
"last_action"
)
def __init__(self, name, rm):
self.name = name
self.rm = rm
self.DEVICES = {}
self.BL_DEVICES = {}
self.last_action = None
def room_info(self):
return {
"name": self.name,
"rm_host": self.rm.host[0] if self.rm else None,
"rm_mac": return_mac(self.rm.mac) if self.rm else None,
"type": self.rm.type if self.rm else None,
"devices": self.DEVICES
}
def format_room_devices(self):
room_devices = [
"*{}* | Type: {}".format(d.id, DeviceType(d.device_type).name) \
for d in self.DEVICES.values()
]
return room_devices
def format_room_bl_devices(self):
room_bl_devices = [
"*{}* | Type: {} | IP: {} | Mac: {}".format(
d.id, d.device_type, d.ip, d.mac_address) \
for d in self.BL_DEVICES.values()
]
return room_bl_devices
def room_list_info(self):
info = self.room_info()
room_devices = self.format_room_devices()
room_broadlink_devices = self.format_room_bl_devices()
return ROOM_LIST_MESSAGE.format(
info["name"],
"Type: {}, IP: {}, Mac: {}".format(
info["type"], info["rm_host"], info["rm_mac"]),
"\n".join(room_devices),
"\n".join(room_broadlink_devices)
)
def populate_devices(self, devices):
populated = []
for d in devices:
if d["id"] not in self.DEVICES:
try:
dev = d_factory.create_device(
d["type"], self, d["id"], d["brand"], d["model"]
)
self.add_device(dev)
populated.append(dev)
except DeviceTypeNotFound:
continue
except BrandNotFound:
logger.error(
"Room: %s, Unable to populate device %s, " \
"Brand %s not found for Device Type %s",
self.name, d["id"], d["brand"], d["type"]
)
continue
return populated
def add_device(self, device):
self.DEVICES[device.id] = device
def get_device(self, device_id):
pass
def populate_broadlink_devices(self, devices):
from iot.server import iot_server
for d in devices:
if d["id"] not in self.BL_DEVICES:
bl_device = iot_server.find_broadlink_device(
d["mac_address"], d["broadlink_type"].upper()
)
if bl_device is None:
logger.error(
"Room: %s, Unable to populate Broadlink device %s, " \
"Broadlink device %s not found with Device Type %s",
self.name, d["id"], d["mac_address"], d["broadlink_type"]
)
continue
try:
dev = bl_d_factory.create_device(
d["broadlink_type"], self, d["id"], bl_device
)
self.add_broadlink_devices(dev.id, dev)
iot_server.devices[dev.id] = dev
except DeviceTypeNotFound:
continue
def add_broadlink_devices(self, id, bl_device):
self.BL_DEVICES[id] = bl_device
def convert_to_bytearray(self, data):
return bytearray.fromhex("".join(data))
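    # Illustrative sketch (hypothetical payload): the stored IR/RF command is a list of
    # hex-string chunks, so e.g. convert_to_bytearray(["26", "00", "1a", "4e"]) returns
    # bytearray(b'&\x00\x1aN'), the form the Broadlink send_data() call expects.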
def send(self, data):
# Check device type
if self.rm and self.rm.type == "RMMINI":
self.send_rm_data(data)
def send_rm_data(self, data):
try:
self.rm.send_data(
self.convert_to_bytearray(data)
)
except Exception as e:
raise SendCommandError("{}: {}".format(e.__class__, e))
```
#### File: iot/iot/server.py
```python
from datetime import datetime, timedelta
import json
import logging
import socket
from typing import Dict, List
import broadlink
from telegram import ReplyKeyboardMarkup
from telegram.ext import (
Updater, CommandHandler, CallbackQueryHandler
)
from iot import constants
from iot.conversations.cmd_adduser import AddUserConversation
from iot.devices.base import BaseDevice
from iot.devices.errors import (
CommandNotFound, InvalidArgument,
SendCommandError
)
from iot.rooms import Room
from iot.utils import return_mac
from iot.utils.decorators import (
valid_device, valid_device_or_room,
valid_device_feature, valid_user
)
from iot.utils.keyboard.cmd_keyboard import (
CommandKeyboardCBHandler
)
from iot.utils.keyboard.cmd_user import (
TOP_MENU_TEXT as USER_TOP_MENU_TEXT,
CommandUserCBHandler
)
logger = logging.getLogger(__name__)
KEYBOARD_HANDLER_NAME = "/keyboard"
USER_HANDLER_NAME = "/user"
class TelegramIOTServer:
def __init__(self):
# Telegram Bot settings
self.bot_id = None
self.bot_secret = None
self.bot_name = None
# Telegram
self.updater: Updater = None
self.dp = None
# JSON files path
self.devices_path = None
self.commands_path = None
self.users_path = None
# Broadlink and Devices
self.broadlink_devices = {}
self.rooms = {}
self.devices = {}
self.commands = {}
self.approved_users = {}
# Keyboard query handlers
self.kb_handlers = {}
# Others
self.start_time: datetime = None
self.last_command_handled = None
def start_server(self, bot_id, bot_secret, bot_name,
devices_path, commands_path, users_path):
self.bot_id = bot_id
self.bot_secret = bot_secret
self.bot_name = bot_name
self.devices_path = devices_path
self.commands_path = commands_path
self.users_path = users_path
self.discover_broadlink_device()
self.reload_commands()
self.reload_rooms_and_devices()
self.reload_users()
self.kb_handlers[KEYBOARD_HANDLER_NAME] = \
CommandKeyboardCBHandler(self, KEYBOARD_HANDLER_NAME)
self.kb_handlers[USER_HANDLER_NAME] = \
CommandUserCBHandler(self, USER_HANDLER_NAME)
self.init_telegram_server()
def init_telegram_server(self):
token: str = '{}:{}'.format(self.bot_id, self.bot_secret)
self.updater = Updater(
token,
user_sig_handler=self.stop_server
)
# Get the dispatcher to register handlers
self.dp = self.updater.dispatcher
self.dp.add_handler(CommandHandler("start", self.command_start))
self.dp.add_handler(CommandHandler("ping", self.command_ping))
self.dp.add_handler(CommandHandler("status", self.command_status))
self.dp.add_handler(CommandHandler("list", self.command_list))
self.dp.add_handler(CommandHandler(
"keyboard", self.command_keyboard, pass_args=True
))
self.dp.add_handler(CommandHandler(
"user", self.command_user, pass_args=True
))
self.dp.add_handler(CallbackQueryHandler(self.handle_keyboard_response))
self.dp.add_handler(CommandHandler(
"on", self.command_on, pass_args=True
))
self.dp.add_handler(CommandHandler(
"off", self.command_off, pass_args=True
))
self.dp.add_handler(CommandHandler(
"d", self.command_device, pass_args=True
))
self.add_conversations()
self.dp.add_error_handler(self.error)
self.updater.start_polling()
logger.info("Telegram IOT Server Running...")
self.start_time = datetime.now()
self.updater.idle()
def add_conversations(self):
# TODO: make a map and initialize it?
AddUserConversation(
self, ["adduser"], ["canceladduser"]
)
def reload_rooms_and_devices(self):
try:
with open(self.devices_path) as f:
try:
data: dict = json.load(f)
if len(data) == 0:
logger.warning(
"Please add your rooms and devices to %s",
self.devices_path
)
return
except (ValueError, TypeError) as e:
logger.error("Decoding devices json file failed: %s", e)
return
else:
for room_name, value in data.items():
if room_name not in self.rooms:
try:
rm_device = None
to_populate_bl_device = False
# Check for RM Device
if all(
k in value for k in (
"mac_address",
"broadlink_type"
)
):
rm_mac_address: str = value["mac_address"]
rm_broadlink_type: str = value["broadlink_type"]
rm_device = self.find_broadlink_device(
rm_mac_address, rm_broadlink_type
)
if 'broadlink_devices' in value.keys():
to_populate_bl_device = True
if rm_device or to_populate_bl_device:
r: Room = Room(room_name, rm_device)
pop_device: List[BaseDevice] = r.populate_devices(
value.get("devices", []))
r.populate_broadlink_devices(
value.get("broadlink_devices", [])
)
# Only populate room if there are any
# normal / broadlink devices
if (
len(r.DEVICES) > 0 or
len(r.BL_DEVICES) > 0
):
self.rooms[room_name] = r
for pd in pop_device:
commands: Dict = self.get_commands(
pd.device_type.value,
pd.brand,
pd.model
)
if commands:
pd.populate_device_commands(commands)
self.devices[pd.id] = pd
except Exception as e:
logger.error("Error While reloading rooms and devices: %s", e)
continue
except FileNotFoundError as e:
logger.error("Devices file not found %s", self.devices_path)
raise e
def find_broadlink_device(self, mac, bl_type):
bl = self.broadlink_devices.get(mac)
if bl and bl_type == bl.type:
return bl
def discover_broadlink_device(self):
# Temp Code for when running server
# with multiple network interface
# TODO: Remove this code as it should be added to
# https://github.com/mjg59/python-broadlink/blob/master/broadlink/__init__.py#L66
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
s.connect(('10.255.255.255', 1))
local_ip = s.getsockname()[0]
except:
local_ip = '127.0.0.1'
finally:
s.close()
logger.info("Local IP: %s", local_ip)
logger.info("Discovering broadlink devices...")
bl_devices: list = broadlink.discover(
timeout=5, local_ip_address=local_ip
)
for bl in bl_devices:
bl.auth()
mac = return_mac(bl.mac)
self.broadlink_devices[mac] = bl
logger.info("Discovered %s device with %s mac", bl.type, mac)
def reload_commands(self):
try:
with open(self.commands_path) as f:
try:
data: dict = json.load(f)
except (ValueError, TypeError) as e:
logger.error("Decoding commands json file failed: %s", e)
return
else:
self.commands = data
except FileNotFoundError as e:
logger.error("Commands file not found %s", self.commands_path)
raise e
def get_commands(self, device_type, brand, model) -> Dict[str, str]:
# Always convert device_type to String,
# as populated command dict key is in String
device_type = str(device_type)
try:
return self.commands[device_type][brand][model]
except KeyError:
logger.error("Command not found for %s-%s-%s",
device_type, brand, model)
return {}
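    # Illustrative sketch (hypothetical commands.json layout): with
    #   {"1": {"samsung": {"ua40": {"power": "2600..."}}}}
    # get_commands(1, "samsung", "ua40") returns {"power": "2600..."}, and any missing
    # device_type/brand/model key falls through to the empty dict above.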
def reload_users(self):
try:
with open(self.users_path) as f:
try:
data: Dict[str, str] = json.load(f)
if len(data) == 0:
logger.warning(
"Please populate at least one user to %s",
self.users_path
)
except (ValueError, TypeError) as e:
logger.error("Decoding users json file failed: %s", e)
return
else:
self.approved_users = data
except FileNotFoundError as e:
logger.error("Users file not found %s", self.users_path)
raise e
def save_users(self):
logger.info("Saving approved users to %s", self.users_path)
with open(self.users_path, "w") as f:
try:
json.dump(self.approved_users, f, indent=4, sort_keys=True)
except Exception as e:
logger.error("Error while saving users to json file: %s", e)
def error(self, update, context):
"""Log Errors caused by Updates."""
logger.warning('Update "%s" caused error "%s"', update, context.error)
@property
def uptime(self):
now = datetime.now()
uptime = str(
timedelta(
seconds=(now-self.start_time).total_seconds()
)
)
return uptime
@property
def broadlink_devices_info(self):
all_bb_info = []
for bb in self.broadlink_devices.values():
bb_info = "Type: {}, IP: {}, Mac: {}".format(
bb.type, bb.host[0],
return_mac(bb.mac)
)
all_bb_info.append(bb_info)
return '\n'.join(all_bb_info)
@valid_user
def command_start(self, update, context):
"""Send a message when the command `/start` is issued."""
update.message.reply_markdown(
constants.START_MESSAGE.format(self.bot_name)
)
def command_ping(self, update, context):
"""Sends message `pong` and user's id, name"""
user = update.effective_user
update.message.reply_markdown(constants.PONG_MESSAGE.format(
user.username, user.id
))
@valid_user
def command_status(self, update, context):
"""Sends server status back"""
server_info = constants.STATUS_MESSAGE.format(
str(datetime.now()).split(".")[0],
self.uptime,
self.last_command_handled,
self.broadlink_devices_info,
len(self.rooms),
len(self.devices),
", ".join(self.approved_users.values())
)
update.message.reply_text(server_info)
@valid_user
def command_list(self, update, context):
"""Sends list of broadlink devices, rooms and devices in room"""
if len(self.rooms) == 0:
update.message.reply_markdown(
constants.NO_ROOM_MESSAGE.format(self.devices_path)
)
return
rooms_info = [str(r.room_list_info()) for r in self.rooms.values()]
update.message.reply_markdown(constants.LIST_MESSAGE.format(
"\n".join(rooms_info)
))
@valid_user
@valid_device_or_room(compulsory=False)
def command_keyboard(self, update, context, *args, **kwargs):
"""Sends Inline keyboard to access rooms and devices"""
if len(self.rooms) == 0:
update.message.reply_markdown(
constants.NO_ROOM_MESSAGE.format(self.devices_path)
)
return
handler = self.kb_handlers[KEYBOARD_HANDLER_NAME]
# By default, reply markup will be rooms keyboard
reply_markup = handler.build_rooms_keyboard()
text = "Select room"
room = kwargs.pop("room", None)
device = kwargs.pop("device", None)
# If room, device can be found, that will be the markup
if room:
reply_markup = handler.build_room_devices_keyboard(room.name)
text = "Select {} device".format(room.name)
elif device:
reply_markup = handler.build_device_keyboard(device.id)
text = "Select {} feature".format(device.id)
update.message.reply_text(text, reply_markup=reply_markup)
@valid_user
def command_user(self, update, context, *args, **kwargs):
"""Sends inline keyboard to view approved users"""
handler = self.kb_handlers[USER_HANDLER_NAME]
reply_markup = handler.build_users_keyboard()
update.message.reply_text(USER_TOP_MENU_TEXT,
reply_markup=reply_markup)
@valid_user
def handle_keyboard_response(self, update, context):
query = update.callback_query
handler_name, internal_cb_data = query.data.split(" ", 1)
if handler_name in self.kb_handlers.keys():
self.kb_handlers[handler_name].process_query(
update, context, internal_cb_data
)
else:
logger.error(
"Unable to find handler_name, %s in kb_handlers",
handler_name
)
@valid_user
@valid_device
def command_on(self, update, context, device, *args, **kwargs):
"""Turn ON targeted device if device id can be found"""
device.power_on()
@valid_user
@valid_device
def command_off(self, update, context, device, *args, **kwargs):
"""Turn OFF targeted device if device id can be found"""
device.power_off()
@valid_user
@valid_device_feature
def command_device(self, update, context, device, feature,
action=None, *args, **kwargs
):
"""
Command device specific feature if device_id, feature and
action can be found, passthrough function and call
server's call_device method
"""
self.call_device(update, context, device, feature,
action=action, *args, **kwargs)
def call_device(self, update, context, device, feature,
action=None, handler_name=None, *args, **kwargs):
"""
Call specified device's feature and action,
if it can be found
"""
def send_text(update, context, message):
if update.callback_query:
self.kb_handlers[handler_name].answer_query(
update.callback_query, context,
text=message
)
else:
update.message.reply_markdown(message)
func = getattr(device, feature)
new_args = args
if action:
new_args = (action,)
try:
result = func(*new_args)
text = "Sent {} with {}".format(device.id, feature)
if isinstance(result, tuple):
text = result[1]
if update.callback_query:
self.kb_handlers[handler_name].answer_query(
update.callback_query, context,
text=text
)
except (NotImplementedError, CommandNotFound):
action = '' if not action else action
send_text(update, context,
constants.DEVICE_COMMAND_NOT_IMPLEMENTED.format(
device.id, feature, action)
)
except SendCommandError as e:
send_text(update, context,
constants.SEND_DEVICE_COMMAND_ERROR.format(
device.id, feature, e
)
)
except (TypeError, InvalidArgument):
send_text(update, context, constants.ARGS_ERROR)
def stop_server(self, *args, **kwargs):
logger.info("Telegram IOT Server stopping...")
# Save approved users json
# TODO: optimize this, save only on approved_users change
self.save_users()
iot_server: TelegramIOTServer = TelegramIOTServer()
```
#### File: utils/keyboard/base.py
```python
import logging
from telegram import (
InlineKeyboardButton, InlineKeyboardMarkup
)
logger = logging.getLogger(__name__)
CLOSE_INLINE_KEYBOARD_COMMAND = "close_keyboard"
YES_TEXT = "YES"
NO_TEXT = "NO"
class InlineKeyboardMixin:
def build_keyboard(self, buttons, cols, header_buttons=None, footer_buttons=None):
if cols > 0:
kb = [buttons[i:i + cols] for i in range(0, len(buttons), cols)]
else:
if not isinstance(buttons[0], list):
kb = [[b] for b in buttons]
else:
kb = [b for b in buttons]
if header_buttons:
kb.insert(0, header_buttons)
if footer_buttons:
to_append = []
for but in footer_buttons:
if isinstance(but, list):
kb.append(but)
else:
to_append.append(but)
if len(to_append):
kb.append(to_append)
return kb
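    # Illustrative sketch (not part of the original module): with cols=2 the buttons are
    # chunked into rows, e.g. build_keyboard([b1, b2, b3, b4, b5], 2)
    # -> [[b1, b2], [b3, b4], [b5]]; header_buttons become the first row and
    # footer_buttons are appended as the last row(s).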
def build_inline_keyboard_markup(self, keyboard):
return InlineKeyboardMarkup(keyboard)
def construct_yes_no_prompt_keyboard(self, yes_cb, no_cb):
button_list = self.yes_no_button(yes_cb, no_cb)
keyboard = self.build_keyboard(button_list, cols=0)
markup = self.build_inline_keyboard_markup(keyboard)
return markup
def header_buttons(self, *args, **kwargs):
"""Returns list of header buttons"""
pass
def footer_buttons(self, *args, **kwargs):
"""Returns list of footer buttons"""
pass
def back_button(self, *args, **kwargs):
"""
Returns InlineKeyboardButton with
callback_data for previous menu
"""
pass
def close_button(self):
"""
Returns generic "Close" InlineKeyboardButton
to close the keyboard
"""
return InlineKeyboardButton(
"Close",
callback_data=self.return_cb_data(CLOSE_INLINE_KEYBOARD_COMMAND)
)
def yes_no_button(self, yes_cb, no_cb):
return [[
InlineKeyboardButton(
YES_TEXT,
callback_data=self.return_cb_data(yes_cb)
),
InlineKeyboardButton(
NO_TEXT,
callback_data=self.return_cb_data(no_cb))
]]
def construct_keyboard_markup(self, options, *args, **kwargs):
raise NotImplementedError
def handle_close(self, text, query, update, context):
context.bot.edit_message_text(text=text,
chat_id=query.message.chat_id,
message_id=query.message.message_id,
reply_markup=None)
self.answer_query(query, context)
class KeyboardCallBackQueryHandler:
__slots__ = ("server", "handler_name")
def __init__(self, server, handler_name):
self.server = server
self.handler_name = handler_name
def return_cb_data(self, cb_data):
"""Always add handler_name before cb_data"""
return "{} {}".format(self.handler_name, cb_data)
def answer_query(self, query, context, text=None, alert=False):
context.bot.answer_callback_query(query.id, text=text, show_alert=alert)
def process_query(self, update, context, internal_callback_data):
query = update.callback_query
logger.info(
"CMD %s CB Handler: Handling '%s', Internal: %s",
self.handler_name, query.data, internal_callback_data
)
# query_data is only the device's id
query_data = internal_callback_data.split()
return query, query_data
``` |
{
"source": "joha2/ToOptixCore",
"score": 3
} |
#### File: ToOptix/FEMPy/CCXPhraser.py
```python
from .FEMBody import FEMBody
from .Node import Node
from .Material import Material
from .Element import Element
class CCXPhraser(object):
""" Importing a FEM object from a calculix input file
This FEM object is used for topology optimization
"""
def __init__(self, file_name):
# Initialize a FEMBody
try:
self.__file_name = file_name
self.__file = open(file_name, "r")
self.__nodes = self.__get_nodes()
self.__elements = self.__get_elements()
self.__material = self.__get_material()
self.__fem_body = FEMBody("CCXPhraser", self.__nodes, self.__elements, self.__material)
self.__file.close()
except IOError as e:
print(e)
def __is_empty_list(self, list):
empty = True
for element in list:
if element not in [" ", ",", "", "\n", "\t"]:
empty = False
return empty
def __remove_empty_parts(self, list):
new_list = []
for element in list:
if element not in [" ", ",", "", "\n", "\t"]:
new_list.append(element)
return new_list
    def __is_line_to_ignore(self, line):
        if len(line) < 1:
            return True
        if line[:-1].isspace():
            return True
        if "**" in line:
            return True
        return False
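    # Illustrative sketch (added for clarity): blank lines and "**" comment lines are
    # skipped by the callers, e.g. __is_line_to_ignore("** heading\n") is True while
    # __is_line_to_ignore("*NODE, NSET=Nall\n") is False.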
def get_fem_body(self):
return self.__fem_body
def get_elements_by_set_name(self, name=None):
self.__file = open(self.__file_name, "r")
elements = []
self.__file.seek(0) # Start at first line
read_attributes = False
for line in self.__file:
if self.__is_line_to_ignore(line):
continue
if "*" in line[0]:
read_attributes = False
if read_attributes:
line_items = line[:-1].split(",")
if self.__is_empty_list(line_items):
continue
line_items = self.__remove_empty_parts(line_items)
for element_id in line_items:
try:
elements.append(int(element_id))
                    except ValueError as e:
print(e)
if "*ELSET" in line.upper():
if name != None:
if name.upper() in line.upper():
read_attributes = True
else:
read_attributes = True
self.__file.close()
return elements
def __get_nodes(self):
node_dict = {}
read_attributes = False
self.__file.seek(0) # Start at first line
for line in self.__file:
if self.__is_line_to_ignore(line):
continue
if "*" in line[0]:
read_attributes = False
if read_attributes:
line_items = line[:-1].split(",")
try:
if self.__is_empty_list(line_items):
continue
node_id = int(line_items[0])
x = float(line_items[1])
y = float(line_items[2])
z = float(line_items[3])
node_dict[node_id] = Node(node_id, x, y, z)
                except (ValueError, IndexError) as e:
print(e)
if "*NODE" in line.upper() \
and not "OUTPUT" in line.upper() \
and not "PRINT" in line.upper() \
and not "FILE" in line.upper():
read_attributes = True
return node_dict
def __get_elements(self):
element_dict = {}
read_attributes = False
self.__file.seek(0)
nodes_of_one_element = 0
new_element = True
for line in self.__file:
if self.__is_line_to_ignore(line):
continue
if "*" in line[0]:
read_attributes = False
if read_attributes:
line_items = line[:-1].split(",")
try:
if self.__is_empty_list(line_items):
continue
# Check if a new element is in this line or adding new nodes by using the node number
if new_element:
elem_id = int(line_items[0])
node_list = []
for node_id in line_items[1: len(line_items)]:
try:
node_id_int = int(node_id)
except ValueError:
continue
node_list.append(self.__nodes[node_id_int])
if len(node_list) == nodes_of_one_element:
new_element = True
else:
new_element = False
continue
if not new_element:
for node_id in line_items[0: len(line_items)]:
node_list.append(self.__nodes[int(node_id)])
if len(node_list) == nodes_of_one_element:
new_element = True
else:
new_element = False
continue
element_dict[elem_id] = Element(elem_id, node_list)
                except (ValueError, KeyError, IndexError) as e:
                    print("Error at line {}: {}".format(line, e))
if "*ELEMENT" in line.upper() \
and not "OUTPUT" in line.upper() \
and not "PRINT" in line.upper() \
and not "FILE" in line.upper():
read_attributes = True
if "C3D" in line:
node_number = line.split("C3D")[1][0:2]
if len(node_number) > 2:
node_number = node_number[0:1]
if node_number[1].isdigit():
nodes_of_one_element = int(node_number)
else:
nodes_of_one_element = int(node_number[0])
if "CPS" in line:
node_number = line.split("CPS")[1][0:2]
if node_number.isdigit():
nodes_of_one_element = int(node_number)
else:
nodes_of_one_element = int(node_number[0])
if "TYPE=S" in line.upper():
node_number = line.split('=S')[1][0:2]
if node_number.isdigit():
nodes_of_one_element = int(node_number)
else:
nodes_of_one_element = int(node_number[0])
return element_dict
def __get_material(self):
read_elastic = False
read_conductivity = False
self.__file.seek(0)
material = Material("Dummy_Material_steel")
material.add_elasticity()
material.add_conductivity()
for line in self.__file:
if "**" in line:
continue
if line[:-1].isspace():
continue
if "*" in line[0]:
read_elastic = False
read_conductivity = False
line_items = line[:-1].split(",")
if self.__is_empty_list(line_items):
continue
if read_elastic:
try:
if len(line_items) <= 2:
print(line_items)
material.add_elasticity(float(line_items[0]), float(line_items[1]))
else:
material.add_elasticity(float(line_items[0]), float(line_items[1]), float(line_items[2]))
                except (ValueError, IndexError) as e:
print(e)
if read_conductivity:
try:
if len(line_items) <= 2:
material.add_conductivity(float(line_items[0]))
else:
material.add_conductivity(float(line_items[0]), float(line_items[1]))
                except (ValueError, IndexError) as e:
print(e)
if "*ELASTIC" in line.upper():
read_elastic = True
if "*CONDUCTIVITY" in line.upper():
read_conductivity = True
if "*MATERIAL" in line.upper():
tmp_line = line[:-1].split(",")
name = "unknown"
for word in tmp_line:
if "NAME" in word.upper():
name = word.split("=")[1]
material = Material(name)
return material
class FRDReader(object):
def __init__(self, file_name):
self.__file_name = file_name + ".frd"
def get_displacement(self, node_dictonary):
frd_file = open(self.__file_name, "r")
displacement_section = False
for line in frd_file:
if len(line) <= 2:
continue
if " -4 DISP" in line.upper():
displacement_section = True
if displacement_section and " -3" in line:
displacement_section = False
if displacement_section and " -1" in line[0:3]:
node_id = int(line[3:13])
disp_x = float(line[13:25])
disp_y = float(line[25:37])
disp_z = float(line[37:49])
node_dictonary[node_id].set_displacement(disp_x, disp_y, disp_z)
def get_temperature(self, node_dictonary):
frd_file = open(self.__file_name, "r")
displacement_section = False
for line in frd_file:
if len(line) <= 2:
continue
if " -4 NDTEMP" in line.upper():
displacement_section = True
if displacement_section and " -3" in line:
displacement_section = False
if displacement_section and " -1" in line[0:3]:
node_id = int(line[3:13])
temperature = float(line[13:25])
node_dictonary[node_id].set_temperature(temperature)
class DATReader(object):
def __init__(self, file_name):
self.__file_name = file_name + ".dat"
def get_displacement(self, node_dictonary):
frd_file = open(self.__file_name, "r")
displacement_section = False
node_counter = 0
for line in frd_file:
if len(line) < 2:
continue
if "DISPLACEMENTS (VX,VY,VZ)" in line.upper():
displacement_section = True
continue
if node_counter == len(node_dictonary):
displacement_section = False
if displacement_section:
node_counter += 1
print(line[0:10])
print(line[10:25])
print(line[25:39])
print(line[39:53])
node_id = int(line[0:10])
disp_x = float(line[10:25])
disp_y = float(line[25:39])
disp_z = float(line[39:53])
node_dictonary[node_id].set_displacement(disp_x, disp_y, disp_z)
def get_energy_density(self, element_dictonary):
energy_vector = []
frd_file = open(self.__file_name, "r")
energy_section = False
element_id_before = -1
for line in frd_file:
if len(line) <= 2:
continue
if energy_section:
element_id = int(line[0:10])
strain_energy = float(line[15:28])
                if element_id != element_id_before:
                    element_dictonary[element_id].set_strain_energy(strain_energy)
                    element_id_before = element_id
                else:
                    old_energy = element_dictonary[element_id].get_strain_energy()
                    element_dictonary[element_id].set_strain_energy(strain_energy + old_energy)
if "INTERNAL ENERGY DENSITY" in line.upper():
energy_section = True
energy_vector = []
for key in element_dictonary:
energy_vector.append(element_dictonary[key].get_strain_energy())
return energy_vector
def get_heat_flux(self, element_dictonary):
frd_file = open(self.__file_name, "r")
energy_section = False
element_id_before = -1
for line in frd_file:
if len(line) <= 2:
continue
if energy_section:
element_id = int(line[0:10])
hflx_x = float(line[15:28])
hflx_y = float(line[28:42])
hflx_z = float(line[42:56])
ges_hfl = (hflx_x**2 + hflx_y**2 + hflx_z**2)**0.5
if element_id != element_id_before:
element_dictonary[element_id].set_heat_flux(ges_hfl)
element_dictonary[element_id].set_heat_flux_xyz(hflx_x, hflx_y, hflx_z)
element_id_before = element_id
else:
old_hflx = element_dictonary[element_id].get_heat_flux()
old_hflx_xyz = element_dictonary[element_id].get_heat_flux_xyz()
element_dictonary[element_id].set_heat_flux(ges_hfl + old_hflx)
element_dictonary[element_id].set_heat_flux_xyz(old_hflx_xyz[0] + hflx_x,
old_hflx_xyz[1] + hflx_y,
old_hflx_xyz[2] + hflx_z)
if "HEAT FLUX" in line.upper():
energy_section = True
energy_vector = []
for key in element_dictonary:
energy_vector.append(element_dictonary[key].get_heat_flux())
return energy_vector
```
#### File: ToOptix/FEMPy/CCXSolver.py
```python
import os
class CCXSolver(object):
def __init__(self, solver_path, input_deck_path):
self.__solver_path = solver_path
self.__input_path = input_deck_path
self.__2D_case = False
def run_topo_sys(self, topo_materials, topo_element_sets, run_path, output):
self.__run_topo_system(topo_materials, topo_element_sets, run_path, output)
def run_topo_sens(self, boundary_nodes, run_path, elements, output="ENER", ):
tmp_run_file = run_path + ".inp"
run_file = open(tmp_run_file, "w")
run_file.write("*** Topology optimization input deck \n")
input_deck = open(self.__input_path, "r")
ignore_element_output = False
element_output_was_set = False
ignore_boundary = False
for line in input_deck:
if "*STEP" in line.upper():
run_file.write("*ELSET, ELSET=TOPO_ALL_ELEMENTS_DMST\n")
counter_tab = 0
for element_id in elements:
counter_tab += 1
if counter_tab == 8:
run_file.write("\n")
counter_tab = 0
run_file.write(str(elements[element_id].get_id()) + ",")
run_file.write("\n")
run_file.write("*BOUNDARY\n")
for node_id in boundary_nodes:
node = boundary_nodes[node_id]
if output == "ENER":
disp = node.get_displacement()
run_file.write(str(node.id) + ", 1, 1, " + str(disp[0]) + "\n")
run_file.write(str(node.id) + ", 2, 2, " + str(disp[1]) + "\n")
run_file.write(str(node.id) + ", 3, 3, " + str(disp[2]) + "\n")
elif output == "HFL":
run_file.write(str(node.id) + ", 11, 11," + str(node.get_temperature()) + "\n")
if "*" in line.upper() and "**" not in line.upper():
ignore_boundary = False
if "*BOUNDARY" in line.upper():
ignore_boundary = True
if ignore_boundary:
continue
if ignore_element_output:
if len(line) >= 2:
if line[0] == "*" and line[0:2] != "**":
ignore_element_output = False
if ignore_element_output:
continue
if "*EL PRINT" in line.upper():
element_output_was_set = True
run_file.write("*EL PRINT, ELSET=TOPO_ALL_ELEMENTS_DMST\n")
run_file.write(output + "\n")
ignore_element_output = True
continue
if "*END STE" in line.upper():
if not element_output_was_set:
run_file.write("*EL PRINT, ELSET=TOPO_ALL_ELEMENTS_DMST\n")
run_file.write(output + "\n")
run_file.write(line)
input_deck.close()
run_file.close()
print(self.__solver_path, run_path)
os.system(self.__solver_path + " " + run_path)
def __run_topo_system(self, topo_materials, topo_element_sets, run_path, output="U"):
tmp_run_file = run_path + ".inp"
run_file = open(tmp_run_file, "w")
run_file.write("*** Topology optimization input deck \n")
input_deck = open(self.__input_path, "r")
ignore_node_output = False
node_output_was_set = False
mat_iter = 0
for line in input_deck:
if "*STEP" in line.upper():
for material in topo_materials:
mat_iter += 1
run_file.write("*MATERIAL, name=" + str(material.get_name()) + "\n")
run_file.write("*ELASTIC \n")
for elasticity in material.get_elasticity():
run_file.write('{}, {}, {} \n'.format(elasticity.get_young_module(),
elasticity.get_contraction(),
elasticity.get_temperature()))
density = mat_iter / len(topo_materials) * 7.900e-09
density = 7.900e-09
run_file.write("*DENSITY \n "+ str(density) + " \n")
run_file.write("*CONDUCTIVITY \n")
for conductivity in material.get_conductivity():
run_file.write('{},{} \n'.format(conductivity.get_conductivity(),
conductivity.get_temperature()))
for element_set in topo_element_sets:
if len(element_set.get_elements()) <= 0:
continue
run_file.write("*ELSET,ELSET=" + element_set.get_name() + "\n")
tmp_counter = 0
for element in element_set.get_elements():
tmp_counter += 1
if tmp_counter == 8:
run_file.write("\n")
tmp_counter = 0
run_file.write(str(element.get_id()) + ",")
run_file.write("\n")
for ii in range(len(topo_element_sets)):
if len(topo_element_sets[ii].get_elements()) <= 0:
continue
set_name = topo_element_sets[ii].get_name()
mat_name = topo_materials[ii].get_name()
if self.__2D_case:
run_file.write("*SHELL SECTION,ELSET=" + str(set_name) + ",material=" + str(mat_name) + "\n")
run_file.write("1 \n")
else:
run_file.write("*SOLID SECTION,ELSET=" + str(set_name) + ",material=" + str(mat_name) + "\n")
if "*SOLID SECTION" in line.upper():
continue
if "SHELL SECTION" in line.upper():
continue
if ignore_node_output:
if len(line) >= 2:
if line[0] == "*" and line[0:2] != "**":
ignore_node_output = False
if ignore_node_output:
continue
if "*NODE FILE" in line.upper():
node_output_was_set = True
run_file.write("*NODE FILE\n")
run_file.write(output + "\n")
ignore_node_output = True
continue
if "*END STE" in line.upper():
if not node_output_was_set:
run_file.write("*NODE FILE\n")
run_file.write( output + "\n")
run_file.write("*EL PRINT, ELSET=Evolumes\n")
run_file.write('ENER' + "\n")
run_file.write(line)
input_deck.close()
run_file.close()
print(self.__solver_path, run_path)
os.system(self.__solver_path + " " + run_path)
```
#### File: ToOptix/FEMPy/ElementSet.py
```python
from .Element import Element
class ElementSet(object):
def __init__(self, name, elements):
self.__name = name
self.__elements = elements
def get_name(self):
return self.__name
def get_elements(self):
return self.__elements
```
#### File: ToOptix/FEMPy/FEMBody.py
```python
from .Node import Node
from .Element import Element
from .Material import Material
class FEMBody(object):
""" Body of an FEM object with elements nodes and material data.
"""
def __init__(self, name, nodes, elements, material):
self.__nodes = nodes
self.__elements = elements
self.__materials = [material]
self.__name = name
def get_nodes(self):
return self.__nodes
def get_materials(self):
return self.__materials
def set_materials(self, materials):
self.__materials = materials
def get_elements(self):
return self.__elements
def __str__(self):
return ('Name: {} Nodes: {} Elements: {} Materials: {}'.format(
self.__name, len(self.__nodes), len(self.__elements), len(self.__materials)))
```
#### File: FEMPy/Geometry/Solid.py
```python
from .Triangle import Triangle
class Solid(object):
""" A solid structure defined by triangles
:param id(int): Solid ID
:param triangles(list(Triangle)): List of all Triangles
Example for creating a Tetrahedral-Solid out of triangles
>>> t1 = Triangle(1, p1, p2, p3)
>>> t2 = Triangle(2, p2, p3, p4)
>>> t3 = Triangle(3, p3, p4, p1)
>>> t4 = Triangle(4, p2, p4, p1)
>>> s1 = Solid(1, [t1, t2, t3, t4])
"""
def __init__(self, id, triangles):
self.__id = id
self.__triangles = triangles
@property
def id(self):
return self.__id
@ id.setter
def id(self, ID):
self.__id = ID
@property
def triangles(self):
return self.__triangles
@triangles.setter
def triangles(self, Triangles):
        if len(Triangles) == 0:
            raise ValueError("No triangles were found for solid {}".format(self.__id))
        self.__triangles = Triangles
```
#### File: FEMPy/Geometry/STLPhraser.py
```python
import os
import os.path
import re
from .Triangle import Triangle
from .Solid import Solid
from .Point import Point
class File(object):
""" File-object
Example for a file definition
>>> file1 = File(1, "foo.txt")
"""
def __init__(self,ID=None, Filepath=None):
self.__filepath = Filepath
self.__id = ID
@property
def id(self):
return self.__id
@ id.setter
def id(self, ID):
self.__id = ID
@property
def filepath(self):
return self.__filepath
@filepath.setter
def filepath(self, filepath):
self.__filepath = filepath
class STL(File):
""" STL-File with geometric data
:param ID (int): Id of the file
:param Filepath (str): Path of the file
Example for creating an stl-object
>>> file1 = STL(1, "./foo.stl")
>>> part = file1.get_parts()[0]
.. note::
The geometry is imported automatically if a file path is given.
Otherwise you need to call read() yourself.
"""
def __init__(self, ID=None, Filepath=None):
File.__init__(self, ID, Filepath)
self.__parts = []
# If a file path is given, the import starts immediately
if self.filepath:
self.read()
def get_parts(self):
"""
:return: All solid objects which are imported
"""
return self.__parts
def add_solid(self, solid):
self.__parts.append(solid)
def write(self, filename):
""" This method can export the current data into an stl-file
"""
if os.path.isfile(filename):
raise ValueError ("File does exist alread %f", filename)
print("Export stl in", filename)
o_file = open(filename,"w")
for part in self.__parts:
solid = part
o_file.write("solid Exported from DMST-STL\n")
for triangle in solid.triangles:
o_file.write("facet normal " + str(triangle.normal[0]) + " " + str(triangle.normal[1]) + " " + str(triangle.normal[2]) + "\n")
o_file.write("outer loop\n")
for point in triangle.points:
o_file.write("vertex " + str(point.x) + " " + str(point.y) + " " + str(point.z) + "\n")
o_file.write("endloop\n")
o_file.write("endfacet\n")
o_file.write("endsolid\n")
def read(self):
""" This method imports the geometry to the parts attribute
"""
if not os.path.isfile(self.filepath):
raise ValueError ("Given file doesnt exist %f", self.filepath)
i_file = open(self.filepath, "r")
# Patterns which are needed
s_pat = "solid"
l_pat = "outer loop"
f_pat = "facet"
p_pat = "vertex"
f_e_pat = "endfacet"
s_e_pat = "endsolid"
l_e_pat = "endloop"
solid_is_found = False
facet_is_found = False
loop_is_found = False
id_s = 0 # ID of the solid
id_t = 0 # ID for triangles
id_p = 0 # ID for points
tmp_p_list = [] # Saves all found points
id_p_old = 0 #ID for points
# Reading the file
for line in i_file:
line = line[0:-1]
# Solid is found
if re.match(s_pat, line, 2):
id_s +=1
s = Solid(id_s, [])
self.__parts.append(s)
solid_is_found = True
continue
# Solid is closed
if re.match(s_e_pat, line, 2):
solid_is_found = False
continue
# Facet is found
if re.match(f_pat, line,2) and solid_is_found:
id_t += 1
facet_is_found = True
t = Triangle(id_t, [])
words = line.split(" ")
nx = float(words[2])
ny = float(words[3])
nz = float(words[4])
t.normal = [nx, ny, nz]
s.triangles.append(t)
continue
# Facet is closed
if re.match(f_e_pat, line,2) and solid_is_found and facet_is_found:
facet_is_found = False
continue
# Loop is found
if re.match(l_pat, line,2) and solid_is_found and facet_is_found:
loop_is_found = True
continue
# Loop is closed
if re.match(l_e_pat, line,2) and solid_is_found and facet_is_found and loop_is_found:
loop_is_found = False
continue
# Vertex is found
if re.match(p_pat, line,2) and solid_is_found and facet_is_found and loop_is_found:
# Finding new point coord
words = line.split(" ")
x = float(words[1])
y = float(words[2])
z = float(words[3])
# Checking if point_id exists already
# If the point_id is found choose the same ID
p_is_found = False
controll_count = 0
for t_p in tmp_p_list:
if t_p.x == x and t_p.y == y and t_p.z == z:
id_p_old = t_p.id
controll_count += 1
p_is_found = True
if controll_count > 1:
raise ValueError("Two same points have different ID s")
# Creating a new point_id or selectin an old
if p_is_found:
p = Point(id_p_old, x, y, z)
else:
id_p += 1
p = Point(id_p, x, y, z)
tmp_p_list.append(p)
# Resulting point
t.points.append(p)
i_file.close()
if id_s== 0 or id_t== 0 or id_p== 0:
raise ValueError("Fileformat STL does not match: Define Solid-->Faces-->Vertexes")
print("STL-File succesfully imported")
print("Solids: ", id_s)
print("Triangles", id_t)
print("Different Vertices", id_p)
```
#### File: FEMPy/Geometry/Surface.py
```python
from .Triangle import Triangle
from .Solid import Solid
class Surface():
def __init__(self):
self.__triangles = []
@property
def triangles(self):
return self.__triangles
@triangles.setter
def triangles(self, Triangles):
self.__triangles = Triangles
def create_surface_on_elements(self, elements):
eFace = {}  # Counts how many times each face occurs
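# First pass: count how often each face occurs, where a face is identified by the
# sorted ids of its corner nodes. Hexahedra (8/20 nodes), wedges (6/15 nodes) and
# tetrahedra (4/10 nodes) are handled below; a face shared by two elements is
# counted twice, so faces with a count of 1 lie on the free surface.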
for elem in elements:
if len(elem.get_nodes()) == 8 or len(elem.get_nodes()) == 20:
n1 = elem.get_nodes()[0].id
n2 = elem.get_nodes()[1].id
n3 = elem.get_nodes()[2].id
n4 = elem.get_nodes()[3].id
n5 = elem.get_nodes()[4].id
n6 = elem.get_nodes()[5].id
n7 = elem.get_nodes()[6].id
n8 = elem.get_nodes()[7].id
f = sorted([n1, n2, n3, n4])
try:
eFace[f[0], f[1], f[2], f[3]] = eFace[f[0], f[1], f[2], f[3]] + 1
except:
eFace[f[0], f[1], f[2], f[3]] = 1
# Face2
f = sorted([n5, n8, n7, n6])
try:
eFace[f[0], f[1], f[2], f[3]] = eFace[f[0], f[1], f[2], f[3]] + 1
except:
eFace[f[0], f[1], f[2], f[3]] = 1
# Face3
f = sorted([n1, n5, n6, n2])
try:
eFace[f[0], f[1], f[2], f[3]] = eFace[f[0], f[1], f[2], f[3]] + 1
except:
eFace[f[0], f[1], f[2], f[3]] = 1
# Face4
f = sorted([n2, n6, n7, n3])
try:
eFace[f[0], f[1], f[2], f[3]] = eFace[f[0], f[1], f[2], f[3]] + 1
except:
eFace[f[0], f[1], f[2], f[3]] = 1
# Face5
f = sorted([n3, n7, n8, n4])
try:
eFace[f[0], f[1], f[2], f[3]] = eFace[f[0], f[1], f[2], f[3]] + 1
except:
eFace[f[0], f[1], f[2], f[3]] = 1
# Face6
f = sorted([n4, n8, n5, n1])
try:
eFace[f[0], f[1], f[2], f[3]] = eFace[f[0], f[1], f[2], f[3]] + 1
except:
eFace[f[0], f[1], f[2], f[3]] = 1
if len(elem.get_nodes()) == 6 or len(elem.get_nodes()) == 15:
n1 = elem.get_nodes()[0].id
n2 = elem.get_nodes()[1].id
n3 = elem.get_nodes()[2].id
n4 = elem.get_nodes()[3].id
n5 = elem.get_nodes()[4].id
n6 = elem.get_nodes()[5].id
# Face1
f = sorted([n1, n2, n3])
try:
eFace[f[0], f[1], f[2]] = eFace[f[0], f[1], f[2]] + 1
except:
eFace[f[0], f[1], f[2]] = 1
# Face2
f = sorted([n4, n6, n5])
try:
eFace[f[0], f[1], f[2]] = eFace[f[0], f[1], f[2]] + 1
except:
eFace[f[0], f[1], f[2]] = 1
# Face3
f = sorted([n1, n4, n5, n2])
try:
eFace[f[0], f[1], f[2], f[3]] = eFace[f[0], f[1], f[2], f[3]] + 1
except:
eFace[f[0], f[1], f[2], f[3]] = 1
# Face4
f = sorted([n2, n5, n6, n3])
try:
eFace[f[0], f[1], f[2], f[3]] = eFace[f[0], f[1], f[2], f[3]] + 1
except:
eFace[f[0], f[1], f[2], f[3]] = 1
# Face5
f = sorted([n3, n6, n4, n1])
try:
eFace[f[0], f[1], f[2], f[3]] = eFace[f[0], f[1], f[2], f[3]] + 1
except:
eFace[f[0], f[1], f[2], f[3]] = 1
if len(elem.get_nodes()) == 4 or len(elem.get_nodes()) == 10:
n1 = elem.get_nodes()[0].id
n2 = elem.get_nodes()[1].id
n3 = elem.get_nodes()[2].id
n4 = elem.get_nodes()[3].id
# Face1
f = sorted([n1, n2, n3])
try:
eFace[f[0], f[1], f[2]] = eFace[f[0], f[1], f[2]] + 1
except:
eFace[f[0], f[1], f[2]] = 1
# Face2
f = sorted([n1, n4, n2])
try:
eFace[f[0], f[1], f[2]] = eFace[f[0], f[1], f[2]] + 1
except:
eFace[f[0], f[1], f[2]] = 1
# Face3
f = sorted([n2, n4, n3])
try:
eFace[f[0], f[1], f[2]] = eFace[f[0], f[1], f[2]] + 1
except:
eFace[f[0], f[1], f[2]] = 1
# Face4
f = sorted([n3, n4, n1])
try:
eFace[f[0], f[1], f[2]] = eFace[f[0], f[1], f[2]] + 1
except:
eFace[f[0], f[1], f[2]] = 1
tn = 0
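# Second pass: every face counted exactly once belongs to the free surface and is
# rebuilt as triangles (quadrilateral faces are split into two); tn numbers the
# created triangles.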
for elem in elements:
if len(elem.get_nodes()) == 8 or len(elem.get_nodes()) == 20:
n1 = elem.get_nodes()[0].id
n2 = elem.get_nodes()[1].id
n3 = elem.get_nodes()[2].id
n4 = elem.get_nodes()[3].id
n5 = elem.get_nodes()[4].id
n6 = elem.get_nodes()[5].id
n7 = elem.get_nodes()[6].id
n8 = elem.get_nodes()[7].id
n11 = elem.get_nodes()[0]
n22 = elem.get_nodes()[1]
n33 = elem.get_nodes()[2]
n44 = elem.get_nodes()[3]
n55 = elem.get_nodes()[4]
n66 = elem.get_nodes()[5]
n77 = elem.get_nodes()[6]
n88 = elem.get_nodes()[7]
# Face1
f = sorted([n1, n2, n3, n4])
if eFace[f[0], f[1], f[2], f[3]] == 1:
tmp_tri = Triangle(tn, [n11, n22, n33], [n1, n2, n3])
self.triangles.append(tmp_tri)
tn += 1
tmp_tri = Triangle(tn, [n33, n44, n11], [n3, n4, n1])
self.triangles.append(tmp_tri)
tn += 1
# Face2
f = sorted([n5, n8, n7, n6])
if eFace[f[0], f[1], f[2], f[3]] == 1:
tmp_tri = Triangle(tn, [n55, n88, n77], [n5, n8, n7])
self.triangles.append(tmp_tri)
tn += 1
tmp_tri = Triangle(tn, [n77, n66, n55], [n7, n6, n5])
self.triangles.append(tmp_tri)
tn += 1
# Face3
f = sorted([n1, n5, n6, n2])
if eFace[f[0], f[1], f[2], f[3]] == 1:
tmp_tri = Triangle(tn, [n11, n55, n66], [n1, n5, n6])
self.triangles.append(tmp_tri)
tn += 1
tmp_tri = Triangle(tn, [n66, n22, n11], [n6, n2, n1])
self.triangles.append(tmp_tri)
tn += 1
# Face4
f = sorted([n2, n6, n7, n3])
if eFace[f[0], f[1], f[2], f[3]] == 1:
tmp_tri = Triangle(tn, [n22, n66, n77], [n2, n6, n7])
self.triangles.append(tmp_tri)
tn += 1
tmp_tri = Triangle(tn, [n77, n33, n22], [n7, n3, n2])
self.triangles.append(tmp_tri)
tn += 1
# Face5
f = sorted([n3, n7, n8, n4])
if eFace[f[0], f[1], f[2], f[3]] == 1:
tmp_tri = Triangle(tn, [n33, n77, n88], [n3,n7,n8])
self.triangles.append(tmp_tri)
tn += 1
tmp_tri = Triangle(tn, [n88, n44, n33], [n8,n4,n3])
self.triangles.append(tmp_tri)
tn += 1
# Face6
f = sorted([n4, n8, n5, n1])
if eFace[f[0], f[1], f[2], f[3]] == 1:
tmp_tri = Triangle(tn, [n44, n88, n55], [n4,n8,n5])
self.triangles.append(tmp_tri)
tn += 1
tmp_tri = Triangle(tn, [n55, n11, n44], [n5,n1,n4])
self.triangles.append(tmp_tri)
tn += 1
if len(elem.get_nodes()) == 6 or len(elem.get_nodes()) == 15:
n1 = elem.get_nodes()[0].id
n2 = elem.get_nodes()[1].id
n3 = elem.get_nodes()[2].id
n4 = elem.get_nodes()[3].id
n5 = elem.get_nodes()[4].id
n6 = elem.get_nodes()[5].id
n11 = elem.get_nodes()[0]
n22 = elem.get_nodes()[1]
n33 = elem.get_nodes()[2]
n44 = elem.get_nodes()[3]
n55 = elem.get_nodes()[4]
n66 = elem.get_nodes()[5]
# Face1
f = sorted([n1, n2, n3])
if eFace[f[0], f[1], f[2]] == 1:
tmp_tri = Triangle(tn, [n11, n22, n33], [n1, n2, n3])
self.triangles.append(tmp_tri)
tn += 1
# Face2
f = sorted([n4, n6, n5])
if eFace[f[0], f[1], f[2]] == 1:
tmp_tri = Triangle(tn, [n44, n66, n55], [n4,n6,n5])
self.triangles.append(tmp_tri)
tn += 1
# Face3
f = sorted([n1, n4, n5, n2])
if eFace[f[0], f[1], f[2], f[3]] == 1:
tmp_tri = Triangle(tn, [n11, n44, n55], [n1,n4,n5])
self.triangles.append(tmp_tri)
tn += 1
tmp_tri = Triangle(tn, [n55, n22, n11], [n5,n2,n1])
self.triangles.append(tmp_tri)
tn += 1
# Face4
f = sorted([n2, n5, n6, n3])
if eFace[f[0], f[1], f[2], f[3]] == 1:
tmp_tri = Triangle(tn, [n22, n55, n66], [n2,n5,n6])
self.triangles.append(tmp_tri)
tn += 1
tmp_tri = Triangle(tn, [n66, n33, n22], [n6,n3,n2])
self.triangles.append(tmp_tri)
tn += 1
# Face5
f = sorted([n3, n6, n4, n1])
if eFace[f[0], f[1], f[2], f[3]] == 1:
tmp_tri = Triangle(tn, [n33, n66, n44], [n3,n6,n4])
self.triangles.append(tmp_tri)
tn += 1
tmp_tri = Triangle(tn, [n44, n11, n33], [n4,n1,n3])
self.triangles.append(tmp_tri)
tn += 1
if len(elem.get_nodes()) == 4 or len(elem.get_nodes()) == 10:
n1 = elem.get_nodes()[0].id
n2 = elem.get_nodes()[1].id
n3 = elem.get_nodes()[2].id
n4 = elem.get_nodes()[3].id
n11 = elem.get_nodes()[0]
n22 = elem.get_nodes()[1]
n33 = elem.get_nodes()[2]
n44 = elem.get_nodes()[3]
# Face1
f = sorted([n1, n2, n3])
if eFace[f[0], f[1], f[2]] == 1:
tmp_tri = Triangle(tn, [n11, n22, n33], [n1,n2,n3])
self.triangles.append(tmp_tri)
tn += 1
# Face2
f = sorted([n1, n4, n2])
if eFace[f[0], f[1], f[2]] == 1:
tmp_tri = Triangle(tn, [n11, n44, n22],[n1,n4,n2])
self.triangles.append(tmp_tri)
tn += 1
# Face3
f = sorted([n2, n4, n3])
if eFace[f[0], f[1], f[2]] == 1:
tmp_tri = Triangle(tn, [n22, n44, n33], [n2,n4,n3])
self.triangles.append(tmp_tri)
tn += 1
# Face4
f = sorted([n3, n4, n1])
if eFace[f[0], f[1], f[2]] == 1:
tmp_tri = Triangle(tn, [n33, n44, n11], [n3,n4,n1])
self.triangles.append(tmp_tri)
tn += 1
```
#### File: ToOptix/FEMPy/Material.py
```python
class Elasticity(object):
def __init__(self, young_module, contraction, temperature):
self.__temperature = temperature
self.__contraction = contraction
self.__young_module = young_module
def get_temperature(self):
return self.__temperature
def get_contraction(self):
return self.__contraction
def get_young_module(self):
return self.__young_module
class Conductivity(object):
def __init__(self, conductivity, temperature):
self.__temperature = temperature
self.__conductivity = conductivity
def get_temperature(self):
return self.__temperature
def get_conductivity(self):
return self.__conductivity
class Material(object):
def __init__(self, name):
self.__name = name
self.__elasticity = []
self.__conductivity = []
def add_elasticity(self, young_module=70000, contraction=0.3, temperature=0.0):
self.__elasticity.append(Elasticity(young_module, contraction, temperature))
def add_conductivity(self, conductivity=250, temperature=0.0):
self.__conductivity.append(Conductivity(conductivity, temperature))
def get_name(self):
return self.__name
def __str__(self):
return ('Name: {} Elasticity entries: {} Conductivity entries: {} '.format(
self.__name, len(self.__elasticity), len(self.__conductivity)))
def get_elasticity(self):
return self.__elasticity
def get_conductivity(self):
return self.__conductivity
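# Hedged usage sketch (illustrative values only, not part of the original module):
# mat = Material("alu")
# mat.add_elasticity(young_module=70000, contraction=0.3, temperature=20.0)
# mat.add_conductivity(conductivity=250, temperature=20.0)
# print(mat)  # -> Name: alu Elasticity entries: 1 Conductivity entries: 1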
```
#### File: ToOptixCore/ToOptix/TopologyOptimizer.py
```python
from .FEMPy.Element import Element
from .FEMPy.ElementSet import ElementSet
from . import DensityMaterial
from .Filter import ElementFilter
import numpy as np
class TopologyOptimizer(object):
def __init__(self, density, density_material, compaction_ratio=0.3):
self.__system_answer = []
self.__system_sensitivity = []
self.__current_density = np.array(density)
self.__next_density = np.array(density)
self.__density_material = density_material
self.__memory_size = 2
self.__sensitivity_sets = []
self.__density_sets = []
self.__convergence_max = 0.01
self.__max_change = 0.1
self.__compaction_ratio = compaction_ratio
self.__no_design_space = []
def set_no_design_space(self, elements, no_design_space_elements):
print("No design space is active with {} elements".format(len(no_design_space_elements)))
counter = -1
for element_key in elements:
counter += 1
if element_key in no_design_space_elements:
self.__no_design_space.append(counter)
def set_maximum_density_change(self, max_change):
self.__max_change = max_change
def get_current_density(self):
return self.__current_density
def set_compaction_ratio(self, compaction_ratio):
self.__compaction_ratio = compaction_ratio
def get_element_sets_by_density(self, elements):
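# Bin every element into one of get_steps() density classes and stamp its current
# density on the element; each non-empty class is returned as an ElementSet, which
# is used elsewhere to write one section card per density step.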
element_sets = []
for i in range(self.__density_material.get_steps()):
element_sets.append([])
counter = 0
for key in elements:
elset_number = (self.__density_material.get_steps() - 1) * self.__current_density[counter]
elements[key].set_density(self.__current_density[counter])
element_sets[int(elset_number)].append(elements[key])
counter += 1
element_sets_ob = []
counter = 1
for element_id_set in element_sets:
eset = ElementSet("topoElementSet" + str(counter), element_id_set)
element_sets_ob.append(eset)
counter += 1
return element_sets_ob
def filter_sensitivity(self, element_filter, sensitivity):
return element_filter.filter_element_properties(sensitivity * self.__current_density)
def filter_density(self, element_filter):
self.__current_density = element_filter.filter_element_properties(self.__current_density)
def change_density(self, sensitivity):
sensitivity = np.array(self.__current_density)**(self.__density_material.get_penalty_exponent() - 1) * np.array(sensitivity)
if min(sensitivity) <= 0:
sensitivity += abs(np.min(sensitivity))+ 0.1
self.__sensitivity_sets.append(sensitivity)
self.__density_sets.append(self.__current_density)
if len(self.__sensitivity_sets) > self.__memory_size:
self.__sensitivity_sets.pop(0)
self.__density_sets.pop(0)
weight = 0
for sensitivity_in_memory in self.__sensitivity_sets:
if weight == 0:
sensitivity = sensitivity_in_memory
else:
sensitivity += sensitivity_in_memory
weight += 1
"""
weight = 0
for density_in_memory in self.__density_sets:
if weight == 0:
self.__current_density = density_in_memory * np.exp(weight)
else:
self.__current_density += density_in_memory * np.exp(weight)
weight += 1
self.__current_density = self.__current_density * 1.0 / sum_weight
"""
print("length sens sets memory: ", len(self.__sensitivity_sets))
l_upper = max(sensitivity)
l_lower = min(sensitivity)
# Bisection on the Lagrange multiplier (optimality-criteria style update)
while(abs(l_upper - l_lower) > (l_upper * self.__convergence_max)):
l_mid = 0.5 * (l_lower + l_upper)
# Values between 0 and 1
# SIMP method
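# Scale each density by sqrt(sensitivity / l_mid), then clamp the step to
# +/- max_change and the value itself to [0, 1]; l_mid is adjusted by the
# bisection until the mean density meets the compaction ratio.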
self.__next_density = np.maximum(0.0,
np.maximum(self.__current_density - self.__max_change,
np.minimum(1.0,
np.minimum(self.__current_density + self.__max_change,
self.__current_density * (sensitivity / l_mid) ** 0.5))))
# BESO-Method
#new_design_variable = np.maximum(0.00001, np.sign(sensitivity - l_mid))
for id in self.__no_design_space:
self.__next_density[id] = 1.0
if np.mean(self.__next_density) - self.__compaction_ratio > 0.0:
l_lower = l_mid
else:
l_upper = l_mid
print("##---- MEAN DENSTIY: " + str(np.mean(self.__next_density)))
self.__current_density = self.__next_density
``` |
{
"source": "joha73/plugin.video.mediathekview",
"score": 2
} |
#### File: resources/lib/plugin.py
```python
from __future__ import unicode_literals # ,absolute_import, division
# from future import standard_library
# from builtins import *
# standard_library.install_aliases()
import os
import time
import datetime
# pylint: disable=import-error
import xbmcgui
import xbmcplugin
from resources.lib.kodi.kodiaddon import KodiPlugin
from resources.lib.store import Store
from resources.lib.notifier import Notifier
from resources.lib.settings import Settings
from resources.lib.filmui import FilmUI
from resources.lib.channelui import ChannelUI
from resources.lib.initialui import InitialUI
from resources.lib.showui import ShowUI
from resources.lib.downloader import Downloader
from resources.lib.searches import RecentSearches
# -- Classes ------------------------------------------------
class MediathekViewPlugin(KodiPlugin):
""" The main plugin class """
def __init__(self):
super(MediathekViewPlugin, self).__init__()
self.settings = Settings()
self.notifier = Notifier()
self.database = Store(
self.get_new_logger('Store'),
self.notifier,
self.settings
)
def show_main_menu(self):
""" Creates the main menu of the plugin """
# Search
self.add_folder_item(
30901,
{'mode': "search", 'extendedsearch': False},
icon=os.path.join(self.path, 'resources', 'icons', 'search-m.png')
)
# Search all
self.add_folder_item(
30902,
{'mode': "search", 'extendedsearch': True},
icon=os.path.join(self.path, 'resources', 'icons', 'search-m.png')
)
# Browse livestreams
self.add_folder_item(
30903,
{'mode': "livestreams"},
icon=os.path.join(self.path, 'resources', 'icons', 'live2-m.png')
)
# Browse recently added
self.add_folder_item(
30904,
{'mode': "recent", 'channel': 0},
icon=os.path.join(self.path, 'resources', 'icons', 'new-m.png')
)
# Browse recently added by channel
self.add_folder_item(
30905,
{'mode': "recentchannels"},
icon=os.path.join(self.path, 'resources', 'icons', 'new-m.png')
)
# Browse by Initial->Show
self.add_folder_item(
30906,
{'mode': "initial", 'channel': 0},
icon=os.path.join(self.path, 'resources', 'icons', 'movie-m.png')
)
# Browse by Channel->Initial->Shows
self.add_folder_item(
30907,
{'mode': "channels"},
icon=os.path.join(self.path, 'resources', 'icons', 'movie-m.png')
)
# Database Information
self.add_action_item(
30908,
{'mode': "action-dbinfo"},
icon=os.path.join(self.path, 'resources', 'icons', 'dbinfo-m.png')
)
# Manual database update
if self.settings.updmode == 1 or self.settings.updmode == 2:
self.add_action_item(30909, {'mode': "action-dbupdate"})
self.end_of_directory()
self._check_outdate()
def show_searches(self, extendedsearch=False):
"""
Fill the search screen with "New Search..." and the
list of recent searches
Args:
extendedsearch(bool, optional): If `True`, the searches
are performed both in show title and description.
Default is `False`
"""
self.add_folder_item(
30931,
{'mode': "newsearch", 'extendedsearch': extendedsearch},
icon=os.path.join(self.path, 'resources', 'icons', 'search-m.png')
)
RecentSearches(self, extendedsearch).load().populate()
self.end_of_directory()
def new_search(self, extendedsearch=False):
"""
Asks the user to enter his search terms and then
performs the search and displays the results.
Args:
extendedsearch(bool, optional): If `True`, the searches
are performed both in show title and description.
Default is `False`
"""
settingid = 'lastsearch2' if extendedsearch is True else 'lastsearch1'
headingid = 30902 if extendedsearch is True else 30901
# are we returning from playback ?
search = self.get_setting(settingid)
if search:
# restore previous search
self.database.search(search, FilmUI(self), extendedsearch)
else:
# enter search term
(search, confirmed) = self.notifier.get_entered_text('', headingid)
if len(search) > 2 and confirmed is True:
RecentSearches(self, extendedsearch).load().add(search).save()
if self.database.search(search, FilmUI(self), extendedsearch) > 0:
self.set_setting(settingid, search)
else:
# pylint: disable=line-too-long
self.info(
'The following ERROR can be ignored. It is caused by the architecture of the Kodi Plugin Engine')
self.end_of_directory(False, cache_to_disc=True)
def show_db_info(self):
""" Displays current information about the database """
info = self.database.get_status()
heading = self.language(30907)
infostr = self.language({
'NONE': 30941,
'UNINIT': 30942,
'IDLE': 30943,
'UPDATING': 30944,
'ABORTED': 30945
}.get(info['status'], 30941))
infostr = self.language(30965) % infostr
totinfo = self.language(30971) % (
info['tot_chn'],
info['tot_shw'],
info['tot_mov']
)
updatetype = self.language(30972 if info['fullupdate'] > 0 else 30973)
if info['status'] == 'UPDATING' and info['filmupdate'] > 0:
updinfo = self.language(30967) % (
updatetype,
datetime.datetime.fromtimestamp(
info['filmupdate']
).strftime('%Y-%m-%d %H:%M:%S'),
info['add_chn'],
info['add_shw'],
info['add_mov']
)
elif info['status'] == 'UPDATING':
updinfo = self.language(30968) % (
updatetype,
info['add_chn'],
info['add_shw'],
info['add_mov']
)
elif info['lastupdate'] > 0 and info['filmupdate'] > 0:
updinfo = self.language(30969) % (
updatetype,
datetime.datetime.fromtimestamp(
info['lastupdate']
).strftime('%Y-%m-%d %H:%M:%S'),
datetime.datetime.fromtimestamp(
info['filmupdate']
).strftime('%Y-%m-%d %H:%M:%S'),
info['add_chn'],
info['add_shw'],
info['add_mov'],
info['del_chn'],
info['del_shw'],
info['del_mov']
)
elif info['lastupdate'] > 0:
updinfo = self.language(30970) % (
updatetype,
datetime.datetime.fromtimestamp(
info['lastupdate']
).strftime('%Y-%m-%d %H:%M:%S'),
info['add_chn'],
info['add_shw'],
info['add_mov'],
info['del_chn'],
info['del_shw'],
info['del_mov']
)
else:
updinfo = self.language(30966)
xbmcgui.Dialog().textviewer(
heading,
infostr + '\n\n' +
totinfo + '\n\n' +
updinfo
)
def _check_outdate(self, maxage=172800):
if self.settings.updmode != 1 and self.settings.updmode != 2:
# no check with update disabled or update automatic
return
if self.database is None:
# should never happen
self.notifier.show_outdated_unknown()
return
status = self.database.get_status()
if status['status'] == 'NONE' or status['status'] == 'UNINIT':
# should never happen
self.notifier.show_outdated_unknown()
return
elif status['status'] == 'UPDATING':
# great... we are updating. nuthin to show
return
# lets check how old we are
tsnow = int(time.time())
tsold = int(status['lastupdate'])
if tsnow - tsold > maxage:
self.notifier.show_outdated_known(status)
def init(self):
""" Initialisation of the plugin """
if self.database.init():
if self.settings.handle_first_run():
pass
self.settings.handle_update_on_start()
def run(self):
""" Execution of the plugin """
# save last activity timestamp
self.settings.reset_user_activity()
# process operation
self.info("Plugin invoked with parameters {}", self.args)
mode = self.get_arg('mode', None)
if mode is None:
self.show_main_menu()
elif mode == 'search':
extendedsearch = self.get_arg('extendedsearch', 'False') == 'True'
self.show_searches(extendedsearch)
elif mode == 'newsearch':
self.new_search(self.get_arg('extendedsearch', 'False') == 'True')
elif mode == 'research':
search = self.get_arg('search', '')
extendedsearch = self.get_arg('extendedsearch', 'False') == 'True'
self.database.search(search, FilmUI(self), extendedsearch)
RecentSearches(self, extendedsearch).load().add(search).save()
elif mode == 'delsearch':
search = self.get_arg('search', '')
extendedsearch = self.get_arg('extendedsearch', 'False') == 'True'
RecentSearches(self, extendedsearch).load().delete(
search).save().populate()
self.run_builtin('Container.Refresh')
elif mode == 'livestreams':
self.database.get_live_streams(
FilmUI(self, [xbmcplugin.SORT_METHOD_LABEL]))
elif mode == 'recent':
channel = self.get_arg('channel', 0)
self.database.get_recents(channel, FilmUI(self))
elif mode == 'recentchannels':
self.database.get_recent_channels(
ChannelUI(self, nextdir='recent'))
elif mode == 'channels':
self.database.get_channels(ChannelUI(self, nextdir='shows'))
elif mode == 'action-dbinfo':
self.show_db_info()
elif mode == 'action-dbupdate':
self.settings.trigger_update()
self.notifier.show_notification(30963, 30964, time=10000)
elif mode == 'initial':
channel = self.get_arg('channel', 0)
self.database.get_initials(channel, InitialUI(self))
elif mode == 'shows':
channel = self.get_arg('channel', 0)
initial = self.get_arg('initial', None)
self.database.get_shows(channel, initial, ShowUI(self))
elif mode == 'films':
show = self.get_arg('show', 0)
self.database.get_films(show, FilmUI(self))
elif mode == 'downloadmv':
filmid = self.get_arg('id', 0)
quality = self.get_arg('quality', 1)
Downloader(self).download_movie(filmid, quality)
elif mode == 'downloadep':
filmid = self.get_arg('id', 0)
quality = self.get_arg('quality', 1)
Downloader(self).download_episode(filmid, quality)
elif mode == 'playwithsrt':
filmid = self.get_arg('id', 0)
Downloader(self).play_movie_with_subs(filmid)
# cleanup saved searches
if mode is None or mode != 'newsearch':
self.set_setting('lastsearch1', '')
self.set_setting('lastsearch2', '')
def exit(self):
""" Shutdown of the application """
self.database.exit()
```
#### File: resources/lib/updater.py
```python
import os
import time
import datetime
import subprocess
# pylint: disable=import-error
try:
# Python 3.x
from urllib.error import URLError
except ImportError:
# Python 2.x
from urllib2 import URLError
from contextlib import closing
import ijson
import resources.lib.mvutils as mvutils
# from resources.lib.utils import *
from resources.lib.store import Store
from resources.lib.exceptions import DatabaseCorrupted
from resources.lib.exceptions import DatabaseLost
from resources.lib.exceptions import ExitRequested
# -- Unpacker support ---------------------------------------
UPD_CAN_BZ2 = False
UPD_CAN_GZ = False
try:
import bz2
UPD_CAN_BZ2 = True
except ImportError:
pass
try:
import gzip
UPD_CAN_GZ = True
except ImportError:
pass
# -- Constants ----------------------------------------------
FILMLISTE_URL = 'https://liste.mediathekview.de/'
FILMLISTE_AKT = 'Filmliste-akt'
FILMLISTE_DIF = 'Filmliste-diff'
# -- Classes ------------------------------------------------
# pylint: disable=bad-whitespace
class MediathekViewUpdater(object):
""" The database updator class """
def __init__(self, logger, notifier, settings, monitor=None):
self.logger = logger
self.notifier = notifier
self.settings = settings
self.monitor = monitor
self.database = None
self.use_xz = mvutils.find_xz() is not None
self.cycle = 0
self.add_chn = 0
self.add_shw = 0
self.add_mov = 0
self.del_chn = 0
self.del_shw = 0
self.del_mov = 0
self.tot_chn = 0
self.tot_shw = 0
self.tot_mov = 0
self.index = 0
self.count = 0
self.film = {}
def init(self, convert=False):
""" Initializes the updater """
if self.database is not None:
self.exit()
self.database = Store(self.logger, self.notifier, self.settings)
self.database.init(convert=convert)
def exit(self):
""" Resets the updater """
if self.database is not None:
self.database.exit()
del self.database
self.database = None
def reload(self):
""" Reloads the updater """
self.exit()
self.init()
def is_enabled(self):
""" Returns if the updater is enabled """
return self.settings.updenabled
def get_current_update_operation(self, force=False, full=False):
"""
Determines which update operation should be done. Returns
one of these values:
0 - no update operation pending
1 - full update
2 - differential update
Args:
force(bool, optional): if `True` the update interval
check is skipped. Default is `False`
full(bool, optional): if `True` a full update
is requested. Default is `False`
"""
if self.database is None:
# db not available - no update
self.logger.info('Update disabled since database not available')
return 0
elif self.settings.updmode == 0:
# update disabled - no update
return 0
elif self.settings.updmode == 1 or self.settings.updmode == 2:
# manual update or update on first start
if self.settings.is_update_triggered() is True:
return self._get_next_update_operation(True, False)
else:
# no update on all subsequent calls
return 0
elif self.settings.updmode == 3:
# automatic update
if self.settings.is_user_alive():
return self._get_next_update_operation(force, full)
else:
# no update if user is idle for more than 2 hours
return 0
elif self.settings.updmode == 4:
# continuous update
return self._get_next_update_operation(force, full)
def _get_next_update_operation(self, force=False, full=False):
status = self.database.get_status()
tsnow = int(time.time())
tsold = status['lastupdate']
dtnow = datetime.datetime.fromtimestamp(tsnow).date()
dtold = datetime.datetime.fromtimestamp(tsold).date()
if status['status'] == 'UNINIT':
# database not initialized - no update
self.logger.debug('database not initialized')
return 0
elif status['status'] == "UPDATING" and tsnow - tsold > 10800:
# process was probably killed during update - no update
self.logger.info(
'Stuck update pretending to run since epoch {} reset', tsold)
self.database.update_status('ABORTED')
return 0
elif status['status'] == "UPDATING":
# already updating - no update
self.logger.debug('Already updating')
return 0
elif not full and not force and tsnow - tsold < self.settings.updinterval:
# last update less than the configured update interval - no update
self.logger.debug(
'Last update less than the configured update interval. do nothing')
return 0
elif dtnow != dtold:
# last update was not today. do full update once a day
self.logger.debug(
'Last update was not today. do full update once a day')
return 1
elif status['status'] == "ABORTED" and status['fullupdate'] == 1:
# last full update was aborted - full update needed
self.logger.debug(
'Last full update was aborted - full update needed')
return 1
elif full is True:
# full update requested
self.logger.info('Full update requested')
return 1
else:
# do differential update
self.logger.debug('Do differential update')
return 2
def update(self, full):
"""
Downloads the database update file and
then performs a database update
Args:
full(bool): Perform full update if `True`
"""
if self.database is None:
return
elif self.database.supports_native_update(full):
if self.get_newest_list(full):
if self.database.native_update(full):
self.cycle += 1
self.delete_list(full)
elif self.database.supports_update():
if self.get_newest_list(full):
if self.import_database(full):
self.cycle += 1
self.delete_list(full)
def import_database(self, full):
"""
Performs a database update when a
downloaded update file is available
Args:
full(bool): Perform full update if `True`
"""
(_, _, destfile, avgrecsize) = self._get_update_info(full)
if not mvutils.file_exists(destfile):
self.logger.error('File {} does not exists', destfile)
return False
# estimate number of records in update file
records = int(mvutils.file_size(destfile) / avgrecsize)
if not self.database.ft_init():
self.logger.warn(
'Failed to initialize update. Maybe a concurrency problem?')
return False
# pylint: disable=broad-except
try:
starttime = time.time()
self.logger.info(
'Starting import of approx. {} records from {}', records, destfile)
with closing(open(destfile, 'r')) as updatefile:
parser = ijson.parse(updatefile)
flsm = 0
flts = 0
(self.tot_chn, self.tot_shw, self.tot_mov) = self._update_start(full)
self.notifier.show_update_progress()
for prefix, event, value in parser:
if (prefix, event) == ("X", "start_array"):
self._init_record()
elif (prefix, event) == ("X", "end_array"):
self._end_record(records)
if self.count % 100 == 0 and self.monitor.abort_requested():
# kodi is shutting down. Close all
self._update_end(full, 'ABORTED')
self.notifier.close_update_progress()
return True
elif (prefix, event) == ("X.item", "string"):
if value is not None:
# self._add_value( value.strip().encode('utf-8') )
self._add_value(value.strip())
else:
self._add_value("")
elif (prefix, event) == ("Filmliste", "start_array"):
flsm += 1
elif (prefix, event) == ("Filmliste.item", "string"):
flsm += 1
if flsm == 2 and value is not None:
# this is the timestamp of this database update
try:
fldt = datetime.datetime.strptime(
value.strip(), "%d.%m.%Y, %H:%M")
flts = int(time.mktime(fldt.timetuple()))
self.database.update_status(filmupdate=flts)
self.logger.info(
'Filmliste dated {}', value.strip())
except TypeError:
# pylint: disable=line-too-long
# SEE: https://forum.kodi.tv/showthread.php?tid=112916&pid=1214507#pid1214507
# Wonderful. His name is also Leopold
try:
flts = int(time.mktime(time.strptime(
value.strip(), "%d.%m.%Y, %H:%M")))
self.database.update_status(
filmupdate=flts)
self.logger.info(
'Filmliste dated {}', value.strip())
# pylint: disable=broad-except
except Exception as err:
# If the universe hates us...
self.logger.debug(
'Could not determine date "{}" of filmliste: {}', value.strip(), err)
except ValueError as err:
pass
self._update_end(full, 'IDLE')
self.logger.info(
'Import of {} in update cycle {} finished. Duration: {} seconds',
destfile,
self.cycle,
int(time.time() - starttime)
)
self.notifier.close_update_progress()
return True
except KeyboardInterrupt:
self._update_end(full, 'ABORTED')
self.logger.info('Update cycle {} interrupted by user', self.cycle)
self.notifier.close_update_progress()
return False
except DatabaseCorrupted as err:
self.logger.error('{} on update cycle {}', err, self.cycle)
self.notifier.close_update_progress()
except DatabaseLost as err:
self.logger.error('{} on update cycle {}', err, self.cycle)
self.notifier.close_update_progress()
except Exception as err:
self.logger.error(
'Error {} while processing {} on update cycle {}', err, destfile, self.cycle)
self._update_end(full, 'ABORTED')
self.notifier.close_update_progress()
return False
def get_newest_list(self, full):
"""
Downloads the database update file
Args:
full(bool): Downloads the full list if `True`
"""
(url, compfile, destfile, _) = self._get_update_info(full)
if url is None:
self.logger.error(
'No suitable archive extractor available for this system')
self.notifier.show_missing_extractor_error()
return False
# cleanup downloads
self.logger.info('Cleaning up old downloads...')
mvutils.file_remove(compfile)
mvutils.file_remove(destfile)
# download filmliste
self.notifier.show_download_progress()
# pylint: disable=broad-except
try:
self.logger.info('Trying to download {} from {}...',
os.path.basename(compfile), url)
self.notifier.update_download_progress(0, url)
mvutils.url_retrieve(
url,
filename=compfile,
reporthook=self.notifier.hook_download_progress,
aborthook=self.monitor.abort_requested
)
except URLError as err:
self.logger.error('Failure downloading {} - {}', url, err)
self.notifier.close_download_progress()
self.notifier.show_download_error(url, err)
return False
except ExitRequested as err:
self.logger.error(
'Immediate exit requested. Aborting download of {}', url)
self.notifier.close_download_progress()
self.notifier.show_download_error(url, err)
return False
except Exception as err:
self.logger.error('Failure writing {}', url)
self.notifier.close_download_progress()
self.notifier.show_download_error(url, err)
return False
# decompress filmliste
if self.use_xz is True:
self.logger.info('Trying to decompress xz file...')
retval = subprocess.call([mvutils.find_xz(), '-d', compfile])
self.logger.info('Return {}', retval)
elif UPD_CAN_BZ2 is True:
self.logger.info('Trying to decompress bz2 file...')
retval = self._decompress_bz2(compfile, destfile)
self.logger.info('Return {}', retval)
elif UPD_CAN_GZ is True:
self.logger.info('Trying to decompress gz file...')
retval = self._decompress_gz(compfile, destfile)
self.logger.info('Return {}', retval)
else:
# should never reach
pass
self.notifier.close_download_progress()
return retval == 0 and mvutils.file_exists(destfile)
def delete_list(self, full):
"""
Deletes locally stored database update files
Args:
full(bool): Deletes the full lists if `True`
"""
(_, compfile, destfile, _) = self._get_update_info(full)
self.logger.info('Cleaning up downloads...')
mvutils.file_remove(compfile)
mvutils.file_remove(destfile)
def _get_update_info(self, full):
if self.use_xz is True:
ext = '.xz'
elif UPD_CAN_BZ2 is True:
ext = '.bz2'
elif UPD_CAN_GZ is True:
ext = '.gz'
else:
return (None, None, None, 0, )
info = self.database.get_native_info(full)
if info is not None:
return (
self._get_update_url(info[0]),
os.path.join(self.settings.datapath, info[1] + ext),
os.path.join(self.settings.datapath, info[1]),
500
)
if full:
return (
FILMLISTE_URL + FILMLISTE_AKT + ext,
os.path.join(self.settings.datapath, FILMLISTE_AKT + ext),
os.path.join(self.settings.datapath, FILMLISTE_AKT),
600,
)
else:
return (
FILMLISTE_URL + FILMLISTE_DIF + ext,
os.path.join(self.settings.datapath, FILMLISTE_DIF + ext),
os.path.join(self.settings.datapath, FILMLISTE_DIF),
700,
)
def _get_update_url(self, url):
if self.use_xz is True:
return url
elif UPD_CAN_BZ2 is True:
return os.path.splitext(url)[0] + '.bz2'
elif UPD_CAN_GZ is True:
return os.path.splitext(url)[0] + '.gz'
else:
# should never happen since it will not be called
return None
def _update_start(self, full):
self.logger.info('Initializing update...')
self.add_chn = 0
self.add_shw = 0
self.add_mov = 0
self.del_chn = 0
self.del_shw = 0
self.del_mov = 0
self.index = 0
self.count = 0
self.film = {
"channel": "",
"show": "",
"title": "",
"aired": "1980-01-01 00:00:00",
"duration": "00:00:00",
"size": 0,
"description": "",
"website": "",
"url_sub": "",
"url_video": "",
"url_video_sd": "",
"url_video_hd": "",
"airedepoch": 0,
"geo": ""
}
return self.database.ft_update_start(full)
def _update_end(self, full, status):
self.logger.info('Added: channels:%d, shows:%d, movies:%d ...' % (
self.add_chn, self.add_shw, self.add_mov))
(self.del_chn, self.del_shw, self.del_mov, self.tot_chn, self.tot_shw,
self.tot_mov) = self.database.ft_update_end(full and status == 'IDLE')
self.logger.info('Deleted: channels:%d, shows:%d, movies:%d' %
(self.del_chn, self.del_shw, self.del_mov))
self.logger.info('Total: channels:%d, shows:%d, movies:%d' %
(self.tot_chn, self.tot_shw, self.tot_mov))
self.database.update_status(
status,
int(time.time()) if status != 'ABORTED' else None,
None,
1 if full else 0,
self.add_chn, self.add_shw, self.add_mov,
self.del_chn, self.del_shw, self.del_mov,
self.tot_chn, self.tot_shw, self.tot_mov
)
def _init_record(self):
self.index = 0
self.film["title"] = ""
self.film["aired"] = "1980-01-01 00:00:00"
self.film["duration"] = "00:00:00"
self.film["size"] = 0
self.film["description"] = ""
self.film["website"] = ""
self.film["url_sub"] = ""
self.film["url_video"] = ""
self.film["url_video_sd"] = ""
self.film["url_video_hd"] = ""
self.film["airedepoch"] = 0
self.film["geo"] = ""
def _end_record(self, records):
if self.count % 1000 == 0:
# pylint: disable=line-too-long
percent = int(self.count * 100 / records)
self.logger.info('In progress (%d%%): channels:%d, shows:%d, movies:%d ...' % (
percent, self.add_chn, self.add_shw, self.add_mov))
self.notifier.update_update_progress(
percent if percent <= 100 else 100, self.count, self.add_chn, self.add_shw, self.add_mov)
self.database.update_status(
add_chn=self.add_chn,
add_shw=self.add_shw,
add_mov=self.add_mov,
tot_chn=self.tot_chn + self.add_chn,
tot_shw=self.tot_shw + self.add_shw,
tot_mov=self.tot_mov + self.add_mov
)
self.count = self.count + 1
(_, cnt_chn, cnt_shw, cnt_mov) = self.database.ft_insert_film(
self.film,
True
)
else:
self.count = self.count + 1
(_, cnt_chn, cnt_shw, cnt_mov) = self.database.ft_insert_film(
self.film,
False
)
self.add_chn += cnt_chn
self.add_shw += cnt_shw
self.add_mov += cnt_mov
def _add_value(self, val):
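# Each record of the film list JSON is a flat array of strings; self.index tracks
# the current position, so the branches below map fixed array slots (channel, show,
# title, date, time, duration, size, description, video URL, website, subtitle URL,
# SD/HD URL patches, aired epoch, geo restriction) onto the fields of self.film.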
if self.index == 0:
if val != "":
self.film["channel"] = val
elif self.index == 1:
if val != "":
self.film["show"] = val[:255]
elif self.index == 2:
self.film["title"] = val[:255]
elif self.index == 3:
if len(val) == 10:
self.film["aired"] = val[6:] + '-' + val[3:5] + '-' + val[:2]
elif self.index == 4:
if (self.film["aired"] != "1980-01-01 00:00:00") and (len(val) == 8):
self.film["aired"] = self.film["aired"] + " " + val
elif self.index == 5:
if len(val) == 8:
self.film["duration"] = val
elif self.index == 6:
if val != "":
self.film["size"] = int(val)
elif self.index == 7:
self.film["description"] = val
elif self.index == 8:
self.film["url_video"] = val
elif self.index == 9:
self.film["website"] = val
elif self.index == 10:
self.film["url_sub"] = val
elif self.index == 12:
self.film["url_video_sd"] = self._make_url(val)
elif self.index == 14:
self.film["url_video_hd"] = self._make_url(val)
elif self.index == 16:
if val != "":
self.film["airedepoch"] = int(val)
elif self.index == 18:
self.film["geo"] = val
self.index = self.index + 1
def _make_url(self, val):
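# SD/HD entries are stored as '<prefix_length>|<suffix>': reuse the first
# prefix_length characters of the base video URL and append the suffix;
# plain values are returned unchanged.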
parts = val.split('|')
if len(parts) == 2:
cnt = int(parts[0])
return self.film["url_video"][:cnt] + parts[1]
else:
return val
def _decompress_bz2(self, sourcefile, destfile):
blocksize = 8192
try:
with open(destfile, 'wb') as dstfile, open(sourcefile, 'rb') as srcfile:
decompressor = bz2.BZ2Decompressor()
for data in iter(lambda: srcfile.read(blocksize), b''):
dstfile.write(decompressor.decompress(data))
# pylint: disable=broad-except
except Exception as err:
self.logger.error('bz2 decompression failed: {}'.format(err))
return -1
return 0
def _decompress_gz(self, sourcefile, destfile):
blocksize = 8192
# pylint: disable=broad-except
try:
with open(destfile, 'wb') as dstfile, gzip.open(sourcefile) as srcfile:
for data in iter(lambda: srcfile.read(blocksize), b''):
dstfile.write(data)
except Exception as err:
self.logger.error(
'gz decompression of "{}" to "{}" failed: {}', sourcefile, destfile, err)
if mvutils.find_gzip() is not None:
gzip_binary = mvutils.find_gzip()
self.logger.info(
'Trying to decompress gzip file "{}" using {}...', sourcefile, gzip_binary)
try:
mvutils.file_remove(destfile)
retval = subprocess.call([gzip_binary, '-d', sourcefile])
self.logger.info('Calling {} -d {} returned {}',
gzip_binary, sourcefile, retval)
return retval
except Exception as err:
self.logger.error(
'gz commandline decompression of "{}" to "{}" failed: {}',
sourcefile, destfile, err)
return -1
return 0
# pylint: disable=pointless-string-statement
"""
def _decompress_gz(self, sourcefile, destfile):
blocksize = 8192
# pylint: disable=broad-except,line-too-long
try:
srcfile = gzip.open(sourcefile)
except Exception as err:
self.logger.error('gz decompression of "{}" to "{}" failed on opening gz file: {}'.format(
sourcefile, destfile, err))
return -1
try:
dstfile = open(destfile, 'wb')
except Exception as err:
self.logger.error('gz decompression of "{}" to "{}" failed on opening destination file: {}'.format(
sourcefile, destfile, err))
return -1
try:
for data in iter(lambda: srcfile.read(blocksize), b''):
try:
dstfile.write(data)
except Exception as err:
self.logger.error('gz decompression of "{}" to "{}" failed on writing destination file: {}'.format(
sourcefile, destfile, err))
return -1
except Exception as err:
self.logger.error('gz decompression of "{}" to "{}" failed on reading gz file: {}'.format(
sourcefile, destfile, err))
return -1
return 0
"""
``` |
{
"source": "joha7866/wavelength-warrior-suite",
"score": 3
} |
#### File: wavelength-warrior-suite/modules/motor_smbus.py
```python
import time
import math
import board
import busio
import adafruit_tca9548a
from adafruit_bus_device.i2c_device import I2CDevice
MOTOR_CTRL_ADDR = 0x69
#motor info
FORWARD_CMD = bytearray([ord('F')])
BACKWARD_CMD = bytearray([ord('B')])
ROT_L_CMD = bytearray([ord(ch) for ch in 'RL'])
ROT_R_CMD = bytearray([ord(ch) for ch in 'RR'])
ROT_FL_CMD = bytearray([ord(ch) for ch in 'RFL'])
ROT_FR_CMD = bytearray([ord(ch) for ch in 'RFR'])
ROT_BL_CMD = bytearray([ord(ch) for ch in 'RBL'])
ROT_BR_CMD = bytearray([ord(ch) for ch in 'RBR'])
DIAG_FL_CMD = bytearray([ord(ch) for ch in 'DFL'])
DIAG_FR_CMD = bytearray([ord(ch) for ch in 'DFR'])
DIAG_BL_CMD = bytearray([ord(ch) for ch in 'DBL'])
DIAG_BR_CMD = bytearray([ord(ch) for ch in 'DBR'])
TRAN_L_CMD = bytearray([ord(ch) for ch in 'TL'])
TRAN_R_CMD = bytearray([ord(ch) for ch in 'TR'])
STOP_CMD = bytearray([ord('S')])
ERROR_CMD = bytearray([ord('E')])
POLL_CMD = bytearray([ord('.')])
LEFT_90_DIR = math.pi/2
RIGHT_90_DIR = -math.pi/2
LEFT_180_DIR = math.pi
RIGHT_180_DIR = -math.pi
# ROT_90_DELAY = 1.33
ROT_90_DELAY = 1.45
class MotorController(object):
def __init__(self, bus):
self.motor = I2CDevice(bus, MOTOR_CTRL_ADDR, probe=False)
self.active_cmd = 'x'
def send_cmd(self, cmd):
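# Write the command bytes, then read a 16-byte response in one I2C transaction.
# The controller echoes the accepted command as the first byte; send_cmd returns 0
# when the echo matches and otherwise returns the echoed byte so the caller can react.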
read_buff = bytearray(16)
with self.motor:
self.motor.write_then_readinto(cmd, read_buff)
self.active_cmd = read_buff[0]
if self.active_cmd == cmd[0]:
return 0
else:
return self.active_cmd
if __name__ == "__main__":
read_buff = bytearray(16)
with busio.I2C(board.SCL, board.SDA) as bus:
mux = adafruit_tca9548a.TCA9548A(bus)
motor = I2CDevice(mux[0], MOTOR_CTRL_ADDR, probe=False)
try:
while 1:
cmd = input('cmd>>')
cmd = bytearray([ord(ch) for ch in cmd])
print(f'Txing: "{cmd}"')
with motor:
motor.write_then_readinto(bytearray(cmd), read_buff)
print(f'Rxed: "{read_buff}"')
except KeyboardInterrupt:
with motor:
motor.write_then_readinto(STOP_CMD, read_buff)
print('exited gracefully')
``` |
{
"source": "johadahl/indy-node",
"score": 2
} |
#### File: test/state_proof/test_state_proofs_for_get_requests.py
```python
import base64
import time
import base58
import pytest
from common.serializers import serialization
from common.serializers.serialization import state_roots_serializer
from crypto.bls.bls_multi_signature import MultiSignature, MultiSignatureValue
from plenum.bls.bls_store import BlsStore
from plenum.common.constants import TXN_TYPE, TARGET_NYM, RAW, DATA, ORIGIN, \
IDENTIFIER, NAME, VERSION, ROLE, VERKEY, KeyValueStorageType, \
STATE_PROOF, ROOT_HASH, MULTI_SIGNATURE, PROOF_NODES, TXN_TIME, CURRENT_PROTOCOL_VERSION, DOMAIN_LEDGER_ID
from plenum.common.types import f
from indy_common.constants import \
ATTRIB, REF, SIGNATURE_TYPE, CLAIM_DEF, SCHEMA
from indy_common.types import Request
from indy_node.persistence.attribute_store import AttributeStore
from indy_node.persistence.idr_cache import IdrCache
from indy_node.server.domain_req_handler import DomainReqHandler
from plenum.common.util import get_utc_epoch
from state.pruning_state import PruningState
from storage.kv_in_memory import KeyValueStorageInMemory
from indy_common.state import domain
@pytest.fixture()
def bls_store():
return BlsStore(key_value_type=KeyValueStorageType.Memory,
data_location=None,
key_value_storage_name="BlsInMemoryStore",
serializer=serialization.multi_sig_store_serializer)
@pytest.fixture()
def request_handler(bls_store):
state = PruningState(KeyValueStorageInMemory())
cache = IdrCache('Cache', KeyValueStorageInMemory())
attr_store = AttributeStore(KeyValueStorageInMemory())
return DomainReqHandler(ledger=None,
state=state,
config=None,
requestProcessor=None,
idrCache=cache,
attributeStore=attr_store,
bls_store=bls_store,
tsRevoc_store=None)
def extract_proof(result, expected_multi_sig):
proof = result[STATE_PROOF]
assert proof
assert proof[ROOT_HASH]
assert proof[PROOF_NODES]
multi_sign = proof[MULTI_SIGNATURE]
assert multi_sign
assert multi_sign == expected_multi_sig
return proof
def save_multi_sig(request_handler):
multi_sig_value = MultiSignatureValue(ledger_id=DOMAIN_LEDGER_ID,
state_root_hash=state_roots_serializer.serialize(
bytes(request_handler.state.committedHeadHash)),
txn_root_hash='2' * 32,
pool_state_root_hash='1' * 32,
timestamp=get_utc_epoch())
multi_sig = MultiSignature('0' * 32, ['Alpha', 'Beta', 'Gamma'], multi_sig_value)
request_handler.bls_store.put(multi_sig)
return multi_sig.as_dict()
def is_proof_verified(request_handler,
proof, path,
value, seq_no, txn_time, ):
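# Re-encode the value exactly as the domain request handler stores it in state,
# then verify the base64-encoded proof nodes against the base58-encoded root hash.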
encoded_value = domain.encode_state_value(value, seq_no, txn_time)
proof_nodes = base64.b64decode(proof[PROOF_NODES])
root_hash = base58.b58decode(proof[ROOT_HASH])
verified = request_handler.state.verify_state_proof(
root_hash,
path,
encoded_value,
proof_nodes,
serialized=True
)
return verified
def test_state_proofs_for_get_attr(request_handler):
# Adding attribute
nym = 'Gw6pDLhcBcoQesN72qfotTgFa7cbuqZpkX3Xo6pLhPhv'
attr_key = 'last_name'
raw_attribute = '{"last_name":"Anderson"}'
seq_no = 0
txn_time = int(time.time())
txn = {
TXN_TYPE: ATTRIB,
TARGET_NYM: nym,
RAW: raw_attribute,
f.SEQ_NO.nm: seq_no,
TXN_TIME: txn_time,
}
request_handler._addAttr(txn)
request_handler.state.commit()
multi_sig = save_multi_sig(request_handler)
# Getting attribute
get_request = Request(
operation={
TARGET_NYM: nym,
RAW: 'last_name'
},
signatures={},
protocolVersion=CURRENT_PROTOCOL_VERSION
)
result = request_handler.handleGetAttrsReq(get_request)
proof = extract_proof(result, multi_sig)
attr_value = result[DATA]
assert attr_value == raw_attribute
# Verifying signed state proof
path = domain.make_state_path_for_attr(nym, attr_key)
assert is_proof_verified(request_handler,
proof, path,
domain.hash_of(attr_value), seq_no, txn_time)
def test_state_proofs_for_get_claim_def(request_handler):
# Adding claim def
nym = '<KEY>'
seq_no = 0
txn_time = int(time.time())
schema_seqno = 0
signature_type = 'CL'
key_components = '{"key_components": []}'
txn = {
IDENTIFIER: nym,
TXN_TYPE: CLAIM_DEF,
TARGET_NYM: nym,
REF: schema_seqno,
f.SEQ_NO.nm: seq_no,
DATA: key_components,
TXN_TIME: txn_time,
}
request_handler._addClaimDef(txn)
request_handler.state.commit()
multi_sig = save_multi_sig(request_handler)
# Getting claim def
request = Request(
operation={
IDENTIFIER: nym,
ORIGIN: nym,
REF: schema_seqno,
SIGNATURE_TYPE: signature_type
},
signatures={},
protocolVersion=CURRENT_PROTOCOL_VERSION
)
result = request_handler.handleGetClaimDefReq(request)
proof = extract_proof(result, multi_sig)
assert result[DATA] == key_components
# Verifying signed state proof
path = domain.make_state_path_for_claim_def(nym, schema_seqno,
signature_type)
assert is_proof_verified(request_handler,
proof, path,
key_components, seq_no, txn_time)
def test_state_proofs_for_get_schema(request_handler):
# Adding schema
nym = '<KEY>'
seq_no = 0
txn_time = int(time.time())
schema_name = "schema_a"
schema_version = "1.0"
# data = '{"name": "schema_a", "version": "1.0"}'
schema_key = {NAME: schema_name, VERSION: schema_version}
data = {**schema_key, "Some_Attr": "Attr1"}
txn = {
TXN_TYPE: SCHEMA,
IDENTIFIER: nym,
f.SEQ_NO.nm: seq_no,
DATA: data,
TXN_TIME: txn_time,
}
request_handler._addSchema(txn)
request_handler.state.commit()
multi_sig = save_multi_sig(request_handler)
# Getting schema
request = Request(
operation={
TARGET_NYM: nym,
DATA: schema_key
},
signatures={},
protocolVersion=CURRENT_PROTOCOL_VERSION
)
result = request_handler.handleGetSchemaReq(request)
proof = extract_proof(result, multi_sig)
result[DATA].pop(NAME)
result[DATA].pop(VERSION)
assert result[DATA] == data
# Verifying signed state proof
path = domain.make_state_path_for_schema(nym, schema_name, schema_version)
assert is_proof_verified(request_handler,
proof, path,
data, seq_no, txn_time)
def test_state_proofs_for_get_nym(request_handler):
nym = '<KEY>'
role = "2"
verkey = "<KEY>"
seq_no = 0
txn_time = int(time.time())
# Adding nym
data = {
f.IDENTIFIER.nm: nym,
ROLE: role,
VERKEY: verkey,
f.SEQ_NO.nm: seq_no,
TXN_TIME: txn_time,
}
request_handler.updateNym(nym, data)
request_handler.state.commit()
multi_sig = save_multi_sig(request_handler)
# Getting nym
request = Request(
operation={
TARGET_NYM: nym
},
signatures={},
protocolVersion=CURRENT_PROTOCOL_VERSION
)
result = request_handler.handleGetNymReq(request)
proof = extract_proof(result, multi_sig)
# Verifying signed state proof
path = request_handler.nym_to_state_key(nym)
encoded_value = request_handler.stateSerializer.serialize(data)
proof_nodes = base64.b64decode(proof[PROOF_NODES])
root_hash = base58.b58decode(proof[ROOT_HASH])
verified = request_handler.state.verify_state_proof(
root_hash,
path,
encoded_value,
proof_nodes,
serialized=True
)
assert verified
def test_no_state_proofs_if_protocol_version_less(request_handler):
nym = 'Gw6pDLhcBcoQesN72qfotTgFa7cbuqZpkX3Xo6pLhPhv'
role = "2"
verkey = "<KEY>"
seq_no = 0
txn_time = int(time.time())
# Adding nym
data = {
f.IDENTIFIER.nm: nym,
ROLE: role,
VERKEY: verkey,
f.SEQ_NO.nm: seq_no,
TXN_TIME: txn_time,
}
request_handler.updateNym(nym, data)
request_handler.state.commit()
multi_sig = save_multi_sig(request_handler)
# Getting nym
request = Request(
operation={
TARGET_NYM: nym
},
signatures={}
)
result = request_handler.handleGetNymReq(request)
assert STATE_PROOF not in result
```
#### File: test/upgrade/test_node_schedules_upgrade_after_pool_ledger_update.py
```python
import pytest
from plenum.test.bls.helper import change_bls_key, check_bls_key
from plenum.test.conftest import pool_txn_stewards_data, stewards_and_wallets
@pytest.fixture(scope="module")
def update_bls_keys(looper, tconf, nodeSet, stewards_and_wallets):
node = nodeSet[0]
steward_client, steward_wallet = stewards_and_wallets[0]
new_blspk = change_bls_key(looper, nodeSet, node,
steward_client, steward_wallet)
check_bls_key(new_blspk, node, nodeSet)
def test_node_schedules_upgrade_after_bls_keys_update(update_bls_keys,
upgradeScheduled):
# Upgrade should work even after an update to the pool ledger with a
# transaction that does not contain `SERVICES` field
pass
``` |
{
"source": "johagg17/ArtificialFlyingObjects",
"score": 3
} |
#### File: ArtificialFlyingObjects/lab2_MLP_Classification/dataset.py
```python
import numpy as np
class MLPData:
""" """
@staticmethod
def syn1(N):
"""data(samples, features)
:param N:
"""
data = np.empty(shape=(N,2), dtype = np.float32)
tar = np.empty(shape=(N,), dtype = np.float32)
N1 = int(N/2)
data[:N1,0] = 4 + np.random.normal(loc=.0, scale=1., size=(N1))
data[N1:,0] = -4 + np.random.normal(loc=.0, scale=1., size=(N-N1))
data[:,1] = 10*np.random.normal(loc=.0, scale=1., size=(N))
data = data / data.std(axis=0)
# Target
tar[:N1] = np.ones(shape=(N1,))
tar[N1:] = np.zeros(shape=(N-N1,))
# Rotation
theta = np.radians(30)
c, s = np.cos(theta), np.sin(theta)
R = np.array([[c,-s],[s,c]]) # rotation matrix
data = np.dot(data,R)
return data,tar
@staticmethod
def syn2(N):
"""data(samples, features)
:param N:
"""
data = np.empty(shape=(N,2), dtype = np.float32)
tar = np.empty(shape=(N,), dtype = np.float32)
N1 = int(N/2)
# Positive samples
data[:N1,:] = 0.8 + np.random.normal(loc=.0, scale=1., size=(N1,2))
# Negative samples
data[N1:,:] = -.8 + np.random.normal(loc=.0, scale=1., size=(N-N1,2))
# Target
tar[:N1] = np.ones(shape=(N1,))
tar[N1:] = np.zeros(shape=(N-N1,))
return data,tar
@staticmethod
def syn3(N):
"""data(samples, features)
:param N:
"""
data = np.empty(shape=(N,2), dtype = np.float32)
tar = np.empty(shape=(N,), dtype = np.float32)
N1 = int(2*N/3)
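# Two overlapping classes sampled in polar coordinates: an annulus with radii
# between 2 and 5 (two thirds of the samples) and a disk with radii up to 3.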
# disk
teta_d = np.random.uniform(0, 2*np.pi, N1)
inner, outer = 2, 5
r2 = np.sqrt(np.random.uniform(inner**2, outer**2, N1))
data[:N1,0],data[:N1,1] = r2*np.cos(teta_d), r2*np.sin(teta_d)
#circle
teta_c = np.random.uniform(0, 2*np.pi, N-N1)
inner, outer = 0, 3
r2 = np.sqrt(np.random.uniform(inner**2, outer**2, N-N1))
data[N1:,0],data[N1:,1] = r2*np.cos(teta_c), r2*np.sin(teta_c)
# Normalization
#data = data - data.mean(axis=0)/data.std(axis=0)
tar[:N1] = np.ones(shape=(N1,))
tar[N1:] = np.zeros(shape=(N-N1,))
return data, tar
@staticmethod
def spiral(spiral_path):
"""
:param spiral_path:
"""
tmp = np.loadtxt(spiral_path)
data, tar = tmp[:, :2], tmp[:, 2]
return data, tar
@staticmethod
def vowels(file_name_train='ae.train', file_name_test='ae.test'):
"""
:param file_name_train: Default value = 'ae.train')
:param file_name_test: Default value = 'ae.test')
"""
def pre_proc(file_name):
"""
:param file_name:
"""
block = []
x = []
with open(file_name) as file:
for line in file:
if line.strip():
numbers = [float(n) for n in line.split()]
block.append(numbers)
else:
x.append(block)
block = []
################################
x = [np.asarray(ar) for ar in x]
return x
x_train = pre_proc(file_name_train)
x_test = pre_proc(file_name_test)
############## LABELS###########
chunk1 = list(range(30,270, 30))
y_train = []
person = 0
for i, block in enumerate(x_train):
if i in chunk1:
person += 1
y_train.extend([person]*block.shape[0])
chunk2 = [31,35,88,44,29,24,40,50,29]
chunk2 = np.cumsum(chunk2)
y_test = []
person = 0
for i, block in enumerate(x_test):
if i in chunk2:
person += 1
y_test.extend([person]*block.shape[0])
x_train = np.vstack(x_train)
x_test = np.vstack(x_test)
## Split into train, validation and test
num_classes = 9
y_train = np.eye(num_classes, dtype='uint8')[y_train]#keras.utils.to_categorical(y_train, num_classes)
y_test = np.eye(num_classes, dtype='uint8')[y_test]#keras.utils.to_categorical(y_test, num_classes)
from sklearn.model_selection import train_test_split
x_test, x_val, y_test, y_val = train_test_split(x_test, y_test, test_size=0.4, random_state=42)
return x_train, y_train, x_val, y_val, x_test, y_test
@staticmethod
def regr1(N, v=0):
"""data(samples, features)
:param N: param v: (Default value = 0)
:param v: Default value = 0)
"""
data = np.empty(shape=(N,6), dtype = np.float32)
uni = lambda n : np.random.uniform(0,1,n)
norm = lambda n : np.random.normal(0,1,n)
noise = lambda n : np.random.normal(0,1,n)
for i in range(4):
data[:,i] = norm(N)
for j in [4,5]:
data[:,j] = uni(N)
tar = 2*data[:,0] + data[:,1]* data[:,2]**2 + np.exp(data[:,3]) + \
5*data[:,4]*data[:,5] + 3*np.sin(2*np.pi*data[:,5])
std_signal = np.std(tar)
tar = tar + v * std_signal * noise(N)
return data, tar
```
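The generators above are plain static methods, so they can be called without instantiating the class. The snippet below is a minimal usage sketch; the import path and the train/test split are assumptions for illustration, not part of the original file.
```python
# Hypothetical usage sketch for the MLPData generators defined above.
import numpy as np
from sklearn.model_selection import train_test_split
from dataset import MLPData  # assumed import path within the lab folder

data, tar = MLPData.syn1(1000)        # two rotated Gaussian clusters
print(data.shape, tar.shape)          # (1000, 2) (1000,)
print(np.bincount(tar.astype(int)))   # roughly balanced binary targets

# an assumed split before training a small MLP on the synthetic data
x_train, x_test, y_train, y_test = train_test_split(
    data, tar, test_size=0.2, random_state=0)
```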
#### File: ArtificialFlyingObjects/utils/progressbar.py
```python
from pytorch_lightning.callbacks import progress
__all__ = ['LitProgressBar']
class LitProgressBar(progress.ProgressBarBase):
""" """
#https://pytorch-lightning.readthedocs.io/en/latest/_modules/pytorch_lightning/callbacks/progress.html#ProgressBarBase.on_validation_batch_end
def __init__(self):
super().__init__() # don't forget this :)
self.enable()
def disable(self):
"""Disable progressBar"""
self._enable = False
def enable(self):
"""Enable progressBar"""
self._enable = True
def on_epoch_start(self, trainer, pl_module):
"""
:param trainer: param pl_module:
:param pl_module:
"""
super().on_train_start(trainer, pl_module)
print("",end="", flush=True)
def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx):
"""
:param trainer: param pl_module:
:param outputs: param batch:
:param batch_idx: param dataloader_idx:
:param pl_module: param batch:
:param dataloader_idx: param batch:
:param batch:
"""
super().on_train_batch_end(trainer, pl_module, outputs,batch, batch_idx)
con = f'Epoch {trainer.current_epoch+1} [{batch_idx+1:.00f}/{self.total_train_batches:.00f}] {self.get_progress_bar_dict(trainer)}'
self._update(con)
def _update(self,con:str) -> None:
"""Update console
:param con: param con:str:
:param con: str:
:param con:str:
"""
print(con, end="\r", flush=True)
def get_progress_bar_dict(self,trainer):
"""
:param trainer:
"""
tqdm_dict = trainer.progress_bar_dict
if 'v_num' in tqdm_dict:
del tqdm_dict['v_num']
return tqdm_dict
``` |
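Because LitProgressBar is a regular Lightning callback, it is attached through the Trainer. The snippet below is a sketch under the assumption that the module is importable as utils.progressbar and that a LightningModule named `model` already exists; it is not part of the repository.
```python
# Hypothetical wiring of the LitProgressBar callback into a Lightning Trainer.
import pytorch_lightning as pl
from utils.progressbar import LitProgressBar  # assumed import path

bar = LitProgressBar()
trainer = pl.Trainer(max_epochs=5, callbacks=[bar])  # replaces the default bar output
# trainer.fit(model, train_dataloader)  # `model` and `train_dataloader` are placeholders
```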
{
"source": "johagge/DeepActiveLearning",
"score": 3
} |
#### File: johagge/DeepActiveLearning/imageDifferenceCalculator.py
```python
import random
from img2vec_pytorch import Img2Vec
from PIL import Image
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
import numpy as np
from tqdm import tqdm
import pickle
import sys  # needed by visualizeCluster()
class DifferenceCalculator:
def __init__(self, image_list):
self.image_list = image_list
def calculate_for_two(self, image1, image2):
pass
def load_image_pillow(self, img_path):
return Image.open(img_path)
def load_results(self):
"""
Since the comparison only needs to be done once, we don't have to generate them each time
"""
pass
def cluster(self, amount_clusters):
pass
class Image2Vector(DifferenceCalculator):
def __init__(self, image_list):
super(Image2Vector, self).__init__(image_list)
self.img2vec = Img2Vec(cuda=True)
self.vector_list = None
self.pickle_name = "img2vec.pickle"
self.amount_of_clusters = None
self.kmeans = None
self.reduced_data = None
def calculate_for_two(self, image1, image2):
super()
vec = self.img2vec.get_vec(image1)
vec2 = self.img2vec.get_vec(image2)
similarity = cosine_similarity(vec.reshape((1, -1)), vec2.reshape((1, -1)))[0][0]
print(similarity)
def load_results(self):
"""
If this fails, you first need to run the generate all image vectors function which generates the pickle
:return:
"""
with open(self.pickle_name, "rb") as f:
self.vector_list = pickle.load(f)
def generate_all_image_vectors(self):
"""
This generates all the vectors generated from the images
On the workstation it takes about 2 minutes for all training images (~8900),
so it is not really necessary to save it.
(Otherwise we would want the vector and path in one list element to remove them after annotating)
:return:
"""
# TODO select 512 automatically instead of hardcoding
vector_list = np.zeros((len(self.image_list), 512)) # 512 because resnet-18 is used as default with 512 output
print('generating image vectors...')
for i, image in tqdm(enumerate(self.image_list)):
img = self.load_image_pillow(image)
vector = self.img2vec.get_vec(img)
vector_list[i, :] = vector
self.vector_list = vector_list
#with open(self.pickle_name, "wb") as f:
# pickle.dump(vector_list, f)
def cluster(self, amount_clusters):
# inspired by https://github.com/christiansafka/img2vec/blob/master/example/test_clustering.py
self.amount_of_clusters = amount_clusters
#self.load_results()
print('Applying PCA...')
reduced_data = PCA(n_components=300).fit_transform(self.vector_list)
print('calculating kmeans')
kmeans = KMeans(init='k-means++', n_clusters=amount_clusters, n_init=25)
kmeans.fit(reduced_data)
self.kmeans = kmeans
self.reduced_data = reduced_data
return kmeans, reduced_data
def visualizeCluster(self):
"""
visualizes the clusters. only works with 2 dimensional reduced data
:param kmeans: sklearn kmeans
:param reduced_data: PCA reduced data
:return: None
"""
import matplotlib.pyplot as plt
        if self.kmeans is None or self.reduced_data is None:
            print("Please run cluster() first, so we have kmeans and reduced data available")
            sys.exit()
        kmeans, reduced_data = self.kmeans, self.reduced_data  # local aliases used below
# the following code in this function is from: (slightly changed)
# https://scikit-learn.org/stable/auto_examples/cluster/plot_kmeans_digits.html
# Step size of the mesh. Decrease to increase the quality of the VQ.
h = .02 # point in the mesh [x_min, x_max]x[y_min, y_max].
# Plot the decision boundary. For that, we will assign a color to each
x_min, x_max = reduced_data[:, 0].min() - 1, reduced_data[:, 0].max() + 1
y_min, y_max = reduced_data[:, 1].min() - 1, reduced_data[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Obtain labels for each point in mesh. Use last trained model.
Z = kmeans.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1)
plt.clf()
plt.imshow(Z, interpolation="nearest",
extent=(xx.min(), xx.max(), yy.min(), yy.max()),
cmap=plt.cm.Paired, aspect="auto", origin="lower")
plt.plot(reduced_data[:, 0], reduced_data[:, 1], 'k.', markersize=2)
# Plot the centroids as a white X
centroids = kmeans.cluster_centers_
plt.scatter(centroids[:, 0], centroids[:, 1], marker="x", s=169, linewidths=3,
color="w", zorder=10)
plt.title("K-means clustering on the TORSO-21 training set (PCA-reduced data)\n"
"Centroids are marked with white cross")
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
plt.show()
#plt.savefig("kmeans.png", dpi=200)
def images_by_cluster(self, amount_clusters):
self.amount_of_clusters = amount_clusters
kmeans, reduced_data = self.cluster(amount_clusters)
images_by_cluster = [list() for e in range(self.amount_of_clusters)] # one sub list for each defined cluster
for i, e in enumerate(reduced_data):
cluster = kmeans.predict(e.reshape(1, -1))
# this assumes that the order of the reduced data is the same as the image list
images_by_cluster[cluster[0]].append(self.image_list[i]) # put the image path into the fitting cluster list
return images_by_cluster
class Vae(DifferenceCalculator):
def __init__(self, image_list, pickle_file="embeddings.pickle"):
super(Vae, self).__init__(image_list)
with open(pickle_file, "rb") as f:
self.embeddings = pickle.load(f)
self.latent = self.embeddings["latent"]
self.paths = self.embeddings["path_list"]
self.tree = self.embeddings["tree"]
self.errors = self.embeddings["errors"]
def get_high_error_samples(self, value=1.64):
errors = self.errors
# by default using the 1.64 value as in TORSO-21
error_threshold = errors.mean() + errors.std() * value
not_recreatable = set([self.paths[i] for i in np.where(errors >= error_threshold)[0]])
return not_recreatable
def get_very_different_samples(self, latent_distance_to_prune=30):
temp_paths = self.paths.copy()
high_difference_samples = []
while len(temp_paths) > 0:
sample = temp_paths.pop()
high_difference_samples.append(sample)
# use self.paths, not temp_paths, to keep the indexing right
sample_id = self.paths.index(sample)
indices = self.tree.query_radius(self.latent[sample_id].reshape(1, -1), r=latent_distance_to_prune)[0]
for index in indices:
if self.paths[index] in temp_paths:
# find object in self.paths and then remove it from the temp list
temp_paths.remove((self.paths[index]))
return high_difference_samples
if __name__ == "__main__":
import os
import glob
# find all images in folder
trainImagesPool = []
# datasets = [x[0] for x in os.walk("/home/jonas/Downloads/1076/")] # a list of all subdirectories (including root directory)
datasets = [x[0] for x in os.walk("/srv/ssd_nvm/15hagge/torso-fuer-pytorchyolo/custom/images/train")]
for d in datasets:
        trainImagesPool += glob.glob(f"{d}/*.png", recursive=True)
trainImagesPool += glob.glob(f"{d}/*.PNG", recursive=True)
trainImagesPool += glob.glob(f"{d}/*.jpg", recursive=True)
trainImagesPool += glob.glob(f"{d}/*.JPG", recursive=True)
"""
a = Image2Vector(trainImagesPool)
a.generate_all_image_vectors()
img_by_cluster = a.images_by_cluster(10)
for e in img_by_cluster:
print(len(e))
# skip currently unnecessary debug
import sys
sys.exit()
# Visualization
kmeans, reduced_data = a.cluster(10)
a.visualizeCluster()
"""
a = Vae(trainImagesPool)
a.get_very_different_samples()
``` |
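One plausible way to use `images_by_cluster` for diversity sampling in the active-learning loop is to draw a fixed number of images from every cluster. The helper below is a sketch under that assumption; the function name and parameters are invented for illustration.
```python
# Hypothetical diversity-sampling sketch built on Image2Vector.images_by_cluster.
import random
from imageDifferenceCalculator import Image2Vector  # assumed import path

def pick_diverse_samples(image_paths, n_clusters=10, per_cluster=5):
    """Return up to n_clusters * per_cluster image paths spread across the clusters."""
    selector = Image2Vector(image_paths)
    selector.generate_all_image_vectors()        # embeds every image with img2vec
    clusters = selector.images_by_cluster(n_clusters)
    picked = []
    for cluster in clusters:
        random.shuffle(cluster)
        picked.extend(cluster[:per_cluster])     # a few representatives per cluster
    return picked
```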
{
"source": "johahi/TorchProteinLibrary",
"score": 2
} |
#### File: FullAtomModel/CoordsTransform/test_forward.py
```python
import sys
import os
import torch
import numpy as np
from TorchProteinLibrary.FullAtomModel.CoordsTransform import CoordsTranslate, getRandomTranslation, getBBox, CoordsRotate, getRandomRotation
from TorchProteinLibrary.FullAtomModel import Angles2Coords, Coords2TypedCoords
def test_translation(coords, num_atoms):
translate = CoordsTranslate()
a,b = getBBox(coords, num_atoms)
center = (a+b)*0.5
print (center)
centered_coords = translate(coords, -center, num_atoms)
a,b = getBBox(centered_coords, num_atoms)
center = (a+b)*0.5
print(center)
def test_rotation(coords, num_atoms):
batch_size = num_atoms.size(0)
R = getRandomRotation(batch_size)
rotate = CoordsRotate()
rotated = rotate(coords, R, num_atoms)
print(rotated)
if __name__=='__main__':
sequences = ['GGGGGG', 'GGAARRRRRRRRR']
angles = torch.zeros(2, 7,len(sequences[1]), dtype=torch.double)
angles[:,0,:] = -1.047
angles[:,1,:] = -0.698
angles[:,2:,:] = 110.4*np.pi/180.0
a2c = Angles2Coords()
protein, res_names, atom_names, num_atoms = a2c(angles, sequences)
test_translation(protein, num_atoms)
test_rotation(protein, num_atoms)
``` |
{
"source": "johan12345/bohhofsvoasteha",
"score": 3
} |
#### File: bohhofsvoasteha/data/generate_json.py
```python
import json
import yaml
from pathlib import Path
def load_dir(path):
items = []
for file in sorted(path.iterdir()):
if file.suffix == ".yml":
item = yaml.safe_load(open(file, encoding='utf8'))
if isinstance(item, list):
items += item
else:
items.append(item)
return items
trains_path = Path("trains")
stations_path = Path("stations")
result = {
"trains": load_dir(trains_path),
"stations": load_dir(stations_path)
}
outfile = Path('../frontend/src/assets/data.json')
outfile.parent.mkdir(exist_ok=True)
json.dump(result, open(outfile, 'w'), indent=2)
``` |
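For illustration, the round trip below shows what `load_dir` and the final `json.dump` produce for a single hypothetical YAML document; the field names are assumptions, since the script simply merges whatever the files contain.
```python
# Hypothetical round trip mirroring generate_json.py for one in-memory YAML document.
import json
import yaml

example_yaml = """
- name: Railjet
  operator: ÖBB
- name: EuroCity
  operator: DB
"""
items = yaml.safe_load(example_yaml)  # a list, so load_dir() would extend rather than append
print(json.dumps({"trains": items, "stations": []}, indent=2, ensure_ascii=False))
```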
{
"source": "johan12345/lonet.py",
"score": 3
} |
#### File: johan12345/lonet.py/lonet.py
```python
import argparse
import os
import re
import urllib.parse
import requests
from bs4 import BeautifulSoup
from pushbullet import Pushbullet
pushbullet = None
def download_file(url, dir):
local_filename = dir + '/' + urllib.parse.unquote_plus(url.split('/')[-1], encoding='iso-8859-1')
    if os.path.exists(local_filename):
        return
print(local_filename)
r = requests.get(url, stream=True)
with open(local_filename, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
if pushbullet is not None:
pushbullet.push_note('Neue Datei', local_filename)
return local_filename
def download_folder(folder, base_dir):
dir = base_dir + '/' + folder['name']
if not os.path.exists(dir):
os.makedirs(dir)
if 'url' in folder:
download_files(folder['url'], dir)
for key, subfolder in folder['subfolders'].items():
download_folder(subfolder, dir)
def download_files(url, dir):
files_page = BeautifulSoup(session.get(url=url).text, 'html.parser')
for download_link in files_page.select('a[download]'):
download_file(base_download_url + download_link['href'], dir)
return files_page
parser = argparse.ArgumentParser(description='Download files from lo-net2.de file storage')
parser.add_argument('-u', '--username', type=str, required=True,
help='lo-net2 email address (.lo-net2.de at the end can be omitted)')
parser.add_argument('-p', '--password', type=str, required=True,
help='lo-net2 password')
parser.add_argument('-pb', '--pushbullet-token', type=str, help='Pushbullet API token')
args = parser.parse_args()
base_url = 'https://www.lo-net2.de/wws/'
base_download_url = 'https://www.lo-net2.de'
session = requests.Session()
if args.pushbullet_token is not None:
pushbullet = Pushbullet(args.pushbullet_token)
login_page = session.get('https://www.lo-net2.de/wws/100001.php').text
sid = re.compile(r'sid=(\d+)').search(login_page).group(1)
main_page = BeautifulSoup(session.post(url=base_url + '100001.php?sid=' + sid,
                                       files={
                                           'default_submit_button': ('', ''),
                                           'login_nojs': ('', ''),
                                           'login_login': ('', args.username),
                                           'login_password': ('', args.password),
                                           'language': ('', '2')
                                       }).text, 'html.parser')
course_links = main_page.select('#status_member_of_19 li > a')
for course_link in course_links:
course_name = course_link.text
print(course_name)
if not os.path.exists(course_name):
os.makedirs(course_name)
course_page = BeautifulSoup(session.get(url=base_url + course_link['href']).text, 'html.parser')
files_url = base_url + course_page('a', text='Dateiablage')[0]['href']
files_page = download_files(files_url, course_name)
base_folder = dict(name=course_name, subfolders={})
for folder_link in files_page.select('#table_folders a'):
folder_url = base_download_url + folder_link['href']
query = urllib.parse.urlparse(folder_url).query
params = urllib.parse.parse_qs(query, keep_blank_values=True)
path = params['path'][0]
if path == '': continue
parts = path.split('/')[1:]
folder = base_folder
for i in range(0, len(parts) - 1):
folder = folder['subfolders'][parts[i]]
folder['subfolders'][parts[len(parts) - 1]] = dict(
name=folder_link.text,
url=folder_url,
subfolders={}
)
download_folder(base_folder, '.')
``` |
{
"source": "johan12345/sunpy",
"score": 3
} |
#### File: sunpy/io/header.py
```python
from collections import OrderedDict
__all__ = ['FileHeader']
class FileHeader(OrderedDict):
"""
FileHeader is designed to provide a consistent interface to all other sunpy
classes that expect a generic file.
    All file readers should format the headers they read into a
    FileHeader.
"""
def __init__(self, *args, **kwargs):
OrderedDict.__init__(self, *args, **kwargs)
```
#### File: io/tests/test_genx.py
```python
import os
import datetime
import numpy as np
import pytest
from sunpy.data.test import rootdir
from sunpy.io.special import genx
TESTING = genx.read_genx(os.path.join(rootdir, 'generated_sample.genx'))
def test_skeleton():
# Top level
toplevel_dims = {'MYTEXT': 63, 'MYTEXT_ARRAY': 3, 'MYTEXT_ARRAY_DIMENSION': (2, 3),
'MYNUMBER': 1, 'MYNUMBER_ARRAY': 3, 'MYNUMBER_ARRAY_DIMENSION': (2, 3, 4, 5),
'MYUINT': 1, 'MYSTRUCTURE': 14, # the elements inside the OrderedDict
'MYSTRUCTURE_ARRAY': 6, 'HEADER': 5}
assert sorted(list(TESTING.keys())) == sorted(list(toplevel_dims.keys()))
for key, val in toplevel_dims.items():
if isinstance(val, tuple):
assert TESTING[key].shape == tuple(reversed(val))
else:
if val > 1:
assert len(TESTING[key]) == val
else:
assert isinstance(TESTING[key], int)
def test_array_elements_values():
np.testing.assert_allclose(TESTING['MYSTRUCTURE']['MYFARRAY'], np.arange(3.))
np.testing.assert_allclose(TESTING['MYSTRUCTURE']['MYFARRAYD'][:, 0], np.arange(6., step=2))
assert TESTING['MYSTRUCTURE']['MYDARRAYD'][1, 2] == 5.
assert TESTING['MYSTRUCTURE']['NESTEDSTRUCT']['MYLARRAYD'][3, 0, 1] == 19
np.testing.assert_allclose(TESTING['MYSTRUCTURE']['NESTEDSTRUCT']
['MYLARRAYD'][2, :, 0], np.arange(12, 17, step=2))
assert TESTING['MYSTRUCTURE']['MYCARRAY'][1] == complex(1, -9)
assert TESTING['MYSTRUCTURE']['MYDCARRAY'][2] == complex(12, 1)
assert TESTING['MYSTRUCTURE']['NESTEDSTRUCT']['MYUL64NUMBER'] == 18446744073709551615
assert TESTING['MYSTRUCTURE']['NESTEDSTRUCT']['MYL64NUMBER'] == 9223372036854775807
@pytest.mark.parametrize("slice, value", [((0, 0, 0, 0), 0),
((4, 0, 0, 0), 96),
((0, 2, 2, 0), 16),
((0, 3, 2, 0), 22),
((4, 3, 2, 0), 118)])
def test_value_slice(slice, value):
assert TESTING['MYNUMBER_ARRAY_DIMENSION'][slice] == value
@pytest.mark.parametrize("myarray, dtype", [(TESTING['MYNUMBER_ARRAY'], np.int16),
(TESTING['MYNUMBER_ARRAY_DIMENSION'], np.int16),
(TESTING['MYSTRUCTURE']['MYFARRAY'], np.float32),
(TESTING['MYSTRUCTURE']['MYFARRAYD'], np.float32),
(TESTING['MYSTRUCTURE']['MYDARRAY'], np.float64),
(TESTING['MYSTRUCTURE']['MYDARRAYD'], np.float64),
(TESTING['MYSTRUCTURE']['NESTEDSTRUCT']
['MYLARRAY'], np.int32),
(TESTING['MYSTRUCTURE']['NESTEDSTRUCT']
['MYLARRAYD'], np.int32),
(TESTING['MYSTRUCTURE']['RANDOMNUMBERS'], np.int16),
(TESTING['MYSTRUCTURE']['MYCARRAY'], np.complex),
(TESTING['MYSTRUCTURE']['MYDCARRAY'], np.complex64)])
def test_type(myarray, dtype):
assert myarray.dtype == dtype
def test_date():
creation_str = TESTING['HEADER']['CREATION']
creation = datetime.datetime.strptime(creation_str, '%a %b %d %H:%M:%S %Y')
assert int(''.join(chr(x)
for x in TESTING['MYSTRUCTURE']['RANDOMNUMBERS'][-4:])) == creation.year
```
#### File: map/sources/suvi.py
```python
import astropy.units as u
from astropy.coordinates import CartesianRepresentation
from astropy.visualization import AsinhStretch
from astropy.visualization.mpl_normalize import ImageNormalize
from sunpy.map import GenericMap
from sunpy.map.sources.source_type import source_stretch
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__all__ = ["SUVIMap"]
class SUVIMap(GenericMap):
"""
SUVI Image Map.
The Solar Ultraviolet Imager (SUVI) is a normal-incidence Cassegrain EUV
telescope on board the latest of the Geostationary Operational Environmental
Satellite (GOES) missions (GOES-16, formerly known as GOES-R).
It is similar to Atmospheric Imaging Assembly (AIA). It operates in
    geostationary orbit above the Americas at 75.2 degrees W. Its primary
    purpose is to support NOAA's goal to characterize solar features and detect
    events that lead to space weather. It uses a filter wheel to image the Sun
    in six EUV wavelengths corresponding to known coronal emission lines:
- 9.4 nm (FeXVIII)
- 13.1 nm (FeXXI)
- 17.1 nm (FeIX/X)
- 19.5 nm (FeXII)
- 28.4 nm (FeXV)
- 30.4 nm (HeII)
The focal plane consists of a CCD detector with 1280 x 1280 pixels. The
plate scale is 2.5 arcsec per pixel. The field of view is therefore almost
twice the size of the Sun (53 arcmin) and extends out to 1.6 solar radii in
the horizontal direction and 2.3 solar radii in the diagonal. It provides
observations in each wavelength at multiple exposure times every 4 minutes.
GOES-16 was launched on November 16, 2016, and became operational as NOAA's
GOES East on December 18, 2017, replacing GOES-13.
Notes
-----
SUVI uses the same color tables as AIA for the matching wavelengths.
SUVI 195 and 284 images use the AIA 193 & 335 color tables respectively.
Observer location: We use the ECEF coordinates provided in the FITS header for the spacecraft
location even when coordinates in other frames are provided due to accuracy concerns over the
coordinate transformations used in the SUVI data pipeline. There could still be a small
discrepancy because the definition of the ECEF frame used by SUVI may not exactly match the
definition of the ITRS frame used by SunPy to interpret the header values.
Note that some Level 1b files cannot be loaded due to errors in the header.
References
----------
* `GOES-R Mission <https://www.goes-r.gov>`_
* `SUVI Instrument Page <https://www.goes-r.gov/spacesegment/suvi.html>`_
* `GOES-16 on Wikipedia <https://en.wikipedia.org/wiki/GOES-16>`_
* `Recommended instrument description article <https://doi.org/10.3847/2041-8213/aaa28e>`_
* `User's Guide <https://www.goes-r.gov/users/docs/PUG-L1b-vol3.pdf>`_
* `Level 1b Readme <https://data.ngdc.noaa.gov/platforms/solar-space-observing-satellites/goes/goes16/l1b/suvi-l1b-fe094/ReadMe.pdf>`_
* `Data archive <https://www.ngdc.noaa.gov/stp/satellite/goes-r.html>`_
* `Level 1b data <https://data.ngdc.noaa.gov/platforms/solar-space-observing-satellites/goes/goes16/l1b/>`_
* `Level 2 data <https://data.ngdc.noaa.gov/platforms/solar-space-observing-satellites/goes/goes16/l2/data/>`_
"""
def __init__(self, data, header, **kwargs):
super().__init__(data, header, **kwargs)
# Fill in some missing info
self.meta["detector"] = "SUVI"
self.meta["telescop"] = "GOES-R"
self._nickname = self.detector
self.plot_settings["cmap"] = self._get_cmap_name()
self.plot_settings["norm"] = ImageNormalize(
stretch=source_stretch(self.meta, AsinhStretch(0.01)), clip=False
)
@property
def _supported_observer_coordinates(self):
return [(('obsgeo-x', 'obsgeo-y', 'obsgeo-z'), {'x': self.meta.get('obsgeo-x'),
'y': self.meta.get('obsgeo-y'),
'z': self.meta.get('obsgeo-z'),
'unit': u.m,
'representation_type': CartesianRepresentation,
'frame': "itrs"})
] + super()._supported_observer_coordinates
@property
def observatory(self):
"""
Returns the observatory.
"""
return self.meta["telescop"].split("/")[0]
@classmethod
def is_datasource_for(cls, data, header, **kwargs):
"""Determines if header corresponds to an AIA image"""
return str(header.get("instrume", "")).startswith(
"GOES-R Series Solar Ultraviolet Imager"
)
```
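Like the other map sources, SUVIMap is not normally instantiated directly; the sunpy map factory dispatches to it through is_datasource_for whenever the INSTRUME keyword starts with "GOES-R Series Solar Ultraviolet Imager". A short sketch of that flow, with a placeholder file name:
```python
# Hypothetical quick-look sketch; the FITS file name is a placeholder.
import sunpy.map

suvi_map = sunpy.map.Map("suvi_l1b_fe171_example.fits")  # factory returns a SUVIMap
print(type(suvi_map).__name__)   # expected: SUVIMap
print(suvi_map.observatory)      # "GOES-R", from the telescop keyword set in __init__
suvi_map.peek()                  # quick-look plot with the AIA-style colormap
```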
#### File: dataretriever/sources/eve.py
```python
from sunpy.net.dataretriever import GenericClient
__all__ = ['EVEClient']
class EVEClient(GenericClient):
"""
Provides access to Level 0C Extreme ultraviolet Variability Experiment (EVE) data.
To use this client you must request Level 0 data.
It is hosted by `LASP <http://lasp.colorado.edu/home/eve/data/data-access/>`__.
Examples
--------
>>> from sunpy.net import Fido, attrs as a
>>> results = Fido.search(a.Time("2016/1/1", "2016/1/2"),
... a.Instrument.eve, a.Level.zero) #doctest: +REMOTE_DATA
>>> results #doctest: +REMOTE_DATA
<sunpy.net.fido_factory.UnifiedResponse object at ...>
Results from 1 Provider:
<BLANKLINE>
2 Results from the EVEClient:
Start Time End Time Instrument ... Source Provider Level
------------------- ------------------- ---------- ... ------ -------- -----
2016-01-01 00:00:00 2016-01-01 23:59:59 EVE ... SDO LASP 0
2016-01-02 00:00:00 2016-01-02 23:59:59 EVE ... SDO LASP 0
<BLANKLINE>
<BLANKLINE>
"""
baseurl = (r'http://lasp.colorado.edu/eve/data_access/evewebdata/quicklook/'
r'L0CS/SpWx/%Y/%Y%m%d_EVE_L0CS_DIODES_1m.txt')
pattern = '{}/SpWx/{:4d}/{year:4d}{month:2d}{day:2d}_EVE_L{Level:1d}{}'
@classmethod
def register_values(cls):
from sunpy.net import attrs
adict = {attrs.Instrument: [('EVE', 'Extreme ultraviolet Variability Experiment, which is part of the NASA Solar Dynamics Observatory mission.')],
attrs.Physobs: [('irradiance', 'the flux of radiant energy per unit area.')],
attrs.Source: [('SDO', 'The Solar Dynamics Observatory.')],
attrs.Provider: [('LASP', 'The Laboratory for Atmospheric and Space Physics.')],
attrs.Level: [('0', 'EVE: The specific EVE client can only return Level 0C data. Any other number will use the VSO Client.')]}
return adict
```
#### File: dataretriever/sources/norh.py
```python
import astropy.units as u
from sunpy.net import attrs as a
from sunpy.net.dataretriever import GenericClient
__all__ = ['NoRHClient']
class NoRHClient(GenericClient):
"""
Provides access to the Nobeyama RadioHeliograph (NoRH) averaged correlation
time series data.
Uses this `ftp archive <ftp://solar-pub.nao.ac.jp/pub/nsro/norh/data/tcx/>`__
hosted by the `NoRH Science Center <https://solar.nro.nao.ac.jp/norh/doc/manuale/node1.html>`__.
Queries to NoRH should specify either 17GHz or 34GHz as a Wavelength.
Examples
--------
>>> import astropy.units as u
>>> from sunpy.net import Fido, attrs as a
>>> results = Fido.search(a.Time("2016/1/1", "2016/1/2"),
... a.Instrument.norh, a.Wavelength(17*u.GHz)) #doctest: +REMOTE_DATA
>>> results #doctest: +REMOTE_DATA
<sunpy.net.fido_factory.UnifiedResponse object at ...>
Results from 1 Provider:
<BLANKLINE>
2 Results from the NoRHClient:
Start Time End Time Instrument Source Provider Wavelength
------------------- ------------------- ---------- ------ -------- ----------
2016-01-01 00:00:00 2016-01-01 23:59:59 NORH NAOJ NRO 17.0 GHz
2016-01-02 00:00:00 2016-01-02 23:59:59 NORH NAOJ NRO 17.0 GHz
<BLANKLINE>
<BLANKLINE>
"""
baseurl = r'ftp://solar-pub.nao.ac.jp/pub/nsro/norh/data/tcx/%Y/%m/(\w){3}%y%m%d'
pattern = '{}/tcx/{year:4d}/{month:2d}/{Wavelength:3l}{:4d}{day:2d}'
@classmethod
def pre_search_hook(cls, *args, **kwargs):
"""
        Converts the wavelength specified in the query to its
        representation in the url which can be used by the scraper.
"""
d = cls._get_match_dict(*args, **kwargs)
waverange = a.Wavelength(34*u.GHz, 17*u.GHz)
req_wave = d.get('Wavelength', waverange)
wmin = req_wave.min.to(u.GHz, equivalencies=u.spectral())
wmax = req_wave.max.to(u.GHz, equivalencies=u.spectral())
req_wave = a.Wavelength(wmin, wmax)
d['Wavelength'] = []
if 17*u.GHz in req_wave:
d['Wavelength'].append('tca')
if 34*u.GHz in req_wave:
d['Wavelength'].append('tcz')
return cls.baseurl, cls.pattern, d
def post_search_hook(self, exdict, matchdict):
"""
This method converts 'tca' and 'tcz' in the url's metadata
to a frequency of '17 GHz' and '34 GHz' respectively.
"""
rowdict = super().post_search_hook(exdict, matchdict)
if rowdict['Wavelength'] == 'tca':
rowdict['Wavelength'] = 17*u.GHz
elif rowdict['Wavelength'] == 'tcz':
rowdict['Wavelength'] = 34*u.GHz
return rowdict
@classmethod
def register_values(cls):
from sunpy.net import attrs
adict = {attrs.Instrument: [('NORH',
('Nobeyama Radio Heliograph is an imaging radio telescope at 17 '
'or 34GHz located at the Nobeyama Solar Radio Observatory.'))],
attrs.Source: [('NAOJ', 'The National Astronomical Observatory of Japan')],
attrs.Provider: [('NRO', 'Nobeyama Radio Observatory')],
attrs.Wavelength: [('*')]}
return adict
```
#### File: sources/tests/test_lyra_ud.py
```python
import pytest
from hypothesis import given
import astropy.units as u
from astropy.time import TimeDelta
import sunpy.net.dataretriever.sources.lyra as lyra
from sunpy.net import Fido
from sunpy.net import attrs as a
from sunpy.net._attrs import Instrument, Time
from sunpy.net.dataretriever.client import QueryResponse
from sunpy.net.fido_factory import UnifiedResponse
from sunpy.net.tests.strategies import range_time
from sunpy.time import parse_time
from sunpy.time.timerange import TimeRange
@pytest.fixture
def LCClient():
return lyra.LYRAClient()
@pytest.mark.remote_data
@pytest.mark.parametrize("timerange,url_start,url_end", [
(Time('2012/1/7', '2012/1/7'),
'http://proba2.oma.be/lyra/data/bsd/2012/01/07/lyra_20120107-000000_lev2_std.fits',
'http://proba2.oma.be/lyra/data/bsd/2012/01/07/lyra_20120107-000000_lev2_std.fits'
),
(Time('2012/12/1', '2012/12/2'),
'http://proba2.oma.be/lyra/data/bsd/2012/12/01/lyra_20121201-000000_lev2_std.fits',
'http://proba2.oma.be/lyra/data/bsd/2012/12/02/lyra_20121202-000000_lev2_std.fits'
),
(Time('2012/4/7', '2012/4/14'),
'http://proba2.oma.be/lyra/data/bsd/2012/04/07/lyra_20120407-000000_lev2_std.fits',
'http://proba2.oma.be/lyra/data/bsd/2012/04/14/lyra_20120414-000000_lev2_std.fits'
)
])
def test_get_url_for_time_range(LCClient, timerange, url_start, url_end):
qresponse = LCClient.search(timerange, a.Level.two)
urls = [i['url'] for i in qresponse]
assert isinstance(urls, list)
assert urls[0] == url_start
assert urls[-1] == url_end
@given(range_time('2010-01-06'))
def test_can_handle_query(time):
LCClient = lyra.LYRAClient()
ans1 = LCClient._can_handle_query(
time, Instrument('lyra'))
assert ans1 is True
ans2 = LCClient._can_handle_query(time)
assert ans2 is False
@pytest.mark.parametrize("time", [
Time('2015/8/27', '2015/8/27'),
Time('2016/2/4', '2016/2/6')])
@pytest.mark.remote_data
def test_query(LCClient, time):
qr1 = LCClient.search(time, Instrument('lyra'))
assert isinstance(qr1, QueryResponse)
assert qr1.time_range().start == time.start
almost_day = TimeDelta(1 * u.day - 1 * u.millisecond)
assert qr1.time_range().end == time.end + almost_day
@pytest.mark.remote_data
@pytest.mark.parametrize("time,instrument", [
(Time('2013/8/27', '2013/8/27'), Instrument('lyra'))])
def test_get(LCClient, time, instrument):
qr1 = LCClient.search(time, instrument)
download_list = LCClient.fetch(qr1)
assert len(download_list) == len(qr1)
@pytest.mark.remote_data
@pytest.mark.parametrize(
"time, instrument",
[(a.Time('2012/10/4', '2012/10/6'), a.Instrument.lyra)])
def test_fido(time, instrument):
qr = Fido.search(time, instrument)
assert isinstance(qr, UnifiedResponse)
response = Fido.fetch(qr)
assert len(response) == qr._numfile
def test_attr_reg():
assert a.Instrument.lyra == a.Instrument('LYRA')
assert a.Level.one == a.Level('1')
assert a.Level.two == a.Level('2')
assert a.Level.three == a.Level('3')
def test_client_repr(LCClient):
"""
Repr check
"""
output = str(LCClient)
assert output[:50] == 'sunpy.net.dataretriever.sources.lyra.LYRAClient\n\nP'
def mock_query_object(LCClient):
"""
Creating a Query Response object and prefilling it with some information
"""
# Creating a Query Response Object
start = '2016/1/1'
end = '2016/1/1 23:59:59'
obj = {
'Time': TimeRange(parse_time(start), parse_time(end)),
'Start Time': parse_time(start),
'End Time': parse_time(end),
'Instrument': 'LYRA',
'Physobs': 'irradiance',
'Source': 'PROBA2',
'Provider': 'ESA',
'Level': '2',
'url': ('http://proba2.oma.be/lyra/data/bsd/2016/01/01/'
'lyra_20160101-000000_lev2_std.fits')
}
results = QueryResponse([obj], client=LCClient)
return results
def test_show(LCClient):
mock_qr = mock_query_object(LCClient)
qrshow0 = mock_qr.show()
qrshow1 = mock_qr.show('Start Time', 'Instrument')
allcols = ['Start Time', 'End Time', 'Instrument', 'Physobs', 'Source',
'Provider', 'Level']
assert qrshow0.colnames == allcols
assert qrshow1.colnames == ['Start Time', 'Instrument']
assert qrshow0['Instrument'][0] == 'LYRA'
```
#### File: net/tests/test_attr.py
```python
from collections import defaultdict
import pytest
from sunpy.net import attr
from sunpy.net.attr import AttrMeta, make_tuple
from sunpy.net.dataretriever import GenericClient
class Instrument(attr.SimpleAttr):
"""
Dummy Instrument Class.
"""
class Time(attr.Range):
"""
Dummy Time Class.
"""
def EmptyAttr():
AttrMeta._attr_registry = defaultdict(make_tuple)
@pytest.fixture
def ALL():
return Instrument('all')
@pytest.fixture
def AIA():
return Instrument('AIA')
@pytest.fixture
def NUM():
return Instrument('1')
@pytest.fixture
def NUMBER():
return Instrument('1AIA')
@pytest.fixture
def POINTNUMBER():
return Instrument('1.5')
@pytest.fixture
def NUMBERS():
return Instrument('12AIAs')
@pytest.fixture
def HMI():
return Instrument('HMI')
@pytest.fixture
def SPEC():
return Instrument('_!£!THIS_NAME!"!ISSPECIAL~~##')
@pytest.fixture
def KEYWORD():
return Instrument('class')
class SA1(attr.SimpleAttr):
pass
class SA2(attr.SimpleAttr):
pass
class SA3(attr.SimpleAttr):
pass
class SA4(attr.SimpleAttr):
pass
def test_empty_repr():
class TestAttr(attr.Attr):
pass
assert repr(TestAttr)
@pytest.mark.parametrize("different_type", [
int, str, float, list, set, tuple, dict, object
])
def test_empty(different_type):
attr_ = attr.Attr()
assert attr_ != different_type()
def test_attr_and():
a1 = SA1(1)
a2 = SA2(2)
an = a1 & a2
assert isinstance(an, attr.AttrAnd)
assert a1 in an.attrs
assert a2 in an.attrs
assert len(an.attrs) == 2
def test_attr_and_AttrAnd():
a1 = SA1(1)
a2 = SA2(2)
a3 = SA3(3)
an = a1 & (a2 & a3)
assert isinstance(an, attr.AttrAnd)
assert a1 in an.attrs
assert a2 in an.attrs
assert a3 in an.attrs
assert len(an.attrs) == 3
def test_attr_multi_and_AttrAnd():
a1 = SA1(1)
a2 = SA2(2)
a3 = SA3(3)
a4 = SA4(4)
a_and1 = (a2 & a3)
a_and2 = (a1 & a4)
an = a_and1 & a_and2
assert isinstance(a_and1, attr.AttrAnd)
assert isinstance(a_and2, attr.AttrAnd)
assert isinstance(an, attr.AttrAnd)
assert a1 in an.attrs
assert a2 in an.attrs
assert a3 in an.attrs
assert a4 in an.attrs
assert len(an.attrs) == 4
def test_attr_and_AttrOr():
a1 = SA1(1)
a2 = SA2(2)
a3 = SA3(3)
an = a1 & (a2 | a3)
assert isinstance(an, attr.AttrOr)
for a in an.attrs:
assert isinstance(a, attr.AttrAnd)
assert len(an.attrs) == 2
def test_attr_hash():
a1 = SA1(1)
a2 = SA1(1)
a3 = SA1(3)
assert hash(a1) == hash(a2)
assert hash(a3) != hash(a1)
def test_attr_collides():
a1 = attr.Attr()
with pytest.raises(NotImplementedError):
a1.collides(1)
def test_attr_or():
a1 = SA1(1)
a2 = SA2(2)
an = a1 | a2
assert isinstance(an, attr.AttrOr)
assert a1 in an.attrs
assert a2 in an.attrs
assert len(an.attrs) == 2
a1 = SA1(1)
a2 = SA2(1)
an = a1 | a2
assert an is a1
def test_simpleattr_collides():
a1 = SA1(1)
with pytest.raises(TypeError):
a1 & a1
def test_simple_attr_repr():
a1 = SA1("test string")
assert "test string" in repr(a1)
assert "SA1" in repr(a1)
def test_dummyattr():
one = attr.DummyAttr()
other = attr.ValueAttr({'a': 'b'})
assert (one | other) is other
assert (one & other) is other
def test_dummyattr_hash():
one = attr.DummyAttr()
assert hash(one) == hash(None)
def test_dummyattr_collides():
one = attr.DummyAttr()
two = attr.DummyAttr()
assert one.collides(two) is False
def test_dummyattr_eq():
one = attr.DummyAttr()
two = attr.DummyAttr()
other = attr.ValueAttr({'a': 'b'})
assert one == two
assert one != other
def test_and_nesting():
a1 = SA1(1)
a2 = SA2(2)
a3 = SA3(3)
a = attr.and_(a1, attr.AttrAnd((a2, a3)))
# Test that the nesting has been removed.
assert len(a.attrs) == 3
def test_or_nesting():
a1 = SA1(1)
a2 = SA2(2)
a3 = SA3(3)
a = attr.or_(a1, attr.AttrOr((a2, a3)))
# Test that the nesting has been removed.
assert len(a.attrs) == 3
def test_attr_metamagic(AIA, HMI):
# {cls: cls.register_values()}
attr.Attr.update_values({GenericClient: {Instrument: [('AIA', 'This is AIA, it takes data')]}})
# .name is the attribute name return
assert attr.Attr._attr_registry[Instrument].name == [AIA.value.lower()]
# .name_long is the original name
assert attr.Attr._attr_registry[Instrument].name_long == [AIA.value]
# .des is the description of the item.
assert attr.Attr._attr_registry[Instrument].desc == ['This is AIA, it takes data']
# The _value_registry on the Attr object does not get cleaned.
# So by adding it again to the same type, in this case Instrument the list is appended.
attr.Attr.update_values(
{GenericClient: {Instrument: [('HMI', 'This is HMI, it lives next to AIA')]}})
assert attr.Attr._attr_registry[Instrument].name == [AIA.value.lower(), HMI.value.lower()]
assert attr.Attr._attr_registry[Instrument].name_long == [AIA.value, HMI.value]
assert attr.Attr._attr_registry[Instrument].desc == [
'This is AIA, it takes data', 'This is HMI, it lives next to AIA']
# Tests the print out for the first two inputs only
output = 'sunpy.net.tests.test_attr.Instrument\n\nDummy Instrument Class.\n\n\nAttribute Name Client Full Name Description \n-------------- ------- --------- ---------------------------------\naia Generic AIA This is AIA, it takes data \nhmi Generic HMI This is HMI, it lives next to AIA'
assert str(Instrument) == output
def test_attr_dynamic(AIA, HMI):
# This checks the dynamic attribute creation.
attr.Attr.update_values({GenericClient: {Instrument: [('AIA', 'This is AIA, it takes data')]}})
attr.Attr.update_values(
{GenericClient: {Instrument: [('HMI', 'This is HMI, it lives next to AIA')]}})
assert Instrument.aia == AIA
assert Instrument.hmi == HMI
def test_attr_dir():
# Test for __dir__
attr.Attr.update_values({GenericClient: {Instrument: [('AIA', 'This is AIA, it takes data')]}})
attr.Attr.update_values(
{GenericClient: {Instrument: [('HMI', 'This is HMI, it lives next to AIA')]}})
assert 'aia' in dir(Instrument)
assert 'hmi' in dir(Instrument)
def test_attr_sanity():
attr.Attr.update_values(
{GenericClient: {Instrument: [('_!£!THIS_NAME!"!ISSPECIAL~~##', 'To test the attribute cleaning.')]}})
# This checks for sanitization of names.
assert '___this_name___isspecial____' in attr.Attr._attr_registry[Instrument].name
assert '_!£!THIS_NAME!"!ISSPECIAL~~##' in attr.Attr._attr_registry[Instrument].name_long
assert 'To test the attribute cleaning.' in attr.Attr._attr_registry[Instrument].desc
def test_attr_keyword():
attr.Attr.update_values({GenericClient: {Instrument: [('class', 'Keyword checking.')]}})
# This checks for sanitization of names.
assert 'class_' in attr.Attr._attr_registry[Instrument].name
assert 'class' in attr.Attr._attr_registry[Instrument].name_long
assert 'Keyword checking.' in attr.Attr._attr_registry[Instrument].desc
def test_attr_num(NUM):
attr.Attr.update_values({GenericClient: {Instrument: [('1', 'One')]}})
# This checks for sanitization of names.
assert 'one' in attr.Attr._attr_registry[Instrument].name
assert '1' in attr.Attr._attr_registry[Instrument].name_long
assert 'One' in attr.Attr._attr_registry[Instrument].desc
assert Instrument.one == NUM
def test_attr_number(NUMBER):
attr.Attr.update_values({GenericClient: {Instrument: [('1AIA', 'One Number first.')]}})
# This checks for sanitization of names.
assert 'one_aia' in attr.Attr._attr_registry[Instrument].name
assert '1AIA' in attr.Attr._attr_registry[Instrument].name_long
assert 'One Number first.' in attr.Attr._attr_registry[Instrument].desc
assert Instrument.one_aia == NUMBER
def test_attr_number_point(POINTNUMBER):
attr.Attr.update_values({GenericClient: {Instrument: [('1.5', 'One Point Five.')]}})
# This checks for sanitization of names.
assert 'onepointfive' in attr.Attr._attr_registry[Instrument].name
assert '1.5' in attr.Attr._attr_registry[Instrument].name_long
assert 'One Point Five.' in attr.Attr._attr_registry[Instrument].desc
assert Instrument.onepointfive == POINTNUMBER
def test_attr_numbes():
attr.Attr.update_values({GenericClient: {Instrument: [('12AIAs', 'That is too many AIAs')]}})
# This checks for sanitization of names.
assert 'one_2aias' in attr.Attr._attr_registry[Instrument].name
assert '12AIAs' in attr.Attr._attr_registry[Instrument].name_long
assert 'That is too many AIAs' in attr.Attr._attr_registry[Instrument].desc
assert 'one_2aias' in dir(Instrument)
def test_attr_iterable_length():
# not iterable
with pytest.raises(ValueError):
attr.Attr.update_values({GenericClient: {Instrument: 'AIA'}})
# too many items
with pytest.raises(ValueError):
attr.Attr.update_values(
{GenericClient: {Instrument: [('AIA', 'AIA is Nice', 'Error now')]}})
def test_asterisk_attrs(ALL):
# This checks we can submit * to mean all attrs.
attr.Attr.update_values({GenericClient: {Instrument: [('*')]}})
assert Instrument.all == ALL
assert "Instrument(all: All values of this type are supported.)" in repr(Instrument.all)
@pytest.mark.parametrize("wrong_name", [
("not star",), ("*whoops",)
])
def test_single_pair_argument_attrs(wrong_name):
# This checks that other single string entries fail.
with pytest.raises(ValueError):
attr.Attr.update_values({GenericClient: {Instrument: [wrong_name]}})
def test_asterisk_attrs_time():
# This checks we can submit * for time/wavelength (both are ranges)
attr.Attr.update_values({GenericClient: {Time: [('*')]}})
assert "all All values of this type are supported." in repr(Time)
```
#### File: tests/tests/test_mocks.py
```python
import io
import re
import pytest
from sunpy.tests.mocks import MockHTTPResponse, MockObject, MockOpenTextFile
@pytest.fixture
def mocked_mockobject():
return MockObject(records=12)
def test_MockObject_illegal_kwargs(mocked_mockobject):
"""
Any attempt to use a kwarg which has the same name as an attribute/method
of the underlying object or datastore will raise a ValueError.
"""
with pytest.raises(ValueError):
MockObject(records=[], values=1)
with pytest.raises(ValueError):
MockObject(items=('a', 'b', 'c'))
with pytest.raises(ValueError):
MockObject(__hash__=0x23424)
# adding a new 'prohibited' attribute will be prevented
with pytest.raises(ValueError):
mocked_mockobject['keys'] = [3, 4]
def test_MockObject_attr(mocked_mockobject):
"""
builtin hasattr & getattr functions, these don't work on dictionaries but
they do on classes.
"""
assert hasattr(mocked_mockobject, 'records') is True
assert hasattr(mocked_mockobject, 'cost') is False
assert getattr(mocked_mockobject, 'records') == 12
with pytest.raises(AttributeError):
getattr(mocked_mockobject, 'jobs')
def test_MockObject_get(mocked_mockobject):
"""
Getting attributes from `MockObject` using dot and bracket notation.
"""
assert mocked_mockobject['records'] == 12
assert mocked_mockobject.records == 12
with pytest.raises(AttributeError):
mocked_mockobject.no_key
with pytest.raises(KeyError):
mocked_mockobject['not-here']
def test_MockObject_set_get(mocked_mockobject):
"""
Setting attributes in `MockObject` using bracket notation *not* dot
notation.
"""
# Only change the value of existing & new items using 'bracket' notation
mocked_mockobject['records'] = 45
assert mocked_mockobject.records == 45
assert mocked_mockobject['records'] == 45
# Using 'dot' notation will set a new attribute on 'MockObject' not on the datastore
# DO NOT DO THIS!
mocked_mockobject.records = -344
    # This is equivalent to setattr(mocked_mockobject, 'records', -344). Again, don't do this!
assert mocked_mockobject.records == -344
# The 'real' value remains unchanged.
assert mocked_mockobject['records'] == 45
def test_MockObject_len():
"""
Testing ``MockObject.__len__``.
"""
assert len(MockObject(responses=['a', 'b', 'c', 'd'], requests=(1, 2, 3))) == 2
def test_MockObject_del(mocked_mockobject):
"""
Ensure ``MockObject.__delitem__`` is **not** implemented.
"""
with pytest.raises(NotImplementedError):
del mocked_mockobject['records']
def test_MockObject_iter(mocked_mockobject):
"""
Test ``MockObject.__iter__``.
"""
assert list(iter(mocked_mockobject)) == ['records']
def test_repr_MockObject():
"""
Test ``MockObject.__repr__``.
"""
empty = MockObject()
mo_p = re.compile(r"^(?P<_><)sunpy\.tests\.mocks\.MockObject \{\} "
"at 0x[0-9A-Fa-f]+L?(?(_)>|)$")
assert mo_p.match(repr(empty)) is not None
def test_read_only_mode_MockOpenTextFile():
"""
Reading from a read only file, writing should be prohibited.
"""
new_line = '\n'
content = r'a{0}bc{0}nd{0}{0}'.format(new_line)
read_only = MockOpenTextFile('rom.txt', data=content)
assert read_only.readable() is True
assert read_only.writable() is False
with pytest.raises(io.UnsupportedOperation):
read_only.write('')
assert read_only.read() == content
assert read_only.readlines() == [f'{line}{new_line}'
for line in content.split(new_line)]
read_only.close()
with pytest.raises(ValueError):
read_only.readable()
with pytest.raises(ValueError):
read_only.writable()
with pytest.raises(ValueError):
read_only.read()
with pytest.raises(ValueError):
read_only.readlines()
def test_write_only_mode_MockOpenTextFile():
"""
Writing to to write-only file, reading should be prohibited.
"""
write_only = MockOpenTextFile('write.txt', 'w')
assert write_only.readable() is False
assert write_only.writable() is True
with pytest.raises(io.UnsupportedOperation):
write_only.read()
data = '0123456789'
num_chars = write_only.write(data)
assert num_chars == len(data)
def test_read_and_write_MockOpenTextFile():
"""
Reading & writing to a file with read/write access.
"""
rd_wr = MockOpenTextFile(mode='r+')
assert rd_wr.name == 'N/A'
assert rd_wr.readable() is True
assert rd_wr.writable() is True
    # Initially empty
assert rd_wr.read() == ''
data = '0123456789'
num_chars = rd_wr.write(data)
assert num_chars == len(data)
assert rd_wr.read() == data
rd_wr.close()
def test_repr_MockOpenTextFile():
"""
Test ``MockOpenTextFile.__repr__``.
"""
mo_p = re.compile(r"^(?P<_><)sunpy\.tests\.mocks\.MockOpenTextFile file \'a\' "
"mode \'r\' at 0x[0-9A-Fa-f]+L?(?(_)>|)$")
assert mo_p.match(repr(MockOpenTextFile('a', 'r'))) is not None
def test_MockHTTPResponse():
"""
Simple tests querying the headers attribute.
"""
headers = {'Content-Type': 'text/html',
'Content-Disposition': 'attachment; filename="filename.jpg"'}
response = MockHTTPResponse(url='http://abc.com', headers=headers)
assert response.url == 'http://abc.com'
assert response.headers.get('Content-Disposition') == 'attachment; filename="filename.jpg"'
assert response.headers.get('Content-Length') is None
# Key *not* case insensitive
assert response.headers.get('content-type') is None
```
#### File: time/tests/test_timerange.py
```python
from datetime import datetime, timedelta
import pytest
import astropy.units as u
from astropy.time import Time, TimeDelta
from astropy.utils.exceptions import ErfaWarning
import sunpy.time
from sunpy.time import is_time_equal
tbegin_str = '2012/1/1'
tfin_str = '2012/1/2'
dt = u.Quantity(24 * 60 * 60, 's')
start = sunpy.time.parse_time(tbegin_str)
end = sunpy.time.parse_time(tfin_str)
delta = end - start
@pytest.mark.parametrize("inputs", [
(tbegin_str, tfin_str),
(tbegin_str, dt),
(tbegin_str, TimeDelta(1*u.day)),
(tbegin_str, timedelta(days=1))
])
def test_timerange_inputs(inputs):
timerange = sunpy.time.TimeRange(*inputs)
assert isinstance(timerange, sunpy.time.TimeRange)
assert timerange.start == start
assert timerange.end == end
assert timerange.dt == delta
def test_timerange_invalid_range():
lower = '2016/01/04 09:30'
mid = '2016/06/04 09:30'
upper = '2017/03/04 09:30'
with pytest.raises(ValueError):
sunpy.time.TimeRange((lower,))
with pytest.raises(ValueError):
sunpy.time.TimeRange((lower, mid, upper))
def test_equals():
lower = '2016/01/04T09:30:00.000'
upper = '2016/06/04T09:30:00.000'
upper_plus_one_msec = '2016/06/04T09:30:00.001'
tr = sunpy.time.TimeRange((lower, upper))
# This should *always* hold true
assert tr == tr
# Same values, different format
tr_diff_format = sunpy.time.TimeRange('2016-01-04T09:30:00.000', '2016-06-04T09:30:00.000')
assert tr == tr_diff_format
lower_dt = Time('2016-01-04T09:30:00.000')
upper_dt = Time('2016-06-04T09:30:00.000')
tr_datetime = sunpy.time.TimeRange(lower_dt, upper_dt)
assert tr == tr_datetime
tr_plus_one_msec = sunpy.time.TimeRange((lower, upper_plus_one_msec))
assert (tr_plus_one_msec == tr) is False
# Attempt using objects which are *not* TimeRanges
assert (tr == lower_dt) is False
assert (lower_dt == tr) is False
def test_not_equals():
a_st = '2016/01/04T09:30:00.000'
a_et = '2016/06/04T09:30:00.000'
b_st = '2017/01/04T09:30:00.000'
b_et = '2017/06/04T09:30:00.000'
# Same start time, different end times
assert sunpy.time.TimeRange(a_st, a_et) != sunpy.time.TimeRange(a_st, b_et)
# Different start times, same end times
assert sunpy.time.TimeRange(b_st, b_et) != sunpy.time.TimeRange(a_st, b_et)
# Different start & end times
assert sunpy.time.TimeRange(a_st, a_et) != sunpy.time.TimeRange(b_st, b_et)
# Different objects
assert sunpy.time.TimeRange(a_st, a_et) != dict()
assert list() != sunpy.time.TimeRange(a_st, a_et)
def test_get_dates():
lower = '2016/01/04 09:30'
lower_plus_one_day = '2016/01/05 09:30'
single_day = sunpy.time.TimeRange((lower, lower))
assert single_day.get_dates() == [Time('2016-1-4')]
two_days = sunpy.time.TimeRange((lower, lower_plus_one_day))
assert two_days.get_dates() == [Time('2016-1-4'), Time('2016-1-5')]
one_year = sunpy.time.TimeRange('2017/01/01', '2017-12-31')
assert len(one_year.get_dates()) == 365
leap_year = sunpy.time.TimeRange('2016/01/01', '2016-12-31')
assert len(leap_year.get_dates()) == 366
@pytest.mark.parametrize("ainput", [
(tbegin_str, tfin_str),
(tbegin_str, dt),
(tbegin_str, TimeDelta(1*u.day)),
(tbegin_str, timedelta(days=1)),
(sunpy.time.TimeRange(tbegin_str, tfin_str))
])
def test_timerange_input(ainput):
timerange = sunpy.time.TimeRange(ainput)
assert isinstance(timerange, sunpy.time.TimeRange)
assert timerange.start == start
assert timerange.end == end
assert timerange.dt == delta
@pytest.mark.parametrize("ainput", [
(tbegin_str, tfin_str),
(tfin_str, -dt),
(tfin_str, tbegin_str)
])
def test_start_lessthan_end(ainput):
timerange = sunpy.time.TimeRange(ainput)
t1 = timerange.start
t2 = timerange.end
assert t1 < t2
assert timerange.start == start
assert timerange.end == end
@pytest.fixture
def timerange_a():
return sunpy.time.TimeRange(tbegin_str, tfin_str)
def test_center(timerange_a):
assert is_time_equal(timerange_a.center, Time('2012-1-1T12:00:00'))
def test_split(timerange_a):
expect = [sunpy.time.TimeRange('2012/1/1T00:00:00', '2012/1/1T12:00:00'),
sunpy.time.TimeRange('2012/1/1T12:00:00', '2012/1/2T00:00:00')]
split = timerange_a.split(n=2)
    # Doing direct comparisons does not seem to work
assert all([is_time_equal(wi.start, ex.start) and is_time_equal(wi.end, ex.end)
for wi, ex in zip(split, expect)])
def test_split_n_0_error(timerange_a):
with pytest.raises(ValueError):
timerange_a.split(n=0)
def test_input_error(timerange_a):
with pytest.raises(ValueError):
sunpy.time.TimeRange(tbegin_str)
def test_window(timerange_a):
timerange = sunpy.time.TimeRange(tbegin_str, tfin_str)
window = timerange.window(u.Quantity(12 * 60 * 60, 's'), u.Quantity(10, 's'))
expect = [sunpy.time.TimeRange('2012/1/1T00:00:00', '2012/1/1T00:00:10'),
sunpy.time.TimeRange('2012/1/1T12:00:00', '2012/1/1T12:00:10'),
sunpy.time.TimeRange('2012/1/2T00:00:00', '2012/1/2T00:00:10')]
assert isinstance(window, list)
    # Doing direct comparisons does not seem to work
assert all([wi == ex for wi, ex in zip(window, expect)])
@pytest.mark.parametrize("td1,td2", [
(TimeDelta(12*u.hour), TimeDelta(10*u.second)),
(timedelta(hours=12), timedelta(seconds=10))
])
def test_window_timedelta(timerange_a, td1, td2):
timerange = sunpy.time.TimeRange(tbegin_str, tfin_str)
window = timerange.window(td1, td2)
expect = [sunpy.time.TimeRange('2012/1/1T00:00:00', '2012/1/1T00:00:10'),
sunpy.time.TimeRange('2012/1/1T12:00:00', '2012/1/1T12:00:10'),
sunpy.time.TimeRange('2012/1/2T00:00:00', '2012/1/2T00:00:10')]
assert isinstance(window, list)
    # Doing direct comparisons does not seem to work
assert all([wi == ex for wi, ex in zip(window, expect)])
def test_days(timerange_a):
assert timerange_a.days == u.Quantity(1, 'd')
def test_start(timerange_a):
assert timerange_a.start == start
def test_end(timerange_a):
assert timerange_a.end == end
def test_seconds(timerange_a):
assert timerange_a.seconds == dt
def test_minutes(timerange_a):
assert timerange_a.minutes == u.Quantity(24 * 60, 'min')
def test_hours(timerange_a):
assert timerange_a.hours == u.Quantity(24, 'hour')
def test_next():
timerange = sunpy.time.TimeRange(tbegin_str, tfin_str)
timerange.next()
assert isinstance(timerange, sunpy.time.TimeRange)
assert timerange.start == start + delta
assert timerange.end == end + delta
assert timerange.dt == delta
def test_previous():
timerange = sunpy.time.TimeRange(tbegin_str, tfin_str)
timerange.previous()
assert isinstance(timerange, sunpy.time.TimeRange)
assert timerange.start == start - delta
assert timerange.end == end - delta
assert timerange.dt == delta
def test_extend():
timerange = sunpy.time.TimeRange(tbegin_str, tfin_str)
timerange.extend(delta, delta)
assert isinstance(timerange, sunpy.time.TimeRange)
assert timerange.start == start + delta
assert timerange.end == end + delta
assert timerange.dt == delta
def test_contains(timerange_a):
before = Time('1990-1-1')
after = Time('2022-1-1')
between = Time('2014-5-4')
timerange = sunpy.time.TimeRange('2014/05/03 12:00', '2014/05/05 21:00')
assert between in timerange
assert before not in timerange
assert after not in timerange
assert timerange.start in timerange
assert timerange.end in timerange
assert '2014/05/04 15:21' in timerange
assert '1975/4/13' not in timerange
with pytest.warns(ErfaWarning, match='dubious year'):
        assert '2100/1/1' not in timerange
assert '2014/05/03 12:00' in timerange
assert '2014/05/05 21:00' in timerange
def test_get_dates_daylist_less_24_hours():
starttime = datetime(2020, 1, 1, 12)
endtime = datetime(2020, 1, 2, 11)
interval = sunpy.time.TimeRange(starttime, endtime)
daylist = interval.get_dates()
day_one = Time("2020-01-01T00:00:00.000")
day_two = Time("2020-01-02T00:00:00.000")
assert len(daylist) == 2
assert daylist[0] == day_one
assert daylist[1] == day_two
```
#### File: sunpy/util/datatype_factory_base.py
```python
import inspect
__all__ = ["BasicRegistrationFactory", "NoMatchError",
"MultipleMatchError", "ValidationFunctionError"]
class BasicRegistrationFactory:
"""
Generalized registerable factory type.
Widgets (classes) can be registered with an instance of this class.
Arguments to the factory's ``__call__`` method are then passed to a function
specified by the registered factory, which validates the input and returns
    an instance of the class that best matches the inputs.
Attributes
----------
registry : `dict`
Dictionary mapping classes (key) to function (value) which validates input.
default_widget_type : `type`
Class of the default widget.
validation_functions : `list` of `str`
List of function names that are valid validation functions.
Parameters
----------
default_widget_type : `type`, optional
Class of the default widget. Defaults to `None`.
additional_validation_functions : `list` of `str`, optional
List of strings corresponding to additional validation function names.
        Defaults to an empty `list`.
registry : `dict`, optional
Dictionary mapping classes (key) to function (value) which validates input.
Defaults to `None`.
Notes
-----
* A valid validation function must be a classmethod of the registered widget
and it must return a `bool`.
"""
def __init__(self, default_widget_type=None,
additional_validation_functions=[], registry=None):
if registry is None:
self.registry = dict()
else:
self.registry = registry
self.default_widget_type = default_widget_type
self.validation_functions = (['_factory_validation_function'] +
additional_validation_functions)
def __call__(self, *args, **kwargs):
"""
Method for running the factory.
Arguments args and kwargs are passed through to the validation
function and to the constructor for the final type.
"""
# Any preprocessing and massaging of inputs can happen here
return self._check_registered_widget(*args, **kwargs)
def _check_registered_widget(self, *args, **kwargs):
"""
Implementation of a basic check to see if arguments match a widget.
"""
candidate_widget_types = list()
for key in self.registry:
# Call the registered validation function for each registered class
if self.registry[key](*args, **kwargs):
candidate_widget_types.append(key)
n_matches = len(candidate_widget_types)
if n_matches == 0:
if self.default_widget_type is None:
raise NoMatchError("No types match specified arguments and no default is set.")
else:
candidate_widget_types = [self.default_widget_type]
elif n_matches > 1:
raise MultipleMatchError("Too many candidate types identified ({})."
"Specify enough keywords to guarantee unique type "
"identification.".format(n_matches))
# Only one is found
WidgetType = candidate_widget_types[0]
return WidgetType(*args, **kwargs)
def register(self, WidgetType, validation_function=None, is_default=False):
"""
Register a widget with the factory.
If ``validation_function`` is not specified, tests ``WidgetType`` for
        existence of any function in the list ``self.validation_functions``,
        which is a list of names of callable class attributes.
Parameters
----------
WidgetType : `type`
Widget to register.
validation_function : `function`, optional
            Function to validate against. Defaults to `None`, which indicates
            that a classmethod listed in ``validation_functions`` is used instead.
is_default : `bool`, optional
Sets WidgetType to be the default widget. Defaults to `False`.
"""
if is_default:
self.default_widget_type = WidgetType
elif validation_function is not None:
if not callable(validation_function):
raise AttributeError("Keyword argument 'validation_function' must be callable.")
self.registry[WidgetType] = validation_function
else:
found = False
for vfunc_str in self.validation_functions:
if hasattr(WidgetType, vfunc_str):
vfunc = getattr(WidgetType, vfunc_str)
# check if classmethod: stackoverflow #19227724
_classmethod = inspect.ismethod(vfunc) and vfunc.__self__ is WidgetType
if _classmethod:
self.registry[WidgetType] = vfunc
found = True
break
else:
raise ValidationFunctionError("{}.{} must be a classmethod."
.format(WidgetType.__name__, vfunc_str))
if not found:
raise ValidationFunctionError("No proper validation function for class {} "
"found.".format(WidgetType.__name__))
def unregister(self, WidgetType):
"""
Remove a widget from the factory's registry.
"""
self.registry.pop(WidgetType)
class NoMatchError(Exception):
"""
Exception for when no candidate class is found.
"""
class MultipleMatchError(Exception):
"""
Exception for when too many candidate classes are found.
"""
class ValidationFunctionError(AttributeError):
"""
    Exception for when no valid validation function is found for a widget.
"""
```
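A minimal usage sketch of the registration pattern above. `WidgetFactory` is only a stand-in name for the factory class defined earlier in this file, and `ImageWidget` is an invented example; both names are assumptions for illustration.
```python
class ImageWidget:
    def __init__(self, data):
        self.data = data

    @classmethod
    def _factory_validation_function(cls, data):
        # Claim any input that looks array-like.
        return hasattr(data, "shape")


factory = WidgetFactory()        # stand-in name for the factory class above
factory.register(ImageWidget)    # found via the classmethod named above
# widget = factory(some_array)   # dispatches to ImageWidget when the check passes
```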
#### File: sunpy/tools/hek_mkcls.py
```python
import os
import sys
from collections import defaultdict
EVENTS = [
'AR', 'CME', 'CD', 'CH', 'CW', 'FI', 'FE', 'FA', 'FL', 'LP', 'OS', 'SS',
'EF', 'CJ', 'PG', 'OT', 'NR', 'SG', 'SP', 'CR', 'CC', 'ER', 'TO'
]
# For some reason, the event type is "ce" but all its attributes start with
# "CME". This dict is here to account for this.
NAMES = defaultdict(lambda: None, {
'CME': 'CE'
})
# These are just groups for attributes that are not _ListAttrs themselves.
OTHER = ['Area', 'BoundBox', 'Bound', 'OBS', 'Skel', 'FRM', 'Event', 'Outflow']
# There is no underscore after Wave in the names of the API, so we do not
# need to remove it.
OTHER_NOPAD = ['Wave', 'Veloc', 'Freq', 'Intens']
# Every attribute that does not start with something in EVENTS, OTHER or
# OTHER_NOPAD is put into the Misc class.
# XXX: Not all of them actually are strings. We just use string for now because
# that is the type that has the most functionality.
fields = {
'AR_CompactnessCls': '_StringParamAttrWrapper',
'AR_IntensKurt': '_StringParamAttrWrapper',
'AR_IntensMax': '_StringParamAttrWrapper',
'AR_IntensMean': '_StringParamAttrWrapper',
'AR_IntensMin': '_StringParamAttrWrapper',
'AR_IntensSkew': '_StringParamAttrWrapper',
'AR_IntensTotal': '_StringParamAttrWrapper',
'AR_IntensUnit': '_StringParamAttrWrapper',
'AR_IntensVar': '_StringParamAttrWrapper',
'AR_McIntoshCls': '_StringParamAttrWrapper',
'AR_MtWilsonCls': '_StringParamAttrWrapper',
'AR_NOAANum': '_StringParamAttrWrapper',
'AR_NOAAclass': '_StringParamAttrWrapper',
'AR_NumSpots': '_StringParamAttrWrapper',
'AR_PenumbraCls': '_StringParamAttrWrapper',
'AR_Polarity': '_StringParamAttrWrapper',
'AR_SpotAreaRaw': '_StringParamAttrWrapper',
'AR_SpotAreaRawUncert': '_StringParamAttrWrapper',
'AR_SpotAreaRawUnit': '_StringParamAttrWrapper',
'AR_SpotAreaRepr': '_StringParamAttrWrapper',
'AR_SpotAreaReprUncert': '_StringParamAttrWrapper',
'AR_SpotAreaReprUnit': '_StringParamAttrWrapper',
'AR_ZurichCls': '_StringParamAttrWrapper',
'Area_AtDiskCenter': '_StringParamAttrWrapper',
'Area_AtDiskCenterUncert': '_StringParamAttrWrapper',
'Area_Raw': '_StringParamAttrWrapper',
'Area_Uncert': '_StringParamAttrWrapper',
'Area_Unit': '_StringParamAttrWrapper',
'BoundBox_C1LL': '_StringParamAttrWrapper',
'BoundBox_C1UR': '_StringParamAttrWrapper',
'BoundBox_C2LL': '_StringParamAttrWrapper',
'BoundBox_C2UR': '_StringParamAttrWrapper',
'Bound_CCNsteps': '_StringParamAttrWrapper',
'Bound_CCStartC1': '_StringParamAttrWrapper',
'Bound_CCStartC2': '_StringParamAttrWrapper',
'CC_AxisUnit': '_StringParamAttrWrapper',
'CC_MajorAxis': '_StringParamAttrWrapper',
'CC_MinorAxis': '_StringParamAttrWrapper',
'CC_TiltAngleMajorFromRadial': '_StringParamAttrWrapper',
'CC_TiltAngleUnit': '_StringParamAttrWrapper',
'CD_Area': '_StringParamAttrWrapper',
'CD_AreaUncert': '_StringParamAttrWrapper',
'CD_AreaUnit': '_StringParamAttrWrapper',
'CD_Mass': '_StringParamAttrWrapper',
'CD_MassUncert': '_StringParamAttrWrapper',
'CD_MassUnit': '_StringParamAttrWrapper',
'CD_Volume': '_StringParamAttrWrapper',
'CD_VolumeUncert': '_StringParamAttrWrapper',
'CD_VolumeUnit': '_StringParamAttrWrapper',
'CME_Accel': '_StringParamAttrWrapper',
'CME_AccelUncert': '_StringParamAttrWrapper',
'CME_AccelUnit': '_StringParamAttrWrapper',
'CME_AngularWidth': '_StringParamAttrWrapper',
'CME_AngularWidthUnit': '_StringParamAttrWrapper',
'CME_Mass': '_StringParamAttrWrapper',
'CME_MassUncert': '_StringParamAttrWrapper',
'CME_MassUnit': '_StringParamAttrWrapper',
'CME_RadialLinVel': '_StringParamAttrWrapper',
'CME_RadialLinVelMax': '_StringParamAttrWrapper',
'CME_RadialLinVelMin': '_StringParamAttrWrapper',
'CME_RadialLinVelStddev': '_StringParamAttrWrapper',
'CME_RadialLinVelUncert': '_StringParamAttrWrapper',
'CME_RadialLinVelUnit': '_StringParamAttrWrapper',
'EF_AspectRatio': '_StringParamAttrWrapper',
'EF_AxisLength': '_StringParamAttrWrapper',
'EF_AxisOrientation': '_StringParamAttrWrapper',
'EF_AxisOrientationUnit': '_StringParamAttrWrapper',
'EF_FluxUnit': '_StringParamAttrWrapper',
'EF_LengthUnit': '_StringParamAttrWrapper',
'EF_NegEquivRadius': '_StringParamAttrWrapper',
'EF_NegPeakFluxOnsetRate': '_StringParamAttrWrapper',
'EF_OnsetRateUnit': '_StringParamAttrWrapper',
'EF_PosEquivRadius': '_StringParamAttrWrapper',
'EF_PosPeakFluxOnsetRate': '_StringParamAttrWrapper',
'EF_ProximityRatio': '_StringParamAttrWrapper',
'EF_SumNegSignedFlux': '_StringParamAttrWrapper',
'EF_SumPosSignedFlux': '_StringParamAttrWrapper',
'Event_C1Error': '_StringParamAttrWrapper',
'Event_C2Error': '_StringParamAttrWrapper',
'Event_ClippedSpatial': '_StringParamAttrWrapper',
'Event_ClippedTemporal': '_StringParamAttrWrapper',
'Event_Coord1': '_StringParamAttrWrapper',
'Event_Coord2': '_StringParamAttrWrapper',
'Event_Coord3': '_StringParamAttrWrapper',
'Event_CoordSys': '_StringParamAttrWrapper',
'Event_CoordUnit': '_StringParamAttrWrapper',
'Event_MapURL': '_StringParamAttrWrapper',
'Event_MaskURL': '_StringParamAttrWrapper',
'Event_Npixels': '_StringParamAttrWrapper',
'Event_PixelUnit': '_StringParamAttrWrapper',
'Event_Probability': '_StringParamAttrWrapper',
'Event_TestFlag': '_StringParamAttrWrapper',
'Event_Type': '_StringParamAttrWrapper',
'FI_BarbsL': '_StringParamAttrWrapper',
'FI_BarbsR': '_StringParamAttrWrapper',
'FI_BarbsTot': '_StringParamAttrWrapper',
'FI_Chirality': '_StringParamAttrWrapper',
'FI_Length': '_StringParamAttrWrapper',
'FI_LengthUnit': '_StringParamAttrWrapper',
'FI_Tilt': '_StringParamAttrWrapper',
'FL_EFoldTime': '_StringParamAttrWrapper',
'FL_EFoldTimeUnit': '_StringParamAttrWrapper',
'FL_Fluence': '_StringParamAttrWrapper',
'FL_FluenceUnit': '_StringParamAttrWrapper',
'FL_GOESCls': '_StringParamAttrWrapper',
'FL_PeakEM': '_StringParamAttrWrapper',
'FL_PeakEMUnit': '_StringParamAttrWrapper',
'FL_PeakFlux': '_StringParamAttrWrapper',
'FL_PeakFluxUnit': '_StringParamAttrWrapper',
'FL_PeakTemp': '_StringParamAttrWrapper',
'FL_PeakTempUnit': '_StringParamAttrWrapper',
'FRM_Contact': '_StringParamAttrWrapper',
'FRM_HumanFlag': '_StringParamAttrWrapper',
'FRM_Identifier': '_StringParamAttrWrapper',
'FRM_Institute': '_StringParamAttrWrapper',
'FRM_Name': '_StringParamAttrWrapper',
'FRM_ParamSet': '_StringParamAttrWrapper',
'FRM_SpecificID': '_StringParamAttrWrapper',
'FRM_URL': '_StringParamAttrWrapper',
'FRM_VersionNumber': '_StringParamAttrWrapper',
'FreqMaxRange': '_StringParamAttrWrapper',
'FreqMinRange': '_StringParamAttrWrapper',
'FreqPeakPower': '_StringParamAttrWrapper',
'FreqUnit': '_StringParamAttrWrapper',
'IntensMaxAmpl': '_StringParamAttrWrapper',
'IntensMinAmpl': '_StringParamAttrWrapper',
'IntensUnit': '_StringParamAttrWrapper',
'KB_Archivist': '_StringParamAttrWrapper',
'MaxMagFieldStrength': '_StringParamAttrWrapper',
'MaxMagFieldStrengthUnit': '_StringParamAttrWrapper',
'OBS_ChannelID': '_StringParamAttrWrapper',
'OBS_DataPrepURL': '_StringParamAttrWrapper',
'OBS_FirstProcessingDate': '_StringParamAttrWrapper',
'OBS_IncludesNRT': '_StringParamAttrWrapper',
'OBS_Instrument': '_StringParamAttrWrapper',
'OBS_LastProcessingDate': '_StringParamAttrWrapper',
'OBS_LevelNum': '_StringParamAttrWrapper',
'OBS_MeanWavel': '_StringParamAttrWrapper',
'OBS_Observatory': '_StringParamAttrWrapper',
'OBS_Title': '_StringParamAttrWrapper',
'OBS_WavelUnit': '_StringParamAttrWrapper',
'OscillNPeriods': '_StringParamAttrWrapper',
'OscillNPeriodsUncert': '_StringParamAttrWrapper',
'Outflow_Length': '_StringParamAttrWrapper',
'Outflow_LengthUnit': '_StringParamAttrWrapper',
'Outflow_OpeningAngle': '_StringParamAttrWrapper',
'Outflow_Speed': '_StringParamAttrWrapper',
'Outflow_SpeedUnit': '_StringParamAttrWrapper',
'Outflow_TransSpeed': '_StringParamAttrWrapper',
'Outflow_Width': '_StringParamAttrWrapper',
'Outflow_WidthUnit': '_StringParamAttrWrapper',
'PeakPower': '_StringParamAttrWrapper',
'PeakPowerUnit': '_StringParamAttrWrapper',
'RasterScanType': '_StringParamAttrWrapper',
'SG_AspectRatio': '_StringParamAttrWrapper',
'SG_Chirality': '_StringParamAttrWrapper',
'SG_MeanContrast': '_StringParamAttrWrapper',
'SG_Orientation': '_StringParamAttrWrapper',
'SG_PeakContrast': '_StringParamAttrWrapper',
'SG_Shape': '_StringParamAttrWrapper',
'SS_SpinRate': '_StringParamAttrWrapper',
'SS_SpinRateUnit': '_StringParamAttrWrapper',
'Skel_Curvature': '_StringParamAttrWrapper',
'Skel_Nsteps': '_StringParamAttrWrapper',
'Skel_StartC1': '_StringParamAttrWrapper',
'Skel_StartC2': '_StringParamAttrWrapper',
'TO_Shape': '_StringParamAttrWrapper',
'VelocMaxAmpl': '_StringParamAttrWrapper',
'VelocMaxPower': '_StringParamAttrWrapper',
'VelocMaxPowerUncert': '_StringParamAttrWrapper',
'VelocMinAmpl': '_StringParamAttrWrapper',
'VelocUnit': '_StringParamAttrWrapper',
'WaveDisplMaxAmpl': '_StringParamAttrWrapper',
'WaveDisplMinAmpl': '_StringParamAttrWrapper',
'WaveDisplUnit': '_StringParamAttrWrapper',
'WavelMaxPower': '_StringParamAttrWrapper',
'WavelMaxPowerUncert': '_StringParamAttrWrapper',
'WavelMaxRange': '_StringParamAttrWrapper',
'WavelMinRange': '_StringParamAttrWrapper',
'WavelUnit': '_StringParamAttrWrapper'
}
def mk_gen(rest):
""" Generate Misc class. """
ret = ''
ret += '@apply\nclass Misc(object):\n'
for elem in sorted(rest):
ret += ' %s = %s(%r)\n' % (elem, fields[elem], elem)
return ret
def mk_cls(key, used, pad=1, nokeys=True, init=True, name=None, base='EventType'):
if name is None:
name = key
keys = sorted(
[(k, v) for k, v in fields.iteritems() if k.startswith(key)]
)
used.update(set([k for k, v in keys]))
if not keys:
if not nokeys:
raise ValueError
return '%s = EventType(%r)' % (key, name.lower())
ret = ''
ret += '@apply\nclass %s(%s):\n' % (name, base)
for k, v in keys:
ret += ' %s = %s(%r)\n' % (k[len(key) + pad:], v, k)
if init:
ret += ''' def __init__(self):
EventType.__init__(self, %r)''' % name.lower()
return ret
if __name__ == '__main__':
BUFFER = 4096
used = set()
tmpl = (
os.path.join(os.path.dirname(__file__), 'hektemplate.py')
if len(sys.argv) <= 2 else sys.argv[2]
)
dest = (
os.path.join(
os.path.dirname(__file__), os.pardir, 'sunpy', 'net', 'hek',
'attrs.py')
if len(sys.argv) <= 1 else sys.argv[1]
)
if dest == '-':
fd = sys.stdout
else:
fd = open(dest, 'w')
tmplfd = open(tmpl)
while True:
buf = tmplfd.read(BUFFER)
if not buf:
break
fd.write(buf)
fd.write('\n\n')
fd.write('\n\n'.join(mk_cls(evt, used, name=NAMES[evt]) for evt in EVENTS))
fd.write('\n\n')
fd.write('\n\n'.join(mk_cls(evt, used, 0, 0, 0, NAMES[evt], 'object') for evt in OTHER_NOPAD))
fd.write('\n\n')
fd.write('\n\n'.join(mk_cls(evt, used, 1, 0, 0, NAMES[evt], 'object') for evt in OTHER))
fd.write('\n\n')
fd.write(mk_gen(set(fields) - used))
```
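For orientation, the generated `attrs.py` ends up containing blocks roughly of the following shape (abridged; `@apply` is the Python 2 built-in this generator relies on, and `EventType`/`_StringParamAttrWrapper` come from the template file):
```python
@apply
class AR(EventType):
    CompactnessCls = _StringParamAttrWrapper('AR_CompactnessCls')
    IntensKurt = _StringParamAttrWrapper('AR_IntensKurt')
    # ... one attribute per AR_* entry in `fields` ...
    def __init__(self):
        EventType.__init__(self, 'ar')
```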
#### File: sunpy/tools/update_zenodo.py
```python
import json
import subprocess
def remove_initials(name):
# Remove initials for a string name
# Assumes names/initials are all separated by a single space
new_name = []
for n in name.split(' '):
if len(n) == 2 and n[1] == '.':
continue
new_name.append(n)
return ' '.join(new_name)
authors = subprocess.check_output(['git shortlog -s -n'], shell=True)
authors = authors.decode('utf-8')
# 1 author per line
authors = authors.split('\n')[:-1]
# Use tab between number of commits and name to get name
authors = [author.split('\t')[1] for author in authors]
# Remove initials
authors = [remove_initials(auth) for auth in authors]
# List of authors to ignore because they are bots
manual_ignore = ['codetriage-readme-bot']
authors = [auth for auth in authors if auth not in manual_ignore]
# Get list of current authors in zenodo.yml
with open('.zenodo.json') as zenodo_file:
data = json.load(zenodo_file)
creators = data['creators']
already_auth = [auth['name'] for auth in creators]
# Remove any initials
already_auth = [remove_initials(auth) for auth in already_auth]
new_creators = []
# Loop through all the current authors
for author in authors:
# If already in .zenodo.json, take the entry to preserve ORCID and affiliation
if author in already_auth:
new_creators.append(creators[already_auth.index(author)])
else:
new_creators.append({'name': author})
# Add in anyone currently in .zenodo.json, but who hasn't committed to the repository
for author in list(set(already_auth) - set(authors)):
new_creators.append(creators[already_auth.index(author)])
data['creators'] = new_creators
with open('.zenodo.json', 'w') as zenodo_file:
json.dump(data, zenodo_file, indent=' ', ensure_ascii=False)
``` |
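A quick check of the `remove_initials` helper above (the names are made up); it assumes the function is importable from this script:
```python
assert remove_initials("John A. Doe") == "John Doe"
assert remove_initials("J. R. R. Tolkien") == "Tolkien"
```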
{
"source": "johan456789/netflix-to-srt",
"score": 3
} |
#### File: johan456789/netflix-to-srt/stack_srt.py
```python
from __future__ import annotations
import sys
import pysrt
from pysrt import SubRipFile
from tqdm import tqdm
def stack_subs(subs: SubRipFile) -> SubRipFile:
remove_list = [] # list of unwanted indexes
sub_index = subs[0].index # existing starting index
# stack subs with the same start and end time
prev_start = subs[-1].start # get a valid time for comparison
prev_end = subs[-1].end
append_index = -1
for index, sub in enumerate(subs):
cur_start = sub.start
cur_end = sub.end
if cur_start == prev_start and cur_end == prev_end:
subs[append_index].text += '\n' + sub.text
remove_list.append(index)
else:
append_index = index
prev_start = cur_start
prev_end = cur_end
    # remove the subs that were merged above, in reverse order
for index in remove_list[::-1]:
del subs[index]
# reindex remaining subs
for index in range(len(subs)):
subs[index].index = index + sub_index
return subs
if __name__ == "__main__":
if len(sys.argv) <= 1:
print('Usage: python stack_srt.py file1.txt [[file2.txt]...]')
exit(1)
for f in tqdm(sys.argv[1:]):
print(f)
subs = pysrt.open(f)
subs = stack_subs(subs)
subs.save(f, encoding='utf-8')
``` |
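A small illustration of what `stack_subs` does, assuming it is importable from the script above; the cue timings and text are invented:
```python
import pysrt

# Three cues; the last two share start/end times and get stacked into one.
subs = pysrt.SubRipFile()
subs.append(pysrt.SubRipItem(index=1, start=pysrt.SubRipTime(0, 0, 1),
                             end=pysrt.SubRipTime(0, 0, 2), text="- Hello."))
subs.append(pysrt.SubRipItem(index=2, start=pysrt.SubRipTime(0, 0, 3),
                             end=pysrt.SubRipTime(0, 0, 4), text="- Hi."))
subs.append(pysrt.SubRipItem(index=3, start=pysrt.SubRipTime(0, 0, 3),
                             end=pysrt.SubRipTime(0, 0, 4), text="- How are you?"))

stacked = stack_subs(subs)
print(len(stacked))        # 2
print(stacked[1].text)     # "- Hi.\n- How are you?"
```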
{
"source": "Johan809/P3-ML",
"score": 3
} |
#### File: P3-ML/scripts/ImageIdentify.py
```python
import face_recognition as faces
from PIL import Image, ImageDraw
import os
#create array of encodings and names
know_encode = []
know_names = []
# load the known faces and compute their encodings
def readImages():
obj = os.scandir('./img/known')
for entry in obj:
n = entry.name
know_names.append(n[:-4])
img = faces.load_image_file('./img/known/'+n)
i_encode = faces.face_encodings(img)[0]
know_encode.append(i_encode)
        print(n[:-4] + ' processed')
readImages()
#load test image
test_image1 = faces.load_image_file('./img/groups/vladimir-putin-bashar-al-asad-y-barack-obama-1.jpg')
# find faces in test images
face_location1 = faces.face_locations(test_image1)
face_encode1 = faces.face_encodings(test_image1, face_location1)
# convert to PIL format
pil_image1 = Image.fromarray(test_image1)
#create a draw instance
draw1 = ImageDraw.Draw(pil_image1)
#loop the faces in images
for(top, right, bottom, left), face_encode1 in zip(face_location1, face_encode1):
matches = faces.compare_faces(know_encode, face_encode1)
name = "<NAME>"
if True in matches:
first_match_index = matches.index(True)
name = know_names[first_match_index]
#draw box
draw1.rectangle(((left, top), (right, bottom)), outline=(0,0,0))
#draw label
text_width, text_height = draw1.textsize(name)
draw1.rectangle(((left, bottom - text_height - 10), (right, bottom)), fill=(0,0,0), outline=(0,0,0))
draw1.text((left+6, bottom - text_height - 5), name, fill=(255,255,255,255))
#display image
pil_image1.show()
del draw1
``` |
{
"source": "Johan809/web_final",
"score": 3
} |
#### File: web_final/API/data.py
```python
from datetime import datetime
from peewee import *
import uuid
db = SqliteDatabase('ITLAMED.db')
ZodiacalSigns = [
'aries', 'libra', 'tauro',
'escorpio', 'geminis', 'sagitario',
'cancer', 'capricornio', 'leo',
'acuario', 'virgo', 'piscis'
]
class DBModel(Model):
class Meta:
database = db
class Doctor(DBModel):
_id = AutoField()
name = CharField()
email = CharField()
password = CharField()
class Patient(DBModel):
_id = AutoField()
dr = ForeignKeyField(Doctor, backref='h_doctor')
    id_card = CharField()  # ID card number (cédula)
    photo = BlobField()  # convert the image to base64 and store it in the db
name = CharField()
lastname = CharField()
blood_type = CharField()
email = CharField()
gender = CharField()
b_date = DateField()
allergies = CharField()
class Consultation(DBModel):
_id = AutoField()
dr = ForeignKeyField(Doctor, backref='cons')
patient = ForeignKeyField(Patient, backref='patients')
date = DateField()
motive = CharField()
n_insurance = CharField()
p_amount = FloatField()
diagnosis = TextField()
note = TextField()
    photo = BlobField()  # same as the patient's photo
class Sesion(DBModel):
_id = AutoField()
token = CharField()
user = ForeignKeyField(Doctor, backref='user')
def getOld(nacDate: str):
today = datetime.now()
birthday = datetime.strptime(nacDate, "%Y-%m-%d")
return today.year - birthday.year - ((today.month, today.day) < (birthday.month, birthday.day))
def getSign(nacDate: datetime.date):
month = nacDate.month
day = nacDate.day
if ((day >= 21 and month == 3) or (day <= 20 and month == 4)):
sign = 0
elif ((day >= 24 and month == 9) or (day <= 23 and month == 10)):
sign = 1
elif ((day >= 21 and month == 4) or (day <= 21 and month == 5)):
sign = 2
elif ((day >= 24 and month == 10) or (day <= 22 and month == 11)):
sign = 3
elif ((day >= 22 and month == 5) or (day <= 21 and month == 6)):
sign = 4
elif ((day >= 23 and month == 11) or (day <= 21 and month == 12)):
sign = 5
elif ((day >= 21 and month == 6) or (day <= 23 and month == 7)):
sign = 6
elif ((day >= 22 and month == 12) or (day <= 20 and month == 1)):
sign = 7
elif ((day >= 24 and month == 7) or (day <= 23 and month == 8)):
sign = 8
elif ((day >= 21 and month == 1) or (day <= 19 and month == 2)):
sign = 9
elif ((day >= 24 and month == 8) or (day <= 23 and month == 9)):
sign = 10
elif ((day >= 20 and month == 2) or (day <= 20 and month == 3)):
sign = 11
return ZodiacalSigns[sign].capitalize()
def getDate(_date: str):
if "-" in _date:
withDash = int(_date.split('-')[0])
if withDash > 1000:
resultDate = datetime.strptime(_date, "%Y-%m-%d")
return resultDate
elif withDash <= 31:
resultDate = datetime.strptime(_date, "%d-%m-%Y")
return resultDate
elif "/" in _date:
withSlash = int(_date.split('/')[0])
if withSlash > 1000:
resultDate = datetime.strptime(_date, "%Y/%m/%d")
return resultDate
elif withSlash <= 31:
resultDate = datetime.strptime(_date, "%d/%m/%Y")
return resultDate
def generate_token():
return str(uuid.uuid4()).replace('-', '')
def serverAnswer(status: bool, msg: str, args={}):
_arg = False
if args != {}:
_arg = True
a = {'ok': status, 'msg': msg, 'arg': args}
b = {'ok': status, 'msg': msg}
return a if _arg else b
``` |
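A few illustrative calls for the date helpers above (the dates are made up); they assume the functions are imported from this module:
```python
from datetime import datetime

print(getOld("1990-06-15"))            # age in whole years as of today
print(getSign(datetime(1990, 6, 15)))  # 'Geminis'
print(getDate("15/06/1990"))           # datetime.datetime(1990, 6, 15, 0, 0)
```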
{
"source": "johan92/fpga-risc-16",
"score": 3
} |
#### File: fpga-risc-16/tb/asm.py
```python
from logic import *
import re
ASM_FILE = "t.asm"
OUT_FILE = "t.bin"
opcodes_d = {
"NOP" : {
"opbin" : logic( int( "0000", 2 ) ),
"type" : "NOP"
},
"ADD" : {
"opbin" : logic( int( "0001", 2 ) ),
"type" : "RRR"
},
"SUB" : {
"opbin" : logic( int( "0010", 2 ) ),
"type" : "RRR"
},
"AND" : {
"opbin" : logic( int( "0011", 2 ) ),
"type" : "RRR"
},
"OR" : {
"opbin" : logic( int( "0100", 2 ) ),
"type" : "RRR"
},
"XOR" : {
"opbin" : logic( int( "0101", 2 ) ),
"type" : "RRR"
},
"ADDI" : {
"opbin" : logic( int( "1001", 2 ) ),
"type" : "RRI"
},
"LW" : {
"opbin" : logic( int( "1010", 2 ) ),
"type" : "RRI"
},
"SW" : {
"opbin" : logic( int( "1011", 2 ) ),
"type" : "RRI"
},
}
def print_opcode( opcode_str, opcode_d ):
print "%5s: %4x: %3s" % ( opcode_str, opcode_d["opbin"], opcode_d["type"] )
def print_opcodes( ):
for i in opcodes_d:
print_opcode( i, opcodes_d[i] )
def find_opcode( possible_opcode ):
for i in opcodes_d:
if possible_opcode == i:
return opcodes_d[i]
return None
def regstr_to_int( _str ):
p = re.compile("R[0-7]$")
if p.match(_str):
return int( _str[1] )
return None
def rrr_parser( sp_line, opcode_d ):
    if len( sp_line ) > 3:
        instr = logic(0)
        r = [0, 0, 0]
        for i in xrange( 1, 4 ):
            r_ = regstr_to_int( sp_line[i] )
            # R0 maps to integer 0, so compare against None explicitly
            if r_ is not None:
                r[ i - 1 ] = r_
            else:
                return "Wrong reg name"
        instr[15:12] = int( opcode_d["opbin"] )
        instr[11:9] = r[0]
        instr[8:6] = r[1]
        instr[2:0] = r[2]
        return instr
    else:
        return "Too short"
def rri_parser( sp_line, opcode_d ):
    if len( sp_line ) > 3:
        instr = logic(0)
        r = [0, 0]
        for i in xrange( 1, 3 ):
            r_ = regstr_to_int( sp_line[i] )
            # R0 maps to integer 0, so compare against None explicitly
            if r_ is not None:
                r[ i - 1 ] = r_
            else:
                return "Wrong reg name"
        #TODO: check imm values
        imm = int( sp_line[3] )
        instr[15:12] = int( opcode_d["opbin"] )
        instr[11:9] = r[0]
        instr[8:6] = r[1]
        instr[5:0] = imm
        return instr
    else:
        return "Too short"
def instr_l_to_file( fname, instr_l ):
f = open( fname, "w" )
for i in instr_l:
wstr = "{:016b}".format( int( i ) )
print wstr
f.write("%s\n" % wstr )
def asm_parser( fname ):
f = open( fname )
lines_raw = f.readlines();
f.close();
line_num = 0
instr_l = []
for s in lines_raw:
line_num += 1
tmp = s.strip().split()
#print tmp
opcode = find_opcode( tmp[0] )
if opcode:
print tmp
instr = None
if opcode["type"] == "RRR":
instr = rrr_parser( tmp, opcode )
elif opcode["type"] == "RRI":
instr = rri_parser( tmp, opcode )
if ( type( instr ) == str ):
parse_msg( instr, s, line_num )
else:
#print "%s" % bin( int( instr ) )
instr_l.append( instr )
else:
parse_msg("Unknown opcode", s, line_num )
return instr_l
def parse_msg( msg, line, line_num ):
# -1 for deleting \n at the end
print "Error at line %2d: \"%s\": %s" %( line_num, line[:-1], msg )
if __name__ == "__main__":
print_opcodes( )
instr_l = asm_parser( ASM_FILE )
instr_l_to_file( OUT_FILE, instr_l )
``` |
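A worked example of the RRR encoding produced by `rrr_parser` above, computed by hand for the line `ADD R1 R2 R3` (opcode `ADD` = `0001`, rA in bits 11:9, rB in bits 8:6, rC in bits 2:0):
```python
# "ADD R1 R2 R3" under the bit layout used by rrr_parser
opcode, ra, rb, rc = 0b0001, 1, 2, 3
instr = (opcode << 12) | (ra << 9) | (rb << 6) | rc
print("{:016b}".format(instr))  # 0001001010000011
print(hex(instr))               # 0x1283
```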
{
"source": "johanahlqvist/poe-neural-pricer",
"score": 2
} |
#### File: poe-neural-pricer/data_retriever/dataretriever.py
```python
import sys
import time
from os.path import dirname, realpath
import multiprocessing as mp
import numpy as np
import configparser
import json
from .filter import Filter
from .retriever import Retriever
from .currency_converter import CurrencyConverter
from .encoder import Encoder
from .get_next_id import get_next_id
DEFAULT_LABEL_FILE = '\labels\classes'
DEFAULT_DATA_DIR = '\saved_data\\'
DEFAULT_ENCODED_DATA_DIR = '\encoded_data'
PULLS_PER_SAVE = 10
SIMULTANEOUS_REQUESTERS = 6
TIME_BETWEEN_REQUESTS = 1
class DataRetriever:
def __init__(self):
self.location = dirname(realpath(__file__))
self.config = configparser.ConfigParser()
self.config.read('config.ini')
self.currencyconverter = CurrencyConverter(league='Harbinger')
self.filter = Filter(self.currencyconverter)
self.retriever = Retriever()
self.encoder = Encoder(self._get_classes_path())
def collect(self, pulls, start_id):
pool = mp.Pool(SIMULTANEOUS_REQUESTERS)
next_id = start_id
filtered_item_count, ips = 0, 0
start_time = time.time()
filtered_data = []
print('Initiating retrieving..')
for i in range(pulls):
            if next_id is None:
                print('No more data to fetch, quitting.')
                break
next_ids = self._request_ids(next_id)
next_id = next_ids[-1]
data = self._request_data(next_ids, pool)
if data == None:
print('We reached the end of the stash updates. Exiting.')
sys.exit(0)
X_Y = self.filter.filter_items(data)
filtered_data.extend(X_Y)
self.encoder.fit([item_value_tuple[0] for item_value_tuple in X_Y])
filtered_item_count += len(X_Y)
ips = filtered_item_count/(time.time()-start_time)
if i != 0 and i % PULLS_PER_SAVE == 0:
np.save('%s\\%s\\%s.npy' % (self.location, DEFAULT_DATA_DIR, next_id), np.array(filtered_data))
filtered_data = []
print('Retriever saved data. Requested %s/%s pages per worker and collected %s eligible items at %.1f items per second'
% (i, pulls, filtered_item_count, ips))
print('Last retrieved next_change_id was: %s' % next_id)
print('Retriever finished. Requested %s pages per worker and collected %s eligible items at %.1f items per second'
% (i, filtered_item_count, ips))
def encode(self, files):
for i in range(len(files)):
print('Encoding file %s' % (files[i]))
filtered_data = np.load('%s%s%s' % (self.location, DEFAULT_DATA_DIR, files[i]))
encoded_data = self.encoder.encode(filtered_data)
np.save(DEFAULT_ENCODED_DATA_DIR + files[i], encoded_data)
def _request_ids(self, start_id):
id_list = [start_id]
for i in range(SIMULTANEOUS_REQUESTERS):
id_list.append(get_next_id(id_list[i]))
return id_list[1:]
def _request_data(self, next_ids, pool):
base_request_time = time.time()
worker_args = []
for i in range(SIMULTANEOUS_REQUESTERS):
worker_args.append((next_ids[i], base_request_time + i*TIME_BETWEEN_REQUESTS))
data = pool.map(RequestWorker.request_data, worker_args)
if None in data:
return None
merged_data = [item for sublist in data for item in sublist]
return merged_data
def _get_config_value(self, value):
if ',' in value:
return value.split(',')
return value
def _get_classes_path(self):
value = self.config['encoder']['ClassesFile']
if value is '':
print('Notice: label class file not set, using default.')
return self.location + DEFAULT_LABEL_FILE + '.npy'
else:
return self.location + value + '.npy'
class RequestWorker:
def request_data(args):
next_id = args[0]
request_time = args[1]
ret = Retriever()
return ret.retrieve(next_id, request_time)
``` |
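A hypothetical driver for the retriever above. The module path follows the file layout shown here, but the `start_id` is a placeholder (a real `next_change_id` has to come from the stash-tab API), and a valid `config.ini` plus the data directories are assumed to exist:
```python
from data_retriever.dataretriever import DataRetriever

retriever = DataRetriever()
retriever.collect(pulls=20, start_id="<next_change_id from the stash-tab API>")
# retriever.encode(["<name of a saved .npy file>"])
```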
{
"source": "johanaluna/DataScience_summary",
"score": 4
} |
#### File: DataScience_summary/AlgoExpert/FindX_NumSum.py
```python
def find2NumsSum(array, target):
i = 0
dict_sums = {}
sln = []
while i < len(array):
if array[i] not in dict_sums.values():
dict_sums[array[i]] = target - array[i]
else:
sln.append([ array[i], target - array[i] ])
i += 1
return(sln)
# Find 3 numbers that sum target
def find3Numbers(array, target):
arr_size = len(array)
for i in range(0, arr_size-1):
# Find pair in subarray A[i + 1..n-1]
# with sum equal to sum - A[i]
s = set()
curr_sum = target - array[i]
for j in range(i + 1, arr_size):
if (curr_sum - array[j]) in s:
print("Triplet is", array[i],
", ", array[j], ", ", curr_sum-array[j])
return True
s.add(array[j])
print(s)
return False
# Find 4 numbers that sum target
def find4NumsSum(array, target):
pass
array2 = [1,2,3,4,5,6,7,8]
# print(find2NumsSum(array2, 9))
print(find3Numbers(array2, 13))
```
#### File: DataScience_summary/AlgoExpert/longestPeak.py
```python
def longestPeak(array):
# Write your code here.
i = 1
maxi=0
peaks=[]
while i < len(array)-1:
if array[i] > array[i-1] and array[i] > array[i+1]:
peaks.append(i)
i += 1
maxi = 0
for i in peaks:
left = i-1
right = i+1
while left > 0 and array[left] > array[left-1]:
left -=1
while right < len(array)-1 and array[right] > array[right+1]:
right +=1
if (right -left)+1 > maxi :
maxi = (right -left)+1
return maxi
array= [0,1,2,3,0,14,1]
print(longestPeak(array))
```
#### File: DataScience_summary/CodeSignal/areEquallyStrong.py
```python
def areEquallyStrong(yourLeft, yourRight, friendsLeft, friendsRight):
# conditions
c1 = (yourLeft == friendsLeft) and (yourRight == friendsRight)
c2 = (yourLeft == friendsRight) and (yourRight ==friendsLeft)
return(True if c1 or c2 else False)
yourLeft = 10
yourRight = 15
friendsLeft = 15
friendsRight = 10
print(areEquallyStrong(yourLeft, yourRight, friendsLeft, friendsRight))
```
#### File: DataScience_summary/CodeSignal/arrayChange.py
```python
def arrayChange(inputArray):
# initialize pointer
i = 0
# intialize moves
moves = 0
# go throught all the array until one position before end
while i < len(inputArray)-1:
# if the next position is smaller or equal to the current position
if inputArray[i] >= inputArray[i+1]:
# save the actual value of the next position
pivot = inputArray[i+1]
# make the next position equal to bigger by one of the actual position
inputArray[i+1] = inputArray[i]+1
# calculate all moves
moves += inputArray[i+1] - pivot
i += 1
return moves
inputArray = [1, 1, 1]
print(arrayChange(inputArray))
```
#### File: DataScience_summary/CodeSignal/commonCharacterCount.py
```python
from collections import Counter
def commonCharacterCount(s1, s2):
dictS1 = Counter(s1)
counting = 0
for string in s2:
if string in dictS1:
if dictS1[string] > 0:
counting += 1
dictS1[string] -= 1
return counting
s1 = "aabcc"
s2 = "adcaa"
print(commonCharacterCount(s1, s2))
```
#### File: DataScience_summary/CodeSignal/sortByHeight.py
```python
def sortByHeight(a):
people = []
for i in a:
if i != -1:
people.append(i)
people.sort()
i = j = 0
while i < len(a):
if a[i] != -1:
a[i] = people[j]
j += 1
i += 1
return a
a= [-1, 150, 190, 170, -1, -1, 160, 180]
print(sortByHeight(a))
```
#### File: DataScience_summary/CodeSignal/stringRearragement.py
```python
from itertools import permutations
def stringsRearrangement(inputArray):
permu = permutations(inputArray)
for p in permu:
changes = [changer(p[i],p[i+1]) for i in range(len(p)-1)]
if all(changes):
return True
return False
def changer(a,b):
changes = 0
for i in range(len(a)):
if a[i] != b[i]:
changes += 1
if changes ==1:
return True
return False
if __name__ == "__main__":
a= ["aba", "bbb", "bab"]
print(stringsRearrangement(a))
```
#### File: DataScience_summary/LeetCode/checkIP.py
```python
class Solution:
def validIPAddress(self, IP: str) -> str:
iplist = IP.split(".")
if len(iplist) == 4:
for ipX in iplist:
if len(ipX) == 0 or (len(ipX) > 1 and ipX[0] == "0"):
return "Neither"
if ipX.isnumeric()==False or int(ipX) > 255:
return "Neither"
return "IPv4"
iplist = IP.split(":")
if len(iplist) == 8:
symbols = "0123456789abcdefABCDEF"
for ipX in iplist:
if len(ipX) == 0 or len(ipX) > 4:
return "Neither"
for elem in ipX:
if elem not in symbols:
return "Neither"
return "IPv6"
return "Neither"
sln = Solution()
print(sln.validIPAddress("256.256.256.256"))
print(sln.validIPAddress("255.0.1.2"))
print(sln.validIPAddress("2001:0db8:85a3:0:0:8A2E:0370:7334"))
```
#### File: DataScience_summary/LeetCode/remove_duplicates.py
```python
def removeDuplicates(nums):
if len(nums)<2:
return len(nums)
# print(nums)
i = 1
while i < len(nums):
# print("len nums ",len(nums))
# print("i",i,"\n")
if nums[i] != nums[i-1]:
i += 1
# print("i+1",i,"\n")
else:
nums.pop(i)
# print("pop",nums,"\n")
return len(nums)
if __name__ == '__main__':
nums = [0,0,1,1,1,2,2,3,3,4]
print(removeDuplicates(nums))
```
#### File: DataScience_summary/LeetCode/twoCitySchedCost.py
```python
def twoCitySchedCost(costs):
costDifferences = []
total = 0
for cityA, cityB in costs:
total += cityA # sum A
costDifferences.append(cityA - cityB)
costDifferences.sort()
biggest_diff= costDifferences[len(costs)//2:]
total = total - sum(biggest_diff)
return total
costs = [[259,770],[448,54],[926,667],[184,139],[840,118],[577,469]]
# costs = [[10,20],[30,200],[400,50],[30,20]]
print(twoCitySchedCost(costs))
#1859
```
#### File: DataScience_summary/LeetCode/uniquePath.py
```python
def uniquePaths(m, n):
path = [1 for i in range(m)]
for i in range(n - 1):
for j in range(1, m):
path[j] += path[j - 1]
return path[m - 1]
if __name__ == "__main__":
m=7
n=3
print(uniquePaths(m, n))
```
#### File: DataScience_summary/Others/decimalToBinary.py
```python
def DecBinary(n):
return((bin(n).replace("0b",""))[::-1])
def DecBinary2(n):
return (format(n,"b")[::-1])
if __name__ == "__main__":
print(DecBinary(8))
print(DecBinary2(8))
```
#### File: DataScience_summary/Others/dll.py
```python
class Node:
def __init__(self, value):
self.value = value
self.prev = None
self.next = None
# Feel free to add new properties and methods to the class.
class DoublyLinkedList:
def __init__(self):
self.head = None
self.tail = None
def setHead(self, node):
if self.head == None:
self.head = node
self.tail = node
else:
current = self.head
node.next = current
current.prev = node
self.head = node
def setTail(self, node):
if self.tail == None:
self.head = node
self.tail = node
else:
current = self.head
while current.next:
current = current.next
current.next = node
node.prev = current
self.tail = node
def insertBefore(self, node, nodeToInsert):
if self.head.value == node:
self.setHead(nodeToInsert)
else:
current = self.head
while current:
if current.value == node:
current.prev.next = nodeToInsert
nodeToInsert.next = current
current.prev = nodeToInsert
current = current.next
def insertAfter(self, node, nodeToInsert):
current = self.head
while current.next:
if current.value == node:
nodeToInsert.prev = current
nodeToInsert.next = current.next
current.next = nodeToInsert
current = current.next
self.setTail(nodeToInsert)
def insertAtPosition(self, position, nodeToInsert):
index = 0
if self.head is None:
self.setHead(nodeToInsert)
else:
current = self.head
while current:
if position == index :
self.insertBefore(current.value, nodeToInsert)
index += 1
current = current.next
def removeNodesWithValue(self, value):
# Write your code here.
pass
def remove(self, node):
# Write your code here.
pass
def containsNodeWithValue(self, value):
# Write your code here.
pass
def printlist(self):
currentNode = self.head
completeList = []
while currentNode:
completeList.append(currentNode.value)
currentNode = currentNode.next
return completeList
dll = DoublyLinkedList()
a = Node(7)
dll.setHead(Node(7))
dll.setHead(Node(8))
print(dll.printlist())
dll.setTail(Node(9))
dll.setTail(Node(3))
print(dll.printlist())
dll.insertAfter(3, Node(4))
print(dll.printlist())
print("insert before")
dll.insertBefore(8, Node(2))
print(dll.printlist())
dll.insertBefore(4, Node(1))
print(dll.printlist())
print("At position ")
dll.insertAtPosition(6, Node(6))
print(dll.printlist())
```
#### File: DataScience_summary/Others/findOdds.py
```python
import unittest
def merge_ranges( meetings ):
## Meeting(start, end)
#sort the array
meetings.sort()
# make a pointer in the first position
i = 0
# if we receive just one or an empty metting return the array
if len( meetings )< 2:
return meetings
# go throught the array until the last position minus one,
# because we are check current vs the next position
while i < len( meetings ) - 1:
# if the if the end hour of our current position is later than
# the starting hour of my next meeting
if meetings[ i ][ 1 ] >= meetings[ i+1 ][ 0 ] :
# save the start hour of my current position
start = meetings[i][0]
# save the later hour between the end hour of my current and next meeting as my end
end = max(meetings[ i ][ 1 ], meetings[ i + 1 ][ 1 ])
# save both start and end hour in the first position of my meeting array
meetings[ i ]= ( start, end )
# delete the next meeting because in the range of the actual position the we melted before
del(meetings[ i + 1 ])
else:
i += 1
return(meetings)
# # Tests
class Test(unittest.TestCase):
def test_meetings_overlap(self):
actual = merge_ranges([(1, 3), (2, 4)])
expected = [(1, 4)]
self.assertEqual(actual, expected)
def test_meetings_touch(self):
actual = merge_ranges([(5, 6), (6, 8)])
expected = [(5, 8)]
self.assertEqual(actual, expected)
def test_meeting_contains_other_meeting(self):
actual = merge_ranges([(1, 8), (2, 5)])
expected = [(1, 8)]
self.assertEqual(actual, expected)
def test_meetings_stay_separate(self):
actual = merge_ranges([(1, 3), (4, 8)])
expected = [(1, 3), (4, 8)]
self.assertEqual(actual, expected)
def test_multiple_merged_meetings(self):
actual = merge_ranges([(1, 4), (2, 5), (5, 8)])
expected = [(1, 8)]
self.assertEqual(actual, expected)
def test_meetings_not_sorted(self):
actual = merge_ranges([(5, 8), (1, 4), (6, 8)])
expected = [(1, 4), (5, 8)]
self.assertEqual(actual, expected)
def test_one_long_meeting_contains_smaller_meetings(self):
actual = merge_ranges([(1, 10), (2, 5), (6, 8), (9, 10), (10, 12)])
expected = [(1, 12)]
self.assertEqual(actual, expected)
def test_sample_input(self):
actual = merge_ranges([(0, 1), (3, 5), (4, 8), (10, 12), (9, 10)])
expected = [(0, 1), (3, 8), (9, 12)]
self.assertEqual(actual, expected)
unittest.main(verbosity=2)
```
#### File: DataScience_summary/Others/mergeSort.py
```python
def mergeSort(listNumbers):
if len(listNumbers) > 1:
# split the list
left = listNumbers[:len(listNumbers)//2]
right = listNumbers[len(left):]
# recursion
left = mergeSort(left)
right = mergeSort(right)
#create array to save the list sorted
listNumbers = []
# while both sides has numers:
while (len(left) and len(right)) > 0:
if left[0] < right[0]:
listNumbers.append(left[0])
left.pop(0)
else:
listNumbers.append(right[0])
right.pop(0)
for i in left:
listNumbers.append(i)
for i in right:
listNumbers.append(i)
return listNumbers
listNumbers = [5,4,1,8,7,2,6,3]
print(mergeSort(listNumbers))
``` |
{
"source": "johanaluna/lambdata",
"score": 3
} |
#### File: johanaluna/lambdata/addmission.py
```python
import random
class Student():
def __init__(self, name, age=0,precourse_scores=0,passing_score=3.5,applied='yes'):
self.name=name
self.age= random.randint(18,56)
self.precourse_scores= random.sample(range(1,6),5)
self.passing_score=passing_score
self.applied=applied
```
#### File: lambdata/lambdata_johanaluna/tryme2.py
```python
import pandas
import numpy
from sklearn.model_selection import train_test_split
class Check_Data():
def __init__(self, df, name_column_target):
self.df = df
self.name_column_target = name_column_target
# function to check the null in a data frame and report how many nulls it found
def reportnulls(self):
"""
Takes a data frame and check de nulls and sum
the resutls and organizes them from highest to lowest
"""
self.null_counts = self.df.isnull().sum().sort_values(ascending=False)
# return count of null values
return self.null_counts
"""
function to split the data into train, validation and test
this function split the data in 80% 20%
that means that the target corresponds to 20%
of the complete data frame
"""
def splitdata(self):
print('shape of your data frame: ', self.df.shape)
# Define X and y
self.X = self.df.drop(columns=self.name_column_target)
self.y = self.df[self.name_column_target]
# we need to do 2 splits
# 1.(Takes X and y into X_trainval, X_test, y_trainval, y_test)
self.X_trainval, self.X_test, self.y_trainval, self.y_test = train_test_split(
self.X, self.y, train_size=0.80, test_size=0.20, random_state=42)
# 2.(Takes X_trainval, y_trainval and split data
# into X_train, X_val, y_train, y_val)
self.X_train, self.X_val, self.y_train, self.y_val = train_test_split(
self.X_trainval, self.y_trainval, train_size=0.80,
test_size=0.20, random_state=42)
# Return the results of the split
return (self.X_train, self.y_train, self.X_val, self.y_val, self.X_test, self.y_test)
``` |
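A quick sketch of how `Check_Data` might be used, with an invented toy DataFrame and target column:
```python
import pandas as pd

df = pd.DataFrame({"feature": range(100), "target": [0, 1] * 50})
checker = Check_Data(df, "target")
print(checker.reportnulls())

X_train, y_train, X_val, y_val, X_test, y_test = checker.splitdata()
print(X_train.shape, X_val.shape, X_test.shape)  # (64, 1) (16, 1) (20, 1)
```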
{
"source": "johanaluna/subreddit-finder",
"score": 3
} |
#### File: johanaluna/subreddit-finder/application.py
```python
from flask import Flask, render_template, request, url_for, redirect, jsonify
from flask_cors import CORS
from decouple import config
import joblib
from model import *
import pandas as pd
application = Flask(__name__,static_url_path="/static/")
CORS(application)
loadcv = joblib.load('models/tf.joblib')
loaddf = joblib.load('models/tfarray.joblib')
loaddf = loaddf.todense()
@application.route('/')
def index():
return render_template('index.html')
@application.route('/subreddit')
def search():
subreddit_input = request.args.get('title') + ' ' + request.args.get('content')
data = transform_get(subreddit_input, loadcv, loaddf)
# print(data)
res = get_subreddit_info(data)
# print(res)
return(render_template('result.html', res=res))
@application.route('/test')
def vals():
res = get_subreddit_info([1,6,8,5])
print(res)
return(jsonify({'data':res}))
# run the application.
if __name__ == "__main__":
"Entry point for the falsk app"
application.debug = True
application.run()
``` |
{
"source": "johananlai/integrations-extras",
"score": 2
} |
#### File: datadog_checks/twitchtv/twitchtv.py
```python
import requests
import simplejson as json
from six.moves.urllib.parse import urljoin
from datadog_checks.checks import AgentCheck
class TwitchtvCheck(AgentCheck):
CHECK_NAME = 'twitchtv'
def __init__(self, name, init_config, agentConfig, instances=None):
super(TwitchtvCheck, self).__init__(name, init_config, agentConfig, instances)
def check(self, instance):
# parse config fields
self._validate_instance(instance)
api_url = instance['api_url']
client_id = instance['client_id']
channels = instance.get("channels", [])
# get channel metrics from API
payload = {}
tags = {}
try:
payload = self._get_channel_data(instance, api_url, client_id, channels)
tags = self._get_game_tags(instance, api_url, client_id, payload)
        except Exception as e:
self.log.error("Failed to get metrics with error: {}".format(e))
# send to DD
try:
self._report_channel_metrics(instance, payload, tags)
except Exception, e:
self.log.error("Failed to report channel metrics with error: {}".format(e))
# get follower metrics from API
users_payload = {}
follows = {}
try:
users_payload = self._get_user_data(instance, api_url, client_id, channels)
follows = self._get_all_follows(instance, api_url, client_id, users_payload)
        except Exception as e:
self.log.error("Failed to get user follows with error: {}".format(e))
# send to DD
try:
self._report_follows_metrics(instance, follows)
except Exception, e:
self.log.error("Failed to report follows metrics with error: {}".format(e))
def _validate_instance(self, instance):
if any([x for x in ['api_url', 'client_id', 'channels'] if x not in instance]):
raise Exception("Missing 'api_url', 'client_id', or 'channels' in config")
def _report_channel_metrics(self, instance, payload, tags):
metric_name = 'twitchtv.live.viewers'
for ch in payload['data']:
self.gauge(metric_name, ch['viewer_count'],
tags=instance.get('tags', []) +
['channel:' + ch['user_name']] +
['language:' + ch['language']] +
['game:' + tags[ch['user_name']]])
def _report_follows_metrics(self, instance, follows):
metric_name = 'twitchtv.followers'
for ch, total in follows.items():
self.gauge(metric_name, total,
tags=instance.get('tags', []) +
['channel:' + ch])
def _get_channel_data(self, instance, api_url, client_id, channels):
path = "streams"
headers = {'Client-ID': client_id}
params = [('user_login', ch) for ch in channels]
r = requests.get(urljoin(api_url, path), headers=headers, params=params, timeout=60)
r.raise_for_status()
return json.loads(r.text)
def _get_game_data(self, instance, api_url, client_id, game_id):
path = "games"
headers = {'Client-ID': client_id}
params = {'id': game_id}
r = requests.get(urljoin(api_url, path), headers=headers, params=params, timeout=60)
r.raise_for_status()
return json.loads(r.text)
def _get_game_tags(self, instance, api_url, client_id, payload):
tags = {}
for ch in payload['data']:
try:
game_payload = self._get_game_data(instance, api_url, client_id, ch['game_id'])
tags[ch['user_name']] = game_payload['data'][0]['name']
            except Exception as e:
self.log.error("Failed to get game name with error: {}".format(e))
return tags
def _get_user_data(self, instance, api_url, client_id, channels):
path = "users"
headers = {'Client-ID': client_id}
params = [('login', ch) for ch in channels]
r = requests.get(urljoin(api_url, path), headers=headers, params=params, timeout=60)
r.raise_for_status()
return json.loads(r.text)
def _get_follow_data(self, instance, api_url, client_id, user_id):
path = "users/follows"
headers = {'Client-ID': client_id}
params = {'to_id': user_id}
r = requests.get(urljoin(api_url, path), headers=headers, params=params, timeout=60)
r.raise_for_status()
return json.loads(r.text)
def _get_all_follows(self, instance, api_url, client_id, payload):
follows = {}
for ch in payload['data']:
try:
follow_payload = self._get_follow_data(instance, api_url, client_id, ch['id'])
follows[ch['login']] = follow_payload['total']
            except Exception as e:
self.log.error("Failed to get user follows with error: {}".format(e))
return follows
``` |
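For a quick run outside the Agent, an instance dict with the fields `check()` reads can be passed directly. The values below are placeholders, and the Helix base URL is an assumption about what this check expects:
```python
instance = {
    "api_url": "https://api.twitch.tv/helix/",   # assumed Helix base URL
    "client_id": "<YOUR_TWITCH_CLIENT_ID>",      # placeholder
    "channels": ["channel_one", "channel_two"],  # placeholders
    "tags": ["env:dev"],
}

check = TwitchtvCheck("twitchtv", {}, {}, instances=[instance])
check.check(instance)
```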
{
"source": "JohanAR/face-alignment",
"score": 2
} |
#### File: detection/blazeface/detect.py
```python
import torch
import torch.nn.functional as F
import os
import sys
import cv2
import random
import datetime
import math
import argparse
import numpy as np
import scipy.io as sio
import zipfile
from .utils import *
# from .net_blazeface import s3fd
def detect(net, img, device):
H, W, C = img.shape
orig_size = min(H, W)
img, (xshift, yshift) = resize_and_crop_image(img, 128)
preds = net.predict_on_image(img)
if 0 == len(preds):
return np.zeros((1, 1, 5))
shift = np.array([xshift, yshift] * 2)
scores = preds[:, -1:]
# TODO: ugly
# reverses, x and y to adapt with face-alignment code
locs = np.concatenate((preds[:, 1:2], preds[:, 0:1], preds[:, 3:4], preds[:, 2:3]), axis=1)
return [np.concatenate((locs * orig_size + shift, scores), axis=1)]
def batch_detect(net, img_batch, device):
"""
Inputs:
- img_batch: a numpy array of shape (Batch size, Channels, Height, Width)
"""
B, C, H, W = img_batch.shape
orig_size = min(H, W)
# BB, HH, WW = img_batch.shape
# if img_batch
if isinstance(img_batch, torch.Tensor):
img_batch = img_batch.cpu().numpy()
img_batch = img_batch.transpose((0, 2, 3, 1))
imgs, (xshift, yshift) = resize_and_crop_batch(img_batch, 128)
preds = net.predict_on_batch(imgs)
bboxlists = []
for pred in preds:
shift = np.array([xshift, yshift] * 2)
scores = pred[:, -1:]
locs = np.concatenate((pred[:, 1:2], pred[:, 0:1], pred[:, 3:4], pred[:, 2:3]), axis=1)
bboxlists.append(np.concatenate((locs * orig_size + shift, scores), axis=1))
if 0 == len(bboxlists):
bboxlists = np.zeros((1, 1, 5))
return bboxlists
def flip_detect(net, img, device):
img = cv2.flip(img, 1)
b = detect(net, img, device)
bboxlist = np.zeros(b.shape)
bboxlist[:, 0] = img.shape[1] - b[:, 2]
bboxlist[:, 1] = b[:, 1]
bboxlist[:, 2] = img.shape[1] - b[:, 0]
bboxlist[:, 3] = b[:, 3]
bboxlist[:, 4] = b[:, 4]
return bboxlist
def pts_to_bb(pts):
min_x, min_y = np.min(pts, axis=0)
max_x, max_y = np.max(pts, axis=0)
return np.array([min_x, min_y, max_x, max_y])
``` |
{
"source": "johanasplund/befunge-98",
"score": 3
} |
#### File: befunge-98/lib/argparser.py
```python
import argparse
def parse_arguments():
global parser
parser = argparse.ArgumentParser(
description="A Befunge-98 interpreter written in pygame.",
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("-s", action="store", dest="SPEED", type=int,
help="specify the time between "
"each tick (default: 50 ms)",
default=50)
parser.add_argument("-o", action="store_true", dest="OUTPUT_MODE",
help="only show the output of the program in the shell")
parser.add_argument("befunge_file", action="store",
help="the full path to a befunge "
"file to be interpreted")
helpmsg = "show the environment variables used in the y instruction"
parser.add_argument("-y", "--sysinfo", action="version", help=helpmsg,
version="Version 0.9.0\n"
"t is NOT implemented\n"
"i is NOT implemented\n"
"o is NOT implemented\n"
"= is NOT implemented\n"
"Bytes per cell: 24\n"
"Scalars per vector: 2")
try:
return parser.parse_args()
except IOError as io:
parser.error(str(io))
```
#### File: befunge-98/lib/bf98.py
```python
import pygame
import field_and_pointer as fp
import instructions as i
import initialize as ini
def render_code(code):
'''
Renders each character in the code with a specific syntax highlighing
and blits the code to the background surface.
'''
for y, c in enumerate(code):
for x, char in enumerate(c):
if char in "0123456789abcdef":
charcolor = (152, 152, 152)
elif char in "+-*/%!`wk()":
charcolor = (255, 136, 136)
elif char in "><^v?_|#@jx[]r":
charcolor = (136, 255, 136)
elif char in ":\\$n":
charcolor = (255, 255, 136)
elif char in ".,&~":
charcolor = (136, 255, 255)
elif char in "\"\';":
charcolor = (255, 136, 255)
elif char in "{}upgsyt":
charcolor = (136, 136, 255)
else:
charcolor = (206, 206, 206)
codechar = ini.codefont.render(char, 1, charcolor)
ini.background.blit(codechar, (ini.CHAR_WIDTH * x,
ini.CHAR_HEIGHT * y))
def print_stack(stack, color, SOSS=False):
'''
Blits the TOSS and SOSS to the stack surface.
'''
for x, s in enumerate(reversed(stack)):
try:
printstack = ini.stackfont.render(
"{}. {} [{}] ({})".format(x + 1, s,
hex(s),
fp.chhr(s)), 1, color)
except Exception:
printstack = ini.stackfont.render(
"{}. {} ({})".format(x + 1, s, fp.chhr(s)), 1, color)
if SOSS:
ini.stacksurf.blit(printstack, (ini.SCREEN_WIDTH/4,
ini.STACK_CHAR_HEIGHT * x + 20))
else:
ini.stacksurf.blit(printstack, (0, ini.STACK_CHAR_HEIGHT * x + 20))
def initiate_new_run():
'''
Blits all visuals.
'''
# Background, stack and output surfaces
ini.screen.blit(ini.background, (0, 0))
ini.screen.blit(ini.stacksurf, (0, ini.SCREEN_HEIGHT -
ini.SCREEN_HEIGHT_MODIFIER))
ini.screen.blit(ini.outsurf, (int(float(ini.SCREEN_WIDTH) / 2.0),
ini.SCREEN_HEIGHT -
ini.SCREEN_HEIGHT_MODIFIER))
# Stack titles
toss_text = ini.stackfont.render("TOSS", 1, ini.STACK_OUTPUT_COLOR)
soss_text = ini.stackfont.render("SOSS", 1, ini.SOSS_OUTPUT_COLOR)
ini.stacksurf.blit(toss_text, (0, 0))
ini.stacksurf.blit(soss_text, (ini.SCREEN_WIDTH / 4, 0))
# Pointer rectangle
ini.screen.blit(ini.pointer_rect, (ini.pointer.xy[0] * ini.CHAR_WIDTH,
ini.pointer.xy[1] * ini.CHAR_HEIGHT))
pygame.display.flip()
def blit_statics():
'''
Blits all static visuals. This is to prevent the code
and stack blits to pile up on top of each other.
'''
# Reset colors
ini.background.fill(ini.BG_COLOR)
ini.stacksurf.fill(ini.STACK_BG_COLOR)
# Blit surfaces to screen
ini.screen.blit(ini.background, (0, 0))
ini.screen.blit(ini.stacksurf, (0, ini.SCREEN_HEIGHT -
ini.SCREEN_HEIGHT_MODIFIER))
ini.screen.blit(
ini.outsurf, (int(float(ini.SCREEN_WIDTH) / 2.0),
ini.SCREEN_HEIGHT -
ini.SCREEN_HEIGHT_MODIFIER))
# Stack titles
toss_text = ini.stackfont.render("TOSS", 1, ini.STACK_OUTPUT_COLOR)
soss_text = ini.stackfont.render("SOSS", 1, ini.SOSS_OUTPUT_COLOR)
ini.stacksurf.blit(toss_text, (0, 0))
ini.stacksurf.blit(soss_text, (ini.SCREEN_WIDTH / 4, 0))
# Pointer rectangle
ini.screen.blit(ini.pointer_rect,
(ini.pointer.xy[0] * ini.CHAR_WIDTH,
ini.pointer.xy[1] * ini.CHAR_HEIGHT))
# The code
render_code(ini.the_field.code)
def event_handler(pygame_event):
for event in pygame_event:
if event.type == pygame.QUIT:
return "Quit"
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_SPACE:
ini._paused = not ini._paused
elif event.key == pygame.K_RIGHT:
ini._step_once = not ini._step_once
ini._paused = False
elif event.key == pygame.K_ESCAPE:
return "Quit"
elif event.key == pygame.K_r:
ini._reset = True
def run_code():
'''
This is the main method where the event loop is kept. This is where all
the pygame magic is happening.
'''
while True:
get_event = event_handler(pygame.event.get())
if get_event == "Quit":
return
if ini._paused:
continue
if ini._step_once:
ini._paused = True
ini._step_once = False
if ini._reset:
ini._reset = False
break
if not ini.ARGS.OUTPUT_MODE:
initiate_new_run()
blit_statics()
i.do_instruction(ini.pointer.current_char())
if not ini.ARGS.OUTPUT_MODE:
blit_statics()
# Print stack (TOSS)
print_stack(ini.stackstack[-1], ini.STACK_OUTPUT_COLOR)
# Print SOSS if it exists
if len(ini.stackstack) >= 2:
print_stack(ini.stackstack[-2], ini.SOSS_OUTPUT_COLOR, SOSS=True)
pygame.time.wait(ini.ARGS.SPEED)
# Reinitiating the code, pointer, stackstack and output panel
ini.the_field = fp.Field(fp.load_code())
ini.pointer = fp.Pointer((0, 0), (1, 0))
ini.stackstack = [[]]
# Reset output field
ini.outsurf.fill((0, 0, 0, 0))
ini._outcount = 0
ini._outline = 0
ini._instring = ""
run_code()
if __name__ == '__main__':
pygame.init()
run_code()
pygame.quit()
``` |
{
"source": "johanattia/opt",
"score": 3
} |
#### File: opt/tf_opt/minimize.py
```python
from dataclasses import dataclass, field
from typing import Callable, List, Optional
import tensorflow as tf
from tensorflow_addons.utils.types import FloatTensorLike
@dataclass
class OptimizationResult:
"""[summary]"""
position_value: List[FloatTensorLike] = field(default_factory=list)
function_value: List[FloatTensorLike] = field(default_factory=list)
n_iteration: int = field(default_factory=int)
def minimizer(
func: Callable[[FloatTensorLike], FloatTensorLike],
optimizer: tf.keras.optimizers.Optimizer,
initial_position: FloatTensorLike,
verbose: bool = False,
name: Optional[str] = None,
):
"""[summary]
Args:
func (Callable[[FloatTensorLike], FloatTensorLike]): [description]
optimizer (tf.keras.optimizers.Optimizer): [description]
initial_position (FloatTensorLike): [description]
verbose (bool, optional): [description]. Defaults to False.
name (Optional[str], optional): [description]. Defaults to None.
Raises:
TypeError: [description]
TypeError: [description]
Returns:
[type]: [description]
"""
if not callable(func):
raise TypeError("`func` must be a valid callable Python object.")
if not isinstance(optimizer, tf.keras.optimizers.Optimizer):
raise TypeError(
"`optimizer` must be a valid tf.keras.optimizers.Optimizer instance."
)
if isinstance(initial_position, tf.Variable):
comparison_position = tf.Variable(initial_position.value())
else:
initial_position = tf.Variable(initial_position)
comparison_position = tf.Variable(initial_position)
history = {"function_value": [], "position_value": []}
n_iteration = tf.Variable(0)
with tf.name_scope(name or "minimizer"):
def _cond(initial_position, comparison_position):
return tf.less_equal(func(comparison_position), func(initial_position))
def _body(initial_position, comparison_position):
initial_position.assign(comparison_position.value())
cost_function = lambda: func(comparison_position)
optimizer.minimize(cost_function, [comparison_position])
history["function_value"].append(func(initial_position).numpy())
history["position_value"].append(initial_position.numpy())
n_iteration.assign_add(1)
if verbose:
tf.print(
"Iteration",
n_iteration,
"- Function value :",
history["function_value"][-1],
"- Position value :",
history["position_value"][-1],
)
return initial_position, comparison_position
tf.while_loop(
cond=_cond,
body=_body,
loop_vars=[initial_position, comparison_position],
shape_invariants=[initial_position.shape, comparison_position.shape],
)
history["n_iteration"] = n_iteration.numpy()
return history
``` |
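A minimal usage sketch, assuming eager execution (which the `.numpy()` calls above imply). Since the loop only stops once an update makes the objective worse, the sketch uses an optimizer that eventually overshoots (SGD with momentum) on a toy quadratic; the target point `3.0` is arbitrary:
```python
import tensorflow as tf

# Toy quadratic with its minimum at [3.0, 3.0]; momentum makes SGD overshoot,
# which is what terminates the while-loop above.
history = minimizer(
    func=lambda x: tf.reduce_sum((x - 3.0) ** 2),
    optimizer=tf.keras.optimizers.SGD(learning_rate=0.1, momentum=0.9),
    initial_position=tf.constant([0.0, 0.0]),
    verbose=True,
)
print(history["n_iteration"], history["position_value"][-1])
```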