filename | text
---|---|
the-stack_0_21305 | from django.urls import path
from . import views
urlpatterns = [
path('', views.index, name='index'),
path('blogs/', views.BlogListView.as_view(), name='blogs'),
path('blog/<int:pk>', views.BlogDetailView.as_view(), name='blog-detail'),
path('bloggers/', views.BlogAuthorListView.as_view(), name='bloggers'),
path('blogger/<int:pk>', views.BlogAuthorDetailView.as_view(), name='blogs-by-author'),
path('blog/<int:pk>/comment/', views.BlogCommentCreate.as_view(), name='blog_comment'),
]
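# --- Illustrative sketch (added for clarity; not part of the original file) ---
# The routes above assume a companion views module roughly like the outline
# below, built on Django's generic class-based views. The model names (Blog,
# BlogAuthor, BlogComment) and the template name are assumptions inferred from
# the route names, not taken from this repository.
#
# from django.shortcuts import render
# from django.views import generic
# from .models import Blog, BlogAuthor, BlogComment
#
# def index(request):
#     return render(request, 'index.html')
#
# class BlogListView(generic.ListView):
#     model = Blog
#
# class BlogDetailView(generic.DetailView):
#     model = Blog
#
# class BlogAuthorListView(generic.ListView):
#     model = BlogAuthor
#
# class BlogAuthorDetailView(generic.DetailView):
#     model = BlogAuthor
#
# class BlogCommentCreate(generic.CreateView):
#     model = BlogComment
#     fields = '__all__'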
|
the-stack_0_21306 | """show_lldp.py
supported commands:
* show lldp
* show lldp entry *
* show lldp entry [<WORD>]
* show lldp interface [<WORD>]
* show lldp neighbors
* show lldp neighbors detail
* show lldp traffic
"""
import re
from genie.metaparser import MetaParser
from genie.metaparser.util.schemaengine import Schema, \
Any, \
Optional, \
Or, \
And, \
Default, \
Use
# import parser utils
from genie.libs.parser.utils.common import Common
class ShowLldpSchema(MetaParser):
"""Schema for show lldp"""
schema = {
'status': str,
'enabled': bool,
'hello_timer': int,
'hold_timer': int,
'reinit_timer': int
}
class ShowLldp(ShowLldpSchema):
"""Parser for show lldp"""
cli_command = 'show lldp'
def cli(self, output=None):
if output is None:
out = self.device.execute(self.cli_command)
else:
out = output
# initial return dictionary
ret_dict = {}
# initial regexp pattern
p1 = re.compile(r'^Status: +(?P<status>\w+)$')
p2 = re.compile(r'^LLDP +(?P<pattern>[\w\s]+) +(?P<value>\d+) +seconds$')
for line in out.splitlines():
line = line.strip()
# Status: ACTIVE
m = p1.match(line)
if m:
status = m.groupdict()['status'].lower()
ret_dict['status'] = status
ret_dict['enabled'] = True if 'active' in status else False
continue
# LLDP advertisements are sent every 30 seconds
# LLDP hold time advertised is 120 seconds
# LLDP interface reinitialisation delay is 2 seconds
m = p2.match(line)
if m:
group = m.groupdict()
if re.search('(advertisements +are +sent +every)', group['pattern']):
key = 'hello_timer'
elif re.search('(hold +time +advertised +is)', group['pattern']):
key = 'hold_timer'
elif re.search('(interface +reinitialisation +delay +is)', group['pattern']):
key = 'reinit_timer'
else:
continue
ret_dict[key] = int(group['value'])
continue
return ret_dict
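# --- Illustrative usage (added for clarity; not part of the original parser) ---
# A minimal sketch of driving ShowLldp with captured output instead of a live
# connection. The sample text below is an assumption modelled on the comments
# above; device=None is only viable because `output` is supplied, so
# self.device.execute() is never called.
#
# sample = '''
# Global LLDP Information:
#     Status: ACTIVE
#     LLDP advertisements are sent every 30 seconds
#     LLDP hold time advertised is 120 seconds
#     LLDP interface reinitialisation delay is 2 seconds
# '''
# parsed = ShowLldp(device=None).cli(output=sample)
# # parsed == {'status': 'active', 'enabled': True,
# #            'hello_timer': 30, 'hold_timer': 120, 'reinit_timer': 2}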
class ShowLldpEntrySchema(MetaParser):
"""Schema for show lldp entry [<WORD>|*]"""
schema = {
'total_entries': int,
Optional('interfaces'): {
Any(): {
'if_name': str,
'port_id': {
Any(): {
'neighbors': {
Any(): {
'chassis_id': str,
'port_id': str,
'neighbor_id': str,
Optional('port_description'): str,
Optional('system_description'): str,
Optional('system_name'): str,
'time_remaining': int,
Optional('capabilities'): {
Any():{
Optional('system'): bool,
Optional('enabled'): bool,
'name': str,
}
},
Optional('management_address'): str,
Optional('auto_negotiation'): str,
Optional('physical_media_capabilities'): list,
Optional('unit_type'): int,
Optional('vlan_id'): int,
}
}
}
}
}
},
Optional('med_information'): {
Optional('f/w_revision'): str,
Optional('h/w_revision'): str,
Optional('s/w_revision'): str,
Optional('manufacturer'): str,
Optional('model'): str,
Optional('capabilities'): list,
'device_type': str,
Optional('network_policy'): {
Any(): { # 'voice'; 'voice_signal'
'vlan': int, # 110
'tagged': bool,
'layer_2_priority': int,
'dscp': int,
},
},
Optional('serial_number'): str,
Optional('power_source'): str,
Optional('power_priority'): str,
Optional('wattage'): float,
'location': str,
}
}
class ShowLldpEntry(ShowLldpEntrySchema):
"""Parser for show lldp entry {* | word}"""
CAPABILITY_CODES = {'R': 'router',
'B': 'mac_bridge',
'T': 'telephone',
'C': 'docsis_cable_device',
'W': 'wlan_access_point',
'P': 'repeater',
'S': 'station_only',
'O': 'other'}
cli_command = ['show lldp entry {entry}', 'show lldp entry *']
def cli(self, entry='',output=None):
if output is None:
if entry:
cmd = self.cli_command[0].format(entry=entry)
else:
cmd = self.cli_command[1]
out = self.device.execute(cmd)
else:
out = output
# initial return dictionary
ret_dict = {}
item = ''
sub_dict = {}
# ==== initial regexp pattern ====
# Local Intf: Gi2/0/15
p1 = re.compile(r'^Local\s+Intf:\s+(?P<intf>[\w\/\.\-]+)$')
# Port id: Gi1/0/4
p1_1 = re.compile(r'^Port\s+id:\s+(?P<port_id>[\S\s]+)$')
# Chassis id: 843d.c6ff.f1b8
# Chassis id: r2-rf2222-qwe
p2 = re.compile(r'^Chassis\s+id:\s+(?P<chassis_id>[\w\.\:\-]+)$')
# Port Description: GigabitEthernet1/0/4
p3 = re.compile(r'^Port\s+Description:\s+(?P<desc>[\w\/\.\-\s]+)$')
# System Name: R5
# System Name - not advertised
p4 = re.compile(r'^System\s+Name(?: +-|:)\s+(?P<name>[\S\s]+)$')
# System Description:
p5 = re.compile(r'^System\s+Description:.*$')
# Cisco IOS Software, C3750E Software (C3750E-UNIVERSALK9-M), Version 12.2(58)SE2, RELEASE SOFTWARE (fc1)
# Technical Support: http://www.cisco.com/techsupport
# Copyright (c) 1986-2011 by Cisco Systems, Inc.
# Cisco IP Phone 7962G,V12, SCCP42.9-3-1ES27S
p5_1 = re.compile(r'^(?P<msg>(Cisco +IOS +Software|Technical Support|Copyright|Cisco IP Phone).*)$')
# Compiled Thu 21-Jul-11 01:23 by prod_rel_team
# Avaya 1220 IP Deskphone, Firmware:06Q
# IP Phone, Firmware:90234AP
# {"SN":"SN-NR","Owner":"OWNER"}
p5_2 = re.compile(r'^(?P<msg>(Compile|Avaya|IP Phone|{).*)$')
# Time remaining: 112 seconds
p6 = re.compile(r'^Time\s+remaining:\s+(?P<time_remaining>\w+)\s+seconds$')
# System Capabilities: B,R
p7 = re.compile(r'^System\s+Capabilities:\s+(?P<capab>[\w\,\s]+)$')
# Enabled Capabilities: B,R
p8 = re.compile(r'^Enabled\s+Capabilities:\s+(?P<capab>[\w\,\s]+)$')
# Management Addresses:
# IP: 10.9.1.1
# Management Addresses:
# IPV6: 0000:0000:0000:0000:0000:ffff:7f00:0001
# Management Addresses - not advertised
p9 = re.compile(r'^(IP|IPV6):\s+(?P<ip>[\w\.:]+)$')
p9_1 = re.compile(r'^Management\s+Addresses\s+-\s+(?P<ip>not\sadvertised)$')
# Auto Negotiation - supported, enabled
p10 = re.compile(r'^Auto\s+Negotiation\s+\-\s+(?P<auto_negotiation>[\w\s\,]+)$')
# Physical media capabilities:
p11 = re.compile(r'^Physical\s+media\s+capabilities:$')
# 1000baseT(FD)
# 100base-TX(HD)
# Symm, Asym Pause(FD)
# Symm Pause(FD)
p11_1 = re.compile(r'^(?P<physical_media_capabilities>[\S\(\s]+(HD|FD)[\)])$')
# Media Attachment Unit type: 30
p12 = re.compile(r'^Media\s+Attachment\s+Unit\s+type:\s+(?P<unit_type>\d+)$')
# Vlan ID: 1
# Note: not parsing 'not advertised' since the value type is int
p13 = re.compile(r'^Vlan\s+ID:\s+(?P<vlan_id>\d+)$')
# Total entries displayed: 4
p14 = re.compile(r'^Total\s+entries\s+displayed:\s+(?P<entry>\d+)$')
# ==== MED Information patterns =====
# MED Information:
med_p0 = re.compile(r'^MED\s+Information:.*$')
# F/W revision: 06Q
# S/W revision: SCCP42.9-3-1ES27S
# H/W revision: 12
med_p1 = re.compile(r'^(?P<head>(H/W|F/W|S/W))\s+revision:\s+(?P<revision>\S+)$')
# Manufacturer: Avaya-05
med_p2 = re.compile(r'^Manufacturer:\s+(?P<manufacturer>[\S\s]+)$')
# Model: 1220 IP Deskphone
med_p3 = re.compile(r'^Model:\s+(?P<model>[\S\s]+)$')
# Capabilities: NP, LI, PD, IN
med_p4 = re.compile(r'^Capabilities:\s*(?P<capabilities>[\S\s]+)$')
# Device type: Endpoint Class III
med_p5 = re.compile(r'^Device\s+type:\s+(?P<device_type>[\S\s]+)$')
# Network Policy(Voice): VLAN 110, tagged, Layer-2 priority: 5, DSCP: 46
# Network Policy(Voice Signal): VLAN 110, tagged, Layer-2 priority: 0, DSCP: 0
med_p6 = re.compile(r'^Network\s+Policy\(Voice(\s+(?P<voice_signal>Signal))?\):'
r'\s+VLAN\s+(?P<vlan>\d+),\s+(?P<tagged>tagged),\s+'
r'Layer-2 priority:\s+(?P<layer_2_priority>\d+),\s+DSCP:\s+(?P<dscp>\d+)$')
# PD device, Power source: Unknown, Power Priority: High, Wattage: 6.0
med_p7 = re.compile(r'^(?P<device_type>PD device),\s+Power\s+source:\s+(?P<power_source>\S+),\s+'
r'Power\s+Priority:\s+(?P<power_priority>\S+),\s+Wattage:\s+(?P<wattage>\S+)$')
# Location - not advertised
med_p8 = re.compile(r'^Location\s+-\s+(?P<location>[\S\s]+)$')
# Serial number: FCH1610A5S5
med_p9 = re.compile(r'^Serial\s+number:\s+(?P<serial_number>\S+)$')
for line in out.splitlines():
line = line.strip()
# Local Intf: Gi2/0/15
m = p1.match(line)
if m:
intf = Common.convert_intf_name(m.groupdict()['intf'])
intf_dict = ret_dict.setdefault('interfaces', {}).setdefault(intf, {})
intf_dict['if_name'] = intf
sub_dict = {}
continue
# Chassis id: 843d.c6ff.f1b8
m = p2.match(line)
if m:
sub_dict = {}
chassis_id = m.groupdict()['chassis_id']
sub_dict.setdefault('chassis_id', chassis_id)
continue
# Port id: Gi1/0/4
m = p1_1.match(line)
if m:
if 'interfaces' not in ret_dict:
intf_dict = ret_dict.setdefault('interfaces', {}).setdefault('N/A', {})
intf_dict['if_name'] = 'N/A'
port_id = Common.convert_intf_name(m.groupdict()['port_id'])
port_dict = intf_dict.setdefault('port_id', {}). \
setdefault(port_id, {})
sub_dict.setdefault('port_id', port_id)
continue
# Port Description: GigabitEthernet1/0/4
m = p3.match(line)
if m:
sub_dict.setdefault('port_description', m.groupdict()['desc'])
continue
# System Name: R5
# System Name - not advertised
m = p4.match(line)
if m:
name = m.groupdict()['name']
nei_dict = port_dict.setdefault('neighbors', {}).setdefault(name, {})
sub_dict['system_name'] = name
nei_dict['neighbor_id'] = name
nei_dict.update(sub_dict)
continue
# System Description:
m = p5.match(line)
if m:
nei_dict.update({'system_description': ''})
continue
# Cisco IOS Software, C3750E Software (C3750E-UNIVERSALK9-M), Version 12.2(58)SE2, RELEASE SOFTWARE (fc1)
# Technical Support: http://www.cisco.com/techsupport
# Copyright (c) 1986-2011 by Cisco Systems, Inc.
# Cisco IP Phone 7962G,V12, SCCP42.9-3-1ES27S
m = p5_1.match(line)
if m:
nei_dict['system_description'] += m.groupdict()['msg'] + '\n'
continue
# Compiled Thu 21-Jul-11 01:23 by prod_rel_team
# Avaya 1220 IP Deskphone, Firmware:06Q
# IP Phone, Firmware:90234AP
m = p5_2.match(line)
if m:
nei_dict['system_description'] += m.groupdict()['msg']
continue
# Time remaining: 112 seconds
m = p6.match(line)
if m:
nei_dict['time_remaining'] = int(m.groupdict()['time_remaining'])
continue
# System Capabilities: B,R
m = p7.match(line)
if m:
cap = [self.CAPABILITY_CODES[n] for n in m.groupdict()['capab'].split(',')]
for item in cap:
cap_dict = nei_dict.setdefault('capabilities', {}).\
setdefault(item, {})
cap_dict['name'] = item
cap_dict['system'] = True
continue
# Enabled Capabilities: B,R
m = p8.match(line)
if m:
cap = [self.CAPABILITY_CODES[n] for n in m.groupdict()['capab'].split(',')]
for item in cap:
cap_dict = nei_dict.setdefault('capabilities', {}).\
setdefault(item, {})
cap_dict['name'] = item
cap_dict['enabled'] = True
continue
# Management Addresses:
# IP: 10.9.1.1
# Management Addresses - not advertised
m = p9.match(line) or p9_1.match(line)
if m:
nei_dict['management_address'] = m.groupdict()['ip']
continue
# Auto Negotiation - supported, enabled
m = p10.match(line)
if m:
nei_dict['auto_negotiation'] = m.groupdict()['auto_negotiation']
continue
# Physical media capabilities:
m = p11.match(line)
if m:
nei_dict['physical_media_capabilities'] = []
continue
# 1000baseT(FD)
# 100base-TX(HD)
# Symm, Asym Pause(FD)
# Symm Pause(FD)
m = p11_1.match(line)
if m:
item = nei_dict.get('physical_media_capabilities', [])
item.append(m.groupdict()['physical_media_capabilities'])
nei_dict['physical_media_capabilities'] = item
continue
# Media Attachment Unit type: 30
m = p12.match(line)
if m:
nei_dict['unit_type'] = int(m.groupdict()['unit_type'])
continue
# Vlan ID: 1
# Note: not parsing 'not advertised' since the value type is int
m = p13.match(line)
if m:
nei_dict['vlan_id'] = int(m.groupdict()['vlan_id'])
continue
# Total entries displayed: 4
m = p14.match(line)
if m:
ret_dict['total_entries'] = int(m.groupdict()['entry'])
continue
# ==== Med Information ====
# MED Information:
m = med_p0.match(line)
if m:
med_dict = ret_dict.setdefault('med_information', {})
continue
# F/W revision: 06Q
# S/W revision: SCCP42.9-3-1ES27S
# H/W revision: 12
m = med_p1.match(line)
if m:
group = m.groupdict()
med_dict[group['head'].lower()+'_revision'] = m.groupdict()['revision']
continue
# Manufacturer: Avaya-05
# Model: 1220 IP Deskphone
# Device type: Endpoint Class III
m = med_p2.match(line) or med_p3.match(line) or med_p5.match(line)
if m:
match_key = [*m.groupdict().keys()][0]
med_dict[match_key] = m.groupdict()[match_key]
continue
# Capabilities: NP, LI, PD, IN
# Capabilities:
m = med_p4.match(line)
if m:
list_capabilities = m.groupdict()['capabilities'].split(', ')
med_dict['capabilities'] = list_capabilities
continue
# Network Policy(Voice): VLAN 110, tagged, Layer-2 priority: 5, DSCP: 46
# Network Policy(Voice Signal): VLAN 110, tagged, Layer-2 priority: 0, DSCP: 0
m = med_p6.match(line)
if m:
group = m.groupdict()
if group['voice_signal']:
voice = 'voice_signal'
else:
voice = 'voice'
voice_sub_dict = med_dict.setdefault('network_policy', {}).\
setdefault(voice, {})
if group['tagged'] == 'tagged':
voice_sub_dict['tagged'] = True
else:
voice_sub_dict['tagged'] = False
for k in ['layer_2_priority', 'dscp', 'vlan']:
voice_sub_dict[k] = int(group[k])
continue
# PD device, Power source: Unknown, Power Priority: High, Wattage: 6.0
m = med_p7.match(line)
if m:
for k in ['device_type', 'power_source', 'power_priority']:
med_dict[k] = m.groupdict()[k]
med_dict['wattage'] = float(m.groupdict()['wattage'])
continue
# Location - not advertised
m = med_p8.match(line)
if m:
med_dict['location'] = m.groupdict()['location']
continue
# Serial number: FCH1610A5S5
m = med_p9.match(line)
if m:
med_dict['serial_number'] = m.groupdict()['serial_number']
continue
return ret_dict
class ShowLldpNeighborsDetail(ShowLldpEntry):
'''Parser for show lldp neighbors detail'''
cli_command = 'show lldp neighbors detail'
exclude = ['time_remaining']
def cli(self,output=None):
if output is None:
show_output = self.device.execute(self.cli_command)
else:
show_output = output
return super().cli(output=show_output)
class ShowLldpTrafficSchema(MetaParser):
"""Schema for show lldp traffic"""
schema = {
"frame_in": int,
"frame_out": int,
"frame_error_in": int,
"frame_discard": int,
"tlv_discard": int,
'tlv_unknown': int,
'entries_aged_out': int
}
class ShowLldpTraffic(ShowLldpTrafficSchema):
"""Parser for show lldp traffic"""
cli_command = 'show lldp traffic'
exclude = ['frame_in' , 'frame_out', 'tlv_discard', 'tlv_unknown']
def cli(self,output=None):
if output is None:
out = self.device.execute(self.cli_command)
else:
out = output
# initial return dictionary
ret_dict = {}
# initial regexp pattern
p1 = re.compile(r'^(?P<pattern>[\w\s]+): +(?P<value>\d+)$')
for line in out.splitlines():
line = line.strip()
# Total frames out: 20372
# Total entries aged: 34
# Total frames in: 13315
# Total frames received in error: 0
# Total frames discarded: 14
# Total TLVs discarded: 0
# Total TLVs unrecognized: 0
m = p1.match(line)
if m:
group = m.groupdict()
if re.search('(Total +frames +out)', group['pattern']):
key = 'frame_out'
elif re.search('(Total +entries +aged)', group['pattern']):
key = 'entries_aged_out'
elif re.search('(Total +frames +in)', group['pattern']):
key = 'frame_in'
elif re.search('(Total +frames +received +in +error)', group['pattern']):
key = 'frame_error_in'
elif re.search('(Total +frames +discarded)', group['pattern']):
key = 'frame_discard'
elif re.search('(Total +TLVs +discarded)', group['pattern']):
key = 'tlv_discard'
elif re.search('(Total +TLVs +unrecognized)', group['pattern']):
key = 'tlv_unknown'
else:
continue
ret_dict[key] = int(group['value'])
continue
return ret_dict
class ShowLldpInterfaceSchema(MetaParser):
"""Schema for show lldp interface [<WORD>]"""
schema = {
'interfaces': {
Any(): {
'tx': str,
'rx': str,
'tx_state': str,
'rx_state': str,
},
}
}
class ShowLldpInterface(ShowLldpInterfaceSchema):
"""Parser for show lldp interface [<WORD>]"""
cli_command = ['show lldp interface {interface}','show lldp interface']
def cli(self, interface='',output=None):
if output is None:
if interface:
cmd = self.cli_command[0].format(interface=interface)
else:
cmd = self.cli_command[1]
out = self.device.execute(cmd)
else:
out = output
# initial return dictionary
ret_dict = {}
# initial regexp pattern
p1 = re.compile(r'^(?P<intf>[\w\/\-\.]+):$')
p2 = re.compile(r'^(?P<key>[\w\s]+): +(?P<value>[\w\s]+)$')
for line in out.splitlines():
line = line.strip()
# GigabitEthernet1/0/15
m = p1.match(line)
if m:
intf_dict = ret_dict.setdefault('interfaces', {}).\
setdefault(m.groupdict()['intf'], {})
continue
# Tx: enabled
# Rx: enabled
# Tx state: IDLE
# Rx state: WAIT FOR FRAME
m = p2.match(line)
if m:
group = m.groupdict()
key = '_'.join(group['key'].lower().split())
intf_dict[key] = group['value'].lower()
continue
return ret_dict
class ShowLldpNeighborsSchema(MetaParser):
"""
Schema for show lldp neighbors
"""
schema = {
'total_entries': int,
'interfaces': {
Any(): {
'port_id': {
Any(): {
'neighbors': {
Any(): {
'hold_time': int,
Optional('capabilities'): list,
}
}
}
}
}
}
}
class ShowLldpNeighbors(ShowLldpNeighborsSchema):
"""
Parser for show lldp neighbors
"""
CAPABILITY_CODES = {'R': 'router',
'B': 'mac_bridge',
'T': 'telephone',
'C': 'docsis_cable_device',
'W': 'wlan_access_point',
'P': 'repeater',
'S': 'station_only',
'O': 'other'}
cli_command = ['show lldp neighbors']
def cli(self, output=None):
if output is None:
cmd = self.cli_command[0]
out = self.device.execute(cmd)
else:
out = output
parsed_output = {}
# Total entries displayed: 4
p1 = re.compile(r'^Total\s+entries\s+displayed:\s+(?P<entry>\d+)$')
# Device ID Local Intf Hold-time Capability Port ID
# router Gi1/0/52 117 R Gi0/0/0
# 10.10.191.107 Gi1/0/14 155 B,T 7038.eeff.572d
# d89e.f3ff.58fe Gi1/0/33 3070 d89e.f3ff.58fe
# Polycom Trio Visual+Gi2/0/28 120 T 6416.7f1e.ff12
p2 = re.compile(r'(?P<device_id>.{20})(?P<interfaces>\S+)'
r'\s+(?P<hold_time>\d+)\s+(?P<capabilities>[A-Z,]+)?'
r'\s+(?P<port_id>\S+)')
for line in out.splitlines():
line = line.strip()
# Total entries displayed: 4
m = p1.match(line)
if m:
parsed_output['total_entries'] = int(m.groupdict()['entry'])
continue
# Device ID Local Intf Hold-time Capability Port ID
# router Gi1/0/52 117 R Gi0/0/0
# 10.10.191.107 Gi1/0/14 155 B,T 7038.eeff.572d
# d89e.f3ff.58fe Gi1/0/33 3070 d89e.f3ff.58fe
m = p2.match(line)
if m:
group = m.groupdict()
intf = Common.convert_intf_name(group['interfaces'])
device_dict = parsed_output.setdefault('interfaces', {}). \
setdefault(intf, {}). \
setdefault('port_id', {}). \
setdefault(group['port_id'], {}).\
setdefault('neighbors', {}). \
setdefault(group['device_id'], {})
device_dict['hold_time'] = int(group['hold_time'])
if group['capabilities']:
capabilities = list(map(lambda x: x.strip(), group['capabilities'].split(',')))
device_dict['capabilities'] = capabilities
continue
return parsed_output
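# --- Illustrative note (added for clarity; not part of the original parser) ---
# p2 treats the Device ID column as a fixed 20-character field (`.{20}`), and the
# captured value is used as the 'neighbors' key without stripping, so short
# device ids keep their trailing padding. Roughly, a line like the one commented
# above (exact spacing depends on the device output):
#   'router               Gi1/0/52          117        R               Gi0/0/0'
# contributes something like:
#   parsed_output['interfaces']['GigabitEthernet1/0/52']['port_id']['Gi0/0/0']
#       ['neighbors']['router              ']
#       == {'hold_time': 117, 'capabilities': ['R']}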
|
the-stack_0_21308 | def mergeSort(nList, left, right):
if(right-left>1):
middle = (left+right)//2
mergeSort(nList, left, middle)
mergeSort(nList, middle, right)
mergeCombine(nList, left, middle, right)
def mergeCombine(nList, left, middle, right):
lList = nList[left:middle]
rList = nList[middle:right]
k, i, j = left, 0, 0
while(left+i<middle and middle+j<right):
if(lList[i]<=rList[j]):
nList[k] = lList[i]
i+=1
else:
nList[k] = rList[j]
j+=1
k+=1
if(left+i<middle):
while(k<right):
nList[k] = lList[i]
i,k = i+1,k+1
else:
while(k<right):
nList[k] = rList[j]
j,k = j+1,k+1
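# Illustrative example (added for clarity): mergeSort sorts nList[left:right)
# in place by recursive halving and merging, e.g.:
#   data = [5, 2, 9, 1, 5, 6]
#   mergeSort(data, 0, len(data))
#   # data -> [1, 2, 5, 5, 6, 9]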
nList = [int(x) for x in input('Enter the array of elements to be sorted: ').split()]
mergeSort(nList,0,len(nList))
print('The sorted array: '+str(nList)) |
the-stack_0_21311 | from tensorflow.keras.models import model_from_json
import pandas as pd
import numpy as np
import plotly.figure_factory as ff
from scipy.spatial import Delaunay
import pickle
# Load scaler models
with open("x_scaler.pkl", 'rb') as file:
scaler_x = pickle.load(file)
with open("y_scaler.pkl", 'rb') as file:
scaler_y_2 = pickle.load(file)
# read coordinates
site = pd.read_csv('site_coordinates.csv')
def load_generator_model(model):
"""
load the saved trained generator model
"""
json_file = open(f'{model}.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
loaded_model.load_weights(f"{model}.h5")
return loaded_model
g_model = load_generator_model('generator_model')
def visualize(thickness):
"""
3D interpolation of the thickness profile.
"""
site['SITE_Z'] = thickness
points2D = np.vstack([site['SITE_X'], site['SITE_Y']]).T
tri = Delaunay(points2D)
simplices = tri.simplices
fig = ff.create_trisurf(site['SITE_X'], site['SITE_Y'], site['SITE_Z'],
simplices=simplices,
title="wafare", aspectratio=dict(x=1, y=1, z=0.5))
fig.show()
def create_process_vector(flow, space, dept, tool):
"""
Creat a procces vector of the parameters for the model.
"""
features = np.array([flow, space, dept])
if tool == 1:
encode = np.array([1, 0, 0, 0])
features = np.concatenate([features, encode])
elif tool == 2:
encode = np.array([0, 1, 0, 0])
features = np.concatenate([features, encode])
elif tool == 3:
encode = np.array([0, 0, 1, 0])
features = np.concatenate([features, encode])
elif tool == 4:
encode = np.array([0, 0, 0, 1])
features = np.concatenate([features, encode])
features = np.array(features).reshape(1, -1)
print(features)
process_vector = scaler_y_2.transform(features)
return process_vector
def generate_latent_points(latent_dim=32, n_samples=1):
"""
generate points in latent space as input for the generator
"""
# generate points in the latent space
x_input = np.random.randn(latent_dim * n_samples)
# reshape into a batch of inputs for the network
z_input = x_input.reshape(n_samples, latent_dim)
return z_input
def generate_thickness(latent_points, process_vector, g_model):
"""
Genrerate thickness profile form the generator model with given parameters
"""
X_test = g_model.predict([latent_points, process_vector])
X_test = X_test.reshape(49,)
X_test = scaler_x.inverse_transform(X_test.reshape(1, -1))
return X_test[0]
if __name__ == '__main__':
latent_points = generate_latent_points()
print("Please enter flow rate value:\n")
flow = 0.916000 #input()
print("Please enter spacing value:\n")
space = 0.344000 #input()
print("Please enter deposition time value:\n")
dept = 69.289000 #input()
print("Please enter tool number from 1, 2, 3 and 4:\n")
tool = 1 #input()
process_vector = create_process_vector(flow, space, dept, tool)
thickness = generate_thickness(latent_points, process_vector, g_model)
visualize(thickness)
|
the-stack_0_21312 | import os, sys, time
import math
import copy
import re
# import requests
import urllib
import lxml.html as ht
# import lxml.etree as et
from markdownify import markdownify
# import markdown
##------------------------------------------------------------
sys.path.append(os.path.join(os.path.dirname(__file__), '../_public')) ## Note: set relative paths based on the current directory
from utils_basic import (_create_folder, _read_file, _file_to_json, _json_to_file, _to_lists, _fn)
from utils_scraping import (
_root,
_remove_punc,
_pages_by_pagination,
_scrape_list_pages,
_extract_values,
_scrape_detail_page,
_scrape_full_html
)
# from scrap_selenium import (go_selenium, _wait_xpath)
base_path = '../../../__references/_python/sats/_scrap/wikidocs/2155_๋ฅ ๋ฌ๋์ ์ด์ฉํ ์์ฐ์ด ์ฒ๋ฆฌ ์๋ฌธ/source/' ## 'https://wikidocs.net/book/2155'
base_url = 'https://wikidocs.net/'
url = 'https://wikidocs.net/book/2155'
defaults = dict(
base_url = base_url,
base_path = base_path,
head = {
'create': {
'meta': {
'charset': 'utf-8'
}
},
# 'copy': {
# "//style[@type='text/css']"
# },
# 'files': ['js', 'css']
},
body = {
'title': './/h1[@class="page-subject"]',
'date': './/div[@class="muted text-right"]',
'content': './/div[contains(@class, "page-content")]',
},
dels = [ # xpaths of elements to remove from the content
# './/blockquote[last()]'
],
# pres = [ ## NOTE: preserve whitespace and line breaks
# './/pre'
# ],
full = False,
files = {
'fig' : {
'xpath': './/img',
'url': {
# 'xpath': 'span',
'target': 'src',
},
'path': {
# 'xpath': './/img',
# 'target': 'data-filename',
# 'body': callback,
'prefix': 'images/',
'suffix': ''
},
# 'swap': { # used only when replacing the element
# 'tag': 'img', #
# 'attrs': [ # attributes to copy from the original element
# 'src',
# 'data-origin-width',
# 'data-origin-height',
# ]
# }
}
}
)
## NOTE: book title page
# _scrape_full_html(title='intro', url=url, base_url=base_url, base_path=base_path, files=defaults['files'])
# time.sleep(5)
# ## NOTE: detail pages from the title page
# path = f'{base_path}intro.html'
# # root = _root(path, encoding='utf-8')
# root = _root(path)
# root = root.xpath('.//div[contains(@class, "list-group-toc")]')[0]
# detail_pages = {}
# prefixs = ['', '']
# for el in root.xpath('.//a[contains(@class, "list-group-item")]'):
# url = el.get('href')
# if 'javascript' in url: # NOTE: applies to everything except the book title page
# url = f"/{url.split('(')[-1][:-1]}"
# span = el.xpath('./span/span')[0]
# style = span.attrib['style']
# title = span.text.strip()
# if ':0px' in style:
# # prefixs[0] = f"{title.split(' ')[0][:-1]}-" #
# prefixs[0] = f"{title.split(' ')[0].replace('.', '').replace(')', '')}-" # strip trailing '.' and ')'
# elif ':20px' in style:
# # title = prefixs[0] + title
# title = prefixs[0] + title.split(' ')[0].replace('.', '').replace(')', '') + ' ' + ' '.join(title.split(' ')[1:])
# # prefixs[1] = f"{title.split(' ')[0][:-1]}-"
# prefixs[1] = f"{title.split(' ')[0].replace('.', '').replace(')', '')}-" # strip trailing '.' and ')'
# elif ':40px' in style:
# title = prefixs[1] + title.split(' ')[0].replace('.', '').replace(')', '') + ' ' + ' '.join(title.split(' ')[1:])
# detail_pages[title.replace('.', '').replace('/', '_')] = url
# _json_to_file(detail_pages, f'{base_path}_json/detail_pages.json')
# time.sleep(5)
## NOTE: scrape category
detail_pages = _file_to_json(f'{base_path}_json/detail_pages.json')
for title, url in detail_pages.items():
_defaults = copy.deepcopy(defaults) ## NOTE: deep copy
url = f"{base_url}{url[1:]}"
detail_options = dict(_defaults, **dict(title=title, url=url, base_path=base_path))
_scrape_detail_page(**detail_options)
## NOTE: html -> markdown
|
the-stack_0_21314 | from datetime import timezone
import flask
import json
import math
import time
import re
import psycopg2
import validators
from datatypes import (AccessDeniedError, Account, Bot, Dashboard, Entity, Credential, Sensor, Measurement,
Path, PathInputValue, PathFilter, Permission, Timestamp, UnfinishedPathFilter, ValidationError, Widget, Stats,
)
from .common import mqtt_publish_changed, mqtt_publish_changed_multiple_payloads
from const import SYSTEM_PATH_INSERTED_COUNT, SYSTEM_PATH_UPDATED_COUNT, SYSTEM_PATH_CHANGED_COUNT
accounts_api = flask.Blueprint('accounts_api', __name__)
@accounts_api.before_request
def accounts_before_request():
# If bot has successfully logged in (and retrieved the list of its accounts), we should
# publish an MQTT message so that frontend can update 'Last login' field of the bot, and
# show/hide notification badges based on it:
if flask.g.grafolean_data['user_is_bot']:
bot_id = flask.g.grafolean_data['user_id']
m = re.match(r'^/api/accounts/([0-9]+)(/.*)?$', flask.request.path)
if m:
account_id = m.groups()[0]
mqtt_publish_changed([
'accounts/{account_id}/bots'.format(account_id=account_id),
'accounts/{account_id}/bots/{bot_id}'.format(account_id=account_id, bot_id=bot_id),
])
else:
mqtt_publish_changed([
'bots',
'bots/{bot_id}'.format(bot_id=bot_id),
])
def accounts_apidoc_schemas():
yield "AccountPOST", validators.AccountSchemaInputs
yield "AccountGET", {
'type': 'object',
'properties': {
'id': {
'type': 'integer',
'description': "Account id",
'example': 123,
},
'name': {
'type': 'string',
'description': "Account name",
'example': 'My First Account',
},
},
'required': ['id', 'name'],
}
yield "ValuesGET", {
'type': 'object',
'properties': {
'paths': {
'type': 'object',
'additionalProperties': {
# we don't define any properties (because keys are paths and are not known in advance), but
# anything below must conform to this sub-schema:
'type': 'object',
'properties': {
'next_data_point': {
'type': ['number', 'null'],
'description': "Measurements time (UNIX timestamp) of the next value - null if limit was not reached",
'example': 1234567890.123456,
},
'data': {
'type': 'array',
'description': "List of values",
'items': {
'type': 'object',
'properties': {
't': {
'type': 'number',
'description': "Measurements time (UNIX timestamp) - middle of aggregation bucket if aggregation was requested",
'example': 1234567890.123456,
},
'v': {
'type': 'number',
'description': "Measurement value; median values if aggregation was requested",
'example': 12.33,
},
}
}
},
},
},
},
},
}
yield "TopValuesGET", {
'type': 'object',
'properties': {
't': {
'type': ['number', 'null'],
'description': "Measurements time (UNIX timestamp) - null if no results were found",
'example': 1234567890.123456,
},
'total': {
'type': ['number', 'null'],
'description': "Sum of values for all paths that match the path filter (useful for calculating percentages)",
'example': 1500.0,
},
'list': {
'type': 'array',
'description': "List of top N candidates",
'items': {
'type': 'object',
'properties': {
'p': {
'type': 'string',
'description': "Path",
'example': 'entity.1.interface.12.my.path',
},
'v': {
'type': 'number',
'description': "Measurement value",
'example': 12.33,
},
}
}
},
},
}
# --------------
# /accounts/
# --------------
@accounts_api.route('/', methods=['GET'])
# CAREFUL: accessible to any authenticated user (permissions check bypassed)
def accounts_root():
"""
---
get:
summary: Get all accounts this user has access to
tags:
- Accounts
description:
Returns the list of accounts that this user (person or bot) has permission to access. The list is returned in a single array (no pagination).
responses:
200:
content:
application/json:
schema:
type: object
properties:
list:
type: array
items:
"$ref": '#/definitions/AccountGET'
"""
user_id = flask.g.grafolean_data['user_id']
accounts = Account.get_list(user_id)
return json.dumps({'list': accounts}), 200
@accounts_api.route('/<int:account_id>', methods=['GET', 'PUT'])
def account_crud(account_id):
"""
---
get:
summary: Get an account
tags:
- Accounts
description:
Returns account data.
parameters:
- name: account_id
in: path
description: "Account id"
required: true
schema:
type: integer
responses:
200:
content:
application/json:
schema:
"$ref": '#/definitions/AccountGET'
404:
description: No such account
put:
summary: Update the account
tags:
- Accounts
description:
Updates account name.
parameters:
- name: account_id
in: path
description: "Account id"
required: true
schema:
type: integer
- name: "body"
in: body
description: "Account data"
required: true
schema:
"$ref": '#/definitions/AccountPOST'
responses:
204:
description: Update successful
404:
description: No such account
"""
if flask.request.method in ['GET', 'HEAD']:
rec = Account.get(account_id)
if not rec:
return "No such account", 404
return json.dumps(rec), 200
elif flask.request.method == 'PUT':
rec = Account.forge_from_input(flask.request.get_json(), force_id=account_id)
rowcount = rec.update()
if not rowcount:
return "No such account", 404
mqtt_publish_changed([
'accounts/{account_id}'.format(account_id=account_id),
])
return "", 204
@accounts_api.route('/<int:account_id>/bots', methods=['GET', 'POST'])
def account_bots(account_id):
if flask.request.method in ['GET', 'HEAD']:
rec = Bot.get_list(account_id)
return json.dumps({'list': rec}), 200
elif flask.request.method == 'POST':
bot = Bot.forge_from_input(flask.request.get_json(), force_account=account_id)
user_id, _ = bot.insert()
rec = Bot.get(user_id, account_id)
mqtt_publish_changed([
'accounts/{account_id}/bots'.format(account_id=account_id),
'accounts/{account_id}/bots/{bot_id}'.format(account_id=account_id, bot_id=user_id),
])
return json.dumps(rec), 201
@accounts_api.route('/<int:account_id>/bots/<string:user_id>', methods=['GET', 'PUT', 'DELETE'])
def account_bot_crud(account_id, user_id):
if flask.request.method in ['GET', 'HEAD']:
rec = Bot.get(user_id, account_id)
if not rec:
return "No such bot", 404
return json.dumps(rec), 200
elif flask.request.method == 'PUT':
bot = Bot.forge_from_input(flask.request.get_json(), force_account=account_id, force_id=user_id)
rowcount = bot.update()
if not rowcount:
return "No such bot", 404
mqtt_publish_changed([
'accounts/{account_id}/bots'.format(account_id=account_id),
'accounts/{account_id}/bots/{bot_id}'.format(account_id=account_id, bot_id=user_id),
])
return "", 204
elif flask.request.method == 'DELETE':
# bot should not be able to delete himself, otherwise they could lock themselves out:
if int(flask.g.grafolean_data['user_id']) == int(user_id):
return "Can't delete yourself", 403
rowcount = Bot.delete(user_id, force_account=account_id)
if not rowcount:
return "No such bot", 404
mqtt_publish_changed([
'accounts/{account_id}/bots'.format(account_id=account_id),
'accounts/{account_id}/bots/{bot_id}'.format(account_id=account_id, bot_id=user_id),
])
return "", 204
@accounts_api.route('/<int:account_id>/bots/<int:user_id>/token', methods=['GET'])
def account_bot_token_get(account_id, user_id):
# make sure the user who is requesting to see the bot token has every permission that this token has, and
# also that this user can add the bot:
request_user_permissions = Permission.get_list(int(flask.g.grafolean_data['user_id']))
if not Permission.has_all_permissions(request_user_permissions, user_id):
return "Not enough permissions to see this bot's token", 401
if not Permission.can_grant_permission(request_user_permissions, 'accounts/{}/bots'.format(account_id), 'POST'):
return "Not enough permissions to see this bot's token - POST to accounts/:account_id/bots not allowed", 401
token = Bot.get_token(user_id, account_id)
if not token:
return "No such bot", 404
return {'token': token}, 200
@accounts_api.route('/<int:account_id>/entities', methods=['GET', 'POST'])
def account_entities(account_id):
if flask.request.method in ['GET', 'HEAD']:
rec = Entity.get_list(account_id)
return json.dumps({'list': rec}), 200
elif flask.request.method == 'POST':
entity = Entity.forge_from_input(flask.request.get_json(), account_id)
entity_id = entity.insert()
rec = {'id': entity_id}
mqtt_publish_changed([
'accounts/{}/entities'.format(account_id),
])
return json.dumps(rec), 201
@accounts_api.route('/<int:account_id>/entities/<string:entity_id>', methods=['GET', 'PUT', 'DELETE'])
def account_entity_crud(account_id, entity_id):
if flask.request.method in ['GET', 'HEAD']:
rec = Entity.get(entity_id, account_id)
if not rec:
return "No such entity", 404
return json.dumps(rec), 200
elif flask.request.method == 'PUT':
entity = Entity.forge_from_input(flask.request.get_json(), account_id, force_id=entity_id)
rowcount = entity.update()
if not rowcount:
return "No such entity", 404
mqtt_publish_changed([
'accounts/{}/entities'.format(account_id),
'accounts/{}/entities/{}'.format(account_id, entity_id),
])
return "", 204
elif flask.request.method == 'DELETE':
rowcount = Entity.delete(entity_id, account_id)
if not rowcount:
return "No such entity", 404
mqtt_publish_changed([
'accounts/{}/entities'.format(account_id),
'accounts/{}/entities/{}'.format(account_id, entity_id),
])
return "", 204
@accounts_api.route('/<int:account_id>/credentials', methods=['GET', 'POST'])
def account_credentials(account_id):
if flask.request.method in ['GET', 'HEAD']:
rec = Credential.get_list(account_id)
return json.dumps({'list': rec}), 200
elif flask.request.method == 'POST':
credential = Credential.forge_from_input(flask.request.get_json(), account_id)
credential_id = credential.insert()
rec = {'id': credential_id}
mqtt_publish_changed([
'accounts/{}/credentials'.format(account_id),
])
return json.dumps(rec), 201
@accounts_api.route('/<int:account_id>/credentials/<string:credential_id>', methods=['GET', 'PUT', 'DELETE'])
def account_credential_crud(account_id, credential_id):
if flask.request.method in ['GET', 'HEAD']:
rec = Credential.get(credential_id, account_id)
if not rec:
return "No such credential", 404
return json.dumps(rec), 200
elif flask.request.method == 'PUT':
credential = Credential.forge_from_input(flask.request.get_json(), account_id, force_id=credential_id)
rowcount = credential.update()
if not rowcount:
return "No such credential", 404
mqtt_publish_changed([
'accounts/{}/credentials'.format(account_id),
'accounts/{}/credentials/{}'.format(account_id, credential_id),
])
return "", 204
elif flask.request.method == 'DELETE':
rowcount = Credential.delete(credential_id, account_id)
if not rowcount:
return "No such credential", 404
mqtt_publish_changed([
'accounts/{}/credentials'.format(account_id),
'accounts/{}/credentials/{}'.format(account_id, credential_id),
])
return "", 204
@accounts_api.route('/<int:account_id>/sensors', methods=['GET', 'POST'])
def account_sensors(account_id):
if flask.request.method in ['GET', 'HEAD']:
rec = Sensor.get_list(account_id)
return json.dumps({'list': rec}), 200
elif flask.request.method == 'POST':
sensor = Sensor.forge_from_input(flask.request.get_json(), account_id)
sensor_id = sensor.insert()
rec = {'id': sensor_id}
mqtt_publish_changed([
'accounts/{}/sensors'.format(account_id),
])
return json.dumps(rec), 201
@accounts_api.route('/<int:account_id>/sensors/<string:sensor_id>', methods=['GET', 'PUT', 'DELETE'])
def account_sensor_crud(account_id, sensor_id):
if flask.request.method in ['GET', 'HEAD']:
rec = Sensor.get(sensor_id, account_id)
if not rec:
return "No such sensor", 404
return json.dumps(rec), 200
elif flask.request.method == 'PUT':
sensor = Sensor.forge_from_input(flask.request.get_json(), account_id, force_id=sensor_id)
rowcount = sensor.update()
if not rowcount:
return "No such sensor", 404
mqtt_publish_changed([
'accounts/{}/sensors'.format(account_id),
'accounts/{}/sensors/{}'.format(account_id, sensor_id),
])
return "", 204
elif flask.request.method == 'DELETE':
rowcount = Sensor.delete(sensor_id, account_id)
if not rowcount:
return "No such sensor", 404
mqtt_publish_changed([
'accounts/{}/sensors'.format(account_id),
'accounts/{}/sensors/{}'.format(account_id, sensor_id),
])
return "", 204
@accounts_api.route('/<int:account_id>/bots/<string:user_id>/permissions', methods=['GET', 'POST'])
def account_bot_permissions(account_id, user_id):
"""
Allows reading and assigning permissions to account bots (bots which are tied to a specific account).
"""
# make sure the bot really belongs to the account:
rec = Bot.get(user_id, account_id)
if not rec:
return "No such bot", 404
if flask.request.method in ['GET', 'HEAD']:
rec = Permission.get_list(user_id)
return json.dumps({'list': rec}), 200
elif flask.request.method == 'POST':
granting_user_id = flask.g.grafolean_data['user_id']
permission = Permission.forge_from_input(flask.request.get_json(), user_id)
try:
permission_id = permission.insert(granting_user_id)
mqtt_publish_changed([
'persons/{}'.format(permission.user_id),
'bots/{}'.format(permission.user_id),
])
return json.dumps({
'user_id': permission.user_id,
'resource_prefix': permission.resource_prefix,
'methods': permission.methods,
'id': permission_id,
}), 201
except AccessDeniedError as ex:
return str(ex), 401
except psycopg2.IntegrityError:
return "Invalid parameters", 400
@accounts_api.route('/<int:account_id>/bots/<int:user_id>/permissions/<int:permission_id>', methods=['DELETE'])
def account_bot_permission_delete(account_id, user_id, permission_id):
""" Revoke permission from account bot """
# make sure the bot really belongs to the account:
rec = Bot.get(user_id, account_id)
if not rec:
return "No such bot", 404
granting_user_id = flask.g.grafolean_data['user_id']
try:
rowcount = Permission.delete(permission_id, user_id, granting_user_id)
except AccessDeniedError as ex:
return str(ex), 401
if not rowcount:
return "No such permission", 404
mqtt_publish_changed([
'persons/{user_id}'.format(user_id=user_id),
'bots/{user_id}'.format(user_id=user_id),
])
return "", 204
@accounts_api.route("/<int:account_id>/values", methods=['PUT'])
def values_put(account_id):
data = flask.request.get_json()
# let's just pretend our data is of correct form, otherwise Exception will be thrown and Flask will return error response:
try:
newly_created_paths = Measurement.save_values_data_to_db(account_id, data)
except psycopg2.IntegrityError:
return "Invalid input format", 400
# save the stats:
minute = math.floor(time.time() / 60) * 60
stats_updates = {
SYSTEM_PATH_UPDATED_COUNT: { 'v': len(data), 't': minute },
SYSTEM_PATH_CHANGED_COUNT: { 'v': len(data), 't': minute },
}
topics_with_payloads_stats = Stats.update_account_stats(account_id, stats_updates)
# publish the changes over MQTT:
topics_with_payloads = [(
f"accounts/{account_id}/values/{d['p']}",
{ 'v': d['v'], 't': d['t'] },
) for d in data]
topics_with_payloads.extend(topics_with_payloads_stats)
if newly_created_paths:
topics_with_payloads.append(
(
f"accounts/{account_id}/paths",
[{"p": p.path, "id": p.force_id} for p in newly_created_paths],
),
)
mqtt_publish_changed_multiple_payloads(topics_with_payloads)
return "", 204
@accounts_api.route("/<int:account_id>/values", methods=['POST'])
def values_post(account_id):
# data comes from two sources, query params and JSON body. We use both and append timestamp to each
# piece, then we use the same function as for PUT:
data = []
now = time.time()
json_data = flask.request.get_json()
query_params_p = flask.request.args.get('p')
if json_data:
for x in json_data:
if x.get('t'):
return "Parameter 't' shouldn't be specified with POST", 400
data = [{
'p': x['p'],
'v': x['v'],
't': now,
} for x in json_data]
elif query_params_p:
if flask.request.args.get('t'):
return "Query parameter 't' shouldn't be specified with POST", 400
data.append({
'p': query_params_p,
'v': flask.request.args.get('v'),
't': now,
})
else:
return "Missing data", 400
# let's just pretend our data is of correct form, otherwise Exception will be thrown and Flask will return error response:
try:
newly_created_paths = Measurement.save_values_data_to_db(account_id, data)
except psycopg2.IntegrityError:
return "Invalid input format", 400
# update stats:
minute = math.floor(time.time() / 60) * 60
stats_updates = {
SYSTEM_PATH_INSERTED_COUNT: { 'v': len(data), 't': minute },
SYSTEM_PATH_CHANGED_COUNT: { 'v': len(data), 't': minute },
}
topics_with_payloads_stats = Stats.update_account_stats(account_id, stats_updates)
# publish the changes over MQTT:
topics_with_payloads = [(
f"accounts/{account_id}/values/{d['p']}",
{ 'v': d['v'], 't': d['t'] },
) for d in data]
topics_with_payloads.extend(topics_with_payloads_stats)
if newly_created_paths:
topics_with_payloads.append(
(
f"accounts/{account_id}/paths",
[{"p": p.path, "id": p.force_id} for p in newly_created_paths],
),
)
mqtt_publish_changed_multiple_payloads(topics_with_payloads)
return "", 204
@accounts_api.route("/<int:account_id>/values/<string:path>", methods=['GET'])
def values_get(account_id, path):
"""
---
get:
summary: Get values within the specified timeframe
tags:
- Accounts
description:
Returns the values for the specified path. Similar to POST /accounts/<account_id>/getvalues/, except that only a single path can be specified (and GET is used).
parameters:
- name: account_id
in: path
description: "Account id"
required: true
schema:
type: integer
- name: path
in: path
description: "Path"
required: true
schema:
type: string
- name: t0
in: query
description: "Start time (UNIX timestamp with up to 6 decimals)"
required: true
schema:
type: string
- name: t1
in: query
description: "End time (UNIX timestamp with up to 6 decimals)"
required: true
schema:
type: string
- name: sort
in: query
description: "Sort order (default asc)"
required: false
schema:
type: string
enum: [asc, desc]
- name: limit
in: query
description: "Limit number or returned results (default 100000, max 100000)"
required: false
schema:
type: integer
minimum: 1
maximum: 100000
responses:
200:
content:
application/json:
schema:
"$ref": '#/definitions/ValuesGET'
"""
args = flask.request.args
if "," in path:
return "Only a single path is allowed\n\n", 400
paths_input = path
return _values_get(account_id, paths_input, None, args)
@accounts_api.route("/<int:account_id>/getvalues", methods=['POST'])
def values_get_with_post(account_id):
# when we request data for too many paths at once, we run in trouble with URLs being too long. Using
# POST is not ideal, but it works... We do however keep the interface as close to GET as possible, so
# we use the same arguments:
args = flask.request.get_json()
paths_input = args.get('p')
return _values_get(account_id, paths_input, None, args)
@accounts_api.route("/<int:account_id>/getaggrvalues", methods=['POST'])
def aggrvalues_get_with_post(account_id):
args = flask.request.get_json()
paths_input = args.get('p')
try:
aggr_level = int(args.get('a'))
except:
return "Invalid parameter: a\n\n", 400
if not (0 <= aggr_level <= 6):
return "Invalid parameter a (should be a number in range from 0 to 6).\n\n", 400
return _values_get(account_id, paths_input, aggr_level, args)
def _values_get(account_id, paths_input, aggr_level, args):
if paths_input is None:
return "Path(s) not specified\n\n", 400
try:
paths = [Path(p, account_id).path for p in paths_input.split(',')]
except:
return "Path(s) not specified correctly\n\n", 400
t_from_input = args.get('t0')
if t_from_input:
try:
t_froms = [Timestamp(t) for t in str(t_from_input).split(',')]
if len(t_froms) == 1:
t_froms = [t_froms[0] for _ in paths]
elif len(t_froms) == len(paths):
pass
else:
return "Number of t0 timestamps must be 1 or equal to number of paths\n\n", 400
except:
return "Error parsing t0\n\n", 400
else:
t_from = Measurement.get_oldest_measurement_time(account_id, paths)
if not t_from:
t_from = Timestamp(time.time())
t_froms = [t_from for _ in paths]
t_to_input = args.get('t1')
if t_to_input:
try:
t_to = Timestamp(t_to_input)
except:
return "Error parsing t1\n\n", 400
else:
t_to = Timestamp(time.time())
sort_order = str(args.get('sort', 'asc'))
if sort_order not in ['asc', 'desc']:
return "Invalid parameter: sort (should be 'asc' or 'desc')\n\n", 400
should_sort_asc = True if sort_order == 'asc' else False
try:
max_records = int(args.get('limit', Measurement.MAX_DATAPOINTS_RETURNED))
if max_records > Measurement.MAX_DATAPOINTS_RETURNED:
return "Invalid parameter: limit (max. value is {})\n\n".format(Measurement.MAX_DATAPOINTS_RETURNED), 400
except:
return "Invalid parameter: limit\n\n", 400
# finally, return the data:
paths_data = Measurement.fetch_data(account_id, paths, aggr_level, t_froms, t_to, should_sort_asc, max_records)
return json.dumps({
'paths': paths_data,
}), 200
@accounts_api.route("/<int:account_id>/topvalues", methods=['GET'])
def topvalues_get(account_id):
"""
---
get:
summary: Get highest N measurements for the latest timestamp before specified time
tags:
- Accounts
description:
Finds the latest timestamp of any measurement that was recorded for any path that matches the path filter. Returns a list of highest `n`
measurements (for the matching paths) that were taken at that timestamp. Note that timestamp must match exactly.
It is possible to change the search for timestamp so that it is lower than some provided time (parameter `t`).
parameters:
- name: account_id
in: path
description: "Account id"
required: true
schema:
type: integer
- name: f
in: query
description: "Path filter (determines which paths are taken as candidates)"
required: true
schema:
type: string
- name: n
in: query
description: "Number of paths with highest measurements to return (default 5)"
required: false
schema:
type: integer
minimum: 1
- name: t
in: query
description: "Search for candidates older than this timestamp (default: current timestamp)"
required: false
schema:
type: number
responses:
200:
content:
application/json:
schema:
"$ref": '#/definitions/TopValuesGET'
"""
max_results_input = flask.request.args.get('n')
max_results = max(0, int(max_results_input)) if max_results_input else 5
path_filter_input = flask.request.args.get('f')
if not path_filter_input:
return "Path filter not specified\n\n", 400
try:
pf = str(PathFilter(path_filter_input))
except ValidationError:
raise ValidationError("Invalid path filter")
ts_to_input = flask.request.args.get('t', time.time())
try:
ts_to = Timestamp(ts_to_input)
except ValidationError:
raise ValidationError("Invalid parameter t")
ts, total, topn = Measurement.fetch_topn(account_id, pf, ts_to, max_results)
return json.dumps({
't': ts.replace(tzinfo=timezone.utc).timestamp(),
'total': float(total),
'list': topn,
}), 200
@accounts_api.route("/<int:account_id>/paths", methods=['GET'])
def paths_get(account_id):
max_results_input = flask.request.args.get('limit')
if not max_results_input:
max_results = 10
else:
max_results = max(0, int(max_results_input))
path_filters_input = flask.request.args.get('filter')
failover_trailing = flask.request.args.get('failover_trailing', 'false').lower() == 'true'
try:
matching_paths = {}
any_found = False
any_limit_reached = False
for path_filter_input in str(path_filters_input).split(','):
pf = str(PathFilter(path_filter_input))
matching_paths[pf], limit_reached = PathFilter.find_matching_paths(account_id, pf, limit=max_results)
any_found = len(matching_paths[pf]) > 0 or any_found
any_limit_reached = any_limit_reached or limit_reached
except ValidationError:
if not failover_trailing:
raise
# looks like we don't have a valid filter, but that's ok - we allow trailing chars so it might fare better there
matching_paths, any_limit_reached = {}, False
ret = {
'paths': matching_paths if any_found else {},
'limit_reached': any_limit_reached,
}
if failover_trailing and not any_found:
ret['paths_with_trailing'] = {}
for path_filter_input in str(path_filters_input).split(','):
upf = str(UnfinishedPathFilter(path_filter_input))
ret['paths_with_trailing'][upf], limit_reached = UnfinishedPathFilter.find_matching_paths(account_id, upf, limit=max_results, allow_trailing_chars=True)
ret['limit_reached'] = ret['limit_reached'] or limit_reached
return json.dumps(ret), 200
@accounts_api.route('/<int:account_id>/paths/<int:path_id>', methods=['GET', 'PUT', 'DELETE'])
def account_path_crud(account_id, path_id):
if flask.request.method in ['GET', 'HEAD']:
rec = Path.get(path_id, account_id)
if not rec:
return "No such path", 404
return json.dumps(rec), 200
elif flask.request.method == 'PUT':
record = Path.forge_from_input(flask.request.get_json(), account_id, force_id=path_id)
rowcount = record.update()
if not rowcount:
return "No such path", 404
return "", 204
elif flask.request.method == 'DELETE':
rowcount = Path.delete(path_id, account_id)
if not rowcount:
return "No such path", 404
return "", 204
@accounts_api.route("/<int:account_id>/dashboards", methods=['GET', 'POST'])
def dashboards_crud(account_id):
if flask.request.method in ['GET', 'HEAD']:
rec = Dashboard.get_list(account_id)
return json.dumps({'list': rec}), 200
elif flask.request.method == 'POST':
dashboard = Dashboard.forge_from_input(account_id, flask.request.get_json())
try:
dashboard.insert()
except psycopg2.IntegrityError:
return "Dashboard with this slug already exists", 400
mqtt_publish_changed(['accounts/{}/dashboards'.format(account_id)])
return json.dumps({'slug': dashboard.slug}), 201
@accounts_api.route("/<int:account_id>/dashboards/<string:dashboard_slug>", methods=['GET', 'PUT', 'DELETE'])
def dashboard_crud(account_id, dashboard_slug):
if flask.request.method in ['GET', 'HEAD']:
rec = Dashboard.get(account_id, slug=dashboard_slug)
if not rec:
return "No such dashboard", 404
return json.dumps(rec), 200
elif flask.request.method == 'PUT':
dashboard = Dashboard.forge_from_input(account_id, flask.request.get_json(), force_slug=dashboard_slug)
rowcount = dashboard.update()
if not rowcount:
return "No such dashboard", 404
mqtt_publish_changed([
'accounts/{}/dashboards'.format(account_id),
'accounts/{}/dashboards/{}'.format(account_id, dashboard_slug),
])
return "", 204
elif flask.request.method == 'DELETE':
rowcount = Dashboard.delete(account_id, dashboard_slug)
if not rowcount:
return "No such dashboard", 404
mqtt_publish_changed([
'accounts/{}/dashboards'.format(account_id),
'accounts/{}/dashboards/{}'.format(account_id, dashboard_slug),
])
return "", 200
@accounts_api.route("/<int:account_id>/dashboards/<string:dashboard_slug>/widgets", methods=['GET', 'POST'])
def widgets_crud(account_id, dashboard_slug):
if flask.request.method in ['GET', 'HEAD']:
try:
paths_limit = int(flask.request.args.get('paths_limit', 200))
except:
return "Invalid parameter: paths_limit\n\n", 400
rec = Widget.get_list(account_id, dashboard_slug, paths_limit=paths_limit)
return json.dumps({'list': rec}), 200
elif flask.request.method == 'POST':
widget = Widget.forge_from_input(account_id, dashboard_slug, flask.request.get_json())
try:
widget_id = widget.insert()
except psycopg2.IntegrityError as ex:
return "Error inserting widget" + str(ex), 400
mqtt_publish_changed([
f'accounts/{account_id}/dashboards/{dashboard_slug}',
])
return json.dumps({'id': widget_id}), 201
@accounts_api.route("/<int:account_id>/dashboards/<string:dashboard_slug>/widgets/<string:widget_id>", methods=['GET', 'PUT', 'DELETE'])
def widget_crud(account_id, dashboard_slug, widget_id):
try:
widget_id = int(widget_id)
except:
raise ValidationError("Invalid widget id")
if flask.request.method in ['GET', 'HEAD']:
try:
paths_limit = int(flask.request.args.get('paths_limit', 200))
except:
return "Invalid parameter: paths_limit\n\n", 400
rec = Widget.get(account_id, dashboard_slug, widget_id, paths_limit=paths_limit)
if not rec:
return "No such widget", 404
return json.dumps(rec), 200
elif flask.request.method == 'PUT':
widget = Widget.forge_from_input(account_id, dashboard_slug, flask.request.get_json(), widget_id=widget_id)
rowcount = widget.update()
if not rowcount:
return "No such widget", 404
mqtt_publish_changed([
f'accounts/{account_id}/dashboards/{dashboard_slug}',
])
return "", 204
elif flask.request.method == 'DELETE':
rowcount = Widget.delete(account_id, dashboard_slug, widget_id)
if not rowcount:
return "No such widget", 404
mqtt_publish_changed([
f'accounts/{account_id}/dashboards/{dashboard_slug}',
])
return "", 200
@accounts_api.route("/<int:account_id>/dashboards/<string:dashboard_slug>/widgets_positions/", methods=['PUT'])
def widgets_positions(account_id, dashboard_slug):
Widget.set_positions(account_id, dashboard_slug, flask.request.get_json())
mqtt_publish_changed([
f'accounts/{account_id}/dashboards/{dashboard_slug}',
])
return "", 204
|
the-stack_0_21316 | import os, sys
import math, time
import contextlib
import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gst
import numpy as np
import torch, torchvision
frame_format, pixel_bytes, model_precision = 'RGBA', 4, 'fp32'
model_dtype = torch.float16 if model_precision == 'fp16' else torch.float32
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
detector = torch.hub.load('NVIDIA/DeepLearningExamples:torchhub', 'nvidia_ssd', model_math=model_precision).eval().to(device)
ssd_utils = torch.hub.load('NVIDIA/DeepLearningExamples:torchhub', 'nvidia_ssd_processing_utils')
detection_threshold = 0.4
start_time, frames_processed = None, 0
# context manager to help keep track of ranges of time, using NVTX
@contextlib.contextmanager
def nvtx_range(msg):
depth = torch.cuda.nvtx.range_push(msg)
try:
yield depth
finally:
torch.cuda.nvtx.range_pop()
def on_frame_probe(pad, info):
global start_time, frames_processed
start_time = start_time or time.time()
with nvtx_range('on_frame_probe'):
buf = info.get_buffer()
print(f'[{buf.pts / Gst.SECOND:6.2f}]')
image_tensor = buffer_to_image_tensor(buf, pad.get_current_caps())
image_batch = preprocess(image_tensor.unsqueeze(0))
frames_processed += image_batch.size(0)
with torch.no_grad():
with nvtx_range('inference'):
locs, labels = detector(image_batch)
postprocess(locs, labels)
return Gst.PadProbeReturn.OK
def buffer_to_image_tensor(buf, caps):
with nvtx_range('buffer_to_image_tensor'):
caps_structure = caps.get_structure(0)
height, width = caps_structure.get_value('height'), caps_structure.get_value('width')
is_mapped, map_info = buf.map(Gst.MapFlags.READ)
if is_mapped:
try:
image_array = np.ndarray(
(height, width, pixel_bytes),
dtype=np.uint8,
buffer=map_info.data
)
return torch.from_numpy(
image_array[:,:,:3].copy() # RGBA -> RGB, and extend lifetime beyond subsequent unmap
)
finally:
buf.unmap(map_info)
def preprocess(image_batch):
'300x300 centre crop, normalize, HWC -> CHW'
with nvtx_range('preprocess'):
batch_dim, image_height, image_width, image_depth = image_batch.size()
copy_x, copy_y = min(300, image_width), min(300, image_height)
dest_x_offset = max(0, (300 - image_width) // 2)
source_x_offset = max(0, (image_width - 300) // 2)
dest_y_offset = max(0, (300 - image_height) // 2)
source_y_offset = max(0, (image_height - 300) // 2)
input_batch = torch.zeros((batch_dim, 300, 300, 3), dtype=model_dtype, device=device)
input_batch[:, dest_y_offset:dest_y_offset + copy_y, dest_x_offset:dest_x_offset + copy_x] = \
image_batch[:, source_y_offset:source_y_offset + copy_y, source_x_offset:source_x_offset + copy_x]
return torch.einsum(
'bhwc -> bchw',
normalize(input_batch / 255)
).contiguous()
def normalize(input_tensor):
'Nvidia SSD300 code uses mean and std-dev of 128/256'
return (2.0 * input_tensor) - 1.0
def postprocess(locs, labels):
with nvtx_range('postprocess'):
results_batch = ssd_utils.decode_results((locs.cpu(), labels.cpu()))
results_batch = [ssd_utils.pick_best(results, detection_threshold) for results in results_batch]
for bboxes, classes, scores in results_batch:
if scores.shape[0] > 0:
print(bboxes, classes, scores)
Gst.init()
pipeline = Gst.parse_launch(f'''
filesrc location=media/in.mp4 num-buffers=256 !
decodebin !
nvvideoconvert !
video/x-raw,format={frame_format} !
fakesink name=s
''')
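# the pipeline decodes the file, converts each frame to RGBA via NVIDIA's
# nvvideoconvert element and terminates in a fakesink; the buffer probe added on
# the sink pad below is where every decoded frame is handed to the detector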
pipeline.get_by_name('s').get_static_pad('sink').add_probe(
Gst.PadProbeType.BUFFER,
on_frame_probe
)
pipeline.set_state(Gst.State.PLAYING)
try:
while True:
msg = pipeline.get_bus().timed_pop_filtered(
Gst.SECOND,
Gst.MessageType.EOS | Gst.MessageType.ERROR
)
if msg:
text = msg.get_structure().to_string() if msg.get_structure() else ''
msg_type = Gst.message_type_get_name(msg.type)
print(f'{msg.src.name}: [{msg_type}] {text}')
break
finally:
finish_time = time.time()
    with open(f'logs/{os.path.splitext(sys.argv[0])[0]}.pipeline.dot', 'w') as dot_file:
        dot_file.write(
            Gst.debug_bin_to_dot_data(pipeline, Gst.DebugGraphDetails.ALL)
        )
pipeline.set_state(Gst.State.NULL)
print(f'FPS: {frames_processed / (finish_time - start_time):.2f}')
|
the-stack_0_21318 | from app.Model.file_management import ManagerFile
class Database:
def __init__(self):
self._manager_file = ManagerFile()
self.name_archive = 'confirmed'
self.fields = []
self.rows = []
def save_data(self, data_csv_for_update):
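        # each item is expected to be a (fields, rows, archive_name) tuple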
for filename in data_csv_for_update:
fields = filename[0]
rows = filename[1]
name_archive = filename[2]
with open(f"app\Services\{name_archive}.txt",'w') as arc:
arc.write(','.join(fields)+'\n')
for row in rows:
arc.write(','.join(row)+'\n')
print(f"{name_archive.capitalize()} data updated successfully!")
def load_data(self, option):
name_archive = str()
for filename in self._manager_file.filenames:
if option == 1 and filename == 'confirmed':
name_archive = filename
elif option == 2 and filename == 'death':
name_archive = filename
elif option == 3 and filename == 'recovered':
name_archive = filename
        with open(f'app/Services/{name_archive}.txt', 'r') as arc:
for row in arc:
row = row.strip()
new_row = row.split(',')
self.rows.append(new_row)
self.fields = self.rows.pop(0)
print("Data loaded successfully!")
return self.fields, self.rows |
the-stack_0_21319 | # This file belongs to DWGranularSpeckles project.
# The software is realeased with MIT license.
import os
import sys
import subprocess
def get_frame_rate(filename):
if not os.path.exists(filename):
sys.stderr.write("ERROR: filename %r was not found!" % (filename,))
return -1
out = subprocess.check_output(["ffprobe", filename, "-v", "0", "-select_streams",
"v", "-print_format", "flat", "-show_entries", "stream=r_frame_rate"])
    rate = out.decode().split('=')[1].strip()[1:-1].split('/')
if len(rate) == 1:
return float(rate[0])
if len(rate) == 2:
return float(rate[0])/float(rate[1])
return -1
def videoToFrame(args):
import cv2
vidcap = cv2.VideoCapture(args.videofile)
    count = 0
if not os.path.exists(args.image_folder):
os.makedirs(args.image_folder)
success, image = vidcap.read()
while success:
# print 'Read a new frame: ', success
cv2.imwrite(args.image_folder+"/frame_%04d.png" %
count, image) # save frame as JPEG file
count += 1
success, image = vidcap.read()
return count
|
the-stack_0_21322 | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import json
import logging
import os
import pytest
import shutil
import subprocess
import sys
import tempfile
import tensorflow as tf
from .. import recordio_utils
from sagemaker_tensorflow import PipeModeDataset, PipeModeDatasetException
dimension = 100
tf.logging.set_verbosity(logging.INFO)
@pytest.fixture(autouse=True, scope='session')
def recordio_file():
recordio_utils.build_record_file('test.recordio', num_records=100, dimension=dimension)
recordio_utils.validate_record_file('test.recordio', dimension=dimension)
yield
os.remove('test.recordio')
@pytest.fixture(autouse=True, scope='session')
def multipart_recordio_file():
recordio_utils.build_record_file('test.mp.recordio', num_records=100, dimension=dimension, multipart=True)
yield
os.remove('test.mp.recordio')
@pytest.fixture(autouse=True, scope='session')
def tfrecords_file():
writer = tf.python_io.TFRecordWriter("test.tfrecords")
for i in range(100):
writer.write(b"hello world")
writer.close()
yield
os.remove('test.tfrecords')
@pytest.fixture(autouse=True, scope='session')
def csv_file():
with open('test.csv', 'w') as csv_file:
for i in range(100):
for j in range(100):
csv_file.write(str(j))
if j < 99:
csv_file.write(',')
else:
csv_file.write('\n')
yield
os.remove('test.csv')
@pytest.fixture
def model_dir():
model_dir = tempfile.mkdtemp()
yield model_dir
shutil.rmtree(model_dir)
def write_config(directory, *channels):
configpath = os.path.join(directory, 'inputdataconfig.json')
input_data_config = {
channel: {
"TrainingInputMode": "Pipe"
} for channel in channels
}
with open(configpath, 'w') as f:
f.write(json.dumps(input_data_config))
def create_fifos(epochs, channel_dir, channel_name, input_file='test.recordio'):
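    # emulate SageMaker Pipe Mode: one FIFO per epoch named <channel>_<epoch>,
    # with a background dd streaming the input file into it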
for epoch in range(epochs):
fifo = '{}/{}_{}'.format(channel_dir, channel_name, epoch)
subprocess.check_call(['mkfifo', fifo])
devnull = open(os.devnull, 'w')
subprocess.Popen(['dd', 'if={}'.format(input_file), 'of={}'.format(fifo), 'bs=65536'],
stdout=devnull, stderr=devnull)
features = {
'data': tf.FixedLenFeature([], tf.string),
'labels': tf.FixedLenFeature([], tf.int64),
}
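# each record is a serialized tf.Example holding a raw float64 feature vector
# ('data') and an int64 label; decode_raw turns the bytes back into the
# dimension-length vector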
def parse(record):
parsed = tf.parse_single_example(record, features)
return ({
'data': tf.decode_raw(parsed['data'], tf.float64)
}, parsed['labels'])
def make_estimator(model_dir):
column = tf.feature_column.numeric_column('data', shape=(dimension, ))
return tf.estimator.LinearClassifier(feature_columns=[column], model_dir=model_dir)
def test_multi_epoch_pipeline(model_dir):
channel_dir = tempfile.mkdtemp()
state_dir = tempfile.mkdtemp()
epochs = 3
channel_name = 'testchannel'
create_fifos(epochs, channel_dir, channel_name)
write_config(channel_dir, 'testchannel')
def input_fn():
ds = PipeModeDataset(channel_name, pipe_dir=channel_dir, state_dir=state_dir, config_dir=channel_dir)
ds = ds.map(parse, num_parallel_calls=12)
ds = ds.repeat(count=2)
ds = ds.prefetch(3)
ds = ds.batch(3)
it = ds.make_one_shot_iterator()
return it.get_next()
estimator = make_estimator(model_dir=model_dir)
estimator.train(input_fn=input_fn)
def test_multi_channels():
channel_dir = tempfile.mkdtemp()
state_dir = tempfile.mkdtemp()
epochs = 3
create_fifos(epochs, channel_dir, "channel_a")
create_fifos(epochs, channel_dir, "channel_b")
write_config(channel_dir, 'channel_a', 'channel_b')
def make_dataset(channel_name):
ds = PipeModeDataset(channel_name, pipe_dir=channel_dir, state_dir=state_dir, config_dir=channel_dir)
ds = ds.map(parse, num_parallel_calls=12)
ds = ds.repeat(count=2)
ds = ds.prefetch(3)
ds = ds.batch(10)
return ds
ds_a = make_dataset("channel_a")
ds_b = make_dataset("channel_b")
dataset = tf.data.Dataset.zip((ds_a, ds_b))
with tf.Session() as sess:
it = dataset.make_one_shot_iterator()
next = it.get_next()
for i in range(20):
a, b = sess.run(next)
assert a[0]['data'].shape == (10, 100)
assert len(a[1]) == 10
assert b[0]['data'].shape == (10, 100)
assert len(b[1]) == 10
with pytest.raises(tf.errors.OutOfRangeError):
sess.run(next)
def test_multipart_recordio(model_dir):
channel_dir = tempfile.mkdtemp()
state_dir = tempfile.mkdtemp()
channel_name = 'testchannel'
create_fifos(1, channel_dir, channel_name, input_file='test.mp.recordio')
write_config(channel_dir, 'testchannel')
def input_fn():
ds = PipeModeDataset(channel_name, pipe_dir=channel_dir, state_dir=state_dir, config_dir=channel_dir)
ds = ds.map(parse, num_parallel_calls=12)
ds = ds.prefetch(3)
ds = ds.batch(3)
it = ds.make_one_shot_iterator()
return it.get_next()
estimator = make_estimator(model_dir=model_dir)
estimator.train(input_fn=input_fn)
def test_tf_record():
channel_dir = tempfile.mkdtemp()
state_dir = tempfile.mkdtemp()
epochs = 1
channel_name = 'testchannel'
create_fifos(epochs, channel_dir, channel_name, input_file='test.tfrecords')
write_config(channel_dir, 'testchannel')
ds = PipeModeDataset(channel_name, pipe_dir=channel_dir, state_dir=state_dir, config_dir=channel_dir,
record_format='TFRecord')
with tf.Session() as sess:
it = ds.make_one_shot_iterator()
next = it.get_next()
for i in range(100):
assert sess.run(next) == b'hello world'
FIELD_DEFAULTS = [[0] for i in range(100)]
COLUMNS = [str(i) for i in range(100)]
def test_csv():
channel_dir = tempfile.mkdtemp()
state_dir = tempfile.mkdtemp()
epochs = 1
channel_name = 'testchannel'
write_config(channel_dir, 'testchannel')
create_fifos(epochs, channel_dir, channel_name, input_file='test.csv')
def parse(line):
fields = tf.decode_csv(line, FIELD_DEFAULTS)
features = dict(zip(COLUMNS, fields))
return features
with tf.Session() as sess:
ds = PipeModeDataset(channel_name, pipe_dir=channel_dir, state_dir=state_dir, config_dir=channel_dir,
record_format='TextLine')
ds = ds.map(parse)
it = ds.make_one_shot_iterator()
next = it.get_next()
for i in range(100):
d = sess.run(next)
sys.stdout.flush()
assert d == {str(i): i for i in range(100)}
def test_input_config_validation_failure():
channel_dir = tempfile.mkdtemp()
state_dir = tempfile.mkdtemp()
write_config(channel_dir, 'testchannel')
with pytest.raises(PipeModeDatasetException):
with tf.Session():
PipeModeDataset("Not a Channel", pipe_dir=channel_dir, state_dir=state_dir, config_dir=channel_dir)
|
the-stack_0_21326 | from typing import List
from subsurface.writer.to_rex.common import file_header_size, encode
from subsurface.writer.to_rex.data_struct import RexLineSet, RexMesh, RexMaterial
from subsurface.writer.to_rex.material_encoder import material_encode
from subsurface.writer.to_rex.mesh_encoder import mesh_encode
def numpy_to_rex(
rex_line_set: List[RexLineSet] = None,
rex_meshes: List[RexMesh] = None,
rex_material: List[RexMaterial] = None
):
if rex_line_set is None:
rex_line_set = list()
if rex_meshes is None:
rex_meshes = list()
if rex_material is None:
rex_material = list()
data_block_bytes = bytearray()
byte_size = 0
data_block_bytes, data_id = w_data_blocks(rex_meshes, rex_material)
n_data_blocks = data_id
header_and_coord_block_bytes = w_file_header_and_coord_system_block(
n_data_blocks=n_data_blocks,
size_data_blocks=len(data_block_bytes),
start_data=file_header_size
)
return header_and_coord_block_bytes + data_block_bytes
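# A minimal usage sketch (assumes RexMesh/RexMaterial instances built elsewhere):
#   file_bytes = numpy_to_rex(rex_meshes=[mesh], rex_material=[material])
#   write_rex_file(file_bytes, 'model')  # writes model.rex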
def w_data_blocks(rex_meshes: List[RexMesh], rex_material: List[RexMaterial]):
data_id = 0
rmesh_bytes, data_id = w_block_data_type(mesh_encode, rex_meshes, data_id)
rmaterial_bytes, data_id = w_block_data_type(material_encode, rex_material,
data_id)
blocks_bytes = rmesh_bytes + rmaterial_bytes
return blocks_bytes, data_id
def w_block_data_type(encoder, rex_objects: List, data_id: int):
data_block_bytes = bytearray()
for rex_obj in rex_objects:
data_block_bytes += encoder(rex_obj, data_id=data_id)
data_id += 1
return data_block_bytes, data_id
def w_file_header_and_coord_system_block(n_data_blocks, size_data_blocks, version=1,
start_data=86, srid=3876, offsets=None):
"""
    Write the file header block and the coordinate system block of a rexfile.

    Args:
        n_data_blocks: Number of data blocks contained in the file.
        size_data_blocks: Total size in bytes of all data blocks.
version (int): Version of the file
start_data (int): Position where data start. This is after the header
and coordinate system. If everything works fine it should be 86
srid (int): Spatial reference system identifier (srid)
        offsets: Global x, y, z offset (defaults to [0, 0, 0]).
Returns:
"""
reserved = '0' * 42
if offsets is None:
offsets = [0, 0, 0]
input_ = [('REX1', 'bytes'), # REX1
(version, 'uint16'), # file version
(0, 'uint32'), # CRC32
(n_data_blocks, 'uint16'), # Number of DATA BLOCKS
(start_data, 'uint16'), # StartData
(size_data_blocks, 'uint64'), # Size of all data blocks
(reserved, 'bytes'), # Reserved
# Coordinate system block
(srid, 'uint32'), # Spatial reference system identifier (srid)
(4, 'uint16'), # Size of the name of the used system.
('EPSG', 'bytes'), # name of the used system.
(offsets, 'float32')] # Global x, y, z offset
block_bytes = encode(input_)
return block_bytes
def write_rex_file(bytes, path: str):
"""Write to disk a rexfile from its binary format"""
new_file = open(path + ".rex", "wb")
new_file.write(bytes)
return True
def read_rex_file(path: str) -> bytes:
with open(path, "rb") as f:
bytes_read = f.read()
return bytes_read
|
the-stack_0_21329 | """
Copyright (c) 2020 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import logging
import cocotb
from cocotb.triggers import RisingEdge, ReadOnly
from cocotb.bus import Bus
from cocotbext.axi.memory import Memory
class PsdpRamWrite(Memory):
_cmd_signals = ["wr_cmd_be", "wr_cmd_addr", "wr_cmd_data", "wr_cmd_valid", "wr_cmd_ready"]
def __init__(self, entity, name, clock, reset=None, size=1024, mem=None, *args, **kwargs):
self.log = logging.getLogger(f"cocotb.{entity._name}.{name}")
self.entity = entity
self.clock = clock
self.reset = reset
self.cmd_bus = Bus(self.entity, name, self._cmd_signals, **kwargs)
self.log.info("Parallel Simple Dual Port RAM model (write)")
self.log.info("Copyright (c) 2020 Alex Forencich")
super().__init__(size, mem, *args, **kwargs)
self.pause = False
self._pause_generator = None
self._pause_cr = None
self.width = len(self.cmd_bus.wr_cmd_data)
self.byte_width = len(self.cmd_bus.wr_cmd_be)
self.seg_count = len(self.cmd_bus.wr_cmd_valid)
self.seg_data_width = self.width // self.seg_count
self.seg_byte_width = self.seg_data_width // 8
self.seg_addr_width = len(self.cmd_bus.wr_cmd_addr) // self.seg_count
self.seg_be_width = self.seg_data_width // 8
self.seg_data_mask = 2**self.seg_data_width-1
self.seg_addr_mask = 2**self.seg_addr_width-1
self.seg_be_mask = 2**self.seg_be_width-1
self.log.info("Parallel Simple Dual Port RAM model configuration:")
self.log.info(" Memory size: %d bytes", len(self.mem))
self.log.info(" Segment count: %d", self.seg_count)
self.log.info(" Segment addr width: %d bits", self.seg_addr_width)
self.log.info(" Segment data width: %d bits (%d bytes)", self.seg_data_width, self.seg_byte_width)
self.log.info(" Total data width: %d bits (%d bytes)", self.width, self.width // 8)
assert self.seg_be_width*self.seg_count == len(self.cmd_bus.wr_cmd_be)
self.cmd_bus.wr_cmd_ready.setimmediatevalue(0)
cocotb.fork(self._run())
def set_pause_generator(self, generator=None):
if self._pause_cr is not None:
self._pause_cr.kill()
self._pause_cr = None
self._pause_generator = generator
if self._pause_generator is not None:
self._pause_cr = cocotb.fork(self._run_pause())
def clear_pause_generator(self):
self.set_pause_generator(None)
async def _run(self):
while True:
await ReadOnly()
cmd_be_sample = self.cmd_bus.wr_cmd_be.value
cmd_addr_sample = self.cmd_bus.wr_cmd_addr.value
cmd_data_sample = self.cmd_bus.wr_cmd_data.value
cmd_ready_sample = self.cmd_bus.wr_cmd_ready.value
cmd_valid_sample = self.cmd_bus.wr_cmd_valid.value
if self.reset is not None and self.reset.value:
await RisingEdge(self.clock)
self.cmd_bus.wr_cmd_ready.setimmediatevalue(0)
continue
await RisingEdge(self.clock)
# process segments
for seg in range(self.seg_count):
if cmd_ready_sample & cmd_valid_sample & (1 << seg):
seg_addr = (cmd_addr_sample >> self.seg_addr_width*seg) & self.seg_addr_mask
seg_data = (cmd_data_sample >> self.seg_data_width*seg) & self.seg_data_mask
seg_be = (cmd_be_sample >> self.seg_be_width*seg) & self.seg_be_mask
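                    # segments are interleaved across the full memory width, so the
                    # linear byte address is (segment address * segment count + segment
                    # index) scaled by the per-segment byte width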
addr = (seg_addr*self.seg_count+seg)*self.seg_byte_width
self.mem.seek(addr % self.size)
data = seg_data.to_bytes(self.seg_byte_width, 'little')
for i in range(self.seg_byte_width):
if seg_be & (1 << i):
self.mem.write(data[i:i+1])
else:
self.mem.seek(1, 1)
self.log.info("Write word seg: %d addr: 0x%08x be 0x%02x data %s",
seg, addr, seg_be, ' '.join((f'{c:02x}' for c in data)))
if self.pause:
self.cmd_bus.wr_cmd_ready <= 0
else:
self.cmd_bus.wr_cmd_ready <= 2**self.seg_count-1
async def _run_pause(self):
for val in self._pause_generator:
self.pause = val
await RisingEdge(self.clock)
class PsdpRamRead(Memory):
_cmd_signals = ["rd_cmd_addr", "rd_cmd_valid", "rd_cmd_ready"]
_resp_signals = ["rd_resp_data", "rd_resp_valid", "rd_resp_ready"]
def __init__(self, entity, name, clock, reset=None, size=1024, mem=None, *args, **kwargs):
self.log = logging.getLogger(f"cocotb.{entity._name}.{name}")
self.entity = entity
self.clock = clock
self.reset = reset
self.cmd_bus = Bus(self.entity, name, self._cmd_signals, **kwargs)
self.resp_bus = Bus(self.entity, name, self._resp_signals, **kwargs)
self.log.info("Parallel Simple Dual Port RAM model (read)")
self.log.info("Copyright (c) 2020 Alex Forencich")
super().__init__(size, mem, *args, **kwargs)
self.pause = False
self._pause_generator = None
self._pause_cr = None
self.width = len(self.resp_bus.rd_resp_data)
self.byte_width = self.width // 8
self.seg_count = len(self.cmd_bus.rd_cmd_valid)
self.seg_data_width = self.width // self.seg_count
self.seg_byte_width = self.seg_data_width // 8
self.seg_addr_width = len(self.cmd_bus.rd_cmd_addr) // self.seg_count
self.seg_data_mask = 2**self.seg_data_width-1
self.seg_addr_mask = 2**self.seg_addr_width-1
self.log.info("Parallel Simple Dual Port RAM model configuration:")
self.log.info(" Memory size: %d bytes", len(self.mem))
self.log.info(" Segment count: %d", self.seg_count)
self.log.info(" Segment addr width: %d bits", self.seg_addr_width)
self.log.info(" Segment data width: %d bits (%d bytes)", self.seg_data_width, self.seg_byte_width)
self.log.info(" Total data width: %d bits (%d bytes)", self.width, self.width // 8)
self.cmd_bus.rd_cmd_ready.setimmediatevalue(0)
self.resp_bus.rd_resp_valid.setimmediatevalue(0)
cocotb.fork(self._run())
def set_pause_generator(self, generator=None):
if self._pause_cr is not None:
self._pause_cr.kill()
self._pause_cr = None
self._pause_generator = generator
if self._pause_generator is not None:
self._pause_cr = cocotb.fork(self._run_pause())
def clear_pause_generator(self):
self.set_pause_generator(None)
async def _run(self):
pipeline = [[None for x in range(1)] for seg in range(self.seg_count)]
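        # one-entry pipeline register per segment, modelling one cycle of read
        # latency between accepting a command and presenting the response data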
cmd_ready = 0
resp_valid = 0
resp_data = 0
while True:
await ReadOnly()
cmd_addr_sample = self.cmd_bus.rd_cmd_addr.value
cmd_ready_sample = self.cmd_bus.rd_cmd_ready.value
cmd_valid_sample = self.cmd_bus.rd_cmd_valid.value
resp_ready_sample = self.resp_bus.rd_resp_ready.value
resp_valid_sample = self.resp_bus.rd_resp_valid.value
if self.reset is not None and self.reset.value:
await RisingEdge(self.clock)
self.cmd_bus.rd_cmd_ready.setimmediatevalue(0)
self.resp_bus.rd_resp_valid.setimmediatevalue(0)
cmd_ready = 0
resp_valid = 0
continue
await RisingEdge(self.clock)
# process segments
for seg in range(self.seg_count):
seg_mask = 1 << seg
if (resp_ready_sample & seg_mask) or not (resp_valid_sample & seg_mask):
if pipeline[seg][-1] is not None:
resp_data &= ~(self.seg_data_mask << self.seg_data_width*seg)
resp_data |= ((pipeline[seg][-1] & self.seg_data_mask) << self.seg_data_width*seg)
resp_valid |= seg_mask
pipeline[seg][-1] = None
else:
resp_valid &= ~seg_mask
for i in range(len(pipeline[seg])-1, 0, -1):
if pipeline[seg][i] is None:
                            # shift entries within this segment's pipeline (not across segments)
                            pipeline[seg][i] = pipeline[seg][i-1]
                            pipeline[seg][i-1] = None
if cmd_ready_sample & cmd_valid_sample & seg_mask:
seg_addr = (cmd_addr_sample >> self.seg_addr_width*seg) & self.seg_addr_mask
addr = (seg_addr*self.seg_count+seg)*self.seg_byte_width
self.mem.seek(addr % self.size)
data = self.mem.read(self.seg_byte_width)
pipeline[seg][0] = int.from_bytes(data, 'little')
self.log.info("Read word seg: %d addr: 0x%08x data %s",
seg, addr, ' '.join((f'{c:02x}' for c in data)))
if (not resp_valid & seg_mask) or None in pipeline[seg]:
cmd_ready |= seg_mask
else:
cmd_ready &= ~seg_mask
if self.pause:
self.cmd_bus.rd_cmd_ready <= 0
else:
self.cmd_bus.rd_cmd_ready <= cmd_ready
self.resp_bus.rd_resp_data <= resp_data
self.resp_bus.rd_resp_valid <= resp_valid
async def _run_pause(self):
for val in self._pause_generator:
self.pause = val
await RisingEdge(self.clock)
class PsdpRam(Memory):
def __init__(self, entity, name, clock, reset=None, size=1024, mem=None, *args, **kwargs):
self.write_if = None
self.read_if = None
super().__init__(size, mem, *args, **kwargs)
self.write_if = PsdpRamWrite(entity, name, clock, reset, mem=self.mem)
self.read_if = PsdpRamRead(entity, name, clock, reset, mem=self.mem)
|
the-stack_0_21330 | """
SMS backend for sending text messages using service SMSAPI.pl
Requires:
- https://github.com/smsapi/smsapi-python-client
- SMSAPI_TOKEN to be set
"""
from typing import List, Optional
from django.conf import settings # type: ignore
from django.core.exceptions import ImproperlyConfigured # type: ignore
from sms.backends.base import BaseSmsBackend
from sms.message import Message
try:
from smsapi.client import SmsApiPlClient # type: ignore
HAS_SMSAPI = True
except ImportError:
HAS_SMSAPI = False
class SmsBackend(BaseSmsBackend):
def __init__(self, fail_silently: bool = False, **kwargs) -> None:
super().__init__(fail_silently=fail_silently, **kwargs)
if not HAS_SMSAPI and not self.fail_silently:
raise ImproperlyConfigured(
"You're using the SMS backend "
"'sms.backends.smsapi.SmsBackend' without having "
"'smsapi-client' installed. Install 'smsapi-client' or use "
"another SMS backend."
)
        access_key: Optional[str] = getattr(settings, 'SMSAPI_TOKEN', None)
if not access_key and not self.fail_silently:
raise ImproperlyConfigured(
"You're using the SMS backend "
"'sms.backends.smsapi.SmsBackend' without having the "
"setting 'SMSAPI_TOKEN' set."
)
self.client = None
if HAS_SMSAPI:
self.client = SmsApiPlClient(access_token=access_key)
def send_messages(self, messages: List[Message]) -> int:
if not self.client:
return 0
msg_count: int = 0
for message in messages:
for recipient in message.recipients:
try:
message.send_results = self.client.sms.send(
to=recipient,
message=message.body
)
except Exception as exc:
if not self.fail_silently:
raise exc
msg_count += 1
return msg_count
|
the-stack_0_21332 | """Template helper methods for rendering strings with Open Peer Power data."""
import base64
from datetime import datetime
from functools import wraps
import json
import logging
import math
import random
import re
from typing import Any, Dict, Iterable, List, Optional, Union
import jinja2
from jinja2 import contextfilter, contextfunction
from jinja2.sandbox import ImmutableSandboxedEnvironment
from jinja2.utils import Namespace # type: ignore
from openpeerpower.const import (
ATTR_ENTITY_ID,
ATTR_LATITUDE,
ATTR_LONGITUDE,
ATTR_UNIT_OF_MEASUREMENT,
MATCH_ALL,
STATE_UNKNOWN,
)
from openpeerpower.core import State, callback, split_entity_id, valid_entity_id
from openpeerpower.exceptions import TemplateError
from openpeerpower.helpers import location as loc_helper
from openpeerpower.helpers.typing import OpenPeerPowerType, TemplateVarsType
from openpeerpower.loader import bind_opp
from openpeerpower.util import convert, dt as dt_util, location as loc_util
from openpeerpower.util.async_ import run_callback_threadsafe
# mypy: allow-untyped-calls, allow-untyped-defs
# mypy: no-check-untyped-defs, no-warn-return-any
_LOGGER = logging.getLogger(__name__)
_SENTINEL = object()
DATE_STR_FORMAT = "%Y-%m-%d %H:%M:%S"
_RENDER_INFO = "template.render_info"
_ENVIRONMENT = "template.environment"
_RE_NONE_ENTITIES = re.compile(r"distance\(|closest\(", re.I | re.M)
_RE_GET_ENTITIES = re.compile(
r"(?:(?:states\.|(?:is_state|is_state_attr|state_attr|states)"
r"\((?:[\ \'\"]?))([\w]+\.[\w]+)|([\w]+))",
re.I | re.M,
)
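# group 1 captures a full entity id referenced either as states.<domain>.<object> or
# as the first argument of is_state/is_state_attr/state_attr/states(...); group 2
# captures a bare name that may resolve to an entity id via template variables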
_RE_JINJA_DELIMITERS = re.compile(r"\{%|\{\{")
@bind_opp
def attach(opp, obj):
"""Recursively attach opp to all template instances in list and dict."""
if isinstance(obj, list):
for child in obj:
attach(opp, child)
elif isinstance(obj, dict):
for child in obj.values():
attach(opp, child)
elif isinstance(obj, Template):
obj.opp = opp
def render_complex(value, variables=None):
"""Recursive template creator helper function."""
if isinstance(value, list):
return [render_complex(item, variables) for item in value]
if isinstance(value, dict):
return {key: render_complex(item, variables) for key, item in value.items()}
if isinstance(value, Template):
return value.async_render(variables)
return value
def extract_entities(
template: Optional[str], variables: Optional[Dict[str, Any]] = None
) -> Union[str, List[str]]:
"""Extract all entities for state_changed listener from template string."""
if template is None or _RE_JINJA_DELIMITERS.search(template) is None:
return []
if _RE_NONE_ENTITIES.search(template):
return MATCH_ALL
extraction = _RE_GET_ENTITIES.findall(template)
extraction_final = []
for result in extraction:
if (
result[0] == "trigger.entity_id"
and variables
and "trigger" in variables
and "entity_id" in variables["trigger"]
):
extraction_final.append(variables["trigger"]["entity_id"])
elif result[0]:
extraction_final.append(result[0])
if (
variables
and result[1] in variables
and isinstance(variables[result[1]], str)
and valid_entity_id(variables[result[1]])
):
extraction_final.append(variables[result[1]])
if extraction_final:
return list(set(extraction_final))
return MATCH_ALL
def _true(arg: Any) -> bool:
return True
class RenderInfo:
"""Holds information about a template render."""
def __init__(self, template):
"""Initialise."""
self.template = template
# Will be set sensibly once frozen.
self.filter_lifecycle = _true
self._result = None
self._exception = None
self._all_states = False
self._domains = []
self._entities = []
def filter(self, entity_id: str) -> bool:
"""Template should re-render if the state changes."""
return entity_id in self._entities
def _filter_lifecycle(self, entity_id: str) -> bool:
"""Template should re-render if the state changes."""
return (
split_entity_id(entity_id)[0] in self._domains
or entity_id in self._entities
)
@property
def result(self) -> str:
"""Results of the template computation."""
if self._exception is not None:
raise self._exception
return self._result
def _freeze(self) -> None:
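        # after freezing: if all states were touched, filter_lifecycle stays the
        # always-true default; if no whole domain was iterated it is restricted to
        # the exact entities seen; otherwise it matches collected domains or entities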
self._entities = frozenset(self._entities)
if self._all_states:
# Leave lifecycle_filter as True
del self._domains
elif not self._domains:
del self._domains
self.filter_lifecycle = self.filter
else:
self._domains = frozenset(self._domains)
self.filter_lifecycle = self._filter_lifecycle
class Template:
"""Class to hold a template and manage caching and rendering."""
def __init__(self, template, opp=None):
"""Instantiate a template."""
if not isinstance(template, str):
raise TypeError("Expected template to be a string")
self.template: str = template
self._compiled_code = None
self._compiled = None
self.opp = opp
@property
def _env(self):
if self.opp is None:
return _NO_OPP_ENV
ret = self.opp.data.get(_ENVIRONMENT)
if ret is None:
ret = self.opp.data[_ENVIRONMENT] = TemplateEnvironment(self.opp)
return ret
def ensure_valid(self):
"""Return if template is valid."""
if self._compiled_code is not None:
return
try:
self._compiled_code = self._env.compile(self.template)
except jinja2.exceptions.TemplateSyntaxError as err:
raise TemplateError(err)
def extract_entities(
self, variables: Dict[str, Any] = None
) -> Union[str, List[str]]:
"""Extract all entities for state_changed listener."""
return extract_entities(self.template, variables)
def render(self, variables: TemplateVarsType = None, **kwargs: Any) -> str:
"""Render given template."""
if variables is not None:
kwargs.update(variables)
return run_callback_threadsafe(
self.opp.loop, self.async_render, kwargs
).result()
@callback
def async_render(self, variables: TemplateVarsType = None, **kwargs: Any) -> str:
"""Render given template.
This method must be run in the event loop.
"""
compiled = self._compiled or self._ensure_compiled()
if variables is not None:
kwargs.update(variables)
try:
return compiled.render(kwargs).strip()
except jinja2.TemplateError as err:
raise TemplateError(err)
@callback
def async_render_to_info(
self, variables: TemplateVarsType = None, **kwargs: Any
) -> RenderInfo:
"""Render the template and collect an entity filter."""
assert self.opp and _RENDER_INFO not in self.opp.data
render_info = self.opp.data[_RENDER_INFO] = RenderInfo(self)
# pylint: disable=protected-access
try:
render_info._result = self.async_render(variables, **kwargs)
except TemplateError as ex:
render_info._exception = ex
finally:
del self.opp.data[_RENDER_INFO]
render_info._freeze()
return render_info
def render_with_possible_json_value(self, value, error_value=_SENTINEL):
"""Render template with value exposed.
If valid JSON will expose value_json too.
"""
return run_callback_threadsafe(
self.opp.loop,
self.async_render_with_possible_json_value,
value,
error_value,
).result()
@callback
def async_render_with_possible_json_value(
self, value, error_value=_SENTINEL, variables=None
):
"""Render template with value exposed.
If valid JSON will expose value_json too.
This method must be run in the event loop.
"""
if self._compiled is None:
self._ensure_compiled()
variables = dict(variables or {})
variables["value"] = value
try:
variables["value_json"] = json.loads(value)
except (ValueError, TypeError):
pass
try:
return self._compiled.render(variables).strip()
except jinja2.TemplateError as ex:
if error_value is _SENTINEL:
_LOGGER.error(
"Error parsing value: %s (value: %s, template: %s)",
ex,
value,
self.template,
)
return value if error_value is _SENTINEL else error_value
def _ensure_compiled(self):
"""Bind a template to a specific opp instance."""
self.ensure_valid()
assert self.opp is not None, "opp variable not set on template"
env = self._env
self._compiled = jinja2.Template.from_code(
env, self._compiled_code, env.globals, None
)
return self._compiled
def __eq__(self, other):
"""Compare template with another."""
return (
self.__class__ == other.__class__
and self.template == other.template
and self.opp == other.opp
)
def __hash__(self):
"""Hash code for template."""
return hash(self.template)
def __repr__(self):
"""Representation of Template."""
return 'Template("' + self.template + '")'
class AllStates:
"""Class to expose all HA states as attributes."""
def __init__(self, opp):
"""Initialize all states."""
self._opp = opp
def __getattr__(self, name):
"""Return the domain state."""
if "." in name:
if not valid_entity_id(name):
raise TemplateError(f"Invalid entity ID '{name}'")
return _get_state(self._opp, name)
if not valid_entity_id(f"{name}.entity"):
raise TemplateError(f"Invalid domain name '{name}'")
return DomainStates(self._opp, name)
def _collect_all(self):
render_info = self._opp.data.get(_RENDER_INFO)
if render_info is not None:
# pylint: disable=protected-access
render_info._all_states = True
def __iter__(self):
"""Return all states."""
self._collect_all()
return iter(
_wrap_state(self._opp, state)
for state in sorted(
self._opp.states.async_all(), key=lambda state: state.entity_id
)
)
def __len__(self):
"""Return number of states."""
self._collect_all()
return len(self._opp.states.async_entity_ids())
def __call__(self, entity_id):
"""Return the states."""
state = _get_state(self._opp, entity_id)
return STATE_UNKNOWN if state is None else state.state
def __repr__(self):
"""Representation of All States."""
return "<template AllStates>"
class DomainStates:
"""Class to expose a specific HA domain as attributes."""
def __init__(self, opp, domain):
"""Initialize the domain states."""
self._opp = opp
self._domain = domain
def __getattr__(self, name):
"""Return the states."""
entity_id = f"{self._domain}.{name}"
if not valid_entity_id(entity_id):
raise TemplateError(f"Invalid entity ID '{entity_id}'")
return _get_state(self._opp, entity_id)
def _collect_domain(self) -> None:
entity_collect = self._opp.data.get(_RENDER_INFO)
if entity_collect is not None:
# pylint: disable=protected-access
entity_collect._domains.append(self._domain)
def __iter__(self):
"""Return the iteration over all the states."""
self._collect_domain()
return iter(
sorted(
(
_wrap_state(self._opp, state)
for state in self._opp.states.async_all()
if state.domain == self._domain
),
key=lambda state: state.entity_id,
)
)
def __len__(self) -> int:
"""Return number of states."""
self._collect_domain()
return len(self._opp.states.async_entity_ids(self._domain))
def __repr__(self) -> str:
"""Representation of Domain States."""
return f"<template DomainStates('{self._domain}')>"
class TemplateState(State):
"""Class to represent a state object in a template."""
# Inheritance is done so functions that check against State keep working
# pylint: disable=super-init-not-called
def __init__(self, opp, state):
"""Initialize template state."""
self._opp = opp
self._state = state
def _access_state(self):
state = object.__getattribute__(self, "_state")
opp = object.__getattribute__(self, "_opp")
_collect_state(opp, state.entity_id)
return state
@property
def state_with_unit(self) -> str:
"""Return the state concatenated with the unit if available."""
state = object.__getattribute__(self, "_access_state")()
unit = state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
if unit is None:
return state.state
return f"{state.state} {unit}"
def __getattribute__(self, name):
"""Return an attribute of the state."""
# This one doesn't count as an access of the state
# since we either found it by looking direct for the ID
# or got it off an iterator.
if name == "entity_id" or name in object.__dict__:
state = object.__getattribute__(self, "_state")
return getattr(state, name)
if name in TemplateState.__dict__:
return object.__getattribute__(self, name)
state = object.__getattribute__(self, "_access_state")()
return getattr(state, name)
def __repr__(self) -> str:
"""Representation of Template State."""
state = object.__getattribute__(self, "_access_state")()
rep = state.__repr__()
return f"<template {rep[1:]}"
def _collect_state(opp, entity_id):
entity_collect = opp.data.get(_RENDER_INFO)
if entity_collect is not None:
# pylint: disable=protected-access
entity_collect._entities.append(entity_id)
def _wrap_state(opp, state):
"""Wrap a state."""
return None if state is None else TemplateState(opp, state)
def _get_state(opp, entity_id):
state = opp.states.get(entity_id)
if state is None:
# Only need to collect if none, if not none collect first actual
# access to the state properties in the state wrapper.
_collect_state(opp, entity_id)
return None
return _wrap_state(opp, state)
def _resolve_state(opp, entity_id_or_state):
"""Return state or entity_id if given."""
if isinstance(entity_id_or_state, State):
return entity_id_or_state
if isinstance(entity_id_or_state, str):
return _get_state(opp, entity_id_or_state)
return None
def expand(opp: OpenPeerPowerType, *args: Any) -> Iterable[State]:
"""Expand out any groups into entity states."""
search = list(args)
found = {}
while search:
entity = search.pop()
if isinstance(entity, str):
entity_id = entity
entity = _get_state(opp, entity)
if entity is None:
continue
elif isinstance(entity, State):
entity_id = entity.entity_id
elif isinstance(entity, Iterable):
search += entity
continue
else:
# ignore other types
continue
from openpeerpower.components import group
if split_entity_id(entity_id)[0] == group.DOMAIN:
# Collect state will be called in here since it's wrapped
group_entities = entity.attributes.get(ATTR_ENTITY_ID)
if group_entities:
search += group_entities
else:
found[entity_id] = entity
return sorted(found.values(), key=lambda a: a.entity_id)
def closest(opp, *args):
"""Find closest entity.
Closest to home:
closest(states)
closest(states.device_tracker)
closest('group.children')
closest(states.group.children)
Closest to a point:
closest(23.456, 23.456, 'group.children')
closest('zone.school', 'group.children')
closest(states.zone.school, 'group.children')
As a filter:
states | closest
states.device_tracker | closest
['group.children', states.device_tracker] | closest
'group.children' | closest(23.456, 23.456)
states.device_tracker | closest('zone.school')
'group.children' | closest(states.zone.school)
"""
if len(args) == 1:
latitude = opp.config.latitude
longitude = opp.config.longitude
entities = args[0]
elif len(args) == 2:
point_state = _resolve_state(opp, args[0])
if point_state is None:
_LOGGER.warning("Closest:Unable to find state %s", args[0])
return None
if not loc_helper.has_location(point_state):
_LOGGER.warning(
"Closest:State does not contain valid location: %s", point_state
)
return None
latitude = point_state.attributes.get(ATTR_LATITUDE)
longitude = point_state.attributes.get(ATTR_LONGITUDE)
entities = args[1]
else:
latitude = convert(args[0], float)
longitude = convert(args[1], float)
if latitude is None or longitude is None:
_LOGGER.warning(
"Closest:Received invalid coordinates: %s, %s", args[0], args[1]
)
return None
entities = args[2]
states = expand(opp, entities)
# state will already be wrapped here
return loc_helper.closest(latitude, longitude, states)
def closest_filter(opp, *args):
"""Call closest as a filter. Need to reorder arguments."""
new_args = list(args[1:])
new_args.append(args[0])
return closest(opp, *new_args)
def distance(opp, *args):
"""Calculate distance.
Will calculate distance from home to a point or between points.
Points can be passed in using state objects or lat/lng coordinates.
"""
locations = []
to_process = list(args)
while to_process:
value = to_process.pop(0)
point_state = _resolve_state(opp, value)
if point_state is None:
# We expect this and next value to be lat&lng
if not to_process:
_LOGGER.warning(
"Distance:Expected latitude and longitude, got %s", value
)
return None
value_2 = to_process.pop(0)
latitude = convert(value, float)
longitude = convert(value_2, float)
if latitude is None or longitude is None:
_LOGGER.warning(
"Distance:Unable to process latitude and longitude: %s, %s",
value,
value_2,
)
return None
else:
if not loc_helper.has_location(point_state):
_LOGGER.warning(
"distance:State does not contain valid location: %s", point_state
)
return None
latitude = point_state.attributes.get(ATTR_LATITUDE)
longitude = point_state.attributes.get(ATTR_LONGITUDE)
locations.append((latitude, longitude))
if len(locations) == 1:
return opp.config.distance(*locations[0])
return opp.config.units.length(loc_util.distance(*locations[0] + locations[1]), "m")
def is_state(opp: OpenPeerPowerType, entity_id: str, state: State) -> bool:
"""Test if a state is a specific value."""
state_obj = _get_state(opp, entity_id)
return state_obj is not None and state_obj.state == state
def is_state_attr(opp, entity_id, name, value):
"""Test if a state's attribute is a specific value."""
attr = state_attr(opp, entity_id, name)
return attr is not None and attr == value
def state_attr(opp, entity_id, name):
"""Get a specific attribute from a state."""
state_obj = _get_state(opp, entity_id)
if state_obj is not None:
return state_obj.attributes.get(name)
return None
def forgiving_round(value, precision=0, method="common"):
"""Round accepted strings."""
try:
# support rounding methods like jinja
multiplier = float(10 ** precision)
if method == "ceil":
value = math.ceil(float(value) * multiplier) / multiplier
elif method == "floor":
value = math.floor(float(value) * multiplier) / multiplier
elif method == "half":
value = round(float(value) * 2) / 2
else:
# if method is common or something else, use common rounding
value = round(float(value), precision)
return int(value) if precision == 0 else value
except (ValueError, TypeError):
# If value can't be converted to float
return value
def multiply(value, amount):
"""Filter to convert value to float and multiply it."""
try:
return float(value) * amount
except (ValueError, TypeError):
# If value can't be converted to float
return value
def logarithm(value, base=math.e):
"""Filter to get logarithm of the value with a specific base."""
try:
return math.log(float(value), float(base))
except (ValueError, TypeError):
return value
def sine(value):
"""Filter to get sine of the value."""
try:
return math.sin(float(value))
except (ValueError, TypeError):
return value
def cosine(value):
"""Filter to get cosine of the value."""
try:
return math.cos(float(value))
except (ValueError, TypeError):
return value
def tangent(value):
"""Filter to get tangent of the value."""
try:
return math.tan(float(value))
except (ValueError, TypeError):
return value
def arc_sine(value):
"""Filter to get arc sine of the value."""
try:
return math.asin(float(value))
except (ValueError, TypeError):
return value
def arc_cosine(value):
"""Filter to get arc cosine of the value."""
try:
return math.acos(float(value))
except (ValueError, TypeError):
return value
def arc_tangent(value):
"""Filter to get arc tangent of the value."""
try:
return math.atan(float(value))
except (ValueError, TypeError):
return value
def arc_tangent2(*args):
"""Filter to calculate four quadrant arc tangent of y / x."""
try:
if len(args) == 1 and isinstance(args[0], (list, tuple)):
args = args[0]
return math.atan2(float(args[0]), float(args[1]))
except (ValueError, TypeError):
return args
def square_root(value):
"""Filter to get square root of the value."""
try:
return math.sqrt(float(value))
except (ValueError, TypeError):
return value
def timestamp_custom(value, date_format=DATE_STR_FORMAT, local=True):
"""Filter to convert given timestamp to format."""
try:
date = dt_util.utc_from_timestamp(value)
if local:
date = dt_util.as_local(date)
return date.strftime(date_format)
except (ValueError, TypeError):
# If timestamp can't be converted
return value
def timestamp_local(value):
"""Filter to convert given timestamp to local date/time."""
try:
return dt_util.as_local(dt_util.utc_from_timestamp(value)).strftime(
DATE_STR_FORMAT
)
except (ValueError, TypeError):
# If timestamp can't be converted
return value
def timestamp_utc(value):
"""Filter to convert given timestamp to UTC date/time."""
try:
return dt_util.utc_from_timestamp(value).strftime(DATE_STR_FORMAT)
except (ValueError, TypeError):
# If timestamp can't be converted
return value
def forgiving_as_timestamp(value):
"""Try to convert value to timestamp."""
try:
return dt_util.as_timestamp(value)
except (ValueError, TypeError):
return None
def strptime(string, fmt):
"""Parse a time string to datetime."""
try:
return datetime.strptime(string, fmt)
except (ValueError, AttributeError):
return string
def fail_when_undefined(value):
"""Filter to force a failure when the value is undefined."""
if isinstance(value, jinja2.Undefined):
value()
return value
def forgiving_float(value):
"""Try to convert value to a float."""
try:
return float(value)
except (ValueError, TypeError):
return value
def regex_match(value, find="", ignorecase=False):
"""Match value using regex."""
if not isinstance(value, str):
value = str(value)
flags = re.I if ignorecase else 0
return bool(re.match(find, value, flags))
def regex_replace(value="", find="", replace="", ignorecase=False):
"""Replace using regex."""
if not isinstance(value, str):
value = str(value)
flags = re.I if ignorecase else 0
regex = re.compile(find, flags)
return regex.sub(replace, value)
def regex_search(value, find="", ignorecase=False):
"""Search using regex."""
if not isinstance(value, str):
value = str(value)
flags = re.I if ignorecase else 0
return bool(re.search(find, value, flags))
def regex_findall_index(value, find="", index=0, ignorecase=False):
"""Find all matches using regex and then pick specific match index."""
if not isinstance(value, str):
value = str(value)
flags = re.I if ignorecase else 0
return re.findall(find, value, flags)[index]
def bitwise_and(first_value, second_value):
"""Perform a bitwise and operation."""
return first_value & second_value
def bitwise_or(first_value, second_value):
"""Perform a bitwise or operation."""
return first_value | second_value
def base64_encode(value):
"""Perform base64 encode."""
return base64.b64encode(value.encode("utf-8")).decode("utf-8")
def base64_decode(value):
"""Perform base64 denode."""
return base64.b64decode(value).decode("utf-8")
def ordinal(value):
"""Perform ordinal conversion."""
return str(value) + (
list(["th", "st", "nd", "rd"] + ["th"] * 6)[(int(str(value)[-1])) % 10]
if int(str(value)[-2:]) % 100 not in range(11, 14)
else "th"
)
def from_json(value):
"""Convert a JSON string to an object."""
return json.loads(value)
def to_json(value):
"""Convert an object to a JSON string."""
return json.dumps(value)
@contextfilter
def random_every_time(context, values):
"""Choose a random value.
Unlike Jinja's random filter,
this is context-dependent to avoid caching the chosen value.
"""
return random.choice(values)
class TemplateEnvironment(ImmutableSandboxedEnvironment):
"""The Open Peer Power template environment."""
def __init__(self, opp):
"""Initialise template environment."""
super().__init__()
self.opp = opp
self.filters["round"] = forgiving_round
self.filters["multiply"] = multiply
self.filters["log"] = logarithm
self.filters["sin"] = sine
self.filters["cos"] = cosine
self.filters["tan"] = tangent
self.filters["asin"] = arc_sine
self.filters["acos"] = arc_cosine
self.filters["atan"] = arc_tangent
self.filters["atan2"] = arc_tangent2
self.filters["sqrt"] = square_root
self.filters["as_timestamp"] = forgiving_as_timestamp
self.filters["timestamp_custom"] = timestamp_custom
self.filters["timestamp_local"] = timestamp_local
self.filters["timestamp_utc"] = timestamp_utc
self.filters["to_json"] = to_json
self.filters["from_json"] = from_json
self.filters["is_defined"] = fail_when_undefined
self.filters["max"] = max
self.filters["min"] = min
self.filters["random"] = random_every_time
self.filters["base64_encode"] = base64_encode
self.filters["base64_decode"] = base64_decode
self.filters["ordinal"] = ordinal
self.filters["regex_match"] = regex_match
self.filters["regex_replace"] = regex_replace
self.filters["regex_search"] = regex_search
self.filters["regex_findall_index"] = regex_findall_index
self.filters["bitwise_and"] = bitwise_and
self.filters["bitwise_or"] = bitwise_or
self.filters["ord"] = ord
self.globals["log"] = logarithm
self.globals["sin"] = sine
self.globals["cos"] = cosine
self.globals["tan"] = tangent
self.globals["sqrt"] = square_root
self.globals["pi"] = math.pi
self.globals["tau"] = math.pi * 2
self.globals["e"] = math.e
self.globals["asin"] = arc_sine
self.globals["acos"] = arc_cosine
self.globals["atan"] = arc_tangent
self.globals["atan2"] = arc_tangent2
self.globals["float"] = forgiving_float
self.globals["now"] = dt_util.now
self.globals["utcnow"] = dt_util.utcnow
self.globals["as_timestamp"] = forgiving_as_timestamp
self.globals["relative_time"] = dt_util.get_age
self.globals["strptime"] = strptime
if opp is None:
return
# We mark these as a context functions to ensure they get
# evaluated fresh with every execution, rather than executed
# at compile time and the value stored. The context itself
# can be discarded, we only need to get at the opp object.
def oppfunction(func):
"""Wrap function that depend on opp."""
@wraps(func)
def wrapper(*args, **kwargs):
return func(opp, *args[1:], **kwargs)
return contextfunction(wrapper)
self.globals["expand"] = oppfunction(expand)
self.filters["expand"] = contextfilter(self.globals["expand"])
self.globals["closest"] = oppfunction(closest)
self.filters["closest"] = contextfilter(oppfunction(closest_filter))
self.globals["distance"] = oppfunction(distance)
self.globals["is_state"] = oppfunction(is_state)
self.globals["is_state_attr"] = oppfunction(is_state_attr)
self.globals["state_attr"] = oppfunction(state_attr)
self.globals["states"] = AllStates(opp)
def is_safe_callable(self, obj):
"""Test if callback is safe."""
return isinstance(obj, AllStates) or super().is_safe_callable(obj)
def is_safe_attribute(self, obj, attr, value):
"""Test if attribute is safe."""
return isinstance(obj, Namespace) or super().is_safe_attribute(obj, attr, value)
_NO_OPP_ENV = TemplateEnvironment(None)
|
the-stack_0_21333 | """Runner implementation."""
import logging
import multiprocessing
import multiprocessing.pool
import os
from dataclasses import dataclass
from pathlib import Path
from typing import TYPE_CHECKING, Any, FrozenSet, Generator, List, Optional, Set, Union
import ansiblelint.skip_utils
import ansiblelint.utils
from ansiblelint._internal.rules import LoadingFailureRule
from ansiblelint.errors import MatchError
from ansiblelint.file_utils import Lintable, expand_dirs_in_lintables
from ansiblelint.rules.AnsibleSyntaxCheckRule import AnsibleSyntaxCheckRule
if TYPE_CHECKING:
from argparse import Namespace
from ansiblelint.rules import RulesCollection
_logger = logging.getLogger(__name__)
@dataclass
class LintResult:
"""Class that tracks result of linting."""
matches: List[MatchError]
files: Set[Lintable]
class Runner:
"""Runner class performs the linting process."""
# pylint: disable=too-many-arguments
def __init__(
self,
*lintables: Union[Lintable, str],
rules: "RulesCollection",
tags: FrozenSet[Any] = frozenset(),
skip_list: List[str] = [],
exclude_paths: List[str] = [],
verbosity: int = 0,
checked_files: Optional[Set[Lintable]] = None
) -> None:
"""Initialize a Runner instance."""
self.rules = rules
self.lintables: Set[Lintable] = set()
# Assure consistent type
for item in lintables:
if not isinstance(item, Lintable):
item = Lintable(item)
self.lintables.add(item)
# Expand folders (roles) to their components
expand_dirs_in_lintables(self.lintables)
self.tags = tags
self.skip_list = skip_list
self._update_exclude_paths(exclude_paths)
self.verbosity = verbosity
if checked_files is None:
checked_files = set()
self.checked_files = checked_files
def _update_exclude_paths(self, exclude_paths: List[str]) -> None:
if exclude_paths:
# These will be (potentially) relative paths
paths = ansiblelint.file_utils.expand_paths_vars(exclude_paths)
# Since ansiblelint.utils.find_children returns absolute paths,
# and the list of files we create in `Runner.run` can contain both
# relative and absolute paths, we need to cover both bases.
self.exclude_paths = paths + [os.path.abspath(p) for p in paths]
else:
self.exclude_paths = []
def is_excluded(self, file_path: str) -> bool:
"""Verify if a file path should be excluded."""
# Any will short-circuit as soon as something returns True, but will
# be poor performance for the case where the path under question is
# not excluded.
# Exclusions should be evaluated only using absolute paths in order
# to work correctly.
if not file_path:
return False
abs_path = os.path.abspath(file_path)
_file_path = Path(file_path)
return any(
abs_path.startswith(path) or _file_path.match(path)
for path in self.exclude_paths
)
def run(self) -> List[MatchError]:
"""Execute the linting process."""
files: List[Lintable] = list()
matches: List[MatchError] = list()
# remove exclusions
for lintable in self.lintables.copy():
if self.is_excluded(str(lintable.path.resolve())):
_logger.debug("Excluded %s", lintable)
self.lintables.remove(lintable)
# -- phase 1 : syntax check in parallel --
def worker(lintable: Lintable) -> List[MatchError]:
return AnsibleSyntaxCheckRule._get_ansible_syntax_check_matches(lintable)
# playbooks: List[Lintable] = []
for lintable in self.lintables:
if lintable.kind != 'playbook':
continue
files.append(lintable)
pool = multiprocessing.pool.ThreadPool(processes=multiprocessing.cpu_count())
return_list = pool.map(worker, files, chunksize=1)
pool.close()
pool.join()
for data in return_list:
matches.extend(data)
# -- phase 2 ---
if not matches:
# do our processing only when ansible syntax check passed in order
            # to avoid causing runtime exceptions. Our processing is not
            # resilient enough to process garbage.
matches.extend(self._emit_matches(files))
# remove duplicates from files list
files = [value for n, value in enumerate(files) if value not in files[:n]]
for file in self.lintables:
if file in self.checked_files:
continue
_logger.debug(
"Examining %s of type %s",
ansiblelint.file_utils.normpath(file.path),
file.kind,
)
matches.extend(
self.rules.run(file, tags=set(self.tags), skip_list=self.skip_list)
)
# update list of checked files
self.checked_files.update(self.lintables)
# remove any matches made inside excluded files
matches = list(
filter(lambda match: not self.is_excluded(match.filename), matches)
)
return sorted(set(matches))
def _emit_matches(self, files: List[Lintable]) -> Generator[MatchError, None, None]:
visited: Set[Lintable] = set()
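        # keep iterating until no new lintables are discovered: find_children can add
        # children that in turn need to be walked for their own children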
while visited != self.lintables:
for lintable in self.lintables - visited:
try:
for child in ansiblelint.utils.find_children(lintable):
if self.is_excluded(str(child.path)):
continue
self.lintables.add(child)
files.append(child)
except MatchError as e:
if not e.filename:
e.filename = str(lintable.path)
e.rule = LoadingFailureRule()
yield e
except AttributeError:
yield MatchError(
filename=str(lintable.path), rule=LoadingFailureRule()
)
visited.add(lintable)
def _get_matches(rules: "RulesCollection", options: "Namespace") -> LintResult:
lintables = ansiblelint.utils.get_lintables(options=options, args=options.lintables)
matches = list()
checked_files: Set[Lintable] = set()
runner = Runner(
*lintables,
rules=rules,
tags=options.tags,
skip_list=options.skip_list,
exclude_paths=options.exclude_paths,
verbosity=options.verbosity,
checked_files=checked_files
)
matches.extend(runner.run())
# Assure we do not print duplicates and the order is consistent
matches = sorted(set(matches))
    # Convert reported filenames into human readable ones, so we hide the
# fact we used temporary files when processing input from stdin.
for match in matches:
for lintable in lintables:
if match.filename == lintable.filename:
match.filename = lintable.name
break
return LintResult(matches=matches, files=checked_files)
|
the-stack_0_21337 | # coding=utf-8
# Copyright 2022 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for utils."""
import tensorflow as tf
import utils # local file import from experimental.language_structure.vrnn
class UtilsTest(tf.test.TestCase):
def test_gumbel_softmax_sample_shape(self):
sampler = utils.GumbelSoftmaxSampler(temperature=0.5)
logits = tf.ones(shape=[5, 10])
samples = sampler(logits)
self.assertEqual([5, 10], samples.shape.as_list())
def test_get_last_step(self):
batch_size = 5
hidden_size = 4
inputs = tf.tile(
tf.reshape(tf.range(1, 11), [1, 10, 1]), [batch_size, 1, hidden_size])
seqlen = tf.constant([2, 3, 10, 1, 0], dtype=tf.int32)
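    # inputs[:, t, :] == t + 1, so for a non-empty sequence the value at the last
    # valid step (index seqlen - 1) equals seqlen, and the empty sequence yields 0;
    # hence the expected output is seqlen broadcast across the hidden dimension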
last_step = utils.get_last_step(inputs, seqlen)
expected = tf.tile(tf.expand_dims(seqlen, axis=1), [1, hidden_size])
self.assertAllEqual(last_step, expected)
def test_to_one_hot(self):
inputs = tf.constant([[0.6, 0.3, 0.1], [0.1, 0.8, 0.1]])
expected = tf.constant([[1, 0, 0], [0, 1, 0]])
self.assertAllEqual(utils.to_one_hot(inputs), expected)
def test_to_one_hot_tie_inputs(self):
inputs = tf.constant([[0.5, 0.5], [0.5, 0.5]])
expected = tf.constant([[1, 0], [1, 0]])
self.assertAllEqual(utils.to_one_hot(inputs), expected)
def test_mlp_with_final_activitation(self):
output_sizes = [5, 6]
final_activation = tf.keras.layers.ReLU()
test_model = utils.MLP(
output_sizes=output_sizes, final_activation=final_activation)
input_tensor = tf.keras.Input(shape=(8))
output_tensor = test_model(input_tensor)
expected_output_shape = [None, 6]
self.assertEqual(expected_output_shape, output_tensor.shape.as_list())
self.assertEqual(tf.float32, output_tensor.dtype)
def test_sequential_word_loss_shape(self):
max_dialog_length = 2
y_true = tf.keras.Input(shape=(max_dialog_length, None))
y_pred = tf.keras.Input(shape=(max_dialog_length, None, None))
loss_fn = utils.SequentialWordLoss()
loss = loss_fn(y_true=y_true, y_pred=y_pred)
self.assertEqual([None, max_dialog_length, None], loss.shape.as_list())
def test_sequential_word_loss(self):
y_true = tf.constant([[1, 2], [1, 0]])
y_pred = tf.constant([[[0, 0, 1], [1, 0, 0]], [[0, 1, 0], [0, 1, 0]]],
dtype=tf.float32)
loss_fn = utils.SequentialWordLoss()
unit_loss = tf.keras.losses.SparseCategoricalCrossentropy()(
y_true=tf.constant([1]),
y_pred=tf.constant([0, 0, 1], dtype=tf.float32)).numpy()
expected = tf.constant([[unit_loss, unit_loss], [0, 0]])
self.assertAllClose(
loss_fn(y_true, y_pred, sample_weight=tf.sign(y_true)), expected)
def test_sequential_word_loss_with_word_weights(self):
y_true = tf.constant([[1, 2], [0, 0]])
y_pred = tf.constant([[[0, 0, 1], [1, 0, 0]], [[0, 1, 0], [0, 1, 0]]],
dtype=tf.float32)
word_weights = [0, 1, 2]
loss_fn = utils.SequentialWordLoss(word_weights=word_weights)
unit_loss = tf.keras.losses.SparseCategoricalCrossentropy()(
y_true=tf.constant([1]),
y_pred=tf.constant([0, 0, 1], dtype=tf.float32)).numpy()
expected = tf.constant([[unit_loss, 2 * unit_loss], [0, 0]])
self.assertAllClose(
loss_fn(y_true, y_pred, sample_weight=tf.sign(y_true)), expected)
def test_kl_loss_shape(self):
max_dialog_length = 2
p = tf.keras.Input(shape=(max_dialog_length, None, None))
q = tf.keras.Input(shape=(max_dialog_length, None, None))
loss_fn = utils.KlLoss(bpr=True, reduction=tf.keras.losses.Reduction.NONE)
loss = loss_fn(p, q)
self.assertEqual([max_dialog_length, None], loss.shape.as_list())
def test_kl_loss(self):
p = tf.constant([[[0, 1], [0, 1]], [[1, 0], [0, 1]]], dtype=tf.float32)
q = tf.constant([[[1, 0], [0, 1]], [[0, 1], [0, 1]]], dtype=tf.float32)
loss_fn = utils.KlLoss(bpr=False, reduction=tf.keras.losses.Reduction.NONE)
unit_loss = tf.keras.losses.KLDivergence()(
y_true=tf.constant([0, 1], dtype=tf.float32),
y_pred=tf.constant([1, 0], dtype=tf.float32)).numpy()
expected = tf.constant([[unit_loss, 0], [unit_loss, 0]])
self.assertAllEqual(loss_fn(p, q), expected)
def test_kl_loss_with_bpr(self):
p = tf.constant([[[0, 1], [1, 0]], [[0, 1], [0, 1]]], dtype=tf.float32)
q = tf.constant([[[1, 0], [0, 1]], [[1, 0], [1, 0]]], dtype=tf.float32)
loss_fn = utils.KlLoss(bpr=True, reduction=tf.keras.losses.Reduction.NONE)
unit_loss = tf.keras.losses.KLDivergence()(
y_true=tf.constant([0, 1], dtype=tf.float32),
y_pred=tf.constant([1, 0], dtype=tf.float32)).numpy()
expected = tf.constant([unit_loss * 2, 0])
self.assertAllEqual(loss_fn(p, q), expected)
def test_bow_loss_shape(self):
max_dialog_length = 2
y_true = tf.keras.Input(shape=(max_dialog_length, None))
y_pred = tf.keras.Input(shape=(max_dialog_length, None))
loss_fn = utils.BowLoss(sequence_axis=2)
loss = loss_fn(y_true=y_true, y_pred=y_pred)
self.assertEqual([None, max_dialog_length, None], loss.shape.as_list())
def test_bow_loss(self):
y_true = tf.constant([[1, 2], [1, 0]])
y_pred = tf.constant([[0, 1, 0], [0, 1, 0]], dtype=tf.float32)
loss_fn = utils.BowLoss()
unit_loss = tf.keras.losses.SparseCategoricalCrossentropy()(
y_true=tf.constant([1]),
y_pred=tf.constant([0, 0, 1], dtype=tf.float32)).numpy()
expected = tf.constant([[0, unit_loss], [0, 0]])
self.assertAllClose(
loss_fn(y_true, y_pred, sample_weight=tf.sign(y_true)), expected)
def test_create_mask(self):
inputs = tf.constant([[1, 2], [2, 1], [3, 2]])
masking_prob = {1: 1., 2: 0., 3: 0.8}
self.assertAllEqual(
tf.constant([[1, 0], [0, 1], [0, 0]]),
utils.create_mask(inputs, masking_prob, seed=1))
self.assertAllEqual(
tf.constant([[1, 0], [0, 1], [1, 0]]),
utils.create_mask(inputs, masking_prob, seed=2))
def test_value_in_tensor(self):
inputs = tf.constant([[1, 2], [2, 1], [3, 2]])
tensor = tf.constant([1, 1, 2])
expected = tf.constant([[True, True], [True, True], [False, True]])
self.assertAllEqual(expected, utils.value_in_tensor(inputs, tensor))
def test_bert_preprocessor(self):
tfhub_url = 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3'
max_seq_length = 10
batch_size = 12
preprocessor = utils.BertPreprocessor(tfhub_url, max_seq_length)
self.assertEqual(preprocessor.vocab_size, 30522)
inputs = [
tf.keras.layers.Input(shape=(), dtype=tf.string, batch_size=batch_size)
for _ in range(2)
]
outputs = preprocessor(inputs)
self.assertLen(outputs, 2)
for output in outputs:
for key in ['input_word_ids', 'input_type_ids', 'input_mask']:
self.assertEqual(output[key].shape.as_list(), [batch_size, 10])
outputs = preprocessor(inputs, concat=True)
for key in ['input_word_ids', 'input_type_ids', 'input_mask']:
self.assertEqual(outputs[key].shape.as_list(), [batch_size, 10])
def test_adjusted_mutual_info(self):
a = tf.constant([[1, 2, 0], [2, 1, 0]])
b = tf.constant([[1, 2, 1], [2, 1, 1]])
self.assertEqual(utils.adjusted_mutual_info(a, b), 1.)
def test_cluster_purity(self):
a = tf.constant([[1, 0, 0], [1, 1, 0]])
b = tf.constant([[1, 2, 3], [1, 1, 2]])
self.assertEqual(utils.cluster_purity(a, b), 1.)
def test_create_rebalanced_sample_weights(self):
labels = tf.constant([[1, 2, 3], [1, 4, 0]])
sample_weights = utils.create_rebalanced_sample_weights(labels)
self.assertAllEqual(sample_weights,
tf.constant([[0.75, 1.5, 1.5], [0.75, 1.5, 0.]]))
if __name__ == '__main__':
tf.test.main()
|
the-stack_0_21338 | import os
import sys
import shutil
import tempfile
import subprocess
from shlex import quote
class Error (Exception): pass
def decode(x):
try:
s = x.decode()
except:
return x
return s
def _write_script(cmds,script):
'''Write commands into a bash script'''
f = open(script, 'w+')
for cmd in cmds:
print(cmd, file=f)
f.close()
def syscall(cmd, verbose=False):
'''Manage error handling when making syscalls'''
if verbose:
print('Running command:', cmd, flush=True)
try:
output = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as error:
print('The following command failed with exit code', error.returncode, file=sys.stderr)
print(cmd, file=sys.stderr)
print('\nThe output was:\n', file=sys.stderr)
print(error.output.decode(), file=sys.stderr)
raise Error('Error running command:', cmd)
if verbose:
print(decode(output))
def makeBlast(seq=None, outfile=None, pid=60):
cmd = 'blastn -word_size 4 -outfmt "6 qstart qend sstart send length positive pident qlen slen qframe sframe qseqid sseqid" -query ' + quote(str(seq)) + ' -subject ' + quote(str(seq)) + ' -out ' + quote(str(outfile)) + ' -perc_identity ' + str(pid)
return [cmd]
def run_blast(cmds,verbose=False):
    '''Write and execute the BLAST script'''
tmpdir = tempfile.mkdtemp(prefix='tmp.', dir=os.getcwd())
original_dir = os.getcwd()
os.chdir(tmpdir)
script = 'run_jobs.sh'
_write_script(cmds,script)
syscall('bash ' + script, verbose=verbose)
os.chdir(original_dir)
shutil.rmtree(tmpdir)
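# Illustrative usage sketch (not part of the original module). The FASTA path,
# output name and identity cutoff are placeholder assumptions, and the call
# only works where NCBI blastn is available on PATH.
if __name__ == '__main__':
    example_cmds = makeBlast(seq='contigs.fasta', outfile='self_hits.coords', pid=60)
    run_blast(example_cmds, verbose=True)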
'''
Recreate pymummer-like coords file output from blast.
#Blast field, coords field, Description
qstart [S1] Start of the alignment region in the reference sequence
qend [E1] End of the alignment region in the reference sequence
sstart [S2] Start of the alignment region in the query sequence
send [E2] End of the alignment region in the query sequence
length [LEN 1] Length of the alignment region in the reference sequence
positive [LEN 2] Length of the alignment region in the query sequence (#"positive" is just a filler as blast won't allow repeated fields)
pident [% IDY] Percent identity of the alignment
qlen [LEN R] Length of the reference sequence
slen [LEN Q] Length of the query sequence
qframe sframe [FRM] Reading frame for the reference AND query sequence alignments respectively
qseqid sseqid [TAGS] The reference AND query FastA IDs respectively. All output coordinates and lengths are relative to the forward strand of the reference DNA sequence.
''' |
the-stack_0_21340 | import asyncio
import subprocess
import os
import logs
# COMMAND_LATEX = \
# "pdflatex -disable-pipes -disable-installer -disable-write18 -no-shell-escape -interaction=nonstopmode -output-directory={pdir} {fname}"
COMMAND_LATEX = \
"pdflatex -no-shell-escape -interaction=nonstopmode -output-directory={pdir} {fname}"
COMMAND_IMG_CONVERT = "convert -trim -density {density} -quality {quality} {pdf} {dest}"
def mkdir(p):
if not os.path.exists(p):
os.makedirs(p)
async def run_command_async(command, timeout = 3):
INTERVAL = 0.25
if isinstance(command, str):
command = command.split(' ')
process = subprocess.Popen(command)
for i in range(int(timeout / INTERVAL)):
await asyncio.sleep(INTERVAL)
retcode = process.poll()
if retcode is not None:
break
if retcode is None:
process.kill()
raise subprocess.TimeoutExpired(command, timeout, process.stdout, process.stderr)
if retcode == 0:
return str(process.stdout)
raise subprocess.CalledProcessError(retcode, command, process.stdout, process.stderr)
async def render_latex(job_id, output_format, code, density, quality):
try:
pdir = './temp/' + job_id + '/'
mkdir(pdir)
fname = pdir + 'a.tex'
latex_log = ''
with open(fname, 'wt') as f:
f.write(code)
f.flush()
f.close()
try:
try:
output = await run_command_async(
COMMAND_LATEX.format(pdir = pdir, fname = fname),
timeout = 8
)
finally:
log_file = fname.replace('.tex', '.log')
try:
latex_log = open(log_file, encoding = 'utf-8').read()
except FileNotFoundError:
pass
except subprocess.TimeoutExpired as e:
return {
'status': 'error',
'description': 'Time limit exceeded during latex rendering'
}
except subprocess.CalledProcessError as e:
# NOTE: Sometimes a pdf file can still be produced.
# Maybe we should let the caller access it anyway?
return {
'status': 'error',
'description': 'pdflatex exited with non-zero return code',
'log': latex_log
}
pdf_file = fname.replace('.tex', '.pdf')
if output_format == 'pdf':
try:
# Binary so as to avoid encoding errors
with open(pdf_file, 'rb') as f:
pass
except FileNotFoundError:
return {
'status': 'error',
'description': 'pdflatex produced no output',
'log': latex_log
}
return {
'status': 'success',
# 'filename': pdf_file,
'log': latex_log
}
elif output_format in ('png', 'jpg'):
img_file = pdf_file.replace('.pdf', '.' + output_format)
try:
output = await run_command_async(
COMMAND_IMG_CONVERT.format(
density = density,
quality = quality,
pdf = pdf_file,
dest = img_file
),
timeout = 3
)
# If there are multiple pages, need to make sure we get the first one
# A later version of the API will allow for accessing the rest of the
# pages. This is more of a temporary bug fix than anything.
multipaged = img_file.replace('.', '-0.')
if os.path.isfile(multipaged):
os.rename(multipaged, img_file)
except subprocess.TimeoutExpired:
return {
'status': 'error',
'description': 'Time limit exceeded during image conversion',
}
except subprocess.CalledProcessError as proc:
return {
'status': 'error',
'description': 'Image conversion exited with non-zero return code'
}
return {
'status': 'success',
# 'filename': img_file,
'log': latex_log
}
return {
'status': 'error',
'description': 'Output format was invalid'
}
except Exception as e:
logs.error(e)
return {
'status': 'error',
'description': 'The server broke. This is bad.'
# 'details': repr(e)
}
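# Illustrative usage sketch (not part of the original module): the job id and
# LaTeX snippet are made-up examples, and running it assumes pdflatex (and
# ImageMagick's convert for image output) are installed.
if __name__ == '__main__':
    result = asyncio.run(render_latex(
        job_id='demo-job',
        output_format='png',
        code=r'\documentclass{article}\begin{document}$e^{i\pi}+1=0$\end{document}',
        density=200,
        quality=90,
    ))
    print(result['status'], result.get('description', ''))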
|
the-stack_0_21342 | # Copyright 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
import json
import time
import unittest
import urllib
from sawtooth_sdk.protobuf import validator_pb2
from sawtooth_sdk.protobuf import consensus_pb2
from sawtooth_sdk.messaging.stream import Stream
from sawtooth_intkey.intkey_message_factory import IntkeyMessageFactory
from sawtooth_integration.tests.integration_tools import wait_for_rest_apis
REST_API_URL = "http://rest-api:8008"
INTERBLOCK_PERIOD = 1
WAIT = 300
BATCH_KEYS = 'abcd'
class TestConsensusEngineAPI(unittest.TestCase):
def setUp(self):
wait_for_rest_apis([REST_API_URL])
self.stream = Stream("tcp://validator:5005")
def tearDown(self):
self.stream.close()
def test_consensus_engine_api(self):
chain_head = self.head()
batches = make_batches(BATCH_KEYS)
committed = 0
for batch in batches:
batch_response = self.publish_block(batch)
self.commit_block()
# Wait for the validator to respond that the batch was committed
wait_for_batch(batch_response)
committed += 1
self.assertEqual(committed, len(batches))
blocks = query_rest_api('/blocks')
self.assertEqual(
len(blocks['data']),
len(BATCH_KEYS) + 1)
def publish_block(self, batch):
# Initialize a new block
status = self._initialize()
# Submit a batch and wait
response = post_batch(batch)
time.sleep(INTERBLOCK_PERIOD)
# Finalize the block
while True:
status = self._finalize()
if status == consensus_pb2.\
ConsensusFinalizeBlockResponse.BLOCK_NOT_READY:
time.sleep(1)
else:
self.assertEqual(
status,
consensus_pb2.ConsensusFinalizeBlockResponse.OK)
break
return response
def commit_block(self):
block_id = self._receive_new()
self._check_and_commit(block_id)
def _receive_new(self):
new_update = self._receive_update(
validator_pb2.Message.CONSENSUS_NOTIFY_BLOCK_NEW,
consensus_pb2.ConsensusNotifyBlockNew)
return new_update.block.block_id
def _check_and_commit(self, block_id):
self._check(block_id)
valid_update = self._receive_update(
validator_pb2.Message.CONSENSUS_NOTIFY_BLOCK_VALID,
consensus_pb2.ConsensusNotifyBlockValid)
self.assertEqual(
block_id,
valid_update.block_id)
self._commit(block_id)
commit_update = self._receive_update(
validator_pb2.Message.CONSENSUS_NOTIFY_BLOCK_COMMIT,
consensus_pb2.ConsensusNotifyBlockCommit)
self.assertEqual(
block_id,
commit_update.block_id)
def _check(self, block_id):
future = self.stream.send(
validator_pb2.Message.CONSENSUS_CHECK_BLOCKS_REQUEST,
consensus_pb2.ConsensusCheckBlocksRequest(block_ids=[block_id])
.SerializeToString())
result = future.result()
self.assertEqual(
result.message_type,
validator_pb2.Message.CONSENSUS_CHECK_BLOCKS_RESPONSE)
response = consensus_pb2.ConsensusCheckBlocksResponse()
response.ParseFromString(result.content)
return response.status
def _commit(self, block_id):
future = self.stream.send(
validator_pb2.Message.CONSENSUS_COMMIT_BLOCK_REQUEST,
consensus_pb2.ConsensusCommitBlockRequest(block_id=block_id)
.SerializeToString())
result = future.result()
self.assertEqual(
result.message_type,
validator_pb2.Message.CONSENSUS_COMMIT_BLOCK_RESPONSE)
response = consensus_pb2.ConsensusCommitBlockResponse()
response.ParseFromString(result.content)
return response.status
def head(self):
future = self.stream.send(
validator_pb2.Message.CONSENSUS_CHAIN_HEAD_GET_REQUEST,
consensus_pb2.ConsensusChainHeadGetRequest()
.SerializeToString())
result = future.result()
self.assertEqual(
result.message_type,
validator_pb2.Message.CONSENSUS_CHAIN_HEAD_GET_RESPONSE)
response = consensus_pb2.ConsensusChainHeadGetResponse()
response.ParseFromString(result.content)
return response.status
def _initialize(self):
future = self.stream.send(
validator_pb2.Message.CONSENSUS_INITIALIZE_BLOCK_REQUEST,
consensus_pb2.ConsensusInitializeBlockRequest()
.SerializeToString())
result = future.result()
self.assertEqual(
result.message_type,
validator_pb2.Message.CONSENSUS_INITIALIZE_BLOCK_RESPONSE)
response = consensus_pb2.ConsensusInitializeBlockResponse()
response.ParseFromString(result.content)
return response.status
def _finalize(self):
future = self.stream.send(
validator_pb2.Message.CONSENSUS_FINALIZE_BLOCK_REQUEST,
consensus_pb2.ConsensusFinalizeBlockRequest(data=b"Devmode")
.SerializeToString())
result = future.result()
self.assertEqual(
result.message_type,
validator_pb2.Message.CONSENSUS_FINALIZE_BLOCK_RESPONSE)
response = consensus_pb2.ConsensusFinalizeBlockResponse()
response.ParseFromString(result.content)
return response.status
def _receive_update(self, update_type, update_class):
message = self.stream.receive().result()
self.stream.send_back(
validator_pb2.Message.CONSENSUS_NOTIFY_ACK,
message.correlation_id,
consensus_pb2.ConsensusNotifyAck().SerializeToString())
self.assertEqual(message.message_type, update_type)
update = update_class()
update.ParseFromString(message.content)
return update
def post_batch(batch):
headers = {'Content-Type': 'application/octet-stream'}
response = query_rest_api(
'/batches', data=batch, headers=headers)
return response
def wait_for_batch(post_response):
response = submit_request('{}&wait={}'.format(post_response['link'], WAIT))
return response
def query_rest_api(suffix='', data=None, headers=None):
if headers is None:
headers = {}
url = REST_API_URL + suffix
return submit_request(urllib.request.Request(url, data, headers))
def submit_request(request):
response = urllib.request.urlopen(request).read().decode('utf-8')
return json.loads(response)
def make_batches(keys):
imf = IntkeyMessageFactory()
return [imf.create_batch([('set', k, 0)]) for k in keys]
|
the-stack_0_21343 | import os
import tkinter as tk
import win32com.shell.shell as shell
from tkinter import *
from tkinter.ttk import *
root= tk.Tk()
root.geometry('400x200')
root.configure(bg='black')
#root.iconbitmap(r'F:\py\Lock\ok.png')
root.resizable(width=False, height=False)
root.title ("Lock Mount")
w = tk.Label(root, text=" Abhishek Patel @imdarkcoder " ,font = ("Times New Roman", 12),bg='black',fg="white")
w.pack()
def myc():
commands = 'manage-bde -lock E: -forcedismount'
shell.ShellExecuteEx(lpVerb='runas', lpFile='cmd.exe', lpParameters='/c '+commands)
photo = PhotoImage(file = r"F:\py\Lock\b.ico")
photoimage = photo.subsample(2,2)
button1 = tk.Button (root,image = photoimage, compound = LEFT,text = "Lock E:Mount ", font = ("Times New Roman", 20),bg='darkslategray', command = myc)
button1.pack (expand = 1)
root.mainloop() |
the-stack_0_21344 | import torch
import torch.nn as nn
class DeconvolutionModule(nn.Module):
def __init__(self, cin_conv=1024, cin_deconv=512, cout=512, norm_layer=nn.BatchNorm2d, elementwise_type="sum", deconv_kernel_size=2, deconv_out_padding=0):
super(DeconvolutionModule, self).__init__()
self.conv_layer = nn.Sequential(
nn.Conv2d(cin_conv, cout, kernel_size=3, stride=1, padding=1, dilation=1, groups=1),
norm_layer(cout),
nn.ReLU(inplace=True),
nn.Conv2d(cout, cout, kernel_size=3, stride=1, padding=1, dilation=1, groups=1),
norm_layer(cout),
)
self.deconv_layer = nn.Sequential(
nn.ConvTranspose2d(cin_deconv, cout, kernel_size=deconv_kernel_size, stride=2, padding=0, output_padding=deconv_out_padding),
nn.Conv2d(cout, cout, kernel_size=3, stride=1, padding=1, dilation=1),
norm_layer(cout)
)
if elementwise_type in ["sum", "prod"]:
self.elementwise_type = elementwise_type
else:
raise RuntimeError("elementwise type incorrect!")
self.relu = nn.ReLU(inplace=True)
self.reset_parameters()
def reset_parameters(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.xavier_uniform_(m.weight)
nn.init.zeros_(m.bias)
def forward(self, x_deconv, x_conv):
y_deconv = self.deconv_layer(x_deconv)
y_conv = self.conv_layer(x_conv)
if self.elementwise_type == "sum":
return self.relu(y_deconv + y_conv)
elif self.elementwise_type == "prod":
return self.relu(y_deconv + y_conv)
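# Illustrative sketch (not part of the original module): a forward pass with the
# default channel sizes and arbitrary example shapes. The transposed convolution
# upsamples the 5x5 deconv input to 10x10 so it can be fused with the conv branch.
if __name__ == '__main__':
    module = DeconvolutionModule()
    x_deconv = torch.randn(1, 512, 5, 5)   # coarser feature map
    x_conv = torch.randn(1, 1024, 10, 10)  # finer feature map
    fused = module(x_deconv, x_conv)
    print(fused.shape)  # torch.Size([1, 512, 10, 10])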
|
the-stack_0_21345 | from django.utils.safestring import mark_safe
#from markdown import markdown
from django import template
import re
import pypandoc
from django.contrib.staticfiles.templatetags import staticfiles
from django.utils import timezone
from django.contrib.auth.models import User
from datetime import timedelta
register = template.Library()
InlineMathPat = re.compile(r'\\\((.+?)\\\)', flags=re.DOTALL)
DisplayMathPat = re.compile(r'\\\[(.+?)\\\]', flags=re.DOTALL)
StaticImagePat = re.compile(r'STATICIMAGE/([^"]+)')
@register.filter(name='md2html')
def md2html(txt, stripP=False):
'converst ReST to HTML using pandoc, w/ audio support'
txt, markers = add_temporary_markers(txt, find_audio)
txt, videoMarkers = add_temporary_markers(txt, find_video, len(markers))
try:
txt = pypandoc.convert(
txt,
'html',
format='rst',
extra_args=('--mathjax', '--email-obfuscation=none')
)
    except Exception:
pass
txt = replace_temporary_markers(txt, audio_html, markers)
txt = replace_temporary_markers(txt, video_html, videoMarkers)
txt = StaticImagePat.sub(staticfiles.static('ct') + '/' + r'\1', txt)
if stripP and txt.startswith('<p>') and txt.endswith('</p>'):
txt = txt[3:-4]
return mark_safe(txt)
def nolongerused():
'convert markdown to html, preserving latex delimiters'
# markdown replaces \( with (, so have to protect our math...
# replace \(math\) with \\(math\\)
txt = InlineMathPat.sub(r'\\\\(\1\\\\)', txt)
# replace \[math\] with \\[math\\]
txt = DisplayMathPat.sub(r'\\\\[\1\\\\]', txt)
txt = markdown(txt, safe_mode='escape')
if stripP and txt.startswith('<p>') and txt.endswith('</p>'):
txt = txt[3:-4]
return mark_safe(txt)
def find_audio(txt, lastpos, tag='.. audio::'):
i = txt.find(tag, lastpos)
if i < 0:
return -1, None, None
lastpos = i + len(tag)
k = txt.find('\n', lastpos)
if k < 0:
k = txt.find('\r', lastpos)
if k < 0: # no EOL, slurp to end of text
k = len(txt)
v = txt[lastpos:k].strip()
return i, k, v
def find_video(txt, lastpos, tag='.. video::'):
return find_audio(txt, lastpos, tag)
def audio_html(filename):
i = filename.rfind('.')
if i > 0: # remove file suffix
filename = filename[:i]
return '<audio controls><source src="%s.ogg" type="audio/ogg"><source src="%s.mp3" type="audio/mpeg">no support for audio!</audio>' \
% (filename,filename)
def video_html(filename):
try:
sourceDB, sourceID = filename.split(':')
except ValueError:
return 'ERROR: bad video source: %s' % filename
d = {
'youtube': '''<div class="embed-responsive embed-responsive-4by3">
<iframe class="embed-responsive-item"
src="https://www.youtube.com/embed/%s"
allowfullscreen></iframe></div>
''',
'vimeo': '''<div class="embed-responsive embed-responsive-4by3">
<iframe class="embed-responsive-item"
src="https://player.vimeo.com/video/%s"
webkitallowfullscreen mozallowfullscreen
allowfullscreen></iframe></div>
''',
}
try:
return d[sourceDB] % sourceID
except KeyError:
return 'ERROR: unknown video sourceDB: %s' % sourceDB
def add_temporary_markers(txt, func, base=0, l=None):
s = ''
lastpos = 0
if l is None:
l = []
while True: # replace selected content with unique markers
i, j, v = func(txt, lastpos)
if i < 0: # no more markers
break
marker = 'mArKeR:%d:' % (base + len(l))
l.append((marker, v))
s += txt[lastpos:i] + marker
lastpos = j
s += txt[lastpos:]
return s, l
def replace_temporary_markers(txt, func, l):
s = ''
lastpos = 0
for marker, v in l: # put them back in after conversion
i = txt.find(marker, lastpos)
if i < 0:
continue # must have been removed by comment, so ignore
s += txt[lastpos:i] + func(v) # substitute value
lastpos = i + len(marker)
s += txt[lastpos:]
return s
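# Illustrative sketch (not part of the original module): round-tripping a single
# ".. audio:: lecture1" directive through the temporary-marker helpers above.
# The filename is a made-up example.
def _marker_roundtrip_example():
    src = 'Intro text\n.. audio:: lecture1\nMore text'
    marked, markers = add_temporary_markers(src, find_audio)
    # 'marked' now carries "mArKeR:0:" in place of the directive; after the
    # rst -> html conversion the marker is swapped for an <audio> element.
    return replace_temporary_markers(marked, audio_html, markers)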
def get_base_url(path, extension=[], baseToken='units', tail=2):
l = path.split('/')
for i,v in enumerate(l):
if v == baseToken:
return '/'.join(l[:i + tail] + extension) + '/'
raise ValueError('baseToken not found in path')
def get_path_type(path, baseToken='units', typeOffset=2):
l = path.split('/')
for i, s in enumerate(l[:-typeOffset]):
if s == baseToken:
return l[i + typeOffset]
raise ValueError('baseToken not found in path')
def is_teacher_url(path):
return path.startswith('/ct/teach/')
@register.filter(name='get_object_url')
def get_object_url(actionTarget, o, forceDefault=False, subpath=None):
basePath = get_base_url(actionTarget)
try:
urlFunc = o.get_url
except AttributeError:
if subpath:
tail = subpath + '/'
elif subpath is None:
tail = 'teach/'
else:
tail = ''
head = getattr(o, '_headURL', o.__class__.__name__.lower())
return '%s%s/%d/%s' % (basePath, head, o.pk,
getattr(o, '_subURL', tail))
else:
return urlFunc(basePath, forceDefault, subpath,
is_teacher_url(basePath))
@register.filter(name='get_home_url')
def get_home_url(actionTarget, o):
return get_object_url(actionTarget, o, subpath='')
@register.filter(name='get_thread_url')
def get_thread_url(actionTarget, r):
'get URL for FAQ thread for this student inquiry'
return get_object_url(actionTarget, r.unitLesson,
subpath='faq/%d' % r.pk)
@register.filter(name='get_tasks_url')
def get_tasks_url(actionTarget, ul):
'get URL for FAQ thread for this student inquiry'
subpath = ul._tasksPath[ul.get_type()]
return get_object_url(actionTarget, ul, subpath=subpath)
@register.filter(name='get_dummy_navbar')
def get_dummy_navbar(actionTarget, baseToken='courses'):
l = actionTarget.split('/')
try:
i = l.index(baseToken)
except ValueError:
return ''
m = []
for j, label in ((i + 2, 'Course'), (i + 4, 'Courselet')):
if j + 2 < len(l):
m.append('<li><a href="%s/">%s</a></li>'
% ('/'.join(l[:j]), label))
return mark_safe('\n'.join(m))
@register.filter(name='get_base_faq_url')
def get_base_faq_url(actionTarget, ul):
return get_object_url(actionTarget, ul, subpath='faq')
##############################################################
# time utilities
timeUnits = (('seconds', timedelta(minutes=1), lambda t:int(t.seconds)),
('minutes', timedelta(hours=1), lambda t:int(t.seconds / 60)),
('hours', timedelta(1), lambda t:int(t.seconds / 3600)),
('days', timedelta(7), lambda t:t.days))
monthStrings = ('Jan.', 'Feb.', 'Mar.', 'Apr.', 'May', 'Jun.', 'Jul.',
'Aug.', 'Sep.', 'Oct.', 'Nov.', 'Dec.')
@register.filter(name='display_datetime')
def display_datetime(dt):
'get string that sidesteps timezone issues thus: 27 minutes ago'
def singularize(i, s):
if i == 1:
return s[:-1]
return s
diff = timezone.now() - dt
for unit, td, f in timeUnits:
if diff < td:
n = f(diff)
return '%d %s ago' % (n, singularize(n, unit))
return '%s %d, %d' % (monthStrings[dt.month - 1], dt.day, dt.year)
@register.filter
def filter_input(edge, obj):
"""Filter input UnitLesson to be acceptable for this edge.
:param edge: FSMEdge
:param obj: data object to be checked whether it's acceptable input.
:return:
"""
return edge.filter_input(obj)
@register.filter
def display_full_username(user=None):
if user and isinstance(user, User):
return user.get_full_name() or user.username
else:
return ''
|
the-stack_0_21346 | # 2020.06.05
# activate learning methods
import sys
import numpy as np
from scipy.spatial import distance
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from scipy.stats import entropy
import time
def entropy_query(prob, num_feature):
#prob is the output of the clf.predict_proba on the pool of unlabeled data
#num_feature is the number of images you want to select using the entropy method
entropies = np.zeros(prob.shape[0])
for i in range(prob.shape[0]):
entropies[i] = np.sum(-prob[i]*np.log(prob[i]+.0000001))
th = np.sort(entropies)[prob.shape[0]-num_feature]
num_feature_idx = entropies <= th
num_feature_idx = entropies >= th
return num_feature_idx
def coreset(x_pool, x_train,k):
#x_pool is the pool of unlabeled data
#x_train is the current set of labeled data
#k is the number of images to select from x_pool
dists = distance.cdist(x_pool, x_train, 'euclidean')
nearesttolabeleds = np.min(dists,axis=1)
th = np.sort(nearesttolabeleds)[nearesttolabeleds.shape[0]-k]
num_feature_idx = nearesttolabeleds >= th
return num_feature_idx
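# Illustrative sketch (not part of the original module): selecting 2 samples by
# predictive entropy and 2 by coreset distance from made-up arrays.
def _selection_example():
    rng = np.random.default_rng(0)
    probs = rng.dirichlet(np.ones(3), size=10)        # mock classifier probabilities
    entropy_mask = entropy_query(probs, num_feature=2)
    x_pool = rng.normal(size=(10, 5))                 # unlabeled pool features
    x_train = rng.normal(size=(4, 5))                 # already-labeled features
    coreset_mask = coreset(x_pool, x_train, k=2)
    return entropy_mask, coreset_mask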
class QBC():
def __init__(self, learners, init=0.01, n_increment=200, n_iter=40, percent=0.05):
self.init = init
self.n_increment = n_increment
self.n_learner = len(learners)
self.n_iter = n_iter
self.num_class = 3
self.learners = learners
self.percent = percent
self.trained = False
self.acc_t = []
self.acc_v = []
def metric(self, prob):
return entropy(prob, base=self.num_class, axis=1)
def fit(self, x, y, xv=None, yv=None):
self.trained = True
self.num_class = np.unique(y).shape[0]
#x, xt, y, yt = train_test_split(x, y, train_size=self.init, random_state=42, stratify=y)
idx = np.random.choice(x.shape[0], (int)(x.shape[0]*self.percent))
x_train, y_train = x[idx], y[idx]
x_pool = np.delete(x, idx, axis=0)
y_pool = np.delete(y, idx, axis=0)
acc_t, acc_v, s = [], [], []
for k in range(self.n_iter):
print(' start iter -> %3s'%str(k))
t0 = time.time()
for i in range(self.n_learner):
self.learners[i].fit(x_train, y_train)
pt = self.predict_proba(x_pool)
at = accuracy_score(y_pool, np.argmax(pt, axis=1))
acc_t.append(at)
s.append(y_pool.shape[0])
ht = self.metric(pt)
try:
xv.shape
print(' test shape: %s, val shape: %s'%(str(x_pool.shape), str(xv.shape)))
pv = self.predict_proba(xv)
av = accuracy_score(yv, np.argmax(pv, axis=1))
print(' <Acc> test: %s, val: %s'%(at, av))
acc_v.append(av)
hv = self.metric(pv)
print(' <Entropy> test: %s, val: %s'%(np.mean(ht), np.mean(hv)))
except:
pass
idx = np.argsort(ht)[-self.n_increment:]
x_train = np.concatenate((x_train, x_pool[idx]), axis=0)
y_train = np.concatenate((y_train, y_pool[idx]), axis=0)
x_pool = np.delete(x_pool, idx, axis=0)
y_pool = np.delete(y_pool, idx, axis=0)
print(' end iter -> %3s using %10s seconds\n'%(str(k),str(time.time()-t0)))
self.acc_t = acc_t
self.acc_v = acc_v
return s, acc_t, acc_v
def predict_proba(self, x):
assert (self.trained == True), "Must call fit first!"
pred = np.zeros((x.shape[0], self.num_class))
for i in range(self.n_learner):
pred += self.learners[i].predict_proba(x)
return pred / np.sum(pred, axis=1, keepdims=True)
def predict(self, x):
assert (self.trained == True), "Must call fit first!"
pred = self.predict_proba(x)
return np.argmax(pred, axis=1)
def score(self, x, y):
assert (self.trained == True), "Must call fit first!"
pred = self.predict(x)
return accuracy_score(y, pred) |
the-stack_0_21349 | # -*- coding: utf-8 -*-
"""
Created on 2017-5-20
@author: cheng.li
"""
import os
import sys
import arrow
import datetime as dt
import uqer
import sqlalchemy
import numpy as np
import pandas as pd
from airflow.operators.python_operator import PythonOperator
from airflow.operators.email_operator import EmailOperator
from airflow.operators.sensors import ExternalTaskSensor
from airflow.models import DAG
from uqer import DataAPI as api
from alphamind.utilities import alpha_logger
from sqlalchemy import select, and_, or_, MetaData, delete
from PyFin.api import advanceDateByCalendar
from PyFin.api import isBizDay
from alphamind.api import SqlEngine
from alphamind.data.dbmodel.models import *
from alphamind.api import Universe as UniversProxy
from alphamind.api import industry_styles
from alphamind.api import risk_styles
uqer.DataAPI.api_base.timeout = 300
start_date = dt.datetime(2018, 5, 4)
dag_name = 'update_uqer_data_postgres'
default_args = {
'owner': 'wegamekinglc',
'depends_on_past': True,
'start_date': start_date
}
dag = DAG(
dag_id=dag_name,
default_args=default_args,
schedule_interval='0 1 * * 1,2,3,4,5'
)
_ = uqer.Client(token=os.environ['DATAYES_TOKEN'])
engine = sqlalchemy.create_engine(os.environ['DB_URI'])
alpha_engine = SqlEngine(os.environ['DB_URI'])
def process_date(ds):
alpha_logger.info("Loading data at {0}".format(ds))
this_date = dt.datetime.strptime(ds, '%Y-%m-%d')
ref_date = this_date.strftime('%Y%m%d')
return ref_date, this_date
def format_data(df, format='%Y%m%d'):
df['trade_date'] = pd.to_datetime(df['trade_date'], format=format)
def check_holiday(this_date):
flag = isBizDay('china.sse', this_date)
if not flag:
alpha_logger.info('Job will be omitted as {0} is a holiday'.format(this_date))
return flag
def data_info_log(df, table):
data_len = len(df)
if data_len > 0:
alpha_logger.info("{0} records will be inserted in {1}".format(data_len, table))
else:
msg = "No records will be inserted in {0}".format(table)
alpha_logger.warning(msg)
raise ValueError(msg)
def update_uqer_factors(ds, **kwargs):
ref_date, this_date = process_date(ds)
flag = check_holiday(this_date)
if not flag:
return
df = api.MktStockFactorsOneDayProGet(tradeDate=ref_date)
df.rename(columns={'tradeDate': 'trade_date', 'ticker': 'code'}, inplace=True)
df.code = df.code.astype(int)
del df['secID']
query = delete(Uqer).where(Uqer.trade_date == this_date)
engine.execute(query)
data_info_log(df, Uqer)
format_data(df, format='%Y-%m-%d')
df.to_sql(Uqer.__table__.name, engine, index=False, if_exists='append')
def update_uqer_market(ds, **kwargs):
ref_date, this_date = process_date(ds)
flag = check_holiday(this_date)
if not flag:
return
df = api.MktEqudGet(tradeDate=ref_date)
df.rename(columns={'tradeDate': 'trade_date', 'ticker': 'code'}, inplace=True)
df.code = df.code.astype(int)
del df['secID']
query = delete(Market).where(Market.trade_date == this_date)
engine.execute(query)
data_info_log(df, Market)
format_data(df, format='%Y-%m-%d')
df.to_sql(Market.__table__.name, engine, index=False, if_exists='append')
def update_uqer_index_market(ds, **kwargs):
ref_date, this_date = process_date(ds)
flag = check_holiday(this_date)
if not flag:
return
df = api.MktIdxdGet(tradeDate=ref_date)
df = df[df.exchangeCD.isin(['XSHE', 'XSHG', 'ZICN'])]
df = df[df.ticker <= '999999']
df.rename(columns={'tradeDate': 'trade_date',
'ticker': 'indexCode',
'CHGPct': 'chgPct',
'secShortName': 'indexShortName'}, inplace=True)
df = df[['trade_date',
'indexCode',
'preCloseIndex',
'openIndex',
'highestIndex',
'lowestIndex',
'closeIndex',
'turnoverVol',
'turnoverValue',
'chgPct']]
df['indexCode'] = df.indexCode.astype(int)
query = delete(IndexMarket).where(IndexMarket.trade_date == this_date)
engine.execute(query)
data_info_log(df, Market)
format_data(df, format='%Y-%m-%d')
df.to_sql(IndexMarket.__table__.name, engine, index=False, if_exists='append')
def update_uqer_halt_list(ds, **kwargs):
ref_date, this_date = process_date(ds)
flag = check_holiday(this_date)
if not flag:
return
df = api.SecHaltGet(beginDate=ref_date, endDate=ref_date)
df = df[df.assetClass == 'E']
df['trade_date'] = ref_date
df.rename(columns={'ticker': 'code'}, inplace=True)
df.code = df.code.astype(int)
del df['secID']
query = delete(HaltList).where(HaltList.trade_date == this_date)
engine.execute(query)
data_info_log(df, HaltList)
format_data(df)
df.to_sql(HaltList.__table__.name, engine, index=False, if_exists='append')
def update_universe(ds, **kwargs):
ref_date, this_date = process_date(ds)
flag = check_holiday(this_date)
if not flag:
return
query = delete(Universe).where(
Universe.trade_date == this_date,
)
engine.execute(query)
# indexed universe
universe_map = {'hs300': 300,
'sh50': 16,
'zz500': 905,
'zz800': 906,
'zz1000': 852,
'zxb': 399005,
'cyb': 399006}
total_df = None
for u in universe_map:
query = select([IndexComponent.code]).where(
and_(
IndexComponent.trade_date == this_date,
IndexComponent.indexCode == universe_map[u]
)
)
df = pd.read_sql(query, engine)
df[u] = 1
if total_df is None:
total_df = df
else:
total_df = pd.merge(total_df, df, on=['code'], how='outer')
# ashare
query = select([SecurityMaster.code]).where(
and_(
SecurityMaster.listDate <= this_date,
or_(
SecurityMaster.listStatusCD == 'L',
SecurityMaster.delistDate > this_date
)
)
)
df = pd.read_sql(query, engine)
df['ashare'] = 1
total_df = pd.merge(total_df, df, on=['code'], how='outer')
# ashare_ex
ex_date = advanceDateByCalendar('china.sse', this_date, '-3m')
query = select([SecurityMaster.code]).where(
and_(
SecurityMaster.listDate <= ex_date,
or_(
SecurityMaster.listStatusCD == "L",
SecurityMaster.delistDate > this_date
)
)
)
df = pd.read_sql(query, engine)
df['ashare_ex'] = 1
total_df = pd.merge(total_df, df, on=['code'], how='outer')
# industry universe
codes = total_df.code.tolist()
risk_models = alpha_engine.fetch_risk_model(ref_date, codes)[1]
df = risk_models[['code'] + industry_styles]
df.columns = [i.lower() for i in df.columns]
total_df = pd.merge(total_df, df, on=['code'], how='outer')
total_df['trade_date'] = this_date
total_df.fillna(0, inplace=True)
total_df.to_sql('universe', engine, if_exists='append', index=False)
def update_uqer_universe_security_master(ds, **kwargs):
ref_date, this_date = process_date(ds)
flag = check_holiday(this_date)
if not flag:
return
df = api.EquGet(equTypeCD='A').drop_duplicates()
if df.empty:
return
query = delete(SecurityMaster)
engine.execute(query)
df = df[df.ticker.str.len() <= 6]
df['code'] = df.ticker.astype(int)
df['listDate'] = pd.to_datetime(df['listDate'], format='%Y-%m-%d')
df['endDate'] = pd.to_datetime(df['endDate'], format='%Y-%m-%d')
df['delistDate'] = pd.to_datetime(df['delistDate'], format='%Y-%m-%d')
del df['ticker']
del df['secID']
data_info_log(df, SecurityMaster)
df.to_sql(SecurityMaster.__table__.name, engine, index=False, if_exists='append')
def update_sw1_adj_industry(ds, **kwargs):
ref_date, this_date = process_date(ds)
flag = check_holiday(this_date)
if not flag:
return
    industry = '申万行业分类'
query = select([Industry]).where(
and_(
Industry.trade_date == ref_date,
Industry.industry == industry
)
)
df = pd.read_sql(query, engine)
    df['industry'] = '申万行业分类修订'
df['industryID'] = 10303330102
df['industrySymbol'] = '440102'
    ids = df[df.industryName2 == '证券'].index
df.loc[ids, 'industryName1'] = df.loc[ids, 'industryName2']
df.loc[ids, 'industryID1'] = df.loc[ids, 'industryID2']
    ids = df[df.industryName2 == '银行'].index
df.loc[ids, 'industryName1'] = df.loc[ids, 'industryName2']
df.loc[ids, 'industryID1'] = df.loc[ids, 'industryID2']
    ids = df[df.industryName2 == '保险'].index
df.loc[ids, 'industryName1'] = df.loc[ids, 'industryName2']
df.loc[ids, 'industryID1'] = df.loc[ids, 'industryID2']
    ids = df[df.industryName2 == '多元金融'].index
df.loc[ids, 'industryName1'] = df.loc[ids, 'industryName2']
df.loc[ids, 'industryID1'] = df.loc[ids, 'industryID2']
query = delete(Industry).where(
and_(
Industry.trade_date == ref_date,
            Industry.industry == industry + "修订"
)
)
engine.execute(query)
df.to_sql(Industry.__table__.name, engine, if_exists='append', index=False)
def update_dx_industry(ds, **kwargs):
ref_date, this_date = process_date(ds)
flag = check_holiday(this_date)
if not flag:
return
barra_sector_dict = {
'Energy':
[],
'Materials':
            ['建筑建材', '化工', '有色金属', '钢铁', '建筑材料'],
        'Industrials':
            ['采掘', '机械设备', '综合', '建筑装饰', '电子', '交通运输', '轻工制造', '商业贸易', '农林牧渔', '电气设备', '国防军工', '纺织服装', '交运设备'],
        'ConsumerDiscretionary':
            ['休闲服务', '汽车', '传媒'],
        'ConsumerStaples':
            ['食品饮料', '家用电器'],
        'HealthCare':
            ['医药生物'],
        'Financials':
            ['银行', '非银金融', '金融服务'],
        'IT':
            ['计算机', '通信', '信息设备', '信息服务'],
        'Utilities':
            ['公用事业'],
        'RealEstate':
            ['房地产'],
}
# ref: https://en.wikipedia.org/wiki/Global_Industry_Classification_Standard
barra_sector_id_dict = {
'Energy': 10,
'Materials': 15,
'Industrials': 20,
'ConsumerDiscretionary': 25,
'ConsumerStaples': 30,
'HealthCare': 35,
'Financials': 40,
'IT': 45,
'Utilities': 55,
'RealEstate': 60
}
# ref: Morningstar Global Equity Classification Structure
ms_supersector_dict = {
'Cyclical': ['Materials', 'Financials', 'RealEstate', 'ConsumerDiscretionary'],
'Defensive': ['ConsumerStaples', 'HealthCare', 'Utilities'],
'Sensitive': ['Energy', 'Industrials', 'IT']
}
ms_supersector_id_dict = {
'Cyclical': 1,
'Defensive': 2,
'Sensitive': 3
}
barra_sector_rev_dict = {}
for x in barra_sector_dict:
for y in barra_sector_dict[x]:
barra_sector_rev_dict[y] = x
ms_supersector_rev_dict = {}
for x in ms_supersector_dict:
for y in ms_supersector_dict[x]:
ms_supersector_rev_dict[y] = x
    industry = '申万行业分类'
query = select([Industry]).where(
and_(
Industry.trade_date == ref_date,
Industry.industry == industry
)
)
df = pd.read_sql(query, engine)
    df['industry'] = '东兴行业分类'
df['industryID'] = 0
df['industrySymbol'] = '0'
df['industryID3'] = df['industryID1']
df['industryName3'] = df['industryName1']
df['industryName2'] = [barra_sector_rev_dict[x] for x in df['industryName3']]
df['industryName1'] = [ms_supersector_rev_dict[x] for x in df['industryName2']]
df['industryID1'] = [ms_supersector_id_dict[x] for x in df['industryName1']]
df['industryID2'] = [barra_sector_id_dict[x] for x in df['industryName2']]
query = delete(Industry).where(
and_(
Industry.trade_date == ref_date,
Industry.industry == "ไธๅ
ด่กไธๅ็ฑป"
)
)
engine.execute(query)
df.to_sql(Industry.__table__.name, engine, if_exists='append', index=False)
def update_uqer_index_components(ds, **kwargs):
ref_date, this_date = process_date(ds)
flag = check_holiday(this_date)
if not flag:
return
index_codes = ['000001',
'000002',
'000003',
'000004',
'000005',
'000006',
'000007',
'000008',
'000009',
'000010',
'000015',
'000016',
'000020',
'000090',
'000132',
'000133',
'000300',
'000852',
'000902',
'000903',
'000904',
'000905',
'000906',
'000907',
'000922',
'399001',
'399002',
'399004',
'399005',
'399006',
'399007',
'399008',
'399009',
'399010',
'399011',
'399012',
'399013',
'399107',
'399324',
'399330',
'399333',
'399400',
'399401',
'399649']
total_data = pd.DataFrame()
for index in index_codes:
df = api.IdxCloseWeightGet(ticker=index,
beginDate=ref_date,
endDate=ref_date)
if df.empty:
ref_previous_date = advanceDateByCalendar('china.sse', this_date, '-1b')
query = select([IndexComponent]).where(
and_(
IndexComponent.trade_date == ref_previous_date,
IndexComponent.indexCode == int(index)
)
)
df = pd.read_sql(query, engine)
df['trade_date'] = this_date
if df.empty:
continue
alpha_logger.info('{0} is finished with previous data {1}'.format(index, len(df)))
else:
################################
# 2017-10-09, patch for uqer bug
def filter_out_eqy(code: str):
if code[0] in ['0', '3'] and code[-4:] in ['XSHE']:
return True
elif code[0] in ['6'] and code[-4:] in ['XSHG']:
return True
else:
return False
df = df[df.consID.apply(lambda x: filter_out_eqy(x))]
################################
df.rename(columns={'ticker': 'indexCode',
'secShortName': 'indexShortName',
'consTickerSymbol': 'code',
'consExchangeCD': 'exchangeCD',
'consShortName': 'secShortName'}, inplace=True)
df['indexCode'] = df.indexCode.astype(int)
df['code'] = df.code.astype(int)
df['trade_date'] = this_date
del df['secID']
del df['consID']
alpha_logger.info('{0} is finished with new data {1}'.format(index, len(df)))
total_data = total_data.append(df)
index_codes = total_data.indexCode.unique()
index_codes = [int(index) for index in index_codes]
query = delete(IndexComponent).where(
and_(IndexComponent.trade_date == this_date, IndexComponent.indexCode.in_(index_codes)))
engine.execute(query)
if total_data.empty:
return
data_info_log(total_data, IndexComponent)
format_data(total_data)
total_data.to_sql(IndexComponent.__table__.name, engine, index=False, if_exists='append')
def update_dummy_index_components(ds, **kwargs):
ref_date, this_date = process_date(ds)
flag = check_holiday(this_date)
if not flag:
return
query = select([IndexComponent]).where(
and_(
IndexComponent.trade_date == '2018-05-04',
IndexComponent.indexCode.in_([900300, 900905])
)
)
df = pd.read_sql(query, con=engine)
df['trade_date'] = ref_date
query = delete(IndexComponent).where(
and_(
IndexComponent.trade_date == ref_date,
IndexComponent.indexCode.in_([900300, 900905])
)
)
engine.execute(query)
df.to_sql(IndexComponent.__table__.name, engine, index=False, if_exists='append')
def update_uqer_risk_model(ds, **kwargs):
ref_date, this_date = process_date(ds)
flag = check_holiday(this_date)
if not flag:
return
df = api.RMExposureDayGet(tradeDate=ref_date)
df.rename(columns={'tradeDate': 'trade_date', 'ticker': 'code'}, inplace=True)
df.code = df.code.astype(int)
del df['secID']
del df['exchangeCD']
del df['secShortName']
del df['updateTime']
engine.execute(delete(RiskExposure).where(RiskExposure.trade_date == this_date))
data_info_log(df, RiskExposure)
format_data(df)
df.to_sql(RiskExposure.__table__.name, engine, index=False, if_exists='append')
df = api.RMFactorRetDayGet(tradeDate=ref_date)
df.rename(columns={'tradeDate': 'trade_date'}, inplace=True)
engine.execute(delete(RiskReturn).where(RiskReturn.trade_date == this_date))
data_info_log(df, RiskReturn)
format_data(df)
df.to_sql(RiskReturn.__table__.name, engine, index=False, if_exists='append')
df = api.RMSpecificRetDayGet(tradeDate=ref_date)
df.rename(columns={'tradeDate': 'trade_date', 'ticker': 'code'}, inplace=True)
df.code = df.code.astype(int)
del df['secID']
engine.execute(delete(SpecificReturn).where(SpecificReturn.trade_date == this_date))
data_info_log(df, SpecificReturn)
format_data(df)
df.to_sql(SpecificReturn.__table__.name, engine, index=False, if_exists='append')
df = api.RMCovarianceDayGet(tradeDate=ref_date)
df.rename(columns={'tradeDate': 'trade_date'}, inplace=True)
engine.execute(delete(RiskCovDay).where(RiskCovDay.trade_date == this_date))
data_info_log(df, RiskCovDay)
format_data(df)
df.to_sql(RiskCovDay.__table__.name, engine, index=False, if_exists='append')
df = api.RMCovarianceShortGet(tradeDate=ref_date)
df.rename(columns={'tradeDate': 'trade_date'}, inplace=True)
engine.execute(delete(RiskCovShort).where(RiskCovShort.trade_date == this_date))
data_info_log(df, RiskCovShort)
format_data(df)
df.to_sql(RiskCovShort.__table__.name, engine, index=False, if_exists='append')
df = api.RMCovarianceLongGet(tradeDate=ref_date)
df.rename(columns={'tradeDate': 'trade_date'}, inplace=True)
engine.execute(delete(RiskCovLong).where(RiskCovLong.trade_date == this_date))
data_info_log(df, RiskCovLong)
format_data(df)
df.to_sql(RiskCovLong.__table__.name, engine, index=False, if_exists='append')
df = api.RMSriskDayGet(tradeDate=ref_date)
df.rename(columns={'tradeDate': 'trade_date', 'ticker': 'code'}, inplace=True)
df.code = df.code.astype(int)
del df['secID']
engine.execute(delete(SpecificRiskDay).where(SpecificRiskDay.trade_date == this_date))
data_info_log(df, SpecificRiskDay)
format_data(df)
df.to_sql(SpecificRiskDay.__table__.name, engine, index=False, if_exists='append')
df = api.RMSriskShortGet(tradeDate=ref_date)
df.rename(columns={'tradeDate': 'trade_date', 'ticker': 'code'}, inplace=True)
df.code = df.code.astype(int)
del df['secID']
engine.execute(delete(SpecificRiskShort).where(SpecificRiskShort.trade_date == this_date))
data_info_log(df, SpecificRiskShort)
format_data(df)
df.to_sql(SpecificRiskShort.__table__.name, engine, index=False, if_exists='append')
df = api.RMSriskLongGet(tradeDate=ref_date)
df.rename(columns={'tradeDate': 'trade_date', 'ticker': 'code'}, inplace=True)
df.code = df.code.astype(int)
del df['secID']
engine.execute(delete(SpecificRiskLong).where(SpecificRiskLong.trade_date == this_date))
data_info_log(df, SpecificRiskLong)
format_data(df)
df.to_sql(SpecificRiskLong.__table__.name, engine, index=False, if_exists='append')
def update_uqer_industry_info(ds, **kwargs):
ref_date, this_date = process_date(ds)
flag = check_holiday(this_date)
if not flag:
return
query = select([Market.code]).where(Market.trade_date == this_date)
df = pd.read_sql(query, engine)
codes = df.code.astype(str).str.zfill(6)
engine.execute(delete(Industry).where(Industry.trade_date == this_date))
df = api.EquIndustryGet(intoDate=ref_date)
df = df[df.ticker.isin(codes)]
df['code'] = df.ticker.astype(int)
df['trade_date'] = this_date
df.rename(columns={'ticker': 'code'}, inplace=True)
df = df[['trade_date',
'code',
'industry',
'industryID',
'industrySymbol',
'industryID1',
'industryName1',
'industryID2',
'industryName2',
'industryID3',
'industryName3',
'IndustryID4',
'IndustryName4']]
data_info_log(df, Industry)
format_data(df)
df.to_sql(Industry.__table__.name, engine, index=False, if_exists='append')
def update_category(ds, **kwargs):
ref_date, this_date = process_date(ds)
flag = check_holiday(this_date)
if not flag:
return
codes = alpha_engine.fetch_codes(ref_date, UniversProxy('ashare'))
industry_matrix1 = alpha_engine.fetch_industry_matrix(ref_date, codes, 'sw', 1)
industry_matrix2 = alpha_engine.fetch_industry_matrix(ref_date, codes, 'sw_adj', 1)
cols1 = sorted(industry_matrix1.columns[2:].tolist())
vals1 = (industry_matrix1[cols1].values * np.array(range(1, len(cols1)+1))).sum(axis=1)
cols2 = sorted(industry_matrix2.columns[2:].tolist())
vals2 = (industry_matrix2[cols2].values * np.array(range(1, len(cols2) + 1))).sum(axis=1)
df = pd.DataFrame()
df['code'] = industry_matrix1.code.tolist()
df['trade_date'] = ref_date
df['sw1'] = vals1
df['sw1_adj'] = vals2
query = delete(Categories).where(
Categories.trade_date == ref_date
)
engine.execute(query)
df.to_sql(Categories.__table__.name, con=engine, if_exists='append', index=False)
def fetch_date(table, query_date, engine):
query_date = query_date.replace('-', '')
sql = "select * from {0} where Date = {1}".format(table, query_date)
df = pd.read_sql_query(sql, engine)
df.rename(columns={'Date': 'trade_date', 'Code': 'code'}, inplace=True)
cols = df.columns.tolist()
    cols[2] = '申万一级行业'
    cols[3] = '申万二级行业'
    cols[4] = '申万三级行业'
df.columns = cols
df['trade_date'] = pd.to_datetime(df.trade_date.astype(str))
return df
def update_factor_master(ds, **kwargs):
ref_date, this_date = process_date(ds)
flag = check_holiday(this_date)
if not flag:
return
tables = [Uqer, Gogoal, Experimental, RiskExposure]
meta = MetaData(bind=engine, reflect=True)
df = pd.DataFrame(columns=['factor', 'source', 'alias', 'updateTime', 'description'])
for t in tables:
source = t.__table__.name
table = meta.tables[source]
columns = table.columns.keys()
columns = list(set(columns).difference({'trade_date',
'code',
'secShortName',
'exchangeCD',
'updateTime',
'COUNTRY'}))
col_alias = [c + '_' + source for c in columns]
new_df = pd.DataFrame({'factor': columns,
'source': [source] * len(columns),
'alias': col_alias})
df = df.append(new_df)
query = delete(FactorMaster)
engine.execute(query)
df['updateTime'] = arrow.now().format('YYYY-MM-DD, HH:mm:ss')
df.to_sql(FactorMaster.__table__.name, engine, if_exists='append', index=False)
uqer_task = PythonOperator(
task_id='update_uqer_factors',
provide_context=True,
python_callable=update_uqer_factors,
dag=dag
)
market_task = PythonOperator(
task_id='update_uqer_market',
provide_context=True,
python_callable=update_uqer_market,
dag=dag
)
universe_task = PythonOperator(
task_id='update_universe',
provide_context=True,
python_callable=update_universe,
dag=dag
)
index_market_task = PythonOperator(
task_id='update_uqer_index_market',
provide_context=True,
python_callable=update_uqer_index_market,
dag=dag
)
industry_task = PythonOperator(
task_id='update_uqer_industry_info',
provide_context=True,
python_callable=update_uqer_industry_info,
dag=dag
)
sw1_adj_industry_task = PythonOperator(
task_id='update_sw1_adj_industry',
provide_context=True,
python_callable=update_sw1_adj_industry,
dag=dag
)
dx_industry_task = PythonOperator(
task_id='update_dx_industry',
provide_context=True,
python_callable=update_dx_industry,
dag=dag
)
industry_task.set_upstream(market_task)
sw1_adj_industry_task.set_upstream(industry_task)
dx_industry_task.set_upstream(industry_task)
categories_task = PythonOperator(
task_id='update_categories',
provide_context=True,
python_callable=update_category,
dag=dag
)
categories_task.set_upstream(sw1_adj_industry_task)
index_task = PythonOperator(
task_id='update_uqer_index_components',
provide_context=True,
python_callable=update_uqer_index_components,
dag=dag
)
security_master_task = PythonOperator(
task_id='update_uqer_universe_security_master',
provide_context=True,
python_callable=update_uqer_universe_security_master,
dag=dag
)
universe_task.set_upstream(security_master_task)
universe_task.set_upstream(index_task)
risk_model_task = PythonOperator(
task_id='update_uqer_risk_model',
provide_context=True,
python_callable=update_uqer_risk_model,
dag=dag
)
universe_task.set_upstream(risk_model_task)
_ = PythonOperator(
task_id='update_uqer_halt_list',
provide_context=True,
python_callable=update_uqer_halt_list,
dag=dag
)
factor_master_task = PythonOperator(
task_id='update_factor_master',
provide_context=True,
python_callable=update_factor_master,
dag=dag
)
factor_master_task.set_upstream(uqer_task)
if __name__ == '__main__':
update_universe(ds='2018-05-09')
|
the-stack_0_21352 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 4 17:10:31 2021
@author: datadevil
"""
# Display logo
from data import data
from art import logo, vs
import random
from replit import clear
print(logo)
#Formatting the account data
#{'name':,'follower count':,'description':,'country':} --> name, descrip, country
def format_data(account):
    '''Takes a celebrity's account dict and returns a neatly formatted description'''
acc_name = account['name']
acc_fcount = account['follower_count']
acc_desc = account['description']
acc_country = account['country']
return (f"{acc_name}, {acc_desc}, from {acc_country}")
def check_ans(guess, a_followers, b_followers):
    '''Takes both follower counts and returns True if the guess was right'''
if a_followers > b_followers:
return guess == "a"
else:
return guess == "b"
score = 0
game_over = False
accB = random.choice(data)
while game_over == False:
accA = accB
accB = random.choice(data)
while accA == accB:
accB = random.choice(data)
print(f"Compare A: {format_data(accA)}")
print(vs)
print(f"Aganist B: {format_data(accB)}")
#Ask user for the guess
response = input("Who has more followers? A or B ").lower()
A_fcount = accA['follower_count']
B_fcount = accB['follower_count']
is_crt = check_ans(response, A_fcount, B_fcount)
clear()
if is_crt:
print("You're Right")
score += 1
print(f"And your score is {score}")
else:
print("Sorry you're wrong")
print(f"Your final score is {score}")
game_over = True
|
the-stack_0_21353 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from io import BytesIO
import pandas as pd
import numpy as np
from ... import opcodes as OperandDef
from ...config import options
from ...core import OutputType
from ...utils import parse_readable_size, lazy_import, FixedSizeFileObject
from ...serialize import StringField, DictField, ListField, Int32Field, Int64Field, BoolField, AnyField
from ...filesystem import open_file, file_size, glob
from ..arrays import ArrowStringDtype
from ..core import IndexValue
from ..utils import parse_index, build_empty_df, standardize_range_index, to_arrow_dtypes
from ..operands import DataFrameOperand, DataFrameOperandMixin
try:
from pyarrow import HdfsFile
except ImportError: # pragma: no cover
HdfsFile = None
cudf = lazy_import('cudf', globals=globals())
def _find_delimiter(f, block_size=2 ** 16):
delimiter = b'\n'
if f.tell() == 0:
return 0
while True:
b = f.read(block_size)
if not b:
return f.tell()
elif delimiter in b:
return f.tell() - len(b) + b.index(delimiter) + 1
def _find_hdfs_start_end(f, offset, size):
# As pyarrow doesn't support `readline` operation (https://github.com/apache/arrow/issues/3838),
# we need to find the start and end of file block manually.
# Be careful with HdfsFile's seek, it doesn't allow seek beyond EOF.
loc = min(offset, f.size())
f.seek(loc)
start = _find_delimiter(f)
loc = min(offset + size, f.size())
f.seek(loc)
end = _find_delimiter(f)
return start, end
def _find_chunk_start_end(f, offset, size):
if HdfsFile is not None and isinstance(f, HdfsFile):
return _find_hdfs_start_end(f, offset, size)
f.seek(offset)
if f.tell() == 0:
start = 0
else:
f.readline()
start = f.tell()
f.seek(offset + size)
f.readline()
end = f.tell()
return start, end
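# Illustrative example (not part of the original module) of how the helper above
# realigns an arbitrary byte range onto line boundaries for a local file object:
#
#   >>> buf = BytesIO(b"a,b\n1,2\n3,4\n5,6\n")
#   >>> _find_chunk_start_end(buf, 5, 4)   # offset 5 falls inside the second row
#   (8, 12)
#
# The start advances to the next line break (byte 8) and the end extends past
# offset + size to the following break (byte 12), so no row is split across chunks.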
class DataFrameReadCSV(DataFrameOperand, DataFrameOperandMixin):
_op_type_ = OperandDef.READ_CSV
_path = AnyField('path')
_names = ListField('names')
_sep = StringField('sep')
_header = AnyField('header')
_index_col = Int32Field('index_col')
_compression = StringField('compression')
_usecols = ListField('usecols')
_offset = Int64Field('offset')
_size = Int64Field('size')
_nrows = Int64Field('nrows')
_incremental_index = BoolField('incremental_index')
_use_arrow_dtype = BoolField('use_arrow_dtype')
_keep_usecols_order = BoolField('keep_usecols_order')
_storage_options = DictField('storage_options')
def __init__(self, path=None, names=None, sep=None, header=None, index_col=None,
compression=None, usecols=None, offset=None, size=None, nrows=None,
gpu=None, keep_usecols_order=None, incremental_index=None,
use_arrow_dtype=None, storage_options=None, **kw):
super().__init__(_path=path, _names=names, _sep=sep, _header=header,
_index_col=index_col, _compression=compression,
_usecols=usecols, _offset=offset, _size=size, _nrows=nrows,
_gpu=gpu, _incremental_index=incremental_index,
_keep_usecols_order=keep_usecols_order,
_use_arrow_dtype=use_arrow_dtype,
_storage_options=storage_options,
_output_types=[OutputType.dataframe], **kw)
@property
def path(self):
return self._path
@property
def names(self):
return self._names
@property
def sep(self):
return self._sep
@property
def header(self):
return self._header
@property
def index_col(self):
return self._index_col
@property
def compression(self):
return self._compression
@property
def usecols(self):
return self._usecols
@property
def nrows(self):
return self._nrows
@property
def offset(self):
return self._offset
@property
def size(self):
return self._size
@property
def incremental_index(self):
return self._incremental_index
@property
def use_arrow_dtype(self):
return self._use_arrow_dtype
@property
def keep_usecols_order(self):
return self._keep_usecols_order
@property
def storage_options(self):
return self._storage_options
@classmethod
def _tile_compressed(cls, op):
# Compression does not support break into small parts
df = op.outputs[0]
chunk_op = op.copy().reset_key()
chunk_op._offset = 0
chunk_op._size = file_size(op.path)
shape = df.shape
new_chunk = chunk_op.new_chunk(None, shape=shape, index=(0, 0), index_value=df.index_value,
columns_value=df.columns_value, dtypes=df.dtypes)
new_op = op.copy()
nsplits = ((np.nan,), (df.shape[1],))
return new_op.new_dataframes(None, df.shape, dtypes=df.dtypes,
index_value=df.index_value,
columns_value=df.columns_value,
chunks=[new_chunk], nsplits=nsplits)
@classmethod
def _validate_dtypes(cls, dtypes, is_gpu):
dtypes = dtypes.to_dict()
# CuDF doesn't support object type, turn it to 'str'.
if is_gpu:
dtypes = dict((n, dt.name if dt != np.dtype('object') else 'str') for n, dt in dtypes.items())
return dtypes
@classmethod
def tile(cls, op):
if op.compression:
return cls._tile_compressed(op)
df = op.outputs[0]
chunk_bytes = df.extra_params.chunk_bytes
chunk_bytes = int(parse_readable_size(chunk_bytes)[0])
dtypes = df.dtypes
if op.use_arrow_dtype is None and not op.gpu and \
options.dataframe.use_arrow_dtype: # pragma: no cover
# check if use_arrow_dtype set on the server side
dtypes = to_arrow_dtypes(df.dtypes)
paths = op.path if isinstance(op.path, (tuple, list)) else glob(op.path, storage_options=op.storage_options)
out_chunks = []
index_num = 0
for path in paths:
total_bytes = file_size(path)
offset = 0
for _ in range(int(np.ceil(total_bytes * 1.0 / chunk_bytes))):
chunk_op = op.copy().reset_key()
chunk_op._path = path
chunk_op._offset = offset
chunk_op._size = min(chunk_bytes, total_bytes - offset)
shape = (np.nan, len(dtypes))
index_value = parse_index(df.index_value.to_pandas(), path, index_num)
new_chunk = chunk_op.new_chunk(None, shape=shape, index=(index_num, 0), index_value=index_value,
columns_value=df.columns_value, dtypes=dtypes)
out_chunks.append(new_chunk)
index_num += 1
offset += chunk_bytes
if op.incremental_index and len(out_chunks) > 1 and \
isinstance(df.index_value._index_value, IndexValue.RangeIndex):
out_chunks = standardize_range_index(out_chunks)
new_op = op.copy()
nsplits = ((np.nan,) * len(out_chunks), (df.shape[1],))
return new_op.new_dataframes(None, df.shape, dtypes=dtypes,
index_value=df.index_value,
columns_value=df.columns_value,
chunks=out_chunks, nsplits=nsplits)
@classmethod
def _pandas_read_csv(cls, f, op):
csv_kwargs = op.extra_params.copy()
out_df = op.outputs[0]
start, end = _find_chunk_start_end(f, op.offset, op.size)
f.seek(start)
b = FixedSizeFileObject(f, end - start)
if hasattr(out_df, 'dtypes'):
dtypes = out_df.dtypes
else:
# Output will be a Series in some optimize rules.
dtypes = pd.Series([out_df.dtype], index=[out_df.name])
if end == start:
# the last chunk may be empty
df = build_empty_df(dtypes)
if op.keep_usecols_order and not isinstance(op.usecols, list):
# convert to Series, if usecols is a scalar
df = df[op.usecols]
else:
if start == 0:
# The first chunk contains header
# As we specify names and dtype, we need to skip header rows
csv_kwargs['skiprows'] = 1 if op.header == 'infer' else op.header
if op.usecols:
usecols = op.usecols if isinstance(op.usecols, list) else [op.usecols]
else:
usecols = op.usecols
if cls._contains_arrow_dtype(dtypes):
# when keep_default_na is True which is default,
# will replace null value with np.nan,
# which will cause failure when converting to arrow string array
csv_kwargs['keep_default_na'] = False
df = pd.read_csv(b, sep=op.sep, names=op.names, index_col=op.index_col, usecols=usecols,
dtype=dtypes.to_dict(), nrows=op.nrows, **csv_kwargs)
if op.keep_usecols_order:
df = df[op.usecols]
return df
@classmethod
def _cudf_read_csv(cls, op): # pragma: no cover
if op.usecols:
usecols = op.usecols if isinstance(op.usecols, list) else [op.usecols]
else:
usecols = op.usecols
csv_kwargs = op.extra_params
if op.offset == 0:
df = cudf.read_csv(op.path, byte_range=(op.offset, op.size), sep=op.sep, usecols=usecols, **csv_kwargs)
else:
df = cudf.read_csv(op.path, byte_range=(op.offset, op.size), sep=op.sep, names=op.names,
usecols=usecols, dtype=cls._validate_dtypes(op.outputs[0].dtypes, op.gpu),
nrows=op.nrows, **csv_kwargs)
if op.keep_usecols_order:
df = df[op.usecols]
return df
@classmethod
def _contains_arrow_dtype(cls, dtypes):
return any(isinstance(dtype, ArrowStringDtype) for dtype in dtypes)
@classmethod
def execute(cls, ctx, op):
xdf = cudf if op.gpu else pd
out_df = op.outputs[0]
csv_kwargs = op.extra_params.copy()
with open_file(op.path, compression=op.compression, storage_options=op.storage_options) as f:
if op.compression is not None:
# As we specify names and dtype, we need to skip header rows
csv_kwargs['skiprows'] = 1 if op.header == 'infer' else op.header
dtypes = cls._validate_dtypes(op.outputs[0].dtypes, op.gpu)
if cls._contains_arrow_dtype(dtypes.values()):
# when keep_default_na is True which is default,
# will replace null value with np.nan,
# which will cause failure when converting to arrow string array
csv_kwargs['keep_default_na'] = False
df = xdf.read_csv(f, sep=op.sep, names=op.names, index_col=op.index_col,
usecols=op.usecols, dtype=dtypes,
nrows=op.nrows, **csv_kwargs)
if op.keep_usecols_order:
df = df[op.usecols]
else:
df = cls._cudf_read_csv(op) if op.gpu else cls._pandas_read_csv(f, op)
ctx[out_df.key] = df
def __call__(self, index_value=None, columns_value=None, dtypes=None, chunk_bytes=None):
shape = (np.nan, len(dtypes))
return self.new_dataframe(None, shape, dtypes=dtypes, index_value=index_value,
columns_value=columns_value, chunk_bytes=chunk_bytes)
def read_csv(path, names=None, sep=',', index_col=None, compression=None, header='infer',
dtype=None, usecols=None, nrows=None, chunk_bytes='64M', gpu=None, head_bytes='100k',
head_lines=None, incremental_index=False, use_arrow_dtype=None,
storage_options=None, **kwargs):
r"""
Read a comma-separated values (csv) file into DataFrame.
Also supports optionally iterating or breaking of the file
into chunks.
Parameters
----------
path : str
Any valid string path is acceptable. The string could be a URL. Valid
URL schemes include http, ftp, s3, and file. For file URLs, a host is
expected. A local file could be: file://localhost/path/to/table.csv.
If you want to pass in a path object, pandas accepts any ``os.PathLike``.
By file-like object, we refer to objects with a ``read()`` method, such as
a file handler (e.g. via builtin ``open`` function) or ``StringIO``.
sep : str, default ','
Delimiter to use. If sep is None, the C engine cannot automatically detect
the separator, but the Python parsing engine can, meaning the latter will
be used and automatically detect the separator by Python's builtin sniffer
tool, ``csv.Sniffer``. In addition, separators longer than 1 character and
different from ``'\s+'`` will be interpreted as regular expressions and
will also force the use of the Python parsing engine. Note that regex
delimiters are prone to ignoring quoted data. Regex example: ``'\r\t'``.
delimiter : str, default ``None``
Alias for sep.
header : int, list of int, default 'infer'
Row number(s) to use as the column names, and the start of the
data. Default behavior is to infer the column names: if no names
are passed the behavior is identical to ``header=0`` and column
names are inferred from the first line of the file, if column
names are passed explicitly then the behavior is identical to
``header=None``. Explicitly pass ``header=0`` to be able to
replace existing names. The header can be a list of integers that
specify row locations for a multi-index on the columns
e.g. [0,1,3]. Intervening rows that are not specified will be
skipped (e.g. 2 in this example is skipped). Note that this
parameter ignores commented lines and empty lines if
``skip_blank_lines=True``, so ``header=0`` denotes the first line of
data rather than the first line of the file.
names : array-like, optional
List of column names to use. If the file contains a header row,
then you should explicitly pass ``header=0`` to override the column names.
Duplicates in this list are not allowed.
index_col : int, str, sequence of int / str, or False, default ``None``
Column(s) to use as the row labels of the ``DataFrame``, either given as
string name or column index. If a sequence of int / str is given, a
MultiIndex is used.
Note: ``index_col=False`` can be used to force pandas to *not* use the first
column as the index, e.g. when you have a malformed file with delimiters at
the end of each line.
usecols : list-like or callable, optional
Return a subset of the columns. If list-like, all elements must either
be positional (i.e. integer indices into the document columns) or strings
that correspond to column names provided either by the user in `names` or
inferred from the document header row(s). For example, a valid list-like
`usecols` parameter would be ``[0, 1, 2]`` or ``['foo', 'bar', 'baz']``.
Element order is ignored, so ``usecols=[0, 1]`` is the same as ``[1, 0]``.
To instantiate a DataFrame from ``data`` with element order preserved use
``pd.read_csv(data, usecols=['foo', 'bar'])[['foo', 'bar']]`` for columns
in ``['foo', 'bar']`` order or
``pd.read_csv(data, usecols=['foo', 'bar'])[['bar', 'foo']]``
for ``['bar', 'foo']`` order.
If callable, the callable function will be evaluated against the column
names, returning names where the callable function evaluates to True. An
example of a valid callable argument would be ``lambda x: x.upper() in
['AAA', 'BBB', 'DDD']``. Using this parameter results in much faster
parsing time and lower memory usage.
squeeze : bool, default False
If the parsed data only contains one column then return a Series.
prefix : str, optional
Prefix to add to column numbers when no header, e.g. 'X' for X0, X1, ...
mangle_dupe_cols : bool, default True
Duplicate columns will be specified as 'X', 'X.1', ...'X.N', rather than
'X'...'X'. Passing in False will cause data to be overwritten if there
are duplicate names in the columns.
dtype : Type name or dict of column -> type, optional
Data type for data or columns. E.g. {'a': np.float64, 'b': np.int32,
'c': 'Int64'}
Use `str` or `object` together with suitable `na_values` settings
to preserve and not interpret dtype.
If converters are specified, they will be applied INSTEAD
of dtype conversion.
engine : {'c', 'python'}, optional
Parser engine to use. The C engine is faster while the python engine is
currently more feature-complete.
converters : dict, optional
Dict of functions for converting values in certain columns. Keys can either
be integers or column labels.
true_values : list, optional
Values to consider as True.
false_values : list, optional
Values to consider as False.
skipinitialspace : bool, default False
Skip spaces after delimiter.
skiprows : list-like, int or callable, optional
Line numbers to skip (0-indexed) or number of lines to skip (int)
at the start of the file.
If callable, the callable function will be evaluated against the row
indices, returning True if the row should be skipped and False otherwise.
An example of a valid callable argument would be ``lambda x: x in [0, 2]``.
skipfooter : int, default 0
Number of lines at bottom of file to skip (Unsupported with engine='c').
nrows : int, optional
Number of rows of file to read. Useful for reading pieces of large files.
na_values : scalar, str, list-like, or dict, optional
Additional strings to recognize as NA/NaN. If dict passed, specific
per-column NA values. By default the following values are interpreted as
NaN: '', '#N/A', '#N/A N/A', '#NA', '-1.#IND', '-1.#QNAN', '-NaN', '-nan',
'1.#IND', '1.#QNAN', '<NA>', 'N/A', 'NA', 'NULL', 'NaN', 'n/a',
'nan', 'null'.
keep_default_na : bool, default True
Whether or not to include the default NaN values when parsing the data.
Depending on whether `na_values` is passed in, the behavior is as follows:
* If `keep_default_na` is True, and `na_values` are specified, `na_values`
is appended to the default NaN values used for parsing.
* If `keep_default_na` is True, and `na_values` are not specified, only
the default NaN values are used for parsing.
* If `keep_default_na` is False, and `na_values` are specified, only
the NaN values specified `na_values` are used for parsing.
* If `keep_default_na` is False, and `na_values` are not specified, no
strings will be parsed as NaN.
Note that if `na_filter` is passed in as False, the `keep_default_na` and
`na_values` parameters will be ignored.
na_filter : bool, default True
Detect missing value markers (empty strings and the value of na_values). In
data without any NAs, passing na_filter=False can improve the performance
of reading a large file.
verbose : bool, default False
Indicate number of NA values placed in non-numeric columns.
skip_blank_lines : bool, default True
If True, skip over blank lines rather than interpreting as NaN values.
parse_dates : bool or list of int or names or list of lists or dict, default False
The behavior is as follows:
* boolean. If True -> try parsing the index.
* list of int or names. e.g. If [1, 2, 3] -> try parsing columns 1, 2, 3
each as a separate date column.
* list of lists. e.g. If [[1, 3]] -> combine columns 1 and 3 and parse as
a single date column.
* dict, e.g. {'foo' : [1, 3]} -> parse columns 1, 3 as date and call
result 'foo'
If a column or index cannot be represented as an array of datetimes,
say because of an unparseable value or a mixture of timezones, the column
or index will be returned unaltered as an object data type. For
non-standard datetime parsing, use ``pd.to_datetime`` after
``pd.read_csv``. To parse an index or column with a mixture of timezones,
specify ``date_parser`` to be a partially-applied
:func:`pandas.to_datetime` with ``utc=True``. See
:ref:`io.csv.mixed_timezones` for more.
Note: A fast-path exists for iso8601-formatted dates.
infer_datetime_format : bool, default False
If True and `parse_dates` is enabled, pandas will attempt to infer the
format of the datetime strings in the columns, and if it can be inferred,
switch to a faster method of parsing them. In some cases this can increase
the parsing speed by 5-10x.
keep_date_col : bool, default False
If True and `parse_dates` specifies combining multiple columns then
keep the original columns.
date_parser : function, optional
Function to use for converting a sequence of string columns to an array of
datetime instances. The default uses ``dateutil.parser.parser`` to do the
conversion. Pandas will try to call `date_parser` in three different ways,
advancing to the next if an exception occurs: 1) Pass one or more arrays
(as defined by `parse_dates`) as arguments; 2) concatenate (row-wise) the
string values from the columns defined by `parse_dates` into a single array
and pass that; and 3) call `date_parser` once for each row using one or
more strings (corresponding to the columns defined by `parse_dates`) as
arguments.
dayfirst : bool, default False
DD/MM format dates, international and European format.
cache_dates : bool, default True
If True, use a cache of unique, converted dates to apply the datetime
conversion. May produce significant speed-up when parsing duplicate
date strings, especially ones with timezone offsets.
.. versionadded:: 0.25.0
iterator : bool, default False
Return TextFileReader object for iteration or getting chunks with
``get_chunk()``.
chunksize : int, optional
Return TextFileReader object for iteration.
See the `IO Tools docs
<https://pandas.pydata.org/pandas-docs/stable/io.html#io-chunking>`_
for more information on ``iterator`` and ``chunksize``.
compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, default 'infer'
For on-the-fly decompression of on-disk data. If 'infer' and
`filepath_or_buffer` is path-like, then detect compression from the
following extensions: '.gz', '.bz2', '.zip', or '.xz' (otherwise no
decompression). If using 'zip', the ZIP file must contain only one data
file to be read in. Set to None for no decompression.
thousands : str, optional
Thousands separator.
decimal : str, default '.'
Character to recognize as decimal point (e.g. use ',' for European data).
lineterminator : str (length 1), optional
Character to break file into lines. Only valid with C parser.
quotechar : str (length 1), optional
The character used to denote the start and end of a quoted item. Quoted
items can include the delimiter and it will be ignored.
quoting : int or csv.QUOTE_* instance, default 0
Control field quoting behavior per ``csv.QUOTE_*`` constants. Use one of
QUOTE_MINIMAL (0), QUOTE_ALL (1), QUOTE_NONNUMERIC (2) or QUOTE_NONE (3).
doublequote : bool, default ``True``
When quotechar is specified and quoting is not ``QUOTE_NONE``, indicate
whether or not to interpret two consecutive quotechar elements INSIDE a
field as a single ``quotechar`` element.
escapechar : str (length 1), optional
One-character string used to escape other characters.
comment : str, optional
Indicates remainder of line should not be parsed. If found at the beginning
of a line, the line will be ignored altogether. This parameter must be a
single character. Like empty lines (as long as ``skip_blank_lines=True``),
fully commented lines are ignored by the parameter `header` but not by
`skiprows`. For example, if ``comment='#'``, parsing
``#empty\na,b,c\n1,2,3`` with ``header=0`` will result in 'a,b,c' being
treated as the header.
encoding : str, optional
Encoding to use for UTF when reading/writing (ex. 'utf-8'). `List of Python
standard encodings
<https://docs.python.org/3/library/codecs.html#standard-encodings>`_ .
dialect : str or csv.Dialect, optional
If provided, this parameter will override values (default or not) for the
following parameters: `delimiter`, `doublequote`, `escapechar`,
`skipinitialspace`, `quotechar`, and `quoting`. If it is necessary to
override values, a ParserWarning will be issued. See csv.Dialect
documentation for more details.
error_bad_lines : bool, default True
Lines with too many fields (e.g. a csv line with too many commas) will by
default cause an exception to be raised, and no DataFrame will be returned.
If False, then these "bad lines" will dropped from the DataFrame that is
returned.
warn_bad_lines : bool, default True
If error_bad_lines is False, and warn_bad_lines is True, a warning for each
"bad line" will be output.
delim_whitespace : bool, default False
Specifies whether or not whitespace (e.g. ``' '`` or ``' '``) will be
used as the sep. Equivalent to setting ``sep='\s+'``. If this option
is set to True, nothing should be passed in for the ``delimiter``
parameter.
low_memory : bool, default True
Internally process the file in chunks, resulting in lower memory use
while parsing, but possibly mixed type inference. To ensure no mixed
types either set False, or specify the type with the `dtype` parameter.
Note that the entire file is read into a single DataFrame regardless,
use the `chunksize` or `iterator` parameter to return the data in chunks.
(Only valid with C parser).
float_precision : str, optional
Specifies which converter the C engine should use for floating-point
values. The options are `None` for the ordinary converter,
`high` for the high-precision converter, and `round_trip` for the
round-trip converter.
chunk_bytes: int, float or str, optional
Number of chunk bytes.
gpu: bool, default False
If read into cudf DataFrame.
head_bytes: int, float or str, optional
Number of bytes to use in the head of file, mainly for data inference.
head_lines: int, optional
Number of lines to use in the head of file, mainly for data inference.
incremental_index: bool, default False
Create a new RangeIndex if csv doesn't contain index columns.
use_arrow_dtype: bool, default None
If True, use arrow dtype to store columns.
storage_options: dict, optional
Options for storage connection.
Returns
-------
DataFrame
A comma-separated values (csv) file is returned as two-dimensional
data structure with labeled axes.
See Also
--------
to_csv : Write DataFrame to a comma-separated values (csv) file.
Examples
--------
>>> import mars.dataframe as md
>>> md.read_csv('data.csv') # doctest: +SKIP
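    Selecting a subset of columns works as in pandas ('data.csv' here is only
    a placeholder path):
    >>> md.read_csv('data.csv', usecols=['foo', 'bar']) # doctest: +SKIP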
"""
# infer dtypes and columns
if isinstance(path, (list, tuple)):
file_path = path[0]
else:
file_path = glob(path)[0]
with open_file(file_path, compression=compression, storage_options=storage_options) as f:
if head_lines is not None:
b = b''.join([f.readline() for _ in range(head_lines)])
else:
head_bytes = int(parse_readable_size(head_bytes)[0])
head_start, head_end = _find_chunk_start_end(f, 0, head_bytes)
f.seek(head_start)
b = f.read(head_end - head_start)
mini_df = pd.read_csv(BytesIO(b), sep=sep, index_col=index_col, dtype=dtype,
names=names, header=header)
if isinstance(mini_df.index, pd.RangeIndex):
index_value = parse_index(pd.RangeIndex(-1))
else:
index_value = parse_index(mini_df.index)
columns_value = parse_index(mini_df.columns, store_data=True)
if index_col and not isinstance(index_col, int):
index_col = list(mini_df.columns).index(index_col)
names = list(mini_df.columns)
op = DataFrameReadCSV(path=path, names=names, sep=sep, header=header, index_col=index_col,
usecols=usecols, compression=compression, gpu=gpu,
incremental_index=incremental_index, use_arrow_dtype=use_arrow_dtype,
storage_options=storage_options,
**kwargs)
chunk_bytes = chunk_bytes or options.chunk_store_limit
dtypes = mini_df.dtypes
if use_arrow_dtype is None:
use_arrow_dtype = options.dataframe.use_arrow_dtype
if not gpu and use_arrow_dtype:
dtypes = to_arrow_dtypes(dtypes, test_df=mini_df)
ret = op(index_value=index_value, columns_value=columns_value,
dtypes=dtypes, chunk_bytes=chunk_bytes)
if nrows is not None:
return ret.head(nrows)
return ret
|
the-stack_0_21354 | import copy
from typing import Dict, Optional
from . import rid, scd
def limit_long_arrays(obj, limit: int):
if isinstance(obj, dict):
result = {}
for k, v in obj.items():
result[k] = limit_long_arrays(v, limit)
return result
elif isinstance(obj, str):
return obj
elif isinstance(obj, list):
if len(obj) > limit:
return '<list of {} items>'.format(len(obj))
else:
return [limit_long_arrays(item, limit) for item in obj]
else:
return obj
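# Example (illustrative values): lists longer than the limit collapse into a
# placeholder string, everything else passes through unchanged.
#   limit_long_arrays({'a': [1, 2, 3, 4], 'b': 'x'}, 3)
#   # -> {'a': '<list of 4 items>', 'b': 'x'}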
def isas(fetched: rid.FetchedISAs) -> Dict:
summary = {}
if fetched.success:
for isa_id, isa in fetched.isas.items():
if isa.flights_url not in summary:
summary[isa.flights_url] = {}
isa_summary = copy.deepcopy(isa)
if 'id' in isa_summary:
del isa_summary['id']
if 'owner' in isa_summary:
del isa_summary['owner']
isa_key = '{} ({})'.format(isa.id, isa.owner)
summary[isa.flights_url][isa_key] = isa_summary
else:
summary['error'] = fetched.error
return summary
def _entity(fetched: scd.FetchedEntities, id: str) -> Dict:
entity = fetched.entities_by_id[id]
if entity.success:
return {
'reference': {
'dss': fetched.dss_query.references_by_id.get(id, None),
'uss': entity.reference,
},
'details': entity.details,
}
else:
return {
'error': entity.error,
}
def entities(fetched: scd.FetchedEntities, entity_type: Optional[str]=None) -> Dict:
if fetched.success:
if entity_type is not None:
return {
entity_type: {id: _entity(fetched, id) for id in fetched.entities_by_id},
}
else:
return {
'new': {id: _entity(fetched, id) for id in fetched.new_entities_by_id},
'cached': {id: _entity(fetched, id) for id in fetched.cached_entities_by_id},
}
else:
return {
'error': fetched.error,
}
def flights(fetched: rid.FetchedFlights) -> Dict:
if fetched.success:
isas_by_url = {}
owners_by_url = {}
for isa_id, isa in fetched.dss_isa_query.isas.items():
if isa.flights_url not in isas_by_url:
isas_by_url[isa.flights_url] = {}
isa_info = copy.deepcopy(isa)
del isa_info['id']
isas_by_url[isa.flights_url][isa_id] = isa_info
owners_by_url[isa.flights_url] = isa.owner
summary = {}
for url, flights_result in fetched.uss_flight_queries.items():
if flights_result.success:
owner = owners_by_url[url]
isas = isas_by_url[url]
for rid_flight in flights_result.flights:
flight = copy.deepcopy(rid_flight)
flight['isas'] = isas
if rid_flight.id in fetched.uss_flight_details_queries:
flight['details'] = fetched.uss_flight_details_queries[rid_flight.id].details
summary['{} ({})'.format(rid_flight.id, owner)] = flight
return summary
else:
return {
'errors': fetched.errors
}
|
the-stack_0_21355 | from __future__ import print_function
import pylab
from rh_renderer.multiple_tiles_affine_renderer import MultipleTilesAffineRenderer
from rh_renderer.single_tile_affine_renderer import SingleTileAffineRenderer
import math
import numpy as np
import time
def get_rigid_matrix(t):
splitted = t.split()
r = float(splitted[0])
cos_val = np.cos(r)
sin_val = np.sin(r)
delta = np.array([float(d) for d in splitted[1:]])
return np.vstack([
[cos_val, -sin_val, delta[0]],
[sin_val, cos_val, delta[1]]
])
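# For instance (made-up transform string): get_rigid_matrix('0.0 10 20') yields
# a 2x3 affine matrix equal to [[1, 0, 10], [0, 1, 20]], i.e. a pure
# translation by (10, 20), because the rotation angle is zero.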
if __name__ == '__main__':
img_paths = [
'images/tile1.bmp',
'images/tile2.bmp'
]
img_shapes = [
(2976, 3348),
(2976, 3348)
]
# Rigid transforms
transform_models = [
'0.000385855181988 15014.2735713 11052.6315792',
'0.000472672980423 18213.433402 11034.7113096'
]
# Rigid transforms
transform_models = [
'0.00139610475169 15771.948614 34106.6791879',
'0.0012425735603 18776.1580651 34096.4384221',
'0.00120817972641 17252.4365449 31500.3165185',
'0.00117953590639 14242.9767036 31510.4183765'
]
transform_matrices = [get_rigid_matrix(t) for t in transform_models]
# Create all single tiles, and add their transformations
single_tiles = [SingleTileAffineRenderer(img_path, img_shape[1], img_shape[0], compute_mask=True, compute_distances=True) for
img_path, img_shape in zip(img_paths, img_shapes)]
for single_tile, matrix in zip(single_tiles, transform_matrices):
single_tile.add_transformation(matrix)
# Create multiple tiles renderer using different blending techniques
renderer1 = MultipleTilesAffineRenderer(single_tiles, blend_type="NO_BLENDING")
renderer2 = MultipleTilesAffineRenderer(single_tiles, blend_type="AVERAGING")
renderer3 = MultipleTilesAffineRenderer(single_tiles, blend_type="LINEAR")
# Add a transformation
transform_45 = np.array([[math.cos(math.pi/4), -math.sin(math.pi/4), 10.0], [math.sin(math.pi/4), math.cos(math.pi/4), 15.0]])
print("Adding transformation:", transform_45)
renderer1.add_transformation(transform_45)
renderer2.add_transformation(transform_45)
renderer3.add_transformation(transform_45)
start_time = time.time()
img1, start_point1 = renderer1.render()
print("NO_BLENDING Before transformations: Start point is at:", start_point1, "image shape:", img1.shape, "took: {} seconds".format(time.time() - start_time))
pylab.figure()
pylab.imshow(img1, cmap='gray', vmin=0., vmax=255.)
start_time = time.time()
img2, start_point2 = renderer2.render()
print("AVERAGING Before transformations: Start point is at:", start_point2, "image shape:", img2.shape, "took: {} seconds".format(time.time() - start_time))
pylab.figure()
pylab.imshow(img2, cmap='gray', vmin=0., vmax=255.)
start_time = time.time()
img3, start_point3 = renderer3.render()
print("LINEAR Before transformations: Start point is at:", start_point3, "image shape:", img3.shape, "took: {} seconds".format(time.time() - start_time))
pylab.figure()
pylab.imshow(img3, cmap='gray', vmin=0., vmax=255.)
pylab.show()
|
the-stack_0_21357 | """
Simple test interface for a quick and error-free setup
"""
from utility.json_utils import load_json, save_json
import os
import const_define as cd
from utility.log_utils import get_logger
logger = get_logger(__name__)
if __name__ == '__main__':
# Any model name
model_type = "experimental_basic_memn2n_v2"
# Leave this blank
model_dataset_type = ""
# This should be fixed
dataset_name = "tos_100"
# If using KB -> task1_kb - If not using KB -> task1
task_type = 'task1_kb'
# A, CH, CR, TER, LTD
category = 'CH'
# This should be fixed
test_type = "cv_test"
# Leave this blank
architecture_type = ""
# If you want to save results and do a proper test -> True
is_valid_test = False
batch_size = 256
step_checkpoint = None
quick_settings = load_json(os.path.join(cd.CONFIGS_DIR, cd.JSON_QUICK_SETUP_CONFIG_NAME))
logger.info('Quick setup start!')
# Data loader
data_loader_settings = load_json(os.path.join(cd.CONFIGS_DIR, cd.JSON_DATA_LOADER_CONFIG_NAME))
data_loader_settings['type'] = task_type
data_loader_settings['configs'][task_type]['category'] = category
if architecture_type in quick_settings['input_specific_settings']:
logger.info('Uploading data loader settings...')
for key, value in quick_settings['input_specific_settings'][architecture_type].items():
data_loader_settings['configs'][task_type][key] = value
else:
logger.warning('Could not find architecture type specific settings...Is it ok?')
logger.info('Data loader settings uploaded! Do not forget to check loader specific args!!')
save_json(os.path.join(cd.CONFIGS_DIR, cd.JSON_DATA_LOADER_CONFIG_NAME), data_loader_settings)
# Specific test settings
if model_dataset_type:
actual_model_type = model_dataset_type + '_' + model_type
else:
actual_model_type = model_type
test_settings = load_json(os.path.join(cd.CONFIGS_DIR, '{}_config.json'.format(test_type)))
test_settings['model_type'] = actual_model_type
if dataset_name in quick_settings['data_specific_settings']:
logger.info('Uploading {} settings...'.format(test_type))
for key, value in quick_settings['data_specific_settings'][dataset_name].items():
test_settings[key] = value
else:
logger.warning('Could not find dataset test specific settings...Aborting..')
exit(-1)
if is_valid_test:
if test_type in quick_settings['valid_test_settings'] and dataset_name in quick_settings['valid_test_settings'][test_type]:
for key, value in quick_settings['valid_test_settings'][test_type][dataset_name].items():
test_settings[key] = value
test_settings['save_model'] = True
test_settings['compute_test_info'] = True
else:
if test_type in quick_settings['default_test_settings'] and dataset_name in quick_settings['default_test_settings'][test_type]:
for key, value in quick_settings['default_test_settings'][test_type][dataset_name].items():
test_settings[key] = value
test_settings['save_model'] = False
if model_type in quick_settings['model_specific_settings']:
for key, value in quick_settings['model_specific_settings'][model_type].items():
test_settings[key] = value
else:
logger.warning('Could not find model specific settings! Is this ok?')
save_json(os.path.join(cd.CONFIGS_DIR, '{}_config.json'.format(test_type)), test_settings)
# Training settings
training_path = os.path.join(cd.CONFIGS_DIR, cd.JSON_TRAINING_CONFIG_NAME)
training_settings = load_json(training_path)
logger.info('Uploading {} training settings...'.format(test_type))
training_settings['batch_size'] = batch_size
training_settings['metrics'] = quick_settings['data_specific_settings'][dataset_name]['error_metrics']
training_settings['additional_metrics_info'] = quick_settings['data_specific_settings'][dataset_name]['error_metrics_additional_info']
training_settings['metrics_nicknames'] = quick_settings['data_specific_settings'][dataset_name]['error_metrics_nicknames']
save_json(training_path, training_settings)
logger.info('Quick setup upload done! Check loader specific settings and your model config before running the test!') |
the-stack_0_21358 | import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import h5py
import sys
from lib.constants import *
def plot(fig, trajectory, sensor_height):
# Filter out everything above block (start of trajectory)
traj_inside = trajectory[trajectory[:, 2] < sensor_height]
ax = fig.add_subplot(111, projection='3d') # type:Axes3D
ax.view_init(elev=20., azim=32)
# Plot track
ax.plot(traj_inside[:, 0], traj_inside[:, 1], traj_inside[:, 2])
# Plot incident electron
print("Traj incident: %f, %f" % (traj_inside[0, 1] / 55000, traj_inside[0, 0] / 55000))
ax.plot([traj_inside[0, 0]], [traj_inside[0, 1]], [sensor_height], 'ro')
# Set axes scale
ax.set_xlim3d(0, n_pixels * pixel_size)
ax.set_ylim3d(0, n_pixels * pixel_size)
ax.set_zlim3d(0, sensor_height)
# Set tick lines to pixel size,
xedges = np.arange(0, n_pixels * pixel_size, pixel_size)
yedges = np.arange(0, n_pixels * pixel_size, pixel_size)
zedges = np.arange(0, sensor_height, 100000)
ax.set_yticks(yedges, minor=False)
ax.yaxis.grid(True, which='major')
ax.yaxis.set_ticklabels([])
ax.set_xticks(xedges, minor=False)
ax.xaxis.grid(True, which='major')
ax.xaxis.set_ticklabels([])
ax.set_zticks(zedges, minor=False)
ax.zaxis.grid(True, which='major')
# Change background color
ax.xaxis.set_pane_color((1.0, 1.0, 1.0, 1.0))
ax.yaxis.set_pane_color((1.0, 1.0, 1.0, 1.0))
ax.zaxis.set_pane_color((1.0, 1.0, 1.0, 1.0))
return fig, ax
if __name__ == "__main__":
filename = sys.argv[1]
f = h5py.File(filename, 'r')
# Get trajectory
trajectory = f['trajectories'][sys.argv[2]][()]
height = f.attrs['sensor_height']
ax = plot(plt.figure(), trajectory, height)
# from matplotlib import animation
# def animate(i):
# ax[1].view_init(30, i)
# plt.draw()
# plt.pause(.001)
# # Animate
# anim = animation.FuncAnimation(ax[0], animate,
# frames=360, interval=20)
# # Save
# anim.save('basic_animation.mp4', fps=30, extra_args=['-vcodec', 'libx264'])
plt.show()
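    # Invocation sketch (script and file names are placeholders):
    #   python plot_trajectory.py run0.h5 17
    # argv[1] is the HDF5 file produced by the simulation, argv[2] is the key
    # of the trajectory dataset under /trajectories to draw.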
|
the-stack_0_21359 | #! /usr/bin/env python
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import nose
from nose.tools import assert_equals
from sknano.testing import AtomsTestFixture, generate_atoms
class TestCase(AtomsTestFixture):
def test1(self):
atoms = self.atoms
atoms.kNN = 6
atoms.NNrc = 9.0
for atom in atoms:
assert_equals(atom.CN, 0)
atom.CN = 3
assert_equals(atom.CN, 3)
atoms.update_attrs()
atoms = atoms.filtered((atoms.z >= -5) & (atoms.z <= 5))
print('Natoms: {}'.format(atoms.Natoms))
for atom in atoms:
assert_equals(atom.CN, atoms.kNN)
def test2(self):
atoms = self.atoms
atoms.update_attrs()
print(atoms.neighbors.tolist())
print(atoms.neighbor_distances)
def test3(self):
atoms = \
generate_atoms(generator_class='SWNTGenerator', n=10, m=0, nz=5)
atoms.kNN = 30
atoms.NNrc = 10
atoms.update_neighbors(cutoffs=[1.5, 2.5, 2.9, 3.8, 4.3])
# print(atoms.neighbors.tolist())
print(atoms.neighbor_distances.tolist())
# print('first_neighbors:\n{}'.format(atoms.first_neighbors))
# print('second_neighbors:\n{}'.format(atoms.second_neighbors))
print('len(first_neighbors): {}'.format(
[len(atom.first_neighbors) for atom in atoms]))
print('len(second_neighbors): {}'.format(
[len(atom.second_neighbors) for atom in atoms]))
print('len(third_neighbors): {}'.format(
[len(atom.third_neighbors) for atom in atoms]))
print('len(4th_neighbors): {}'.format(
[len(atom.get_neighbors(4)) for atom in atoms]))
print('len(5th_neighbors): {}'.format(
[len(atom.get_neighbors(5)) for atom in atoms]))
if __name__ == '__main__':
nose.runmodule()
|
the-stack_0_21360 | from blueman.Functions import dprint
import tty
import termios
import os
import subprocess
from gi.repository import GObject
import errno
import re
pppd_errors = {
1: """An immediately fatal error of some kind occurred, such as an essential system call failing, or running out of virtual memory.""",
2: """An error was detected in processing the options given, such as two mutually exclusive options being used.""",
3: """Pppd is not setuid-root and the invoking user is not root.""",
4: """The kernel does not support PPP, for example, the PPP kernel driver is not included or cannot be loaded.""",
5: """Pppd terminated because it was sent a SIGINT, SIGTERM or SIGHUP signal.""",
6: """The serial port could not be locked.""", 7: """The serial port could not be opened.""",
8: """The connect script failed (returned a non-zero exit status).""",
9: """The command specified as the argument to the pty option could not be run.""",
10: """The PPP negotiation failed, that is, it didn't reach the point where at least one network protocol (e.g. IP) was running.""",
11: """The peer system failed (or refused) to authenticate itself.""",
12: """The link was established successfully and terminated because it was idle.""",
13: """The link was established successfully and terminated because the connect time limit was reached.""",
14: """Callback was negotiated and an incoming call should arrive shortly.""",
15: """The link was terminated because the peer is not responding to echo requests.""",
16: """The link was terminated by the modem hanging up.""",
17: """The PPP negotiation failed because serial loopback was detected.""",
18: """The init script failed (returned a non-zero exit status).""",
19: """We failed to authenticate ourselves to the peer."""
}
class PPPException(Exception):
pass
class PPPConnection(GObject.GObject):
__gsignals__ = { # arg: interface name eg. ppp0
'connected': (GObject.SignalFlags.NO_HOOKS, None, (GObject.TYPE_PYOBJECT,)),
'error-occurred': (GObject.SignalFlags.NO_HOOKS, None, (GObject.TYPE_PYOBJECT,))
}
def __init__(self, port, number="*99#", apn="", user="", pwd=""):
GObject.GObject.__init__(self)
self.apn = apn
self.number = number
self.user = user
self.pwd = pwd
self.port = port
self.interface = None
self.pppd = None
self.file = None
self.commands = [
("ATZ E0 V1 X4 &C1 +FCLASS=0", self.simple_callback),
("ATE0", self.simple_callback),
("AT+GCAP", self.simple_callback),
(
"ATD%s" % self.number,
self.connect_callback,
["CONNECT", "NO CARRIER", "BUSY", "NO ANSWER", "NO DIALTONE", "OK", "ERROR"]
)
]
if self.apn != "":
self.commands.insert(-1, ('AT+CGDCONT=1,"IP","%s"' % self.apn, self.simple_callback))
def cleanup(self):
os.close(self.file)
self.file = None
def simple_callback(self, response):
pass
def connect_callback(self, response):
if "CONNECT" in response:
dprint("Starting pppd")
self.pppd = subprocess.Popen(
["/usr/sbin/pppd", "%s" % self.port, "115200", "defaultroute", "updetach", "usepeerdns"], bufsize=1,
stdout=subprocess.PIPE)
GObject.io_add_watch(self.pppd.stdout, GObject.IO_IN | GObject.IO_ERR | GObject.IO_HUP, self.on_pppd_stdout)
GObject.timeout_add(1000, self.check_pppd)
self.cleanup()
else:
self.cleanup()
raise PPPException("Bad modem response %s, expected CONNECT" % response[0])
def __cmd_response_cb(self, response, exception, item_id):
if exception:
self.emit("error-occurred", str(exception))
else:
try:
self.commands[item_id][1](response)
except PPPException as e:
self.emit("error-occurred", str(e))
return
self.send_commands(item_id + 1)
def send_commands(self, id=0):
try:
item = self.commands[id]
except IndexError:
return
if len(item) == 3:
(command, callback, terminators) = item
else:
(command, callback) = item
terminators = ["OK", "ERROR"]
self.send_command(command)
self.wait_for_reply(self.__cmd_response_cb, terminators, id)
def Connect(self):
self.file = os.open(self.port, os.O_RDWR | os.O_EXCL | os.O_NONBLOCK | os.O_NOCTTY)
tty.setraw(self.file)
attrs = termios.tcgetattr(self.file)
attrs[0] &= ~(termios.IGNCR | termios.ICRNL | termios.IUCLC | termios.INPCK | termios.IXON | termios.IXANY |
termios.IGNPAR)
attrs[1] &= ~(termios.OPOST | termios.OLCUC | termios.OCRNL | termios.ONLCR | termios.ONLRET)
attrs[3] &= ~(termios.ICANON | termios.XCASE | termios.ECHO | termios.ECHOE | termios.ECHONL)
attrs[3] &= ~(termios.ECHO | termios.ECHOE)
attrs[6][termios.VMIN] = 1
attrs[6][termios.VTIME] = 0
attrs[6][termios.VEOF] = 1
attrs[2] &= ~(termios.CBAUD | termios.CSIZE | termios.CSTOPB | termios.CLOCAL | termios.PARENB)
attrs[2] |= (termios.B9600 | termios.CS8 | termios.CREAD | termios.PARENB)
termios.tcsetattr(self.file, termios.TCSANOW, attrs)
termios.tcflush(self.file, termios.TCIOFLUSH)
self.send_commands()
def on_pppd_stdout(self, source, cond):
if cond & GObject.IO_ERR or cond & GObject.IO_HUP:
return False
line = source.readline()
m = re.match("Using interface (ppp[0-9]*)", line)
if m:
self.interface = m.groups(1)[0]
print(line)
return True
def check_pppd(self):
status = self.pppd.poll()
if status is not None:
if status == 0:
self.emit("connected", self.interface)
else:
try:
msg = "pppd exited: " + pppd_errors[int(status)]
except KeyError:
msg = "pppd exited with unknown error"
self.emit("error-occurred", msg)
print("pppd exited with status %d" % status)
return False
return True
def send_command(self, command):
dprint("-->", command)
os.write(self.file, "%s\r\n" % command)
termios.tcdrain(self.file)
def on_data_ready(self, source, condition, terminators, on_done):
if condition & GObject.IO_ERR or condition & GObject.IO_HUP:
on_done(None, PPPException("Socket error"))
self.cleanup()
return False
try:
self.buffer += os.read(self.file, 1)
except OSError as e:
if e.errno == errno.EAGAIN:
dprint("Got EAGAIN")
return True
else:
on_done(None, PPPException("Socket error"))
dprint(e)
self.cleanup()
return False
lines = self.buffer.split("\r\n")
found = False
for l in lines:
if l == "":
pass
else:
for t in terminators:
if t in l:
found = True
if found:
lines = filter(lambda x: x != "", lines)
lines = map(lambda x: x.strip("\r\n"), lines)
dprint("<-- ", lines)
on_done(lines, None)
return False
return True
def wait_for_reply(self, callback, terminators=["OK", "ERROR"], *user_data):
def on_timeout():
GObject.source_remove(self.io_watch)
callback(None, PPPException("Modem initialization timed out"), *user_data)
self.cleanup()
return False
def on_done(ret, exception):
GObject.source_remove(self.timeout)
callback(ret, exception, *user_data)
self.buffer = ""
self.term_found = False
self.io_watch = GObject.io_add_watch(self.file, GObject.IO_IN | GObject.IO_ERR | GObject.IO_HUP, self.on_data_ready,
terminators, on_done)
self.timeout = GObject.timeout_add(15000, on_timeout)
|
the-stack_0_21363 | # encoding=utf8
import sys
import os
import hashlib
import re
import time
import requests
import warcio
from warcio.archiveiterator import ArchiveIterator
from warcio.warcwriter import WARCWriter
assert hasattr(warcio, 'ATWARCIO'), 'warcio was not imported correctly. Location: ' + warcio.__file__
def ia_available(url, digest):
tries = 0
print('Deduplicating digest ' + digest + ', url ' + url)
assert digest.startswith('sha1:')
digest = digest.split(':', 1)[1]
encoded = digest + ';' + re.sub('^https?://', '', url)
encoded = encoded.encode('utf-8')
hashed = hashlib.sha256(encoded).hexdigest()
while tries < 10:
try:
tries += 1
ia_data = requests.get('http://NewsGrabberDedupe.b-cdn.net/?key={hashed}' \
.format(hashed=hashed), timeout=60)
if not ';' in ia_data.text:
return False
return ia_data.text.split(';', 1)
except:
pass
time.sleep(1)
return False
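# Lookup-key sketch (values are invented): for url 'http://example.com/x' and
# digest 'sha1:ABCDEF', the key sent to the service is
# sha256(b'ABCDEF;example.com/x').hexdigest(). A reply containing ';' marks a
# known duplicate and is returned as [timestamp, original_url]; anything else
# (or a persistent failure) yields False.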
def revisit_record(writer, record, ia_record):
warc_headers = record.rec_headers
#warc_headers.add_header('WARC-Refers-To'
warc_headers.replace_header('WARC-Refers-To-Date',
'-'.join([ia_record[0][:4], ia_record[0][4:6], ia_record[0][6:8]]) + 'T' +
':'.join([ia_record[0][8:10], ia_record[0][10:12], ia_record[0][12:14]]) + 'Z')
warc_headers.replace_header('WARC-Refers-To-Target-URI', ia_record[1])
warc_headers.replace_header('WARC-Type', 'revisit')
warc_headers.replace_header('WARC-Truncated', 'length')
warc_headers.replace_header('WARC-Profile', 'http://netpreserve.org/warc/1.0/revisit/identical-payload-digest')
warc_headers.remove_header('WARC-Block-Digest')
warc_headers.remove_header('Content-Length')
return writer.create_warc_record(
record.rec_headers.get_header('WARC-Target-URI'),
'revisit',
warc_headers=warc_headers,
http_headers=record.http_headers
)
def process(filename_in, filename_out):
with open(filename_in, 'rb') as file_in:
with open(filename_out, 'wb') as file_out:
writer = WARCWriter(filebuf=file_out, gzip=True)
for record in ArchiveIterator(file_in):
if record.rec_headers.get_header('WARC-Type') == 'response':
record_url = record.rec_headers.get_header('WARC-Target-URI')
record_digest = record.rec_headers.get_header('WARC-Payload-Digest')
ia_record = ia_available(record_url, record_digest)
#print(ia_record)
if not ia_record:
writer.write_record(record)
else:
print('Found duplicate, writing revisit record.')
writer.write_record(revisit_record(writer, record, ia_record))
else:
writer.write_record(record)
if __name__ == '__main__':
filename_in = sys.argv[1]
filename_out = sys.argv[2]
process(filename_in, filename_out)
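    # Invocation sketch (the script and file names are placeholders):
    #   python dedupe_warc.py input.warc.gz output.deduplicated.warc.gz
    # Every response whose payload digest is already known to the lookup
    # service is rewritten as a 'revisit' record pointing at the earlier
    # capture.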
|
the-stack_0_21364 | from deepliif.models import ResnetGenerator, get_norm_layer
class Resnet(ResnetGenerator):
def __init__(self):
super(Resnet, self).__init__(
input_nc=3,
output_nc=3,
ngf=64,
norm_layer=get_norm_layer(norm_type='batch'),
use_dropout=True,
n_blocks=9,
padding_type='zero'
)
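# Usage sketch (the input shape is illustrative, not prescribed by this module):
#   import torch
#   net = Resnet()
#   out = net(torch.randn(1, 3, 512, 512))  # 3-channel image in, 3-channel out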
|
the-stack_0_21366 | from celery import Celery
from kombu import Queue, Exchange
import os
import time
BROKER_URL = os.getenv("BROKER_URL")
RESULT_BACKEND_URL = os.getenv("RESULT_BACKEND_URL", None)
celery_app = Celery(
broker=BROKER_URL,
)
if RESULT_BACKEND_URL:
celery_app.conf.update(backend=RESULT_BACKEND_URL)
celery_app.conf.update(
CELERY_DEFAULT_QUEUE="queue1",
CELERY_QUEUES=(
Queue('queue1', exchange=Exchange('queue1', type='direct'), routing_key='queue1'),
Queue('queue2', exchange=Exchange('queue2', type='direct'), routing_key='queue2'),
Queue('queue3', exchange=Exchange('queue3', type='direct'), routing_key='queue3'),
),
CELERY_ROUTES={
'task1': {'queue': 'queue1', 'routing_key': 'queue1'},
'task2': {'queue': 'queue2', 'routing_key': 'queue2'},
'task3': {'queue': 'queue3', 'routing_key': 'queue3'},
}
)
@celery_app.task
def task1():
time.sleep(20)
@celery_app.task
def task2():
time.sleep(20)
@celery_app.task
def task3():
time.sleep(20)
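# Worker invocation sketch (the module path 'tasks' is an assumption about how
# this file is imported; the queue names match CELERY_QUEUES above):
#   celery -A tasks worker -Q queue1,queue2,queue3 --loglevel=info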
|
the-stack_0_21369 | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="pagent", # Replace with your own username
version="0.0.1",
author="Dylan Miracle",
author_email="[email protected]",
description="A probabalistic agent",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/dmiracle/pagent",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
) |
the-stack_0_21370 | # Django
from django.db.models.query import QuerySet
# APPs
from systemtest.pts import models as pts_models
from systemtest.utils.db2 import Database
def get_request_by_status_name(status_name: str) -> QuerySet[pts_models.Request]:
"""
Django QuerySet with all Request objects/rows in a specific status
Args:
status_name:
Name of status to fetch
Returns:
Django QuerySet of Request objects/rows
"""
return pts_models.Request.objects.filter(request_status__name=status_name)
def get_status(status_name: str) -> pts_models.RequestStatus:
"""
Gets a specific RequestStatus by exact name
Args:
status_name:
Name of status to fetch
Raises:
DoesNotExist:
RequestStatus matching query does not exist
Returns:
RequestStatus object
"""
return pts_models.RequestStatus.objects.get(name=status_name)
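# Usage sketch (the status names are illustrative; they depend on the
# RequestStatus rows loaded into the database):
#   pending_requests = get_request_by_status_name("PENDING")
#   cancel_status = get_status("CANCEL")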
def get_ncm(request: pts_models.Request, database: Database) -> dict:
"""
Performs query to DB2 and get NCM data as tag and part location
Args:
request:
Request object to fetch NCM data.
database:
Database instance to execute query
Returns:
Dictionary with NCM data if it exits and additional flag for
location part or an empty dictionary in case of not finding ncm
"""
# Create a dict with data for execute query
data = {
"part_number": request.part_number,
"serial_number": request.serial_number,
"created": request.created
}
# Add dinamic data to SQL query
sql = """
SELECT
RPREJS AS NCM_TAG,
RPCULC AS CURRENT_LOCATION
FROM
QRYQ.MFSGPRP10_GRC
WHERE
RPITEM = '00000{part_number}' AND -- Part number has 12 chars,
-- 5 chars aditional to save in db,
-- so filled extra chars with 0s
RPPTSN = '{serial_number}' AND
RPSTMP > '{created}';
""".format(**data)
rows = database.fetch(sql, False)
# Get first row from query if no row gets empty dict
row = next(rows, {})
# If row has column 'NCM_TAG' named in the query above
if ncm_tag := row.get("NCM_TAG"):
data["ncm_tag"] = ncm_tag
# Fetch next row if exists to validate the location of part
row = next(rows, {})
data["is_returned"] = row.get("CURRENT_LOCATION", "").strip() == "PNCM"
return data
    # If no NCM data was found, return an empty dict
return {}
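# Return-value sketch: when an NCM record exists, the dict carries the original
# lookup keys plus 'ncm_tag' and an 'is_returned' flag (True once the part sits
# in the 'PNCM' location); otherwise the caller receives an empty dict.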
|
the-stack_0_21372 | import datetime
import importlib
import os
import re
import sys
import yaml
from src import const
from src.ff import FF
from src.log import log
from src.utils import Util
class Updater:
def __init__(self, name):
"""Dispaching config to its updater by config name"""
self.config_name = name
self.modified = False
def _save_yaml_file(self, path, config):
_, yaml_dumper = Util.get_yaml()
with open(path, 'wb') as f:
f.write(yaml.dump(config, Dumper=yaml_dumper, encoding='utf-8', allow_unicode=True))
def update(self):
"""Perform update"""
yaml_path = self.config_name + '.yaml'
if os.path.isfile(yaml_path):
# .yaml file path
config = Util.read_config_file(yaml_path)
getattr(self, self.config_name + "_yaml")(config)
else:
if FF.is_enabled("WALBOT_FEATURE_NEW_CONFIG") == "1":
getattr(self, self.config_name + "_db")()
else:
log.error(f"File '{self.config_name}.yaml' does not exist")
sys.exit(const.ExitStatus.CONFIG_FILE_ERROR)
if self.modified:
self._save_yaml_file(yaml_path, config)
def result(self):
"""Get updater result: was config file actually updated or not"""
return self.modified
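    # Call-site sketch (the caller is not part of this file):
    #   updater = Updater("config")
    #   updater.update()
    #   if updater.result():
    #       log.info("config.yaml was migrated to the latest version")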
def _bump_version(self, config, new_version):
config.version = new_version
self.modified = True
log.info(f"Successfully upgraded to version {new_version}")
def config_yaml(self, config):
"""Update config.yaml"""
if config.version == "0.0.1":
for key in config.commands.data.keys():
if not hasattr(config.commands.data[key], "times_called"):
config.commands.data[key].times_called = 0
self._bump_version(config, "0.0.2")
if config.version == "0.0.2":
config.__dict__["ids"] = {"reminder": 1}
reminders = config.reminders
config.reminders = {}
for reminder in reminders:
config.reminders[config.ids["reminder"]] = reminder
config.ids["reminder"] += 1
self._bump_version(config, "0.0.3")
if config.version == "0.0.3":
for com in ("quote", "addquote", "listquote", "delquote", "setquoteauthor"):
config.commands.data[com].module_name = "src.cmd.quote"
config.commands.data[com].class_name = "QuoteCommands"
for com in ("reminder", "updreminder", "listreminder", "delreminder"):
config.commands.data[com].module_name = "src.cmd.reminder"
config.commands.data[com].class_name = "ReminderCommands"
self._bump_version(config, "0.0.4")
if config.version == "0.0.4":
for key in config.commands.data.keys():
if not hasattr(config.commands.data[key], "cmd_line"):
config.commands.data[key].cmd_line = None
self._bump_version(config, "0.0.5")
if config.version == "0.0.5":
for com in ("markov", "markovgc", "delmarkov", "findmarkov", "dropmarkov", "addmarkovfilter",
"listmarkovfilter", "delmarkovfilter"):
config.commands.data[com].module_name = "src.cmd.markov"
config.commands.data[com].class_name = "MarkovCommands"
self._bump_version(config, "0.0.6")
if config.version == "0.0.6":
if hasattr(config.commands, "config"):
delattr(config.commands, "config")
self._bump_version(config, "0.0.7")
if config.version == "0.0.7":
config.__dict__["saving"] = {
"backup": {
"compress": config.compress,
"period": 10,
},
"period": 10,
}
delattr(config, "compress")
self._bump_version(config, "0.0.8")
if config.version == "0.0.8":
config.ids["reaction"] = 1
reactions = config.reactions
config.reactions = {}
for reaction in reactions:
config.reactions[config.ids["reaction"]] = reaction
config.ids["reaction"] += 1
self._bump_version(config, "0.0.9")
if config.version == "0.0.9":
config.__dict__["responses"] = dict()
config.ids["response"] = 1
self._bump_version(config, "0.0.10")
if config.version == "0.0.10":
for index in config.reminders.keys():
config.reminders[index].__dict__["users"] = []
self._bump_version(config, "0.0.11")
if config.version == "0.0.11":
if "addreminder" in config.commands.aliases.keys():
del config.commands.aliases["addreminder"]
config.commands.data["addreminder"] = config.commands.data["reminder"]
del config.commands.data["reminder"]
config.commands.data["addreminder"].module_name = "src.cmd.reminder"
config.commands.data["addreminder"].perform = "_addreminder"
self._bump_version(config, "0.0.12")
if config.version == "0.0.12":
for index, reminder in config.reminders.items():
reminder.__dict__["ping_users"] = reminder.users
del reminder.__dict__["users"]
reminder.__dict__["whisper_users"] = []
self._bump_version(config, "0.0.13")
if config.version == "0.0.13":
for guild in config.guilds.values():
guild.__dict__["markov_logging_whitelist"] = guild.markov_whitelist
del guild.__dict__["markov_whitelist"]
guild.__dict__["markov_responses_whitelist"] = guild.responses_whitelist
del guild.__dict__["responses_whitelist"]
self._bump_version(config, "0.0.14")
if config.version == "0.0.14":
for guild in config.guilds.values():
guild.__dict__["responses_whitelist"] = set()
self._bump_version(config, "0.0.15")
if config.version == "0.0.15":
config.__dict__["repl"] = {}
config.repl["port"] = 8080 # set default port for REPL
self._bump_version(config, "0.0.16")
if config.version == "0.0.16":
for reminder in config.reminders.values():
reminder.__dict__["repeat_after"] = 0
self._bump_version(config, "0.0.17")
if config.version == "0.0.17":
config.ids["quote"] = 1
quotes = config.quotes
config.quotes = {}
for quote in quotes:
config.quotes[config.ids["quote"]] = quote
config.ids["quote"] += 1
self._bump_version(config, "0.0.18")
if config.version == "0.0.18":
for index in config.reminders.keys():
config.reminders[index].__dict__["author"] = "<unknown>"
self._bump_version(config, "0.0.19")
if config.version == "0.0.19":
for index in config.reminders.keys():
config.reminders[index].__dict__["time_created"] = (
datetime.datetime(1970, 1, 1).strftime(const.REMINDER_DATETIME_FORMAT))
self._bump_version(config, "0.0.20")
if config.version == "0.0.20":
for key in config.commands.data.keys():
if not hasattr(config.commands.data[key], "is_private"):
config.commands.data[key].is_private = False
self._bump_version(config, "0.0.21")
if config.version == "0.0.21":
for key in config.commands.data.keys():
if (hasattr(config.commands.data[key], "module_name") and
not config.commands.data[key].module_name.startswith("src.")):
del config.commands.data[key].__dict__["module_name"]
self._bump_version(config, "0.0.22")
if config.version == "0.0.22":
for index, reminder in config.reminders.items():
reminder.__dict__["repeat_interval_measure"] = "minutes"
self._bump_version(config, "0.0.23")
if config.version == "0.0.23":
for guild in config.guilds.values():
guild.__dict__["ignored"] = False
self._bump_version(config, "0.0.24")
if config.version == "0.0.24":
config.commands.data["listreminder"].subcommand = False
self._bump_version(config, "0.0.25")
if config.version == "0.0.25":
config.commands.data["reminder"].subcommand = False
self._bump_version(config, "0.0.26")
if config.version == "0.0.26":
for index in config.quotes.keys():
config.quotes[index].added_by = config.quotes[index].added_by[:-5]
self._bump_version(config, "0.0.27")
if config.version == "0.0.27":
config.ids["timer"] = 1
self._bump_version(config, "0.0.28")
if config.version == "0.0.28":
config.__dict__["plugins"] = dict()
self._bump_version(config, "0.0.29")
if config.version == "0.0.29":
config.ids["stopwatch"] = 1
self._bump_version(config, "0.0.30")
if config.version == "0.0.30":
for index, reminder in config.reminders.items():
reminder.__dict__["email_users"] = []
self._bump_version(config, "0.0.31")
if config.version == "0.0.31":
for index, reminder in config.reminders.items():
reminder.__dict__["prereminders_list"] = []
if config.version == "0.0.32":
for index, reminder in config.reminders.items():
reminder.__dict__["used_prereminders_list"] = [False] * len(reminder.prereminders_list)
self._bump_version(config, "0.0.33")
if config.version == "0.0.33":
config.commands.__dict__["module_help"] = dict()
self._bump_version(config, "0.0.34")
if config.version == "0.0.34":
for index, reminder in config.reminders.items():
reminder.__dict__["notes"] = ""
self._bump_version(config, "0.0.35")
if config.version == "0.0.35":
config.ids["markov_ignored_prefix"] = 1
self._bump_version(config, "0.0.36")
if config.version == "0.0.36":
log.info(f"Version of {self.config_name} is up to date!")
else:
log.error(f"Unknown version {config.version} for {self.config_name}!")
def markov_yaml(self, config):
"""Update markov.yaml"""
if config.version == "0.0.1":
config.__dict__["min_chars"] = 1
config.__dict__["min_words"] = 1
self._bump_version(config, "0.0.2")
if config.version == "0.0.2":
config.__dict__["chains_generated"] = 0
self._bump_version(config, "0.0.3")
if config.version == "0.0.3":
config.__dict__["max_chars"] = 2000
config.__dict__["max_words"] = 500
self._bump_version(config, "0.0.4")
if config.version == "0.0.4":
config.model[""].__dict__["word"] = None
self._bump_version(config, "0.0.5")
if config.version == "0.0.5":
for i, _ in enumerate(config.filters):
config.__dict__["filters"][i] = re.compile(config.filters[i].pattern, re.DOTALL)
self._bump_version(config, "0.0.6")
if config.version == "0.0.6":
config.__dict__["ignored_prefixes"] = dict()
self._bump_version(config, "0.0.7")
if config.version == "0.0.7":
log.info(f"Version of {self.config_name} is up to date!")
else:
log.error(f"Unknown version {config.version} for {self.config_name}!")
def secret_yaml(self, config):
"""Update secret.yaml"""
if config.version == "0.0.1":
if FF.is_enabled("WALBOT_FEATURE_NEW_CONFIG") == "1":
os.makedirs("db", exist_ok=True)
sqlite3 = importlib.import_module("sqlite3")
con = sqlite3.connect(os.path.join("db", "secret.db"))
cur = con.cursor()
cur.execute("CREATE TABLE db_info (key text, value text)")
cur.execute("INSERT INTO db_info VALUES ('version', '0.1.0')")
cur.execute("CREATE TABLE tokens (key text, value text)")
cur.execute("INSERT INTO tokens VALUES ('discord', ?)", (config.token,))
con.commit()
con.close()
os.remove(self.config_name + '.yaml')
                log.info(f"Successfully migrated {self.config_name}.yaml to db/secret.db!")
else:
config.__dict__["mail"] = {
"smtp_server": None,
"email": None,
"password": None,
}
config.__dict__["admin_email_list"] = list()
self._bump_version(config, "0.0.2")
if config.version == "0.0.2":
log.info(f"Version of {self.config_name} is up to date!")
else:
log.error(f"Unknown version {config.version} for {self.config_name}!")
def secret_db(self):
"""Update db/secret.db"""
pass
|
the-stack_0_21374 | '''
Test chunked request/responses
'''
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import requests
import time
import logging
import json
import uuid
import socket
import helpers
import tsqa.test_cases
import tsqa.utils
import tsqa.endpoint
log = logging.getLogger(__name__)
import SocketServer
class ChunkedHandler(SocketServer.BaseRequestHandler):
"""
A subclass of RequestHandler which return chunked encoding optionally
/parts/sleep_time/close
parts: number of parts to send
sleep_time: time between parts
close: bool whether to close properly
"""
def handle(self):
# Receive the data in small chunks and retransmit it
conn_id = uuid.uuid4().hex
while True:
data = self.request.recv(4096).strip()
if data:
log.info('sending data back to the client')
else:
log.info('Client disconnected')
break
inc_lines = data.splitlines()
try:
uri = inc_lines[0].split()[1]
except IndexError:
break
parts = 5 # how many things to send
sleep_time = 0.2 # how long to sleep between parts
close = True # whether to close properly
if uri[1:]: # if there is something besides /
uri_parts = uri[1:].split('/')
if len(uri_parts) >= 1:
parts = int(uri_parts[0])
if len(uri_parts) >= 2:
sleep_time = float(uri_parts[1])
if len(uri_parts) >= 3:
close = json.loads(uri_parts[2])
resp = ('HTTP/1.1 200 OK\r\n'
'X-Conn-Id: ' + str(conn_id) + '\r\n'
'Transfer-Encoding: chunked\r\n'
'Connection: keep-alive\r\n'
'\r\n')
self.request.sendall(resp)
for x in xrange(0, parts):
self.request.sendall('{0}\r\n{1}\r\n'.format(len(str(x)), x))
time.sleep(sleep_time)
if close:
self.request.sendall('0\r\n\r\n')
else:
self.request.sendall('lkfjasd;lfjas;d')
time.sleep(2)
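# Illustrative request sketch (not executed by the tests; host/port are whatever
# the SocketServerDaemon binds). The URI path selects the handler's behaviour as
# described in its docstring above:
#
#   GET /              -> 5 chunks, 0.2s apart, terminated with "0\r\n\r\n"
#   GET /3/0.5         -> 3 chunks, 0.5s apart, properly terminated
#   GET /5/0.1/false   -> 5 chunks, 0.1s apart, then garbage bytes instead of the
#                         terminating chunk (exercised by test_chunked_bad_close)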
class TestChunked(helpers.EnvironmentCase):
@classmethod
def setUpEnv(cls, env):
'''
This function is responsible for setting up the environment for this fixture
This includes everything pre-daemon start
'''
# create a socket server
cls.port = tsqa.utils.bind_unused_port()[1]
cls.server = tsqa.endpoint.SocketServerDaemon(ChunkedHandler, port=cls.port)
cls.server.start()
cls.server.ready.wait()
cls.configs['remap.config'].add_line('map / http://127.0.0.1:{0}/'.format(cls.port))
cls.configs['records.config']['CONFIG'].update({
'proxy.config.http.connect_attempts_timeout': 5,
'proxy.config.http.connect_attempts_max_retries': 0,
'proxy.config.http.keep_alive_enabled_in': 1,
'proxy.config.http.keep_alive_enabled_out': 1,
'proxy.config.exec_thread.limit': 1,
'proxy.config.exec_thread.autoconfig': 0,
'proxy.config.http.chunking_enabled': 1,
})
def test_chunked_origin(self):
'''
Test that the origin does in fact support keepalive
'''
self._client_test_chunked_keepalive(self.port)
self._client_test_chunked_keepalive(self.port, num_bytes=2)
self._client_test_chunked_keepalive(self.port, num_bytes=2, sleep=1)
def _client_test_chunked_keepalive(self,
port=None,
times=3,
num_bytes=None,
sleep=None,
):
if port is None:
port = int(self.configs['records.config']['CONFIG']['proxy.config.http.server_ports'])
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(('127.0.0.1', port))
url = '/'
if num_bytes is not None:
url += str(num_bytes)
if sleep is not None:
if num_bytes is None:
                raise Exception('sleep requires num_bytes to be set')
url += '/' + str(sleep)
request = ('GET ' + url + ' HTTP/1.1\r\n'
'Host: 127.0.0.1\r\n'
'\r\n')
uuid = None
# test basic
for x in xrange(1, times):
s.send(request)
resp = ''
while True:
response = s.recv(4096)
for line in response.splitlines():
line = line.strip()
if line.startswith('X-Conn-Id:'):
r_uuid = line.replace('X-Conn-Id:', '')
if uuid is None:
uuid = r_uuid
else:
self.assertEqual(uuid, r_uuid)
resp += response
if resp.endswith('\r\n0\r\n\r\n'):
break
for x in xrange(0, num_bytes or 4):
self.assertIn('1\r\n{0}\r\n'.format(x), resp)
s.close()
def test_chunked_basic(self):
url = 'http://127.0.0.1:{0}'.format(self.port)
ret = requests.get(url, proxies=self.proxies)
self.assertEqual(ret.status_code, 200)
self.assertEqual(ret.text.strip(), '01234')
# TODO: fix keepalive with chunked responses
def test_chunked_keepalive_server(self):
url = 'http://127.0.0.1:{0}'.format(self.port)
ret = requests.get(url, proxies=self.proxies)
conn_id = ret.headers['x-conn-id']
self.assertEqual(ret.status_code, 200)
self.assertEqual(ret.text.strip(), '01234')
# make sure that a second request works, and since we have keep-alive out
# disabled it should be a new connection
ret = requests.get(url, proxies=self.proxies)
self.assertEqual(ret.status_code, 200)
self.assertEqual(ret.text.strip(), '01234')
self.assertEqual(conn_id, ret.headers['x-conn-id'])
def test_chunked_keepalive_client(self):
self._client_test_chunked_keepalive()
self._client_test_chunked_keepalive(num_bytes=2)
self._client_test_chunked_keepalive(num_bytes=2, sleep=1)
def test_chunked_bad_close(self):
url = 'http://127.0.0.1:{0}/5/0.1/false'.format(self.port)
# TODO: better exception catch (seems to be ConnectionError)
with self.assertRaises(Exception):
requests.get(url, proxies=self.proxies, timeout=2)
|
the-stack_0_21375 | import os
import numpy as np
import cv2
import matplotlib.pyplot as plt
import argparse
import textwrap
from PIL import Image
import matplotlib as mpl
from osgeo import gdal, osr
# -- Create a class for each vegetation_index -- #
class Indexes:
def __init__(self, img):
self.img = img
self.R = self.img[:, :, 2].astype(np.float32)
self.G = self.img[:, :, 1].astype(np.float32)
self.B = self.img[:, :, 0].astype(np.float32)
# All these operations aim to not have ZeroDivisionError -- #
# -- Visible Atmospheric Resistant Index -- #
def VARI(self):
vari = np.divide((self.G - self.R), (self.G + self.R - self.B + 0.00001))
return np.clip(vari, -1, 1) # -- VI values outside the [-1, 1] are clipped to this interval edges. -- #
# -- Green Leaf Index -- #
def GLI(self):
gli = np.divide((2 * self.G - self.R - self.B), (2 * self.G + self.R + self.B + 0.00001))
return np.clip(gli, -1, 1)
# -- Normalized Green Red Difference Index -- #
def NGRDI(self):
v_ndvi = np.divide((self.G - self.R), (self.G + self.R + 0.00001))
return np.clip(v_ndvi, -1, 1)
# -- Normalized Green Blue Difference Index -- #
def NGBDI(self):
ngbdi = (self.G - self.B) / (self.G + self.B + 0.00001)
return np.clip(ngbdi, -1, +1)
# -- Identification of the Idx object -- #
def get_index(self, index_name):
if index_name == 'VARI':
return self.VARI()
elif index_name == 'GLI':
return self.GLI()
elif index_name == 'NGRDI':
return self.NGRDI()
elif index_name == 'NGBDI':
return self.NGBDI()
else:
            print('Unknown index')
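# Minimal usage sketch (illustrative only; mirrors the script flow further below,
# where `image` is a BGR array such as the one loaded with cv2.imread):
#
#   idx_calc = Indexes(image)
#   vari = idx_calc.get_index('VARI')   # per-pixel values clipped to [-1, 1]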
# -- Find the real values of the min, max based on the frequency of the vegetation_index histogramm' s bin in each examined interval -- #
def find_real_min_max(perc, edges, index_clear):
mask = perc > (0.05 * len(index_clear))
edges = edges[:-1]
min_v = edges[mask].min()
max_v = edges[mask].max()
return min_v, max_v
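# Illustrative effect: only histogram bins whose count exceeds 5% of the valid
# pixel count survive the mask, and the smallest/largest surviving bin edges
# become the display range, so sparsely populated tails do not stretch the colormap.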
# -- Function that creates the georeferenced VI map -- #
def array_to_raster(output_path, ds_reference, array, name1, name2):
rows, cols, band_num = array.shape
driver = gdal.GetDriverByName("GTiff")
outRaster = driver.Create(os.path.join(output_path, name1+'_'+name2+'.tif'), cols, rows, band_num, gdal.GDT_Byte, options=["COMPRESS=DEFLATE"])
originX, pixelWidth, b, originY, d, pixelHeight = ds_reference.GetGeoTransform()
outRaster.SetGeoTransform((originX, pixelWidth, 0, originY, 0, pixelHeight))
descriptions = ['Red Band', 'Green Band', 'Blue Band', 'Alpha Band', 'Index Array']
for b in range(band_num):
outband = outRaster.GetRasterBand(b+1)
outband.WriteArray(array[:,:,b])
outband.SetDescription(descriptions[b])
if b+1==1:
outRaster.GetRasterBand(1).SetColorInterpretation(gdal.GCI_RedBand)
elif b+1==2:
outRaster.GetRasterBand(2).SetColorInterpretation(gdal.GCI_GreenBand)
elif b+1==3:
outRaster.GetRasterBand(3).SetColorInterpretation(gdal.GCI_BlueBand)
elif b+1==4:
outRaster.GetRasterBand(4).SetColorInterpretation(gdal.GCI_AlphaBand)
outRasterSRS = osr.SpatialReference(wkt=prj)
outRaster.SetProjection(outRasterSRS.ExportToWkt())
    outband.FlushCache()
    driver = None
    print('Georeferenced {} map was extracted!'.format(name2))
return outRaster
# -- Arguments -- #
parser = argparse.ArgumentParser(prog='index_calculation', description = textwrap.dedent('''\
The available VIs are:
1. Visible Atmospheric Resistant Index (VARI)
2. Green Leaf Index (GLI)
3. Normalized Green Red Difference Index (NGRDI)
4. Normalized Green Blue Difference Index (NGBDI)
'''), formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('--input_image', required=True, help="Please enter the absolute path of the input image.")
parser.add_argument('--output_path', nargs='?', help="Please enter the absolute path of the output path.")
parser.add_argument('--vis', nargs="*", required=False, help="Please enter the abbreviation of the Vegetation Index/Indices.")
args = parser.parse_args()
if args.output_path==None:
os.makedirs('results', exist_ok=True)
save_dir = os.path.join(os.getcwd(), 'results')
else:
save_dir = os.path.abspath(args.output_path)
if not args.vis:
args.vis = ['VARI', 'GLI', 'NGRDI', 'NGBDI']
print('All VIs will be calculated!')
else:
args.vis = [elem.upper() for elem in args.vis]
img_path = os.path.abspath(args.input_image)
img_name = os.path.basename(img_path)
name, ext = os.path.splitext(img_name)
os.chdir(os.path.dirname(img_path))
img = cv2.imread(img_name, cv2.IMREAD_UNCHANGED)
h, w, ch = img.shape
if ch > 3:
image = img[:, :, :3].astype(float)
image[img[:, :, 3] == 0] = np.nan
empty_space = img[:, :, 3] == 0
else:
image = img
# -- Print function for testing reasons -- #
print('Processing image with shape {} x {}'.format(img.shape[0], img.shape[1]))
Idx = Indexes(image)
for index_name in args.vis:
# -- Calculate index -- #
idx = Idx.get_index(index_name)
index_clear = idx[~np.isnan(idx)]
# -- Calculate index histogram -- #
perc, edges, _ = plt.hist(index_clear, bins=100, range=(-1, 1), color='darkcyan', edgecolor='black')
plt.close()
# -- Find the real min, max values of the vegetation_index -- #
lower, upper = find_real_min_max(perc, edges, index_clear)
index_clipped = np.clip(idx, lower, upper)
cm = plt.get_cmap('RdYlGn')
cNorm = mpl.colors.Normalize(vmax=upper, vmin=lower)
colored_image = cm(cNorm(index_clipped))
img = Image.fromarray(np.uint8(colored_image * 255), mode='RGBA')
rgba = np.array(img, dtype=np.float32)
ds = gdal.Open(img_path, gdal.GA_ReadOnly)
prj = ds.GetProjection()
if prj:
array_to_raster(save_dir, ds, rgba, name, index_name)
else:
img.save('{}/{}_{}.tif'.format(save_dir, name, index_name))
        print('Non-georeferenced {} map was extracted!'.format(index_name))
np.save('{}/{}_{}.npy'.format(save_dir, name, index_name), index_clipped)
print('Done!')
|
the-stack_0_21379 | # Author: Jean-Remi King, <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
from numpy.testing import assert_array_equal
import pytest
from mne.utils import requires_sklearn
from mne.decoding.time_frequency import TimeFrequency
@requires_sklearn
def test_timefrequency():
"""Test TimeFrequency."""
from sklearn.base import clone
# Init
n_freqs = 3
freqs = [20, 21, 22]
tf = TimeFrequency(freqs, sfreq=100)
for output in ['avg_power', 'foo', None]:
pytest.raises(ValueError, TimeFrequency, freqs, output=output)
tf = clone(tf)
# Fit
n_epochs, n_chans, n_times = 10, 2, 100
X = np.random.rand(n_epochs, n_chans, n_times)
tf.fit(X, None)
# Transform
tf = TimeFrequency(freqs, sfreq=100)
tf.fit_transform(X, None)
# 3-D X
Xt = tf.transform(X)
assert_array_equal(Xt.shape, [n_epochs, n_chans, n_freqs, n_times])
# 2-D X
Xt = tf.transform(X[:, 0, :])
assert_array_equal(Xt.shape, [n_epochs, n_freqs, n_times])
# 3-D with decim
tf = TimeFrequency(freqs, sfreq=100, decim=2)
Xt = tf.transform(X)
assert_array_equal(Xt.shape, [n_epochs, n_chans, n_freqs, n_times // 2])
|
the-stack_0_21380 | #!/usr/bin/env python
# encoding: utf-8
# The MIT License (MIT)
# Copyright (c) 2014-2015 CNRS
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# AUTHORS
# Hervé BREDIN - http://herve.niderb.fr
from __future__ import unicode_literals
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
import os
import sys
from pkg_resources import iter_entry_points
__all__ = []
ParserPlugins = {}
# iterate over parser plugins
for o in iter_entry_points(group='pyannote.parser.plugin', name=None):
# obtain parser class name (e.g. "MDTMParser")
parser_name = o.name
# obtain parser class (e.g. MDTMParser)
parser_class = o.load()
# iterate over supported file extensions
# (e.g. ['mdtm'] for MDTMParser)
for extension in parser_class.file_extensions():
# if extension is already registered by another parser plugin
# raise an exception
if extension in ParserPlugins:
msg = 'Extension {e} is registered by both {p1} and {p2}'
raise ValueError(
msg.format(
e=extension,
p1=parser_class.__name__,
p2=ParserPlugins[extension].__name__))
# otherwise, register the extension with the parser class
ParserPlugins[extension] = parser_class
# import parser class at package root
# (e.g. pyannote.parser.MDTMParser)
setattr(sys.modules[__name__], parser_name, parser_class)
# make sure parser class is imported with
# >>> from pyannote.parser import *
__all__.append(parser_name)
class MagicParser(object):
"""Magic parser chooses which parser to use based on file extension
Notes
-----
kwargs are passed to selected parser
"""
@staticmethod
def get_parser(extension):
try:
Parser = ParserPlugins[extension]
except Exception:
msg = 'Extension "{e}" is not supported.'
raise NotImplementedError(msg.format(e=extension))
return Parser
@staticmethod
def guess_parser(path):
# obtain file extension (without leading .)
_, extension = os.path.splitext(path)
extension = extension[1:]
return MagicParser.get_parser(extension)
def __init__(self, **kwargs):
super(MagicParser, self).__init__()
self.init_kwargs = kwargs
def read(self, path, **kwargs):
# obtain file extension (without leading .)
_, extension = os.path.splitext(path)
extension = extension[1:]
# obtain parser based on the extension
Parser = self.get_parser(extension)
# initialize parser
parser = Parser(**self.init_kwargs)
# read file
parser.read(path, **kwargs)
# return parser with file loaded internally
return parser
__all__.append(str('MagicParser'))
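# Illustrative usage sketch (the file name below is hypothetical; the extension
# determines which registered parser plugin is used):
#
#   from pyannote.parser import MagicParser
#   parser = MagicParser()
#   mdtm = parser.read('reference.mdtm')  # returns the parser with the file loaded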
|
the-stack_0_21384 | import claripy
import nose
import math
import logging
l = logging.getLogger('claripy.test.solver')
solver_list = (claripy.Solver, claripy.SolverReplacement, claripy.SolverHybrid, claripy.SolverComposite, claripy.SolverCacheless)
def test_solver():
for s in solver_list:
yield raw_solver, s, True
yield raw_solver, s, False
def test_hybrid_solver():
yield raw_hybrid_solver, True
yield raw_hybrid_solver, False
def raw_hybrid_solver(reuse_z3_solver):
claripy._backend_z3.reuse_z3_solver = reuse_z3_solver
s = claripy.SolverHybrid()
x = claripy.BVS('x', 32, min=0, max=10, stride=2)
y = claripy.BVS('y', 32, min=20, max=30, stride=5)
# TODO: for now, the stride isn't respected in symbolic mode, but we'll fix that next.
# until we do, let's add constraints
s.add(x <= 10)
s.add(x % 2 == 0)
s.add(y >= 20)
s.add(y <= 30)
s.add((y-20) % 5 == 0)
s.add(x != 8)
nose.tools.assert_equal(sorted(s.eval(x, 20, exact=False)), [0, 2, 4, 6, 8, 10])
nose.tools.assert_equal(sorted(s.eval(x, 20)), [0, 2, 4, 6, 10])
nose.tools.assert_equal(sorted(s.eval(y, 20, exact=False)), [20, 25, 30])
nose.tools.assert_equal(sorted(s.eval(y, 20)), [20, 25, 30])
# test approximate_first option
s._approximate_first = True
old_solve_count = claripy._backends_module.backend_z3.solve_count
nose.tools.assert_equal(sorted(s.eval(x, 20)), [0, 2, 4, 6, 8, 10])
nose.tools.assert_equal(claripy._backends_module.backend_z3.solve_count, old_solve_count)
    s._approximate_first = False
# now constrain things further so that the VSA overapproximates
s.add(x <= 4)
nose.tools.assert_equal(sorted(s.eval(x, 20, exact=False)), [0, 2, 4])
nose.tools.assert_equal(sorted(s.eval(x, 20)), [0, 2, 4])
s.add(y >= 27)
nose.tools.assert_equal(s.eval(y, 20, exact=False), (30,))
nose.tools.assert_equal(s.eval(y, 20), (30,))
t = claripy.SolverHybrid()
x = claripy.BVS('x', 32)
t.add(x <= 10)
print(t.eval(x, 80, exact=False))
nose.tools.assert_equal(len(t.eval(x, 5, exact=False)), 5)
nose.tools.assert_equal(len(t.eval(x, 5, exact=False)), 5)
nose.tools.assert_equal(len(t.eval(x, 6, exact=False)), 6)
nose.tools.assert_equal(len(t.eval(x, 8, exact=False)), 8)
nose.tools.assert_equal(len(t.eval(x, 99, exact=False)), 11)
def test_replacement_solver():
yield raw_replacement_solver, True
yield raw_replacement_solver, False
def raw_replacement_solver(reuse_z3_solver):
claripy._backend_z3.reuse_z3_solver = reuse_z3_solver
sr = claripy.SolverReplacement()
x = claripy.BVS('x', 32)
nose.tools.assert_equal(len(sr.eval(x, 10)), 10)
sr.add_replacement(x, claripy.BVV(0x101, 32))
nose.tools.assert_equal(sr.eval(x, 10), (0x101,))
y = claripy.BVS('y', 32)
sr.add([y+1 == 200])
assert (y+1).cache_key in sr._replacements
assert sr._replacement(y+1) is claripy.BVV(200, 32)
srb = sr.branch()
assert len(srb.constraints) == len(sr.constraints) #pylint:disable=no-member
assert (y+1).cache_key in sr._replacements
assert sr._replacement(y+1) is claripy.BVV(200, 32)
sr = claripy.SolverReplacement()
b = claripy.BoolS('b')
assert sr._replacement(b) is b
sr.add(claripy.Not(b))
assert sr._replacement(b) is claripy.false
sr = claripy.SolverReplacement(claripy.SolverVSA(), complex_auto_replace=True)
x = claripy.BVS('x', 64)
sr.add([x + 8 <= 0xffffffffffffffff])
sr.add([x + 8 >= 0])
assert sr._replacement(x) is not x
def raw_solver(solver_type, reuse_z3_solver):
#bc = claripy.backends.BackendConcrete(clrp)
#bz = claripy.backends.BackendZ3(clrp)
#claripy.expression_backends = [ bc, bz, ba ]
claripy._backend_z3.reuse_z3_solver = reuse_z3_solver
s = solver_type()
s.simplify()
x = claripy.BVS('x', 32)
y = claripy.BVS('y', 32)
z = claripy.BVS('z', 32)
l.debug("adding constraints")
s.add(x == 10)
s.add(y == 15)
# Batch evaluation
results = s.batch_eval([x + 5, x + 6, 3], 2)
nose.tools.assert_equal(len(results), 1)
nose.tools.assert_equal(results[0][0], 15) # x + 5
nose.tools.assert_equal(results[0][1], 16) # x + 6
nose.tools.assert_equal(results[0][2], 3) # constant
l.debug("checking")
nose.tools.assert_true(s.satisfiable())
nose.tools.assert_false(s.satisfiable(extra_constraints=[x == 5]))
nose.tools.assert_equal(s.eval(x + 5, 1)[0], 15)
nose.tools.assert_true(s.solution(x + 5, 15))
nose.tools.assert_true(s.solution(x, 10))
nose.tools.assert_true(s.solution(y, 15))
nose.tools.assert_false(s.solution(y, 13))
shards = s.split()
nose.tools.assert_equal(len(shards), 2)
nose.tools.assert_equal(len(shards[0].variables), 1)
nose.tools.assert_equal(len(shards[1].variables), 1)
if isinstance(s, claripy.frontend_mixins.ConstraintExpansionMixin) or (
isinstance(s, claripy.frontends.HybridFrontend) and
isinstance(s._exact_frontend, claripy.frontend_mixins.ConstraintExpansionMixin)
): #the hybrid frontend actually uses the exact frontend for the split
nose.tools.assert_equal({ len(shards[0].constraints), len(shards[1].constraints) }, { 2, 1 }) # adds the != from the solution() check
if isinstance(s, claripy.frontends.ReplacementFrontend):
nose.tools.assert_equal({ len(shards[0].constraints), len(shards[1].constraints) }, { 1, 1 }) # not a caching frontend
# test result caching
s = solver_type()
s.add(x == 10)
s.add(y == 15)
nose.tools.assert_false(s.satisfiable(extra_constraints=(x==5,)))
nose.tools.assert_true(s.satisfiable())
s = solver_type()
#claripy.expression_backends = [ bc, ba, bz ]
s.add(claripy.UGT(x, 10))
s.add(claripy.UGT(x, 20))
s.simplify()
nose.tools.assert_equal(len(s.constraints), 1)
#nose.tools.assert_equal(str(s.constraints[0]._obj), "Not(ULE(x <= 20))")
s.add(claripy.UGT(y, x))
s.add(claripy.ULT(z, 5))
# test that duplicate constraints are ignored
old_count = len(s.constraints)
s.add(claripy.ULT(z, 5))
nose.tools.assert_equal(len(s.constraints), old_count)
#print("========================================================================================")
#print("========================================================================================")
#print("========================================================================================")
#print("========================================================================================")
#a = s.eval(z, 100)
#print("ANY:", a)
#print("========================================================================================")
#mx = s.max(z)
#print("MAX",mx)
#print("========================================================================================")
#mn = s.min(z)
#print("MIN",mn)
#print("========================================================================================")
#print("========================================================================================")
#print("========================================================================================")
#print("========================================================================================")
print("CONSTRAINT COUNTS:", [ len(_.constraints) for _ in s.split() ])
nose.tools.assert_equal(s.max(z), 4)
nose.tools.assert_equal(s.min(z), 0)
nose.tools.assert_equal(s.min(y), 22)
nose.tools.assert_equal(s.max(y), 2**y.size()-1)
print("CONSTRAINT COUNTS:", [ len(_.constraints) for _ in s.split() ])
ss = s.split()
nose.tools.assert_equal(len(ss), 2)
#if isinstance(s, claripy.frontend_mixins.ConstraintExpansionMixin):
# nose.tools.assert_equal({ len(_.constraints) for _ in ss }, { 3, 2 }) # constraints from min or max
# Batch evaluation
s.add(y < 24)
s.add(z < x) # Just to make sure x, y, and z belong to the same solver, since batch evaluation does not support the
# situation where expressions belong to more than one solver
results = s.batch_eval([x, y, z], 20)
nose.tools.assert_set_equal(
set(results),
{(21, 23, 1), (22, 23, 3), (22, 23, 2), (22, 23, 4), (21, 22, 4), (21, 23, 4), (22, 23, 0),
(22, 23, 1), (21, 22, 1), (21, 22, 3), (21, 22, 2), (21, 22, 0), (21, 23, 0), (21, 23, 2),
(21, 23, 3)
}
)
# test that False makes it unsat
s = solver_type()
s.add(claripy.BVV(1,1) == claripy.BVV(1,1))
nose.tools.assert_true(s.satisfiable())
s.add(claripy.BVV(1,1) == claripy.BVV(0,1))
nose.tools.assert_false(s.satisfiable())
# test extra constraints
s = solver_type()
x = claripy.BVS('x', 32)
nose.tools.assert_equal(s.eval(x, 2, extra_constraints=[x==10]), ( 10, ))
s.add(x == 10)
nose.tools.assert_false(s.solution(x, 2))
nose.tools.assert_true(s.solution(x, 10))
# test result caching
if isinstance(s, claripy.frontend_mixins.ModelCacheMixin):
count = claripy._backends_module.backend_z3.solve_count
s = solver_type()
x = claripy.BVS('x', 32)
s.add(x == 10)
nose.tools.assert_true(s.satisfiable())
assert claripy._backends_module.backend_z3.solve_count == count
nose.tools.assert_equal(s.eval(x, 1)[0], 10)
assert claripy._backends_module.backend_z3.solve_count == count
s.add(x == 10)
s.add(x > 9)
nose.tools.assert_equal(s.eval(x, 1)[0], 10)
assert claripy._backends_module.backend_z3.solve_count == count
y = claripy.BVS('y', 32)
s.add(y < 999)
assert s.satisfiable()
assert claripy._backends_module.backend_z3.solve_count == count
nose.tools.assert_equal(s.eval(y, 1)[0], 0)
assert claripy._backends_module.backend_z3.solve_count == count
def test_solver_branching():
for s in solver_list:
yield raw_solver_branching, s, True
yield raw_solver_branching, s, False
def raw_solver_branching(solver_type, reuse_z3_solver):
claripy._backend_z3.reuse_z3_solver = reuse_z3_solver
s = solver_type()
x = claripy.BVS("x", 32)
y = claripy.BVS("y", 32)
s.add(claripy.UGT(x, y))
s.add(claripy.ULT(x, 10))
nose.tools.assert_greater(s.eval(x, 1)[0], 0)
t = s.branch()
if isinstance(s, claripy.frontends.FullFrontend):
nose.tools.assert_is(s._tls.solver, t._tls.solver)
nose.tools.assert_true(s._finalized)
nose.tools.assert_true(t._finalized)
t.add(x == 5)
#if isinstance(s, claripy.FullFrontend):
# nose.tools.assert_is(t._solver, None)
s.add(x == 3)
nose.tools.assert_true(s.satisfiable())
t.add(x == 3)
nose.tools.assert_false(t.satisfiable())
s.add(y == 2)
nose.tools.assert_true(s.satisfiable())
nose.tools.assert_equal(s.eval(x, 1)[0], 3)
nose.tools.assert_equal(s.eval(y, 1)[0], 2)
nose.tools.assert_false(t.satisfiable())
def test_combine():
for s in solver_list:
yield raw_combine, s, True
yield raw_combine, s, False
def raw_combine(solver_type, reuse_z3_solver):
claripy._backend_z3.reuse_z3_solver = reuse_z3_solver
s10 = solver_type()
s20 = solver_type()
s30 = solver_type()
x = claripy.BVS("x", 32)
s10.add(x >= 10)
s20.add(x <= 20)
s30.add(x == 30)
nose.tools.assert_true(s10.satisfiable())
nose.tools.assert_true(s20.satisfiable())
nose.tools.assert_true(s30.satisfiable())
nose.tools.assert_true(s10.combine([s20]).satisfiable())
nose.tools.assert_true(s20.combine([s10]).satisfiable())
nose.tools.assert_true(s30.combine([s10]).satisfiable())
nose.tools.assert_false(s30.combine([s20]).satisfiable())
nose.tools.assert_equal(s30.combine([s10]).eval(x, 1), ( 30, ))
nose.tools.assert_equal(len(s30.combine([s10]).constraints), 2)
def test_composite_solver_with_strings():
try:
s = claripy.SolverComposite(
template_solver_string=claripy.SolverCompositeChild(backend=claripy.backend_manager.backends.smtlib_cvc4))
x = claripy.BVS("x", 32)
y = claripy.BVS("y", 32)
z = claripy.BVS("z", 32)
str_1 = claripy.StringS("sym_str_1", 1024)
c = claripy.And(x == 1, y == 2, z == 3, str_1 == claripy.StringV("cavallo"))
s.add(c)
nose.tools.assert_equal(len(s._solver_list), 4)
nose.tools.assert_true(s.satisfiable())
nose.tools.assert_equal(list(s.eval(str_1, 1)), ["cavallo"])
except claripy.errors.MissingSolverError:
raise nose.SkipTest()
def test_composite_solver():
yield raw_composite_solver, True
yield raw_composite_solver, False
def raw_composite_solver(reuse_z3_solver):
claripy._backend_z3.reuse_z3_solver = reuse_z3_solver
#pylint:disable=no-member
s = claripy.SolverComposite()
x = claripy.BVS("x", 32)
y = claripy.BVS("y", 32)
z = claripy.BVS("z", 32)
c = claripy.And(x == 1, y == 2, z == 3)
s.add(c)
nose.tools.assert_equal(len(s._solver_list), 3)
nose.tools.assert_true(s.satisfiable())
s.add(x < y)
nose.tools.assert_equal(len(s._solver_list), 2)
nose.tools.assert_true(s.satisfiable())
s.simplify()
nose.tools.assert_equal(len(s._solver_list), 3)
nose.tools.assert_true(s.satisfiable())
s1 = s.branch()
nose.tools.assert_equal(len(s1._solver_list), 3)
s1.add(x > y)
nose.tools.assert_equal(len(s1._solver_list), 2)
nose.tools.assert_false(s1.satisfiable())
nose.tools.assert_equal(len(s._solver_list), 3)
nose.tools.assert_true(s.satisfiable())
s.add(claripy.BVV(1, 32) == claripy.BVV(2, 32))
nose.tools.assert_equal(len(s._solver_list), 3)
nose.tools.assert_false(s.satisfiable())
ss = s.branch()
nose.tools.assert_equal(len(ss._solver_list), 3)
nose.tools.assert_false(ss.satisfiable())
s = claripy.SolverComposite()
x = claripy.BVS("x", 32)
y = claripy.BVS("y", 32)
z = claripy.BVS("z", 32)
c = claripy.And(x == 1, y == 2, z > 3)
s.add(c)
if isinstance(s._template_frontend, claripy.frontend_mixins.ModelCacheMixin):
assert len(s._solver_list) == 3
count = claripy._backends_module.backend_z3.solve_count
assert s.satisfiable()
assert claripy._backends_module.backend_z3.solve_count == count + 1
assert list(s.eval(x+y, 1)) == [3]
assert claripy._backends_module.backend_z3.solve_count == count + 1
def test_minmax():
yield raw_minmax, True
yield raw_minmax, False
def raw_minmax(reuse_z3_solver):
claripy._backend_z3.reuse_z3_solver = reuse_z3_solver
s = claripy.Solver()
x = claripy.BVS("x", 32)
nose.tools.assert_equal(s.max(x), 2**32-1)
nose.tools.assert_equal(s.min(x), 0)
nose.tools.assert_true(s.satisfiable())
def test_composite_discrepancy():
yield raw_composite_discrepancy, True
yield raw_composite_discrepancy, False
def raw_composite_discrepancy(reuse_z3_solver):
claripy._backend_z3.reuse_z3_solver = reuse_z3_solver
a = claripy.BVS("a", 8)
b = claripy.BVS("b", 8)
x = claripy.BVS("x", 32)
y = claripy.BVS("y", 32)
z = claripy.BVS("z", 32)
xy = x + y
dst = claripy.BVV(0xbaaaaf50, 32) + xy
constraints = [ ]
constraints.append(x <= 0x1)
constraints.append(x != 0x0)
constraints.append(claripy.SignExt(24, claripy.If(x > 0x0, a, 0)) != 0xa)
constraints.append(x < 0x80)
constraints.append(y <= 0x1)
constraints.append(x == 0x1)
constraints.append((0xbaaaaf50 + x) == 0xbaaaaf51)
constraints.append(y != 0x0)
constraints.append(claripy.SignExt(24, claripy.If(y > 0x0, b, 0)) != 0xa)
constraints.append((x + y) < 0x80)
constraints.append(z <= 0x1)
constraints.append((x + y) == 0x2)
sn = claripy.Solver()
sc = claripy.SolverComposite()
sn.add(constraints)
sc.add(constraints)
print(sn.max(dst), sc.max(dst))
print(sn.min(dst), sc.min(dst))
assert sn.max(dst) == sc.max(dst)
assert sn.min(dst) == sc.min(dst)
def test_model():
x = claripy.BVS("x", 32)
y = claripy.BVS("y", 32)
s = claripy.Solver()
s.add(x < 10)
assert sorted(s.eval(x, 20)) == list(range(10))
s.add(y == 1337)
assert sorted(s.eval(x, 20)) == list(range(10))
def test_unsatness():
x = claripy.BVS("x", 32)
s = claripy.Solver()
s.add(x == 10)
assert s.satisfiable()
s.add(claripy.false)
assert not s.satisfiable()
def test_simplification_annotations():
s = claripy.Solver()
x = claripy.BVS("x", 32)
s.add(x > 10)
s.add(x > 11)
s.add((x > 12).annotate(claripy.SimplificationAvoidanceAnnotation()))
assert len(s.constraints) == 3
s.simplify()
assert len(s.constraints) == 2
def raw_ancestor_merge(solver, reuse_z3_solver):
claripy._backend_z3.reuse_z3_solver = reuse_z3_solver
s = solver()
x = claripy.BVS("x", 32)
y = claripy.BVS("y", 32)
z = claripy.BVS("z", 32)
s.add(x == 10)
s.add(y == x)
p = s.branch()
q = s.branch()
p.add(z == 1)
q.add(z == 2)
r = p.merge([q], [claripy.true, claripy.true])[-1]
t = p.merge([q], [p.constraints[-1], q.constraints[-1]], common_ancestor=s)[-1]
if not isinstance(r, claripy.frontends.CompositeFrontend):
assert len(r.constraints) == 1
assert len(t.constraints) == 3
assert t.constraints[-1].variables == z.variables
assert t.constraints[-1].op == 'Or'
assert len(t.constraints[-1].args) == 2
def test_ancestor_merge():
for s in solver_list:
yield raw_ancestor_merge, s, True
yield raw_ancestor_merge, s, False
def raw_unsat_core(solver, reuse_z3_solver):
claripy._backend_z3.reuse_z3_solver = reuse_z3_solver
s = solver(track=True)
x = claripy.BVS("x", 32)
y = claripy.BVS("y", 32)
z = claripy.BVS("z", 32)
a = claripy.BVS("a", 32)
s.add(x == y)
s.add(x == 1)
s.add(z != a)
s.add(z == 2)
s.add(a == 2)
assert not s.satisfiable()
unsat_core = s.unsat_core()
assert len(unsat_core) == 3
assert unsat_core[0] is not None
assert unsat_core[1] is not None
assert unsat_core[2] is not None
def test_unsat_core():
for s in (claripy.Solver, claripy.SolverComposite, claripy.SolverCacheless, claripy.SolverHybrid):
yield raw_unsat_core, s, True
yield raw_unsat_core, s, False
def test_zero_division_in_cache_mixin():
# Bug in the caching backend. See issue #49 on github.
num = claripy.BVS('num', 256)
denum = claripy.BVS('denum', 256)
e = claripy.BVS('e', 256)
s = claripy.Solver()
s.add(e == 8)
assert s.satisfiable()
s.add(claripy.If(denum == 0, 0, num / denum) == e)
assert s.satisfiable()
# As a bonus:
s.add(num == 16)
assert s.satisfiable()
s.add(denum == 3)
assert not s.satisfiable()
def test_nan():
a = claripy.FPS('a', claripy.FSORT_FLOAT)
b = claripy.BVS('b', 32)
s1 = claripy.Solver()
s1.add((a + 1).isNaN())
res = s1.eval(a, 1)[0]
assert math.isnan(res)
s2 = claripy.Solver()
s2.add(b.raw_to_fp().isNaN())
res = s2.eval(b, 1)[0]
assert res & 0xff800000 == 0x7f800000 and res & 0x007fffff != 0
s3 = claripy.Solver()
s3.add(a.isNaN())
res = s3.eval(a.raw_to_bv(), 1)[0]
assert res & 0xff800000 == 0x7f800000 and res & 0x007fffff != 0
if __name__ == '__main__':
for fparams in test_unsat_core():
fparams[0](*fparams[1:])
for fparams in test_ancestor_merge():
fparams[0](*fparams[1:])
test_simplification_annotations()
test_model()
for fparams in test_composite_discrepancy():
fparams[0](*fparams[1:])
for fparams in test_solver():
fparams[0](*fparams[1:])
for fparams in test_hybrid_solver():
fparams[0](*fparams[1:])
test_replacement_solver()
test_minmax()
test_solver_branching()
for fparams in test_solver_branching():
fparams[0](*fparams[1:])
for fparams in test_combine():
fparams[0](*fparams[1:])
test_composite_solver()
test_zero_division_in_cache_mixin()
test_nan()
|
the-stack_0_21387 | import codecs
import json
import os
from typing import List, Optional
import click
from click.core import Context, Option
from .environ import _load_environ
from .event import _load_dispatch_getter
from .exception import _raise_exception_with_exit_code
from ..event import _DEFAULT_FILENAME
from ..script.result import result_to_json
from ...config.meta import __TITLE__, __VERSION__, __AUTHOR__, __AUTHOR_EMAIL__
# noinspection DuplicatedCode,PyUnusedLocal
def print_version(ctx: Context, param: Option, value: bool) -> None:
"""
Print version information of cli
:param ctx: click context
:param param: current parameter's metadata
:param value: value of current parameter
"""
if not value or ctx.resilient_parsing:
return
click.echo('{title}, version {version}.'.format(title=__TITLE__.capitalize(), version=__VERSION__))
click.echo('Developed by {author}, {email}.'.format(author=__AUTHOR__, email=__AUTHOR_EMAIL__))
ctx.exit()
CONTEXT_SETTINGS = dict(
help_option_names=['-h', '--help']
)
_DEFAULT_TASK = 'main'
@click.command(context_settings=CONTEXT_SETTINGS)
@click.option('-v', '--version', is_flag=True,
callback=print_version, expose_value=False, is_eager=True,
help="Show package's version information.")
@click.option('-s', '--script', type=click.Path(exists=True, readable=True),
help='Path of pji script.', default=_DEFAULT_FILENAME, show_default=True)
@click.option('-t', '--task', type=str, help='Task going to be executed.',
default=_DEFAULT_TASK, show_default=True)
@click.option('-e', '--environ', type=str, multiple=True,
help='Environment variables (loaded before global config).')
@click.option('-E', '--environ_after', type=str, multiple=True,
help='Environment variables (loaded after global config).')
@click.option('-i', '--information', type=click.Path(dir_okay=False),
help='Information dump file (no dumping when not given).')
def cli(script: str, task: str, environ: List[str], environ_after: List[str],
information: Optional[str] = None):
_dispatch_getter = _load_dispatch_getter(script)
_success, _result = _dispatch_getter(
environ=_load_environ(environ),
environ_after=_load_environ(environ_after),
)(task)
if information:
click.echo(
click.style('Dumping result of this work to {info} ... '.format(
info=repr(os.path.abspath(information))), bold=False),
nl=False,
)
with codecs.open(information, 'w') as info_file:
json.dump(result_to_json(_success, _result), info_file, indent=4, sort_keys=True)
click.echo(click.style('COMPLETE', fg='green'), nl=True)
if _success:
click.echo(click.style('Task success.', fg='green'))
else:
click.echo(click.style('Task failed.', fg='red'))
raise _raise_exception_with_exit_code(1, 'task failed.')
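# Illustrative invocation sketch (assumes the package exposes `cli` as a console
# script named `pji`; the script and output file names are hypothetical):
#
#   pji --script script.pji --task main --information result.json
#
# which runs the "main" task from the given pji script and dumps its JSON
# result to result.json.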
|
the-stack_0_21390 | from __future__ import division as __division__
import numpy as __np__
import matplotlib.pyplot as __plt__
import os
from . glass_function.refractiveIndex import *
# glass related functions
# use polyanskiy's refractiveindex database(http://refractiveindex.info/)
# polyanskiy's github: https://github.com/polyanskiy
# https://github.com/polyanskiy/refractiveindex.info-database
# Also use Pavel Dmitriev's script for return refractiveindex from database
# https://github.com/kitchenknif/PyTMM
def glass2indexlist(wavelength_list,glassname):
lens_index_list = []
wavelength_num = len(wavelength_list)
if glassname == 'air' or glassname == 'AIR':
lens_index_list = [1]*wavelength_num
return lens_index_list
else:
        # NOTE: this parsing requires the glass name to be formatted as
        # "<page>_<book>" (glass name, then catalog) with no underscore in the page part.
n = glassname.find('_')
glass_catalog_name = glassname[n+1:]
glass_name = glassname[:n]
catalog = RefractiveIndex()
for w in wavelength_list:
mat = catalog.getMaterial('glass', glass_catalog_name, glass_name) # shelf, book, page
n = mat.getRefractiveIndex(w)
lens_index_list.append(round(n,6))
return lens_index_list
def output(wavelength_list,lens_index_list):
print('Lens wavelength vs index')
print('wavelength-----index---')
for wavelength,index in zip(wavelength_list,lens_index_list):
print("| {0:<8s} | {1:<8s} |".\
format(str(wavelength),str(index)))
print('-----------------------')
return 0
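# Minimal usage sketch (illustrative; the glass name and wavelength values are
# examples only, and the lookup requires the refractiveindex.info database used
# by the refractiveIndex helper):
#
#   waves = [486.1, 587.6, 656.3]                      # unit as expected by the catalog wrapper
#   indexes = glass2indexlist(waves, 'N-BK7_schott')   # page "N-BK7" in book "schott"
#   output(waves, indexes)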
#=============================================================
# Old glass check functions
#
# def glass2indexlist(wavelength_list,glassname):
# """
# convert glass to index list related to wavelength
# glassname: str
# """
# #print 'glass',glassname
# wavelength_num = len(wavelength_list)
# if glassname == 'air' or glassname == 'AIR':
# lens_index_list = [1]*wavelength_num
# #output(wavelength_list,lens_index_list)
# return lens_index_list
# else:
# n = glassname.find('_')
# glass_catalog_name = glassname[n+1:]
# glass_name = glassname[:n]
# dir = 'opticspy/ray_tracing/glass/' + glass_catalog_name+'/'+glassname
# e = os.path.exists(dir)
# if e == False:
# print 'No This Kind Of Glass'
# return None
# else:
# wave_list = []
# index_list = []
# lens_index_list = []
# file = open(dir)
# while 1:
# line = file.readline()
# if not line:
# break
# a = line.split()
# wave_list.append(float(a[0]))
# index_list.append(float(a[1]))
# file.close()
# for wavelength in wavelength_list:
# index = find_closest_wavelength(wavelength,wave_list,index_list)
# lens_index_list.append(index)
# #output(wavelength_list,lens_index_list)
# return lens_index_list
# def find_closest_wavelength(wavelength,wave_list,index_list):
# n = 0
# for i in wave_list:
# if wavelength == i:
# return index_list[n]
# elif wavelength < i:
# return (index_list[n]+index_list[n])/2
# else:
# n = n + 1
|
the-stack_0_21391 | from abc import ABC
class ModelMeta(type):
"""Model metaclass.
By studying how SQLAlchemy and Django ORM work under the hood, we can see
a metaclass can add useful abstractions to class definitions at runtime.
That being said, this metaclass is a toy example and does not reflect
everything that happens in either framework.
The main use cases for a metaclass are (A) to modify a class before
it is visible to a developer and (B) to add a class to a dynamic registry
for further automation.
Do NOT use a metaclass if a task can be done more simply with class
composition, class inheritance or functions. Simple code is the reason
why Python is attractive for 99% of users.
For more on metaclass mechanisms, visit the link below:
https://realpython.com/python-metaclasses/
"""
# Model table registry
tables = {}
def __new__(mcs, name, bases, attrs):
"""Factory for modifying the defined class at runtime."""
kls = super().__new__(mcs, name, bases, attrs)
# Abstract model does not have a `model_name` but a real model does.
# We will leverage this fact later on this routine
if attrs.get("__abstract__") is True:
kls.model_name = None
else:
custom_name = attrs.get("__table_name__")
default_name = kls.__name__.replace("Model", "").lower()
kls.model_name = custom_name if custom_name else default_name
# Ensure abstract and real models have fields so that
# they can be inherited
kls.model_fields = {}
# Fill model fields from the parent classes (left-to-right)
for base in bases:
kls.model_fields.update(base.model_fields)
# Fill model fields from itself
kls.model_fields.update({
field_name: field_obj
for field_name, field_obj in attrs.items()
if isinstance(field_obj, BaseField)
})
# Register a real table (a table with valid `model_name`) to
# the metaclass `table` registry. After all the tables are
# registered, the registry can be sent to a database adapter
# which uses each table to create a properly defined schema
# for the database of choice (i.e. PostgreSQL, MySQL)
if kls.model_name:
kls.model_table = ModelTable(kls.model_name, kls.model_fields)
ModelMeta.tables[kls.model_name] = kls.model_table
else:
kls.model_table = None
# Return newly modified class
return kls
@property
def is_registered(cls):
"""Check if the model's name is valid and exists in the registry."""
return cls.model_name and cls.model_name in cls.tables
class ModelTable:
"""Model table."""
def __init__(self, table_name, table_fields):
self.table_name = table_name
self.table_fields = table_fields
def __repr__(self):
return f"<ModelTable name={self.table_name}>"
class BaseField(ABC):
"""Base field."""
def __repr__(self):
"""Brief representation of any field."""
return f"<{type(self).__name__}>"
class CharField(BaseField):
"""Character field."""
class IntegerField(BaseField):
"""Integer field."""
class BaseModel(metaclass=ModelMeta):
"""Base model.
Notice how `ModelMeta` is injected at the base class. The base class
and its subclasses will be processed by the method `__new__` in the
`ModelMeta` class before being created.
In short, think of a metaclass as the creator of classes. This is
very similar to how classes are the creator of instances.
"""
__abstract__ = True # This is NOT a real table
row_id = IntegerField()
class UserModel(BaseModel):
"""User model."""
__table_name__ = "user_rocks" # This is a custom table name
username = CharField()
password = CharField()
age = CharField()
sex = CharField()
class AddressModel(BaseModel):
"""Address model."""
user_id = IntegerField()
address = CharField()
state = CharField()
zip_code = CharField()
def main():
# Real models are given a name at runtime with `ModelMeta`
assert UserModel.model_name == "user_rocks"
assert AddressModel.model_name == "address"
# Real models are given fields at runtime with `ModelMeta`
assert "row_id" in UserModel.model_fields
assert "row_id" in AddressModel.model_fields
assert "username" in UserModel.model_fields
assert "address" in AddressModel.model_fields
# Real models are registered at runtime with `ModelMeta`
assert UserModel.is_registered
assert AddressModel.is_registered
# Real models have a `ModelTable` that can be used for DB setup
assert isinstance(ModelMeta.tables[UserModel.model_name], ModelTable)
assert isinstance(ModelMeta.tables[AddressModel.model_name], ModelTable)
# Base model is given special treatment at runtime
assert not BaseModel.is_registered
assert BaseModel.model_name is None
assert BaseModel.model_table is None
# Every model is created by `ModelMeta`
assert isinstance(BaseModel, ModelMeta)
assert all(isinstance(model, ModelMeta)
for model in BaseModel.__subclasses__())
# And `ModelMeta` is created by `type`
assert isinstance(ModelMeta, type)
# And `type` is created by `type` itself
assert isinstance(type, type)
# And everything in Python is an object!
assert isinstance(BaseModel, object)
assert isinstance(ModelMeta, object)
assert isinstance(type, object)
assert isinstance(object, object)
if __name__ == "__main__":
main()
|
the-stack_0_21394 | #!/usr/bin/env python
"""
Finds issues with tA pages, links, TOC, config, etc.
"""
import logging
import sys
import os
import yaml
import codecs
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
formatter = logging.Formatter('%(levelname)s - %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
class TaInspector(object):
def __init__(self, repo_dir):
"""
:param string repo_dir:
"""
self.repo_dir = repo_dir # Local directory
self.dirs = [name for name in os.listdir(self.repo_dir) if os.path.isfile(os.path.join(self.repo_dir, name, 'toc.yaml'))]
self.links = []
def check_exists(self, type, link, d=None, parent=None):
dirs = []
if d:
dirs.append(d)
else:
dirs = self.dirs
found = None
for d in dirs:
if os.path.isdir(os.path.join(self.repo_dir, d, link)):
if not found:
found = d
else:
logger.critical('Dependency {0} found in {1} and {2}'.format(link, found, d))
if found:
if not os.path.isfile(os.path.join(self.repo_dir, found, link, 'title.md')):
logger.critical('No title.md for {0}/{1}'.format(found, link))
if not os.path.isfile(os.path.join(self.repo_dir, found, link, 'sub-title.md')):
logger.critical('No sub-title.md for {0}/{1}'.format(found, link))
if not os.path.isfile(os.path.join(self.repo_dir, found, link, '01.md')):
logger.critical('No 01.md for {0}/{1}'.format(found, link))
elif type == 'TOC':
logger.critical('Article {0} in {1}/toc.yaml does not exist ({1}/{0} does not exist)'.format(link, d))
else:
logger.critical('{0} {1} for {2} in {3}/config.yaml not found!'.format(type, link, parent, d))
def inspect_section(self, d, section, config):
if 'link' in section:
link = section['link']
if link in self.links:
logger.critical('There is already a link called {0}'.format(link))
else:
self.links.append(link)
self.check_exists('TOC', link, d)
if link in config:
if 'dependencies' in config[link] and config[link]['dependencies']:
for dependency in config[link]['dependencies']:
self.check_exists('Dependency', dependency, None, link)
if 'recommended' in config[link] and config[link]['recommended']:
for recommended in config[link]['recommended']:
self.check_exists('Recommended', recommended, None, link)
else:
logger.warning('{0} does not have an entry in the {1}/config.yaml file'.format(link, d))
if 'sections' in section:
for section in section['sections']:
self.inspect_section(d, section, config)
def run(self):
for d in self.dirs:
toc_path = os.path.join(self.repo_dir, d, 'toc.yaml')
config_path = os.path.join(self.repo_dir, d, 'config.yaml')
with codecs.open(toc_path, 'r', encoding='utf-8-sig') as f:
                toc = yaml.safe_load(f)
with codecs.open(config_path, 'r', encoding='utf-8-sig') as f:
                config = yaml.safe_load(f)
for section in toc['sections']:
self.inspect_section(d, section, config)
for link in config:
if link not in self.links:
logger.warning('{0} in {1}/config.yaml is never used.'.format(link, d))
            for sub in [name for name in os.listdir(os.path.join(self.repo_dir, d)) if os.path.isdir(os.path.join(self.repo_dir, d, name))]:
if sub not in self.links:
logger.warning('{0}/{1} is never used in the TOC.'.format(d, sub))
def main():
if len(sys.argv) < 2 or not os.path.isdir(sys.argv[1]):
logger.critical('You must provide the path to the tA repo!')
exit(1)
repo_dir = sys.argv[1]
ta = TaInspector(repo_dir)
ta.run()
if __name__ == '__main__':
main()
|
the-stack_0_21397 | """Projects urls."""
# Django
from django.urls import path
# Blog views
from projects import views
urlpatterns = [
# Projects feed
path(route = 'projects/',
         view = views.ProjectsList.as_view(),
name = 'projects_feed'),
# Project detailed
path(route = 'project/<slug:slug>/',
view = views.ProjectDetail.as_view(),
name = 'project_detail'),
]
|
the-stack_0_21399 | import logging
from enum import Enum
from pathlib import Path
from subprocess import CalledProcessError, CompletedProcess
from typing import Final, Iterable, List, Optional
from ..cli_utils import check_dir_path, check_file_path, run_process
_LOGGER: Final = logging.getLogger(__name__)
class KompileBackend(Enum):
LLVM = 'llvm'
HASKELL = 'haskell'
KORE = 'kore'
JAVA = 'java'
def kompile(
main_file: Path,
*,
backend: Optional[KompileBackend],
output_dir: Optional[Path] = None,
include_dirs: Iterable[Path] = (),
    emit_json: bool = False,
) -> Path:
check_file_path(main_file)
for include_dir in include_dirs:
check_dir_path(include_dir)
args = _build_arg_list(backend=backend, output_dir=output_dir, include_dirs=include_dirs, emit_json=emit_json)
try:
_kompile(str(main_file), *args)
except CalledProcessError as err:
raise RuntimeError(f'Command kompile exited with code {err.returncode} for: {main_file}', err.stdout, err.stderr)
kompiled_dir = _kompiled_dir(main_file, output_dir)
assert kompiled_dir.is_dir()
return kompiled_dir
def _build_arg_list(
*,
backend: Optional[KompileBackend],
output_dir: Optional[Path],
include_dirs: Iterable[Path],
emit_json: bool,
) -> List[str]:
args = []
if backend:
args += ['--backend', backend.value]
if output_dir:
args += ['--output-definition', str(output_dir)]
for include_dir in include_dirs:
args += ['-I', str(include_dir)]
if emit_json:
args.append('--emit-json')
return args
def _kompile(main_file: str, *args: str) -> CompletedProcess:
run_args = ['kompile', main_file] + list(args)
return run_process(run_args, _LOGGER)
def _kompiled_dir(main_file: Path, output_dir: Optional[Path] = None) -> Path:
if output_dir:
return output_dir
return Path(main_file.stem + '-kompiled')
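# Illustrative usage sketch (paths are hypothetical):
#
#   kompiled_dir = kompile(
#       Path('imp.k'),
#       backend=KompileBackend.HASKELL,
#       output_dir=Path('imp-kompiled'),
#       emit_json=True,
#   )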
|
the-stack_0_21400 | import streamlit as st
import pandas as pd
import numpy as np
import plotly.express as px
def app():
st.title('Simulation 2')
    st.write('The sensitivity of the distribution of Token A is presented below for the parameter "weight_Token_A" swept from 0.5 to 0.01.')
# Load dataset
df2 = pd.read_csv("./data/Simulation 2_ weight_Token_A 0.5 -_ 0.01 - Sheet1 (1).csv")
# Chart building
fig2 = px.line(
df2,
        title='Sensitivity of distribution of Token_A - Parameter: Weight of Token A (0.5 -> 0.01)',
x='timestep',
y='balance_of_Token_A',
color='par_weight_token_A_inverse',
animation_frame='par_weight_token_A_inverse',
height=450, width=800)
st.plotly_chart(fig2)
|
the-stack_0_21403 | from typing import List, Optional, Tuple, Union
import numpy as np
import torch
import torch.nn as nn
from torch.distributions import Distribution
from pts.core.component import validated
from pts.model import weighted_average
from pts.modules import DistributionOutput, MeanScaler, NOPScaler, FeatureEmbedder
def prod(xs):
p = 1
for x in xs:
p *= x
return p
class DeepARNetwork(nn.Module):
@validated()
def __init__(
self,
input_size: int,
num_layers: int,
num_cells: int,
cell_type: str,
history_length: int,
context_length: int,
prediction_length: int,
distr_output: DistributionOutput,
dropout_rate: float,
cardinality: List[int],
embedding_dimension: List[int],
lags_seq: List[int],
scaling: bool = True,
dtype: np.dtype = np.float32,
) -> None:
super().__init__()
self.num_layers = num_layers
self.num_cells = num_cells
self.cell_type = cell_type
self.history_length = history_length
self.context_length = context_length
self.prediction_length = prediction_length
self.dropout_rate = dropout_rate
self.cardinality = cardinality
self.embedding_dimension = embedding_dimension
self.num_cat = len(cardinality)
self.scaling = scaling
self.dtype = dtype
self.lags_seq = lags_seq
self.distr_output = distr_output
rnn = {"LSTM": nn.LSTM, "GRU": nn.GRU}[self.cell_type]
self.rnn = rnn(
input_size=input_size,
hidden_size=num_cells,
num_layers=num_layers,
dropout=dropout_rate,
batch_first=True,
)
self.target_shape = distr_output.event_shape
self.proj_distr_args = distr_output.get_args_proj(num_cells)
self.embedder = FeatureEmbedder(
cardinalities=cardinality, embedding_dims=embedding_dimension
)
if scaling:
self.scaler = MeanScaler(keepdim=True)
else:
self.scaler = NOPScaler(keepdim=True)
@staticmethod
def get_lagged_subsequences(
sequence: torch.Tensor,
sequence_length: int,
indices: List[int],
subsequences_length: int = 1,
) -> torch.Tensor:
"""
Returns lagged subsequences of a given sequence.
Parameters
----------
sequence : Tensor
the sequence from which lagged subsequences should be extracted.
Shape: (N, T, C).
sequence_length : int
length of sequence in the T (time) dimension (axis = 1).
indices : List[int]
list of lag indices to be used.
subsequences_length : int
length of the subsequences to be extracted.
Returns
--------
lagged : Tensor
a tensor of shape (N, S, C, I), where S = subsequences_length and
I = len(indices), containing lagged subsequences. Specifically,
lagged[i, j, :, k] = sequence[i, -indices[k]-S+j, :].
"""
assert max(indices) + subsequences_length <= sequence_length, (
f"lags cannot go further than history length, found lag {max(indices)} "
f"while history length is only {sequence_length}"
)
assert all(lag_index >= 0 for lag_index in indices)
lagged_values = []
for lag_index in indices:
begin_index = -lag_index - subsequences_length
end_index = -lag_index if lag_index > 0 else None
lagged_values.append(sequence[:, begin_index:end_index, ...])
return torch.stack(lagged_values, dim=-1)
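    # Worked example (illustrative): with indices=[0, 1] and subsequences_length
    # S=2 on a length-4 sequence [s0, s1, s2, s3] along the time axis, the lags
    # stacked on the last dimension are
    #   lag 0 -> [s2, s3]   (the last S steps)
    #   lag 1 -> [s1, s2]   (the last S steps shifted back by one)
    # which matches lagged[i, j, :, k] = sequence[i, -indices[k]-S+j, :].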
def unroll_encoder(
self,
feat_static_cat: torch.Tensor, # (batch_size, num_features)
feat_static_real: torch.Tensor, # (batch_size, num_features)
past_time_feat: torch.Tensor, # (batch_size, history_length, num_features)
past_target: torch.Tensor, # (batch_size, history_length, *target_shape)
past_observed_values: torch.Tensor, # (batch_size, history_length, *target_shape)
future_time_feat: Optional[
torch.Tensor
] = None, # (batch_size, prediction_length, num_features)
future_target: Optional[
torch.Tensor
] = None, # (batch_size, prediction_length, *target_shape)
) -> Tuple[torch.Tensor, Union[torch.Tensor, List], torch.Tensor, torch.Tensor]:
if future_time_feat is None or future_target is None:
time_feat = past_time_feat[
:, self.history_length - self.context_length :, ...
]
sequence = past_target
sequence_length = self.history_length
subsequences_length = self.context_length
else:
time_feat = torch.cat(
(
past_time_feat[:, self.history_length - self.context_length :, ...],
future_time_feat,
),
dim=1
)
sequence = torch.cat((past_target, future_target), dim=1)
sequence_length = self.history_length + self.prediction_length
subsequences_length = self.context_length + self.prediction_length
lags = self.get_lagged_subsequences(
sequence=sequence,
sequence_length=sequence_length,
indices=self.lags_seq,
subsequences_length=subsequences_length
)
# scale is computed on the context length last units of the past target
# scale shape is (batch_size, 1, *target_shape)
_, scale = self.scaler(
past_target[:, self.context_length :, ...],
past_observed_values[:, self.context_length :, ...],
)
# (batch_size, num_features)
embedded_cat = self.embedder(feat_static_cat)
# in addition to embedding features, use the log scale as it can help
# prediction too
# (batch_size, num_features + prod(target_shape))
static_feat = torch.cat(
(
embedded_cat,
feat_static_real,
scale.log() if len(self.target_shape) == 0 else scale.squeeze(1).log(),
),
dim=1,
)
# (batch_size, subsequences_length, num_features + 1)
repeated_static_feat = static_feat.unsqueeze(1).expand(
-1, subsequences_length, -1
)
# (batch_size, sub_seq_len, *target_shape, num_lags)
lags_scaled = lags / scale.unsqueeze(-1)
# from (batch_size, sub_seq_len, *target_shape, num_lags)
# to (batch_size, sub_seq_len, prod(target_shape) * num_lags)
input_lags = lags_scaled.reshape(
(-1, subsequences_length, len(self.lags_seq) * prod(self.target_shape))
)
# (batch_size, sub_seq_len, input_dim)
inputs = torch.cat((input_lags, time_feat, repeated_static_feat), dim=-1)
# unroll encoder
outputs, state = self.rnn(inputs)
# outputs: (batch_size, seq_len, num_cells)
# state: list of (num_layers, batch_size, num_cells) tensors
# scale: (batch_size, 1, *target_shape)
# static_feat: (batch_size, num_features + prod(target_shape))
return outputs, state, scale, static_feat
class DeepARTrainingNetwork(DeepARNetwork):
def distribution(
self,
feat_static_cat: torch.Tensor,
feat_static_real: torch.Tensor,
past_time_feat: torch.Tensor,
past_target: torch.Tensor,
past_observed_values: torch.Tensor,
future_time_feat: torch.Tensor,
future_target: torch.Tensor,
future_observed_values: torch.Tensor,
) -> Distribution:
rnn_outputs, _, scale, _ = self.unroll_encoder(
feat_static_cat=feat_static_cat,
feat_static_real=feat_static_real,
past_time_feat=past_time_feat,
past_target=past_target,
past_observed_values=past_observed_values,
future_time_feat=future_time_feat,
future_target=future_target,
)
distr_args = self.proj_distr_args(rnn_outputs)
return self.distr_output.distribution(distr_args, scale=scale)
def forward(
self,
feat_static_cat: torch.Tensor,
feat_static_real: torch.Tensor,
past_time_feat: torch.Tensor,
past_target: torch.Tensor,
past_observed_values: torch.Tensor,
future_time_feat: torch.Tensor,
future_target: torch.Tensor,
future_observed_values: torch.Tensor,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
distr = self.distribution(
feat_static_cat=feat_static_cat,
feat_static_real=feat_static_real,
past_time_feat=past_time_feat,
past_target=past_target,
past_observed_values=past_observed_values,
future_time_feat=future_time_feat,
future_target=future_target,
future_observed_values=future_observed_values,
)
# put together target sequence
# (batch_size, seq_len, *target_shape)
target = torch.cat(
(
past_target[:, self.history_length - self.context_length :, ...],
future_target,
),
dim=1,
)
# (batch_size, seq_len)
loss = -distr.log_prob(target)
# (batch_size, seq_len, *target_shape)
observed_values = torch.cat(
(
past_observed_values[
:, self.history_length - self.context_length :, ...
],
future_observed_values,
),
dim=1,
)
# mask the loss at one time step iff one or more observations is missing in the target dimensions
# (batch_size, seq_len)
loss_weights = (
observed_values
if (len(self.target_shape) == 0)
            else observed_values.min(dim=-1, keepdim=False).values  # .min over a dim returns (values, indices)
)
weighted_loss = weighted_average(loss, weights=loss_weights)
return weighted_loss, loss
class DeepARPredictionNetwork(DeepARNetwork):
def __init__(self, num_parallel_samples: int = 100, **kwargs) -> None:
super().__init__(**kwargs)
self.num_parallel_samples = num_parallel_samples
# for decoding the lags are shifted by one, at the first time-step
# of the decoder a lag of one corresponds to the last target value
self.shifted_lags = [l - 1 for l in self.lags_seq]
def sampling_decoder(
self,
static_feat: torch.Tensor,
past_target: torch.Tensor,
time_feat: torch.Tensor,
scale: torch.Tensor,
begin_states: Union[torch.Tensor, List[torch.Tensor]],
) -> torch.Tensor:
"""
Computes sample paths by unrolling the RNN starting with a initial
input and state.
Parameters
----------
static_feat : Tensor
static features. Shape: (batch_size, num_static_features).
past_target : Tensor
target history. Shape: (batch_size, history_length).
time_feat : Tensor
time features. Shape: (batch_size, prediction_length, num_time_features).
scale : Tensor
tensor containing the scale of each element in the batch. Shape: (batch_size, 1, 1).
begin_states : List or Tensor
list of initial states for the LSTM layers or tensor for GRU.
the shape of each tensor of the list should be (num_layers, batch_size, num_cells)
Returns
--------
Tensor
A tensor containing sampled paths.
Shape: (batch_size, num_sample_paths, prediction_length).
"""
# blows-up the dimension of each tensor to batch_size * self.num_parallel_samples for increasing parallelism
repeated_past_target = past_target.repeat_interleave(
repeats=self.num_parallel_samples, dim=0
)
repeated_time_feat = time_feat.repeat_interleave(
repeats=self.num_parallel_samples, dim=0
)
repeated_static_feat = static_feat.repeat_interleave(
repeats=self.num_parallel_samples, dim=0
).unsqueeze(1)
repeated_scale = scale.repeat_interleave(
repeats=self.num_parallel_samples, dim=0
)
if self.cell_type == "LSTM":
repeated_states = [
s.repeat_interleave(repeats=self.num_parallel_samples, dim=1)
for s in begin_states
]
else:
repeated_states = begin_states.repeat_interleave(
repeats=self.num_parallel_samples, dim=1
)
future_samples = []
# for each future time-units we draw new samples for this time-unit and update the state
for k in range(self.prediction_length):
# (batch_size * num_samples, 1, *target_shape, num_lags)
lags = self.get_lagged_subsequences(
sequence=repeated_past_target,
sequence_length=self.history_length + k,
indices=self.shifted_lags,
subsequences_length=1,
)
# (batch_size * num_samples, 1, *target_shape, num_lags)
lags_scaled = lags / repeated_scale.unsqueeze(-1)
# from (batch_size * num_samples, 1, *target_shape, num_lags)
# to (batch_size * num_samples, 1, prod(target_shape) * num_lags)
input_lags = lags_scaled.reshape(
(-1, 1, prod(self.target_shape) * len(self.lags_seq))
)
# (batch_size * num_samples, 1, prod(target_shape) * num_lags + num_time_features + num_static_features)
decoder_input = torch.cat(
(input_lags, repeated_time_feat[:, k : k + 1, :], repeated_static_feat),
dim=-1,
)
# output shape: (batch_size * num_samples, 1, num_cells)
            # state: LSTM -> tuple of two (num_layers, batch_size * num_samples, num_cells) tensors; GRU -> one such tensor
rnn_outputs, repeated_states = self.rnn(decoder_input, repeated_states)
distr_args = self.proj_distr_args(rnn_outputs)
# compute likelihood of target given the predicted parameters
distr = self.distr_output.distribution(distr_args, scale=repeated_scale)
# (batch_size * num_samples, 1, *target_shape)
new_samples = distr.sample()
# (batch_size * num_samples, seq_len, *target_shape)
repeated_past_target = torch.cat((repeated_past_target, new_samples), dim=1)
future_samples.append(new_samples)
# (batch_size * num_samples, prediction_length, *target_shape)
samples = torch.cat(future_samples, dim=1)
# (batch_size, num_samples, prediction_length, *target_shape)
return samples.reshape(
(
(-1, self.num_parallel_samples)
+ (self.prediction_length,)
+ self.target_shape
)
)
# noinspection PyMethodOverriding,PyPep8Naming
def forward(
self,
feat_static_cat: torch.Tensor, # (batch_size, num_features)
feat_static_real: torch.Tensor, # (batch_size, num_features)
past_time_feat: torch.Tensor, # (batch_size, history_length, num_features)
past_target: torch.Tensor, # (batch_size, history_length, *target_shape)
past_observed_values: torch.Tensor, # (batch_size, history_length, *target_shape)
future_time_feat: torch.Tensor, # (batch_size, prediction_length, num_features)
) -> torch.Tensor:
"""
Predicts samples, all tensors should have NTC layout.
Parameters
----------
feat_static_cat : (batch_size, num_features)
feat_static_real : (batch_size, num_features)
past_time_feat : (batch_size, history_length, num_features)
past_target : (batch_size, history_length, *target_shape)
past_observed_values : (batch_size, history_length, *target_shape)
future_time_feat : (batch_size, prediction_length, num_features)
Returns
-------
Tensor
Predicted samples
"""
# unroll the decoder in "prediction mode", i.e. with past data only
_, state, scale, static_feat = self.unroll_encoder(
feat_static_cat=feat_static_cat,
feat_static_real=feat_static_real,
past_time_feat=past_time_feat,
past_target=past_target,
past_observed_values=past_observed_values,
future_time_feat=None,
future_target=None,
)
return self.sampling_decoder(
past_target=past_target,
time_feat=future_time_feat,
static_feat=static_feat,
scale=scale,
begin_states=state,
)
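if __name__ == "__main__":
    # Minimal, illustrative sanity check of get_lagged_subsequences (not part of the
    # original module; the toy tensor and lag set below are arbitrary assumptions).
    _toy = torch.arange(20, dtype=torch.float32).reshape(2, 10, 1)  # (N, T, C)
    _lags = DeepARNetwork.get_lagged_subsequences(
        sequence=_toy, sequence_length=10, indices=[1, 2, 5], subsequences_length=3
    )
    # expected shape: (N, S, C, I) == (2, 3, 1, 3)
    print(_lags.shape)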
|
the-stack_0_21404 | #!/usr/bin/env python
from typing import Tuple
import torch
import argparse
from src.loadopts import *
from src.config import SAVED_FILENAME, DEVICE
from src.utils import timemeter
from autoattack import AutoAttack
METHOD = "AutoAttack"
FMT = "{description}={norm}-{version}-{epsilon}"
parser = argparse.ArgumentParser()
parser.add_argument("model", type=str)
parser.add_argument("dataset", type=str)
parser.add_argument("info_path", type=str)
parser.add_argument("--filename", type=str, default=SAVED_FILENAME)
# for AA
parser.add_argument("--norm", choices=("Linf", "L2", "L1"), default="Linf")
parser.add_argument("--epsilon", type=float, default=8/255)
parser.add_argument("--version", choices=("standard", "plus"), default="standard")
# basic settings
parser.add_argument("-b", "--batch_size", type=int, default=256)
parser.add_argument("--transform", type=str, default='tensor,none')
parser.add_argument("--log2file", action="store_false", default=True,
help="False: remove file handler")
parser.add_argument("--log2console", action="store_false", default=True,
help="False: remove console handler if log2file is True ...")
parser.add_argument("--seed", type=int, default=1)
parser.add_argument("--benchmark", action="store_false", default=True,
help="cudnn.benchmark == True ?")
parser.add_argument("-m", "--description", type=str, default=METHOD)
opts = parser.parse_args()
opts.description = FMT.format(**opts.__dict__)
@timemeter("Setup")
def load_cfg() -> Tuple[Config, str]:
from src.dict2obj import Config
from src.utils import set_seed, activate_benchmark, load, set_logger
from models.base import ADArch
cfg = Config()
# generate the log path
_, log_path = generate_path(METHOD, opts.dataset,
opts.model, opts.description)
# set logger
logger = set_logger(
path=log_path,
log2file=opts.log2file,
log2console=opts.log2console
)
logger.debug(opts.info_path)
activate_benchmark(opts.benchmark)
set_seed(opts.seed)
# load the model
model = load_model(opts.model)(num_classes=get_num_classes(opts.dataset))
mean, std = load_normalizer(opts.dataset)
model = ADArch(model=model, mean=mean, std=std)
load(
model=model,
path=opts.info_path,
filename=opts.filename
)
model.eval()
# load the testset
testset = load_dataset(
dataset_type=opts.dataset,
transforms=opts.transform,
train=False
)
data = []
targets = []
for i in range(len(testset)):
img, label = testset[i]
data.append(img)
targets.append(label)
cfg['data'] = torch.stack(data)
cfg['targets'] = torch.tensor(targets, dtype=torch.long)
cfg['attacker'] = AutoAttack(
model,
norm=opts.norm,
eps=opts.epsilon,
version=opts.version,
device=DEVICE,
)
return cfg, log_path
@timemeter('Main')
def main(attacker, data, targets):
attacker.run_standard_evaluation(data, targets, bs=opts.batch_size)
if __name__ == "__main__":
from src.utils import readme
cfg, log_path = load_cfg()
readme(log_path, opts, mode="a")
main(**cfg)
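# Illustrative invocation (script, model and checkpoint names are hypothetical):
#   python attack_aa.py resnet18 cifar10 ./infos/resnet18-cifar10 \
#       --norm Linf --epsilon 0.0314 --version standard -b 128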
|
the-stack_0_21405 | import pytest
from faker import Faker
from pastry_shop.blog.models import Post, Comment
pytestmark = pytest.mark.django_db
fake = Faker()
class TestPostModel:
def test_post_to_str(self, set_up):
post = Post.objects.first()
assert str(post) == post.title
class TestCommentModel:
def test_comment_to_str(self, set_up):
comment = Comment.objects.first()
assert str(comment) == f"{comment.content[:20]} - {comment.author.username}"
|
the-stack_0_21407 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""URI endpoint for nudging Anomaly entities and updating alert bug IDs."""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import json
from google.appengine.api import users
from google.appengine.ext import ndb
from dashboard.common import request_handler
from dashboard.common import utils
from dashboard.common import xsrf
class EditAnomaliesHandler(request_handler.RequestHandler):
"""Handles editing the bug IDs and revision range of Alerts."""
@xsrf.TokenRequired
def post(self):
"""Allows adding or resetting bug IDs and invalid statuses to Alerts.
Additionally, this endpoint is also responsible for changing the start
and end revisions of Anomaly entities.
Request parameters:
keys: A comma-separated list of urlsafe keys of Anomaly entities.
bug_id: The new bug ID. This should be either the string REMOVE
(indicating resetting the bug ID to None), or an integer. A negative
integer indicates an invalid or ignored alert. If this is given, then
the start and end revision ranges are ignored.
new_start_revision: New start revision value for the alert.
new_end_revision: New end revision value for the alert.
Outputs:
JSON which indicates the result. If an error has occurred, the field
"error" should be in the result. If successful, the response is still
expected to be JSON.
"""
if not utils.IsValidSheriffUser():
user = users.get_current_user()
self.ReportError('User "%s" not authorized.' % user, status=403)
return
# Get the list of alerts to modify.
urlsafe_keys = self.request.get('keys')
if not urlsafe_keys:
self.response.out.write(json.dumps({
'error': 'No alerts specified to add bugs to.'}))
return
keys = [ndb.Key(urlsafe=k) for k in urlsafe_keys.split(',')]
alert_entities = ndb.get_multi(keys)
# Get the parameters which specify the changes to make.
bug_id = self.request.get('bug_id')
new_start_revision = self.request.get('new_start_revision')
new_end_revision = self.request.get('new_end_revision')
result = None
if bug_id:
result = self.ChangeBugId(alert_entities, bug_id)
elif new_start_revision and new_end_revision:
result = self.NudgeAnomalies(
alert_entities, new_start_revision, new_end_revision)
else:
result = {'error': 'No bug ID or new revision specified.'}
self.response.out.write(json.dumps(result))
def ChangeBugId(self, alert_entities, bug_id):
"""Changes or resets the bug ID of all given alerts."""
# Change the bug ID if a new bug ID is specified and valid.
if bug_id == 'REMOVE':
bug_id = None
else:
try:
bug_id = int(bug_id)
except ValueError:
return {'error': 'Invalid bug ID %s' % str(bug_id)}
for a in alert_entities:
a.bug_id = bug_id
ndb.put_multi(alert_entities)
return {'bug_id': bug_id}
def NudgeAnomalies(self, anomaly_entities, start, end):
# Change the revision range if a new revision range is specified and valid.
try:
start = int(start)
end = int(end)
except ValueError:
return {'error': 'Invalid revisions %s, %s' % (start, end)}
for a in anomaly_entities:
a.start_revision = start
a.end_revision = end
ndb.put_multi(anomaly_entities)
return {'success': 'Alerts nudged.'}
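# Illustrative request/response pairs (urlsafe keys shown as placeholders):
#   keys=<k1>,<k2>&bug_id=REMOVE                              -> {"bug_id": null}
#   keys=<k1>&new_start_revision=1000&new_end_revision=1010   -> {"success": "Alerts nudged."}
#   keys=<k1>  (neither bug_id nor revisions)                 -> {"error": "No bug ID or new revision specified."}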
|
the-stack_0_21408 | import sys,argparse,os
import librosa
from sklearn import preprocessing
from sklearn.utils import shuffle
import torch
import numpy as np
from utils import utils
# Arguments
parser=argparse.ArgumentParser(description='')
parser.add_argument('--device',default='cuda',type=str,required=False,help='(default=%(default)s)')
parser.add_argument('--mode',type=str,required=True,help='(default=%(default)s)',
choices=['train','valid','test','random'])
parser.add_argument('--path_in',default='',type=str,required=True,help='(default=%(default)s)')
parser.add_argument('--fn_mask',default='_to_',type=str,required=False,help='(default=%(default)s)')
parser.add_argument('--fn_cla',default='',type=str,required=True,help='(default=%(default)s)')
parser.add_argument('--fn_res',default='',type=str,required=True,help='(default=%(default)s)')
parser.add_argument('--extension',default='.wav',type=str,required=False,help='(default=%(default)s)')
parser.add_argument('--pc_valid',default=0.1,type=float,required=False,help='(default=%(default)f)')
parser.add_argument('--patience',default=5,type=int,required=False,help='(default=%(default)f)')
parser.add_argument('--cut_last',default=0,type=int,required=False,help='(default=%(default)f)')
args=parser.parse_args()
utils.print_arguments(args)
########################################################################################################################
speakers={}
if args.mode!='train':
print('Load classifier')
speakers,sca,model=torch.load(args.fn_cla)
########################################################################################################################
# Load filenames
print('Filenames')
fn_all=[]
for dirpath,dirnames,filenames in os.walk(args.path_in):
for fn in filenames:
if fn.endswith(args.extension):
if args.mode!='test' or (args.mode=='test' and args.fn_mask in fn):
fn_all.append(os.path.join(dirpath,fn))
print(len(fn_all))
# Feature extraction
print('Feature extraction')
durations=[]
features=[]
labels=[]
source,target=[],[]
try:
for i,fn in enumerate(fn_all):
arrs=[]
y,sr=librosa.load(fn,sr=16000)
if args.cut_last>0:
y=y[:-args.cut_last-1]
spec=np.abs(librosa.stft(y=y,n_fft=2048,hop_length=128,win_length=256))**2
melspec=librosa.feature.melspectrogram(S=spec,n_mels=200)
mfcc=librosa.feature.mfcc(S=librosa.power_to_db(melspec),n_mfcc=40)
arrs.append(mfcc)
mfcc=librosa.feature.delta(mfcc)
arrs.append(mfcc)
mfcc=librosa.feature.delta(mfcc)
arrs.append(mfcc)
#cqt=librosa.amplitude_to_db(np.abs(librosa.cqt(y,sr=sr,fmin=27.5,n_bins=96)),ref=np.max)
#arrs.append(cqt)
#cqt=librosa.feature.delta(cqt)
#arrs.append(cqt)
rms=librosa.feature.rms(y=y)
arrs.append(rms)
#zcr=librosa.feature.zero_crossing_rate(y=y)
#arrs.append(zcr)
feat=[]
for x in arrs:
feat+=list(np.mean(x,axis=1))
feat+=list(np.std(x,axis=1))
spk=os.path.split(fn)[-1].split('_')[0]
if args.mode=='train' and spk not in speakers:
speakers[spk]=len(speakers)
elif spk not in speakers:
continue
source.append(speakers[spk])
if args.mode=='test' or args.mode=='random':
spk=os.path.split(fn)[-1].split(args.fn_mask)[-1][:-len(args.extension)]
if spk not in speakers:
continue
target.append(speakers[spk])
yy=(np.abs(y)>0.02).astype(np.float32)
durations.append(np.sum(yy)/sr)
features.append(feat)
labels.append(speakers[spk])
print('\r{:5.1f}%'.format(100*(i+1)/len(fn_all)),end='')
except KeyboardInterrupt:
pass
print()
durations=np.array(durations,dtype=np.float32)
features=np.array(features,dtype=np.float32)
labels=np.array(labels,dtype=np.int32)
source=np.array(source,dtype=np.int32)
if len(target)>0:
target=np.array(target,dtype=np.int32)
print(len(speakers),'speakers')
print(features.shape,labels.shape)
########################################################################################################################
def batch_loop(e,r,x,y,eval):
if eval:
model.eval()
else:
model.train()
r=shuffle(r)
losses=[]
predictions=[]
for b in range(0,len(r),sbatch):
if b+sbatch>len(r):
rr=r[b:]
else:
rr=r[b:b+sbatch]
rr=torch.LongTensor(rr)
xb=x[rr,:].to(args.device)
yb=y[rr].to(args.device)
ybhat=model.forward(xb)
loss=loss_function(ybhat,yb)
losses+=list(loss.data.cpu().numpy())
predictions+=list(ybhat.data.max(1)[1].cpu().numpy())
if not eval:
loss=loss.mean()
optim.zero_grad()
loss.backward()
optim.step()
print('\rEpoch {:03d}/{:03d} - {:5.1f}% : loss = {:7.3f}'.format(e+1,nepochs,100*len(losses)/len(x),np.mean(losses)),end='')
return losses,predictions
print('-'*100)
nepochs=200
sbatch=128
loss_function=torch.nn.CrossEntropyLoss(reduction='none')
if args.mode=='train':
print('Train')
sca=preprocessing.StandardScaler()
x=sca.fit_transform(features)
x,y=torch.FloatTensor(x),torch.LongTensor(labels)
model=torch.nn.Sequential(torch.nn.Dropout(0.4),torch.nn.Linear(x.size(1),len(speakers)))
model=model.to(args.device)
optim=torch.optim.Adam(model.parameters())
r=list(range(len(x)))
r=shuffle(r)
split=int(args.pc_valid*len(r))
r_train,r_valid=r[:-split],r[-split:]
try:
loss_best=np.inf
patience=args.patience
for e in range(nepochs):
batch_loop(e,r_train,x,y,False)
with torch.no_grad():
losses,_=batch_loop(e,r_valid,x,y,True)
print()
if np.mean(losses)<loss_best:
loss_best=np.mean(losses)
patience=args.patience
else:
patience-=1
if patience==0:
break
except KeyboardInterrupt:
print()
torch.save([speakers,sca,model.cpu()],args.fn_cla)
print('[Saved '+args.fn_cla+']')
print('Predict')
x,y=x[r_valid,:],y[r_valid]
else:
print('Predict')
x=sca.transform(features)
x,y=torch.FloatTensor(x),torch.LongTensor(labels)
if args.mode=='random':
losses,predictions=[],[]
ymax=np.max(y.numpy())
for i in range(len(y)):
losses.append(0)
predictions.append(np.random.randint(ymax+1))
else:
model=model.to(args.device)
with torch.no_grad():
losses,predictions=batch_loop(-1,list(range(len(x))),x,y,True)
print()
losses=np.array(losses,dtype=np.float32)
predictions=np.array(predictions,dtype=np.int32)
print('NLL = {:7.3f}'.format(np.mean(losses)))
print('Accuracy = {:5.1f}%'.format(100*np.mean((predictions==y.numpy()).astype(np.float32))))
print('-'*100)
########################################################################################################################
torch.save([durations,losses,predictions,labels,speakers,source,target],args.fn_res)
print('[Saved '+args.fn_res+']')
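# Illustrative invocations (script name, paths and file names are hypothetical):
#   python speaker_classifier.py --mode train --path_in ./wavs --fn_cla spk_cla.pt --fn_res train_res.pt
#   python speaker_classifier.py --mode test --path_in ./converted --fn_cla spk_cla.pt --fn_res test_res.pt --fn_mask _to_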
|
the-stack_0_21409 | import numpy as np
from tqdm import trange, tqdm
import tensorflow as tf
import copy
from .fedbase import BaseFedarated
from flearn.utils.tf_utils import process_grad, cosine_sim, softmax, norm_grad, l2_clip, get_stdev
from flearn.utils.model_utils import batch_data, gen_batch, gen_epoch, gen_batch_celeba
class Server(BaseFedarated):
def __init__(self, params, learner, dataset):
print('Using fair fed avg to Train')
self.inner_opt = tf.train.GradientDescentOptimizer(params['learning_rate'])
super(Server, self).__init__(params, learner, dataset)
def train(self):
print('Training with {} workers ---'.format(self.clients_per_round))
np.random.seed(1234567+self.seed)
corrupt_id = np.random.choice(range(len(self.clients)), size=self.num_corrupted, replace=False)
print(corrupt_id)
batches = {}
for idx, c in enumerate(self.clients):
if idx in corrupt_id:
c.train_data['y'] = np.asarray(c.train_data['y'])
if self.dataset == 'celeba':
c.train_data['y'] = 1 - c.train_data['y']
elif self.dataset == 'femnist':
c.train_data['y'] = np.random.randint(0, 62, len(c.train_data['y'])) # [0, 62)
elif self.dataset == 'fmnist': # fashion mnist
c.train_data['y'] = np.random.randint(0, 10, len(c.train_data['y']))
if self.dataset == 'celeba':
batches[c] = gen_batch_celeba(c.train_data, self.batch_size, self.num_rounds * self.local_iters + 350)
else:
batches[c] = gen_batch(c.train_data, self.batch_size, self.num_rounds * self.local_iters + 350)
initialization = copy.deepcopy(self.clients[0].get_params())
for i in range(self.num_rounds + 1):
if i % self.eval_every == 0:
num_test, num_correct_test, _ = self.test() # have set the latest model for all clients
num_train, num_correct_train, loss_vector = self.train_error()
avg_loss = np.dot(loss_vector, num_train) / np.sum(num_train)
tqdm.write('At round {} training accu: {}, loss: {}'.format(i, np.sum(num_correct_train) * 1.0 / np.sum(
num_train), avg_loss))
tqdm.write('At round {} test accu: {}'.format(i, np.sum(num_correct_test) * 1.0 / np.sum(num_test)))
non_corrupt_id = np.setdiff1d(range(len(self.clients)), corrupt_id)
tqdm.write('At round {} malicious test accu: {}'.format(i, np.sum(
num_correct_test[corrupt_id]) * 1.0 / np.sum(num_test[corrupt_id])))
tqdm.write('At round {} benign test accu: {}'.format(i, np.sum(
num_correct_test[non_corrupt_id]) * 1.0 / np.sum(num_test[non_corrupt_id])))
print("variance of the performance: ",
np.var(num_correct_test[non_corrupt_id] / num_test[non_corrupt_id]))
indices, selected_clients = self.select_clients(round=i, corrupt_id=corrupt_id,
num_clients=self.clients_per_round)
csolns = []
losses = []
for idx in indices:
c = self.clients[idx]
# communicate the latest model
c.set_params(self.latest_model)
weights_before = copy.deepcopy(self.latest_model)
loss = c.get_loss() # compute loss on the whole training data
losses.append(loss)
for _ in range(self.local_iters):
data_batch = next(batches[c])
_, _, _ = c.solve_sgd(data_batch)
new_weights = c.get_params()
grads = [(u - v) * 1.0 for u, v in zip(new_weights, weights_before)]
if idx in corrupt_id:
if self.boosting: # model replacement
grads = [self.clients_per_round * u for u in grads]
elif self.random_updates:
# send random updates
stdev_ = get_stdev(grads)
grads = [np.random.normal(0, stdev_, size=u.shape) for u in grads]
if self.q > 0:
csolns.append((np.exp(self.q * loss), grads))
else:
csolns.append(grads)
if self.q > 0:
overall_updates = self.aggregate(csolns)
else:
if self.gradient_clipping:
csolns = l2_clip(csolns)
expected_num_mali = int(self.clients_per_round * self.num_corrupted / len(self.clients))
if self.median:
overall_updates = self.median_average(csolns)
elif self.k_norm:
overall_updates = self.k_norm_average(self.clients_per_round - expected_num_mali, csolns)
elif self.k_loss:
overall_updates = self.k_loss_average(self.clients_per_round - expected_num_mali, losses, csolns)
elif self.krum:
overall_updates = self.krum_average(self.clients_per_round - expected_num_mali - 2, csolns)
elif self.mkrum:
m = self.clients_per_round - expected_num_mali
overall_updates = self.mkrum_average(self.clients_per_round - expected_num_mali - 2, m, csolns)
else:
overall_updates = self.simple_average(csolns)
self.latest_model = [(u + v) for u, v in zip(self.latest_model, overall_updates)]
distance = np.linalg.norm(process_grad(self.latest_model) - process_grad(initialization))
if i % self.eval_every == 0:
print('distance to initialization:', distance)
# local finetuning
init_model = copy.deepcopy(self.latest_model)
after_test_accu = []
test_samples = []
for idx, c in enumerate(self.clients):
c.set_params(init_model)
local_model = copy.deepcopy(init_model)
for _ in range(max(int(self.finetune_iters * c.train_samples / self.batch_size), self.finetune_iters)):
c.set_params(local_model)
data_batch = next(batches[c])
_, grads, _ = c.solve_sgd(data_batch)
for j in range(len(grads[1])):
eff_grad = grads[1][j] + self.lam * (local_model[j] - init_model[j])
local_model[j] = local_model[j] - self.learning_rate * self.decay_factor * eff_grad
c.set_params(local_model)
tc, _, num_test = c.test()
after_test_accu.append(tc)
test_samples.append(num_test)
after_test_accu = np.asarray(after_test_accu)
test_samples = np.asarray(test_samples)
tqdm.write('final test accu: {}'.format(np.sum(after_test_accu) * 1.0 / np.sum(test_samples)))
tqdm.write('final malicious test accu: {}'.format(np.sum(
after_test_accu[corrupt_id]) * 1.0 / np.sum(test_samples[corrupt_id])))
tqdm.write('final benign test accu: {}'.format(np.sum(
after_test_accu[non_corrupt_id]) * 1.0 / np.sum(test_samples[non_corrupt_id])))
print("variance of the performance: ",
np.var(after_test_accu[non_corrupt_id] / test_samples[non_corrupt_id]))
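# Minimal sketch (illustration only, assuming layer-wise lists of numpy arrays): a
# coordinate-wise median aggregator in the spirit of the `median_average` call above.
# The real implementation lives in BaseFedarated and may differ.
def _coordwise_median_sketch(updates):
    # updates: list over clients, each a list of per-layer numpy arrays
    num_layers = len(updates[0])
    stacked = [np.stack([client[i] for client in updates], axis=0) for i in range(num_layers)]
    return [np.median(layer, axis=0) for layer in stacked]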
|
the-stack_0_21410 | """
planner/urls.py
"""
# import statements
from django.conf.urls import url
from django.views.generic import ListView
from planner import views
from planner.models import Lawn
from planner.views import LawnDetailView, ProfileUpdate, UserDetailView, UserLawnListView, LawnDeleteView,\
LawnNewView, LawnEditView
from django.contrib.sitemaps.views import sitemap
from .sitemaps import StaticViewSitemap
sitemaps = {
'static': StaticViewSitemap,
}
urlpatterns = [
url(r'^$', views.index, name="index"),
    url(r'^planner/$', LawnNewView.as_view(), name='lawn_new'),
url(r'^planner/lawn/(?P<pk>\d+)/$', LawnDetailView.as_view(), name="lawn_detail"),
url(r'^planner/lawn/(?P<pk>\d+)/edit/$', LawnEditView.as_view(), name='lawn_edit'),
url(r'^planner/lawn/list/$', ListView.as_view(
queryset=Lawn.objects.filter(user__username="examples").order_by('name'),
template_name="planner/lawn_list.html"), name="example_lawn_list"),
url(r'^planner/lawn/(?P<pk>\d+)/delete/$', LawnDeleteView.as_view(), name="lawn_delete"),
url(r'^accounts/profile/$', UserDetailView.as_view(), name="account_profile"),
url(r'^accounts/profile/edit/$', ProfileUpdate.as_view(), name="account_profile_edit"),
url(r'^planner/mylawns/$', UserLawnListView.as_view(), name="user_lawn_list"),
url(r'^sitemap\.xml$', sitemap, {'sitemaps': sitemaps},
name='django.contrib.sitemaps.views.sitemap'),
] |
the-stack_0_21411 | import binascii
import copy
import hashlib
import logging
from memoizer import memoize
import os
import plistlib
from plistlib import _PlistWriter
import re
OUTPUT_DIRECTORY = '_CodeSignature'
OUTPUT_FILENAME = 'CodeResources'
TEMPLATE_FILENAME = 'code_resources_template.xml'
# DIGEST_ALGORITHM = "sha1"
HASH_BLOCKSIZE = 65536
log = logging.getLogger(__name__)
# have to monkey patch Plist, in order to make the values
# look the same - no .0 for floats
# Apple's plist utils work like this:
# 1234.5 ---> <real>1234.5</real>
# 1234.0 ---> <real>1234</real>
def write_value(self, value):
if isinstance(value, float):
if value.is_integer():
rep = repr(int(value))
else:
rep = repr(value)
self.simple_element("real", rep)
else:
self.old_write_value(value)
_PlistWriter.old_write_value = _PlistWriter.write_value
_PlistWriter.write_value = write_value
# Simple reimplementation of ResourceBuilder, in the Apple Open Source
# file bundlediskrep.cpp
class PathRule(object):
OPTIONAL = 0x01
OMITTED = 0x02
NESTED = 0x04
EXCLUSION = 0x10 # unused?
TOP = 0x20 # unused?
def __init__(self, pattern='', properties=None):
# on Mac OS the FS is case-insensitive; simulate that here
self.pattern = re.compile(pattern, re.IGNORECASE)
self.flags = 0
self.weight = 0
if properties is not None:
            if isinstance(properties, bool):
if properties is False:
self.flags |= PathRule.OMITTED
# if it was true, this file is required;
# do nothing
elif isinstance(properties, dict):
for key, value in properties.items():
if key == 'optional' and value is True:
self.flags |= PathRule.OPTIONAL
elif key == 'omit' and value is True:
self.flags |= PathRule.OMITTED
elif key == 'nested' and value is True:
self.flags |= PathRule.NESTED
elif key == 'weight':
self.weight = float(value)
def is_optional(self):
return self.flags & PathRule.OPTIONAL != 0
def is_omitted(self):
return self.flags & PathRule.OMITTED != 0
def is_nested(self):
return self.flags & PathRule.NESTED != 0
def is_exclusion(self):
return self.flags & PathRule.EXCLUSION != 0
def is_top(self):
return self.flags & PathRule.TOP != 0
def matches(self, path):
return re.match(self.pattern, path)
def __str__(self):
return 'PathRule:' + str(self.flags) + ':' + str(self.weight)
class ResourceBuilder(object):
NULL_PATH_RULE = PathRule()
def __init__(self, app_path, rules_data, respect_omissions=False, include_sha256=False):
self.app_path = app_path
self.app_dir = os.path.dirname(app_path)
self.rules = []
self.respect_omissions = respect_omissions
self.include_sha256 = include_sha256
for pattern, properties in rules_data.items():
self.rules.append(PathRule(pattern, properties))
def find_rule(self, path):
best_rule = ResourceBuilder.NULL_PATH_RULE
for rule in self.rules:
# log.debug('trying rule ' + str(rule) + ' against ' + path)
if rule.matches(path):
if rule.flags and rule.is_exclusion():
best_rule = rule
break
elif rule.weight > best_rule.weight:
best_rule = rule
return best_rule
def get_rule_and_paths(self, root, path):
path = os.path.join(root, path)
relative_path = os.path.relpath(path, self.app_dir)
rule = self.find_rule(relative_path)
return (rule, path, relative_path)
def scan(self):
"""
Walk entire directory, compile mapping
path relative to source_dir -> digest and other data
"""
file_entries = {}
# rule_debug_fmt = "rule: {0}, path: {1}, relative_path: {2}"
for root, dirs, filenames in os.walk(self.app_dir):
# log.debug("root: {0}".format(root))
for filename in filenames:
rule, path, relative_path = self.get_rule_and_paths(root,
filename)
# log.debug(rule_debug_fmt.format(rule, path, relative_path))
# specifically ignore the CodeResources symlink in base directory if it exists (iOS 11+ fix)
if relative_path == "CodeResources" and os.path.islink(path):
continue
if rule.is_exclusion():
continue
if rule.is_omitted() and self.respect_omissions is True:
continue
if self.app_path == path:
continue
# in the case of symlinks, we don't calculate the hash but rather add a key for it being a symlink
if os.path.islink(path):
# omit symlinks from files, leave in files2
if not self.respect_omissions:
continue
val = {'symlink': os.readlink(path)}
else:
# the Data element in plists is base64-encoded
val = {'hash': plistlib.Data(get_hash_binary(path))}
if self.include_sha256:
val['hash2'] = plistlib.Data(get_hash_binary(path, 'sha256'))
if rule.is_optional():
val['optional'] = True
if len(val) == 1 and 'hash' in val:
file_entries[relative_path] = val['hash']
else:
file_entries[relative_path] = val
for dirname in dirs:
rule, path, relative_path = self.get_rule_and_paths(root,
dirname)
if rule.is_nested() and '.' not in path:
dirs.remove(dirname)
continue
if relative_path == OUTPUT_DIRECTORY:
dirs.remove(dirname)
return file_entries
def get_template():
"""
Obtain the 'template' plist which also contains things like
default rules about which files should count
"""
current_dir = os.path.dirname(os.path.abspath(__file__))
template_path = os.path.join(current_dir, TEMPLATE_FILENAME)
return plistlib.readPlist(open(template_path, 'rb'))
@memoize
def get_hash_hex(path, hash_type='sha1'):
""" Get the hash of a file at path, encoded as hexadecimal """
if hash_type == 'sha256':
hasher = hashlib.sha256()
elif hash_type == 'sha1':
hasher = hashlib.sha1()
else:
raise ValueError("Incorrect hash type provided: {}".format(hash_type))
with open(path, 'rb') as afile:
buf = afile.read(HASH_BLOCKSIZE)
while len(buf) > 0:
hasher.update(buf)
buf = afile.read(HASH_BLOCKSIZE)
return hasher.hexdigest()
@memoize
def get_hash_binary(path, hash_type='sha1'):
""" Get the hash of a file at path, encoded as binary """
return binascii.a2b_hex(get_hash_hex(path, hash_type))
def write_plist(target_dir, plist):
""" Write the CodeResources file """
output_dir = os.path.join(target_dir, OUTPUT_DIRECTORY)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
output_path = os.path.join(output_dir, OUTPUT_FILENAME)
plistlib.writePlist(plist, open(output_path, 'wb'))
return output_path
def make_seal(source_app_path, target_dir=None):
"""
Given a source app, create a CodeResources file for the
surrounding directory, and write it into the appropriate path in a target
directory
"""
if target_dir is None:
target_dir = os.path.dirname(source_app_path)
template = get_template()
# n.b. code_resources_template not only contains a template of
# what the file should look like; it contains default rules
# deciding which files should be part of the seal
rules = template['rules']
plist = copy.deepcopy(template)
resource_builder = ResourceBuilder(source_app_path, rules, respect_omissions=False)
plist['files'] = resource_builder.scan()
rules2 = template['rules2']
resource_builder2 = ResourceBuilder(source_app_path, rules2, respect_omissions=True, include_sha256=True)
plist['files2'] = resource_builder2.scan()
return write_plist(target_dir, plist)
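if __name__ == "__main__":
    # Illustrative check of PathRule matching/weighting (the pattern and properties
    # below are arbitrary examples, not Apple's actual resource rules).
    _rule = PathRule(r'^.*\.lproj/', {'weight': 1000.0, 'optional': True})
    print(_rule.is_optional(), _rule.weight, bool(_rule.matches('Base.lproj/Main.storyboardc')))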
|
the-stack_0_21412 | # coding=utf8
import calendar
from datetime import date, datetime
class SCalendars:
@staticmethod
def get_last_day_of_this_month(year: int, month: int) -> date:
return date(year, month, calendar.monthrange(year, month)[-1])
@staticmethod
def get_fist_day_of_year_week(year: int, week: int) -> date:
fmt = '{}-W{}-1'.format(year, week)
return datetime.strptime(fmt, "%Y-W%W-%w").date()
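if __name__ == "__main__":
    # Quick illustrative checks (dates chosen arbitrarily).
    print(SCalendars.get_last_day_of_this_month(2020, 2))   # 2020-02-29 (leap year)
    print(SCalendars.get_fist_day_of_year_week(2020, 10))   # Monday of week 10 under %W numbering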
|
the-stack_0_21413 | # **********************************************************************
# Copyright (C) 2020 Johns Hopkins University Applied Physics Laboratory
#
# All Rights Reserved.
# For any other permission, please contact the Legal Office at JHU/APL.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# **********************************************************************
import json
from flask import Blueprint
from shared.config import config
from system.extensions import FlaskExtensions
from system.utils.biocontainers import get_biocontainers
from system.utils.readtypes import get_read_types
info_bp = Blueprint("info", __name__, url_prefix=config.SERVER_API_CHROOT)
mongodb = FlaskExtensions.mongodb
@info_bp.route("/info/classifiers", methods=["GET"])
def get_classifiers():
_, classifiers_info = get_biocontainers()
classifier_links = get_classifiers_links(classifiers_info)
res = json.dumps(dict(data=classifier_links))
return res, 200
def get_classifiers_links(classifier_info):
data = []
for k, v in classifier_info.items():
data.append(dict(name=k, link=v.link))
return data
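# Illustrative response body for /info/classifiers (names and links are hypothetical):
#   {"data": [{"name": "kraken", "link": "https://..."}, {"name": "krakenuniq", "link": "https://..."}]}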
@info_bp.route("/info/classifiers_only", methods=["GET"])
def get_classifiersOnly():
classifier_names, _ = get_biocontainers()
names = get_classifiers_name(classifier_names)
res = json.dumps(names)
return res, 200
def get_classifiers_name(classifier_names):
res = dict(data=classifier_names)
return res
@info_bp.route("/info/read_types", methods=["GET"])
def get_read_type():
_, read_types_info = get_read_types()
read_type_links = get_read_types_links(read_types_info)
res = json.dumps(dict(data=read_type_links))
return res, 200
def get_read_types_links(read_types_info):
data = []
for k, v in read_types_info.items():
data.append(dict(name=k, prodlink=v.producturl, simlink=v.simurl))
return data
@info_bp.route("/info/read_types_only", methods=["GET"])
def get_read_types_only():
read_type_names, _ = get_read_types()
names = get_read_type_names(read_type_names)
res = json.dumps(names)
return res, 200
def get_read_type_names(read_type_names):
res = dict(data=read_type_names)
return res
|
the-stack_0_21415 | import collections
import numpy as np
import itertools
class General:
def __init__(self, g_id, loyalty, post=None):
self.g_id = g_id
self.loyalty = loyalty
self.post = post
self.allMessagesReceived = []
self.previousRoundMessages = []
self.messages = []
def send_message(self, generalToSend, c_mes, round):
mStr = str(self.g_id) + "," + c_mes
if self.loyalty == "T":
ord = mStr[-1]
if round == 0:
if ord == "A":
new_ord = "R"
elif ord == "R":
new_ord = "A"
mStr = mStr.replace(ord, new_ord)
generalToSend.receive_message(mStr)
#print("General {} (loyalty {}) sent message {} to General {}".format(self.g_id, self.loyalty, mStr, generalToSend.g_id))
def cm_send_message(self, generalToSend, order):
mStr = str(self.g_id) + "," + order
generalToSend.receive_message(mStr)
def receive_message(self, messageStr):
self.messages.append(messageStr)
self.allMessagesReceived.append(messageStr)
# Example Inputs
NUM_OF_LEIUTENANTS = 6
COMMANDER_LOYALTY = "L"
loyalties = "LTLLTL"
'''
NUM_OF_LEIUTENANTS = 3
COMMANDER_LOYALTY = "L"
loyalties = "LLT"
'''
'''
NUM_OF_LEIUTENANTS = 9
COMMANDER_LOYALTY = "T"
loyalties = "LLLLTTLLL"
'''
traitorCount = (COMMANDER_LOYALTY + loyalties).count("T")
# The oral-message algorithm only tolerates m traitors when the total number of generals n satisfies n >= 3m + 1
if traitorCount * 3 >= NUM_OF_LEIUTENANTS + 1:
raise ValueError("Assumption Failure: Too many traitors!")
print(traitorCount)
# Initialize Commander
commander = General(0, COMMANDER_LOYALTY, "CM")
# Initialize Generals
generals = []
generals.append(commander)
for i in range(NUM_OF_LEIUTENANTS):
generals.append(General(i+1, loyalties[i], "LT"))
print("Initializing Generals: Starting State ->")
for g in generals:
print(g.__dict__)
print("Total Generals: ", NUM_OF_LEIUTENANTS+1, " | ","Commander Loyalty: ", generals[0].loyalty, " | Traitor Lieutenants: ", loyalties.count("T"))
#Oral Message Algorithm
print("Oral Message Algorithm Starting: Phase 1")
rounds = traitorCount
# OM(m) uses m+1 rounds in total: the commander's broadcast (Phase 1 below) is the
# first round, followed by `traitorCount` relay rounds among the lieutenants.
#Phase 1: Commander's Turn
#print(generals[0].__dict__)
#send initial set of messages including actual order
for gens in generals[1:]:
#print(gens.__dict__)
if commander.loyalty == "L":
#commander is loyal, send same message to all LTs
order = "A"
commander.cm_send_message(gens, order)
elif commander.loyalty == "T":
#commander is traitor, send different message to half
n = len(generals[1:])//2
        halfgens1 = generals[1:n+1]
        halfgens2 = generals[n+1:]
for g in halfgens1:
commander.cm_send_message(g, "A")
for g in halfgens2:
commander.cm_send_message(g, "R")
for g in generals:
g.previousRoundMessages += g.messages
g.messages = []
print(g.__dict__)
for r in range(rounds):
print("\n Round: ",r+1 , "\n \n")
for g in generals:
for m in g.previousRoundMessages:
#print("Message Sent: ", m)
for other_g in generals:
if g.post == "CM" or other_g.post == "CM": continue
if g.g_id == other_g.g_id: continue
if str(other_g.g_id) in m:
#print("Message: ", m, " | Id Found: ", other_g.g_id)
continue
g.send_message(other_g, m, r)
for g in generals:
g.previousRoundMessages = g.messages
g.messages = []
print("General {}: ".format(g.g_id))
print(g.__dict__, "\n")
# Format messages to match assignment
for g in generals[1:]:
res = [x[-3::-1] + x[-2:] for x in g.allMessagesReceived if len(x) >= 3]
g.allMessagesReceived = []
g.allMessagesReceived += res
#Phase 2: Consensus
print('Phase 2: Consensus \n')
# Message Grouping
for g in generals[1:]:
util_func = lambda x: x[0:5]
temp = sorted(g.allMessagesReceived, key = util_func)
res = [list(ele) for i, ele in itertools.groupby(temp, util_func)]
#print(g.allMessagesReceived)
#print(res)
consensusList = [] # 'A', 'R'
for r in res:
print(r)
rConsensusCommands = [s[-1] for s in r]
print(rConsensusCommands)
res_con = collections.Counter(rConsensusCommands)
aCons, rCons = res_con['A'], res_con['R']
#print("Consensus A: ", aCons, " Consensus R: ", rCons)
if aCons > rCons:
consensusList.append('A')
else:
consensusList.append('R')
superConsensus = collections.Counter(consensusList)
saCons, srCons = superConsensus['A'], superConsensus['R']
if saCons > srCons:
g.consensus = 'A'
else:
g.consensus = 'R'
#print(consensusList)
print("\n General {} Consensus: {}".format(g.g_id, g.consensus))
print("#########################")
'''
# Naive Consensus (takes consensus over all messages received)
for g in generals[1:]:
consensusCommands = [s[-1] for s in g.allMessagesReceived]
#print("General {} (Loyalty {}) Consensus Str: {}".format(g.g_id, g.loyalty, consensusCommands))
cons = collections.Counter(consensusCommands)
aCons, rCons = cons['A'], cons['R']
if aCons > rCons:
g.consensus = "A"
print("General {} (Loyalty {}) Consensus Reached. Command: {}".format(g.g_id, g.loyalty, "A"))
else:
g.consensus = "R"
print("General {} (Loyalty {}) Consensus Reached. Command: {}".format(g.g_id, g.loyalty, "R"))
'''
print("Final Consensus ->")
for g in generals[1:]:
print("General {} (Loyalty {}) Command: {}".format(g.g_id, g.loyalty, g.consensus))
|
the-stack_0_21420 | """
Module holds all stuff regarding Gatling tool usage
Copyright 2015 BlazeMeter Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import codecs
import json
import os
import re
import time
from collections import defaultdict
from distutils.version import LooseVersion
from bzt import TaurusConfigError, ToolError
from bzt.engine import ScenarioExecutor, Scenario, FileLister, HavingInstallableTools, SelfDiagnosable
from bzt.modules.aggregator import ConsolidatingAggregator, ResultsReader
from bzt.modules.console import WidgetProvider, ExecutorWidget
from bzt.requests_model import HTTPRequest
from bzt.six import string_types, numeric_types, PY2
from bzt.utils import TclLibrary, EXE_SUFFIX, dehumanize_time, get_full_path, FileReader, RESOURCES_DIR, BetterDict
from bzt.utils import simple_body_dict, CALL_PROBLEMS
from bzt.utils import unzip, RequiredTool, JavaVM, shutdown_process, ensure_is_dict, is_windows
def is_gatling2(ver):
return LooseVersion(ver) < LooseVersion("3")
class GatlingScriptBuilder(object):
def __init__(self, load, scenario, parent_logger, class_name, gatling_version=None):
super(GatlingScriptBuilder, self).__init__()
self.log = parent_logger.getChild(self.__class__.__name__)
self.load = load
self.feeder_names = {}
self.scenario = scenario
self.class_name = class_name
if gatling_version is None:
self.gatling_version = Gatling.VERSION
else:
self.gatling_version = gatling_version
# add prefix 'http://' if user forgot it
@staticmethod
def fixed_addr(addr):
if len(addr) > 0 and not addr.startswith('http'):
return 'http://' + addr
else:
return addr
@staticmethod
def indent(text, level):
return " " * level + text
def _get_http(self):
default_address = self.scenario.get("default-address", "")
http_str = '("%(addr)s")\n' % {'addr': self.fixed_addr(default_address)}
if self.scenario.get("retrieve-resources", False):
regex = self.scenario.get("retrieve-resources-regex")
params = 'BlackList(), WhiteList("""%s""")' % regex if regex else ""
http_str += self.indent(".inferHtmlResources(%s)\n" % params, level=2)
if not self.scenario.get('store-cache', True):
http_str += self.indent('.disableCaching\n', level=2)
scenario_headers = self.scenario.get_headers()
for key in scenario_headers:
http_str += self.indent('.header("%(key)s", "%(val)s")\n' % {'key': key, 'val': scenario_headers[key]},
level=2)
return http_str
def _get_exec(self):
exec_str = ''
for req in self.scenario.get_requests():
if not isinstance(req, HTTPRequest):
msg = "Gatling simulation generator doesn't support '%s' blocks, skipping"
self.log.warning(msg, req.NAME)
continue
if len(exec_str) > 0:
exec_str += '.'
default_address = self.scenario.get("default-address")
if default_address:
url = req.url
else:
url = self.fixed_addr(req.url)
exec_str += 'exec(\n'
exec_template = self.indent('http("%(req_label)s").%(method)s("%(url)s")\n', level=2)
exec_str += exec_template % {'req_label': req.label, 'method': req.method.lower(), 'url': url}
for key in req.headers:
exec_template = self.indent('.header("%(key)s", "%(val)s")\n', level=3)
exec_str += exec_template % {'key': key, 'val': req.headers[key]}
# todo: join with the same in get_sampler_pair
if isinstance(req.body, (dict, list, numeric_types)):
if req.get_header('content-type') == 'application/json' or isinstance(req.body, numeric_types):
req.body = json.dumps(req.body)
elif not simple_body_dict(req.body):
self.log.debug('Header "Content-Type: application/json" is required for body: "%s"', req.body)
req.body = json.dumps(req.body)
if isinstance(req.body, string_types):
exec_str += self.indent('.body(%(method)s("""%(body)s"""))\n', level=3)
exec_str = exec_str % {'method': 'StringBody', 'body': req.body}
elif isinstance(req.body, dict):
for key in sorted(req.body.keys()):
exec_str += self.indent('.formParam("%(key)s", "%(val)s")\n', level=3)
exec_str = exec_str % {'key': key, 'val': req.body[key]}
elif req.body is not None:
self.log.warning("Unknown body type: %s", req.body)
exec_str += self.__get_assertions(req.config.get('assert', []))
if not req.priority_option('follow-redirects', default=True):
exec_str += self.indent('.disableFollowRedirect\n', level=3)
exec_str += self.indent(')', level=1)
think_time = int(dehumanize_time(req.get_think_time()))
if think_time:
exec_str += '.pause(%(think_time)s)' % {'think_time': think_time}
return exec_str
@staticmethod
def __get_check_template(assertion):
a_not = assertion.get('not', False)
a_regexp = assertion.get('regexp', False)
a_subject = assertion.get('subject', Scenario.FIELD_BODY)
if a_subject == Scenario.FIELD_RESP_CODE:
if a_not:
res = 'status.not(%(sample)s)'
else:
res = 'status.is(%(sample)s)'
elif a_subject == Scenario.FIELD_HEADERS:
res = ''
else: # FIELD_BODY
if a_regexp:
res = 'regex("""%(sample)s""").'
else:
res = 'substring("""%(sample)s""").'
if a_not:
res += 'notExists'
else:
res += 'exists'
return res
def __get_assertions(self, assertions):
if len(assertions) == 0:
return ''
first_check = True
check_result = self.indent('.check(\n', level=3)
for idx, assertion in enumerate(assertions):
assertion = ensure_is_dict(assertions, idx, "contains")
error_str = 'You must specify "contains" parameter for assertion item'
a_contains = assertion.get('contains', TaurusConfigError(error_str))
check_template = self.__get_check_template(assertion)
if check_template == '': # FIELD_HEADERS
self.log.warning('Sorry, but "headers" subject is not implemented for gatling asserts')
return ''
if not isinstance(a_contains, list):
a_contains = [a_contains]
for sample in a_contains:
if not first_check:
check_result += ',\n'
check_result += self.indent(check_template % {'sample': sample}, level=4)
first_check = False
check_result += '\n' + self.indent(')', level=3) + '\n'
return check_result
def _get_feeder_name(self, source_filename):
base_feeder_name = ".".join(os.path.basename(source_filename).split(".")[:-1])
base_feeder_name = re.sub(r'[^A-Za-z0-9_]', '', base_feeder_name) + "Feed"
index = 0
feeder_name = base_feeder_name
while feeder_name in self.feeder_names and self.feeder_names[feeder_name] != source_filename:
index += 1
feeder_name = base_feeder_name + "_%s" % index
if feeder_name not in self.feeder_names:
self.feeder_names[feeder_name] = source_filename
return feeder_name
def _get_feeders(self):
feeders_def = ""
feeding = ""
for source in self.scenario.get_data_sources():
path = self.scenario.engine.find_file(source["path"])
delimiter = source.get('delimiter', None)
loop_over = source.get("loop", True)
var_name = self._get_feeder_name(path)
params = dict(varname=var_name, filename=path, delimiter=delimiter)
if delimiter is not None:
tpl = """val %(varname)s = separatedValues("%(filename)s", '%(delimiter)s')"""
else:
tpl = 'val %(varname)s = csv("%(filename)s")'
line = self.indent(tpl % params, level=1)
if loop_over:
line += '.circular'
feeders_def += line + '\n'
feeding += "feed(%s)." % var_name
if feeders_def:
feeders_def = '\n' + feeders_def
return feeders_def, feeding
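    # For example (illustrative): a data source {"path": "users.csv", "delimiter": ","}
    # roughly yields `val usersFeed = separatedValues("<resolved path>", ',').circular`
    # and prefixes the scenario chain with `feed(usersFeed).`.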
def gen_test_case(self):
if is_gatling2(self.gatling_version):
version = 2
else:
version = 3
template_path = os.path.join(RESOURCES_DIR, "gatling", ("v%s_script.tpl" % version))
with open(template_path) as template_file:
template_line = template_file.read()
feeders_def, feeding = self._get_feeders()
params = {
'class_name': self.class_name,
'httpConf': self._get_http(),
'_exec': self._get_exec(),
'feeders': feeders_def,
'feeding': feeding,
}
return template_line % params
class GatlingExecutor(ScenarioExecutor, WidgetProvider, FileLister, HavingInstallableTools, SelfDiagnosable):
"""
Gatling executor module
"""
def __init__(self):
super(GatlingExecutor, self).__init__()
self.script = None
self.process = None
self.end_time = None
self.retcode = None
self.simulation_started = False
self.dir_prefix = "gatling-%s" % id(self)
self.tool = None
def get_cp_from_files(self):
jar_files = []
files = self.execution.get('files', [])
for candidate in files:
candidate = self.engine.find_file(candidate)
if os.path.isfile(candidate) and candidate.lower().endswith('.jar'):
jar_files.append(candidate)
elif os.path.isdir(candidate):
for element in os.listdir(candidate):
element = os.path.join(candidate, element)
if os.path.isfile(element) and element.lower().endswith('.jar'):
jar_files.append(element)
return jar_files
def get_additional_classpath(self):
cp = self.get_scenario().get("additional-classpath", [])
cp.extend(self.settings.get("additional-classpath", []))
return cp
def prepare(self):
super(GatlingExecutor, self).prepare()
self.install_required_tools()
scenario = self.get_scenario()
self.env.set({"GATLING_HOME": self.tool.tool_dir})
cpath = self.get_additional_classpath()
self.log.debug("Classpath for Gatling: %s", cpath)
for element in cpath:
self.env.add_path({"JAVA_CLASSPATH": element})
self.env.add_path({"COMPILATION_CLASSPATH": element})
new_name = self.engine.create_artifact('gatling-launcher', EXE_SUFFIX)
self.log.debug("Building Gatling launcher: %s", new_name)
self.tool.build_launcher(new_name)
self.script = self.get_script_path()
if not self.script:
if "requests" in scenario:
self.get_scenario()['simulation'], self.script = self.__generate_script()
else:
msg = "There must be a script file or requests for its generation "
msg += "to run Gatling tool (%s)" % self.execution.get('scenario')
raise TaurusConfigError(msg)
self.dir_prefix = self.settings.get("dir-prefix", self.dir_prefix)
self.stdout = open(self.engine.create_artifact("gatling", ".out"), "w")
self.stderr = open(self.engine.create_artifact("gatling", ".err"), "w")
self.reader = DataLogReader(self.engine.artifacts_dir, self.log, self.dir_prefix)
if isinstance(self.engine.aggregator, ConsolidatingAggregator):
self.engine.aggregator.add_underling(self.reader)
def __generate_script(self):
simulation = "TaurusSimulation_%s" % id(self)
file_name = self.engine.create_artifact(simulation, ".scala")
gen_script = GatlingScriptBuilder(self.get_load(), self.get_scenario(), self.log, simulation, self.tool.version)
with codecs.open(file_name, 'w', encoding='utf-8') as script:
script.write(gen_script.gen_test_case())
return simulation, file_name
def _get_simulation_props(self):
props = {}
if os.path.isfile(self.script):
if self.script.endswith('.jar'):
self.env.add_path({"JAVA_CLASSPATH": self.script})
self.env.add_path({"COMPILATION_CLASSPATH": self.script})
else:
props['gatling.core.directory.simulations'] = get_full_path(self.script, step_up=1)
else:
props['gatling.core.directory.simulations'] = self.script
simulation = self.get_scenario().get("simulation")
if simulation:
props['gatling.core.simulationClass'] = simulation
else:
props['gatling.core.runDescription'] = "Taurus_Test"
return props
def _get_load_props(self):
load = self.get_load()
props = {}
if load.concurrency:
props['concurrency'] = load.concurrency
if load.ramp_up is not None:
props['ramp-up'] = int(load.ramp_up)
if load.hold is not None:
props['hold-for'] = int(load.hold)
if load.iterations:
props['iterations'] = int(load.iterations)
if load.throughput:
if load.duration:
props['throughput'] = load.throughput
else:
self.log.warning("You should set up 'ramp-up' and/or 'hold-for' for usage of 'throughput'")
return props
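    # For example (illustrative): concurrency: 10, ramp-up: 60s, hold-for: 2m end up as
    # -Dconcurrency=10 -Dramp-up=60 -Dhold-for=120 JVM properties via _set_env() below.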
def _get_scenario_props(self):
props = {}
scenario = self.get_scenario()
timeout = scenario.get('timeout', None)
if timeout is not None:
props['gatling.http.ahc.requestTimeout'] = int(dehumanize_time(timeout) * 1000)
if scenario.get('keepalive', True):
# gatling <= 2.2.0
props['gatling.http.ahc.allowPoolingConnections'] = 'true'
props['gatling.http.ahc.allowPoolingSslConnections'] = 'true'
# gatling > 2.2.0
props['gatling.http.ahc.keepAlive'] = 'true'
else:
# gatling <= 2.2.0
props['gatling.http.ahc.allowPoolingConnections'] = 'false'
props['gatling.http.ahc.allowPoolingSslConnections'] = 'false'
# gatling > 2.2.0
props['gatling.http.ahc.keepAlive'] = 'false'
return props
def _set_env(self):
props = BetterDict()
props.merge(self.settings.get('properties'))
props.merge(self.get_scenario().get("properties"))
props['gatling.core.outputDirectoryBaseName'] = self.dir_prefix
props['gatling.core.directory.resources'] = self.engine.artifacts_dir
props['gatling.core.directory.results'] = self.engine.artifacts_dir
props.merge(self._get_simulation_props())
props.merge(self._get_load_props())
props.merge(self._get_scenario_props())
for key in sorted(props.keys()):
prop = props[key]
val_tpl = "%s"
if isinstance(prop, string_types):
                if not is_windows():  # extend properties support (values containing separators/quotes/etc.) on Linux/macOS
val_tpl = "%r"
if PY2:
prop = prop.encode("utf-8", 'ignore') # to convert from unicode into str
self.env.add_java_param({"JAVA_OPTS": ("-D%s=" + val_tpl) % (key, prop)})
self.env.set({"NO_PAUSE": "TRUE"})
self.env.add_java_param({"JAVA_OPTS": self.settings.get("java-opts", None)})
self.log.debug('JAVA_OPTS: "%s"', self.env.get("JAVA_OPTS"))
def startup(self):
self._set_env()
self.process = self._execute(self._get_cmdline(), pgrp=False)
def _get_cmdline(self):
cmdline = [self.tool.tool_path]
if is_gatling2(self.tool.version):
cmdline += ["-m"] # default for 3.0.0
return cmdline
def check(self):
"""
Checks if tool is still running. Also checks if resulting logs contains
any data and throws exception otherwise.
:return: bool
:raise TaurusConfigError:
:raise TaurusToolError:
"""
self.retcode = self.process.poll()
        # detect interactive mode and raise an exception if it is found
if not self.simulation_started:
wrong_line = "Choose a simulation number:"
with open(self.stdout.name) as out:
file_header = out.read(1024)
if wrong_line in file_header: # gatling can't select test scenario
scenarios = file_header[file_header.find(wrong_line) + len(wrong_line):].rstrip()
msg = 'Several gatling simulations are found, you must '
msg += 'specify one of them to use in "simulation" option: %s' % scenarios
raise TaurusConfigError(msg)
if 'started...' in file_header:
self.simulation_started = True
if self.retcode is None:
return False
elif self.retcode == 0:
return True
else:
raise ToolError("Gatling tool exited with non-zero code: %s" % self.retcode, self.get_error_diagnostics())
def shutdown(self):
"""
If tool is still running - let's stop it.
"""
shutdown_process(self.process, self.log)
if self.start_time:
self.end_time = time.time()
self.log.debug("Gatling worked for %s seconds", self.end_time - self.start_time)
def post_process(self):
"""
Save data log as artifact
"""
if self.reader and self.reader.file and self.reader.file.name:
self.engine.existing_artifact(self.reader.file.name)
super(GatlingExecutor, self).post_process()
def install_required_tools(self):
self.tool = self._get_tool(Gatling, config=self.settings)
java = self._get_tool(JavaVM)
required_tools = [self._get_tool(TclLibrary), java, self.tool]
for tool in required_tools:
if not tool.check_if_installed():
tool.install()
# old gatling compiler (zinc) is incompatible with new jre
new_java = java.version and int(java.version) > 8
if is_gatling2(self.tool.version) and new_java:
self.log.warning('Gatling v%s is incompatible with Java %s', self.tool.version, java.version)
def get_widget(self):
if not self.widget:
simulation = self.get_scenario().get('simulation', None)
if simulation == "TaurusSimulation_%s" % id(self):
simulation = 'generated script'
if simulation is None:
simulation = os.path.basename(self.script)
self.widget = ExecutorWidget(self, 'Gatling: %s' % simulation)
return self.widget
def resource_files(self):
files = []
script = self.get_script_path()
if script:
files.append(script)
else:
for source in self.get_scenario().get_data_sources():
source_path = self.engine.find_file(source["path"])
files.append(source_path)
files.extend(self.get_additional_classpath())
return files
def get_error_diagnostics(self):
diagnostics = []
if self.stdout is not None:
with open(self.stdout.name) as fds:
contents = fds.read().strip()
if contents.strip():
diagnostics.append("Gatling STDOUT:\n" + contents)
if self.stderr is not None:
with open(self.stderr.name) as fds:
contents = fds.read().strip()
if contents.strip():
diagnostics.append("Gatling STDERR:\n" + contents)
if self.reader and self.reader.file and self.reader.file.name:
with open(self.reader.file.name) as fds:
contents = fds.read().strip()
if contents.strip():
diagnostics.append("Simulation log:\n" + contents)
return diagnostics
class DataLogReader(ResultsReader):
""" Class to read KPI from data log """
def __init__(self, basedir, parent_logger, dir_prefix):
super(DataLogReader, self).__init__()
self.concurrency = 0
self.log = parent_logger.getChild(self.__class__.__name__)
self.basedir = basedir
self.file = FileReader(file_opener=self.open_fds, parent_logger=self.log)
self.partial_buffer = ""
self.delimiter = "\t"
self.dir_prefix = dir_prefix
self.guessed_gatling_version = None
self._group_errors = defaultdict(lambda: defaultdict(set))
def _extract_log_gatling_21(self, fields):
"""
Extract stats from Gatling 2.1 format.
:param fields:
:return:
"""
# $scenario $userId ${RequestRecordHeader.value}
# ${serializeGroups(groupHierarchy)} $name
# 5requestStartDate 6requestEndDate
# 7responseStartDate 8responseEndDate
# 9status
# ${serializeMessage(message)}${serializeExtraInfo(extraInfo)}$Eol"
if fields[2].strip() == "USER":
if fields[3].strip() == "START":
self.concurrency += 1
elif fields[3].strip() == "END":
self.concurrency -= 1
if fields[2].strip() != "REQUEST":
return None
label = fields[4]
t_stamp = int(fields[8]) / 1000.0
r_time = (int(fields[8]) - int(fields[5])) / 1000.0
latency = (int(fields[7]) - int(fields[6])) / 1000.0
con_time = (int(fields[6]) - int(fields[5])) / 1000.0
if fields[-1] == 'OK':
r_code = '200'
else:
_tmp_rc = fields[-1].split(" ")[-1]
r_code = _tmp_rc if _tmp_rc.isdigit() else 'No RC'
if len(fields) >= 11 and fields[10]:
error = fields[10]
else:
error = None
return int(t_stamp), label, r_time, con_time, latency, r_code, error
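    # Illustrative note (not from the original source): for a 2.1 REQUEST record
    # this returns a tuple shaped like
    #   (t_stamp_sec, label, r_time_sec, con_time_sec, latency_sec, r_code, error)
    # e.g. roughly (1626299760, 'login', 0.532, 0.041, 0.187, '200', None).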
def _extract_log_gatling_22(self, fields):
"""
Extract stats from Gatling 2.2 format
:param fields:
:return:
"""
# 0 ${RequestRecordHeader.value}
# 1 $scenario
# 2 $userId
# 3 ${serializeGroups(groupHierarchy)}
# 4 $label
# 5 $startTimestamp
# 6 $endTimestamp
# 7 $status
# [8] ${serializeMessage(message)}${serializeExtraInfo(extraInfo)}
if fields[0].strip() == "USER":
user_id = fields[2]
if fields[3].strip() == "START":
self.concurrency += 1
self._group_errors[user_id].clear()
elif fields[3].strip() == "END":
self.concurrency -= 1
self._group_errors.pop(user_id)
if fields[0].strip() == "GROUP":
return self.__parse_group(fields)
elif fields[0].strip() == "REQUEST":
del fields[0]
if self.guessed_gatling_version != "3.X":
del fields[0]
return self.__parse_request(fields)
else:
return None
def __parse_group(self, fields):
latency = 0.0
con_time = 0.0
if len(fields) < 4:
label = ""
t_stamp = int(fields[2]) / 1000.0
r_time = 0
error = fields[1]
r_code = "N/A"
else:
if self.guessed_gatling_version != "3.X":
del fields[1]
user_id = fields[1]
label = fields[2]
if ',' in label:
return None # skip nested groups for now
t_stamp = int(fields[4]) / 1000.0
r_time = int(fields[5]) / 1000.0
if label in self._group_errors[user_id]:
error = ';'.join(self._group_errors[user_id].pop(label))
else:
error = None
if fields[6] == 'OK':
r_code = '200'
else:
r_code = self.__rc_from_msg(fields[-1])
assert error, label
return int(t_stamp), label, r_time, con_time, latency, r_code, error
def __parse_request(self, fields):
# see LogFileDataWriter.ResponseMessageSerializer in gatling-core
if len(fields) >= 7 and fields[6]:
error = fields[6]
else:
error = None
req_hierarchy = fields[1].split(',')[0]
if req_hierarchy:
user_id = fields[0]
if error:
self._group_errors[user_id][req_hierarchy].add(error)
return None
label = fields[2]
t_stamp = int(fields[4]) / 1000.0
r_time = (int(fields[4]) - int(fields[3])) / 1000.0
latency = 0.0
con_time = 0.0
if fields[5] == 'OK':
r_code = '200'
else:
r_code = self.__rc_from_msg(fields[-1])
return int(t_stamp), label, r_time, con_time, latency, r_code, error
def __rc_from_msg(self, msg):
_tmp_rc = msg.split("but actually ")[-1] # gatling-core/src/main/scala/io/gatling/core/check/Validator.scala
if _tmp_rc.startswith("unexpectedly "):
_tmp_rc = _tmp_rc[len("unexpectedly "):]
if _tmp_rc.startswith("found "):
_tmp_rc = _tmp_rc[len("found "):]
parts = _tmp_rc.split(' ')
if len(parts) > 1 and parts[1] == 'is':
_tmp_rc = parts[0]
return _tmp_rc if _tmp_rc.isdigit() else 'N/A'
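    # Illustrative example (hypothetical check message): for a failure such as
    #   "... status.find.is(200), but actually unexpectedly found 503"
    # the method strips "unexpectedly " and "found ", leaving '503'; any
    # non-numeric remainder falls back to 'N/A'.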
def _guess_gatling_version(self, fields):
if fields and fields[-1].strip().startswith("3"):
return "3.X"
elif fields[0].strip() in ["USER", "REQUEST", "RUN"]:
self.log.debug("Parsing Gatling 2.2+ stats")
return "2.2+"
elif len(fields) >= 3 and fields[2].strip() in ["USER", "REQUEST", "RUN"]:
self.log.debug("Parsing Gatling 2.1 stats")
return "2.1"
else:
return None
def _extract_log_data(self, fields):
if self.guessed_gatling_version is None:
self.guessed_gatling_version = self._guess_gatling_version(fields)
if self.guessed_gatling_version == "2.1":
return self._extract_log_gatling_21(fields)
elif self.guessed_gatling_version in ["2.2+", "3.X"]:
return self._extract_log_gatling_22(fields)
else:
return None
def _read(self, last_pass=False):
"""
Generator method that returns next portion of data
:param last_pass:
"""
lines = self.file.get_lines(size=1024 * 1024, last_pass=last_pass)
for line in lines:
if not line.endswith("\n"):
self.partial_buffer += line
continue
line = "%s%s" % (self.partial_buffer, line)
self.partial_buffer = ""
line = line.strip()
fields = line.split(self.delimiter)
data = self._extract_log_data(fields)
if data is None:
continue
t_stamp, label, r_time, con_time, latency, r_code, error = data
bytes_count = None
yield t_stamp, label, self.concurrency, r_time, con_time, latency, r_code, error, '', bytes_count
def open_fds(self, filename):
"""
open gatling simulation.log
"""
if os.path.isdir(self.basedir):
prog = re.compile("^%s-[0-9]+$" % self.dir_prefix)
for fname in os.listdir(self.basedir):
if prog.match(fname):
filename = os.path.join(self.basedir, fname, "simulation.log")
break
if not filename or not os.path.isfile(filename):
self.log.debug('simulation.log not found')
return
elif os.path.isfile(self.basedir):
filename = self.basedir
else:
self.log.debug('Path not found: %s', self.basedir)
return
if not os.path.getsize(filename):
self.log.debug('simulation.log is empty')
else:
return open(filename, 'rb')
class Gatling(RequiredTool):
"""
Gatling tool
"""
DOWNLOAD_LINK = "https://repo1.maven.org/maven2/io/gatling/highcharts/gatling-charts-highcharts-bundle" \
"/{version}/gatling-charts-highcharts-bundle-{version}-bundle.zip"
VERSION = "3.1.2"
LOCAL_PATH = "~/.bzt/gatling-taurus/{version}/bin/gatling{suffix}"
def __init__(self, config=None, **kwargs):
settings = config or {}
version = settings.get("version", self.VERSION)
def_path = self.LOCAL_PATH.format(version=version, suffix=EXE_SUFFIX)
gatling_path = get_full_path(settings.get("path", def_path))
download_link = settings.get("download-link", self.DOWNLOAD_LINK).format(version=version)
super(Gatling, self).__init__(tool_path=gatling_path, download_link=download_link, version=version, **kwargs)
self.tool_dir = get_full_path(self.tool_path, step_up=2)
def check_if_installed(self):
self.log.debug("Trying Gatling...")
try:
out, err = self.call([self.tool_path, '--help'])
self.log.debug("Gatling check output: %s", out)
except CALL_PROBLEMS as exc:
self.log.info("Gatling check failed: %s", exc)
return False
if err:
self.log.warning("Gatling check stderr: %s", err)
return True
def install(self):
dest = get_full_path(self.tool_path, step_up=2)
self.log.info("Will install %s into %s", self.tool_name, dest)
gatling_dist = self._download(use_link=True)
self.log.info("Unzipping %s", gatling_dist)
unzip(gatling_dist, dest, 'gatling-charts-highcharts-bundle-' + self.version)
os.remove(gatling_dist)
os.chmod(get_full_path(self.tool_path), 0o755)
self.log.info("Installed Gatling successfully")
if not self.check_if_installed():
raise ToolError("Unable to run %s after installation!" % self.tool_name)
def build_launcher(self, new_name): # legacy, for v2 only
def convert_v2():
modified_lines = []
mod_success = False
with open(self.tool_path) as fds:
for line in fds.readlines():
if is_windows():
if line.startswith('set COMPILATION_CLASSPATH=""'):
mod_success = True
continue # don't add it to modified_lines - just remove
else:
if line.startswith('COMPILATION_CLASSPATH='):
mod_success = True
line = line.rstrip() + ':"${COMPILATION_CLASSPATH}"\n' # add from env
elif line.startswith('"$JAVA"'):
line = 'eval ' + line
modified_lines.append(line)
if not mod_success:
raise ToolError("Can't modify gatling launcher for jar usage, ability isn't supported")
return modified_lines
def convert_v3():
modified_lines = []
mod_success = False
with open(self.tool_path) as fds:
for line in fds.readlines():
if is_windows():
if line.startswith('set COMPILER_CLASSPATH='):
mod_success = True
line = line.rstrip() + ';%COMPILATION_CLASSPATH%\n' # add from env
elif line.startswith('set GATLING_CLASSPATH='):
mod_success = True
line = line.rstrip() + ';%JAVA_CLASSPATH%\n' # add from env
else:
if line.startswith('COMPILER_CLASSPATH='):
mod_success = True
line = line.rstrip()[:-1] + '${COMPILATION_CLASSPATH}"\n' # add from env
elif line.startswith('GATLING_CLASSPATH='):
mod_success = True
line = line.rstrip()[:-1] + '${JAVA_CLASSPATH}"\n' # add from env
elif line.startswith('"$JAVA"'):
line = 'eval ' + line
modified_lines.append(line)
if not mod_success:
raise ToolError("Can't modify gatling launcher for jar usage, ability isn't supported")
return modified_lines
if is_gatling2(self.version):
converted_lines = convert_v2()
else:
converted_lines = convert_v3()
self.tool_path = new_name
with open(self.tool_path, 'w') as modified:
modified.writelines(converted_lines)
if not is_windows():
os.chmod(self.tool_path, 0o755)
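    # Rough illustration (assuming the stock v3 launcher contains a line like
    #   GATLING_CLASSPATH="$GATLING_HOME/lib/*"
    # on Linux/macOS): convert_v3() rewrites it to
    #   GATLING_CLASSPATH="$GATLING_HOME/lib/*${JAVA_CLASSPATH}"
    # so extra jars can be injected via the JAVA_CLASSPATH environment variable.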
|
the-stack_0_21421 | # mypy: allow_untyped_decorators
import collections
import os.path
import pathlib
import subprocess
import typing
import attr
import click
import pendulum
import plotman.job
import plotman.plotters
@attr.frozen
class Options:
executable: str = "chia_plot"
n_threads: int = 4
n_buckets: int = 256
n_buckets3: int = 256
n_rmulti2: int = 1
def check_configuration(
options: Options, pool_contract_address: typing.Optional[str]
) -> None:
if pool_contract_address is not None:
completed_process = subprocess.run(
args=[options.executable, "--help"],
capture_output=True,
check=True,
encoding="utf-8",
)
if "--contract" not in completed_process.stdout:
raise Exception(
f"found madMAx version does not support the `--contract`"
f" option for pools."
)
def create_command_line(
options: Options,
tmpdir: str,
tmp2dir: typing.Optional[str],
dstdir: str,
farmer_public_key: typing.Optional[str],
pool_public_key: typing.Optional[str],
pool_contract_address: typing.Optional[str],
) -> typing.List[str]:
args = [
options.executable,
"-n",
str(1),
"-r",
str(options.n_threads),
"-u",
str(options.n_buckets),
"-t",
tmpdir if tmpdir.endswith("/") else (tmpdir + "/"),
"-d",
dstdir if dstdir.endswith("/") else (dstdir + "/"),
]
if tmp2dir is not None:
args.append("-2")
args.append(tmp2dir if tmp2dir.endswith("/") else (tmp2dir + "/"))
if options.n_buckets3 is not None:
args.append("-v")
args.append(str(options.n_buckets3))
if options.n_rmulti2 is not None:
args.append("-K")
args.append(str(options.n_rmulti2))
if farmer_public_key is not None:
args.append("-f")
args.append(farmer_public_key)
if pool_public_key is not None:
args.append("-p")
args.append(pool_public_key)
if pool_contract_address is not None:
args.append("-c")
args.append(pool_contract_address)
return args
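# Rough usage sketch (hypothetical paths and keys, not from the original source):
#   create_command_line(Options(), "/mnt/tmp", None, "/mnt/dst", "fk...", None, None)
# would return something like
#   ['chia_plot', '-n', '1', '-r', '4', '-u', '256', '-t', '/mnt/tmp/',
#    '-d', '/mnt/dst/', '-v', '256', '-K', '1', '-f', 'fk...']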
# @plotman.plotters.ProtocolChecker[plotman.plotters.SpecificInfo]()
@plotman.plotters.check_SpecificInfo
@attr.frozen
class SpecificInfo:
process_id: typing.Optional[int] = None
phase: plotman.job.Phase = plotman.job.Phase(known=False)
started_at: typing.Optional[pendulum.DateTime] = None
plot_id: str = ""
p1_buckets: int = 0
p34_buckets: int = 0
threads: int = 0
# buffer: int = 0
plot_size: int = 0
tmp_dir: str = ""
tmp2_dir: str = ""
dst_dir: str = ""
phase1_duration_raw: float = 0
phase2_duration_raw: float = 0
phase3_duration_raw: float = 0
phase4_duration_raw: float = 0
total_time_raw: float = 0
# copy_time_raw: float = 0
filename: str = ""
plot_name: str = ""
def common(self) -> plotman.plotters.CommonInfo:
return plotman.plotters.CommonInfo(
type="madmax",
dstdir=self.dst_dir,
phase=self.phase,
tmpdir=self.tmp_dir,
tmp2dir=self.tmp2_dir,
started_at=self.started_at,
plot_id=self.plot_id,
plot_size=self.plot_size,
# TODO: handle p34_buckets as well somehow
buckets=self.p1_buckets,
threads=self.threads,
phase1_duration_raw=self.phase1_duration_raw,
phase2_duration_raw=self.phase2_duration_raw,
phase3_duration_raw=self.phase3_duration_raw,
phase4_duration_raw=self.phase4_duration_raw,
total_time_raw=self.total_time_raw,
filename=self.filename,
)
@plotman.plotters.check_Plotter
@attr.mutable
class Plotter:
decoder: plotman.plotters.LineDecoder = attr.ib(
factory=plotman.plotters.LineDecoder
)
info: SpecificInfo = attr.ib(factory=SpecificInfo)
parsed_command_line: typing.Optional[
plotman.job.ParsedChiaPlotsCreateCommand
] = None
@classmethod
def identify_log(cls, line: str) -> bool:
return "Multi-threaded pipelined Chia" in line
@classmethod
def identify_process(cls, command_line: typing.List[str]) -> bool:
if len(command_line) == 0:
return False
return "chia_plot" == os.path.basename(command_line[0]).lower()
def common_info(self) -> plotman.plotters.CommonInfo:
return self.info.common()
def parse_command_line(self, command_line: typing.List[str], cwd: str) -> None:
# drop the chia_plot
arguments = command_line[1:]
# TODO: We could at some point do chia version detection and pick the
# associated command. For now we'll just use the latest one we have
# copied.
command = commands.latest_command()
self.parsed_command_line = plotman.plotters.parse_command_line_with_click(
command=command,
arguments=arguments,
)
for key in ["tmpdir", "tmpdir2", "finaldir"]:
original: os.PathLike[str] = self.parsed_command_line.parameters.get(key) # type: ignore[assignment]
if original is not None:
self.parsed_command_line.parameters[key] = pathlib.Path(cwd).joinpath(
original
)
def update(self, chunk: bytes) -> SpecificInfo:
new_lines = self.decoder.update(chunk=chunk)
for line in new_lines:
if not self.info.phase.known:
self.info = attr.evolve(
self.info, phase=plotman.job.Phase(major=0, minor=0)
)
for pattern, handler_functions in handlers.mapping.items():
match = pattern.search(line)
if match is None:
continue
for handler_function in handler_functions:
self.info = handler_function(match=match, info=self.info)
break
return self.info
handlers = plotman.plotters.RegexLineHandlers[SpecificInfo]()
@handlers.register(expression=r"^\[P1\] Table ([1-6])")
def phase_1(match: typing.Match[str], info: SpecificInfo) -> SpecificInfo:
# [P1] Table 1 took 39.8662 sec
# [P1] Table 2 took 211.248 sec, found 4294987039 matches
# [P1] Table 3 took 295.536 sec, found 4295003219 matches
# [P1] Table 4 took 360.731 sec, found 4295083991 matches
# [P1] Table 5 took 346.816 sec, found 4295198226 matches
# [P1] Table 6 took 337.844 sec, found 4295283897 matches
minor = int(match.group(1)) + 1
return attr.evolve(info, phase=plotman.job.Phase(major=1, minor=minor))
@handlers.register(expression=r"^\[P2\] max_table_size")
def phase_2_start(match: typing.Match[str], info: SpecificInfo) -> SpecificInfo:
# [P2] max_table_size = 4295422716
return attr.evolve(info, phase=plotman.job.Phase(major=2, minor=1))
@handlers.register(expression=r"^\[P2\] Table ([2-7]) rewrite")
def phase_2(match: typing.Match[str], info: SpecificInfo) -> SpecificInfo:
# [P2] Table 7 scan took 18.4493 sec
# [P2] Table 7 rewrite took 60.7659 sec, dropped 0 entries (0 %)
# [P2] Table 6 scan took 82.9818 sec
# [P2] Table 6 rewrite took 142.287 sec, dropped 581464719 entries (13.5373 %)
# [P2] Table 5 scan took 122.71 sec
# [P2] Table 5 rewrite took 205.382 sec, dropped 762140364 entries (17.744 %)
# [P2] Table 4 scan took 119.723 sec
# [P2] Table 4 rewrite took 131.374 sec, dropped 828922032 entries (19.2993 %)
# [P2] Table 3 scan took 87.8078 sec
# [P2] Table 3 rewrite took 135.269 sec, dropped 855096923 entries (19.9091 %)
# [P2] Table 2 scan took 103.825 sec
# [P2] Table 2 rewrite took 159.486 sec, dropped 865588810 entries (20.1532 %)
minor_in_log = int(match.group(1))
active_minor = 8 - minor_in_log + 1
return attr.evolve(info, phase=plotman.job.Phase(major=2, minor=active_minor))
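# Note derived from the mapping above: madMAx rewrites tables in descending order,
# so a "[P2] Table 7 rewrite" line reports phase 2:2 while "[P2] Table 2 rewrite"
# reports phase 2:7.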
@handlers.register(expression=r"^Phase 2 took (\d+(\.\d+)) sec")
def phase3_0(match: typing.Match[str], info: SpecificInfo) -> SpecificInfo:
# Phase 2 took 1344.24 sec
return attr.evolve(
info,
phase=plotman.job.Phase(major=3, minor=0),
phase2_duration_raw=float(match.group(1)),
)
@handlers.register(expression=r"^Wrote plot header")
def phase_3_start(match: typing.Match[str], info: SpecificInfo) -> SpecificInfo:
# Wrote plot header with 252 bytes
return attr.evolve(info, phase=plotman.job.Phase(major=3, minor=1))
@handlers.register(expression=r"^\[P3-2\] Table ([2-6]) took")
def phase_3(match: typing.Match[str], info: SpecificInfo) -> SpecificInfo:
# [P3-1] Table 2 took 80.1436 sec, wrote 3429403335 right entries
# [P3-2] Table 2 took 69.0526 sec, wrote 3429403335 left entries, 3429403335 final
# [P3-1] Table 3 took 104.477 sec, wrote 3439906296 right entries
# [P3-2] Table 3 took 69.8111 sec, wrote 3439906296 left entries, 3439906296 final
# [P3-1] Table 4 took 111.704 sec, wrote 3466161959 right entries
# [P3-2] Table 4 took 68.1434 sec, wrote 3466161959 left entries, 3466161959 final
# [P3-1] Table 5 took 106.097 sec, wrote 3533057862 right entries
# [P3-2] Table 5 took 69.3742 sec, wrote 3533057862 left entries, 3533057862 final
# [P3-1] Table 6 took 105.378 sec, wrote 3713819178 right entries
# [P3-2] Table 6 took 60.371 sec, wrote 3713819178 left entries, 3713819178 final
minor = int(match.group(1))
return attr.evolve(info, phase=plotman.job.Phase(major=3, minor=minor))
@handlers.register(expression=r"^Phase 3 took (\d+(\.\d+)) sec")
def phase4(match: typing.Match[str], info: SpecificInfo) -> SpecificInfo:
# Phase 3 took 1002.89 sec, wrote 21877315926 entries to final plot
return attr.evolve(
info,
phase=plotman.job.Phase(major=4, minor=0),
phase3_duration_raw=float(match.group(1)),
)
@handlers.register(expression=r"^\[P4\] Starting")
def phase_4_1(match: typing.Match[str], info: SpecificInfo) -> SpecificInfo:
# [P4] Starting to write C1 and C3 tables
return attr.evolve(info, phase=plotman.job.Phase(major=4, minor=1))
@handlers.register(expression=r"^\[P4\] Writing C2 table")
def phase_4_2(match: typing.Match[str], info: SpecificInfo) -> SpecificInfo:
# [P4] Writing C2 table
return attr.evolve(info, phase=plotman.job.Phase(major=4, minor=2))
@handlers.register(expression=r"^Phase 4 took (\d+(\.\d+)) sec")
def phase5(match: typing.Match[str], info: SpecificInfo) -> SpecificInfo:
# Phase 4 took 77.9891 sec, final plot size is 108836186159 bytes
return attr.evolve(
info,
phase=plotman.job.Phase(major=5, minor=0),
phase4_duration_raw=float(match.group(1)),
)
@handlers.register(expression=r"^Started copy to ")
def phase_5_1(match: typing.Match[str], info: SpecificInfo) -> SpecificInfo:
# Started copy to /farm/yards/902/fake_dst/plot-k32-2021-07-14-21-56-522acbd6308af7e229281352f746449134126482cfabd51d38e0f89745d21698.plot
return attr.evolve(info, phase=plotman.job.Phase(major=5, minor=1))
@handlers.register(expression=r"^Renamed final plot to ")
def phase_5_2(match: typing.Match[str], info: SpecificInfo) -> SpecificInfo:
# Renamed final plot to /farm/yards/902/fake_dst/plot-k32-2021-07-14-21-56-522acbd6308af7e229281352f746449134126482cfabd51d38e0f89745d21698.plot
return attr.evolve(info, phase=plotman.job.Phase(major=5, minor=2))
@handlers.register(expression=r"^Final Directory:\s*(.+)")
def dst_dir(match: typing.Match[str], info: SpecificInfo) -> SpecificInfo:
# Final Directory: /farm/yards/907/
return attr.evolve(info, dst_dir=match.group(1))
@handlers.register(expression=r"^Working Directory:\s*(.+)")
def tmp_dir(match: typing.Match[str], info: SpecificInfo) -> SpecificInfo:
# Working Directory: /farm/yards/907/
return attr.evolve(info, tmp_dir=match.group(1))
@handlers.register(expression=r"^Working Directory 2:\s*(.+)")
def tmp2_dir(match: typing.Match[str], info: SpecificInfo) -> SpecificInfo:
# Working Directory 2: /farm/yards/907/
return attr.evolve(info, tmp2_dir=match.group(1))
@handlers.register(
expression=r"^Plot Name: (?P<name>plot-k(?P<size>\d+)-(?P<year>\d+)-(?P<month>\d+)-(?P<day>\d+)-(?P<hour>\d+)-(?P<minute>\d+)-(?P<plot_id>\w+))$"
)
def plot_name_line(match: typing.Match[str], info: SpecificInfo) -> SpecificInfo:
# Plot Name: plot-k32-2021-07-11-16-52-3a3872f5a124497a17fb917dfe027802aa1867f8b0a8cbac558ed12aa5b697b2
return attr.evolve(
info,
plot_size=int(match.group("size")),
plot_name=match.group("name"),
started_at=pendulum.datetime(
year=int(match.group("year")),
month=int(match.group("month")),
day=int(match.group("day")),
hour=int(match.group("hour")),
minute=int(match.group("minute")),
tz=None,
),
plot_id=match.group("plot_id"),
phase=plotman.job.Phase(major=1, minor=1),
)
@handlers.register(expression=r"^Number of Threads:\s*(\d+)")
def threads(match: typing.Match[str], info: SpecificInfo) -> SpecificInfo:
# Number of Threads: 9
return attr.evolve(info, threads=int(match.group(1)))
@handlers.register(expression=r"^Number of Buckets P1:.*\((\d+)\)")
def p1_buckets(match: typing.Match[str], info: SpecificInfo) -> SpecificInfo:
# Number of Buckets P1: 2^8 (256)
return attr.evolve(info, p1_buckets=int(match.group(1)))
@handlers.register(expression=r"^Number of Buckets P3\+P4:.*\((\d+)\)")
def p34_buckets(match: typing.Match[str], info: SpecificInfo) -> SpecificInfo:
# Number of Buckets P3+P4: 2^8 (256)
return attr.evolve(info, p34_buckets=int(match.group(1)))
@handlers.register(expression=r"^Phase 1 took (\d+(\.\d+)) sec")
def phase1_duration_raw(match: typing.Match[str], info: SpecificInfo) -> SpecificInfo:
# Phase 1 took 1851.12 sec
return attr.evolve(info, phase1_duration_raw=float(match.group(1)))
@handlers.register(expression=r"^Total plot creation time was (\d+(\.\d+)) sec")
def total_time(match: typing.Match[str], info: SpecificInfo) -> SpecificInfo:
# Total plot creation time was 4276.32 sec (71.272 min)
return attr.evolve(info, total_time_raw=float(match.group(1)))
commands = plotman.plotters.core.Commands()
# Madmax Git on 2021-06-19 -> https://github.com/madMAx43v3r/chia-plotter/commit/c8121b987186c42c895b49818e6c13acecc51332
@commands.register(version=(0,))
@click.command()
# https://github.com/madMAx43v3r/chia-plotter/blob/c8121b987186c42c895b49818e6c13acecc51332/LICENSE
# https://github.com/madMAx43v3r/chia-plotter/blob/c8121b987186c42c895b49818e6c13acecc51332/src/chia_plot.cpp#L177-L188
@click.option(
"-n",
"--count",
help="Number of plots to create (default = 1, -1 = infinite)",
type=int,
default=1,
show_default=True,
)
@click.option(
"-r",
"--threads",
help="Number of threads (default = 4)",
type=int,
default=4,
show_default=True,
)
@click.option(
"-u",
"--buckets",
help="Number of buckets (default = 256)",
type=int,
default=256,
show_default=True,
)
@click.option(
"-v",
"--buckets3",
help="Number of buckets for phase 3+4 (default = buckets)",
type=int,
default=256,
)
@click.option(
"-t",
"--tmpdir",
help="Temporary directory, needs ~220 GiB (default = $PWD)",
type=click.Path(),
default=pathlib.Path("."),
show_default=True,
)
@click.option(
"-2",
"--tmpdir2",
help="Temporary directory 2, needs ~110 GiB [RAM] (default = <tmpdir>)",
type=click.Path(),
default=None,
)
@click.option(
"-d",
"--finaldir",
help="Final directory (default = <tmpdir>)",
type=click.Path(),
default=pathlib.Path("."),
show_default=True,
)
@click.option(
"-p", "--poolkey", help="Pool Public Key (48 bytes)", type=str, default=None
)
@click.option(
"-f", "--farmerkey", help="Farmer Public Key (48 bytes)", type=str, default=None
)
@click.option(
"-G", "--tmptoggle", help="Alternate tmpdir/tmpdir2", type=str, default=None
)
def _cli_c8121b987186c42c895b49818e6c13acecc51332() -> None:
pass
# Madmax Git on 2021-07-12 -> https://github.com/madMAx43v3r/chia-plotter/commit/974d6e5f1440f68c48492122ca33828a98864dfc
@commands.register(version=(1,))
@click.command()
# https://github.com/madMAx43v3r/chia-plotter/blob/974d6e5f1440f68c48492122ca33828a98864dfc/LICENSE
# https://github.com/madMAx43v3r/chia-plotter/blob/974d6e5f1440f68c48492122ca33828a98864dfc/src/chia_plot.cpp#L235-L249
@click.option(
"-n",
"--count",
help="Number of plots to create (default = 1, -1 = infinite)",
type=int,
default=1,
show_default=True,
)
@click.option(
"-r",
"--threads",
help="Number of threads (default = 4)",
type=int,
default=4,
show_default=True,
)
@click.option(
"-u",
"--buckets",
help="Number of buckets (default = 256)",
type=int,
default=256,
show_default=True,
)
@click.option(
"-v",
"--buckets3",
help="Number of buckets for phase 3+4 (default = buckets)",
type=int,
default=256,
)
@click.option(
"-t",
"--tmpdir",
help="Temporary directory, needs ~220 GiB (default = $PWD)",
type=click.Path(),
default=pathlib.Path("."),
show_default=True,
)
@click.option(
"-2",
"--tmpdir2",
help="Temporary directory 2, needs ~110 GiB [RAM] (default = <tmpdir>)",
type=click.Path(),
default=None,
)
@click.option(
"-d",
"--finaldir",
help="Final directory (default = <tmpdir>)",
type=click.Path(),
default=pathlib.Path("."),
show_default=True,
)
@click.option(
"-w",
"--waitforcopy",
help="Wait for copy to start next plot",
type=bool,
default=False,
show_default=True,
)
@click.option(
"-p", "--poolkey", help="Pool Public Key (48 bytes)", type=str, default=None
)
@click.option(
"-c", "--contract", help="Pool Contract Address (62 chars)", type=str, default=None
)
@click.option(
"-f", "--farmerkey", help="Farmer Public Key (48 bytes)", type=str, default=None
)
@click.option(
"-G", "--tmptoggle", help="Alternate tmpdir/tmpdir2", type=str, default=None
)
@click.option(
"-K",
"--rmulti2",
help="Thread multiplier for P2 (default = 1)",
type=int,
default=1,
)
def _cli_974d6e5f1440f68c48492122ca33828a98864dfc() -> None:
pass
# Madmax Git on 2021-08-22 -> https://github.com/madMAx43v3r/chia-plotter/commit/aaa3214d4abbd49bb99c2ec087e27c765424cd65
@commands.register(version=(2,))
@click.command()
# https://github.com/madMAx43v3r/chia-plotter/blob/aaa3214d4abbd49bb99c2ec087e27c765424cd65/LICENSE
# https://github.com/madMAx43v3r/chia-plotter/blob/aaa3214d4abbd49bb99c2ec087e27c765424cd65/src/chia_plot.cpp#L238-L253
@click.option(
"-k",
"--size",
help="K size (default = 32, k <= 32)",
type=int,
default=32,
show_default=True,
)
@click.option(
"-n",
"--count",
help="Number of plots to create (default = 1, -1 = infinite)",
type=int,
default=1,
show_default=True,
)
@click.option(
"-r",
"--threads",
help="Number of threads (default = 4)",
type=int,
default=4,
show_default=True,
)
@click.option(
"-u",
"--buckets",
help="Number of buckets (default = 256)",
type=int,
default=256,
show_default=True,
)
@click.option(
"-v",
"--buckets3",
help="Number of buckets for phase 3+4 (default = buckets)",
type=int,
default=256,
)
@click.option(
"-t",
"--tmpdir",
help="Temporary directory, needs ~220 GiB (default = $PWD)",
type=click.Path(),
default=pathlib.Path("."),
show_default=True,
)
@click.option(
"-2",
"--tmpdir2",
help="Temporary directory 2, needs ~110 GiB [RAM] (default = <tmpdir>)",
type=click.Path(),
default=None,
)
@click.option(
"-d",
"--finaldir",
help="Final directory (default = <tmpdir>)",
type=click.Path(),
default=pathlib.Path("."),
show_default=True,
)
@click.option(
"-w",
"--waitforcopy",
help="Wait for copy to start next plot",
type=bool,
default=False,
show_default=True,
)
@click.option(
"-p", "--poolkey", help="Pool Public Key (48 bytes)", type=str, default=None
)
@click.option(
"-c", "--contract", help="Pool Contract Address (62 chars)", type=str, default=None
)
@click.option(
"-f", "--farmerkey", help="Farmer Public Key (48 bytes)", type=str, default=None
)
@click.option(
"-G", "--tmptoggle", help="Alternate tmpdir/tmpdir2", type=str, default=None
)
@click.option(
"-K",
"--rmulti2",
help="Thread multiplier for P2 (default = 1)",
type=int,
default=1,
)
def _cli_aaa3214d4abbd49bb99c2ec087e27c765424cd65() -> None:
pass
|
the-stack_0_21422 | import sys
import os.path
import re
kReColorHex = re.compile(r'#[0-9A-Fa-f]{6}')
def parse_key_value(line):
line = line.strip()
if not line or line[0] in ';#[':
return None
items = line.split('=', 2)
if not items or len(items) != 2:
return None
items[0] = items[0].strip()
items[1] = items[1].strip()
if not items[0] or not items[1]:
return None
return items
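# Quick illustration (hypothetical style line):
#   >>> parse_key_value('Default Style=font:Consolas; fore:#1E1E1E')
#   ['Default Style', 'font:Consolas; fore:#1E1E1E']
# Comments, section headers and lines without '=' yield None.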
def find_color_in_file(path, color_map):
with open(path, encoding='utf-8') as fd:
lines = fd.readlines()
for line in lines:
items = parse_key_value(line)
if not items:
continue
colors = kReColorHex.findall(items[1])
if not colors:
continue
key = items[0]
for color in colors:
color = color.upper()
if color in color_map:
color_stat = color_map[color]
color_stat['total_count'] += 1
if key not in color_stat['usage']:
color_stat['usage'][key] = 1
else:
color_stat['usage'][key] += 1
else:
color_stat = {
'total_count': 1,
'usage': {
key: 1,
},
}
color_map[color] = color_stat
def print_color_count(color_map):
for color, color_stat in color_map.items():
print(f"{color}\t{color_stat['total_count']}")
usage = color_stat['usage']
for key, count in usage.items():
print(f'\t{count}\t{key}')
def count_color(path):
# { color : { total_count: total_count, usage: { key: count}}}
color_map = {}
find_color_in_file(path, color_map)
colors = sorted(color_map.items(), key=lambda m: m[0])
colors = sorted(colors, key=lambda m: m[1]['total_count'], reverse=True)
color_map = dict(colors)
for color_stat in color_map.values():
usage = color_stat['usage']
usage = sorted(usage.items(), key=lambda m: m[0])
usage = sorted(usage, key=lambda m: m[1], reverse=True)
color_stat['usage'] = dict(usage)
print_color_count(color_map)
if __name__ == '__main__':
if len(sys.argv) > 1 and os.path.isfile(sys.argv[1]):
count_color(sys.argv[1])
else:
print(f"""Usage: {sys.argv[0]} path""")
|
the-stack_0_21423 | import os
import copy
import pickle
import platform
import subprocess
import sys
import unittest
from unittest import mock
from test import support
from test.support import os_helper
FEDORA_OS_RELEASE = """\
NAME=Fedora
VERSION="32 (Thirty Two)"
ID=fedora
VERSION_ID=32
VERSION_CODENAME=""
PLATFORM_ID="platform:f32"
PRETTY_NAME="Fedora 32 (Thirty Two)"
ANSI_COLOR="0;34"
LOGO=fedora-logo-icon
CPE_NAME="cpe:/o:fedoraproject:fedora:32"
HOME_URL="https://fedoraproject.org/"
DOCUMENTATION_URL="https://docs.fedoraproject.org/en-US/fedora/f32/system-administrators-guide/"
SUPPORT_URL="https://fedoraproject.org/wiki/Communicating_and_getting_help"
BUG_REPORT_URL="https://bugzilla.redhat.com/"
REDHAT_BUGZILLA_PRODUCT="Fedora"
REDHAT_BUGZILLA_PRODUCT_VERSION=32
REDHAT_SUPPORT_PRODUCT="Fedora"
REDHAT_SUPPORT_PRODUCT_VERSION=32
PRIVACY_POLICY_URL="https://fedoraproject.org/wiki/Legal:PrivacyPolicy"
"""
UBUNTU_OS_RELEASE = """\
NAME="Ubuntu"
VERSION="20.04.1 LTS (Focal Fossa)"
ID=ubuntu
ID_LIKE=debian
PRETTY_NAME="Ubuntu 20.04.1 LTS"
VERSION_ID="20.04"
HOME_URL="https://www.ubuntu.com/"
SUPPORT_URL="https://help.ubuntu.com/"
BUG_REPORT_URL="https://bugs.launchpad.net/ubuntu/"
PRIVACY_POLICY_URL="https://www.ubuntu.com/legal/terms-and-policies/privacy-policy"
VERSION_CODENAME=focal
UBUNTU_CODENAME=focal
"""
TEST_OS_RELEASE = r"""
# test data
ID_LIKE="egg spam viking"
EMPTY=
# comments and empty lines are ignored
SINGLE_QUOTE='single'
EMPTY_SINGLE=''
DOUBLE_QUOTE="double"
EMPTY_DOUBLE=""
QUOTES="double\'s"
SPECIALS="\$\`\\\'\""
# invalid lines
=invalid
=
INVALID
IN-VALID=value
IN VALID=value
"""
class PlatformTest(unittest.TestCase):
def clear_caches(self):
platform._platform_cache.clear()
platform._sys_version_cache.clear()
platform._uname_cache = None
platform._os_release_cache = None
def test_architecture(self):
res = platform.architecture()
@os_helper.skip_unless_symlink
def test_architecture_via_symlink(self): # issue3762
with support.PythonSymlink() as py:
cmd = "-c", "import platform; print(platform.architecture())"
self.assertEqual(py.call_real(*cmd), py.call_link(*cmd))
def test_platform(self):
for aliased in (False, True):
for terse in (False, True):
res = platform.platform(aliased, terse)
def test_system(self):
res = platform.system()
def test_node(self):
res = platform.node()
def test_release(self):
res = platform.release()
def test_version(self):
res = platform.version()
def test_machine(self):
res = platform.machine()
def test_processor(self):
res = platform.processor()
def setUp(self):
self.save_version = sys.version
self.save_git = sys._git
self.save_platform = sys.platform
def tearDown(self):
sys.version = self.save_version
sys._git = self.save_git
sys.platform = self.save_platform
def test_sys_version(self):
# Old test.
for input, output in (
('2.4.3 (#1, Jun 21 2006, 13:54:21) \n[GCC 3.3.4 (pre 3.3.5 20040809)]',
('CPython', '2.4.3', '', '', '1', 'Jun 21 2006 13:54:21', 'GCC 3.3.4 (pre 3.3.5 20040809)')),
('IronPython 1.0.60816 on .NET 2.0.50727.42',
('IronPython', '1.0.60816', '', '', '', '', '.NET 2.0.50727.42')),
('IronPython 1.0 (1.0.61005.1977) on .NET 2.0.50727.42',
('IronPython', '1.0.0', '', '', '', '', '.NET 2.0.50727.42')),
('2.4.3 (truncation, date, t) \n[GCC]',
('CPython', '2.4.3', '', '', 'truncation', 'date t', 'GCC')),
('2.4.3 (truncation, date, ) \n[GCC]',
('CPython', '2.4.3', '', '', 'truncation', 'date', 'GCC')),
('2.4.3 (truncation, date,) \n[GCC]',
('CPython', '2.4.3', '', '', 'truncation', 'date', 'GCC')),
('2.4.3 (truncation, date) \n[GCC]',
('CPython', '2.4.3', '', '', 'truncation', 'date', 'GCC')),
('2.4.3 (truncation, d) \n[GCC]',
('CPython', '2.4.3', '', '', 'truncation', 'd', 'GCC')),
('2.4.3 (truncation, ) \n[GCC]',
('CPython', '2.4.3', '', '', 'truncation', '', 'GCC')),
('2.4.3 (truncation,) \n[GCC]',
('CPython', '2.4.3', '', '', 'truncation', '', 'GCC')),
('2.4.3 (truncation) \n[GCC]',
('CPython', '2.4.3', '', '', 'truncation', '', 'GCC')),
):
# branch and revision are not "parsed", but fetched
# from sys._git. Ignore them
(name, version, branch, revision, buildno, builddate, compiler) \
= platform._sys_version(input)
self.assertEqual(
(name, version, '', '', buildno, builddate, compiler), output)
# Tests for python_implementation(), python_version(), python_branch(),
# python_revision(), python_build(), and python_compiler().
sys_versions = {
("2.6.1 (r261:67515, Dec 6 2008, 15:26:00) \n[GCC 4.0.1 (Apple Computer, Inc. build 5370)]",
('CPython', 'tags/r261', '67515'), self.save_platform)
:
("CPython", "2.6.1", "tags/r261", "67515",
('r261:67515', 'Dec 6 2008 15:26:00'),
'GCC 4.0.1 (Apple Computer, Inc. build 5370)'),
("IronPython 2.0 (2.0.0.0) on .NET 2.0.50727.3053", None, "cli")
:
("IronPython", "2.0.0", "", "", ("", ""),
".NET 2.0.50727.3053"),
("2.6.1 (IronPython 2.6.1 (2.6.10920.0) on .NET 2.0.50727.1433)", None, "cli")
:
("IronPython", "2.6.1", "", "", ("", ""),
".NET 2.0.50727.1433"),
("2.7.4 (IronPython 2.7.4 (2.7.0.40) on Mono 4.0.30319.1 (32-bit))", None, "cli")
:
("IronPython", "2.7.4", "", "", ("", ""),
"Mono 4.0.30319.1 (32-bit)"),
("2.5 (trunk:6107, Mar 26 2009, 13:02:18) \n[Java HotSpot(TM) Client VM (\"Apple Computer, Inc.\")]",
('Jython', 'trunk', '6107'), "java1.5.0_16")
:
("Jython", "2.5.0", "trunk", "6107",
('trunk:6107', 'Mar 26 2009'), "java1.5.0_16"),
("2.5.2 (63378, Mar 26 2009, 18:03:29)\n[PyPy 1.0.0]",
('PyPy', 'trunk', '63378'), self.save_platform)
:
("PyPy", "2.5.2", "trunk", "63378", ('63378', 'Mar 26 2009'),
"")
}
for (version_tag, scm, sys_platform), info in \
sys_versions.items():
sys.version = version_tag
if scm is None:
if hasattr(sys, "_git"):
del sys._git
else:
sys._git = scm
if sys_platform is not None:
sys.platform = sys_platform
self.assertEqual(platform.python_implementation(), info[0])
self.assertEqual(platform.python_version(), info[1])
self.assertEqual(platform.python_branch(), info[2])
self.assertEqual(platform.python_revision(), info[3])
self.assertEqual(platform.python_build(), info[4])
self.assertEqual(platform.python_compiler(), info[5])
def test_system_alias(self):
res = platform.system_alias(
platform.system(),
platform.release(),
platform.version(),
)
def test_uname(self):
res = platform.uname()
self.assertTrue(any(res))
self.assertEqual(res[0], res.system)
self.assertEqual(res[-6], res.system)
self.assertEqual(res[1], res.node)
self.assertEqual(res[-5], res.node)
self.assertEqual(res[2], res.release)
self.assertEqual(res[-4], res.release)
self.assertEqual(res[3], res.version)
self.assertEqual(res[-3], res.version)
self.assertEqual(res[4], res.machine)
self.assertEqual(res[-2], res.machine)
self.assertEqual(res[5], res.processor)
self.assertEqual(res[-1], res.processor)
self.assertEqual(len(res), 6)
def test_uname_cast_to_tuple(self):
res = platform.uname()
expected = (
res.system, res.node, res.release, res.version, res.machine,
res.processor,
)
self.assertEqual(tuple(res), expected)
def test_uname_replace(self):
res = platform.uname()
new = res._replace(
system='system', node='node', release='release',
version='version', machine='machine')
self.assertEqual(new.system, 'system')
self.assertEqual(new.node, 'node')
self.assertEqual(new.release, 'release')
self.assertEqual(new.version, 'version')
self.assertEqual(new.machine, 'machine')
# processor cannot be replaced
self.assertEqual(new.processor, res.processor)
def test_uname_copy(self):
uname = platform.uname()
self.assertEqual(copy.copy(uname), uname)
self.assertEqual(copy.deepcopy(uname), uname)
def test_uname_pickle(self):
orig = platform.uname()
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
with self.subTest(protocol=proto):
pickled = pickle.dumps(orig, proto)
restored = pickle.loads(pickled)
self.assertEqual(restored, orig)
def test_uname_slices(self):
res = platform.uname()
expected = tuple(res)
self.assertEqual(res[:], expected)
self.assertEqual(res[:5], expected[:5])
@unittest.skipIf(sys.platform in ['win32', 'OpenVMS'], "uname -p not used")
def test_uname_processor(self):
"""
On some systems, the processor must match the output
of 'uname -p'. See Issue 35967 for rationale.
"""
try:
proc_res = subprocess.check_output(['uname', '-p'], text=True).strip()
expect = platform._unknown_as_blank(proc_res)
except (OSError, subprocess.CalledProcessError):
expect = ''
self.assertEqual(platform.uname().processor, expect)
@unittest.skipUnless(sys.platform.startswith('win'), "windows only test")
def test_uname_win32_ARCHITEW6432(self):
# Issue 7860: make sure we get architecture from the correct variable
# on 64 bit Windows: if PROCESSOR_ARCHITEW6432 exists we should be
# using it, per
# http://blogs.msdn.com/david.wang/archive/2006/03/26/HOWTO-Detect-Process-Bitness.aspx
try:
with os_helper.EnvironmentVarGuard() as environ:
if 'PROCESSOR_ARCHITEW6432' in environ:
del environ['PROCESSOR_ARCHITEW6432']
environ['PROCESSOR_ARCHITECTURE'] = 'foo'
platform._uname_cache = None
system, node, release, version, machine, processor = platform.uname()
self.assertEqual(machine, 'foo')
environ['PROCESSOR_ARCHITEW6432'] = 'bar'
platform._uname_cache = None
system, node, release, version, machine, processor = platform.uname()
self.assertEqual(machine, 'bar')
finally:
platform._uname_cache = None
def test_java_ver(self):
res = platform.java_ver()
if sys.platform == 'java':
self.assertTrue(all(res))
def test_win32_ver(self):
res = platform.win32_ver()
def test_mac_ver(self):
res = platform.mac_ver()
if platform.uname().system == 'Darwin':
# We are on a macOS system, check that the right version
# information is returned
output = subprocess.check_output(['sw_vers'], text=True)
for line in output.splitlines():
if line.startswith('ProductVersion:'):
real_ver = line.strip().split()[-1]
break
else:
self.fail(f"failed to parse sw_vers output: {output!r}")
result_list = res[0].split('.')
expect_list = real_ver.split('.')
len_diff = len(result_list) - len(expect_list)
# On Snow Leopard, sw_vers reports 10.6.0 as 10.6
if len_diff > 0:
expect_list.extend(['0'] * len_diff)
# For compatibility with older binaries, macOS 11.x may report
# itself as '10.16' rather than '11.x.y'.
if result_list != ['10', '16']:
self.assertEqual(result_list, expect_list)
# res[1] claims to contain
# (version, dev_stage, non_release_version)
# That information is no longer available
self.assertEqual(res[1], ('', '', ''))
if sys.byteorder == 'little':
self.assertIn(res[2], ('i386', 'x86_64', 'arm64'))
else:
self.assertEqual(res[2], 'PowerPC')
@unittest.skipUnless(sys.platform == 'darwin', "OSX only test")
def test_mac_ver_with_fork(self):
# Issue7895: platform.mac_ver() crashes when using fork without exec
#
# This test checks that the fix for that issue works.
#
pid = os.fork()
if pid == 0:
# child
info = platform.mac_ver()
os._exit(0)
else:
# parent
support.wait_process(pid, exitcode=0)
def test_libc_ver(self):
# check that libc_ver(executable) doesn't raise an exception
if os.path.isdir(sys.executable) and \
os.path.exists(sys.executable+'.exe'):
# Cygwin horror
executable = sys.executable + '.exe'
elif sys.platform == "win32" and not os.path.exists(sys.executable):
# App symlink appears to not exist, but we want the
# real executable here anyway
import _winapi
executable = _winapi.GetModuleFileName(0)
else:
executable = sys.executable
platform.libc_ver(executable)
filename = os_helper.TESTFN
self.addCleanup(os_helper.unlink, filename)
with mock.patch('os.confstr', create=True, return_value='mock 1.0'):
# test os.confstr() code path
self.assertEqual(platform.libc_ver(), ('mock', '1.0'))
# test the different regular expressions
for data, expected in (
(b'__libc_init', ('libc', '')),
(b'GLIBC_2.9', ('glibc', '2.9')),
(b'libc.so.1.2.5', ('libc', '1.2.5')),
(b'libc_pthread.so.1.2.5', ('libc', '1.2.5_pthread')),
(b'', ('', '')),
):
with open(filename, 'wb') as fp:
fp.write(b'[xxx%sxxx]' % data)
fp.flush()
# os.confstr() must not be used if executable is set
self.assertEqual(platform.libc_ver(executable=filename),
expected)
# binary containing multiple versions: get the most recent,
# make sure that 1.9 is seen as older than 1.23.4
chunksize = 16384
with open(filename, 'wb') as f:
# test match at chunk boundary
f.write(b'x'*(chunksize - 10))
f.write(b'GLIBC_1.23.4\0GLIBC_1.9\0GLIBC_1.21\0')
self.assertEqual(platform.libc_ver(filename, chunksize=chunksize),
('glibc', '1.23.4'))
@support.cpython_only
def test__comparable_version(self):
from platform import _comparable_version as V
self.assertEqual(V('1.2.3'), V('1.2.3'))
self.assertLess(V('1.2.3'), V('1.2.10'))
self.assertEqual(V('1.2.3.4'), V('1_2-3+4'))
self.assertLess(V('1.2spam'), V('1.2dev'))
self.assertLess(V('1.2dev'), V('1.2alpha'))
self.assertLess(V('1.2dev'), V('1.2a'))
self.assertLess(V('1.2alpha'), V('1.2beta'))
self.assertLess(V('1.2a'), V('1.2b'))
self.assertLess(V('1.2beta'), V('1.2c'))
self.assertLess(V('1.2b'), V('1.2c'))
self.assertLess(V('1.2c'), V('1.2RC'))
self.assertLess(V('1.2c'), V('1.2rc'))
self.assertLess(V('1.2RC'), V('1.2.0'))
self.assertLess(V('1.2rc'), V('1.2.0'))
self.assertLess(V('1.2.0'), V('1.2pl'))
self.assertLess(V('1.2.0'), V('1.2p'))
self.assertLess(V('1.5.1'), V('1.5.2b2'))
self.assertLess(V('3.10a'), V('161'))
self.assertEqual(V('8.02'), V('8.02'))
self.assertLess(V('3.4j'), V('1996.07.12'))
self.assertLess(V('3.1.1.6'), V('3.2.pl0'))
self.assertLess(V('2g6'), V('11g'))
self.assertLess(V('0.9'), V('2.2'))
self.assertLess(V('1.2'), V('1.2.1'))
self.assertLess(V('1.1'), V('1.2.2'))
self.assertLess(V('1.1'), V('1.2'))
self.assertLess(V('1.2.1'), V('1.2.2'))
self.assertLess(V('1.2'), V('1.2.2'))
self.assertLess(V('0.4'), V('0.4.0'))
self.assertLess(V('1.13++'), V('5.5.kw'))
self.assertLess(V('0.960923'), V('2.2beta29'))
def test_macos(self):
self.addCleanup(self.clear_caches)
uname = ('Darwin', 'hostname', '17.7.0',
('Darwin Kernel Version 17.7.0: '
'Thu Jun 21 22:53:14 PDT 2018; '
'root:xnu-4570.71.2~1/RELEASE_X86_64'),
'x86_64', 'i386')
arch = ('64bit', '')
with mock.patch.object(platform, 'uname', return_value=uname), \
mock.patch.object(platform, 'architecture', return_value=arch):
for mac_ver, expected_terse, expected in [
# darwin: mac_ver() returns empty strings
(('', '', ''),
'Darwin-17.7.0',
'Darwin-17.7.0-x86_64-i386-64bit'),
# macOS: mac_ver() returns macOS version
(('10.13.6', ('', '', ''), 'x86_64'),
'macOS-10.13.6',
'macOS-10.13.6-x86_64-i386-64bit'),
]:
with mock.patch.object(platform, 'mac_ver',
return_value=mac_ver):
self.clear_caches()
self.assertEqual(platform.platform(terse=1), expected_terse)
self.assertEqual(platform.platform(), expected)
def test_freedesktop_os_release(self):
self.addCleanup(self.clear_caches)
self.clear_caches()
if any(os.path.isfile(fn) for fn in platform._os_release_candidates):
info = platform.freedesktop_os_release()
self.assertIn("NAME", info)
self.assertIn("ID", info)
info["CPYTHON_TEST"] = "test"
self.assertNotIn(
"CPYTHON_TEST",
platform.freedesktop_os_release()
)
else:
with self.assertRaises(OSError):
platform.freedesktop_os_release()
def test_parse_os_release(self):
info = platform._parse_os_release(FEDORA_OS_RELEASE.splitlines())
self.assertEqual(info["NAME"], "Fedora")
self.assertEqual(info["ID"], "fedora")
self.assertNotIn("ID_LIKE", info)
self.assertEqual(info["VERSION_CODENAME"], "")
info = platform._parse_os_release(UBUNTU_OS_RELEASE.splitlines())
self.assertEqual(info["NAME"], "Ubuntu")
self.assertEqual(info["ID"], "ubuntu")
self.assertEqual(info["ID_LIKE"], "debian")
self.assertEqual(info["VERSION_CODENAME"], "focal")
info = platform._parse_os_release(TEST_OS_RELEASE.splitlines())
expected = {
"ID": "linux",
"NAME": "Linux",
"PRETTY_NAME": "Linux",
"ID_LIKE": "egg spam viking",
"EMPTY": "",
"DOUBLE_QUOTE": "double",
"EMPTY_DOUBLE": "",
"SINGLE_QUOTE": "single",
"EMPTY_SINGLE": "",
"QUOTES": "double's",
"SPECIALS": "$`\\'\"",
}
self.assertEqual(info, expected)
self.assertEqual(len(info["SPECIALS"]), 5)
if __name__ == '__main__':
unittest.main()
|
the-stack_0_21424 | import requests
import os
import pika
import json
import socket
from flask import request
from flask_restful import Resource
from flask_jwt_extended import get_jwt_claims, get_jwt_identity, jwt_required
from app.cache import redis_client
from app.worker import celery
from app.api.user.models import User
from app.api.system.models import SystemSetting, SystemSettingSchema
class SearchApi(Resource):
@jwt_required
def post(self):
"Query the spotify api with the given params"
data = request.get_json()
trackName = data["trackName"]
query = "%20".join(trackName.split(" "))
url = "https://api.spotify.com/v1/search?q={}&type=track&market=DE".format(query)
spotify_token = redis_client.get("spotify_token").decode("utf-8")
headers = {"Authorization": "Bearer {}".format(spotify_token)}
r = requests.get(url, headers=headers)
return r.json(), 201
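    # Illustrative request (hypothetical values): SearchApi.post with
    # trackName "Hello World" queries
    #   https://api.spotify.com/v1/search?q=Hello%20World&type=track&market=DE
    # and forwards Spotify's JSON response to the client unchanged.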
class RecorderApi(Resource):
@jwt_required
def post(self):
        "Record the passed list"
        tracks = {}
data = request.get_json()
tracks['recordList'] = data['recordList']
settings = SystemSetting.get_settings()
schema = SystemSettingSchema()
connection = pika.BlockingConnection(
pika.ConnectionParameters(host="rabbit")
)
channel = connection.channel()
channel.queue_declare(queue="recorder")
tracks['settings'] = schema.dump(settings).data
for record in tracks['recordList']:
obj = {
"trackname": record['name'],
"id": record['id'],
"duration_ms": record['duration_ms'],
"settings": {
"framesize": tracks['settings']['framesize'],
"driver_path": tracks['settings']['driver_path'],
"profile_path": tracks['settings']['profile_path']
}
}
for t in record['album']['artists']:
obj['artist'] = t['name']
json_data = json.dumps(obj)
channel.basic_publish(
exchange="",
routing_key="recorder",
body=json_data
)
connection.close()
return {
"message": "Jobs wurden erstellt. Nachricht wird verstand wenn fertig."
}, 201
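    # Sketch of a message published by RecorderApi.post (placeholder values,
    # shape taken from the dict built above):
    #   {"trackname": "Some Track", "id": "<spotify-id>", "duration_ms": 215000,
    #    "artist": "Some Artist", "settings": {"framesize": ..., "driver_path": ...,
    #    "profile_path": ...}}
    # A worker consuming the "recorder" queue is presumably responsible for the
    # actual recording.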
class TokenCacheApi(Resource):
@jwt_required
def post(self):
"""Put the token into the Cache"""
data = request.get_json()
spotify_token = data["spotify_token"]
if redis_client.exists("spotify_token"):
redis_client.delete("spotify_token")
redis_client.set("spotify_token", spotify_token)
saved_token = redis_client.get("spotify_token").decode("utf-8")
return {
"message": "Token wurde gespeichert!",
"spotify_token": saved_token
}, 201
else:
redis_client.set("spotify_token", spotify_token)
saved_token = redis_client.get("spotify_token").decode("utf-8")
return {
"message": "Token wurde gespeichert!",
"spotify_token": saved_token
}, 201
|
the-stack_0_21425 | #!/usr/bin/env python
# MIT License
#
# Copyright (c) 2017 Francesco Evangelista
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import print_function
import argparse
import sys
import re
import subprocess
import os
import datetime
from os import listdir, environ
from os.path import isfile, join
from future.utils import iteritems
vmd_cube_help = """vmd_cube is a script to render cube files with vmd.
To generate cube files with Psi4 add the command cubeprop() at the end of your input file."""
vmd_exe = ""
vmd_script_name = "vmd_mo_script.vmd"
vmd_template = """#
# VMD script to plot MOs from cube files
#
# Load the molecule and change the atom style
mol load cube PARAM_CUBEFILE.cube
mol modcolor 0 PARAM_CUBENUM Element
mol modstyle 0 PARAM_CUBENUM Licorice 0.110000 10.000000 10.000000
#mol modstyle 0 PARAM_CUBENUM CPK 0.400000 0.40000 30.000000 16.000000
# Define the material
material change ambient Opaque 0.310000
material change diffuse Opaque 0.720000
material change specular Opaque 0.500000
material change shininess Opaque 0.480000
material change opacity Opaque 1.000000
material change outline Opaque 0.000000
material change outlinewidth Opaque 0.000000
material change transmode Opaque 0.000000
material change specular Opaque 0.750000
material change ambient EdgyShiny 0.310000
material change diffuse EdgyShiny 0.720000
material change shininess EdgyShiny 1.0000
material change opacity EdgyShiny PARAM_OPACITY
# Customize atom colors
color Element C silver
color Element H white
# Rotate and translate the molecule
rotate x by PARAM_RX
rotate y by PARAM_RY
rotate z by PARAM_RZ
translate by PARAM_TX PARAM_TY PARAM_TZ
scale by PARAM_SCALE
# Eliminate the axis and perfect the view
axes location Off
display projection Orthographic
display depthcue off
display resize PARAM_IMAGEW PARAM_IMAGEH
color Display Background white"""
vmd_template_surface = """#
# Add a surface
mol color ColorID PARAM_ISOCOLOR
mol representation Isosurface PARAM_ISOVALUE 0 0 0 1 1
mol selection all
mol material EdgyShiny
mol addrep PARAM_CUBENUM
"""
vmd_template_interactive = """#
# Disable rendering
mol off PARAM_CUBENUM
"""
vmd_template_render = """
# Render
render TachyonInternal PARAM_CUBEFILE.tga
mol delete PARAM_CUBENUM
"""
vmd_template_rotate = """
light 1 off
light 0 rot y 30.0
light 0 rot x -30.0
"""
default_path = os.getcwd()
# Default parameters
options = {"ISOVALUE" : [None,"Isosurface Value(s)"],
"ISOCOLOR" : [None,"Isosurface Color(s)"],
"ISOCUT" : [None,"Isosurface Value Cutoff"],
"RX" : [None,"X-axis Rotation"],
"RY" : [None,"Y-axis Rotation"],
"RZ" : [None,"Z-axis Rotation"],
"TX" : [None,"X-axis Translation"],
"TY" : [None,"Y-axis Translation"],
"TZ" : [None,"Z-axis Translation"],
"OPACITY" : [None,"Opacity"],
"CUBEDIR" : [None,"Cubefile Directory"],
"SCALE" : [None,"Scaling Factor"],
"MONTAGE" : [None,"Montage"],
"LABEL_MOS" : [None,"Label MOs"],
"FONTSIZE" : [None,"Font size"],
"IMAGEW" : [None,"Image width"],
"IMAGEH" : [None,"Image height"],
"VMDPATH" : [None,"VMD Path"],
"INTERACTIVE" : [None,"Interactive Mode"],
"GZIP" : [None,"Gzip Cube Files"]}
def which(program):
import os
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
def multigsub(subs,str):
for k,v in subs.items():
str = re.sub(k,v,str)
return str
def find_vmd(options):
    # Use environ.get() so a missing VMDPATH prints the hint below instead of raising KeyError
    vmdpath = environ.get('VMDPATH')
    if vmdpath:
        vmdpath = multigsub({" " : r"\ "},vmdpath)
        options["VMDPATH"][0] = vmdpath
    else:
        print("Please set the VMDPATH environment variable to the path of VMD.")
        exit(1)
def save_setup_command(argv):
file_name = join(default_path, 'vmd_cube_command')
f = open(file_name, 'w')
    f.write('# setup command was executed ' + datetime.datetime.now().strftime("%d-%B-%Y %H:%M:%S") + "\n")
f.write(" ".join(argv[:])+"\n")
f.close()
def read_options(options):
parser = argparse.ArgumentParser(description=vmd_cube_help)
parser.add_argument('data', metavar='<cubefile dir>', type=str, nargs='?',default=".",
help='The directory containing the cube files.')
parser.add_argument('--isovalue', metavar='<isovalue>', type=float, nargs='*',default=[0.05,-0.05],
help='a list of isosurface values (a list of floats, default = [0.05,-0.05])')
parser.add_argument('--isocolor', metavar='<integer>', type=int, nargs='*',default=[3,23],
help='a list of isosurface color IDs (a list of integers, default = [3,23])')
parser.add_argument('--isocut', metavar='<isovalue cutoff>', type=float, nargs='?',default=1.0e-5,
help='cutoff value for rendering an isosurface (float, default = 1.0e-5)')
parser.add_argument('--rx', metavar='<angle>', type=float, nargs='?',default=30.0,
help='the x-axis rotation angle (float, default = 30.0)')
parser.add_argument('--ry', metavar='<angle>', type=float, nargs='?',default=40.0,
help='the y-axis rotation angle (float, default = 40.0)')
parser.add_argument('--rz', metavar='<angle>', type=float, nargs='?',default=15.0,
help='the z-axis rotation angle (float, default = 15.0)')
parser.add_argument('--tx', metavar='<length>', type=float, nargs='?',default=0.0,
help='the x-axis translation (float, default = 0.0)')
parser.add_argument('--ty', metavar='<length>', type=float, nargs='?',default=0.0,
help='the y-axis translation (float, default = 0.0)')
parser.add_argument('--tz', metavar='<length>', type=float, nargs='?',default=0.0,
help='the z-axis translation (float, default = 0.0)')
parser.add_argument('--opacity', metavar='<opacity>', type=float, nargs='?',default=1.0,
help='opacity of the isosurface (float, default = 1.0)')
parser.add_argument('--scale', metavar='<factor>', type=float, nargs='?',default=1.0,
help='the scaling factor (float, default = 1.0)')
parser.add_argument('--no-montage', action="store_true",
help='call montage to combine images. (string, default = false)')
parser.add_argument('--no-labels', action="store_true",
help='do not add labels to images. (string, default = false)')
parser.add_argument('--imagew', metavar='<integer>', type=int, nargs='?',default=250,
help='the width of images (integer, default = 250)')
parser.add_argument('--imageh', metavar='<integer>', type=int, nargs='?',default=250,
help='the height of images (integer, default = 250)')
parser.add_argument('--fontsize', metavar='<integer>', type=int, nargs='?',default=20,
help='the font size (integer, default = 20)')
parser.add_argument('--interactive', action="store_true",
help='run in interactive mode (default = false)')
parser.add_argument('--gzip', action="store_true",
help='gzip cube files (default = false)')
parser.add_argument('--national_scheme', action="store_true",
help='use a red/blue color scheme. (string, default = false)')
parser.add_argument('--silver_scheme', action="store_true",
help='use a gray/white color scheme. (string, default = false)')
parser.add_argument('--bright_scheme', action="store_true",
help='use a soft yellow/blue color scheme. (string, default = false)')
parser.add_argument('--electron_scheme', action="store_true",
help='use a purple/green color scheme. (string, default = false)')
args = parser.parse_args()
options["CUBEDIR"][0] = str(args.data)
options["ISOVALUE"][0] = args.isovalue
options["ISOCOLOR"][0] = args.isocolor
options["ISOCUT"][0] = str(args.isocut)
options["RX"][0] = str(args.rx)
options["RY"][0] = str(args.ry)
options["RZ"][0] = str(args.rz)
options["TX"][0] = str(args.tx)
options["TY"][0] = str(args.ty)
options["TZ"][0] = str(args.tz)
options["OPACITY"][0] = str(args.opacity)
options["SCALE"][0] = str(args.scale)
options["LABEL_MOS"][0] = str(not args.no_labels)
options["MONTAGE"][0] = str(not args.no_montage)
options["FONTSIZE"][0] = str(args.fontsize)
options["IMAGEW"][0] = str(args.imagew)
options["IMAGEH"][0] = str(args.imageh)
options["INTERACTIVE"][0] = str(args.interactive)
options["GZIP"][0] = str(args.gzip)
if args.national_scheme:
options["ISOCOLOR"][0] = [23,30]
if args.silver_scheme:
options["ISOCOLOR"][0] = [2,8]
if args.electron_scheme:
options["ISOCOLOR"][0] = [13,12]
if args.bright_scheme:
options["ISOCOLOR"][0] = [32,22]
print("Parameters:")
sorted_parameters = sorted(options.keys())
for k in sorted_parameters:
print(" %-20s %s" % (options[k][1],str(options[k][0])))
def find_cubes(options):
# Find all the cube files in a given directory
dir = options["CUBEDIR"][0]
sorted_files = []
zipped_files = []
for f in listdir(options["CUBEDIR"][0]):
if "\'" in f:
nf = f.replace("\'", "p")
os.rename(f,nf)
f = nf
if "\"" in f:
nf = f.replace("\"", "pp")
os.rename(f,nf)
f = nf
if f[-5:] == '.cube':
sorted_files.append(f)
elif f[-8:] == '.cube.gz':
found_zipped = True
# unzip file
sorted_files.append(f[:-3])
zipped_files.append(f)
if len(zipped_files) > 0:
print("\nDecompressing gzipped cube files")
FNULL = open(os.devnull, 'w')
subprocess.call(("gzip -d %s" % " ".join(zipped_files)),stdout=FNULL, shell=True)
options["GZIP"][0] = 'True'
return sorted(sorted_files)
def write_and_run_vmd_script(options,cube_files):
vmd_script = open(vmd_script_name,"w+")
vmd_script.write(vmd_template_rotate)
# Define a map that contains all the values of the VMD parameters
replacement_map = {}
for (k, v) in iteritems(options):
key = "PARAM_" + k.upper()
replacement_map[key] = v[0]
for n,f in enumerate(cube_files):
replacement_map["PARAM_CUBENUM"] = '%03d' % n
replacement_map["PARAM_CUBEFILE"] = options["CUBEDIR"][0] + '/' + f[:-5]
# Default isocontour values or user-provided
isovalue = options["ISOVALUE"][0][:]
isocolor = options["ISOCOLOR"][0][:]
# Read isocontour values from file, if available
with open(f,'r') as file:
l1 = file.readline()
l2 = file.readline()
m = re.search(r'density: \(([-+]?[0-9]*\.?[0-9]+)\,([-+]?[0-9]*\.?[0-9]+)\)',l2)
if m:
isovalue[0] = float(m.groups()[0])
isovalue[1] = float(m.groups()[1])
nisovalue = len(isovalue)
nisocolor = len(isocolor)
        if nisovalue != nisocolor:
print("Quitting: Please specify the same number of isosurface values and colors.")
quit()
else:
print("Plotting %s with isosurface values" % (f), str(isovalue))
vmd_script_surface = ""
surf = zip(isovalue,isocolor)
for c in surf:
if abs(c[0]) > float(options["ISOCUT"][0]):
replacement_map["PARAM_ISOVALUE"] = str(c[0])
replacement_map["PARAM_ISOCOLOR"] = str(c[1])
vmd_script_surface += multigsub(replacement_map,vmd_template_surface)
else:
print(" * Skipping isosurface with isocontour value %f" % c[0])
vmd_script_head = multigsub(replacement_map,vmd_template)
if options["INTERACTIVE"][0] == 'True':
vmd_script_render = multigsub(replacement_map,vmd_template_interactive)
else:
vmd_script_render = multigsub(replacement_map,vmd_template_render)
vmd_script.write(vmd_script_head + "\n" + vmd_script_surface + "\n" + vmd_script_render)
if options["INTERACTIVE"][0] == 'False':
vmd_script.write("quit")
vmd_script.close()
# Call VMD in text mode
FNULL = open(os.devnull, 'w')
subprocess.call(("%s -dispdev text -e %s" % (options["VMDPATH"][0],vmd_script_name)),stdout=FNULL, shell=True)
else:
vmd_script.close()
# Call VMD in graphic mode
FNULL = open(os.devnull, 'w')
subprocess.call(("%s -e %s" % (options["VMDPATH"][0],vmd_script_name)),stdout=FNULL, shell=True)
def call_montage(options,cube_files):
if options["MONTAGE"][0] == 'True':
# Optionally, combine all figures into one image using montage
montage_exe = which("montage")
if montage_exe:
alpha_mos = []
beta_mos = []
densities = []
basis_functions = []
for f in cube_files:
tga_file = f[:-5] + ".tga"
if "Psi_a" in f:
alpha_mos.append(tga_file)
if "Psi_b" in f:
beta_mos.append(tga_file)
if "D" in f:
densities.append(tga_file)
if "Phi" in f:
basis_functions.append(tga_file)
# Sort the MOs
sorted_mos = []
for set in [alpha_mos,beta_mos]:
sorted_set = []
for s in set:
s_split = s.split('_')
                    # Keep the original file name so beta MOs ("Psi_b_...") are not renamed to "Psi_a_..."
                    sorted_set.append((int(s_split[2]), s))
sorted_set = sorted(sorted_set)
sorted_mos.append([s[1] for s in sorted_set])
os.chdir(options["CUBEDIR"][0])
# Add labels
if options["LABEL_MOS"][0] == 'True':
for f in sorted_mos[0]:
f_split = f.split('_')
label = '%s\ \(%s\)' % (f_split[3][:-4],f_split[2])
subprocess.call(("montage -pointsize %s -label %s %s -geometry '%sx%s+0+0>' %s" %
(options["FONTSIZE"][0],label,f,options["IMAGEW"][0],options["IMAGEH"][0],f)), shell=True)
# Combine together in one image
if len(alpha_mos) > 0:
subprocess.call(("%s %s -geometry +2+2 AlphaMOs.tga" % (montage_exe," ".join(sorted_mos[0]))), shell=True)
if len(beta_mos) > 0:
subprocess.call(("%s %s -geometry +2+2 BetaMOs.tga" % (montage_exe," ".join(sorted_mos[1]))), shell=True)
if len(densities) > 0:
subprocess.call(("%s %s -geometry +2+2 Densities.tga" % (montage_exe," ".join(densities))), shell=True)
if len(basis_functions) > 0:
subprocess.call(("%s %s -geometry +2+2 BasisFunctions.tga" % (montage_exe," ".join(basis_functions))), shell=True)
def zip_files(cube_files,options):
"""Gzip cube files if requested or necessary."""
if options["GZIP"][0] == 'True':
print("\nCompressing cube files")
FNULL = open(os.devnull, 'w')
subprocess.call(("gzip %s" % " ".join(cube_files)),stdout=FNULL, shell=True)
def get_cumulative_density_iso_value(file,sigma):
"""Find the isosurface values that capture a certain amount of the total density (sigma)."""
cube_data = []
norm = 0.0
k = 0
with open(file) as f:
for line in f:
if k > 6:
for s in line.split():
value = float(s)
value_sqr = value * value
norm = norm + value_sqr
cube_data.append((value_sqr,value))
k = k + 1
cube_data.sort(reverse=True)
sum = 0.0
positive_iso = 0.0
negative_iso = 0.0
for (value_sqr,value) in cube_data:
if sum < sigma:
sum = sum + value_sqr / norm
if value > 0:
positive_iso = value
else:
negative_iso = value
else:
return (positive_iso, negative_iso)
return (positive_iso, negative_iso)
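# Hypothetical usage sketch (not called by the original workflow): pick the
# positive/negative isosurface values that capture a given fraction of the
# total density of a cube file and feed them to the ISOVALUE option.  The
# file name "Da.cube" and the sigma value are assumptions for illustration.
def example_cumulative_isovalues(cube_file="Da.cube", sigma=0.85):
    positive_iso, negative_iso = get_cumulative_density_iso_value(cube_file, sigma)
    options["ISOVALUE"][0] = [positive_iso, negative_iso]
    return positive_iso, negative_iso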
def main(argv):
find_vmd(options)
read_options(options)
save_setup_command(argv)
cube_files = find_cubes(options)
write_and_run_vmd_script(options,cube_files)
call_montage(options,cube_files)
zip_files(cube_files,options)
if __name__ == '__main__':
main(sys.argv)
|
the-stack_0_21427 | # -*- coding: utf-8 -*-
"""
Small prototype of a cost function and show difference between
edge betweenness centrality and node betweenness centrality averaged
over the edges.
"""
import numpy as np
import networkx as nx
import osmnx as ox
import nerds_osmnx.simplification
def normalized_array(arr):
"""Return any array of values normalized between 0 and 1."""
return (arr - min(arr))/(max(arr) - min(arr))
def average_node_to_edge(G, attr_name):
"""Return graph with edge attribute as average of node attribute"""
G = G.copy()
for edge in G.edges:
G.edges[edge][attr_name] = np.mean([
G.nodes[edge[0]][attr_name],
G.nodes[edge[1]][attr_name]
])
return G
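# Minimal sketch (illustration only, not part of the prototype): on a small
# toy graph, average_node_to_edge turns node betweenness into an edge
# attribute equal to the mean of the two endpoint values.
def _demo_average_node_to_edge():
    H = nx.path_graph(4)
    node_betw = nx.betweenness_centrality(H)
    nx.set_node_attributes(H, node_betw, name="betweenness")
    H = average_node_to_edge(H, "betweenness")
    return nx.get_edge_attributes(H, "betweenness")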
if __name__ == "__main__":
G = ox.graph_from_bbox(43.5337, 43.5233, 5.4577, 5.4376,
simplify=False)
ox.plot_graph(G, figsize=(12, 8), bgcolor='w',
node_color='r', node_size=30,
edge_color='black', edge_linewidth=3)
G = nerds_osmnx.simplification.simplify_graph(G)
ox.plot_graph(G, figsize=(12, 8), bgcolor='w',
node_color='r', node_size=30,
edge_color='black', edge_linewidth=3)
G = nerds_osmnx.simplification.multidigraph_to_graph(G)
betw = nx.betweenness_centrality(G, weight="length")
clos = nx.closeness_centrality(G, distance="length")
A = 1.
B = 1.
cost_function = (A*normalized_array(np.array(list(betw.values())))
+ B*normalized_array(np.array(list(clos.values()))))
cost_dict = dict(zip(betw.keys(), cost_function))
nx.set_node_attributes(G, cost_dict, name="cost")
cost_nc = ox.plot.get_node_colors_by_attr(G, "cost")
e_betw = nx.edge_betweenness_centrality(G, weight="length")
nx.set_edge_attributes(G, e_betw, name="edge_betweenness")
nx.set_node_attributes(G, betw, name="betweenness")
G = average_node_to_edge(G, 'betweenness')
betw_ec = ox.plot.get_edge_colors_by_attr(G, "betweenness")
ebet_ec = ox.plot.get_edge_colors_by_attr(G, "edge_betweenness")
#osmx.plot_graph only take multigraph because it retrieves keys
G = nx.MultiGraph(G)
ox.plot_graph(G, figsize=(12, 8), bgcolor='w',
node_color='black', node_size=30,
edge_color=betw_ec, edge_linewidth=3)
ox.plot_graph(G, figsize=(12, 8), bgcolor='w',
node_color='black', node_size=30,
edge_color=ebet_ec, edge_linewidth=3) |
the-stack_0_21428 | #
# converted from matlab version to python in Jan 2018
import sys
import numpy as np
__doc__ = """
Density of Sea Water using McDougall et al. 2003 (JAOT 20) polynomial
Functions:
dens :: computes in-situ density from salinity, potential temperature
and pressure
"""
# coefficients nonlinear equation of state in pressure coordinates for
eosMDJWFnum = [ 7.35212840e+00,
-5.45928211e-02,
3.98476704e-04,
2.96938239e+00,
-7.23268813e-03,
2.12382341e-03,
1.04004591e-02,
1.03970529e-07,
5.18761880e-06,
-3.24041825e-08,
-1.23869360e-11,
9.99843699e+02 ]
eosMDJWFden = [ 7.28606739e-03,
-4.60835542e-05,
3.68390573e-07,
1.80809186e-10,
2.14691708e-03,
-9.27062484e-06,
-1.78343643e-10,
4.76534122e-06,
1.63410736e-09,
5.30848875e-06,
-3.03175128e-16,
-1.27934137e-17,
1.00000000e+00 ]
def densmdjwf(s,theta,p):
"""
densmdjwf Density of sea water
=========================================================================
USAGE: dens = densmdjwf(s,theta,p)
DESCRIPTION:
Density of Sea Water using McDougall et al. 2003 (JAOT 20)
polynomial (Gibbs Potential).
INPUT: (all must have same dimensions)
S = salinity [psu (PSS-78)]
Theta = potential temperature [degree C (IPTS-68)]
P = pressure [dbar]
(P may have dims 1x1, mx1, 1xn or mxn for S(mxn) )
OUTPUT:
dens = density [kg/m^3]
AUTHOR: Martin Losch 2002-08-09 ([email protected])
check value
S = 35 PSU
Theta = 25 degC
P = 2000 dbar
rho = 1031.654229 kg/m^3
McDougall et al., 2003, JAOT 20(5), pp. 730-741
"""
# make sure arguments are floating point
s = np.asfarray(s)
t = np.asfarray(theta)
p = np.asfarray(p)
p1 = np.copy(p);
t1 = np.copy(t);
t2 = t*t;
s1 = np.copy(s);
if np.any(s1<0):
sys.stderr.write('negative salinity values! setting to nan\n')
# # the sqrt will take care of this
# if s.ndim > 0:
# s[s<0] = np.nan
# else:
# s = np.nan
sp5 = np.sqrt(s1)
p1t1=p1*t1
num = ( eosMDJWFnum[11]
+ t1*(eosMDJWFnum[0]
+ t1*(eosMDJWFnum[1] + eosMDJWFnum[2]*t1) )
+ s1*(eosMDJWFnum[3]
+ eosMDJWFnum[4]*t1 + eosMDJWFnum[5]*s1)
+ p1*(eosMDJWFnum[6] + eosMDJWFnum[7]*t2
+ eosMDJWFnum[8]*s1
+ p1*(eosMDJWFnum[9] + eosMDJWFnum[10]*t2) )
)
den = ( eosMDJWFden[12]
+ t1*(eosMDJWFden[0]
+ t1*(eosMDJWFden[1]
+ t1*(eosMDJWFden[2] + t1*eosMDJWFden[3] ) ) )
+ s1*(eosMDJWFden[4]
+ t1*(eosMDJWFden[5]
+ eosMDJWFden[6]*t2)
+ sp5*(eosMDJWFden[7] + eosMDJWFden[8]*t2) )
+ p1*(eosMDJWFden[9]
+ p1t1*(eosMDJWFden[10]*t2 + eosMDJWFden[11]*p1) )
)
epsln = 0
denom = 1.0/(epsln+den)
rho = num*denom;
return rho
# aliases
dens = densmdjwf
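# Minimal self-check against the reference value quoted in the docstring:
# rho(S=35 PSU, Theta=25 degC, P=2000 dbar) ~= 1031.654229 kg/m^3.
def _check_value():
    rho = densmdjwf(35., 25., 2000.)
    assert abs(rho - 1031.654229) < 1e-3, rho
    return rho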
|
the-stack_0_21429 | # Web - Calculator
nome_pagina = 'Calculadora'
from flask import Flask, render_template, request
from calculoExercicio import *
app = Flask(__name__)
@app.route('/')
def home():
    return render_template('home.html', titulo=nome_pagina)
@app.route('/calcular')
def calcular():
x = int(request.args['numero1'])
y = int(request.args['numero2'])
r_soma = soma(x,y)
r_sub = sub(x,y)
r_multi = multi(x,y)
r_div = div(x,y)
r_div_fra = div_fra(x,y)
r_rest_div = rest_div(x,y)
r_raiz = raiz(x,y)
resultados = {'soma':r_soma,'sub':r_sub,'multi':r_multi,'div':r_div,'div_fra':r_div_fra,'rest_div':r_rest_div,'raiz':r_raiz}
return render_template(
'resultado.html'
,x = x
,y = y
,resultados = resultados)
app.run()
|
the-stack_0_21430 | import click
from tabulate import tabulate
from . import database
@click.group()
def cli():
...
@cli.command()
def requests():
reqs = []
def update_requests():
nonlocal reqs
reqs = database.get_pending_requests()
update_requests()
if len(reqs) == 0:
click.secho('No pending requests!', fg='green')
return
dirty = []
def view():
update_requests()
count = len(reqs)
if count != 0:
print(tabulate(reqs, ['ID', 'Nicename', 'Timestamp'], tablefmt='pretty'))
if count == 1:
print(f'There is {count} pending request.')
else:
print(f'There are {count} pending requests.')
def approve(idx, *name):
idx = int(idx)
if idx not in (row[0] for row in reqs):
click.secho('Invalid index provided!', fg='red')
raise ValueError()
if len(name) == 0:
click.secho('Please provide a name!', fg='red')
raise ValueError()
name = ' '.join(name)
database.approve_request(idx, name)
dirty.append(f'approve {idx} {name}')
update_requests()
def reject(idx):
idx = int(idx)
if idx not in (row[0] for row in reqs):
click.secho('Invalid index provided!', fg='red')
raise ValueError()
database.reject_request(idx)
dirty.append(f'reject {idx}')
update_requests()
def purge():
database.purge_requests()
dirty.append(f'purge')
update_requests()
def diff():
if dirty:
[print(' - ' + stmt) for stmt in dirty]
else:
click.secho('No pending changes!')
def print_help():
print("""Available commands:
- view view all pending requests
- accept (or approve) accept a request by providing an id and a name
- reject (or deny) reject a request by id
- purge reject all pending requests
- diff (or history) view pending changes
- commit commit changes to db
- rollback rollback changes from db
* help print this help message
- exit exit the interactive CLI""")
view()
while True:
try:
cmd, *args = input(click.style('What would you like to do?\n> ', fg='cyan')).split()
if cmd == 'view':
view()
elif cmd in ('accept', 'approve'):
approve(*args)
elif cmd in ('reject', 'deny'):
reject(*args)
elif cmd == 'purge':
purge()
elif cmd in ('diff', 'history'):
diff()
elif cmd == 'commit':
if not dirty:
click.secho('No changes to commit!', fg='yellow')
continue
database.commit()
click.secho('Committed!', fg='green')
dirty = []
elif cmd == 'rollback':
if not dirty:
click.secho('No changes to rollback!', fg='yellow')
continue
database.rollback()
click.secho('Rolled back!', fg='green')
dirty = []
elif cmd == 'help':
print_help()
elif cmd == 'exit':
if dirty:
response = input('You have unsaved changes! Save them? [y/N] ').lower()
if response == 'y':
database.commit()
click.secho('Committed!', fg='green')
else:
database.rollback()
click.secho('Rolled back!', fg='green')
print('Goodbye!')
return
else:
raise ValueError()
except ValueError:
click.secho('Invalid input!', fg='red')
print()
if __name__ == "__main__":
cli()
|
the-stack_0_21431 |
# Copyright (C) 2020 GreenWaves Technologies, SAS
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import hashlib
from copy import deepcopy
import numpy as np
from graph.dim import Dim
from graph.types.activations import ActivationParameters
from graph.types.base import NNEdge
from graph.types.input_output import ConstantInputParameters
from graph.types.others import ReshapeParameters
from importer.common.provisional_dim import ProvisionalDim
from quantization.new_qrec import QRec
from quantization.qtype import QType
from utils.node_id import NodeId
from ..tflite_schema_head.ActivationFunctionType import ActivationFunctionType
from .handler import Handler
class BackendHandler(Handler):
""" This class is base backend handler class.
All backend operator handler class MUST inherit this class.
In backend, operator handler class's name should be pascal case of file name
which should be snake case.
Use ONNX operator name as class name.
"""
VAR_COUNT = 0
TF_ACTIVATIONS = {
ActivationFunctionType.RELU: "relu",
ActivationFunctionType.RELU6: "relu6",
ActivationFunctionType.SIGN_BIT: "sign_bit",
ActivationFunctionType.TANH: "tanh"
}
@classmethod
def _get_real_dim(cls, shape):
return np.array([elem for elem in shape if elem is not None])
@classmethod
def _get_real_dims(cls, dims):
return [cls._get_real_dim(dim.shape) for dim in dims]
@classmethod
def _verify_constant(cls, inp):
if cls._is_constant(inp):
return cls._get_constant(inp)
raise ValueError("expected node %s to be constant input" % inp[0].name)
@classmethod
def _is_constant(cls, inp):
return isinstance(inp[0], ConstantInputParameters)
@classmethod
def _get_constant(cls, inp):
return inp[0].value
@classmethod
def _slice_len(cls, vstart, vend, vstep):
if vstep < 0:
vstart, vend = vend, vstart
vstep = -vstep
return (vend - vstart - 1) // vstep + 1
@classmethod
def fuse_activation(cls, tfl_opts, name, params, **kwargs):
G = kwargs['G']
opts = kwargs['opts']
ext = hashlib.sha1(name.encode(
"UTF-8")).hexdigest()[:8] if opts.get('anonymise') else 'activation'
if opts.get('load_quantization') and NodeId(params) in G.quantization:
node_qrec = G.quantization[NodeId(params)]
else:
node_qrec = None
# if node_qrec is not None and None in node_qrec.in_qs + node_qrec.out_qs:
# # one of the input is a constant or strange behaviour -> may be is something fusions will get rid of
# return add_node(self.G, node)
aparams = None
if tfl_opts.FusedActivationFunction() == ActivationFunctionType.NONE:
if node_qrec is not None and node_qrec.ktype.startswith('scaled'): # and opts.get('insert_relus'):
# here we have no activation in an asymmetric qtype -> may be an omitted relu
if node_qrec.out_qs[0] is not None and node_qrec.out_qs[0].min_val == 0:
if np.all(np.round(node_qrec.out_qs[0].max_val) == 6):
aparams = ActivationParameters.get_activation(
'relu6', name + f"_{ext}")
else:
aparams = ActivationParameters.get_activation(
'relu', name + f"_{ext}")
else:
aparams = ActivationParameters.get_activation(cls.TF_ACTIVATIONS[tfl_opts.FusedActivationFunction()],
name + f"_{ext}")
if aparams:
G.add_edge(NNEdge(from_node=params, to_node=aparams))
if opts.get('load_quantization'):
# In between the fused operation and activation the
# transfer is in int32 representation
node_qrec = G.quantization[NodeId(params)]
ina_qtype = deepcopy(node_qrec.out_qs[0])
outa_qtype = deepcopy(ina_qtype)
G.quantization[NodeId(aparams)] = QRec.scaled(
in_qs=[ina_qtype], out_qs=[outa_qtype])
params = aparams
return params
@classmethod
def remove_unspecified_dim(cls, shape):
return [dim for dim in shape if dim is not None]
@classmethod
def get_all_const_inputs(cls, G, all_nodes, opts, node, params,
exclude=None, names=None,
short_names=None,
adjust_transposes=None,
load_quantization_if_present=False,
skip_empty_tensors=True):
if exclude is None:
exclude = []
if names is None:
names = [None] * len(node.inputs)
if short_names is None:
short_names = [None] * len(node.inputs)
if adjust_transposes is None:
            adjust_transposes = [None] * len(node.inputs)
const_params = []
# TODO - this should just be picking up the existing constant nodes not creating new ones.
for idx, tensor in enumerate(node.input):
if tensor is None or idx in exclude or (skip_empty_tensors and not tensor.is_constant):
const_params.append(None)
continue
tensor.used = True
if tensor not in all_nodes:
# this can occur for RNN/LSTM state nodes that have a buffer idx of 0
const_param = ConstantInputParameters(
tensor.name,
dims=Dim.unnamed(tensor.shape),
value=tensor.value,
constant_store=G.constant_store)
all_nodes[tensor] = (
const_param,
0,
ProvisionalDim.from_tflite_shape(tensor.shape)
)
else:
const_param = all_nodes[tensor][0]
# some constant nodes can be connected to multiple nodes
# changing their name is not a good idea
if const_param not in G.nodes():
const_param.name = names[idx]
const_param.adjust_transpose = adjust_transposes[idx]
const_param.is_mutated = node.is_mutated(idx)
const_param.is_intermediate = node.is_intermediate(idx)
const_param.short_name = short_names[idx]
const_param.value = np.reshape(tensor.value, tensor.shape)
if opts.get('load_quantization'):
G.quantization[NodeId(const_param)] = QRec.scaled(
in_qs=[tensor.qtype],
out_qs=[tensor.qtype])
if load_quantization_if_present and tensor.qtype:
const_param.value_quantization = tensor.qtype
const_params.append(const_param)
G.add_edge(NNEdge(const_param, params, to_idx=idx))
return const_params
@classmethod
def remove_none_from_constants(cls, inputs, model):
if None not in model:
return
for inp in inputs:
if not isinstance(inp[0], ConstantInputParameters):
continue
val = inp[0].value
if val is None or len(val.shape) != len(model):
continue
assert all(val.shape[idx] == 1 for idx, dim in enumerate(model) if dim is None),\
"value has axis that is larger than one in an unknown dimension"
new_shape = [dim for idx, dim in enumerate(
val.shape) if model[idx] is not None]
inp[0].value = np.reshape(inp[0].value, new_shape)
inp[0].dims = Dim.unnamed(new_shape)
@classmethod
def convert_to_symmetric(cls, qtypes):
return [QType.from_min_max_sq(qtype.min_val, qtype.max_val)
if qtype is not None and (qtype.asymmetric or not qtype.signed) else qtype for qtype in qtypes]
@classmethod
def load_tf_quantization(cls, input_tensors, output_tensors, in_qs=None, out_qs=None, qrec_class=None):
if qrec_class is None:
qrec = QRec.scaled(
in_qs=cls.convert_to_symmetric(
in_qs if in_qs is not None else [tensor.qtype if tensor is not None else None for tensor in input_tensors]),
out_qs=cls.convert_to_symmetric(
out_qs if out_qs is not None else [tensor.qtype for tensor in output_tensors]))
else:
qrec = qrec_class(
in_qs=cls.convert_to_symmetric(
in_qs if in_qs is not None else [tensor.qtype if tensor is not None else None for tensor in input_tensors]),
out_qs=cls.convert_to_symmetric(
out_qs if out_qs is not None else [tensor.qtype for tensor in output_tensors]))
return qrec
@classmethod
def remove_known_batch_dimension(cls, G, x, node, batch_axis=0):
x_shape = x[2].shape
if x_shape[batch_axis] is not None:
if x_shape[0] > 1:
raise ValueError(
f'multi batch (n={x_shape[batch_axis]}) operations are not supported by {node.name}')
rparams = ReshapeParameters(
f'{node.name}_batch',
old_shape=Dim.unnamed(x_shape),
shape=Dim.unnamed(x_shape[0:batch_axis:]+x_shape[batch_axis+1::]))
if G.quantization:
qrec = G.quantization[NodeId(x[0])]
G.quantization[NodeId(rparams)] = QRec.copy_ktype(
qrec,
in_qs=[qrec.out_qs[0]],
out_qs=[qrec.out_qs[0]])
G.add_edge(
NNEdge(from_node=x[0], to_node=rparams, from_idx=x[1], to_idx=0))
return (rparams, 0, ProvisionalDim(x_shape[0:batch_axis:]+[None]+x_shape[batch_axis+1::]))
else:
return x
|
the-stack_0_21432 | # coding: utf-8
"""
Purity//FB REST Client
Client for Purity//FB REST API, developed by [Pure Storage, Inc](http://www.purestorage.com/). Documentations can be found at [purity-fb.readthedocs.io](http://purity-fb.readthedocs.io/).
OpenAPI spec version: 1.4
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class ArrayResponse(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'pagination_info': 'PaginationInfo',
'items': 'list[PureArray]'
}
attribute_map = {
'pagination_info': 'pagination_info',
'items': 'items'
}
def __init__(self, pagination_info=None, items=None):
"""
ArrayResponse - a model defined in Swagger
"""
self._pagination_info = None
self._items = None
if pagination_info is not None:
self.pagination_info = pagination_info
if items is not None:
self.items = items
@property
def pagination_info(self):
"""
Gets the pagination_info of this ArrayResponse.
pagination information, only available in GET requests
:return: The pagination_info of this ArrayResponse.
:rtype: PaginationInfo
"""
return self._pagination_info
@pagination_info.setter
def pagination_info(self, pagination_info):
"""
Sets the pagination_info of this ArrayResponse.
pagination information, only available in GET requests
:param pagination_info: The pagination_info of this ArrayResponse.
:type: PaginationInfo
"""
self._pagination_info = pagination_info
@property
def items(self):
"""
Gets the items of this ArrayResponse.
a list of array objects
:return: The items of this ArrayResponse.
:rtype: list[PureArray]
"""
return self._items
@items.setter
def items(self, items):
"""
Sets the items of this ArrayResponse.
a list of array objects
:param items: The items of this ArrayResponse.
:type: list[PureArray]
"""
self._items = items
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, ArrayResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
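# Hypothetical sketch: both attributes of this generated model are optional,
# so it can be instantiated empty and serialized with to_dict(); in practice
# instances are built by the API client deserializer rather than by hand.
def _example_array_response():
    response = ArrayResponse()
    return response.to_dict()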
|
the-stack_0_21433 | import torch
from tqdm import tqdm
from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize
import torchvision
import BigGAN_utils.utils as utils
import clip.clip
import torch.nn.functional as F
from DiffAugment_pytorch import DiffAugment
import numpy as np
import lpips
import wandb
LATENT_NOISE = 0.01
Z_THRES = 2.0
POLICY = 'color,translation,resize,cutout'
TEST_POLICY = 'color,translation,resize,cutout'
def ReconstructionLoss(img, target, clip_model, mean, std, interp_mode, loss_type, lpips=None):
if loss_type == "reconstruct_l2":
loss = torch.sum((img - target)**2, dim=[1, 2, 3])
loss = torch.sqrt(loss)
mag = torch.sqrt(torch.sum((target)**2, dim=(1,2,3)))
loss = loss / mag
elif loss_type == "reconstruct_clip":
img = (img+1.)/2.
img = F.interpolate(img, size=224, mode=interp_mode)
img.sub_(mean[None, :, None, None]).div_(std[None, :, None, None])
target = (target+1.)/2.
target = F.interpolate(target, size=224, mode=interp_mode)
target.sub_(mean[None, :, None, None]).div_(std[None, :, None, None])
img_encoding = clip_model.encode_image(img)
target_encoding = clip_model.encode_image(target)
loss = -torch.sum(img_encoding * target_encoding, -1) / (torch.norm(img_encoding) * torch.norm(target_encoding))
elif loss_type == "reconstruct_lpips":
loss = lpips(img, target)
else:
raise ValueError("Invalid loss type specified")
return loss
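# Minimal sketch of the plain L2 branch on dummy tensors.  For
# loss_type="reconstruct_l2" the CLIP model, normalization stats and LPIPS
# arguments are not used, so None placeholders are passed for them here.
def _demo_reconstruction_l2():
    img = torch.randn(1, 3, 256, 256)
    target = torch.randn(1, 3, 256, 256)
    return ReconstructionLoss(img, target, None, None, None, 'bilinear', "reconstruct_l2")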
def AugmentLoss(img, clip_model, text, mean, std, replicate=10, interp_mode='bilinear', policy=POLICY):
clip_c = clip_model.logit_scale.exp()
img_aug = DiffAugment(img.repeat(replicate, 1, 1, 1), policy=policy)
img_aug = (img_aug+1.)/2.
img_aug = F.interpolate(img_aug, size=224, mode=interp_mode)
img_aug.sub_(mean[None, :, None, None]).div_(std[None, :, None, None])
logits_per_image, logits_per_text = clip_model(img_aug, text)
logits_per_image = logits_per_image / clip_c
concept_loss = (-1.) * logits_per_image
return concept_loss.mean(dim=0, keepdim=False)
def NaiveSemanticLoss(img, clip_model, text, mean, std, interp_mode='bilinear'):
clip_c = clip_model.logit_scale.exp()
img = (img+1.)/2.
img = F.interpolate(img, size=224, mode=interp_mode)
img.sub_(mean[None, :, None, None]).div_(std[None, :, None, None])
logits_per_image, logits_per_text = clip_model(img, text)
logits_per_image = logits_per_image / clip_c
concept_loss = (-1.) * logits_per_image
return concept_loss.mean(dim=0, keepdim=False)
def get_gaussian_mask(size=256):
x, y = np.meshgrid(np.linspace(-1,1, size), np.linspace(-1,1,size))
dst = np.sqrt(x*x+y*y)
# Intializing sigma and muu
sigma = 1
muu = 0.000
# Calculating Gaussian array
gauss = np.exp(-( (dst-muu)**2 / ( 2.0 * sigma**2 ) ) )
return gauss
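# Hypothetical sketch: the Gaussian mask can be used to radially weight an
# image-sized numpy array (single channel shown; broadcasting over channels
# would need an extra axis).  It is not used by the optimization loop below.
def _demo_gaussian_mask(size=256):
    mask = get_gaussian_mask(size)      # (size, size), peak value 1.0 at the centre
    img = np.random.rand(size, size)    # dummy single-channel image
    return img * mask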
def save_image(img, path, n_per_row=1):
with torch.no_grad():
torchvision.utils.save_image(
torch.from_numpy(img.cpu().numpy()), ##hack, to turn Distribution back to tensor
path,
nrow=n_per_row,
normalize=True,
)
def get_G(resolution=256, device="cuda:0"):
if resolution == 256:
parser = utils.prepare_parser()
parser = utils.add_sample_parser(parser)
config = vars(parser.parse_args())
# See: https://github.com/ajbrock/BigGAN-PyTorch/blob/master/scripts/sample_BigGAN_bs256x8.sh.
config["resolution"] = utils.imsize_dict["I128_hdf5"]
config["n_classes"] = utils.nclass_dict["I128_hdf5"]
config["G_activation"] = utils.activation_dict["inplace_relu"]
config["D_activation"] = utils.activation_dict["inplace_relu"]
config["G_attn"] = "128"
config["D_attn"] = "128"
config["G_ch"] = 96
config["D_ch"] = 96
config["hier"] = True
config["dim_z"] = 140
config["shared_dim"] = 128
config["G_shared"] = True
config = utils.update_config_roots(config)
config["skip_init"] = True
config["no_optim"] = True
config["device"] = device
config["resolution"] = 256
# Set up cudnn.benchmark for free speed.
torch.backends.cudnn.benchmark = True
# Import the model.
model = __import__(config["model"])
G = model.Generator(**config).to(config["device"])
utils.count_parameters(G)
# Load weights.
weights_path = "./BigGAN_utils/weights/biggan-256.pth" # Change this.
G.load_state_dict(torch.load(weights_path), strict=False)
elif resolution == 512:
parser = utils.prepare_parser()
parser = utils.add_sample_parser(parser)
config = vars(parser.parse_args())
# See: https://github.com/ajbrock/BigGAN-PyTorch/blob/master/scripts/sample_BigGAN_bs128x8.sh.
config["resolution"] = 512
config["n_classes"] = utils.nclass_dict["I128_hdf5"]
config["G_activation"] = utils.activation_dict["inplace_relu"]
config["D_activation"] = utils.activation_dict["inplace_relu"]
config["G_attn"] = "64"
config["D_attn"] = "64"
config["G_ch"] = 96
config["D_ch"] = 64
config["hier"] = True
config["dim_z"] = 128
config["shared_dim"] = 128
config["G_shared"] = True
config = utils.update_config_roots(config)
config["skip_init"] = True
config["no_optim"] = True
config["device"] = device
# Set up cudnn.benchmark for free speed.
torch.backends.cudnn.benchmark = True
# Import the model.
model = __import__(config["model"])
#print(config["model"])
G = model.Generator(**config).to(config["device"])
utils.count_parameters(G)
#print('G parameters:')
#for p, m in G.named_parameters():
# print(p)
# Load weights.
weights_path = "./BigGAN_utils/weights/biggan-512.pth" # Change this.
G.load_state_dict(torch.load(weights_path), strict=False)
return G, config
class FuseDreamBaseGenerator():
def __init__(self, G, G_config, G_batch_size=10, device="cuda:0", clip_mode="ViT-B/32", interp_mode='bilinear', use_wandb=False, target=None, loss_type="clip"):
self.device = device
self.G = G
self.clip_model, _ = clip.load(clip_mode, device=device)
self.mean = torch.tensor([0.48145466, 0.4578275, 0.40821073]).cuda(device=device)
self.std = torch.tensor([0.26862954, 0.26130258, 0.27577711]).cuda(device=device)
self.use_wandb = use_wandb
self.target = target
self.loss_type = loss_type
if self.loss_type == "reconstruct_lpips":
self.lpips = lpips.LPIPS(net='vgg').to(device)
else:
self.lpips = None
(self.z_, self.y_) = utils.prepare_z_y(
G_batch_size,
self.G.dim_z,
G_config["n_classes"],
device=device,
fp16=G_config["G_fp16"],
z_var=G_config["z_var"],
)
self.G.eval()
for p in self.G.parameters():
p.requires_grad = False
for p in self.clip_model.parameters():
p.requires_grad = False
self.interp_mode = interp_mode
def generate_basis(self, text, init_iters=500, num_basis=5):
text_tok = clip.tokenize([text]).to(self.device)
clip_c = self.clip_model.logit_scale.exp()
z_init_cllt = []
y_init_cllt = []
z_init = None
y_init = None
score_init = None
with torch.no_grad():
for i in tqdm(range(init_iters)):
self.z_.sample_()
self.y_.sample_()
self.z_.data = torch.clamp(self.z_.data.detach().clone(), min=-Z_THRES, max=Z_THRES)
image_tensors = self.G(self.z_, self.G.shared(self.y_))
if "reconstruct" in self.loss_type and False:
logits_per_image = -ReconstructionLoss(image_tensors, self.target, self.clip_model, self.mean,
self.std, self.interp_mode, self.loss_type)
else:
image_tensors = (image_tensors+1.) / 2.
image_tensors = F.interpolate(image_tensors, size=224, mode=self.interp_mode)
image_tensors.sub_(self.mean[None, :, None, None]).div_(self.std[None, :, None, None])
logits_per_image, logits_per_text = self.clip_model(image_tensors, text_tok)
logits_per_image = logits_per_image/clip_c
if z_init is None:
z_init = self.z_.data.detach().clone()
y_init = self.y_.data.detach().clone()
score_init = logits_per_image.squeeze()
else:
z_init = torch.cat([z_init, self.z_.data.detach().clone()], dim=0)
y_init = torch.cat([y_init, self.y_.data.detach().clone()], dim=0)
score_init = torch.cat([score_init, logits_per_image.squeeze()])
sorted, indices = torch.sort(score_init, descending=True)
z_init = z_init[indices]
y_init = y_init[indices]
score_init = score_init[indices]
z_init = z_init[:num_basis]
y_init = y_init[:num_basis]
score_init = score_init[:num_basis]
#save_image(self.G(z_init, self.G.shared(y_init)), 'samples/init_%s.png'%text, 1)
z_init_cllt.append(z_init.detach().clone())
y_init_cllt.append(self.G.shared(y_init.detach().clone()))
return z_init_cllt, y_init_cllt
def optimize_clip_score(self, z_init_cllt, y_init_cllt, text, latent_noise=False, augment=True, opt_iters=500, optimize_y=False):
text_tok = clip.tokenize([text]).to(self.device)
clip_c = self.clip_model.logit_scale.exp()
z_init_ans = torch.stack(z_init_cllt)
y_init_ans = torch.stack(y_init_cllt)
z_init_ans = z_init_ans.view(-1, z_init_ans.shape[-1])
y_init_ans = y_init_ans.view(-1, y_init_ans.shape[-1])
w_z = torch.randn((z_init_ans.shape[0], z_init_ans.shape[1])).to(self.device)
w_y = torch.randn((y_init_ans.shape[0], y_init_ans.shape[1])).to(self.device)
w_z.requires_grad = True
w_y.requires_grad = True
opt_y = torch.zeros(y_init_ans.shape).to(self.device)
opt_y.data = y_init_ans.data.detach().clone()
opt_z = torch.zeros(z_init_ans.shape).to(self.device)
opt_z.data = z_init_ans.data.detach().clone()
opt_z.requires_grad = True
if not optimize_y:
optimizer = torch.optim.Adam([w_z, w_y, opt_z], lr=5e-3, weight_decay=0.0)
else:
opt_y.requires_grad = True
optimizer = torch.optim.Adam([w_z, w_y,opt_y,opt_z], lr=5e-3, weight_decay=0.0)
for i in tqdm(range(opt_iters)):
#print(w_z.shape, w_y.shape)
optimizer.zero_grad()
if not latent_noise:
s_z = torch.softmax(w_z, dim=0)
s_y = torch.softmax(w_y, dim=0)
#print(s_z)
cur_z = s_z * opt_z
cur_y = s_y * opt_y
cur_z = cur_z.sum(dim=0, keepdim=True)
cur_y = cur_y.sum(dim=0, keepdim=True)
image_tensors = self.G(cur_z, cur_y)
else:
s_z = torch.softmax(w_z, dim=0)
s_y = torch.softmax(w_y, dim=0)
cur_z = s_z * opt_z
cur_y = s_y * opt_y
cur_z = cur_z.sum(dim=0, keepdim=True)
cur_y = cur_y.sum(dim=0, keepdim=True)
cur_z_aug = cur_z + torch.randn(cur_z.shape).to(cur_z.device) * LATENT_NOISE
cur_y_aug = cur_y + torch.randn(cur_y.shape).to(cur_y.device) * LATENT_NOISE
image_tensors = self.G(cur_z_aug, cur_y_aug)
loss = 0.0
for j in range(image_tensors.shape[0]):
if "reconstruct" in self.loss_type or self.loss_type == "joint":
recons_loss = ReconstructionLoss(image_tensors[j:(j+1)], self.target, self.clip_model, self.mean, self.std, self.interp_mode, self.loss_type, self.lpips)
recons_loss = torch.mean(recons_loss, axis=0)
else:
recons_loss = 0.0
if self.loss_type == "clip" or self.loss_type == "joint":
if augment:
clip_loss = AugmentLoss(image_tensors[j:(j+1)], self.clip_model, text_tok, self.mean, self.std, replicate=50, interp_mode=self.interp_mode)
else:
clip_loss = NaiveSemanticLoss(image_tensors[j:(j+1)], self.clip_model, text_tok, self.mean, self.std)
else:
clip_loss = 0.0
loss = loss + clip_loss + recons_loss
loss.backward()
optimizer.step()
if self.use_wandb:
wandb.log({"loss":loss, "clip_loss":clip_loss, "recons_loss":recons_loss}, step=i)
opt_z.data = torch.clamp(opt_z.data.detach().clone(), min=-Z_THRES, max=Z_THRES)
z_init_ans = cur_z.detach().clone()
y_init_ans = cur_y.detach().clone()
#save_image(self.G(z_init_ans, y_init_ans), 'samples/opt_%s.png'%text, 1)
return self.G(z_init_ans, y_init_ans), z_init_ans, y_init_ans
def measureAugCLIP(self, z, y, text, augment=False, num_samples=20):
text_tok = clip.tokenize([text]).to(self.device)
avg_loss = 0.0
for itr in range(num_samples):
image_tensors = self.G(z, y)
for j in range(image_tensors.shape[0]):
if augment:
loss = AugmentLoss(image_tensors[j:(j+1)], self.clip_model, text_tok, self.mean, self.std, replicate=50, interp_mode=self.interp_mode, policy=TEST_POLICY)
else:
loss = NaiveSemanticLoss(image_tensors[j:(j+1)], self.clip_model, text_tok, self.mean, self.std)
avg_loss += loss.item()
avg_loss /= num_samples
return avg_loss * (-1.)
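# Hypothetical end-to-end sketch of how this module is intended to be used;
# the prompt, iteration counts and output file name below are assumptions and
# are not taken from the original code.
if __name__ == "__main__":
    sentence = "A photo of a blue dog."
    G, config = get_G(resolution=256, device="cuda:0")
    generator = FuseDreamBaseGenerator(G, config, G_batch_size=10, device="cuda:0")
    z_cllt, y_cllt = generator.generate_basis(sentence, init_iters=500, num_basis=5)
    img, z, y = generator.optimize_clip_score(z_cllt, y_cllt, sentence,
                                              latent_noise=True, augment=True, opt_iters=500)
    score = generator.measureAugCLIP(z, y, sentence, augment=True, num_samples=20)
    save_image(img, "fusedream_sample.png")
    print("AugCLIP score:", score)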
|
the-stack_0_21435 | import json
import os
import re
import sys
import time
import unittest
import click
import frappe
import requests
from .test_runner import (SLOW_TEST_THRESHOLD, make_test_records, set_test_email_config)
click_ctx = click.get_current_context(True)
if click_ctx:
click_ctx.color = True
class ParallelTestRunner():
def __init__(self, app, site, build_number=1, total_builds=1):
self.app = app
self.site = site
self.build_number = frappe.utils.cint(build_number) or 1
self.total_builds = frappe.utils.cint(total_builds)
self.setup_test_site()
self.run_tests()
def setup_test_site(self):
frappe.init(site=self.site)
if not frappe.db:
frappe.connect()
frappe.flags.in_test = True
frappe.clear_cache()
frappe.utils.scheduler.disable_scheduler()
set_test_email_config()
self.before_test_setup()
def before_test_setup(self):
start_time = time.time()
for fn in frappe.get_hooks("before_tests", app_name=self.app):
frappe.get_attr(fn)()
test_module = frappe.get_module(f'{self.app}.tests')
if hasattr(test_module, "global_test_dependencies"):
for doctype in test_module.global_test_dependencies:
make_test_records(doctype)
elapsed = time.time() - start_time
elapsed = click.style(f' ({elapsed:.03}s)', fg='red')
click.echo(f'Before Test {elapsed}')
def run_tests(self):
self.test_result = ParallelTestResult(stream=sys.stderr, descriptions=True, verbosity=2)
for test_file_info in self.get_test_file_list():
self.run_tests_for_file(test_file_info)
self.print_result()
def run_tests_for_file(self, file_info):
if not file_info: return
frappe.set_user('Administrator')
path, filename = file_info
module = self.get_module(path, filename)
self.create_test_dependency_records(module, path, filename)
test_suite = unittest.TestSuite()
module_test_cases = unittest.TestLoader().loadTestsFromModule(module)
test_suite.addTest(module_test_cases)
test_suite(self.test_result)
def create_test_dependency_records(self, module, path, filename):
if hasattr(module, "test_dependencies"):
for doctype in module.test_dependencies:
make_test_records(doctype)
if os.path.basename(os.path.dirname(path)) == "doctype":
# test_data_migration_connector.py > data_migration_connector.json
test_record_filename = re.sub('^test_', '', filename).replace(".py", ".json")
test_record_file_path = os.path.join(path, test_record_filename)
if os.path.exists(test_record_file_path):
with open(test_record_file_path, 'r') as f:
doc = json.loads(f.read())
doctype = doc["name"]
make_test_records(doctype)
def get_module(self, path, filename):
app_path = frappe.get_pymodule_path(self.app)
relative_path = os.path.relpath(path, app_path)
if relative_path == '.':
module_name = self.app
else:
relative_path = relative_path.replace('/', '.')
module_name = os.path.splitext(filename)[0]
module_name = f'{self.app}.{relative_path}.{module_name}'
return frappe.get_module(module_name)
def print_result(self):
self.test_result.printErrors()
click.echo(self.test_result)
if self.test_result.failures or self.test_result.errors:
if os.environ.get('CI'):
sys.exit(1)
def get_test_file_list(self):
test_list = get_all_tests(self.app)
split_size = frappe.utils.ceil(len(test_list) / self.total_builds)
		# [1,2,3,4,5,6] to [[1,2], [3,4], [5,6]] if split_size is 2
test_chunks = [test_list[x:x+split_size] for x in range(0, len(test_list), split_size)]
return test_chunks[self.build_number - 1]
class ParallelTestResult(unittest.TextTestResult):
def startTest(self, test):
self._started_at = time.time()
super(unittest.TextTestResult, self).startTest(test)
test_class = unittest.util.strclass(test.__class__)
if not hasattr(self, 'current_test_class') or self.current_test_class != test_class:
click.echo(f"\n{unittest.util.strclass(test.__class__)}")
self.current_test_class = test_class
def getTestMethodName(self, test):
return test._testMethodName if hasattr(test, '_testMethodName') else str(test)
def addSuccess(self, test):
super(unittest.TextTestResult, self).addSuccess(test)
elapsed = time.time() - self._started_at
threshold_passed = elapsed >= SLOW_TEST_THRESHOLD
elapsed = click.style(f' ({elapsed:.03}s)', fg='red') if threshold_passed else ''
click.echo(f" {click.style(' โ ', fg='green')} {self.getTestMethodName(test)}{elapsed}")
def addError(self, test, err):
super(unittest.TextTestResult, self).addError(test, err)
click.echo(f" {click.style(' โ ', fg='red')} {self.getTestMethodName(test)}")
def addFailure(self, test, err):
super(unittest.TextTestResult, self).addFailure(test, err)
click.echo(f" {click.style(' โ ', fg='red')} {self.getTestMethodName(test)}")
def addSkip(self, test, reason):
super(unittest.TextTestResult, self).addSkip(test, reason)
click.echo(f" {click.style(' = ', fg='white')} {self.getTestMethodName(test)}")
def addExpectedFailure(self, test, err):
super(unittest.TextTestResult, self).addExpectedFailure(test, err)
click.echo(f" {click.style(' โ ', fg='red')} {self.getTestMethodName(test)}")
def addUnexpectedSuccess(self, test):
super(unittest.TextTestResult, self).addUnexpectedSuccess(test)
click.echo(f" {click.style(' โ ', fg='green')} {self.getTestMethodName(test)}")
def printErrors(self):
click.echo('\n')
self.printErrorList(' ERROR ', self.errors, 'red')
self.printErrorList(' FAIL ', self.failures, 'red')
def printErrorList(self, flavour, errors, color):
for test, err in errors:
click.echo(self.separator1)
click.echo(f"{click.style(flavour, bg=color)} {self.getDescription(test)}")
click.echo(self.separator2)
click.echo(err)
def __str__(self):
return f"Tests: {self.testsRun}, Failing: {len(self.failures)}, Errors: {len(self.errors)}"
def get_all_tests(app):
test_file_list = []
for path, folders, files in os.walk(frappe.get_pymodule_path(app)):
for dontwalk in ('locals', '.git', 'public', '__pycache__'):
if dontwalk in folders:
folders.remove(dontwalk)
# for predictability
folders.sort()
files.sort()
if os.path.sep.join(["doctype", "doctype", "boilerplate"]) in path:
# in /doctype/doctype/boilerplate/
continue
for filename in files:
if filename.startswith("test_") and filename.endswith(".py") \
and filename != 'test_runner.py':
test_file_list.append([path, filename])
return test_file_list
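# Hypothetical illustration of the chunking done in
# ParallelTestRunner.get_test_file_list: the full test list is split into
# `total_builds` roughly equal chunks and each build runs the chunk matching
# its (1-based) build number.  Pure-Python sketch, not used by the runner.
def _example_test_chunking(test_list, total_builds, build_number):
	import math
	split_size = math.ceil(len(test_list) / total_builds)
	chunks = [test_list[x:x + split_size] for x in range(0, len(test_list), split_size)]
	return chunks[build_number - 1]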
class ParallelTestWithOrchestrator(ParallelTestRunner):
'''
This can be used to balance-out test time across multiple instances
This is dependent on external orchestrator which returns next test to run
orchestrator endpoints
- register-instance (<build_id>, <instance_id>, test_spec_list)
- get-next-test-spec (<build_id>, <instance_id>)
- test-completed (<build_id>, <instance_id>)
'''
def __init__(self, app, site):
self.orchestrator_url = os.environ.get('ORCHESTRATOR_URL')
if not self.orchestrator_url:
click.echo('ORCHESTRATOR_URL environment variable not found!')
click.echo('Pass public URL after hosting https://github.com/frappe/test-orchestrator')
sys.exit(1)
self.ci_build_id = os.environ.get('CI_BUILD_ID')
self.ci_instance_id = os.environ.get('CI_INSTANCE_ID') or frappe.generate_hash(length=10)
if not self.ci_build_id:
click.echo('CI_BUILD_ID environment variable not found!')
sys.exit(1)
ParallelTestRunner.__init__(self, app, site)
def run_tests(self):
self.test_status = 'ongoing'
self.register_instance()
super().run_tests()
def get_test_file_list(self):
while self.test_status == 'ongoing':
yield self.get_next_test()
def register_instance(self):
test_spec_list = get_all_tests(self.app)
response_data = self.call_orchestrator('register-instance', data={
'test_spec_list': test_spec_list
})
self.is_master = response_data.get('is_master')
def get_next_test(self):
response_data = self.call_orchestrator('get-next-test-spec')
self.test_status = response_data.get('status')
return response_data.get('next_test')
def print_result(self):
self.call_orchestrator('test-completed')
return super().print_result()
def call_orchestrator(self, endpoint, data=None):
if data is None:
data = {}
# add repo token header
# build id in header
headers = {
'CI-BUILD-ID': self.ci_build_id,
'CI-INSTANCE-ID': self.ci_instance_id,
'REPO-TOKEN': '2948288382838DE'
}
url = f'{self.orchestrator_url}/{endpoint}'
res = requests.get(url, json=data, headers=headers)
res.raise_for_status()
response_data = {}
if 'application/json' in res.headers.get('content-type'):
response_data = res.json()
return response_data
|
the-stack_0_21436 | """
CryptoAPIs
Crypto APIs 2.0 is a complex and innovative infrastructure layer that radically simplifies the development of any Blockchain and Crypto related applications. Organized around REST, Crypto APIs 2.0 can assist both novice Bitcoin/Ethereum enthusiasts and crypto experts with the development of their blockchain applications. Crypto APIs 2.0 provides unified endpoints and data, raw data, automatic tokens and coins forwardings, callback functionalities, and much more. # noqa: E501
The version of the OpenAPI document: 2.0.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from cryptoapis.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
class NewUnconfirmedTokensTransactionsRequestBodyDataItem(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'address': (str,), # noqa: E501
'callback_url': (str,), # noqa: E501
'allow_duplicates': (bool,), # noqa: E501
'callback_secret_key': (str,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'address': 'address', # noqa: E501
'callback_url': 'callbackUrl', # noqa: E501
'allow_duplicates': 'allowDuplicates', # noqa: E501
'callback_secret_key': 'callbackSecretKey', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, address, callback_url, *args, **kwargs): # noqa: E501
"""NewUnconfirmedTokensTransactionsRequestBodyDataItem - a model defined in OpenAPI
Args:
address (str): Represents the address of the transaction, per which the result is returned.
callback_url (str): Represents the URL that is set by the customer where the callback will be received at. The callback notification will be received only if and when the event occurs.
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
allow_duplicates (bool): Specifies a flag that permits or denies the creation of duplicate addresses.. [optional] if omitted the server will use the default value of False # noqa: E501
callback_secret_key (str): Represents the Secret Key value provided by the customer. This field is used for security purposes during the callback notification, in order to prove the sender of the callback as Crypto APIs.. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.address = address
self.callback_url = callback_url
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
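# Hypothetical sketch: the only required fields of this generated model are
# `address` and `callback_url`; the values below are placeholders, not a real
# address or callback endpoint.
def _example_item():
    return NewUnconfirmedTokensTransactionsRequestBodyDataItem(
        address="example-address",
        callback_url="https://example.com/callback",
    )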
|
the-stack_0_21437 | from pyxer.base import *
if GAE:
import urllib
from google.appengine.api import urlfetch
else:
import urllib2, urllib
"""
Adapted from http://pypi.python.org/pypi/recaptcha-client
to use with Google App Engine
by Joscha Feth <[email protected]>
Version 0.1
"""
API_SSL_SERVER ="https://api-secure.recaptcha.net"
API_SERVER ="http://api.recaptcha.net"
VERIFY_SERVER ="api-verify.recaptcha.net"
class RecaptchaResponse(object):
def __init__(self, is_valid, error_code=None):
self.is_valid = is_valid
self.error_code = error_code
def displayhtml (public_key,
use_ssl = False,
error = None):
"""Gets the HTML to display for reCAPTCHA
public_key -- The public api key
use_ssl -- Should the request be sent over ssl?
error -- An error message to display (from RecaptchaResponse.error_code)"""
error_param = ''
if error:
error_param = '&error=%s' % error
if use_ssl:
server = API_SSL_SERVER
else:
server = API_SERVER
return """<script type="text/javascript" src="%(ApiServer)s/challenge?k=%(PublicKey)s%(ErrorParam)s"></script>
<noscript>
<iframe src="%(ApiServer)s/noscript?k=%(PublicKey)s%(ErrorParam)s" height="300" width="500" frameborder="0"></iframe><br />
<textarea name="recaptcha_challenge_field" rows="3" cols="40"></textarea>
<input type='hidden' name='recaptcha_response_field' value='manual_challenge' />
</noscript>
""" % {
'ApiServer' : server,
'PublicKey' : public_key,
'ErrorParam' : error_param,
}
def submit (recaptcha_challenge_field,
recaptcha_response_field,
private_key,
remoteip):
"""
Submits a reCAPTCHA request for verification. Returns RecaptchaResponse
for the request
recaptcha_challenge_field -- The value of recaptcha_challenge_field from the form
recaptcha_response_field -- The value of recaptcha_response_field from the form
private_key -- your reCAPTCHA private key
remoteip -- the user's ip address
"""
if not (recaptcha_response_field and recaptcha_challenge_field and
len (recaptcha_response_field) and len (recaptcha_challenge_field)):
return RecaptchaResponse (is_valid = False, error_code = 'incorrect-captcha-sol')
def encode_if_necessary(s):
if isinstance(s, unicode):
return s.encode('utf-8')
return s
params = urllib.urlencode ({
'privatekey': encode_if_necessary(private_key),
'remoteip' : encode_if_necessary(remoteip),
'challenge': encode_if_necessary(recaptcha_challenge_field),
'response' : encode_if_necessary(recaptcha_response_field),
})
request = urllib2.Request (
url = "http://%s/verify" % VERIFY_SERVER,
data = params,
headers = {
"Content-type": "application/x-www-form-urlencoded",
"User-agent": "reCAPTCHA Python"
}
)
httpresp = urllib2.urlopen (request)
return_values = httpresp.read ().splitlines ();
httpresp.close();
return_code = return_values [0]
if (return_code == "true"):
return RecaptchaResponse (is_valid=True)
else:
return RecaptchaResponse (is_valid=False, error_code = return_values [1])
def submit_gae (recaptcha_challenge_field,
recaptcha_response_field,
private_key,
remoteip):
"""
Submits a reCAPTCHA request for verification. Returns RecaptchaResponse
for the request
recaptcha_challenge_field -- The value of recaptcha_challenge_field from the form
recaptcha_response_field -- The value of recaptcha_response_field from the form
private_key -- your reCAPTCHA private key
remoteip -- the user's ip address
"""
if not (recaptcha_response_field and recaptcha_challenge_field and
len (recaptcha_response_field) and len (recaptcha_challenge_field)):
return RecaptchaResponse (is_valid = False, error_code = 'incorrect-captcha-sol')
headers = {
'Content-type': 'application/x-www-form-urlencoded',
"User-agent" : "reCAPTCHA GAE Python"
}
params = urllib.urlencode ({
'privatekey': private_key,
'remoteip' : remoteip,
'challenge': recaptcha_challenge_field,
'response' : recaptcha_response_field,
})
httpresp = urlfetch.fetch(
url = "http://%s/verify" % VERIFY_SERVER,
payload = params,
method = urlfetch.POST,
headers = headers
)
if httpresp.status_code == 200:
# response was fine
# get the return values
return_values = httpresp.content.splitlines();
# get the return code (true/false)
return_code = return_values[0]
if return_code == "true":
# yep, filled perfectly
return RecaptchaResponse (is_valid=True)
else:
# nope, something went wrong
return RecaptchaResponse (is_valid=False, error_code = return_values [1])
else:
# recaptcha server was not reachable
return RecaptchaResponse (is_valid=False, error_code = "recaptcha-not-reachable")
if GAE:
submit = submit_gae
from pyxer.template.genshi import HTML
# Default keys for *.appspot.com
_pub_key = "6LcrngQAAAAAAC1iVJGsWhkkpu4Fx3Z_pDCKkbvF"
_private_key = "6LcrngQAAAAAAFtRJVKFZ6d-BJxZK-40BAdURQ30"
def html(pub_key=_pub_key, use_ssl=False):
if pub_key == _pub_key:
log.warn("PLEASE GET YOUR OWN RECAPTCHA KEYS ON http://www.recaptcha.net!")
return HTML(displayhtml(
pub_key,
use_ssl = use_ssl,
error = req.params.get("error")))
def test(private_key=_private_key):
if private_key == _private_key:
log.warn("PLEASE GET YOUR OWN RECAPTCHA KEYS ON http://www.recaptcha.net!")
remoteip = req.environ['REMOTE_ADDR']
cResponse = submit(
req.params.get('recaptcha_challenge_field'),
req.params.get('recaptcha_response_field'),
private_key,
remoteip)
req.captcha_error = cResponse.error_code
return cResponse.is_valid
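# Illustrative sketch (not part of the original module): one way the helpers
# above could be wired into a pyxer request handler. It assumes `req` is the
# request object provided by `pyxer.base` (as already used by html()/test());
# the handler name and return strings are placeholders.
def _example_comment_handler():
    # On POST, verify the answer before accepting the submission
    if req.params.get("recaptcha_response_field"):
        if test():
            return "comment accepted"
        return "captcha failed: %s" % req.captcha_error
    # Otherwise embed the reCAPTCHA widget (demo keys unless you pass your own)
    return html()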
|
the-stack_0_21438 | """ Distributor init file
Distributors: you can add custom code here to support particular distributions
of scipy.
For example, this is a good place to put any checks for hardware requirements.
The scipy standard source distribution will not put code in this file, so you
can safely replace this file with your own version.
"""
import os
import sys
import platform
if os.name == 'nt':
# on Windows SciPy loads important DLLs
# and the code below aims to alleviate issues with DLL
# path resolution portability with an absolute path DLL load
from ctypes import WinDLL
import glob
# convention for storing / loading the DLL from
# scipy/.libs/, if present
libs_path = os.path.abspath(os.path.join(os.path.dirname(__file__),
'.libs'))
if os.path.isdir(libs_path):
# for Python >= 3.8, DLL resolution ignores the PATH variable
# and the current working directory; see release notes:
# https://docs.python.org/3/whatsnew/3.8.html#bpo-36085-whatsnew
# Only the system paths, the directory containing the DLL, and
# directories added with add_dll_directory() are searched for
# load-time dependencies with Python >= 3.8
# this module was originally added to support DLL resolution in
# Python 3.8 because of the changes described above--providing the
# absolute paths to the DLLs allowed for proper resolution/loading
# however, we also started to receive reports of problems with DLL
# resolution with Python 3.7 that were sometimes alleviated with
# inclusion of the _distributor_init.py module; see SciPy main
# repo gh-11826
# we noticed in scipy-wheels repo gh-70 that inclusion of
# _distributor_init.py in 32-bit wheels for Python 3.7 resulted
# in failures in DLL resolution (64-bit 3.7 did not)
# as a result, we decided to combine both the old (working directory)
# and new (absolute path to DLL location) DLL resolution mechanisms
# to improve the chances of resolving DLLs across a wider range of
# Python versions
# we did not experiment with manipulating the PATH environment variable
# to include libs_path; it is not immediately clear if this would have
# robustness or security advantages over changing working directories
# as done below
# we should remove the working directory shims when our minimum supported
# Python version is 3.8 and trust the improvements to secure DLL loading
# in the standard lib for Python >= 3.8
try:
owd = os.getcwd()
os.chdir(libs_path)
for filename in glob.glob(os.path.join(libs_path, '*dll')):
WinDLL(os.path.abspath(filename))
finally:
os.chdir(owd)
elif sys.platform == 'darwin' and platform.machine() == 'arm64':
# On arm64 macOS the OpenBLAS runtimes of NumPy and SciPy don't seem to work
# well together unless this timeout limit is set - it results in very slow
# performance for some linalg functionality.
# See https://github.com/scipy/scipy/issues/15050 for details.
os.environ['OPENBLAS_THREAD_TIMEOUT'] = '1'
|
the-stack_0_21439 | # Copyright (c) 2019 NVIDIA Corporation
import torch
import nemo
from .parts.datasets import AudioOnlyDataset
from nemo.backends.pytorch.nm import DataLayerNM
from nemo.core import DeviceType
from nemo.core.neural_types import *
class AudioDataLayer(DataLayerNM):
"""
Data Layer for general speech tasks that loads only the audio.
Module which reads speech data. It accepts comma-separated
JSON manifest files describing the wav audio files and their metadata.
JSON files should be of the following format::
{"audio_filepath": path_to_wav_0, "duration": time_in_sec_0}
...
{"audio_filepath": path_to_wav_n, "duration": time_in_sec_n}
Args:
manifest_filepath (str): path to JSON containing data.
        batch_size (int): batch size.
min_duration (float): All training files which have a duration less
than min_duration are dropped. Note: Duration is read from the
manifest JSON.
Defaults to 0.1.
max_duration (float): All training files which have a duration more
than max_duration are dropped. Note: Duration is read from the
manifest JSON.
Defaults to None.
trim_silence (bool): Whether to use trim silence from beginning and end
of audio signal using librosa.effects.trim().
Defaults to False.
drop_last (bool): See PyTorch DataLoader.
Defaults to False.
shuffle (bool): See PyTorch DataLoader.
Defaults to True.
num_workers (int): See PyTorch DataLoader.
Defaults to 0.
n_segments (int): Number of samples to load per audiofile.
Defaults to 0 which indicates to load the whole file.
"""
@property
def output_ports(self):
"""Returns definitions of module output ports.
audio_signal:
0: AxisType(BatchTag)
1: AxisType(TimeTag)
a_sig_length:
0: AxisType(BatchTag)
"""
return {
"audio_signal": NeuralType({0: AxisType(BatchTag), 1: AxisType(TimeTag)}),
"a_sig_length": NeuralType({0: AxisType(BatchTag)}),
}
def __init__(
self,
*,
manifest_filepath,
batch_size,
min_duration=0.1,
max_duration=None,
trim_silence=False,
drop_last=False,
shuffle=True,
num_workers=0,
n_segments=0,
**kwargs
):
DataLayerNM.__init__(self, **kwargs)
self._dataset = AudioOnlyDataset(
manifest_filepath=manifest_filepath,
max_duration=max_duration,
min_duration=min_duration,
trim=trim_silence,
n_segments=n_segments,
)
sampler = None
if self._placement == DeviceType.AllGpu:
nemo.logging.info('Parallelizing DATALAYER')
sampler = torch.utils.data.distributed.DistributedSampler(self._dataset)
self._dataloader = torch.utils.data.DataLoader(
dataset=self._dataset,
batch_size=batch_size,
collate_fn=self._dataset.AudioCollateFunc,
drop_last=drop_last,
shuffle=shuffle if sampler is None else False,
sampler=sampler,
num_workers=num_workers,
)
def __len__(self):
return len(self._dataset)
@property
def dataset(self):
return None
@property
def data_iterator(self):
return self._dataloader
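# Illustrative sketch (not part of the original module): constructing the data
# layer from a manifest in the JSON-lines format documented above. The manifest
# path and parameter values below are placeholders.
def _example_audio_data_layer():
    data_layer = AudioDataLayer(
        manifest_filepath="train_manifest.json",  # assumed path
        batch_size=32,
        min_duration=0.1,
        max_duration=16.7,
        trim_silence=True,
        num_workers=4,
    )
    # Yields (audio_signal, a_sig_length) batches as described in output_ports
    return data_layer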
|
the-stack_0_21440 | import numpy as np
import scipy.io as sio
# load data.mat file
datadict = sio.loadmat('../saved_data/pubfig_data.mat')
# some variables are in bad format
# process them to make them in correct format
attr_names = [x[0] for x in datadict['attribute_names'][0]]
datadict['attribute_names'] = attr_names
datadict['im_names'] = datadict['im_names'][0]
datadict['class_labels'] = datadict['class_labels'][:, 0]
datadict['used_for_training'] = datadict['used_for_training'][:, 0]
datadict['class_names'] = datadict['class_names'][0]
np.save("./saved_data/datadict.npy", datadict)
"""
For loading back :
d = np.load('../saved_data/datadict.npy').item()
"""
|
the-stack_0_21442 | #!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
Polyaxon SDKs and REST API specification.
Polyaxon SDKs and REST API specification. # noqa: E501
The version of the OpenAPI document: 1.11.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from polyaxon_sdk.configuration import Configuration
class V1ListComponentHubsResponse(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'count': 'int',
'results': 'list[V1ComponentHub]',
'previous': 'str',
'next': 'str'
}
attribute_map = {
'count': 'count',
'results': 'results',
'previous': 'previous',
'next': 'next'
}
def __init__(self, count=None, results=None, previous=None, next=None, local_vars_configuration=None): # noqa: E501
"""V1ListComponentHubsResponse - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._count = None
self._results = None
self._previous = None
self._next = None
self.discriminator = None
if count is not None:
self.count = count
if results is not None:
self.results = results
if previous is not None:
self.previous = previous
if next is not None:
self.next = next
@property
def count(self):
"""Gets the count of this V1ListComponentHubsResponse. # noqa: E501
:return: The count of this V1ListComponentHubsResponse. # noqa: E501
:rtype: int
"""
return self._count
@count.setter
def count(self, count):
"""Sets the count of this V1ListComponentHubsResponse.
:param count: The count of this V1ListComponentHubsResponse. # noqa: E501
:type: int
"""
self._count = count
@property
def results(self):
"""Gets the results of this V1ListComponentHubsResponse. # noqa: E501
:return: The results of this V1ListComponentHubsResponse. # noqa: E501
:rtype: list[V1ComponentHub]
"""
return self._results
@results.setter
def results(self, results):
"""Sets the results of this V1ListComponentHubsResponse.
:param results: The results of this V1ListComponentHubsResponse. # noqa: E501
:type: list[V1ComponentHub]
"""
self._results = results
@property
def previous(self):
"""Gets the previous of this V1ListComponentHubsResponse. # noqa: E501
:return: The previous of this V1ListComponentHubsResponse. # noqa: E501
:rtype: str
"""
return self._previous
@previous.setter
def previous(self, previous):
"""Sets the previous of this V1ListComponentHubsResponse.
:param previous: The previous of this V1ListComponentHubsResponse. # noqa: E501
:type: str
"""
self._previous = previous
@property
def next(self):
"""Gets the next of this V1ListComponentHubsResponse. # noqa: E501
:return: The next of this V1ListComponentHubsResponse. # noqa: E501
:rtype: str
"""
return self._next
@next.setter
def next(self, next):
"""Sets the next of this V1ListComponentHubsResponse.
:param next: The next of this V1ListComponentHubsResponse. # noqa: E501
:type: str
"""
self._next = next
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1ListComponentHubsResponse):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1ListComponentHubsResponse):
return True
return self.to_dict() != other.to_dict()
|
the-stack_0_21443 | from typing import List
import click
from dagster import __version__ as current_dagster_version
from dagster import check
from .dagster_docker import DagsterDockerImage
from .ecr import ensure_ecr_login
from .image_defs import get_image, list_images
from .utils import current_time_str, execute_docker_push, execute_docker_tag
CLI_HELP = """This CLI is used for building the various Dagster images we use in test
"""
AWS_ECR_REGISTRY = "public.ecr.aws/dagster"
@click.group(help=CLI_HELP)
def cli():
pass
@cli.command()
def list(): # pylint: disable=redefined-builtin
for image in list_images():
print(image.image) # pylint: disable=print-call
@cli.command()
@click.option("--name", required=True, help="Name of image to build")
@click.option(
"--dagster-version",
required=True,
help="Version of image to build, must match current dagster version",
)
@click.option(
"-t",
"--timestamp",
type=click.STRING,
required=False,
default=current_time_str(),
help="Timestamp to build in format 2020-07-11T040642 (defaults to now UTC)",
)
@click.option("-v", "--python-version", type=click.STRING, required=True)
def build(name, dagster_version, timestamp, python_version):
get_image(name).build(timestamp, dagster_version, python_version)
@cli.command()
@click.option("--name", required=True, help="Name of image to build")
@click.option(
"--dagster-version",
required=True,
help="Version of image to build, must match current dagster version",
)
@click.option(
"-t",
"--timestamp",
type=click.STRING,
required=False,
default=current_time_str(),
help="Timestamp to build in format 2020-07-11T040642 (defaults to now UTC)",
)
def build_all(name, dagster_version, timestamp):
"""Build all supported python versions for image"""
image = get_image(name)
for python_version in image.python_versions:
image.build(timestamp, dagster_version, python_version)
@cli.command()
@click.option("--name", required=True, help="Name of image to push")
@click.option("-v", "--python-version", type=click.STRING, required=True)
@click.option("-v", "--custom-tag", type=click.STRING, required=False)
def push(name, python_version, custom_tag):
ensure_ecr_login()
get_image(name).push(python_version, custom_tag=custom_tag)
@cli.command()
@click.option("--name", required=True, help="Name of image to push")
def push_all(name):
ensure_ecr_login()
image = get_image(name)
for python_version in image.python_versions:
image.push(python_version)
def push_to_registry(name: str, tags: List[str]):
check.str_param(name, "name")
check.list_param(tags, "tags", of_type=str)
image = DagsterDockerImage(name)
python_version = next(iter(image.python_versions))
local_image = image.local_image(python_version)
for tag in tags:
execute_docker_tag(local_image, tag)
execute_docker_push(tag)
@cli.command()
@click.option("--name", required=True, help="Name of image to push")
@click.option(
"--dagster-version",
required=True,
help="Version of image to push, must match current dagster version",
)
def push_dockerhub(name, dagster_version):
"""Used for pushing k8s images to Docker Hub. Must be logged in to Docker Hub for this to
succeed.
"""
check.invariant(
dagster_version == current_dagster_version,
desc=(
f"Current dagster version ({current_dagster_version}) does not match provided arg "
f"({dagster_version})"
),
)
tags = [f"dagster/{name}:{current_dagster_version}", f"dagster/{name}:latest"]
push_to_registry(name, tags)
@cli.command()
@click.option("--name", required=True, help="Name of image to push")
@click.option(
"--dagster-version",
required=True,
help="Version of image to push, must match current dagster version",
)
def push_ecr(name, dagster_version):
"""Used for pushing k8s images to our public ECR.
You must be authed for ECR. Run:
aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin public.ecr.aws/dagster
"""
check.invariant(
dagster_version == current_dagster_version,
desc=(
f"Current dagster version ({current_dagster_version}) does not match provided arg "
f"({dagster_version})"
),
)
tags = [
f"{AWS_ECR_REGISTRY}/{name}:{current_dagster_version}",
f"{AWS_ECR_REGISTRY}/{name}:latest",
]
push_to_registry(name, tags)
def main():
click_cli = click.CommandCollection(sources=[cli], help=CLI_HELP)
click_cli()
if __name__ == "__main__":
main()
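# Illustrative usage (assumption: this module is installed as a console script,
# here called `dagster-image`; image names are placeholders). Depending on the
# click version, underscores in command names may be exposed as dashes
# (e.g. build_all -> build-all):
#
#   dagster-image list
#   dagster-image build --name <image> --dagster-version 1.0.0 -v 3.8
#   dagster-image push --name <image> -v 3.8
#   dagster-image push_ecr --name <image> --dagster-version 1.0.0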
|
the-stack_0_21444 | import simpleaudio as sa
import os
from time import sleep
import re
MODULE_PATH = os.path.dirname(__file__)
AUDIO_DIR = os.path.join(MODULE_PATH, "test_audio")
def _gwo(wave_obj_file):
return sa.WaveObject.from_wave_file(os.path.join(AUDIO_DIR, wave_obj_file))
def _clean_docstring(docstring):
lines = [x.strip() for x in docstring.strip().splitlines()]
return '\n'.join(lines)
def run_all(countdown=0):
func_checks = [LeftRightCheck, OverlappingCheck, StopCheck, StopAllCheck,
IsPlayingCheck, WaitDoneCheck]
for func_check in func_checks:
func_check.run(countdown)
class FunctionCheckBase(object):
@classmethod
def _check(cls):
raise NotImplementedError()
@classmethod
def run(cls, countdown=3):
# print function check header
print("")
print("=" * 80)
print("--", cls.__name__, "--")
print(_clean_docstring(cls.__doc__))
print("")
if countdown > 0:
print("Starting check in ...")
for tick in reversed(range(1, countdown + 1)):
print(tick, "...")
sleep(1)
print("RUNNING CHECK ...")
cls._check()
print("... DONE")
print("=" * 80)
class LeftRightCheck(FunctionCheckBase):
"""
Checks stereo playback by first playing a note in the left channel only,
then a different note in the right channel only.
"""
@classmethod
def _check(cls):
wave_obj = _gwo("left_right.wav")
wave_obj.play()
sleep(4)
class OverlappingCheck(FunctionCheckBase):
"""
Checks overlapped playback by playing three different notes spaced
approximately a half-second apart but still overlapping.
"""
@classmethod
def _check(cls):
wave_obj_1 = _gwo("c.wav")
wave_obj_2 = _gwo("e.wav")
wave_obj_3 = _gwo("g.wav")
wave_obj_1.play()
sleep(0.5)
wave_obj_2.play()
sleep(0.5)
wave_obj_3.play()
sleep(3)
class StopCheck(FunctionCheckBase):
"""
Checks stopping playback by playing three different notes simultaneously
and stopping two after approximately a half-second, leaving only one note
playing for two more seconds.
"""
@classmethod
def _check(cls):
wave_obj_1 = _gwo("c.wav")
wave_obj_2 = _gwo("e.wav")
wave_obj_3 = _gwo("g.wav")
play_obj_1 = wave_obj_1.play()
wave_obj_2.play()
play_obj_3 = wave_obj_3.play()
sleep(0.5)
play_obj_1.stop()
play_obj_3.stop()
sleep(3)
class StopAllCheck(FunctionCheckBase):
"""
Checks stopping playback of all audio by playing three different notes
simultaneously and stopping all of them after approximately a half-second.
"""
@classmethod
def _check(cls):
wave_obj_1 = _gwo("c.wav")
wave_obj_2 = _gwo("e.wav")
wave_obj_3 = _gwo("g.wav")
wave_obj_1.play()
wave_obj_2.play()
wave_obj_3.play()
sleep(0.5)
sa.stop_all()
sleep(3)
class IsPlayingCheck(FunctionCheckBase):
"""
Checks functionality of the is_playing() method by calling during playback
(when it should return True) and calling it again after all playback has
stopped (when it should return False). The output is printed.
"""
@classmethod
def _check(cls):
wave_obj = _gwo("notes_2_16_44.wav")
play_obj = wave_obj.play()
sleep(0.5)
print("Is playing:", play_obj.is_playing())
sleep(4)
print("Is playing:", play_obj.is_playing())
class WaitDoneCheck(FunctionCheckBase):
"""
Checks functionality of the wait_done() method by using it to allow the
three-note clip to play until finished (before attempting to stop
playback).
"""
@classmethod
def _check(cls):
wave_obj = _gwo("notes_2_16_44.wav")
play_obj = wave_obj.play()
play_obj.wait_done()
play_obj.stop()
if __name__ == "__main__":
run_all()
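# Illustrative usage (not part of the original script): individual checks can
# also be run on their own, and the countdown can be skipped, e.g.:
#
#   LeftRightCheck.run(countdown=0)
#   WaitDoneCheck.run()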
|
the-stack_0_21447 | import os
import string
import io
import sys
import trace
class KeywordProcessor(object):
"""KeywordProcessor
Note:
* Based on Flashtext <https://github.com/vi3k6i5/flashtext>
* loosely based on `Aho-Corasick algorithm <https://en.wikipedia.org/wiki/Aho%E2%80%93Corasick_algorithm>`.
"""
def __init__(self, case_sensitive=False):
"""
Args:
case_sensitive (boolean): Keyword search should be case sensitive set or not.
Defaults to False
"""
self._keyword = '_keyword_'
self.non_word_boundaries = set(string.digits + string.ascii_letters + '_')
self.keyword_trie_dict = dict()
self.case_sensitive = case_sensitive
def __setitem__(self, keyword, clean_name, punctuation=None):
"""To add keyword to the dictionary
pass the keyword and the clean name it maps to.
Args:
keyword : string
keyword that you want to identify
clean_name : string
clean term for that keyword that you would want to get back in return or replace
if not provided, keyword will be used as the clean name also.
            punctuation : list[char]
list of punctuation characters to add to the keyword before adding.
"""
if punctuation is None:
punctuation = ['']
status = False
if keyword and clean_name:
if not self.case_sensitive:
keyword = keyword.lower()
current_dict = self.keyword_trie_dict
for letter in keyword:
current_dict = current_dict.setdefault(letter, {})
for punc in punctuation:
if len(punc) > 0:
final_dict = current_dict.setdefault(punc, {})
else:
final_dict = current_dict
final_dict[self._keyword] = clean_name + punc
status = True
return status
def add_keyword(self, keyword, clean_name, punctuation=None):
"""To add one or more keywords to the dictionary
pass the keyword and the clean name it maps to.
Args:
keyword : string
keyword that you want to identify
clean_name : string
clean term for that keyword that you would want to get back in return or replace
if not provided, keyword will be used as the clean name also.
punctuation : list[char]
list of punctuation characters to add to the keyword before adding.
Returns:
status : bool
The return value. True for success, False otherwise.
"""
return self.__setitem__(keyword, clean_name, punctuation)
def replace_keywords(self, sentence):
"""Searches in the string for all keywords present in corpus.
Keywords present are replaced by the clean name and a new string is returned.
Args:
sentence (str): Line of text where we will replace keywords
Returns:
new_sentence (str): Line of text with replaced keywords
"""
if not sentence:
# if sentence is empty or none just return the same.
return sentence
new_sentence = []
orig_sentence = sentence
if not self.case_sensitive:
sentence = sentence.lower()
current_word = ''
current_dict = self.keyword_trie_dict
sequence_end_pos = 0
idx = 0
sentence_len = len(sentence)
while idx < sentence_len:
char = sentence[idx]
# when we reach whitespace
if char not in self.non_word_boundaries:
current_word += orig_sentence[idx]
current_white_space = char
# if end is present in current_dict
if self._keyword in current_dict or char in current_dict:
# update longest sequence found
longest_sequence_found = None
is_longer_seq_found = False
if self._keyword in current_dict:
longest_sequence_found = current_dict[self._keyword]
sequence_end_pos = idx
# re look for longest_sequence from this position
if char in current_dict:
current_dict_continued = current_dict[char]
current_word_continued = current_word
idy = idx + 1
while idy < sentence_len:
inner_char = sentence[idy]
if inner_char not in self.non_word_boundaries and self._keyword in current_dict_continued:
current_word_continued += orig_sentence[idy]
# update longest sequence found
current_white_space = inner_char
longest_sequence_found = current_dict_continued[self._keyword]
sequence_end_pos = idy
is_longer_seq_found = True
if inner_char in current_dict_continued:
current_word_continued += orig_sentence[idy]
current_dict_continued = current_dict_continued[inner_char]
else:
break
idy += 1
else:
# end of sentence reached.
if self._keyword in current_dict_continued:
# update longest sequence found
current_white_space = ''
longest_sequence_found = current_dict_continued[self._keyword]
sequence_end_pos = idy
is_longer_seq_found = True
if is_longer_seq_found:
idx = sequence_end_pos
current_word = current_word_continued
current_dict = self.keyword_trie_dict
if longest_sequence_found:
new_sentence.append(longest_sequence_found + current_white_space)
current_word = ''
else:
new_sentence.append(current_word)
current_word = ''
else:
# we reset current_dict
current_dict = self.keyword_trie_dict
new_sentence.append(current_word)
current_word = ''
elif char in current_dict:
# we can continue from this char
current_word += orig_sentence[idx]
current_dict = current_dict[char]
else:
current_word += orig_sentence[idx]
# we reset current_dict
current_dict = self.keyword_trie_dict
# skip to end of word
idy = idx + 1
while idy < sentence_len:
char = sentence[idy]
current_word += orig_sentence[idy]
if char not in self.non_word_boundaries:
break
idy += 1
idx = idy
new_sentence.append(current_word)
current_word = ''
# if we are end of sentence and have a sequence discovered
if idx + 1 >= sentence_len:
if self._keyword in current_dict:
sequence_found = current_dict[self._keyword]
new_sentence.append(sequence_found)
else:
new_sentence.append(current_word)
idx += 1
return "".join(new_sentence)
def file_to_list(filename, drop_first=True):
items = []
with open(filename, "r", encoding="utf-8") as f:
if drop_first:
f.readline()
for line in f.readlines():
items.append(line.rstrip())
return items
def main():
proc = KeywordProcessor()
punctuation = ['.', ',', ' ', ':', ';', '?', '!', '']
no_punctuation = ['']
fields = {
os.path.join('datasets', 'firstnames.csv'): {"replacement": "<NAAM>", "punctuation": punctuation},
os.path.join('datasets', 'countries.csv'): {"replacement": "<LAND>", "punctuation": no_punctuation},
}
for field in fields:
for name in file_to_list(field):
            proc.add_keyword(name, fields[field]["replacement"], fields[field]["punctuation"])
print(proc.replace_keywords("Leo."))
print(proc.replace_keywords("Leopaart"))
print(proc.replace_keywords(".Leo"))
print(proc.replace_keywords(".Leo."))
print(proc.replace_keywords("Leo"))
if __name__ == "__main__":
main()
|
the-stack_0_21451 | import pandas
import numpy as np
from sklearn.svm import SVC
from sklearn.model_selection import KFold, GridSearchCV
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn import datasets
import math
newsgroups = datasets.fetch_20newsgroups(
subset='all',
categories=['alt.atheism', 'sci.space']
)
vectorizer = TfidfVectorizer(min_df=1)
val_matrix = vectorizer.fit_transform(newsgroups.data)
grid = {'C': np.power(10.0, np.arange(-5, 6))}
cv = KFold(n_splits=5, shuffle=True, random_state=241)
clf = SVC(kernel='linear', random_state=241)
gs = GridSearchCV(clf, grid, scoring='accuracy', cv=cv)
gs.fit(val_matrix, newsgroups.target)
# grid_scores_ was removed in scikit-learn 0.20; best_params_ (backed by
# cv_results_) gives the C value with the highest mean validation accuracy.
c_val = gs.best_params_['C']
clf = SVC(C=c_val, kernel='linear', random_state=241)
clf.fit(val_matrix, newsgroups.target)
coef = clf.coef_
feature_mapping = dict(zip(vectorizer.get_feature_names(), coef.transpose().toarray()))
result = sorted(feature_mapping.items(), key=lambda x: np.fabs(x[1]), reverse=True)[:10]
result.sort()
print(','.join(x[0] for x in result), end='')
|
the-stack_0_21453 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import click
import collections
import os
import pathlib
import subprocess
import webbrowser
import re
import sys
import requests
import toml
from gidgethub import sansio
from . import __version__
CREATE_PR_URL_TEMPLATE = ("https://api.github.com/repos/"
"{config[team]}/{config[repo]}/pulls")
DEFAULT_CONFIG = collections.ChainMap({
'team': 'python',
'repo': 'cpython',
'check_sha': '7f777ed95a19224294949e1b4ce56bbffcb1fe9f',
'fix_commit_msg': True,
'default_branch': 'master',
})
class BranchCheckoutException(Exception):
pass
class CherryPickException(Exception):
pass
class InvalidRepoException(Exception):
pass
class CherryPicker:
def __init__(self, pr_remote, commit_sha1, branches,
*, dry_run=False, push=True,
prefix_commit=True,
config=DEFAULT_CONFIG,
):
self.config = config
self.check_repo() # may raise InvalidRepoException
if dry_run:
click.echo("Dry run requested, listing expected command sequence")
self.pr_remote = pr_remote
self.commit_sha1 = commit_sha1
self.branches = branches
self.dry_run = dry_run
self.push = push
self.prefix_commit = prefix_commit
@property
def upstream(self):
"""Get the remote name to use for upstream branches
Uses "upstream" if it exists, "origin" otherwise
"""
cmd = ['git', 'remote', 'get-url', 'upstream']
try:
subprocess.check_output(cmd, stderr=subprocess.DEVNULL)
except subprocess.CalledProcessError:
return "origin"
return "upstream"
@property
def sorted_branches(self):
"""Return the branches to cherry-pick to, sorted by version."""
return sorted(
self.branches,
reverse=True,
key=version_from_branch)
@property
def username(self):
cmd = ['git', 'config', '--get', f'remote.{self.pr_remote}.url']
raw_result = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
result = raw_result.decode('utf-8')
# implicit ssh URIs use : to separate host from user, others just use /
username = result.replace(':', '/').split('/')[-2]
return username
def get_cherry_pick_branch(self, maint_branch):
return f"backport-{self.commit_sha1[:7]}-{maint_branch}"
def get_pr_url(self, base_branch, head_branch):
return f"https://github.com/{self.config['team']}/{self.config['repo']}/compare/{base_branch}...{self.username}:{head_branch}?expand=1"
def fetch_upstream(self):
""" git fetch <upstream> """
cmd = ['git', 'fetch', self.upstream]
self.run_cmd(cmd)
def run_cmd(self, cmd):
assert not isinstance(cmd, str)
if self.dry_run:
click.echo(f" dry-run: {' '.join(cmd)}")
return
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
click.echo(output.decode('utf-8'))
def checkout_branch(self, branch_name):
""" git checkout -b <branch_name> """
cmd = ['git', 'checkout', '-b', self.get_cherry_pick_branch(branch_name), f'{self.upstream}/{branch_name}']
try:
self.run_cmd(cmd)
except subprocess.CalledProcessError as err:
click.echo(f"Error checking out the branch {self.get_cherry_pick_branch(branch_name)}.")
click.echo(err.output)
raise BranchCheckoutException(f"Error checking out the branch {self.get_cherry_pick_branch(branch_name)}.")
def get_commit_message(self, commit_sha):
"""
Return the commit message for the current commit hash,
replace #<PRID> with GH-<PRID>
"""
cmd = ['git', 'show', '-s', '--format=%B', commit_sha]
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
message = output.strip().decode('utf-8')
if self.config['fix_commit_msg']:
return message.replace('#', 'GH-')
else:
return message
def checkout_default_branch(self):
""" git checkout default branch """
cmd = 'git', 'checkout', self.config['default_branch']
self.run_cmd(cmd)
def status(self):
"""
git status
:return:
"""
cmd = ['git', 'status']
self.run_cmd(cmd)
def cherry_pick(self):
""" git cherry-pick -x <commit_sha1> """
cmd = ['git', 'cherry-pick', '-x', self.commit_sha1]
try:
self.run_cmd(cmd)
except subprocess.CalledProcessError as err:
click.echo(f"Error cherry-pick {self.commit_sha1}.")
click.echo(err.output)
raise CherryPickException(f"Error cherry-pick {self.commit_sha1}.")
def get_exit_message(self, branch):
return \
f"""
Failed to cherry-pick {self.commit_sha1} into {branch} \u2639
... Stopping here.
To continue and resolve the conflict:
$ cherry_picker --status # to find out which files need attention
# Fix the conflict
$ cherry_picker --status # should now say 'all conflict fixed'
$ cherry_picker --continue
To abort the cherry-pick and cleanup:
$ cherry_picker --abort
"""
def amend_commit_message(self, cherry_pick_branch):
""" prefix the commit message with (X.Y) """
commit_prefix = ""
if self.prefix_commit:
commit_prefix = f"[{get_base_branch(cherry_pick_branch)}] "
updated_commit_message = f"""{commit_prefix}{self.get_commit_message(self.commit_sha1)}
(cherry picked from commit {self.commit_sha1})
Co-authored-by: {get_author_info_from_short_sha(self.commit_sha1)}"""
if self.dry_run:
click.echo(f" dry-run: git commit --amend -m '{updated_commit_message}'")
else:
cmd = ['git', 'commit', '--amend', '-m', updated_commit_message]
try:
subprocess.check_output(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as cpe:
click.echo("Failed to amend the commit message \u2639")
click.echo(cpe.output)
return updated_commit_message
def push_to_remote(self, base_branch, head_branch, commit_message=""):
""" git push <origin> <branchname> """
cmd = ['git', 'push', self.pr_remote, f'{head_branch}:{head_branch}']
try:
self.run_cmd(cmd)
except subprocess.CalledProcessError:
click.echo(f"Failed to push to {self.pr_remote} \u2639")
else:
gh_auth = os.getenv("GH_AUTH")
if gh_auth:
self.create_gh_pr(base_branch, head_branch,
commit_message=commit_message,
gh_auth=gh_auth)
else:
self.open_pr(self.get_pr_url(base_branch, head_branch))
def create_gh_pr(self, base_branch, head_branch, *,
commit_message,
gh_auth):
"""
Create PR in GitHub
"""
request_headers = sansio.create_headers(
self.username, oauth_token=gh_auth)
title, body = normalize_commit_message(commit_message)
if not self.prefix_commit:
title = f"[{base_branch}] {title}"
data = {
"title": title,
"body": body,
"head": f"{self.username}:{head_branch}",
"base": base_branch,
"maintainer_can_modify": True
}
url = CREATE_PR_URL_TEMPLATE.format(config=self.config)
response = requests.post(url, headers=request_headers, json=data)
if response.status_code == requests.codes.created:
click.echo(f"Backport PR created at {response.json()['html_url']}")
else:
click.echo(response.status_code)
click.echo(response.text)
def open_pr(self, url):
"""
open url in the web browser
"""
if self.dry_run:
click.echo(f" dry-run: Create new PR: {url}")
else:
click.echo("Backport PR URL:")
click.echo(url)
webbrowser.open_new_tab(url)
def delete_branch(self, branch):
cmd = ['git', 'branch', '-D', branch]
self.run_cmd(cmd)
def cleanup_branch(self, branch):
self.checkout_default_branch()
try:
self.delete_branch(branch)
except subprocess.CalledProcessError:
click.echo(f"branch {branch} NOT deleted.")
else:
click.echo(f"branch {branch} has been deleted.")
def backport(self):
if not self.branches:
raise click.UsageError("At least one branch must be specified.")
self.fetch_upstream()
for maint_branch in self.sorted_branches:
click.echo(f"Now backporting '{self.commit_sha1}' into '{maint_branch}'")
cherry_pick_branch = self.get_cherry_pick_branch(maint_branch)
self.checkout_branch(maint_branch)
commit_message = ""
try:
self.cherry_pick()
commit_message = self.amend_commit_message(cherry_pick_branch)
except subprocess.CalledProcessError as cpe:
click.echo(cpe.output)
click.echo(self.get_exit_message(maint_branch))
except CherryPickException:
click.echo(self.get_exit_message(maint_branch))
raise
else:
if self.push:
self.push_to_remote(maint_branch,
cherry_pick_branch,
commit_message)
self.cleanup_branch(cherry_pick_branch)
else:
click.echo(\
f"""
Finished cherry-pick {self.commit_sha1} into {cherry_pick_branch} \U0001F600
--no-push option used.
... Stopping here.
To continue and push the changes:
$ cherry_picker --continue
To abort the cherry-pick and cleanup:
$ cherry_picker --abort
""")
def abort_cherry_pick(self):
"""
run `git cherry-pick --abort` and then clean up the branch
"""
cmd = ['git', 'cherry-pick', '--abort']
try:
self.run_cmd(cmd)
except subprocess.CalledProcessError as cpe:
click.echo(cpe.output)
# only delete backport branch created by cherry_picker.py
if get_current_branch().startswith('backport-'):
self.cleanup_branch(get_current_branch())
def continue_cherry_pick(self):
"""
git push origin <current_branch>
open the PR
clean up branch
"""
cherry_pick_branch = get_current_branch()
if cherry_pick_branch.startswith('backport-'):
# amend the commit message, prefix with [X.Y]
base = get_base_branch(cherry_pick_branch)
short_sha = cherry_pick_branch[cherry_pick_branch.index('-')+1:cherry_pick_branch.index(base)-1]
full_sha = get_full_sha_from_short(short_sha)
commit_message = self.get_commit_message(short_sha)
co_author_info = f"Co-authored-by: {get_author_info_from_short_sha(short_sha)}"
updated_commit_message = f"""[{base}] {commit_message}.
(cherry picked from commit {full_sha})
{co_author_info}"""
if self.dry_run:
click.echo(f" dry-run: git commit -a -m '{updated_commit_message}' --allow-empty")
else:
cmd = ['git', 'commit', '-a', '-m', updated_commit_message, '--allow-empty']
subprocess.check_output(cmd, stderr=subprocess.STDOUT)
self.push_to_remote(base, cherry_pick_branch)
self.cleanup_branch(cherry_pick_branch)
click.echo("\nBackport PR:\n")
click.echo(updated_commit_message)
else:
click.echo(f"Current branch ({cherry_pick_branch}) is not a backport branch. Will not continue. \U0001F61B")
def check_repo(self):
"""
Check that the repository is for the project we're configured to operate on.
This function performs the check by making sure that the sha specified in the config
is present in the repository that we're operating on.
"""
try:
validate_sha(self.config['check_sha'])
except ValueError:
raise InvalidRepoException()
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
@click.command(context_settings=CONTEXT_SETTINGS)
@click.version_option(version=__version__)
@click.option('--dry-run', is_flag=True,
help="Prints out the commands, but not executed.")
@click.option('--pr-remote', 'pr_remote', metavar='REMOTE',
help='git remote to use for PR branches', default='origin')
@click.option('--abort', 'abort', flag_value=True, default=None,
help="Abort current cherry-pick and clean up branch")
@click.option('--continue', 'abort', flag_value=False, default=None,
help="Continue cherry-pick, push, and clean up branch")
@click.option('--status', 'status', flag_value=True, default=None,
help="Get the status of cherry-pick")
@click.option('--push/--no-push', 'push', is_flag=True, default=True,
help="Changes won't be pushed to remote")
@click.option('--config-path', 'config_path', metavar='CONFIG-PATH',
help=("Path to config file, .cherry_picker.toml "
"from project root by default"),
default=None)
@click.argument('commit_sha1', nargs=1, default="")  # the commit sha1 to be cherry-picked
@click.argument('branches', nargs=-1)  # the branches to backport to
def cherry_pick_cli(dry_run, pr_remote, abort, status, push, config_path,
commit_sha1, branches):
click.echo("\U0001F40D \U0001F352 \u26CF")
config = load_config(config_path)
try:
cherry_picker = CherryPicker(pr_remote, commit_sha1, branches,
dry_run=dry_run,
push=push, config=config)
except InvalidRepoException:
click.echo(f"You're not inside a {config['repo']} repo right now! \U0001F645")
sys.exit(-1)
if abort is not None:
if abort:
cherry_picker.abort_cherry_pick()
else:
cherry_picker.continue_cherry_pick()
elif status:
click.echo(cherry_picker.status())
else:
try:
cherry_picker.backport()
except BranchCheckoutException:
sys.exit(-1)
except CherryPickException:
sys.exit(-1)
def get_base_branch(cherry_pick_branch):
"""
return '2.7' from 'backport-sha-2.7'
raises ValueError if the specified branch name is not of a form that
cherry_picker would have created
"""
prefix, sha, base_branch = cherry_pick_branch.split('-', 2)
if prefix != 'backport':
raise ValueError('branch name is not prefixed with "backport-". Is this a cherry_picker branch?')
if not re.match('[0-9a-f]{7,40}', sha):
raise ValueError(f'branch name has an invalid sha: {sha}')
# Validate that the sha refers to a valid commit within the repo
# Throws a ValueError if the sha is not present in the repo
validate_sha(sha)
# Subject the parsed base_branch to the same tests as when we generated it
# This throws a ValueError if the base_branch doesn't meet our requirements
version_from_branch(base_branch)
return base_branch
def validate_sha(sha):
"""
Validate that a hexdigest sha is a valid commit in the repo
raises ValueError if the sha does not reference a commit within the repo
"""
cmd = ['git', 'log', '-r', sha]
try:
subprocess.check_output(cmd, stderr=subprocess.STDOUT)
except subprocess.SubprocessError:
raise ValueError(f'The sha listed in the branch name, {sha}, is not present in the repository')
def version_from_branch(branch):
"""
return version information from a git branch name
"""
try:
return tuple(map(int, re.match(r'^.*(?P<version>\d+(\.\d+)+).*$', branch).groupdict()['version'].split('.')))
except AttributeError as attr_err:
raise ValueError(f'Branch {branch} seems to not have a version in its name.') from attr_err
def get_current_branch():
"""
Return the current branch
"""
cmd = ['git', 'rev-parse', '--abbrev-ref', 'HEAD']
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
return output.strip().decode('utf-8')
def get_full_sha_from_short(short_sha):
cmd = ['git', 'log', '-1', '--format=%H', short_sha]
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
full_sha = output.strip().decode('utf-8')
return full_sha
def get_author_info_from_short_sha(short_sha):
cmd = ['git', 'log', '-1', '--format=%aN <%ae>', short_sha]
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
author = output.strip().decode('utf-8')
return author
def normalize_commit_message(commit_message):
"""
Return a tuple of title and body from the commit message
"""
split_commit_message = commit_message.split("\n")
title = split_commit_message[0]
body = "\n".join(split_commit_message[1:])
return title, body.lstrip("\n")
def find_project_root():
cmd = ['git', 'rev-parse', '--show-toplevel']
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
return pathlib.Path(output.decode('utf-8').strip())
def find_config():
root = find_project_root()
if root is not None:
child = root / '.cherry_picker.toml'
if child.exists() and not child.is_dir():
return child
return None
def load_config(path):
if path is None:
path = find_config()
if path is None:
return DEFAULT_CONFIG
else:
path = pathlib.Path(path) # enforce a cast to pathlib datatype
with path.open() as f:
d = toml.load(f)
return DEFAULT_CONFIG.new_child(d)
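# Illustrative example (not part of the original module): a .cherry_picker.toml
# at the project root overrides the DEFAULT_CONFIG keys defined above, e.g.
#
#   team = "python"
#   repo = "cpython"
#   check_sha = "7f777ed95a19224294949e1b4ce56bbffcb1fe9f"
#   fix_commit_msg = true
#   default_branch = "master"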
if __name__ == '__main__':
cherry_pick_cli()
|
the-stack_0_21457 | """ Controller module (adapter for UIs) """
from chess import engine
from chess.pieces import *
from chess.constants import *
class Select:
""" Select piece, shows suggestions and prepares piece for move """
def __init__(self):
self.pos_1 = None
self.moves = []
def make_selection(self, select, game):
if select in engine.get_location(game):
self.pos_1 = select
self.moves = engine.get_valid_moves(game, game[select])
if game.player_white:
suggestions = self.moves
else:
suggestions = [converter(coord) for coord in self.moves]
return suggestions
if self.pos_1 and select in self.moves:
move = engine.Move(self.pos_1, select, game)
if isinstance(game[self.pos_1], Pawn) and select[0] in (0, 7):
move.promote = Queen # temporary
move.execute()
self._reset()
self._reset()
def _reset(self):
self.pos_1 = None
self.moves = []
def gui_coord(player, coord):
y, x = coord
if BORD < x < B_WIDTH + BORD and BORD < y < B_HEIGHT + BORD:
row, col = (x // SQ_SIZE, y // SQ_SIZE)
if player:
return row, col
else:
return converter((row, col))
return False
def check(game):
if engine.king_checked(game, game.white_to_move):
return engine.get_location(game, game.white_to_move, find_piece=King)
return False
def mate(game):
if engine.check_mate(game):
return engine.get_location(game, game.white_to_move, find_piece=King)
return False
def converter(coord):
conv = {0: 7, 1: 6, 2: 5, 3: 4, 4: 3, 5: 2, 6: 1, 7: 0}
return conv[coord[0]], conv[coord[1]]
|
the-stack_0_21458 | from apple.protocols.protocol_message_types import ProtocolMessageTypes as pmt, ProtocolMessageTypes
NO_REPLY_EXPECTED = [
# full_node -> full_node messages
pmt.new_peak,
pmt.new_transaction,
pmt.new_unfinished_block,
pmt.new_signage_point_or_end_of_sub_slot,
pmt.request_mempool_transactions,
pmt.new_compact_vdf,
pmt.request_mempool_transactions,
]
"""
VAILD_REPLY_MESSAGE_MAP:
key: sent message type.
value: valid reply message types, from the view of the requester.
A state machine can be built from this message map.
"""
VAILD_REPLY_MESSAGE_MAP = {
# messages for all services
# pmt.handshake is handled in WSAppleConnection.perform_handshake
# full_node -> full_node protocol messages
pmt.request_transaction: [pmt.respond_transaction],
pmt.request_proof_of_weight: [pmt.respond_proof_of_weight],
pmt.request_block: [pmt.respond_block, pmt.reject_block],
pmt.request_blocks: [pmt.respond_blocks, pmt.reject_blocks],
pmt.request_unfinished_block: [pmt.respond_unfinished_block],
pmt.request_signage_point_or_end_of_sub_slot: [pmt.respond_signage_point, pmt.respond_end_of_sub_slot],
pmt.request_compact_vdf: [pmt.respond_compact_vdf],
pmt.request_peers: [pmt.respond_peers],
}
def static_check_sent_message_response() -> None:
"""Check that allowed message data structures VALID_REPLY_MESSAGE_MAP and NO_REPLY_EXPECTED are consistent."""
# Reply and non-reply sets should not overlap: This check should be static
overlap = set(NO_REPLY_EXPECTED).intersection(set(VAILD_REPLY_MESSAGE_MAP.keys()))
if len(overlap) != 0:
raise AssertionError("Overlapping NO_REPLY_EXPECTED and VAILD_REPLY_MESSAGE_MAP values: {}")
def message_requires_reply(sent: ProtocolMessageTypes) -> bool:
"""Return True if message has an entry in the full node P2P message map"""
# If we knew the peer NodeType is FULL_NODE, we could also check `sent not in NO_REPLY_EXPECTED`
return sent in VAILD_REPLY_MESSAGE_MAP
def message_response_ok(sent: ProtocolMessageTypes, received: ProtocolMessageTypes) -> bool:
"""
Check to see that peers respect protocol message types in reply.
Call with received == None to indicate that we do not expect a specific reply message type.
"""
# Errors below are runtime protocol message mismatches from peers
if sent in VAILD_REPLY_MESSAGE_MAP:
if received not in VAILD_REPLY_MESSAGE_MAP[sent]:
return False
return True
# Run `static_check_sent_message_response` to check this static invariant at import time
static_check_sent_message_response()
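# Illustrative sketch (not part of the original module): how a caller might use
# the helpers above to validate a peer's reply.
def _example_reply_validation() -> bool:
    sent = pmt.request_block
    received = pmt.respond_block
    if message_requires_reply(sent):
        # Only respond_block / reject_block are valid replies to request_block
        return message_response_ok(sent, received)
    return True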
|
the-stack_0_21459 | #!/usr/bin/env python3
# Copyright (c) 2017-2019 The Geranium Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the listsinceblock RPC."""
from test_framework.test_framework import GeraniumTestFramework
from test_framework.messages import BIP125_SEQUENCE_NUMBER
from test_framework.util import (
assert_array_result,
assert_equal,
assert_raises_rpc_error,
connect_nodes,
)
from decimal import Decimal
class ListSinceBlockTest(GeraniumTestFramework):
def set_test_params(self):
self.num_nodes = 4
self.setup_clean_chain = True
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
# All nodes are in IBD from genesis, so they'll need the miner (node2) to be an outbound connection, or have
# only one connection. (See fPreferredDownload in net_processing)
connect_nodes(self.nodes[1], 2)
self.nodes[2].generate(101)
self.sync_all()
self.test_no_blockhash()
self.test_invalid_blockhash()
self.test_reorg()
self.test_double_spend()
self.test_double_send()
self.double_spends_filtered()
def test_no_blockhash(self):
self.log.info("Test no blockhash")
txid = self.nodes[2].sendtoaddress(self.nodes[0].getnewaddress(), 1)
blockhash, = self.nodes[2].generate(1)
blockheight = self.nodes[2].getblockheader(blockhash)['height']
self.sync_all()
txs = self.nodes[0].listtransactions()
assert_array_result(txs, {"txid": txid}, {
"category": "receive",
"amount": 1,
"blockhash": blockhash,
"blockheight": blockheight,
"confirmations": 1,
})
assert_equal(
self.nodes[0].listsinceblock(),
{"lastblock": blockhash,
"removed": [],
"transactions": txs})
assert_equal(
self.nodes[0].listsinceblock(""),
{"lastblock": blockhash,
"removed": [],
"transactions": txs})
def test_invalid_blockhash(self):
self.log.info("Test invalid blockhash")
assert_raises_rpc_error(-5, "Block not found", self.nodes[0].listsinceblock,
"42759cde25462784395a337460bde75f58e73d3f08bd31fdc3507cbac856a2c4")
assert_raises_rpc_error(-5, "Block not found", self.nodes[0].listsinceblock,
"0000000000000000000000000000000000000000000000000000000000000000")
assert_raises_rpc_error(-8, "blockhash must be of length 64 (not 11, for 'invalid-hex')", self.nodes[0].listsinceblock,
"invalid-hex")
assert_raises_rpc_error(-8, "blockhash must be hexadecimal string (not 'Z000000000000000000000000000000000000000000000000000000000000000')", self.nodes[0].listsinceblock,
"Z000000000000000000000000000000000000000000000000000000000000000")
def test_reorg(self):
'''
`listsinceblock` did not behave correctly when handed a block that was
no longer in the main chain:
ab0
/ \
aa1 [tx0] bb1
| |
aa2 bb2
| |
aa3 bb3
|
bb4
Consider a client that has only seen block `aa3` above. It asks the node
to `listsinceblock aa3`. But at some point prior the main chain switched
to the bb chain.
Previously: listsinceblock would find height=4 for block aa3 and compare
this to height=5 for the tip of the chain (bb4). It would then return
results restricted to bb3-bb4.
Now: listsinceblock finds the fork at ab0 and returns results in the
range bb1-bb4.
This test only checks that [tx0] is present.
'''
self.log.info("Test reorg")
# Split network into two
self.split_network()
# send to nodes[0] from nodes[2]
senttx = self.nodes[2].sendtoaddress(self.nodes[0].getnewaddress(), 1)
# generate on both sides
nodes1_last_blockhash = self.nodes[1].generate(6)[-1]
nodes2_first_blockhash = self.nodes[2].generate(7)[0]
self.log.debug("nodes[1] last blockhash = {}".format(nodes1_last_blockhash))
self.log.debug("nodes[2] first blockhash = {}".format(nodes2_first_blockhash))
self.sync_all(self.nodes[:2])
self.sync_all(self.nodes[2:])
self.join_network()
# listsinceblock(nodes1_last_blockhash) should now include tx as seen from nodes[0]
# and return the block height which listsinceblock now exposes since a5e7795.
transactions = self.nodes[0].listsinceblock(nodes1_last_blockhash)['transactions']
found = next(tx for tx in transactions if tx['txid'] == senttx)
assert_equal(found['blockheight'], self.nodes[0].getblockheader(nodes2_first_blockhash)['height'])
def test_double_spend(self):
'''
This tests the case where the same UTXO is spent twice on two separate
blocks as part of a reorg.
ab0
/ \
aa1 [tx1] bb1 [tx2]
| |
aa2 bb2
| |
aa3 bb3
|
bb4
Problematic case:
1. User 1 receives GEAM in tx1 from utxo1 in block aa1.
2. User 2 receives GEAM in tx2 from utxo1 (same) in block bb1
3. User 1 sees 2 confirmations at block aa3.
4. Reorg into bb chain.
5. User 1 asks `listsinceblock aa3` and does not see that tx1 is now
invalidated.
Currently the solution to this is to detect that a reorg'd block is
asked for in listsinceblock, and to iterate back over existing blocks up
until the fork point, and to include all transactions that relate to the
node wallet.
'''
self.log.info("Test double spend")
self.sync_all()
# Split network into two
self.split_network()
# share utxo between nodes[1] and nodes[2]
utxos = self.nodes[2].listunspent()
utxo = utxos[0]
privkey = self.nodes[2].dumpprivkey(utxo['address'])
self.nodes[1].importprivkey(privkey)
# send from nodes[1] using utxo to nodes[0]
change = '%.8f' % (float(utxo['amount']) - 1.0003)
recipient_dict = {
self.nodes[0].getnewaddress(): 1,
self.nodes[1].getnewaddress(): change,
}
utxo_dicts = [{
'txid': utxo['txid'],
'vout': utxo['vout'],
}]
txid1 = self.nodes[1].sendrawtransaction(
self.nodes[1].signrawtransactionwithwallet(
self.nodes[1].createrawtransaction(utxo_dicts, recipient_dict))['hex'])
# send from nodes[2] using utxo to nodes[3]
recipient_dict2 = {
self.nodes[3].getnewaddress(): 1,
self.nodes[2].getnewaddress(): change,
}
self.nodes[2].sendrawtransaction(
self.nodes[2].signrawtransactionwithwallet(
self.nodes[2].createrawtransaction(utxo_dicts, recipient_dict2))['hex'])
# generate on both sides
lastblockhash = self.nodes[1].generate(3)[2]
self.nodes[2].generate(4)
self.join_network()
self.sync_all()
# gettransaction should work for txid1
assert self.nodes[0].gettransaction(txid1)['txid'] == txid1, "gettransaction failed to find txid1"
# listsinceblock(lastblockhash) should now include txid1, as seen from nodes[0]
lsbres = self.nodes[0].listsinceblock(lastblockhash)
assert any(tx['txid'] == txid1 for tx in lsbres['removed'])
# but it should not include 'removed' if include_removed=false
lsbres2 = self.nodes[0].listsinceblock(blockhash=lastblockhash, include_removed=False)
assert 'removed' not in lsbres2
def test_double_send(self):
'''
This tests the case where the same transaction is submitted twice on two
separate blocks as part of a reorg. The former will vanish and the
latter will appear as the true transaction (with confirmations dropping
as a result).
ab0
/ \
aa1 [tx1] bb1
| |
aa2 bb2
| |
aa3 bb3 [tx1]
|
bb4
Asserted:
1. tx1 is listed in listsinceblock.
2. It is included in 'removed' as it was removed, even though it is now
present in a different block.
3. It is listed with a confirmation count of 2 (bb3, bb4), not
3 (aa1, aa2, aa3).
'''
self.log.info("Test double send")
self.sync_all()
# Split network into two
self.split_network()
# create and sign a transaction
utxos = self.nodes[2].listunspent()
utxo = utxos[0]
change = '%.8f' % (float(utxo['amount']) - 1.0003)
recipient_dict = {
self.nodes[0].getnewaddress(): 1,
self.nodes[2].getnewaddress(): change,
}
utxo_dicts = [{
'txid': utxo['txid'],
'vout': utxo['vout'],
}]
signedtxres = self.nodes[2].signrawtransactionwithwallet(
self.nodes[2].createrawtransaction(utxo_dicts, recipient_dict))
assert signedtxres['complete']
signedtx = signedtxres['hex']
# send from nodes[1]; this will end up in aa1
txid1 = self.nodes[1].sendrawtransaction(signedtx)
# generate bb1-bb2 on right side
self.nodes[2].generate(2)
# send from nodes[2]; this will end up in bb3
txid2 = self.nodes[2].sendrawtransaction(signedtx)
assert_equal(txid1, txid2)
# generate on both sides
lastblockhash = self.nodes[1].generate(3)[2]
self.nodes[2].generate(2)
self.join_network()
self.sync_all()
# gettransaction should work for txid1
tx1 = self.nodes[0].gettransaction(txid1)
assert_equal(tx1['blockheight'], self.nodes[0].getblockheader(tx1['blockhash'])['height'])
# listsinceblock(lastblockhash) should now include txid1 in transactions
# as well as in removed
lsbres = self.nodes[0].listsinceblock(lastblockhash)
assert any(tx['txid'] == txid1 for tx in lsbres['transactions'])
assert any(tx['txid'] == txid1 for tx in lsbres['removed'])
# find transaction and ensure confirmations is valid
for tx in lsbres['transactions']:
if tx['txid'] == txid1:
assert_equal(tx['confirmations'], 2)
# the same check for the removed array; confirmations should STILL be 2
for tx in lsbres['removed']:
if tx['txid'] == txid1:
assert_equal(tx['confirmations'], 2)
def double_spends_filtered(self):
'''
`listsinceblock` was returning conflicted transactions even if they
occurred before the specified cutoff blockhash
'''
self.log.info("Test spends filtered")
spending_node = self.nodes[2]
dest_address = spending_node.getnewaddress()
tx_input = dict(
sequence=BIP125_SEQUENCE_NUMBER, **next(u for u in spending_node.listunspent()))
rawtx = spending_node.createrawtransaction(
[tx_input], {dest_address: tx_input["amount"] - Decimal("0.00051000"),
spending_node.getrawchangeaddress(): Decimal("0.00050000")})
signedtx = spending_node.signrawtransactionwithwallet(rawtx)
orig_tx_id = spending_node.sendrawtransaction(signedtx["hex"])
original_tx = spending_node.gettransaction(orig_tx_id)
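        # bumpfee creates an RBF replacement that conflicts with (double spends) the
        # original transaction above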
double_tx = spending_node.bumpfee(orig_tx_id)
# check that both transactions exist
block_hash = spending_node.listsinceblock(
spending_node.getblockhash(spending_node.getblockcount()))
original_found = False
double_found = False
for tx in block_hash['transactions']:
if tx['txid'] == original_tx['txid']:
original_found = True
if tx['txid'] == double_tx['txid']:
double_found = True
assert_equal(original_found, True)
assert_equal(double_found, True)
lastblockhash = spending_node.generate(1)[0]
# check that neither transaction exists
block_hash = spending_node.listsinceblock(lastblockhash)
original_found = False
double_found = False
for tx in block_hash['transactions']:
if tx['txid'] == original_tx['txid']:
original_found = True
if tx['txid'] == double_tx['txid']:
double_found = True
assert_equal(original_found, False)
assert_equal(double_found, False)
if __name__ == '__main__':
ListSinceBlockTest().main()
|
the-stack_0_21460 | #!/usr/bin/env python3
from subprocess import check_output
import sys
import os.path
def main(tarname, gitroot):
"""Run this as ./compare_tar_against_git.py TARFILE GITROOT
Args
====
TARFILE: Path to the built sdist (sympy-xx.tar.gz)
    GITROOT: Path to root of git (dir containing .git)
"""
compare_tar_against_git(tarname, gitroot)
## TARBALL WHITELISTS
# If a file that should be in the tarball does not end up there, add it to setup.py
# if it is Python, or to MANIFEST.in if it is not. (There is a command at the top of
# setup.py to gather all the things that should be there.)
# TODO: Also check that this whitelist isn't growing out of date from files
# removed from git.
# Files that are in git that should not be in the tarball
git_whitelist = {
# Git specific dotfiles
'.gitattributes',
'.gitignore',
'.mailmap',
# Travis and CI
'.travis.yml',
'.github/workflows/runtests.yml',
'.github/workflows/ci-sage.yml',
'.github/workflows/comment-on-pr.yml',
'.github/workflows/release.yml',
'.ci/durations.json',
'.ci/generate_durations_log.sh',
'.ci/parse_durations_log.py',
'.ci/blacklisted.json',
'.ci/README.rst',
'.github/FUNDING.yml',
'.editorconfig',
'.coveragerc',
'CODEOWNERS',
'asv.conf.actions.json',
'asv.conf.travis.json',
'coveragerc_travis',
'codecov.yml',
'pytest.ini',
'MANIFEST.in',
'banner.svg',
# Code of conduct
'CODE_OF_CONDUCT.md',
# Pull request template
'PULL_REQUEST_TEMPLATE.md',
# Contributing guide
'CONTRIBUTING.md',
# Nothing from bin/ should be shipped unless we intend to install it. Most
# of this stuff is for development anyway. To run the tests from the
# tarball, use setup.py test, or import sympy and run sympy.test() or
# sympy.doctest().
'bin/adapt_paths.py',
'bin/ask_update.py',
'bin/authors_update.py',
'bin/build_doc.sh',
'bin/coverage_doctest.py',
'bin/coverage_report.py',
'bin/deploy_doc.sh',
'bin/diagnose_imports',
'bin/doctest',
'bin/generate_module_list.py',
'bin/generate_test_list.py',
'bin/get_sympy.py',
'bin/mailmap_update.py',
'bin/py.bench',
'bin/strip_whitespace',
'bin/sympy_time.py',
'bin/sympy_time_cache.py',
'bin/test',
'bin/test_external_imports.py',
'bin/test_executable.py',
'bin/test_import',
'bin/test_import.py',
'bin/test_isolated',
'bin/test_py2_import.py',
'bin/test_setup.py',
'bin/test_submodule_imports.py',
'bin/test_travis.sh',
'bin/test_optional_dependencies.py',
'bin/test_sphinx.sh',
'bin/mailmap_check.py',
'bin/test_symengine.py',
'bin/test_tensorflow.py',
# The notebooks are not ready for shipping yet. They need to be cleaned
# up, and preferably doctested. See also
# https://github.com/sympy/sympy/issues/6039.
'examples/advanced/identitysearch_example.ipynb',
'examples/beginner/plot_advanced.ipynb',
'examples/beginner/plot_colors.ipynb',
'examples/beginner/plot_discont.ipynb',
'examples/beginner/plot_gallery.ipynb',
'examples/beginner/plot_intro.ipynb',
'examples/intermediate/limit_examples_advanced.ipynb',
'examples/intermediate/schwarzschild.ipynb',
'examples/notebooks/density.ipynb',
'examples/notebooks/fidelity.ipynb',
'examples/notebooks/fresnel_integrals.ipynb',
'examples/notebooks/qubits.ipynb',
'examples/notebooks/sho1d_example.ipynb',
'examples/notebooks/spin.ipynb',
'examples/notebooks/trace.ipynb',
'examples/notebooks/Bezout_Dixon_resultant.ipynb',
'examples/notebooks/IntegrationOverPolytopes.ipynb',
'examples/notebooks/Macaulay_resultant.ipynb',
'examples/notebooks/Sylvester_resultant.ipynb',
'examples/notebooks/README.txt',
# This stuff :)
'release/.gitignore',
'release/README.md',
'release/Vagrantfile',
'release/fabfile.py',
'release/Dockerfile',
'release/Dockerfile-base',
'release/release.sh',
'release/rever.xsh',
'release/pull_and_run_rever.sh',
'release/compare_tar_against_git.py',
'release/update_docs.py',
'release/aptinstall.sh',
'release/build_docs.py',
'release/github_release.py',
'release/helpers.py',
'release/releasecheck.py',
'release/requirements.txt',
'release/update_requirements.sh',
'release/test_install.py',
'release/sha256.py',
'release/authors.py',
'release/ci_release_script.sh',
# This is just a distribute version of setup.py. Used mainly for setup.py
# develop, which we don't care about in the release tarball
'setupegg.py',
# pytest stuff
'conftest.py',
# Encrypted deploy key for deploying dev docs to GitHub
'github_deploy_key.enc',
}
# Files that are expected in the tarball but are not (and should not be) tracked in git
tarball_whitelist = {
# Generated by setup.py. Contains metadata for PyPI.
"PKG-INFO",
# Generated by setuptools. More metadata.
'setup.cfg',
'sympy.egg-info/PKG-INFO',
'sympy.egg-info/SOURCES.txt',
'sympy.egg-info/dependency_links.txt',
'sympy.egg-info/requires.txt',
'sympy.egg-info/top_level.txt',
'sympy.egg-info/not-zip-safe',
'sympy.egg-info/entry_points.txt',
# Not sure where this is generated from...
'doc/commit_hash.txt',
}
def blue(text):
return "\033[34m%s\033[0m" % text
def red(text):
return "\033[31m%s\033[0m" % text
def run(*cmdline, cwd=None):
"""
Run command in subprocess and get lines of output
"""
return check_output(cmdline, encoding='utf-8', cwd=cwd).splitlines()
def full_path_split(path):
"""
Function to do a full split on a path.
"""
# Based on https://stackoverflow.com/a/13505966/161801
rest, tail = os.path.split(path)
if not rest or rest == os.path.sep:
return (tail,)
return full_path_split(rest) + (tail,)
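# Illustrative example (not in the original script):
#   full_path_split('sympy-1.0/sympy/core/add.py') -> ('sympy-1.0', 'sympy', 'core', 'add.py')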
def compare_tar_against_git(tarname, gitroot):
"""
Compare the contents of the tarball against git ls-files
See the bottom of the file for the whitelists.
"""
git_lsfiles = set(i.strip() for i in run('git', 'ls-files', cwd=gitroot))
tar_output_orig = set(run('tar', 'tf', tarname))
tar_output = set()
for file in tar_output_orig:
# The tar files are like sympy-0.7.3/sympy/__init__.py, and the git
# files are like sympy/__init__.py.
split_path = full_path_split(file)
if split_path[-1]:
# Exclude directories, as git ls-files does not include them
tar_output.add(os.path.join(*split_path[1:]))
# print tar_output
# print git_lsfiles
fail = False
print()
print(blue("Files in the tarball from git that should not be there:"))
print()
for line in sorted(tar_output.intersection(git_whitelist)):
fail = True
print(line)
print()
print(blue("Files in git but not in the tarball:"))
print()
for line in sorted(git_lsfiles - tar_output - git_whitelist):
fail = True
print(line)
print()
print(blue("Files in the tarball but not in git:"))
print()
for line in sorted(tar_output - git_lsfiles - tarball_whitelist):
fail = True
print(line)
print()
if fail:
sys.exit(red("Non-whitelisted files found or not found in the tarball"))
if __name__ == "__main__":
main(*sys.argv[1:])
|
the-stack_0_21462 | #%%
from cil.utilities.dataexample import SIMULATED_CONE_BEAM_DATA, SIMULATED_PARALLEL_BEAM_DATA, SIMULATED_SPHERE_VOLUME
from cil.utilities.display import show2D
from cil.plugins.tigre import ProjectionOperator, FBP as FBP_t
from cil.recon import FDK as FBP
from cil.utilities.display import show2D
from cil.processors import CentreOfRotationCorrector
import numpy as np
#%%
image_data = SIMULATED_SPHERE_VOLUME.get()
data = SIMULATED_CONE_BEAM_DATA.get()
data=np.log(data)
data*=-1.0
data.reorder('tigre')
#%%
data_offset = data.copy()
reco1 = FBP(data_offset).run()
data_offset.geometry.config.system.rotation_axis.position=[20,0,0]
reco2 = FBP(data_offset).run()
show2D([reco1,reco2],title=['original','offset data'])
#%%
corrector = CentreOfRotationCorrector.image_sharpness('centre',FBP_t)
corrector.set_input(data_offset)
data_centred = corrector.get_output()
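# The corrector's output should carry the estimated rotation-axis position in its
# geometry, so the reconstructions below are expected to line up with the original
# (non-offset) data again.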
#%%
#method = 'interpolated'
method = 'Siddon'
#%%
ag = data_offset.geometry
ig = ag.get_ImageGeometry()
PO = ProjectionOperator(ig, ag, direct_method=method)
reco = FBP(data_offset).run()
bp = PO.adjoint(data_offset)
fp = PO.direct(bp)
show2D([reco,bp,fp])
#%%
ag = data_centred.geometry
ig = ag.get_ImageGeometry()
PO = ProjectionOperator(ig, ag, direct_method=method)
reco = FBP(data_centred).run()
bp = PO.adjoint(data_centred)
fp = PO.direct(bp)
show2D([reco,bp,fp])
# %%
|
the-stack_0_21463 | """Tests for the `cli` module."""
import pytest
from duty import cli
def test_no_duty(capsys):
"""
Run no duties.
Arguments:
capsys: Pytest fixture to capture output.
"""
assert cli.main([]) == 1
captured = capsys.readouterr()
assert "choose at least one duty" in captured.err
def test_show_help(capsys):
"""
Show help.
Arguments:
capsys: Pytest fixture to capture output.
"""
assert cli.main(["-h"]) == 0
captured = capsys.readouterr()
assert "duty" in captured.out
def test_show_help_for_given_duties(capsys):
"""
Show help for given duties.
Arguments:
capsys: Pytest fixture to capture output.
"""
assert cli.main(["-d", "tests/fixtures/basic.py", "-h", "hello"]) == 0
captured = capsys.readouterr()
assert "hello" in captured.out
def test_show_help_unknown_duty(capsys):
"""
Show help for an unknown duty.
Arguments:
capsys: Pytest fixture to capture output.
"""
assert cli.main(["-d", "tests/fixtures/basic.py", "-h", "not-here"]) == 0
captured = capsys.readouterr()
assert "Unknown duty" in captured.out
def test_select_duties():
"""Run a duty."""
assert cli.main(["-d", "tests/fixtures/basic.py", "hello"]) == 0
def test_unknown_duty():
"""Don't run an unknown duty."""
assert cli.main(["-d", "tests/fixtures/basic.py", "byebye"]) == 1
def test_incorrect_arguments():
"""Use incorrect arguments."""
assert cli.main(["-d", "tests/fixtures/basic.py", "hello=1"]) == 1
# we use 300 because it's slightly above the valid maximum 255
@pytest.mark.parametrize("code", range(-100, 300, 7)) # noqa: WPS432 (magic number 300)
def test_duty_failure(code):
"""
Check exit code.
Arguments:
code: Code to match.
"""
assert cli.main(["-d", "tests/fixtures/code.py", "exit_with", f"code={code}"]) == code
def test_multiple_duties(capsys):
"""
Run multiple duties.
Arguments:
capsys: Pytest fixture to capture output.
"""
assert cli.main(["-d", "tests/fixtures/multiple.py", "first_duty", "second_duty"]) == 0
captured = capsys.readouterr()
assert "first" in captured.out
assert "second" in captured.out
def test_duty_arguments(capsys): # noqa: WPS218 (too many assert statements)
"""
Run duty with arguments.
Arguments:
capsys: Pytest fixture to capture output.
"""
assert cli.main(["-d", "tests/fixtures/arguments.py", "say_hello", "cat=fabric"]) == 0
captured = capsys.readouterr()
assert "cat fabric" in captured.out
assert "dog dog" in captured.out
assert cli.main(["-d", "tests/fixtures/arguments.py", "say_hello", "dog=paramiko", "cat=invoke"]) == 0
captured = capsys.readouterr()
assert "cat invoke" in captured.out
assert "dog paramiko" in captured.out
def test_list_duties(capsys):
"""
List duties.
Arguments:
capsys: Pytest fixture to capture output.
"""
assert cli.main(["-d", "tests/fixtures/list.py", "-l"]) == 0
captured = capsys.readouterr()
assert "Tong..." in captured.out
assert "DEUM!" in captured.out
def test_global_options():
"""Test global options."""
assert cli.main(["-d", "tests/fixtures/code.py", "-z", "exit_with", "1"]) == 0
def test_global_and_local_options():
"""Test global and local options."""
assert cli.main(["-d", "tests/fixtures/code.py", "-z", "exit_with", "-Z", "1"]) == 1
def test_options_precedence():
"""Test options precedence."""
# @duty(nofail=True) is overridden by ctx.run(nofail=False)
assert cli.main(["-d", "tests/fixtures/precedence.py", "precedence"]) == 1
# ctx.run(nofail=False) is overridden by local option -z
assert cli.main(["-d", "tests/fixtures/precedence.py", "precedence", "-z"]) == 0
# ctx.run(nofail=False) is overridden by global option -z
assert cli.main(["-d", "tests/fixtures/precedence.py", "-z", "precedence"]) == 0
# global option -z is overridden by local option -z
assert cli.main(["-d", "tests/fixtures/precedence.py", "-z", "precedence", "-Z"]) == 1
# test options precedence (CLI option, env var, ctx.run, @duty)
# test positional arguments
# test extra keyword arguments
# test complete (global options + local options + multi duties + positional args + keyword args + extra keyword args)
@pytest.mark.parametrize(
("param", "expected"),
[
("", 1),
("n", 1),
("N", 1),
("no", 1),
("NO", 1),
("false", 1),
("FALSE", 1),
("off", 1),
("OFF", 1),
("zero=", 1),
("zero=0", 1),
("zero=n", 1),
("zero=N", 1),
("zero=no", 1),
("zero=NO", 1),
("zero=false", 1),
("zero=FALSE", 1),
("zero=off", 1),
("zero=OFF", 1),
("y", 0),
("Y", 0),
("yes", 0),
("YES", 0),
("on", 0),
("ON", 0),
("true", 0),
("TRUE", 0),
("anything else", 0),
("-1", 0),
("1", 0),
("zero=y", 0),
("zero=Y", 0),
("zero=yes", 0),
("zero=YES", 0),
("zero=on", 0),
("zero=ON", 0),
("zero=true", 0),
("zero=TRUE", 0),
("zero=anything else", 0),
("zero=-1", 0),
("zero=1", 0),
],
)
def test_cast_bool_parameter(param, expected):
"""
Test parameters casting as boolean.
Arguments:
param: Pytest parametrization fixture.
expected: Pytest parametrization fixture.
"""
assert cli.main(["-d", "tests/fixtures/booleans.py", "boolean", param]) == expected
def test_invalid_params(capsys):
"""
Check that invalid parameters are early and correctly detected.
Arguments:
capsys: Pytest fixture to capture output.
"""
assert cli.main(["-d", "tests/fixtures/booleans.py", "boolean", "zore=off"]) == 1
captured = capsys.readouterr()
assert "unexpected keyword argument 'zore'" in captured.err
assert cli.main(["-d", "tests/fixtures/code.py", "exit_with"]) == 1
captured = capsys.readouterr()
assert "missing 1 required positional argument: 'code'" in captured.err
|
the-stack_0_21466 | from django.conf.urls import url
from django.urls import path
from blog import views
urlpatterns = [
path('', views.PostListView.as_view(), name='post_list'),
path('about/', views.AboutView.as_view(), name='about'),
path('post/<int:pk>', views.PostDetailView.as_view(), name='post_detail'),
path('post/new/', views.CreatePostView.as_view(), name='post_new'),
path('post/<int:pk>/edit/', views.PostUpdateView.as_view(), name='post_edit'),
path('post/<int:pk>/remove/', views.PostDeleteView.as_view(), name='post_remove'),
path('drafts/', views.DraftListView.as_view(), name='post_draft_list'),
path('post/<int:pk>/comment/', views.add_comment_to_post, name='add_comment_to_post'),
path('comment/<int:pk>/approve/', views.comment_approve, name='comment_approve'),
path('comment/<int:pk>/remove/', views.comment_remove, name='comment_remove'),
path('post/<int:pk>/publish/', views.post_publish, name='post_publish'),
] |
the-stack_0_21468 | #!/usr/bin/env python
import ftplib
import os
import socket
HOST = 'ftp.mozilla.org'
DIRN = 'pub/mozilla.org/webtools'
FILE = 'bugzilla-LATEST.tar.gz'
def main():
try:
f = ftplib.FTP(HOST)
except (socket.error, socket.gaierror) as e:
print('ERROR: cannot reach', HOST)
return
    print('***Connected to host ', HOST)
try:
f.login()
except ftplib.error_perm:
print('ERROR: cannot login anonymously')
f.quit()
return
print('***Logged in as anonymous')
try:
f.cwd(DIRN)
except ftplib.error_perm:
print('ERROR: cannot cd to ', DIRN)
f.quit()
return
print('***Changed to ', DIRN)
try:
f.retrbinary('RETR %s' %FILE, open(FILE, 'wb').write)
except ftplib.error_perm:
print('ERROR: cannot read file ', FILE)
os.unlink(FILE)
else:
print('***Downloaded ', FILE)
f.quit()
if __name__ == '__main__':
main()
|
the-stack_0_21472 | from threading import Lock
from meerk40t.svgelements import Length
from ..device.lasercommandconstants import *
from ..kernel import Modifier
from .elements import MILS_IN_MM
def plugin(kernel, lifecycle=None):
if lifecycle == "register":
kernel.register("modifier/Spoolers", Spoolers)
kernel_root = kernel.root
kernel_root.activate("modifier/Spoolers")
class Spooler:
"""
A spooler stores spoolable lasercode events as a synchronous queue.
* peek()
* pop()
* job(job)
* jobs(iterable<job>)
* job_if_idle(job) -- Will enqueue the job if the device is currently idle.
* clear_queue()
* remove(job)
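    Illustrative usage sketch (the context object and the job generators named here
    are assumptions for illustration, not part of this module):

        spooler = Spooler(kernel_context, "0")
        spooler.job(COMMAND_HOME)                 # single command without parameters
        spooler.job(COMMAND_MOVE, 1000, 1000)     # single command with parameters
        spooler.jobs([cut_job, engrave_job])      # extend the queue with several jobs
        next_item = spooler.pop()                 # consumer pulls queued work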
"""
def __init__(self, context, spooler_name, *args, **kwargs):
self.context = context
self.name = spooler_name
self.queue_lock = Lock()
self._queue = []
self.next = None
def __repr__(self):
return "Spooler(%s)" % str(self.name)
def __del__(self):
self.name = None
self.queue_lock = None
self._queue = None
self.next = None
def __len__(self):
return len(self._queue)
def as_device(self):
links = []
obj = self
while obj is not None:
links.append(str(obj))
obj = obj.next
return " -> ".join(links)
@property
def queue(self):
return self._queue
def append(self, item):
self.job(item)
def peek(self):
if len(self._queue) == 0:
return None
return self._queue[0]
def pop(self):
if len(self._queue) == 0:
self.context.signal("spooler;queue", len(self._queue), self.name)
return None
self.queue_lock.acquire(True)
queue_head = self._queue[0]
del self._queue[0]
self.queue_lock.release()
self.context.signal("spooler;queue", len(self._queue), self.name)
return queue_head
def job(self, *job):
"""
Send a single job event with parameters as needed.
        The job can be a single command with parameters (COMMAND_MOVE, 20, 20), a command
        without parameters (COMMAND_HOME), or a generator which can yield many lasercode commands.
:param job: job to send to the spooler.
:return:
"""
self.queue_lock.acquire(True)
if len(job) == 1:
self._queue.extend(job)
else:
self._queue.append(job)
self.queue_lock.release()
self.context.signal("spooler;queue", len(self._queue))
def jobs(self, jobs):
"""
        Send several job generators to be added to the end of the queue.
        The jobs parameter may be a list or tuple (extended onto the queue) or a single job (appended).
:param jobs: jobs to extend
:return:
"""
self.queue_lock.acquire(True)
if isinstance(jobs, (list, tuple)):
self._queue.extend(jobs)
else:
self._queue.append(jobs)
self.queue_lock.release()
self.context.signal("spooler;queue", len(self._queue))
def job_if_idle(self, element):
if len(self._queue) == 0:
self.job(element)
return True
else:
return False
def clear_queue(self):
self.queue_lock.acquire(True)
self._queue = []
self.queue_lock.release()
self.context.signal("spooler;queue", len(self._queue))
def remove(self, element, index=None):
self.queue_lock.acquire(True)
if index is None:
self._queue.remove(element)
else:
del self._queue[index]
self.queue_lock.release()
self.context.signal("spooler;queue", len(self._queue))
class Spoolers(Modifier):
def __init__(self, context, name=None, channel=None, *args, **kwargs):
Modifier.__init__(self, context, name, channel)
def get_or_make_spooler(self, device_name):
dev = "device/%s" % device_name
try:
device = self.context.registered[dev]
except KeyError:
device = [None, None, None]
self.context.registered[dev] = device
self.context.signal("legacy_spooler_label", device_name)
if device[0] is None:
device[0] = Spooler(self.context, device_name)
return device[0]
def default_spooler(self):
return self.get_or_make_spooler(self.context.root.active)
def attach(self, *a, **kwargs):
context = self.context
context.spoolers = self
bed_dim = context.root
self.context.root.setting(str, "active", "0")
kernel = self.context._kernel
_ = kernel.translation
@context.console_option(
"register",
"r",
type=bool,
action="store_true",
help=_("Register this device"),
)
@context.console_command(
"spool",
help=_("spool<?> <command>"),
regex=True,
input_type=(None, "plan", "device"),
output_type="spooler",
)
def spool(
command, channel, _, data=None, register=False, remainder=None, **kwgs
):
root = self.context.root
if len(command) > 5:
device_name = command[5:]
else:
if register:
device_context = kernel.get_context("devices")
index = 0
while hasattr(device_context, "device_%d" % index):
index += 1
device_name = str(index)
else:
device_name = root.active
if register:
device_context = kernel.get_context("devices")
setattr(
device_context,
"device_%s" % device_name,
("spool%s -r " % device_name) + remainder + "\n",
)
spooler = self.get_or_make_spooler(device_name)
if data is not None:
# If plan data is in data, then we copy that and move on to next step.
spooler.jobs(data.plan)
channel(_("Spooled Plan."))
self.context.signal("plan", data.name, 6)
if remainder is None:
channel(_("----------"))
channel(_("Spoolers:"))
for d, d_name in enumerate(self.context.match("device", True)):
channel("%d: %s" % (d, d_name))
channel(_("----------"))
channel(_("Spooler %s:" % device_name))
for s, op_name in enumerate(spooler.queue):
channel("%d: %s" % (s, op_name))
channel(_("----------"))
return "spooler", (spooler, device_name)
@context.console_command(
"list",
help=_("spool<?> list"),
input_type="spooler",
output_type="spooler",
)
def spooler_list(command, channel, _, data_type=None, data=None, **kwgs):
spooler, device_name = data
channel(_("----------"))
channel(_("Spoolers:"))
for d, d_name in enumerate(self.context.match("device", True)):
channel("%d: %s" % (d, d_name))
channel(_("----------"))
channel(_("Spooler %s:" % device_name))
for s, op_name in enumerate(spooler.queue):
channel("%d: %s" % (s, op_name))
channel(_("----------"))
return data_type, data
@context.console_argument("op", type=str, help=_("unlock, origin, home, etc"))
@context.console_command(
"send",
help=_("send a plan-command to the spooler"),
input_type="spooler",
output_type="spooler",
)
def spooler_send(
command, channel, _, data_type=None, op=None, data=None, **kwgs
):
spooler, device_name = data
if op is None:
raise SyntaxError
try:
for command_name in self.context.match("plan/%s" % op):
plan_command = self.context.registered[command_name]
spooler.job(plan_command)
return data_type, data
except (KeyError, IndexError):
pass
channel(_("No plan command found."))
return data_type, data
@context.console_command(
"clear",
help=_("spooler<?> clear"),
input_type="spooler",
output_type="spooler",
)
def spooler_clear(command, channel, _, data_type=None, data=None, **kwgs):
spooler, device_name = data
spooler.clear_queue()
return data_type, data
def execute_absolute_position(position_x, position_y):
x_pos = Length(position_x).value(
ppi=1000.0, relative_length=bed_dim.bed_width * MILS_IN_MM
)
y_pos = Length(position_y).value(
ppi=1000.0, relative_length=bed_dim.bed_height * MILS_IN_MM
)
def move():
yield COMMAND_SET_ABSOLUTE
yield COMMAND_MODE_RAPID
yield COMMAND_MOVE, int(x_pos), int(y_pos)
return move
def execute_relative_position(position_x, position_y):
x_pos = Length(position_x).value(
ppi=1000.0, relative_length=bed_dim.bed_width * MILS_IN_MM
)
y_pos = Length(position_y).value(
ppi=1000.0, relative_length=bed_dim.bed_height * MILS_IN_MM
)
def move():
yield COMMAND_SET_INCREMENTAL
yield COMMAND_MODE_RAPID
yield COMMAND_MOVE, int(x_pos), int(y_pos)
yield COMMAND_SET_ABSOLUTE
return move
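        # Both helpers above return a generator function rather than running the move
        # immediately; spooler.job()/job_if_idle() queue that callable so the commands
        # are only produced when the spooled entry is executed.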
@context.console_command(
"+laser",
hidden=True,
input_type=("spooler", None),
output_type="spooler",
help=_("turn laser on in place"),
)
def plus_laser(data, **kwgs):
if data is None:
data = self.default_spooler(), self.context.root.active
spooler, device_name = data
spooler.job(COMMAND_LASER_ON)
return "spooler", data
@context.console_command(
"-laser",
hidden=True,
input_type=("spooler", None),
output_type="spooler",
help=_("turn laser off in place"),
)
def minus_laser(data, **kwgs):
if data is None:
data = self.default_spooler(), self.context.root.active
spooler, device_name = data
spooler.job(COMMAND_LASER_OFF)
return "spooler", data
@context.console_argument(
"amount", type=Length, help=_("amount to move in the set direction.")
)
@context.console_command(
("left", "right", "up", "down"),
input_type=("spooler", None),
output_type="spooler",
help=_("cmd <amount>"),
)
def direction(command, channel, _, data=None, amount=None, **kwgs):
if data is None:
data = self.default_spooler(), self.context.root.active
spooler, device_name = data
if amount is None:
amount = Length("1mm")
max_bed_height = bed_dim.bed_height * MILS_IN_MM
max_bed_width = bed_dim.bed_width * MILS_IN_MM
if not hasattr(spooler, "_dx"):
spooler._dx = 0
if not hasattr(spooler, "_dy"):
spooler._dy = 0
if command.endswith("right"):
spooler._dx += amount.value(ppi=1000.0, relative_length=max_bed_width)
elif command.endswith("left"):
spooler._dx -= amount.value(ppi=1000.0, relative_length=max_bed_width)
elif command.endswith("up"):
spooler._dy -= amount.value(ppi=1000.0, relative_length=max_bed_height)
elif command.endswith("down"):
spooler._dy += amount.value(ppi=1000.0, relative_length=max_bed_height)
context(".timer 1 0 spool%s jog\n" % device_name)
return "spooler", data
@context.console_option("force", "f", type=bool, action="store_true")
@context.console_command(
"jog",
hidden=True,
input_type="spooler",
output_type="spooler",
help=_("executes outstanding jog buffer"),
)
def jog(command, channel, _, data, force=False, **kwgs):
if data is None:
data = self.default_spooler(), self.context.root.active
spooler, device_name = data
try:
idx = int(spooler._dx)
idy = int(spooler._dy)
except AttributeError:
return
if idx == 0 and idy == 0:
return
if force:
spooler.job(execute_relative_position(idx, idy))
else:
if spooler.job_if_idle(execute_relative_position(idx, idy)):
channel(_("Position moved: %d %d") % (idx, idy))
spooler._dx -= idx
spooler._dy -= idy
else:
channel(_("Busy Error"))
return "spooler", data
@context.console_option("force", "f", type=bool, action="store_true")
@context.console_argument("x", type=Length, help=_("change in x"))
@context.console_argument("y", type=Length, help=_("change in y"))
@context.console_command(
("move", "move_absolute"),
input_type=("spooler", None),
output_type="spooler",
help=_("move <x> <y>: move to position."),
)
def move(channel, _, x, y, data=None, force=False, **kwgs):
if data is None:
data = self.default_spooler(), self.context.root.active
spooler, device_name = data
if y is None:
raise SyntaxError
if force:
spooler.job(execute_absolute_position(x, y))
else:
if not spooler.job_if_idle(execute_absolute_position(x, y)):
channel(_("Busy Error"))
return "spooler", data
@context.console_option("force", "f", type=bool, action="store_true")
@context.console_argument("dx", type=Length, help=_("change in x"))
@context.console_argument("dy", type=Length, help=_("change in y"))
@context.console_command(
"move_relative",
input_type=("spooler", None),
output_type="spooler",
help=_("move_relative <dx> <dy>"),
)
def move_relative(channel, _, dx, dy, data=None, force=False, **kwgs):
if data is None:
data = self.default_spooler(), self.context.root.active
spooler, device_name = data
if dy is None:
raise SyntaxError
if force:
spooler.job(execute_relative_position(dx, dy))
else:
if not spooler.job_if_idle(execute_relative_position(dx, dy)):
channel(_("Busy Error"))
return "spooler", data
@context.console_argument("x", type=Length, help=_("x offset"))
@context.console_argument("y", type=Length, help=_("y offset"))
@context.console_command(
"home",
input_type=("spooler", None),
output_type="spooler",
help=_("home the laser"),
)
def home(x=None, y=None, data=None, **kwgs):
if data is None:
data = self.default_spooler(), self.context.root.active
spooler, device_name = data
if x is not None and y is not None:
x = x.value(ppi=1000.0, relative_length=bed_dim.bed_width * MILS_IN_MM)
y = y.value(ppi=1000.0, relative_length=bed_dim.bed_height * MILS_IN_MM)
spooler.job(COMMAND_HOME, int(x), int(y))
return "spooler", data
spooler.job(COMMAND_HOME)
return "spooler", data
@context.console_command(
"unlock",
input_type=("spooler", None),
output_type="spooler",
help=_("unlock the rail"),
)
def unlock(data=None, **kwgs):
if data is None:
data = self.default_spooler(), self.context.root.active
spooler, device_name = data
spooler.job(COMMAND_UNLOCK)
return "spooler", data
@context.console_command(
"lock",
input_type=("spooler", None),
output_type="spooler",
help=_("lock the rail"),
)
def lock(data, **kwgs):
if data is None:
data = self.default_spooler(), self.context.root.active
spooler, device_name = data
spooler.job(COMMAND_LOCK)
return "spooler", data
for i in range(5):
self.get_or_make_spooler(str(i))
@context.console_command(
"test_dot_and_home",
input_type=("spooler", None),
hidden=True,
)
def run_home_and_dot_test(data, **kwgs):
if data is None:
data = self.default_spooler(), self.context.root.active
spooler, device_name = data
def home_dot_test():
for i in range(25):
yield COMMAND_SET_ABSOLUTE
yield COMMAND_MODE_RAPID
yield COMMAND_HOME
yield COMMAND_LASER_OFF
yield COMMAND_WAIT_FINISH
yield COMMAND_MOVE, 3000, 3000
yield COMMAND_WAIT_FINISH
yield COMMAND_LASER_ON
yield COMMAND_WAIT, 0.05
yield COMMAND_LASER_OFF
yield COMMAND_WAIT_FINISH
yield COMMAND_HOME
yield COMMAND_WAIT_FINISH
spooler.job(home_dot_test)
@context.console_argument("transition_type", type=str)
@context.console_command(
"test_jog_transition",
help="test_jog_transition <finish,jog,switch>",
hidden=True,
)
        def run_jog_transition_test(data, transition_type, **kwgs):
            """
            The Jog Transition Test is intended to test the jogging transition commands
            (jog, finish and switch).
"""
if transition_type == "jog":
command = COMMAND_JOG
elif transition_type == "finish":
command = COMMAND_JOG_FINISH
elif transition_type == "switch":
command = COMMAND_JOG_SWITCH
else:
raise SyntaxError
if data is None:
data = self.default_spooler(), self.context.root.active
spooler, device_name = data
def jog_transition_test():
yield COMMAND_SET_ABSOLUTE
yield COMMAND_MODE_RAPID
yield COMMAND_HOME
yield COMMAND_LASER_OFF
yield COMMAND_WAIT_FINISH
yield COMMAND_MOVE, 3000, 3000
yield COMMAND_WAIT_FINISH
yield COMMAND_LASER_ON
yield COMMAND_WAIT, 0.05
yield COMMAND_LASER_OFF
yield COMMAND_WAIT_FINISH
yield COMMAND_SET_SPEED, 10.0
def pos(i):
if i < 3:
x = 200
elif i < 6:
x = -200
else:
x = 0
if i % 3 == 0:
y = 200
elif i % 3 == 1:
y = -200
else:
y = 0
return x, y
for q in range(8):
top = q & 1
left = q & 2
x_val = q & 3
yield COMMAND_SET_DIRECTION, top, left, x_val, not x_val
yield COMMAND_MODE_PROGRAM
for j in range(9):
jx, jy = pos(j)
for k in range(9):
kx, ky = pos(k)
yield COMMAND_MOVE, 3000, 3000
yield COMMAND_MOVE, 3000 + jx, 3000 + jy
yield command, 3000 + jx + kx, 3000 + jy + ky
yield COMMAND_MOVE, 3000, 3000
yield COMMAND_MODE_RAPID
yield COMMAND_WAIT_FINISH
yield COMMAND_LASER_ON
yield COMMAND_WAIT, 0.05
yield COMMAND_LASER_OFF
yield COMMAND_WAIT_FINISH
spooler.job(jog_transition_test)
|
the-stack_0_21473 | import re
import time
import sqlalchemy as SA
import pushmanager.core.db as db
import pushmanager.core.util
from pushmanager.core.git import GitQueue
from pushmanager.servlets.checklist import checklist_reminders
from pushmanager.core.git import GitTaskAction
from pushmanager.core.requesthandler import RequestHandler
TAGS_RE = re.compile(r'[a-zA-Z0-9_-]+')
CONFLICT_TAGS = frozenset(('conflict-pickme', 'conflict-master'))
class NewRequestServlet(RequestHandler):
def _arg(self, key):
return pushmanager.core.util.get_str_arg(self.request, key, '')
def post(self):
if not self.current_user:
return self.send_error(403)
self.requestid = self._arg('request-id')
self.tag_list = [
x
for x in TAGS_RE.findall(self._arg('request-tags'))
if x and x not in CONFLICT_TAGS
]
reviewid = self._arg('request-review')
if reviewid:
try:
reviewid = int(reviewid)
except (ValueError, TypeError):
return self.send_error(500)
watchers = ','.join(map(str.strip, self._arg('request-watchers').split(',')))
if self.requestid != '':
self.requestid = int(self.requestid)
updated_values = {
'title': self._arg('request-title'),
'tags': ','.join(self.tag_list),
'reviewid': reviewid or None,
'repo': self._arg('request-repo').strip(),
'branch': self._arg('request-branch').strip(),
'comments': self._arg('request-comments'),
'description': self._arg('request-description'),
'watchers': watchers,
'modified': time.time(),
'revision': '0'*40,
}
if len(self._arg('request-takeover')):
updated_values.update({'user': self.current_user})
self.request_user = self.current_user
else:
self.request_user = self._arg('request-user')
query = db.push_requests.update().where(
db.push_requests.c.id == self.requestid
).values(updated_values)
else:
query = db.push_requests.insert({
'title': self._arg('request-title'),
'user': self.current_user,
'tags': ','.join(self.tag_list),
'reviewid': self._arg('request-review') or None,
'repo': self._arg('request-repo').strip(),
'branch': self._arg('request-branch').strip(),
'comments': self._arg('request-comments'),
'description': self._arg('request-description'),
'watchers': watchers,
'created': time.time(),
'modified': time.time(),
'state': 'requested',
'revision': '0'*40,
})
self.request_user = self.current_user
db.execute_cb(query, self.on_request_upsert_complete)
def on_request_upsert_complete(self, success, db_results):
self.check_db_results(success, db_results)
if not self.requestid:
self.requestid = db_results.lastrowid
query = db.push_checklist.select().where(db.push_checklist.c.request == self.requestid)
db.execute_cb(query, self.on_existing_checklist_retrieved)
def on_existing_checklist_retrieved(self, success, db_results):
if not success or not db_results:
# We should have the new request in db by this time.
return self.send_error(500)
existing_checklist_types = set(x['type'] for x in db_results.fetchall())
queries = []
necessary_checklist_types = set()
canonical_tag_name = { 'search': 'search-backend' }
for cl_type in ['pushplans', 'search', 'hoods']:
tag = canonical_tag_name.get(cl_type, cl_type)
if tag in self.tag_list:
necessary_checklist_types.add(cl_type)
necessary_checklist_types.add('{0}-cleanup'.format(cl_type))
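            # e.g. a request tagged 'search-backend' needs both the 'search' and
            # 'search-cleanup' checklists; 'pushplans' and 'hoods' tags map directly
            # to their own checklist names.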
types_to_add = necessary_checklist_types - existing_checklist_types
types_to_remove = existing_checklist_types - necessary_checklist_types
for type_ in types_to_add:
for target in checklist_reminders[type_].keys():
queries.append(db.push_checklist.insert().values(
{'request': self.requestid, 'type': type_, 'target': target}
))
if types_to_remove:
queries.append(db.push_checklist.delete().where(SA.and_(
db.push_checklist.c.request == self.requestid,
db.push_checklist.c.type.in_(types_to_remove),
)))
db.execute_transaction_cb(queries, self.on_checklist_upsert_complete)
def on_checklist_upsert_complete(self, success, db_results):
if not success:
return self.send_error(500)
if self.requestid:
GitQueue.enqueue_request(GitTaskAction.VERIFY_BRANCH, self.requestid, pushmanager_url = self.get_base_url())
# Check if the request is already pickme'd for a push, and if
# so also enqueue it to be checked for conflicts.
request_push_id = GitQueue._get_push_for_request(self.requestid)
if request_push_id:
GitQueue.enqueue_request(GitTaskAction.TEST_PICKME_CONFLICT, self.requestid, pushmanager_url = self.get_base_url())
return self.redirect("/requests?user=%s" % self.request_user)
|
the-stack_0_21475 |
def find_salary_threshold(target_payroll, current_salaries):
# custom algorithm, not from the book
# first i take avg salary based on payroll and total number of current_salaries in current_salaries list
# residue is the remainder left after comparing salary with avg_target for any salary that's more than avg_target, those will need to be adjusted later to the 'capped' salary,
# since for salary below cap, nothing is changed
# finally we compute cap by adding avg_target, k times for each count of salaries more than avg,
# we divide this by the same count of salaries more than avg_target
avg_target = target_payroll // len(current_salaries)
residue = 0
i = 0
while i < len(current_salaries):
if current_salaries[i] < avg_target:
residue += avg_target - current_salaries[i]
i += 1
else:
break
cap_salary = (residue + avg_target*(len(current_salaries) - i)) // (len(current_salaries) - i)
print(cap_salary)
# best case: O(1), average case: O(k) where k is the number of salaries below avg_target, worst case: O(n)
# if the input is sorted, a binary search over precomputed payroll levels also works
# (see the sketch below)
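# A sketch of that idea (the function name and the fractional cap are assumptions for
# illustration, not part of the original exercise): precompute prefix sums, then
# binary-search the non-decreasing payroll "levels" for the point where the budget is
# first exceeded.
import bisect

def find_salary_threshold_sorted(target_payroll, sorted_salaries):
    n = len(sorted_salaries)
    prefix = [0]
    for salary in sorted_salaries:
        prefix.append(prefix[-1] + salary)
    # levels[k] = total payroll if the cap were exactly sorted_salaries[k]
    # (the k smaller salaries are untouched, the remaining n - k people get the cap)
    levels = [prefix[k] + (n - k) * sorted_salaries[k] for k in range(n)]
    k = bisect.bisect_left(levels, target_payroll)
    if k == n:
        # even the uncapped payroll fits inside the budget
        return sorted_salaries[-1]
    # solve prefix[k] + (n - k) * cap == target_payroll for the cap
    return (target_payroll - prefix[k]) / (n - k)

# e.g. find_salary_threshold_sorted(210, [20, 30, 40, 90, 100]) == 60.0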
A = [20, 30, 40, 90, 100]
target_payroll = 210
find_salary_threshold(target_payroll, A) |
the-stack_0_21476 | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods, bad-continuation
"""Test cases for the generic_autotools module"""
from __future__ import unicode_literals
from __future__ import print_function
import logging # pylint: disable=unused-import
import unittest
from helpers import centos, docker, ubuntu
from hpccm.building_blocks.generic_autotools import generic_autotools
from hpccm.toolchain import toolchain
class Test_generic_autotools(unittest.TestCase):
def setUp(self):
"""Disable logging output messages"""
logging.disable(logging.ERROR)
@ubuntu
@docker
def test_defaults_ubuntu(self):
"""Default generic_autotools building block"""
g = generic_autotools(
directory='tcl8.6.9/unix',
prefix='/usr/local/tcl',
url='https://prdownloads.sourceforge.net/tcl/tcl8.6.9-src.tar.gz')
self.assertEqual(str(g),
r'''# https://prdownloads.sourceforge.net/tcl/tcl8.6.9-src.tar.gz
RUN mkdir -p /var/tmp && wget -q -nc --no-check-certificate -P /var/tmp https://prdownloads.sourceforge.net/tcl/tcl8.6.9-src.tar.gz && \
mkdir -p /var/tmp && tar -x -f /var/tmp/tcl8.6.9-src.tar.gz -C /var/tmp -z && \
cd /var/tmp/tcl8.6.9/unix && ./configure --prefix=/usr/local/tcl && \
make -j$(nproc) && \
make -j$(nproc) install && \
rm -rf /var/tmp/tcl8.6.9/unix /var/tmp/tcl8.6.9-src.tar.gz''')
@ubuntu
@docker
def test_no_url(self):
"""missing url"""
with self.assertRaises(RuntimeError):
g = generic_autotools()
@ubuntu
@docker
def test_both_repository_and_url(self):
"""both repository and url"""
with self.assertRaises(RuntimeError):
g = generic_autotools(repository='foo', url='bar')
@ubuntu
@docker
def test_invalid_package(self):
"""invalid package url"""
with self.assertRaises(RuntimeError):
g = generic_autotools(url='https://foo/bar.sh')
@ubuntu
@docker
def test_pre_and_post(self):
"""Preconfigure and postinstall options"""
g = generic_autotools(
directory='tcl8.6.9/unix',
postinstall=['echo "post"'],
preconfigure=['echo "pre"'],
prefix='/usr/local/tcl',
url='https://prdownloads.sourceforge.net/tcl/tcl8.6.9-src.tar.gz')
self.assertEqual(str(g),
r'''# https://prdownloads.sourceforge.net/tcl/tcl8.6.9-src.tar.gz
RUN mkdir -p /var/tmp && wget -q -nc --no-check-certificate -P /var/tmp https://prdownloads.sourceforge.net/tcl/tcl8.6.9-src.tar.gz && \
mkdir -p /var/tmp && tar -x -f /var/tmp/tcl8.6.9-src.tar.gz -C /var/tmp -z && \
cd /var/tmp/tcl8.6.9/unix && \
echo "pre" && \
cd /var/tmp/tcl8.6.9/unix && ./configure --prefix=/usr/local/tcl && \
make -j$(nproc) && \
make -j$(nproc) install && \
cd /usr/local/tcl && \
echo "post" && \
rm -rf /var/tmp/tcl8.6.9/unix /var/tmp/tcl8.6.9-src.tar.gz''')
@ubuntu
@docker
def test_configure_opts_check(self):
"""Configure options and check enabled"""
g = generic_autotools(
check=True,
configure_opts=['--disable-getpwuid',
'--enable-orterun-prefix-by-default'],
prefix='/usr/local/openmpi',
url='https://www.open-mpi.org/software/ompi/v4.0/downloads/openmpi-4.0.1.tar.bz2')
self.assertEqual(str(g),
r'''# https://www.open-mpi.org/software/ompi/v4.0/downloads/openmpi-4.0.1.tar.bz2
RUN mkdir -p /var/tmp && wget -q -nc --no-check-certificate -P /var/tmp https://www.open-mpi.org/software/ompi/v4.0/downloads/openmpi-4.0.1.tar.bz2 && \
mkdir -p /var/tmp && tar -x -f /var/tmp/openmpi-4.0.1.tar.bz2 -C /var/tmp -j && \
cd /var/tmp/openmpi-4.0.1 && ./configure --prefix=/usr/local/openmpi --disable-getpwuid --enable-orterun-prefix-by-default && \
make -j$(nproc) && \
make -j$(nproc) check && \
make -j$(nproc) install && \
rm -rf /var/tmp/openmpi-4.0.1 /var/tmp/openmpi-4.0.1.tar.bz2''')
@ubuntu
@docker
def test_environment_and_toolchain(self):
"""environment and toolchain"""
tc = toolchain(CC='gcc', CXX='g++', FC='gfortran')
g = generic_autotools(
build_directory='/tmp/build',
directory='/var/tmp/tcl8.6.9/unix',
environment={'FOO': 'BAR'},
prefix='/usr/local/tcl',
toolchain=tc,
url='https://prdownloads.sourceforge.net/tcl/tcl8.6.9-src.tar.gz')
self.assertEqual(str(g),
r'''# https://prdownloads.sourceforge.net/tcl/tcl8.6.9-src.tar.gz
RUN mkdir -p /var/tmp && wget -q -nc --no-check-certificate -P /var/tmp https://prdownloads.sourceforge.net/tcl/tcl8.6.9-src.tar.gz && \
mkdir -p /var/tmp && tar -x -f /var/tmp/tcl8.6.9-src.tar.gz -C /var/tmp -z && \
mkdir -p /tmp/build && cd /tmp/build && FOO=BAR CC=gcc CXX=g++ FC=gfortran /var/tmp/tcl8.6.9/unix/configure --prefix=/usr/local/tcl && \
make -j$(nproc) && \
make -j$(nproc) install && \
rm -rf /var/tmp/tcl8.6.9/unix /var/tmp/tcl8.6.9-src.tar.gz /tmp/build''')
@ubuntu
@docker
def test_repository_recursive(self):
"""test repository and recusive option"""
g = generic_autotools(preconfigure=['./autogen.sh'],
prefix='/usr/local/zeromq',
recursive=True,
repository='https://github.com/zeromq/libzmq.git')
self.assertEqual(str(g),
r'''# https://github.com/zeromq/libzmq.git
RUN mkdir -p /var/tmp && cd /var/tmp && git clone --depth=1 --recursive https://github.com/zeromq/libzmq.git libzmq && cd - && \
cd /var/tmp/libzmq && \
./autogen.sh && \
cd /var/tmp/libzmq && ./configure --prefix=/usr/local/zeromq && \
make -j$(nproc) && \
make -j$(nproc) install && \
rm -rf /var/tmp/libzmq''')
@ubuntu
@docker
def test_runtime(self):
"""Runtime"""
g = generic_autotools(
directory='tcl8.6.9/unix',
prefix='/usr/local/tcl',
url='https://prdownloads.sourceforge.net/tcl/tcl8.6.9-src.tar.gz')
r = g.runtime()
self.assertEqual(r,
r'''# https://prdownloads.sourceforge.net/tcl/tcl8.6.9-src.tar.gz
COPY --from=0 /usr/local/tcl /usr/local/tcl''')
|
the-stack_0_21477 |
# Copyright Jamie Allsop 2015-2015
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
#-------------------------------------------------------------------------------
# MarkdownToHtmlMethod
#-------------------------------------------------------------------------------
import os.path
import itertools
import grip
import cuppa.progress
class GripRunner(object):
def __call__( self, target, source, env ):
for s, t in itertools.izip( source, target ):
in_file = str(s)
out_file = str(t)
try:
grip.export( path=in_file, render_wide=True, out_filename=out_file )
except Exception as error:
print( "cuppa: error: grip.export( path={}, render_wide=True, out_filename={}) failed with error [{}]".format( in_file, out_file, error ))
return None
class GripEmitter(object):
def __init__( self, output_dir ):
self._output_dir = output_dir
def __call__( self, target, source, env ):
target = []
for s in source:
path = os.path.join( self._output_dir, os.path.split( str(s) )[1] )
t = os.path.splitext(path)[0] + ".html"
target.append(t)
return target, source
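        # e.g. with final_dir='_artifacts', a source 'docs/notes.md' maps to the target
        # '_artifacts/notes.html' (only the basename is kept; the extension becomes .html)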
class MarkdownToHtmlMethod(object):
def __call__( self, env, source, final_dir=None ):
if final_dir == None:
final_dir = env['abs_final_dir']
env.AppendUnique( BUILDERS = {
'Grip' : env.Builder(
action = GripRunner(),
emitter = GripEmitter(final_dir) )
} )
html = env.Grip( [], source )
cuppa.progress.NotifyProgress.add( env, html )
return html
@classmethod
def add_to_env( cls, cuppa_env ):
cuppa_env.add_method( "MarkdownToHtml", cls() )
|
the-stack_0_21478 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
estimate_gradient_norm.py
A multithreaded gradient norm sampler
Copyright (C) 2017-2018, IBM Corp.
Copyright (C) 2017, Lily Weng <[email protected]>
and Huan Zhang <[email protected]>
This program is licenced under the Apache 2.0 licence,
contained in the LICENCE file in this directory.
"""
from __future__ import division
import numpy as np
import random
import ctypes
import time
import sys
import os
import tensorflow as tf
from multiprocessing import Pool, current_process, cpu_count
from shmemarray import ShmemRawArray, NpShmemArray
from functools import partial
from randsphere import randsphere
from tensorflow.python.ops import gradients_impl
class EstimateLipschitz(object):
def __init__(self, sess, seed = 1215, nthreads = 0):
"""
sess: tensorflow session
        seed: random seed used to initialize the sampling worker processes
        nthreads: number of worker processes (0 means use half of the available cores)
"""
self.sess = sess
self.seed = seed
# create a pool of workers to compute samples in advance
if nthreads == 0:
self.n_processes = max(cpu_count() // 2, 1)
else:
self.n_processes = nthreads
# set up random seed during initialization
def initializer(s):
np.random.seed(s + current_process()._identity[0])
# using only 1 OpenMP thread
os.environ['OMP_NUM_THREADS'] = "1"
self.pool = Pool(processes = self.n_processes, initializer = initializer, initargs=(self.seed,))
def load_model(self, dataset = "mnist", model_name = "2-layer", activation = "relu", model = None, batch_size = 0, compute_slope = False, order = 1):
"""
model: if set to None, then load dataset with model_name. Otherwise use the model directly.
dataset: mnist, cifar and imagenet. recommend to use mnist and cifar as a starting point.
model_name: possible options are 2-layer, distilled, and normal
"""
from setup_cifar import CIFAR, CIFARModel, TwoLayerCIFARModel, MYCIFARModel
from setup_mnist import MNIST, MNISTModel, TwoLayerMNISTModel, MYMNISTModel
from nlayer_model import NLayerModel
from setup_imagenet import ImageNet, ImageNetModel
# if set this to true, we will use the logit layer output instead of probability
# the logit layer's gradients are usually larger and more stable
output_logits = True
self.dataset = dataset
self.model_name = model_name
if model is None:
print('Loading model...')
if dataset == "mnist":
self.batch_size = 1024
if model_name == "2-layer":
model = TwoLayerMNISTModel("models/mnist_2layer", self.sess, not output_logits)
elif model_name == "normal":
if activation == "relu":
model = MNISTModel("models/mnist", self.sess, not output_logits)
else:
                        print("activation = {}".format(activation))
model = MNISTModel("models/mnist_cnn_7layer_"+activation, self.sess, not output_logits, activation= activation)
time.sleep(5)
elif model_name == "brelu":
model = MNISTModel("models/mnist_brelu", self.sess, not output_logits, use_brelu = True)
elif model_name == "distilled":
model = MNISTModel("models/mnist-distilled-100", self.sess, not output_logits)
elif model_name == 'rcnn_valid':
model = MYMNISTModel('models/rcnn6t16_1.h5',self.sess,not output_logits)
elif model_name == 'rcnn_wrong':
model = MYMNISTModel('models/rcnn6t16_1.h5',self.sess,not output_logits)
elif model_name == 'rcnn_fgsm1':
model = MYMNISTModel('models/rcnn6t16_1.h5',self.sess,not output_logits)
elif model_name == 'rcnn_fgsm05':
model = MYMNISTModel('models/rcnn6t16_1.h5',self.sess,not output_logits)
elif model_name == 'rcnn_cw':
model = MYMNISTModel('models/rcnn6t16_1.h5',self.sess,not output_logits)
elif model_name == 'rcnn_hop':
model = MYMNISTModel('models/rcnn6t16_1.h5',self.sess,not output_logits)
elif model_name == 'rfnn_valid':
model = MYMNISTModel('models/rfnn3t30_1.h5',self.sess,not output_logits)
elif model_name == 'rfnn_wrong':
model = MYMNISTModel('models/rfnn3t30_1.h5',self.sess,not output_logits)
elif model_name == 'rfnn_fgsm1':
model = MYMNISTModel('models/rfnn3t30_1.h5',self.sess,not output_logits)
elif model_name == 'rfnn_fgsm05':
model = MYMNISTModel('models/rfnn3t30_1.h5',self.sess,not output_logits)
elif model_name == 'rfnn_cw':
model = MYMNISTModel('models/rfnn3t30_1.h5',self.sess,not output_logits)
elif model_name == 'rfnn_hop':
model = MYMNISTModel('models/rfnn3t30_1.h5',self.sess,not output_logits)
else:
# specify model parameters as N,M,opts
model_params = model_name.split(",")
if len(model_params) < 3:
raise(RuntimeError("incorrect model option" + model_name))
numlayer = int(model_params[0])
nhidden = int(model_params[1])
modelfile = "models/mnist_{}layer_relu_{}_{}".format(numlayer, nhidden, model_params[2])
print("loading", modelfile)
model = NLayerModel([nhidden] * (numlayer - 1), modelfile)
elif dataset == "cifar":
self.batch_size = 1024
if model_name == "2-layer":
model = TwoLayerCIFARModel("models/cifar_2layer", self.sess, not output_logits)
elif model_name == "normal":
if activation == "relu":
model = CIFARModel("models/cifar", self.sess, not output_logits)
else:
model = CIFARModel("models/cifar_cnn_7layer_"+activation, self.sess, not output_logits, activation = activation)
elif model_name == "brelu":
model = CIFARModel("models/cifar_brelu", self.sess, not output_logits, use_brelu = True)
elif model_name == "distilled":
model = CIFARModel("models/cifar-distilled-100", self.sess, not output_logits)
else:
# specify model parameters as N,M,opts
model_params = model_name.split(",")
if len(model_params) < 3:
raise(RuntimeError("incorrect model option" + model_name))
numlayer = int(model_params[0])
nhidden = int(model_params[1])
modelfile = "models/cifar_{}layer_relu_{}_{}".format(numlayer, nhidden, model_params[2])
print("loading", modelfile)
model = NLayerModel([nhidden] * (numlayer - 1), modelfile, image_size=32, image_channel=3)
elif dataset == "imagenet":
self.batch_size = 32
model = ImageNetModel(self.sess, use_softmax = not output_logits, model_name = model_name, create_prediction = False)
else:
raise(RuntimeError("dataset unknown"))
#print("*** Loaded model successfully")
self.model = model
self.compute_slope = compute_slope
if batch_size != 0:
self.batch_size = batch_size
## placeholders: self.img, self.true_label, self.target_label
# img is the placeholder for image input
self.img = tf.placeholder(shape = [None, model.image_size, model.image_size, model.num_channels], dtype = tf.float32)
# output is the output tensor of the entire network
self.output = model.predict(self.img)
# create the graph to compute gradient
# get the desired true label and target label
self.true_label = tf.placeholder(dtype = tf.int32, shape = [])
self.target_label = tf.placeholder(dtype = tf.int32, shape = [])
true_output = self.output[:, self.true_label]
target_output = self.output[:, self.target_label]
# get the difference
self.objective = true_output - target_output
        # get the gradient (tf.gradients returns a list; take the first element)
self.grad_op = tf.gradients(self.objective, self.img)[0]
# compute gradient norm: (in computation graph, so is faster)
grad_op_rs = tf.reshape(self.grad_op, (tf.shape(self.grad_op)[0], -1))
self.grad_2_norm_op = tf.norm(grad_op_rs, axis = 1)
self.grad_1_norm_op = tf.norm(grad_op_rs, ord=1, axis = 1)
self.grad_inf_norm_op = tf.norm(grad_op_rs, ord=np.inf, axis = 1)
### Lily: added Hessian-vector product calculation here for 2nd order bound:
if order == 2:
            ## _hessian_vector_product(ys, xs, v): returns a list of tensors containing the product between the Hessian and v
            ## ys: a scalar value or a tensor or a list of tensors to be summed to yield a scalar
## xs: a list of tensors that we should construct the Hessian over
## v: a list of tensors with the same shape as xs that we want to multiply by the Hessian
# self.randv: shape = (Nimg,28,28,1) (the v in _hessian_vector_product)
self.randv = tf.placeholder(shape = [None, model.image_size, model.image_size, model.num_channels], dtype = tf.float32)
# hv_op_tmp: shape = (Nimg,28,28,1) for mnist, same as self.img (the xs in _hessian_vector_product)
hv_op_tmp = gradients_impl._hessian_vector_product(self.objective, [self.img], [self.randv])[0]
# hv_op_rs: reshape hv_op_tmp to hv_op_rs whose shape = (Nimg, 784) for mnist
hv_op_rs = tf.reshape(hv_op_tmp, (tf.shape(hv_op_tmp)[0],-1))
# self.hv_norm_op: norm of hessian vector product, keep shape = (Nimg,1) using keepdims
self.hv_norm_op = tf.norm(hv_op_rs, axis = 1, keepdims=True)
# hv_op_rs_normalize: normalize Hv to Hv/||Hv||, shape = (Nimg, 784)
hv_op_rs_normalize = hv_op_rs/self.hv_norm_op
# self.hv_op: reshape hv_op_rs_normalize to shape = (Nimg,28,28,1)
self.hv_op = tf.reshape(hv_op_rs_normalize, tf.shape(hv_op_tmp))
## reshape randv and compute its norm
# shape: (Nimg, 784)
randv_rs = tf.reshape(self.randv, (tf.shape(self.randv)[0],-1))
# shape: (Nimg,)
self.randv_norm_op = tf.norm(randv_rs, axis = 1)
## compute v'Hv: use un-normalized Hv (hv_op_tmp, hv_op_rs)
# element-wise multiplication and then sum over axis = 1 (now shape: (Nimg,))
self.vhv_op = tf.reduce_sum(tf.multiply(randv_rs,hv_op_rs),axis=1)
## compute Rayleigh quotient: v'Hv/v'v (estimated largest eigenvalue), shape: (Nimg,)
# note: self.vhv_op and self.randv_norm_op has to be in the same dimension (either (Nimg,) or (Nimg,1))
self.eig_est = self.vhv_op/tf.square(self.randv_norm_op)
## Lily added the tf.while to compute the eigenvalue in computational graph later
# cond for computing largest abs/neg eigen-value
def cond(it, randv, eig_est, eig_est_prev, tfconst):
norm_diff = tf.norm(eig_est-eig_est_prev,axis=0)
return tf.logical_and(it < 500, norm_diff > 0.001)
# compute largest abs eigenvalue: tfconst = 0
# compute largest neg eigenvalue: tfconst = 10
def body(it, randv, eig_est, eig_est_prev, tfconst):
#hv_op_tmp = gradients_impl._hessian_vector_product(self.objective, [self.img], [randv])[0]-10*randv
hv_op_tmp = gradients_impl._hessian_vector_product(self.objective, [self.img], [randv])[0]-tf.multiply(tfconst,randv)
hv_op_rs = tf.reshape(hv_op_tmp, (tf.shape(hv_op_tmp)[0],-1))
hv_norm_op = tf.norm(hv_op_rs, axis = 1, keepdims=True)
hv_op_rs_normalize = hv_op_rs/hv_norm_op
hv_op = tf.reshape(hv_op_rs_normalize, tf.shape(hv_op_tmp))
randv_rs = tf.reshape(randv, (tf.shape(randv)[0],-1))
randv_norm_op = tf.norm(randv_rs, axis = 1)
vhv_op = tf.reduce_sum(tf.multiply(randv_rs,hv_op_rs),axis=1)
eig_est_prev = eig_est
eig_est = vhv_op/tf.square(randv_norm_op)
return (it+1, hv_op, eig_est, eig_est_prev, tfconst)
it = tf.constant(0)
# compute largest abs eigenvalue
result = tf.while_loop(cond, body, [it, self.randv, self.vhv_op, self.eig_est, tf.constant(0.0)])
# compute largest neg eigenvalue
self.shiftconst = tf.placeholder(shape = (), dtype = tf.float32)
result_1 = tf.while_loop(cond, body, [it, self.randv, self.vhv_op, self.eig_est, self.shiftconst])
# computing largest abs eig value and save result
self.it = result[0]
self.while_hv_op = result[1]
self.while_eig = result[2]
# computing largest neg eig value and save result
self.it_1 = result_1[0]
#self.while_eig_1 = tf.add(result_1[2], tfconst)
self.while_eig_1 = tf.add(result_1[2], result_1[4])
show_tensor_op = False
if show_tensor_op:
print("====================")
print("Define hessian_vector_product operator: ")
print("hv_op_tmp = {}".format(hv_op_tmp))
print("hv_op_rs = {}".format(hv_op_rs))
print("self.hv_norm_op = {}".format(self.hv_norm_op))
print("hv_op_rs_normalize = {}".format(hv_op_rs_normalize))
print("self.hv_op = {}".format(self.hv_op))
print("self.grad_op = {}".format(self.grad_op))
print("randv_rs = {}".format(randv_rs))
print("self.randv_norm_op = {}".format(self.randv_norm_op))
print("self.vhv_op = {}".format(self.vhv_op))
print("self.eig_est = {}".format(self.eig_est))
print("====================")
return self.img, self.output
def _estimate_Lipschitz_multiplerun(self, num, niters, input_image, target_label, true_label, sample_norm = "l2", transform=None, order = 1):
"""
num: number of samples per iteration
niters: number of iterations
input_image: original image (h*w*c)
"""
batch_size = self.batch_size
shape = (batch_size, self.model.image_size, self.model.image_size, self.model.num_channels)
dimension = self.model.image_size * self.model.image_size * self.model.num_channels
if num < batch_size:
print("Increasing num to", batch_size)
num = batch_size
"""
1. Compute input_image related quantities:
"""
# get the original prediction and gradient, gradient norms values on input image:
pred, grad_val, grad_2_norm_val, grad_1_norm_val, grad_inf_norm_val = self.sess.run(
[self.output, self.grad_op, self.grad_2_norm_op, self.grad_1_norm_op, self.grad_inf_norm_op],
feed_dict = {self.img: [input_image], self.true_label: true_label, self.target_label: target_label})
pred = np.squeeze(pred)
# print(pred)
# print(grad_val)
# class c and class j in Hein's paper. c is original class
c = true_label
j = target_label
# get g_x0 = f_c(x_0) - f_j(x_0)
g_x0 = pred[c] - pred[j]
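# g(x) = f_c(x) - f_j(x) is the classification margin between the true class c and the target class j; the Lipschitz-based bounds computed later scale with g(x_0)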
# the gradient norm values should be scalars
g_x0_grad_2_norm = np.squeeze(grad_2_norm_val)
g_x0_grad_1_norm = np.squeeze(grad_1_norm_val)
g_x0_grad_inf_norm = np.squeeze(grad_inf_norm_val)
print("** Evaluating g_x0, grad_2_norm_val on the input image x0: ")
print("shape of input_image = {}".format(input_image.shape))
print("g_x0 = {:.3f}, grad_2_norm_val = {:3f}, grad_1_norm_val = {:.3f}, grad_inf_norm_val = {:3f}".format(g_x0, g_x0_grad_2_norm, g_x0_grad_1_norm, g_x0_grad_inf_norm))
##### Lily #####
if order == 2: # evaluate the hv and hv norm on input_image
# set randv as a random matrix with the same shape as input_image
print("** Evaluating hv and hv_norm on the input image x0:")
randv = np.random.randn(*input_image.shape)
hv, hv_norm = self.sess.run([self.hv_op, self.hv_norm_op],
feed_dict = {self.img: [input_image], self.randv:[randv], self.true_label: true_label, self.target_label: target_label})
print("hv shape = {}, hv_norm = {}".format(hv.shape, hv_norm))
"""
2. Prepare for sampling:
"""
def div_work_to_cores(njobs, nprocs):
process_item_list = []
while njobs > 0:
process_item_list.append(int(np.ceil(njobs / float(nprocs))))
njobs -= process_item_list[-1]
nprocs -= 1
return process_item_list
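# e.g. div_work_to_cores(10, 4) returns [3, 3, 2, 2]: the jobs are split as evenly as possible, with earlier workers taking any remainder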
# n is the dimension
if self.dataset == "imagenet":
# for imagenet, generate random samples for this batch only
# array in shared memory storing results of all threads
total_item_size = batch_size
else:
# for cifar and mnist, generate random samples for this entire iteration
total_item_size = num
# divide the jobs evenly to all available threads
process_item_list = div_work_to_cores(total_item_size, self.n_processes)
self.n_processes = len(process_item_list)
# select random sample generation function
if sample_norm == "l2":
# the scaling constant in [a,b]: scales the L2 norm of each sample (each sample originally has norm ~1)
a = 0; b = 3;
elif sample_norm == "li":
# for Linf we don't need the scaling
a = 0.1; b = 0.1;
elif sample_norm == "l1":
# TODO: make the sample ball radius adjustable
a = 0; b = 30;
else:
raise RuntimeError("Unknown sample_norm " + sample_norm)
print('Using sphere', sample_norm)
## create necessary shared array structures (saved in /dev/shm) and will be used (and written) in randsphere.py:
# result_arr, scale, input_example, all_inputs
# note: need to use scale[:] = ... not scale = ..., o.w. the contents will not be saved to the shared array
# inputs_0 is the image x_0
inputs_0 = np.array(input_image)
tag_prefix = str(os.getpid()) + "_"
result_arr = NpShmemArray(np.float32, (total_item_size, dimension), tag_prefix + "randsphere")
# we have an extra batch_size to avoid overflow
scale = NpShmemArray(np.float32, (num+batch_size), tag_prefix + "scale")
scale[:] = (b-a)*np.random.rand(num+batch_size)+a;
input_example = NpShmemArray(np.float32, inputs_0.shape, tag_prefix + "input_example")
# this is a read-only array
input_example[:] = inputs_0
# all_inputs is a shared memory array and will be written in randsphere to save the samples
# all_inputs holds the perturbations for one batch or all samples
all_inputs = NpShmemArray(np.float32, (total_item_size,) + inputs_0.shape, tag_prefix + "all_inputs")
# holds the results copied from all_inputs
clipped_all_inputs = np.empty(dtype=np.float32, shape = (total_item_size,) + inputs_0.shape)
# prepare the argument list
offset_list = [0]
for item in process_item_list[:-1]:
offset_list.append(offset_list[-1] + item)
print(self.n_processes, "workers launched with parameters", process_item_list, offset_list)
## create multiple workers to generate samples
# randsphere generates the samples (see randsphere.py); functools.partial binds the fixed keyword arguments here, so worker_func only takes the per-worker (count, offset, base) argument tuple
worker_func = partial(randsphere, n = dimension, input_shape = inputs_0.shape, total_size = total_item_size, scale_size = num+batch_size, tag_prefix = tag_prefix, r = 1.0, norm = sample_norm, transform = transform)
worker_args = list(zip(process_item_list, offset_list, [0] * self.n_processes))
# sample_results is an object to monitor if the process has ended (meaning finish generating samples in randsphere.py)
# this line of code will initiate the worker_func to start working (like initiate the job)
sample_results = self.pool.map_async(worker_func, worker_args)
# num: total number of samples to evaluate per iteration (at least batch_size)
# number of iterations
Niters = niters
if order == 1:
# store the max L in each iteration
L2_max = np.zeros(Niters)
L1_max = np.zeros(Niters)
Li_max = np.zeros(Niters)
# store the max G in each iteration
G2_max = np.zeros(Niters)
G1_max = np.zeros(Niters)
Gi_max = np.zeros(Niters)
# store computed Lipschitz constants in each iteration
L2 = np.zeros(num)
L1 = np.zeros(num)
Li = np.zeros(num)
# store computed gradient norm in each iteration
G2 = np.zeros(num)
G1 = np.zeros(num)
Gi = np.zeros(num)
elif order == 2:
# store the max H in each iteration
H2_max = np.zeros(Niters)
# store computed 2 norm of H in each iteration
H2 = np.zeros(num)
H2_neg = np.zeros(num)
# how many batches we have
Nbatches = num // batch_size
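# note: integer division means any remainder samples beyond Nbatches*batch_size are never evaluated, so num is best chosen as a multiple of batch_size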
# timer
search_begin_time = time.time()
"""
3. Start performing sampling:
"""
## Start
# multiple runs: generating the samples
## use worker_func to generate x samples, and then use sess.run to evaluate the gradient norm operator
for iters in range(Niters):
iter_begin_time = time.time()
# shuffled index
# idx_shuffle = np.random.permutation(num);
# the scaling constant in [a,b]: scales the L2 norm of each sample (each sample originally has norm ~1)
scale[:] = (b-a)*np.random.rand(num+batch_size)+a;
# number of L's we have computed
L_counter = 0
G_counter = 0
H_counter = 0
overhead_time = 0.0
overhead_start = time.time()
# for cifar and mnist, generate all the random input samples (x in the paper) at once
# for imagenet, generate one batch of input samples (x in the paper) for each iteration
if self.dataset != "imagenet":
# get samples for this iteration: make sure randsphere finished computing samples and stored in all_inputs
# if the samples are not yet ready, this call blocks until the worker processes finish, then returns
sample_results.get()
# copy the results to a buffer and do clipping
np.clip(all_inputs, -0.5, 0.5, out = clipped_all_inputs)
# launch the workers again to generate samples for the next iteration (a new job): the sess.run calls below run on the GPU and may be slow, so the CPU generates the next set of samples in parallel to save time
sample_results = self.pool.map_async(worker_func, worker_args)
overhead_time += time.time() - overhead_start
## generate input samples "batch_inputs" and compute corresponding gradient norms samples "perturbed_grad_x_norm"
for i in range(Nbatches):
overhead_start = time.time()
# for imagenet, generate random samples for this batch only
if self.dataset == "imagenet":
# get samples for this batch
sample_results.get()
# copy the results to a buffer and do clipping
np.clip(all_inputs, -0.5, 0.5, out = clipped_all_inputs)
# launch the workers again to generate samples for the next batch
worker_args = zip(process_item_list, offset_list, [(i + 1) * batch_size] * self.n_processes)
sample_results = self.pool.map_async(worker_func, worker_args)
if self.dataset == "imagenet":
# we generate samples for each batch at a time
batch_inputs = clipped_all_inputs
else:
# we generate samples for all batches
batch_inputs = clipped_all_inputs[i * batch_size: (i + 1) * batch_size]
# print(result_arr.shape, result_arr)
# print('------------------------')
# print(batch_inputs.shape, batch_inputs.reshape(result_arr.shape))
# print('------------------------')
overhead_time += time.time() - overhead_start
if order == 1:
# run inference and get the gradient
perturbed_predicts, perturbed_grad_2_norm, perturbed_grad_1_norm, perturbed_grad_inf_norm = self.sess.run(
[self.output, self.grad_2_norm_op, self.grad_1_norm_op, self.grad_inf_norm_op],
feed_dict = {self.img: batch_inputs, self.target_label: target_label, self.true_label: true_label})
if self.compute_slope:
# flatten each sample to shape (batch_size, dimension); note: `s` was otherwise undefined in this scope, so this reshape is an inferred fix
s = batch_inputs.reshape(batch_size, -1)
# pair up samples (0,1), (2,3), ... and compute the distance within each pair
s12_2_norm = np.linalg.norm(s[0:batch_size-1:2] - s[1:batch_size:2], axis = 1)
s12_1_norm = np.linalg.norm(s[0:batch_size-1:2] - s[1:batch_size:2], ord=1, axis = 1)
s12_i_norm = np.linalg.norm(s[0:batch_size-1:2] - s[1:batch_size:2], ord=np.inf, axis = 1)
# compute the function value difference within each pair of samples
g_x1 = perturbed_predicts[0:batch_size-1:2, c] - perturbed_predicts[0:batch_size-1:2, j]
g_x2 = perturbed_predicts[1:batch_size:2, c] - perturbed_predicts[1:batch_size:2, j]
# estimated Lipschitz constants for this batch
# for slope estimate, we need the DUAL norm
batch_L2 = np.abs(g_x1 - g_x2) / s12_2_norm
batch_L1 = np.abs(g_x1 - g_x2) / s12_i_norm
batch_Li = np.abs(g_x1 - g_x2) / s12_1_norm
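# note: the slope estimates measure the perturbation in the DUAL norm, which is why the L1 slope divides by the Linf distance and the Linf slope divides by the L1 distance (L2 is its own dual)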
L2[L_counter : L_counter + batch_size//2] = batch_L2
L1[L_counter : L_counter + batch_size//2] = batch_L1
Li[L_counter : L_counter + batch_size//2] = batch_Li
G2[G_counter : G_counter + batch_size] = perturbed_grad_2_norm
G1[G_counter : G_counter + batch_size] = perturbed_grad_1_norm
Gi[G_counter : G_counter + batch_size] = perturbed_grad_inf_norm
L_counter += (batch_size//2)
G_counter += batch_size
elif order == 2:
##### Lily #####
randv_batch = np.random.randn(*batch_inputs.shape)
perturbed_hv, perturbed_hv_norm = self.sess.run([self.hv_op, self.hv_norm_op],
feed_dict = {self.img: batch_inputs, self.randv: randv_batch,
self.true_label: true_label, self.target_label: target_label})
show_tensor_dim = False
if show_tensor_dim:
print("====================")
print("** Evaluating perturbed_hv and perturbed_hv_norm in batch {}: ".format(iters))
print("pertubed_hv_prod shape = {}".format(perturbed_hv.shape))
print("randv_batch shape = {}".format(randv_batch.shape))
print("perturbed_hv_norm = {}".format(perturbed_hv_norm[:,0])) # size: (Nimg, 1)
print("perturbed_hv_norm shape = {}".format(perturbed_hv_norm.shape))
#print("perturbed_grad_2_norm= {}".format(perturbed_grad_2_norm))
#print("perturbed_grad_2_norm shape = {}".format(perturbed_grad_2_norm.shape))
pt_hvs = []
pt_hvs.append(perturbed_hv+0*randv_batch)
#print("************** Using tf.while_loop:********************")
# compute max eigenvalue
temp_hv, temp_eig, niter_eig = self.sess.run([self.while_hv_op, self.while_eig, self.it], feed_dict = {self.img: batch_inputs, self.randv: randv_batch, self.true_label: true_label, self.target_label: target_label})
##print("converge in {} steps, temp_eig = {}".format(niter_eig, temp_eig))
# if max eigenvalue is positive, compute the max neg eigenvalue by using the shiftconst
if max(temp_eig) > 0:
shiftconst = max(temp_eig)
temp_eig_1, niter_eig_1 = self.sess.run([self.while_eig_1, self.it_1], feed_dict = {self.img: batch_inputs, self.randv: randv_batch, self.true_label: true_label, self.target_label: target_label, self.shiftconst: shiftconst})
##print("converge in {} steps, temp_eig_1 = {}".format(niter_eig_1, temp_eig_1))
else:
temp_eig_1 = temp_eig
niter_eig_1 = -1
print("temp_eig (abs) converge in {} steps, temp_eig_1 (neg) converge in {} steps".format(niter_eig, niter_eig_1))
## use outer while_loop
#max_eig_iters = 10
#print_flag = True
#final_est_eig_1 = self._compute_max_abseig(pt_hvs, batch_inputs, true_label, target_label, max_eig_iters, print_flag)
#print("************** Using outer while_loop:********************")
#print("outer loop final_est_eig_1 = {}".format(final_est_eig_1))
## use tf while_loop
final_est_eig = temp_eig
final_est_eig_neg = temp_eig_1
H2[H_counter : H_counter + batch_size] = final_est_eig
H2_neg[H_counter : H_counter + batch_size] = final_est_eig_neg
H_counter += batch_size
if order == 1:
# at the end of each iteration: get the per-iteration max gradient norm
if self.compute_slope:
L2_max[iters] = np.max(L2)
L1_max[iters] = np.max(L1)
Li_max[iters] = np.max(Li)
G2_max[iters] = np.max(G2)
G1_max[iters] = np.max(G1)
Gi_max[iters] = np.max(Gi)
# if self.compute_slope:
# print('[STATS][L2] loop = {}, time = {:.5g}, iter_time = {:.5g}, overhead = {:.5g}, L2 = {:.5g}, L1 = {:.5g}, Linf = {:.5g}, G2 = {:.5g}, G1 = {:.5g}, Ginf = {:.5g}'.format(iters, time.time() - search_begin_time, time.time() - iter_begin_time, overhead_time, L2_max[iters], L1_max[iters], Li_max[iters], G2_max[iters], G1_max[iters], Gi_max[iters]))
# else:
# print('[STATS][L2] loop = {}, time = {:.5g}, iter_time = {:.5g}, overhead = {:.5g}, G2 = {:.5g}, G1 = {:.5g}, Ginf = {:.5g}'.format(iters, time.time() - search_begin_time, time.time() - iter_begin_time, overhead_time, G2_max[iters], G1_max[iters], Gi_max[iters]))
# sys.stdout.flush()
# reset per iteration L and G by filling 0
if self.compute_slope:
L2.fill(0)
L1.fill(0)
Li.fill(0)
G2.fill(0)
G1.fill(0)
Gi.fill(0)
elif order == 2:
## consider -lambda_min
idx = H2 > 0
H2[idx] = H2_neg[idx]
idx_max = np.argmax(abs(H2))
H2_max[iters] = H2[idx_max]
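# after the substitution above, entries whose dominant eigenvalue was positive now hold the corresponding largest negative eigenvalue estimate; H2_max records the entry with the largest magnitude in this iteration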
#print('[STATS][L2] loop = {}, time = {:.5g}, iter_time = {:.5g}, overhead = {:.5g}, H2 = {:.5g}'.format(iters, time.time() - search_begin_time, time.time() - iter_begin_time, overhead_time, H2_max[iters]))
if order == 1:
# print('[STATS][L1] g_x0 = {:.5g}, L2_max = {:.5g}, L1_max = {:.5g}, Linf_max = {:.5g}, G2_max = {:.5g}, G1_max = {:.5g}, Ginf_max = {:.5g}'.format(
# g_x0, np.max(L2_max), np.max(L1_max), np.max(Li_max), np.max(G2_max), np.max(G1_max), np.max(Gi_max)))
# # when compute the bound we need the DUAL norm
# if self.compute_slope:
# print('[STATS][L1] bnd_L2_max = {:.5g}, bnd_L1_max = {:.5g}, bnd_Linf_max = {:.5g}, bnd_G2_max = {:.5g}, bnd_G1_max = {:.5g}, bnd_Ginf_max = {:.5g}'.format(g_x0/np.max(L2_max), g_x0/np.max(Li_max), g_x0/np.max(L1_max), g_x0/np.max(G2_max), g_x0/np.max(Gi_max), g_x0/np.max(G1_max)))
# else:
# print('[STATS][L1] bnd_G2_max = {:.5g}, bnd_G1_max = {:.5g}, bnd_Ginf_max = {:.5g}'.format(g_x0/np.max(G2_max), g_x0/np.max(Gi_max), g_x0/np.max(G1_max)))
# sys.stdout.flush()
# discard the last batch of samples
sample_results.get()
return [L2_max,L1_max,Li_max,G2_max,G1_max,Gi_max,g_x0,pred]
elif order == 2:
# find positive eig value and substitute with its corresponding negative eig value, then we only need to sort once
#print("H2_max = {}".format(H2_max))
# find max abs(H2_max)
H2_max_val = max(abs(H2_max))
# print('[STATS][L1] g_x0 = {:.5g}, g_x0_grad_2_norm = {:.5g}, g_x0_grad_1_norm = {:.5g}, g_x0_grad_inf_norm = {:.5g}, H2_max = {:.5g}'.format(g_x0, g_x0_grad_2_norm, g_x0_grad_1_norm, g_x0_grad_inf_norm, H2_max_val))
bnd = (-g_x0_grad_2_norm + np.sqrt(g_x0_grad_2_norm**2+2*g_x0*H2_max_val))/H2_max_val
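# the bound is the positive root of the quadratic 0.5*H*eps^2 + ||grad g(x_0)||_2*eps - g(x_0) = 0 (with H = H2_max_val), i.e. the perturbation radius at which the second-order model of the margin can reach zero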
# print('[STATS][L1] bnd_H2_max = {:.5g}'.format(bnd))
# sys.stdout.flush()
sample_results.get()
return [H2_max, g_x0, g_x0_grad_2_norm, g_x0_grad_1_norm, g_x0_grad_inf_norm, pred]
def _compute_max_abseig(self, pt_hvs, batch_inputs, true_label, target_label, max_eig_iters, print_flag):
## compute hv and est_eig:
i = 0
cond = False
pt_eigs = []
print("pt_hvs[0] shape = {}".format(pt_hvs[0].shape))
# perform power iteration loop outside tensorflow
while (i<max_eig_iters and cond==False):
tmp_hv, tmp_hv_norm, tmp_vhv, tmp_vnorm, tmp_est_eig = self.sess.run([self.hv_op, self.hv_norm_op, self.vhv_op, self.randv_norm_op, self.eig_est], feed_dict = {self.img: batch_inputs, self.randv: pt_hvs[i], self.true_label: true_label, self.target_label: target_label})
tmp_vhv = np.squeeze(tmp_vhv)
tmp_vnorm = np.squeeze(tmp_vnorm)
tmp_est_eig = np.squeeze(tmp_est_eig)
if print_flag:
#print("current step = {}, norm = {}".format(i, tmp_hv_norm[:,0]))
#print("current step = {}, pt_hv_prod.shape = {}, pt_hvs_norm.shape = {}".format(i,tmp_hv.shape, tmp_hv_norm.shape))
print("current step = {}, est_eig = {}".format(i,tmp_est_eig-0))
#print("current step = {}, vhv = {}".format(i,tmp_vhv))
#print("current step = {}, vnorm (check: should be 1) = {}".format(i,tmp_vnorm))
pt_hvs.append(tmp_hv+0*pt_hvs[i])
pt_eigs.append(tmp_est_eig)
# conditions
if i > 0:
cond_element = abs(tmp_est_eig-pt_eigs[i-1]) < 1e-3
if print_flag:
print("cond = {}".format(cond_element))
cond = cond_element.all()
i+=1
if i == max_eig_iters:
print("==== Reach max iterations!!! ====")
return pt_eigs[-1]
def __del__(self):
# terminate the pool
self.pool.terminate()
def estimate(self, x_0, true_label, target_label, Nsamp, Niters, sample_norm, transform, order):
result = self._estimate_Lipschitz_multiplerun(Nsamp,Niters,x_0,target_label,true_label,sample_norm, transform, order)
return result