blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 5
283
| content_id
stringlengths 40
40
| detected_licenses
sequencelengths 0
41
| license_type
stringclasses 2
values | repo_name
stringlengths 7
96
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 58
values | visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 12.7k
662M
⌀ | star_events_count
int64 0
35.5k
| fork_events_count
int64 0
20.6k
| gha_license_id
stringclasses 11
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 43
values | src_encoding
stringclasses 9
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
5.88M
| extension
stringclasses 30
values | content
stringlengths 7
5.88M
| authors
sequencelengths 1
1
| author
stringlengths 0
73
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
93f4d1f71ff5a8ed707ce9c378c1dda19e38c177 | c7a6c08fe9e84d1e5d4f11add95d2db4c1f8e9b6 | /CMtest/wsgi.py | 2d52eb8a2bab06dab0cc1b53c367f241ce8db33d | [] | no_license | xsnx/CM-test | aa3a0686180ff71309f4a03176a59d8092cdb178 | 5c3f73976452eab7d22dbb06eb675c9a8e693ef4 | refs/heads/main | 2023-03-05T16:40:23.123897 | 2021-02-23T10:31:03 | 2021-02-23T10:31:03 | 341,462,424 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 389 | py | """
WSGI config for CMtest project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings module unless the environment already
# specifies one (setdefault does not override an existing value).
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'CMtest.settings')
# Module-level WSGI callable that application servers (gunicorn, uWSGI,
# mod_wsgi, ...) look up under the name ``application``.
application = get_wsgi_application()
| [
"[email protected]"
] | |
3cadbe72c8bcd386865ea4e7d16db39381ee0017 | c96209b660a3df4488a534b6fec33f95ea684021 | /Code/Qiskit-Env/venv/bin/wheel | 74fcaca40a97ebbab4f739bb79b4a453949850a2 | [] | no_license | ayush0624/QIGA2-Tuned | 2f06eb52f6203c4203facdcf552749e567ec4aa6 | 924928d6d4532d9456cb883903e25b4e05ea70a4 | refs/heads/master | 2021-06-13T15:58:50.120295 | 2021-02-02T17:36:00 | 2021-02-02T17:36:00 | 131,007,971 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 260 | #!/Users/AyushHariharan/PycharmProjects/Qiskit/venv/bin/python3.6
# -*- coding: utf-8 -*-
# Console-script shim for the ``wheel`` command-line tool.
import re
import sys
from wheel.tool import main
if __name__ == '__main__':
    # Strip setuptools launcher suffixes ("-script.py"/".exe") from argv[0]
    # so the tool reports its plain command name in help/error output.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    # Propagate the tool's return value as the process exit status.
    sys.exit(main())
| [
"[email protected]"
] | ||
923a5a7b91a896e3ef14340744cc18e1608dd7b2 | 7e97e2546fe6ea00102414d319ee4d3c9264161c | /digit_classifier.py | e1305e35fb09fb24d6952ff3ed04b48f80d2499a | [] | no_license | kochjohannes/digit_classifier | f49ee68bec91731d4ef11112d274f0e253e52e29 | 4b2985672189bce55fa7ecee3da94f4884f080dc | refs/heads/master | 2021-05-05T12:58:17.444391 | 2018-01-22T08:28:20 | 2018-01-22T08:28:20 | 118,347,803 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,403 | py | from PIL import Image as PImage
import PIL.ImageOps
import numpy as np
import sys
import seaborn as sns
import matplotlib.pyplot as plt
import pickle
#This python script attempts to classify hand-written digits in photos. The classification is done using
#a SVM (Support Vector Machine) which was trained on the MNIST database training set and uses a polynomial
#kernel of order 3. An accuracy of ~97% was achieved on the MNIST test set.
#The key for this classifier to work is to pre-process the image of the digit to make it look as similar
#to the MNIST digits. The MNIST digits are contained in a 20x20 pixel grid and the whole image is 28x28 pixels.
#Furthermore, the digits in the MNIST database have their centre of mass in the middle of the image. They have
#also been filtered to remove background and noise, and has also been normalised.
#This script loads a pre-trained SVM model (trained using sklearn for minimum implementation), and focuses on
#the feature engineering. A high-level description of the script is as follows:
#-Convert image to grayscale, normalise, and separate digit from background
#-Find the digit and crop it out as a square
#-Calculate mass centre and move it accordingly to get mass centre in middle of image
#-Change the resolution and padd with (white) background
#-Classify the digit
#Current limitations:
#-Only one digit can be visible in image
#-Static filtering of background
#Tips for higher accuracy on your own hand-written digits:
#-Have only one digit visible
#-Write the digit on a blank paper and avoid other dark things in the same image
#-Use a dark pen and draw a fat digit (the higher contrast vs. background, the better)
#-An up-close photo of the digit is a good thing, especially if the camera/image resolution to be
#processed by this script is low.
#-Try to avoid out-of focus, since that lowers the contrast and the sharpness of the digit's edges
#USAGE: Due to the rather large ML-model size (~50 MB), the training is done on
#your computer, requiring it to download the MNIST database. If your run the
#setup script, the database will be removed after the setup.
#From terminal, navigate to the folder with setup.sh and run "./setup.sh"
#Once the setup is completed, you can run the classifier as:
#"python3 digit_classifier.py /path/to/file.jpg"
path = sys.argv[1] #Path to image
#path = "/Users/johanneskoch/Downloads/digit_8 2.jpg"
img = PImage.open(path)
img = PIL.ImageOps.invert(img) #Invert image to make it similar to MNIST digits when greyscale
img = img.convert('LA') #convert to greyscale
#img = img.rotate(-90) #If image is rotated, might be if taken with iPhone
#img and tmp are similar to eachother, just different data types
#img is Image object, tmp is numpy array
#switching between the datatypes allows easy modification
#Normalise (to be exact: normalising and multiplying with 255), and filter background
tmp = np.asarray(img)[:,:,0] #[:,:,1] contains only values 255
tmp = tmp.reshape([1, img.height*img.width])
max_pixel = np.max(tmp); min_pixel = np.min(tmp)
tmp = tmp*(255/max_pixel)
for i in range(tmp.size):
if(tmp[0,i] < 0.6*255): #Static, could perhaps be made dynamic by fitting a Gaussian to the histogram
#and do some clever reasoning
tmp[0,i] = 0
tmp = tmp.reshape([img.height, img.width])
#Uncomment below for a visualization of the (hopefully) separated digit
#sns.heatmap(tmp)
#plt.show()
#Time to locate the digit
def find_digit_row(image, idx_range, threshold=1000):
    """Locate one edge of the digit along the row axis.

    Scans ``idx_range`` and returns the *second* row index whose summed pixel
    intensity exceeds ``threshold`` (requiring two hits makes the detection
    robust to a single noisy row).  Returns ``None`` when fewer than two rows
    qualify, so callers should guard against images with no detectable digit.

    :param image: 2D numpy array, digit bright (>0) on a ~0 background.
    :param idx_range: row indices in scan order — ascending to find the top
        edge, descending to find the bottom edge.
    :param threshold: minimum row sum that counts as "digit present".  The
        historic constant 1000 is resolution dependent; it is now a keyword
        parameter (default unchanged) so callers can scale it.
    """
    seen_one = False
    for i in idx_range:
        # Bug fix: the original summed the module-level ``tmp`` array,
        # silently ignoring the ``image`` argument.  Callers always passed
        # ``tmp``, so using the parameter preserves existing behaviour.
        if np.sum(image[i, :]) > threshold:
            if seen_one:
                return i
            seen_one = True
def find_digit_col(image, idx_range, threshold=1000):
    """Locate one edge of the digit along the column axis.

    Mirror of :func:`find_digit_row`: returns the *second* column index in
    ``idx_range`` whose summed pixel intensity exceeds ``threshold``, or
    ``None`` when fewer than two columns qualify.

    :param image: 2D numpy array, digit bright (>0) on a ~0 background.
    :param idx_range: column indices in scan order — ascending for the left
        edge, descending for the right edge.
    :param threshold: minimum column sum counting as "digit present"
        (resolution dependent; default keeps the historic 1000).
    """
    seen_one = False
    for i in idx_range:
        # Bug fix: sum the ``image`` argument instead of the module-level
        # ``tmp`` the original closed over (callers always passed ``tmp``).
        if np.sum(image[:, i]) > threshold:
            if seen_one:
                return i
            seen_one = True
start_row = find_digit_row(tmp, range(tmp.shape[0])) - 1 #ascending i. Subtraction since two_in_row
#and want to capture the first row
end_row = find_digit_row(tmp, np.flip(range(tmp.shape[0]), axis=0)) + 1 #descending i
start_col = find_digit_col(tmp, range(tmp.shape[1])) - 1
end_col = find_digit_col(tmp, np.flip(range(tmp.shape[1]), axis=0)) + 1
#print(start_row, end_row, start_col, end_col)
#print(tmp.shape) #before cropping
tmp = tmp[start_row:end_row+1, start_col:end_col+1]
#print(tmp.shape) #after cropping
#To make the image square (which we will need in the end), we add empty background
#on both sides (i.e. 2 sides of the square)
padd_one_side = int(np.round(np.abs(tmp.shape[0]-tmp.shape[1])*0.5)) #No. of col/row to add to one side
extra_colrow = int(np.abs(2*(padd_one_side)-(tmp.shape[0]-tmp.shape[1]))) #Might need to add one more
#since round
if(tmp.shape[0]>tmp.shape[1]):
tmp = np.concatenate((np.zeros([tmp.shape[0], padd_one_side + extra_colrow]), tmp, np.zeros([tmp.shape[0], padd_one_side])), axis = 1)
if(tmp.shape[0]<tmp.shape[1]):
tmp = np.concatenate((np.zeros([padd_one_side + extra_colrow, tmp.shape[1]]), tmp, np.zeros([padd_one_side, tmp.shape[1]])), axis = 0)
#print(tmp.shape) #after padding
#Uncomment below for visualisation of the digit
#sns.heatmap(tmp)
#plt.show()
img = PIL.Image.fromarray(tmp) #convert back to Image object for resizing
img = img.resize([20,20], PImage.ANTIALIAS) #convert to 20x20 pixel
tmp = np.asarray(img) #convert back to numpy array
#Time to move the centre of the digit to the centre of the image
#Start by placing the 20x20 image in the upper-left corner of the 28z28 image
tmp_28 = np.zeros([28,28])
tmp_28[0:20,0:20] = tmp
tmp = tmp_28 #tmp is now a 28x28
#Calculate the mass centre
total_mass = np.sum(np.sum(tmp))
cdf = 0
for i in range(tmp.shape[0]): #calculate centre along axis 0
cdf += np.sum(tmp[i,:])
if(cdf>=0.5*total_mass):
mass_center_row = i
break
cdf = 0
for i in range(tmp.shape[1]): #calculate centre along axis 1
cdf += np.sum(tmp[:,i])
if(cdf>=0.5*total_mass):
mass_center_col = i
break
#print(mass_center_row, mass_center_col)
diff_row = 13 - mass_center_row
diff_col = 13 - mass_center_col
tmp_28 = np.zeros([28,28])
#place the digit at the centre
tmp_28[diff_row:20+diff_row,+diff_col:20+diff_col] = tmp[0:20,0:20]
#Do one more filtering of the background to remove background level caused
#by the antialiasing when reducing image size
tmp_28 = tmp_28.reshape([1, 28*28])
for i in range(tmp_28.size):
if(tmp_28[0,i] < 100):
tmp_28[0,i] = 0
tmp_28 = tmp_28.reshape([28, 28])
#Uncomment below for visualisation of digit
#sns.heatmap(tmp_28)
#plt.show()
#Now it is finally time to do the classifying
X_tmp = tmp_28.reshape([1, 784])
#Load pre-trained model, can be changed easily by simply loading another pre-trained model
filename = "svc_model_tmp.sav"
loaded_model = pickle.load(open(filename, 'rb'))
result = loaded_model.predict(X_tmp)
print("Predicted digit: ", result[0])
| [
"[email protected]"
] | |
568c4ab269b823e162a8355b9b5702b9958cc630 | dd5d7e549cbe8ff3d5e0d9bfc022eb40371643d1 | /test_runner.py | 5d5868aff7b7fcfe958662415970884ed5ff99df | [] | no_license | marksmerchek/cardgame | 61645f0244f727b0bff2a874c006192a3bd97115 | 1efca55725539d90aef172de40a0505d21348ef1 | refs/heads/main | 2023-01-31T09:31:29.712987 | 2020-12-20T15:08:01 | 2020-12-20T15:08:01 | 323,080,262 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 412 | py | #!/usr/bin/env python3
import unittest
from tests import test_hand, test_deck, test_player, test_card
if __name__ == '__main__':
    # Assemble the card-game test cases into one suite and run it verbosely.
    cases = (
        test_card.CardTestCase(),
        test_deck.DeckTestCase(),
        test_hand.HandTestCase(),
        test_player.PlayerTestCase(),
    )
    suite = unittest.TestSuite()
    for case in cases:
        suite.addTest(case)
    unittest.TextTestRunner(verbosity=2).run(suite)
| [
"[email protected]"
] | |
3c0a1523f2507ba8bb60cd4470284c90438ca995 | e84e19c5d52f511ae280bb02d2febb8e0650a116 | /code23_debug.py | 3c67ebf1a514a6dd0949dcacb8799efbef50fb58 | [] | no_license | AlJamilSuvo/LeetCode | 2e3afe0c588d003aa15ea3eb08a8d2ca381c35dd | 9ad26f2974ad4a41a9654a5564fe1ad27ae2463c | refs/heads/master | 2022-03-15T15:43:21.826485 | 2022-03-04T20:40:50 | 2022-03-04T20:40:50 | 174,458,244 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,955 | py | class Solution(object):
def mergeKLists(self, lists):
finalList=ListNode(-10000)
current=finalList
while True:
minList=None
index=0
for i in range(len(lists)):
lst=lists[i]
if lst==None:
continue
if minList==None:
minList=lst
index=i
elif lst.val<minList.val:
minList=lst
index=i
if minList==None:
break
current.next=minList
current=current.next
lists[index]=minList.next
return finalList.next
def stringToListNode(input):
    """Deserialize a JSON array string into a singly linked list of ListNode."""
    values = json.loads(input)
    sentinel = ListNode(0)
    tail = sentinel
    for value in values:
        tail.next = ListNode(value)
        tail = tail.next
    # Head of the real list (None for an empty array).
    return sentinel.next
def stringToListNodeArray(input):
    """Deserialize a JSON array-of-arrays into a list of linked lists."""
    arrays = json.loads(input)
    # Round-trip each inner array through JSON so stringToListNode can parse it.
    return [stringToListNode(json.dumps(arr)) for arr in arrays]
def listNodeToString(node):
    """Render a linked list as "[v1, v2, ...]"; an empty list becomes "[]"."""
    if not node:
        return "[]"
    values = []
    while node:
        values.append(str(node.val))
        node = node.next
    return "[" + ", ".join(values) + "]"
def main():
    """Read one JSON array-of-arrays per stdin line, merge, print the result.

    NOTE: Python 2 only — uses the generator ``.next()`` method and the
    ``print`` statement; under Python 3 this would need ``next(lines)`` and
    ``print(out)``.
    """
    import sys
    def readlines():
        # Lazily yield stdin lines without their trailing newline.
        for line in sys.stdin:
            yield line.strip('\n')
    lines = readlines()
    while True:
        try:
            line = lines.next()
            lists = stringToListNodeArray(line)
            ret = Solution().mergeKLists(lists)
            out = listNodeToString(ret)
            print out
        except StopIteration:
            # stdin exhausted.
            break
if __name__ == '__main__':
main() | [
"[email protected]"
] | |
52c33af4f61224a1a25c25d2c40ae273c0f75f5a | 93c734a295ede8b284cdcdf5467ba162fbfcd2ba | /pyslmpclient/const.py | f2263c492e404e588915af80911d03843c1c1674 | [
"BSD-3-Clause"
] | permissive | sakharin/PySLMPClient | 7c91cf99a0313d2c39767b51a1561d08e4e975b3 | d583e8ca5637bbb606a21a9efeef8adbf7a95801 | refs/heads/master | 2023-04-16T03:37:50.141958 | 2021-04-28T07:51:49 | 2021-04-28T07:51:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,767 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import enum
class SLMPCommand(enum.Enum):
    """Numeric command codes for SLMP request frames.

    Each member's value is the command field of an SLMP message; names are
    grouped by prefix into functional families (Device_*, Label_*, File_*,
    RemoteControl_*, BackupRestore_*, CC-Link IE management, ...).
    Sub-command values are not encoded here.
    """
    Device_Read = 0x0401
    Device_Write = 0x1401
    Device_ReadRandom = 0x0403
    Device_WriteRandom = 0x1402
    Device_EntryMonitorDevice = 0x0801
    Device_ExecuteMonitor = 0x0802
    Device_ReadBlock = 0x0406
    Device_WriteBlock = 0x1406
    Label_ArrayLabelRead = 0x041A
    Label_ArrayLabelWrite = 0x141A
    Label_LabelReadRandom = 0x041C
    Label_LabelWriteRandom = 0x141B
    Memory_Read = 0x0613
    Memory_Write = 0x1613
    ExtendUnit_Read = 0x0601
    ExtendUnit_Write = 0x1601
    RemoteControl_RemoteRun = 0x1001
    RemoteControl_RemoteStop = 0x1002
    RemoteControl_RemotePause = 0x1003
    RemoteControl_RemoteLatchClear = 0x1005
    RemoteControl_RemoteReset = 0x1006
    RemoteControl_ReadTypeName = 0x0101
    RemoteControl_NodeIndication = 0x3070
    Drive_ReadDiskState = 0x0205
    Drive_Defrag = 0x1207
    RemotePassword_Lock = 0x1631
    RemotePassword_Unlock = 0x1630
    File_ReadFileInfo = 0x0201
    File_ReadFileInfoWithTitle = 0x0202
    File_ReadFileNoInfo = 0x0204
    File_ChangeFileInfo = 0x1204
    File_Search = 0x0203
    File_Read = 0x0206
    File_Write = 0x1203
    File_FileLock = 0x0808
    File_Copy = 0x1206
    File_Delete = 0x1205
    File_ReadDir = 0x1810
    File_SearchDir = 0x1811
    File_NewFileA = 0x1202
    File_NewFileB = 0x1820
    File_DeleteFile = 0x1822
    File_CopyFile = 0x1824
    File_ChangeFileState = 0x1825
    File_ChangeFileDate = 0x1826
    File_OpenFile = 0x1827
    File_ReadFile = 0x1828
    File_WriteFile = 0x1829
    File_CloseFile = 0x182A
    SelfTest = 0x0619
    ClearError_Code = 0x1617
    ClearError_History = 0x1619
    OnDemand = 0x2101
    DataCollection_Auth = 0x4000
    DataCollection_KeepAlive = 0x4001
    DataCollection_GetData = 0x4002
    DataCollection_Distribute = 0x4003
    NodeConnection_NodeSearch = 0x0E30
    NodeConnection_IPAddressSet = 0x0E31
    ParameterSetting_DeviceInfoCompare = 0x0E32
    ParameterSetting_ParameterGet = 0x0E33
    ParameterSetting_ParameterUpdate = 0x0E34
    ParameterSetting_ParameterSetStart = 0x0E35
    ParameterSetting_ParameterSetEnd = 0x0E36
    ParameterSetting_ParameterSetCancel = 0x0E3A
    ParameterSetting_DeviceIdentificationInfoGet = 0x0E28
    ParameterSetting_CommunicationSpeed = 0x3072
    NodeMonitoring_StatusRead = 0xE44
    NodeMonitoring_StatusRead2 = 0xE53
    NodeMonitoring_ConnectionSettingGet = 0xE45
    NodeMonitoring_DataMonitoring = 0x0E29
    Other_CAN = 0x4020
    Other_IOLInk = 0x5000
    Other_ModbusTCP = 0x5001
    Other_ModbusRTU = 0x5002
    CCLinkIEFieldDiagnostics_SelectNodeInfoGet = 0x3119
    CCLinkIEFieldDiagnostics_CommunicationTest = 0x3040
    CCLinkIEFieldDiagnostics_CableTest = 0x3050
    CCLinkIETSNNetworkManagement_NetworkConfig = 0x0E90
    CCLinkIETSNNetworkManagement_MasterConfig = 0x0E91
    CCLinkIETSNNetworkManagement_SlaveConfig = 0x0E92
    CCLinkIETSNNetworkManagement_CyclicConfig = 0x0E93
    CCLinkIETSNNetworkManagement_Notification = 0x0E94
    LinkDeviceParameter_LinkDevicePrmWrite = 0x320A
    LinkDeviceParameter_LinkDevicePrmWriteCheckReq = 0x320B
    LinkDeviceParameter_LinkDevicePrmWriteCheckResp = 0x320C
    EventHistory_GetEventNum = 0x3060
    EventHistory_GetEventHistory = 0x3061
    EventHistory_ClearEventHistory = 0x161A
    EventHistory_ClockOffsetDataSend = 0x3062
    BackupRestore_GetCommunicationSet = 0x0EB0
    BackupRestore_GetStationSubIDList = 0x0EB1
    BackupRestore_GetDeviceInfo = 0x0EB2
    BackupRestore_StartBackup = 0x0EB3
    BackupRestore_EndBackup = 0x0EB4
    BackupRestore_RequestBackup = 0x0EB5
    BackupRestore_GetBackupPrm = 0x0EB6
    BackupRestore_CheckRestore = 0x0EB7
    BackupRestore_StartRestore = 0x0EB8
    BackupRestore_EndRestore = 0x0EB9
    BackupRestore_SetBackupPrm = 0x0EBA
    SlaveStationPrmRestore_CheckPrmDelivery = 0x0EBE
    StartStopCyclic_StopOwnStationCyclic = 0x3206
    StartStopCyclic_StartOwnStationCyclic = 0x3207
    StartStopCyclic_StopOtherStationCyclic = 0x3208
    StartStopCyclic_StartOtherStationCyclic = 0x3209
    ReservedStation_RsvStationConfigTemporaryRelease = 0x320D
    ReservedStation_RsvStationConfig = 0x320E
    WatchdogCounter_SetWatchdogCounterInfo = 0x3210
    WatchdogCounter_WatchdogCounterOffsetConfig = 0x3211
class DeviceCode(enum.Enum):
    """Binary device codes used when addressing PLC device memory in SLMP
    device-access commands.

    The member value is the device-code byte placed in the request; member
    names are the conventional device mnemonics (relays, registers, timers,
    counters, index/file registers, ...).  See D_ADDR_16 / D_ADDR_4BYTE /
    D_STRANGE_NAME below for addressing quirks of specific devices.
    """
    SM = 0x91
    SD = 0xA9
    X = 0x9C
    Y = 0x9D
    M = 0x90
    L = 0x92
    F = 0x93
    V = 0x94
    B = 0xA0
    D = 0xA8
    W = 0xB4
    TS = 0xC1
    TC = 0xC0
    TN = 0xC2
    LTS = 0x51
    LTC = 0x50
    LTN = 0x52
    SS = 0xC7
    SC = 0xC6
    SN = 0xC8
    LSTS = 0x59
    LSTC = 0x58
    LSTN = 0x5A
    CS = 0xC4
    CC = 0xC3
    CN = 0xC5
    SB = 0xA1
    SW = 0xB5
    DX = 0xA2
    DY = 0xA3
    Z = 0xCC
    LZ = 0x62
    R = 0xAF
    ZR = 0xB0
    RD = 0x2C
    LCS = 0x55
    LCC = 0x54
    LCN = 0x56
# Devices whose addresses are written in hexadecimal notation.
D_ADDR_16 = (
    DeviceCode.X,
    DeviceCode.Y,
    DeviceCode.B,
    DeviceCode.W,
    DeviceCode.SB,
    DeviceCode.SW,
    DeviceCode.DX,
    DeviceCode.DY,
    DeviceCode.ZR,
    DeviceCode.W,
)
# Devices that can only be accessed with 4-byte addressing.
D_ADDR_4BYTE = (
    DeviceCode.LTS,
    DeviceCode.LTC,
    DeviceCode.LTN,
    DeviceCode.LSTS,
    DeviceCode.LSTC,
    DeviceCode.LSTN,
    DeviceCode.LCS,
    DeviceCode.LCC,
    DeviceCode.LCN,
    DeviceCode.LZ,
    DeviceCode.RD,
)
# Devices whose name differs between 4-byte and 2-byte addressing.
D_STRANGE_NAME = {DeviceCode.SS, DeviceCode.SC, DeviceCode.SN}
class TypeCode(enum.Enum):
    """Model type codes identifying a CPU / network module.

    Member names follow the MELSEC series naming (Q*, L*, R*, ...); the value
    is the numeric type code the station reports.  Presumably returned by the
    RemoteControl_ReadTypeName command — TODO confirm against the SLMP
    reference manual.
    """
    Q00JCPU = 0x250
    Q00CPU = 0x251
    Q01CPU = 0x252
    Q02CPU = 0x41
    Q06HCPU = 0x42
    Q12HCPU = 0x43
    Q25HCPU = 0x44
    Q12PRHCPU = 0x4B
    Q25PRHCPU = 0x4C
    Q00UJCPU = 0x260
    Q00UCPU = 0x261
    Q01UCPU = 0x262
    Q02UCPU = 0x263
    Q03UDCPU = 0x268
    Q03UDVCPU = 0x366
    Q04UDHCPU = 0x269
    Q04UDVCPU = 0x367
    Q06UDHCPU = 0x26A
    Q06UDVCPU = 0x368
    Q10UDHCPU = 0x266
    Q13UDHCPU = 0x26B
    Q13UDVCPU = 0x36A
    Q20UDHCPU = 0x267
    Q26UDHCPU = 0x26C
    Q26UDVCPU = 0x36C
    Q50UDEHCPU = 0x26D
    Q100UDEHCPU = 0x26E
    QS001CPU = 0x230
    L02SCPU = 0x543
    L02CPU = 0x541
    L06CPU = 0x544
    L26CPU = 0x545
    L26CPU_BT = 0x542
    L04HCPU = 0x48C0
    L08HCPU = 0x48C1
    L16HCPU = 0x48C2
    LJ72GF15_T2 = 0x0641
    R00CPU = 0x48A0
    R01CPU = 0x48A1
    R02CPU = 0x48A2
    R04CPU = 0x4800
    R04ENCPU = 0x4805
    R08CPU = 0x4801
    R08ENCPU = 0x4806
    R08PCPU = 0x4841
    R08PSFCPU = 0x4851
    R08SFCPU = 0x4891
    R16CPU = 0x4802
    R16ENCPU = 0x4807
    R16PCPU = 0x4842
    R16PSFCPU = 0x4852
    R16SFCPU = 0x4892
    R32CPU = 0x4803
    R32ENCPU = 0x4808
    R32PCPU = 0x4843
    R32PSFCPU = 0x4853
    R32SFCPU = 0x4893
    R120CPU = 0x4804
    R120ENCPU = 0x4809
    R120PCPU = 0x4844
    R120PSFCPU = 0x4854
    R120SFCPU = 0x4894
    R12CCPU_V = 0x4820
    MI5122_VW = 0x4E01
    RJ72GF15_T2 = 0x4860
    RJ72GF15_T2_D1 = 0x4861
    RJ72GF15_T2_D2 = 0x4862
    NZ2GF_ETB = 0x0642
class PDU(enum.Enum):
    """Protocol-data-unit kind identifiers (internal sequential values).

    Naming pattern: rd/wr/od prefix plus Req/Res/Err, suffixed with the frame
    family (ST, MT, EMT, LMT) — presumably read/write/on-demand
    request/response/error in the single- vs multi-transmission frame
    formats; confirm against the SLMP specification.  The ST_PDU/MT_PDU/
    EMT_PDU/LMT_PDU tuples below group the members by suffix.
    """
    rdReqST = 1
    wrReqST = 2
    rdResST = 3
    wrResST = 4
    rdErrST = 5
    wrErrST = 6
    odReqST = 7
    rdReqMT = 8
    wrReqMT = 9
    rdResMT = 10
    wrResMT = 11
    rdErrMT = 12
    wrErrMT = 13
    odReqMT = 14
    reqEMT = 15
    resEMT = 16
    pushEMT = 17
    reqLMT = 18
    resLMT = 19
    errLMT = 20
# All PDU kinds with the "ST" suffix, in definition order.
ST_PDU = (
    PDU.rdReqST,
    PDU.wrReqST,
    PDU.rdResST,
    PDU.wrResST,
    PDU.rdErrST,
    PDU.wrErrST,
    PDU.odReqST,
)
# All PDU kinds with the "MT" suffix, in definition order.
MT_PDU = (
    PDU.rdReqMT,
    PDU.wrReqMT,
    PDU.rdResMT,
    PDU.wrResMT,
    PDU.rdErrMT,
    PDU.wrErrMT,
    PDU.odReqMT,
)
# The "EMT" and "LMT" frame families.
EMT_PDU = (PDU.reqEMT, PDU.resEMT, PDU.pushEMT)
LMT_PDU = (PDU.reqLMT, PDU.resLMT, PDU.errLMT)
class EndCode(enum.Enum):
    """Completion codes carried in the end-code field of SLMP responses.

    ``Success`` (0x0000) means the request was processed; every other member
    is an error condition (frame problems, relay/timeout failures, CAN
    application errors, fragment-reassembly errors, ...).
    """
    Success = 0x00
    WrongCommand = 0xC059
    WrongFormat = 0xC05C
    WrongLength = 0xC061
    Busy = 0xCEE0
    ExceedReqLength = 0xCEE1
    ExceedRespLength = 0xCEE2
    ServerNotFound = 0xCF10
    WrongConfigItem = 0xCF20
    PrmIDNotFound = 0xCF30
    NotStartExclusiveWrite = 0xCF31
    RelayFailure = 0xCF70
    TimeoutError = 0xCF71
    CANAppNotPermittedRead = 0xCCC7
    CANAppWriteOnly = 0xCCC8
    CANAppReadOnly = 0xCCC9
    CANAppUndefinedObjectAccess = 0xCCCA
    CANAppNotPermittedPDOMapping = 0xCCCB
    CANAppExceedPDOMapping = 0xCCCC
    CANAppNotExistSubIndex = 0xCCD3
    CANAppWrongParameter = 0xCCD4
    CANAppMoreOverParameterRange = 0xCCD5
    CANAppLessOverParameterRange = 0xCCD6
    CANAppTransOrStoreError = 0xCCDA
    CANAppOtherError = 0xCCFF
    OtherNetworkError = 0xCF00
    DataFragmentShortage = 0xCF40
    DataFragmentDup = 0xCF41
    DataFragmentLost = 0xCF43
    DataFragmentNotSupport = 0xCF44
| [
"[email protected]"
] | |
3e9003d1840d7e0214cfb609ef38ac62e0ef0d1d | a6c01fae2255037632f23be0d63167031d3024ce | /Controller/ClienteCTR.py | 924cdd6face839cf271b8050116b7ff176d9c9fa | [] | no_license | ElbertRibeiro/locadora-veiculos | 627f1e9832803573c5f212ebaa5324226d9d2421 | 16c3cb6a0e73e3da3e51e53ef0ae42155a293a7e | refs/heads/main | 2023-06-25T23:37:14.087981 | 2021-07-28T23:23:39 | 2021-07-28T23:23:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,233 | py | from Model.DTO.ClienteDTO import ClienteDTO
from Model.DAO.ClienteDAO import ClienteDAO
class ClienteCTR:
    """Controller layer for client (Cliente) CRUD operations.

    All methods are defined WITHOUT ``self`` and therefore only work when
    invoked directly on the class (``ClienteCTR.CadastrarCliente(...)``);
    calling them on an instance would mis-bind the first argument.

    NOTE(review): the methods assign the DTO/DAO *classes* (not instances)
    to locals and mutate class attributes — state is shared process-wide
    across calls.  Presumably the DAO just reads attributes off whatever
    object it receives, but this is not thread-safe; confirm intent.
    """
    def CadastrarCliente(nome, CPF, endereco, email, telefone):
        # Populate the DTO (class attributes) and delegate creation to the DAO.
        clienteDTO = ClienteDTO
        clienteDTO.Nome = nome
        clienteDTO.CPF = CPF
        clienteDTO.Endereco = endereco
        clienteDTO.Email = email
        clienteDTO.Telefone = telefone
        clienteDAO = ClienteDAO
        clienteDAO.CadastrarCliente(clienteDTO)
    def AtualizarCliente(codigoCli, nome, CPF, endereco, email, telefone):
        # Same DTO population as CadastrarCliente, plus the record id to update.
        clienteDTO = ClienteDTO
        clienteDTO.Nome = nome
        clienteDTO.CPF = CPF
        clienteDTO.Endereco = endereco
        clienteDTO.Email = email
        clienteDTO.Telefone = telefone
        clienteDAO = ClienteDAO
        clienteDAO.AtualizarCliente(codigoCli, clienteDTO)
    def PesquisarTodosClientes():
        # Pass-through query: return whatever the DAO returns.
        clienteDAO = ClienteDAO
        query = clienteDAO.PesquisarTodosClientes()
        return query
    def PesquisarCliente(valor, tipo):
        # Search by value/field-type pair; semantics defined by the DAO.
        clienteDAO = ClienteDAO
        query = clienteDAO.PesquisarCliente(valor, tipo)
        return query
    def ExcluirCliente(codigoCli):
        # Delete by record id; no return value.
        clienteDAO = ClienteDAO
        clienteDAO.ExcluirCliente(codigoCli)
| [
"[email protected]"
] | |
a8f6da07591b1130f0475254d8b508164e3afe18 | d063fbc0dda96bd801d9f1befbf8e379760384a5 | /pileupfilereader/pileupnotationreader.py | f962e9f67e882fe0a18d5b89aa1b3ede29849c34 | [] | no_license | lvn3668/pileupnotationvariantcaller | 6ca6c9035341c9d7470968b00c73c4e3c1d3d093 | b587483294d65f3fed7f4eb660c021cd37b02cec | refs/heads/main | 2023-08-19T22:28:40.527812 | 2021-10-23T02:00:54 | 2021-10-23T02:00:54 | 415,593,997 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,923 | py | import os
# sequence id , 1 based coordinate, reference base, number of reads covering that base,
# , / . matching positive / negative strand
# ATGCN mismatch on fwd strand
# atgcn mismatch on rev strand
#
# If this is the first position covered by the read,
# a “^” character followed by the alignment's mapping quality encoded as an ASCII character.
# A single character indicating the read base and the strand to which the read has been mapped:
# Forward Reverse Meaning
# . dot , comma Base matches the reference base
# ACGTN acgtn Base is a mismatch to the reference base
# > < Reference skip (due to CIGAR “N”) -> '>' on fwd strand and '<' on reverse strand
# * */# Deletion of the reference base (CIGAR “D”) -> * deletion on fwd strand and # on reverse strand
# Deleted bases are shown as “*” on both strands unless --reverse-del is used,
# in which case they are shown as “#” on the reverse strand.
#
# If there is an insertion after this read base, text matching “\\+[0-9]+[ACGTNacgtn*#]+”:
# a “+” character followed by an integer giving the length of the insertion and then the inserted sequence.
# Pads are shown as “*” unless --reverse-del is used, in which case pads on the reverse strand will be shown as “#”.
# If there is a deletion after this read base, text matching “-[0-9]+[ACGTNacgtn]+”:
# a “-” character followed by the deleted reference bases represented similarly.
# (Subsequent pileup lines will contain “*” for this read indicating the deleted bases.)
# If this is the last position covered by the read, a “$” character.
# seq1 272 T 24 ,.$.....,,.,.,...,,,.,..^+. <<<+;<<<<<<<<<<<=<;<;7<&
# seq1 273 T 23 ,.....,,.,.,...,,,.,..A <<<;<<<<<<<<<3<=<<<;<<+
# seq1 274 T 23 ,.$....,,.,.,...,,,.,... 7<7;<;<<<<<<<<<=<;<;<<6
# seq1 275 A 23 ,$....,,.,.,...,,,.,...^l. <+;9*<<<<<<<<<=<<:;<<<<
# seq1 276 G 22 ...T,,.,.,...,,,.,.... 33;+<<7=7<<7<&<<1;<<6<
# seq1 277 T 22 ....,,.,.,.C.,,,.,..G. +7<;<<<<<<<&<=<<:;<<&<
# seq1 278 G 23 ....,,.,.,...,,,.,....^k. %38*<<;<7<<7<=<<<;<<<<<
# seq1 279 C 23 A..T,,.,.,...,,,.,..... ;75&<<<<<<<<<=<<<9<<:<<
# Author: Lalitha Viswanathan
# Pileup read notation variant caller
from typing import TextIO, Union
def pileupnotationreader(pileupnotationfilename: str, pileupreadsfilename: str) -> Union[
    bool, tuple[bool, list[str], str]]:
    """Read a pileup-notation pattern file and a pileup reads file.

    Returns ``(True, reads_lines, notation_pattern)`` on success, ``False``
    when either file is empty or a notation line is longer than expected,
    and ``None`` (implicitly) when an unexpected exception is caught.

    :param pileupnotationfilename: path to the notation file; each line is
        expected to hold a single pileup symbol (len <= 2 incl. newline).
    :param pileupreadsfilename: path to the pileup reads file.
    """
    try:
        # check if size of file is 0
        if os.stat(''.join(pileupnotationfilename)).st_size == 0:
            # NOTE(review): ''.join(str) is the identity — presumably kept so a
            # list of path fragments can also be passed; confirm callers.
            print(pileupnotationfilename, ' File is empty')
            return False
        else:
            # NOTE(review): this handle is never closed on the success path.
            pileupnotationfilehandle: TextIO = open(''.join(pileupnotationfilename), 'r')
            pileuplines: list[str] = pileupnotationfilehandle.readlines()
            for line in pileuplines:
                if len(line) > 2:
                    # Any line longer than symbol+newline aborts the whole read.
                    # NOTE(review): the remove() before returning looks
                    # vestigial — the list is discarded anyway; confirm intent.
                    pileuplines.remove(line)
                    return False
                else:
                    # Strip the newline in place, keeping line order.
                    pileuplines[pileuplines.index(line)] = line.strip()
            # Concatenate the single-symbol lines into one pattern string.
            pileupnotationpattern: str = ''.join(pileuplines)
            if os.stat(''.join(pileupreadsfilename)).st_size == 0:
                print(pileupreadsfilename, ' file is empty')
                return False
            else:
                print(pileupreadsfilename, ' file is not empty')
                pileupreadsfilehandle: TextIO = open(''.join(pileupreadsfilename), 'r')
                pileuplines: list[str] = pileupreadsfilehandle.readlines()
                for line in pileuplines:
                    # NOTE(review): readlines() never yields None, so this
                    # filter is a no-op; possibly meant to drop empty lines.
                    if line is None:
                        pileuplines.remove(line)
                pileupreadsfilehandle.close()
                return True, pileuplines, pileupnotationpattern
    except Exception as exception:
        # Broad catch: report and fall through, implicitly returning None.
        print(type(exception))  # the exception instance
        print(exception.args)  # arguments stored in .args
| [
"[email protected]"
] | |
27126fdd2539b5a5bd8f8a678dcd4f4f3ed19d3f | 7ff0386a9fde833339ab449a83174b2a225722c0 | /structures/Revision.py | 83af42dffd52b8dbf2550d4bff5653a614bdae10 | [
"MIT"
] | permissive | wikiwho/whovis | 3646845addeba20686a26c2640cc5354a522ec1c | bc1dd57deb3c6440a57ef034e7a5861831b112fe | refs/heads/master | 2021-01-23T22:15:21.022943 | 2018-04-30T16:02:56 | 2018-04-30T16:02:56 | 29,799,585 | 3 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,839 | py | '''
Created on Feb 20, 2013
@author: maribelacosta
'''
class Revision(object):
    """One article revision: contributor metadata plus ordered paragraph content."""

    def __init__(self):
        # Fake sequential id (starts at 0) and the real Wikipedia revision id.
        self.id = 0
        self.wikipedia_id = 0
        # Contributor who performed the revision.
        self.contributor_id = 0
        self.contributor_name = ''
        self.contributor_ip = ''
        # Paragraph storage: {paragraph_hash: [Paragraph]} plus the hash order.
        self.paragraphs = {}
        self.ordered_paragraphs = []
        # Content length in bytes.
        self.length = 0
        # Debugging copies of the raw content.
        self.content = ''
        self.ordered_content = []
        # Token count, timestamp and edit comment.
        self.total_tokens = 0
        self.timestamp = 0
        self.comment = ""

    def __repr__(self):
        # Identity-based repr: paragraphs are serialized via repr() in to_dict.
        return str(id(self))

    def to_dict(self):
        """Serialize as {'obj': [[repr(paragraph), ...] per hash, in order]}."""
        obj = []
        for paragraph_hash in self.ordered_paragraphs:
            obj.append([repr(paragraph) for paragraph in self.paragraphs[paragraph_hash]])
        return {'obj': obj}
| [
"[email protected]"
] | |
e74bf718f9350dba6ccefa7c8122a8fa90583f18 | c9c75ceddd8acd42c4c08a53734e2ac8a2ec74de | /cog.py | 1734f25a961f18ff591382610d6f3bcbbcf9f889 | [] | no_license | hzafeng/COG | 3aa6c91b5070a6bc8b60ee1d2a02751f50f6f4dc | c129bab4a6e8ffae1f3df77f3c1268d35e8c6c30 | refs/heads/master | 2019-07-01T19:04:20.610748 | 2018-01-02T11:48:45 | 2018-01-02T11:48:45 | 102,475,163 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,138 | py | import os
import re
import math
from scipy import stats
import argparse
from string import ascii_letters
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
what_i_do = "a script to annotate A list of Genomes with COG database"
parser = argparse.ArgumentParser(description=what_i_do)
parser.add_argument('-i', dest='input_files', type=str, nargs='+',
required=True, help='input genome file in fasta(required) e.g. <dir>*.fasta', default=None)
parser.add_argument('-db', dest='local_db', type=str, nargs=1,
required=True, help='local COG database dir(required)', default=None)
parser.add_argument('-o', dest='output_files', type=str, nargs=1,
required=True, help='the output dir(required)', default=None)
parser.add_argument('-t', dest='threads_num', type=str, nargs=1,
required=False, help='the num of threads', default='1')
def check_ids(arguments):
    """Report FASTA sequence ids duplicated across all input genome files.

    Python 2 code (``print`` statement).  Duplicates are only printed, not
    raised or returned, so processing continues regardless.  The id is the
    first whitespace-separated token of each '>' header line; the ``lists``
    accumulator is shared across files so cross-genome duplicates are caught.
    """
    lists = []
    for file in arguments["input_files"]:
        f=open(file,'r')
        data = f.read().split('\n')
        f.close()
        for i in data:
            if '>' in i:
                if i.split(' ')[0] not in lists:
                    lists.append(i.split(' ')[0])
                else:
                    # Duplicate header id: warn only (O(n) list lookup above).
                    print "The is a reduplicated id in your Genome",i.split(' ')[0]
args = vars(parser.parse_args())
'''
args={ 'local_db': ['/home/zjs/hhf/test/db'],
'output_files': ['./'],
'threads_num': 1,
'input_files': ['maxbinout_W0P5.011.fasta','maxbinout_W0P5.014.fasta']}
'''
check_ids(args)
def diamond(arguments):
    """Build the COG BLAST database command and run blastp on each input file.

    Python 2 code.  NOTE(review): despite the name, this drives NCBI BLAST
    (makeblastdb/blastp), not DIAMOND; the makeblastdb step is printed but
    its execution is commented out, and paths come from the module-level
    ``args`` while only the file list comes from ``arguments`` — confirm.
    Commands are passed unquoted to os.system, so paths containing shell
    metacharacters are unsafe.
    """
    mkdb = 'makeblastdb -in '+os.path.join(args['local_db'][0],'clean.fasta')+' -dbtype prot '+'-out '+os.path.join(args['output_files'][0],'COG')
    print mkdb
    # os.system(mkdb)
    for file in arguments["input_files"]:
        # One blastp per genome: tabular output (-outfmt 6), best hit only.
        blast = 'blastp'+' -db '+os.path.join(args['local_db'][0],'COG')+' -query '+file+' -evalue 10e-6 -outfmt 6 -num_alignments 1 -num_threads '+str(args['threads_num'][0])+' -out '+os.path.join(args['output_files'][0],file+'.table')
        print blast
        os.system(blast)
'''
cog = {'COG3010': 'G',
'COG3011': 'S',
'COG3012': 'S',
'COG3013': 'S',......}
'''
def EnrichmentAnalysis(arguments):
    """Count COG functional categories per genome and test for enrichment.

    Python 2 code.  Reads the local COG reference files, maps each BLAST hit
    (from diamond()'s ``*.table`` output) to one or more one-letter COG
    categories, writes per-genome ``*.table.count`` files, then writes
    ``*.stats`` files with hypergeometric p-values per category.
    NOTE(review): the first loop keys output files by the full input path
    while the second uses the basename — these diverge when inputs include
    directory components; confirm.
    """
    # clean.fasta headers: ">seqid COGxxxx[#COGyyyy...]" — id then COG id(s).
    fa = open(os.path.join(args['local_db'][0],'clean.fasta'),'r')
    clean = fa.read().replace('\r','').split('\n')
    fa.close()
    # whog.txt lines: "[X] COGxxxx ..." — category letter in brackets first.
    fw = open(os.path.join(args['local_db'][0],'whog.txt'),'r')
    whog = fw.read().split('\n')
    fw.close()
    cog={}
    # cog_lists lines: "X ...,count" — per-category totals in the database.
    f_cog = open(os.path.join(args['local_db'][0],'cog_lists'),'r')
    coglists = f_cog.read().split('\n')
    f_cog.close()
    dict_cog={}
    # Total database gene count for each category letter A..Z.
    for i in range(65,91):
        j=chr(i)
        each_count = 0
        for k in coglists[:-1]:
            if k.split(' ')[0] == j:
                num = int(k.split(',')[-1])
                each_count +=num
        dict_cog[j] = each_count
    # Map COG id -> one-letter category (text inside the brackets).
    for i in whog:
        if '[' in i:
            i_ = i.split(' ')
            cog[i_[1]] = i_[0].split('[')[1][:-1]
    clean_dicts={}
    # Map reference sequence id -> its COG id(s) ('#'-separated when multiple).
    for j in clean:
        if '>' in j:
            clean_dicts[j[1:].split(' ')[0]] = j.split(' ')[1]
    for genome_id in arguments["input_files"]:
        f = open(os.path.join(args['output_files'][0],genome_id+'.table'),'r')
        each_table = f.read().split('\n')[:-1]
        f.close()
        first_line = ''
        each_cog = ''
        each_cogs = []
        for j in each_table:
            # Skip duplicate query/subject pairs (tabular BLAST columns 1-2).
            if j.split('\t')[0]+'\t'+j.split('\t')[1] == first_line:
                pass
            else:
                first_line=j.split('\t')[0]+'\t'+j.split('\t')[1]
                j_ = j.split('\t')
                cog_id = clean_dicts[j_[1]]
                hit =cog_id.split('#')
                if len(hit) == 1:
                    each_cog += cog[cog_id]
                else:
                    # Multi-COG reference hit: count every category letter.
                    for k in hit:
                        each_cog += cog[k]
            for i in each_cog:
                each_cogs.append(i)
        # Write one "letter,count" line per observed category.
        of1 = open(os.path.join(args['output_files'][0],genome_id+'.table.count'),'w')
        for i in list(set(each_cogs)):
            of1.write(i+','+str(each_cog.count(i))+'\n')
        of1.close()
    for file in arguments["input_files"]:
        genome_id = file.split('/')[-1]
        f = open(os.path.join(args['output_files'][0],genome_id+'.table.count'),'r')
        eachdata = f.read().split('\n')
        f.close()
        # Total COG-annotated genes in this genome (sample size for the test).
        each_cog_gene = 0
        for each in eachdata:
            if each != '':
                each_ = each.split(',')
                each_cog_gene += int(each_[1])
        oof = open(os.path.join(args['output_files'][0],genome_id+'.stats'),'w')
        for each in eachdata:
            if each != '':
                each_ = each.split(',')
                # Hypergeometric pmf(k, M, n, N); 83675 appears to be the
                # non-category background size — TODO confirm this constant.
                ff = stats.hypergeom.pmf(int(each_[1]),83675+int(dict_cog[each_[0]]),int(dict_cog[each_[0]]),each_cog_gene)
                oof.write(each_[0]+','+str(ff)+'\n')
        oof.close()
| [
"[email protected]"
] | |
7e39086a74ae32265aa3a1462885eb871b792cdd | b0e08837495fb2b9bca91ad66f353a0baac71416 | /app/dashapp1/callbacks.py | 0983d1cceb695e642d6d556122af7a8223c73090 | [
"MIT"
] | permissive | cpud/youtube_viz | 7ea24d6f6666bef11666d4d833269b03c87d14e2 | 078994f9ccc33b0ccb9bad3e76ed1e916eb1fc77 | refs/heads/main | 2023-04-14T18:26:02.795338 | 2021-04-18T20:59:55 | 2021-04-18T20:59:55 | 354,966,445 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,298 | py | from datetime import datetime
import os
import pandas as pd
from dash.dependencies import Input, Output, State
import dash_html_components as html
from .make_tree import make_plot, make_df
#from dash.dependencies import Output
#videos = pd.read_csv("data/sy.csv")
def register_callbacks(dashapp):
    """Attach the Dash callbacks for the YouTube-viz app to *dashapp*.

    Registers a single callback that, on each click of the submit button,
    rebuilds five Plotly figures and an HTML table from the search term in
    the text input.
    """
    @dashapp.callback([Output('tree', 'figure'),
                    Output('likes', 'figure'),
                    Output('minutes', 'figure'),
                    Output('views', 'figure'),
                    Output('channels', 'figure'),
                    Output('table', 'children')],
                    [Input('submit-val', 'n_clicks')],
                    [State('input-on-submit', 'value')])
    def display_plot(n_clicks, value):
        """Build all figures and the results table for the search term *value*.

        Falls back to the bundled data/tennis.csv snapshot when make_df fails
        (e.g. no network / bad query).
        """
        # NOTE(review): bare except hides the real failure reason from make_df
        # (network error vs. bad input) -- consider narrowing and logging.
        try:
            videos = make_df(value)
        except:
            videos = pd.read_csv('data/tennis.csv')
        # make table of videos: fix the column order before rendering
        dataframe = videos
        dataframe = dataframe.reindex(columns = ['Title','Channel', 'Views',
                                'Likes','Dislikes', 'LikeRatio', 'Length',
                                'Uploaded', 'Polarity',])
        # Render the dataframe as a static HTML table (header row + one row
        # per video).
        table = html.Table([
            html.Thead(
                html.Tr([html.Th(col) for col in dataframe.columns]),
            ),
            html.Tbody([
                html.Tr([
                    html.Td(dataframe.iloc[i][col]) for col in dataframe.columns
                ]) for i in range(len(dataframe))
            ],#style = {'width': '50%'}
            )
        ], style = {#'background-color': '#FFFFFF',
                    'color': '#3B846D',
                    'width': '98%'
                    #'font-style': 'italic'
        })
        fig, fig2, fig3, fig4, fig5 = make_plot(videos)
        # overwrite last users data so default is always sonic youth
        # but first grab the other data if it isn't SY because local baybeee
        # Persist this query's data under its first title, then reset the
        # default dataset (data/videos.csv) back to the tennis snapshot.
        title = videos.reset_index()['Title'][0]
        videos.to_csv('data/' + title + '.csv')
        videos = pd.read_csv('data/tennis.csv')
        videos.to_csv('data/videos.csv')
        return fig, fig2, fig3, fig4, fig5, table
| [
"[email protected]"
] | |
a1c419a4ea54808ce0ac1b831c5c5410708662a8 | 2a61b02c26e77686e38cd9039e6f4b0530ddb7c9 | /bitbots_vision/bitbots_vision/src/bitbots_vision/vision_modules/candidate.py | 2521adb2f70962a74456ac626dc0dddf939fe016 | [
"MIT"
] | permissive | fly-pigTH/bitbots_thmos_meta | 931413e86929751024013b8e35f87b799243e22c | f45ccc362dc689b69027be5b0d000d2a08580de4 | refs/heads/master | 2023-08-27T02:58:08.397650 | 2021-10-22T17:17:11 | 2021-10-22T17:17:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,123 | py | import abc
import rospy
class Candidate:
    """
    A rated, axis-aligned bounding box for an arbitrary object in an image.

    Geometry is stored as the top-left corner plus width and height; an
    optional rating expresses the detection confidence.  Accessors are
    provided for the derived points (center, corners, lower-center) and a
    few class-level helpers for ranking lists of candidates.
    """

    def __init__(self, x1=0, y1=0, width=0, height=0, rating=None):
        """
        Create a candidate bounding box.

        :param int x1: x coordinate of the top-left corner
        :param int y1: y coordinate of the top-left corner
        :param int width: horizontal extent of the box
        :param int height: vertical extent of the box
        :param float rating: confidence of the candidate (may be set later)
        """
        self._x1 = x1
        self._y1 = y1
        self._width = width
        self._height = height
        self._rating = rating

    def get_width(self):
        """:return int: width of the bounding box"""
        return self._width

    def get_height(self):
        """:return int: height of the bounding box"""
        return self._height

    def get_center_x(self):
        """:return int: x coordinate of the box center"""
        return self.get_upper_left_x() + int(self.get_width() // 2)

    def get_center_y(self):
        """:return int: y coordinate of the box center"""
        return self.get_upper_left_y() + int(self.get_height() // 2)

    def get_center_point(self):
        """:return tuple[int,int]: (x, y) center of the bounding box"""
        return (self.get_center_x(), self.get_center_y())

    def get_diameter(self):
        """:return int: mean of width and height"""
        return int((self._width + self._height) // 2)

    def get_radius(self):
        """:return int: half of the mean diameter"""
        return self.get_diameter() // 2

    def get_upper_left_point(self):
        """:return tuple[int,int]: top-left corner"""
        return (self._x1, self._y1)

    def get_upper_left_x(self):
        """:return int: x coordinate of the top-left corner"""
        return self._x1

    def get_upper_left_y(self):
        """:return int: y coordinate of the top-left corner"""
        return self._y1

    def get_lower_right_point(self):
        """:return tuple[int,int]: bottom-right corner"""
        return (self.get_lower_right_x(), self.get_lower_right_y())

    def get_lower_right_x(self):
        """:return int: x coordinate of the bottom-right corner"""
        return self._x1 + self._width

    def get_lower_right_y(self):
        """:return int: y coordinate of the bottom-right corner"""
        return self._y1 + self._height

    def get_lower_center_point(self):
        """:return tuple[int,int]: lowest point of the box, horizontally centered"""
        return (self.get_center_x(), self.get_lower_right_y())

    def set_rating(self, rating):
        """
        Assign the confidence rating; a rating can only be set once.

        :param float rating: rating to set
        """
        if self._rating is None:
            self._rating = rating
        else:
            # A second assignment is ignored on purpose; warn so the caller
            # notices the duplicate set.
            rospy.logwarn('Candidate rating has already been set.', logger_name='Candidate')

    def get_rating(self):
        """:return float: confidence rating of the candidate"""
        return self._rating

    def point_in_candidate(self, point):
        """
        Test whether *point* lies inside this bounding box (treated as a
        closed rectangle).

        :param point: (x, y) tuple to inspect
        :return bool: True if the point is inside the box
        """
        inside_x = self.get_upper_left_x() <= point[0] <= self.get_lower_right_x()
        inside_y = self.get_upper_left_y() <= point[1] <= self.get_lower_right_y()
        return inside_x and inside_y

    def set_in_mask(self, mask, value=0, grow=1):
        """
        Paint this candidate's (optionally grown) bounding box into *mask*.

        :param mask: binary mask with the shape of the input image
        :param value: value written into the box region
        :param grow: scale factor applied to the box before painting
        :returns mask: the mask with this candidate's region set to *value*
        """
        half_w = int(self.get_width() * grow * 0.5)
        half_h = int(self.get_height() * grow * 0.5)
        cx = self.get_center_x()
        cy = self.get_center_y()
        # Clamp the region to the mask bounds before writing.
        top = max(cy - half_h, 0)
        bottom = min(cy + half_h, mask.shape[0])
        left = max(cx - half_w, 0)
        right = min(cx + half_w, mask.shape[1])
        mask[top:bottom, left:right] = value
        return mask

    @staticmethod
    def sort_candidates(candidatelist):
        """
        Sort candidates by rating, best first.

        :param [Candidate] candidatelist: candidates to sort
        :return: new list sorted by rating in descending order
        """
        return sorted(candidatelist, key=lambda c: c.get_rating(), reverse=True)

    @staticmethod
    def select_top_candidate(candidatelist):
        """
        Return the highest rated candidate, or None for an empty list.

        :param candidatelist: candidates to choose from
        :return Candidate: top candidate or None
        """
        if not candidatelist:
            return None
        return Candidate.sort_candidates(candidatelist)[0]

    @staticmethod
    def rating_threshold(candidatelist, threshold):
        """
        Keep only candidates whose rating exceeds *threshold*.

        :param [Candidate] candidatelist: candidates to filter
        :param float threshold: filter threshold
        :return [Candidate]: filtered candidates
        """
        return list(filter(lambda c: c.get_rating() > threshold, candidatelist))

    def __str__(self):
        """:return str: human-readable summary of geometry and rating"""
        return f"x1,y1: {self.get_upper_left_x()},{self.get_upper_left_y()} | width,height: {self.get_width()},{self.get_height()} | rating: {self._rating}"
class CandidateFinder(object):
    """
    Abstract base class for detectors that produce :class:`Candidate` objects.

    Subclasses must implement :meth:`get_candidates` and :meth:`compute`;
    the ranking helpers below are built on top of those two methods.
    """

    def __init__(self):
        """Initialization of :class:`.CandidateFinder`."""
        super(CandidateFinder, self).__init__()

    def get_top_candidates(self, count=1):
        """
        Return the *count* best rated candidates.

        :param int count: number of top candidates to return
        :return [Candidate]: the *count* best candidates
        """
        ranked = Candidate.sort_candidates(self.get_candidates())
        return ranked[:count]

    def get_top_candidate(self):
        """
        Return the single best rated candidate.

        :return Candidate: top candidate or None
        """
        return Candidate.select_top_candidate(self.get_candidates())

    @abc.abstractmethod
    def get_candidates(self):
        """
        Return every candidate found by this detector.

        :return [Candidate]: candidates
        """
        raise NotImplementedError

    @abc.abstractmethod
    def compute(self):
        """
        Run the expensive detection step (no return value) and cache its result.
        """
        raise NotImplementedError
class DummyCandidateFinder(CandidateFinder):
    """
    No-op detector used to run the vision pipeline without a neural network,
    e.g. to save computation time while debugging.

    It always reports an empty set of candidates, thereby standing in for
    the ordinary detection.
    """

    def __init__(self):
        """Initialization of :class:`.DummyCandidateFinder`."""
        self._detected_candidates = []
        self._sorted_candidates = []
        self._top_candidate = None

    def set_image(self, image):
        """
        Interface stub; the image is ignored.

        :param image: current vision image
        """
        pass

    def compute(self):
        """Interface stub; nothing is computed."""
        pass

    def get_candidates(self):
        """
        Interface stub.

        :return: an empty list
        """
        return self._detected_candidates

    def get_top_candidates(self, count=1):
        """
        Interface stub.

        :param count: ignored, kept for interface compatibility
        :return: an empty list
        """
        return self._sorted_candidates
| [
"[email protected]"
] | |
1259bceb5974460819e2be54e7535a7623bb90eb | 5dbb8361b24bda6a0957ded08fb59b34f9e0a670 | /7th_sem/data.py | 2035b87a13f96e690ad6065404a9180b3b46557f | [] | no_license | ranjanproject/BTP | 24b2624282ea0689774453374013e02653168744 | 4f3970dcdade1792daf762302a3015d4623ea8cc | refs/heads/master | 2020-04-26T18:00:51.061036 | 2019-03-04T11:28:39 | 2019-03-04T11:28:39 | 173,731,811 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 796 | py | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# Read the whitespace-separated table.  pandas sees a single column per row
# because the file is not comma-separated, so every row is split manually.
df = pd.read_csv("fort.txt", header=None)
data = df.values

# Skip the first row (treated as a header) and split the remaining rows
# into their whitespace-separated fields.  Vectorized slicing replaces the
# original element-by-element copy loops.
X = np.array([row[0].split() for row in data[1:]])

# Column 2 holds the z values; the other two columns are x and y.
# astype(float) instead of the removed np.float alias (NumPy >= 1.24).
z = X[:, 2].astype(float)
xy = np.delete(X, 2, 1).astype(float)

# Reshape into column vectors, matching the original output shapes.
x = xy[:, 0].reshape(-1, 1)
y = xy[:, 1].reshape(-1, 1)
z = z.reshape(-1, 1)

# 3D scatter plot of the (x, y, z) points.
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(x, y, z)
plt.show()
# Axes3D.plot_surface(x,y,z,rstride=10,cstride=10) | [
"[email protected]"
] | |
6f16803ad8054469cff2091212206f8b087687f3 | 137a4de8f0318f8767d1093cbbe4757674e719af | /2022/23.py | 4a37e43079291c5f6651455f4cea24b35f20ca08 | [] | no_license | fridokus/advent-of-code | a93b84c6feca01e46c6f3580201bfeb454f89cd3 | b254e1f49c54983bcdbc82e1e27e6198e06b4efc | refs/heads/master | 2023-01-20T08:47:14.477054 | 2023-01-13T09:17:16 | 2023-01-13T09:17:16 | 226,941,928 | 9 | 4 | null | 2020-12-05T17:16:18 | 2019-12-09T18:41:24 | Python | UTF-8 | Python | false | false | 1,780 | py | #!/usr/bin/python3
# Advent of Code 2022, day 23: "Unstable Diffusion" elf-spreading simulation.
# Part 1 prints the empty ground in the bounding box after round 10;
# part 2 prints the first round in which no elf moves.
with open('23.in') as f:
    scan = f.read().splitlines()
elves = [] # list of elf = [(pos x y), (proposal)]
positions = set()
# Every '#' in the scan is an elf; start with proposal == position.
for j in range(len(scan)):
    for i in range(len(scan[0])):
        if scan[j][i] == '#':
            elves.append([(i, j), (i, j)])
            positions |= {(i, j)}
# For each direction, the three neighbor offsets an elf must check
# (middle offset is the actual step taken).
dirs = {
    'N': [(-1, -1), ( 0, -1), ( 1, -1)],
    'S': [(-1, 1), ( 0, 1), ( 1, 1)],
    'W': [(-1, 1), (-1, 0), (-1, -1)],
    'E': [( 1, 1), ( 1, 0), ( 1, -1)]
}
order = ['N', 'S', 'W', 'E']
for r in range(10000):
    moved = False
    blocklist = set()   # proposals chosen by more than one elf
    proposals = set()   # proposals seen so far this round
    for elf in elves:
        p = elf[0]
        # check if no neighbor
        if all([all([(p[0] + dirs[d][i][0], p[1] + dirs[d][i][1]) not in positions for d in order]) for i in range(3)]):
            continue
        # First direction whose three cells are free becomes the proposal.
        for d in order:
            if all([(p[0] + dirs[d][i][0], p[1] + dirs[d][i][1]) not in positions for i in range(3)]):
                proposal = (p[0] + dirs[d][1][0], p[1] + dirs[d][1][1])
                if proposal in proposals:
                    blocklist |= {proposal}
                else:
                    proposals |= {proposal}
                    elf[1] = proposal
                    moved = True
                break
    # Second half of the round: apply all non-conflicting proposals.
    for elf in elves:
        if elf[1] not in blocklist:
            elf[0] = elf[1] # move to proposal
        else:
            elf[1] = elf[0] # reset proposal to position
    # Rotate the direction priority for the next round.
    order = order[1:] + [order[0]]
    positions = {elf[0] for elf in elves}
    if r == 9:
        # Part 1: empty tiles inside the elves' bounding rectangle.
        xpos = {elf[0][0] for elf in elves}
        ypos = {elf[0][1] for elf in elves}
        r1 = (max(ypos) - min(ypos) + 1) * (max(xpos) - min(xpos) + 1) - len(positions)
        print(r1)
    if not moved: break
# Part 2: number of the first round with no movement (1-based).
print(r+1)
| [
"[email protected]"
] | |
132252c650df2d0f3e659321072f0d9295a319b8 | cde5715785bd6c6d60a3d8e9a0235b1035279058 | /venv/bin/easy_install-3.6 | 4871fc1ff2ee64b365926707aba0da5ee8e8a63b | [] | no_license | ikpune/parking_assistant | 095a097190c0ee54c7f8b3cdf23850dcc66da3b3 | d2b127048d562768a4173bd78c32a896b7ea919c | refs/heads/master | 2021-06-22T11:53:02.755389 | 2019-10-16T08:41:21 | 2019-10-16T08:41:21 | 215,462,740 | 0 | 0 | null | 2021-05-06T19:44:05 | 2019-10-16T05:17:07 | Python | UTF-8 | Python | false | false | 455 | 6 | #!/home/imran/Imran/my_work/parking_assistant/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.6'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.6')()
)
| [
"[email protected]"
] | |
9ec5ca1c68f2f64948a06e76b913d774cedacfea | c62e0eb9e244588f54689eb28c5fe3ca900197e8 | /batch.py | c4f861618a3380517039de1f37480d9d46b44070 | [
"MIT"
] | permissive | cmiranda16PonceHealthSciencesUniversity/covizu | 57a5ff0be6c8e398e69546d4e8b67f07610fb476 | ed13db6674a1ce8a24d751997ba67bf55f0e25da | refs/heads/master | 2023-04-11T22:31:57.352333 | 2021-04-14T19:57:38 | 2021-04-14T19:57:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,439 | py | import argparse
import os
import sys
import json
from datetime import datetime
import covizu
from covizu.utils import gisaid_utils
from covizu.utils.progress_utils import Callback
from covizu.utils.batch_utils import *
from covizu.utils.seq_utils import SC2Locator
from tempfile import NamedTemporaryFile
def parse_args():
    """Build and parse the command line options for the CoVizu pipeline.

    :return: argparse.Namespace with the recognized options; unrecognized
             arguments are silently ignored (parse_known_args).
    """
    parser = argparse.ArgumentParser(description="CoVizu analysis pipeline automation")

    # GISAID access / input source
    parser.add_argument('--url', type=str, default=os.environ.get("GISAID_URL", None),
                        help="URL to download provision file, defaults to environment variable.")
    parser.add_argument('--user', type=str, default=os.environ.get("GISAID_USER", None),
                        help="GISAID username, defaults to environment variable.")
    parser.add_argument('--password', type=str, default=os.environ.get("GISAID_PSWD", None),
                        help="GISAID password, defaults to environment variable.")
    parser.add_argument("--infile", type=str, default=None,
                        help="input, path to xz-compressed JSON; if not specified, "
                             "download xz file from GISAID provision feed.")
    parser.add_argument("--outdir", type=str, default='data/',
                        help="option, path to write output files")
    parser.add_argument("--bylineage", type=str, default='data/by_lineage.json',
                        help="option, path to write JSON of features by lineage")

    # genome filtering
    parser.add_argument('--minlen', type=int, default=29000, help='option, minimum genome length (nt)')
    parser.add_argument('--mindate', type=str, default='2019-12-01',
                        help='option, earliest possible sample collection date (ISO format, default '
                             '2019-12-01')
    parser.add_argument('--poisson-cutoff', type=float, default=0.001,
                        help='option, filtering outlying genomes whose distance exceeds the upper '
                             'quantile of Poisson distribution (molecular clock). Default 0.001 '
                             'corresponds to 99.9%% cutoff.')

    # alignment (minimap2)
    parser.add_argument('--batchsize', type=int, default=500,
                        help='option, number of records to batch process with minimap2')
    parser.add_argument("--ref", type=str,
                        default=os.path.join(covizu.__path__[0], "data/NC_045512.fa"),
                        help="option, path to FASTA file with reference genome")
    parser.add_argument('--mmbin', type=str, default='minimap2',
                        help="option, path to minimap2 binary executable")
    parser.add_argument('-mmt', "--mmthreads", type=int, default=8,
                        help="option, number of threads for minimap2.")
    parser.add_argument('--misstol', type=int, default=300,
                        help="option, maximum tolerated number of missing bases per "
                             "genome (default 300).")
    parser.add_argument("--vcf", type=str,
                        default=os.path.join(covizu.__path__[0], "data/problematic_sites_sarsCov2.vcf"),
                        help="Path to VCF file of problematic sites in SARS-COV-2 genome. "
                             "Source: https://github.com/W-L/ProblematicSites_SARS-CoV2")

    # tree building (fasttree2 / treetime)
    parser.add_argument('--ft2bin', default='fasttree2',
                        help='option, path to fasttree2 binary executable')
    parser.add_argument('--ttbin', default='treetime',
                        help='option, path to treetime binary executable')
    parser.add_argument('--clock', type=float, default=8e-4,
                        help='option, specify molecular clock rate for '
                             'constraining Treetime analysis (default 8e-4).')
    parser.add_argument('--datetol', type=float, default=0.1,
                        help='option, exclude tips from time-scaled tree '
                             'with high discordance between estimated and '
                             'known sample collection dates (year units,'
                             'default: 0.1)')

    # beadplots (RapidNJ / MPI)
    parser.add_argument('--binpath', type=str, default='rapidnj',
                        help='option, path to RapidNJ binary executable')
    parser.add_argument('--mincount', type=int, default=500,
                        help='option, minimum number of variants in lineage '
                             'above which MPI processing will be used.')
    parser.add_argument('--machine_file', type=str, default='mfile',
                        help='option, path to machine file for MPI.')
    parser.add_argument("-n", "--nboot", type=int, default=100,
                        help="Number of bootstrap samples, default 100.")
    parser.add_argument("--boot-cutoff", type=float, default=0.5,
                        help="Bootstrap cutoff for consensus tree (default 0.5). "
                             "Only used if --cons is specified.")

    parser.add_argument("--dry-run", action="store_true",
                        help="Do not upload output files to webserver.")

    # Bug fix: parse_known_args() returns a (namespace, remaining) tuple,
    # but callers do `args = parse_args()` and then access `args.infile`.
    # Return only the namespace so attribute access works.
    args, _ = parser.parse_known_args()
    return args
def process_feed(args, callback=None):
    """Stream the GISAID feed, align genomes and bin their features by lineage.

    :param args: argparse.Namespace, pipeline settings from parse_args()
    :param callback: optional progress-logging function
    :return: dict, genome feature records keyed by PANGO lineage
    """
    if callback:
        callback("Processing GISAID feed data")
    # Pipeline: load -> batch -> align/extract -> filter -> bin by lineage.
    records = gisaid_utils.load_gisaid(args.infile, minlen=args.minlen, mindate=args.mindate)
    batches = gisaid_utils.batch_fasta(records, size=args.batchsize)
    features = gisaid_utils.extract_features(batches, ref_file=args.ref, binpath=args.mmbin,
                                             nthread=args.mmthreads, minlen=args.minlen)
    clean = gisaid_utils.filter_problematic(features, vcf_file=args.vcf, cutoff=args.poisson_cutoff,
                                            callback=callback)
    return gisaid_utils.sort_by_lineage(clean, callback=callback)
if __name__ == "__main__":
args = parse_args()
cb = Callback()
# check that user has loaded openmpi module
try:
subprocess.check_call(['mpirun', '-np', '2', 'ls'], stdout=subprocess.DEVNULL)
except FileNotFoundError:
cb.callback("mpirun not loaded - run `module load openmpi/gnu`", level='ERROR')
sys.exit()
# download xz file if not specified by user
if args.infile is None:
cb.callback("No input specified, downloading data from GISAID feed...")
args.infile = gisaid_utils.download_feed(args.url, args.user, args.password)
by_lineage = process_feed(args, cb.callback)
with open(args.bylineage, 'w') as handle:
# export to file to process large lineages with MPI
json.dump(by_lineage, handle)
timetree, residuals = build_timetree(by_lineage, args, cb.callback)
timestamp = datetime.now().isoformat().split('.')[0]
outfile = open(os.path.join(args.outdir, 'clusters.{}.json'.format(timestamp)), 'w')
nwk_file = os.path.join(args.outdir, 'timetree.{}.nwk'.format(timestamp))
with open(nwk_file, 'w') as handle:
Phylo.write(timetree, file=handle, format='newick')
result = make_beadplots(by_lineage, args, cb.callback, t0=cb.t0.timestamp())
outfile.write(json.dumps(result)) # serialize results to JSON
outfile.close()
# get mutation info
locator = SC2Locator()
mutations = {}
for lineage, features in get_mutations(by_lineage).items():
annots = [locator.parse_mutation(f) for f in features]
mutations.update({lineage: [a for a in annots if a is not None]})
# write data stats
dbstat_file = os.path.join(args.outdir, 'dbstats.{}.json'.format(timestamp))
with open(dbstat_file, 'w') as handle:
nseqs = sum([len(rows) for rows in by_lineage.values()])
val = {
'lastupdate': timestamp.split('T')[0],
'noseqs': nseqs,
'lineages': {}
}
for lineage, samples in by_lineage.items():
ndiffs = [len(x['diffs']) for x in samples]
val['lineages'][lineage] = {
'nsamples': len(samples),
'lastcoldate': max(x['covv_collection_date'] for x in samples),
'residual': residuals[lineage],
'max_ndiffs': max(ndiffs),
'mean_ndiffs': sum(ndiffs)/len(ndiffs),
'mutations': mutations[lineage]
}
json.dump(val, handle)
# upload output files to webserver, requires SSH key credentials
if not args.dry_run:
server_root = 'filogeneti.ca:/var/www/html/covizu/data'
subprocess.check_call(['scp', nwk_file, '{}/timetree.nwk'.format(server_root)])
subprocess.check_call(['scp', outfile.name, '{}/clusters.json'.format(server_root)])
subprocess.check_call(['scp', dbstat_file, '{}/dbstats.json'.format(server_root)])
# upload files to EpiCoV server
server_epicov = 'filogeneti.ca:/var/www/html/epicov/data'
subprocess.check_call(['scp', nwk_file, '{}/timetree.nwk'.format(server_epicov)])
subprocess.check_call(['scp', dbstat_file, '{}/dbstats.json'.format(server_epicov)])
# modify clusters JSON
epifile = open(outfile.name, 'r')
epicov_data = gisaid_utils.convert_json(epifile, args.infile)
fp = NamedTemporaryFile('w', delete=False)
json.dump(epicov_data, fp=fp) # serialize to temp file
fp.close()
subprocess.check_call(['scp', fp.name, '{}/clusters.json'.format(server_epicov)])
cb.callback("All done!")
| [
"[email protected]"
] | |
5bb2474a91304bec8b372f7f3f9219d03bece636 | c34f0355bea31edb189a0eb535ba66450af83605 | /Robin/randomDelete.py | 4b8aba1dcfc640ce25e2f0431346f27978bf9cbd | [] | no_license | sam676/PythonPracticeProblems | 0e1c1db95a107cbf56f49a3a2dfa7a5a925fee5e | 1b56aad515c9bf786d2573b28734a414a34c2efe | refs/heads/master | 2022-11-27T19:15:28.954182 | 2020-08-05T18:54:55 | 2020-08-05T18:54:55 | 58,278,543 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 712 | py | """
Thanos believes that in order to balance the program,
half of the elements in the list should be randomly deleted.
Write an Infinity Gauntlet program that randomly deletes and returns
half of the elements in the input list when Thanos bounces a finger
(when running the program).(Since it is randomly deleted, the output value
must be different every time even if the input value is the same)
Input example: [2, 3, 1, 6, 5, 7]
Output example 1: [2, 5, 7]
Output example 2: [3, 6, 5]
"""
import random
def randomDelete(inputList):
    """Return a new list holding a random half of *inputList*.

    ``len(inputList) // 2`` elements are chosen uniformly without
    replacement, so repeated calls usually yield different selections.
    The input list itself is left untouched.
    """
    # Floor division instead of int(len/2): same result, clearer intent.
    return random.sample(inputList, len(inputList) // 2)


if __name__ == "__main__":
    # Demo: "snap" the same list twice; the survivors differ between runs.
    # Renamed from `input` to avoid shadowing the builtin input().
    sample_numbers = [2, 3, 1, 6, 5, 7]
    print(randomDelete(sample_numbers))
    print(randomDelete(sample_numbers))
| [
"[email protected]"
] | |
26ed6ec44be1160006e15632ac9c6f9423250349 | 84f47c12b5806eeff345b4753e321b30d0157c0e | /st01.Python기초/py12리스트/py12_ex03_반평균.py.py | 48559e79cad41f8498682d03392d190c2891e3a7 | [] | no_license | tnqls1002gh/2020python | 89ce7f7c3cebb1841a62b936b76c0557193041c9 | 55fd05ac1242a579b411869ebcf3b6101502268f | refs/heads/master | 2021-01-01T10:57:24.645952 | 2020-03-15T09:10:15 | 2020-03-15T09:10:15 | 239,247,763 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 566 | py |
# Exercise (translated):
# 1. Create a list.
# 2. Read the number of students (at least 3).
# 3. Read one score per student and append it to the list.
# 4. Print the list, the total, and the class average.
scores = []
student_count = int(input("학생수입력:"))
for _ in range(student_count):
    score = int(input("성적입력:"))
    scores.append(score)
print(scores)
print(sum(scores))
# Bug fix: divide by the actual number of students, not a hard-coded 3.
print(sum(scores) / student_count)
| [
"[email protected]"
] | |
6980eb56ba74e71b056b4521f24b15403278db45 | e67df516ce91ba63fa20fe6c607ef7d6f60a534e | /M17A3.py | e99bf307191a03f6dcc0ad2fc35ebcfe9fb822a5 | [] | no_license | chinglamchoi/CompetitiveProgramming | 6e10605248dcd4e0130f8a55257cef1812d12a50 | 5d57d2b868599da2b9f055b0b6d0d2b28ce34b9f | refs/heads/master | 2021-03-21T16:04:39.443224 | 2020-06-17T01:18:25 | 2020-06-17T01:18:25 | 247,310,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 61 | py | a,b = input().split(" ")
a,b = int(a), int(b)
print(a+b|a-b)
| [
"[email protected]"
] | |
fd3e48475f71d1e5527fe6259abd5f7fe2b579da | 2990b0841b63f300a722107933c01c7237a7976b | /all_xuef/程序员练级+Never/Fun_Projects/Interpreter/scheme.py | 22e2995f3dee21cbe13685a87e7050789c4ddbb1 | [] | no_license | xuefengCrown/Files_01_xuef | 8ede04751689e0495e3691fc5d8682da4d382b4d | 677329b0189149cb07e7ba934612ad2b3e38ae35 | refs/heads/master | 2021-05-15T04:34:49.936001 | 2019-01-23T11:50:54 | 2019-01-23T11:50:54 | 118,802,861 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,997 | py | def tokenize(code_s): #词法分析:-->tokens
return code_s.replace('(', ' ( ').replace(')', ' ) ').split()
def analyze(toks):
    """Parser: consume tokens from *toks* (mutated in place) and return an
    expression tree built from nested lists, ints and symbol strings."""

    def to_atom(tok):
        """Convert a token to an int when possible, otherwise keep the string."""
        try:
            return int(tok)
        except (TypeError, ValueError):
            return tok

    def read_until_close(toks):
        """Parse sub-expressions up to the matching ')' and consume it."""
        items = []
        while toks[0] != ')':
            items.append(analyze(toks))
        toks.pop(0)  # discard the closing ')'
        return items

    tok = to_atom(toks.pop(0))
    if isinstance(tok, int) or tok not in ('(', ')'):
        # Atom: a number or a symbol.
        return tok
    if toks[0] == '(':
        # The head of the form is itself compound, e.g. ((lambda (x) x) 5).
        return read_until_close(toks)
    # Ordinary form: operator/special-form name followed by its operands.
    tree = [toks.pop(0)]
    tree.extend(read_until_close(toks))
    return tree
#############################
#  Basic operations on environments
#############################
env0 = []  # the empty global environment: a list of (name, value) pairs, innermost binding first
def extend_env(var, val, env):
    """Return a new environment with (var, val) bound in front of *env*.

    The original environment is left unmodified, so closures sharing it
    are unaffected.
    """
    return [(var, val)] + env
def lookup(env, var):
    """Return the value bound to *var*, searching from the innermost
    binding outwards; raise if the variable is unbound."""
    for name, value in env:
        if name == var:
            return value
    raise Exception("Unbind Variable:{v}".format(v=var))
from operator import mul,truediv,sub; from functools import reduce
# Primitive arithmetic table.  Note the arity convention: '+' and '*' are
# applied to a *list* of evaluated operands, while '-' and '/' are binary
# and get their operands unpacked with * (see the arithmetic branch of
# scheme_eval below).
context = {'+': sum, '-': sub, '*': (lambda args:reduce(mul,args)), '/': truediv}
# Evaluation helpers: classify expression-tree nodes.
is_number = lambda exp: type(exp) in (int,float)
is_symbol = lambda exp: not any([is_number(exp), isinstance(exp, list)])
SUPPORT_BASE_OPS = context.keys() # the supported primitive arithmetic operators
class Closure:
    """A user-defined function: a single parameter name, a body expression
    tree, and the environment captured at definition time (lexical scope)."""

    def __init__(self, var, body, env):
        """Capture parameter name, body and the defining environment."""
        self.var, self.body, self.env = var, body, env
def scheme_eval(exp_tree, env=env0):
    """Evaluate an expression tree (from analyze) in environment *env*.

    Supported forms: numbers, symbols, the special symbol 'globals',
    single-parameter lambda, let, define, primitive arithmetic, and
    single-argument procedure calls.
    """
    if is_number(exp_tree): # number: self-evaluating
        return exp_tree
    elif is_symbol(exp_tree): # symbol: a variable or a function name
        if exp_tree == 'globals':
            return env0
        return lookup(env, exp_tree)
    elif exp_tree[0] == 'lambda': # function definition -> build a closure
        # Only the first parameter is used: single-argument lambdas only.
        var = exp_tree[1][0]
        body = exp_tree[2]
        #print("create a closure: var:{v}, body:{b}, env:{e}".format(v=var,b=body,e=env))
        return Closure(var, body, env)
    elif exp_tree[0] == 'let': # local bindings
        bindings = exp_tree[1]
        new_env = env[:]
        # Each binding's value is evaluated in the *original* env, so let
        # bindings cannot see each other (like Scheme's let, not let*).
        for bind in bindings:
            new_env = extend_env(bind[0], scheme_eval(bind[1], env), new_env)
        e = exp_tree[2]
        return scheme_eval(e, new_env)
    elif exp_tree[0] == 'define':
        # Mutates *env* in place so the binding persists across calls.
        _,var,exp = exp_tree
        val = scheme_eval(exp, env)
        env.insert(0, (var,val))
        return val
    elif not(type(exp_tree[0]) == list) and exp_tree[0] in SUPPORT_BASE_OPS: # arithmetic expression
        # '+'/'*' take a list of operands; '-'/'/' are binary (see context).
        if exp_tree[0] in ('+','*'):
            return context[exp_tree[0]]([scheme_eval(e, env) for e in exp_tree[1:]])
        else: return context[exp_tree[0]](*[scheme_eval(e, env) for e in exp_tree[1:]])
    else: # Procedure Call (single argument)
        proc = scheme_eval(exp_tree[0], env)
        param = scheme_eval(exp_tree[1], env)
        return scheme_eval(proc.body, extend_env(proc.var, param, proc.env))
    # Unreachable: every branch above returns.
    raise Exception("Not Support OP!")
def r2(code):
    """Read *code*, parse it, and evaluate it in the global environment."""
    tree = analyze(tokenize(code))
    return scheme_eval(tree, env0)
# repl
def repl(prompt='> '):
    """Interactive read-eval-print loop.

    Evaluates each input line with r2() and prints non-None results in
    Scheme syntax; loops until interrupted (Ctrl-C / EOF).
    """
    while True:
        val = r2(input(prompt))
        if val is not None:
            print(schemestr(val))
def schemestr(exp):
"将一个Python对象转换回可以被Scheme读取的字符串。"
if isinstance(exp, list):
return '(' + ' '.join(map(schemestr, exp)) + ')'
else:
return str(exp)
if __name__ == '__main__':
    #repl()
    # NOTE(review): the tokenizer emits the quote "'" as its own token and
    # analyze() returns it as a bare symbol, so this prints just "'" rather
    # than the quoted list -- confirm whether quote support was intended.
    print(analyze(tokenize("'(1 (+ 2 3) 8)")))
| [
"[email protected]"
] | |
3480902f3cac6afb4c652921fae94f44eb349a78 | 623b3bbb1ebf7b9c48b8b46d5e9f46865f925b5f | /scripts/test.py | 192f360c7c0dc76ef6dada876ec6e5ed7a218a8c | [
"MIT"
] | permissive | iqbal-chowdhury/ACMN-Pytorch | 713b14ef401c7fc9559621679bbb231dd4f82aa5 | aef7722c45fa8bbe42b5b655e6350e098d723b1e | refs/heads/master | 2020-03-24T12:51:07.323529 | 2018-07-06T01:44:50 | 2018-07-06T01:44:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,125 | py | from tqdm import tqdm
import argparse
import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__), '../'))
from vqa_lab.evaluator import AccuracyEvaluator
parser = argparse.ArgumentParser(description='PyTorch resTree test ON CLEVR')
parser.add_argument('--batch_size', type=int, default=64, help="training batch size")
parser.add_argument('--threads', type=int, default=0, help='number of threads for data loader to use')
parser.add_argument('--gpu', type=bool, default=True, help='use gpu?')
parser.add_argument('--run_model', type=str, default='restree', choices=['restree', 'rbn'], help='run model')
parser.add_argument('--run_dataset', type=str, default='clevr', choices=['clevr'])
parser.add_argument('--eval_type', type=str, default='test', choices=['val', 'test'])
parser.add_argument('--resume', type=str, default=None, help='resume file name')
parser.add_argument('--logdir', type=str, default='logs/test', help='dir to tensorboard logs')
opt, _ = parser.parse_known_args()
#------ get dataloaders ------
from vqa_lab.data.data_loader import getDateLoader
print('==> Loading datasets :')
Dataloader = getDateLoader(opt.run_dataset)
dataset_test = Dataloader(opt.eval_type, opt)
opt.__dict__ = { **opt.__dict__, **dataset_test.dataset.opt }
#----------- end -------------
#------ get mode_lrunner -----
from vqa_lab.model.model_runner import getModelRunner
print('==> Building Network :')
model_runner = getModelRunner(opt.run_model)(opt)
#----------- end -------------
#----------- main ------------
print('Generating test results...')
answer_map = {}
for i_batch, input_batch in enumerate(tqdm(dataset_test)):
output_batch = model_runner.test_step(input_batch)
pred = output_batch['predicts'].max(1)[1]
for i in range(pred.size(0)):
answer_map[input_batch['qid'][i]] = dataset_test.dataset.ansVocabRev[pred[i]]
with open(os.path.join(opt.logdir, 'CLEVR_TEST_results.txt' ), 'w') as f:
for i in range(len(answer_map.keys())):
print(answer_map[i], file = f)
print(os.path.join(opt.logdir, 'CLEVR_TEST_results.txt' ) + ' saved.')
#----------- end -------------
| [
"[email protected]"
] | |
2c773e8fd596a9d13a101aab8381a083857e14bb | 0bf4720c77fb583758a76a429335dca11e9e932a | /2zaynenmadrid.py | f770b4fd185ea271838241dab7009262eb92852f | [] | no_license | allinvestglobal/n8 | d47b01fce15625b0497ff319b2d24566d7a5bedd | 3397d7f5550a9b20fd3bad7ff15aaf8da1d6cac5 | refs/heads/master | 2020-08-29T11:51:19.992664 | 2019-11-13T19:29:10 | 2019-11-13T19:29:10 | 218,023,892 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,434 | py |
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '1.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '2.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '3.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '4.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '5.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '6.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '7.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '8.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '9.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '10.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '11.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '12.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '13.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '14.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '15.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '16.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '17.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
import time
time.sleep(1800) # delays for 30 minutes
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '18.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '20.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '21.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '22.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '23.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '24.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '25.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '27.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '28.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '29.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '30.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '31.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '32.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '26.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '34.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
import time
time.sleep(1800) # delays for 30 minutes
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '35.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '36.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '37.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '38.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '39.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '40.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '41.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '42.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '43.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '44.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '45.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '46.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '47.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '48.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '49.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
import time
time.sleep(1800) # delays for 30 minutes
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '50.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '51.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '52.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '53.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '54.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '55.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '56.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '57.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '58.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '59.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '60.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '61.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '62.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '63.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '64.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '65.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '66.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
import time
time.sleep(1800) # delays for 30 minutes
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '67.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '68.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '69.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '70.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '71.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '72.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '73.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '74.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '75.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '76.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '77.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '78.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '79.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '80.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '81.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
import time
time.sleep(1800) # delays for 30 minutes
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '82.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '83.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '84.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '85.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '86.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '87.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '88.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '89.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
from instapy_cli import client
username = 'zaynenmadrid'
password = '20million'
image = '90.jpg'
text = ''
with client(username, password) as cli:
cli.upload(image, text)
| [
"[email protected]"
] | |
04e82777d2c4c4a870c576cb0ff5ee4c573256dc | a86bca3e88fc3012bc9805c74c2e752262370326 | /AI/current_tab_ctypes.py | 510106af5fe46b20ef9f2694c844aabd7d68bc09 | [
"MIT"
] | permissive | osamhack2021/AI_NoYoutube_60Duo | 4921f7c838776305d8dc00d6ceb04b2190565916 | c1e34b7b506b43c9be6c39da3211fac49bfbcd14 | refs/heads/main | 2023-08-11T19:24:45.560000 | 2021-10-13T15:00:38 | 2021-10-13T15:00:38 | 405,925,929 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 480 | py | import time, ctypes
time.sleep(3) # 타이틀을 가져오고자 하는 윈도우를 활성화 하기위해 의도적으로 3초 멈춥니다.
lib = ctypes.windll.LoadLibrary('user32.dll')
handle = lib.GetForegroundWindow() # 활성화된 윈도우의 핸들얻음
buffer = ctypes.create_unicode_buffer(255) # 타이틀을 저장할 버퍼
lib.GetWindowTextW(handle, buffer, ctypes.sizeof(buffer)) # 버퍼에 타이틀 저장
print(buffer.value) # 버퍼출력 | [
"[email protected]"
] | |
c7997369f3d5777fc33e2486e19c2152522d6c30 | 99aaf505f9fae90731aba60fca223abaee228f68 | /python/greene/quantile_normalization.py | be370b3b42fc150e1bfc9d4d9de863b4155bd30e | [] | no_license | pamd/foo | 13c3c354141b456f79274787aeb3e71c1f26e56b | 52ce658f3adac4aa618706d1256ead26e90ab6fe | refs/heads/master | 2016-09-05T14:35:22.727306 | 2015-12-27T22:42:58 | 2015-12-27T22:42:58 | 31,291,416 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,310 | py | #!/usr/bin/python
'''
Calculate quantile normalization, base on:
https://en.wikipedia.org/wiki/Quantile_normalization
'''
def quantile_normalize(input_matrx):
""""This function is a wrapper that calls all the other functions to calculate
an input matrix's quantile normalization.
"""
row = len(input_matrix)
col = len(input_matrix[0])
rank_matrix, sorted_matrix = get_rank_and_sorted_matrices(input_matrix, row, col)
row_average = get_row_average(sorted_matrix, row, col)
return get_output_matrix(rank_matrix, row_average, row, col)
def get_rank_and_sorted_matrices(input_matrix, row , col):
"This function calculates rank matrix and another matrix whose columns are sorted."
rank_matrix = { } # Implementation detail: instead of 2D list, a dictionary object is used.
sorted_matrix = { } # Implementation detail: instead of 2D list, a dictionary object is used.
for c in range(0, col):
current_col = [ ] # Save the current column into this variable.
for r in range(0, row):
current_col.append(input_matrix[r][c])
current_col.sort()
for r in range(0, row): # Put all elements in sorted column into sorted_matrix
sorted_matrix[r, c] = current_col[r]
for r in range(0, row): # Set values in rank_matrix
rank_matrix[r, c] = current_col.index(input_matrix[r][c])
return rank_matrix, sorted_matrix
def get_row_average(sorted_matrix, row, col):
"This function returns a list of average values in each row."
row_average = [ ]
for r in range(0, row):
sum = 0
for c in range(0, col):
sum += sorted_matrix[r, c]
row_average.append(sum / float(col))
return row_average
def get_output_matrix(rank_matrix, row_average, row, col):
"This function calculates quantile normalization based on rank matrix and row average."
output_matrix = [ ]
for r in range(0, row):
new_row = [ ]
for c in range(0, col):
rank_index = rank_matrix[r, c]
new_row.append(row_average[rank_index])
output_matrix.append(new_row)
return output_matrix
# Test cases in wikipedia:
input_matrix = [ [ 5, 4, 3 ], [ 2, 1, 4 ], [ 3, 4, 6 ], [ 4, 2, 8 ] ]
ret = quantile_normalize(input_matrix)
print ret
| [
"[email protected]"
] | |
5e25eaa15194b94c7e1c267a3fbaa3306025ff1f | 29d36fa7d2a967c850cc7df6753942f5b4b9a4d6 | /summary-project/myApp/migrations/0001_initial.py | 17ce300db15d375ab71e9aac5e4313d782e35683 | [] | no_license | rukyasu/django-test | a4a0dc95f6d935a1cf1e3446b8f443d756faf87f | 55f5dc537b70aa21c97fb3385fffe6ecd2646540 | refs/heads/master | 2021-05-15T21:52:23.190292 | 2017-10-12T21:47:52 | 2017-10-12T21:47:52 | 106,612,119 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 695 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-10-04 21:51
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(max_length=128)),
('last_name', models.CharField(max_length=128)),
('email', models.EmailField(max_length=264, unique=True)),
],
),
]
| [
"[email protected]"
] | |
f62d789359a7d8c897ed7892cb87c49e19fcb0aa | 7b8ee1fafb1a3ae440e21afbdcef58ff0a0cc14d | /plonker/urls.py | 39fb2f901d5af2b0d4f10b338937a647b7f96799 | [] | no_license | JasonMize/plonker | 1c250c654b700d6dd671fe5e9ad2fde227952a63 | 9cf3a0f771db3121f4eeeabcf049b6169bf13c80 | refs/heads/master | 2021-01-12T13:31:11.931636 | 2016-10-18T20:05:01 | 2016-10-18T20:05:01 | 69,370,161 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 902 | py | """plonker URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'^users/', include ('users.urls', namespace = 'users')),
url(r'^accounts/', include ('accounts.urls', namespace = 'accounts')),
url(r'^', include ('core.urls', namespace = 'core')),
]
| [
"[email protected]"
] | |
d10b2a9b41e0598ff8763a1746941d17e8fd275d | 0a01e4cfbf02ef794cacf5a9103eef7b144f0f18 | /daniel_1121/settings.py | a1f0cb615f419c3e290f309c220df641568c1c29 | [] | no_license | crowdbotics-apps/daniel-1121 | 581c1afec62815ab635ccbfd754830f06fd8045f | 8d14af971984c95c2453f30c77e34c01c39e9005 | refs/heads/master | 2022-12-12T09:18:36.394521 | 2019-02-28T20:37:16 | 2019-02-28T20:37:16 | 173,183,600 | 0 | 0 | null | 2022-12-08T01:42:20 | 2019-02-28T20:36:21 | Python | UTF-8 | Python | false | false | 4,570 | py | """
Django settings for daniel_1121 project.
Generated by 'django-admin startproject' using Django 1.11.16.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '#wb999fon0(&a@c2!@-2m1tvj#i4cog1)nza_lc35py!=%ohqa'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'daniel_1121.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'daniel_1121.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
import environ
env = environ.Env()
ALLOWED_HOSTS = ['*']
SITE_ID = 1
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
if env.str("DATABASE_URL", default=None):
DATABASES = {
'default': env.db()
}
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static')
]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
LOCAL_APPS = [
'home',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'bootstrap4',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
# allauth
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = None
LOGIN_REDIRECT_URL = '/'
if DEBUG:
# output email to console instead of sending
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
EMAIL_HOST = "smtp.sendgrid.net"
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
| [
"[email protected]"
] | |
7b69f735832d2f15be6951497d6e70b23ffc39bd | 386d744f965684248ae8da3043fc2fe2c6c1aa33 | /prog1.py | c73d4d223cd55255d515950f85ce9e764aeda3ca | [] | no_license | dichlofos/small_python_progs | 530566dcbbd2ce2ea8b5964cb9d744740f0227ab | 476f5345eee00042c7f388ca64882b5b06340cd1 | refs/heads/main | 2023-04-17T08:11:12.260892 | 2021-04-18T22:51:06 | 2021-04-18T22:51:06 | 358,671,642 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,511 | py | import os
import time
# os.rename("/home/turist/Downloads/test_gpx.gpx", "/home/turist/Downloads/test_gpx.txt")
with open('./test_gpx.txt', 'r') as f:
#ищем строку с trkseg, следующая - начало трека
point_start = -1
while point_start == -1:
str_new = f.readline()
print(str_new, end='')
point_start = str_new.find('trkseg')
print('i=', point_start)
#вычисляем секунду старта от начала эпохи
time_pt_start = str_new.find('time')
if time_pt_start != -1:
x = str_new[time_pt_start + 5:time_pt_start + 25]
y = time.strptime(x, "%Y-%m-%dT%H:%M:%SZ")
sec_start = time.mktime(y)
print('sec_start=', sec_start)
print('')
#собираем данные точек трека
result_dict = {}
pt_dict = {}
i = 0
break_word = -1
while break_word == -1:
str_new = f.readline()
lat = str_new.find('lat')
lon = str_new.find('lon')
ele = str_new.find('ele')
time_pt = str_new.find('time')
if lat != -1:
pt_dict['lat'] = str_new[lat + 5:lat + 15]
print('lat=', str_new[lat + 5:lat + 15])
print('Данные точки:', pt_dict)
lat = -1
if lon != -1:
pt_dict['lon'] = str_new[lon + 5:lon +15]
print('lon=', str_new[lon + 5:lon +15])
print('Данные точки:', pt_dict)
lon = -1
if ele != -1:
pt_dict['ele'] = str_new[ele + 4:ele + 8]
print('ele=', str_new[ele + 4:ele + 8])
print('Данные точки:', pt_dict)
ele = -1
if time_pt != -1:
print('time=', str_new[time_pt + 5:time_pt + 25])
x = str_new[time_pt + 5:time_pt +25]
y = time.strptime(x, "%Y-%m-%dT%H:%M:%SZ")
print(y)
sec_original = time.mktime(y)
print('sec_original=', sec_original)
pt_dict['sec_original'] = sec_original
print('Данные точки:', pt_dict)
time_pt = -1
if len(pt_dict) == 4:
for key in pt_dict:
result_dict[i][key] = pt_dict[key]
i = i + 1
print(result_dict)
pt_dict.clear()
break_word = str_new.find('/trkseg')
# os.rename("/home/turist/Downloads/test_gpx.txt", "/home/turist/Downloads/test_gpx.gpx")
| [
"[email protected]"
] | |
f3b74596e197de3b62e4616cb347d18359ceb1dc | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/334/usersdata/302/94780/submittedfiles/listas.py | 8dfcb03ca5cfcf1eaceb6d11acfe6403be9fd266 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 290 | py | # -*- coding: utf-8 -*-
n = int(input('Digite o número de elementos da lista: '))
a = []
b = []
for i in range(0,n,1):
a.append(int(input('Digite a%d: ' %(i+1))))
for i in range(0,n,1):
if i < n and i != 0:
b = a[i-1]-a[i]
if b < 0:
b = -b
print(b)
| [
"[email protected]"
] | |
70d4ab111639b85d03ea82f2c7436045ae6708bb | 7ddd1f19f088c822ff9dc56038a1b281629f918c | /radioReduction.py | db5cfa13858ef8d2a6548592f6e83b4279d98798 | [] | no_license | rvanlinge/radio_project | 73a6681bd07d39d257d25d988fe60ec61179041e | f43324e6e41ac0c9986a2e1150e226878e9224d7 | refs/heads/master | 2020-04-06T20:56:42.569523 | 2019-11-11T02:38:49 | 2019-11-11T02:38:49 | 157,788,179 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,285 | py | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import math
import glob
def binning(data,sec=5,binning=5):
'''
Creates a binned light curves
'''
# Filters out the bad data points
data = data[data['nsamp']>np.mean(data['nsamp'])-(0.5*(np.mean(data['nsamp'])))]
# Tells how many rows to bin in intervals of binning seconds
bins = sec/binning
# How many times can the data table be divided by binning time
split = len(data)/bins
# Creates binned times
data['secs'] = data['dtmin']*60
avgSecs = []
secs = data['secs']
secsSplit = np.array_split(secs,split)
for i in range(len(secsSplit)):
avgSecs.append(np.mean(secsSplit[i]))
# Calculating average MJD
avgMjd = []
mjd = data['mjd']
mjdSplit = np.array_split(mjd,split)
for i in range(len(mjdSplit)):
avgMjd.append(np.mean(mjdSplit[i]))
# Bins Flux
avgFlux = [] # holds averaged flux values
flux = data['re']
fluxSplit = np.array_split(flux,split)
for i in range(len(fluxSplit)):
avgFlux.append(np.mean(fluxSplit[i]))
# Bins uncert
avgErr = []
err = data['ure']
errSplit = np.array_split(err,split)
for i in range(len(errSplit)):
x = errSplit[i]
avgErr.append(1/np.sqrt(np.sum(1/x**2)))
# Creating binned data frame
binned = pd.DataFrame(data={'mjd':avgMjd,'dtmin':(np.array(avgSecs)/60),'secs':avgSecs,'re':avgFlux,'ure':avgErr})
return binned
def meanData(data):
data = data[data['nsamp']>np.mean(data['nsamp'])-(0.5*(np.mean(data['nsamp'])))]
# Getting MJD value
mean_mjd = np.mean(data['mjd'])
# Calculating the average flu
mean_flux = np.mean(data['re'])
# calculating the average err
mean_err = 1/np.sqrt(np.sum(1/data['ure']**2))
return mean_mjd,mean_flux,mean_err
def scatterPlotting(x,y,uncert=0,Type=0):
'''
Type:
0 = scatter
1 = plot
'''
if Type == 0:
if uncert == 0:
plt.figure(figsize=(10,8))
plt.scatter(x,y,color='k')
if uncert != 0:
plt.figure(figsize=(10,8))
plt.errorbar(x,y,yerr=uncert,fmt='o',linestyle='None',color='k')
return none
def lightCurve(path):
'''
Takes in a list of data tables and creates an average light curve
'''
# Holds the values
re = []
ure = []
mjd = []
# gathers all the data tables
dt = glob.glob(path)
for i in dt:
x = pd.read_table(i)
m,r,e = meanData(x)
mjd.append(m)
re.append(r)
ure.append(e)
new_dt = pd.DataFrame(data={'mjd':mjd,'re':re,'ure':ure})
new_dt = new_dt.sort_values('mjd')
new_dt = new_dt.reset_index(drop=True)
return new_dt
| [
"[email protected]"
] | |
bc77d10ea5e058014df19209626725ab9bdde174 | 62b5942773734fbd5373f4df9d9f983a3487c93c | /data.py | bb950ba4ac736e8cca7f2bbf4b0e566278817858 | [] | no_license | iliassjabali/cityu-add-drop-classes | 950e5e177cd9a81ebcfda59b5f94462cc6715f5a | 283fd85537573635c35bb67511ecb39193d2590f | refs/heads/master | 2022-03-04T01:17:44.416459 | 2019-11-07T08:51:39 | 2019-11-07T08:51:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,242 | py | # coding=utf-8
import time
# Bug fix: was "from _datetime import datetime" - _datetime is the private C
# accelerator module; import the public datetime package instead.
from datetime import datetime
from dataclasses import dataclass
import json

# JSON file holding the registration start time and the course CRN list.
info_file = "info.json"
# Populated by fetch_info() with an Info instance; None until then.
info = None
# CityU Banner web login page.
login_url = "https://banweb.cityu.edu.hk/pls/PROD/twgkpswd_cityu.P_WWWLogin"
# Declared-but-unassigned module settings, filled in elsewhere at runtime.
username: str
password: str
driver = None
log_file_name: str
# Default HTTP request headers.  Bug fix: the User-Agent *value* previously
# started with a duplicated "User-Agent: " prefix, which would be sent
# verbatim to the server.
headers = {
    'Accept': '*/*',
    'Accept-Encoding': 'gzip, deflate, br',
    'Accept-Language': 'en-HK,en;q=0.9,zh-CN;q=0.8,zh;q=0.7,zh-HK;q=0.6,en-US;q=0.5,ja;q=0.4',
    'Connection': 'keep-alive',
    'Content-Type': 'text/plain; charset=UTF-8',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
                  'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.80 Safari/537.36'
}
@dataclass
class Info:
    """Registration session data parsed from info.json by fetch_info()."""
    # Bug fix: the fields were annotated with the `time` *module* object.
    # Dataclasses do not enforce annotations, so behavior is unchanged, but
    # the annotations now describe the values fetch_info() actually stores.
    start_time: datetime   # parsed "time" field
    start_timestamp: int   # Unix timestamp of start_time
    courses: list          # list of course CRNs
def fetch_info():
    """Read info.json and publish its contents as the module-global `info`.

    The file must contain a "time" string in '%Y-%m-%d %H:%M:%S' format and
    a "courses" list of CRNs.
    """
    global info
    # Load time and CRNs from the json file.
    with open(info_file) as handle:
        payload = json.load(handle)
    start = datetime.strptime(payload["time"], '%Y-%m-%d %H:%M:%S')
    stamp = int(time.mktime(start.timetuple()))
    info = Info(start, stamp, payload["courses"])
if __name__ == '__main__':
    # Smoke test: load info.json and show what was parsed.
    fetch_info()
    # Bug fix: Info has no `username` field (`username` is an unrelated,
    # unassigned module global), so `info.username` raised AttributeError.
    # Printing the dataclass shows every loaded field instead.
    print(info)
| [
"[email protected]"
] | |
975d43e4e8606e502c7c09ea2a46a7f4eebda9e3 | ba805dd91b9a736e7ac5fd8c2a8eca132c1f85b8 | /console_scripts/scanDFT.py | fbac8f627301f8d06386be24c9bba2fb3a473e63 | [] | no_license | aaronchen0316/squid | a1a953f3f76690c6b2c7d4e68bbdf1628a4bfe62 | 9c6c534268a7e002bf05902b715267a6c37395f6 | refs/heads/master | 2020-06-04T11:02:17.593118 | 2019-03-13T13:06:22 | 2019-03-13T13:06:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,286 | py | # System imports
import sys
import os
# Squid imports
from squid import g09
from squid import orca
from squid import files
from squid import constants
from squid import units
# Plot energies
def plot(yy, start_val, x_label, y_label, title, x_range, y_range,
         x_low=0, save=False):
    """Draw one curve per series in yy, label the axes, then show or save.

    Relies on two module globals: `x_vals` (optional shared x coordinates)
    and `u2` (energy unit appended to the y-axis label).  When `save` is
    True the figure is written to out.png instead of being displayed.
    """
    import matplotlib.pyplot as plt
    lo_x, hi_x = x_low, x_low + len(yy[0]) - 1
    lo_y, hi_y = float('inf'), float('-inf')
    if x_vals is None:
        # No shared x axis: plot each series against its index, labelled by
        # its offset from start_val.
        for offset, series in enumerate(yy):
            plt.plot(series, marker='.', label=str(int(start_val) + offset))
            lo_y = min(lo_y, min(series))
            hi_y = max(hi_y, max(series))
    else:
        lo_x, hi_x = min(x_vals), max(x_vals)
        for offset, series in enumerate(yy):
            plt.plot(x_vals, series, marker='.',
                     label=str(int(start_val) + offset))
            lo_y = min(lo_y, min(series))
            hi_y = max(hi_y, max(series))
    plt.rc('font', **{'size': 16})
    plt.xlabel(x_label, fontsize=18)
    plt.ylabel('%s (%s)' % (y_label, u2), fontsize=18)
    plt.title(title, fontsize=20)
    # Fall back to data-driven ranges when the caller gave none.
    if x_range is None:
        x_range = [lo_x, hi_x]
    if y_range is None:
        y_range = [lo_y, hi_y * 1.05]
    plt.axis([x_range[0], x_range[1], y_range[0], y_range[1]])
    # plt.legend(loc=2)
    if save:
        plt.savefig("out.png", format="png")
    else:
        plt.show()
# DEFAULTS ARE HERE
# Defaults consumed by the flag parsing / prompting below:
#   dft       - which DFT package's output to read ('orca' or 'g09')
#   u1, u2    - energy units: u1 is the simulation unit ('Ha'), u2 the
#               plotted unit ('kT_300')
#   scale     - extra multiplier applied after the u1 -> u2 conversion
#   step      - stride through the START..STOP frame range
#   out_name  - basename for the xyz output file
#   comp      - '-c' compilation spec (char,start,stop), None when unused
#   neb_force - '-neb' endpoint run names, None when unused
(dft, u1, u2, scale, step,
 out_name, comp, neb_force) = ('orca', 'Ha', 'kT_300', 1.0, 1,
                               'out', None, None)
# Plot labelling defaults; x_vals optionally overrides the x coordinates.
(title, x_label, y_label,
 x_range, y_range, x_vals) = ('Energy Landscape', 'X-Axis', 'Y-Axis',
                              None, None, None)
peak = []      # frame indices whose output failed to parse (candidate TS peak)
spline = None  # set to 'y' when exactly one frame failed and is taken from iter 0
nebfc = None   # '-nebfc' value (parsed but not otherwise used in this section)
p_vals = False  # '-print': echo the plotted values to stdout
save = False    # '-save': write out.png instead of displaying the plot
dft_list = [dft, 'orca']  # accepted '-dft' values
# HELP SCREEN HERE
# NOTE(review): `help` shadows the builtin; kept because the Python 2
# `print help` statement below depends on this name.
help = '''
scanDFT
---------
A command to view the energy landscape over several configurations.
There are two ways to implement this:
1.
Note, the START STOP range is inclusive on either end.
scanDFT [Sim_Name%%d] START STOP [Options]
2.
scanDFT Sim_Name
The first method is useful when utilizing flags. The second method will
prompt the user for information. Note, in the second instance it will assume
an appendage of -%%d-%%d, describing the iteration and frame. It also assumes
and NEB scan is desired.
Flag Default Description
-help, -h : : Print this help menu
-dft : orca : Specify what type of dft simulation you want to
get the energy landscape of. Other options include
'orca'.
-units, -u : kT_300 : Specify the units you want the output to be in.
-scale : 1.0 : Scale all energies by this value. Applied AFTER
unit conversion from simulation units ('Ha') to
-units.
-out, -o : out : Make an output file with this name holding all xyz
coordinates of what you're scanning over.
-step : 1.0 : Steps to take between your start and stop range (integer)
-c : : Compile multiple energy landscapes on the same graph.
Takes three arguments, separated by commas with no spaces:
char,start,stop
The character is a unique identifier in the Sim_Name that will
be replaced with values from start to stop (inclusive)
-neb : : In NEB calculations, each iteration after the first does not
include the first and last energies. Giving this flag and a
run name for the first in the NEB list will tack on these energies
to the rest of the simulations
-nebfc : : Final NEB compilation. This automates flags to determine the number
of frames in each iteration, and which iteration is the final one.
Note, if a simulation crashed on iteration N, then this will proceed
with iteration N-1.
-title, -t : : Title for the output graph
-lx : : Label for the x-axis
-ly : : Label for the y-axis
-xrange : : Set the x-axis range
-yrange : : Set the y-axis range
-xvals : : Set a custom label for x-axis (comma separated).
-print, -p : : Print out the values that are plotted. Default off
-save, -s : : Whether to save the graph to out.png (True) or not (False). Note,
when saving it will not display the graph.
ex: scanDFT water
ex: scanDFT water_ 1 10
ex: scanDFT water_%d 1 10
ex: scanDFT water%d_opt 1 10
ex: scanDFT water_^_%d 1 10 -c ^,0,4 -dft orca
ex: scanDFT water_^_%d 1 10 -c ^,2,4 -dft orca -neb water_0_0,water_0_10
ex: scanDFT water_opt_%d 1 10 -t "Water Optimization" -xrange 0,5
'''
##############################################################################
# READ IN FLAGS HERE
##############################################################################
# Show the help screen when asked for, or when no run name was given.
if '-h' in sys.argv or '-help' in sys.argv or len(sys.argv) < 2:
    print help
    sys.exit()
# READ IN DATA
run_name = sys.argv[1]
# Check if we shall prompt user
dft = raw_input(
"What method of dft was used (orca/g09, default orca)? "
).lower().strip()
if dft == 'g09':
directory = "gaussian"
read = g09.read
elif dft == 'orca' or dft == '':
directory = "orca"
read = orca.read
else:
print("Error - Cannot proceed with DFT as %s." % dft)
sys.exit()
# Determine the number of iterations and frames
print("Determining number of successful iterations and frames... "),
N, M = 0, 0
if dft == 'g09':
while os.path.isfile("%s/%s-%d-%d.chk" % (directory, run_name, N, M)):
M += 1
max_frame = M - 1
while os.path.isfile("%s/%s-%d-1.chk" % (directory, run_name, N)):
N += 1
max_iter = N - 1
# Verify the last iteration did indeed succeed
success = True
for i in range(1, max_frame):
try:
_ = read("%s-%d-%d" % (run_name, max_iter, i))
except:
peak.append(i)
if len(peak) == 1:
peak = float(peak[0])
spline = 'y'
elif len(peak) > 1:
success = False
else:
pass
if not success:
max_iter -= 1
if max_iter < 0:
print("\nError - Final iteration that succeeded is less than 0.")
sys.exit()
else:
while os.path.isfile("%s/%s-%d-%d/%s-%d-%d.out"
% (directory, run_name, N, M, run_name, N, M)):
M += 1
max_frame = M - 1
while os.path.isfile("%s/%s-%d-1/%s-%d-1.out"
% (directory, run_name, N, run_name, N)):
N += 1
max_iter = N - 1
# Verify the last iteration did indeed succeed
success = True
for i in range(1, max_frame):
try:
_ = read("%s-%d-%d" % (run_name, max_iter, i)).energies[-1]
except:
peak.append(i)
if len(peak) == 1:
peak = float(peak[0])
spline = 'y'
elif len(peak) > 1:
success = False
else:
pass
if not success:
max_iter -= 1
if max_iter < 0:
print("\nError - Final iteration that succeeded is less than 0.")
sys.exit()
print("Done")
print("\tThere are a total of %d iterations of %d frames each.\n"
% (max_iter, max_frame))
plot_all = raw_input("Would you like to plot them all (y/n)? ").lower()
if plot_all in ['y', 'yes', 'sure', 'ok', 'do it', 'i dare you']:
iterations_to_plot = range(max_iter + 1)
frames_to_plot = range(max_frame + 1)
else:
try:
iterations_to_plot = input("\nWhich iterations would you like to \
plot? Input as a python range (ex. range(3,6) for iterations 3,4,5): ")
except:
print("\tDefaulting, only plotting last iteration...\n")
iterations_to_plot = [max_iter]
frames_to_plot = range(max_frame + 1)
if type(iterations_to_plot) is not list:
print("Error - iterations_to_plot must be a list!")
sys.exit()
if type(frames_to_plot) is not list:
print("Error - frames_to_plot must be a list!")
sys.exit()
# Now we can ask for plotting requests
plotting_flags = raw_input("\nUnits (%s): " % u2).strip()
if plotting_flags != "":
u2 = plotting_flags
plotting_flags = raw_input("\nScale (%lg): " % scale).strip()
if plotting_flags != "":
scale = float(plotting_flags)
plotting_flags = raw_input("\nPlot Title (%s): " % title).strip()
if plotting_flags != "":
title = plotting_flags
try:
plotting_flags = raw_input("\nX Axis Title: ").strip()
if plotting_flags != "":
x_label = plotting_flags
except:
print("\tDefaulting, X Axis label is \"%s\"...\n" % x_label)
try:
plotting_flags = raw_input("\nY Axis Title: ").strip()
if plotting_flags != "":
y_label = plotting_flags
except:
print("\tDefaulting, Y Axis label is \"%s\"...\n" % y_label)
try:
plotting_flags = input("\nX Range as an inclusive tuple (xmin,xmax): ")
x_range = plotting_flags
except:
print("\tDefaulting, X Range is [0:%d]...\n" % max_frame)
try:
plotting_flags = input("\nY Range as an inclusive tuple (ymin,ymax): ")
y_range = plotting_flags
except:
print("\tDefaulting, Y Range is [min_E, max_E*1.05]...\n")
try:
plotting_flags = raw_input("\nOutput xyz filename? ").strip()
if plotting_flags != "":
out_name = plotting_flags
except:
print("\tDefaulting, xyz filename is \"%s.xyz\"...\n" % out_name)
if ".xyz" in out_name:
out_name = out_name.split(".xyz")[0]
try:
plotting_flags = raw_input("\nSave plot to a png file instead of display \
(y/N)? ")
if plotting_flags != "":
save = plotting_flags.strip().lower() == "y"
except:
print("\tDefaulting, will display and not save.")
# At this point we have all the information we need from the user. We can
# now get the starting and ending energies of the NEB
first_E, peak_E, last_E = None, None, None
first_frame, peak_frame, last_frame = None, None, None
first_E = read("%s-0-0" % run_name).energies[-1]
first_frame = read("%s-0-0" % run_name).atoms
last_E = read("%s-0-%d" % (run_name, max_frame)).energies[-1]
last_frame = read("%s-0-%d" % (run_name, max_frame)).atoms
if spline == 'y':
peak_E = read("%s-0-%d" % (run_name, peak)).energies[-1]
peak_frame = read("%s-0-%d" % (run_name, peak)).atoms
else:
pass
# Loop through all the iterations requested
full_energy_list, energies, pathway = [], [], []
for iteration in iterations_to_plot:
energies = []
pathway = []
for frame in frames_to_plot:
if frame == 0:
energy = first_E
atoms = first_frame
elif frame == max_frame:
energy = last_E
atoms = last_frame
elif frame == peak and spline == 'y':
energy = peak_E
atoms = peak_frame
else:
energy = read(
"%s-%d-%d" % (run_name, iteration, frame)
).energies[-1]
atoms = read(
"%s-%d-%d" % (run_name, iteration, frame)
).atoms
energies.append(
units.convert_energy(u1, u2, energy - first_E) * scale
)
pathway.append(atoms)
full_energy_list.append(energies)
# Save the final iteration xyz
files.write_xyz(pathway, "%s" % out_name)
# Plot the graph
plot(full_energy_list,
iterations_to_plot[0],
x_label,
y_label,
title,
x_range,
y_range,
x_low=frames_to_plot[0],
save=save)
else:
    # FLAG MODE - run as "scanDFT Sim_Name START STOP [flags]" with an
    # inclusive frame range; each option below overrides a default defined
    # at the top of the file (see the help string).
    start = int(sys.argv[2])
    stop = int(sys.argv[3])
    if '-dft' in sys.argv:
        dft = sys.argv[sys.argv.index('-dft') + 1].lower()
        if dft not in dft_list:
            print("Error - %s not recognized for dft." % dft)
            sys.exit()
    if [s for s in ['-units', '-u'] if s in sys.argv]:
        # A non-empty list is truthy: either spelling of the flag was given.
        s = '-u' if '-u' in sys.argv else '-units'
        u2 = sys.argv[sys.argv.index(s) + 1]
        if u2 not in constants.ENERGY:
            print("Error - Energy unit not available. Consider using -scale.")
            sys.exit()
    if '-scale' in sys.argv:
        scale = float(sys.argv[sys.argv.index('-scale') + 1])
    if '-step' in sys.argv:
        step = int(sys.argv[sys.argv.index('-step') + 1])
    if '-nebfc' in sys.argv:
        nebfc = int(sys.argv[sys.argv.index('-nebfc') + 1])
    if [s for s in ['-o', '-out'] if s in sys.argv]:
        s = '-o' if '-o' in sys.argv else '-out'
        out_name = sys.argv[sys.argv.index(s) + 1].replace(' ', '_')
        if len(out_name) < 5 or out_name[-4:] != '.xyz':
            out_name += '.xyz'
    if '-c' in sys.argv:
        # char,start,stop - overlay several landscapes on one graph.
        comp = sys.argv[sys.argv.index('-c') + 1].split(',')
    if '-neb' in sys.argv:
        # Run names holding the fixed first/last NEB endpoint energies.
        neb_force = sys.argv[sys.argv.index('-neb') + 1].split(',')
    if [s for s in ['-t', '-title'] if s in sys.argv]:
        s = '-t' if '-t' in sys.argv else '-title'
        title = sys.argv[sys.argv.index(s) + 1]
    if '-lx' in sys.argv:
        x_label = sys.argv[sys.argv.index('-lx') + 1]
    if '-ly' in sys.argv:
        y_label = sys.argv[sys.argv.index('-ly') + 1]
    if '-xrange' in sys.argv:
        x_range = sys.argv[sys.argv.index('-xrange') + 1].split(',')
        x_range = [float(x) for x in x_range]
    if '-yrange' in sys.argv:
        y_range = sys.argv[sys.argv.index('-yrange') + 1].split(',')
        y_range = [float(y) for y in y_range]
    if '-xvals' in sys.argv:
        x_vals = sys.argv[sys.argv.index('-xvals') + 1].split(',')
        x_vals = [float(x) for x in x_vals]
    if [s for s in ['-p', '-print'] if s in sys.argv]:
        p_vals = True
    if [s for s in ['-s', '-save'] if s in sys.argv]:
        save = True
    # BEGIN MAKING ENERGY LANDSCAPE
    if dft == 'g09':
        read = g09.read
    elif dft == 'orca':
        read = orca.read
    else:
        print("Error - Cannot proceed with DFT as %s." % dft)
        sys.exit()
    ##########################################################################
    # Fixed endpoint energies/geometries for NEB runs (see -neb above).
    first_E, last_E = None, None
    first_frame, last_frame = None, None
    if neb_force is not None:
        first_E = read(neb_force[0]).energies[-1]
        first_frame = read(neb_force[0]).atoms
        last_E = read(neb_force[1]).energies[-1]
        last_frame = read(neb_force[1]).atoms
    energies, frames = [], []
    # Loop through energies
    if comp is None:
        comp = [None, 0, 0]   # degenerate spec: a single landscape
    for c in range(int(comp[1]), int(comp[2]) + 1):
        # Substitute the -c placeholder character (if any) with this index.
        run_hold = run_name.replace(
            comp[0], str(c)
        ) if comp[0] is not None else run_name
        tmp_E, tmp_frames = [], []
        if neb_force is not None:
            tmp_frames.append(first_frame)
            tmp_E.append(first_E)
        for i in range(start, stop + 1, step):
            # Get run name for this iteration
            # If run_hold has no '%' placeholder the index is appended,
            # otherwise it is %-formatted into the name.
            chk = run_hold.find('%') == -1
            run = run_hold + str(i) if chk else run_hold % i
            data = read(run)
            tmp_E.append(data.energies[-1])
            tmp_frames.append(data.atoms)
        if neb_force is not None:
            tmp_frames.append(last_frame)
            tmp_E.append(last_E)
        energies.append(tmp_E)
        frames = tmp_frames
    # Adjust energies
    # Every curve is shifted relative to the very first energy, converted
    # from u1 ('Ha') to u2, then scaled.
    E_offset = energies[0][0]
    for i in range(len(energies)):
        for j, e in enumerate(energies[i]):
            energies[i][j] = units.convert_energy(u1, u2, e - E_offset) * scale
    if comp[0] is not None:
        start -= 1
    plot(energies, start, x_label, y_label, title, x_range, y_range, save=save)
    # Write files
    files.write_xyz(frames, out_name[:-4])
    # Print out values if desired
    if p_vals:
        for y in energies:
            print(str(y))
| [
"[email protected]"
] | |
e7671c52fc06f8c55b6d0740827fa5848efc6ed8 | bd4bcf3684f548d127551932438094069b9f1197 | /website/views.py | 6d7bddd9cb9611ce4a9b065ce1c48faa640a63d0 | [] | no_license | africaarise19/afriacaariseschool | 2f5e94690c1dadc91526173b3f0e44156eaf1a8c | ee539bf075f7c205db54fcc7093369bf3b5e580a | refs/heads/master | 2021-09-27T15:53:02.348378 | 2020-02-22T04:21:13 | 2020-02-22T04:21:13 | 241,099,675 | 0 | 0 | null | 2021-09-22T18:35:37 | 2020-02-17T12:15:30 | HTML | UTF-8 | Python | false | false | 779 | py | from django.shortcuts import render
from django.core.mail import send_mail
def home(request):
    """Render the public landing page (home.html) with an empty context."""
    return render(request, 'home.html', {})
def contact(request):
    """Render the contact page; on POST, email the visitor's message.

    Expects the form fields 'message-name', 'message-email' and 'message'.
    The submitted name becomes the subject and the visitor's address the
    sender; the message is delivered to the site mailbox.
    """
    if request.method == "POST":
        # Robustness fix: use .get() so a missing form field yields an empty
        # string instead of raising MultiValueDictKeyError (an HTTP 500).
        message_name = request.POST.get('message-name', '')
        message_email = request.POST.get('message-email', '')
        message = request.POST.get('message', '')
        # Send email
        send_mail(
            message_name,               # Subject
            message,                    # Message body
            message_email,              # From email (visitor-supplied)
            ['[email protected]'],  # To email
            fail_silently=False,
        )
        return render(request, 'contact.html', {'message_name': message_name})
    else:
        # Any non-POST request just shows the empty form.
        return render(request, 'contact.html', {})
| [
"kdccoalpha.github.io"
] | kdccoalpha.github.io |
35e549d99b1e7bb6bdf916a8c92b8a79303bc48f | 07522d530c74339963b1132121c04de5390c779e | /src/main/python/fusion.py | bf4b55b523681db00d50afbefce0f11ac44d9617 | [
"Apache-2.0"
] | permissive | Alchemist75/anserini | 6b537214c11c4d807aec7cbdcd66ac22074750fb | dfcc0e2d3ce03381379454feb6e5d813d810a30a | refs/heads/master | 2022-08-20T18:24:05.344485 | 2020-05-26T02:03:43 | 2020-05-26T02:03:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,222 | py | # -*- coding: utf-8 -*-
"""
Anserini: A Lucene toolkit for replicable information retrieval research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
import enum
from typing import List
from trectools import fusion, TrecRun
class FusionMethod(enum.Enum):
    """Run-fusion strategies; values are the spellings accepted on the CLI."""
    RRF = 'RRF'              # reciprocal rank fusion
    COMBO_SUM = 'COMBO_SUM'  # trectools "sum" combination strategy
def load_trec_runs(paths: List[str]) -> List[TrecRun]:
    """Parse each run-file path into a trectools TrecRun."""
    print(f'Loading {len(paths)} runs')
    runs = []
    for run_path in paths:
        runs.append(TrecRun(run_path))
    return runs
def perform_fusion(method: FusionMethod, runs: List[TrecRun], output_path: str) -> None:
    """Fuse `runs` with the requested method and write the result to `output_path`.

    Raises:
        ValueError: if `method` is not a supported FusionMethod.  (ValueError
        is a subclass of Exception, so callers catching broadly still work.)
    """
    print('Performing fusion ->', method)
    if method == FusionMethod.RRF:
        fused_run = fusion.reciprocal_rank_fusion(runs)
        fused_run.print_subset(output_path, topics=fused_run.topics())
    elif method == FusionMethod.COMBO_SUM:
        # 'w' truncates just like 'w+'; read access is never needed here.
        with open(output_path, 'w') as f:
            fusion.combos(runs, strategy="sum", output=f)
    else:
        raise ValueError(f'The requested method {method} is not implemented.')
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='performs various methods of fusion supported by trectools')
parser.add_argument('--method', type=FusionMethod,
default=FusionMethod.RRF, required=False, help='specify the fusion method')
parser.add_argument('--runs', type=str, nargs='+',
default=[], required=True, help='a list of run files')
parser.add_argument('--out', type=str,
default="fused.txt", required=False, help='the output path of the fused run')
args = parser.parse_args()
trec_runs = load_trec_runs(args.runs)
perform_fusion(args.method, trec_runs, args.out)
print(f'Fusion successful -> {args.out}')
| [
"[email protected]"
] | |
b71e9bb79c2b24efedac532b884b5dc6d0e23cb6 | 57516ed4753c5c78804472f901161aecd9745404 | /scikit-learn/lang_plot.py | 37aacffcd51990f9e3a3c082100a5034c074e939 | [] | no_license | ghfkdgml/Tensorflow | 9a0764973bd6d7b8457ad728af8c940963303d7a | 7654d3f95711824653207a19097ba7528b82a453 | refs/heads/master | 2020-06-27T21:53:23.521697 | 2019-10-01T06:08:59 | 2019-10-01T06:08:59 | 200,059,946 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 593 | py | import matplotlib.pyplot as plt
import pandas as pd
import json

# Load the per-language letter-frequency data produced by the trainer.
with open("./lang/freq.json", "r", encoding="utf-8") as f:
    freq = json.load(f)

# Merge the a-z frequency vectors of samples sharing a language label.
# NOTE(review): the running (old + new) / 2 update weights later samples
# more heavily than earlier ones - confirm this is the intended averaging.
lang_dic = {}
for i, lbl in enumerate(freq[0]["labels"]):
    fq = freq[0]["freqs"][i]
    if not (lbl in lang_dic):
        lang_dic[lbl] = fq
        continue
    for idx, v in enumerate(fq):
        lang_dic[lbl][idx] = (lang_dic[lbl][idx] + v) / 2

# Row labels: the letters a..z (chr(97) == 'a').
asclist = [[chr(n) for n in range(97, 97 + 26)]]
df = pd.DataFrame(lang_dic, index=asclist)
plt.style.use('ggplot')
# One bar subplot per language, frequencies clipped to [0, 0.15].
df.plot(kind="bar", subplots=True, ylim=(0, 0.15))
plt.savefig("lang-plot.png")
| [
"[email protected]"
] | |
a5d5f087dff9c295833e46547aa5a1176be555e2 | 9f5db5f4413c4c1d948313fa46f4f0ecbcba99ef | /keyframe_format.py | 2b56ed59965ae3858aba21b0fb6171a2676ccb69 | [] | no_license | davidmfry/AE-Keyframe-Creator | a45aaf8a61dd196186f1ff4083400e25ec8a022c | 2bd20eae5aa43602757949f9a3852072ef17bb2c | refs/heads/master | 2020-05-17T13:07:59.979881 | 2015-05-07T15:57:09 | 2015-05-07T15:57:09 | 35,223,760 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,093 | py | __author__ = 'david'
from create_keyframes import *
# Fixed first and last lines of the Adobe After Effects clipboard
# keyframe-data format; format_keyframe_file() wraps its output with these.
keyframe_boiler_plate = "Adobe After Effects 8.0 Keyframe Data"
keyframe_end_line = "End of Keyframe Data"
#
# keyframe_list = []
#
# def add_keyframe_to_list(keyframe_type):
#
#
#
# def export_keyframe_data():
#     pass
class Keyframe_Formatter():
    """Holds the clip metadata needed to emit AE keyframe data."""

    # Class-level defaults, overwritten per instance by __init__.
    units_per_sec = 0.0          # clip frame rate (frames per second)
    src_width = 0                # source width in pixels, e.g. 1920
    src_height = 0               # source height in pixels, e.g. 1080
    src_pixel_aspect_ratio = 0   # 1 means square pixels
    comp_pixel_aspect_ratio = 0  # 1 means square pixels

    def __init__(self, units_per_sec, src_width, src_height,
                 src_pixel_aspect_ratio=1, comp_pixel_aspect_ratio=1):
        """Record the clip's frame rate, dimensions and aspect ratios."""
        (self.units_per_sec,
         self.src_width,
         self.src_height,
         self.src_pixel_aspect_ratio,
         self.comp_pixel_aspect_ratio) = (units_per_sec,
                                          src_width,
                                          src_height,
                                          src_pixel_aspect_ratio,
                                          comp_pixel_aspect_ratio)
def add_video_clip_stats(units_per_sec, src_width, src_height,
                         src_pixel_aspect_ratio=1, comp_pixel_aspect_ratio=1):
    """Bundle the clip stats into the 5-tuple format_keyframe_file expects."""
    stats = (units_per_sec, src_width, src_height,
             src_pixel_aspect_ratio, comp_pixel_aspect_ratio)
    return stats
def format_keyframe_type():
    # Returns a hard-coded "Transform Position" keyframe block containing a
    # single key at frame 0.  NOTE(review): AE's clipboard format is
    # tab-separated; this literal's whitespace may have been mangled in
    # transit - verify it against a real AE clipboard dump.
    return '''Transform Position
 Frame X pixels Y pixels Z pixels
 0 960 540 -2666.67
'''
def format_keyframe_file(video_stats, keyframes):
    """Assemble a complete AE keyframe-data blob for one clip.

    video_stats is the 5-tuple returned by add_video_clip_stats();
    keyframes is a pre-formatted keyframe section (e.g. the output of
    format_keyframe_type()).
    """
    # unpacks the video_stats tupel
    (frames_per_sec, width, height, src_aspect, comp_aspect) = video_stats
    # Header mirrors what AE writes when keyframes are copied, followed by
    # the keyframe body and the closing sentinel line.  NOTE(review): the
    # header fields should be tab-separated; confirm the literal's
    # whitespace survived intact.
    top = '''{boiler_plate}
 Units Per Second {frames_per_sec}
 Source Width {width}
 Source Height {height}
 Source Pixel Aspect Ratio {src_aspect}
 Comp Pixel Aspect Ratio {comp_aspect}
{keyframes}
{end_line}
'''.format(boiler_plate=keyframe_boiler_plate, frames_per_sec=frames_per_sec, width=width, height=height,
           src_aspect=src_aspect, comp_aspect=comp_aspect, keyframes=keyframes, end_line=keyframe_end_line)
    return top


# Smoke test: emit a full keyframe file for a 29.97 fps 1920x1080 clip.
print(format_keyframe_file(add_video_clip_stats(29.97, 1920, 1080), format_keyframe_type()))
"[email protected]"
] | |
25c784e5ff8e4732e47b131445f1b4d78109918b | 7b53052ed4f593d5112c48eda077dc2f7b8f8225 | /wild_bills/users/tests/test_models.py | 224d13c71c114a52fa9de54912999e8f2bb9e4a3 | [] | no_license | luiscberrocal/wild_bills | 074a63918d7980254c2f07fbf03becd78774853c | d13490765883d18b9c00249c534b5cf2fd157320 | refs/heads/master | 2021-05-04T08:01:23.835177 | 2016-10-16T14:37:46 | 2016-10-16T14:37:46 | 70,325,676 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 515 | py | from django.utils.translation import activate
from test_plus.test import TestCase
class TestUser(TestCase):
    """Tests for the custom User model (django-test-plus TestCase)."""

    def setUp(self):
        # make_user() creates a user whose default username is 'testuser'.
        self.user = self.make_user()
        # Force the English locale so i18n URL prefixes are deterministic.
        activate('en')

    def test__str__(self):
        # __str__ should return the username.
        self.assertEqual(
            self.user.__str__(),
            'testuser'  # This is the default username for self.make_user()
        )

    def test_get_absolute_url(self):
        # URL carries the '/en/' prefix because of activate('en') in setUp.
        self.assertEqual(
            self.user.get_absolute_url(),
            '/en/users/testuser/'
        )
| [
"[email protected]"
] | |
d81f70deae326dc9cf189382620a0dea39ecedc9 | ace6462863a254a45791c02b692c2299a262f3ed | /data/who.py | d655e17dbb9c45d903dd5ce43f8593cc9cfc8a92 | [
"Apache-2.0"
] | permissive | Cabdulahi/cadowx | 4514adbd0943c95f4fe5a03a6fb26751c2419723 | 755e59dad4dccc4f6d366a2ae5697b0abed06797 | refs/heads/master | 2020-05-15T01:31:02.894311 | 2019-04-18T07:09:42 | 2019-04-18T07:09:42 | 182,031,672 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,509 | py | #!/usr/bin/python
# coding=utf-8
#//Coded By : Cabdulahi Sharif
import os,sys,time,requests
import sys,time,os
import income
import pc
import subprocess as cadow
prred = ("\033[91m")
green = ("\033[92m")
pryellow = ("\033[93m")
prLightPurple=("\033[94m")
prCyan=("\033[96m")
prgray = ("\033[97m")
prBlack=("\033[98m")
p = '\033[35m' # purple
c = '\033[36m' # cyan
huh = '\033[32;1m'
cade = ("")
class action:
def __init__(self):
self.hom()
self.host= raw_input(green+'[💉]Set Fake Host:'+green+' ')
print
time.sleep(0.2)
if(self.host==''):
return action()
else:
return self.port()
def port(self):
self.port = int(raw_input(green+'[📌]Set Port :'+green+' '))
print
if(self.port==''):
return self.port()
else:
return self.ser()
def ser(self):
print '-'*43
print
print (c+' [🌍]Choose Server')
print
print (prred+' [{1}]'+green+' Serveo')
print (prred+' [{2}]'+green+' Ngrok')
print (prred+' [{0}]'+green+' None')
print
serr= raw_input('[💉]Choose Option : ')
if (serr==''):
time.sleep(1)
exit()
else:
if (serr=='1'):
return self.servio()
elif (serr=='2' or serr=='0'):
return self.local()
def action2(self):
print
self.host= raw_input(green+'[💉]Set Fake Host:'+green+' ')
print
time.sleep(0.2)
if(self.host==''):
return action()
else:
return self.port()
def device():
print '-'*43
print
print (c+p+' [🚀]Choose Device Type')
print
print (green+' [(1)] PC ')
print (green+' [(2)] Phone ')
print (green+' [(3)] None ')
print (green+' [(0)] Exit ')
return self.me()
def me(self):
print
cadow = (green+'[🚩]Cabdulahi=>>: ')
cade = raw_input(cadow+'')
if (cade==''):
return self.me()
else:
if(cade=='1'):
pc.action()
elif (cade=='2'):
return self.action2()
elif (cade=='3'):
return self.action2()
elif (cade=='0'):
time.sleep(1)
exit()
else:
exit(prred+'[🚶]Exiting')
time.sleep(1)
def servio(self):
while True:
with open("logs.txt","w") as cade:
cadow.Popen(
["php","-S","localhost:{}".format(self.port),"-t","facebook/"],stderr=cadow.PIPE,stdin=cade,stdout=cade)
print
print ('[💊]Send This Link To The Victim : http://{}.serveo.net '.format(self.host))
os.system('ssh -R {}.serveo.net'.format(self.host)+':80:localhost:{} serveo.net'.format(self.port))
print
income.home()
def local(self):
print
while True:
with open("logs.txt","w") as cade:
cadow.Popen(
["php","-S","localhost:{}".format(self.port),"-t","facebook/"],stderr=cadow.PIPE,stdin=cade,stdout=cade)
print
print ('[🌐]Listening Localhost On 127.0.0.1:{}'.format(self.port))
time.sleep(1)
income.home()
def hom(self):
print '-'*43
time.sleep(1)
print
print (c+p+''' [🚀]Choose Device Type''')
print
print (green+' [{1}] PC')
print (green+' [{2}] Phone')
print (green+' [{3}] None')
print (green+' [{0}] Exit')
return self.me() | [
"[email protected]"
] | |
827114899ba5faac8ec359adbe41410bb5e4197e | f5a5e2090600826a7f166773658a8d2106c544c6 | /constants/horsemanconst.py | 8c9ff11ff37f98ef832a85f840cc97b214f9f107 | [] | no_license | Tuskub/StrategyGameOOP | 585df98909e6822b6f61703a0158516e62a8e1a1 | 801350335e2655ae49d5684517e198f58205489c | refs/heads/master | 2021-02-03T21:23:43.569081 | 2020-03-10T11:09:52 | 2020-03-10T11:09:52 | 243,542,666 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 37 | py | HP = 200
# Unit stat constants (HP is defined just above); presumably for the
# horseman unit, per the module name - TODO confirm units/usage at call sites.
MOVE_RANGE = 10
DAMAGE = 75
| [
"[email protected]"
] | |
b756e831986be050b9b74f5824cf78a3c9db677f | d14b70b7521fae69ee4c3530d2a139f6cb31bf21 | /频繁项挖掘/menzhen_dup_person.py | 651668ea6745be577f645d7e3f1218fe785dc311 | [] | no_license | pan060757/mif_python | 37503c7c69871b1bbc513bb6c28ac8b397ccaf21 | b0abf9899f7c99949d33e5cd8c8580bd79ea6340 | refs/heads/master | 2021-09-10T23:58:43.146579 | 2018-04-04T14:28:29 | 2018-04-04T14:28:29 | 113,953,509 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 557 | py | #-*-coding:utf-8 -*-
'''
Outpatient (menzhen) records involving duplicate charges.
(Translated from the original Chinese docstring.)
'''
from pyspark import SparkContext

sc = SparkContext()
# Load the outpatient records previously flagged as duplicates.
data = sc.textFile("file:///home/edu/songsong/python/freItem/output/menzhen_duplicate.csv")
# Key by person id (column 0); value = (fee amount from column 11, count 1).
# NOTE(review): the reducer keeps only the FIRST record's fee (a[0]) while
# summing the counts - if the total duplicated fee per person was intended,
# it should be (a[0] + b[0], a[1] + b[1]).  Confirm before changing.
data = data.map(lambda line: line.split(','))\
    .map(lambda line: (line[0], (float(line[11]), 1)))\
    .reduceByKey(lambda a, b: (a[0], a[1] + b[1]))\
    .sortByKey()
# Write one "person,fee,count" line per person.
out = open('output/menzhen_dup_person.csv', 'w+')
for (key, value) in data.collect():
    out.write("%s,%.2f,%d\n" % (key, value[0], value[1]))
out.close()
| [
"[email protected]"
] | |
4fbf08ebfb6f577f08117398b0fc7d2860c4c655 | a62adccc82ee6ef7ebd94d710b5f75056626057f | /account_tgt/wizard/__init__.py | 2a1d5b3a092e2bf98311a01e7d0478867c5d573b | [] | no_license | Lunchik/openerp_custom_modules | 614b2fa6ccf1aaaaa2b99210bcd72aea301079d0 | b7707762d66743dad139727b3903063393f0da93 | refs/heads/master | 2020-05-19T13:58:50.328789 | 2014-12-01T10:36:39 | 2014-12-01T10:36:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 117 | py | import account_profit_loss_report
import account_balance
import ar_aging
import billing_report
import country_rev_wiz | [
"[email protected]"
] | |
dd09e2dd198937faffa08b2230d4a3971081399a | 0958cceb81de1c7ee74b0c436b800a1dc54dd48a | /wincewebkit/WebKitTools/Scripts/webkitpy/common/checkout/diff_parser_unittest.py | cd3d870914cd00d9d3cdcbc8659e0fde9b3d7256 | [] | no_license | datadiode/WinCEWebKit | 3586fac69ba7ce9efbde42250266ddbc5c920c5e | d331d103dbc58406ed610410736b59899d688632 | refs/heads/master | 2023-03-15T23:47:30.374484 | 2014-08-14T14:41:13 | 2014-08-14T14:41:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,917 | py | # Copyright (C) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
import diff_parser
import re
class DiffParserTest(unittest.TestCase):
_PATCH = '''diff --git a/WebCore/rendering/style/StyleFlexibleBoxData.h b/WebCore/rendering/style/StyleFlexibleBoxData.h
index f5d5e74..3b6aa92 100644
--- a/WebCore/rendering/style/StyleFlexibleBoxData.h
+++ b/WebCore/rendering/style/StyleFlexibleBoxData.h
@@ -47,7 +47,6 @@ public:
unsigned align : 3; // EBoxAlignment
unsigned pack: 3; // EBoxAlignment
- unsigned orient: 1; // EBoxOrient
unsigned lines : 1; // EBoxLines
private:
diff --git a/WebCore/rendering/style/StyleRareInheritedData.cpp b/WebCore/rendering/style/StyleRareInheritedData.cpp
index ce21720..324929e 100644
--- a/WebCore/rendering/style/StyleRareInheritedData.cpp
+++ b/WebCore/rendering/style/StyleRareInheritedData.cpp
@@ -39,6 +39,7 @@ StyleRareInheritedData::StyleRareInheritedData()
, textSizeAdjust(RenderStyle::initialTextSizeAdjust())
, resize(RenderStyle::initialResize())
, userSelect(RenderStyle::initialUserSelect())
+ , boxOrient(RenderStyle::initialBoxOrient())
{
}
@@ -58,6 +59,7 @@ StyleRareInheritedData::StyleRareInheritedData(const StyleRareInheritedData& o)
, textSizeAdjust(o.textSizeAdjust)
, resize(o.resize)
, userSelect(o.userSelect)
+ , boxOrient(o.boxOrient)
{
}
@@ -81,7 +83,8 @@ bool StyleRareInheritedData::operator==(const StyleRareInheritedData& o) const
&& khtmlLineBreak == o.khtmlLineBreak
&& textSizeAdjust == o.textSizeAdjust
&& resize == o.resize
- && userSelect == o.userSelect;
+ && userSelect == o.userSelect
+ && boxOrient == o.boxOrient;
}
bool StyleRareInheritedData::shadowDataEquivalent(const StyleRareInheritedData& o) const
diff --git a/LayoutTests/platform/mac/fast/flexbox/box-orient-button-expected.checksum b/LayoutTests/platform/mac/fast/flexbox/box-orient-button-expected.checksum
new file mode 100644
index 0000000..6db26bd
--- /dev/null
+++ b/LayoutTests/platform/mac/fast/flexbox/box-orient-button-expected.checksum
@@ -0,0 +1 @@
+61a373ee739673a9dcd7bac62b9f182e
\ No newline at end of file
'''
def test_diff_parser(self, parser = None):
if not parser:
parser = diff_parser.DiffParser(self._PATCH.splitlines())
self.assertEquals(3, len(parser.files))
self.assertTrue('WebCore/rendering/style/StyleFlexibleBoxData.h' in parser.files)
diff = parser.files['WebCore/rendering/style/StyleFlexibleBoxData.h']
self.assertEquals(7, len(diff.lines))
# The first two unchaged lines.
self.assertEquals((47, 47), diff.lines[0][0:2])
self.assertEquals('', diff.lines[0][2])
self.assertEquals((48, 48), diff.lines[1][0:2])
self.assertEquals(' unsigned align : 3; // EBoxAlignment', diff.lines[1][2])
# The deleted line
self.assertEquals((50, 0), diff.lines[3][0:2])
self.assertEquals(' unsigned orient: 1; // EBoxOrient', diff.lines[3][2])
# The first file looks OK. Let's check the next, more complicated file.
self.assertTrue('WebCore/rendering/style/StyleRareInheritedData.cpp' in parser.files)
diff = parser.files['WebCore/rendering/style/StyleRareInheritedData.cpp']
# There are 3 chunks.
self.assertEquals(7 + 7 + 9, len(diff.lines))
# Around an added line.
self.assertEquals((60, 61), diff.lines[9][0:2])
self.assertEquals((0, 62), diff.lines[10][0:2])
self.assertEquals((61, 63), diff.lines[11][0:2])
# Look through the last chunk, which contains both add's and delete's.
self.assertEquals((81, 83), diff.lines[14][0:2])
self.assertEquals((82, 84), diff.lines[15][0:2])
self.assertEquals((83, 85), diff.lines[16][0:2])
self.assertEquals((84, 0), diff.lines[17][0:2])
self.assertEquals((0, 86), diff.lines[18][0:2])
self.assertEquals((0, 87), diff.lines[19][0:2])
self.assertEquals((85, 88), diff.lines[20][0:2])
self.assertEquals((86, 89), diff.lines[21][0:2])
self.assertEquals((87, 90), diff.lines[22][0:2])
# Check if a newly added file is correctly handled.
diff = parser.files['LayoutTests/platform/mac/fast/flexbox/box-orient-button-expected.checksum']
self.assertEquals(1, len(diff.lines))
self.assertEquals((0, 1), diff.lines[0][0:2])
    def test_git_mnemonicprefix(self):
        """The parser must accept every mnemonic a/b path-prefix pair git can emit."""
        p = re.compile(r' ([a|b])/')
        # Substitute each mnemonic pair into _PATCH and re-run the main test on it.
        prefixes = [
            { 'a' : 'i', 'b' : 'w' }, # git-diff (compares the (i)ndex and the (w)ork tree)
            { 'a' : 'c', 'b' : 'w' }, # git-diff HEAD (compares a (c)ommit and the (w)ork tree)
            { 'a' : 'c', 'b' : 'i' }, # git diff --cached (compares a (c)ommit and the (i)ndex)
            { 'a' : 'o', 'b' : 'w' }, # git-diff HEAD:file1 file2 (compares an (o)bject and a (w)ork tree entity)
            { 'a' : '1', 'b' : '2' }, # git diff --no-index a b (compares two non-git things (1) and (2))
        ]
        for prefix in prefixes:
            patch = p.sub(lambda x: " %s/" % prefix[x.group(1)], self._PATCH)
            self.test_diff_parser(diff_parser.DiffParser(patch.splitlines()))
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
092bfe373b9026226163a768874b1178c1e5cd13 | bcf88ba098b452619242a65d9f4c2e01b60fe029 | /angrmanagement/plugins/log_human_activities/log_human_activities.py | db6d062bb6aa22ace02c1a93087e789245d2f1e8 | [
"BSD-2-Clause"
] | permissive | dawnadvent/angr-management | 28f742d8d08c6342c68b34e70e2f604195cf0615 | 988e240b14acdf2e17e00c148839ae9ea8fab0e9 | refs/heads/master | 2023-08-07T11:54:41.986554 | 2021-09-24T11:52:11 | 2021-09-24T11:52:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,152 | py | import asyncio
import logging
import threading
import os
from time import sleep
from getmac import get_mac_address as gma
from tornado.platform.asyncio import AnyThreadEventLoopPolicy
from angrmanagement.config import Conf
import angrmanagement.ui.views as Views
from ..base_plugin import BasePlugin
l = logging.getLogger(__name__)
l.setLevel('INFO')
try:
from slacrs import Slacrs
from slacrs.model import HumanActivity, HumanActivityEnum
except ImportError as ex:
Slacrs = None # type: Optional[type]
class LogHumanActivitiesPlugin(BasePlugin):
"""
Log human activities
"""
    def __init__(self, *args, **kwargs):
        """Fail fast when Slacrs is unavailable, then set up logging and plugin state."""
        if not Slacrs:
            raise Exception("Skipping LogHumanActivities Plugin. Please install Slacrs.")
        super().__init__(*args, **kwargs)
        self._init_logger()
        # Slacrs DB session; (re)created by the background commit thread.
        self.session = None
        self.project_name = None
        self.project_md5 = None
        # Queue of HumanActivity records waiting to be committed.
        self._log_list = list()
        # MAC address serves as a (weak) user identifier.
        self.user = gma()
        # Polled by the commit thread; cleared in teardown().
        self.active = True
        self.slacrs_thread = None
    def _init_logger(self): # pylint:disable=no-self-use
        """Attach a file handler writing INFO logs to ~/am-logging/human_activities.log."""
        user_dir = os.path.expanduser('~')
        log_dir = os.path.join(user_dir, "am-logging")
        os.makedirs(log_dir, exist_ok=True)
        log_file = os.path.join(log_dir, 'human_activities.log')
        fh = logging.FileHandler(log_file)
        fh.setLevel('INFO')
        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        fh.setFormatter(formatter)
        # `l` is this module's logger.
        l.addHandler(fh)
def on_workspace_initialized(self, workspace):
self.slacrs_thread = threading.Thread(target=self._commit_logs)
self.slacrs_thread.setDaemon(True)
self.slacrs_thread.start()
def handle_variable_rename(self, func, offset: int, old_name: str, new_name: str, type_: str, size: int):
"""
Log a user's activity of variable renaming.
"""
variable_rename = HumanActivity(
project=self.project_name,
project_md5=self.project_md5,
category=HumanActivityEnum.VariableRename,
function=func._name,
old_name=old_name,
new_name=new_name,
created_by=self.user,
)
self._log_list.append(variable_rename)
l.info("Add variable rename sesssion to slacrs")
def handle_function_rename(self, func, old_name: str, new_name: str):
"""
Log a user's activity of function renaming.
"""
function_rename = HumanActivity(
project=self.project_name,
project_md5=self.project_md5,
category=HumanActivityEnum.FunctionRename,
addr=func.addr,
old_name=old_name,
new_name=new_name,
created_by=self.user,
)
self._log_list.append(function_rename)
l.info("Add function rename sesssion to slacrs, project name %s, old_name %s, new_name %s",
self.project_name, old_name, new_name)
def handle_click_block(self, qblock, event):
block_click = HumanActivity(
project=self.project_name,
project_md5=self.project_md5,
category=HumanActivityEnum.ClickBlock,
addr=qblock.addr,
created_by=self.user,
)
self._log_list.append(block_click)
l.info("Block %x is clicked", qblock.addr)
return False
def handle_click_insn(self, qinsn, event):
insn_click = HumanActivity(
project=self.project_name,
project_md5=self.project_md5,
category=HumanActivityEnum.ClickInsn,
addr=qinsn.addr,
created_by=self.user,
)
self._log_list.append(insn_click)
l.info("Instruction %x is clicked", qinsn.addr)
return False
def handle_raise_view(self, view):
view_name = view.__class__.__name__
func = self._get_function_from_view(view)
if func is not None and not func.am_none:
func_name = func._name
addr = func.addr
else:
func_name = None
addr = None
raise_view = HumanActivity(
project=self.project_name,
project_md5=self.project_md5,
category=HumanActivityEnum.RaiseView,
view=view_name,
created_by=self.user,
function=func_name,
addr=addr
)
self._log_list.append(raise_view)
l.info("View %s is raised with function %s", view_name, func_name)
def handle_comment_changed(self, addr: int, cmt: str, new: bool, decomp: bool):
"""
Log a user's activity of changing comment
@param new: T if a new comment. We don't log it in slacrs.
@param comp: T if comment is in decompiler view
"""
comment_change = HumanActivity(
project=self.project_name,
project_md5=self.project_md5,
category=HumanActivityEnum.CommentChanged,
addr=addr,
cmt=cmt,
decomp=decomp,
created_by=self.user,
)
self._log_list.append(comment_change)
l.info("Comment is added at %x", addr)
return False
def handle_project_initialization(self):
"""
Set project name
"""
if self.workspace.instance.img_name is not None:
self.project_name = self.workspace.instance.img_name
else:
filename = self.workspace.instance.project.filename
self.project_name = filename
self.project_md5 = self.workspace.instance.project.loader.main_object.md5.hex()
l.info("Set project md5 to %s", self.project_md5)
l.info("Set project name to %s", self.project_name)
@staticmethod
def _get_function_from_view(view):
if isinstance(view, Views.DisassemblyView):
return view._current_function
if isinstance(view, (Views.CodeView, Views.ProximityView)):
return view.function
return None
    def _commit_logs(self):
        """Background loop: every few seconds flush queued activity records to Slacrs."""
        l.info("database: %s", Conf.checrs_backend_str)
        asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy())
        while self.active:
            try:
                sleep(3)
                connector = self.workspace.plugins.get_plugin_instance_by_name("ChessConnector")
                if connector is None:
                    # chess connector does not exist
                    return None
                slacrs_instance = connector.slacrs_instance()
                if slacrs_instance is None:
                    # slacrs does not exist. continue
                    continue
                self.session = slacrs_instance.session()
                with self.session.no_autoflush:
                    # Drain the queue; each record is committed individually.
                    while len(self._log_list) > 0:
                        log = self._log_list.pop()
                        self.session.add(log)
                        self.session.commit()
                self.session.close()
            except Exception: # pylint:disable=broad-except
                # Best-effort telemetry: never let logging failures disturb the UI.
                pass
    def teardown(self):
        """Signal the background commit thread to stop after its current cycle."""
        self.active = False
| [
"[email protected]"
] | |
98677381f2349ff59c4a0dfd93aca8610f1311ae | b83c7e27a6f03523fd10ec850a303d7356ed899b | /firstAttempt.py | 70d7203b756f6d7341f4cee11ed1bae5d9b56207 | [] | no_license | aseth99/StonkMonitor | 63f884a66028f0bec4e039daef5fd15ae293f152 | 9b93dfa1286e695cb639b46bd0d98281f3287f1d | refs/heads/main | 2023-03-07T18:35:05.892400 | 2021-02-18T01:07:13 | 2021-02-18T01:07:13 | 337,616,100 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 723 | py | import bs4
import requests
from bs4 import BeautifulSoup
from urllib.request import urlopen
ticker = input("what stonk you want the price of? ")
url = "https://finance.yahoo.com/quote/"+ticker+"?p="+ticker+"&.tsrc=fin-srch"
try:
    page = urlopen(url)
except:
    # NOTE(review): bare except hides the real error, and `page` stays
    # undefined afterwards, so the BeautifulSoup call below would raise NameError.
    print('Error opening the URL')
soup = bs4.BeautifulSoup(page,'html.parser')
def parsePrice():
    # Scrape the quote-header spans (price, change, time) from Yahoo Finance.
    # NOTE(review): the local url/page are unused -- parsing happens on the
    # module-level `soup`; also relies on Yahoo's obfuscated CSS class names.
    url = "https://finance.yahoo.com/quote/"+ticker+"?p="+ticker+"&.tsrc=fin-srch"
    page = urlopen(url)
    price = soup.find('div',{'class': 'My(6px) Pos(r) smartphone_Mt(6px)'}).find_all('span')
    return price
stonkInfo = parsePrice()
print("Current Price: " + stonkInfo[0].text)
print("$$ Change (% Change) : " + stonkInfo[1].text)
print("Time: " + stonkInfo[2].text)
"[email protected]"
] | |
ffdb7b8da5e921e50cdf3f502f190c909e1b723e | 3026871549eaa98611d146d9229a92002caeedd1 | /po/case_list/user_manage/third_party_sta.py | 5ca945e53a038d90151dae6c260cae6274eb4f6f | [] | no_license | treeytang/PDT | b76ee1315b2a36bbe7c8cf9678f47e2ead80defe | 58470e0ca04dd9f89ec22cf41b225216f2b32d6b | refs/heads/master | 2020-04-11T07:08:55.663857 | 2019-11-28T09:55:14 | 2019-11-28T09:55:14 | 161,602,511 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,956 | py | from time import sleep
import unittest, random, sys,time
from comm import myunit, picture
from test_cases.user_manage.third_party_page import ThirdParty
import HTMLTestRunner
class Ticket_Query(myunit.MyTest):
    """UI tests for the third-party user management page, driven through the ThirdParty page object."""
    def user_login(self, username="", password=""):
        """Log into the system via the ThirdParty page object."""
        ThirdParty(self.driver).user_login(username, password)
    def test_third_user(self):
        '''Open the third-party user page and verify the number of entries shown.'''
        self.user_login(username="admin", password="admin")
        msg = ThirdParty(self.driver).page_show_num()
        self.assertEqual(msg.strip(), '17')
    def test_third_user_1(self):
        '''Open the third-party user page and verify the pagination display.'''
        self.user_login(username="admin", password="admin")
        msg = ThirdParty(self.driver).paging_verify()
        self.assertEqual(msg.strip(), '1')
    def test_add_user(self):
        '''Add and save a user with a valid number, alias, etc.; verify the add succeeds.'''
        self.user_login(username="admin", password="admin")
        msg = ThirdParty(self.driver).add_user()
        self.assertEqual(msg, True)
    def test_add_user_1(self):
        '''Add and save a user whose number consists of English letters.'''
        self.user_login(username="admin", password="admin")
        msg = ThirdParty(self.driver).add_user_1()
        self.assertEqual(msg, True)
    def test_add_user_2(self):
        '''Add and save a user whose number mixes digits and English letters.'''
        self.user_login(username="admin", password="admin")
        msg = ThirdParty(self.driver).add_user_2()
        self.assertEqual(msg, True)
    def test_add_user_3(self):
        '''Add and save a user whose number consists of Chinese characters.'''
        self.user_login(username="admin", password="admin")
        msg = ThirdParty(self.driver).add_user_3()
        self.assertEqual(msg, True)
    def test_add_user_4(self):
        '''Add and save a user whose number consists of symbols.'''
        self.user_login(username="admin", password="admin")
        msg = ThirdParty(self.driver).add_user_4()
        self.assertEqual(msg, True)
    def test_add_user_5(self):
        '''Add a user that already exists and save; a warning popup should appear.'''
        self.user_login(username="admin", password="admin")
        msg = ThirdParty(self.driver).add_user_5()
        self.assertEqual(msg, True)  # If the add lands on the list page and the user is found, no popup appeared.
    def test_del_user(self):
        '''Delete a user number and verify the user is removed.'''
        self.user_login(username="admin", password="admin")
        msg = ThirdParty(self.driver).del_user()
        self.assertEqual(msg, True)
"[email protected]"
] | |
3474d91f1df53d1b3284ac83ec374c3631c941ab | 62286f3495132b40ef081e0faeef0fd38c91e7ba | /programmes/merge.py | 0497b115672f6129b96a7e3b139eaecf72f99663 | [] | no_license | fali007/python | 373916b8ee9ee75b380d36eb368cf402e7cf2557 | d6128267d9217284de700cf5ecba9cc5770c97ec | refs/heads/master | 2021-07-24T18:37:01.509921 | 2020-05-24T19:38:02 | 2020-05-24T19:38:02 | 178,662,132 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 522 | py | def merge(array):
if(len(array)>1):
mid=len(array)//2
left_half=array[:mid]
right_half=array[mid:]
merge(left_half)
merge(right_half)
i=0
j=0
k=0
while i<len(left_half) and j<len(right_half):
if(left_half[i]>right_half[j]):
array[k]=right_half[j];
j+=1
else:
array[k]=left_half[i];
i+=1
k+=1
while i<len(left_half):
array[k]=left_half[i]
i+=1
k+=1
while j<len(right_half):
array[k]=right_half[j]
j+=1
k+=1
arr=[9,5,6,7,1,4,2,3,0,8]
merge(arr)
print(arr)
| [
"[email protected]"
] | |
31a569e9f424a8e17c8ff58f279f8c56d5f966f4 | 3b0ae1a9a07ab2561b355088f895ebe480fb84f2 | /apitest/apiauto_testcase_unittest.py | eebe24c1d2f36066a8a3f86bdfb6e6fb4f32d02d | [] | no_license | lishuibo/testplatform | 0bb45ecbe8099bfbc924e012e61dd408aafa71c5 | 8f83bf5ae886d3f22f45b4da2535abf8c39ea5f4 | refs/heads/master | 2020-03-28T03:59:14.968199 | 2018-10-26T09:38:54 | 2018-10-26T09:38:54 | 147,687,104 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,216 | py | __author__ = 'Administrator'
import requests, time, sys, re
import urllib, zlib
import pymysql
from apitest import HTMLTestRunner
import unittest
from trace import CoverageResults
import json
from idlelib.rpc import response_queue
from time import sleep
HOSTNAME = '127.0.0.1'
class ApiFlow(unittest.TestCase):
    """Drive API test cases stored in the `apitest_apistep` MySQL table."""
    def setUp(self):
        # Short pause between cases to avoid hammering the API under test.
        time.sleep(1)
    def test_readSQLcase(self):
        """Fetch all steps of api test #1 from MySQL and run each through interfaceTest."""
        sql = 'SELECT id,apiname,apiurl,apimethod,apiparamvalue,apiresult,apistatus from apitest_apistep WHERE apitest_apistep.apitest_id=1;'
        conn = pymysql.connect(user='root', passwd='123456', db='autotest', port=3306, host='127.0.0.1', charset='utf8')
        cursor = conn.cursor()
        aa = cursor.execute(sql)
        info = cursor.fetchmany(aa)
        # Each fetched row is executed as a one-element case list.
        for i in info:
            case_list = []
            case_list.append(i)
            interfaceTest(case_list)
        conn.commit()
        cursor.close()
        conn.close()
    def tearDown(self):
        time.sleep(1)
def interfaceTest(case_list):
    """Run one DB-driven API test case per entry of `case_list`.

    Each case is a row shaped (id, name, url, method, param, expected, status).
    The HTTP method selects the request branch; the response is checked with
    readRes() and the verdict is written back to the database via
    writeResult()/caseWriteResult()/writeBug().
    Returns an error string when a case row is malformed, otherwise None.
    """
    res_flags = []
    request_urls = []
    responses = []
    # Placeholder patterns substituted into url/param before sending.
    strinfo = re.compile('{TaskId}')
    strinfo1 = re.compile('{AssetId}')
    strinfo2 = re.compile('{PointId}')
    assetinfo = re.compile('{assetno}')
    tasknoinfo = re.compile('{taskno}')
    schemainfo = re.compile('{schema}')
    for case in case_list:
        try:
            case_id = case[0]
            interface_name = case[1]
            url = case[2]
            method = case[3]
            param = case[4]
            res_check = case[5]
        except Exception as e:
            return '测试用例格式不正确%s' % e
        if param == '':
            new_url = 'http://' + 'api.test.com.cn' + url
        elif param == 'null':
            new_url = 'http://' + url
        else:
            # NOTE(review): PointId is not defined anywhere in this module, so
            # this branch raises NameError when it is reached -- confirm intent.
            url = strinfo.sub(TaskId, url)
            param = strinfo2.sub(PointId, param)
            param = strinfo.sub(TaskId, param)
            param = tasknoinfo.sub(taskno, param)
            new_url = 'http://' + '127.0.0.1' + url
        request_urls.append(new_url)
        if method.upper() == 'GET':
            headers = {'Authorization': '', 'Content-Type': 'application/json'}
            if '=' in urlParam(param):
                data = None
                # Bug fix: these logs concatenated str with bytes (via .encode),
                # which raises TypeError on Python 3.
                print(str(case_id) + 'request is get' + new_url + '?' + urlParam(param))
                results = requests.get(new_url + '?' + urlParam(param), data, headers=headers).text
                print('response is get ' + results)
                responses.append(results)
                res = readRes(results, '')
            else:
                print('request is get ' + new_url + ' body is ' + urlParam(param))
                data = None
                req = urllib.request.Request(url=new_url, data=data, headers=headers, method='GET')
                results = urllib.request.urlopen(req).read()
                print('response is get')
                print(results)
                res = readRes(results, res_check)
            print(res)
            if 'pass' == res:
                writeResult(case_id, '1')
                res_flags.append('pass')
                caseWriteResult(case_id, '1')
            else:
                res_flags.append('fail')
                writeResult(case_id, '0')
                caseWriteResult(case_id, '0')
        # Bug fix: `method.upper == 'PUT'` compared the bound method object to a
        # string, so the PUT branch could never execute.
        if method.upper() == 'PUT':
            headers = {'Host': HOSTNAME, 'Connection': 'keep-alive', 'CredentialId': id,
                       'Content-Type': 'application/json'}
            body_data = param
            results = requests.put(url=url, data=body_data, headers=headers)
            responses.append(results)
            res = readRes(results, res_check)
            if 'pass' == res:
                writeResult(case_id, 'pass')
                res_flags.append('pass')
            else:
                res_flags.append('fail')
                writeResult(case_id, 'fail')
                writeBug(case_id, interface_name, new_url, results, res_check)
            try:
                preOrderSN(results)
            except:
                print('ok')
        if method.upper() == 'PATCH':
            headers = {'Authorization': 'Credential ' + id, 'Content-Type': 'application/json'}
            data = None
            results = requests.patch(new_url + '?' + urlParam(param), data, headers=headers).text
            responses.append(results)
            res = readRes(results, res_check)
            if 'pass' == res:
                writeResult(case_id, 'pass')
                res_flags.append('pass')
            else:
                res_flags.append('fail')
                writeResult(case_id, 'fail')
                writeBug(case_id, interface_name, new_url, results, res_check)
            try:
                preOrderSN(results)
            except:
                print('ok')
        if method.upper() == 'POST':
            headers = {'Authorization': 'Credential ' + id, 'Content-Type': 'application/json'}
            if '=' in urlParam(param):
                data = None
                results = requests.post(new_url + '?' + urlParam(param), data, headers=headers).text
                print('response is post' + results)
                responses.append(results)
                res = readRes(results, '')
            else:
                print(str(case_id) + 'request is' + new_url + 'body is' + urlParam(param))
                results = requests.post(new_url, data=urlParam(param).encode('utf-8'), headers=headers).text
                print('response is post' + results)
                responses.append(results)
                res = readRes(results, res_check)
            if 'pass' == res:
                writeResult(case_id, '1')
                res_flags.append('pass')
            else:
                res_flags.append('fail')
                writeResult(case_id, '0')
                writeBug(case_id, interface_name, new_url, results, res_check)
            try:
                TaskId(results)
            except:
                print('ok1')
            try:
                PointId(results)
            except:
                print('ok2')
def readRes(res, res_check):
    """Check that every expected fragment of `res_check` appears in a response body.

    `res` may be bytes (urllib) or str (requests' .text) -- the original
    unconditionally called .decode() and crashed with AttributeError on str.
    `res_check` is a ';'-separated list of `key=value` fragments; JSON-style
    `"key":"value"` / `"key":value` pairs in the body are normalised to
    `key=value` form before the containment check.
    Returns 'pass' on success, otherwise an error message naming the first
    missing fragment.
    """
    if isinstance(res, bytes):
        res = res.decode()
    res = res.replace('":"', "=").replace('":', "=")
    for s in res_check.split(';'):
        if s not in res:
            return '错误,返回参数和预期结果不一致' + s
    return 'pass'
def urlParam(param):
param1 = param.replace('"', '"')
return param1
def CredentialId():
    """Sign in with the test account and cache the credential id in the global `id`.

    NOTE(review): the global deliberately shadows the builtin `id`; other
    helpers read it to build their Authorization headers.
    """
    global id
    url = 'http://' + 'api.test.com.cn' + '/api/Security/Authentication/Signin/web'
    body_data = json.dumps({"Identity": 'test', "Password": 'test'})
    headers = {'Connection': 'keep-alive', 'Content-Type': 'application/json'}
    response = requests.post(url=url, data=body_data, headers=headers)
    data = response.text
    # Pull the credential out of the JSON body with a regex.
    regx = '.*"CredentialId":"(.*)","Scene"'
    pm = re.search(regx, data)
    id = pm.group(1)
def preOrderSN(results):
    """Extract the preOrderSN value from a response body.

    On a match the value is returned as bytes and also stored in the module
    global `preOrderSN` (rebinding this very function -- preserved from the
    original design); when the field is absent, False is returned.
    """
    global preOrderSN
    match = re.search('.*"preOrderSN":"(.*)","toHome"', results)
    if not match:
        return False
    preOrderSN = match.group(1).encode('utf-8')
    return preOrderSN
def TaskId(results):
    """Pull the TaskId field out of a response body.

    Returns the value as bytes and caches it in the module global `TaskId`
    (which rebinds this function itself -- kept to match the original
    behaviour); returns False when the field is absent.
    """
    global TaskId
    match = re.search('.*"TaskId":(.*),"PlanId"', results)
    if not match:
        return False
    TaskId = match.group(1).encode('utf-8')
    return TaskId
def taskno(param):
    """Build a timestamp-based task number such as 'task_1612345678'.

    The argument is ignored (it exists so this can serve as a re.sub
    callback). The result is also cached in the module global `taskno`,
    rebinding this function, exactly as the original did.
    """
    global taskno
    stamp = int(time.time())
    taskno = 'task_{}'.format(stamp)
    return taskno
def writeResult(case_id, result):
    """Persist a step verdict ('1' pass / '0' fail) into apitest_apistep."""
    result = result.encode('utf-8')
    now = time.strftime("%Y-%m-%d %H:%M:%S")
    sql = "UPDATE apitest_apistep SET apitest_apistep.apistatus=%s,apitest_apistep.create_time=%s WHERE apitest_apistep.apitest_id=%s;"
    param = (result, now, case_id)
    print('api autotest result is ' + result.decode())
    # Opens a fresh connection per call; parameters are bound, not interpolated.
    conn = pymysql.connect(user='root', passwd='123456', db='autotest', port=3306, host='127.0.0.1', charset='utf8')
    cursor = conn.cursor()
    cursor.execute(sql, param)
    conn.commit()
    cursor.close()
    conn.close()
def caseWriteResult(case_id, result):
result = result.encode('utf-8')
now = time.strftime("%Y-%m-%d %H:%M:%S")
sql = "UPDATE apitest_apitest SET apitest_apitest.apitestresult=%s,apitest_apitest.create_time=%s WHERE apitest_apitest.id=%s;"
param = (result, now, case_id)
print('api autotest result is ' + result.decode())
conn = pymysql.connect(user='root', passwd='123456', db='autotest', port=3306, host='127.0.0.1', charset='utf8')
cursor = conn.cursor()
cursor.execute(sql, param)
conn.commit()
cursor.close()
conn.close()
def writeBug(bug_id, interface_name, request, response, res_check):
interface_name = interface_name.encode('utf-8')
res_check = res_check.encode('utf-8')
now = time.strftime("%Y-%m-%d %H:%M:%S")
bugname = str(bug_id) + '_' + interface_name.decode() + '_出错了'
bugdetail = '[请求数据]<br/>' + request.decode() + '<br/>' + '[预期结果]<br/>' + res_check.decode() + '<br/>' + '<br/>' + '[响应数据]<br/>' + '<br/>' + response.decode()
print(bugdetail)
sql = 'INSERT INTO bug_bug(bugname, bugdetail, bugstatus, buglevel, bugcreater, bugassign, create_time, product_id) VALUES ("%s","%s","1","1","李天乐","李天乐","%s","2");' % (
bugname, pymysql.escape_string(bugdetail), now)
conn = pymysql.connect(user='root', passwd='123456', db='autotest', port=3306, host='127.0.0.1', charset='utf8')
cursor = conn.cursor()
cursor.execute(sql)
conn.commit()
cursor.close()
conn.close()
if __name__ == '__main__':
now = time.strftime("%Y-%m-%d-%H_%M_%S", time.localtime(time.time()))
testunit = unittest.TestSuite()
testunit.addTest(ApiFlow("test_readSQLcase"))
filename = "F:\\pycharmwork\\autotest\\report\\" + "apitest_report.html"
fp = open(filename, 'wb')
runner = HTMLTestRunner.HTMLTestRunner(stream=fp, title=u"流程接口测试报告", description=u"流程场景接口")
runner.run(testunit) | [
"[email protected]"
] | |
1776e7639c66d4c60a2db17dacbd5e09c04f2af9 | 19e92659e6c6b1230951656f6eec8309d9707c59 | /server.py | 235d22635e78c2037d24183984688db704cf6050 | [] | no_license | basketcase03/chatroom | d0dd6167902cf4a39c4bf3efd2890a6dffb76674 | b099d840f0a34c54a798caa41bceeb6664ae1924 | refs/heads/main | 2023-02-04T17:48:09.553473 | 2020-12-29T12:14:02 | 2020-12-29T12:14:02 | 325,278,278 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,309 | py | import threading
import socket
host = '127.0.0.1'
port = 55555
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((host,port))
server.listen()
clients = []
nicknames = []
def broadcast(message):
for client in clients:
client.send(message)
def handle(client):
while True:
try:
message = client.recv(1024)
broadcast(message)
except:
index = clients.index(client)
clients.remove(client)
client.close()
nickname = nicknames[index]
broadcast("{} left the chat!".format(nickname).encode('ascii'))
nicknames.remove(nickname)
break
def receive():
while True:
client,address = server.accept()
print("Connected with {}".format(str(address)))
client.send('NICK'.encode('ascii'))
nickname = client.recv(1024).decode('ascii')
nicknames.append(nickname)
clients.append(client)
print(f'Nickname of the client is {nickname}!')
broadcast("{} joined the chat!".format(nickname).encode('ascii'))
client.send('Conncted to the server!'.encode('ascii'))
thread = threading.Thread(target=handle,args=(client,))
thread.start()
print("Server is listening...")
receive()
| [
"[email protected]"
] | |
b80c80132e0cb7eecb8c0bc6ba79cc3e8ad004cc | 893af5bd2a75a37634d970eb67288bb78cad6a04 | /723e_server/django_723e/api/views.py | 6f5286b0c79fa0ffb7feb8b1e3a3504a2f441987 | [
"MIT"
] | permissive | Natim/723e | 9e0c0f3cf2884283ab377bbc51d40df5b040d778 | ed6a5f102b45f5d7ab723209eecb87bf306b23d8 | refs/heads/master | 2021-01-17T13:22:34.513449 | 2015-08-24T09:36:01 | 2015-08-24T09:36:01 | 41,594,079 | 0 | 0 | null | 2015-08-29T13:20:22 | 2015-08-29T13:20:21 | HTML | UTF-8 | Python | false | false | 1,127 | py | # -*- coding: utf-8 -*-
import json
from django.http import HttpResponse
from django.contrib.auth.forms import AuthenticationForm
from rest_framework.authtoken.models import Token
from django.core import serializers
from rest_framework.permissions import IsAuthenticated
from rest_framework.decorators import api_view, permission_classes
@api_view(["GET"])
def api_init(request):
    """Report the API version and, for authenticated users, their user id, as JSON."""
    result = {}
    # Return API Version.
    result['api_version'] = "1.0.0"
    if request.user.is_authenticated():
        result['is_authenticated'] = True
        # If the user is authenticated, return some details that might be
        # useful, e.g. for displaying the name or sending mail.
        result['id'] = request.user.id
        # result['username'] = request.user.username
        # result['first_name'] = request.user.first_name
        # result['last_name'] = request.user.last_name
        # result['email'] = request.user.email
    else:
        result['is_authenticated'] = False
    # Return json format string.
    j = json.dumps(result, separators=(',',':'))
    return HttpResponse(j, content_type='application/json')
| [
"[email protected]"
] | |
5c8ddac3358536b6bea399a80fcc5b05e0dafe3b | 5600d31c62b629b95ef971ba1fa7859f3facbea1 | /src/assignment5/no2.py | 0ff3af83af7e85b932de42be29bf40bbf265861b | [] | no_license | giginet/PatternMatch | 18864e480eb12c1e94abc631b58758d5ffd596da | eb843eb4847acf32ff72bd8423cb9d3bd838ecb3 | refs/heads/master | 2021-01-01T05:30:59.343337 | 2011-08-11T20:46:14 | 2011-08-11T20:46:14 | 2,070,055 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,922 | py | # -*- coding: utf-8 -*-
#
# assignment5.no2
# created by 25090335 Kohki Miki on 2011/08/12
#
from PIL import Image
from assignment4.no1 import DensityDistribution
class BinaryImage(object):
    """Wrap a PIL grayscale image with thresholding/diff helpers.

    Thresholds are expressed on a normalised 0..XMAX scale (`xc`) that maps
    linearly onto the 0..255 gray-level scale (`kc`).
    """
    # Upper bound of the normalised threshold scale.
    XMAX = 5.
    def __init__(self, image):
        self.image = image
    @classmethod
    def to_xc(cls, kc):
        # Gray level (0..255) -> normalised threshold (0..XMAX).
        return cls.XMAX * kc/256.0
    @classmethod
    def to_kc(cls, xc):
        # Normalised threshold (0..XMAX) -> gray level (0..255).
        return 256.0 * xc/cls.XMAX
    def binarization(self, xc):
        """Return a black/white copy: pixels below the threshold become 0, the rest 255."""
        kc = self.to_kc(xc)
        binarization = Image.new('L', self.image.size)
        binarization.putdata([0 if p < kc else 255 for p in self.image.getdata()])
        return binarization
    def diff(self, other):
        """Return the per-pixel absolute-difference image against `other`."""
        diff = Image.new('L', self.image.size)
        diff.putdata([abs(b1 - b2) for b1, b2 in zip(self.image.getdata(), other.getdata())])
        return diff
    def get_change_point(self, other):
        """Find the threshold whose binarised diff diverges most from an all-black image.

        NOTE(review): `seq.reverse()` on a range only works on Python 2 (where
        range returns a list), and `max` shadows the builtin -- confirm before
        reusing on Python 3.
        """
        seq = range(256)
        seq.reverse()
        max = -1
        max_x = 0
        for kc in seq:
            xc = BinaryImage.to_xc(kc)
            pb = self.binarization(xc)
            qb = other.binarization(xc)
            d = BinaryImage(pb).diff(qb)
            dd = DensityDistribution(d)
            black = DensityDistribution(Image.new('L', self.image.size, 0))
            divergence = dd.divergence(black)
            if abs(divergence) > max:
                max = abs(divergence)
                max_x = xc
        return max_x
if __name__ == '__main__':
p = BinaryImage(Image.open(r"../../Resources/CIMG0209.gif"))
q = BinaryImage(Image.open(r"../../Resources/CIMG0210.gif"))
r = BinaryImage(Image.open(r"../../Resources/CIMG0211.gif"))
pb = p.binarization(3.73)
qb = q.binarization(3.73)
rb = r.binarization(3.06)
pqd = BinaryImage(pb).diff(qb)
prd = BinaryImage(pb).diff(rb)
map(lambda i: i.show(), (pb, qb, rb, pqd, prd)) | [
"[email protected]"
] | |
234903f9a230f61c4fad2c3d34720a41e3529356 | 4d25eabd9e75fa2c433b9b0e6bc792dfeaeb1655 | /TERMINAL/c02.py | b21b950fdf921b332d3a6270b51204887842dc2c | [] | no_license | kwanderer/DMI | cbcceaaa34837064e6c4ba4e21f726080c5513bd | 83748be54d072a501354659377705e5362dafd67 | refs/heads/master | 2021-09-14T07:25:10.507226 | 2018-05-09T17:10:08 | 2018-05-09T17:10:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 128 | py | f = open("c01.txt","rb")
f.seek(0)
# Dump the file byte by byte, printing each byte as hex and as a character.
# (Python 2 syntax: print statements.)
while 1:
    b = f.read(1)
    if not b:
        # Empty read means EOF.
        break
    print hex(ord(b))
    print chr(ord(b))
f.close()
| [
"[email protected]"
] | |
952409350fb7d319466c166395028df75823dadc | 25355a24e32351d3acbffa781e25702802ddff20 | /app/auth.py | f2b22196a293e1da199f9faecc16e1bc1c31a147 | [] | no_license | khoi-backyard/flask-tutorial | e24dae386727bd3cf0e9e7a719903a2de0eea870 | c72b3041b892840e2dde0848c238f387be95cf1d | refs/heads/master | 2021-02-17T07:35:09.263323 | 2020-03-05T07:24:08 | 2020-03-05T07:24:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,503 | py | import functools
from flask import Blueprint, flash, g, redirect, render_template, request, session, url_for
from werkzeug.security import check_password_hash, generate_password_hash
from app.db import get_db
bp = Blueprint('auth', __name__, url_prefix='/auth')
@bp.before_app_request
def load_logged_in_user():
    """Before each request, load the logged-in user's DB row onto `g.user` (or None)."""
    user_id = session.get('user_id')
    if user_id is None:
        g.user = None
    else:
        g.user = get_db().execute(
            'SELECT * FROM user WHERE id = ?', (user_id,)
        ).fetchone()
@bp.route('/register', methods=('GET', 'POST'))
def register():
    """Register a new user; on success redirect to the login page."""
    if request.method == 'POST':
        username = request.form['username']
        password = request.form['password']
        db = get_db()
        error = None
        # Validate: both fields present and the username not already taken.
        if not username:
            error = 'Username is required.'
        elif not password:
            error = 'Password is required.'
        elif db.execute(
            'SELECT id FROM user WHERE username = ?', (username,)
        ).fetchone() is not None:
            error = 'User {} is already registered.'.format(username)
        if error is None:
            # Store only the salted hash, never the plain password.
            db.execute(
                'INSERT INTO user (username, password) VALUES (?, ?)',
                (username, generate_password_hash(password))
            )
            db.commit()
            return redirect(url_for('auth.login'))
        flash(error)
    return render_template('auth/register.html')
@bp.route('/login', methods=('GET', 'POST'))
def login():
    """Log a user in by storing their id in the session."""
    if request.method == 'POST':
        username = request.form['username']
        password = request.form['password']
        db = get_db()
        error = None
        user = db.execute(
            'SELECT * FROM user WHERE username = ?', (username,)
        ).fetchone()
        # Validate credentials against the stored password hash.
        if user is None:
            error = 'Incorrect username.'
        elif not check_password_hash(user['password'], password):
            error = 'Incorrect password.'
        if error is None:
            # Fresh session; the stored id marks the user as logged in.
            session.clear()
            session['user_id'] = user['id']
            return redirect(url_for('index'))
        flash(error)
    return render_template('auth/login.html')
@bp.route('/logout')
def logout():
    """Clear the session and return to the index page."""
    session.clear()
    return redirect(url_for('index'))
def login_required(view):
    """Decorator: redirect anonymous users to the login page before running `view`."""
    @functools.wraps(view)
    def wrapped_view(**kwargs):
        # `g.user` is populated per-request by load_logged_in_user.
        # (Removed a leftover debug `print(g.user)` here.)
        if g.user is None:
            return redirect(url_for('auth.login'))
        return view(**kwargs)
    return wrapped_view
| [
"[email protected]"
] | |
a1fa689511eb2f16f6bc8e9a66cbe49bb4021baf | 95d18e82eabb7f122a835a58b94260ae84385505 | /4/4.2_homework/task_7.py | ff419e889198c8c94d853b2c65f55db5917ca4c6 | [] | no_license | kkravchenkodev/main_academy | 161c0b6eae0bb06745f6a689def356a8a29806aa | 5ccd5fbca88141180f2ec6adabac7a8b4a0b5d7a | refs/heads/main | 2023-03-03T15:49:06.992816 | 2021-02-15T15:29:54 | 2021-02-15T15:29:54 | 316,764,151 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 746 | py | """
Написать программу, которая принимает 2 аргумента: список элементов любого типа и элемент любого типа.
Программа возвращает кол-во вхождений элемента в список. (аналог lst.count(n))
input: [1, 2, 3, "one", "onetwo", "one"]
result: 2
"""
input_data = [1, 2, 3, "one", "onetwo", "one", 2, 3, 4, 2, 6, 0, 1.2, 3.4, 12, 3, 1.2]
def custom_counter(lst, element):
    """Return how many items of *lst* compare equal to *element*.

    Behaves like ``lst.count(element)``: a plain ``==`` comparison with no
    type filtering. (The previous ``isinstance`` pre-filter broke that
    contract: it wrongly dropped cross-type matches such as ``1 == 1.0``
    while never excluding ``True`` when counting ``1``.)
    """
    count = 0
    for item in lst:
        if item == element:
            count += 1
    return count
lst2 = custom_counter(input_data, 3)
print(lst2)
| [
"[email protected]"
] | |
4f2109f02224b5f6894716723d8361c39bdefa70 | f353f19174ea2fd8569c8f46bb2d99142fff919c | /Basic Part I/Exercise-17.py | 4fae615a0edb7478de7a74f6cbf6700ce0981e76 | [] | no_license | self-study-squad/Python-examples | a283d9efe47d71d2e134e8fc2e919dccffe5108a | 07de281fbcc9fb7f421b89a74ff24bafc78a5d4b | refs/heads/master | 2022-12-28T01:35:03.927338 | 2020-10-09T06:41:16 | 2020-10-09T06:41:16 | 298,136,591 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 175 | py | def nearthousand(n):
return (abs(1000-n)<=100) or (abs(2000-n)<=100)
print(nearthousand(1000))
print(nearthousand(900))
print(nearthousand(800))
print(nearthousand(2200)) | [
"[email protected]"
] | |
c6352e740b138c67bc561fec69e91924a683d988 | 0517a260198fbdba92292b9a79e6c1783934161d | /houghTransform.py | c56691c126ec6e839a75f6c645cf21e386f66454 | [] | no_license | AkhmadMuzanni/HoughTransform | bad75f7e8d862c171a0b6bacf9efcb3ee46f50cc | 5e0bf783a88c72721bf115b01860561b15785a87 | refs/heads/master | 2020-03-15T16:07:29.169970 | 2018-05-05T08:26:14 | 2018-05-05T08:26:14 | 132,228,804 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,191 | py | import cv2
import numpy as np
import matplotlib.pyplot as plt
img = cv2.imread('Monas.jpg')
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray,50,150,apertureSize = 3)
np.set_printoptions(threshold='nan')
baris,kolom = edges.shape
print edges.shape
cv2.namedWindow("Monas.jpg",cv2.WINDOW_NORMAL)
cv2.imshow('Monas.jpg',edges)
def hough_line(img, akurasi_pixel=1):
    """Compute the straight-line Hough transform of a binary edge image.

    Parameters
    ----------
    img : 2-D array
        Edge map; every non-zero pixel casts one vote per theta bin.
    akurasi_pixel : float, optional
        Angular resolution of the theta axis, in degrees.

    Returns
    -------
    (acumulator, arraytheta, arrayrho)
        Vote accumulator indexed by [rho_index, theta_index], plus the
        theta axis (radians) and the rho axis (pixels).
    """
    # Take the image size from the argument itself instead of the
    # module-level ``baris``/``kolom`` globals, so the function works for
    # any image, not only the one loaded at import time.
    baris_img, kolom_img = img.shape[:2]
    arraytheta = np.deg2rad(np.arange(-90.0, 90.0, akurasi_pixel))
    panjangdiagonal = int(np.round(np.hypot(baris_img, kolom_img)))
    arrayrho = np.linspace(-panjangdiagonal, panjangdiagonal, panjangdiagonal * 2)
    panjang_theta = len(arraytheta)
    cos_t = np.cos(arraytheta)
    sin_t = np.sin(arraytheta)
    # uint64 accumulator: the original uint8 silently wrapped around as
    # soon as 256 pixels voted for the same (rho, theta) cell.
    acumulator = np.zeros((2 * panjangdiagonal, panjang_theta), dtype=np.uint64)
    # (row, col) indexes of the edge pixels
    yindeks, xindeks = np.nonzero(img)
    theta_idx = np.arange(panjang_theta)
    for x, y in zip(xindeks, yindeks):
        # Vectorized vote: one rho per theta bin for this edge pixel.
        rho = panjangdiagonal + np.round(x * cos_t + y * sin_t).astype(int)
        acumulator[rho, theta_idx] += 1
    return acumulator, arraytheta, arrayrho
def show_hough_line(accumulator, thetas, rhos):
    """Display the Hough accumulator as a heat map (theta vs. rho)."""
    import matplotlib.pyplot as plt
    # x axis spans theta in degrees, y axis spans rho; 'jet' colormap
    # stretched to fill the axes.
    plt.imshow(accumulator, cmap='jet',aspect='auto',
        extent=[np.rad2deg(thetas[-1]), np.rad2deg(thetas[0]), rhos[-1], rhos[0]],)
    # plt.axis('off')
    plt.show()
a, t, r = hough_line(edges)
hasil = []
print a
print t
print r
b,k = a.shape
print a.shape
for i in range (b):
for j in range (k):
if a[i][j] > 15:
indexi =np.abs(np.round(r[i]))
indexj =(t[j])
hasil.append([(indexi, indexj)])
print hasil
hasilarray = np.asarray(hasil)
print hasilarray
lines = cv2.HoughLines(edges,1,np.pi/180,200)
print lines
for rho,theta in hasilarray[:,0,:]:
a = np.cos(theta)
b = np.sin(theta)
x0 = a*rho
y0 = b*rho
x1 = int(x0 + 1000*(-b))
y1 = int(y0 + 1000*(a))
x2 = int(x0 - 1000*(-b))
y2 = int(y0 - 1000*(a))
cv2.line(img,(x1,y1),(x2,y2),(0,0,255),2)
cv2.imwrite('best.jpg',img)
print 'finish'
show_hough_line(a,t,r)
# for i in range(a[0]):
# for
| [
"[email protected]"
] | |
638d43eae7682711cbff3acc82c3cca1ae006d93 | b1ba5707a5cbe918d33bc2082b3eb4ff1378c060 | /SDPython/samples/plugins/custom_graph/custom_graph.py | 0f3cf3c55f3ba01049983183476f61e58c69ba8e | [] | no_license | qq781217732/SubstanceDev | 2eb1d9ed48d477cf70c7bfdac2103bb884e9204c | b9ffab0a1b8f3c01783259074940b2712a8142b8 | refs/heads/master | 2023-03-26T00:43:35.047305 | 2021-03-01T04:12:28 | 2021-03-01T04:12:28 | 342,539,111 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,012 | py | import sd
import os
from sd.api.mdl.sdmdlgraphdefinition import *
import logging
logger = logging.getLogger(__name__)
class CustomGraph:
    """Registers the 'custom_graph' MDL-based graph type with Substance Designer."""
    @staticmethod
    def init(aSDGraphDefinitionId = 'custom_graph'):
        """Create (or reuse) the graph definition and populate it.

        Adds this plugin's MDL root path, creates the graph definition if
        it does not exist yet, then registers a selection of builtin MDL
        types plus every definition from the plugin's own MDL modules.
        """
        context = sd.getContext()
        sdApp = context.getSDApplication()
        # Add MDL Root path
        currentScriptAbsPath = os.path.abspath(os.path.split(__file__)[0])
        mdlRootPath = os.path.join(currentScriptAbsPath, 'data', 'mdl')
        sdApp.getModuleMgr().addRootPath('mdl', mdlRootPath)
        # Create new Graph definition
        graphDefinitionMgr = sdApp.getSDGraphDefinitionMgr()
        assert(graphDefinitionMgr)
        # Add Graph Definition if it does not already exist
        sdGraphDefinitionId = aSDGraphDefinitionId
        sdGraphDefinition = graphDefinitionMgr.getGraphDefinitionFromId(sdGraphDefinitionId)
        if not sdGraphDefinition:
            sdGraphDefinition = SDMDLGraphDefinition.sNew(sdGraphDefinitionId)
            assert(sdGraphDefinition)
            assert(sdGraphDefinition.getId() == sdGraphDefinitionId)
            sdGraphDefinition.setLabel('Custom Graph')
            sdGraphDefinition.setIconFile(os.path.join(os.path.abspath(os.path.split(__file__)[0]), 'custom_graph_icon.png'))
            # Add the new graph definition
            graphDefinitionMgr.addGraphDefinition(sdGraphDefinition)
        else:
            assert(sdGraphDefinition.getId() == sdGraphDefinitionId)
        # Add some node definitions to the graph definition
        sdModuleMgr = sdApp.getModuleMgr()
        sdModules = sdModuleMgr.getModules()
        selectedDefinitions = []
        selectedTypes = []
        for sdModule in sdModules:
            sdModuleId = sdModule.getId()
            # Discard non 'mdl' modules
            if not sdModuleId.startswith('mdl::'):
                continue
            # Add some definitions from the MDL 'builtin' module
            if sdModuleId == 'mdl::<builtins>':
                # Add some base types
                baseTypes = ['bool', 'bool2', 'bool3', 'bool4',
                             'int', 'int2', 'int3', 'int4',
                             'float', 'float2', 'float3', 'float4',
                             'double', 'double2', 'double3', 'double4',
                             'string', 'mdl::texture_2d']
                for sdType in sdModule.getTypes():
                    sdTypeId = sdType.getId()
                    logger.debug(sdTypeId)
                    if sdTypeId in baseTypes:
                        # Add some base Types
                        selectedTypes.append(sdType)
                    elif sdTypeId.startswith('matrix<'):
                        # Add matrices
                        selectedTypes.append(sdType)
                continue
            # Add all definitions from this plugin's own MDL modules
            if sdModuleId.startswith('mdl::custom_graph'):
                for sdDefinition in sdModule.getDefinitions():
                    selectedDefinitions.append(sdDefinition)
                continue
        # Add the selected types
        for sdType in selectedTypes:
            # existingNodeDefinition = sdGraphDefinition.getNodeDefinitionFromId(definition.getId())
            # if existingNodeDefinition:
            #     sdGraphDefinition.removeNodeDefinition(existingNodeDefinition)
            logger.debug('[%s] Adding Type "%s"' % (sdGraphDefinition.getId(), sdType.getId()))
            sdGraphDefinition.addType(sdType)
        # Add the selected node definitions, replacing stale ones first
        for definition in selectedDefinitions:
            existingNodeDefinition = sdGraphDefinition.getDefinitionFromId(definition.getId())
            if existingNodeDefinition:
                sdGraphDefinition.removeDefinition(existingNodeDefinition)
            logger.debug('[%s] Adding Definition "%s"' % (sdGraphDefinition.getId(), definition.getId()))
            sdGraphDefinition.addDefinition(definition)
    @staticmethod
    def uninit():
        """Placeholder — no teardown is performed."""
        pass
| [
"[email protected]"
] | |
6bf99d9793ffdc0c62d6793b678910e9549e95d3 | 2949466be9b2761a8d8945938b8ed5be8bdc2fa7 | /第7回/b.py | 835bf9039dc0e4f9745b37a0302d1a686fe4b44c | [] | no_license | greedtn/EEIC-Algorithms2021 | ab1087977e45fb6d386bff9db8ae4984363b203c | d38235f776ad51fac93be5a7972a68299a7e0706 | refs/heads/main | 2023-06-12T09:48:16.287802 | 2021-07-12T12:49:09 | 2021-07-12T12:49:09 | 357,838,621 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 375 | py | N, S = map(int, input().split())
A = list(map(int, input().split()))
dp = [[False for _ in range(S+1)] for _ in range(N+1)]
dp[0][0] = True
for i in range(1, N+1):
for j in range(S+1):
if j - A[i-1] >= 0:
dp[i][j] = dp[i-1][j] or dp[i-1][j-A[i-1]]
else:
dp[i][j] = dp[i-1][j]
if dp[N][S]:
print("Yes")
else:
print("No") | [
"[email protected]"
] | |
c7c1319d6e8b0ddbaad9fb3d2febc853dd7f2e3a | 7790f148c1bfe71ab6aca75857e410613cacefb5 | /Built-in Functions/functions_test.py | 393873695b3f2eadbbde51e6a86c8fb39713b32b | [] | no_license | EthanShapiro/PythonCompleteCourse | de89e674c25f69f8af026fc9f58301e4ae4e2dcf | 687f7b91404fd0f32e8dfc4e76ea9534e98d1c50 | refs/heads/master | 2021-01-25T01:55:21.064855 | 2017-09-26T03:30:07 | 2017-09-26T03:30:07 | 94,653,128 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,650 | py | from functools import reduce
# Problem 1
# Use map to get the length of each word in the sentence
sentence = "How long are the words in this phrase"
print(list(map(len, sentence.split())))
# Problem 2
# Use reduce to take a list of digits and return the number they correspond to
digits = [3, 4, 3, 2, 1]
def digitsToNum(digits):
    """Fold a sequence of decimal digits into the integer they spell.

    An empty sequence yields 0 (the reduce() version raised TypeError).
    """
    number = 0
    for digit in digits:
        number = number * 10 + digit
    return number
print(digitsToNum(digits))
# Problem 3
# Use filter to return the words from a list of words which start with a letter
l = ['hello', 'are', 'cat', 'dog', 'ham', 'hi', 'go', 'to', 'heart']
def filterWords(wordList, letter):
    """Return the words from wordList whose first character equals letter."""
    return [word for word in wordList if word[0] == letter]
print(filterWords(l, 'h'))
# Problem 4
# Use zip and list comprehension to return a list of the same length where each value is the two strings from L1 and L2
# concatenated together with connector between them. Look at the example output below:
def concatenate(L1, L2, connector):
    """Join L1 and L2 pairwise with connector between each pair of words."""
    return [connector.join(pair) for pair in zip(L1, L2)]
print(concatenate(['A', 'B'], ['a', 'b'], '-'))
# Problem 5
# enumerate and other skills to return a dictionary which has the values of the list as keys and the index as the value
# You may assume that a value will only appear once in the given list.
def dList(L):
    """Map each list value to its index (the last index wins on duplicates)."""
    mapping = {}
    for index, item in enumerate(L):
        mapping[item] = index
    return mapping
print(dList(['a', 'b', 'c']))
# Problem 6
# enumerate and other skills from above to return the count of the number of items in the list value equals its index
def countMatchIndex(L):
    """Count the positions whose stored value equals their own index."""
    return sum(1 for index, value in enumerate(L) if value == index)
print(countMatchIndex([0, 2, 2, 1, 5, 5, 6, 10])) | [
"[email protected]"
] | |
9ea84a460b207810ee568378dc986a95089b75ee | d06d2c997cfe8ba0579a2aa116cf2b94b264201a | /WROOM-WIP/gfx.py | e9b60b07cd1fe2d88d8ecf769fdb30e88f21b947 | [] | no_license | SilverLogix/esp32_MicroPython | 7e944dbf0852c96576691f0b2291128071a5d2f2 | b9387c4f3b0db31cd1e8e03d8641ecdc30189ab0 | refs/heads/master | 2023-08-05T10:29:26.259473 | 2021-09-12T05:38:25 | 2021-09-12T05:38:25 | 367,536,027 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,705 | py | # ----------#
import st7789 as st
from machine import Pin, SPI
from micropython import const
# noinspection PyArgumentList
# Module-wide ST7789 display driver: a 135x240 panel on SPI(1) in
# landscape orientation (rotation=3).  Pin numbers are board-specific.
tft = st.ST7789(
    SPI(1, baudrate=30000000, sck=Pin(18), mosi=Pin(19)),
    135, 240,
    reset=Pin(23, Pin.OUT),
    cs=Pin(5, Pin.OUT),
    dc=Pin(16, Pin.OUT),
    backlight=Pin(4, Pin.OUT),
    rotation=3)
tft.init()
# Common colors in RGB565 format.
BLACK = const(0x0000)
BLUE = const(0x001F)
RED = const(0xF800)
GREEN = const(0x07E0)
CYAN = const(0x07FF)
MAGENTA = const(0xF81F)
YELLOW = const(0xFFE0)
WHITE = const(0xFFFF)
def fill(col):
    """Fill the whole display with a single RGB565 color."""
    tft.fill(col)
def text(string: str, x: int, y: int, fg=WHITE, bg=BLACK):
    """Draw `string` at (x, y) with the module font, fg color on bg."""
    import font
    tft.text(font, string, x, y, fg, bg)
def pixel(x, y, col):
    """Set a single pixel at (x, y) to `col`."""
    tft.pixel(x, y, col)
def scroll(dx, dy):
    """Scroll the display content by (dx, dy) pixels."""
    tft.scroll(dx, dy)
def text_long(otitle, oline1, oline2, oline3, oline4, oline5, oline6, oline7, fg=WHITE, bg=BLACK):
    """Render a yellow title plus seven body lines, 16-18 px row pitch."""
    import font
    tft.text(font, otitle, 0, 0, YELLOW, bg)
    tft.text(font, oline1, 0, 18, fg, bg)
    tft.text(font, oline2, 0, 34, fg, bg)
    tft.text(font, oline3, 0, 50, fg, bg)
    tft.text(font, oline4, 0, 66, fg, bg)
    tft.text(font, oline5, 0, 82, fg, bg)
    tft.text(font, oline6, 0, 98, fg, bg)
    tft.text(font, oline7, 0, 114, fg, bg)
def rect(x, y, w, h, col):
    """Outline a w x h rectangle with its top-left corner at (x, y)."""
    tft.rect(x, y, w, h, col)
def fill_rect(x, y, w, h, col):
    """Draw a filled w x h rectangle with its top-left corner at (x, y)."""
    tft.fill_rect(x, y, w, h, col)
def hline(x, y, w, col):
    """Draw a horizontal line of width w starting at (x, y)."""
    tft.hline(x, y, w, col)
def vline(x, y, h, col):
    """Draw a vertical line of height h starting at (x, y)."""
    tft.vline(x, y, h, col)
def line(x1, y1, x2, y2, col):
    """Draw a straight line from (x1, y1) to (x2, y2)."""
    tft.line(x1, y1, x2, y2, col)
def triangle(x0, y0, x1, y1, x2, y2, col):
    """Outline a one-pixel-wide triangle through the three given points."""
    tft.line(x0, y0, x1, y1, col)
    tft.line(x1, y1, x2, y2, col)
    tft.line(x2, y2, x0, y0, col)
def circle(x0, y0, radius, col):
    """Draw a one-pixel-wide circle centered at (x0, y0).

    Integer midpoint circle algorithm: one octant is walked and the
    other seven points come from mirroring.
    """
    f = 1 - radius
    ddf_x = 1
    ddf_y = -2 * radius
    x = 0
    y = radius
    # The four axis-aligned cardinal points.
    tft.pixel(x0, y0 + radius, col)
    tft.pixel(x0, y0 - radius, col)
    tft.pixel(x0 + radius, y0, col)
    tft.pixel(x0 - radius, y0, col)
    while x < y:
        if f >= 0:
            y -= 1
            ddf_y += 2
            f += ddf_y
        x += 1
        ddf_x += 2
        f += ddf_x
        # Mirror the computed octant point into all eight octants.
        tft.pixel(x0 + x, y0 + y, col)
        tft.pixel(x0 - x, y0 + y, col)
        tft.pixel(x0 + x, y0 - y, col)
        tft.pixel(x0 - x, y0 - y, col)
        tft.pixel(x0 + y, y0 + x, col)
        tft.pixel(x0 - y, y0 + x, col)
        tft.pixel(x0 + y, y0 - x, col)
        tft.pixel(x0 - y, y0 - x, col)
def round_rect(x0, y0, width, height, radius, col):
    """Outline a rectangle with rounded corners, top-left at (x0, y0).

    The radius is clamped to half of the shorter side.
    NOTE(review): despite the original claim that radius == 0 behaves
    like a plain rect, all drawing is inside ``if radius:`` so a zero
    radius draws nothing — confirm intent.
    """
    # shift to correct for start point location
    x0 += radius
    y0 += radius
    # ensure that the radius will only ever be half of the shortest side or less
    radius = int(min(radius, width / 2, height / 2))
    if radius:
        f = 1 - radius
        ddf_x = 1
        ddf_y = -2 * radius
        x = 0
        y = radius
        # The four straight edges between the corner arcs.
        tft.vline(x0 - radius, y0, height - 2 * radius + 1, col)  # left
        tft.vline(x0 + width - radius, y0, height - 2 * radius + 1, col)  # right
        tft.hline(x0, y0 + height - radius + 1, width - 2 * radius + 1, col)  # bottom
        tft.hline(x0, y0 - radius, width - 2 * radius + 1, col)  # top
        while x < y:
            if f >= 0:
                y -= 1
                ddf_y += 2
                f += ddf_y
            x += 1
            ddf_x += 2
            f += ddf_x
            # angle notations are based on the unit circle and in direction of being drawn
            # top left
            tft.pixel(x0 - y, y0 - x, col)  # 180 to 135
            tft.pixel(x0 - x, y0 - y, col)  # 90 to 135
            # top right
            tft.pixel(x0 + x + width - 2 * radius, y0 - y, col)  # 90 to 45
            tft.pixel(x0 + y + width - 2 * radius, y0 - x, col)  # 0 to 45
            # bottom right
            tft.pixel(x0 + y + width - 2 * radius, y0 + x + height - 2 * radius, col)  # 0 to 315
            tft.pixel(x0 + x + width - 2 * radius, y0 + y + height - 2 * radius, col)  # 270 to 315
            # bottom left
            tft.pixel(x0 - x, y0 + y + height - 2 * radius, col)  # 270 to 255
            tft.pixel(x0 - y, y0 + x + height - 2 * radius, col)  # 180 to 225
def wipe(col):
import font
tft.fill(col)
tft.text(font, " ", 0, 0, col, col)
tft.text(font, " ", 0, 18, col, col)
tft.text(font, " ", 0, 34, col, col)
tft.text(font, " ", 0, 50, col, col)
tft.text(font, " ", 0, 66, col, col)
tft.text(font, " ", 0, 82, col, col)
tft.text(font, " ", 0, 98, col, col)
tft.text(font, " ", 0, 114, col, col)
# -------------- Screen Splashes ---------------#
def boot(col=BLACK):
    """Splash screen: clear to `col` and show 'Boot' at the bottom-left."""
    import font
    tft.fill(col)
    tft.text(font, "Boot", 0, tft.height() - 16, WHITE, 0) # Boot text on screen
def gwifi(col=BLACK):
    """Splash screen: clear to `col` and show 'WIFI' at the bottom-left."""
    import font
    tft.fill(col)
    tft.text(font, "WIFI", 0, tft.height() - 16, WHITE, 0)
def g_update(col=BLACK):
    """Splash screen: clear to `col` and show 'UPDATE' at the bottom-left."""
    import font
    tft.fill(col)
    tft.text(font, "UPDATE", 0, tft.height() - 16, WHITE, 0)
def micrologo(col=BLACK):
import font
tft.fill(col)
tft.jpg('logo.jpg', 0, 0, 1)
tft.text(font, " MICROPYTHON ", int(tft.width() / 2 - 105), int(tft.height() - 18), WHITE, 0)
| [
"[email protected]"
] | |
67e18bdcbbd80a906fe7d2881b539d4f5a44ae7c | 6ce0a02d12eb530ac4a59d833f48ad17e154dfcd | /DOA_gcc_phat.py | 354b8bfcc2c2506c92cd5ff0173eb0e25682dc53 | [] | no_license | oozzZZZZ/PySoundTools | ae7fa3f8c23e881587ad83b70c9908297d3a5ec0 | 4693b86317ee820b439453ae0041e50951e22376 | refs/heads/master | 2020-12-27T02:46:19.858543 | 2020-03-14T08:14:12 | 2020-03-14T08:14:12 | 237,738,419 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,509 | py | """
4ch micを用いたGCC-PHATによる簡易音源方向推定プログラム
"""
import numpy as np
from scipy.io.wavfile import read
import math
tap = 2**10
# Import Sound Sorce
dataB = "/content/DOAtest1_B.wav"
dataF = "/content/DOAtest1_F.wav"
dataL = "/content/DOAtest1_F.wav"
dataR = "/content/DOAtest1_F.wav"
rate, dataB = read(dataB)
_, dataF = read(dataF)
_, dataR = read(dataR)
_, dataL = read(dataL)
SOUND_SPEED = 343.2
MIC_DISTANCE_4 = 0.08127
MAX_TDOA_4 = MIC_DISTANCE_4 / float(SOUND_SPEED)
def gcc_phat(sig, refsig, fs=1, max_tau=None, interp=16):
    '''
    Estimate the time offset between ``sig`` and ``refsig`` using the
    Generalized Cross Correlation - Phase Transform (GCC-PHAT) method.

    Parameters
    ----------
    sig, refsig : 1-D arrays
        Signal and reference signal.
    fs : int, optional
        Sample rate in Hz.
    max_tau : float, optional
        Upper bound (seconds) for the absolute delay; narrows the peak
        search window.
    interp : int, optional
        Frequency-domain interpolation factor for sub-sample peaks.

    Returns
    -------
    (tau, cc)
        ``tau`` is the estimated delay of ``sig`` relative to ``refsig``
        in seconds; ``cc`` is the cross-correlation search window.
    '''
    # Make the FFT length at least len(sig) + len(refsig) so the
    # circular correlation does not wrap around.
    n = sig.shape[0] + refsig.shape[0]
    # Generalized Cross Correlation with Phase Transform weighting.
    SIG = np.fft.rfft(sig, n=n)
    REFSIG = np.fft.rfft(refsig, n=n)
    R = SIG * np.conj(REFSIG)
    # Guard against 0/0 on bins where the cross-spectrum vanishes (e.g.
    # silent input); the unguarded division produced NaNs that corrupted
    # the whole inverse transform.
    cc = np.fft.irfft(R / np.maximum(np.abs(R), 1e-15), n=(interp * n))
    max_shift = int(interp * n / 2)
    if max_tau:
        max_shift = np.minimum(int(interp * fs * max_tau), max_shift)
    # Re-center so index ``max_shift`` corresponds to lag 0.
    cc = np.concatenate((cc[-max_shift:], cc[:max_shift+1]))
    # The peak of the cross-correlation gives the (interpolated) shift.
    shift = np.argmax(np.abs(cc)) - max_shift
    tau = shift / float(interp * fs)
    return tau, cc
# DataList = [dataF,dataR,dataB,dataL]
def My_get_direction(DataList):
    """Estimate a direction of arrival (degrees, 0-360) from 4-mic frames.

    DataList holds one frame per microphone, ordered [front, right,
    back, left].  The two opposite pairs (front/back and right/left)
    each yield a time delay via GCC-PHAT, converted into an angle with
    asin; the two angles are then combined into a single bearing.
    """
    best_guess = None
    MIC_GROUP_N = 2  # two opposite microphone pairs
    tau = [0] * MIC_GROUP_N
    theta = [0] * MIC_GROUP_N
    for i in range(MIC_GROUP_N):
        # Delay between microphone i and the opposite microphone i+2,
        # bounded by the physically possible delay MAX_TDOA_4.
        tau[i], _ = gcc_phat(DataList[i], DataList[i+2], fs=rate, max_tau=MAX_TDOA_4, interp=1)
        theta[i] = math.asin(tau[i] / MAX_TDOA_4) * 180 / math.pi
    # Use the pair with the smaller absolute angle and the sign of the
    # other pair to pick the quadrant.
    if np.abs(theta[0]) < np.abs(theta[1]):
        if theta[1] > 0:
            best_guess = (theta[0] + 360) % 360
        else:
            best_guess = (180 - theta[0])
    else:
        if theta[0] < 0:
            best_guess = (theta[1] + 360) % 360
        else:
            best_guess = (180 - theta[1])
    # Rotate into the board's reference frame; the constant offsets
    # (90+180, 120) look hardware-specific — confirm against mic layout.
    best_guess = (best_guess + 90 + 180) % 360
    best_guess = (-best_guess + 120) % 360
    return best_guess
def main():
    # Walk through the recordings frame by frame (``tap`` samples per
    # frame) and print one direction estimate per frame.  The loop has
    # no termination condition: once the slices pass the end of the
    # recordings they become empty — presumably this errors out inside
    # gcc_phat; confirm.
    i = 0
    while True:
        data1,data2,data3,data4 = dataF[tap*i:tap*(i+1)],dataR[tap*i:tap*(i+1)],dataB[tap*i:tap*(i+1)],dataL[tap*i:tap*(i+1)]
        DataList=[data1,data2,data3,data4]
        direction = My_get_direction(DataList)
        print(direction)
        i+=1
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
e1ca61cff6ec0075b618a7b92a90386594b54e70 | 5d5d2100425892d67e44b2416a0f86440f7b8768 | /scripts/integration_test.py | 1b5db1586486bf74f60bfa2f22e1be6d7668d3e8 | [] | no_license | charliekelley21/Lisp_Interpreter | 94488ff8e81eee803f2170c0918f6c92bdeb7cf4 | 8188d3d0b600ba0abe57dd716b7e8e203fc7883b | refs/heads/main | 2023-02-16T12:37:58.682005 | 2021-01-13T00:35:08 | 2021-01-13T00:35:08 | 329,148,981 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,286 | py | import pexpect
import pexpect.replwrap as replwrap
import unittest
import os
# the slisp executable
cmd = './slisp'
# the prompt to expect
prompt = u'slisp>'
class TestREPL(unittest.TestCase):
    """Exercise the interactive slisp REPL through pexpect's REPLWrapper."""
    def setUp(self):
        # Fresh REPL per test, synchronized on the 'slisp>' prompt.
        self.wrapper = replwrap.REPLWrapper(cmd, prompt, None)
    def test_add(self):
        # Addition with two and three operands, zeros and negatives.
        output = self.wrapper.run_command(u'(+ 1 2)')
        self.assertEqual(output.strip(), "(3)")
        output = self.wrapper.run_command(u'(+ 1 2 10)')
        self.assertEqual(output.strip(), "(13)")
        output = self.wrapper.run_command(u'(+ 1 2 0)')
        self.assertEqual(output.strip(), "(3)")
        output = self.wrapper.run_command(u'(+ 0 1 2)')
        self.assertEqual(output.strip(), "(3)")
        output = self.wrapper.run_command(u'(+ 1 2 -2)')
        self.assertEqual(output.strip(), "(1)")
        output = self.wrapper.run_command(u'(+ -1 -2)')
        self.assertEqual(output.strip(), "(-3)")
    def test_define(self):
        # define binds a symbol; bindings may reference earlier bindings.
        output = self.wrapper.run_command(u'(define a True)')
        self.assertEqual(output.strip(), "(True)")
        output = self.wrapper.run_command(u'(define b a)')
        self.assertEqual(output.strip(), "(True)")
        output = self.wrapper.run_command(u'(and a b)')
        self.assertEqual(output.strip(), "(True)")
    def test_conditional(self):
        # if evaluates the branch selected by the comparison.
        self.wrapper.run_command(u'(define a -1)')
        self.wrapper.run_command(u'(define b 1)')
        self.wrapper.run_command(u'(define c 0)')
        output = self.wrapper.run_command(u'(if (< a b) c False)')
        self.assertEqual(output.strip(), "(0)")
    def test_error(self):
        # Redefining a reserved word must produce an error message.
        output = self.wrapper.run_command(u'(define begin True)')
        self.assertTrue(output.strip().startswith('Error'))
class TestExecuteCommandline(unittest.TestCase):
    """Run slisp with -e "<expr>" and check its output and exit status."""
    def test_sub(self):
        args = ' -e ' + ' "(- 4 2)" '
        (output, retcode) = pexpect.run(cmd+args, withexitstatus=True, extra_args=args)
        self.assertEqual(retcode, 0)
        self.assertEqual(output.strip(), b"(2)")
    def test_parse_error(self):
        # Two top-level expressions in one -e string is a parse error.
        args = ' -e ' + ' "(+ 1 2) (+ 3 4)" '
        (output, retcode) = pexpect.run(cmd+args, withexitstatus=True, extra_args=args)
        self.assertNotEqual(retcode, 0)
        self.assertTrue(output.strip().startswith(b'Error'))
    # FAILED — NOTE(review): this case was observed failing; '-' with three
    # operands may actually be accepted by the interpreter. Confirm intent.
    def test_error(self):
        args = ' -e ' + ' "(- 4 2 12)" '
        (output, retcode) = pexpect.run(cmd+args, withexitstatus=True, extra_args=args)
        self.assertNotEqual(retcode, 0)
        self.assertTrue(output.strip().startswith(b'Error'))
class TestExecuteFromFile(unittest.TestCase):
    """Run slisp on program files: results, line endings, missing files."""
    def test_file(self):
        args = ' /vagrant/tests/test3.slp'
        (output, retcode) = pexpect.run(cmd+args, withexitstatus=True, extra_args=args)
        self.assertEqual(retcode, 0)
        self.assertEqual(output.strip(), b"(2)")
    def test_file_lf(self):
        # Unix (LF) line endings.
        args = ' /vagrant/tests/test4.slp'
        (output, retcode) = pexpect.run(cmd+args, withexitstatus=True, extra_args=args)
        self.assertEqual(retcode, 0)
        self.assertEqual(output.strip(), b"(-1)")
    def test_file_crlf(self):
        # Windows (CRLF) line endings must parse identically.
        args = ' /vagrant/tests/test_crlf.slp'
        (output, retcode) = pexpect.run(cmd+args, withexitstatus=True, extra_args=args)
        self.assertEqual(retcode, 0)
        self.assertEqual(output.strip(), b"(-1)")
    def test_file_error(self):
        # A nonexistent file must fail with an error message.
        args = ' /there/is/no/such/file'
        (output, retcode) = pexpect.run(cmd+args, withexitstatus=True, extra_args=args)
        self.assertNotEqual(retcode, 0)
        self.assertTrue(output.strip().startswith(b'Error'))
# run the tests
unittest.main()
| [
"[email protected]"
] | |
c84a08760c821d1b48b2632ff79308662f638488 | 82770c7bc5e2f27a48b8c370b0bab2ee41f24d86 | /microblog/flask/venv/lib/python2.7/site-packages/scipy/misc/__init__.py | d1e05f256c77c79c83cf1a81110bb9f8ca32311e | [
"Apache-2.0"
] | permissive | johankaito/fufuka | 77ddb841f27f6ce8036d7b38cb51dc62e85b2679 | 32a96ecf98ce305c2206c38443e58fdec88c788d | refs/heads/master | 2022-07-20T00:51:55.922063 | 2015-08-21T20:56:48 | 2015-08-21T20:56:48 | 39,845,849 | 2 | 0 | Apache-2.0 | 2022-06-29T23:30:11 | 2015-07-28T16:39:54 | Python | UTF-8 | Python | false | false | 2,459 | py | """
==========================================
Miscellaneous routines (:mod:`scipy.misc`)
==========================================
.. currentmodule:: scipy.misc
Various utilities that don't have another home.
Note that the Python Imaging Library (PIL) is not a dependency
of SciPy and therefore the `pilutil` module is not available on
systems that don't have PIL installed.
.. autosummary::
:toctree: generated/
ascent - Get example image for processing
bytescale - Byte scales an array (image)
central_diff_weights - Weights for an n-point central m-th derivative
comb - Combinations of N things taken k at a time, "N choose k" (imported from scipy.special)
derivative - Find the n-th derivative of a function at a point
face - Get example image for processing
factorial - The factorial function, n! = special.gamma(n+1) (imported from scipy.special)
factorial2 - Double factorial, (n!)! (imported from scipy.special)
factorialk - (...((n!)!)!...)! where there are k '!' (imported from scipy.special)
fromimage - Return a copy of a PIL image as a numpy array
imfilter - Simple filtering of an image
imread - Read an image file from a filename
imresize - Resize an image
imrotate - Rotate an image counter-clockwise
imsave - Save an array to an image file
imshow - Simple showing of an image through an external viewer
info - Get help information for a function, class, or module
lena - Get classic image processing example image Lena
logsumexp - Compute the log of the sum of exponentials of input elements
pade - Pade approximation to function as the ratio of two polynomials
toimage - Takes a numpy array and returns a PIL image
who - Print the Numpy arrays in the given dictionary
"""
from __future__ import division, print_function, absolute_import
__all__ = ['who', 'source', 'info', 'doccer']
from . import doccer
from .common import *
from numpy import who, source, info as _info
from scipy.special import comb, factorial, factorial2, factorialk
import sys
def info(object=None,maxwidth=76,output=sys.stdout,toplevel='scipy'):
    # Thin wrapper around numpy.info with 'scipy' as the default top level;
    # its docstring is copied from numpy.info just below.
    return _info(object, maxwidth, output, toplevel)
info.__doc__ = _info.__doc__
del sys
try:
from .pilutil import *
from . import pilutil
__all__ += pilutil.__all__
del pilutil
except ImportError:
pass
from . import common
__all__ += common.__all__
del common
from numpy.testing import Tester
test = Tester().test
| [
"[email protected]"
] | |
cce6cc7038621aa13a49a8489ce3158c991590ce | b0d0a4d18d2da39a8159ae3275be973e4beab42e | /admin.py | 95be154941c25047c45d41a86b75b9316491b711 | [] | no_license | mandatomarina/boaspraticas | 2e533db2212cf3f186febd9374cc21fd5edbe66f | 8740f0084accdc292e42a286ae21cf700d596fd3 | refs/heads/master | 2023-03-07T17:55:30.503123 | 2021-02-25T15:14:31 | 2021-02-25T15:14:31 | 342,270,090 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,260 | py | from django.contrib import admin
from import_export import resources
from import_export.fields import Field
from import_export.widgets import ManyToManyWidget, ForeignKeyWidget, CharWidget
from .models import Projeto, Tema, Autor, Natureza
from import_export.admin import ImportExportModelAdmin
from cidadaos.admin import M2MCreateWithForeignKey, M2MField
class ForeignCreateWidget(ForeignKeyWidget):
    """ForeignKeyWidget that get-or-creates the related object on import."""
    def clean(self, value, row=None, *args, **kwargs):
        # Create the target row on the fly when missing; empty values map to None.
        return self.model.objects.get_or_create(**{self.field:value})[0] if value else None
# Register your models here
class ProjetoResource(resources.ModelResource):
    """django-import-export resource for Projeto.

    The M2M columns (autor, tema) and the natureza FK create their
    related objects on the fly when imported values do not exist yet.
    """
    autor = M2MField(attribute="autor",column_name='autor',widget=M2MCreateWithForeignKey(Autor,',', 'nome', create=True))
    tema = M2MField(attribute="tema",column_name='tema',widget=M2MCreateWithForeignKey(Tema,',', 'tema', create=True))
    natureza = Field(attribute="natureza",column_name='natureza',widget=ForeignCreateWidget(Natureza, 'nome'))
    class Meta:
        model = Projeto
class ProjetoAdmin(ImportExportModelAdmin):
    """Admin with CSV import/export for Projeto; the list shows only the name."""
    resource_class = ProjetoResource
    list_display = ('nome',)
admin.site.register(Projeto, ProjetoAdmin)
admin.site.register(Tema)
admin.site.register(Autor)
admin.site.register(Natureza) | [
"[email protected]"
] | |
79202942af3a996a72feb9f8f3d3fb8a65e71f54 | e10c398b68adc374d3f2886b8b22d78b2f5328d0 | /Requests_Pics_test.py | 7407b66017ecca6dd4ab8bcea7307cc5e74f456b | [] | no_license | QueenDekimZ/spider_notebook | e5c475462efd782cf2b56b5c30d21b1ad5192570 | 0ebb5846fc75aa77227b4d1b9266ce0336fa4ca1 | refs/heads/master | 2020-07-01T16:03:57.240917 | 2019-08-09T08:19:37 | 2019-08-09T08:19:37 | 201,220,030 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 502 | py | import requests
import os
# Source URL (placekitten serves a 1000x400 kitten image) and the local
# destination path, named "<width>_<height>.png".
url = 'http://placekitten.com/1000/400'
root = 'C:\\Users\\MSI-PC\\Desktop\\'
path = root + url.split('/')[-2] + '_' + url.split('/')[-1] +'.png'
try:
    # Create the destination folder on first use.
    if not os.path.exists(root):
        os.mkdir(root)
    # Download only when the file is not already present.
    if not os.path.exists(path):
        r = requests.get(url)
        with open(path, 'wb') as f:
            f.write(r.content)
            f.close()
        print('图片保存成功')
    else:
        print('图片已存在')
except:
    print('爬取失败')
| [
"[email protected]"
] | |
66e0361b6f2044afc61d439dec6561f1f40f127f | a5c9b05657225b07d5e715fcb0721004ceef7ad6 | /src/utils/housing.py | 0f79103289bb4c6dce70e59113d46487c19fa653 | [
"MIT"
] | permissive | mbeni12333/machine-learning | 0d868805a1a6efb33b517b7d853b3acc53db5222 | 3421b86c75452a7c285326ae52747f3dc9083553 | refs/heads/master | 2021-07-12T18:11:10.105370 | 2021-03-30T07:59:57 | 2021-03-30T07:59:57 | 122,761,214 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,301 | py | import os
import urllib
import tarfile
import pandas as pd
DOWNLOAD_ROOT = "https://raw.githubusercontent.com/ageron/handson-ml2/master/"
HOUSING_PATH = os.path.join("datasets", "housing")
HOUSING_URL = DOWNLOAD_ROOT + "datasets/housing/housing.tgz"
def fetch_housing_data(housing_url=HOUSING_URL, housing_path=HOUSING_PATH):
    """
    Download and extract the California housing dataset.

    housing_url: URL of the housing.tgz archive to fetch
    housing_path: directory the archive is stored in and extracted to
    """
    # create the folder if it doesn't exist
    os.makedirs(housing_path, exist_ok=True)
    # the path to the compressed archive
    tgz_path = os.path.join(housing_path, "housing.tgz")
    # Download the data
    urllib.request.urlretrieve(housing_url, tgz_path)
    # Extract the data; the context manager guarantees the archive is
    # closed even when extraction fails (the original leaked the handle
    # on error).
    with tarfile.open(tgz_path) as housing_tgz:
        housing_tgz.extractall(path=housing_path)
def load_housing_data(housing_path=HOUSING_PATH):
    """
    Read the housing CSV under ``housing_path`` into a pandas DataFrame.

    housing_path: directory containing housing.csv
    return: pandas.DataFrame
    """
    return pd.read_csv(os.path.join(housing_path, "housing.csv"))
| [
"[email protected]"
] | |
9d3c4ebbdf12c468362bc277b6fb306ae9189e36 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5670465267826688_1/Python/vtlinh/c.py | 156a68308ff94957326b24e88214f34b41b3f7b1 | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 993 | py | mat = [
[],
[0, 1, 2, 3, 4],
[0, 2, -1, 4, -3],
[0, 3, -4, -1, 2],
[0, 4, 3, -2, -1]
]
def calc(t, pos, req, till_end):
    # Multiply out the quaternion string ``t`` starting at index ``pos``.
    #
    # Each character encodes a quaternion unit (1 -> 1, 2 -> i, 3 -> j,
    # 4 -> k, see the i/j/k replacement in main); ``mat`` is the unit
    # multiplication table, where a negative entry means a negated unit.
    # ``cur`` holds the running product's signed unit.
    #
    # When ``till_end`` is false, stop as soon as the running product
    # equals ``req`` and return (cur, index after the match); otherwise
    # consume the whole string.  Returns (final product, stop index).
    cur = 1
    for i in xrange(pos, len(t)):
        if cur < 0:
            # Negated unit: multiply |cur| by the next factor, then
            # reapply the sign flip.
            cur = -1 * mat[-cur][int(t[i])]
        else:
            cur = mat[cur][int(t[i])]
        if cur == req and not till_end:
            return cur, i+1
    return cur, len(t)
if __name__ == '__main__':
with open('input.in') as fi:
lines = fi.read().splitlines()
tests = int(lines[0])
for i in xrange(tests):
x = int(lines[1+i*2].split()[1])
if x >= 16:
x = x % 12 + 12
while x < 12:
x += 4
t = lines[2+i*2].replace('i','2').replace('j','3').replace('k','4') * x
yes = False
cur, pos = calc(t, 0, 2, False)
if pos != len(t) and cur == 2:
cur, pos = calc(t, pos, 3, False)
if pos != len(t) and cur == 3:
cur, pos = calc(t, pos, 4, True)
if cur == 4:
yes = True
print 'Case #%s: %s' %(i+1, 'YES' if yes else 'NO')
| [
"[email protected]"
] | |
4375332d8f960933504e9cb26befe1775cd69710 | a3ac9a65f794c03b99c733c82df7354aff0afc18 | /rhaptos/atompub/plone/browser/interfaces.py | f9cafe5c9d9799c5d371d710b5a988cc23f7c6f7 | [] | no_license | Rhaptos/rhaptos.atompub.plone | 35a8efa1496f74df24071e6ba4573872719335d6 | fe6d20cb8e95be1633a42f3d7af515262a00b7f8 | refs/heads/master | 2021-01-21T21:54:27.252226 | 2016-03-31T17:52:39 | 2016-03-31T17:52:39 | 6,147,154 | 0 | 1 | null | 2016-03-31T17:52:40 | 2012-10-09T20:19:22 | Python | UTF-8 | Python | false | false | 343 | py | from plone.theme.interfaces import IDefaultPloneLayer
class IThemeSpecific(IDefaultPloneLayer):
"""Marker interface that defines a Zope 3 browser layer.
If you need to register a viewlet only for the
"Rhaptos AtomPub Plone Theme" theme, this interface must be its layer
(in atompub/viewlets/configure.zcml).
"""
| [
"[email protected]"
] | |
079cc07b4e12fb309fa55d655d198fe64513f9c4 | bc7d84eeb509e66b56e02606dd43fa61e8cbd741 | /build.py | 72414479ad25796ff6d909dfe847cda5ca35f451 | [
"Apache-2.0"
] | permissive | weekmo/biodb_team3 | 4ef5e6c9b0b1277b21c59c19ed4f267e828cf901 | c2918f629d42c1137dcc249a549991e92f63a7f9 | refs/heads/master | 2023-01-06T19:58:41.336749 | 2021-06-23T11:09:02 | 2021-06-23T11:09:02 | 141,455,842 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 157 | py | import sys
from biodb_team_3.db_manager import Manager
# Build the database via Manager; an optional first command-line argument
# is forwarded to Manager — presumably a database location, verify against
# biodb_team_3.db_manager.
m=None
if len(sys.argv) <2:
    # No argument: use the Manager's default configuration.
    m = Manager()
else:
    m = Manager(str(sys.argv[1]))
m.populate_db()
| [
"[email protected]"
] | |
f0c5bc704d53938498f79858f12f0a79c328b56c | b7abd802cc3289d5d951cbf88e590737f069f1bb | /PyMS/Utilities/UIKit/Widgets/Toplevel.py | aef887605a98c70e76d2ec7251d840cbe67fe570 | [] | no_license | poiuyqwert/PyMS | 0c5396a859c23ff13028efac0b014e50dede911b | 124cc3fd2f6907166d84799edcd8f0450e6b7944 | refs/heads/master | 2023-08-31T21:04:28.241270 | 2023-06-16T17:54:00 | 2023-06-16T17:54:00 | 31,783,698 | 44 | 18 | null | 2023-06-16T17:54:01 | 2015-03-06T18:48:15 | Python | UTF-8 | Python | false | false | 479 | py |
from .. import Theme
from .Extensions import Extensions
try: # Python 2
import Tkinter as _Tk
except: # Python 3
import tkinter as _Tk
class Toplevel(_Tk.Toplevel, Extensions):
	"""Tk Toplevel that applies the app theme and mixes in Extensions helpers."""
	def __init__(self, *args, **kwargs):
		_Tk.Toplevel.__init__(self, *args, **kwargs)
		# Apply the application-wide theme as soon as the window exists.
		Theme.apply_theme(self)
	def make_active(self):
		"""Bring the window to the front and give it keyboard focus."""
		if self.state() == 'withdrawn':
			self.deiconify()
		self.lift()
		self.focus_force()
		# Briefly grab and release events to force focus, unless some
		# widget already holds a grab.
		if not self.grab_status():
			self.grab_set()
			self.grab_release()
| [
"[email protected]"
] | |
99be3902a03da7f048764e27b2998ab563462dab | 6d7abc02fc93dd1002c213be5202dcd00baf338b | /users/urls.py | 3019358dfb79d2eb6b7cccda9c16e449946207ef | [] | no_license | AnabellJimenez/GC_Ecommerce | 595b5b7d8a14cf0edfd3acaec9d69e7b4022b731 | 2e2115fc5b6455da74d95c62ed9b9915a90c4e3d | refs/heads/master | 2020-07-03T08:25:18.270036 | 2016-08-22T16:15:32 | 2016-08-22T16:15:32 | 35,298,280 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^index', views.index, name='index'),
url(r'^$', views.users, name='users'),
url(r'^sign_up', views.sign_up, name = 'sign_up'),
url(r'^log_in', views.log_in, name='log_in'),
url(r'^verify', views.verify, name='verify'),
url(r'^home', views.home, name='home'),
url(r'^log_out', views.log_out, name='log_out'),
]
| [
"[email protected]"
] | |
0a67c0e99351d4619966c92b9672f96906b5d098 | 031953ffeeb036b0620460ddf8c7ea5c61f2fa9e | /djviews/blog/forms.py | f159307a872ce8e9981137d3e60598e9292ac8a1 | [] | no_license | PichaiLim/python-django-view | cc7aa3701e4db773ce187f6f7fbe21317a14972a | 545124d46a7c6f67f7d18f1c43bee3842de2878d | refs/heads/master | 2020-03-25T22:43:55.054396 | 2018-08-10T05:09:34 | 2018-08-10T05:09:34 | 144,238,987 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 205 | py | from django import forms
from .models import SeialNumber
class SeialnumberModelForm(forms.ModelForm):
    # ModelForm exposing only the running_number field of SeialNumber.
    # NOTE(review): 'Seial' looks like a typo for 'Serial' (it matches the
    # model's own spelling); renaming would break importers, so it is kept.
    class Meta:
        model = SeialNumber
        fields = [
            'running_number'
        ] | [
"[email protected]"
] | |
56ae9b3fcaa09790666db765a0cbd5692dc0b016 | 00830809a8b7b1f00c92a6c700a3e9270196699c | /apiteste/apiteste/urls.py | 83302f9667a9491f6fd37f274941aa62d1e8d8f6 | [] | no_license | Marcos-osg/IMDB-Busca | afae909933dfcd20216f82cb7ee2fabfb76227e2 | 723a86d4b0146daeac2806e07d0cba38e7aa57a9 | refs/heads/main | 2023-04-21T05:51:44.187681 | 2021-05-07T00:54:28 | 2021-05-07T00:54:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 797 | py | """apiteste URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
# Project-level routes: Django admin plus everything from the testeapi app.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('',include('testeapi.urls'))
]
| [
"[email protected]"
] | |
81e771c136b9233b0c508cdef3123bc72423fb52 | 17d5e587896e92dcd7c83a1e6ebe210f092da304 | /users/models.py | 0fab80a462f5dd768a6d32b958aba8be24f55ad4 | [] | no_license | wecode-bootcamp-korea/19-1st-whiteSpace-backend | a5edbd0622d759e12a8b9780889fd998a729fe66 | 2f230e664d99ac997b5e3f6c7075164c28f4e19f | refs/heads/master | 2023-04-16T02:06:02.286734 | 2021-04-25T15:17:18 | 2021-04-25T15:17:18 | 357,093,520 | 0 | 1 | null | 2021-04-22T10:51:30 | 2021-04-12T07:12:04 | Python | UTF-8 | Python | false | false | 2,057 | py | from django.db import models
class User(models.Model):
    # Account record; `email` and `phone_number` are both unique natural keys.
    email = models.EmailField(max_length=100, unique=True)
    # NOTE(review): presumably stores a password hash -- confirm hashing
    # happens before save.
    password = models.CharField(max_length=200)
    name = models.CharField(max_length=45)
    phone_number = models.CharField(max_length=45, unique=True)
    is_active = models.BooleanField(default=True)  # account enabled flag
    mileage = models.PositiveIntegerField(default=0)  # points balance, starts at 0
    # Many-to-many relations materialized through the explicit join models below.
    coupon = models.ManyToManyField('Coupon', through='UserCoupon')
    wish = models.ManyToManyField('products.Product', through='WishList')
    created_at = models.DateTimeField(auto_now_add=True)  # set once on insert
    updated_at = models.DateTimeField(auto_now=True)      # refreshed on every save
    class Meta:
        db_table = 'users'
class Address(models.Model):
    # Postal address owned by a User; rows are deleted with the user (CASCADE).
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    postal_code = models.CharField(max_length=45)
    main_address = models.CharField(max_length=100)
    detail_address = models.CharField(max_length=100)
    # Marks the user's primary address; no default, so callers must supply it.
    is_main = models.BooleanField()
    phone_number = models.CharField(max_length=45)
    class Meta:
        db_table = 'addresses'
class WishList(models.Model):
    # Join table behind User.wish (User <-> products.Product).
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    product = models.ForeignKey('products.Product', on_delete=models.CASCADE)
    # Soft-delete flag; no default, so callers must supply it explicitly.
    is_deleted = models.BooleanField()
    class Meta:
        db_table = 'wish_lists'
class Coupon(models.Model):
    # Discount definition. Both discount fields are nullable, so a coupon can
    # carry a rate (fraction, max 9.99) or a flat price (max 9999.99) or neither.
    name = models.CharField(max_length=100)
    discount_rate = models.DecimalField(max_digits=3, decimal_places=2, null=True, blank=True)
    discount_price = models.DecimalField(max_digits=6, decimal_places=2, null=True, blank=True)
    # Validity window in days -- presumably used to compute
    # UserCoupon.expire_at when issuing; confirm against issuing code.
    valid_days = models.SmallIntegerField()
    class Meta:
        db_table = 'coupons'
class UserCoupon(models.Model):
    # Join table behind User.coupon; tracks per-user issuance state.
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    coupon = models.ForeignKey(Coupon, on_delete=models.CASCADE)
    # NOTE(review): an integer used as a boolean flag -- consider BooleanField.
    is_used = models.SmallIntegerField()
    expire_at = models.DateTimeField()
    class Meta:
        db_table = 'users_coupons'
| [
"[email protected]"
] | |
c67b441c98e470639dc85185462dd575cb90d98f | d63e87256e60db86f0e84d055b1a114f103506ba | /kingpin | 7d55c4476b8ce72aa2b6fc1fb6068be594cbc72b | [] | no_license | ahinkka/kingpin | 44a765daff7e02fc2a4aa5c10c4a6c2df027c1c7 | cd239b71578941ccabf6c11d6132a9554fd6bc67 | refs/heads/master | 2021-05-27T18:33:10.262555 | 2013-01-01T18:51:44 | 2013-01-01T18:51:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 657 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
import signal
import gevent
from dialer import DialLet
def main():
    """Parse command-line flags, configure logging, and run the dialer."""
    from optparse import OptionParser
    parser = OptionParser()
    parser.add_option("-d", "--debug", action="store_true", dest="debug",
                      default=False, help="logging level DEBUG")
    options, _positional = parser.parse_args()
    level = logging.DEBUG if options.debug else logging.INFO
    logging.basicConfig(level=level)
    dialer = DialLet()
    # Signal handlers are intentionally left disabled for now:
    # gevent.signal(signal.SIGTERM, dialer.kill)
    # gevent.signal(signal.SIGINT, dialer.kill)
    dialer.run()

if __name__ == '__main__':
    main()
| [
"[email protected]"
] | ||
b3e57e4c05c7628b1a8aef386e7e55747b8689e0 | 93e6cfece0a5f24d4ac562e61247c4ff820ef875 | /model.py | e2eb1b2c1fda21b5678ae28e458175160fb461b4 | [] | no_license | lancerQA/RSAN | 0b3a50c1323752e03159c1689d3b8f6f061752e1 | b36d3145d012b219861fd26fe72cbb75f02146cc | refs/heads/main | 2023-08-23T07:23:26.576950 | 2021-09-28T13:04:22 | 2021-09-28T13:04:22 | 411,204,640 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,633 | py | import torch.nn as nn
import torch
# G
class Generator(nn.Module):
    """Conditional generator: (noise, attribute vector) -> feature vector."""
    def __init__(self, opt):
        super(Generator, self).__init__()
        # Layer creation order is preserved so that weights_init consumes the
        # RNG in exactly the same sequence as before.
        self.fc1 = nn.Linear(1024, opt.ngh)
        self.fc2 = nn.Linear(opt.ngh, opt.resSize)
        self.lrelu = nn.LeakyReLU(0.2, True)
        self.relu = nn.ReLU(True)
        self.fc3 = nn.Linear(opt.attSize + opt.nz, 1024)
        self.apply(weights_init)
    def forward(self, noise, att):
        """Concatenate noise with the attribute vector and decode a feature."""
        joint = torch.cat((noise, att), 1)
        hidden = self.lrelu(self.fc3(joint))
        hidden = self.lrelu(self.fc1(hidden))
        return self.relu(self.fc2(hidden))
# D
class D2(nn.Module):
    """Discriminator head over mapped features.

    forward() returns a 3-tuple: (real/fake score, log-probabilities over the
    seen classes, the intermediate mapped embedding).
    """
    def __init__(self, opt):
        super(D2, self).__init__()
        # Creation order kept identical so weights_init RNG draws match.
        self.discriminator = nn.Linear(opt.mapSize, 1)
        self.hidden = nn.Linear(opt.mapSize, 1024)
        self.classifier = nn.Linear(1024, opt.nclass_seen)
        self.logic = nn.LogSoftmax(dim=1)
        self.mapping = nn.Linear(opt.mapSize, 4096)
        self.lrelu = nn.LeakyReLU(0.2, True)
        self.mapping2 = nn.Linear(4096,opt.mapSize)
        self.apply(weights_init)
    def forward(self, x):
        """Score, classify and embed a batch of input features."""
        embedded = self.lrelu(self.mapping(x))
        embedded = self.lrelu(self.mapping2(embedded))
        realness = self.discriminator(embedded)
        hidden_act = self.lrelu(self.hidden(embedded))
        class_logprob = self.logic(self.classifier(hidden_act))
        return realness, class_logprob, embedded
def weights_init(m):
    """DCGAN-style initializer applied via Module.apply().

    Linear-like layers get N(0, 0.02) weights and zero bias; BatchNorm-like
    layers get N(1, 0.02) scale and zero shift; anything else is untouched.
    """
    layer_name = m.__class__.__name__
    if 'Linear' in layer_name:
        m.weight.data.normal_(0.0, 0.02)
        m.bias.data.fill_(0)
    elif 'BatchNorm' in layer_name:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)
| [
"[email protected]"
] | |
b0c5317d1bb311310ee42aa1c026e38ffd5a9d1b | 29d6eb6f7c970cd95dca58912ea52bffc6730ba9 | /reverse.py | 0e71084ec0d2c5c4159f40ebe2eb6bf6482d3788 | [] | no_license | 30jawahar/pythonPRGMS | 13462659c45cc1a9d1d516934c6d5e15692862b8 | 73d26a1e3ebf14508d85ea4a264c52e6bdaee1a8 | refs/heads/master | 2020-03-25T02:04:48.882316 | 2019-06-18T10:31:01 | 2019-06-18T10:31:01 | 143,272,972 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 83 | py | a=int(input())
def reverse_number(value):
    """Return a non-negative integer with its decimal digits reversed.

    Generalizes the original, which hard-coded exactly three loop iterations
    and therefore printed wrong results (with spurious trailing zeros) for
    inputs that are not exactly three digits long. For three-digit inputs the
    output is unchanged.
    """
    result = 0
    while value > 0:
        result = result * 10 + value % 10
        value //= 10
    return result

if __name__ == '__main__':
    # `a` is read from stdin at the top of the script.
    print(reverse_number(a))
| [
"[email protected]"
] | |
63c4d88b1367e5315171946431e0c06728eb83c1 | 5c6e19225d224dd853cb1708dbed8cdba4c1ec6c | /MarkovDesigns/2-11-2015/chain.py | 1daf7d991a502f9242f9971a18ac76ab23858074 | [] | no_license | scj7t4/thesis | 47deab8d02933122d08523185e26830fd057a6e3 | f2d2e1eb01411dc94f885212d8fead1a1fdf1616 | refs/heads/master | 2021-05-01T15:41:06.921840 | 2016-06-21T20:54:56 | 2016-06-21T20:54:56 | 12,057,059 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,627 | py | import pykov
import itertools
import functools
import collections
import functools
class memoized(object):
    '''Decorator. Caches a function's return value each time it is called.
    If called later with the same arguments, the cached value is returned
    (not reevaluated).
    '''

    def __init__(self, func):
        self.func = func
        self.cache = {}

    def __call__(self, *args):
        # Probe hashability with hash() itself. The original used
        # isinstance(args, collections.Hashable), which is wrong for a tuple
        # containing an unhashable element (the isinstance check passes but
        # the dict lookup then raises TypeError) and which also breaks on
        # Python 3.10+, where Hashable was removed from the collections
        # namespace.
        try:
            hash(args)
        except TypeError:
            # Uncacheable arguments (e.g. a list): call through, don't cache.
            return self.func(*args)
        if args in self.cache:
            return self.cache[args]
        value = self.func(*args)
        self.cache[args] = value
        return value

    def __repr__(self):
        '''Return the function's docstring.'''
        return self.func.__doc__

    def __get__(self, obj, objtype):
        '''Support instance methods by binding obj as the first argument.'''
        return functools.partial(self.__call__, obj)
def AYCsFail(p):
    """Probability that the paired AYC exchange fails: exactly one of the two
    p**2 links succeeds, or both fail."""
    link_down = 1 - p ** 2
    return 2 * (p ** 2) * link_down + link_down ** 2
def make_chain(p):
    """Transition table of the full two-node chain, keyed by ('a,b','c,d')
    state-label pairs, for per-message success probability p."""
    # Hoist the three expressions that appear in more than one entry; the
    # arithmetic is unchanged, so the values are bit-identical.
    stay_split = AYCsFail(p) + ((p ** 2) ** 2) * (1 - p ** 2)  # !(AYC^AYC^INV^ACCEPT)
    merge_partial = (p ** 6) * (1 - p)  # AYC^AYC^INV^ACCEPT^!PEERLIST
    merge_full = (p ** 7)               # AYC^AYC^INV^ACCEPT^PEERLIST
    chain = {
        ('1,1', '1,1'): stay_split,
        ('1,1', '1,2'): 0,  # Not possible
        ('1,1', '2,1'): merge_partial,
        ('1,1', '2,2'): merge_full,
        ('1,2', '1,1'): 1,  # Member does not detect failure until it can't elect
        ('1,2', '1,2'): 0,  # Not possible
        ('1,2', '2,1'): 0,  # Not possible
        ('1,2', '2,2'): 0,  # Not possible
        ('2,1', '1,1'): stay_split,  # AYC^AYC^!(INV^ACCEPT) | !(AYC) | !(AYC) | !(AYC*AYC)
        ('2,1', '1,2'): 0,  # Not possible
        ('2,1', '2,1'): merge_partial,
        ('2,1', '2,2'): merge_full,
        ('2,2', '1,1'): (1 - p ** 2) ** 2,        # !AYC ^ !AYC
        ('2,2', '1,2'): (p ** 2) * (1 - p ** 2),  # AYC ^ !AYC
        ('2,2', '2,1'): (p ** 2) * (1 - p ** 2),  # AYC ^ !AYC
        ('2,2', '2,2'): (p ** 2) ** 2,            # AYC ^ AYC
    }
    return chain
def chain_state_to_proj_state(fs):
    """Project a full state label 'a,b' onto its first component 'a'.

    Like the original two-element unpack, this raises ValueError when the
    label does not contain exactly one comma.
    """
    first_part, _second_part = fs.split(',')
    return first_part
def chain_key_to_proj_key(fs, ts):
    """Project a (from, to) pair of full state labels onto their first
    components, returning the projected (from, to) tuple."""
    return (chain_state_to_proj_state(fs), chain_state_to_proj_state(ts))
def project_transitions(p):
    """Collapse the full labelled chain onto integer 'leader-count' states.

    Builds the full chain for message-success probability p, computes its
    steady state with pykov, derives the conditional probability of each
    underlying state given its projection, and uses those weights to fold
    every original transition into a {(int, int): probability} dict.
    """
    chain = make_chain(p)
    #A projection is created by finding the steady state of the whole chain:
    T = pykov.Chain(chain)
    ss = T.steady()
    # Group the steady-state mass by projected state: proj -> [(full, prob)].
    normalsteady = {}
    for fs in ss:
        proj = chain_state_to_proj_state(fs)
        try:
            normalsteady[proj].append( (fs,ss[fs]) )
        except KeyError:
            normalsteady[proj] = [ (fs,ss[fs]) ]
    # Normalize within each projection: probability of each original state
    # given that you are in its projected state.
    normalsteady2 = {}
    for key in normalsteady:
        normalsteady2[key] = {}
        s = sum([ x[1] for x in normalsteady[key] ])
        assert s <= 1
        for i in range(len(normalsteady[key])):
            k = normalsteady[key][i][0]
            normalsteady2[key][k] = normalsteady[key][i][1]/s
    # normalsteady2 now holds P(full state | projected state); use it to
    # estimate the projection.
    collapsed = {}
    for (fs,ts) in chain:
        fr,to = chain_key_to_proj_key(fs,ts)
        try:
            collapsed[ (fr,to) ].append( (fs,ts) )
        except KeyError:
            collapsed[ (fr,to) ] = [ (fs,ts) ]
    newchain = {}
    # collapsed maps each projected transition to the actual transitions that
    # realize it; weight each by the conditional source-state probability.
    for key in collapsed:
        for oldstate in collapsed[key]:
            oc = chain[oldstate]
            nc = normalsteady2[key[0]][oldstate[0]]
            f,t = key
            f = int(f)
            t = int(t)
            try:
                newchain[ (f,t) ] += nc * oc
            except KeyError:
                newchain[ (f,t) ] = nc*oc
    return newchain
@memoized
def sys_config(procs, p):
    """Enumerate configurations of a `procs`-process subsystem.

    Returns a list of ([group_size, ...], probability) pairs describing how
    the remaining processes may be partitioned into groups, with the chance
    of each partition. Mutually recursive with design().
    """
    assert(procs > 0)
    # From the subdesign, you can determine the probability the rest of the
    # system is in a given state. Using this, you can compute the probability
    # that you will do an election with those processes.
    sub_design = design(procs, p)
    if sub_design == None:
        return []
    # An election can have lots of different outcomes, but the list of
    # outcomes is decided by the number and sizes of the groups. Get the
    # steady state of the sub design (the 1-process case is trivially fixed).
    if procs != 1:
        T = pykov.Chain(sub_design)
        ss = T.steady()
    else:
        ss = {1:1}
    # ss yields a partial state. Keep yielding partial states until enough
    # are accumulated to describe the remainder of the system.
    result = []
    for config in ss:
        if config < procs:
            for subconfig in sys_config(procs-config, p):
                # subconfig is a ([sizes], probability) pair; any subconfig
                # can be combined with `config` to yield a configuration that
                # accounts for all `procs` processes.
                sconf, sp = subconfig
                # so we combine the config with the subconfig
                result.append( ([config]+sconf, sp * ss[config]) )
        else:
            result.append( ([config], ss[config]) )
    return result
@memoized
def detection(lgroup, p):
    """Enumerate survival outcomes of the pre-election AYC/AYT exchange for a
    group of `lgroup` processes, one of which is the leader.

    Each of the lgroup-1 followers independently keeps its membership with
    probability p**4 (presumably a four-message exchange -- inferred from the
    exponent; confirm against the protocol description). Returns a list of
    (stayers_including_leader, leavers, probability) tuples.

    Fixes over the original: the inner loop variable shadowed the probability
    parameter `p` (harmless here only because sp/fp were computed first), and
    the dead locals `fails`, `noparticipate` and `leave` are removed.
    """
    pingroup = lgroup - 1
    sp = p ** 4        # one follower survives its exchange
    fp = 1 - sp        # one follower drops out
    outcome_prob = {}  # stayer count -> accumulated probability
    for combo in itertools.product([True, False], repeat=pingroup):
        stay = 0
        stayp = 1
        for survived in combo:
            if survived:
                stay += 1
                stayp *= sp
            else:
                stayp *= fp
        try:
            outcome_prob[stay] += stayp
        except KeyError:
            outcome_prob[stay] = stayp
    # `stay + 1` re-adds the leader itself to the stayer count.
    return [(stay + 1, pingroup - stay, prob) for stay, prob in outcome_prob.items()]
@memoized
def election_outcomes(leaders, p):
    """List every success/failure pattern for `leaders` simultaneous
    elections together with its probability.

    A single election succeeds with probability p**4 * p**2. Returns
    [(combo, probability), ...] where combo is a tuple of booleans, one per
    election.
    """
    sp = p ** 4 * p ** 2
    fp = 1 - sp
    outcomes = []
    for combo in itertools.product([True, False], repeat=leaders):
        wins = sum([1 for c in combo if c])
        losses = sum([1 for c in combo if not c])
        outcomes.append((combo, (sp ** wins) * (fp ** losses)))
    return outcomes
def transition(lgroup, config, p):
    """Possible next sizes of the leader's group and their probabilities.

    :param lgroup: integer -- size of the leader's group.
    :param config: list of integers -- size of each other group in the system.
    :param p: per-message success probability.
    Returns a list of (resulting_group_size, probability) pairs.
    """
    # All AYC/AYT outcomes for the leader's own group.
    ldrs = detection(lgroup, p)
    cmb = [ ]
    # For each group in the configuration, make a list of AYC/AYT outcomes.
    # Result will be in the form (stay, leave, probability).
    for group in config:
        grps = detection(group, p)
        cmb.append(grps)
    ds = {}
    # For each possible AYC/AYT result for each group, create the product to
    # get all possible interleavings of successes & failures.
    for combo in itertools.product( *cmb ):
        # combo is a tuple of type ((stay, leave, p), (stay, leave, p), ...)
        for ldr in ldrs:
            # For each remaining group, check to see if the election succeeds.
            ele = election_outcomes(len(config),p)
            # outcome is a True/False list of whether each group joins the
            # leader; outp is the chance of that pattern.
            for outcome, outp in ele:
                solos = ldr[1] # Processes that leave the leader's group
                mygroup = ldr[0] # The processes in my group to start
                mygroup_p = outp*ldr[2] # The probability this election works
                r = zip(outcome,combo) # Pair each join flag with that group's observed outcome
                for sr in r:
                    if sr[0]:
                        mygroup += sr[1][0]
                        mygroup_p *= sr[1][2]
                        solos += sr[1][1]
                # Do I care about the solos, other than that they won't participate?
                try:
                    ds[mygroup] += mygroup_p
                except KeyError:
                    ds[mygroup] = mygroup_p
    rlist = []
    for group in ds:
        rlist.append( (group, ds[group]) )
    return rlist
@memoized
def design(procs, p):
    """Markov transition table {(from_size, to_size): prob} for a system of
    `procs` processes with per-message success probability `p`.

    Mutually recursive with sys_config()/transition() on smaller system
    sizes; memoized to keep the recursion tractable.
    """
    # Single pre-formatted string so the trace prints identically on
    # Python 2 and 3 (the original used Python-2-only print syntax).
    print("DESIGN: {}".format(procs))
    if procs == 0:
        return None
    if procs == 1:
        return {(1, 1): 1}
    if procs == 2:
        # NOTE(review): this base case hard-codes 0.95 and ignores the `p`
        # argument -- looks like a leftover; confirm whether it should be
        # project_transitions(p). Left unchanged to preserve behavior.
        return project_transitions(.95)
    else:
        trans = {}
        for others in range(procs):
            # `others` processes live outside the main group of interest.
            main_group = procs - others
            if others > 0:
                remain = sys_config(others, p)
            else:
                remain = []
            # Accumulate, per destination size, the probability of ending up
            # there, weighted by how likely each outside configuration is.
            goneto = {}
            if remain:
                for (conf, confp) in remain:
                    for dest, destp in transition(main_group, conf, p):
                        try:
                            goneto[dest] += destp * confp
                        except KeyError:
                            goneto[dest] = destp * confp
            else:
                # No outsiders: only the main group's own AYC/AYT survival
                # outcomes matter.
                mg_change = detection(main_group, p)
                for stay, _, stayp in mg_change:
                    try:
                        goneto[stay] += stayp
                    except KeyError:
                        goneto[stay] = stayp
            for dest in goneto:
                try:
                    trans[(main_group, dest)] += goneto[dest]
                except KeyError:
                    trans[(main_group, dest)] = goneto[dest]
        return trans
def verify_design(d):
    """Sanity-check a transition table: for every source state in `d`, print
    how far its outgoing probability mass is from 1 (0.0 means well-formed).

    Fix over the original: the Python-2-only print statement (a SyntaxError
    on Python 3) is replaced with a single pre-formatted string, which prints
    identically on both interpreter lines.
    """
    totals = {}
    for (f, t) in d:
        try:
            totals[f] += d[(f, t)]
        except KeyError:
            totals[f] = d[(f, t)]
    for s in totals:
        print("VERIFY {}: {}".format(s, 1.0 - totals[s]))
# Smoke run: build the 100-process design at p = 0.5, check that each state's
# outgoing probabilities sum to 1, then dump the table. The bare Python-2
# `print d` was converted to the call form so the script parses on Python 3.
d = design(100, 0.5)
verify_design(d)
print(d)
| [
"[email protected]"
] | |
e9d5e424f4993447195081dbf41cd2b21b8763a5 | 2bb8c48b333cad834d7a7d8eef8f80b3fd864e2a | /ddpg2.py | ea92ce5c97db4fa581612644988629619908c895 | [
"MIT"
] | permissive | iitkliv/RL-cardriving | cfc80bffa81f96c0aaa0b70fd242c81a93b14ed2 | 4bae4a7e1440bdec07193aa6a03c5460947a6922 | refs/heads/master | 2021-01-20T10:55:37.258750 | 2017-10-20T04:24:00 | 2017-10-20T04:24:00 | 101,656,197 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,448 | py | from gym_torcs import TorcsEnv
from os import system
import numpy as np
import random
import argparse
from keras.models import model_from_json, Model
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.optimizers import Adam
import tensorflow as tf
from keras.engine.training import collect_trainable_weights
import pyscreenshot as ImageGrab
import json
from PIL import Image
from ReplayBuffer import ReplayBuffer
from ActorNetwork import ActorNetwork
from CriticNetwork import CriticNetwork
from OU import OU
import timeit
OU = OU() #Ornstein-Uhlenbeck Process
def playGame(train_indicator=1):    #1 means Train, 0 means simply Run
    """Run the DDPG train/drive loop against the TORCS simulator.

    train_indicator=1 updates actor/critic every step and saves weights each
    episode; 0 only drives with the current policy. Side effects: loads and
    saves *.h5/*.json model files, writes screenshots, launches/kills TORCS.
    """
    # --- screen-recording state ---
    images = []
    images_i = 0
    images_g = 0
    images_save = 0
    images_size = 20
    record = 0       # 0: external `scrot` screenshots; 1: in-process grabs
    record_time = 1  # 1: print timing of grab/save operations
    # --- DDPG hyperparameters ---
    BUFFER_SIZE = 100000
    BATCH_SIZE = 32
    GAMMA = 0.99
    TAU = 0.001     #Target Network HyperParameters
    LRA = 0.0001    #Learning rate for Actor
    LRC = 0.001     #Learning rate for Critic
    action_dim = 3  #Steering/Acceleration/Brake
    state_dim = 29  #of sensors input
    np.random.seed(1337)
    vision = True
    EXPLORE = 100000.   # steps over which exploration noise is annealed
    episode_count = 2000
    max_steps = 100000
    reward = 0
    done = False
    step = 0
    epsilon = 1
    indicator = 0
    #Tensorflow GPU optimization
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    from keras import backend as K
    K.set_session(sess)
    actor = ActorNetwork(sess, state_dim, action_dim, BATCH_SIZE, TAU, LRA)
    critic = CriticNetwork(sess, state_dim, action_dim, BATCH_SIZE, TAU, LRC)
    buff = ReplayBuffer(BUFFER_SIZE)    #Create replay buffer
    # Generate a Torcs environment
    env = TorcsEnv(vision=vision, throttle=True,gear_change=False)
    # Resume from previously saved weights when present.
    print("Now we load the weight")
    try:
        actor.model.load_weights("actormodel.h5")
        critic.model.load_weights("criticmodel.h5")
        actor.target_model.load_weights("actormodel.h5")
        critic.target_model.load_weights("criticmodel.h5")
        print("Weight load successfully")
    except:
        print("Cannot find the weight")
    print("TORCS Experiment Start.")
    for i in range(episode_count):
        print("Episode : " + str(i) + " Replay Buffer " + str(buff.count()))
        if np.mod(i, 7) == 0:
            ob = env.reset(relaunch=True)   #relaunch TORCS every 7 episodes because of the memory leak error
        else:
            ob = env.reset()
        # if vision == True:
        #     img = Image.fromarray(ob.img, 'RGB')
        #     print ob.img
        #     img.show()
        #     img.save('my.png')
        #     vision = False
        s_t = np.hstack((ob.angle, ob.track, ob.trackPos, ob.speedX, ob.speedY, ob.speedZ, ob.wheelSpinVel/100.0, ob.rpm))
        total_reward = 0.
        for j in range(max_steps):
            loss = 0
            epsilon -= 1.0 / EXPLORE
            a_t = np.zeros([1,action_dim])
            noise_t = np.zeros([1,action_dim])
            # Ornstein-Uhlenbeck exploration noise, scaled down as epsilon anneals.
            a_t_original = actor.model.predict(s_t.reshape(1, s_t.shape[0]))
            noise_t[0][0] = train_indicator * max(epsilon, 0) * OU.function(a_t_original[0][0], 0.0 , 0.60, 0.3)
            noise_t[0][1] = train_indicator * max(epsilon, 0) * OU.function(a_t_original[0][1], 0.5 , 1, 0.1)
            noise_t[0][2] = train_indicator * max(epsilon, 0) * OU.function(a_t_original[0][2], -0.1 , 1.00, 0.05)
            #The following code do the stochastic brake
            if random.random() <= 0.1:
                # print("********Now we apply the brake***********")
                noise_t[0][2] = train_indicator * max(epsilon, 0) * OU.function(a_t_original[0][2], 0.2 , 1.00, 0.10)
            a_t[0][0] = a_t_original[0][0] + noise_t[0][0]
            a_t[0][1] = a_t_original[0][1] + noise_t[0][1]
            a_t[0][2] = a_t_original[0][2] + noise_t[0][2]
            ob, r_t, done, info = env.step(a_t[0])
            s_t1 = np.hstack((ob.angle, ob.track, ob.trackPos, ob.speedX, ob.speedY, ob.speedZ, ob.wheelSpinVel/100.0, ob.rpm))
            buff.add(s_t, a_t[0], r_t, s_t1, done)      #Add replay buffer
            # Capture the screen, either in-process (record==1, batched in
            # groups of images_size) or via the external scrot tool.
            # NOTE: the two `print "..."` lines below are Python-2-only syntax.
            if record == 1:
                if images_i == images_size:
                    if record_time == 1:
                        start_time = timeit.default_timer()
                    for im in images:
                        im.save('data/out_' + str(images_save) + '.jpg', 'JPEG')
                        images_save+=1
                    images = []
                    images_i = 0
                    if record_time == 1:
                        print "saving: " + str(timeit.default_timer() - start_time)
                else:
                    if record_time == 1:
                        start_time = timeit.default_timer()
                    images_i += 1
                    images_g += 1
                    images.append(ImageGrab.grab(bbox=(66,51,710,535)))
                    if record_time == 1:
                        print "grabbing: " + str(timeit.default_timer() - start_time)
            else:
                system('scrot -u data/screenshot'+str(images_g)+'.jpg')
                images_g += 1
            #Do the batch update
            batch = buff.getBatch(BATCH_SIZE)
            states = np.asarray([e[0] for e in batch])
            actions = np.asarray([e[1] for e in batch])
            rewards = np.asarray([e[2] for e in batch])
            new_states = np.asarray([e[3] for e in batch])
            dones = np.asarray([e[4] for e in batch])
            y_t = np.asarray([e[1] for e in batch])
            # Bellman targets from the target networks.
            target_q_values = critic.target_model.predict([new_states, actor.target_model.predict(new_states)])
            for k in range(len(batch)):
                if dones[k]:
                    y_t[k] = rewards[k]
                else:
                    y_t[k] = rewards[k] + GAMMA*target_q_values[k]
            if (train_indicator):
                # Critic step, then actor step along the critic's gradient,
                # then soft-update both target networks.
                loss += critic.model.train_on_batch([states,actions], y_t)
                a_for_grad = actor.model.predict(states)
                grads = critic.gradients(states, a_for_grad)
                actor.train(states, grads)
                actor.target_train()
                critic.target_train()
            total_reward += r_t
            s_t = s_t1
            if np.mod(step, 17) == 0:
                print("Episode", i, "Step", step, "Action", a_t, "Reward", r_t, "Loss", loss)
            step += 1
            if done:
                break
        # `if True or ...` forces a save every episode.
        if True or np.mod(i, 3) == 0:
            if (train_indicator):
                print("Now we save model")
                actor.model.save_weights("actormodel.h5", overwrite=True)
                with open("actormodel.json", "w") as outfile:
                    json.dump(actor.model.to_json(), outfile)
                critic.model.save_weights("criticmodel.h5", overwrite=True)
                with open("criticmodel.json", "w") as outfile:
                    json.dump(critic.model.to_json(), outfile)
        print("TOTAL REWARD @ " + str(i) +"-th Episode  : Reward " + str(total_reward))
        print("Total Step: " + str(step))
        print("")
    env.end()  # This is for shutting down TORCS
    print("Finish.")
if __name__ == "__main__":
playGame()
| [
"[email protected]"
] | |
f018f27fad6688e18eb4f7cc4e3a814455dc2612 | ec9a28aa745c0f507c16f3b9cc5223677ba15bd6 | /node_modules/karma/node_modules/chokidar/node_modules/fsevents/build/config.gypi | d17f7d61897144ef0586649f347ca804c29746d3 | [
"MIT"
] | permissive | jprystowsky/example-hyper-angular-project | aba56991640494cf9a36929c060ceca4fc64ef03 | a2cf2a4826410f327c29f5449e12fb8e1c1243fd | refs/heads/master | 2021-06-18T00:17:09.640371 | 2014-04-20T14:50:27 | 2014-04-20T14:50:27 | 18,119,542 | 0 | 3 | null | 2017-03-17T04:12:07 | 2014-03-25T22:55:55 | JavaScript | UTF-8 | Python | false | false | 3,098 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"clang": 1,
"host_arch": "x64",
"node_install_npm": "true",
"node_prefix": "",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_v8": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_unsafe_optimizations": 0,
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"python": "/usr/bin/python",
"target_arch": "x64",
"v8_enable_gdbjit": 0,
"v8_no_strict_aliasing": 1,
"v8_use_snapshot": "false",
"nodedir": "/Users/jprystowsky/.node-gyp/0.10.26",
"copy_dev_lib": "true",
"standalone_static_library": 1,
"save_dev": "",
"browser": "",
"viewer": "man",
"rollback": "true",
"usage": "",
"globalignorefile": "/usr/local/etc/npmignore",
"init_author_url": "",
"shell": "/bin/bash",
"parseable": "",
"shrinkwrap": "true",
"email": "[email protected]",
"init_license": "ISC",
"cache_max": "null",
"init_author_email": "",
"sign_git_tag": "",
"cert": "",
"git_tag_version": "true",
"local_address": "",
"long": "",
"registry": "https://registry.npmjs.org/",
"fetch_retries": "2",
"npat": "",
"key": "",
"message": "%s",
"versions": "",
"globalconfig": "/usr/local/etc/npmrc",
"always_auth": "",
"cache_lock_retries": "10",
"heading": "npm",
"fetch_retry_mintimeout": "10000",
"proprietary_attribs": "true",
"json": "",
"description": "true",
"engine_strict": "",
"https_proxy": "",
"init_module": "/Users/jprystowsky/.npm-init.js",
"userconfig": "/Users/jprystowsky/.npmrc",
"node_version": "v0.10.26",
"user": "24561",
"editor": "vi",
"save": "",
"tag": "latest",
"global": "",
"username": "jprystowsky",
"optional": "true",
"bin_links": "true",
"force": "",
"searchopts": "",
"depth": "null",
"rebuild_bundle": "true",
"searchsort": "name",
"unicode": "true",
"fetch_retry_maxtimeout": "60000",
"ca": "",
"save_prefix": "^",
"strict_ssl": "true",
"dev": "",
"fetch_retry_factor": "10",
"group": "20",
"save_exact": "",
"cache_lock_stale": "60000",
"version": "",
"cache_min": "10",
"cache": "/Users/jprystowsky/.npm",
"searchexclude": "",
"color": "true",
"save_optional": "",
"ignore_scripts": "",
"user_agent": "node/v0.10.26 darwin x64",
"cache_lock_wait": "10000",
"production": "",
"save_bundle": "",
"umask": "18",
"git": "git",
"init_author_name": "",
"onload_script": "",
"tmp": "/var/folders/fl/3t03lmm57v3btrcdbznfgkcr0000gn/T/",
"unsafe_perm": "true",
"link": "",
"prefix": "/usr/local"
}
}
| [
"[email protected]"
] | |
3eea468ceb51c63b198faf657321babe1e09b22b | 852130b4772157b8f564ffeb9e1a6fb6c6f9b4b5 | /eth2/_utils/bls/backends/__init__.py | 013ac96997a324b6cb2eecbba0d32b55b9e9a49d | [
"MIT"
] | permissive | nanspro/trinity | 58d14e55e1fd1462b9ee3d09c214207dfa286f59 | 1f6b90a5fe2a25c93bd7d5db0f93367bbe33bced | refs/heads/master | 2020-06-20T16:38:24.596423 | 2019-07-02T13:41:20 | 2019-07-16T10:04:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 202 | py | from .chia import ChiaBackend
from .noop import NoOpBackend
from .py_ecc import PyECCBackend
# Backend used when callers do not pick one explicitly.
DEFAULT_BACKEND = ChiaBackend
# Every BLS backend implementation this package exposes.
AVAILABLE_BACKENDS = (
    ChiaBackend,
    NoOpBackend,
    PyECCBackend,
)
| [
"[email protected]"
] | |
8e335ed82f11764ef9b485e840b67c807e15af5f | 097eae4e0190da97570ae7db748fca306f977fbd | /py/learn/test/mysql/add.py | 82cd36d8cfc97e84de13a09dd3dd95bdc3f21291 | [] | no_license | jiaolj/other | 42257c593495d97ab98b9a9af00d3791ccce7a57 | 78d0366cbd599f4dde7bf6e44ca4cfc373132418 | refs/heads/master | 2021-05-24T04:14:03.829126 | 2016-08-28T07:40:49 | 2016-08-28T07:40:49 | 64,064,262 | 0 | 1 | null | 2020-07-23T17:05:36 | 2016-07-24T12:25:56 | JavaScript | UTF-8 | Python | false | false | 2,814 | py | #-*- coding:utf-8 -*-
import re

from mysql_config import conn_151_DInsight
from JMysql import JMysql
import re
db=JMysql(conn_151_DInsight)
db.open()
def getjson():
# data={}
data=''
result=db.fetchall('select nm,el,nl from latitude_longitude')
for rst in result:
#data[rst[0]]=[rst[1],rst[2]]
data+="'"+rst[0]+"':["+str(rst[1])+","+str(rst[2])+"], \n"
return '{'+data+'}'
def get_format(dt,d,c):
dt=dt.replace(' ',' ')
rl=dt.split(' ')
nm=rl[0]
p=nm
el=float(rl[1].replace(':','.'))
nl=float(rl[2].replace(':','.'))
print d,c,p,nm,el,nl
#return
argument=[d,c,p,nm,el,nl]
ret=db.fetchone('select id from latitude_longitude where nm=%s',[nm])
if not ret:
db.update('insert into latitude_longitude(d,c,p,nm,el,nl) values(%s,%s,%s,%s,%s,%s)', argument)
def get_format2(dt,d):
dt=re.sub(' ','~',dt)
dt=re.sub('[~]+','~',dt)
dt=re.sub(' ','~',dt)
dt=dt.replace(' ','').replace(' ','')
rl=dt.split('~')
print dt
c=rl[0]
nm=rl[1]
p=nm
el=float(rl[2].replace(':','.').replace('东经','').replace('西经','-'))
nl=float(rl[3].replace(':','.').replace('北纬','').replace('南纬','-'))
print d,c,p,nm,el,nl
#return
argument=[d,c,p,nm,el,nl]
ret=db.fetchone('select id from latitude_longitude where nm=%s',[nm])
if not ret:
db.update('insert into latitude_longitude(d,c,p,nm,el,nl) values(%s,%s,%s,%s,%s,%s)', argument)
#添加中国各地区
files=open('1.txt','r')
n=0
for r in files:
n+=1
'''if n>3 and n<630:
rl=r.split(' ')
if len(rl)==4:
p=rl[0]
nm=rl[1]
nl=float(rl[2].replace('北纬',''))
el=float(rl[3].replace('东经',''))
argument=['亚洲','中国',p,nm,el,nl]
ret=db.fetchone('select id from latitude_longitude where nm=%s',[nm])
if not ret:
db.update('insert into latitude_longitude(d,c,p,nm,el,nl) values(%s,%s,%s,%s,%s,%s)', argument)
if n==682:get_format(r,'亚洲','朝鲜')
if n>=684 and n<689:get_format(r,'亚洲','韩国')
if n>=690 and n<700:get_format(r,'亚洲','日本')
if n>=702 and n<713:get_format2(r,'亚洲')
if n>=718 and n<725:get_format2(r,'亚洲')
if n>=726 and n<737:get_format2(r,'亚洲')
if n>=739 and n<794:get_format2(r,'欧洲')
if n>=796 and n<823:get_format2(r,'美洲')
if n>=825 and n<853:get_format2(r,'美洲')
if n>=855 and n<889:get_format2(r,'非洲')
if n>=891 and n<898:get_format2(r,'大洋洲')
if n>=900 and n<915:get_format2(r,'欧洲')
if n>=917 and n<948:get_format2(r,'亚洲')
if n>=949 and n<1004:get_format2(r,'欧洲')
'''
files.close()
print getjson()
db.close() | [
"[email protected]"
] | |
4ff33526a5296d3ee163149c749ff5b664b8a69c | 373ad14b1df083915b83d9ef72648206df23d48c | /countries/countries.py | db1c79ef5f4b76adf22be3179a5537c604b5433b | [] | no_license | Zadigo/my_python_codes | 684f2788175f555a90d51566f4c07d3a3a505419 | 994943e7e7dc82c1873f1cf3e2dcb13f367faf84 | refs/heads/master | 2021-05-26T06:00:17.040723 | 2020-05-26T18:43:33 | 2020-05-26T18:43:33 | 127,773,216 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 9,417 | py | COUNTRIES = (
('afghanistan', 'afghanistan'),
('åland islands', 'åland islands'),
('albania', 'albania'),
('algeria', 'algeria'),
('american samoa', 'american samoa'),
('andorra', 'andorra'),
('angola', 'angola'),
('anguilla', 'anguilla'),
('antarctica', 'antarctica'),
('antigua and barbuda', 'antigua and barbuda'),
('argentina', 'argentina'),
('armenia', 'armenia'),
('aruba', 'aruba'),
('australia', 'australia'),
('austria', 'austria'),
('azerbaijan', 'azerbaijan'),
('bahamas', 'bahamas'),
('bahrain', 'bahrain'),
('bangladesh', 'bangladesh'),
('barbados', 'barbados'),
('belarus', 'belarus'),
('belgium', 'belgium'),
('belize', 'belize'),
('benin', 'benin'),
('bermuda', 'bermuda'),
('bhutan', 'bhutan'),
('bolivia (plurinational state of)', 'bolivia (plurinational state of)'),
('bonaire, sint eustatius and saba', 'bonaire, sint eustatius and saba'),
('bosnia and herzegovina', 'bosnia and herzegovina'),
('botswana', 'botswana'),
('bouvet island', 'bouvet island'),
('brazil', 'brazil'),
('british indian ocean territory', 'british indian ocean territory'),
('united states minor outlying islands', 'united states minor outlying islands'),
('virgin islands (british)', 'virgin islands (british)'),
('virgin islands (u.s.)', 'virgin islands (u.s.)'),
('brunei darussalam', 'brunei darussalam'),
('bulgaria', 'bulgaria'),
('burkina faso', 'burkina faso'),
('burundi', 'burundi'),
('cambodia', 'cambodia'),
('cameroon', 'cameroon'),
('canada', 'canada'),
('caboverde', 'cabo verde'),
('cayman islands', 'cayman islands'),
('central african republic', 'central african republic'),
('chad', 'chad'),
('chile', 'chile'),
('china', 'china'),
('christmas island', 'christmas island'),
('cocos (keeling) islands', 'cocos (keeling) islands'),
('colombia', 'colombia'),
('comoros', 'comoros'),
('congo', 'congo'),
('congo (democratic republic of the)', 'congo (democratic republic of the)'),
('cook islands', 'cook islands'),
('costa rica', 'costa rica'),
('croatia', 'croatia'),
('cuba', 'cuba'),
('curaçao', 'curaçao'),
('cyprus', 'cyprus'),
('czech republic', 'czech republic'),
('denmark', 'denmark'),
('djibouti', 'djibouti'),
('dominica', 'dominica'),
('dominican republic', 'dominican republic'),
('ecuador', 'ecuador'),
('egypt', 'egypt'),
('el salvador', 'el salvador'),
('equatorial guinea', 'equatorial guinea'),
('eritrea', 'eritrea'),
('estonia', 'estonia'),
('ethiopia', 'ethiopia'),
('falkland islands (malvinas)', 'falkland islands (malvinas)'),
('faroe islands', 'faroe islands'),
('fiji', 'fiji'),
('finland', 'finland'),
('france', 'france'),
('french guiana', 'french guiana'),
('french polynesia', 'french polynesia'),
('french southern territories', 'french southern territories'),
('gabon', 'gabon'),
('gambia', 'gambia'),
('georgia', 'georgia'),
('germany', 'germany'),
('ghana', 'ghana'),
('gibraltar', 'gibraltar'),
('greece', 'greece'),
('greenland', 'greenland'),
('grenada', 'grenada'),
('guadeloupe', 'guadeloupe'),
('guam', 'guam'),
('guatemala', 'guatemala'),
('guernsey', 'guernsey'),
('guinea', 'guinea'),
('guinea-bissau', 'guinea-bissau'),
('guyana', 'guyana'),
('haiti', 'haiti'),
('heard island and mcdonald islands', 'heard island and mcdonald islands'),
('holy see', 'holy see'), ('honduras', 'honduras'),
('hong kong', 'hong kong'),
('hungary', 'hungary'),
('iceland', 'iceland'),
('india', 'india'), ('indonesia', 'indonesia'),
("côte d'ivoire", "côte d'ivoire"),
('iran (islamic republic of)', 'iran (islamic republic of)'),
('iraq', 'iraq'),
('ireland', 'ireland'),
('isle of man', 'isle of man'),
('israel', 'israel'),
('italy', 'italy'),
('jamaica', 'jamaica'),
('japan', 'japan'),
('jersey', 'jersey'), ('jordan', 'jordan'),
('kazakhstan', 'kazakhstan'),
('kenya', 'kenya'),
('kiribati', 'kiribati'),
('kuwait', 'kuwait'),
('kyrgyzstan', 'kyrgyzstan'),
("lao people's democratic republic", "lao people's democratic republic"),
('latvia', 'latvia'),
('lebanon', 'lebanon'),
('lesotho', 'lesotho'),
('liberia', 'liberia'),
('libya', 'libya'),
('liechtenstein', 'liechtenstein'),
('lithuania', 'lithuania'),
('luxembourg', 'luxembourg'),
('macao', 'macao'),
('macedonia (the former yugoslav republic of)', 'macedonia (the former yugoslav republic of)'),
('madagascar', 'madagascar'),
('malawi', 'malawi'),
('malaysia', 'malaysia'),
('maldives', 'maldives'),
('mali', 'mali'),
('malta', 'malta'),
('marshall islands', 'marshall islands'),
('martinique', 'martinique'),
('mauritania', 'mauritania'),
('mauritius', 'mauritius'),
('mayotte', 'mayotte'),
('mexico', 'mexico'),
('micronesia (federated states of)', 'micronesia (federated states of)'),
('moldova (republic of)', 'moldova (republic of)'),
('monaco', 'monaco'),
('mongolia', 'mongolia'),
('montenegro', 'montenegro'),
('montserrat', 'montserrat'),
('morocco', 'morocco'),
('mozambique', 'mozambique'),
('myanmar', 'myanmar'),
('namibia', 'namibia'),
('nauru', 'nauru'),
('nepal', 'nepal'),
('netherlands', 'netherlands'),
('new caledonia', 'new caledonia'),
('new zealand', 'new zealand'),
('nicaragua', 'nicaragua'),
('niger', 'niger'),
('nigeria', 'nigeria'),
('niue', 'niue'),
('norfolk island', 'norfolk island'),
("korea (democratic people's republic of)", "korea (democratic people's republic of)"),
('northern mariana islands', 'northern mariana islands'),
('norway', 'norway'),
('oman', 'oman'),
('pakistan', 'pakistan'),
('palau', 'palau'),
('palestine, state of', 'palestine, state of'),
('panama', 'panama'),
('papua new guinea', 'papua new guinea'),
('paraguay', 'paraguay'),
('peru', 'peru'),
('philippines', 'philippines'),
('pitcairn', 'pitcairn'),
('poland', 'poland'),
('portugal', 'portugal'),
('puerto rico', 'puerto rico'),
('qatar', 'qatar'),
('republic of kosovo', 'republic of kosovo'),
('réunion', 'réunion'), ('romania', 'romania'),
('russian federation', 'russian federation'),
('rwanda', 'rwanda'),
('saint barthélemy', 'saint barthélemy'),
('saint helena, ascension and tristan da cunha', 'saint helena, ascension and tristan da cunha'),
('saint kitts and nevis', 'saint kitts and nevis'),
('saint lucia', 'saint lucia'),
('saint martin (french part)', 'saint martin (french part)'),
('saint pierre and miquelon', 'saint pierre and miquelon'),
('saint vincent and the grenadines', 'saint vincent and the grenadines'),
('samoa', 'samoa'),
('san marino', 'san marino'),
('sao tome and principe', 'sao tome and principe'),
('saudi arabia', 'saudi arabia'),
('senegal', 'senegal'),
('serbia', 'serbia'),
('seychelles', 'seychelles'),
('sierra leone', 'sierra leone'),
('singapore', 'singapore'),
('sint maarten (dutch part)', 'sint maarten (dutch part)'),
('slovakia', 'slovakia'),
('slovenia', 'slovenia'),
('solomon islands', 'solomon islands'),
('somalia', 'somalia'),
('south africa', 'south africa'),
('south georgia and the south sandwich islands', 'south georgia and the south sandwich islands'),
('korea (republic of)', 'korea (republic of)'),
('south sudan', 'south sudan'),
('spain', 'spain'),
('sri lanka', 'sri lanka'),
('sudan', 'sudan'),
('suriname', 'suriname'),
('svalbard and jan mayen', 'svalbard and jan mayen'),
('swaziland', 'swaziland'),
('sweden', 'sweden'),
('switzerland', 'switzerland'),
('syrian arab republic', 'syrian arab republic'),
('taiwan', 'taiwan'),
('tajikistan', 'tajikistan'),
('tanzania, united republic of', 'tanzania, united republic of'),
('thailand', 'thailand'),
('timor-leste', 'timor-leste'),
('togo', 'togo'),
('tokelau', 'tokelau'),
('tonga', 'tonga'),
('trinidad and tobago', 'trinidad and tobago'),
('tunisia', 'tunisia'),
('turkey', 'turkey'),
('turkmenistan', 'turkmenistan'),
('turks and caicos islands', 'turks and caicos islands'),
('tuvalu', 'tuvalu'),
('uganda', 'uganda'),
('ukraine', 'ukraine'),
('united arab emirates', 'united arab emirates'),
('united kingdom of great britain and northern ireland', 'united kingdom of great britain and northern ireland'),
('united states of america', 'united states of america'),
('uruguay', 'uruguay'),
('uzbekistan', 'uzbekistan'),
('vanuatu', 'vanuatu'),
('venezuela (bolivarian republic of)', 'venezuela (bolivarian republic of)'),
('viet nam', 'viet nam'), ('wallis and futuna', 'wallis and futuna'),
('western sahara', 'western sahara'),
('yemen', 'yemen'),
('zambia', 'zambia'),
('zimbabwe', 'zimbabwe'),
)
import csv
with open('countries.csv', 'w', newline='', encoding='utf-8') as f:
c = csv.writer(f)
for country in COUNTRIES:
c.writerow(country) | [
"[email protected]"
] | |
dfe69f776d3890c30073a3cdc73f2020dcb0e00d | 85618742f1bded79cdbe0615d2096351a3b87c03 | /prediction2.py | 6a4e90d071fdd483b4f8544b3f4c543eeea579b0 | [
"MIT"
] | permissive | ferdyandannes/Monocular-3D-Object-Detection | ea66aea34ae2987c60d023e722a0c0c9f258a70b | 85c424ce0ab386da9b30629819d63f7ec888c9c1 | refs/heads/master | 2022-11-20T12:30:40.826235 | 2020-07-21T05:19:19 | 2020-07-21T05:19:19 | 272,321,968 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,218 | py | ###############
import sys
print(sys.path)
sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')
print(sys.path)
import cv2
##############
import os
import argparse
import numpy as np
from utils.read_dir import ReadDir
from data_processing.KITTI_dataloader import KITTILoader
from utils.correspondece_constraint import *
import time
import math
from config import config as cfg
if cfg().network == 'vgg16':
from model import vgg16 as nn
if cfg().network == 'mobilenet_v2':
from model import mobilenet_v2_early_element as nn
if cfg().network == 'vgg16v2':
from model import vgg16v2 as nn
if cfg().network == 'vgg16_one':
from model import vgg16_minggu_pre as nn
def predict(args):
# complie models
model = nn.network()
# model.load_weights('3dbox_weights_1st.hdf5')
model.load_weights(args.w)
# KITTI_train_gen = KITTILoader(subset='training')
dims_avg, _ =KITTILoader(subset='training').get_average_dimension()
# list all the validation images
if args.a == 'training':
all_imgs = sorted(os.listdir(test_image_dir))
val_index = int(len(all_imgs)* cfg().split)
val_imgs = all_imgs[val_index:]
else:
val_imgs = sorted(os.listdir(test_image_dir))
start_time = time.time()
for i in val_imgs:
image_file = test_image_dir + i
depth_file = test_depth_dir + i
label_file = test_label_dir + i.replace('png', 'txt')
prediction_file = prediction_path + i.replace('png', 'txt')
calibration_file = test_calib_path + i.replace('png', 'txt')
#calibration_file = os.path.join('/media/ferdyan/NewDisk/Trajectory_Final/bbox_3d/0000.txt')
# write the prediction file
with open(prediction_file, 'w') as predict:
img = cv2.imread(image_file)
img = np.array(img, dtype='float32')
dpth = cv2.imread(depth_file)
dpth = np.array(dpth, dtype='float32')
P2 = np.array([])
for line in open(calibration_file):
if 'P2' in line:
P2 = line.split(' ')
P2 = np.asarray([float(i) for i in P2[1:]])
P2 = np.reshape(P2, (3,4))
for line in open(label_file):
line = line.strip().split(' ')
#print("line = ", line)
obj = detectionInfo(line)
xmin = int(obj.xmin)
xmax = int(obj.xmax)
ymin = int(obj.ymin)
ymax = int(obj.ymax)
if obj.name in cfg().KITTI_cat:
# cropped 2d bounding box
if xmin == xmax or ymin == ymax:
continue
# 2D detection area RGB image
patch = img[ymin : ymax, xmin : xmax]
patch = cv2.resize(patch, (cfg().norm_h, cfg().norm_w))
patch -= np.array([[[103.939, 116.779, 123.68]]])
# extend it to match the training dimension
patch = np.expand_dims(patch, 0)
# 2D detection area depth map
#patch_d = dpth[ymin : ymax, xmin : xmax]
#patch_d = cv2.resize(patch_d, (cfg().norm_h, cfg().norm_w))
#patch_d -= np.array([[[103.939, 116.779, 123.68]]])
# extend it to match the training dimension
#patch_d = np.expand_dims(patch_d, 0)
# one
prediction = model.predict([patch])
# two
#prediction = model.predict([patch, patch_d])
dim = prediction[0][0]
bin_anchor = prediction[1][0]
bin_confidence = prediction[2][0]
# update with predict dimension
dims = dims_avg[obj.name] + dim
obj.h, obj.w, obj.l = np.array([round(dim, 2) for dim in dims])
# update with predicted alpha, [-pi, pi]
obj.alpha = recover_angle(bin_anchor, bin_confidence, cfg().bin)
if math.isnan(obj.alpha) :
continue
# compute global and local orientation
obj.rot_global = obj.alpha +np.arctan(float(obj.tx) / float(obj.tz))
# compute and update translation, (x, y, z)
#obj.tx, obj.ty, obj.tz = translation_constraints(P2, obj, rot_local)
# output prediction label
output_line = obj.member_to_list()
output_line.append(1.0)
# Write regressed 3D dim and orientation to file
output_line = ' '.join([str(item) for item in output_line]) + '\n'
# output_organized = format(obj.name) + ' ' + format(obj.truncation) + ' ' + format(obj.occlusion) + ' ' + format(obj.alpha)
# + ' ' + format(obj.xmin) + ' ' + format(obj.ymin) + ' ' + format(obj.xmax) + ' ' + format(obj.ymax) + ' ' + format(obj.h)
# + ' ' + format(obj.w) + ' ' + format(obj.l) + ' ' + format(obj.tx) + ' ' + format(obj.ty) + ' ' + format(obj.tz) + ' ' + format(obj.rot_global)
output_organized = str(obj.name) + ' ' + str(obj.truncation) + ' ' + str(obj.occlusion) + ' ' + str(obj.alpha) + ' ' + str(obj.xmin) + ' ' + str(obj.ymin) + ' ' + str(obj.xmax) + ' ' + str(obj.ymax) + ' ' + str(obj.h) + ' ' + str(obj.w) + ' ' + str(obj.l) + ' ' + str(obj.tx) + ' ' + str(obj.ty) + ' ' + str(obj.tz) + ' ' + str(obj.rot_global) + ' 0.7' + '\n'
predict.write(output_organized)
print('Write predicted labels for: ' + str(i))
print("")
print("")
end_time = time.time()
process_time = (end_time - start_time) / len(val_imgs)
print(process_time)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Arguments for prediction',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# parser.add_argument('-d', '-dir', type=str, default='/media/ferdyan/NewDisk/3d_bbox_depth/train_dataset/', help='File to predict')
parser.add_argument('-d', '-dir', type=str, default='/media/ferdyan/NewDisk/3d_bounding_box/train_dataset/', help='File to predict')
parser.add_argument('-a', '-dataset', type=str, default='tracklet', help='training dataset or tracklet')
parser.add_argument('-w', '-weight', type=str, default='/media/ferdyan/NewDisk/3d_bbox_depth/Weights_train/weight_7Okt_Sore/model00000125.hdf5', help ='Load trained weights')
args = parser.parse_args()
# Todo: subset = 'training' or 'tracklet'
dir = ReadDir(args.d, subset=args.a,
tracklet_date='2011_09_26', tracklet_file='own')
test_label_dir = dir.label_dir
test_image_dir = dir.image_dir
test_depth_dir = dir.depth_dir
test_calib_path = dir.calib_dir
prediction_path = dir.prediction_dir
if not os.path.exists(prediction_path):
os.mkdir(prediction_path)
predict(args) | [
"[email protected]"
] | |
8ead6ef11a5ecfe08408957453a05ceaf07011eb | e7c3d2b1fd7702b950e31beed752dd5db2d127bd | /code/a_polynomial_modulo_the_square_of_a_prime/sol_457.py | 313cf886ddcfbb07ff41548f050bf154569ba645 | [
"Apache-2.0"
] | permissive | Ved005/project-euler-solutions | bbadfc681f5ba4b5de7809c60eb313897d27acfd | 56bf6a282730ed4b9b875fa081cf4509d9939d98 | refs/heads/master | 2021-09-25T08:58:32.797677 | 2018-10-20T05:40:58 | 2018-10-20T05:40:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 648 | py |
# -*- coding: utf-8 -*-
'''
File name: code\a_polynomial_modulo_the_square_of_a_prime\sol_457.py
Author: Vaidic Joshi
Date created: Oct 20, 2018
Python Version: 3.x
'''
# Solution to Project Euler Problem #457 :: A polynomial modulo the square of a prime
#
# For more information see:
# https://projecteuler.net/problem=457
# Problem Statement
'''
Let f(n) = n2 - 3n - 1.
Let p be a prime.
Let R(p) be the smallest positive integer n such that f(n) mod p2 = 0 if such an integer n exists, otherwise R(p) = 0.
Let SR(L) be ∑R(p) for all primes not exceeding L.
Find SR(107).
'''
# Solution
# Solution Approach
'''
'''
| [
"[email protected]"
] | |
cc1c7971c80045e851cb8087415cf80b29690b3a | 77a2199488e00a2d71ce92bfc2cb1e5d1067ee3f | /api/dorest/dorest/libs/sh/verbose.py | 5fef8b1dfb4e8d29a20b80d98b47bc294edcba6e | [
"BSD-3-Clause"
] | permissive | ichise-lab/uwkgm | 541314967c3161af40338b3e8c9ed61d8fcb95db | 7f6545c614848b73950b66406df8787f361f317f | refs/heads/master | 2023-04-27T07:49:11.679307 | 2022-12-08T04:16:04 | 2022-12-08T04:16:04 | 249,624,364 | 1 | 1 | BSD-3-Clause | 2023-04-21T20:49:20 | 2020-03-24T05:51:09 | Python | UTF-8 | Python | false | false | 1,920 | py | """Prints styled messages to command-line interface
Usage example:
---
from dorest import verbose
verbose.info('Some information')
verbose.info.append('Second line of the information')
---
The Dorest project
:copyright: (c) 2020 Ichise Laboratory at NII & AIST
:author: Rungsiman Nararatwong
"""
import os
import sys
import datetime
from dorest.libs.sh import styles
_verbose = '--verbose' in sys.argv or os.environ.get('DOREST_VERBOSE')
class _Say:
def __init__(self, category: str, style: callable = None):
def no_style(string: str) -> str:
return string
self.category = category
self.style = style or no_style
def __call__(self, message: str, caller: callable = None) -> None:
if _verbose:
now = datetime.datetime.now()
sys.stdout.write(self.style(now.isoformat().replace('T', ' ')[:19].ljust(21)))
sys.stdout.write(self.style('[ %s ] ' % self.category.ljust(9)))
if caller is not None:
sys.stdout.write(self.style('%s.%s\n' % (caller.__module__, caller.__name__)))
sys.stdout.write(self.style(': %s\n' % message))
sys.stdout.flush()
class _Append:
def __init__(self, style: callable = None):
def no_style(string: str) -> str:
return string
self.style = style or no_style
def __call__(self, message):
sys.stdout.write(self.style(': %s\n' % message))
sys.stdout.flush()
def verbose(on=True):
global _verbose
_verbose = on
info = _Say('INFO')
highlight = _Say('IMPORTANT', styles.HIGHLIGHT)
success = _Say('SUCCESS', styles.SUCCESS)
warn = _Say('WARNING', styles.WARNING)
error = _Say('ERROR', styles.ERROR)
info.append = _Append()
highlight.append = _Append(styles.HIGHLIGHT)
success.append = _Append(styles.SUCCESS)
warn.append = _Append(styles.WARNING)
error.append = _Append(styles.ERROR)
| [
"[email protected]"
] | |
219d33b94d7b21f8bc57897e1a2bbca5077e7b24 | f5424dacc1caf649fb02585656839cf70dfb30ff | /docs/conf.py | a565d89046e684d71509fc46d314f5c7372d8c68 | [
"Apache-2.0"
] | permissive | fgregg/civic-scraper | 81fc8f889b00677c3af50b27197770c3661ca958 | aaf5a76b53d19a23beb386a19bc30dd1b0e0eef9 | refs/heads/master | 2023-08-18T06:27:57.986155 | 2021-01-14T19:05:47 | 2021-01-14T19:05:47 | 419,768,129 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,748 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# civic-scraper documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import sphinx_rtd_theme
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import civic_scraper
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon',
"sphinx_rtd_theme",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'civic-scraper'
copyright = u"2020, Big Local News"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = civic_scraper.__version__
# The full version, including alpha/beta/rc tags.
release = civic_scraper.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'alabaster'
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'collapse_navigation': True,
}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html',
'searchbox.html',
]
}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'civic-scraperdoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'civic-scraper.tex',
u'civic-scraper Documentation',
u'Serdar Tumgoren', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'civic-scraper',
u'civic-scraper Documentation',
[u'Serdar Tumgoren'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'civic-scraper',
u'civic-scraper Documentation',
u'Serdar Tumgoren',
'civic-scraper',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| [
"[email protected]"
] | |
dede89d1db066c42ca2fbc3e50fffb709bdb23eb | 5170bc564149192634defc547a0bfb156e4cf455 | /calc_functions.py | fdfd2ada5f075e7817f50d037888eaab9a112100 | [] | no_license | sicknick21/cmpt120siconolfi | 2e3c966de4aee61da9845dbc927d8843535f6a6c | 9342c19c669d8f87adffbc8f3fcbe1a203d20d0d | refs/heads/master | 2020-03-27T16:45:03.671565 | 2018-12-14T22:41:36 | 2018-12-14T22:41:36 | 146,791,032 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,102 | py | #calc_functions.py
#This program contains the functions for the calculator
from tkinter import *
def __init__(self):
self.total = 0
self.current = ""
self.new_num = True
self.op_pending = False
self.op = ""
self.eq = False
def num_press(self, num):
self.eq = False
temp = text_box.get()
temp2 = str(num)
if self.new_num:
self.current = n=temp2
self.new_num = False
else:
if temp2 == '.':
if temp2 in temp:
return
self.current = temp + temp2
self.display(self.current)
#Finding the outcome of the sequence
def calc_total(self):
self.eq = True
self.current = float(self.current)
if self.op_pending == True:
self.do_sum()
else:
self.total = float(text_box.get())
def display(self, value):
text_box.delete(0, END)
text_box.insert(0, value)
#Defining the operations for the calculator
def do_sum(self):
if self.op == "add":
self.total += self.current
if self.op == "subtract":
self.total -= self.current
if self.op == "multiply":
self.total *= self.current
if self.op == "divide":
self.total /= self.current
self.new_num = True
self.op_pending = False
self.display(self.total)
def operation(self, op):
self.current = float(self.current)
if self.op_pending:
self.do_sum()
elif not self.eq:
self.total = self.current
self.new_num = True
self.op_pending = True
self.op = op
self.eq = False
def cancel(self):
self.eq = False
self.current = "0"
self.display(0)
self.new_num = True
def all_cancel(self):
self.cancel()
self.total = 0
def sign(self):
self.eq = False
self.current = -(float(text_box.get()))
self.display(self.current)
main()
| [
"[email protected]"
] | |
721842d40ead0bfe7c4b860cfb2cdfab9cf35cad | 1dacbf90eeb384455ab84a8cf63d16e2c9680a90 | /lib/python2.7/site-packages/statsmodels/sandbox/tsa/example_arma.py | d2375c5c976580c51b30f78fcf12abc5d4e37199 | [
"Python-2.0",
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-unknown"
] | permissive | wangyum/Anaconda | ac7229b21815dd92b0bd1c8b7ec4e85c013b8994 | 2c9002f16bb5c265e0d14f4a2314c86eeaa35cb6 | refs/heads/master | 2022-10-21T15:14:23.464126 | 2022-10-05T12:10:31 | 2022-10-05T12:10:31 | 76,526,728 | 11 | 10 | Apache-2.0 | 2022-10-05T12:10:32 | 2016-12-15T05:26:12 | Python | UTF-8 | Python | false | false | 11,572 | py | '''trying to verify theoretical acf of arma
explicit functions for autocovariance functions of ARIMA(1,1), MA(1), MA(2)
plus 3 functions from nitime.utils
'''
from __future__ import print_function
from statsmodels.compat.python import range
import numpy as np
from numpy.testing import assert_array_almost_equal
import matplotlib.mlab as mlab
from statsmodels.tsa.arima_process import arma_generate_sample, arma_impulse_response
from statsmodels.tsa.arima_process import arma_acovf, arma_acf, ARIMA
#from movstat import acf, acovf
#from statsmodels.sandbox.tsa import acf, acovf, pacf
from statsmodels.tsa.stattools import acf, acovf, pacf
ar = [1., -0.6]
#ar = [1., 0.]
ma = [1., 0.4]
#ma = [1., 0.4, 0.6]
#ma = [1., 0.]
mod = ''#'ma2'
x = arma_generate_sample(ar, ma, 5000)
x_acf = acf(x)[:10]
x_ir = arma_impulse_response(ar, ma)
#print x_acf[:10]
#print x_ir[:10]
#irc2 = np.correlate(x_ir,x_ir,'full')[len(x_ir)-1:]
#print irc2[:10]
#print irc2[:10]/irc2[0]
#print irc2[:10-1] / irc2[1:10]
#print x_acf[:10-1] / x_acf[1:10]
# detrend helper from matplotlib.mlab
def detrend(x, key=None):
if key is None or key=='constant':
return detrend_mean(x)
elif key=='linear':
return detrend_linear(x)
def demean(x, axis=0):
"Return x minus its mean along the specified axis"
x = np.asarray(x)
if axis:
ind = [slice(None)] * axis
ind.append(np.newaxis)
return x - x.mean(axis)[ind]
return x - x.mean(axis)
def detrend_mean(x):
"Return x minus the mean(x)"
return x - x.mean()
def detrend_none(x):
"Return x: no detrending"
return x
def detrend_linear(y):
"Return y minus best fit line; 'linear' detrending "
# This is faster than an algorithm based on linalg.lstsq.
x = np.arange(len(y), dtype=np.float_)
C = np.cov(x, y, bias=1)
b = C[0,1]/C[0,0]
a = y.mean() - b*x.mean()
return y - (b*x + a)
def acovf_explicit(ar, ma, nobs):
'''add correlation of MA representation explicitely
'''
ir = arma_impulse_response(ar, ma)
acovfexpl = [np.dot(ir[:nobs-t], ir[t:nobs]) for t in range(10)]
return acovfexpl
def acovf_arma11(ar, ma):
# ARMA(1,1)
# Florens et al page 278
# wrong result ?
# new calculation bigJudge p 311, now the same
a = -ar[1]
b = ma[1]
#rho = [1.]
#rho.append((1-a*b)*(a-b)/(1.+a**2-2*a*b))
rho = [(1.+b**2+2*a*b)/(1.-a**2)]
rho.append((1+a*b)*(a+b)/(1.-a**2))
for _ in range(8):
last = rho[-1]
rho.append(a*last)
return np.array(rho)
# print acf11[:10]
# print acf11[:10] /acf11[0]
def acovf_ma2(ma):
# MA(2)
# from Greene p616 (with typo), Florens p280
b1 = -ma[1]
b2 = -ma[2]
rho = np.zeros(10)
rho[0] = (1 + b1**2 + b2**2)
rho[1] = (-b1 + b1*b2)
rho[2] = -b2
return rho
# rho2 = rho/rho[0]
# print rho2
# print irc2[:10]/irc2[0]
def acovf_ma1(ma):
# MA(1)
# from Greene p616 (with typo), Florens p280
b = -ma[1]
rho = np.zeros(10)
rho[0] = (1 + b**2)
rho[1] = -b
return rho
# rho2 = rho/rho[0]
# print rho2
# print irc2[:10]/irc2[0]
ar1 = [1., -0.8]
ar0 = [1., 0.]
ma1 = [1., 0.4]
ma2 = [1., 0.4, 0.6]
ma0 = [1., 0.]
comparefn = dict(
[('ma1', acovf_ma1),
('ma2', acovf_ma2),
('arma11', acovf_arma11),
('ar1', acovf_arma11)])
cases = [('ma1', (ar0, ma1)),
('ma2', (ar0, ma2)),
('arma11', (ar1, ma1)),
('ar1', (ar1, ma0))]
for c, args in cases:
ar, ma = args
print('')
print(c, ar, ma)
myacovf = arma_acovf(ar, ma, nobs=10)
myacf = arma_acf(ar, ma, nobs=10)
if c[:2]=='ma':
othacovf = comparefn[c](ma)
else:
othacovf = comparefn[c](ar, ma)
print(myacovf[:5])
print(othacovf[:5])
#something broke again,
#for high persistence case eg ar=0.99, nobs of IR has to be large
#made changes to arma_acovf
assert_array_almost_equal(myacovf, othacovf,10)
assert_array_almost_equal(myacf, othacovf/othacovf[0],10)
#from nitime.utils
def ar_generator(N=512, sigma=1.):
# this generates a signal u(n) = a1*u(n-1) + a2*u(n-2) + ... + v(n)
# where v(n) is a stationary stochastic process with zero mean
# and variance = sigma
# this sequence is shown to be estimated well by an order 8 AR system
taps = np.array([2.7607, -3.8106, 2.6535, -0.9238])
v = np.random.normal(size=N, scale=sigma**0.5)
u = np.zeros(N)
P = len(taps)
for l in range(P):
u[l] = v[l] + np.dot(u[:l][::-1], taps[:l])
for l in range(P,N):
u[l] = v[l] + np.dot(u[l-P:l][::-1], taps)
return u, v, taps
#JP: small differences to using np.correlate, because assumes mean(s)=0
# denominator is N, not N-k, biased estimator
# misnomer: (biased) autocovariance not autocorrelation
#from nitime.utils
def autocorr(s, axis=-1):
"""Returns the autocorrelation of signal s at all lags. Adheres to the
definition r(k) = E{s(n)s*(n-k)} where E{} is the expectation operator.
"""
N = s.shape[axis]
S = np.fft.fft(s, n=2*N-1, axis=axis)
sxx = np.fft.ifft(S*S.conjugate(), axis=axis).real[:N]
return sxx/N
#JP: with valid this returns a single value, if x and y have same length
# e.g. norm_corr(x, x)
# using std subtracts mean, but correlate doesn't, requires means are exactly 0
# biased, no n-k correction for laglength
#from nitime.utils
def norm_corr(x,y,mode = 'valid'):
"""Returns the correlation between two ndarrays, by calling np.correlate in
'same' mode and normalizing the result by the std of the arrays and by
their lengths. This results in a correlation = 1 for an auto-correlation"""
return ( np.correlate(x,y,mode) /
(np.std(x)*np.std(y)*(x.shape[-1])) )
# from matplotlib axes.py
# note: self is axis
def pltacorr(self, x, **kwargs):
"""
call signature::
acorr(x, normed=True, detrend=detrend_none, usevlines=True,
maxlags=10, **kwargs)
Plot the autocorrelation of *x*. If *normed* = *True*,
normalize the data by the autocorrelation at 0-th lag. *x* is
detrended by the *detrend* callable (default no normalization).
Data are plotted as ``plot(lags, c, **kwargs)``
Return value is a tuple (*lags*, *c*, *line*) where:
- *lags* are a length 2*maxlags+1 lag vector
- *c* is the 2*maxlags+1 auto correlation vector
- *line* is a :class:`~matplotlib.lines.Line2D` instance
returned by :meth:`plot`
The default *linestyle* is None and the default *marker* is
``'o'``, though these can be overridden with keyword args.
The cross correlation is performed with
:func:`numpy.correlate` with *mode* = 2.
If *usevlines* is *True*, :meth:`~matplotlib.axes.Axes.vlines`
rather than :meth:`~matplotlib.axes.Axes.plot` is used to draw
vertical lines from the origin to the acorr. Otherwise, the
plot style is determined by the kwargs, which are
:class:`~matplotlib.lines.Line2D` properties.
*maxlags* is a positive integer detailing the number of lags
to show. The default value of *None* will return all
:math:`2 \mathrm{len}(x) - 1` lags.
The return value is a tuple (*lags*, *c*, *linecol*, *b*)
where
- *linecol* is the
:class:`~matplotlib.collections.LineCollection`
- *b* is the *x*-axis.
.. seealso::
:meth:`~matplotlib.axes.Axes.plot` or
:meth:`~matplotlib.axes.Axes.vlines`
For documentation on valid kwargs.
**Example:**
:func:`~matplotlib.pyplot.xcorr` above, and
:func:`~matplotlib.pyplot.acorr` below.
**Example:**
.. plot:: mpl_examples/pylab_examples/xcorr_demo.py
"""
return self.xcorr(x, x, **kwargs)
def pltxcorr(self, x, y, normed=True, detrend=detrend_none,
usevlines=True, maxlags=10, **kwargs):
"""
call signature::
def xcorr(self, x, y, normed=True, detrend=detrend_none,
usevlines=True, maxlags=10, **kwargs):
Plot the cross correlation between *x* and *y*. If *normed* =
*True*, normalize the data by the cross correlation at 0-th
lag. *x* and y are detrended by the *detrend* callable
(default no normalization). *x* and *y* must be equal length.
Data are plotted as ``plot(lags, c, **kwargs)``
Return value is a tuple (*lags*, *c*, *line*) where:
- *lags* are a length ``2*maxlags+1`` lag vector
- *c* is the ``2*maxlags+1`` auto correlation vector
- *line* is a :class:`~matplotlib.lines.Line2D` instance
returned by :func:`~matplotlib.pyplot.plot`.
The default *linestyle* is *None* and the default *marker* is
'o', though these can be overridden with keyword args. The
cross correlation is performed with :func:`numpy.correlate`
with *mode* = 2.
If *usevlines* is *True*:
:func:`~matplotlib.pyplot.vlines`
rather than :func:`~matplotlib.pyplot.plot` is used to draw
vertical lines from the origin to the xcorr. Otherwise the
plotstyle is determined by the kwargs, which are
:class:`~matplotlib.lines.Line2D` properties.
The return value is a tuple (*lags*, *c*, *linecol*, *b*)
where *linecol* is the
:class:`matplotlib.collections.LineCollection` instance and
*b* is the *x*-axis.
*maxlags* is a positive integer detailing the number of lags to show.
The default value of *None* will return all ``(2*len(x)-1)`` lags.
**Example:**
:func:`~matplotlib.pyplot.xcorr` above, and
:func:`~matplotlib.pyplot.acorr` below.
**Example:**
.. plot:: mpl_examples/pylab_examples/xcorr_demo.py
"""
Nx = len(x)
if Nx!=len(y):
raise ValueError('x and y must be equal length')
x = detrend(np.asarray(x))
y = detrend(np.asarray(y))
c = np.correlate(x, y, mode=2)
if normed: c/= np.sqrt(np.dot(x,x) * np.dot(y,y))
if maxlags is None: maxlags = Nx - 1
if maxlags >= Nx or maxlags < 1:
raise ValueError('maglags must be None or strictly '
'positive < %d'%Nx)
lags = np.arange(-maxlags,maxlags+1)
c = c[Nx-1-maxlags:Nx+maxlags]
if usevlines:
a = self.vlines(lags, [0], c, **kwargs)
b = self.axhline(**kwargs)
kwargs.setdefault('marker', 'o')
kwargs.setdefault('linestyle', 'None')
d = self.plot(lags, c, **kwargs)
else:
kwargs.setdefault('marker', 'o')
kwargs.setdefault('linestyle', 'None')
a, = self.plot(lags, c, **kwargs)
b = None
return lags, c, a, b
arrvs = ar_generator()
##arma = ARIMA()
##res = arma.fit(arrvs[0], 4, 0)
arma = ARIMA(arrvs[0])
res = arma.fit((4,0, 0))
print(res[0])
acf1 = acf(arrvs[0])
acovf1b = acovf(arrvs[0], unbiased=False)
acf2 = autocorr(arrvs[0])
acf2m = autocorr(arrvs[0]-arrvs[0].mean())
print(acf1[:10])
print(acovf1b[:10])
print(acf2[:10])
print(acf2m[:10])
x = arma_generate_sample([1.0, -0.8], [1.0], 500)
print(acf(x)[:20])
import statsmodels.api as sm
print(sm.regression.yule_walker(x, 10))
import matplotlib.pyplot as plt
#ax = plt.axes()
plt.plot(x)
#plt.show()
plt.figure()
pltxcorr(plt,x,x)
plt.figure()
pltxcorr(plt,x,x, usevlines=False)
plt.figure()
#FIXME: plotacf was moved to graphics/tsaplots.py, and interface changed
plotacf(plt, acf1[:20], np.arange(len(acf1[:20])), usevlines=True)
plt.figure()
ax = plt.subplot(211)
plotacf(ax, acf1[:20], usevlines=True)
ax = plt.subplot(212)
plotacf(ax, acf1[:20], np.arange(len(acf1[:20])), usevlines=False)
#plt.show()
| [
"[email protected]"
] | |
7757d0c108a82d761e9e7d13a4ab8cc706351b15 | c52ed125dde91b381dcff4538c99aa7dda71a3bf | /my_proj/main.py | 70831866b43ab3aa73b9bac4d970ea2d5da37503 | [] | no_license | raghavan97/projdir | a8e8b27174c77ca7dab742e8d4119792ce777516 | 89e67c45c1a66dc3a35219a6d9c859304d4204be | refs/heads/master | 2021-01-13T10:57:17.071282 | 2017-02-14T00:32:59 | 2017-02-14T00:32:59 | 81,840,196 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 847 | py | import logging
from colorlog import ColoredFormatter
from my_proj.m1.m1_src import m1f
def setup_root_logger():
formatter = ColoredFormatter(
'%(log_color)s%(asctime)s:%(levelname)s:%(name)s:%(lineno)s'
':%(reset)s%(message)s'
)
logger = logging.getLogger('my_proj')
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
def main():
setup_root_logger()
logger = logging.getLogger(__name__)
logger.setLevel(logging.ERROR)
logger.critical("Appearing coz log level >= ERROR")
logger.error("Appearing coz log level for >= ERROR")
logger.warning("Not appearing coz log level >= ERROR")
logger.info("Not appearing coz log level >= ERROR")
logger.debug("Not appearing coz log level >= ERROR")
m1f()
| [
"[email protected]"
] | |
5b03be0e5e2197ce7057fe42ac1a67c223b1aa58 | d75292aeddc4dd161a8ce404f12c32069c8cdeca | /CRAB/MuNu/MNu-MCPat.py | 0f7872cd9d9fda5116fe8745d8c4bf396d08b97d | [] | no_license | isobelojalvo/UWAnalysis | bfd173fdf0f9b49788614992cc6372c1dc8d2b34 | 2968611938d3b54e3ba2fa7713fab7b56ad92557 | refs/heads/master | 2020-04-15T17:31:36.880516 | 2016-01-25T14:24:54 | 2016-01-25T14:24:54 | 13,874,373 | 1 | 1 | null | 2016-09-30T10:08:15 | 2013-10-25T23:23:13 | Python | UTF-8 | Python | false | false | 4,620 | py | import FWCore.ParameterSet.Config as cms
import sys
sys.setrecursionlimit(10000)
process = cms.Process("ANALYSIS")
process.load('Configuration/StandardSequences/FrontierConditions_GlobalTag_cff')
process.GlobalTag.globaltag = 'MC_42_V17::All'
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(-1)
)
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(
# $inputFileNames
'root://cmsxrootd.hep.wisc.edu//store/user/cepeda/WJetsToLNu_TuneZ2_7TeV-madgraph-tauola/WplusJets_madgraph_2012-05-29-7TeV-PatTuple-67c1f94/98f6272e3f90a8dc5047059cf7088f10/output_1000_0_eYb.root'
#'root://cmsxrootd.hep.wisc.edu//store/user/ojalvo/W-WlnuSKIM/1/WlnuSKIM-6A304A54-1EF3-E011-993E-002618943920.root'
#'root://cmsxrootd.hep.wisc.edu//store/user/ojalvo/W-WlnuSKIM/1/WlnuSKIM-16096E47-BFF2-E011-BC13-0018F3D0961A.root',
),
inputCommands=cms.untracked.vstring(
'keep *',
'drop *_finalState*_*_*',
'drop *_patFinalStateEvent*_*_*'
)
)
process.load("PhysicsTools.PatAlgos.patSequences_cff")
from UWAnalysis.Configuration.tools.analysisToolsPT import *
defaultReconstructionPT(process,'HLT',['HLT_IsoMu12_v',
'HLT_IsoMu17_v',
'HLT_IsoMu20_v',
'HLT_IsoMu24_v',
'HLT_IsoMu30_eta2p1_v'
#'HLT_IsoMu24'
#'HLT_IsoMu20_v','HLT_IsoMu17_v','HLT_Mu17_v','HLT_Mu24_v','HLT_IsoMu24_v','HLT_IsoMu30_v','HLT_IsoMu24_eta2p1_v'
])
global metCalibration
process.metCalibration = cms.PSet(
applyCalibration = cms.bool(True),
calibrationScheme = cms.string("OneLeg"),
responseU1 = cms.string("1.33223-0.917782*x"),
responseU2 = cms.string("-0.013"),
resolutionU1 = cms.string("11.1566+0.0654529*x+0.000124436*x*x"),
resolutionU2 = cms.string("11.1235+0.0449872*x-6.39822e-5*x*x"),
responseMCU1 = cms.string("1.26247-0.950094*x"),
responseMCU2 = cms.string("-0.00544907"),
resolutionMCU1 = cms.string("10.6449+0.0436475*x+3.07554e-5*x*x"),
resolutionMCU2 = cms.string("10.5649+0.0225853*x-5.81371e-5*x*x")
)
process.metCalibration.applyCalibration = cms.bool(True)
process.metCalibration.calibrationScheme = cms.string("OneLeg")
#EventSelection
createGeneratedParticles(process,
'genDaughters',
[
"keep++ pdgId = {W+}",
"keep++ pdgId = {W-}",
"drop pdgId = {W+} & status = 2",
"drop pdgId = {W-} & status = 2",
"drop pdgId = {W+} & status = 2",
"drop pdgId = {W-} & status = 2",
"keep pdgId = {mu+}",
"keep pdgId = {mu-}",
]
)
createGeneratedParticles(process,
'genWs',
[
"keep++ pdgId = {W+}",
"keep++ pdgId = {W-}"
]
)
createGeneratedParticles(process,
'genbbCands',
[
"keep abs(pdgId) = 5"
]
)
createGeneratedParticles(process,
'genssCands',
[
"keep abs(pdgId) = 3"
]
)
createGeneratedParticles(process,
'genuuCands',
[
"keep abs(pdgId) = 2"
]
)
createGeneratedParticles(process,
'genddCands',
[
"keep abs(pdgId) = 1"
]
)
createGeneratedParticles(process,
'genccCands',
[
"keep abs(pdgId) = 4"
]
)
process.load("UWAnalysis.Configuration.wMuNuAnalysisPT_cff")
process.eventSelection = cms.Path(process.selectionSequence) ##changing to multiples see below
from UWAnalysis.Configuration.tools.ntupleToolsPT import *
addMuNuEventTreeCSV(process,'muNuEventTreeCSV')
addEventSummary(process,True)
#process.TFileService.fileName=cms.string("$outputFileName")
| [
"[email protected]"
] | |
4e4f9e2f9fc07e380d0a61a2fa80e3464ae05ddf | 16060e83fe4e025296f7279a8624cef6bafc2c60 | /project/rpi/send_i2c.py | 13d1cdb607beb39a02fe3804c29b6b9441b1ffe5 | [] | no_license | fjasic/bsc | 7d2ac418904496046367a0ce4a626b737e1c8c0b | 624ad1914f786438eafa3f12d53e0743e89a5aea | refs/heads/master | 2022-03-16T18:30:10.676612 | 2019-10-23T18:43:06 | 2019-10-23T18:43:06 | 184,892,310 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 190 | py | # coding: utf-8
"""
Generating I2C signal,10000 times.
Used modules in sendI2C.py :
--smbus
"""
import smbus
bus = smbus.SMBus(1)
for x in range(0, 10000):
bus.write_byte(0x53, 0xff)
| [
"[email protected]"
] | |
f52d8c492c695ba6e033137bd656361a729f39d1 | d7737bfd840a7d0b25d1168dda352a81a6f54a25 | /Learning Python/pythonchallenge 02.py | bc09514d4db44b5d0b42b19873acbdfcfae72bcf | [] | no_license | wfSeg/pythonlearning | fbe509a8c131162e68d4816074d1a48b3b1b095e | c06a195988bce801e9b5d637ec3f1a57bb90da3e | refs/heads/master | 2021-01-02T08:42:36.421051 | 2017-10-24T23:10:21 | 2017-10-24T23:10:21 | 99,047,490 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 100,055 | py | line = %%$@_$^__#)^)&!_+]!*@&^}@[@%]()%+$&[(_@%+%$*^@$^!+]!&_#)_*}{}}!}_]$[%}@[{_@#_^{*
@##&{#&{&)*%(]{{([*}@[@&]+!!*{)!}{%+{))])[!^})+)$]#{*+^((@^@}$[**$&^{$!@#$%)!@(&
+^!{%_$&@^!}$_${)$_#)!({@!)(^}!*^&!$%_&&}&_#&@{)]{+)%*{&*%*&@%$+]!*__(#!*){%&@++
!_)^$&&%#+)}!@!)&^}**#!_$([$!$}#*^}$+&#[{*{}{((#$]{[$[$$()_#}!@}^@_&%^*!){*^^_$^
]@}#%[%!^[^_})+@&}{@*!(@$%$^)}[_!}(*}#}#___}!](@_{{(*#%!%%+*)^+#%}$+_]#}%!**#!^_
)@)$%%^{_%!@(&{!}$_$[)*!^&{}*#{!)@})!*{^&[&$#@)*@#@_@^_#*!@_#})+[^&!@*}^){%%{&#@
@{%(&{+(#^{@{)%_$[+}]$]^{^#(*}%)@$@}(#{_&]#%#]{_*({(])$%[!}#@@&_)([*]}$}&${^}@(%
(%[@%!}%*$}(*@)}){+@(%@*$&]*^*}*]&$[}*]%]+*}^!}*$^^_()#$^]++@__){&&+((#%+(&+){)$
%&&#($[[+##*%${)_!+{_[})%++)$#))]]]$]@]@($+{&%&%+!!!@]_]+])^*@$(@#${}}#}{%}#+{(@
#__+{{]${]!{(%${%%^)(_*_@+)$]$#_@$)]](}{}$(}*%+!}#+)$%$}+#@*&^{##}+@(%[*@_}{(^]^
+_*{@+[$!!@%$+{_&(#^(([&[][[&@#+}_]&&]}^*&$&)#_^$@$((%)}+{}$#+{+^}&[#[#_+${#[#]{
(@@[%}[}$%+*#$+[%(**!$+@$@&+$_$#!_&&&&{***+)}][}#^!%#&$*)$!%}*&#}}##(^_%^]{+]&&]
}^]#^(}@]&$]*_][])$]{_+})^_}]))()^&)(!*![!&}{][(]})[(*^}$&$_@^$)#${%[$_]!^]}}}*+
*^_(+}^)(%(}{&)[}!$$&&+}&[{%}^+#$]@)^&*%{@}]&!%*%$*&][}&{$&*@{@#]$*_[]%%[#]#*%)@
$_^#%$!{#]^$}%^@^+{($!^($%)]+&}+$@[$*)*&)*%!_!!+@&^*{}%#&{}$!(*^*@]@@})[($!)]]})
})(&+##]##%&##$}@{#_])*%(*(@$)}[+(+_)!{{#^{_@)!&)$}@^^^[$#__+$^!*#%%]_!#$]$&+^}%
@])])%}]#$((^+{{@++^])$^*#[$}*]}}{)@+)[_}*@^%#]]#+()+)(]_[!!!)+)$+&@@])!}+*%]$[]
&&[@+$_&#[$!$${}{%[]#+@)*!#)*!{$#*$%}[(&@$&_@($$]]]_[+(#@}&_}+]@$#_+](}^})!@@}@)
}^]^]*}]+&(@@!!](*@#(++*)]!(^$})&_^@+]{#_@*%^[$[%&_%@%_![&&]&_@*#_}[{{])^$[_$_&_
@%%[@#[@_[&+]}[+)!_#_+++%)[@%$(&$[{#@(}$*![#^#{}_)[$^_$${_@&}*![#*#_+%[@{*^$){)#
#%}]{+((*^]+{})&#$!#(*%({_!^*[{%@_&#){![&]@$#[#(!{*#^*%)]!%(#]%${*_^{+}(@}{_^(](
_+!_)^&}!#([(+&[@])[_(]@]@&@{#@(%[@+[^@%@+]*_[{]$[_(_@[!]]^%+@#(@$}]@(^**+]%^)^(
@}^[]@@[@[@}^(^!]%*_]&$!!^^#*[#*[*_}+[$#(_#%@](+[^+}%{_*#]+*(]}!$(%@%#^)}]_&]{${
}$[*{+&+&}[#_#}_(}){^#{[_%*!$+[#)%]@&&_{)#[+*&+#!&)%)%++$_}){%%*@!*&%__(_!]#$*(_
$^!@@}_())%(&$%]]{{{@+!&%@(^!+*{%[*[!]){(#$@)(^{]%[&*(&!{&}!%*$)*]]$%(__[}_+&)!(
^_&*]*+#@{@[_({$*&}][(*!+$+#%&![%^)^#(#}+*+(@)&&!({^^_*($^+)&{)%$@%)&!$$&&^+#[)$
+!$^]*!%^_$}$+!!&%_&){$%{((&^{{(&_&_]{^}@[$^+]}]^{@!^@_%_{^@*)+^*#$#!+*}#)}@(}!]
_*)}$**@}[^_&*^)*+#()]&{{]*+#${@&}#)$[]_+(^_@^][]_)*^*+_!{&$##]((](}}{[!$#_{&{){
*_{^}$#!+]{[^&++*#!]*)]%$!{#^&%(%^*}@^+__])_$@_^#[{{})}$*]#%]{}{][@^!@)_[}{())%)
())&#@*[#}+#^}#%!![#&*}^{^(({+#*[!{!}){(!*@!+@[_(*^+*]$]+@+*_##)&)^(@$^]e@][#&)(
%%{})+^$))[{))}&$(^+{&(#%*@&*(^&{}+!}_!^($}!(}_@@++$)(%}{!{_]%}$!){%^%%@^%&#([+[
_+%){{}(#_}&{&++!@_)(_+}%_#+]&^)+]_[@]+$!+{@}$^!&)#%#^&+$@[+&+{^{*[@]#!{_*[)(#[[
]*!*}}*_(+&%{&#$&+*_]#+#]!&*@}$%)!})@&)*}#(@}!^(]^@}]#&%)![^!$*)&_]^%{{}(!)_&{_{
+[_*+}]$_[#@_^]*^*#@{&%})*{&**}}}!_!+{&^)__)@_#$#%{+)^!{}^@[$+^}&(%%)&!+^_^#}^({
*%]&@{]++}@$$)}#]{)!+@[^)!#[%@^!!+{(@&+++_{!$}{]_%_#^#%&{!_(#$%%&@[})]+_@!(*[_@[
*_&+][^][}^@}])!(&^*[_%+(}!!{!!^*@!({%]#[_&()$]!$]@}*][)#()})[*^[^}]#(((_^#%%]@}
^###%!{(@+]$%*^}(![$@*]_{#*!$*@%*(^+#!)$&]*%$&*@$[)_$!&+_[$)%_*((%+##*]@+#*[$$)^
@)]}!)$^%+%&_#+]&&_!(}+^*#)$%%^+&%^_]@*%^^_#]%{%[&(*_(%(*{^@[@&+!@&[+[++$})$!*}+
(_^%%*}^{+}(+]]_][_(@}^#_{_}*){*)}+*)%#%++}{}__%$$$[%%*})_#*!_!%&*$!]!}{*+{^()$}
*$%*$]][{@+*]_*&!^]_*!_{_@(}+%#$+@}_]#@$#^%((#$%+++]])#*@)&([^#]_$%$)[#)){({%@_^
@#}@*!!()[]%$*+*{*$%@**!}&#[*#[[{(@&_){{!}!)++@*{{({_!#^]}+{{#]{$^)&]%}})^@&$%@$
$!_+!{]*^_+@&@){#*!_#+{[@$^(__}*[^$&{&]!(&+++_@+)&}))$%]${+*!(#@(}&&&!)!_!$&@&{[
[@!#!]]#%)(_^!{*[{^{]})$)^&(*)%}#]#()^#+}!{_}*+{@&_^)+%@!%%${$&%}(%*_!)%$((+$&^^
}#[@%+)&^!](]%+_{{]}@]+^]{(!_*&@][]@_%}%(%&)})&!#)[_]^+$)[(%*%({]$[(#+&+[@[*([$#
^*!@{]]#![[{_]#^@])_[[+%]#[%[+_{)^+([^}[]_[}])*^!_+$}^+_)+*@$$^}(&[)_^[+})^]&)))
}*+}%){@_)]_)&)!@)*#^_%{}(]]$)+^@+}+$_*&)]%^@&)![!@$[@)@}%!)@$((^![{(%([+#&{$+#[
#&)!+{)__]+%)#@)]*%#]*{)$@*!^#[]**+]&])$@*@]{$_+]]^_*+*+)%!_!}#}^@*[
$[##&_^+&)((_$#!]}[_*]_$^_*{[^$#[{@$[()+*@_$((+}*^!]){][_}!)%{}{&#@[&#$(}#}%%{!_
@)[($}&+&$}}%[)@[{^_+%+[)[^[*{{^#]*__$^%^}#]}*{^+{!@#(+*]$)^(*!^^]^)[}@{%(($(+_#
*){@}]+}&)[(^^(*$&_$@#[#_$)^_()}{[]]{^@*)_!{@)(!))^_+_]{+_}$%(@#+{*+%@!${*&&@${]
(}&($(^*{^$])*}$(#}%}#)_@^*}#!)$)&$*__+!!+[&}])*_{+$}!@)*^{{({@}{@}+@#${$^*@^^}(
[)!^){!+@%(^_)[]@(]+&^_@[*(_@^*##*&*$!{^{!&#@(%&(@!]^[]({!+(]+^}&&${{]^!+#^*#&%{
$[}@&(]^&^@*#[&&}^[!%+#(+(%+&){_@_&%&!$}&)[$]%_^]*@^]&_}&^^^(&(${%#^(#[}}{%))&}{
%&___*&*((!#&(^)}%**$+_[!#{&_{$_))[${$*([&*%^!$%%&}$&_))}{(_]!+{}@+%{^*_[[@@)%}%
@)%*(}($$)^!#}+}#$]@})+]&@^!*{@_**{_^{@(^(@++&)!#](&#*[)+!!%{]_*$*(]%+&_^%)$$$*$
&]@}!&{@[{(+**#)}${[*@$(+%__{{}#!}@(%^+*)$+&){^(_*&}&__]^*)}]^!!%&$#[)$)+_{!$%@)
_##)#^*%#}{$}$[!!__$)}(%+[^(^$(%_)#!$[$@+]${$({)[^$+&$($]*!+$^{_(_%}(^)(!_))![*^
^%^&})[@#}#_%$*}&#!_)!)){*}!@&]*(@^_&[)]!*$&_[@&]($}!]!{)[)[{{_{#(+_!_#{]_(_(${}
%*+[}#{)@]&@($+&!^[}![}&$}*^[!)#^_#**${+(!+!#)!^%_#%[][(@(!!}&)_*{%@_^!)_!_@!^!(
{^](#%%&@#))$&#_&[[#&^&^}))([]&^+^@($!}{+^+*{%[}_*+_*^+$)+%^(}&[_%)$+{}{[*]+$]}&
@}_]_[%&)[@+}{+&^!#@_{!__!^%[(]@!+*%[!+)!{+_]*&+@*[_(*%+}*@+@&$!+_@+*&[#@%###}^^
%_@)$[&){&#&$!&}{@&&*[&!!!({)&{)+]^*&)]+[+^%^[_&+^$!$!+!+_(}+^}+&#*_&*(+_+[[)$!}
^$]}%](]]#_}[(&${{+((][_*%!)(##[@*&(^!_]**}[}{[%]*!$])[_))&(^$)%))&(+^@$&${_@![!
#(}_{$(&_&[_]%*&*@]}]}[{!&*}{{&+*$@%%}$+^[}{&$^%%^]#%{#](_){[()@@)[@]!#!%)&*+]_}
}[([#}[}&&}+{@!@+}]){_)%_+({{!]*}}^(!])#*^)(#&!)_#$[%_#{!##%%+)@{**)%+&*[*#[+*$)
@(!%)*)^]![_^%#)}*#!_@_@)(#)(_(]!%@)!_+_)]!*@{&!^%_+*(%%$!!_^}@^^{[@#*&+[(%#[{({
+)&+_$!${]+}^$[!]_#{{#]!{[&[$@*^]}@]}+{)_#}@_^%%$^{+($*[###@))]}@!@]]{^[_@)!@{@}
*+^(_]*$!_^_+[#))$$]*+%}{[&([&)@#{#)*%)])!_^+%%*#(+!}+{[#+#%%]@((]!_#@*%&(@[(@@#
^#}@}[*@})&$)@#+*!}^^()[^#^])+$$*%^*@^^!^$^#_!@^^(_&&][)@(%([[]&]]{[^}!@^[+${*%#
{[!({&_(_!+$]@}@!))$[[+#+{(@&@^{]]*][]}_$$$)##&&^#_{}}#[*%(+&]@%{@)!&{+{*^_#_$*^
[]}+^+*{[)}![}@[#$)}_^}#$!#%{([*_^+!#}$@{{&)!#*$#*@&)@&]^}!+{!}{)%}^[}]}[$&)^$)$
+){@)%$*[$_#))[({)&()[_^&^+#}%#((+@@[$+!^[%*_{]*+)}#$${+!@_)@@)@%**]_]))$$^&**!_
{!]^*+#]$!@+!$)^%)!&[$*[@!(_)[![)(}$}*)$(&%%&+^}+^%%&^_}]!(&]#+$*__*))#*{_&*]{*@
}_{%]]$)(#@![!(_]!)+&$&^(){$%_(_%+}%*%&%!!^^)(_*@{^#())[{^&@}#{{^_$[*()*$&%^_{)%
#@#{%]_{%#^)@(]#]}})#)*%)[{_^%[}][]^]^**]]}]@+%_}(])#+*^&]$^[$$%]$&+({!^{^@+]{(}
&^$@[$#(&+_%{*+%%(][%*+*{[)*$#%{({#@]_(#[{*(*$}{$^}[]{%]&{@#@^]{[)({%[)%!*$$}@&&
_+&_}@!^$}!$@_+^^]}@}%}%#+&($)]*+[%^#*@+_[((#!}%[+])[}[^)!(&*^}*+]){$#&}&*{)%^&!
]@][(&@)#{#_*^[@]$[(]{()*}$[}*{^]&]$!%)*!}}()^^(]+*!]^*[^&+$({]%!@)]^#$(^{@^!(}]
&*^_(+_&++_]])&]#%@^#%$^^_[+[&*[*^^}_**!}*}_%&([%^$%_$]]](_@$*%!)({&##([&%((}$*%
]*_${+(#^+^*!@@@![&&!}$)]%^@)#%_]&(])_@+{*#(}*_(!(}$[(_^_^]_}!&_&()(@+&_*^+)}#}[
+[[%){#[#()#_$(]!(^]{@(_%}$[))}*)(]@@#+@[+$[))[%*#%@}*_$)*@@@}{&^#@!}{+{^&))+}#]
%{%{&#(*]$}}{}&]$*%(}]{#*(+#%(@]&[^!{&}(^*[@)$^!^+$*]&])@##@*+![&+%_{+_)[[)+)(#_
&{^#}!!]_]}}}][@{^[%{*+!!!(&*$@^!_)^*$^@][+[{&#%!!_^$&{(_(^!}*@%([@^{%^%(+()(+%^
])*_)^$}_}}*{[[*{+[+*@%}!%#%%$!]]}}(^@@^!*)%^+(+!}!#$_&}_}_*#^&^)[(&^&}#$*}*#}*^
&{%{[}!{_$_[]^&%}})!}]%$+@!@(%)@[}%_@]]%!+$*_)%}#$[&_])}][@%[^_$#$((%]+})%#$)]@#
]@(!@#)[{$@^!]{#^&+)[**[}%#)+(%]#*$*&}}_%%]]&!&$[)]@{+@*#_!{@{#{_$%[!+&$[}+((_]}
^}[[+![})&+^&@_){(&_^&&]#&&))@@%[{%$+%})!_&[[$}&_@[+%}&*]$)}$]&^@{^})))_#@*@{([+
+%}^{]{]]]+}${$!^)]#()}#{_^+__&#*^&+{!{&}&&@{#&(_%#^*#+_${}!]##]*[(%])[{(+!((}&]
)!(}##{+%#_[%([}*}]!_(!^[{[}^{@*(}{@+&$)$){!^@]{](@{)#[_!*_*[#!$@)!}@}]_}[*#(}@#
%@^}&[^]$^(%(&&+!{^!)!#%{[^&+@[+*!])}^])+[&[)}!}{$)}!)^#)^+}+${][!%_%$%&%$)){*_&
*+@!}+${()}{(&$^_)&+#{)[^$*[!$]_$%)]^]@[$!#%((_&&[%]!}$%$$[$*}$(*(+&!#^^#][*{(^$
]{!(}#])#%{&!)+(({#{#*_+{[%[!#&%*])_&*)_}+@{&}#*&#[(!&]]*)#%](#^^&%]@(*]+^{@{#!*
](%$()%!]+}_([](@[^&@{][%])*]{^@@{#$*{!^#)$&!)!%_}&[(*#[[&^_&!_&!@#_$*__@{_#%&#}
@@+[+&%&$[%[@@(@!_&&%#__!@{$#&@[*($]*](&&{}#!^^$[%&{+%}]_}!#([}^{{**@*($[[#[%}*#
*^^[%)%@[&%}!+&!}*&*!)_$[^^#]$$}@(*)&)}@}^+@@!!}&%[]*#$}+}+#!&+}]&&#^))(*_][@!*{
!}*#^()_^*+((&+($]!!$){@$#$}*][]]&}(^{{]_]+^$+)[^##)})@^+$[[%[(_+@$$*#&%%_]+}[!$
@])(]){)#_$_)[$!({@#$#^#_&^(_%(*}^%$]+$%&%{{!{]}]$*@_&%(&&{)*]^}&}^&[@)++]!(#[([
@^#^$&}#)}#*@)*#&[^]{)#*@(%(%[$}!{$}_}#(_}#^^+[(#]_}#}__&^{{@$+^!#}}[#{!+#&_^!_$
+)]#%*{+((*_+_]+{]*)#*]*_!+_*$(!^%%#%*%^$%{*@@[+[((*(!([}_]}@^^+(_)@{[({_^]^)@&!
!+]^]#%]^!{(!#){_!![({)+@{&^}*[}%}(}%_%*&]+*)_!#+{^)$##_]*}@$^#()]*)@%}[^_)!%++!
_(#(&}#@%!)]$#^@&]%[[#!)(}){}}([)+%}@[![^%_**@[]@%]*)])$+_[%]#@!$^]&}*!(}(^}+)(%
})!{^^)#+%%#[^!{{&#_}^*^@@}#}$(!&#((_*_]_)$[(%}[+^(@}{+{}][#^_{]]^)!##$#&!@@%@%}
}_!@[##$@%)}&+!!@#&}$[]+%+({()@+}]#+%_#_{$_^}^}!&[^*^[&@@@^*[@}{+[[%^+]{@@}&+]](
*$**&+{%$@[%^]+#@]}@[*%*]]@*!*%[^+(&_!{[%)}}]&$^[[+_[%@!!}_@!])*@^+*&+&!#(*]!}%(
^![%$&[+*#]}&$}&_%^&}*!*^&_[@&#{^%]$^[*#]^@{#%)%{)@{)*]]!]@((![[(($$[%{(!#^^%!__
{^*!*%^[}&]_}#]{_(_&((([(]!}@(]^!+]#+!&}_@}@_)$$)}()]{_(&}(%%^}_^+[+[$}[__{(*$+^
!%${[&{@#%}^#_*$^+%&+{%)]^%##+{^@$[&_{++)_@_}#[[(_+!{&@[!%@*{_*][$$$###[!}%%&@(!
@+}{{$#%^(#@&(!!_]*$[#(&)^&$$%#{{#_*^{}@&{*@$!_(+*@]@$(+}+@%}*+]()_&_(]%)@]{(_#_
$*@]%($}}*}$}[$(!+(&@+]!#+{@@%%[[)#(+]{}+%@%+_+*+{#&(]$}})^!*%_][@{{!@)&%{@$^}!(
}&]&&[*^@#}&++#{]%*^@%)}@)]+(_^!}*^]_@#__#^#[&]&%]{_(_{)}&}}(_{}+(]&{^))}${}%]_]
%)[*+_[)^]+(+{#&_([^)^}!_*}#}^]}^]}^@&($@[!&#]{$%$}_#(^^[%@]%_}]+%&&%$}]){@}}]{@
]!%_$}&#]&+_](*_(*)*][]%%$#+!!^((})+{}%]@@_+}&_&[{]}**^$^)[&}]^%$_#{}%)]+!%@!*)}
!!{[{*!+_^+[&(_&@{]@+]{%_+%%+][*]*}{^](%*)_]!$#]#@%}!+&][!@%[$((}@[[#&{!^)%#&+*+
*#*{@)^[{_][{]!*$[#@^+#{)#%_^&*{){+[^(!^%$(&$&[{(^!%)]^{]%}}%}#)(#}#{_}!$$**{%#)
+}***@+[!)@{[+^[+[##}*%$[!!%_!!@[#@#@&#![%]@())_}!##%+#{}#(+_&{}!%_@)^}++^*!%)!_
{($#[#+_%}^)_^!#%*&#[_#_)_[#+&%_!*{[!**@}($!+(!__(^}%#{)^%}[)[){^!*{@}]!+{]){]*_
_[{!_%[!#^*[{#[!}{!($%+^$]]{]*)&@&#&+(}!*&])[%()*]}^_+@[^+%*]%^!${@*[{])}){}^$(_
!(+]{%(((&+!*%@[#*}^)_(@+*@*&(){{@}+]]+^*)({{%#})&(*$]{&$#_{{{[@](*$[!%!@%!*&!+&
^@[_&$&[_*&^}+__&%[#+#+})[+#!$*#^}[#&*}(+(]+!*]*(+(^_#_+^%]]@!][@%!+{{_%_*$!@{!^
$^%**@$[$^]@%+%__*}+$)*(*]{[$](]#)!)#@}_]+**)@!%##*^#(!*!+}^{!){}$^@_^!_%$}*&[#}
*^**{%*#@!}#+%@[&##]]%+$*@[+&%@){$%{}}$^]}&**%$(^%%@[$&)_}*)*(#_#%_+@%)]*{{!{{*}
$^(_*$_$&&%](_@}]&(&}@$+]_%+@!++_*@*@%&[*%]@{)#%_]@_@&{#!%]+^^$*{]#@[+[)^)&%{@$&
#++$+!#{([%%+)}+}_+$){{#++&%((^^@%!}^&^$($#%}+$}([]][@^_@}##&_)$##[{{@)%{+*}]{&^
)$%!#}@!*_[_%!^+[)}!#{}{)*]!@#)%{&*#(&_[$)&)@[_+][(${%%((}#+[)!+[*@+$$[^$[!]_!#&
&)^^@@[*^^_%_@*#$!}_&**}@_@__%_$*}^][(($$[(*%)}+*$((}${@^$_)$#@]!(}{^}]!%%!}}^#(
}!_($(^%^]}][{(^]@+{+%}+]!{%}%!@}&[}[&&^+!#$)]_{#$[]_@}_$${^%^^($%*]}$*_]^{%+$$_
[!+@@)^]_&&[{)+@!%[)+^![_))&_^!(^$((!_}@$+)!@]!]+${#_@^@@+_*$([##)@^^*#[]@$}^@){
*})($_()()[*!!@#(&&}]{]+{[*+}%_$(@(}&{]*$!$]_]+&!%+{}$}+_**!@]_+(&[$}__@&[!}{#&_
)(}(+(&#$($$@$)){&*#($]&(@{+*+@)}+(_]()^)}@$^&]+}#**+(%+]_*]+}_}$%]%)]}{[$*&{$_^
()(]]#^*)#(*_^}}_(*!&{@%($+[](*$+}_!))*$#@@^!#*$**%!!)+@+%!^)_[}{*@{(_^#}}+_}))[
]&[]{{](}*#^%(!@%$@)&})+#[@[(}%+&)_*}#!)+]*&}+%](++**]!(([#[$*])}{{!+_*])$${!]&%
[%]@@++#$@^_^[(+)@%_^_%+^^^*![@{+%_{[(([]@][)&^^*#&[@*^+}&$+@*_!!^}{)&_!!$]@^))]
+^]+^}&@@$!${*([}}{&{}_&]#&)!{*#}_*]_{@%}_]%#%@**}$[#%*%([$#+)^(*^^#)%}[)!+[}])[
[{)_[)*@_#*($(%${[)$[%^@+%&!}]_&(^()#()_{_&_&*&[}+!$^$!]++*}])$$]!}[+@%{!]*^}(%]
&+!]]!&^{(*+[!&]$%%_#&]+}*$_%!#&^]*!*($_@+#(#&&)#)&+![%[^{^%&}@{(&#^^^&&#@]{!!@^
{@(*_{*[}+${(!$]%![*}!#*%&)^&&@#{#&^{)#@_(%&}^[!@_^+__{_{){$_)&#(}(*+)%[)@+)}[}#
[%[!*@$${[&^[&@%&]%#+_}%##%${$)]}@&&)_)*#%#${_+}+{*^{{{$&$^[@%[[]$@]%}#$)_[^!__]
$*]&[[+([&{!}}}%[{}#@}!!}^_(}@{{$_##%}{]][!!@^[&)#*(%^_!!%^*_][_%}^%}[}]}()]}_%)
!@_}^{*!$)@){#)_*{}@&]&(@()!&!#%_(]^[@$*{{{[#)*@%!@}^}+%^$!]+}$*(_&}}{(+)%(&{!!_
(%$#!^%{[)##^]**@+]*+]_&#{{&%^&#%)^#}^)*$*&)[]**!#^*@(^*^{[$$$+$+}+[%&*%[_]^#@$(
@)*}*}(+#%{^(+@&!@%^#$&^}&}&}%{#}+!)!^}#{^_}_(%&(#_$+!%+$*@#)%#{}(($!&^%&}+&@%]!
%&*&)*$!##)%[&)(_)&}*{%{]@[#[$@^]&*&@{+{){*^^)@}$%#&)%^)@+##_]$@_{{}({+$#{[@_}()
^+@])!%}%^[)^)[(_$^@{**{(^%_[^{$)&*$^{^#%@)](}!^_#_[)!_%{[%]{#&*(^^[[{(+}^%+##]@
+^_$@{^+_{[+^#@)&+%_+)[^}}*{$)![#][!^@{}&[^]&@}@{%+@}{+([{(](}&%}%+&^]+(}{][_[&@
#_#^$$[!%}#@[@+&%&*(&&[}@)^!@[@&{*&{[{@+%)$!#!!]{@(@@&+)++&_+)[_(@{&[_@&&#})%[+@
^[${+![}[&][)(@+[+*+#{*!(^&()&#_^(%@]]&(+^)](&^]@@)%[@#*[_}$$]*][@(())#)@%*{&{!+
_[(^$@!+**!!*!#*]$*]@{%{$*$]*{#%)*^})*$[{$&^(^@)%!_!}({@#)%&$(+(^{+[%}++#]{[((^@
&])(^+$%@+$&)](](^]@^^]}%[%[**#^_$+$!_[{}#_{)]!$@]@$}+(]_}^#{%#$(!%+&*!_&%!@]*^^
($&*#*&){+@{$#@[()[*!{}_)!&$%%^@!%!&@$!&^$_}!]&!]+[_*(^)_^]}**}&%)}[&#&[[)$]!&({
}#@]_#@}@$#__%#}*}{++![#[}[+@($()){#^)(#$!^!&@}@*@{{+#(}@+^$[&&%!{_${$#@])&_[]#)
]@)+##@]^@}@[&#%_!*@]#]$+&+[%+!*}*+}_}]*^#+^#}!)#!}&#%({@[{#@$!)@!)!+%$&{[{(_$$!
(})$]{}^+_%(*]*&&))[])+(+]}@*%!%{]%@*%$_#)%(&(&%!#(&^_*&*&#]{]^][!(%&}(@&{[[]+$)
&%(*@@#+!_[}&^$)}%[$%([$)!##{@))^_}_{*@+^)[&}]*@^{*^(#!(&^@{)*$&{(]**$*+)%$($!@!
[*]!]_*%[]#__+_}}__)[{$!&&({)[#{#_&()$@{)+)#+}#&{^^{#^##_*&#}*+*%&}%@]_!}}!#**&%
$&@{$*]%[!@*&%+%&&)#_+!_&^^%+${!+++)+$#@[$)+%%!{#(&{]&){!&@$*#%&}[+^{!#+$}++$#(}
#%$(%$&#@$!))*#!_]#^@%%&!*}!)%+&@+*$#_[__^[%+(*!*)^&))(_(%&{}#%&[)+_]{#+&!#]](%!
($@&^}_@^+%$($_${$[+@]#*#[}]&#!!{&@!&@(!&#{[+]#*)@)[&[}))^[[@#!*}+(&)(*_*%%#!(]_
!_&!&_{[)%@{_){_{**!@[[$]__^([)^++_+{+^_&!&$}![#}$$]]@[(_)^}_(&+_]{&}}}{_[][##+#
{@^{+)_*%}@%*(_{^+&)[]&{*(}]]$}}^@$!&[^@@^__]}[^[((@*+%_%%@])&{^_&$]#)!+!_}{[(&(
#}{&^){{!#(+#!}[)+}]$+^{)#!_%_[@}(]}%}#&&!][[%@}^]@))+!))]#!+]*[)]_]*{][$!+*@{#{
&)&&^+_*!}!%({)}^)))$[&_%@#]]!]@)&$(^{@&@[^_#)@@+#%(]&)+!){$]}]){_{}@#%%*%#!)[]_
_[#@@$}}{^&&$^_{}%]{]&#(@_!]%_)^$$!#*@##^!($+*&$+&__@***][!@$]$)*$^[}$^&{}([+{}&
_[$[&&*#@[[@&{_$%!{)[)&[^[+^^^{#$&$_*{*^&)(+(&$}^)%+(#[%*#[*[([+[]${&({%@!](&]*[
+_^[^[#&)^[@$*+@@[!}&){}{^+@[)^&*$]]%^_!^$+%&)_}}([{$$_][(*]$&]]{^)&(^#[]%*%&}}#
+*[[@)_{}&%}_+#)!^{(}*^[@)}@(+[#+#*{$)&_!}[#[*+)!#%{%*)#@++&^]$[$&#$@}}_)*&]))#^
({^(](}+#&[][%+]}^(#^*+&[{{$_$$@^(!%#^*{()%&$))#]{(@%*}}))@+^&)+%$^)&[(]{}^]}}*+
^%&@)!_[${!)&%#)^*)#{)})@*_]){{{[$@$#{!@}_^{{!_$&$]+[[[))_]@)[{#)$_*(}*]#$#%@+]@
$(^_[[^}^&%+)([#_!*})%%)%)^!#%)]&!@^}#_!)[*@*[{!(_}{{^^}
]@*{*)!!(@+]__*@$[}&($[)#{*[}(@@%!}%[{$]&^%)@&(@][+{}*{%++}$&+!&[^^%]+%_(_!#)++(
]+)($[#]@(#$+%]+$!^_&}+[!$)][)(}((+!@{^^^*{[#$_@}$!@%{(]{+^(!$*!@@*}^+*!]])$!)*[
^%[(&[{#@}*!*$_#@@+{_&&$@(#*_]#@$}[_)*][$][_!_(_++$+)$_}^++_]$+(*+!%[}}*_^}({&[_
$[]]@@+!(_$$([#_%_$#(%!#[+)[_&!_*]+!&%}&*[{]*+[!!]+_})[)]))(}_$+{{){[#}^+[{@$[!_^]&@](^!&**^@[^($_{%{++[@[@%^[#(*[+([{}+[{%#+}{_+(%#*[&^&!)*_*+[#&)
_}_^$%#(&+_!#$($*^)@(#%}+^($**][}){+(#{}*&^!(@#&][&*$#!{_!*%$)*(&@]^_*+^^#$}^{}(
+)%&!)^{^$*{!$$[+{])%_^^%!*&@[%#*+##{#^+^(_])$(]_!{*+_)#]}%]^&*{)(+$!_#[*^)[@&@+
^&[(__+_#})_*))#%#!)(]@%{^{#^&][_[+!^&++$++_#$*(&$]))@_#+&#{!)[%!^+{%#{+(&$^_)&]
#^+%&&#(#!$}#((*_+&$_![+}+)[!!+*]@^!_#%^)}%+({![]_%@*[+(}@!$%$(@)+(#)%]}}@]#_%$@
_]^*+}!$+]{{*[{{]%$^)[]_@}#+@*[+@]^%)##[{^^(}_^(@}{*!(+}]#{+(@@@@@{+@(}*(*(%%*!@
)@^$%#+]!&^$*$#%*!+%]#$}^)[@_#%*_!&]&!{$#)$**[[*]+%#!{]^@&]#}^^%(%!*%#{@(*)![*(+
[@(++&]#!{_({%+@)}](&*^*!{$^_{)]}}[^+)&{##*!++_([}^})[]__@!]]&^{^]#}@++{&&{)][[[
}[}}*{)%&]}}+*!$%$[}[@[}%*^{%(^])&&_[*)+%*!%^[*()[)#%_!{]}%@)_&@#$%&(*+)#(]$&!_*
[#){*%+}(+#@*[[_!)^%*%&_#_(%^^$}*(_)(@+(#+*!+*+_^$&($+$&{@[@]{%*!*_{}^%$%^@%%&+}
((^+@{$}(^$$}%()({{^#{]]{{#){&%[!+*[)#%$}*]+}+%{))[((##__$^*%{#_$#(^)){%}*@#(%**
{!]^!@[$)++%@}+]]{]+@#!*]{)+!}!+_@{*@__##]&$)#%{[#![{%+_)&_#**]#$]_#!*@]*&(@})(]
^_+#+$({}@%{^%*#()(^@%$^%%]#}&^)_{%(!$)]{@(#)*@}$&(){*%+](_+}#)(_!@*$@$]$)@%{*%$
$*!{&$$@$@+&)#}}}[{){}([+__+_+](_)@++^%[!*)(+(%}}+%@%!)#$*[$@)$+){_!@*}!]]{{++[}
&@&&(%*#$!^)*_({))]*(^)_^_%_@%(@)]]!_!)&%{[(]*+^+#*#^%)*[]#[[}@$%#{{^#_+[[@+)@+)
+!+__#[]{*)^#%}()]}**$!%[$!*(+[}!)}+(_($@)[#(}*(]#{}{[!{)^@*%*[!!&$$&({%)+{#@]}}
%[)[&(*%[)!)}$&$%@{*#${{%&[#}%@#}@!!*@*)(%(!_*(+]^&{_{_(@}#)[}#%%^*%](_)+*@^{&{@
!{!_*#*[*^(*%_@&_^]^^#!((!*#{#]#(%!{)]#++@@_&}&@+_}+&!#$${^^_(^%_+&)!@*[(])])&+_
[({*%&[%!@&&&_*#_@{_*]@&$+)}(&)+(#]^}{#%&([^^%{^}){$#](^##^%*%&#%#$#}*@#!$#}+]!!
&*+!^%@]^&&++[[$}+@_%)]$_}*@*[%&*$}%&$)*#*%[^@!#@%!)}_![_%(}!$(*_$!*]+)@]}@(@{#^
[}+{[]#@)@^{!(@_]][#}+@&$$#)$*_!_[@**{^(+$$)!!$!](}!)+)^!}](+_{({!{[{}+%)$)@$%$[
$(@^*)@!^^!}#*@]%!^(@{}_!@]&^#({}{](*){+}[_}_($+@}+]@[^*!@]++_%*{^*&+[%*{})%_+&&
@{@!+%*#)@^%#$&}^){[){]}]%*{+)&+)#}*#![())@&#+!*))]%@[$$^+%#}+_!}{#((}@+]]%$)%#%
$]&]{&%^}^(&[}%]#!][}_]+$)${^%[#{)#&$+!^%@%%_]%_*&*!_!]{%+@]&%(*[_^(_[!$]!){!*[+
#$!(}$)#&}^](%!(^_$]*_@!^{]+)_(*^{^{&@(_#(!+!}+%${+_){%!@%++_&)}@}^_)+___*&](!}[
!}(%%@_+}{(]$+@[+%_+$%){#[{[++&)&&^&@&*%&&}@@{^!*^((@]^^{}($}_$_]@[&]%++$[^#]{]^
^{_@%#__%{&%]%_{}++!_]}_][$+@**$^^{_@]){@)}[)!__@_$}%_$}^&!}^@%%+{&^][})@*%(]+([
[!!){[{]]@^_)*!!%@(}}^^}!@$^_${#*_@]]}@}&[*&@%[#*$]_(*%++&$+))}_}+{_!^@&)${%*}{)
@)]+#(}(*_!*]%$@)_][)]_%#{$[!%$_@)]#@]*$$}[#$&+&%$[{*@^$_%$@([$}[%%_(_&!^$#((!(^
{!!+!^+{@$(^^@#(]$($#]_]!%[*#%&_[%]]]*@^(})){!_@_#(*![@}}}$[$]^@_%%}{(&[})!!#}!)
]*%!]!&{%+%@{_}*#_@$)^{{]&&^]{+)@(&+!&@_*@}^@}%(]@$}${_}{#*)!@!*)@%(%$*}(]#&&{&+
}!(*+[)!}}_*_$*@($+]+#+{)%_!{%^!{]^]_{([*))!^&))@&}*!_#^++{)]$#)}(#)%))+)+$})#(+
^{+))%$_%]$&{#+(+!+_&!{^(@]}(@)^$$@@+_$#@^_){%)#*]+][$&(&&&*$_*{*$#(*^&*(_%%^*++
$(&#[{@*#{]_@!(}#${)(!#@+#{^@_^${[+]*(![$(_{$%+(!+(!}[&)((((*^)^@%+![!_{][#%++*_
&[&%)$![(]#$+@@*#_}@]&^@@%+{%(+(+![@))#{$*]{}+{[*!(^_^}]%#]%+[@*_&&+#}^[@[&$_]@}
^[})![*(&{#{&+}(^[)&_%[@*_)(@()!(^)^((({})^}_&]*#[*^[@^+{$&#[{[^%&_&)*{[+!^(&{*@
!)%&}{^&[{{!%{}([+](!*$]#&+++%+($*[({_$}!*^_[%{*(+###^^{(_$}(&}@}}(@)]*%)!&_%^[)
#^+%]#*%{#[%@*@]{$*_$*!}^)}%!{)[))+@[%}$_#@+!_+^!}{{#^!)[+!&($![!@#!^}}{^@*$[]#!
%+{+*)+#@@&([[((@)##%@)!^[$^}(##[}))%%([^*+${)(@[}$[&)%@[$])](!]]{@+)(&*#*@&]+[^
)]{$%$$}^^}&&^]&(%@*!)%[!})&(!_^]%*[&)#&!^+@(#%+@+{*%}^]$!)]{}]{&@]]$]#$_[${*@%{
(^$][(@))(!{(#))%+{{{+#{]{^}&#&+%_@#$%]_&($[!!}]++{%%(#%^(%+*_#^#[*!+&$!]_(@%^_]
$!^#){]%}*_%&@$$[*[&{*@[^}+&)_{+])}][]))%%([[[}[_%}!}[^(}^{{%![@+]][*+{^[}+++![(
)$&]_+#[+}({#+}*{)[+[[([@})+^{^{*%[#{^$@#@]][}{{%&]#_{(%#@${)]]*(}(]$}&&@*&+@](#
_^({%+&^&}((_#${+](+]}@!]}#($${{!}[}}$[}{$}#*((}%[){*%+^}]%+](}&&%][!#$]#[+@&&{_
*}&!)%)%%*{#%%@__[_+%^@&$#@(%*+^$_[)%({*$()(]@*[^_*%}*%]%[%+#_))^@(+$#_+&(_@]$&@
*{}_^@){)*((](@${}[%!)_+!!%^*&${([^+$**^_{*](&({^%![&_$%!]&%%[@]}}%!^^$%@}]@%(!*
+%(*$[&@]*(&@[#{_%!^{)!*!@_]^[(}*]}(!]__{)!**}(!}++[$+([!]*()${){+%_(!&[{*]])#]&
++(_$%_!])$))((_^+[_&++@_$}%*!&}&[%@@_})@%)[{})^{*%@]$]!#%*#%%*%+*&{^*&](^}#*!*_
#_%@([]^*][%!{@)#}[])($[]*()_*[@)^&!%+]%%]&{(^{{%[!!!!_#+!@$]&#(({!_*]]+{#**^*&)
$!$(#[*}+}*__$]!])#$!}]&{]_&#*_[^&}(@*[##^*!{)[+[(}_&@+&+&_(#@[{^*[]}${^*{!@+$^$
#^]$}((&){#@^*}_#]##&@@}^@%)@}{*_{&+[&}}{@+(#+{#]@#^!(%}))}^
{__&(]&#$@&!@((${))_^!][$)%@&%(&_]]^)$@$(]}&$)}}$)(([&{){%{%{^#!%+)*}#@_%{%*#@[@
(%{^}(@$$(^_]($)]_*}&+{^$%!%@$)!#$+(!*^}&(*!(+!$_^#}!*&@_%%{#!$+)]@{}((__$}{[!(@
#[](]$!_#%}&][!!&***(#(@(!@!+)&!&(*$+@#$&]@^_}{((^!@!_[^)##@([&]()[}()+!(+!]#@[&
}[}*(+)[*$*@_}[+&&}**_+]+]+#*$(%)%}[+{)*[{)^#%$]}}(^[{%]%#+[$%&*#][++}&)@^^]&([(
*}]#!_@(!$)@)]&^[_@{+%@($&#{$%@{#!(}(@[^[#__[!]}$+#__*&#^+[#]&%({@_(%}^[]!)$&}]$
&&*&(){[+#%($]^&[($(@$^*[^%](*#[$_*{&{!_#*}$&]&}^}_[}{@*(!@**^!()]#%$[^}&]%}}^%}
^^$*[$+*![%++({&^^_@{[)_([@*#)&_+$&{[{(+[}^_!_^#}++*$$+^)}%]@*#(}%^!)^&)_{)&&@][
@@&}}![+!%{+*#}(#[%*#@)&$(@(_$(]}]{%&]^)&}]_#$@(_})$$^]**&$_%!!##)+(%($([!&$[@$}
(^][&}$]##({[)^$[*}@*)(^]$+($$+]+[]&!&*(}$]&[}{_^}#]*+!!}{__^+${%%!*{*}})&](+^{^
_(*#*^}*}{]++_([##$%&[$%]**#$!}%[)&(](!((*_(&_]][(_!{_@]!%@+_){+)]@&[{[(_$()&)[#
[_(*@*_)([_&&{$)@@[}*&+!(##^+#*$#*)(}{(_]@%!@!)!%%[%%$*$$#(
}!_+*!(^!@[$)_{[@{@}%%@^##$*[#++_(]#}!)!^_%%][[}#{}*[[$#!{*(+)$$}@^{^$$$+]^]$}$%
))@[}@][_((_%@+#_{@)#_*)_*]@%$)!!&!_)&%(}{[#++*}!]{)_$[&([!^[){{{+]%@%[)&@](^}(%
[(@(**__*{$$%}}!#@+@&$!_#!@]@)]{+]&))(^_}[%}#@^&{&_({+_[_()()}__+##+_^+!)%!#[!![
@$*]^]!^}[)#%!]+$@%[^**{*+!*@{}$!]%[(*(]+)+!)}[^{+&{[{%{+$))(]^%(@%]}&_(%@$)_$+)
{($#%_!%!&%!@^$@)}}%[_%@}$@!^*!%$^%+%)!]_[)}*{&%^$$)}+^!_][^}@##%#$}*!&*@%}*{{%#
$**!_$!#&+%@^@@#@%!#__(#[})^(}@{(]$%!@&&@*++)((@#[@]]+@@{*++$$(%}&_[*%#(](_!*}[#
$]#%{%${!!#^!#{}@)]$[%$(&[!&#![&+([@*&@]!}[[*+)%][*}@&!$*]_*+++{!!*!_+%{*++#*^#}
(&!@!+!#($@%+[))*&]*&%)$+_**%^]&%})+]{{{(#{$$[$[[*}$%!]!+(*%*$[[+_(}[}+}*{$)^]&*
!#%^%^(@*&$(!$#$^(}+[&##(})$+#!(*]!]#!^{+%$(&**#^{!+_#}&%^{]$+[!&}^@@+*#_+#)@]$$
]{%]{%&^#!@@)}*)(]_{{@^&)&(@%{@{++}_^{)]#+*)_}@)[&[(^&!}_^&)&@}*((%]][$$#$[&!}$@
!#&^^^}_^&!#%#$![)(]_)^}@[{[)*}[#@(*&#^*%*[_{)([{[(](+{^)@#&_%_&+}@^]$_@(&(_@![)
#_!&_)^[[#$(^#+}@#&[!##_{!^![}@#+))&**$((*^[#]^!%^]_(_#$+^[]{)*+]!%@+@&+$++}((]]
]+)%]){($)$]&*$*]_)({#}))!]{[*+&%[$!#^^%#[($^*$_#^(+)^#{!#}%&*#^]{$)%!](*$$*%+]%
##*_{)^+@(]{_#))+*[+$#^@]+)@!*#%&)[{{{&&*_%}%]*(+]}[#$$)*$$$}!}*{@%!+)^]%(({+}&&
[__$#)(%##*^&{($#+!{})#^&#!%^$*#]+*]*[{[*$*!^{&+#(@@##($!#_^*_+($$_%@[^%^[)$_$&{
%!&#*{*[&}](*&)*)!(%%#$%)[&_&]@{*%+@%@%**}&+]+!*][&^)%_^@@}#%%&]#$${%}_[@(%!+}))
(+[#_&[$#%%__+{+[[([)@[}(&^(_$)#[&!)_##*{__@&!^@+[!_{(*%]!+^](&@&!{]{^^$)(#]%+@@
!{_]#@]&%((+&+*@^@$&&$*{+##_}&_(!%(^}%)#&_^$][]#(^@@(@+&(%![({}[$}_$%*]!)$#{$@[#
(%%]@{$(^{$(*$(*#[^]}%%(@%@)}@[^+)$+![%[(!&+&(_*@^@_$(]_@]_[]#{^@%_!%{+][]$}__!#
*[(!]{*{[^)*(%[%*!(]}%*^_}&[)+(*_+(#((]]}$![@%})[+__!{(#+{%!}#&&^%+*%(*$%}+_#&&*
^@_)}+$(^}(([^$&^((*+*!{%_[{))$$+]%_&%!]#{&!#^^%(^$_#&!^@]&*#]&))$])+$^](^^]))+@
&[($&}#]__%(}&_&()*(&#*&)))(%+]&_(*#^_{%}$@[$#*&]+&%}+^)^{}]*]_@]_&&%@!{%$^}$##$
)*^))+(}#^!(&([$$](*_%)$&(#!__%+)^@)$**%(%_]{)^)+@^!))+$@#&^!&@!^{{)##%}_{%}%[^$
&!]}(}]{#^##}#@[!){*!%^()!{_@{*+%_$+#*{@}]_]^}[]*[#%_!%{*(*({!)+]{})!{[!{[!#}@)}
!*#)[)%(]_&)#%}(_&(*&$}%*$}%))}(%]]@*_}@+%]{@##(!@_+(^%&^]]#_(!#&+@(+^)^*[[!&+&*
^&&+[!_$*)}{]{!}_@_]*__*)]}(]_(]#_!_)#^!!$#**#^](!++*[)(]+(&*(!_^@*#]{!!}^$(_#(_
]!__!^%}}+%)+$]_*#&++]##*&!$]}+^_*!]%])(++})!#&$()##!%^&}][)[_{&]]]@%(}][(]%&*%]
!)@]{##&}#!][%!(^)#)]!#^!%!#_(#%]{_}%({)}%+}]()$$)$(((}*{]!_])$!)%[#{%)!}{%!@#)&
}${}$[@$]&)^@**($][&[{{!)+@#}+[$$(+*[_}&@*^%]}{_[$(#{$[_!)##${[(*(^($@(^_##{#}[]
%(+%{!$}(&$!)])}!]]))]!^)*&^@[%)*%}*(^{{)+_&%_%$$(!$$&({[&&)^_}#!)%$$]*))_!+]]{{
]&@}^&[)&)!}!+_[$)%!!)%($)*!^[^+]&*[))+)^(#&%^+{@][(@%$!^#$^]_[(((}(})*@])@)%&[[
+@]{%%!&}%#[_^_#+)[&%${)[_^@{}]^&&&^@({#$+]]]*#[%)[_{!)%)]![[##*!]_+_(+${[@^}!#(
^^*(}&$]{*()!#]([%$^$[)+)!%{(__!{$$&%}+!*(}%#[[+}]+$$%[]*_{(^@}&@%@+((*!@[%+[+)#
!!_$@@+%@*%]&#+@%%}[*&+!]{+&{}_[#_%^&)]{^[}^&+[}}^&}+*[[{&}[^&{[}!+[(_%)!){(^__(
^%&@%@#!%@$*(*([([&}$_{+**%&%%&^![#]^[_}#[]%@]+[]&[}@{!^}%#%{]^#@}#})@$_}}{{}#]{
*^^[^+}(*&&^{*{[_&[]+^[(**}$^)+**(}]^@^{$*(]%])##+[!(_}+{($[+*@}}*[$*_@){_{_%!#)
$}{@[#!(@+@^}#}(^($#{[()%&*%_#@&$[^[(@}%*$^*_%{)[](@+$*{(+![]{$%%&[(]__+)]^$*_^]
{@[]$*({${#(%&+!)$^(#!}_}}%%@}^]((%{)*${([_+@^+]${)&+%(@)!{[(+*{[__*}*%)$&^$%[$)
_[[%]!(&{({])*][{!%_)@%!%_&)_+(@!*)(]^{)$)^*)*{[+$#(}_]_%_*+^[_)*}(]{)}^+[&[@$&{
#^)%(+@@+#(]**[#^[!($_]^@}+]_$[!(%![$$^!#+&$&#[*{]{$!@{{]!![&^%}[{(_)[@**_]&())^
*+^_)(#!$%#{)^$%(_[^%%*(}&!{@#^@#)](]++%%]$^*)@+]&_^(^@!{%%({[([$]{}%]^*+%^*$(%^
$]_!&++!)*+%*)@#}!)@)_*]&{)[**)*[@[%{(%#)%$!*}&%[^]*_+{(!+&%(_]{#(!#)#!]]}*^+[}+
[{(%!{(*_)_[@^&_+&}@{#+^$^*$[+!+(!@+*[@!+%]_^{[][#^)([}&!_@_#_&_)!+*{$[{#^_$&&+&
#(*+##$%$+}$]%&]&(+!+})$]![]%_]]+}@}^*^[!])@(!_[]+$*}@][!&)%@^_!#%^[@&$(^_(&})%}
(@%@[]+%{)+([!(!@){!^_$]&*(+@#]#]{#)#+][#*#!{^&@[@$%^[!^@#*#@!%!]}$^{&$$*][(%$]^]&]@_&$!$[&&[^*&![$^}$+{_&!@%[%(_)]&^!!*_}
*[^]}{{_@^^!{##%&]$(*_)#+{{[$++]%%+(&^$#!#$^&){!_+!@[_$@_+](%#*#!}[&$&#[{)$+#@+)
%[)[+$$*}#[*${$)[%$!$)*(]([%@%#)!(^#!)[]@]{)}*(#(){^%)@${$#}])@^(@#!}^(&]%_]^@@$
_%+[%&{_(($+[_!#)]+*)[^&*%$*^]!^+{&*%{^(%))[(]}$&})($&@((%#[)_%^]_#{**}+&[**_&[[
](+]&}&#[!#[)^^}@^)+&(&@[&!!)$]*{{$**)^(*[)$#}*)_{@(}&^$#([((*_]&^[!+()(&)(^]#*%
]{(&!!%+#^^!#}@@&+[{@@$^{+%%{{!}**%!*+_#!_([(!*%!@)(!@)[+*!!*_+_[%_}]&)$^{!)+!*]
)_)&*]!{]($&[&*(*##{^%*_#!&}}{)%#}^@#@%$&]($(_@[{##})^(%+%)$(_[#@_)[){@[@)+)#]+^
%{[!])]^]_[%%]{&%#{!*_$[%@}@^]@)!#(&#{(#(_{([+@)%!@(@[$$_$_!@_$&[))*}){+(]}(*^[)
%!@!!!%{[(}{$$@%$}+#)*[^$}({)*{(@*(%]&%#%)&+[+$[]*{%)$$##)+}}!+@!%_$#+#!)[&%{*!@
#&&{{^+[}@$$)]*{!_]#}+^{}*%*$[$)@@^_!*^#*(+}_()$@%$#){_!{_%!${_^!}}!^$#_]$)^]*)]
]{++}(!}}%@*[!{**_+^#^!(+%[^){_&&($!(!!@*){_)[]$($}@[&$!![_#+}[}%#@&+%%}*{[@#&)(
]#!_][+{[^![_([&{})$*({**@+#]+%#(}(^!+@&}]$[{*#++{&^@}&@!)_^{%[})))%%&((#}{#]@*%
@+$%_[(((!&{@^#[$#$@@)_}^){}%_**){^$+)*$+[%!)^%#}&)(@^_}}[+_[}&@#!*$+!&}[_})*[_)
+*({}{*&($(})@]+#_{!!@*%^_$+#($(}%}&[!*}[$^{(*#[#%+}%${}^*[#&{!&@(%^^{+_}@{*[%!!
]_]{}{*&%([){$@*$_$+^&!_()}}[!${^{}@&+}#(*!}@+[*[*(*[%!^*+$]^[)${*@#]_@(+%[)$#!]
}#%}+))(}+$)]@$^*$^$+^[#${*#%]{)@$@(_@%(@#+$]+(({@!#$__&*[[*}[!!+#%%%*(*%}%*}(*+
@^+(}{)#_}^)[$%}+]^}$()@#%#{!*{!(%%[!(&_![_#)_]{((!]%+^(#@[!]%}]%+@[)^@^]#}[{$(_
#_{+[)^}))%%#!*{+[+}&%_$@!_)@$)*{]*[&^!{&}$%([$]+)}#$@#]}&*@$+_($])#_(#+[+@*${*^
!%!!^&*^+{*(*@$((]_$_*+]!{%^$)#__]*+@(__$%&#]@#]%(}$*)#*!(^#_]&&))(+]@%(_{__+%)!
+}&(%*!]][!&)$}([)$@{*{{##+&&@_]%(*+(&@_@)*$}^#[+!@%_$@{&!]+&){{(&{]#&*!@}*[%[@+
}[+]](]_(#*{#*&%_*@$!_)^!*%#^+$*](*{$!{#)]^!}{+)^}][^%(%@({)}&_}+]!}%)}{}&$%$&{^
{+[@#@$)@[])__$^&++]+(%*[#]%#@([#]({#%&%%^+&+(](@}@{@*([$)%%}&$%[+[[(@#]*!][_][}$+[)#)$&**)
)[*#&#(***+@{}^@$$!]&+%&&$]##!@)%@#!}}%%_*%[^)]%{%)%^[}}[+}#+*({_*%*!]({#}+!*_)#
*]([*$@+!_#&@#)}&!(%)}{(+!)]{_^#{+%}{[!^(+@!++$)}{[_@[)$]_)%#*+{)})($**{{]&^*_^%
[)@#!}%!+$&@]@_&+*$[$_(&(}@)()_^([%!^^*_+*}^))#))$!!]$}^(#&#$$[}^_!]){^%[]^&_(**
^!{_!!%])[__^)^%}_^))_($@&$*#&+#)!}[@%+%(&&$$%#++%^%}+$_%%!_(@(+!@$[}^]]!*^}#_{{
*{*&$}$+@[!@&)])[{%]^($]%&#^+&{[[(&^]#{}{}*!{_!*+_&)(]%_&__$@{]_)^#_+{*[+[^($^[@
)(+%_&($]$){!#_%$!)$(^$%)^[_*$$*#{([^$_]%{%%+@]^{)}]+%$^%[)@^+(+}_+)$#)##][&{^$^
#}@}%]+*}{^)*$)^!#)%()#)]$*@*(&}$#%&%]#*%{##^_(*(][$[_$@${#&%)#%%&}]^_%*_@!)&%}_
(%*@!}{%@@)^}#&}&{%__{@^*}#)([+![]&_%@#$%^*)}}{$[@+{^%*(@#[&[!^
(*+%}(^(%!%^!+)!!)[#+_^+{+)]+^$%}{_]^@*@%#*#*%[^){([*[__##$)&{&+_)%$^}_@{^__)%[)
+^_[)!$]^#(+^)%(}!]&__[]^!@}}@{*+!*!_*+%($^$}}[()]_!)(*)[&(+!(([#)+[%^(&}$&_}[{}
(%*!+[+[^[#][}@+!^*^%[}$]@!%%(%[[(#_+{#({@{&[$_%)+$%}@{_+[{}+]![}@]+[#{{]))}+#[*
#]**@$@@+_@)[&[#$+]&]&*$()[#^})()$$#^*+^+%]}^]]&%(#}&(+!%]%]](!#+}$%^_^^@{}+*&}_
_%{#*!{!@]@${%!_$}#_@_#(!(^!#_*#_$&@(@^]_@%)!^&^&{%)({+}}_{%%]%^{&&^@_@&&^[](}(]
_&&^(#_])*)+_]!+&)$%+[](){)+_#*_#[[[%$!^#!!$(_^}(#%({%$!_}$}(()$]!&]}^^)+&+!@%$@
#[_$%!(&[!]@[#]{{*]*(@{+&#_^*[!&)#$_)%[!&!))%@&{[)@![[^[+}&#*$*}+!*&{@(%&}[$^&%_
!!!$+]&@})%%[@[&%!($_)@[}({_[$#}^}@)(%%^^*&&+]%]&$_#^!$!(_$%}*_$_!#!_@^^)${*)%+!
$})&}*}#&([[+^[)*#&%}+[*_+!!&_@%!^##&&{#%@{{*%_+%_{(&#+{[[[*$(%&}(()#)[!%)%@&{[#
+_%(()!!$&(+{{@*(!*!&^#^!(}+{[@^}*%)]#%(!^!]@]!{!{_%!!)#@!_*_#$$})}%&[)[!&*{@]_!
+{%_+]+(+}%#_[)+#%%$+@{([]#^_$#@(}%$]#&^#%%&$%(+()+_}!&}^&*)}]*+]]]*_{%(!][}[_{}
{{(^[!&{!]$&(]]+%^%%{@}_{%@+%+[(^}&@#+^^@^])&!@}])$+$}[)![^%$@_[}(%@$![!+}#@+{&)
^*&_^%+{^{$+$[^_*)*++%^++#%#%*^$*@+${!+%@!}^q(%!({@]%@]]@&#^$[&![(**${)]*^))[$}#
_*^}[}+]{_([#_*)@}{#)@$__!_(^_!{]++$(&)*(}{}_!^*!!++}(_+}$()@%&#{]]+_!$&+#&[{$^$
)^]()@$!(#_!((@&**)*_[@^)#}$%(}&)()+))&[[!%&}{&{[+](#&+_#(({*#]^(#]))#}}@_*{^%+^
%!!%&((&&)@*!*!!^+^#*}#!&!*!+$)$!_%^+&[_+%{})(@[*{$$_)})[!*((_(++_@(${*#](#_]!{{
]]])(^)*%[_*{!@##}[%#(&%%$[#+#{]+}@*+}}!($_}$}^[%_{][%{]@]_[$(_{#&)_![@)^*%{*#&$
}_+#{)@^$]_*$@+@][%^*%+&&[*^[[(*)(#!+]()$#$_@*+__)!&!+*@(&_^*[)${$+^$&]))_({@+[*
_!_&}*$#%_&[@^^%{&&&${}!}{}{]{]}{]]_&%+![+!]_}[$[%[&*(_[_!@(+%_@({*_]+^*(_@##_&{
*&}@&^#}%%!^)!{}))!%[$^_^{@%*#!_[_&&!!^)[]@!{[+!^+([+%+*@[]}*^$$^#$]&$%$}@_[[[}!
$(+$*@!*&^[!{+@[}$#&{}[+&^)}&*[]}*^#]$*]%^}&@)]${)$@@%[*$((_)[*@[%%&^&^}*{#^}&@}
)*)_*^}%+_!{(_!#@@__&]*&_}_+*)(%_@]_@)&]{@(]*+&+@})@(__#}%$($[@)@$@}*}*#)%(((${!
}{[(+#}^{}@((^]%@}({)%&(&[}(!&!!$^+_%^}_{&}!__){+$(}*)[![#%&%^&^__]&[!+{_!)${))*
]]]^_}#%!]_!*^}}!(%{)}($_&@^}]&]^#@)[^@(@)%_($!)[}*!^@#_^]^(^}(_(+%*)}&^][@^]}(}
}{)}][[*]{#(+(@)[_]$$_&[]_&#$)_(&}][&}&%#)@%!)+!]{%*)^%{([!&^%)}+*)%&&(@*$$[{@$&
]^_!_@&@(*$)[*^)*()$({!!_)[((!*]{+[_{*+(#{%]%!(%!(^[{@}(]&$(%^%#^$$*[^#!_(&]$}!{
!^&&^*$!@*{}[*{{{_&(+#&+$%!^_^[($&+#_&@&]}[%}^{{$&!)}_[}(){[)%)$$_#_$}+$^()[%_]_
]])![(]_(*@!)_!&{@__%{$[)]&!*$@()+_][@}&#_*)+%_%^&#${^]$$@+$&]&(%][&[@[^{*%@#+${
}^!_}{)_{$]_@{%*_&^_+{$[__]^&*[&*${{#[$%*&#&{_^$_[)!)%]^+(%}[$_@[$^_*_!{_{#&{()]
}*_)@(&*[@%$$&))()}]!^+[{##@%+**)$)]&_]{^([&*&#$*&(]{([+&&^}*^$!_&%}&}!(}!__$[{&
@*#(*&!_)%&__%#+%^[}!^![}@{}())%]!]!@*+)){!{&*+_}]{}{!^^$)@_)#_(!@^+^%@+(%]!{*+}
*$]$*%}&__!{%)+)@)%&!]}!#&[*&#&+@(^{[$**$^#+)&}#)[][}$}*@_@!%&{{@!_#%]&]_]^%][({
_]*!]}@@+{{]%_($(^^^&#@[[%*]_@[#]{*+{_}!{&)+^@@$#)(){[!])$#[$&@)#+@][]{^](}%)+#%
&[$%*#!}+_@$)_}[+^[^{})!}]_#(&}+[!)!}}*}}[_^((![#*_+[$$[*)(_{+&{@^*}()@@&$]^#+^&
#&@{[^@)(}#[@@&$)]!%@)+*})[{%#{%^*}}{{}]&_$&&$()&#_{!}@(+$%@!*@]*$+&_()&!#}@@{+!
(^&!^*@){%)@@)+*]!@$!#](%&}$_+}+@)}}[+&_+#!*[$$+[(&{!^{)[@_%[])@+)@&&(!!#$&+_+)*
]#&*^}&*}%+#()%()[+}([!$]#{%%+@@@{^^_#*]()^*^%]{+{$(][$])@}]{%]+]+*!!}^![#@*[@)+
+($&}]#^]%%(})${&&!&@@]$_+$[&@%})!*$]{_!_(^+%&_(*(+**%(_})[]$))%+([!{]#**)()([*)
]%+({^)(+#(&*#%#]^^**+^}}+[$_(+&!_{%&{(@&*^[%[_]*]@#@&)$#+!${!_$[#@!)@}}+_^#%{}#
#({@)[&[})[+({_!+^+)]#[#[$_^((^@%}{[^_*$^!]*!*(^{@^}}*{{&*@+}![_#%^%[$&+&{@_%@#}
^^!([)]]^((@!_[#[^#+^+&)#[$#{}$+$]()!__$$#(#!#+[*#)@#_}_]@%#&$!@)]$&]##{*(&}}}[@
$&&]#@($%{(![$((^&*#(^@$}+[%_[}[]!!*%&]![%!*)[[%)[$({%[@[%]_!){!*]$}(@((#}[$^{@(
%{#@!)++)&}$%)_^}[@$_}&)*#^$_)&@}[![+%+{@!$]*}[!!!([[^{}!{&$*)*@**^]_@&_%](*_[*(
^!@(*&_)$[$]@]!^*]*!^)@(*]{@[)]}&+!%[][(#$_[$}!!+{*](((([#@!($(_$@*&^#)&+*%{_&%$
}}&&[(%*]*[_]])$%}^)!#!*&_@(%$@_[_]$*)+)}+*+]#!^)_@#%(&(#}&([[${(+_{{!}#]+$@^]{}
(@_{^%*]##*^!!)(}#{@*&#_}[[$)}[#&[)@}!_*}]#@+&!}{^@!*}{+$#}$]}([{&@+]+++*+[+@+&(
[+${^!)}($[!#$&(^!{^])({%%@{+$)!)[#$@!]({(}&$$&{]](@+@)*$&$[&(!!(^**[*#!){+!!)$$
_{%{}!&+]*&[$}_!@_&{+%{+({(!]}}&_^_!@![)]}##)+!]^_#@%@[#^*+!*^${*)($_#___[*_)+)$
*!}^^+^$++&++*%]#$$$#^*^$!]]^%$%&*%@{#+)&)_](__#^]^&%!+(!#[}@[*_+^+_^)&%!&}#{*#(
{}+${&@{}]]$%[^%%!(![}]}[)@(_%+&[}^#(@[^#&[[(+){+$[)(}*+{&}%{+_#_]#+}([^*}!$)&^!
+!&}^%)%})#&*{]{%}^)^$+})*[&#$*&!]_]{$#)+&[(]$)@(+)&))^_{[_&&@%#{%}}_!++#!@{)}$}
{)(*(]+##$[&({{_][$[*)#[[#!]{&)${&(*[*}%++&**&&%#}^}^]*(#!]*@{)#[_{}$[[#&{#@!%]+
{^{]{@*(%##[#$[$&^][_}]}!&{_!&[^&)[%&+*[+#_*)*+$]**%$]{_%**#+{]+}^_)@{{}]}{[+&@&
#@^@@[*(^)[_#}{]](])!$&{}&*{}&)(**}*[@+}$]][)@}[&[#%$@){[^@%&{+}{{#*]_&[%#&]+$&_
]^{(}+#^][(__#]}(}${)@(*)$(^*^!_!!{(!#{)#_]}[*_{_]](*@&&_]_{{]}]%{$_${!]]+$@][@$
$^&*(+$(*{$$%)%+]#_&#}*+@%[(__$}$(@_]{)&%&$_^%])(]$()(^#]_(!^@{{))&^$_({^)@()#%+
{%&((#)}}[#&$(}]+{^@{+}@]}+#}{(}+]!{*!![)}}+$&_%%*]*(!)$+#^$]}+*#*!(^^*_{)+]%)!*
}^{)[)%])$$&&(]{&{@&#$*{@)%@+!%*#%%((@@[#*_@%($##!&!#$!#{$&^()]]($%]}#(_]!(!_%!_
]$^%^&$#%%())_)!_{}]{@#{&})]$_!]%%]]}}[{[_)}}^((^^[!&*)&+)#&}%*($%+[@$[+}&#@[$(!
@}{[!&_%]{_{*+$#&#*$%!@]}[$]!!^#^&)#}#__[@$#(})(_!((*#){)$#+%_%[{+#_+&@}[}^*%$&#
@{[}^#{@}!@%}(*&@(!)]@)_@}%+!%}]%$&%][#$$)[#{)[^%]+{{*^&^}}^@%^]^{)[[)][])_+##*@
($!^(#[+$#@[]{*([!@]]}%$^+@#(%[^}@_&@(!&{_)^%&$@^$[&!+^(%+]}@_]%!&(&{^${%*({_}%$
_%})%@__@$_@+)#+(^@^{^@_!](*]%_^^**@_#(*]_)$^]&}_**(+!}@}}}+@*]])&^_[$!!_*&)$)^[
{@@*!!}*_&)#[&{)]**$!_!_*&)+![&+)&^[$#&&!%])@]$_+&+)))!&@}[$+!&**%&*!+(^&[%!*}@$
&[@}]_[)[())^&%#+$#(}^]*}}_[#*_$#{(_+#}&&+%%}{+)[}))*^#^_+!+))&_]#({)@+*%_$_)}!&
{&&%!$)&@%!}(&(]]]%!))#{@*@$&{_[%})!(@]@)${}{[_*^({&!_#&&^#!*{_{&!^+!%{}+{{&%@&[
!(%*(@[^+$@_&}#}#[}{^#({^}!)}*$$}(_(+)*!+)[]#+@(%&}}!)}!]$$^(%_)_&[&_%*#(^%)@[#)
+$(_}^}}{]@_&+}_{}&#[**)#(!#!%_&&_!^!(+_@}%)#[&^)])_#_)#]{#!([$%%{+{&%$^!+_@%(]{
})]#]({][*%)_&^+}]!@&]&_{($^($*!%&#&[!(^@+@!}%]{@_@}[_$_@@^_&![@$+^+^^$!*#*{$[]!
^(!+[}&&@##_*!$%_{+)^%+_)@*][{!]$]#%{[%#(*(+$@{^*{+@#+#&#&+})*+%}[^+_$@@&@$+&}@*
#}@%*}^&_@%)[&@]^{(!^}#_^(}(+{_}$&#!]{%@_^{}^#_#!]*@%)){*[$@&%]_)%}${+_(!*[^{})$
]!*])&}[%&)*&#}}(][]&{+@)(+&^[(#}^*]#+&}]#![@*}()($#{}+_(#[{&}*{$_&[$^%%[$*[{%^)
!#%**!^!&^@}!*@)[&[!__]]^(#&$%#&(@&#+{${%(+##$$[%%%^}@%+]!^)+#%{%%!+{[#}}+!)+#%[
!$${](]}_!&_(^^(_!{#*^{*#}{^[!#)&)!$_!@*^@^^)]@!{{{^[!!)])]@%+({*![@%#%^}))!${)]
#))_&*]@^!!+@){[)][}$%!^+)%#$&]%_}(]$#}*&^_&){+%[)]}}[$*^_+})(%+&]^*$@[&!#}%}}(#
}&]#)&]^$&[^%[*)^&(]}&+$@%^]@)+!+)&_})*%(_+]*_)#+#&_^{#+!!)(_#]*[%}]*!!@)%()![%+
{{%$^)#^+[+^}#+^&}}{%%+*(+}(&%}%}){_&$]+)+)^#^*+@[@^[!^&)^!@(}{$*{*&_{@$&@!@#{!{
$)#![*]%+#*@$_^^!+&!]#)(#*$*%*@*+(*#_^!@&*]+{](*[+#_@]%{#[}^^%}[_}$+({!%+@@]]+&^
(*^(_@]_%(]+%_)@]&!{]@$[__)@[+$)%$!^{%^!)}]{[[(+*[&*(_^*{*^}]){[_))!(%!}$![#^$+&
**${*+{$!^$$]*_%+@}{{(+_&$+*)]*@&!$#)*]}@@%!(#+){[!!+&)$$){#_@&%](^#]#}$)^*}!]&+
]!%![_)]}){}&_$]%!_[_{%])#!#}%^{{@*$_@@%_&)%{)*}+$#{!($!{{[!@_+(@_+!$]#][]}{{%_(
(!_*$%^{@@^#{[[!_[&!)}!&%#$[+#]{](&^*%^@&})_)*[([%($^$^#*&%_*&%+$)}@^@+^#@%^&+^*
+&#@%&](*@}&}#[{@@]+))&^#%!#*}#[+{!^]*+&{^)&}{#+}@+!]$@(&(##)_]$%#_+![}_}}}(($*[
)^#^*)_]%&%)^]_)!(+{^}{#^^]{@^_&#[^^&!#+_#]#&]((]]}@!#()$*!){*((%+^]+]_&&}}{(]{@
}$^##]+^$(*_{@%{({($&!_])*#(_]&(^!!)@{[^%$_{]^_^_[)%_]#&{{_#$&}{]#%){_&{_%!&[@)]
!_${]*[+]}&$}@[[_}{#_^*#&}!+^{}#+)^%]{*+($({}^*^]#$+%+]#}({]{{*}{]&#_@&+[+]${)})
)&@*+@])](_%^#!))@#%)+!(!^@%[}{{(^}{}%&[$[+&$+}]!$%_[$!*]!{]^#@)+))$)#)@^^&]+#[^
@%_%*%)(}&%!%]%]$)&!]!}+)*(^&+&}@}}+]{^@^]^!$)+{!{(@]}]$@}])}}{%^@]#*&!!!%^&_&@@
&)}$*(!_*&!!(})(+)([]!]*^&+^^#{@*++}*+&+@!##}[$^_&_%(%&}*!]@}$]}]+))(!@@+^+{]!)%
^$^}@!!_$@{_{(}($%%{&@}_][#@${}&}*_)}@%)!}&{}*}@@(*&{+)%**&%]^}&(!_&]#$(}[#^[)#^
{@@#$&]][(@&]{_&[}&*$+[(^][&^][%*}+!]#%{!##$*{}++}+{!(#![@^()}!+)&**{{[**&&$!@%%
)_#$&()$+[((_}]*!!_[*{*%[%+&$!}@({#%@%[%!%[!*](]%^}##@(*)]{@%@^@^#(]^[{_&&**)^+!
*@($&!+@$]@_]&!(##@]&#]+*%[}(_@_@+!+^+$&(_!({++[#@!#(+_^)($&^*%(^&#_^!^^(+}$+_{)
)_{^%%$]_!]$+@^[*}^*]+_$$&]^^+{&&&(}[{*^^@%%+)[^+$&@&)^%&($}*!&%#!*&$[%(]_{$])*[
*@}@*(!_[]{)@*]][(^%(##]{+&+$&($&^@{@^[%*+((%]$])(#$[%)#(*_#&^*$*+_[#{{{%{}&({+$
#$)&$!]!^**[)^!!@(][^$$$(*@*(*{(&##%_%}]%^)*^%#$_($_@(&+#@{){{{_^!#!!*#$#$_]}*^#
!{&++#^({{@$$@#)&*%[!]$&{^!%$+)}]_@+{*_]@)]{*]@+^]$}}]&)]#!_)}]@$@_[&_*)+(_}%#u(
)^(())(){+@]&+_){(_%!{^^*^!)$+{+^#!}}]_[}^**(}*%($(+_%]))${)_*%&&]$!%^))&#({]$^$
_&**[_&[(%@%**)[*$[]#_&+^{@_&!{^]%#_)![]![@#&[}]_]!+{}{$+_((}]_{!)})%[*$^(+^)+}*
_{@@@@)()#)@&[]*(}@%%@[*][(!%$@#%($#*]_[(*!{]+)*#({*{[{%[#{$^)]%!+#&](__}(]%+$&(
{#${$&*$]#](}[[[)($%@!(@@^_#^&})_(![+_)}_%*}@%{!{+%@_(%&{#])()]#!(]%!$$#_*%@%_*[
}@$$)%}*@^}}{)%({_&[$)_}^(&!#)!@*%{%#^_@$)((()^)$@*@%_$%)*$(!$]*#*#+++_$&}{^]$@]
!**$*{[}#@#{+}@$]]_[)@&](]*{]#(**^_!^(@^!#**}#}+{!$@]_]@!&}}*[#$}!!]{[{]!_{&!](^
[()[)#$*&!^[+%}(@{*%*{!}!$(%(#]^]&^#@!$)!{}#+&{)@)[*]($)@!{)*^*([{*}}+]}$++[%+^_
}#^+@[%$%$(*]_(*&]!+)[#}((([+{]##&%!)%{*({@^*#!]++[!^!#+@)$%*!_@[{^[${$}#{(}#)_[
)^}(+{(++(]}#@{&^@^_}!]!]%+[[(!_]${(*+[}*_@_@*_^@^_]+!)$)!)]*&*$}&[+[)%}#%^))]%^
+($@!]^[_%$__&@+[]^%@)[((]#&$&+}!+(}$^^+&{!)&@$$]}!![$&)&][+}+)#]#}(_@@^*!)_)[^$
@^&$^@*}_[!+^!#{^}!{[$[{{++^[+*##%+$(}{[^%@[&#!!*+[@(!#+){[)^!+_[[***+#[+&))*#@{
@{}#^^^*]{_%]+@*)$*[$@]#]{%_$_^}$&$]@]))#((*(&_@$[)]!%_$#]({&&[)])@_}*@]^!@}}%[{
)!%}_%!{^]_{&@%@%(+#^+}]*#)*%$%{%*#(#{}@)}([%_{^}_]#^[!_)&$*+{))_^+%!+}[@{]^+__#
^[_@{[%*@+$[*!$)$$&#(_[^+!)(^%_[{&}$]^}{&[!$[^{*[^)#@)(_(]#[&)]%[+@)]_+$_{}[@{)+
_+&]]!@^}#^&+@[&[%{{[)_]%[_^%**@}))]^*_}@@[}@{)&}#[{$!)}]%]]$_!!%!$@#@^()#]+%(&&
+%@)($@)@^^^]+@_)})#)!^_#!@{*^!@*%^(_^@!!$%!&_+}((%(#&$$#}@#]^$+]$@&)^%}+)!$)&&^
!#[(}$}((&*$&+][++#]^_%!]]&+#%}&$*%*#_(#}+!^{[#!$)!@%_!!()(%!(_]{[$*!^+#]^{{}+{#
_{*{+(#)#!@+${+$@]&*%}_+_@](%(*_}}+^(}$${}^!%}*#&*__(${(]}%^[^(!}##@@&)}*]%)[%$+
}][#{@$)!}*_{&+_{!]**($_[@^}&+&!(@%&%$)$_(!$_{^*]}+_](&^{!%_${@]^}&]%%@#!+%(%&_@
$@]&&_)^((}{}&^(_}@[]_^%&^)^)_@#%_*]&&[!}()*!_(@@+}@($!%^)$!]$%{[^[#({[*^^#{@_^}
]+!${)[^+!$*#%#{!#)__@$_^}&#{%)++)_!*{){_*]^&}{$[{(^{__+[[)@)@#%_%$^{@_}{}+$^)+@
]((!*}^])@!^{+#%%{}+]{[}[![(^#{_}[(#_]%+)}*&}**]@*}]}[(}[{{#*+@#$#^)$++({[^#+**&
]}++(@!&^!#_{[&*!(![(&@{(]!+{(![$^&&}{}&{^*])#&)^#{!]&+)}@_]^$}@{+(#@[([@[++_#_[
##(]{^^%{)*)!$#_*@$({[!))!@^*&@&(]+([^(*!+#@[^^^+!$]!}@+&{_*&{{!)}(!@]%_(&+[__%)
$]]%++!!^^[$@(&@(%^!@!$)[{[%@))&^#}*&+((]([_^&}%!&+^*@($}}$&[$(}__{)+][%!@{({$]&
$^+$#&*+*)!!+_*}&$&^#^*+*_}{%$*][#!$!{#*%%(}*%*@[](}][+)]@{#]($)[_#@^[!]%}%#[+[{
[%{*]&{${#_)(+%^}$}$}#)^^#($##%@{&}@){+*!+!%^{}@]%!}#*_^#+_&&&##^^[{})*((*{!_+)]
$[@@}#[*%%_@}({##)[]%&]_]_[#(](}#)_]*#_)}%$[&}!^!)@#&*)&@%^{@#{@@)}{$)+&%%%](^${
+@##$*({+#_]!_{(]{!}%$&_})#($(_]*%+]_^%)$_)%^^!+{]}}{@%$@$+!]_!([[$(%)!$&}[%]]@}
!(@}]{^^*{#*&(^}${!(]]^^%}&_%(*#]%!_$@}($%@(_#{!*@[&+#!{{[^]!#{${{*{![(_*^(_]%$[
@(^$_)@(!^@@#{{]})]*]^^*@][!@&)@^]%(]_$%${$^)@)_))][*^#))@*&%_{)}^_&&__#!*^&)^*_
+*!}^^{}{([&#{}]}$}@!%[$@]!#!+!^({))$}]#{&)!^)@}${@%^]%}#$%+^*)[&^+)+)@@#_^+)^&*
&[^(({{*)_*)++#${[&+)]$&)]_%_&%&{!(}_}_}^[)]_@@&(![@}{{$}+#+^@%))@$*^)+^+]++&&*#
@)###%^)&][%*_^&()%*[@^+%$@+@{*@([_+]}{){(^&}$(#[*&{)]&{$_^@&&}(+$@!++%*!+%^)!#[
#]^}{$}*@#@$$[]_!+]&+^@@){#&**}{{[%$[^!&@])&)_]%(!{*#@]#())+^_#{^](&](##[&[}^&%@
({[!+@(]$&]&$[}+(&%%[!!&(@(*$*[}$@($%*}_]!_@!^_((${[}^%}+^**@%{%_!&$}$_)&*^^@&]!
}*!@!&^{^}*@#[){%)%+]_$$+%$[(@){%*{++}$&[^&{]&#&@{#[]}]%$__$@^+$}*%))&!%!**#%+**
$}&@*&*}^+^[&]}[_}__][]![&(!![]!(@#@&**}*([}*_}&!{})*_&$_][]@[@[)__}@]*]+]}^[$%!
!{_^++}}))$*%!&}#[@{[^&&[_{!)}_]%@&+}}+[[&+}[[+&&!)!_{{^&]%*+!@^%$)+(%^+*^[+[]}*
]$#{_&_){*_@[@{@}{(^]%^%&(@&)(&&@[*&*%@{]!)$^^[[^}$#!$&_(@}%^()_+!)}[*!){}!(}*)&
@+([!@{+{$^*##{(^@$#+![*}#$[&&^^#_^#%$*}#*+^@[]%#$@^+*)*#^$${)][#)&)(]^!@)!%@$]&
%]&}!_%@*#}#}^&#[[!*)(#}]}])])+]#($)%]_@$(&(%][&)}_[@%^!{&!+&]$&#@+[&!^@^%%$[#%)
#^%&{+(+^$+]}^(!}^#*}}&)*)$(}*&&}$@@#&!&[]_]+*)!)+_%))}%_&!%_$_+##^(@^&)%_@*_(&(
}$]^@(%(*)((_^!%}%[}__]##+(_@{%%+*}*(^}(%)$}@+@_*[$+%*+)$}&![)}+#($#{@^##%!}!@)_
_)}}%+}&[+%%#_}]!%_$%#([#*](({(_$[!!}^{$_@^+(*_[#)]!$_^)*)!_}*$*$&(]!_[+*+&_*)*(
&](]&&@+!@$$%{]+(($@&)}%}$]_{{)&*}%+[}($$&%_#]%#^^_}[+#!+(!#@%}_${(!])!]}{%$%%*]
[)]}(_%]]!+*{+]_+#@)!{#()*__{+^^@#!((}#()*[${#(}^**[^+$&&*]_%&{$]!%{+$+)*}[&(}(#
++]}#[*%$}[$_$([$$}[**!!%{*&[$@^_&$$*#{![&^($([%$@{{{@))]#_]{$([[+[%#[^*{+(_%$+(
^$(!$}[(_^^}(&#*[[@]*[[_#]+__+*{_*)_&()@!@}#[++^!#^!!&^*((@%%(^^{{*[&+#(+&][{*){
#++!]%#_!+}@^](_]*^!+$]*$[^_[*&&@]*$![%%{}{&%{}][+(&}{)(&%]^))&}^*#_+$+@_@*#%%[$
!#}*!(_]+$@_[@%^(#_[*{!!($!)%+#%)%*%@@%{(!##*{&@#*{!!!)^(___*]_[#!%^[!*+!}%{#]($
}^+$]}$)}^__$)())@](@]]_&!*[&(*^_##$_)&)_!${$(($$$+$($)^#$(*}&$$)%]*{%@%(!*]&^[_
]&*)](}{%$]}}%%]!_*{(_[]([^[@!}[[$^!}#)**%#$}#{#&%!%![_&(]^_#!!{+@+*@$^#+#@[}%*%
$*%%{^^(*#*+^}{]]^&^{!@$*&%{%^)]$[_${[{^!(+{_*]@]}{%^}^$%(([}[+&[]^&^&#^)^$}[!@[
#[&[_##!&]*_$%$&^++^!]{%*&$%%)+%+!+({+)#$&@$@@*}__[@&{++$[$_](*_&{{_^*)%^_$@^{#_
@**]^)])*{^)*@%^)+**!%#)%!(_#!)(+#!+&{$*]^}%]{!_$^&!_@_)$%&#{^@^&!#&+&${#]*_)]^]
#%_%@{}!+&#}}$*(){}]_))%)}}_![}@}(#}#[}{[#^%#_*){_$&]!{#(]^]&[!)&&}(]^$%_(^{)}!+
)##]]}_@]}__{#}++!{![*^%]{]]^}%!{{&@}!#&[^$[}]{%$)({#(%@$%[(()#^]%^$!}$$$&_%#&{+
_(@%{}(%#]*^!@$[&^]}&*&}%%%^()]!((_%^{$[_[}_$$+&^%@@+%[(]+{$+$_^+#*%[#_!%)!#%_@%
$)_!]&*[)[{{]&{!*[*}#{*}}!&_+$!#*)[})]%+{#$+}@$&+_{$%{)&$!{*}}]${#[^{#{%{{+)@+_$
[{{[(^[[!}*$!+})+^$*_!+_{*(*#%)]@${*(#&#{{[!%*_(@([$(+$#(^*${[$%}}&&@#)^*!(^&[^$
]&#@^][*(^@%]&#%@((]{)(@@}[^@[*@(}@{%$%{}&+{+^}^)^^&!%!@_!*$![^__(+[(_$}}}*#^})*
)%$%@_#*#}^!}}^*$+}%}#]_[]&+{$]%(_&{&[_{^^##!$^*_+@#&@{_)!)*#(%[%$_(]{@_(#(]&#_[
[(#%]$+{#_{){&[$%%{+&&$)_@*%(%(^^!$)__%*&$$@@])&{%[_&!}]%]_[(}#}{_^!(){&+(]+{#__
*@_])_&&%+([!(}&+(#)(&)+$(]*(^_&^}{^][_&^#(_@[!%[#]}]$^+(_&!&[)_^#^!}[{]&}^#{%^$
[)${]$$*^}+(#%+^&{%*[&&%*_)@{)](](+)(#$_^[^_]@*!&[(%]^}%{*)_+**@(%*{^[&[$[@*#}+]
]@$*+%)(^$$)#}!}[@)%{+#$%##@)%$))_)#{!@#)_*@}[[(#)#)*+]{[(_]$}_@{*$$[%$]!^+&(!([
})}%((}*()##)%(({}!)[_[*}*#!}*{]!#]}^][))_{%[&{!(@&@%(]{*&*)()}+!)@}%}+!)$&!_]]*
+&*%)@#!##@&*(_^]${{]!$!)*$!+][[[@^]%^#{&%!+@**&}]%$$[]!!![+)}%)]](![[[![!}*^###
[^*@}@})[%{_{_^_((&%^{@#%}()$}[&*{]*])%)@{]#((&#+)&+&_}#+&]{_@#%[*)&+!@!^)+$[@![
^_}%@#!)&$#!%)%)***+_$+(!@{#$)&!$&_^(%!*(^]^]^^(&(]@^&*(&%_(!@*({%)&!$_]$$((**@+
])_^_([{{()}+}&$($#&$*+#&$%*^}[@*@@*[^+)&(]^)()$+!(^@!!*@]*{@^]){_(}%*$]$#@&!$(^
@*)*%*_&!##*![_$_^#+!}{!&]+%*@^#^[+({!#*%@+^*@!+})^$+[@@_](]$}($^}^$&*}}$%@#!%[*
)[*)(%(!}*%$_+&(^$[_$%_]]+$#{)}_##[*&#@[@!(&&$@{!^^%%_@*($(!%#[}[!$[__{+]*$$]*@)
*)(!^)&($)*!+^^#)}[&^!%&#]({]&[%!$!++^))&_%}+]#{%)}%[[+[^]+#+})[]*){_{[]#)_&^[{*
[^+$)_&%(^+_&*(]]]^^#{)%^{@@&*{#+_)^}^]}_#]%_[##@)}[*[+_(]{*&#$$#*(*)(_}}}@[%*()
^@)[$#[($_]{$%[&!(&+!]*(&@!{%(][*[]${#{)}+]]&@)!$#}(&])#^^!)%!^_{}[]_[@{[@#[&{(]
@{)%_]#*{%^&@%$[@((*)_%!#+*^]]!}$]+}^]&$#[}{$*#^$$_{{}}@[@+@+&}}^^@[[@@})()(@&)[
$&[}#%&%[!!$_!(}!___!#^%)^[${#_&&{&])}*]^}&_$@{[@$@&}[%)!@{*%&*}%%&^$^)}^{}{&_+!
__^}[}+_]$*}[^$#@%_^&{}!}&*(@)+^{@+[_*(!@(}{#][+&]}&_)*]&*$*^]@}+$!#$(@{{%([@+@}
*{]_@{*[[($@]&)#_!}%)($@@)$+{)(*(#{&#{!&[&+](@*!#)}%+{(%$#]&[))@{(&*@(!&[$#*$*!(
^+#%*}*($[%@{_#@!&[&&{[+&&%&!_@%#%$#_)&%@($+#@!{+[(]{^*^(^[)}_&_[}(#[^*%!+)@+%^#
&#$#@^}*$##@$+#]^%}[[(]{+{[#!}$)(##@(_)#^&%]_%_%(&@)!((+!(]%#@#)!&][$[)]]*(+({@_
)}%)+*]%#${(!&*&!$^){^})$#}}^%+@$*_])}%@(&!#&%@@+]]^^]_**)^]&+[$_%]*__*@}@)([[@^
^#%!^}!&%%&!$+&%%$#$#$^**]!]+#((#_^$&#()(_#_{}}&_#^+++#_)@_)+@%[+(+![^&#][%**$+#
#}([]{{]}_]#$]}$$&}_%$([%$&#[#]+[_)@([!+${)$}%*{%!)##}*{&]&!#^$*}+]_&#}{]&#]#&(_
^+@@+##_]]*#^_^&%_&@!$[}*&$+!_@*)$[&$[*$(!^#@#+%_*[^{@}]}#+]}&!@_+)(++&&(#{[#*@{
]$]}(]^*!%@]&&!%&})(])+@^&@$@@[(%*@*^++^$##{[$^{*$&{@$!%^@$_#]$_^#@@**+^$%)*%%{]
(&[[)*](*}%*%@+$${&)_#+^^]@$}%$]@[]$[_@]^!]&&+[[$!%{_((%&{@[)_^#)%!](__[)+_**$(]
$##&(*[#)}[^&%[@@)*(()_#@)[*#%!&}[)%^+@+{)[++}_+[+])&#^!)+#%(@*]$*]$_^^#$$%!_*&&
%$){^[])&$@{^@+_]([(])**)](*+$*{@%]+)^*+[]&+%%[(%&#!{_^%#*[^})*$]*%^&*)+{(%}#]_#
{($#&!^!{[}(}@}}{%&(@!+_)]!*%)!(^!**!{!({}}}&{%&@#({_](+}##@$)!]^!]#!^^_[)%}@}))
[#^#}(%{#*%**{]]((__)![]^+]+@__*_{)$*#)&][!)_{&$}}#_$#([++{[$*()_**#_#%_$)#]*%_(
*+^*(}*$_%[${)+[%[%^#_]&*+{&)^*{**$[(!}*&@![&({}*#&@_&$)@$]!*@}}&{%{*[}+]_%$])_%
%#+&$}#&_%&[%{+[{@_$!$])!*#*_$($))#!+)))&_[^!&)*}+)_){@{+(&&_^&{]$&!%_{*&&!#[+*@
]{[]]}]%$!$_]]%%$@^](!}]!^[!^(%$%*$]&)@%!#+@@**$^%!*$$(_^[%*!)^$]^@_#}@(*}){({+(
$}^%_]#$!#@[@])[_]*[]_+!(}([]$&}{([[&&}@$@*+]]{&{#[_{^)@&@[$(#{&{$]!$#_#(}^]{+_[
@{&_@[#&^!#!%%@]^]]@{[[@](]][)+)^%![#%@%]%&@)[@@]^[!*]&!^+^!]{+&#%()+{@$)*{#([^(
_*[*#^@[&#@^}!@%!)*^%!]]#&%@$&^+{${^(]!{[%[+_}$!(]}!+&*}#}@@#}[*$*}^^)#)&_!![(@_
__&([^$)+@##{**%&!%}$)@(+*])&!&_![^@)}{!_}}*@[!^^)]{$%{+^({@$*!((*%%}_@)_{!*%{))
(*&)$*)#_#&%%!$}_^}+%]${}&($}!]!*()@!{*)%%*%}}_^_*}@@_}%&^[])*(_}_#_$@(!$*+#*)+$
!+[*[%*+([&[*@&_]^$]&_+^^+_@]_+}^*@{_})@!%@_!*&^!((}!__$#*)!})[))[{^($]@$@$($&%)
@#@%!!&*^*&@#*}&[(@#&@(*!^@{%]^{%&!}#[%}%()%!*@+&!)]!]^%{(^!$!(^@+&$%)@%!@_{[#[%
)#]**+*]@}!&($*])$}![%%%^+#{]*[$@@){@_}]@([}{](!&#[%_%}{&#^%@^_!}@[${&$&*$)]#(&_
{{*#[]+$%*_])))^+#*[++#*#}$]@$#(+[_]#]^(_^#)_%__![(%(&((^$*%%&])+)]&(&$&#)]()${&
_#$@)$@$@@})[$_([(_^[+##)@]*(#^%(%(${[&+#)@^))[^#$^_(]**&%+%&(+$^#[+}))^^]_%$@#[
$*$%}*{!^[#@}&{&[(+@$)&!$&#}{]]++#]@!)[&@[_#)!!%){$]&)})*@)[{()@!)@^(#@]+@_$&#*@
+^{}^*_&^$*(#^!%)@()@{!%%}^#@_)#%%_%!++{&(}{}}%{++}+&$]]@)@(+{()][%*&_*!!#!@[!]@
]+*_^$@&!)+!{+{}&_&))@&{{++{{}@#[*)%%$]{%#!+#%@@_*}!!)&%!{{($$&%[*{[!^**&[_[_+}^
_$]^[]+__${$@%&@[^}}#}+]_#(@_}+^+%^##]^$_{@+!_$}##_@#&{@_++@)*${*%^))%{}_@@&[)*$
_^]}#&(%^@^+[*{^}{_*)%@)$$#[(+(&^]#![{^_{([+&(!*^#&{$}@#(&!!(!{({^%%@!{+{)])^^({
(+{_%#(%)%(**(@&%$*#_+$^^!$)!$$%*&&%&[^[+(@$${!@)$#_[^}{)_%!@&%{{$*]@&%){[}!!^}+
%+[!*$]}(+#@*#&&[_)!*}&*#&#*^@&[%(_)!&+#^{#%#((+#+]]#@]$!!$@@!@[{))#()}(}[^@*)}#
_#[!^${]$)]_!@@+$@(@+^$()*{!{{(@#+$&*@_$&@*++[][%]$[(@{]+}}$*{%[%#&%)]!}(+__$]_*
+][}!&^*}_@!*}*#()$^&*%%#{_*@_+[({!_%_]!^#@(&@$&]]%%+)!]_{&[}^^)!#]_{$^({(^**_[$
]}@)%_(!##]#![{]%*((}$)]{}&(%{{$#+&)_!#)_%&@^()%*@#%&!%+^_+(+$@($&^!$_$%()!!_!(*
%(]^_}^#$])$]#{%@*#%)+}([^+*](!_^%[]%{}!_&+[$@*%{[@*!{}+^(@@&)+!_[^}*${@)^){@_&$
!*_@&+!]^(*#^[%&^%^+%}{[$^@]!$^{}{(()#!%@]{]}(!)[#(*$)$()@*{@_!$*#)$*+)])^{!}{#^
]]${!@&&^!_}@^$$+_(@%_[{##%@*]}[##@${$*(${(#${)}{}%#!%#$
(#}&_#%!+](!$@)}##[^!#{^}&&!)&+@$^$%!$^$}(*&_&[(_(*]{$+#@_($@_}!+#({*(^@(_([_#++
}[&&*!%)#!@&(_[{{+}@)[_^!&($)_%*%){{!^$_*^]@%%+@&!#}(%}#])&(^%$$+@%_#^^%+[!^}!)!
%*!*]^!!](@!([[]@_(#}*%&}*{}%&^($+!*%^!*[!_$_%@($^$[!#[^]^}}]!{##^^!(#++!()$$([[
][__{}(&{!{_[{+!{*(&)[^}!@%{]()_^$+&_)!&$[#_)(&%!]$^+}%]([_@{)*!^#^*%[!^*!*#}!*]
]{&[+&$@[+%@^$%(#!+$*^]^{#!&{^@]]@@*[@^%#%(_+{}&@(_)+)(+++^#})}!+@)^_#}_[{^&]#%(
%(^]^]$(#_*_)@)@!^}(&_$#]})^(}+#(_}$]@$&#{^}*!*]{}}{^}[#^(^]}[)+@^{@)^@{&**__]}]
]_}!%$+_$}#&_%)%[*&%_@}+&+&$]]#(([+([{*[{$+!@[%{#^#$_]#){+$%*_&%(+##$_}%[}!%!&{[
&@^%&(%%%#[^*@[*{&*![[+$){!)%#_*(]^#!}#+%)&!^)*)({$[^%})^}}@)}!)+^_[_^$)+)*+&@*^
%!!#!&}!%+[#!@[#@__*](_*+&^$]*#}!&*@*[*{)%)$*][}^!][_@*]*}&*}{%*((^^@+]&_%#[&}{^
[[!+}&[&}$)@{!)&*{*[$)$)!{&!]#*[((!(]{}]#![)@&}]{_!&{]({$_^*%]#{_]{#]!&*#*(_&)@}
}+^)(}*^(+)+)($)#])^{%#&()!+%$}(+$+#*[%)*@!([$_+[}&&!^&$[_]][{%)[#%$*^%#$!*}+!$(
_[^}{$%@$))_$]+[[##%$]&+]^]!)+^![(%+{&$%&)_$[#%_%_{*{)({{&+^@_(&!$&!![[!{%_**%%]
&*^}!{)]^^[(@+[{!&%%$[@]&+$&[)({{#)[([({{#!(](]$!$+#_[@(({&+@}^}!!{@@**#_*_]$]](
*&]]{#)%%@{@$##[*][@#$[(@+()%%!@^]{$#!([+#@}&_)#_@{&{[#_}+__*_%+__+&&*!]&#%*%**&
###$@{#!+[){$_}[{#%$@+[{#)!#^}[$%}}[@_@%+!!}*}]$)#)@%^*]!}_*@&)@!]*#*_}##^[#@^%]
$)!_%&*^*#!{%&+%#^![%$)*@%*+}+%_#[&_&+}(#$+#@!#!!#!%$@&+[@[#+@@+[_^#}$&)}$#]]&*#
+*!@!#(+$!][!}@![$[_)$$&)^)[[*{@])}+^(!&*#%![(*+@+&_[)+)]$(++($+)#]}][]{*@%&@$$_
*+})+*&^_*!$)#@_%&]}+%#(&${$+(@}()](^}&!&+[]^[#()+!#^_@]{*#{{[_]_[*)[+#+^]+&%&!!
*%($)_@]%)_!$_*!*@(_$_^^^$)}{&!@)%({(+(&[+%&+%}_)(#]$!!)%[!&+)*@%+}])$}#&)*[!++}
*)](+{^!&$)_#[*&^)))[#}$({(}!+{]#[_{[}*+]!@[*^%]&{^&{)]#{#)&${(]+{$])&@]{{)}&_$}
($^$+}[{#%]@_(]@@)(&!_$}[&!!@{!%%_&[{[@&)&$_${%_*%%&@+#+%*!$]}])^!](+_)[%{^%{+@*
&%+[%!{_{![+&[&!!_^_#^[%&[[}*++#!#**}{+}[+&+_$^^]}^^_{$)${!_)@_^_[}*#}&{!*@$#}}#
*!^+&)%**}*{@#^^$^*)**!@&]]#[@@##%}@@[(!)!}*)%@+#+**)_%^&#}(+]][(%#*]_(}&){#%*#_
{^}_%*{^**@&)[&!#_#){@+@}&$()!][*_**#(&*{&]@&@#^${&!]}%^*@)!**&&(@^()@*!_$@]@($+
}]{!}#]$[!&&[*(!]!$))%+$%{!{^[^[$@[((]#!%@)!]__+{}%{_^_!{@{)^)+&%&!^*{_&$][!]@_%
%&!#a}#+#^#{_&)({%!_]!_][}^_%*+}$)&!@[)#@{@[%*!*#_[$$(_[+!^[[[[+*{[*+{!#&*^@&+%)
%#}&)#%*]!@#_&@!^{@]#)_&@){*#]@{@}%@]!(}$*)%#[)^{)}&#[]@}%+)@@}#^#_[]*(%@)!)#+@{
$}*@@[})+(})}}{@{%{%*{{%*+^*%]@]{[^[_&+#(&_]!([#&_[%]![[_#)&@%&]!$_&#&^#@^*+@%))
{{)!%[$#!_[@!(([)++}*)%]@^#![!{+$+((#)+$_^]__]+_^@+_}[%+[{()_%!*}$+[$#%%%}$%]}{&
^+&)^@)$%}]!(%^%(]+!^&_}_*%@]((%%&!)[*_([#{&!)@$!!!$){(&$}&!%*$]%&{})^+}@]@(*(*!
[)}%+^_[^%(_%**{%({{$]}[*_)&(*(+{@!&$%{!{#*{!%{)![_}%}{[!(]@*@#([(*${*[[*($+!])#
@}({%&@}#_^#+_^(*+$)$@+(^[$*#!{_!_]}!(#)_)*(!{^(@(*!#$){[#]&$^[+]!%_}+*]}}}%^_@#
+@&][&^[[&!]*!)$%[#*]!*[*^^(_[*]^]!+#$[*##!!__&}^&^(^*%!&%{[**@%$%]}+^!]_#&+@+[{
$$^@)(#(}@{&*}^$!![%^$*$##^$#+)&[%+}}#[@!}!{#}+@$*$+{}^[)[]^}!)!])]^@}+{^_%(@*()
@]$]!#$$%)())+)}_#*)(_}*@](^@!*&_^!$(!(_!$+@*[)$$*($)+%!{@_}!@_+}+]@(#}[^%@(][(}
(_@^&^*)[*$*)+&$@){]$#^(#_({}**+!#${()_)#^+)&)*[%$%@^]})#{+^&*]_#^!]!)][#*#%!_%$
]$&@@&[!$(!+_^#*($(_]]{&+!]*_)!$_%)*}&][[+(^]_[{[^^$*^{*!#*))!{@+{#$[+(^+*%(+*++
!+)&_)*}{!!#$_&%%*]&}+*&&}%}))&#($##!$!#**@^%]{##$!)*#+@(%))}]__[^$^})_@)]@*{&[$
)]^_!#]%&#[}}(][+}^^_}{@+%^[)$@%+_(&{*%*)]_@+]($!@*[)%#$]}#[_$!%_}*)(*_+}_&%&{})
&+(]({$]{+%*}]@&(([&(&^&!@{][^+^^)!@#[[%_!@^}_[(+@!!^+@${[]%]]@&[{}^)+()@%&}#!(@
[!)*%)^!}&(}+[(#$^@*}%_$+)[)}!+^](+__@@{%}(#_#)[%[([$)*#)@&&*&#_$}{!_+_!$_*_)&@)
!}}(_[@)!_[[[&^$#+$)[)%##_[%{+*@}($&)!}}{%%#[#!#*&*@@_%@@#!#{!%][}+_](*+*{@}}@%@
)+_#_^!#(#+}]!]()@$%&!{^&*!^&##]^_+})}+]*%%*@_[(!%]}^([%{]%#][$@!@{&+]**^{%}^&([
(][@+^$^$[#{@#)][}_@!]][)$_({%#)%&)!}(@])!{_#$((*[@[}##^($&]+{^]!$}^]&[&]}*{[{_^
%!)%]&]^#!+$_*+)$$(@]+]*#+@%]&$_]#*}%$[#_%^+{+}$$[&&*^_]@^*@!@^+*]%^!(){#^^(){{^
[^]]$$^{&*{)%@^$%}!%](@&!^&]!@{%}[]_![#]!]#[}]{!)+@+#!_&]^_@+$}_[_%([$(&)$)!&+){
__@!&$@!]}^&}[%[[&&_#$({}(#{%_*[#!{!{@+_{*$_%_]^^*#_@}[+${$)!%_!#+#^*](%{}_@^)(@
&$__@@)$!**_[*%!]#$%]@!@]()]%!!%_+%}[]$+%)^)#_[[}+)*{%(^%#}*%&[{{%%%[)%*%_(%%&)*
%(]#%!*(!]($!!&}%()+)*%$!}]+}!%{&+$}(#]]&()&%!&(}!&^{@]!__$_@_[)]]@&)}@$[)+{^)*%
}%%@@)]^)_](&!#[@+^$)$#}{@^{***{[^*@](}%}(#&*!}+}%][]]@#^$[*!+!@{](*]*&&@$%&[}^+
]{[#[&+&_[]^(})!#!}]{}&{_$]&&@($[[[+_{!@$_+]}](![@}$[^!)_%]^*$#)*&)#[{%@!)[_({&_
!&!@!#+&^(!%^)#_^$)@*]{_#(*{%$${}_&&$+@+[@&&!!%][##&(+]+_[}[&}_((}%[]![^$(!&_$$#
!{_}&[&+^*)]!($]@*#^&**]_($(_^])^_^^$!_!@{}]+{]^&!)}%)^{]&*}!%%!%[(_%}{}$#+!])!!
}[&}!([!)%%(}&((}]+!!)&#(&}}{+&)%^[%_%(}+*!(_[*!_[#)^&&+}_$!_$!&+_[+%*(^!&*@^{@[
]$&]}#[&[#]]]^#%])](!+]]%!+$(*@^_#^*_&)@[${{$]]#&]!+&_!{!)!%*+%_%_)$]#!]{+_[){$^
_#[^$_#%^]^&[)$@)]]{@@^(&[([!}@}%]!+_((_^[{$&^(^)*((![*{_+#(]#^[]_^#([[^[!!%]$!(
[&(_@!*^{&{+(_%{!$%#[&}&@&)}[^){^}!$){{%$&)_@))+(%#@+{(^+)}%$_{*@{#]@([!)@&+*!%{
^}%{+&#]^{[%%]+((*@+@{+#_(({{$[[@#%!+{+]%^@+#^][}#+$*&@+@&]$%^}*@_)}}*[]+}_*@*@)
[$!&_[^@){[_{#{%{#$+!}{#!@&&#(_#@&}(!@+#*#@%[@%%[%[$&@!]@_]!{@#))@]]&]{$}}%$[}*]
)^(^(&}[$#}^$_@@{&^}}[%[*{)$![&*&$![#)*@_$]^$)%&&!_%%][+$()#_&)_^+(%+)_&^]!+*){#
)_]!_&{[^_}+@%#}#[*%]%+!!(()!*_(^{#^!*}#^}$_^}$}_}{_#^@}@%!_{^[!]&%_](}]$#()#{}#
)*+*}*{&$+}[{${[#_#{}_!@}&)_@[!)_))}))#($[_}^^${)@^(&[_+!_^[(&#}@_}{]%#%)[]}]*{)
&@!]}]%_{+)!@%]#$_*#{]+!%!}[^{[)#[@(+!*&_{*]]&]$[*&%^{^!$!@*)*(_!+[{*^]@}+#($$&)
!$(^)]!#*[@][^]@}(&*{}())@{%^#))*+_+#_})(^($$%{{![^+@!+&}_((*#[*++}*^^[*@&!{+%+]
_&%}*++_(*![%^{$_!!&_$}{+%{!!}+!@&+}(}}[}]){!{{&_]_]&)%!!*^#__(^%([#%+&{{)*}&}$(
#^!^#@{%${^*$^(^{(}#)%!&#})(}#}_(&#%^${#}#]{*@^!+}!$$${&#{{[[*^]]%^+#[+%$@]@#]}&
)%&)}[^+^&@$^)]&[$*{^%^@**]^*!^!&_#%@+(@][)([&^(@@^@&!&#&_}&[^!^@]**{__}_!+}%!_^
_^])^@^}&(&#})[(@&*}_{{$[&^][+*&{!+#%@#_]$[{$@+[{[@}]}{*(+$#^[%[@&}]^^)_(]{@{##[
[_&!$]_$%$*]#!^}(%!*+_^}&@++_!%$(!!_#]{{$()))&*+}#[@@!_%%%*!_+)(@*@%)_{^!%![#_+!
{_*)!(@[%@{*{)]$*_&%*$+!%$&&!(%}^]^!)*@+@!){_(&#%[_)_]*[$++&!}@])#%)}@)!+!%]@*+#
+%&*])!{()@}*$$+){_)%@[%[*+$]#$}[#{#$)]{*!}!&##_^+&]])%!^((+}](*!&&&!^^&!!)^!)[#
+(${^&^%+}@!#[#%_&]({$&)##^%+#@*]{_%#^{%_%+}@@]+%}@}]_)*^@*}+#*##+]+^))*#^^}&]&[
$[}%@{)&#_}#$&[&(]})&__!_@&]$]&(_{}^&_*$!@)[+%)+#()(+$_$!)*+((_)*@##{+#++}[^@{@+
_(}+}%[@&{%((*{{]+^}*]@%}@)%#![&)])^}]^&[@]!#(&}^!][{}$)]!)^^&^_&^[#)+[%#@+}@+!+
&$%)$&{]@%)%%))+%*{^#[@$+}(%@[})#$*@}+(}[&_%^&$&}}]%({^@*$&^@{]}!@(})!)[!}@!_+&^
&}_&%+!#(%#{[#&[![%&)$#+@[!)_!@_}})!#^((^#!+[}+&)(%!!&#[_]##!+%][]]+[_@_(}_^!&@{
+$^_]$()$({])##+(+%)+[%&+[_{%[##(#(!&_$*@#+{#}*&!&{(^#))$^%%@${}!{@%^^((#&)$!%!{
^_^_&!%}#()@+(^%%)_]@%^[]%%!*)}!}}]!#{$^+*&[!@@)!&$^]{+((*]]_*_#{(*!#)$#$&+^)#_$
+!*+#_)!(&%[+}(+(_*&$]$%+&&[])!#*!{(}_[{_%]^(%[)*+(#(^+%__{[@+)@]#%(($^+@@+%(_)}
]+!%*_!++[[+]]@+]@}$)$^)_#^@}%}#+[+%^*!!{+${$]*$!#@](@@$*#*^+@{^_+%#!^()*&(}+$_}
{&&&{]%$^_%_!+#%@$%_}#{_*!%@+^+{%&&![*%(]#]$![)#)***${*)#!()+{}+*($($*)%*)%([(!^
{[#)*^&(_^^$]{][%))[+!^[&[%@[*!]%_+@@}$^$^}**%%_&__{&&)_]+^(@%)+!}!]]^%}!++(&)(@
]@{^^^]$]%}%_%($*&^*(%#&#)&*&[$@&+_}_!+!{^])_!}}*![!({&$)%(%^(})})[@&]*(}]@$@{[*
))$&%(#&_^@(&$]&@*%]+_{@^)+({&&}[*%}[{#$${@*}@]*!(]&)+&!!^}+*%[])[)_!{$+^&#%+@(_
#$]+#{#!$+%(*)#]}[%]**{![(}++$!#)&}%#(*@)$]+!@++*&_]}!![#}&[[)@%(&{_[^{{[}$[}^{*
[##_]]!}(}^^@!(*#&[%!!]_^(@^((_#%$+@{^$%{)#]{^&&!@#]_!}{#*!!{]@{)(@&^*^]!&^@](+#
_^!_^*]@_%$(]^$!}{*]&!+*+#!+(+^[%}*(}](%%{]##)_}*(]++]#*}]$^%({*{$+&@@#^!()&)%_{
[*!(($$)^%+_]]{^{%{&^!*}[{]+]%(*^]{@)[#{_@_&^%%+${}{]{%*_%#$)_*]#$_@$%#^{%*%)@%]
^{]()$%^_($]{^*]+!*!_])!_)%}{%^&]$)^[)^@^#_{{)!*
%{)[[^(!]*$$#^({)[)%[&!![__@}(*){]%)]@#*%}_^}#_$(&*^+%%+^}!#]*[!}!&@#{+$}*#@#*((
}@+%]_@+!!_!%@_}_{[$*_(}%)#{}]_^!+#*@])+$+**[]#^_}{+}*&{)#%{!_$$**}&(@]_+)[#}}]^
%^[[{%@)%%+$}((*(_^_${*[$&+}@%&^$%&[+!@#]*%+$_@*!}#)#*((*&[$!+_]%[^$[]+$[({$}}!$
[^&^([!*])_@+@%!^!%}[(!#***)&)@{{@]_#^_+{*)^^_+&#@{]{@#&]@+*$$_#{!^#&&^{]*#)__]+
&)&)*]##)%#&){%!+*[$}{#]]$_(@(%])$^](*%#])[*({!^{&{%__!(%[*__)+#*_(*]_[%{(!_}^*{
$[%$}{^@_+*(]##&(+{(}[!_[}}$%[{$&[@!!@[%{[]&%[&}^^!]{]#@$)%(@)+_)}{_](&[(^[{$+%%
*!}}(}*&[!)*!&!%}}*}]%]{^@{**@[@{]*{#@}+([@}}[[&_%*)_){&[)(]]^}^@#]%#_!(^_[$$+!!
#&&+{#+!^+++*!![^]@)#][&(_}$&_(!_[$&%{%@]%]}&$#{!^@%}%{$$^]_!((**!{%)*&^]}#)%%$]
{$^^{#[!+*]{}(&{{{{}(]#[#_+@[#+[}@)@#_!{_[&&*)@#[*@@[$(*)@^[[]^%][&@@_$+_%&_*@&!
@*{*&_&({{[@{]*&![]*_}&#!+@#+&&_(+#*[+{^%@_+*+}}^@*(]#{*$++)_@@]%_}!$%+]+$&^!$@$
[}^_#[({^+_%{*@}@_[]{+][_*!#{])_^_@$(@%!@!(]%))){%](#]]#}@&}+%[@&[[)%#%{{+@[(^@+
{)}{%(&+]^}^+!$!^*_*+^@*}+]#}_#^(%#$]({+{#(+@$][%$#(_[$@]@+@[@]@%]@}[[@(*^@($#_(
*)[&_{{(!*+%(_&*}@#[%[()@}]}[#&{)*}&(}]&{%][_%$@&_%*)&_^]{##]${_(*%*{!_+)^]][_&+
]{_{()%(_&@}@}[%+#^@}#**!}+$@]@+*($*}#_{)!+])#$[@%]&])*%___!*${++&%^$^#@&_{)#[$+
$+]@#@_^^_#@}!)$_#$*&{+^{+@*{#!#$!%*^!}[[{]+{[@(%_&}^$%^()[&#%@_+{(*[$#!}[+_{*%(
_+&}__!_#^&(%!&^%)(@)^!^)$([_[_{_$!){*[!*&#+((%(@#)!%!_[!#[_]%[#]}}%[$)+#@%%*[%+
^!*[([#&${}^%@{[&!!$}!&#![_&[^&_{@#))+&%$!!{([@[}*({$]&}}@&**[++!_*^^[{^(!*^^%][
_[^])_{}!*^^]@@%*(@_])[&&$[}%@$$##]&#)+!#_%@}#(%&++&){#!+!&]!{&$&})[]}$*(&&)@#[$
%^^(}^#&@!_#*%)^}{&&*]+(^{({+$^]()@[)&&[&[##[][%]*&!_}&[}{{[$@}+!{[$[^++}]}[#[^#
^!%!+}(^*^%#@}{@_[#^@)(+)#%^)+@!{}_}{(!$[[+_[%@}!){{]]$]!^[@)(}&#([((%$#&%*]#!&^
!%$+}+&[[{[![!{(_@_(^^#]+!&%([_[*}^}}%!^&^&#)&#[)*@$+$%(@+*][}}(#@%!++^_!*[+#%&_
(@@)]#)#{}@&#{{&@_^&$+$@**{(][&]#{@+{#*$_)#_!&{#]%#(%!*()+)%#&{!+*^[[[{*))$!*__}
)[%%]&_[{]{^[#)*)+#*}}(($)*(*{${}{}#[&}[![%!]%}^{&}&$[(%}^*_[)]+{!&+)[@*&&{@&%#[
!*___^_&]]#&[](*+}(^]%+^^)*}]{$!({%+_*]#&{+&)]&}}]}^+[)#_&_+&!&{[{_)]%{+&{*}*%[+
]#%{_[){!)}_#]}!#%{#][_+]}^$}))#{@{+#(!_]$[!!&{{&+}!!)@)&)}]^^@^((]&^!+!]$}${#$*
}]*_&%_]{^(_&$@&_({#!(_]@#%+{##_+*+^]!#]_]#](]${]][_]]%_{$*}[&^{!_)##%%)+)*&*__!
}&*_*]&*#(]]@[%{%]#{]+_(}^(}*!#&]^[!*]&^$+!_^_++%+##(*@+(^#}#&*[{*])#)]$*%*+](+[
[{+&${}%!){!_%&&]*!!({$#)&}+[)(!}@&!^][)[#)}@_$*)^%${]][^]$!_$$#*&&#!{!!*+_##^#!
[#&##(}+[@!{_+}%]&$((@$*(#{]@&(]&^)%#^^^[)()+*^{]%#%][[*{%@_))}@])%#*!)#_(}(&&}$
_&&@%)%#(^}&]_(_%@]}^$]_#@^]+{^#^*&&@!^$]$*#}$(!])#)_@]@[@^+}#&_*#^(_%${%(}]+&}!
{&_}}{*){)_*^_[!)$]^%%]&]_*]&*_@}]_{+@!!$@(]$))!+#)*!](@[@*&!+%}@$+@*@_(_!)_[]}!
)[^!^^^!$^$^_*#}#&{{}*}}[$#!$#(&_(_}^+]@#{]}_^]^}{&_[)[&_##_#*$[_]&}$_&]()+&$_{]
+$_{#_]%}[+)$++$}[(*!+[$]*}!$${@#@{^{{#}[!%!#$&^^[+[+^(%}(^]#+!)(@{$&}#{*)#$(&{$
($^*%*#+$%{*&]@(*_+&{*_]}%(!{)*([+()(@[}&!+{$!+%{^{{}]!(]}!)**__*]$^()@$&*+})_!+
%@_)%@([#{@$({[^*(${(}#*@[))+[^!([#]{$*^[)_^}*{$#&$$%+!%}($$!{+[$}&#^$&!^@^@{!**
&*}]@#)#&*&%&{#^((({%}$*^*!__*$_!%^{_!^$*]#*_{!(*){$}^${%@$$$_}%!)*(^}+)@}$)&+(_
#([]_)&_*^_^*($$+&[$!}&[[@{])[%*_$+%])[(!+)#@(()!+^{)})%@&&^@]}#^@]$]+)])&^_]_]#
$&_*))[*[{%_##^#(*[$$&!$^#^*++$**}){[*+%))!!+%(#}@)[$$$&$+{+%&+&[{]^{!_)%(*)}#[(
$@[_)([@}_%&%)@{*)]^%*^!!!%]{}!([#!^^[+!^$+}^&{}*(+]{![!)$$&{!{{[^#$){+)(&^{)_{!
{{!%}&&%#}!]!_&%@@_])((}}(@^]*+})}{*{@[$[&%(]%!_[(}%+)((*(}]&#*_$[^#[![_)%_+((@}
!&(_&^+[(!#+{@#[[%[_)_*]%+)!@[(%#&^+{#$)$]]![(@+@(]*%#{@#$&#&*&!#_)*[@*+++{+}@](
#]#()_)#^}&%#(^$&(}^#]!$]^+*$*]*%])&@}$!{^_&+$]&{}{*^$_(]]%##%)!#^(@&)](](]}]_@#
%+]^+%&%([^)!#_+^]%++#+!({*)^@#)(_&^$*(_$](@[{@_++_%@_#][(&$&#}%@##}*!_[[+@@!&}*
$@^*)(*^!$%$[)}${))&^*+&_#*[{))(*_##&*_$+^&^!#![@@$[@#!&&)_+#%)&@(!!^$$!^!(_{%!(
{^$[[#[@@(]}{!+)[($%({@#%[}}+#^]#{%#^#*]#{)&__&@%+)@@}_!*_#&]{])&_#)){%!&]%##++[
({+{_#[}}#%!&&*#{[##][]*@_&#*+!)]__#^+_!^*_}#+}}((!+]]++]%_]*){]%_}]]&%{_%]^)!})
[@*!*!@_[]__{**[}*{{+]@&[@*!&]^_{[($]}^[}%!!(@+[]$)#!}${*#&}}#*^%&!{*#$}){+!{!@#
]*$]@(*$}[!@{($*&+[_[_*+*@@%_]*$[*%#{%![]!_@}!_{#)]!*@+[*%&[{^_]!%#+!}]%*#%[@{}$
^[[})(&&_%#[}+%]{{*%](*^[%])(_$]+[[&^$+{&]*$}]%$#(_$!&##}$%&@[[{)@#&+&(&@!+)@@+[
@}$][([]*]&&%__*{*++!($#@$*+]^&!%)!)*@]$#]*@#*!^%+#(!^#{{#*(][)([&!@!*%^*(#{&{{[
{}*_#+*%(}*(%$^^&$[_)[*)%)_(^&&!&&%$(([**+)_)$[!]%{$[({[$$!}_(]^_%{^[%$*@^_!!&))
]_(_#!}]&&{]{*]]%{@{+$&!@&!_{!&!#]_(!%@[{)(&&[#)#$#{[!^{_*]%[^+%{^*+#[!%*#[(@^#(
#{*#&+_{]@^#[[^_!+*}]!^$#$#)#[$!})%}#^#%%%@_+$((]^*#^&^)[]$[]!{^$%&*%&!^^!(+$#$&
$(+({[({@&{^)+@]]$_(%_&^%_&%!^(]_!{*@*+[#}}[}{@&&#(}@#^[^{(@_})_*!+{*]_(&+]#)*[@
{{$@)&&{&%%*@&_++)$[_}^&@$%@_[^]_}^&#^]#&^[%#*[!}!&}@##!@
[@+[_#[({*%)&{*^%]+[_+%(&[(%!@&$_*}_+^)+(}))+%]))[#($^!]+^$%}([}!%#%&!&}^)@(_{[@
+@)^#)]$#&!+_]_@]${^^)[+$[[)*%{!({&#@+@!*!&*&_&!*!*@}#&%]%!]&%^@&#_$%}++[%(*$&%(
$(()%}]#!])+#[]({{*!$&(@&#}[}#[]#[(![#{*})@^]*!})#*+}@}^}^%]^!}}#({[&!^(%}]}{$*}
*@^{*@^[&}])(!%&(_%*&}{*$}^@#]*^%&^$__$)(![@$)@]+*!+__{#*_^}%@)_$]]#@{$__%*#!_*+
])[%][!]({#+&@{}}{)()[#{_{_%_+&^{[!#$$]&_(]+@{)#&^+${]!@&][}%(]&!*(*@!)@]__![*+#
}+%&((]+&#^!$+__@*+(&#])!^!%]$^#_)[+]+*&@+@{%{[{_@]([!_@_&{{$[*%]}#[!&@%}(%#{_}&
*}&{)_[*[_*%[$_(@]!@#}${^#+)^$]@^{#]^_%&@%(})@!{{!_%@#(@(_@{^#[!^**)!*&](![[![&_
{#%!#$}#*!$+&)}$%*_&&#}+]{__@!!^%@[+]%[#!*!]@{_%+#{))&#@+}}[%&(@@(]((*@@!}]}{#!^
+_}][^[^#}]+][+%]$__*&&+]!+$[+(@$__&#+)}@[*{+(%*)&@#)+*+!}&&_$+[#$[*_#%@{((&[$%$
}+$#&+}!^([^!](!%&)#!_^!#$*)[[{}#[_(@@^#+$)(}_$]^&[+#+})[#]%)(}!}+!}##_$$&#@^*]]
$%^))}#($]$_*%%+*${!_(){@_^(*[_^{]{()]){^&#@_&@{!)!&)}]%{$*^(&#]}*])&{&[+[^_*+@[
]%^[^_%%{]!!$([^!*##^)^%%&@[{#+%)[)!#&[]@[{]+!##@_)%&[#@(+)#&&)_[%[#[*(}&#_@[[$)
%^%{[{{}+&{*]*_[*$[%[)_{!*&*+@)_@@_!+*(#!}_[(!]@@*{#&#(_{]@&$[[&&(#^{)++}#(#&{+(
()#](()]]_&)!}(][@_%{)*+^$[((){(#)(_#%+%!%}))}%@)#*}_)#$&}(*@][@}+##%+}}_[[%(!&@
&)&@#[$^*^^#*&)#!#{_][@#*^}$&!#!][$+@{)}^^^(*^}$%[&(@^#$*!}*$&^)!_[]]}@[+#)**@&]
@[}{_[@$}**_%+]${[&*#[^!#]*[(*$)@]*[*}*&_{%(@)]%%$]({]@&%]&_)%_#%)#**{&*(%&!*+_}
[^]^_*#[!^+#$!_{[}&&])#$_@!#]&_{{[[+*()#{*}&#(+}}_@@(@&^+$@&}*!^!_*[%](%[$)_[]&_
+(&$@})+{{_}[&*_!*^^!!+!{((({[[$^^%!_&[][!%]}^&{&&]_}$@$}[(_{_&{#$@$@!#[)}_{($}&
&@#+(((&%+$&$!!*^}{+}%&^{&&!_]+}}][!%[$){)_[+(&+{)$(+(+*#%{%&+(+}+*%%}_]&+&[^_[#
)_+#}!#@$){+}##+}+*%}@}[#!!@_)@&%*{_!{{#!!##]{}{#+_}]+({]^}@{]$_@*^+{_{^]@**+)[^
[%&[[&+{}%@}}*]##]}%#[_+{)!*_)[[^#!(+$+(_!})]()#)}*!)_{^#[@!#]]^()_@]^]$@!+_$!(^
&{[([}&!]{_%%$+}+!%!({_]&+@]@[@^*)_+_(%#}*#_#%[#*+(!)}]^$)%**}]@&]%++#})[_((@[}(
$(]!]$@&!+]{#]*_{)(@(^]*[+[]@*#{&#+%%&(@!@{)(#[]]%[!+(&!&@)&{^++&}*_*_#{(_&[(}{!
}&#(({#%$^(()^}^^{$][)+![}%}[!()@%_^][)@+]+@!!%+^#@++$%(@*$]^*{]!+###)^#&@[^[(#}
)+{!}(_@#@)([$^{$@*$)^{#!]_)_&]{}+(^]}*[(**]))@)$+]*+[_]@&&({#(}[_*+%){$&^}}(*[_
*^!_+^_#(_*}))#{#$)[^$*(_+}[#+_@^#{+){${]*)[]]}((_*%_^+&(&]}!!!)@(++{)%&#}*[^+$^
]^&]}&&@}#*#@%**[]${%!}*](([![@^}^![^+@%[^$*&#)}*}^_%_]%{[_*_#}!_!$({^&[(@#)$$$@
$@_$*@_{(_{$$$%_%}(}]+(}){}]]&$+*][)%]^@&#]]&}+%}**!+%*${^)^{%)&%%&#]}&%+^_@_^#]
{@*&!$&]%{[_(^$}(({]^!#[)@@[[{*]{)_}]#*}$#_((#*+^]&]}]@^#^%^^[*@$}&{{&#*[_{]%#**
}[%(^{_&!++[_)+@&^])&)+!(@%+([![$*$+&_)&_*#%!#]${[}_)+)&$#^*##_&}$]]++!#+(@#!#[}
)[+)]+{@*)&(^{^!+^^]!^)[$_!{&{+]{@&#!_)#%_[@@_[#%*%)*])$}%{++%[)&^[+!#))#(+_+$${
#})(%&!!!${($[$&[]+)^}_$$[%}^[^%!)$#%!}[@}%%*_^[+!{!_!!@^{{_]%}#+{}{{$+#}]%[{*}_
+#@[(+!%]*#]{#$%[]+[*[#_+(^]{}#(!!{]&!}}#{#&{)!(]%*#*$(@}!^]+{!(&+%**#@@$](%#[!+
!!)&!]!+^$(}(@{#@${]{^&$^)[!*[@#]*{%)+^}@)(%$$*{{+@!!@{@{}@+@!*&{%]_@^)*$+&&@+)*
^}{&*{*}_$%&((#&}%($*%]#+!*)@{$@#^+}([[*%)+%$@]}@]%({(]$$__+!}}+@@!${%(])+{}![@{
{_]+[&&@%%(#{(^%)++%)!&!)+&)&]&}[&[^*!${&!#&&*^)[&$]!]%@{%%!&@@+}{#*]+%&!#^_]!_@
@}_%^^[+{_*[]%!@(#]*%}{+@_}*{[%^@_#{@}}[%[@+]@_()$)[[*!)(#)$%$%(^[^++_++@})$[&+(
%^^%#{!)#*{[+!(!_}[!}_)&$#&]$%##))#&%!+^#}()+@{^^@)^)}]^{]+[]+[[_(]+}*+_*+]$%)&(
[)%&$}&!{+&}]{@%]@#_[]({+]@%@&]@}))!@({$]*!)])[!@(&%++(}[[$%!![$*&^+}]][)!)_^*&#
%[+#}(&!&^_*]$^${[^_)_%!}%*{@$]^}}!_$%*%_$#_({+${_]*_$[)[^{%^@@[##&{)]%]%*%)&_#^
&@(^}(){)&$[#[##%]*^@*{&(]$$](+%(^}@!&)]@##!&@!^)![#@%[&+@%^&@^{_&%&[(^(}+&[(&%}
(%+{*{)]^+[{*+&+_)^)$)[]{}]&}%((_%%[_#}[}*%[^_@!$%)*^@]@%+[#$}##&!_}[%[![*^}_)+#
_*+@[!(^)&++[+&_^%(_@]_@!^&{]+*+&@(&{@@&{##@)_*#{)^[%+%]@^$%@([#)[+@[_]+#}!#+!&]
@[(&+_{@#^&{$$[#]&@!$#$%$%(((_)!)]]*}(@*^$)!!+%_!!^__@@}[*^)%@**@!@}&!{&%+!#!(^@
@{^#*)+$!@$&([++@$@_##+%}_[{}_#{@!@@#$(@&]^]*%!+$)(+{[+^%^{+!}!&$[&#@}_&{%![^#*+
#]@(&}]!@[}+_]!{]%+&)^{[@[__}}$$&!]#)_!(**@(+*!^}+#)}!)@$^![^){!#$[*{%&![!^^^_{[
*[*#_*&%)&)}@%!@!&#]+%^#))#_#(#]*#*!@^{()&}[{%(&[^)@$^%(}&#@(%+@%{%%]*{$(@%$]*$]
}(}@$(!!^]+)%($)_[!@%#{[@#((%+]*!*)%([]{(}*$]%#^*#_@@}+_*{$+(%]}$@{!#*%*[[^!{)&#
#*!#^*+[_!$^%&)%$_@%}[%}#{{}%!$)[%+[[&%)^(@(_^)!*@#^#@@+@[(@)$(^&_}%%)@{$%+){(+[
})#[)!!@))^@_!}(+{{(%#&[_}+_)_{#%%[%^(]]([(_!@$#$%*)$+(!_##}]_@+*!]&}@}$&@!#)%_#
#@+&(@[*_*})&&#@^]{(()!#^)]{+$&(}!%{!}([^^*}(])[@(($@^!^___)!}[{}*#%_${_&}{+[}{%
^$!%@{_]@%%+$]%[)]#_#**_(_*@!_(({(&&^$#]@+{&]]{$)]$)*}^+_($#$_*][@^%&$(_+}&]${(%
+_$!$^]#@}{+#@[]_%#^${]$(@$#[!^+&)%)+&#)&{}}@([&{+{_@}[++&!&}#$}^({])^&[)&)]_{%+
@+]_*^&+}^@%*+))}@!@#@{%$_&$%(@([!)))@(+]&$*^}$_+()*[(^(((+[@@#%)&$]{}{]*(@(@+^_
){*#@)))#)}#^)%&](%(_}[{&$#]#$@*[_[]_%+&%}+%#)!^[}[%$!_](^}]){)#^[#]_(%(!+[{^^%{
^[+))[]#@}$_)((+*_]_[^(_*$)&$($!#%&_#]_^))+%+]#{[{{@*}[#(#($&@}%@%#(*}}]#+^{{&%&
{}+_}}{%*]_#$@}({%)}%_!]@$[${+]@+&@!]&$$!%}]^!%_#%#)$#^+{+%&#+^()_%@%!&[!$&[###]
+[_++$%]&_#()&#{]&]_($$)$%]+^*$[]%{*^!(}%#([!%[@{**_)@##)_$({[#()@{]}%#*@$(!^%}&
$#[()}}%)@)*([(!$+*^__]!!+}&+$*^+}{^@]!)!*{[^&_^)%{!^##}[#$!+&}$[]&_]#_%$(@^{^)}
{]#%]_%#]@*[}&[$@_*_$@]{[_#)&!@@}+]}@{*%({({((!@{{*#&#+!)$}_!!^#{^^{&}[_!*}(_}(@
@@_@@%[)$)!&^^]{$@&{]+(#}+#{^#&*&__@*&&_&!{]+%+^!)*%!$}_()$#%^{)+@@^_]_$&_)(_*&)
}]!${+_[*)+#[#^^&))@^$%&^_!(^{}[])%$][&_!)])@%}+({}+]%#{$]@^{@_]%*#!_#!((+_(#_]+
[@**!)^[#^^%#*(!_{((&!*%!!+&+%)_{$}+^@[)[@]$_$*+&(&{)^%]}(()*){[{]@}[]]*%!#](*%@
))((])]*%%%$+(%}$+%#[#^%]^@)@^_^)#%#([*%*@+(+)&+++(^%]*!$_$$$%$+&]_+[@_}%&@@%){)
_^{^+!+%^)]!_&+}@+^$_]*#]((^&$#!_)}][&#$+&)]_*#{%%]}}[%*${&)]}((^(_}(](]})*]_&))
+}^_}$)@@+{([@%!@[_]]+%(%%%{(@[_#+@&^__+!^!_^@]@^#]]##%*^]!$$!+[#)^![@%++%$[&[)[
$$!^&!({]{)(@(%]%}{]]{%#{&!{*@&)%%&)#$+($[[[$+_#@!]%#)&&&+*^%*]#_@)[]]+++[]^}]%$
+&^^%({}}{])]!!}]**&!{[}+*$@*{$$_}(^+(^(!%@^+*+%$!}{((%$%}^{%@[^@]^{{%_(#$(&+]$*
_^[$$+!(_(!](!+^{}$]@[]$}*)]})_[#+%]@%&@*&{@&+)+({[^%^++)*#+*(+!}*%^})+++@}_&#&]
][*}^+[!@*+$[%%(*[_$+}$]*}]%$%@&]@)!@+]$(&]^_$!)@+%!+&(%]&[(#[#}_@%&_{{]^@#}&_(+
#$^#$&$_(@(()$(@+*)^{(})[#%}%$(@@[*&!]_+&%%^###]%[+)$[_+$%$$_]#&#*#$+@#)%&^#_}_}
_%[@{(*)${##{*@{[]&^[&%^[)%*[]*)$*@)[$%{{^#^(}!!_$!+*^&!)([!!]_%)([(#])+$*%[@{&^
^++{&{@%{({^&}@!^)@]%&@&_+#]]%%[]+%[^)@#+{+)&{*@+&&}_!_!!$$${#({{{#+))]*(&&@{{%&
@+}{%*%[$}(#)$]][)!_%{(!){@%_##%{$)&))_!^(!#([+)]#_*)_$#}!$$})%^[%(_+!&{+^*^()![
*@$}^)}{$^+%@@^)!){@*{#*[#*#)^{@!(&@#+_#^[&^$+&$!*!@&[}&&#&@^})&(%[*%!)@[@])*{%]
@!]@([@&%${{%*@^[*$#])__&+{@@@{$+[(@!%!^_{$*^*$)%$!%_]&$($!#&)@!}#&*#(#_*#]*#%{)
*@}(]+@#@&}}_}$+&^&[#%^*%*&(!!@{^^%$_{+[!@^](@*&%#!}**^$@{$#&!!!^)]%$_)%!&{^^}!%
(*$**!(@&*+)[+(!_(]![%!^[)[!@]_$*))+(+}[+%($%!+%!&^[(^^@(_]&#@[[^]]*@_{}(}[#_{*_
!+(_^]_%&&#$*}^*+!*^}][&_[}[@]$#{]%{)*[$!^_@(&$^!%$+]{#&@%{!)@&#^}%%^+@}}%%&^^}@
*##+)__()+]!]])#!%(&+{))&)@(][++_*[@@)%{[%[+{}^*(_&(&@#&^$@^_}%^{!!][+)$%&[&]@$*@^#^#%&}*@_))!^#})$!@%)(}&_^]!![+#()}%)
%@}}%^(#(})*}){+$_[+]%%]#(*[#_(![&{#*$){_%^}+*))&+)$#_*}[][@}_#%@{+(+[#}(]^%}^}+
_$#*(^]{&$#}{@_^^!(!++]@}${]{_)%#&{@^%$+{)]+_$&@[)#[!%^((@(}$([#$%!]&[&*&!#![^@+
{#@+}@^@([++{^%$^@@}}*{!$*&^&)$)$$$}[#[%!_]+]]*_)!!&[]}+$!}%+{!(&^!#{##}!}&@(])+
^(+{+_%%])}(*!*+@+^$*#})+&{}%)(@%[{#^+&@)_[(+[@@&{@){#({++_@(((*&&{@$&[[%($}^{(+
&*%}%((!&#!$[)_(}*@^%[@)]#%*}}]#!&*$&+[^{#+##(_%_^)]@^}*^]{[^^]!!+^+^$%#))^%}*^#
%#(*}&)[{_$+@}}&_]$&_{]$)]&[{**$^(#$][__{@_%#%&^)^%!#[]@$@$([!$+%@{(@)@*^}(+{)+(
_@$)^&@+})_^^]%*$^^}_]@(*&)+){(^)_#${%{$+#@))#)[)&$][(*&}}^]^_%!*#*]$@%}+$#^$[&(
%!*_*(%))^[*+__*{+[_)++))(#_%(+!@#+@^^&[)*_^_+_%([&)@#}!#%(!#!!}$[@]_@{)^[{%]*$%
@&*!$_#{!$_*_!{*()@]+_&%{#!$%^()$](^]&#@!$%))#]@*{@($#&(*&&@^%@{_({{*))+#)){$^+_
*^{+)+{]^+%{^^&+@)#+@@}*^(^)^)_!%!&())^!+]&*@[*^@{+]$[_@%%%&)(&$$#[@&$%*{[_@)[{[
_[!^}(#$({#%[&[{]@*^^+&&((*{((!&%}$$(##[+_#]&!{$}@*]((!%_]&@]![!]{$#%^%+#{#+#[*$
(]@!%&}(]@_)!{*))+^}#&*}@##@}](&)!#${_)&]&[^%_^^{{+&&%+@&@!+@__+@#]$]*!(_^+!$^{_
*}$[%^$(%{(&])[*[@^+_[]#%#*!}{(!}#](])@++{&}%!%+*#&^&!_$(#_%([#[_^[[{$(@#]}@+[@%
@(&&@}]]!@]$}@*}#(^[%^!_(&+(!)[**(&_!*_*!*))+&@)%&{#*[%[{@+$^&@]__][#_)^+)#^&)}[
_&$)](!^{&@(&[&}$#%$%!%[#)}*_(!]%[}%__^%(@}}(^@_(&]^^#]!%@{}{&*+@)&(#$#+%*&@{_]!
}&&[(_*_%&($%(@#){@_+}$!])}%$_[+$(@)_}![_&*%_[!$}*#}&]{[^+&![}%_#{#$[{)({$$[}{%$
^!!{{})))!{#^]*@&}]_)}[%()[](*%@[__%#*}&_(+!{#[)!@(]+]}$+%_{##[(+#$^*@&@{*}%)({!
*#^$(]))^^}{&})@**}!@[{{[*@^}!}}#))@*][[[##@)([_$#*+)]%#{]![$^[@!^@[([(){+$)(]]}
($)[!+#!)*($!}%!%)]{!@}][_{]({)*^[{%[#]$]![#^(!{_(@$@{]_^%!%]%[#%)_%%{@&)@{&}@*%
)}@&+[)!]__*(#*@)_[@}+}$!^&_^]*@!)_@!)$*()}[@*&}){%@}_@$[@]$*{_^}+[{}_}#+[&(^]]#
^^^@%#((}!_*[(}({}@{{)+*}$@!^)[_%(%}#!!)+&+#&%}$*${)](^+!!])#]@}[$^!}[}}[]#_@@]}
)+#&{${%+(*_$$^]}&#+^%())^_^@(_]*^]{))]+_)$@_%*([}{${^(]{[[#(#]&}+l}%#@}{{)%@{+}
{$})($)({%@]!]]_(#_$[@_][+_)(!^!&+*{@*$#$@$$)(@$*]]{{}])}+[}!^^]}@(*%^%![+)&$}]$
^%)[*%%@(@[#_+#{*#$%{_%*{_%{{#&[_@&!}#__)$+*+*$_[+*]+#*(}&}!*!_@#%%!^[+(}[}!*{!+
#(!^_#@^{$__!_*&+}@[%+&${}()@$&^(^{[%&]_}}%^}$&+&{]*+%][@%@@#*^(^*@+*#*^](_+!$)+
*{[]{@*]$%}{&#${_!%_(@(}_)){^(#_#@*}_]+))$_$[@+])^]{$]]__%*%(%}_@@^[[)^_[@(@&**+
@(_#_&[!%$@&)&![*(^$+!^$#&@!+_}_[_]&$!]^]#{
}([{]%@^+)[_[^*}[_[}$^(&)#*&&^)}!%{[{#_#(^%^_&_&_)!+}}*]@*($^$&*{[+&}*^&%])^%]*(
@[)+)%}]){)[##&+#_)(*(#&)}_&(()){*_!}*^[$^*+$@{++@_#%_^($*}_+([]*&^$()$+])&)!]}}
{(%$#(+))^{!]@()]]$%*]%&+&)_*_{_(()^$!!_[#+^@(%%([*#{)&+)))@*$@]#_)#}!__(_!%#*{]
(!%(^@)@_&%@)(_[(#@&^[+([)+%}^*{!)!{+(+&!)+%^{*_]+&*%&_*$])&^${%+#+^+!(}+&@]$+{*
]!@$})^+*$^!$$!}_&#})+{)}[_#^[(*!)@%{!!(&^#${++*#@&^&!]%}]{!$+*[*#]}{_{_!&){%[^_
{#_(_$(^)#*@(##^*@}}(+&{}_#{*^]&)+]!++)%[^%+!+%!++%+*&$]*$!**&$$$+(_!{[++(@#[+{_
+)^$]#]*#+#_&@$#&&]*]{_**##%#{}^!]{])&($@(+**[_!+_}&#!]^&@{*_@[&*${[+}@_{}}[]+#_
^+*#!**[_@#@))@@$!!)#%$%${[(&#(&_#[{*{%@##!^@*)_!{^{%[]+%]}}()[$%(_{$[[^{(]]@%{_
[^_(@[!*%*$][+@)^]+#$!_)@}!*_&$&%^@{*%)!(*[*)(}^&{{}_[$%)*()@%)#}_)#}{}##&]{$](#
]]_$[[@!))^*&^@!#_}{_)@$&[%#[]*!${%!#[{(%$*_*+$)]%#&$!){&&_%##_]*%$@^%@&&)*$_&(]
@}!^+[]&}{&{&%]*{_*#^++#+_&(%]+_&}}^^+@+]@])}[${%$*^@]{^^{+#(#*%*[&_*(+#(*[$[*]$
!}#_(#![^%%+(^&*(({%[]@^]$([@@@@*@@)&}@)^^{*^_@*{){*((%!($+^_!_!^$$_!(@+%&[!_$#$
[*%!@+*^{}&^&]}$#{*]!}{+#%@]$!+^$_*(@]@%})%$!{[&!({[##$))(@#+(%}@)$)*@++}%*&(#^{
@^[&[%*_%#$_}${#{@@^^![[_$(!$&}[&^}_*}@@_}*+^$%[*_(+}$)%{@)&^*&(*]&$!_[{)&{[[_@!
+_!]%^[)}}){!+{%}@##&@@([%&]+]+)!@}^}&@@_[[!(!+[&!)@*%@_#*(!({^}$++!#*^!]+^%*$$)
]^(]#}]+)[@__@]#$]{^)&@[{*+%%$(&}^!_++}&&@+]}*{!}^+#(@(@$[*%)*$((&*{*@#$)]*+_%@)
@^^]^^)()*+){!+&{$}+&{!{!+@}_*&*$}$){]}&{({_]%+{_)%(#@()]}@]]#+$*{*$$@{])${*($#}
@)+}!{*_}__)(@%*&}!*@#+!_#@!&^$&})&*]!}{]]$#]&$_@[*!)%_[}}_@+%{&_+_$^&^)]]&&}(*%
((@#$$(#[(@!({{}#&$*]$[{+!@@[@*#+_#)(*_^_(%#%&[%!(+}$&&)%)[{^$(]*{++})@_[%*%]*^+
%(@#)])+^_*+*#%+_+}^{{*[+%*]$)]&^*}+#}^!(^(^)^_*[&@[}_*+$!{(@+}]{_}[^$[+_][{*]]+
!#+$$_}!%^%$]}^!*)]+*(+{#{[%^&!!^_$*+@}_$($#)&!)$_))*+##$&$$(]]^@_@_(!^%{_&@&+^{
*]!^&+%_}#@&&]%}@#^#!##*#[])*#}*](){+*}@)]+)](_(+&%{#}}})_!+*_}%%}%}(#!&*+{!!]](
+_%_([_&$*{^$*)_)#*@^_+#({$][)%@$^@$[&!+}*^$(^!)()){&&^{)]&)]{@)$_]*^@]&)@$#%#](
)+}[@({%[$!(](!)_)#[&[]}^]@*#([}^]@%%%@}[)($($]&^+$][*(%%+*$}%_]]$!@+(_%+&+{))@}
*_$#^{%_^_{$}$]^&)_!_#$[}#$}}%(*&)%+++&$[{]!#{^_{(*$[({(&(_$)])%]#(&)%$@]{${}@#&
]$&&]%^}^*#$@{#_&&(#%{)]+_[*{@^{@^%}_$[]$[((#@(+^}}!})!)$@})$%}*@]${+}%+_!!@@^*{
@^)[{#%&$*}$[*}#_^}%+}#^_+)^+$!_*#+)^_((^^+(*+[]][^(^^^&^}}}^*!(^]]*&+}[(#%)+(#!
&[&&_))#@+*^]_#[#{{$+!!%]]!!!$]^*!^^_+(%)%&@$}{&]$}#[[^($(*)]]%_(#}^((_@{)]}*#})
%$)$_%$]%!{)})!^@)[())#&#}@+_$##&$[#!}^^&_)%][]&$^#+@}_{@({[++&(_!@%@#&}^#[&[^%+
)[{^)#++[__(*+&*%(@]#](+(]]}%(^!(+@]({_[][)(%(&}}&[[_{#@#)@[#_[$#$[%[}{]{[)$)%{&
+&(@&%&^)(){)_[_]#^$)^)[!_]]&!)}+{_%(&+{(+%*}][^%)#{{+#@!$_*_[+[&}{*%+*!!%$))$(_
*@($]}^{[_&$@%^%#@([#_[_#_%]@(+^))]_@%%}%({_*^^@#_{$#_[&%{@$$^{%]}#!$^)+#)[%*^{$
$_[#^!+^[_&*%!^(%^![@^!_+{_(&*!!!$)]**^!%*$%{&*([+_!^]}&@^$)(_(%(%[}%#_^$#]@*^]!
%%][^]!%^%[@[{#!}[!}$@)@{^^[![[*$&$[#+{+(*)!^!)*+%*{{##[)%*!&#*[{%@!+((@##_}&+$*
({#!+*]+)$@+[[&#*!%(]&@^&#_^*@&@_((}_!!_^+#}%@_{%}$]&{{+{]+^]#*[^@]&}*_*}#!*_$#%
${!_{]{)$)#{*@+@@{$_]]+&+)^+!()[_*]{^^]*([(@!{&{}@()%_{+_[+&&@}{}(}$*%@$_^){*![{
(^@^&^@&!+#&(@%!)[]%((##@(}]_*#_*@%(_{$_#](#_]*]#_+_[_])(#&%#]_(!)]*$&*##){}]+*@
$+%([}$))^^!{}&%_%+$%&#[}^*_^^_#$$&^{@#}%[$&&({&^]}(}(@#*&#^)&{$(+^++$!*[{})]}^$
$#!&#[%{)(%&}^}[]()]$@])$])^#!]!%)_)!(])(}&*($%@%]#&(+^+]+%@[([@^]$_)&&%^*${[!_*
$)$__$+&@!@+#{]]@##(*]&+^}[!{(}%_}[]_^{(}@)}[$^{*_+@+[[$&#}^]^(})()())@*)!}[#^@{
(+_$$+]&#]+*^]))@}}**&[]}$]&!)^{)[]+#%$(}##!)_@$_)$_})[#%[!{*)_*(*)*$}#(^&){+]_]
()))_)+]{)*&)@!@&*__%{@*]&${]${@_&+)@&)*+*!][#_][(&])@}@!#^!+*@]!#)[+__&&%}_+$&$
]#({*#]_#]&*}_((+#!}]_}+&_+](!#%+@$}+@#&{}(&_}^*!#$_@}^*}${)_}%)_!@&#])%{#&)*!(#
##%*@!]##(_*{}@$!][]&+*#){(_!$$_]_^^]#{#))}_())[[)}@$+_}_*!{%%*![$*^#){([&&[}#%[
&]@_[@@_*)}!}#_})]_@)^[^&#&^!&{](&_[!&#}%!{)$[$()}*^#*{@&{]*$%$$*}@^+*)@(&(+%$_[
)*^{{![!&&[*]#)$}){^]%)@*]&^@@#}#*%#([]^&%}]&_)%!@@!*$%+*+#+_%%+$%#!%]+]{^{+[$%!$![^)[&#*@{+]#
_)*^@})]{])*@&]@#+$&(#$)!^$$][(*&(]))^[*^})!!#))(})]&@{}({_&)*$@{+!!]{($!{+@!({$
#*![]@@{%_^)+_#_][^$!)#*!^&]___}%%&_[}#(@+}^*)%[&!&}$^!!&]#!}(@!)%&^__@@{[+)[}#(
+@#}_*+$})@&_{_@@)(@#$!){}@${][#[#([)%[@&@@%[{%!&^#[!_{_@(&@}!^^*$(^*!%}+$)$]__%
_$%@%}*)!@[$+$[_[]^!][]{+!#]]*+&{){+)__$)_*%!{^&$&$!)*#_{{$!$!+[%[&$+!@!}@*_[$&_
)*$*}%(*^{}!$^$%*(!([##@&^*!+!%@[+[+)@({{_&**&#_@}$$[)%]@[}]}{*[++&)#)&!$!)_)}$+
#]!_^#^*%()^#&(]&}!}[[[{_&)#}#@^#[{__^]^#((^%%[}!)((%(@](#(())#{@)[!*](%}}))+!)(
})(*}]#$[_($^_)@+]@)_)*[&@]#+(_))]#}#@+#@@!_+#[_+_]*)[!$&@**_+!]@_]&[@%]+@[}%$})
_#)&&]&@}!&[&^*#!^&{*$[{+*)]&*##%!{@)@*!^_&@*#^%_]%(}]{_!)_%}_&&$$+^(_*())%#&&**
)(#!@+^!+(^+#^}[#($}*&*!@&[)+[@][[]])#&!%[++@!}]$%]*_{@%{@_!{#%([$@[^[}^!({!}^(*
[*&#$_*{$^!_!(}{*}]@($)%^)[}#&&_&@]__[$#*$*$^%]#!}[*&_@](#$_[#@]}!_[&+}[%]{)@#&)
]+(_@_{#]^^!&}%^(*^*[)#]({+@+@}#(*##{)&[#}]{!#!@+++^_](%+(()_^]*}!}!$(+}&{*_!@#]
)*@#}[}([%{[#^${[&)*[}#+(%^$@+]&@{$[^_^[*)@$]]+}{{^%&({](!&(%#^[!}&]$)+*@%%)_&{(
)%*@_]+][&{@$$]]+*}&+*{+$)$}^)!{+**!@[(])@)^*%@^@]}%^^!!$&!}^{&^@_+]_]$)*#({%]#*
[+%]@[^$%_}))_({_]%${)}_[}^+(%_+}*}*!_+^^@#]@{)}&[*#((#!$[@}$)!!]&[{)_#%]}*^@[@$$]&#[@[()${*#){)$(&*#(}@_%]})&[][*])+%#{{^
#}%)!))({*^@^_%@!)(@_+$&){[[(_+&^_@+%[&_*&#%#)[)^_[*+]+)[!^}]%&([}%@[+@&^^((^+^]
&(^[%[$!!]#]^_!!!&&{]}]]&)@#}@_]]]]${&#)@{}{!{@%*)^@{$^!^+@]$$$&)**_{[[(%)]@{*^(
_++]]}[%{(!!(*]_!^]{]$#{&$#$})+*}$^}&!]{}^_{#!}{(!%[%%{$%(}]@&$]#+]**!_#&[&$$!{^
#+*&!_^@@^_#$[&@(@$+&!)_^+{{__}#_)^+(@@{[){))@[+#*}_#]])^]^%^*_$#}]%)@@%*!!{^&+$
$_+($!%{_]$^+^@_()#^[[^)$+@))&}+_$[_&+{!$]}]%$_!}[@)}[_($^+#}_%*%@*+(^!+)()[#*_#
({}[*$@]#*[#&%#!^){@__[]#]@}^$*]$%%(^@^]+!}$#$#@$[@}{#+!)!#%%!#[(^*(*_+*{#%#)%(*
_@([()+#_){${^[__}+#%{[&{_@![{}+[#][)!&!&^$(_*_^^}[)&)}$%%)(#[)&)+!+{@^@%#__]$+]
{}+[(^{!(&)%*(@^#+#)*&{)[^+@#{{}&$]{{@_}@{&${#%]}*!@)}^{}!)(!(_$%((#%%%!{(_(_$@@
[@(!%!@_]!%*(+(^[_!_#!)[+@{(#%!%*]$&#@_@!*&){__(@@_}&*+){_^#^_}*+@*()[}){]!#!&^#
@!_%&))_^@!$)%^!*%@{+)@%$$]&{!*_!}@{&**(}&^+[@%(%*^$(^+{{($&]!_{})!*)_!}!%++[%)@
$){^@]#^{(!&][%]+[^**^$&*@&[)%%#$)%[^_[@^*+)@)}#&&#(_^+(%{)}}!@^+$&&$]{(^*(@]@%&
#+{$]$_[^{[*#}%@+}[@}+@**$})^@]#&[+(@^&%_%!$_}*@{]}}_(#@[*]}+!_#}))+]]&]]@$^^{+[
(!_}&&}&}!_@$#)]{!#^{%(#]#^&]#}^%{&&*}&(+@^+_%]#)#)_()[(_&!$+^#[%@+$]]]!!&^%[]!^
%%@%)]**%+_^${$(}]}^{]])@!%+@!$#!})!($%$@)+*[![}]&__[$%!&$^})%]^&%(+^+#@&^$]{{!)
[(%%!{![]#[^$%_!#]^)!]![])!$@+!^@{%}$@[_#_+{#](!^*(%#@_^}^__[(@&]}]@&!)_!$^%*(}[
+*}[%]*#@{_![]$+@_)]#@+]#_^(!*{#]!()@}}%!_&@]()()]*+(%*_{@)]#{[*^${_+$)@[{[$*!!{
%)+$^^[!!#^]^+*}#{_(^*!_!@]}[}%]}#]!(_+[[_)%!+($*@&$#*_{^@[()&+)$_[%}(^*^+}[^&^#
@$}]%(%&_&&*))&%$![}[$%}@!]^*}*)_{^$_!(%]^}!_#_$$^__)}[#^(]+@&^!&*($_[_$%])]*%%!
#!%+_{]$}($_+{^*}]&[@$^($^]()]+)+]+_]!*_^*^!@{]_){#+_#%(*#&#%(]*$[%%]$[}!&*!^^()
!}[}{!+^{@}!$)%+)}{*#}%}@]#}+_#+&(*)_]}#(!{(*&#&)$_{^%$*)]!##*}$}[_&(#^{&)%+{(_%
&[#$!&+}!*#%_!%+&&[@(![+*@}^%@)@+(_@(%{$[]_[%)}_))}$*#+$(]@%{#!)&_#@!!]{!}){&@@(
)(_)[&{!]%*{^{{]$_&]^![{_##($%)%}#})(]$&^^}&!#@@#]*^^$[)@}!!)@)_*$$[{@%)_^!}_^]]
})]]{!_@)[%!$#{&@!_+_$[_*@!@@@_(}$!$!%*($[%)[(]{[]#%*(**{#%$)_@_*]({^@!$))[$*$#+
[+!&#$$!})[{#(@{}&&@)&}^$!%_*@%#*)++{+]@}{@}*@^!}+])+{[^*#%(*(+$_!{])}+$](!*{{[{
^[#++^*[_^&![@&^^])&%#_*}^$(#^&[&(#(@{)%$(%]*%!)^*+[!_%@^+&(+([(^)#[{***![{*$@[[
]}_&]{[})+[^+%!^^@&[+[)$%)}(%}&[_}}(&#^]#!@(+*)){([)**({{^]@_}+@$%{)_&{[{_}{_[_#
!&@@$][{)_{$_)[&+]^!$*]}]+[$$#+@*_}$*!#]()*[[&{*!#)(@%+%[{)@@@}}[}&[+)[}{_^}*{+[
$}([#)%(!(#[([@^)%+[#&[#%)}+&*{(^*(])^{_]%{^+^^}{#&#[*$^*{#!&](}_#$++#]$[^%+}*&@
]+]@]&[{*!)[{(((^&$%}[^#[+][@_%!#[}][}^{%{^*(!!*+)^]][+{_%^*^&+{#{$}#$%_!*!&*#^!
%*)_@)+_$+[&@*{@(+^@&({##}#{*([^_+@]*{+@})!)&))]@@[(({!!)*(}($__(]+*}[}&+@}]$$]*
%%%@$+$]$!%@(&]]}{([_$*_)(}$]&*[%_#**^(!$#(+}$}))$]]!#^&[}]#!&$){@({$%(&@*}](+@]
_@[(%)])^!#(}_}!)$%@*+]@{&(^}!([!%$!!]@$$!}*%!_#{($*]+(!@@)_(+^]*#*)]){}!_^&&&]&
)[^@$+%]@%){!]]}}$!&}###)&[[@$)_*@}^[**)+#{{}_{#]!+(}%]$###{(!]*+[&+^(_(&$)_%]![
})+$)#]](#!{+)(${$*)[)$_&)[_%)+@*_]]{[{&_!}%%*$+]@%#^}^+*_}$!}@$]@&[{%(&%](}!)&*
%![[!$$^]}*!$[&_{_@](+(_}@_@^{$+%&(+[((@}[&${@%@[@%]})$)&&#*(%}_)]%%&@]&&{[*#@@@
!^__!&+^_@_){}[)*[#__[[+&{(!*&[!_@*&}*)%{*+{(^++^!][&#))*]*^_&^+({)[)$#+(+%{&[^(
*}*&*#[*!+^^#}!($[!&@!&_*$}+%&(%$!!^$#]&*[#)[}!^^@+^+#)^$])$^[%#[_+}]_)$}+[!]%*$
@_%]^@}))}*&){!%^{_(!]&+*%!*)@)@%$%{[@**!)#!%@+&!)@{[})*@)*[+}((])*)[{!@#[[}}*!+
(!%$^({*[^#&_](^]*%_}*}@+(}!}*}%$)[$}_{###@*&})_+%!*)[#__*]){_^&$%+$*@{%!}!!(![!
$(){)%!+{!_]+][}(($!_+^&_+_#&{%)$_#)!#_+@[{#*{]^+)@%&{@$$$@+_+^[%&(&#^^%}}%!)&&*
!([+{#(+})+${@*!{]}_^&_^}(@(](%^)%+[$!^^@_}[&][}@*]{@[]$#$@)++#&)#@[#)^(@{[!)}!&
###*#()&#*}%$#@^(*[!+#}*(}*!&{*$%%@@%[)_{))(+)[#_}$(^#$%}!$(#+(*{]![(]]^)@##@{#%
]*{+#%@^_%[+${+]{{]@@#%!+}#_{_(^!^}%[{$(@]&[#^_^}$!}%}@$(&[&**]++[*!(!@$[@&+]+(]
]^!]})@%_([{)(+$^{{_+_&_^&!+}*(}%&)+(^}{)@*^#(%[{]_+^!%!_++_[{}(_[}@$@]!^*_+{&{]
@}]}*!#@%{{)**)&!#@#{}(_*^))[{](@!(#!)}]}$]++]_$!+(!)[&}^])+(]#$[%&]#@%^](&&#+]{
[(*}*[{(%&_{#^}#*([&)*$+__#^@%]#^@])^*)$_^}@(_%+*}%%^_+###_{+*_$]%$%*[@%_((_#[+&
![%({@]#{]+*{@{!+]$+&^)_$[([([!!%&&+]^{_)*[#[)[*&}){%%]@$([@{%!(#+]^@(%*}([!^[_(
(]](}^]#}_!@!#)!+_}&!+_()]{&+]_+*[([))$_)$!#(+@@*_[$)$[!^&%@{^%{@%+!+{$&[#!&!$!^
+]@#[&&*[^$(@&&{((^)%[%%@[])&}%]]^[)]&@{*@([($_]_{[[@}^(%#]@^]^$%_!%_$+*@&+[+)(&
@)[$#+[}#[}^!)$+!_[}@&}**{+&%$](_&](&[]){($_^}}*!_*!]@$+($%]){[_^^_%([)_$@(*###@
}^*&%}{%[^]$&*]_%@&&]))$_@%[@@}!^@%[!_#@(#[[$_&!_+)@+!*#*({)**(]&])#^][[%_%[}${]
]_(!%@$++])^+_!#___(^#{!(}^${%[##+]+[!%%*&{*#^$]#[$}(^@_%*$[@][%{#*$[^*]&}[+#*+!
]#)}]&#}&%^$]%!#$!{)[*%))!!%_%*%%#@@*}&[&@([[})$&($*}+*&((%$}+$})(^^_%!+%+(@#]#)
}${@&$![)#)$$(#][(%{$+(({[)$$@%}+#*]%{&[#_}@_%%@!]+){{@}&}%*][}+*]*%@@{{}**+{%^)
%]![]!$_)@(@#+)[$(^![&@})%]}{%*%%+!]]}+&@^*^__%&*$]]}$^$)$}*@}+}{}_)[{_+_+%^[)}!
$*^%$%$+}_)#%+[}*)!](!^*&[*[%+[&+$^$@]!(!]+[!}+(&*^&#}@}([]*@$]]%_{]$*!_{)(!+_+@
((!&}!({^(#*(*#@%__(_)])*])($}[]&@_{(#*_&$&++&@(}@)+^&#+})@{]({]&})&_^*]_#^}$$)[
*$}(@^{[[#_{*^)+(_@&@)*^]@{$]+}*+@}!^[!!$^@*_*_!$!_{][{#*!%{^[&[}*%!(_@+*^*(+_}[
&#&]#_$}@_]$@_+_)%*@^${)$(^&**$!{)+[%]%!!@%^&$[)%&*&]^+$}#}{^_*}%&)!*%[[!!#_*@+[
^@^$!{_^&%%]@&_)}&_@#%*{!*[(($#[[}{#&_+%&+{*_*$%[{{[[$@@^_%(*#)#@%{*)#&)]{[_{(]+
^*&#%!%){+#)!%@]]{#&^)_{%&+[*](%@+]]}}&+@&]#}{{*@+]_!^@@)_[@_)@)&]%{_#]$_!({_!!_
$&{(@*{_$$*)(^*[)+_{!+%}+)}!}$^#++@@^$@[${%+([+_[&]}_$]!^%_+$%*[!%(()%$%{!@{^*@#
_&$)]^!(!*%&#+)^)^$&}(((!^%*[+({(&!*_#[@)!_}%!_{_)%_)$%$^%^!+)*&_*)*@})&&@#@*]}@
_@+#+&[^$#[&)%+@]!*(}@+#*[^@]%#^!*#+#$()+))[!)]]}@)!]*@#&#*!$&@!{]{^$*{{$$]%&++[
^(_}{%%}+%](#+}^&@*){+]@]}{)!@[#{!(%{!&@({_}{_#&(^()[}[[%##*{}$}$$()}&@++[!%}@$_
_[!(_^@{#[$))(#$^]*_%[$!&]$!@_+[#%@+()^[(]&!)[{$+*$)#)@)_@_)([%[&{&^[}*+!_]_}]##
)*!@&[+$}#!&@{&@+#[]*)}!_+}){{+)@!}!!#(#)_#!{][}@{^#}}_$]&*%*[^(@]]*@($]$$]$_+^[
$$%][[_^[*$*)(+_]{]}$%&}}(^{+#&]$&^&!!{[[@)_%!][_]@![[$_%}_[[{]!_*}[&{$+![_%(#!}
$)#^)#*&*+(#)#*$+)#]_%]%!+(()+_^({$**[}%@*!_)}%!@[_+$_))&(**}][$([)%{}#}&(]}[&+(
$&+!*]!{+_&@@}}@!&}@#%*{}%_^]%(_%)^!#(]^^@@}}(}_&#+_+*$!}[)*^_#!)+@(%]&#[_)[({*+
#!}^#^]]@$[(%&}#!#$+)^#$++*+^_]_)[$_]((+(}+*_#&*{}_&+%#+@&!}#%]#)@&__}@})}))*]*_
#)$&%%)%$+#&[(&*&^$*%@[)_)^(%^()!]!{$$*}(]}#_)}+*&&$}^(@)$^+*+%*(]+}_!$%@&%@_}*[
*[*$$@}@_![^]+_}!&_&{^+!@{{^@}}_*))%)]]}#*[*@+@#^+[+(#)]{_&&%@$&$$@}*}!%_!^*((%[
^*!_(${)(_+)!))$&!*%^#@!${($*)^)@+@+*}%(}@(+@#^%![([%*)@@)++*!@&(+[$_*^$}%)})^)^
_+[{__[@(&%!)^!^^@}![{{^]$!(]!{}!*^)@)_^$][#+}$[(])]&+@^_[]@^@&^$]$&!@_)[$$[(_#)
+^})*$%&!#)!)}*[#&@!_{&)[&@*@^#_(%!*]%#!#)(^[^@[%)&!+]#${%{&#{+^^___@+[{]%&!#((#
@@^})&)$@[@*&#$&+*!)]*+%#]$%$(}&!#}*[))(]@#+_%_#]}}+(^[_)&##&^&){[#{*+#)}#!!&#!}
+*#(}@^(+^+[^(]]^{*}}_^{@&+(}(+)%+}[!*@)#{{#{#&_&$*+&%[_!)($([+%$^$!)%]%&^[^@@%+
(*(*#_*_](@!$$#{&#*!)_@#})_]^$$^)[^*!*@!#})]){}{^{@)#&}[^^])!$^${^$$#{]^}#_*+!%{
^!+@#@[)@{^){{])#((!(@!%&$%#+}#&!^}+(#()^){}]{)!(%)^#}#)!*+}))+(+(&&[+%[##*$[)}]
_(@}(^[&@)^&}#+}{#(!@$@&*@+}%(@)]!#_#){^%^%_[&+[(&{^[**@^(&]+!#(}&%]_^_{(&**$@*_
}{{$&)^##]()_}_}])@$[$]}%}(^[^{(!%[)$[]^[{+(!}%]^#&#!*^%)^(&])%!#($[+^[($%$&$[(]
(+}%$*@+{)&]@+_[_]!!^^&#_&(^$[*}])%+{(@$$+_}%)}^)(&#)_(()!@[]&*++^]#$)!*]*+)^$[(
{}@*^[@)@![+#^$!%$!_!+_^*@$@@@$$+[$+}*)}#{))&)]^@($[__+%&+__}$_&#}[#&[)}^^]}}]]}
!*&^%]+_@#+[[@_%+$!@$%!^)*^}(+}$&&!!*_[}+@*@)^_%]]$%!&%*+_[&![}(*!}!+(*+[#}{_)}*
&&!]+++^^#{#__$@&([)*}]%$+@##}[&[*]_{*@]%&!)$}@&}!^)&@$+@#&!%#+*%[*[]$)##)##_+{{
$@^)[($^}@%}(&&)_}$%}&{$)@[+!}+^]+{$+$({_[)@*)&!{}+[$*}#!}@_{{(]]@*%*)]%_}%[{&$)
*_![&()!%]@[{[@%_)$$^#+$)_$]+_*_{%&{*__#_*+^_)$[%&@}}##+(}{%#+!%]!({$+_(}^@^#_@+
!($(#@)*%{]*+$++)]{){{@#}%)^]#{&){^+]+]++{_)&$*+&][}&#^^{__{))^^{&@{%+][[{*!_!+$
(&*}]{%{#!^!**+}[(}^}!%){(!&#^[+![*$&${]^(])#&[#](}[%%*${[)(*@_@_(((+@(({]%#})%)
#&^#*&+$)&($]!+]&^$@)#*^_^]{#%)*_@^!_+*+*{](*&[}*[[]#*%!*(&!(@)[))!%%)&@_{{!@({#
!_}![($%)}__*&%(^_&+){_#]{_!]&(@^{[#)%)(++&{{^}!^}&%$#%_}_!($%]$}_()&&#{))$(%*&{
([^@+^![{@%_@@@!(%}#@})#_){@__^@_[_!^$(#!^^(@}]+${)]*_^%@$%$(_^]@_$+^_!#}(]%+%[@
@)][!!*((]}^(*([__#*#}%$!]+&_[}*@(@^()_*]%&%)&[){((@*%%+)@$+_+{]^$+{%^%}@[*_${]!
[!^%%$+%*%&&!!&+^])}&^$$!*&(#){&^&[$}#*&}%#(}@@_*}*(}]_*}%*]+&){*{_&^%+]$)&($!!_
#(&$*!@^*[&#@(#[{]{%@!@[#@@[+%_{^[]%(]#&^$&+{{$+*@+_(&^{^!)}+^$$(*)${(%@_{!{}(#(
}{#$_!*^)@}&%*^_&^)])[#!}##^%@]([[{@_*}$^(}%&+&{[@#)){$[+){@}$]{)@_){{^($*_[($+@
@@@$]&@{_#[{!&$!%##+&(%@)^_}+^%#@{^{%[*%{&#[}(^}%((@#&_)}^][#})[%%}(_)_+*%{^*{}_
{%(#+(@%($*&%*##^+){([%[_&#(]*@+_!^_{%{@%_&%&[[$%+)@{[&(_@%+*{#_*!%+&[*(+{+*$})+
#%[^{#(_}!+!$)%@{}&^#+_](]+)}_([*@[^#$@&}]}*!@{@[++$++(&]_}@{+}#%)}+&_$[}%[%{+@)
#@%{&_@})$}*&!$}@^&[}%*!$(](#[#$+}@#%&(+($*}$]%]$$]$*%%%
$(+((!)){*&!@_%((^${&($@+[!!])&!#%)_$[{$]&[)@[$[^)$^#^)@^]%[&{*[_{}&$)_)+}$#}{$(
#_)}}@^${#%)@@[]]}]%$_#&}[@%{{(+{@%)_^%}*^}$#]$!{)#])&#^(]]#}^^*])*#$#@()^!)$$!@
+_[)*^_*^+(_]){*!^&][&!])$_@@]!@*!*$^+&@__##^##[%_^[@)_$%^^_&+@^{(}#]+#}$[^!*(_^
[[^#(%%([#)${*]}#$+))%}&#_](}$+*@]#]**$+@{}&{*[{%*[@^)+]([]{![_&&!_}#${%@]!(+%!}
%*#}![%&$]^!+({}+])()[]_$(^#)*^$}$^)%]{&#^)!^$}!_&++#&_[&!!_#$!_%#}[+)+@}^_)&&__
*}&+*{^[!#!}&+_(+]_#*+^@*({%((])}%%!%+#^_&^}[*#{])()]@#%@^$[$^!_)!@_@({@#&@%$+)]
_^{+#$@^*^{%]]^&)&}!_}[@[({[+){+%%+&_@}@^&#$#_[*_[%{){_[]*!$]^__{)!%[#^_*+!)&&%@
]{{{%[@_%)^&]{#($]&&$}#%++)*+&#__}&$*{]@}@}{{+*}!&{&#{[++*)}&{%*)!)_@@}%#@@+{)[!
$+**(@!{%!@^^]^#(@}[]@]#^[*+*[^!*_&))*_&((]#}&^$^!#+[)(#+(}[^(%}[${*)[@(&^^*{{%$
^^_)&([^()^!@)!}&$@([&%@${[$%#@}+@_*!}&]%]#_[]([%]+}}&[&![_}*+)}{&*)@*)@@$_^]@^%
)^%({{%_^_+@%%+#[#$$@#_&%##{+)+$%*((!{{}{]+%%+^_!+{@]{^^+^}{[_([_^)!%_[+%@!%!@(+
@&$^$(+^^]$$**{^&*&!_[_{_)*[{@![@%+@){*{&}%_%(%(@+{!&)![$$+(}^)[]##$^($+[#]}!_#&
_^]!$_)[]+@)_*@![_($+!@[#)&]!+&(_*]]%%_@*@!&&_!{^*#{+[%$$##[]@&[#{$^%@&$([{+_#{(
})_{&{}#_+&@$)}#&]$(&}+&+!&_[]%@}[@^]{)!_*+$!$(}+*%{!*)]}@%!*&!}([*+]*%*)_%}(@^]
%&[^!%!%]$!((+&%%@)!%_!+#^&}&$*^%%)#%#_%+[_$^#)%#@})]%^![^]*$&%^@)$+##}]#*]&$*()
&)))+&^[_}()^@@!)&]&&!$*!#+#()^{]__@!+%(({[__&**]&!*(@+!{^@^*[!)@#))$!]^[%}]#(![
*)$_($)){@%}}*+*#{]}}@)!){$+$^&$!]*)$_**(_+@)+{)![!&+&_#[_$%^+&^}&**&_*)]+)]]%{!
]*$[$^$^+%&*!&[(%(^+&{+&]+*&$_!+{)#]#}{+***)+}&@%!^{{@+#^&*$)&^_&!@!#%^%!@]){{+]
@)$%{&+{@)+)&^&*(!(%^_&+]%_)]_+!)@!)$$@}(@@$#^[%$(&@[(($}!&^#]()[%){]@]$*[{)&%})
!+!{%[)[(%$#[*)]@!^{#^@}&]&([^*{$#$&%#@][)^+!%@]]]$$(!$@[&]{%%!^&(+{!&#*_]$@_$^]
$*&}_&$!]%+[){+{@[(&!%@&$*@()&!@}@^!#]!)#^*{)^}}+@[[!_}@![$]!((&+]}}@%^]@[]])%_&
_@*!^_%}@[$)#%($+!]!)(!}]+%!+*{%]$^*][%({]@][}!%_*)^]}+)[[)(!&{]$(^%#_$!#!#!}&#(
*!]((**^))({^]{#@}$_$^*)$+)!]}()^+^!_&{][%%^#}{#}_)&}*&@(@%[##@*(}+@$_#$%)_{(!$[
^!!+)*^^])[+{&++{[[##$**]]%^]_+*%*@%!{)@%(}*!#]@$]^@)(%$$+$#$[^_${)&_)!}{[+&[@}&
(]!@^](!![@#}%@][{*}$*!]{(!#})(&^^@${&)]$^(#}{***%{!&[%)!#%(%+(_@}^}*{^!${[)#}_#
%[_!#]_+(!%[&]+^%^&![^+##*&{_*[($#[@$%&${}_[_%)_*_&{#*#[!)&_#^}&&@#))]+{(&$##**+
^_*($_^)]#!%{]_[!@(&$}($!_#&!_&^%&((&#*{}[$}&@{&(]#&_+*}&)#^}([{$&(]^@@}]%))(&@[
)$)@+[(}(*%^%$](_)#*_+*%#+{{&^@*!@&([#}{+_#{(@&_%}(]]}^_}+^+@*)_(&}]$($&+@]&[!@{
{)*]%({&%^@))_)&(}*((_]&$+&(}_)]{$^+]$+}{[#[#$%]#(%+_(@*&!]@)&*${@#+&_#^_}+&@*&#
}&[)}${@([!+]$}@%(%$_)&#{[]@_*@$}$+!$)@]%@*}%^}{**#{]!$]{$%%))!{((^)[}+]}{{&*$)_
_[+%{{@)@(!*_]#]((^+!!{_&]^%[^!^)%]$]#_^%$^#{}^}+[]}@$)}*@$)*{![})}([_{_*&[[{{+#
{&@[@+(*@(&&^%*[_${%{^}[[_]*)[!%%)$)])]^#+&}#[^(}]{[$%^^)$$&_(_#)@_)!#}*)+]}{!!%
&@&!##}_]#[*^^&&^$!#^%{%{#$%]%}{%^_+&%_]#__*&*{%!(]#)^*+&@#[$)}}{%^}!#%)[{@!{&[^
$]$++##!$*((#[(+[&)&^&&{{{)+##)_${{(!^*{)^!)}%}@]#*^%_@@*#[*(_#((_}*}]&+}$+)[$)@
@{!%#&{+$]#%]!*[{$![%(^!$$$$+{!@!#+&]%%^_(@}^*@%+&[!%$[})($(([{@#]*{_[@#&(![{$_)
${([*!{!)(%&]*%@{^_%[__#%[^%({[][^{{$%!$[_$@${]!(+*@}$((_%{$$(]](}##[!{@[@_+))*[
^}%${(@^]#%*^#+}^]&#$&{}[[_!)[$[&)}#[$&%])_^)[)!_}{@^^###_#(#!*##[*_+@(!$%@%$)#_
@%%}@$*}(!(+_[&^([]%}(%@@]%_}#(@&$!]$_@^!@}$_{]@#($)$)!{*_%$)]#%]$)_^&@($*}^&_[^
)%}%]}&]$]*#^{]%^$}&$(#&+[[][!@&}$]_]({)(#!@_((^@@%%_]___%]][&}})]{%%}[#@__+%)(%
}_]*$$*_@+])&{$(+*+]!_[#&_)}{#)!^^}[%^+*})!(@}*@}$]))((@$(@[$_{@$^^@^*_%@%]_++(#
$$)*@&[%@((#**]*][{(@+#%_+_++!&[$%&#_#!#{_(^^@[}@&#$[[*!]+^{$]!}+[}$!$!&#$__$%]!
@]_$}*}____$%[)$]#++#!+*&{$$!@%^$*)}{]}}*)^(^%!]+$%_@%}$]++($#&&[](&^%&}!(]@@!)_
}%%*%(#+{@@_&[$+*{]&&$!*}#$$[]%+}@_&)[@+!%{@%()[*{@*[}&%@]#@*$${_^{}^{]]$_+[+)({
)@$)}^+#[$@!+&(@*^}}@{!#[][#[&)%%^#]_$}})_@{$&*&)+^#%}{__)}#%[[%_$[[)}()()^_#$%!
*+$}&_)_@!{(}*{[&!)$[&@^#(@(!#(#!*_}!++#{#(]}!))]!}(#**({]&{)$+)+&$[^!&&[&!^[*(^
#()#![_$)^@+@)[!_({(])+{)@$[$_&+*$%)+[[][+]@^}**((!%!$+&#][_#}$)_@!}[*%)$%@#&(@(
^[&[}#]_}{)#}_[#]+}@$[(%+&${[[+%&%]!&*(^(#)%%[+]@[+]#}#@#]}+_+}__[^$#+^)$$!^!)!+
$}##]]*&#[@()!)#([]@#+)^+{*&$_@_([[]^@_)+$#+)]*@!!]})}__!@##{[(&%![[)[*){@^[@(]+
&]{#%]__@_^$@&+$$@![@_#@[!%_!*{}}^%_]&$$!&&$#^^*[&!#%!&}%&}[^$$*([&(_*{{%{#&{{){
!^$(()[*&{]&&_}]+]#[[^]#}+#+($^&$[^!+$^!}+*[]%+]@]*^)}(%$}(+)^#[@}}[*!@]*[}[]^&_
#)]*%_}[{%[%!^][^^)^(_@$!_*)$$&&#]^##[)[#$$$_&$((_)[{+%[**^#{)%#^^_[]_&(**_(}(&}
)**$}#})*@$@*$^(]+)#^$]!^&]$#!#$+](!%(*^$@*!_]@_@){+(%*^^&*@]%+_([^^%}&%!#@_({+[
)[^[*+${{_)*@)){(}[{{{(@{$_}+@&}^^%)*@*!{(!%#_}!&++%&+_({!{!][)]!}!%[}_{{+@[*([[
[++#[^%_[*+(%)^@@+_+}(#*@!))!%@(!#_*_}$@!$)^($#]([@*)$#*%*_&_!%%[}_}](#$&+)_^[[$
_][[#@%!_})!]@$[}&&#)#_%[!%]*&)###}*#!^{+@[!%^)[%{}__]+%%{&)}!$!}}*&{{#++{_@{(})
)@%}{&]%!*#_#]+[][!+)[{$!{+*+($&@%&+)&#_&(@}}}}(*$+_((!!$[+{]@])_($($}]]+&)#][@_
@#]&{]$(%}$@^{$@]*%]_*}#}}!]^*&{+{_*$(##{{_^%#!}+^!$_}^)^{]+*@_{{&+)^$[_[[!+)^)^
!#]]({_*[%((+(+%&+&$[]++{!{)})_*^{[$^+(+%({!(@{#(^[(%}*$_**[)+#*&*[_%{*@)_{%(@}#
_+@+_!]#]([[*]}]}#]$)[^+%}+]#@%@&%^%@$(%(^+@$^_$_((*{[&%{}^$[_&_}!%(^$[+%[[}!@@#
*}+^@){^#%}]{!#^]]!#}{{$][}%[*)#^}@![$@%!&)^]^({![#{$*@*%@+_@%@%*)@]+%[[++$(#{@_
)]$%]*[*$(}&}%)}{&(_^}{_*[^}$(+*}(!_{[_!@*[+}*&+^^]]@@{^+]^%}!^{)_}!%%&@)$]_(*@@
!^]}()*+{{}$@[{)+%}[{)$&@[{*%%@(%(@[+%@_^%$+#@[^{*%[++&%*({_]])(@!%!#*$%$}{+{+}[
{})$+)](^}{_&+%!}^&[)_%{(_#[{[[!$*&*&!%*%+)&+++!^#&$%@})$^{(+{)(@{(@]!^!%_)}*#$$
{{}}^)@[$]^%*#])#(]%&)]+}_*{_}{*!]+]%$!!)&%#](*^[)]&_&[}[}$}*&^{@}*(#*{+!%)++*@_
%]{#@+_&$^&}_{(#)*!{%+#+()+]{&[+#]([{%^_)+{!)+()!%*#__##^*!_+((%@_@!+[&![_*[$*}#
}]_)^&{]#@({}+_&!+#_([+)!#[%#{[}+&+(!%*{}^#)%)&^()^{((^^((%{#^&!%_%*+%{)$)^_}$][
#@^%+(!{$}+_+]!#^[]_&!{]_]]]&($}^)}&*}}(#(%&$%*__{)!])[(%)#})$*$_]^($*[^{+(^}}&[
](}}^](^{%!*[[[[_#&)_(#&$[#&&+&^]$])^}((#%_)+%]}@]*{*({[{${!+@[@_%]!(}!$%]*%[}_%
}&_]*_@$!}{}(]#)_#)%[)!+!${}(}_(}^@{_@!#*_()#$!_]%#@}[#{@)!{+{!&(]}][_#%*^#%!{!!
*%&[$$]+@){!{($$*!}]{$!{+^+*%%%!{%^$(!$)%^{(&)!{!^^]#$]%#^@!^(]$$^!^]@&*{)_())!(
+)]!(*@#%$&)]]*))%$!#$@{]$#^@]__!*]^}#(#+$_#+@$+]{)]{(]]*__%&%$_^+)${$$&^{%^%){%
][(%_[])+!(&][#@_%+{##@($($#@}#}!(}*+}_(_[]+{^@@}@@^(&@[(+({+$^^_%*!##]%]#}$({)[
[@%**}){@}}[*!@@)^]*+%{!}&+^*]}&#{*[{}]]^#!%_@!}*+(#*$@@{&}_$^^[_[@[%}]@}{])!+*@
@)(}&*@+*$$*@&&^](+#%+)+@*]_%}*^@*_!#@%)$![*&#&[]&%@}_}$]{}&^{%+{})#%%$[+*#!@{*[
%{#)&_%^(@&)#$#*@+*)$)+]_+#[$((@%[%&!%#!{_^^]{]@%^){(+!)#_^!_[+!*{*)&&*&]#{[[^!{
_*]#*_{$(!(&#@%&+_$&}@^+]+[(@{[))**![&)*&#]]]*)_&+!^#]
{!!!]_}#&+^&)&[^@#)[[}}]@+$#_^@}*%#++_)_&*)#*}][&@*_%_!#*^!}[[}]*}}+*_{!#%(&&)+$
*#+}+^)&)^+&{)^$*}}%@^+@%^)}!!%#+_@*(*]+)+_@#*}&%&*}[#!{@@%&_$)[]$#!}${_+!({]$}[
{!!@(+})(%!{+#+${{})(@*+@@&{@&+&$&&[&&&^###![)&#))@@_*!([^$$%$#[&[&+%}$*)[+$%!*%
@*})[_+@!]#$^^@@$&$)!#[!#[_@@@]_*$}^^[]@_{(%#*[^[#[[}@{__+@!#{[&)@&)^#()$^+*[@!+
}%{^*)@[{#)%)]{_*__^%!@)#[![_}[{(}]@[)]%(&^$+)%_#&_)])[(}(!&[(&(]@@$!&@&!&*!_}&!
#+{@$^$%#]$([**&_{)}#!(((@$#%})+#++(__{%#}+}!)%#^**+!}%+}})#}[@^**!*_+_++&}_^_**
&^+_(}@{((][#&)*__!$#(@_^{{)}_^$[+)!}@&*$$^{&&()%[+!&#&^]}{%{!@{[}$_#[#$[(_)&%&#
{}[_(}*$$)@!{@]{+&!+#@*}_]^[}&%&_^^({}{]!![^}+^(!{+[%})@][}]}[#)}]%{{%&!#^#_^[+^
@&{!+%{&$_#}(*!@]^+!@+!&$)++@[)^^##@+@*(}))%))%{}%((!%_*]!^)(!$^[}](!+){@++]]^%@
#+(][{!##}^{)$##{*^+#}$)+(*%)&&*[!]]^(**@{+[^%^*){{&$&[{}!]_$@]!#$@[(]^%&%$]!%$+
^*_%#}%+[$^#!#*+}_)!%+[)$%}(@+**)(}{$%!{}!!{^!@__{+#+)*]+[@_}+*^(&^}]]@)($(]&_*!
**#$_##%([*#%(*+^(@&_((*)*$%%+_@])}&$%+*$$+{#^&}[#$^*_][}]@](]+#&*[#+*%(&*&(%*[]
^])^%]*+_&{)}^!!@%$#*]*^&{[$^}[$}@%[%%@$+^]#)}^&})#)@]^&+}#!{**+(@+}}&^()$)#%)]_
$()#}$%}@^_()+[!(!*($##%!)$])$+@*[{)&+)&%+}[[}^_#}#*&(^)@[_*^[%$}%)#)#!](+([%*+)
$&$!_]+&)}[_%[%(!!#*}&(][_@}+@*+&&_{_(#%(!!+{&}_@$#^!#&}}@[%_$&]*$_^}%)^_({][}$}
#]{%($@%%]&)))$*^%+]^^&{*&#[))]*(+]*{[!_#[}]{_^%!_{[{%#]}{_#]&^^^)+!^{*_{+[}*#+)
[_)^_}_]&![!+&+_#@*%_#]#!&^!*[#{+%]{{]*%$}!*}$#$_[%})##}}_#}%]_}@*^]*@^(_)}+^^!+
*^]*([&{{#%{[{&@%)%+&!^&]^**}+_!!_(#&}{@@*}({^&!^*)^]%_**((@++#&)@&*%[]]+!$_[*^]
+$!)(%[{]{((_{*}%+_$+&_)^}@@^*+!(_@($&@()]]]&!{_++(^^_{_[!$[*!%@(][(]_{!(}%[*!])
!]%+@*+@#$^)^[^^)&#}#($*#&)}!#[*]%[}#*}_@(]}+*]]^)_(%&}*^+%#*%&{^}%(}]{$+)!*(&&&
+]$^@#_@[{!+^)}}[)_([!%[{(%)@)&^*{!)%&)&!$@]}@([*(^#%@^&))%*[[}_((@&)]+]}}_))(}*
!_}}]@#&({_#@{&))$^@@*@}]*+[*%)+[[{&!!^$($+]#$@)*%*_^{_^%)__!&$+#]#**)+*@+@%#]]{
*_[)+)*((]!{@^!@]%(%@[]^%[&$+^}$$@&{_(!*}$]%_#_!%*++}_])]}$@+&#(]$*[+[)&)([@+])_
[+!%^#%)!#_}]#]]_)]^**_^)^%&$$)%!*!+@#*[^&@^}^[{(@{%(![]#{&%[$]))(}[)^(({_&*#}*}
[+(]+@){$(@!{%$%)$+{$!$$&*^+&@)%}![)*]{^%#(+{}![_&[@]+!@*^}}{})++_${_&%!@((@]{$#
{+@&[}^&%%$&())#!_##$!(&@@*}^[%@$^#*&!@@&_[(!}+{}*&+{**$)}])%()!_&_)!*((^+%)_#[(
_^&!&(%{_%*[($])}%[{@{{^_[$}@&&_)^()({%![#][(}+&^(]&}!*@#{)]{i]%_]%^(&]^{+_([)$%
{&*[$@^{(]@}]%)(@&}&)}_@$}&]{#$^}@@&[]#+!%^]@!]]$+$]#^_]{](^*&!%_!!&}$^&#!}++)_!
^@]]*$*_#+$!^{$&[$_+#^@@}#)(#*&)$#&+#}+{{&@$^%+{++[&}#}[*#]^^()+(#![]^$)^#*])%((
*[)]#!]$]{+([^)%@{^_$!(*@}#@{{^%@}#&##*!_&^{%$_{_+%)#{{!$[&&]#^)+!]*&]{[!^)%}}*%
_{{$%_+*^{+!*!$)}*)_&%@[$*!&*#][!!&#{]$}}]^_$*!*&]&(@%_$*@+!{}^^+()+_$(!]*@)&#[#
(#[@!$[+{_{^%+&}($[^^}${^_[#)({)++&_%*{@+(^+_%!_)%)^+@#(${)*]&!)^{[#%+[*))(${&{#
&$+#])%@_*}{[}$!{#}!^%*)++$]&]!_{_+_]%#&@&$&)*!(]+_+}}]_+#){[^]#^+)$#!)&)[+#[)}}
)*(!%&]*{$+(_()_%$#_{}[]_%#{![}}{(!@{#$[*&(^((*+^(^(_]%{!]}+^)%^{{$**]@$$$_!]((%
(&[${_&)}[$$_{$[]{#{%%!&@(#+%@%)+_}&*##]!&^_[^[*]([*!]]!]{#*!^&$!*)!}_#{_#*[%[^)
[!*@]%*[_^!#{{)@![$+^__[]{($+}}{}[$[]^{+(*(^)&*&#^][@{&@)+^*{%@^)+++&!&[{(@{!]{]
)%$]{()*{)[)}@&@&@%^#{*]@@]&_]!))$^(%@+@+]&}]+{*[]+(!_{]@_[(]_][}}_}@[_}##+@]]]*
+@*_*[&%%(_++!{]*+&(*%%@{!!*%!*^}]}$&}#[+*&+@((#](@(&*{+](#%!%[%]@#+[_+(^[*&[}${
($@]^)!&_*]#{#&}({@](*_{!{*})%}#&#_][%)]$_*&(]_*%]%$(&+$(#+[][*{]&+^+!&^@}$}(}]$
]*%@_+!}&+}_}[%^]#&{{+*%$&_#}#*]&%%})$+]@)+^})[&(+@$)^#)${@_(%!($]}^!{@}){+@!__)
$]&{*%#]_!$&@$(}#^{]!!%^%_#&!$+$&%^_#((#)$@($[+}^[_+$@}_)]*!^$!{^)@&*[[%!_&*$_$#
^+][](^[]^^($_#[+})__}@{{%_%*&_[)!}+[!@&*}}$#%**#}%})_^$^}}&+*}({(]_!*)#[%(!+_%*
)+@%[+#$(+%@%%%_{+&_&_]()$][)(${{@(+}+_$_!+##@@&}[#+!(#+!){))@*+((}@#![!)&(@@)(]
^#!+{_(_$+!!%##{[{]($+@)#&$%)))&[&*!^%+]!#]^#{)][()**$^_!@)^}%$}+_]+{[]*_}[[*)*{
{+]@[!%)@&^^@$%!!({##_#[}[+@_*@]&@[]}${((^[{][]+#%![(_{[*)#}]@{]#(^])_&!%{^!#%{@
_]}]!^_[!)&&&]_(#]+_!_}&&)#$*+^###[**@{}{%^[&#+&__@@@[+t]+&)^{*((@$!$)%]$[{}$}&$
%!+$[(*%](@*!*})!#+*#+(}$(*@*[#]#)[^*#@}*_#%@@+@[!!{*^})_!^&^({(%(%%@(#_(&{__[!+
(#})___!{^@*}#(%#)_]_%{{]+@%${+![^{(*$+)$[&[${)&#%+$![{}(@^+}]#(}@#]}($($[$[+{}(
&}+%*$[(_+{#!+]@)%#)}_+{%&*)#^[$@_@}[^*+_*(!%&#*^@)@%^[@%*$_{}{{%[@^+%[$])])@[!^
+#@$%^@^#%}+)*!+!$%(}](&)##$+&[[#&^*!^$]*!#}{%#{*+&[]$)]%}*[*_)*#@^{%)}{+$^)_{$(
%){!}(#]^_(!^]({%@_@$%*{*@)*#!%$(*_(]!#*#%[*[&*)^[%&$_)!$[_&($]]%{!%&)[(]&{[[[{+
{{@]+](@&@$_^^(*@})$!}{@$_^+{*)[({^}_!#[@$[*%!%^_*@@}#_{[{_@**++)!]!@{_#]&&*{+&$
**^[)[%$_^)*)_%+]&_[)&$}}_]%+%)}^_]#}]__]*!}&#[[##![$[)%+_))_&)$_(@&@}&&{)+#_%![
]}(^#*@^)$$%[%*(({(^]}_$+^%{$#*#^+({)[*}@]+![&%_%&_$#@[$^+@$(##[[$}+*$!@*!{_@})&
}![+_#}%{{_$}}+]+#{]#+$![@(!%_&$$}+^*{^#^^[&&(^^##**$_{+*!]}][}&&%]]*&(}{+@+!]({
!$@+[&@)]_!__})]+]+&{_($!$)#$)&$]&&}{!^$)$}(!@%$%(!+*!*#)+$&_&[[]})^#]{$}&@$^{##
]#%@+!^)$^+&^_{({+[}#()_(!*_@$}}!}*+_[@^{{{#+)%&&&}*{*))+}&[#++{}%@(]_@$![$$$&^*
__}$)$+%$%(*^@)++@!*%]^){]]_}]++$!()&[{*^$%+]+*_{[{$[#*[[%^}]&_[^@^+@@^)#)${$^&+
(}$)][$&}#*_&+%#)(%^){](*]}}]}!+[)##&!^!+{_!@&^[[(#{[&#%$!(#{__}#&@$*}#^*#]@!}^_
!^$!@y{$][%@+^##](_*(##^_{#)$+$*&}[#%&_!+)*@{][_($#_$*{(}_[{+)$[)+{#)+($_]{}!]+#
(#_$!@*+#%+(#@_}}@^!$_[&_&@})}}$(]^]^(_^**@%**#&^+@[!]^+}+&+&[^()+$*$(}$!%@!({^[
)]*{(%#[_%{}(+!##[)%&!((^[}&(!#^!([)[&!_)(%&#@)&*$+]&!]^#!!^$*^$!(_+![]*{!${@%+)
^#)$#{}]%%$(*}(]#&&$)@_&+)%}}*(([]![$!!^&[%!{&^(&@&%$)@{!@}!}$_*$%+#]{])@!@)@_)]
}]{_}!%{^$))&_(+}+#&+*&+!{_*^)[}(((}_@(]^)_}!]}&}{&[((}@{+(([{])_}^(@^+^+^}(!)&]
_%*}_!^#[*$_+]@&#+{*@*+{)]^^!](]_@^}#^^%(*+]@^@]$*%_$#^*@[$]]_)]$+$+@*{$[}[%*{+)
(&{@%^+*}^(^&_+$#(@$[#@@(){!($)^)!])(_&%#*&[@{]{]#@(]%@}{${[})($+++@*${+&}(^%)+*
{#]!#)]*&@)+#[+_)@&}+]+_*}}]*{{%^!+$+#$(%!^**!])%*_}$]!)({$^_^+]*#{(_*[&!(*))#@&
@^%@@}]]}%#%]{{#(#**[#(_#(#$]]*)_*#+[_#+}{&!]@&[]+{*^]!%^*_@)]^%#++$&@[)([+}!*](
&%+(&])^[)@$](**}]&}$]&%^]@)[&(*[(#^{$+^]@[%![_{[#_[){_$)!%![]^_%*$!@+{[&%)!_#((
$)[__^{%_!]_#[&_$(!)!_&}&$$}](]%{^(&{%$!]+[+_^+{*[@+*+%[$@&#+#$*}&{@%##*@(({)_(]
}_)[^$}^{[$@^$@$&##)@[#$&$&_@]@{_][{}!(+[!+@%&^&[%&${()@@_[&+^^+{)#^#)&%_]@{$&(*
{()}$]!%*+{[[}!+][_*!&]{%+)!^)!*{{})_&[*[})+[(@!__!{!]&{^@%!@]&[&^}+#[{_}@!+_*{&
^[%#!^]+(*#&))([%[%$_[#_+{{_%!#&)^&#)#!](+(@!(}}*#(&&+%!}@]%@#$*_[$](#[@#[_^+(%{
@#(*!__{)_#^!{!&%_*@+*(&[^_(*$#!@_*}#+$_*${@!}*]!}@)$^@_@{^(++(%({[#$)!}!##%]&[{
!(+}(*!(&_[}}{}+#{!#)_[!)&))%%#}_!]${*}@})_)}%+&#$]&(^*[^+&{)}+@((&]])%$@((_(^$[
_@$)[[+(!@]_()}*]*+[{&@&[##}[&]%$](+*{]!%)]_&%^$+%_@!#&+@[(&{){)(]+[]{)!^}+!}{[$
{)@_&{_[^++^{[%*!(]]@_]}_})(%+_#!+]$$_&!+*[(])$(!^($)}^+{&!&__+_{@+}[((&%)$][^{&
*{_%#&${{!@$)$(@%{{*%[+[*@#$[@_{}{[#($}}_)%)&+*]((}*)+_%#{%]_$]%][!+[+[%[@&&){!@
(&(+*[($}**}$^_!@]_{%#{](]@{!#&&&)[$!_(#(#$!*![##!$_!*{{$@@*_!#[%)}%^%(%#$@(}+}$
_#@&({+)+}^*]^!^})[(^@)*+#@]%_(**_+}###[_}]*$${]&_[&{[*}+@#}&^{]_!&#{%())](^@}%*
$[%@##)(@__+{#@^_$}}$)}]#^^@#&_^++!$^^%%#($+]&%&+]}^+_@%$})*$^*&*+[##@_{(&}@^*]_
_$_[@%#[+***&@%!^{}!$#}](_({@]]{)^$]^*[*]+}}!({)
[%%(@_${[(#@%*_+^]{}+^{}!)&#}*#%(##))%(+[+@!}$^}](!_$%}$&([#%}[##*[#*]{$^#*(^+[^
}!&]!%!+@){](^(*}^_!$%]^[&*)_^}!@]*+((!^+_$+]_%[@&+(*@}}+}!]#})!*}!)+@}^}_+*#+^!
)#$_#{&)!$@]@@[#(!]^&^+!_+@^!&[*!)(*)*&[{_@%$!__!%%[#$(%#(]$$![[@!^#%(_)#!{%]]*[
+^$@$&!^%+!^[_}&*$__@}{])%)((}_^)(%^)$@}#)]_)[)#{!}*^&&__}!{&)]#_)[$$%@{@$&*@)#{
^#{}%^&]+}(%$@+{*^})]@^#^#]@$%([[#^(%[)]%#$}}*_$]}^]*$@#%$#[^[[%__*#@)(_![{${{{$
^*{##%*!!&]{_[$#_]!&{(!@*(+%*[%_($]*#)($)%][^]#+}[+_{})@)}*&(]{(&(}%%@(++$}@(#[_
}(#[(@])[+(^$}}+!){_&*)&&$((+[+)+#&]!@^+]}[#}$!*$_}_$__@^))$*){%!@}_){(@^($)_&^%
]))^)^][&$+)[!(#!()(&[%(&[@$*!{]+{{$(#^&_!!%@)%[@_(+^]#@$]#*!$%#()@++&}+%[[_#(*]
#!&&([_}[+]]*%_$+(^[^)$*#{+{!$%}_*!%_([{*^{*(#}&[$@[[_^$&!()*(({]##$@@&@$}&#{#@!
&_@+){!($$(_}++&+*%@[%+([)(}!%%{$_{@$[*}_!^)#+)+{*&)^]+[$^))+{(++%*^!]({!&^}&_(_
[&^#)(&)[)}[}}+$]*)+)&_{%}(!}(+%(]+*#([+*##{()_(}}[%[]*]{${+(&)}]){)_^}[]()}#$@%
]_}(_]&}[&#%!{+@(##({^[+#_)]@!$]_@+[[%*_)}]([$}}*+#$+{$+_{}^][]{!^!#^{{_$}$(%)+[
[^%]]+@}_%){$%&[@!*{){)%##(_{!#(![#*(^@{$$))#}@_]{#_@{)]#!]!#&^]!@^_++(^($)^#^%}
*($%[*(++@_([!@)%&%^])&&]_%*_+)$[+)){_[)+*+**)][_@@!]&[%@$(!#__@]+_{$@+*+)_[%^}[
(++$%*@_](}_(+!}!(%!*([&#[$@]#}+@@%^[]&^[%]+[{!_#+{(*)!*+@*}+(+!*+#@[@#!)#*[]#%&
[_%^!#%_]$}#+[+&[@)_#]+$%{]*_%#}}&[}#*(!))@_+@$}$#[]}*@%!}^^&$%&]_@}!!}}{_{&#_&}
$@$+(*!{{{_}!+[}$+_)_++$+}$({$^!*_@]$&^${%$}_!%_{*_[$+)@%+{%&_^%%!+_([$_]+&&%_%[
*]+[!%[^_*+&*$(&@@(+)$!(!#)}!}{+*)_^_*^(}^}+}][&*_@#%{^!&{)%_](**_%&%!!{${#+@$#^
%)^!^$!$#*^]$*}&{]#{*]!{%%+_({%)%+}&$%+_(}_^(%{*++!@^}*_{([[_#_++@+(*&$(%+)+$}[)
[!}&#{$+_@&_!}){{$(}[{*@%[(!@]!{&&%$!#[[(){%#%_^#_{_!}$!{)$$#&_^){[(#&$_^{%$!^}!
((*&@}}&$)!*@$}*^!]+]))!!*%^[%(+[{!(_%]&^$[#!#]{+$+]*}[[*@&&!+^#%!})&$]{*(&+@&+^
{$!#&$[$}$!][@{%@$$$}([{)(]*+$!}$*$&+@%[$*)#]}_&_#@{^#@!@@%+@([)]}{!_[@^+^%]{){&
$@(%@)^]*]&%%_%*#[@(&]])#$#!$%$}@{}!}[[@}#@#](@)$%{&)}[&][[_%%(!!(}%([[){^$@[@[}
%#**%{@_)%{@{*[@#(+&+%[]{+&{}_*[%#!!**+{{_^+@[[@^}[$@(}@[${@@}!*@!(%{}!#*_[&^@[%
)]!)(*(@]#@{%_*+@_&(&*$+&$$$$)+}+@$&)@}}+_*}!(){@@@]%]$}@%@())$^]!]*{!^!$&!([*%*
{]){#}@!+*%(#((($+(_++)*$%#!)$*[]_%)]&}@_{#]]&!##&$$)&^@&{*!{{[))(*{([^*&$})@$*{
}]]}%}!!*_%_(^%{%&*)@^&]]_!*[*{[^%[(]%]*!+])[*(!}&^)]#{&&%*)$[(]#(*@^}[(!](+_[%[
%@&!&*_]^#*_$]^$}^]##+_}*@)%{@[$$#)$*_&)+()@*@&^_${[@%&&$[!+_)#^_${+!&{[#^^(*)&!
#%(^&!+$@!)_*##{[&]^+}(](+%#*%#&##!(]%)!($#!^^)!(_$*!_]&%@#}**+@&+])[%$%@$!]$[!@
%*}_@^$%^]$&#{+]!({[@}&{+@]!{&!&)#((&&(!]!_$}_!!(#}#&&[@_]+%%[_]!}%###*&}&]^^[[_
}[}^*{+]@_)]@)_#*+]+$}+]{]!+&}}}@@)&{*+&#*}#*)__*@@!]!]}}#{!$}*@@@}#^{{!}##^!&@!
)##!]#$[@!{*%+)*#+__)_(%}^*#&[*}{_@&+[]_*[[}[@{]][@)#[%(*$[*{%@]]#${)!%_!*}+]$_)
})_%())]{(]^+)[+)#^{*_^([%]&*+_)][%^&*)++^&{]+]$&_+[@!%$_([&%%!@!%*)+(}]+)()!#}{
(^*&^{[!#$](%_!_**!}$$!&[^{(!#{#@_&^]{)[*+^](!&!(@^(@!@]%+]$(#%_}+)$@}&!#&&{^*{]
+%[!{!$((^_+&]_!@^%#_+}({^^}*{{%]^@&+${%%^[*!#}_(_&&)!$}!_^{[(&$][(%_+$^&}#&((#^
!&]{[+)@}%![(#*)$+$#){)[^+@^_)]%$#}!&}&]$&{*&[$!}%$]&_}[*$^)%&{]}]![[^_}(#{^*!!&
*&[(_{{+}($[}$*()&}$&#%!%#)]{@&$)[}&{&_%_[(((%@{]_*^(!+*[[(*}@]%_}])^%+%&([]}{*&
+![}{([&]$^[+{*]()&!&@}^#+][(!^*^+^&&$#]_{@$+@[({]&)!&)))&#*(%+*&}$^_*+_&@[}{}%^
{_$^+%+@(&@[[)}*@{!_$)@((_*^_$${*#{_#{%@()[@(+$#[)!#*^!(}{!@[+%[&_%()+#%%{)}^)$*
&(@%[{^&%^({(@$*(**$}+)%}&)!+$[((&)@]@@+{{]])]]^$#(@%)!(&&]+#+[&[&}$__$$@$+]@{*#
{]@]@*)!])})!}!%[+$)@)]](}*&!}]+![##]])]_[](+${+]_*@)_^{}&^)_*{*]#[]*{+{)(*&^_#_
&[#_)$){$*!)!){$*$(]{]_&%%}$@&[**+![#{$}]@$$[@@]@{[#])^$***+_#%@$%]{_+}&*!&_%&()
$]*&[)#(%^{]%^)])@#{!^@(@)#+}{$}@_${_(_}[^[+#*&^{%!%+$)){_]*%+(@^{_*#$*(!)[*[&))
^&+^@!!!+])*@__%__@${%#_(^@@*}!&])[#%_!%}!%{@}#!)(_*^@*_)$&_+!#$)&}#_&$}%!{^!#&^
)_)$}%^+_!*$(@%}}@)})}_&{!@^^!&_]**!*[[()]*%(]}!#))+*@($%)+){!#@^+}@((*@[}&%#{_+
{@^+!([!)_!+@+}_+^!%_([(+)($@&@##($_&!@##$%+$@#[_[$!^&*])!&(_]*]}&)[(((]}%[@&[^@
]{*+&_)#!(@&#+((&!%!%}^*&[]#$*^}}$]&(&_{}+_}$%#&%[&}*)*]]&+!_)#[^+%_*}]])+$)%!]{
]##]^($^_)}[}[[)}{++}+(^%^!}}%)&[}${{*+&+@%}&(})@)+%!%_*(*[!+$_)[!#!@[%)@^}(#*%+
#]$]%^)$!{]&_[%%*}_#_)__^[^^#(})}&^%%%_&}&$_$!&{![*^}#+@!*){%)+!]_&*[$)%[)!{]!#^
[{*__(+#_)+^%(%]_%@[++[((^*($_*(_!*+$+[&!(*_[{{&}&%*%@#&%[#*_[_&@&]__+{@$)^%_#$^
@@%!+%+]_{{}*{[]+^*$!]&[$+_}{])]$]*##__##{#!!&)%!@^!!*+#}_{^)%{^*(}])]@$_!__)!#+
@%({&[^${{_{}#([{+{]!@((&*@!)*[}$}(]%+#@$%%&!%&]@$(_][#)))@$+}@%*#^^@%&]}{()%%^!
#&&)++!(}{]}*}}!}(@*@!]^%*!$_[($)!_^^$_[#!(%[!}#&$)@$}#$))&)[##**@](]]$)}}[^@@^#
}&&){[$${&}+[+%_}!#^#%{]_%#%*&_}}+]&_$*!&&][_%!]_*+#^!]{}_!@(}(*(^^*__+$#@#^]*%%
%]^}!_{}!)%!{)%+[___]]$](*^)))*^${)^^$_&[)&}*}($#{#^^#_@[[+[^{{[*__{%$^$}*{{+#{{
$%&+])(^^*$(}#*[$_#%$}!!^%&($}!!(]*{!}(!_&{##[{!+]&#(((%@]!($#%$^@(%))@_)@}*})+[
^]^(}${[(^!*{}!(_[{^*&{&})]]&}![}$!}*+])}[@(_&)[}*@_$_{%[+&#(*_#+)^)!&@!%($!}#[%
[@&[+^@$}&{]{)+^&^#{{}@!}{^{%}#)@!%([$(_!([+({)@^(#@!)$[_&](!}@$*$@!(#[$+!@][}_*
_^#&{)][@*!])^))+@+$%[%&}(^(!@}([#+***_*[^)$((^*(}([!]@##@$%^[{+^**{&[&@@##)#(@#
{+_**$%(#$&^$^]__*)%$*+{#+()[%[(}#]}*&$^%]{}%&(_([]&_}&}*@}#{((&!@!{#+__#*#))&[(
&[[*+]&{[$_}*#@!{]}__!+!!@$@#}+@!%^(^_^{}]+^({)*[**%!@[^)#$%{&[[_!(^(}}{!}!)@###
(#*+!#@&+{_{&%&$}+}]!@*$#&&({(^(_#{+$*+_)}^&$##&+$]$(&$}!}!](%#+^]*$%]%)}^}])#_!
@)*%_$^]$@*&(()&+)_[*#@&{+&}^#*+*{[)[^$+#**^^(^]}][$_+]*&&)@}{&]](_]_]^&&}%@+}[$
(+[%*^!#)#+(}!)[!^{&[*[($%{+([#[[@&})$]@{^^#!$]_}!{*{*{![@$_@]+*{}[!#%@}]&#{*%{}
!!(}&+%%{%{%({!_(%_]@}+^%@_!*[&{)$%&)@]{+&_+%&^_}([_@@)$^!^}{#[$@{(_]{))*$(#!(+%
&!+]}]__^&@)^[}]!*#@)%!(%#)_*#}%*__}_+(**{[+)]%${+@^(]$()@([[{(]}$)}#!&+{%@$+{{^
&}_!$_+$_($*{!*#&$]%_)[({}+^}[@#+**%*}#[([@$][)[&+_%!!}@@}*!@%(#[}&$#]&{+^{#_*^]
@_%(]${&*&#^#_@^{%*^%$^+*(%}[!!)(!%_}!@@#&+)^^{##%&}$+*+!#}+%{%#)^@%[{}^%#+(+$[@
#%}+%%#_&}++)}{]%#]*)]+)]+^(*({}+}@_&&!![)^_$[][@*]!%@(@*+*_]{&}%*^)+[{!_%$+%[*+
&&&!#[_]])$}%[_*!)@}[{*]%!@!_))]*%#^{{+]%]]*@%{%@+^[@}(^#%&**+)*^*[)[)%$[^#@^&+#
+++}%_@@[(}[*%$#}^#+&}+%%)$+_{^(#%*{&*%(*)+([!@)*#[&!@(&_$@%$%]^^&{]@$}[({$((^}!
&%+{#_]{{{^[*_^#^!*@}_}*}+*&(@!^#%)@%[#(&$!&%)][#{*$${(+}($*(*(*&*$^{{@^]{]&*@!)
%&)**+]][!+@]*})(@)_]@{($!%+%%]_)(@{+!*__@[(%&&^]@([@[&%$+(}{{&]]+*}($+&}%(!%*@!
(^)${)%)]]$*!++[_(_($}(@++[^{]%{{!%#!%+*$)_&@&__#([__$[&^!}%$)(]}@+]+_@*]%%{&(@@
$_[(![!)+@[]][]($@%*(}$_(!^^)]^%){{(_#)*#][}$([)[]!_![![@}}}!%^^!}!*#%&{$&!#_!}#
[^{%{$^+@}&_}*_]%(}@*^}]^@*_^&&)^^}[}@]+^*%({*$^{+])_^%*#!${!#+$](+&]{@_+&[!()@{
@)[[){&(#[#{&@&(_]@##])}#&%*)_&!(}_^^$_)))}}}+&&$%&]}}$)![&{#_!(!![[![&(+{{++!}}
%}^%%#)))!}^)}%{*@*_{}*^{&(^+$}!@$_^}{$*^#)(@^]_@@%)@_[}&[)++)$&]+!}![#]@$%@]]!@
^()&++$(_[!^#[&@*@@(){#%@((!(%@(#[&+*%+(^%{{*$%#!(#^{(&*_}!^#%({*_#)%+#{$##{!$]*
{+_!{+^$!_&}%(]+}_}@**(&}(+@^%#+!#{#*@@{+!)}^{^+#(!&}[+*%+@}_+&+&]&+(+)_)((!{%*)
([)_+_[]&}}[{{+[)]!%{&&__&$+${_+#]_$}!&#%[@^^]!)#)_+#$]((*@+#$#)@[)*{[}(()$(@{*[
]}#*_+{)%[+!^{+{(&#_[_}_!{!*{[[)]][$[}*@[$*&))+$[&]@)[*}+^___%!]()^)&!@&[*@_+{}&
[{]$#{!^]^$#+*$}#*)(]!@^&#){][$)}!+%^)@#&!%(+^^($(%}^+[*)#+{%!))}(*&]__})][_))}#
())#&##{]$#$](&$%&&$)^{(@%)$%()#)&&*{]&^^+%$##%{!(_$(**&(_]+{%[%$!_){$*@@++]&^$(
@+{+&%]$)+@({$(+{!*#(%)]+[}){]]#)*[]%&{+)$){!&$]+^++_@]#%)[&&^%]#@#@)]@}%$[_*@%)
[&*^*})@(!{&^#!([%@_![{)+)$}_+)%&^#@#$$}))^&)}({+*&_()&@]$^#(&&{){)_[}{@(}#)!)%&
({+$[!#()[]%{$_*]*^%&]@{^@{)}}_^}@!^*)_[([{}]{*#{]&}}[$_[}!%%&_{{!$[}&[[@#[&_$()
*_$+)&}*){${}!]+%[{{!+)+{!&]$!}{_]&)!!^+){&*#{@!##_(^%^$([!+&+($&)##[&[^_{##{(**
{{)#*%@*[(^(}!%}@*}@+]^_}&&&}&{[$(@[#*+%[&%{$$**]]%(!$+$!]^+[^_(&*{#_^%[[#+{]#_[
*}]#)!%!_[})^%*@{!{$)*_+$$*}%(&]%^+$@!&{[]}**})}#}[#{%{$#@##(])&)((${^]^[%^&(!_&
{@((&@&)]!&{}@#])$($}#}@)#[+$^{%%&*]&_!+{$+[*{()_&&(}%[})}!}(}[!%@$#!*(^^![+(^@{
(+]]@{++#)@@%(!&_#@^$*%^)*](^}#]@]}]@++*^+$_+]^_][]@^#$^&!_+!(^+((&@^@_^@)$[#!)*
$)_#]*^{@_*[}@}*@@^+**+[[)**{!)!{##[(*}{)+@{}}{]+!*+&*&)_^&{*+@[*_#{)#(}_*]%{+!%
(}_%$#)^*&+[([@*!!{@*[{%@[__)&*&@{__+_}[_#{]!@*%(^&^_$_+[([(($@)])]))%_{(^@!{!#@
#*%%[#&#[+)%__]{++*)]}![[_%+__{${}%!}+!!)}*)_+!#%^}[!)[@[]@@_(@&&*]^_{+[)}@#{#*{
*%!%@{$%!_^+&]+@{$)!&_}_}&!}#)#[$&_&&_)*({({$[$)]]%#^{^%}}^%#]&+^}[!&_[[[(&{@+&^
_()%@#@{%_({${*!)(*+$*!!+$&&]{^^!(#}@[&@&[}^#)]+{)__@_[+]%$)]}$[([^{)%&)@[+]!_!+
_#$))}!+&&#(^[^(}%(%%$%+}{$^^)&^[@}#$]{!}+*}]{_}}*(*@]}#+{}@@!$(}])%+^!#@(_^(@[(
_#}$[^[@^+_&}*#$}%^)(#*}%_+]@_%]%&&()[^(}[*[}#@(%%$}]_)^(!#%]#@(#+!#{#$]^!}))_]*
]+%^+%))]+%$]+!%@[#@[@@[*#!+$#}}*#()$_*$[^}+)@#^^$(^+)^@%](!+)^[#!#_*{^^]&[_[_+}
$]%@^+!##}*(*)&([]+]##%$)+$_^%^@&((+@&)%}${#&$!!($#&^](^^{{(&+]_]@&*#_^+#!(}]$*&
_+@#[})]])[((#@&]!&]*&{*&#_[#(]{(}!]_+@[{^+{{!)*{!}]@@^#*{*({(%#(@@(]{%]!@@+%!!*
%(!{&^%%&$(!#@{+*#+*{]!%&)%*]*#$]()
]!{][@$}@)$__*_]}^(#*!#!_!**@{(&![]$_+_%^#_!%!$&@](!%%((%[#]&&}_{]+[+*##])(]%^(+
#(_}((]@}#$^_})%#&&((!^![^}+!}{$(%*{*$@%][)[[%&^[{](&+^*!!!([__[{^}&%+&^(*&])*$&
$#_}*!(+([_&%{^&[([%]}*^{{([@+@]@*&@_!]_+([(#&!]]#$$#]@#{_]][_{@]{*))$({%}_![@$]
#)+[])%]$+^^(}^!([&{)!#}#}}#!}[]]{[++&!)]#]]%^%_&_}!&&$@#&!#}&+]$)^_^*$]!$)}&{#)
+[+!_${^](+([&_%&$)#{$%#[%%][($()+!*_(*&!}%@}@%_#+%{%&*$]*{$(}}{)}]%))$}*^$]$(^^
!&*[^]]&#%&%!_+#&}_#}_]&^+@]%(!+!_*](@]}__+@%+^!&[@[)@(!*[%}^$!$(]!^_])!&_!!_[{*
(+*]_}%+(%[{)]({#[+$&&[^@{&#!@%)!+&}$@+@[+&_*!$(+#*%!+$@{{^**{)(]*$(}+(#+^}%@%^^
!$%}$$}+@^$$%{}{#!(%[]$!*}+(]!%{(^{&^{$[$)]&&^+{+%!#[([%^!{]]#^@!{#(&]@_$*_&!%(!
+_+}%@#{_}^#*)%*(*}*![}[%_[[^@$&%)([*{_${)$^^_!+}{)!)@_[*$_}*}$#[+}{]*+!^])}&{+#
+!@!^*@}!}&{]*#^@}_[)}#@%!_*#!$}!)[(${+^&!{[&&&*[{}+*+(#+_[}{$$)#([*!)%@^%_]#%$$
(++^+&)}*_&%@#[^^+^&@_%]+$%$#$*)@!(]*+@]}%$$}$(#$&^(%[*([&]*^&}(!#{&_}^(*{(+$#}}
(&_+][&_@)$&$&^[_$(++$^}]&^^*(+*!&#_$]*+@!]+{%^_*+!&}@$!#^{+_#([+@(((*+)[()__}(^
@)](+[$*_(]*$[[&@^(_*#(*&!^{+]_%)_)^[}@]#]%#@+^+[%{_*{!)}#$@#)_$!_(!*+#}%%}+$&$[
%&]!{{%*_!*}&)}$**_{*!#%[[#]!](^^$![#[[*}%(_#^^!%))!_^@)@**@}}(%%{#*%@(((]^%^![&
}!)$]&($)@](+(#{$)_%^%_^^#][{*[)%}+[##(##^{$}^]#&(&*{)%)&][&{]&#]}[[^^&[!#}${@_(
#@}&$[[%]_&$+)$!%{(}$^$}*
line = line.translate(None, '!@#$%^&*()_+-={}[]')
print line | [
"[email protected]"
] | |
f07bf1658b8fb9a73a42f6b0e5754749024c1f5f | ce32e0e1b9568c710a3168abc3c638d6f9f6c31b | /vnpy/api/easytrader/joinquant_follower.py | 9e654ea918e1a639ec127fa4540b01545d15b6b9 | [
"MIT"
] | permissive | msincenselee/vnpy | 55ae76ca32cae47369a66bd2d6589c13d7a0bdd4 | 7f4fd3cd202712b083ed7dc2f346ba4bb1bda6d7 | refs/heads/vnpy2 | 2022-05-19T10:06:55.504408 | 2022-03-19T15:26:01 | 2022-03-19T15:26:01 | 38,525,806 | 359 | 158 | MIT | 2020-09-09T00:09:12 | 2015-07-04T07:27:46 | C++ | UTF-8 | Python | false | false | 5,421 | py | # -*- coding: utf-8 -*-
from datetime import datetime
from threading import Thread
from . import exceptions
from .follower import BaseFollower
from .log import logger
class JoinQuantFollower(BaseFollower):
    """Follow JoinQuant paper-trading ("live simulation") strategies.

    Logs in to joinquant.com, polls each strategy's transaction feed and
    replays the trades onto one or more easytrader user accounts.
    """

    LOGIN_PAGE = "https://www.joinquant.com"
    LOGIN_API = "https://www.joinquant.com/user/login/doLogin?ajax=1"
    TRANSACTION_API = (
        "https://www.joinquant.com/algorithm/live/transactionDetail"
    )
    WEB_REFERER = "https://www.joinquant.com/user/login/index"
    WEB_ORIGIN = "https://www.joinquant.com"

    def create_login_params(self, user, password, **kwargs):
        """Build the form payload expected by the JoinQuant login endpoint."""
        params = {
            "CyLoginForm[username]": user,
            "CyLoginForm[pwd]": password,
            "ajax": 1,
        }
        return params

    def check_login_success(self, rep):
        """Validate the login response and install the session cookie.

        JoinQuant answers a failed login with a short ``set-cookie``
        header, so anything under 50 characters is treated as a rejection.

        :raises exceptions.NotLoginError: when the credentials are refused.
        """
        set_cookie = rep.headers["set-cookie"]
        if len(set_cookie) < 50:
            raise exceptions.NotLoginError("登录失败,请检查用户名和密码")
        self.s.headers.update({"cookie": set_cookie})

    def follow(
        self,
        users,
        strategies,
        track_interval=1,
        trade_cmd_expire_seconds=120,
        cmd_cache=True,
        entrust_prop="limit",
        send_interval=0,
    ):
        """Track JoinQuant live-simulation strategies (multi-user, multi-strategy).

        :param users: easytrader user object(s); pass a list ``[]`` for several
        :param strategies: JoinQuant live-trading URL(s), e.g.
            ``https://www.joinquant.com/algorithm/live/index?backtestId=xxx``;
            pass a list ``[]`` for several
        :param track_interval: polling interval of the strategy feed, in seconds
        :param trade_cmd_expire_seconds: trade-command expiry, in seconds
        :param cmd_cache: load the cache of already-executed commands so a
            restart does not re-send past trades
        :param entrust_prop: order type, ``'limit'`` or ``'market'``
            (market orders only implemented for Yinhe)
        :param send_interval: delay between sent trades, default 0 s; raise it
            so a sell can fill before the matching buy needs the cash
        """
        users = self.warp_list(users)
        strategies = self.warp_list(strategies)
        if cmd_cache:
            self.load_expired_cmd_cache()
        self.start_trader_thread(
            users, trade_cmd_expire_seconds, entrust_prop, send_interval
        )
        workers = []
        for strategy_url in strategies:
            try:
                strategy_id = self.extract_strategy_id(strategy_url)
                strategy_name = self.extract_strategy_name(strategy_url)
            except Exception:
                # narrowed from a bare ``except:`` so Ctrl-C still interrupts;
                # log the offending URL, then let the caller see the error
                logger.error("抽取交易id和策略名失败, 无效的模拟交易url: %s", strategy_url)
                raise
            strategy_worker = Thread(
                target=self.track_strategy_worker,
                args=[strategy_id, strategy_name],
                kwargs={"interval": track_interval},
            )
            strategy_worker.start()
            workers.append(strategy_worker)
            logger.info("开始跟踪策略: %s", strategy_name)
        for worker in workers:
            worker.join()

    def extract_strategy_id(self, strategy_url):
        """Scrape the backtest id out of the strategy page's hidden form field."""
        rep = self.s.get(strategy_url)
        return self.re_search(r'name="backtest\[backtestId\]"\s+?value="(.*?)">', rep.content.decode("utf8"))

    def extract_strategy_name(self, strategy_url):
        """Scrape the human-readable strategy name out of the strategy page."""
        rep = self.s.get(strategy_url)
        return self.re_search(r'class="backtest_name".+?>(.*?)</span>', rep.content.decode("utf8"))

    def create_query_transaction_params(self, strategy):
        """Build query params for today's transaction list of *strategy*."""
        today_str = datetime.today().strftime("%Y-%m-%d")
        params = {"backtestId": strategy, "date": today_str, "ajax": 1}
        return params

    def extract_transactions(self, history):
        """Pull the raw transaction list out of the API response payload."""
        return history["data"]["transaction"]

    @staticmethod
    def stock_shuffle_to_prefix(stock):
        """Convert ``123456.XSHG``/``123456.XSHE`` codes to ``sh123456``/``sz123456``."""
        assert (
            len(stock) == 11
        ), "stock {} must like 123456.XSHG or 123456.XSHE".format(stock)
        code = stock[:6]
        if stock.find("XSHG") != -1:
            return "sh" + code
        if stock.find("XSHE") != -1:
            return "sz" + code
        raise TypeError("not valid stock code: {}".format(code))

    def project_transactions(self, transactions, **kwargs):
        """Normalize raw JoinQuant transactions in place to easytrader's format."""
        for transaction in transactions:
            # keep only the digits of the amount field
            transaction["amount"] = self.re_find(
                r"\d+", transaction["amount"], dtype=int
            )
            time_str = "{} {}".format(transaction["date"], transaction["time"])
            transaction["datetime"] = datetime.strptime(
                time_str, "%Y-%m-%d %H:%M:%S"
            )
            stock = self.re_find(r"\d{6}\.\w{4}", transaction["stock"])
            transaction["stock_code"] = self.stock_shuffle_to_prefix(stock)
            # JoinQuant labels buy transactions with the character "买"
            transaction["action"] = (
                "buy" if transaction["transaction"] == "买" else "sell"
            )
| [
"[email protected]"
] | |
ce13d280984b9699b791c9a9c7ffd9de648809c2 | 70fa6468c768d4ec9b4b14fc94fa785da557f1b5 | /lib/surface/ml/language/__init__.py | f511dd8316455437eb6664c2e962454fef136788 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | kylewuolle/google-cloud-sdk | d43286ef646aec053ecd7eb58566ab2075e04e76 | 75f09ebe779e99fdc3fd13b48621fe12bfaa11aa | refs/heads/master | 2020-04-20T22:10:41.774132 | 2019-01-26T09:29:26 | 2019-01-26T09:29:26 | 169,131,028 | 0 | 0 | NOASSERTION | 2019-02-04T19:04:40 | 2019-02-04T18:58:36 | Python | UTF-8 | Python | false | false | 1,001 | py | # -*- coding: utf-8 -*- #
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command group for ml language."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope import base
class Language(base.Group):
  # Command-group container only (no commands of its own). Note: calliope
  # renders the docstring below as the CLI help for `gcloud ml language`,
  # so any edit to it changes user-facing output.
  """Use the Google Cloud Natural Language API to analyze text.

  For more information, please see
  https://cloud.google.com/natural-language/.
  """
| [
"[email protected]"
] | |
6443cc941ca5cf0c8c0c59cd75851fbf86f4999e | 93cdde5d9d5d248b85b0c7cbf1f1ae76ad51b6b0 | /new_soft/main_Control/DataM/DataManager.py | 8bfa06d30f72ee1ac1a8083cdbe5f2f1814faf71 | [] | no_license | 841661831/perception | 539a476d649e9a4364ad01f3405a8db1c078640f | 31918e28fa3390390c4ea6208132d48164f95f73 | refs/heads/master | 2021-07-08T16:05:41.591419 | 2021-06-18T05:29:52 | 2021-06-18T05:29:52 | 246,080,065 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,024 | py | from Common.CommonDefine import *
from Common.CommonInterface import StructSysState, parse_sys_state, DataBase
class DataManager(object):
    """Data manager: subscribes to redis data channels and persists messages."""
    def __init__(self, data_bus_server, log, list_channel, expire_time):
        """
        :param data_bus_server: redis service (data bus)
        :param log: logging handle; its name is used as the module name
        :param list_channel: list of channels to subscribe to
        :param expire_time: expiry time for stored records
        """
        self.b_running = True
        self.__list_channel = list_channel
        self.__log = log
        self.__mod_name = log.name
        self.__data_bus_server = data_bus_server
        # subscribe to the data channels
        self.__list_sub = self.__data_bus_server.get_subscriber(self.__list_channel)
        # list of subscriber worker threads
        self.list_sub_th = []
        # database handle; records expire after expire_time
        self.dataBase = DataBase(strHost, expire_time)
        # subscribe to the system-state channel
        self.__sys_state = StructSysState()
        self.__sys_state_channel = 'sys_state'
        self.__sub_sys_state = self.__data_bus_server.get_subscriber([self.__sys_state_channel])
        self.__th_sub_state = threading.Thread(target=self.__th_sub_sys_state)
        self.__th_sub_state.daemon = True
        self.__th_sub_state.start()
        self.__log.warning('DataManager Started!')
    def __start_state_th(self):
        """Start one daemon subscriber thread per subscribed data channel."""
        for i in range(len(self.__list_sub)):
            th_tmp = threading.Thread(target=self.th_sub_data, args=(i,))
            th_tmp.daemon = True
            th_tmp.start()
            self.list_sub_th.append(th_tmp)
    def th_sub_data(self, index):
        """Worker loop: block on the index-th subscription and persist each message."""
        while self.__sys_state.b_mod_running:
            list_msg = self.__list_sub[index].parse_response()
            channel = str(list_msg[1], encoding="utf-8")
            dict_data = json.loads(list_msg[2])
            self.save2db(channel, dict_data)
    def save2db(self, channel, dict_data):
        """
        Persist one published message into the database, keyed by today's date.
        :param channel: channel the message arrived on ('track_data'/'event_data')
        :param dict_data: decoded message payload
        """
        date = datetime.datetime.now().strftime("%Y%m%d")
        if channel == 'track_data':
            n_frameId = dict_data['globalFrameNo']
            byteParticipant = dict_data['e1FrameParticipant']
            # 44 float64 fields per participant; only the first 14 are kept
            npParticipant = np.frombuffer(byteParticipant, dtype=np.float64).reshape((-1, 44))[:, 0:14]
            for i in range(dict_data['participantNum']):
                # column 9 carries the target id
                targetId = int(npParticipant[i, 9])
                byte_info = npParticipant[i, :].tobytes()
                self.dataBase.write(date, 'E1', n_frameId, byte_info, targetId=targetId)
        elif channel == 'event_data':
            b4_data = dict_data['b4Data']
            if len(b4_data) != 0:
                b4_eventId = b4_data['eventFrameId']
                self.dataBase.write(date, 'B4', b4_eventId, json.dumps(b4_data))
            b5_data = dict_data['b5Data']
            if len(b5_data) != 0:
                # B5 payloads bundle several events; store each one separately
                b5_eventList = b5_data['b5eventList']
                for i in range(b5_data['eventNum']):
                    b5_eventId = b5_eventList[i]['eventId']
                    self.dataBase.write(date, 'B5', b5_eventId, json.dumps(b5_eventList[i]))
    def __th_sub_sys_state(self):  # system-state subscriber loop
        """System-state subscription thread: mirror published state into __sys_state."""
        while self.__sys_state.b_mod_running:
            list_msg = self.__sub_sys_state[0].parse_response()
            bytes_msg = list_msg[2]
            list_state = parse_sys_state(bytes_msg)
            self.__sys_state.n_sys_state = list_state[0]
            # state 3 stops the module loops (presumably system shutdown — confirm)
            if self.__sys_state.n_sys_state == 3:
                self.__sys_state.b_mod_running = False
            self.__sys_state.b_act_state = list_state[1]
            self.__sys_state.b_sys_online = list_state[2]
            self.__sys_state.b_update_param = list_state[3]
            time.sleep(0.1)
        self.__log.warning('state-server th_sub exit...')
    def join(self):
        """Block until the state-subscription thread finishes (module shutdown)."""
        self.__th_sub_state.join()
        self.__log.warning('Data Manager exit...')
| [
"[email protected]"
] | |
c7f7c159d7df0921ad7844266ffeadf088accb21 | ccc83a52a4ba6afc0fe6ead2e8c9430312976036 | /src/models.py | 27cb35a5af8abf407360f19b9652c1a40cd0e238 | [] | no_license | akostyukov/gallery-fastapi | 2d3094585565a33d83082d3b001599f8c869dde3 | d31fc033e2f0e180a23241855cfb7cfd6e7d9413 | refs/heads/master | 2023-06-01T20:11:29.540648 | 2021-06-22T10:06:02 | 2021-06-22T10:06:02 | 354,082,418 | 0 | 0 | null | 2021-06-22T10:06:03 | 2021-04-02T17:05:10 | Python | UTF-8 | Python | false | false | 1,207 | py | from tortoise import Tortoise, fields
from tortoise.contrib.pydantic import pydantic_model_creator
from tortoise.models import Model
class User(Model):
    """Gallery user account."""
    email = fields.CharField(max_length=255, unique=True)  # login identity, unique
    username = fields.CharField(max_length=100)  # display name (uniqueness not enforced)
class Comment(Model):
    """A user's comment on an image."""
    author = fields.ForeignKeyField("models.User")
    text = fields.CharField(max_length=500)
    image = fields.ForeignKeyField("models.Image")  # image being commented on
class Image(Model):
    """An uploaded gallery image."""
    image = fields.CharField(max_length=500)  # stored as text; likely a path/URL — TODO confirm
    title = fields.CharField(max_length=100)
    author = fields.ForeignKeyField("models.User")
    likes = fields.ManyToManyField("models.User", related_name="likes")  # users who liked this image
# Resolve model relations early so the generated pydantic schemas include them.
Tortoise.init_models(["models"], "models")
User_Pydantic = pydantic_model_creator(User, name="User")
# *In variants drop read-only fields (ids, relations) for use as request bodies.
UserIn_Pydantic = pydantic_model_creator(User, name="UserIn", exclude_readonly=True)
Image_Pydantic = pydantic_model_creator(Image, name="Image")
ImageIn_Pydantic = pydantic_model_creator(Image, name="ImageIn", exclude_readonly=True)
Comment_Pydantic = pydantic_model_creator(Comment, name="Comment")
# author/image are set server-side from the request context, not by the client.
CommentIn_Pydantic = pydantic_model_creator(
    Comment, name="CommentIn", exclude_readonly=True, exclude=["author_id", "image_id"]
)
| [
"[email protected]"
] | |
ffa298f53b6d7c4dee1a0c63e9d7f3a7de101945 | a741660302b90e8951ee1ef9a4cfce9e19b2070e | /Enhancement.py | 9bd3be442e3d61dc31ee2b49d6441aeaf19189b7 | [] | no_license | asal97/Computer-Vision-Assignments | cca082b1b52e103b8dbca80dbf0621ecf73cf78e | 23d74bca4992f40457d512c37af9e082bbc99d60 | refs/heads/master | 2021-04-08T16:40:52.283697 | 2020-04-19T17:41:47 | 2020-04-19T17:41:47 | 248,790,983 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,642 | py | import cv2
import numpy as np
#read input image
image = cv2.imread("Capture.png")
# define sharpening kernel
# In image processing, a kernel, convolution matrix, or mask is a small matrix
# It is used for blurring, sharpening, embossing, edge detection, and more.
# This is accomplished by doing a convolution between a kernel and an image.
sharpeningKernel = np.array(([0, -1, 0],[-1, 5, -1],[0, -1, 0]), dtype="int")
# filter2D is used to perform the convolution.
# The third parameter (depth) is set to -1 which means the bit-depth of the output image is the
# same as the input image. So if the input image is of type CV_8UC3, the output image will also be of the same type
output = cv2.filter2D(image, -1, sharpeningKernel)
# after sharpening the image we want to increase the contrast so we use
# our output as the input for our next process
#convert to YCrCb color space
imageYcb = cv2.cvtColor(output, cv2.COLOR_BGR2YCrCb)
# split into channels
Y, C, B = cv2.split(imageYcb)
# histogram equalization which enhances the image
# Y is the luma component and CB and CR are the blue-difference and red-difference
# luma represents the brightness in an image (the "black-and-white" or achromatic portion of the image)
# Luma Controls adjust the brightness of the image and can increase or decrease the contrast of an image
Y = cv2.equalizeHist(Y)
# merge the channels to change the picture back to normal
imageYcb = cv2.merge([Y, C, B])
#convert back to BGR color space
result = cv2.cvtColor(imageYcb, cv2.COLOR_YCrCb2BGR)
cv2.imshow("image", image)
cv2.imshow("result", result)
cv2.imwrite('result.png', result)
cv2.waitKey(0)
| [
"[email protected]"
] | |
1ba6dbfa34e1a2a81ebe61854152f8c935ac8fca | 19f9c3c5ab80bcb03a5afafa7797b1bf76ff638c | /Python_Programming/chapter1/gui4.py | 725cfca909c7f0bd267e6b5fc9fc45d60aade1f3 | [] | no_license | komakim/python_training | 9c8edba0fc73e0396838bfd7f293742f6d2e3d44 | 2e6429f2b36302363cf7e07d3cab73a62bddd832 | refs/heads/master | 2021-07-25T09:20:14.811067 | 2017-11-06T14:55:31 | 2017-11-06T14:55:31 | 109,708,356 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 286 | py | from Tkinter import *
def greeting():
    # Button callback: writes to the console (stdout), not to the GUI
    # window. Python 2 print statement — this file targets Python 2
    # (it imports `Tkinter`, not `tkinter`).
    print 'Hello stdout world!'
# Build the widget tree: a Frame container holding a label and two buttons.
win = Frame()
win.pack()
Label(win, text='Hello container world!').pack(side=TOP)
# 'hello' runs greeting() (prints to stdout); 'Quit' ends the event loop.
Button(win,text='hello',command=greeting).pack(side=LEFT)
Button(win,text='Quit',command=win.quit).pack(side=RIGHT)
# Enter the Tk event loop; blocks until the window is closed or quit.
win.mainloop()
| [
"[email protected]"
] | |
7466e2d7e4f98cd5ced460881b5a36bc4eb09515 | e7d38a7ae5ba729dc60faedc7fb3a05455557734 | /public.py | 880bfc5dc33579b416241a24b87115482fc2b4a5 | [] | no_license | jagatsastry/crossie | 974811db42b363cc31e2c65cc8ff5b77ecc374d6 | 7865dbc75431514147a4bcafa3572756c60abe2e | refs/heads/master | 2021-01-17T15:58:32.434190 | 2011-05-14T04:46:42 | 2011-05-14T04:46:42 | 1,746,806 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 674 | py | from google.appengine.ext import webapp
from google.appengine.api import users
from google.appengine.ext.webapp.util import run_wsgi_app
import simplejson
class GetUserInfo(webapp.RequestHandler):
    """JSON endpoint describing the current App Engine user.

    Responds with ``{"user": <nickname>}`` when someone is signed in,
    or ``{"login": <login-url>}`` otherwise.
    """

    def get(self):
        """Handle GET: emit the user info (or a login URL) as JSON."""
        self.response.headers['Content-Type'] = 'application/json'
        user = users.get_current_user()
        if user is not None:
            payload = {'user': user.nickname()}
        else:
            payload = {'login': users.create_login_url("/")}
        self.response.out.write(simplejson.dumps(payload))
# Route table: the single public endpoint served by this module.
application = webapp.WSGIApplication([('/public/v1/myinfo', GetUserInfo)])
if __name__ == "__main__":
    run_wsgi_app(application)
| [
"[email protected]"
] | |
924874fa0493391185c2ec0708e615b41916834c | 1f5f8f95530003c6c66419519d78cb52d21f65c0 | /projects/golem_api/tests/golem_endpoint/get_golem_actions.py | 582257358bedfddeceba8c2dcedfd9930ec62a4d | [] | no_license | golemhq/golem-tests | c5d3ab04b1ea3755d8b812229feb60f513d039ac | dff8fd3a606c3d1ef8667aece6fddef8ac441230 | refs/heads/master | 2023-08-17T23:05:26.286718 | 2021-10-04T20:34:17 | 2021-10-04T20:34:17 | 105,579,436 | 4 | 1 | null | 2018-11-19T00:14:24 | 2017-10-02T20:05:55 | Python | UTF-8 | Python | false | false | 310 | py | from projects.golem_api.pages import golem_
def test(data):
    """The golem actions endpoint returns OK, a week-long cache header,
    and exactly one action named 'click'."""
    response = golem_.get_golem_actions()
    assert response.status_code == 200
    assert response.headers['Cache-Control'] == 'max-age=604800, public'
    matching = [item for item in response.json() if item['name'] == 'click']
    assert len(matching) == 1
| [
"[email protected]"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.