id | text | dataset_id
---|---|---
11350837
|
<gh_stars>0
# Title : Sort list by number of unique characters
# Author : <NAME>.
# Date : 16:10:2020
list1 = ['abc', 'ab', 'aaaaa', 'bababa', 1234, 1, 5,
'abcdeddd', 'aaaabbbbbcc', 'aaaaaabbbbbbb', 234, 567, 112211]
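# The key used below counts the distinct characters in str(element):
# e.g. 'aaaaa' and 5 both map to 1, while 'abc' maps to 3.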
def sort_by_unique_char(list_in):
return len(set(str(list_in)))
list1.sort(key=sort_by_unique_char)
print(list1)
|
StarcoderdataPython
|
12830489
|
<reponame>DavidMinarsch/ledger-api-py<gh_stars>10-100
import io
from fetchai.ledger.serialisation.integer import encode, decode
from .common import SerialisationUnitTest
class IntegerSerialisationTests(SerialisationUnitTest):
def test_small_unsigned_encode(self):
buffer = io.BytesIO()
encode(buffer, 4)
self.assertIsEncoded(buffer, '04')
def test_small_signed_encode(self):
buffer = io.BytesIO()
encode(buffer, -4)
self.assertIsEncoded(buffer, 'E4')
def test_1byte_unsigned_encode(self):
buffer = io.BytesIO()
encode(buffer, 0x80)
self.assertIsEncoded(buffer, 'C080')
def test_2byte_unsigned_encode(self):
buffer = io.BytesIO()
encode(buffer, 0xEDEF)
self.assertIsEncoded(buffer, 'C1EDEF')
def test_4byte_unsigned_encode(self):
buffer = io.BytesIO()
encode(buffer, 0xEDEFABCD)
self.assertIsEncoded(buffer, 'C2EDEFABCD')
def test_8byte_unsigned_encode(self):
buffer = io.BytesIO()
encode(buffer, 0xEDEFABCD01234567)
self.assertIsEncoded(buffer, 'C3EDEFABCD01234567')
def test_1byte_signed_encode(self):
buffer = io.BytesIO()
encode(buffer, -0x80)
self.assertIsEncoded(buffer, 'D080')
def test_2byte_signed_encode(self):
buffer = io.BytesIO()
encode(buffer, -0xEDEF)
self.assertIsEncoded(buffer, 'D1EDEF')
def test_4byte_signed_encode(self):
buffer = io.BytesIO()
encode(buffer, -0xEDEFABCD)
self.assertIsEncoded(buffer, 'D2EDEFABCD')
def test_8byte_signed_encode(self):
buffer = io.BytesIO()
encode(buffer, -0xEDEFABCD01234567)
self.assertIsEncoded(buffer, 'D3EDEFABCD01234567')
# Decode counter parts
def test_small_unsigned_decode(self):
encoded = self._from_hex('04')
self.assertEqual(decode(encoded), 4)
def test_small_signed_decode(self):
encoded = self._from_hex('E4')
self.assertEqual(decode(encoded), -4)
def test_1byte_unsigned_decode(self):
encoded = self._from_hex('C080')
self.assertEqual(decode(encoded), 0x80)
def test_2byte_unsigned_decode(self):
encoded = self._from_hex('C1EDEF')
self.assertEqual(decode(encoded), 0xEDEF)
def test_4byte_unsigned_decode(self):
encoded = self._from_hex('C2EDEFABCD')
self.assertEqual(decode(encoded), 0xEDEFABCD)
def test_8byte_unsigned_decode(self):
encoded = self._from_hex('C3EDEFABCD01234567')
self.assertEqual(decode(encoded), 0xEDEFABCD01234567)
def test_1byte_signed_decode(self):
encoded = self._from_hex('D080')
self.assertEqual(decode(encoded), -0x80)
def test_2byte_signed_decode(self):
encoded = self._from_hex('D1EDEF')
self.assertEqual(decode(encoded), -0xEDEF)
def test_4byte_signed_decode(self):
encoded = self._from_hex('D2EDEFABCD')
self.assertEqual(decode(encoded), -0xEDEFABCD)
def test_8byte_signed_decode(self):
encoded = self._from_hex('D3EDEFABCD01234567')
self.assertEqual(decode(encoded), -0xEDEFABCD01234567)
# Error cases
def test_invalid_large_integer(self):
too_big = 1 << 64
buffer = io.BytesIO()
with self.assertRaises(RuntimeError):
encode(buffer, too_big)
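# Note on the wire format exercised above (inferred from the expected hex strings, not
# from the library documentation): small non-negative integers encode as a single byte
# (4 -> '04') and small negatives as 0xEx (-4 -> 'E4'); larger magnitudes use a prefix
# byte, C0/C1/C2/C3 for unsigned payloads of 1/2/4/8 bytes and D0/D1/D2/D3 for their
# signed counterparts, and values needing more than 8 bytes (1 << 64) raise RuntimeError.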
|
StarcoderdataPython
|
5190325
|
<reponame>MaxTurchin/pycopy-lib
a = 1
# comment
b = 2
|
StarcoderdataPython
|
9747859
|
<filename>interfaces/interface_messages.py
from Utils import logs
import shutil
import traceback
import os
from services import config
import datetime,uuid
__DOCUMENT_TYPE = {
'document' : 'document',
'image' : 'image',
'video' : 'video',
'audio' : 'audio',
'ptt' : 'ptt',
'chat' : 'chat'
}
class IdMessage():
_idMessage = None
message = None
def __init__(self,message):
self.message = message
self._idMessage = dict({"id":None,"sendBy":None})
def get(self):
_id = self.message.id.split("_")
self._idMessage["id"] = _id[2]
self._idMessage["sendBy"] = "Agent" if self.message._js_obj['sender']['isMe'] else "Client"
return self._idMessage
class ContentMessage():
__DOCUMENT_TYPE = {
'document' : 'document',
'image' : 'image',
'video' : 'video',
'audio' : 'audio',
'ptt' : 'ptt',
'chat' : 'chat'
}
content = None
message = None
def __init__(self,message):
self.message = message
self.content = dict({
"content" : None,
"type" : "txt",
"caption" : "false"
})
def get(self):
if self.message.type not in self.__DOCUMENT_TYPE :
# MEDIA NOT SUPPORTED #
self.content["content"] = 'Contenido no soportado'
elif self.message.type != "chat" and self.message.type in self.__DOCUMENT_TYPE :
# SAVE MEDIA #
self.content["content"] = str( self.message.save_media(config.pathFiles,True) ).replace(config.pathFiles,"")
print("1---->"+self.content["content"] )
newName = uuid.uuid1().hex + self.content["content"]
os.rename(config.pathFiles+self.content["content"],config.pathFiles+newName)
self.content["content"] = newName
print("2---->"+self.content["content"] )
else :
# GET TEXT #
self.content["content"] = self.message.content
if self.message.type in self.__DOCUMENT_TYPE and self.message.type != "chat" :
# GET TYPE AND CAPTION#
self.content["type"] = self.message.type
self.content["caption"] = self.message.caption
return self.content
####################### getFormat(message,driver) ###################
# Desc : Gives format to a message #
# Params : message objWapi driver obj #
# Return : obj {chat,sendBy,message,type,caption} #
# Last Update : 30-05-19 #
# By : g4w4 #
######################################################################
def getFormat(message,driver):
try:
_id = IdMessage(message).get()
chat = message._js_obj.get('chat').get('id').get('_serialized')
chat = '521{}'.format(str(chat)[-15:len(str(chat))])
contentMessage = ContentMessage(message).get()
return {
"chat": chat,
"sendBy": _id["sendBy"],
"message": contentMessage["content"],
"type": contentMessage["type"],
"caption": contentMessage["caption"],
"akc": 1,
"date": message.timestamp.strftime("%Y-%m-%d %H:%M"),
"id": _id["id"],
"app": "whatsApp"
}
except Exception :
logs.logError('Error getFormat --> ',traceback.format_exc())
def getFormatText(message,chatId):
try:
return {
"chat": chatId,
"sendBy": "Agent",
"message": message,
"type": "txt",
"caption": "false",
"akc": 1,
"date": datetime.datetime.now().strftime("%Y-%m-%d %H:%M"),
"id": uuid.uuid1().hex,
"app": "whatsApp"
}
except Exception :
logs.logError('Error getFormatText --> ',traceback.format_exc())
def getFormatFile(message,chatId,typeFile,caption):
try:
return {
"chat": chatId,
"sendBy": "Agent",
"message": message,
"type": typeFile,
"caption": caption,
"akc": 1,
"date": datetime.datetime.now().strftime("%Y-%m-%d %H:%M"),
"id": uuid.uuid1().hex,
"app": "whatsApp"
}
except Exception :
logs.logError('Error getFormatFile --> ',traceback.format_exc())
def getLocation(message,driver):
_id = IdMessage(message).get()
chat = message._js_obj.get('chat').get('id').get('_serialized')
chat = '521{}'.format(str(chat)[-15:len(str(chat))])
return {
"chat": chat,
"sendBy": _id["sendBy"],
"message": "Ubicación",
"type": "location",
"caption": "false",
"lng": message._js_obj['lng'],
"lat": message._js_obj['lat'],
"akc": 1,
"date": message.timestamp.strftime("%Y-%m-%d %H:%M"),
"id": _id["id"],
"app": "whatsApp"
}
|
StarcoderdataPython
|
339662
|
import argparse
import sys
import tensorflow as tf
parser = argparse.ArgumentParser()
parser.add_argument('model', metavar='model', type=str, help='skipgram|cbow')
parser.add_argument('--version', metavar='version', type=str, help='mm.dd-hh:mm:ss')
args = parser.parse_args()
if args.model != 'skipgram' and args.model != 'cbow':
print('usage: python evaluate.py skipgram|cbow')
sys.exit(0)
if args.version:
model_path = 'models/{0}.{1}'.format(args.model, args.version)
else:
model_path = 'models/{0}'.format(args.model)
def read_dictionary():
with open('{0}/{1}.tsv'.format(model_path, args.model), 'r') as file:
words = file.read().split()
dictionary = {}
for (i, word) in enumerate(words):
dictionary[word] = i
reversed_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
return dictionary, reversed_dictionary
dictionary, reversed_dictionary = read_dictionary()
def get_nearest(embeddings, word=None, embedding=None):
if word != None:
word_embedding = tf.nn.embedding_lookup(embeddings, [dictionary.get(word, 0)])
else:
word_embedding = embedding
similarity = tf.matmul(word_embedding, embeddings, transpose_b=True)
sim = similarity.eval()
nearest = (-sim).argsort()[0]
return nearest[1:11]
with tf.Session() as sess:
saver = tf.train.import_meta_graph('{0}/{1}.ckpt.meta'.format(model_path, args.model))
saver.restore(sess, '{0}/{1}.ckpt'.format(model_path, args.model))
embeddings = tf.get_variable_scope().global_variables()[0]
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keepdims=True))
normalized_embeddings = embeddings / norm
print('Write search queries (type q to quit):')
query = input('query = ')
while query != 'q':
query = query.lower()
if dictionary.get(query, -1) != -1:
nearest = get_nearest(normalized_embeddings, word=query)
nearest_words = [reversed_dictionary[id] for id in nearest]
print('Nearest to {0}: {1}'.format(query, ', '.join(nearest_words)))
else:
print('unknown word')
query = input('query = ')
|
StarcoderdataPython
|
6630530
|
# Generated by Django 2.0.13 on 2020-09-03 13:19
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('main', '0005_auto_20200903_2215'),
]
operations = [
migrations.RemoveField(
model_name='temp',
name='date',
),
]
|
StarcoderdataPython
|
3511039
|
<gh_stars>0
#imports
from tkinter import *
from tkinter import ttk
import threading
import xlrd #module to read excel file
from decimal import * #decimal module for precise floating point calculation
import time #time module(used here for the 2 second wait)
running = True #Thread terminator
#data list and sum list
entries = [None] * 30 #list to hold the data read from excel in str data type
sumEntries = [None]*8 #list to hold the summed values in float type
master = Tk()
master.title("Excel Reader")
master.configure(background="#565f6f")
#master.wm_iconbitmap("favicon.ico")
master.resizable(0,0)
startButton = Button(master, text="START",bg="green",fg="white",width=20,font=("Helvetica",10))
startButton.grid(column=1, row=1)
btSep= Label(master,bg="#737373",width=40).grid(column=2,row=1)
stopButton = Button(master, text="STOP",bg="red",fg="black",width=20,font=("Helvetica",10))
stopButton.grid(column=3, row=1)
#Entry widgets to hold 30 values read from excel sheet
dispData = [None]*30
for i in range(30):
dispData[i] = Entry(master,bd=1,relief=GROOVE,fg="#737373")
dispData[i].grid(column=1,row=i+2,pady=2)
dispData[i].configure(highlightcolor="#000000",highlightthickness=0.2)
#end of data widgets
#mid column widget
sumLabel = Label(master,fg="#8fa9b9",bg="#565f6f",text="Maximum is",font=("Helvetica",10)).grid(column=2,row=16)
#end of mid column widget
#sum widget
dispSum = Entry(master,fg="#737373")
dispSum.grid(column=3,row=16)
dispSum.configure(highlightcolor="#000000",highlightthickness=2)
#end of sum widget
def readData():
try:
excelFile = xlrd.open_workbook('File.xls') #change the name & path of the excel file as per your requirement
wsheet = excelFile.sheet_by_name('Sheet1') #change the sheet name if you need to.
if wsheet.cell(8, 0).ctype != 2: #checks if the cell content is a number (value 2 means the content is a number. the values for text, date and other data can be obtained from xlrd documentation)
return(0) #if not a number, return 0
else:
value = Decimal(wsheet.cell(8, 0).value) # if number, assign the value in the cell to variable named value
return('%30f'%value)# return the value with 6 decimal digit precision
except:
return("N+") # if the cell is empty return a unique string N+
def processThread():
i=0
res_val = 0
while sumEntries[7]== None: # outer loop
while (entries[29] == None):# inner loop
for j in range(30): # loop to assign 30 values read from excel to list
res_val = readData()
if((res_val !="N+")and running): #check if cell is not empty
entries[j] = readData() #assign values to list.
dispData[j].insert(1,(entries[j]))#print list values
time.sleep(2)#wait for 2 seconds
else:
break
if(res_val =="N+"):#break loops if cell is empty
break
if entries[29] != None: # the summing part of the loop
floatEntries = map(float,entries) # convert string type list to float
sumEntries[i] = sum(floatEntries) # call the inbuilt sum() if list[29] is not empty
strSum = '%30f'%sumEntries[i]
dispSum.delete(0,END)
dispSum.insert(1,str(strSum))#print the sum list
i=i+1 #increment index for sum list
entries[29] = None #release the content from list[29] and make it none
elif res_val == "N+": # break the outer loop if cell is empty
break
t1_stop= threading.Event() #thread to process data
t1 = threading.Thread(target=processThread)
def start():
global running
running = True
dispSum.delete(0, END)
for j in range(30):
dispData[j].delete(0,END)
try:
t1.start()
except:
pass
def stop():
global running
running = False
startButton.configure(command = start)
stopButton.config(command = stop)
master.mainloop()
|
StarcoderdataPython
|
5006365
|
#!/usr/bin/python3
import socket,select
import urllib.parse
Host = '' # symbolic name meaning all available interfaces
Port =8989
fds={}
user={}
def server(host,port):
s=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,1)#allow port reuse
s.bind((Host,Port))
s.listen(15)
fds[s]=s
print("http proxy is listening...")
while 1:
try:
infds,outfds,err=select.select(fds,[],[])
for sock in infds:
if sock==s:
conn,addr=s.accept()
handle_connection(conn)
else:
data=b''
while 1:
buf=sock.recv(8129)
data+=buf
if not len(buf):
sock.close()
break
user[sock].sendall(data)
user[sock].close()
#-----------------------------#
#clean fds
fds.pop(sock)
user.pop(user[sock])
user.pop(sock)
#------------------------------#
except KeyboardInterrupt:
print("bye...")
break
pass
def getline(conn): # read one header line from conn; return it once \r\n is seen
line=''
while 1:
#print("read from client")
buf=conn.recv(1).decode("utf-8")
#print(buf)
if buf=='\r':
line += buf
#print("come interface")
buf=conn.recv(1).decode("utf-8")
#print("endl")
if buf=='\n':
#print("huiche")
line += buf
return line
pass
pass
else:
line += buf
pass
def get_headers(conn):
headers=''
while 1:
line=getline(conn)
#print(line)
if line is None:
break
if line =="\r\n":
break
else:
headers+=line
pass
return headers
pass
def parse_headers(raw_headers):
lines=raw_headers.split("\r\n")
request_line=lines[0].split(' ')
method=request_line[0]
full_path=request_line[1] # when a proxy is used the browser sends the absolute URL here, unlike the normal request line
version=request_line[2]
print("%s %s"%(method,full_path))
(scm,netloc,path,params,query,fragment)=urllib.parse.urlparse(full_path,"http")
i=netloc.split(':')
if len(i)==2:
address=i[0],int(i[1])
else:
address=i[0],80
return method,version,scm,address,path,params,query,fragment
pass
def handle_connection(conn):
req_headers=get_headers(conn)
if req_headers is None:
return
method,version,scm,address,path,params,query,fragment=parse_headers(req_headers)
path=urllib.parse.urlunparse(["","",path,params,query,""])
req_headers=' '.join([method,path,version])+"\r\n"+"\r\n".join(req_headers.split("\r\n")[1:])
#create socket
soc=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
print("connect",address)
soc.connect(address)
if req_headers.find("Connection")>=0:
req_headers = req_headers.replace("keep-alive","close")
else:
req_headers+="Connection:close\r\n"
req_headers+="\r\n"
#send request to real server!
soc.sendall(req_headers.encode("utf-8"))
#----------------------#
fds[soc]=soc
user[conn]=soc
user[soc]=conn
#---------------------#
if __name__ == '__main__':
server(Host,Port)
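# Usage sketch (not part of the original script): run this file, then point an HTTP
# client at the proxy, e.g. `curl -x http://127.0.0.1:8989 http://example.com/`.
# Only plain HTTP is forwarded; there is no CONNECT handling, so HTTPS will not work.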
|
StarcoderdataPython
|
3359758
|
import numpy as np
class KMedoids:
def __init__(self, n_clusters, max_iter=100):
self.n_clusters = n_clusters
self.max_iter = max_iter
self.idx_next_centroid = []
self.idx_centroid = []
self.centroid = []
self.labels_ = []
self.cost = 0
self.next_cost = 0
def init_centroid(self,data,n_clusters):
idx = np.sort(np.random.choice(len(data), n_clusters, replace=False))
return idx
def manhattan_dst(self,x,y):
distance = 0
for i in range(len(x)):
distance += abs(x[i]-y[i])
return distance
def get_cluster(self, n_cluster, instances, centroid, data):
distance = list(self.manhattan_dst(instances,data[centroid[i]]) for i in range(n_cluster))
return distance.index(min(distance))
def find_cluster_member(self,label,cluster):
indices = list(i for i, x in enumerate(label) if x == cluster)
return indices
def new_medoids(self,label,curr_medoid,n_clusters):
new_medoid = curr_medoid
change_med = np.random.choice(n_clusters,1,replace=False)
ran_med = np.random.choice(self.find_cluster_member(label,change_med),1,replace=False)
while (new_medoid[change_med[0]] == ran_med[0]):
ran_med = np.random.choice(self.find_cluster_member(label,change_med),1,replace=False)
new_medoid[change_med[0]] = ran_med[0]
return new_medoid
def count_cost(self,label,data,centroid):
cost = 0
for i in range(len(data)):
cost += self.manhattan_dst(data[i],data[centroid[label[i]]])
return cost
def fit(self,data):
self.idx_centroid = self.init_centroid(data,self.n_clusters)
self.labels_ = list(self.get_cluster(self.n_clusters,data[i],self.idx_centroid,data) for i in range(len(data)))
self.cost = self.count_cost(self.labels_,data,self.idx_centroid)
self.next_cost = self.cost
convergence = False
iteration = 0
while not convergence:
if (self.cost != self.next_cost):
self.cost = self.next_cost
self.idx_centroid = self.idx_next_centroid
self.idx_next_centroid = self.new_medoids(self.labels_,self.idx_centroid,self.n_clusters)
self.labels_ = list(self.get_cluster(self.n_clusters,data[i],self.idx_next_centroid,data) for i in range(len(data)))
self.next_cost = self.count_cost(self.labels_,data,self.idx_next_centroid)
iteration += 1
convergence = (self.cost <= self.next_cost or iteration >= self.max_iter)
self.labels_ = list(self.get_cluster(self.n_clusters,data[i],self.idx_next_centroid,data) for i in range(len(data)))
self.centroid = list(data[self.idx_next_centroid[i]] for i in range(self.n_clusters))
def predict(self,instances):
distance = list(self.manhattan_dst(instances,self.centroid[i]) for i in range(self.n_clusters))
return distance.index(min(distance))
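# Minimal usage sketch (illustrative addition, not part of the original module): cluster
# two well-separated random blobs. Assumes the data is a NumPy array of numeric rows.
if __name__ == "__main__":
    blob_a = np.random.rand(20, 3)
    blob_b = np.random.rand(20, 3) + 5.0  # shifted so the two groups are clearly separable
    data = np.vstack([blob_a, blob_b])
    km = KMedoids(n_clusters=2, max_iter=50)
    km.fit(data)
    print(km.labels_)           # cluster index assigned to each row
    print(km.predict(data[0]))  # cluster index for a single instance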
|
StarcoderdataPython
|
1604390
|
load("@bazel_tools//tools/build_defs/repo:utils.bzl", "maybe")
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
def gif_repository():
maybe(
http_archive,
name = "gif",
urls = ["https://downloads.sourceforge.net/project/giflib/giflib-5.2.1.tar.gz"],
strip_prefix = "giflib-5.2.1/",
build_file = "@third_party//gif:package.BUILD",
sha256 = "31da5562f44c5f15d63340a09a4fd62b48c45620cd302f77a6d9acf0077879bd",
)
|
StarcoderdataPython
|
5154335
|
<filename>emsapi/models/adi_ems_web_api_v2_dto_navigation_navigation_navaid_py3.py<gh_stars>0
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class AdiEmsWebApiV2DtoNavigationNavigationNavaid(Model):
"""Various pieces of information associated with a waypoint.
:param id: The unique identifier of the navaid.
:type id: int
:param callsign: The radio callsign of the navaid.
:type callsign: str
:param type: The navaid type.
:type type: str
:param country_code: The navaid's country code.
:type country_code: str
:param state_code: The navaid's state code.
:type state_code: int
:param name: The official name of the navaid.
:type name: str
:param frequency: The radio frequency of the navaid.
:type frequency: float
:param usage_code: The airspace structure in which the navaid is utilized
(e.g. high, low, terminal, etc.)
:type usage_code: str
:param channel: The navaid's radio channel.
:type channel: str
:param radio_class_code: The radio class code of the navaid (e.g.
low-power NDB, high-power NDB, etc)
:type radio_class_code: str
:param range: The effective range of the navaid in nautical miles.
:type range: float
:param latitude: The latitude of the navaid.
:type latitude: float
:param longitude: The longitude of the navaid.
:type longitude: float
:param elevation: The navaid's elevation.
:type elevation: float
:param magnetic_variance: The magnetic variation from true north at the
navaid.
:type magnetic_variance: float
:param dme_latitude: The latitude of the DME equipment colocated with the
navaid, if any.
:type dme_latitude: float
:param dme_longitude: The longitude of the DME equipment colocated with
the navaid, if any.
:type dme_longitude: float
:param dme_elevation: The elevation of the DME equipment colocated with
the navaid, if any.
:type dme_elevation: float
:param associated_airport: The airport code of the associated airport, if
any.
:type associated_airport: str
:param status: The status of the navaid (e.g. in service, out of service,
etc.)
:type status: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'int'},
'callsign': {'key': 'callsign', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'country_code': {'key': 'countryCode', 'type': 'str'},
'state_code': {'key': 'stateCode', 'type': 'int'},
'name': {'key': 'name', 'type': 'str'},
'frequency': {'key': 'frequency', 'type': 'float'},
'usage_code': {'key': 'usageCode', 'type': 'str'},
'channel': {'key': 'channel', 'type': 'str'},
'radio_class_code': {'key': 'radioClassCode', 'type': 'str'},
'range': {'key': 'range', 'type': 'float'},
'latitude': {'key': 'latitude', 'type': 'float'},
'longitude': {'key': 'longitude', 'type': 'float'},
'elevation': {'key': 'elevation', 'type': 'float'},
'magnetic_variance': {'key': 'magneticVariance', 'type': 'float'},
'dme_latitude': {'key': 'dmeLatitude', 'type': 'float'},
'dme_longitude': {'key': 'dmeLongitude', 'type': 'float'},
'dme_elevation': {'key': 'dmeElevation', 'type': 'float'},
'associated_airport': {'key': 'associatedAirport', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
}
def __init__(self, *, id: int=None, callsign: str=None, type: str=None, country_code: str=None, state_code: int=None, name: str=None, frequency: float=None, usage_code: str=None, channel: str=None, radio_class_code: str=None, range: float=None, latitude: float=None, longitude: float=None, elevation: float=None, magnetic_variance: float=None, dme_latitude: float=None, dme_longitude: float=None, dme_elevation: float=None, associated_airport: str=None, status: str=None, **kwargs) -> None:
super(AdiEmsWebApiV2DtoNavigationNavigationNavaid, self).__init__(**kwargs)
self.id = id
self.callsign = callsign
self.type = type
self.country_code = country_code
self.state_code = state_code
self.name = name
self.frequency = frequency
self.usage_code = usage_code
self.channel = channel
self.radio_class_code = radio_class_code
self.range = range
self.latitude = latitude
self.longitude = longitude
self.elevation = elevation
self.magnetic_variance = magnetic_variance
self.dme_latitude = dme_latitude
self.dme_longitude = dme_longitude
self.dme_elevation = dme_elevation
self.associated_airport = associated_airport
self.status = status
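# Illustrative construction only (field values are made up, not from a real EMS API response):
# example = AdiEmsWebApiV2DtoNavigationNavigationNavaid(
#     id=1, callsign='SEA', type='VORTAC', country_code='US', name='SEATTLE',
#     frequency=116.8, latitude=47.435, longitude=-122.309, elevation=354.0,
#     status='In service')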
|
StarcoderdataPython
|
11212018
|
import tensorflow as tf
from tensorflow.python.ops import tensor_array_ops, control_flow_ops
from avatar.relgan.utils.ops import *
def generator(x_real, temperature, vocab_size, batch_size, seq_len, gen_emb_dim, mem_slots, head_size, num_heads,
hidden_dim, start_token):
start_tokens = tf.constant([start_token] * batch_size, dtype=tf.int32)
# build LSTM unit
g_embeddings = tf.get_variable('g_emb', shape=[vocab_size, gen_emb_dim],
initializer=create_linear_initializer(vocab_size))
gen_mem = create_recurrent_unit(emb_dim=gen_emb_dim, hidden_dim=hidden_dim)
g_output_unit = create_lstm_output_unit(hidden_dim, vocab_size)
# Initial states
h0 = tf.zeros([batch_size, hidden_dim])
init_states = tf.stack([h0, h0])
# ---------- generate tokens and approximated one-hot results (Adversarial) ---------
gen_o = tensor_array_ops.TensorArray(dtype=tf.float32, size=seq_len, dynamic_size=False, infer_shape=True)
gen_x = tensor_array_ops.TensorArray(dtype=tf.int32, size=seq_len, dynamic_size=False, infer_shape=True)
gen_x_onehot_adv = tensor_array_ops.TensorArray(dtype=tf.float32, size=seq_len, dynamic_size=False,
infer_shape=True) # generator output (relaxed version of gen_x)
# the generator recurrent module used for adversarial training
def _gen_recurrence(i, x_t, h_tm1, gen_o, gen_x, gen_x_onehot_adv):
h_t = gen_mem(x_t, h_tm1) # hidden_memory_tuple
o_t = g_output_unit(h_t) # batch x vocab, logits not probs
gumbel_t = add_gumbel(o_t)
next_token = tf.stop_gradient(tf.argmax(gumbel_t, axis=1, output_type=tf.int32))
next_token_onehot = tf.one_hot(next_token, vocab_size, 1.0, 0.0)
x_onehot_appr = tf.nn.softmax(tf.multiply(gumbel_t, temperature)) # one-hot-like, [batch_size x vocab_size]
# x_tp1 = tf.matmul(x_onehot_appr, g_embeddings) # approximated embeddings, [batch_size x emb_dim]
x_tp1 = tf.nn.embedding_lookup(g_embeddings, next_token) # embeddings, [batch_size x emb_dim]
gen_o = gen_o.write(i, tf.reduce_sum(tf.multiply(next_token_onehot, x_onehot_appr), 1)) # [batch_size], prob
gen_x = gen_x.write(i, next_token) # indices, [batch_size]
gen_x_onehot_adv = gen_x_onehot_adv.write(i, x_onehot_appr)
return i + 1, x_tp1, h_t, gen_o, gen_x, gen_x_onehot_adv
# build a graph for outputting sequential tokens
_, _, _, gen_o, gen_x, gen_x_onehot_adv = control_flow_ops.while_loop(
cond=lambda i, _1, _2, _3, _4, _5: i < seq_len,
body=_gen_recurrence,
loop_vars=(tf.constant(0, dtype=tf.int32), tf.nn.embedding_lookup(g_embeddings, start_tokens),
init_states, gen_o, gen_x, gen_x_onehot_adv))
gen_o = tf.transpose(gen_o.stack(), perm=[1, 0]) # batch_size x seq_len
gen_x = tf.transpose(gen_x.stack(), perm=[1, 0]) # batch_size x seq_len
gen_x_onehot_adv = tf.transpose(gen_x_onehot_adv.stack(), perm=[1, 0, 2]) # batch_size x seq_len x vocab_size
# ----------- pre-training for generator -----------------
x_emb = tf.transpose(tf.nn.embedding_lookup(g_embeddings, x_real), perm=[1, 0, 2]) # seq_len x batch_size x emb_dim
g_predictions = tensor_array_ops.TensorArray(dtype=tf.float32, size=seq_len, dynamic_size=False, infer_shape=True)
ta_emb_x = tensor_array_ops.TensorArray(dtype=tf.float32, size=seq_len)
ta_emb_x = ta_emb_x.unstack(x_emb)
# the generator recurrent module used for pre-training
def _pretrain_recurrence(i, x_t, h_tm1, g_predictions):
h_t = gen_mem(x_t, h_tm1)
o_t = g_output_unit(h_t)
g_predictions = g_predictions.write(i, tf.nn.softmax(o_t)) # batch_size x vocab_size
x_tp1 = ta_emb_x.read(i)
return i + 1, x_tp1, h_t, g_predictions
# build a graph for outputting sequential tokens
_, _, _, g_predictions = control_flow_ops.while_loop(
cond=lambda i, _1, _2, _3: i < seq_len,
body=_pretrain_recurrence,
loop_vars=(tf.constant(0, dtype=tf.int32), tf.nn.embedding_lookup(g_embeddings, start_tokens),
init_states, g_predictions))
g_predictions = tf.transpose(g_predictions.stack(),
perm=[1, 0, 2]) # batch_size x seq_length x vocab_size
# pre-training loss
pretrain_loss = -tf.reduce_sum(
tf.one_hot(tf.to_int32(tf.reshape(x_real, [-1])), vocab_size, 1.0, 0.0) * tf.log(
tf.clip_by_value(tf.reshape(g_predictions, [-1, vocab_size]), 1e-20, 1.0)
)
) / (seq_len * batch_size)
return gen_x_onehot_adv, gen_x, pretrain_loss, gen_o
def discriminator(x_onehot, batch_size, seq_len, vocab_size, dis_emb_dim, num_rep, sn):
emb_dim_single = int(dis_emb_dim / num_rep)
assert isinstance(emb_dim_single, int) and emb_dim_single > 0
filter_sizes = [2, 3, 4, 5]
num_filters = [300, 300, 300, 300]
dropout_keep_prob = 0.75
d_embeddings = tf.get_variable('d_emb', shape=[vocab_size, dis_emb_dim],
initializer=create_linear_initializer(vocab_size))
input_x_re = tf.reshape(x_onehot, [-1, vocab_size])
emb_x_re = tf.matmul(input_x_re, d_embeddings)
emb_x = tf.reshape(emb_x_re, [batch_size, seq_len, dis_emb_dim]) # batch_size x seq_len x dis_emb_dim
emb_x_expanded = tf.expand_dims(emb_x, -1) # batch_size x seq_len x dis_emb_dim x 1
print('shape of emb_x_expanded: {}'.format(emb_x_expanded.get_shape().as_list()))
# Create a convolution + maxpool layer for each filter size
pooled_outputs = []
for filter_size, num_filter in zip(filter_sizes, num_filters):
conv = conv2d(emb_x_expanded, num_filter, k_h=filter_size, k_w=emb_dim_single,
d_h=1, d_w=emb_dim_single, sn=sn, stddev=None, padding='VALID',
scope="conv-%s" % filter_size) # batch_size x (seq_len-k_h+1) x num_rep x num_filter
out = tf.nn.relu(conv, name="relu")
pooled = tf.nn.max_pool(out, ksize=[1, seq_len - filter_size + 1, 1, 1],
strides=[1, 1, 1, 1], padding='VALID',
name="pool") # batch_size x 1 x num_rep x num_filter
pooled_outputs.append(pooled)
# Combine all the pooled features
num_filters_total = sum(num_filters)
h_pool = tf.concat(pooled_outputs, 3) # batch_size x 1 x num_rep x num_filters_total
print('shape of h_pool: {}'.format(h_pool.get_shape().as_list()))
h_pool_flat = tf.reshape(h_pool, [-1, num_filters_total])
# Add highway
h_highway = highway(h_pool_flat, h_pool_flat.get_shape()[1], 1, 0) # (batch_size*num_rep) x num_filters_total
# Add dropout
h_drop = tf.nn.dropout(h_highway, dropout_keep_prob, name='dropout')
# fc
fc_out = linear(h_drop, output_size=100, use_bias=True, sn=sn, scope='fc')
logits = linear(fc_out, output_size=1, use_bias=True, sn=sn, scope='logits')
logits = tf.squeeze(logits, -1) # batch_size*num_rep
return logits
def create_recurrent_unit(emb_dim, hidden_dim):
# Weights and Bias for input and hidden tensor
Wi = tf.get_variable('Wi', shape=[emb_dim, hidden_dim], initializer=create_linear_initializer(emb_dim))
Ui = tf.get_variable('Ui', shape=[hidden_dim, hidden_dim], initializer=create_linear_initializer(hidden_dim))
bi = tf.get_variable('bi', shape=[hidden_dim], initializer=create_bias_initializer())
Wf = tf.get_variable('Wf', shape=[emb_dim, hidden_dim], initializer=create_linear_initializer(emb_dim))
Uf = tf.get_variable('Uf', shape=[hidden_dim, hidden_dim], initializer=create_linear_initializer(hidden_dim))
bf = tf.get_variable('bf', shape=[hidden_dim], initializer=create_bias_initializer())
Wog = tf.get_variable('Wog', shape=[emb_dim, hidden_dim], initializer=create_linear_initializer(emb_dim))
Uog = tf.get_variable('Uog', shape=[hidden_dim, hidden_dim], initializer=create_linear_initializer(hidden_dim))
bog = tf.get_variable('bog', shape=[hidden_dim], initializer=create_bias_initializer())
Wc = tf.get_variable('Wc', shape=[emb_dim, hidden_dim], initializer=create_linear_initializer(emb_dim))
Uc = tf.get_variable('Uc', shape=[hidden_dim, hidden_dim], initializer=create_linear_initializer(hidden_dim))
bc = tf.get_variable('bc', shape=[hidden_dim], initializer=create_bias_initializer())
def unit(x, hidden_memory_tm1):
previous_hidden_state, c_prev = tf.unstack(hidden_memory_tm1)
# Input Gate
i = tf.sigmoid(
tf.matmul(x, Wi) +
tf.matmul(previous_hidden_state, Ui) + bi
)
# Forget Gate
f = tf.sigmoid(
tf.matmul(x, Wf) +
tf.matmul(previous_hidden_state, Uf) + bf
)
# Output Gate
o = tf.sigmoid(
tf.matmul(x, Wog) +
tf.matmul(previous_hidden_state, Uog) + bog
)
# New Memory Cell
c_ = tf.nn.tanh(
tf.matmul(x, Wc) +
tf.matmul(previous_hidden_state, Uc) + bc
)
# Final Memory cell
c = f * c_prev + i * c_
# Current Hidden state
current_hidden_state = o * tf.nn.tanh(c)
return tf.stack([current_hidden_state, c])
return unit
def create_lstm_output_unit(hidden_dim, vocab_size):
Wo = tf.get_variable('Wo', shape=[hidden_dim, vocab_size], initializer=create_linear_initializer(hidden_dim))
bo = tf.get_variable('bo', shape=[vocab_size], initializer=create_bias_initializer())
def unit(hidden_memory_tuple):
hidden_state, c_prev = tf.unstack(hidden_memory_tuple)
logits = tf.matmul(hidden_state, Wo) + bo
return logits
return unit
|
StarcoderdataPython
|
1730000
|
<gh_stars>0
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
# Name: test_myTLWE.py
# Purpose:
#
# Author: <NAME>
#
# Created: 2022 Mar. 24
# Copyright: (c) sakamoto 2022
# Licence: <your licence>
#-------------------------------------------------------------------------------
import unittest
from myTLWE import Torus
from myTLWE import TLWE
LOOP = 100000
N = 32
S = 2**-15
Q = 6
P = 11
class TestTLWE(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
TLWE.init(N, S, P)
def test_encryption(self):
for i in range(LOOP):
### Gen test vector
sk = TLWE.keyGen()
mu = TLWE.rand_plaintext()
### Enc and Dec
c = TLWE.enc(mu, sk)
res = TLWE.dec(c, sk)
self.assertEqual(mu, res)
print(f"PASS: {LOOP} test_encryption")
def test_HomAddition(self):
for i in range(LOOP):
### Gen test vector
sk = TLWE.keyGen()
mu1 = TLWE.rand_plaintext()
mu2 = TLWE.rand_plaintext()
### Enc and Dec
c1 = TLWE.enc(mu1, sk)
c2 = TLWE.enc(mu2, sk)
c3 = c1 + c2
res = TLWE.dec(c3, sk)
self.assertEqual(mu1 + mu2, res)
print(f"PASS: {LOOP} test_HomAddition")
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
3525928
|
<filename>sample/sample.py
# Once upon a time...
class Vampire:
def __init__(self, props):
self.location = props['location']
self.birthDate = props['birthDate']
self.deathDate = props['deathDate']
self.weaknesses = props['weaknesses']
def get_age(self):
return self.calc_age()
def calc_age(self):
return self.deathDate - self.birthDate
# ...there was a guy named Vlad
Dracula = Vampire({
'location': 'Transylvania',
'birthDate': 1428,
'deathDate': 1476,
'weaknesses': ['Sunlight', 'Garlic']
})
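# Quick check of the example above: Dracula's age is 1476 - 1428 = 48.
print(Dracula.get_age())  # 48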
|
StarcoderdataPython
|
3337133
|
<reponame>sgaoshang/seeker<filename>app/component/routes.py
from flask import render_template, flash, redirect, url_for, request, current_app, jsonify, session
from flask_login import current_user, login_required
from flask_babel import _, get_locale
from app import db
from app.models import Component
from app.component.forms import NewComponentForm
from app.component import bp
@bp.route('/new_component', methods=['GET', 'POST'])
@login_required
def new_component():
form = NewComponentForm()
if form.validate_on_submit():
component = form.component.data
if Component.query.filter_by(component=component).first():
flash(_('Component %(component)s already exist...', component=component))
else:
db_component = Component(component=component, search_date=form.search_date.data)
db.session.add(db_component)
current_user.last_component = component
db.session.commit()
session['component'] = component
session['components'].append(component)
# if session.get('new_case_id_list'):
if 'new_case_id_list' in session:
session.pop('new_case_id_list')
flash(_('Congratulations, new component has been added!'))
return redirect(url_for('index'))
return render_template('component/new_component.html', title=_('New Component'),
form=form)
|
StarcoderdataPython
|
5162067
|
<filename>PoseEstimation/Script/Main/body_part_classification.py<gh_stars>0
# -*- coding: utf-8 -*-
import time, cv2, os
import numpy as np
import multiprocessing as mp
from scipy import stats
import pandas as pd
from sklearn.externals import joblib
from sklearn.ensemble import RandomForestClassifier
from Modules.data_preparation import prepare_train_data, prepare_test_data, prepare_offsets
from Modules.utils import get_parameter, get_args, figure_disappears, bvh_exists, enum_train_files, enum_test_files
__all__ = ["BodyPartClassification"]
class BodyPartClassification:
def __init__(self, n_train_images=2000, n_target_pixels_per_image=2000, n_offsets=500, n_sep=1):
self.n_target_pixels_per_image = n_target_pixels_per_image
self.n_offsets = n_offsets
self.train_setting_str = "_" + str(n_train_images)
self.test_setting_str = "_" + str(n_train_images)
self.n_sep = n_sep
self.compression_type = "gzip"
self.offsets = None
self.rf = []
self.part_labels = np.array([(63,0,0), (0,63,0), (255,0,0), (127,0,63), (127,255,0), (191,255,191), (255,255,191), (127,255,127), (191,191,191), (63,127,0),
(0,191,63), (255,255,0), (255,191,0), (0,255,255), (0,191,255), (127,63,0), (0,63,127), (255,63,255), (63,255,255), (255,63,0),
(0,63,255), (127,63,255), (127,63,63), (63,127,255), (255,63,63), (63,0,63), (63,0,127), (255,127,127), (63,255,63), (191,127,63),
(63,63,0), (255,255,255), (0,0,0)])
def train(self, train_filenames):
n_train_images = train_filenames.shape[0]
bpc_path = "/".join(train_filenames[0].split("/")[:-3]) + "/"
intermediate_path = bpc_path + "Intermediate/"
evaluation_path = bpc_path + "Evaluation/"
offset_path = intermediate_path + "offsets.csv"
pkl_path = intermediate_path + "pkl/RF" + self.train_setting_str + "_not_balanced.gz"
fitting_time_path = "%strain_time_%d" % (evaluation_path, n_train_images)
self.offsets = prepare_offsets(offset_path, self.n_offsets)
if os.path.exists(pkl_path):
print("Loading Random Forest...")
self.rf = joblib.load(pkl_path)
#self.rf = None
else:
fitting_time = 0
self.rf = []
# When n_sep > 1, split the training data to reduce memory consumption
stride = int(n_train_images / self.n_sep)
n_rem_estimators = 10
n_rem_sep = self.n_sep
n_jobs = int(mp.cpu_count() / 2)
for i in range(0, n_train_images, stride):
features, labels, sample_weight = \
prepare_train_data(train_filenames[i: min(i+stride, n_train_images)],
self.offsets, self.n_target_pixels_per_image, self.compression_type)
print("Training Random Forest...")
n_estimators = int(n_rem_estimators / n_rem_sep)
n_rem_estimators -= n_estimators
n_rem_sep -= 1
rf = RandomForestClassifier(n_estimators=n_estimators, random_state=1, max_depth=17,
class_weight=None, criterion="entropy", n_jobs=n_jobs)
#rf = RandomForestClassifier(n_estimators=n_estimators, random_state=1, max_depth=17,
# class_weight="balanced", criterion="entropy", n_jobs=mp.cpu_count())
fit_start = time.time()
rf.fit(features, np.ravel(labels), sample_weight)
fit_end = time.time()
fitting_time += fit_end - fit_start
print("Took %fsec for fitting random forest." % (fit_end - fit_start))
del features, labels, sample_weight
self.rf.append(rf)
print("Saving Random Forest...")
tmp = time.time()
joblib.dump(self.rf, pkl_path, compress=3)
print("Took %fsec for saving random forest." % (time.time() - tmp))
pd.DataFrame([fitting_time]).to_csv(fitting_time_path, header=False, index=False, mode='a')
def predict(self, test_filename, save=True):
bpc_path = "/".join(test_filename.split("/")[:-3]) + "/"
intermediate_path = bpc_path + "Intermediate/"
out_path = bpc_path + "Output/"
n_part_labels = self.part_labels.shape[0] - 1
test_filename_id = "/".join(test_filename.split("/")[-2:])
test_feature_path = intermediate_path + test_filename_id + "_features.gz"
target_pixels_path = intermediate_path + test_filename_id + "_target_pixels.gz"
test_BPC_image_path = out_path + test_filename_id + self.test_setting_str + "_nb_BPC.png"
test_BPC_proba_path = out_path + test_filename_id + self.test_setting_str + "_nb_BPC_proba.gz"
if os.path.exists(test_BPC_proba_path) and os.path.exists(test_BPC_image_path):
return None, None, None
features, image_shape, target_pixels = prepare_test_data(test_filename, test_feature_path, target_pixels_path,
self.offsets, self.compression_type)
height, width = image_shape
test_predict = np.ones((height, width, self.n_sep), dtype=np.uint8) * 31
test_predict_proba = np.zeros((height, width, n_part_labels))
test_predict_proba[:, :, 31] = 1
test_predict_proba[target_pixels[:, 0], target_pixels[:, 1], 31] = 0
# When n_sep > 1, process in chunks to reduce memory consumption
print("Predicting test data label...")
tmp = time.time()
for s, rf in enumerate(self.rf):
tmp_predicts = rf.predict(features)
tmp_predict_probas = rf.predict_proba(features)
for i, target_pixel in enumerate(target_pixels):
test_predict[target_pixel[0], target_pixel[1], s] = tmp_predicts[i]
test_predict_proba[target_pixel[0], target_pixel[1], :] += tmp_predict_probas[i, :]
print("Took %fsec for predict." % (time.time() - tmp))
test_predict_proba /= self.n_sep
# Render the classification result
predict_px = np.ones((image_shape[0], image_shape[1], 3), dtype=np.uint8) * 255
for v, h in target_pixels:
predict_px[v, h, :] = self.part_labels[int(stats.mode(test_predict[v, h, :])[0])]
if save:
cv2.imwrite(test_BPC_image_path, predict_px[:, :, ::-1])
# Save the class probability distribution of the result to a file
test_predict_proba = test_predict_proba.reshape((height * width, n_part_labels))
if save:
pd.DataFrame(test_predict_proba).to_csv(test_BPC_proba_path, compression=self.compression_type, header=False, index=False)
return predict_px, test_predict_proba, target_pixels
def video_predict(self, test_filename):
bpc_path = "/".join(test_filename.split("/")[:-3]) + "/"
intermediate_path = bpc_path + "Intermediate/"
out_path = bpc_path + "Output/"
n_part_labels = self.part_labels.shape[0] - 1
test_filename_id = "/".join(test_filename.split("/")[-2:])
print(test_filename_id)
test_feature_path = intermediate_path + test_filename_id + "_features.gz"
target_pixels_path = intermediate_path + test_filename_id + "_target_pixels.gz"
test_BPC_video_path = out_path + test_filename_id + self.test_setting_str + "_BPC.mov"
test_BPC_proba_path = out_path + test_filename_id + self.test_setting_str + "_BPC_proba.gz"
features, video_shape, target_pixels = prepare_test_data(test_filename, test_feature_path, target_pixels_path,
self.offsets, self.compression_type)
n_frames, height, width = video_shape
test_predict = np.ones((n_frames, height, width, self.n_sep), dtype=np.uint8) * 31
test_predict_proba = np.zeros((n_frames, height, width, n_part_labels))
test_predict_proba[:, :, :, 31] = 1
for f, v, h in target_pixels:
test_predict_proba[f, v, h, 31] = 0
# When n_sep > 1, process in chunks to reduce memory consumption
for s in range(self.n_sep):
rf = self.rf[s]
print("Predicting test data label...")
rf.n_jobs = 1
tmp_predicts = rf.predict(features)
tmp_predict_probas = rf.predict_proba(features)
for i, target_pixel in enumerate(target_pixels):
f, v, h = target_pixel
test_predict[f, v, h, s] = tmp_predicts[i]
test_predict_proba[f, v, h, :] += tmp_predict_probas[i, :]
test_predict_proba /= self.n_sep
# Render the classification result
predict_px = np.ones((n_frames, height, width, 3), dtype=np.uint8) * 255
tmp = -1
for f, v, h in target_pixels:
if tmp < f:
tmp = f
print("frame%d" % f)
predict_px[f, v, h, :] = self.part_labels[int(stats.mode(test_predict[f, v, h, :])[0])]
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
predict_out = cv2.VideoWriter(test_BPC_video_path, fourcc, 30.0, (width, height))
for frame_px in predict_px[:, :, :, ::-1]:
predict_out.write(frame_px)
# Save the class probability distribution of the result to a file
test_predict_proba = test_predict_proba.reshape((n_frames * height * width, n_part_labels))
pd.DataFrame(test_predict_proba).to_csv(test_BPC_proba_path, compression=self.compression_type, header=False, index=False)
return predict_px, test_predict_proba, target_pixels
def run_bpc(bpc_model=BodyPartClassification):
args = get_args()
bpc_args = {"n_sep": args.n_sep, "n_train_images": args.n_train_images, }
n_train_images = args.n_train_images
n_test_images = args.n_test_images
full_rotation = args.full_rotation
if bpc_model is not BodyPartClassification:
bpc_args["discr_setting_type"] = args.discr_setting_type
data_path = args.data_path
train_filenames = enum_train_files(data_path, n_train_images, bpc_model, full_rotation)
if bpc_model is not None:
print("====%s====" % bpc_model.__name__)
bpc = bpc_model(**bpc_args)
else:
raise ValueError
bpc.train(train_filenames)
test_filenames = enum_test_files(data_path, args.test_path, n_test_images)
if "CapturedVideos" in args.test_path:
for i, test_filename in enumerate(test_filenames):
test_filename_id = "/".join(test_filename.split("/")[-2:])
print("%d: %s" % (i, test_filename_id))
_, _, _ = bpc.video_predict(test_filename)
elif "CapturedImages" in args.test_path or "SyntheticImages" in args.test_path:
for i, test_filename in enumerate(test_filenames):
test_filename_id = "/".join(test_filename.split("/")[-2:])
print("%d: %s" % (i, test_filename_id))
_, _, _ = bpc.predict(test_filename)
else:
raise ValueError("Invalid test path.")
if __name__ == "__main__":
run_bpc(BodyPartClassification)
|
StarcoderdataPython
|
182420
|
from base64 import b64encode
from io import BytesIO, StringIO
import pytest
from _pytest.monkeypatch import MonkeyPatch
from sutta_publisher.shared import github_handler
def test_generate_request_headers(bot_api_key: str) -> None:
header = github_handler.__get_request_headers(bot_api_key)
assert header["Authorization"] == f"token {bot_api_key}"
def test_generate_request_body(
monkeypatch: MonkeyPatch, file_like_edition: BytesIO, edition_path_in_repo: str, repo_url: str
) -> None:
monkeypatch.setattr(github_handler, "__get_file_sha", lambda *args: "someshanumber")
body = github_handler.__get_request_body(file_like_edition, edition_path_in_repo, repo_url)
file_like_edition.seek(0)
assert body["message"] == f"Uploading {edition_path_in_repo}"
assert body["content"] == b64encode(file_like_edition.read()).decode("ascii")
assert body["sha"] == "someshanumber"
def test_raise_attribute_error(monkeypatch: MonkeyPatch, edition_path_in_repo: str, repo_url: str) -> None:
monkeypatch.setattr(github_handler, "__get_file_sha", lambda *args: "someshanumber")
file_content = "file_content"
with pytest.raises(AttributeError):
github_handler.__get_request_body(file_content, edition_path_in_repo, repo_url)
def test_raise_type_error(monkeypatch: MonkeyPatch, edition_path_in_repo: str, repo_url: str) -> None:
monkeypatch.setattr(github_handler, "__get_file_sha", lambda *args: "someshanumber")
file_content = StringIO("file_content")
with pytest.raises(TypeError):
github_handler.__get_request_body(file_content, edition_path_in_repo, repo_url)
|
StarcoderdataPython
|
9612719
|
<filename>pyABC/Modified/visualization/walltime.py
"""Walltime plots"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.axes
from matplotlib.ticker import MaxNLocator
import datetime
from typing import List, Union
from ..storage import History
from .util import to_lists, get_labels
SECOND = 's'
MINUTE = 'm'
HOUR = 'h'
DAY = 'd'
TIME_UNITS = [SECOND, MINUTE, HOUR, DAY]
def plot_total_walltime(
histories: Union[List[History], History],
labels: Union[List, str] = None,
unit: str = 's',
rotation: int = 0,
title: str = "Total walltimes",
size: tuple = None,
ax: mpl.axes.Axes = None) -> mpl.axes.Axes:
"""Plot total walltimes, for each history one single-color bar.
Parameters
----------
histories:
The histories to plot from. History ids must be set correctly.
labels:
Labels corresponding to the histories. If None are provided,
indices are used as labels.
unit:
Time unit to use ('s', 'm', 'h', 'd' as seconds, minutes, hours, days).
rotation:
Rotation to apply to the plot's x tick labels. For longer labels,
a tilting of 45 or even 90 can be preferable.
title:
Title for the plot.
size: tuple of float, optional
The size of the plot in inches.
ax: matplotlib.axes.Axes, optional
The axis object to use.
Returns
-------
ax: Axis of the generated plot.
"""
# preprocess input
histories = to_lists(histories)
labels = get_labels(labels, len(histories))
n_run = len(histories)
# check time unit
if unit not in TIME_UNITS:
raise AssertionError(f"`unit` must be in {TIME_UNITS}")
# create figure
if ax is None:
fig, ax = plt.subplots()
else:
fig = ax.get_figure()
# extract total walltimes
walltimes = []
for h in histories:
abc = h.get_abc()
walltimes.append((abc.end_time - abc.start_time).total_seconds())
walltimes = np.asarray(walltimes)
# apply time unit
if unit == MINUTE:
walltimes /= 60
elif unit == HOUR:
walltimes /= (60*60)
elif unit == DAY:
walltimes /= (60*60*24)
# plot bars
ax.bar(x=np.arange(n_run), height=walltimes, label=labels)
# prettify plot
ax.set_xticks(np.arange(n_run))
ax.set_xticklabels(labels, rotation=rotation)
ax.set_title(title)
ax.set_xlabel("Run")
ax.set_ylabel(f"Time [{unit}]")
if size is not None:
fig.set_size_inches(size)
fig.tight_layout()
return ax
def plot_walltime(
histories: Union[List[History], History],
labels: Union[List, str] = None,
show_calibration: bool = None,
unit: str = 's',
rotation: int = 0,
title: str = "Walltime by generation",
size: tuple = None,
ax: mpl.axes.Axes = None) -> mpl.axes.Axes:
"""Plot walltimes, with different colors indicating different iterations.
Parameters
----------
histories:
The histories to plot from. History ids must be set correctly.
labels:
Labels corresponding to the histories. If None are provided,
indices are used as labels.
show_calibration:
Whether to show the calibration iteration (-1). Defaults to whether
there are samples in the calibration iteration.
unit:
Time unit to use ('s', 'm', 'h', 'd' as seconds, minutes, hours, days).
rotation:
Rotation to apply to the plot's x tick labels. For longer labels,
a tilting of 45 or even 90 can be preferable.
title:
Title for the plot.
size: tuple of float, optional
The size of the plot in inches.
ax: matplotlib.axes.Axes, optional
The axis object to use.
Returns
-------
ax: Axis of the generated plot.
"""
# preprocess input
histories = to_lists(histories)
# show calibration if that makes sense
if show_calibration is None:
show_calibration = any(
h.get_all_populations().samples[0] > 0 for h in histories)
# extract start times and end times
start_times = []
end_times = []
for h in histories:
# start time
start_times.append(h.get_abc().start_time)
# end times
end_times.append(h.get_all_populations().population_end_time)
return plot_walltime_lowlevel(
end_times=end_times, start_times=start_times, labels=labels,
show_calibration=show_calibration, unit=unit, rotation=rotation,
title=title, size=size, ax=ax)
def plot_walltime_lowlevel(
end_times: List,
start_times: Union[List, None] = None,
labels: Union[List, str] = None,
show_calibration: bool = None,
unit: str = 's',
rotation: int = 0,
title: str = "Walltime by generation",
size: tuple = None,
ax: mpl.axes.Axes = None) -> mpl.axes.Axes:
"""Low-level access to `plot_walltime`.
Directly define `end_times` and `start_times`."""
# preprocess input
end_times = to_lists(end_times)
labels = get_labels(labels, len(end_times))
n_run = len(end_times)
# check start times
if start_times is None:
if show_calibration:
raise AssertionError(
"To plot the calibration iteration, start times are needed.")
# fill in dummy times which will not be used anyhow
start_times = [datetime.datetime.now() for _ in range(n_run)]
# check time unit
if unit not in TIME_UNITS:
raise AssertionError(f"`unit` must be in {TIME_UNITS}")
# create figure
if ax is None:
fig, ax = plt.subplots()
else:
fig = ax.get_figure()
# extract relative walltimes
walltimes = []
for start_t, end_ts in zip(start_times, end_times):
times = [start_t, *end_ts]
# compute stacked differences
diffs = [end - start for start, end in zip(times[:-1], times[1:])]
# as seconds
diffs = [diff.total_seconds() for diff in diffs]
# append
walltimes.append(diffs)
walltimes = np.asarray(walltimes)
# create matrix
n_pop = max(len(wt) for wt in walltimes)
matrix = np.zeros((n_pop, n_run))
for i_run, wt in enumerate(walltimes):
matrix[:len(wt), i_run] = wt
if not show_calibration:
matrix = matrix[1:, :]
# apply time unit
if unit == MINUTE:
matrix /= 60
elif unit == HOUR:
matrix /= (60*60)
elif unit == DAY:
matrix /= (60*60*24)
# plot bars
for i_pop in reversed(range(matrix.shape[0])):
pop_ix = i_pop - 1
if not show_calibration:
pop_ix = i_pop
ax.bar(x=np.arange(n_run),
height=matrix[i_pop, :],
bottom=np.sum(matrix[:i_pop, :], axis=0),
label=f"Generation {pop_ix}")
# prettify plot
ax.set_xticks(np.arange(n_run))
ax.set_xticklabels(labels, rotation=rotation)
ax.set_title(title)
ax.set_xlabel("Run")
ax.set_ylabel(f"Time [{unit}]")
ax.legend()
if size is not None:
fig.set_size_inches(size)
fig.tight_layout()
return ax
def plot_eps_walltime(
histories: Union[List[History], History],
labels: Union[List, str] = None,
unit: str = 's',
xscale: str = 'linear',
yscale: str = 'log',
title: str = "Epsilon over walltime",
size: tuple = None,
ax: mpl.axes.Axes = None) -> mpl.axes.Axes:
"""Plot epsilon values (y-axis) over the walltime (x-axis), iterating over
the generations.
Parameters
----------
histories:
The histories to plot from. History ids must be set correctly.
labels:
Labels corresponding to the histories. If None are provided,
indices are used as labels.
unit:
Time unit to use ('s', 'm', 'h', 'd' as seconds, minutes, hours, days).
xscale:
Scale of the x-axis. Use matplotlib's notation.
yscale:
Scale of the y-axis. Use matplotlib's notation.
title:
Title for the plot.
size: tuple of float, optional
The size of the plot in inches.
ax: matplotlib.axes.Axes, optional
The axis object to use.
Returns
-------
ax: Axis of the generated plot.
"""
# preprocess input
histories = to_lists(histories)
# extract end times and epsilons
end_times = []
eps = []
for h in histories:
# end times
end_times.append(h.get_all_populations().population_end_time)
eps.append(h.get_all_populations().epsilon)
return plot_eps_walltime_lowlevel(
end_times=end_times, eps=eps, labels=labels, unit=unit,
xscale=xscale, yscale=yscale, title=title, size=size, ax=ax)
def plot_eps_walltime_lowlevel(
end_times: List,
eps: List,
labels: Union[List, str] = None,
unit: str = 's',
xscale: str = 'linear',
yscale: str = 'log',
title: str = "Epsilon over walltime",
size: tuple = None,
ax: mpl.axes.Axes = None) -> mpl.axes.Axes:
"""Low-level access to `plot_eps_walltime`.
Directly define `end_times` and `eps`. Note that both should be arrays of
the same length and at the beginning include a value for the calibration
iteration. This is just what `pyabc.History.get_all_populations()` returns.
The first time is used as the base time; walltimes are plotted as differences from it.
The first epsilon is ignored.
"""
# preprocess input
end_times = to_lists(end_times)
labels = get_labels(labels, len(end_times))
n_run = len(end_times)
# check time unit
if unit not in TIME_UNITS:
raise AssertionError(f"`unit` must be in {TIME_UNITS}")
# create figure
if ax is None:
fig, ax = plt.subplots()
else:
fig = ax.get_figure()
# extract relative walltimes
walltimes = []
for end_ts in end_times:
# compute differences to base
diffs = end_ts[1:] - end_ts[0]
# as seconds
diffs = [diff.total_seconds() for diff in diffs]
# append
walltimes.append(diffs)
# disregard calibration epsilon (inf)
eps = [ep[1:] for ep in eps]
for wt, ep, label in zip(walltimes, eps, labels):
wt = np.asarray(wt)
# apply time unit
if unit == MINUTE:
wt /= 60
elif unit == HOUR:
wt /= (60 * 60)
elif unit == DAY:
wt /= (60 * 60 * 24)
# plot
ax.plot(wt, ep, label=label, marker='o')
# prettify plot
if n_run > 1:
ax.legend()
ax.set_title(title)
ax.set_xlabel(f"Time [{unit}]")
ax.set_ylabel("Epsilon")
ax.set_xscale(xscale)
ax.set_yscale(yscale)
# enforce integer ticks
ax.xaxis.set_major_locator(MaxNLocator(integer=True))
if size is not None:
fig.set_size_inches(size)
fig.tight_layout()
return ax
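# Minimal demo of the low-level entry point above (synthetic timestamps rather than a real
# pyabc.History; assumes `to_lists` passes an explicit list through unchanged):
if __name__ == "__main__":
    base = datetime.datetime(2022, 1, 1)
    demo_end_times = [np.array([base + datetime.timedelta(minutes=10 * g)
                                for g in range(5)], dtype=object)]
    demo_eps = [np.array([np.inf, 8.0, 4.0, 2.0, 1.0])]  # calibration entry is ignored
    plot_eps_walltime_lowlevel(end_times=demo_end_times, eps=demo_eps,
                               labels=["demo run"], unit=MINUTE)
    plt.show()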
|
StarcoderdataPython
|
9772607
|
<filename>PyObjCTest/test_nsdictionary.py<gh_stars>0
import types
import objc
import Foundation
from PyObjCTest.testhelper import PyObjC_TestClass3
from PyObjCTools.TestSupport import TestCase, min_os_level
class TestNSDictionarySubclassing(TestCase):
# These tests seem to be specific to macOS
def testExceptionInInit(self):
if objc.platform != "MACOSX":
return
class DictTestExceptionClass(Foundation.NSDictionary):
pass
# Don't use self.assertRaises here, we once had a bug that
# causes this to fail, while the assertRaises version would
# (probably) have worked.
import warnings
warnings.filterwarnings("ignore", category=objc.UninitializedDeallocWarning)
try:
try:
_ = DictTestExceptionClass.alloc().initWithDictionary_({})
self.fail()
except ValueError:
pass
finally:
del warnings.filters[0]
def testAnotherExceptionInInit(self):
if objc.platform != "MACOSX":
return
class DictTestExceptionClass2(Foundation.NSDictionary):
def initWithObjects_forKeys_count_(self, o, k, c):
return objc.super(
DictTestExceptionClass2, self
).initWithObjects_forKeys_count_(o, k, c)
import warnings
warnings.filterwarnings("ignore", category=objc.UninitializedDeallocWarning)
try:
try:
_ = DictTestExceptionClass2.alloc().initWithDictionary_({})
self.fail()
except ValueError:
pass
finally:
del warnings.filters[0]
def testExceptionInInitClsMeth(self):
if objc.platform != "MACOSX":
return
class DictTestExceptionClass3(Foundation.NSDictionary):
def initWithObjects_forKeys_count_(self, o, k, c):
return objc.super(
DictTestExceptionClass3, self
).initWithObjects_forKeys_count_(o, k, c)
try:
_ = DictTestExceptionClass3.dictionaryWithDictionary_({})
self.fail()
except ValueError:
pass
class TestNSDictionaryInteraction(TestCase):
def testMethods(self):
for nm in dir(dict):
if nm.startswith("__"):
continue
if isinstance(
getattr(dict, nm), (types.BuiltinFunctionType, types.FunctionType)
):
# Skip class methods, that needs more work in the core
continue
self.assertTrue(
hasattr(Foundation.NSMutableDictionary, nm),
"NSMutableDictionary has no method '%s'" % (nm,),
)
def testRepeatedAllocInit(self):
for _ in range(1, 1000):
_ = Foundation.NSDictionary.alloc().init()
def testBasicInteraction(self):
d = Foundation.NSMutableDictionary.dictionary()
d[b"a".decode("ascii")] = b"foo".decode("ascii")
d[b"b".decode("ascii")] = b"bar".decode("ascii")
self.assertEqual(
d[b"a".decode("ascii")],
b"foo".decode("ascii"),
"Failed to retrieve the same thing that was put into the dict.",
)
try:
d[b"c".decode("ascii")]
self.fail("Should have raised...")
except KeyError:
pass
    def testPythonInteraction(self):
d = Foundation.NSMutableDictionary.dictionary()
d[b"a".decode("ascii")] = b"foo".decode("ascii")
d[b"b".decode("ascii")] = b"bar".decode("ascii")
k = list(d.keys())
k.sort()
self.assertTrue(k == [b"a".decode("ascii"), b"b".decode("ascii")])
k = list(d.values())
k.sort()
self.assertTrue(k == [b"bar".decode("ascii"), b"foo".decode("ascii")])
k = list(d.items())
k.sort()
self.assertTrue(
k
== [
(b"a".decode("ascii"), b"foo".decode("ascii")),
(b"b".decode("ascii"), b"bar".decode("ascii")),
]
)
def testIn(self):
d = Foundation.NSMutableDictionary.dictionary()
d[b"a".decode("ascii")] = b"foo".decode("ascii")
d[b"b".decode("ascii")] = b"bar".decode("ascii")
d[1] = b"baz".decode("ascii")
d[0] = b"bob".decode("ascii")
self.assertTrue(b"a".decode("ascii") in d)
self.assertTrue(1 in d)
# self.assertTrue( -1 in d )
# self.assertTrue( d[-1] is None )
self.assertTrue(b"q".decode("ascii") not in d)
for k in d.allKeys():
self.assertEqual(d.objectForKey_(k), d[k])
for k in d:
self.assertEqual(d.objectForKey_(k), d[k])
del d[b"a".decode("ascii")]
self.assertTrue(b"a".decode("ascii") not in d)
def test_varargConstruction(self):
u = Foundation.NSDictionary.dictionaryWithObjects_forKeys_(
[1, 2, 3, 4],
[
b"one".decode("ascii"),
b"two".decode("ascii"),
b"three".decode("ascii"),
b"four".decode("ascii"),
],
)
v = Foundation.NSDictionary.alloc().initWithObjects_forKeys_(
[1, 2, 3, 4],
[
b"one".decode("ascii"),
b"two".decode("ascii"),
b"three".decode("ascii"),
b"four".decode("ascii"),
],
)
w = Foundation.NSDictionary.dictionaryWithObjects_forKeys_count_(
[1, 2, 3, 4, 5],
[
b"one".decode("ascii"),
b"two".decode("ascii"),
b"three".decode("ascii"),
b"four".decode("ascii"),
b"five".decode("ascii"),
],
4,
)
x = Foundation.NSDictionary.alloc().initWithObjects_forKeys_count_(
[1, 2, 3, 4, 5],
[
b"one".decode("ascii"),
b"two".decode("ascii"),
b"three".decode("ascii"),
b"four".decode("ascii"),
b"five".decode("ascii"),
],
4,
)
y = Foundation.NSDictionary.dictionaryWithObjectsAndKeys_(
1,
b"one".decode("ascii"),
2,
b"two".decode("ascii"),
3,
b"three".decode("ascii"),
4,
b"four".decode("ascii"),
None,
)
z = Foundation.NSDictionary.alloc().initWithObjectsAndKeys_(
1,
b"one".decode("ascii"),
2,
b"two".decode("ascii"),
3,
b"three".decode("ascii"),
4,
b"four".decode("ascii"),
None,
)
self.assertEqual(len(u), 4)
self.assertEqual(len(v), 4)
self.assertEqual(len(w), 4)
self.assertEqual(len(x), 4)
self.assertEqual(len(y), 4)
self.assertEqual(len(z), 4)
self.assertEqual(u[b"one".decode("ascii")], 1)
self.assertEqual(v[b"two".decode("ascii")], 2)
self.assertEqual(w[b"three".decode("ascii")], 3)
self.assertEqual(x[b"one".decode("ascii")], 1)
self.assertEqual(y[b"two".decode("ascii")], 2)
self.assertEqual(z[b"four".decode("ascii")], 4)
def test_varargConstruction2(self):
u = Foundation.NSMutableDictionary.dictionaryWithObjects_forKeys_(
[1, 2, 3, 4],
[
b"one".decode("ascii"),
b"two".decode("ascii"),
b"three".decode("ascii"),
b"four".decode("ascii"),
],
)
self.assertIsNot(u, None)
v = Foundation.NSMutableDictionary.alloc().initWithObjects_forKeys_(
[1, 2, 3, 4],
[
b"one".decode("ascii"),
b"two".decode("ascii"),
b"three".decode("ascii"),
b"four".decode("ascii"),
],
)
self.assertIsNot(v, None)
w = Foundation.NSMutableDictionary.dictionaryWithObjects_forKeys_count_(
[1, 2, 3, 4, 5],
[
b"one".decode("ascii"),
b"two".decode("ascii"),
b"three".decode("ascii"),
b"four".decode("ascii"),
b"five".decode("ascii"),
],
4,
)
self.assertIsNot(w, None)
x = Foundation.NSMutableDictionary.alloc().initWithObjects_forKeys_count_(
[1, 2, 3, 4, 5],
[
b"one".decode("ascii"),
b"two".decode("ascii"),
b"three".decode("ascii"),
b"four".decode("ascii"),
b"five".decode("ascii"),
],
4,
)
self.assertIsNot(x, None)
y = Foundation.NSMutableDictionary.dictionaryWithObjectsAndKeys_(
1,
b"one".decode("ascii"),
2,
b"two".decode("ascii"),
3,
b"three".decode("ascii"),
4,
b"four".decode("ascii"),
None,
)
self.assertIsNot(y, None)
z = Foundation.NSMutableDictionary.alloc().initWithObjectsAndKeys_(
1,
b"one".decode("ascii"),
2,
b"two".decode("ascii"),
3,
b"three".decode("ascii"),
4,
b"four".decode("ascii"),
None,
)
self.assertIsNot(z, None)
self.assertEqual(len(u), 4)
self.assertEqual(len(v), 4)
self.assertEqual(len(w), 4)
self.assertEqual(len(x), 4)
self.assertEqual(len(y), 4)
self.assertEqual(len(z), 4)
self.assertEqual(u[b"one".decode("ascii")], 1)
self.assertEqual(v[b"two".decode("ascii")], 2)
self.assertEqual(w[b"three".decode("ascii")], 3)
self.assertEqual(x[b"one".decode("ascii")], 1)
self.assertEqual(y[b"two".decode("ascii")], 2)
self.assertEqual(z[b"four".decode("ascii")], 4)
class MyDictionaryBase(Foundation.NSDictionary):
def count(self):
if hasattr(self, "_count"):
return self._count
return -1
def keyEnumerator(self):
return None
def objectForKey_(self, key):
return None
class MyDictionary1(MyDictionaryBase):
def initWithObjects_forKeys_count_(self, objects, keys, count):
self._count = count
self._objects = objects
self._keys = keys
return self
class MyDictionary2(MyDictionaryBase):
def dictionaryWithObjects_forKeys_count_(self, objects, keys, count):
if self is not MyDictionary2:
raise AssertionError(self)
return (objects, keys, count)
class TestSubclassing(TestCase):
def testInitWithObjects(self):
o = PyObjC_TestClass3.makeDictFromClass_method_(MyDictionary1, 1)
self.assertIsInstance(o, MyDictionary1)
self.assertEqual(o._count, 4)
self.assertEqual(len(o._keys), 4)
self.assertEqual(len(o._objects), 4)
def testDictWithObjects(self):
o = PyObjC_TestClass3.makeDictFromClass_method_(MyDictionary2, 0)
self.assertIsInstance(o, tuple)
self.assertEqual(o[2], 4)
self.assertEqual(len(o[1]), 4)
self.assertEqual(len(o[0]), 4)
class TestVariadic(TestCase):
def testDictionaryWithObjectsAndKeys(self):
o = Foundation.NSDictionary.dictionaryWithObjectsAndKeys_(42, "a", 43, "b")
self.assertEqual(o, {"a": 42, "b": 43})
self.assertIsInstance(o, Foundation.NSDictionary)
o = Foundation.NSMutableDictionary.dictionaryWithObjectsAndKeys_(
42, "a", 43, "b"
)
self.assertEqual(o, {"a": 42, "b": 43})
self.assertIsInstance(o, Foundation.NSMutableDictionary)
def testInitWithObjectsAndKeys(self):
o = Foundation.NSDictionary.alloc().initWithObjectsAndKeys_(42, "a", 43, "b")
self.assertEqual(o, {"a": 42, "b": 43})
self.assertIsInstance(o, Foundation.NSDictionary)
o = Foundation.NSMutableDictionary.alloc().initWithObjectsAndKeys_(
42, "a", 43, "b"
)
self.assertEqual(o, {"a": 42, "b": 43})
self.assertIsInstance(o, Foundation.NSMutableDictionary)
class TestNSDictionary(TestCase):
def testMethods(self):
self.assertResultIsBOOL(Foundation.NSDictionary.isEqualToDictionary_)
self.assertResultIsBOOL(Foundation.NSDictionary.writeToFile_atomically_)
self.assertArgIsBOOL(Foundation.NSDictionary.writeToFile_atomically_, 1)
self.assertResultIsBOOL(Foundation.NSDictionary.writeToURL_atomically_)
self.assertArgIsBOOL(Foundation.NSDictionary.writeToURL_atomically_, 1)
self.assertArgIsSEL(
Foundation.NSDictionary.keysSortedByValueUsingSelector_, 0, b"i@:@"
)
self.assertArgIsIn(
Foundation.NSDictionary.dictionaryWithObjects_forKeys_count_, 0
)
self.assertArgSizeInArg(
Foundation.NSDictionary.dictionaryWithObjects_forKeys_count_, 0, 2
)
self.assertArgIsIn(
Foundation.NSDictionary.dictionaryWithObjects_forKeys_count_, 1
)
self.assertArgSizeInArg(
Foundation.NSDictionary.dictionaryWithObjects_forKeys_count_, 1, 2
)
self.assertArgIsIn(Foundation.NSDictionary.initWithObjects_forKeys_count_, 0)
self.assertArgSizeInArg(
Foundation.NSDictionary.initWithObjects_forKeys_count_, 0, 2
)
self.assertArgIsIn(Foundation.NSDictionary.initWithObjects_forKeys_count_, 1)
self.assertArgSizeInArg(
Foundation.NSDictionary.initWithObjects_forKeys_count_, 1, 2
)
self.assertArgIsBOOL(Foundation.NSDictionary.initWithDictionary_copyItems_, 1)
self.assertIsNullTerminated(Foundation.NSDictionary.initWithObjectsAndKeys_)
self.assertIsNullTerminated(
Foundation.NSDictionary.dictionaryWithObjectsAndKeys_
)
@min_os_level("10.6")
def testMethods10_6(self):
self.assertArgIsBlock(
Foundation.NSDictionary.enumerateKeysAndObjectsUsingBlock_,
0,
b"v@@o^" + objc._C_NSBOOL,
)
self.assertArgIsBlock(
Foundation.NSDictionary.enumerateKeysAndObjectsWithOptions_usingBlock_,
1,
b"v@@o^" + objc._C_NSBOOL,
)
self.assertArgIsBlock(
Foundation.NSDictionary.keysSortedByValueUsingComparator_, 0, b"i@@"
)
self.assertArgIsBlock(
Foundation.NSDictionary.keysSortedByValueWithOptions_usingComparator_,
1,
objc._C_NSInteger + b"@@",
)
self.assertArgIsBlock(
Foundation.NSDictionary.keysOfEntriesPassingTest_,
0,
objc._C_NSBOOL + b"@@o^" + objc._C_NSBOOL,
)
self.assertArgIsBlock(
Foundation.NSDictionary.keysOfEntriesWithOptions_passingTest_,
1,
objc._C_NSBOOL + b"@@o^" + objc._C_NSBOOL,
)
@min_os_level("10.13")
def testMethods10_13(self):
self.assertArgIsOut(Foundation.NSDictionary.writeToURL_error_, 1)
self.assertResultIsBOOL(Foundation.NSDictionary.writeToURL_error_)
self.assertArgIsOut(Foundation.NSDictionary.initWithContentsOfURL_error_, 1)
self.assertArgIsOut(
Foundation.NSDictionary.dictionaryWithContentsOfURL_error_, 1
)
|
StarcoderdataPython
|
162527
|
<filename>src/Math2D.py<gh_stars>1-10
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sun May 5 14:55:49 2019
@author: luke
"""
import numpy as np
def Grad2D(u):
    """
    2D gradient of a scalar field `u` given in a nodal basis.

    Note: Dr, Ds (reference-coordinate differentiation matrices) and
    rx, sx, ry, sy (geometric factors of the element mapping) are assumed
    to be defined at global scope before this function is called.
    """
    # derivatives with respect to the reference coordinates (r, s)
    ur = Dr * u
    us = Ds * u
    # chain rule: map reference derivatives to physical (x, y) derivatives
    ux = np.multiply(rx, ur) + \
        np.multiply(sx, us)
    uy = np.multiply(ry, ur) + \
        np.multiply(sy, us)
    return ux, uy
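# A minimal sketch of the globals Grad2D relies on (this file does not define
# them); sizes are made up and everything is an np.matrix so `*` is a matrix
# product, as the MATLAB-style code above assumes:
#
#   Np, K = 6, 4                                      # nodes per element, number of elements
#   Dr = np.matrix(np.random.rand(Np, Np))            # d/dr differentiation matrix
#   Ds = np.matrix(np.random.rand(Np, Np))            # d/ds differentiation matrix
#   rx = sx = ry = sy = np.matrix(np.ones((Np, K)))   # geometric factors of the mapping
#   u = np.matrix(np.random.rand(Np, K))              # nodal values of the scalar field
#   ux, uy = Grad2D(u)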
|
StarcoderdataPython
|
5149801
|
<reponame>Quant-Network/python-api-client<filename>quant_trading/models/__init__.py
# coding: utf-8
# flake8: noqa
"""
Quant Trading Network API
This API will use JSON. JSON looks like this: { \"key\": \"value\", \"anotherKey\": \"anotherValue\" } # noqa: E501
OpenAPI spec version: 1.0.0
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
# import models into model package
from quant_trading.models.algo_params_response import AlgoParamsResponse
from quant_trading.models.avg_open_position_hours import AvgOpenPositionHours
from quant_trading.models.closed_position_hours import ClosedPositionHours
from quant_trading.models.current_position_pct import CurrentPositionPct
from quant_trading.models.current_unrealised_pct import CurrentUnrealisedPct
from quant_trading.models.error import Error
from quant_trading.models.exec_algo_response import ExecAlgoResponse
from quant_trading.models.exec_position_manager_algo_request import ExecPositionManagerAlgoRequest
from quant_trading.models.exec_position_swinger_algo_request import ExecPositionSwingerAlgoRequest
from quant_trading.models.last_open_stake_hours import LastOpenStakeHours
from quant_trading.models.open_position_hours import OpenPositionHours
from quant_trading.models.position_state import PositionState
from quant_trading.models.stake_state import StakeState
from quant_trading.models.x_rate_limit_limit import XRateLimitLimit
from quant_trading.models.x_rate_limit_remaining import XRateLimitRemaining
from quant_trading.models.x_rate_limit_reset import XRateLimitReset
|
StarcoderdataPython
|
308670
|
<filename>Ensemble_stress_dominated_1.py
# -*- coding: utf-8 -*-
"""
Created on Wed May 12 13:37:09 2021
Triaxial test cases [deviatoric hardening(DH) model]
Generating stress-strain sequence via DH model
@author: <NAME>
Note: Tensile normal stress is positive
"""
import numpy as np # import module
import pandas as pd
import glob, os
from pandas.core.frame import DataFrame
import matplotlib.pyplot as plt
import matplotlib as mpl
mpl.rcParams['font.family'] = 'Times New Roman'
# ------------------------------------------------
# material data
RF = 1.331 # failure slope; slope of the failure envelope
RC = 1.215 # critical slope; the slope of zero dilatancy line
A = 0.011 # hardening parameters; the constant A which appears in the hardening function
DP = 0.3 # Increments of mean effective stress(stress-dominated loading required)
DQ = 0.9 # Increments of deviatoric stress increment(stress-dominated loading required)
# ------------------------------------------------
# Loop and integral
Number_iteration = 400
data1 = pd.DataFrame({'p': [1],
'deviatoric_stress': [2],
"strain": [3],
"volume": [4],
"item1": [5]})
df = []
# Assume a maximum axial strain of 0.1; conventional triaxial compression (CTC)
jj = 0
x_strain = []
z_strain = []
P11 = []
Volume = []
data1 = []
DEV = []
DEQ = []
P1 = 50
def numerical_integration(DP, DQ, Number_iteration=2500): # the use of keyword argument
EV = 0
EQ = 0
Mean_P = []
Devi_q = []
Devi_strain = []
Volume = []
global P
Q = 0
global BM, RF, RC, G, A, HP, HE
global R0
for i in range(Number_iteration):
G = 3719.81018 * P ** 0.18 # shear modulus
BM = 6720.74398 * P # bulk modulus
        R0 = Q / P  # current stress ratio, used to decide when to stop
        FP = -R0  # f is the yield function; df/dp (Eq. 3.61)
        FQ = 1.  # derivative of Eq. 3.61 with respect to q
        QP = RC - R0  # Eq. 3.65
        QQ = 1.  # derivative of Eq. 3.64 with respect to q
if R0 > RF:
break
if EQ > 0.25:
break
# Stan's hardening
        HP = P * (RF - R0) ** 2 / (A * RF)  # Eq. 3.66 (from the second term of Eq. 3.57); plastic hardening modulus hp
        HE = BM * FP * QP + 3 * G * FQ * QQ  # first term of Eq. 3.57
# strain dominated loading
D = np.array([[BM - (BM * BM * FP * QP) / (HE + HP), -(3 * G * BM * QP * FQ) / (HE + HP)],
[-(3 * G * BM * QQ * FP) / (HE + HP), 3 * G - 9 * G * G * QQ * FQ / (HE + HP)]])
# stress dominated loading
        DET = D[0][0] * D[1][1] - D[0][1] * D[1][0]  # determinant of D
        C = [[1, 1], [1, 1]]
        C[0][0] = D[1][1] / DET  # invert D for stress-controlled loading
C[0][1] = -D[0][1] / DET
C[1][0] = -D[1][0] / DET
C[1][1] = D[0][0] / DET
        # Accumulate the increments
        dEV = C[0][0] * DP + C[0][1] * DQ  # increment of volumetric strain
        dEQ = C[1][0] * DP + C[1][1] * DQ  # increment of deviatoric plastic strain
        EV = EV + dEV  # running totals over the loading loop
EQ = EQ + dEQ
P = P + DP
Q = Q + DQ
# Store data
Mean_P.append(P)
Devi_q.append(Q)
Devi_strain.append(EQ)
Volume.append(EV)
DEV.append(dEV)
DEQ.append(dEQ)
        R0 = Q / P  # update the ratio used by the stopping checks
    # convert lists to DataFrames, then concatenate them
Mean_P1 = pd.DataFrame(Mean_P)
Devi_q1 = pd.DataFrame(Devi_q)
Devi_strain1 = pd.DataFrame(Devi_strain)
Volume1 = pd.DataFrame(Volume)
data = pd.concat([Mean_P1, Devi_q1, Devi_strain1, Volume1], axis=1) # The axis(0 or 1)to concatenate along.
names = ['p', 'deviatoric_stress', 'deviatoric_strain', 'volume']
data.columns = names
return data
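# A minimal sketch of calling numerical_integration directly; it reads and updates
# the module-level P, so P must be initialised first (Generate_stress_strain_pairs
# below does this for each case):
#
#   P = 50                                                 # initial mean effective stress
#   single_path = numerical_integration(DP, DQ, Number_iteration=2500)
#   print(single_path[['p', 'deviatoric_stress']].head())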
def Generate_stress_strain_pairs():
    # Create an empty DataFrame
data2 = pd.DataFrame(columns=['p', 'deviatoric_stress', "deviatoric_strain", "volume", 'case'])
global P
for ii in range(475):
P1 = 50 # initial mean effective stress
P = P1 + ii * 2
P0 = P1 + ii * 2
data1 = numerical_integration(DP, DQ, Number_iteration=2500)
data1['confining_stress'] = P0
global jj
jj = jj + 1
data1['case'] = jj
        data2 = pd.concat([data2, data1], axis=0)  # axis=0: align columns and append rows
return data2
new_data = Generate_stress_strain_pairs() # Call function
new_data['Deviat_plastic_strain'] = new_data.apply(
lambda x: (x['deviatoric_strain'] - x['deviatoric_stress'] / (3 * G)), axis=1)
new_data = new_data.drop(new_data.index[0], axis=0)
order = ['case', 'Deviat_plastic_strain', "deviatoric_strain", 'deviatoric_stress', "volume", 'p', 'confining_stress']
new_data = new_data[order]
## write csv file
new_data.to_csv('synthesis_data.csv', sep=',', header=True, index=True) # , names=["","q","strain","volume"]
fig, ax = plt.subplots()
ax_plot = plt.scatter(new_data['deviatoric_strain'], new_data['deviatoric_stress'], c=new_data['confining_stress'],
cmap=plt.get_cmap('coolwarm'), s=5, alpha=0.5, linewidth=0, label='q') # coolwarm
plt.xlabel('Deviatoric strain', fontsize=14)
plt.ylabel('Deviatoric stress (kPa)', fontsize=14)
plt.xlim(0, 0.3)
plt.ylim(0, 2250)
ax.set_xticks([0.05, 0.1, 0.15, 0.2, 0.25, 0.3])
ax.get_xaxis().set_major_formatter(mpl.ticker.ScalarFormatter())
# Add a colour bar
fig.colorbar(ax_plot)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.savefig("DH_stress_strain_pairs.png", dpi=600, bbox_inches="tight")
plt.show()
fig2, ax2 = plt.subplots()
ax_plot = plt.scatter(new_data['deviatoric_strain'], new_data['volume'], c=new_data['confining_stress'],
cmap=plt.get_cmap('jet'), s=5, alpha=0.5, linewidth=0, label='q') # coolwarm
plt.xlabel('Deviatoric strain', fontsize=14)
plt.ylabel('Volumetric strain', fontsize=14)
plt.xlim(0, 0.3)
plt.ylim(0, 0.025)
# ax.set_xticks([0.05, 0.1, 0.15, 0.2, 0.25, 0.3])
# ax.get_xaxis().set_major_formatter(mpl.ticker.ScalarFormatter())
# Add a colour bar (attach it to the second figure)
fig2.colorbar(ax_plot)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.savefig("DH_volumetric_p.png", dpi=600, bbox_inches="tight")
plt.tight_layout()
plt.show()
|
StarcoderdataPython
|
1866358
|
<filename>papermill/tests/test_s3.py
# The following tests are purposely limited to the exposed interface by iorw.py
import os.path
import pytest
import boto3
import moto
from moto import mock_s3
from ..s3 import Bucket, Prefix, Key, S3, split
@pytest.fixture
def bucket_no_service():
"""Returns a bucket instance with no services"""
return Bucket('my_test_bucket')
@pytest.fixture
def bucket_with_service():
"""Returns a bucket instance with a service"""
return Bucket('my_sqs_bucket', ['sqs'])
@pytest.fixture
def bucket_sqs():
"""Returns a bucket instance with a sqs service"""
return Bucket('my_sqs_bucket', ['sqs'])
@pytest.fixture
def bucket_ec2():
"""Returns a bucket instance with a ec2 service"""
return Bucket('my_sqs_bucket', ['ec2'])
@pytest.fixture
def bucket_multiservice():
"""Returns a bucket instance with a ec2 service"""
return Bucket('my_sqs_bucket', ['ec2', 'sqs'])
def test_bucket_init():
assert Bucket('my_test_bucket')
assert Bucket('my_sqs_bucket', 'sqs')
def test_bucket_defaults():
name = 'a bucket'
b1 = Bucket(name)
b2 = Bucket(name, None)
assert b1.name == b2.name
assert b1.service == b2.service
def test_bucket_missing_params():
with pytest.raises(TypeError):
Bucket(service=None)
with pytest.raises(TypeError):
Bucket()
def test_bucket_list(bucket_sqs):
# prefix_test = ''
# assert bucket_sqs.list(prefix_test)
#
# prefix_test = 'abc'
# assert bucket_sqs.list(prefix_test) is None
#
# prefix_test = 'ec2'
# assert bucket_sqs.list(prefix_test) is None
#
# prefix_test = 'sqs'
# assert bucket_sqs.list(prefix_test)
pass
def test_prefix_init():
with pytest.raises(TypeError):
Prefix()
with pytest.raises(TypeError):
Prefix(service=None)
with pytest.raises(TypeError):
Prefix('my_test_prefix')
b1 = Bucket('my_test_bucket')
p1 = Prefix(b1, 'sqs_test', service='sqs')
assert Prefix(b1, 'test_bucket')
assert Prefix(b1, 'test_bucket', service=None)
assert Prefix(b1, 'test_bucket', None)
assert p1.bucket.service == p1.service
def test_prefix_defaults():
bucket = Bucket('my data pool')
name = 'bigdata bucket'
p1 = Prefix(bucket, name)
p2 = Prefix(bucket, name, None)
assert p1.name == p2.name
assert p1.service == p2.service
def test_prefix_str(bucket_sqs):
p1 = Prefix(bucket_sqs, 'sqs_prefix_test', 'sqs')
assert str(p1) == 's3://' + str(bucket_sqs) + '/sqs_prefix_test'
def test_prefix_repr(bucket_sqs):
p1 = Prefix(bucket_sqs, 'sqs_prefix_test', 'sqs')
assert p1.__repr__
def test_key_init():
pass
def test_key_defaults():
bucket = Bucket('my data pool')
name = 'bigdata bucket'
k1 = Key(bucket, name)
k2 = Key(bucket, name, None, None, None, None, None)
assert k1.size == k2.size
assert k1.etag == k2.etag
assert k1.storage_class == k2.storage_class
assert k1.service == k2.service
assert k1.is_prefix is False
@mock_s3
def test_s3_defaults():
s1 = S3()
s2 = S3(None, None, None, 'us-east-1')
assert s1.session == s2.session
assert s1.client == s2.client
assert s1.s3 == s2.s3
@pytest.mark.parametrize(
"value,expected",
[
('s3://foo/bar/baz', ['foo', 'bar/baz']),
('s3://foo/bar/baz/', ['foo', 'bar/baz/']),
('s3://foo', ['foo', '']),
('s3://', ['', '']),
('s3:///', ['', '']),
],
)
def test_split_success(value, expected):
assert (split(value)) == expected
def test_split_error():
with pytest.raises(ValueError):
split('foo/bar/baz')
with pytest.raises(ValueError):
split('https://foo/bar/baz')
local_dir = os.path.dirname(os.path.abspath(__file__))
test_bucket_name = 'test-pm-bucket'
test_string = 'Hello'
test_file_path = 'notebooks/s3/s3_in/s3-simple_notebook.ipynb'
with open(os.path.join(local_dir, test_file_path)) as f:
test_nb_content = f.read()
no_empty_lines = lambda s: "\n".join([l for l in s.split('\n') if len(l) > 0])
test_clean_nb_content = no_empty_lines(test_nb_content)
read_from_gen = lambda g: "\n".join(g)
@pytest.yield_fixture(scope="function")
def s3_client():
mock_s3 = moto.mock_s3()
mock_s3.start()
client = boto3.client('s3')
client.create_bucket(Bucket=test_bucket_name)
client.put_object(Bucket=test_bucket_name, Key=test_file_path, Body=test_nb_content)
yield S3()
try:
client.delete_object(Bucket=test_bucket_name, Key=test_file_path)
client.delete_object(Bucket=test_bucket_name, Key=test_file_path + '.txt')
except Exception:
pass
mock_s3.stop()
def test_s3_read(s3_client):
s3_path = "s3://{}/{}".format(test_bucket_name, test_file_path)
data = read_from_gen(s3_client.read(s3_path))
assert data == test_clean_nb_content
def test_s3_write(s3_client):
s3_path = "s3://{}/{}.txt".format(test_bucket_name, test_file_path)
s3_client.cp_string(test_string, s3_path)
data = read_from_gen(s3_client.read(s3_path))
assert data == test_string
def test_s3_overwrite(s3_client):
s3_path = "s3://{}/{}".format(test_bucket_name, test_file_path)
s3_client.cp_string(test_string, s3_path)
data = read_from_gen(s3_client.read(s3_path))
assert data == test_string
def test_s3_listdir(s3_client):
dir_name = os.path.dirname(test_file_path)
s3_dir = "s3://{}/{}".format(test_bucket_name, dir_name)
s3_path = "s3://{}/{}".format(test_bucket_name, test_file_path)
dir_listings = s3_client.listdir(s3_dir)
assert len(dir_listings) == 1
assert s3_path in dir_listings
|
StarcoderdataPython
|
3437856
|
<reponame>evelinacs/semantic_parsing_with_IRTGs
#!/usr/bin/env python3
import sys
import argparse
from nltk.tree import ParentedTree
parser = argparse.ArgumentParser(description = "Filters out trees which contain subtrees that have more than 3 children. Also removes trace subtrees.")
parser.add_argument("treebank", help = "input file with one bracketed tree per line")
parser.add_argument("-s", "--sanitized", action = "store_true", help = "for sanitized input")
args = parser.parse_args()
def filter_trees():
    with open(args.treebank) as np_doc:
if args.sanitized:
trace_subtree_pos = "HYPHENNONEHYPHEN"
else:
trace_subtree_pos = "-NONE-"
for line in np_doc:
t = ParentedTree.fromstring(line)
maxlen = 0
found = False
treeposition = []
for subtree in t.subtrees():
if subtree.label() == trace_subtree_pos:
parent = subtree.parent()
if parent is not None:
treeposition.append(subtree.treeposition())
if parent.parent() is not None:
treeposition.append(parent.treeposition())
found = True
width = len(subtree)
if width > maxlen:
maxlen = width
if found:
treeposition.sort(key=len)
for position in treeposition[::-1]:
del t[position]
if maxlen <=3:
if t.leaves():
print(t.pformat(10000000), end = "\n")
if __name__ == "__main__":
filter_trees()
|
StarcoderdataPython
|
5127799
|
<gh_stars>1-10
import diskcache as dc
from os.path import expanduser
cache = dc.Cache(expanduser('~') + '/.opus_api')
def clearCache():
"""
Delete all items from the cache.
"""
cache.clear()
def jcache(function):
"""
Decorator for caching API json results
"""
def wrapper(*args, **kwargs):
key = None
if function.__name__ == 'get':
key = args
elif function.__name__ == 'langs':
key = 'langs'
if key and key in cache:
return cache[key]
        result = function(*args, **kwargs)
        if key is not None:
            cache[key] = result
        return result
return wrapper
def hcache(function):
"""
Decorator for caching crawler html
"""
def wrapper(*args, **kwargs):
src = args[0]
trg = args[1]
key = ('html', src, trg)
if key in cache:
html = cache[key]
else:
html = function(src, trg)
cache[key] = html
return html
return wrapper
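# A minimal sketch of how these decorators are meant to be applied; the function
# names follow the `function.__name__` checks above ('get', 'langs'), but the real
# opus_api signatures are assumptions here:
#
#   @jcache
#   def get(src, trg, **params):      # JSON API call, cached by its arguments
#       ...
#
#   @hcache
#   def crawl(src, trg):              # HTML fetch, cached under ('html', src, trg)
#       ...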
|
StarcoderdataPython
|
5154438
|
# Queue implementation using List in Python
try: #try catch so that the program does not crash
queue=[]
while True:
        op = int(input("Press--> 1 to insert into queue | 2 to remove from queue | 3 to display values of queue | 4 to reverse the existing queue | 5 to exit "))
        if op==1: #to insert an element into the queue
ele = int(input("enter elem to insert "))
queue.append(ele)
elif op==2: #to remove an element from the queue
if len(queue)==0:
print("The queue is empty, insert values if required")
else:
ele=queue.pop(0)
print("Element removed is - ",ele)
elif op==3: #to display the elements in the queue
if len(queue)==0:
print("The queue is empty, insert values if required")
else:
print(queue)
elif op==4: #to reverse queue
queue.reverse()
elif op==5: #to exit
break
else:
print("invalid option")
except ValueError:
print("Please enter integer only") #If user inputs an alphabet or string the program should not crash
except:
print("There's been some issue please check the data you've entered")
|
StarcoderdataPython
|
1877026
|
# Copyright 2015 - Mirantis, Inc.
# Copyright 2015 - StackStorm, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The intention of the module is providing various DB related lookup functions
for more convenient usage within the workflow engine.
Some of the functions may provide caching capabilities.
WARNING: Oftentimes, persistent objects returned by the methods in this
module won't be attached to the current DB SQLAlchemy session because
they are returned from the cache and therefore they need to be used
carefully without trying to do any lazy loading etc.
These objects are also not suitable for re-attaching them to a session
in order to update their persistent DB state.
Mostly, they are useful for doing any kind of fast lookups with in order
to make some decision based on their state.
"""
import cachetools
import threading
from mistral.db.v2 import api as db_api
from mistral.workflow import states
_TASK_EXECUTIONS_CACHE_LOCK = threading.RLock()
_TASK_EXECUTIONS_CACHE = cachetools.LRUCache(maxsize=20000)
def find_task_executions_by_name(wf_ex_id, task_name):
"""Finds task executions by workflow execution id and task name.
:param wf_ex_id: Workflow execution id.
:param task_name: Task name.
:return: Task executions (possibly a cached value).
"""
cache_key = (wf_ex_id, task_name)
with _TASK_EXECUTIONS_CACHE_LOCK:
t_execs = _TASK_EXECUTIONS_CACHE.get(cache_key)
if t_execs:
return t_execs
t_execs = db_api.get_task_executions(
workflow_execution_id=wf_ex_id,
name=task_name
)
# We can cache only finished tasks because they won't change.
all_finished = (
t_execs and
all([states.is_completed(t_ex.state) for t_ex in t_execs])
)
if all_finished:
with _TASK_EXECUTIONS_CACHE_LOCK:
_TASK_EXECUTIONS_CACHE[cache_key] = t_execs
return t_execs
def find_task_executions_by_spec(wf_ex_id, task_spec):
return find_task_executions_by_name(wf_ex_id, task_spec.get_name())
def find_task_executions_by_specs(wf_ex_id, task_specs):
res = []
for t_s in task_specs:
res = res + find_task_executions_by_spec(wf_ex_id, t_s)
return res
def find_task_executions_with_state(wf_ex_id, state):
return db_api.get_task_executions(
workflow_execution_id=wf_ex_id,
state=state
)
def find_successful_task_executions(wf_ex_id):
return find_task_executions_with_state(wf_ex_id, states.SUCCESS)
def find_error_task_executions(wf_ex_id):
return find_task_executions_with_state(wf_ex_id, states.ERROR)
def find_cancelled_task_executions(wf_ex_id):
return find_task_executions_with_state(wf_ex_id, states.CANCELLED)
def find_completed_tasks(wf_ex_id):
return db_api.get_completed_task_executions(workflow_execution_id=wf_ex_id)
def clean_caches():
with _TASK_EXECUTIONS_CACHE_LOCK:
_TASK_EXECUTIONS_CACHE.clear()
|
StarcoderdataPython
|
6510711
|
import os
import inspect
import json
import pkgutil
from flask import request
import api
from api.rest import config
from api.rest.base import SecureResource, rest_resource
from storage.common.base import divide_dict
MODULES_PATH = 'api.rest.modules.'
__dict__ = {}
# NOTE: pkgutil.walk_packages() expects a list of directory paths plus a dotted
# prefix, not a dotted module name. Assuming this file is the package's own
# __init__.py (api/rest/modules/__init__.py), its __path__ is the list to pass;
# importlib.import_module() then yields the submodule itself.
import importlib
for importer, modname, ispkg in pkgutil.walk_packages(path=__path__, prefix=MODULES_PATH):
    if ispkg:
        module = importlib.import_module(modname)
        __dict__[modname.replace(MODULES_PATH, '')] = module
__all__ = [k for k in __dict__]
#@cached
def get_modules():
return api.rest.modules.__all__
def get_module(name):
return api.rest.modules.__dict__[name]
#@cached
def get_config(module_name, typenames=None):
constructor = get_constructor(module_name)
config = constructor.config()
preserve_keys = ['id', 'init', 'categories']
if type(typenames) == str:
preserve_keys.append(typenames)
elif type(typenames) == list:
preserve_keys += typenames
cfg = {k: config[k] for k in preserve_keys if k in config}
return cfg
def get_constructor(module_name):
for name, obj in inspect.getmembers(get_module(module_name)):
if name in ['Alphabet']:
continue
if inspect.isclass(obj) and 'config' in obj.__dict__:
return obj
def get_type_modules(type_name):
for module in get_modules():
config = get_config(module, type_name)
if type_name in config:
yield module
def get_allowed_modules(allowed_types):
for allowed in allowed_types:
for module in get_type_modules(allowed):
yield module
def first(dict_):
return dict_[list(dict_.keys())[0]]
#@cached
def check_module(module_name, allowed_types):
return module_name in get_allowed_modules(allowed_types)
class Modules_(SecureResource):
""" /api/modules """
endpoints = ['/modules/']
allowed_types = ['cipher', 'eds']
#@cached
def get(self):
modules = [module for module in get_allowed_modules(self.allowed_types)]
configs = [get_config(module, self.allowed_types) for module in modules]
if (request.args.get('categorized', False, bool)):
allowed_categories = request.args['categories[]'] or request.args['categories'] or ['stream', 'block', 'transition']
categories = {}
for config in configs:
for category in config['categories']:
if category in allowed_categories:
if category not in categories:
categories[category] = []
categories[category].append({'id': config['id'], 'name': config['id']})
return [{'id': name, 'title': name, 'items': items} for name, items in categories.items()]
else:
return modules
class Module_(SecureResource):
""" /api/modules/module """
endpoints = ['/modules/<string:module>']
allowed_types = ['cipher', 'eds']
#@cached
def get(self, module):
if check_module(module, self.allowed_types):
return get_config(module, self.allowed_types)
else:
return {'error': 'Module {} not found!'.format(module)}, 404
def post(self, module):
if check_module(module, self.allowed_types):
config = get_config(module, self.allowed_types)
data = request.json.copy()
action, data = divide_dict(data, 'action')
if not action:
return {'error': 'Action not set!'}
actions, _ = divide_dict(config, self.allowed_types)
method = first(actions)
            if action not in method:
return {'error': 'Wrong action {}!'.format(action)}
init = {}
if 'init' in config:
for item in config['init']:
param = item['name']
if param in data:
init[param] = data[param]
else:
return {'error': 'Missing init param {}!'.format(param)}
_, data = divide_dict(data, list(init.keys()))
opentext, params = divide_dict(data, 'content')
"""
if not opentext:
file = request.files['file']
file_bytes = file.read(config['MAX_FILE_SIZE'])
if bool(file.filename):
opentext = json.loads(file_bytes.decode('utf-8'))
"""
if not opentext:
return {'error': 'You should set input text or upload file!'}
constructor = get_constructor(module)
singletone = constructor(**init)
return getattr(singletone, action, None)(opentext, **dict(params))
return {'error': 'Module {} not found!'.format(module)}, 404
@rest_resource
class Modules(Modules_):
""" /api/modules """
endpoints = ['/modules','/modules/']
allowed_types = ['cipher', 'eds']
@rest_resource
class Module(Module_):
""" /api/modules/module """
endpoints = ['/modules/<string:module>']
allowed_types = ['cipher', 'eds']
@rest_resource
class Ciphers(Modules_):
""" /api/ciphers """
endpoints = ['/ciphers','/ciphers/']
allowed_types = ['cipher']
@rest_resource
class Cipher(Module_):
""" /api/ciphers/name """
endpoints = ['/ciphers/<string:module>']
allowed_types = ['cipher']
@rest_resource
class EDS(Modules_):
""" /api/eds """
endpoints = ['/eds','/eds/']
allowed_types = ['eds']
@rest_resource
class DS(Module_):
""" /api/eds/name """
endpoints = ['/eds/<string:module>']
allowed_types = ['eds']
|
StarcoderdataPython
|
1763228
|
'''initialize'''
from .nostalgicstyle import NostalgicstyleBeautifier
|
StarcoderdataPython
|
6686474
|
<filename>RecoParticleFlow/Configuration/python/RecoParticleFlow_cff.py
import FWCore.ParameterSet.Config as cms
from RecoParticleFlow.PFTracking.particleFlowTrack_cff import *
#from RecoParticleFlow.PFTracking.particleFlowTrackWithDisplacedVertex_cff import *
from RecoParticleFlow.PFProducer.particleFlowSimParticle_cff import *
from RecoParticleFlow.PFProducer.particleFlowBlock_cff import *
from RecoParticleFlow.PFProducer.particleFlowEGamma_cff import *
from RecoParticleFlow.PFProducer.particleFlow_cff import *
from RecoParticleFlow.PFProducer.pfElectronTranslator_cff import *
from RecoParticleFlow.PFProducer.pfPhotonTranslator_cff import *
#from RecoParticleFlow.PFProducer.pfGsfElectronCiCSelector_cff import *
from RecoParticleFlow.PFProducer.pfGsfElectronMVASelector_cff import *
from RecoParticleFlow.PFProducer.pfLinker_cff import *
from CommonTools.ParticleFlow.pfParticleSelection_cff import *
from RecoEgamma.EgammaIsolationAlgos.particleBasedIsoProducer_cff import *
from RecoParticleFlow.PFProducer.chargedHadronPFTrackIsolation_cfi import *
from RecoJets.JetProducers.fixedGridRhoProducerFastjet_cfi import *
fixedGridRhoFastjetAllTmp = fixedGridRhoFastjetAll.clone(pfCandidatesTag = cms.InputTag("particleFlowTmp"))
particleFlowTmpSeq = cms.Sequence(particleFlowTmp)
particleFlowReco = cms.Sequence( particleFlowTrackWithDisplacedVertex*
# pfGsfElectronCiCSelectionSequence*
pfGsfElectronMVASelectionSequence*
particleFlowBlock*
particleFlowEGammaFull*
particleFlowTmpSeq*
fixedGridRhoFastjetAllTmp*
particleFlowTmpPtrs*
particleFlowEGammaFinal*
pfParticleSelectionSequence )
particleFlowLinks = cms.Sequence( particleFlow*particleFlowPtrs*chargedHadronPFTrackIsolation*particleBasedIsolationSequence)
from RecoParticleFlow.PFTracking.hgcalTrackCollection_cfi import *
from RecoParticleFlow.PFProducer.simPFProducer_cfi import *
from SimTracker.TrackerHitAssociation.tpClusterProducer_cfi import *
from SimTracker.TrackAssociatorProducers.quickTrackAssociatorByHits_cfi import *
particleFlowTmpBarrel = particleFlowTmp.clone()
_phase2_hgcal_particleFlowTmp = cms.EDProducer(
"PFCandidateListMerger",
src = cms.VInputTag("particleFlowTmpBarrel",
"simPFProducer")
)
_phase2_hgcal_simPFSequence = cms.Sequence( pfTrack +
hgcalTrackCollection +
tpClusterProducer +
quickTrackAssociatorByHits +
simPFProducer )
_phase2_hgcal_particleFlowReco = cms.Sequence( _phase2_hgcal_simPFSequence * particleFlowReco.copy() )
_phase2_hgcal_particleFlowReco.replace( particleFlowTmpSeq, cms.Sequence( particleFlowTmpBarrel * particleFlowTmp ) )
from Configuration.Eras.Modifier_phase2_hgcal_cff import phase2_hgcal
phase2_hgcal.toReplaceWith( particleFlowTmp, _phase2_hgcal_particleFlowTmp )
phase2_hgcal.toReplaceWith( particleFlowReco, _phase2_hgcal_particleFlowReco )
from Configuration.Eras.Modifier_pp_on_XeXe_2017_cff import pp_on_XeXe_2017
from Configuration.Eras.Modifier_pp_on_AA_2018_cff import pp_on_AA_2018
for e in [pp_on_XeXe_2017, pp_on_AA_2018]:
e.toModify(particleFlowDisplacedVertexCandidate,
tracksSelectorParameters = dict(pt_min = 999999.0,
nChi2_max = 0.0,
pt_min_prim = 999999.0,
dxy = 999999.0)
)
e.toModify(particleFlowBlock, useNuclear = cms.bool(False))
e.toModify(pfNoPileUpIso, enable = cms.bool(False))
e.toModify(pfPileUpIso, enable = cms.bool(False))
e.toModify(pfNoPileUp, enable = cms.bool(False))
e.toModify(pfPileUp, enable = cms.bool(False))
|
StarcoderdataPython
|
11233184
|
<gh_stars>100-1000
from __future__ import division, absolute_import, print_function
import yaml
__all__ = [
'ConfigError', 'NotFoundError', 'ConfigValueError', 'ConfigTypeError',
'ConfigTemplateError', 'ConfigReadError']
YAML_TAB_PROBLEM = "found character '\\t' that cannot start any token"
# Exceptions.
class ConfigError(Exception):
"""Base class for exceptions raised when querying a configuration.
"""
class NotFoundError(ConfigError):
"""A requested value could not be found in the configuration trees.
"""
class ConfigValueError(ConfigError):
"""The value in the configuration is illegal."""
class ConfigTypeError(ConfigValueError):
"""The value in the configuration did not match the expected type.
"""
class ConfigTemplateError(ConfigError):
"""Base class for exceptions raised because of an invalid template.
"""
class ConfigReadError(ConfigError):
"""A configuration file could not be read."""
def __init__(self, filename, reason=None):
self.filename = filename
self.reason = reason
message = u'file {0} could not be read'.format(filename)
if (isinstance(reason, yaml.scanner.ScannerError)
and reason.problem == YAML_TAB_PROBLEM):
# Special-case error message for tab indentation in YAML markup.
message += u': found tab character at line {0}, column {1}'.format(
reason.problem_mark.line + 1,
reason.problem_mark.column + 1,
)
elif reason:
# Generic error message uses exception's message.
message += u': {0}'.format(reason)
super(ConfigReadError, self).__init__(message)
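# A minimal sketch of how ConfigReadError is typically raised around a YAML load;
# the surrounding loader code is an assumption, not part of this module:
#
#   try:
#       with open(filename) as handle:
#           data = yaml.safe_load(handle)
#   except (IOError, yaml.error.YAMLError) as exc:
#       raise ConfigReadError(filename, exc)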
|
StarcoderdataPython
|
4803373
|
# Copyright 2016 datawire. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Helpers for testing based on command output capture.
"""
import urllib
import hashlib
import os
import sys
import pexpect
class FilteredOutputFile(object):
@staticmethod
def python_threaded_exit_crash_filter(text):
lines = text.split("\n")
res = []
for line in lines:
if line.startswith("Exception in thread ") and "(most likely raised during interpreter shutdown)" in line:
break
res.append(line)
return "\n".join(res)
@staticmethod
def normalize_output(text):
"""Remove blank lines and ^C at the end; clean up all line endings"""
lines = text.splitlines() # Throw away end-of-line ^M characters
while lines and (not lines[-1].strip() or lines[-1].strip() == "^C"):
del lines[-1]
        if lines and lines[-1].strip().endswith("^C"):
lines[-1] = "".join(lines[-1].rsplit("^C", 1))
return "\n".join(lines) + "\n"
def __init__(self, filename, filters, trace=False):
self.file = open(filename, "wb", 0)
self.filters = filters
self.trace = trace
try:
len(self.filters)
except TypeError:
self.filters = [self.filters]
self.filters.append(FilteredOutputFile.python_threaded_exit_crash_filter) # XXX HACK FIXME etc.
self.filters.append(FilteredOutputFile.normalize_output)
self.captured = []
def get_data(self):
data = "".join(self.captured)
for filt in self.filters:
data = filt(data)
return data
def write(self, data):
if self.trace:
sys.stdout.write(data)
self.captured.append(data)
self.file.seek(0)
self.file.truncate()
return self.file.write(self.get_data())
def flush(self):
if self.trace:
sys.stdout.flush()
return self.file.flush()
class Captured(object):
def __init__(self, cwd, source_file, output_file, command, filters, timeout, trace=False, env=None):
self.cwd = cwd
self.source_file = source_file # name of command file or none
self.output_file = output_file # name of output file for this command
self.command = command # command that was run
self.filters = filters
self.timeout = timeout
self.trace = trace
if env is not None:
self.child_env = {}
self.child_env.update(os.environ)
for name in env:
if env[name] is None:
if name in self.child_env:
del self.child_env[name]
else:
self.child_env[name] = env[name]
else:
self.child_env = None
self.output = None # output for this command, i.e. contents of output_file
def spawn(self):
child = pexpect.spawn("/bin/bash", ["-c", self.command],
cwd=self.cwd, timeout=self.timeout, env=self.child_env)
child.logfile_read = FilteredOutputFile(self.output_file, self.filters, self.trace)
return child
def update_capture(self, child):
if child.isalive():
child.expect(pexpect.TIMEOUT, timeout=0)
self.output = child.logfile_read.get_data()
def finish_capture(self, child):
if child.isalive():
child.expect(pexpect.EOF, timeout=self.timeout)
self.output = child.logfile_read.get_data()
class BGProcess(object):
def __init__(self, cap):
self.cap = cap
self.child = cap.spawn()
def terminate(self):
if self.child.isalive():
self.child.sendintr()
self.child.expect([pexpect.EOF, pexpect.TIMEOUT], timeout=0)
return self.child.close(force=True)
def get_captured(self):
self.cap.update_capture(self.child)
return self.cap
def noop(self):
if self.child.isalive():
self.child.expect(pexpect.TIMEOUT, timeout=0)
def __enter__(self):
return self
def __exit__(self, ex_type, ex_value, ex_traceback):
self.terminate()
def filter_nocmp(filenames):
return [filename for filename in filenames if "-nocmp" not in filename]
class Session(object):
def __init__(self, name, cwd, output_dir):
self.name = name
self.cwd = cwd
self.output_dir = output_dir
self.counter = 0
self.bg_processes = []
def _get_output_name(self, command, nocmp):
content = urllib.quote_plus(command)
if len(content) > 64:
hash_obj = hashlib.sha256()
hash_obj.update(self.name + "-" + command)
content = hash_obj.digest().encode("hex")
suffix = "-nocmp" if nocmp else ""
return os.path.join(self.output_dir, "out-" + content + suffix + ".txt")
def call_noop(self):
for bg_process in self.bg_processes:
bg_process.noop()
def capture(self, command, nocmp=False, filters=None, timeout=90, trace=True, env=None):
"""
Run the command synchronously and capture the output. Return an
instance of Captured. Set option nocmp to True to tell the
test driver not to compare this file with expected output. Pass
filters to process the output before writing it to disk.
"""
if filters is None:
filters = []
cap = Captured(self.cwd, None, self._get_output_name(command, nocmp), command, filters, timeout, trace, env)
child = cap.spawn()
cap.finish_capture(child)
self.call_noop()
return cap
def capture_bg(self, command, nocmp=False, filters=None, timeout=90, trace=False, env=None):
"""
Run the command asynchronously, capturing the output. Return an
instance of BGProcess. Use nocmp and filters as with capture.
"""
if filters is None:
filters = []
cap = Captured(self.cwd, None, self._get_output_name(command, nocmp), command, filters, timeout, trace, env)
res = BGProcess(cap)
self.bg_processes.append(res)
self.call_noop()
return res
def capture_many(self, command_file):
"""
Read lines from a command file (essentially a shell script).
Run the command from each line synchronously by sending it to
bash and capture the output. Return a list of instances of
Captured. FIXME: Unimplemented.
"""
raise NotImplementedError()
def capture_many_bg(self, command_file):
"""
Not sure what this should do. Unimplemented.
"""
raise NotImplementedError()
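# A minimal sketch of driving Session from a test; the commands, paths and port
# below are made up:
#
#   session = Session("quickstart", cwd="/tmp/demo", output_dir="/tmp/demo/output")
#   cap = session.capture("echo hello", timeout=10)            # synchronous, returns Captured
#   with session.capture_bg("python -m SimpleHTTPServer 8000") as bg:
#       session.capture("curl -s localhost:8000 > /dev/null", nocmp=True)
#       server_output = bg.get_captured().output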
|
StarcoderdataPython
|
4968126
|
import torch
import torchvision
import torch.nn as nn
import torch.nn.functional as F
import re
import sys
from .functions import *
import torch.fx
grayscale = torchvision.transforms.Grayscale(num_output_channels=1)
def convert_data_for_quaternion(batch):
"""
converts batches of RGB images in 4 channels for QNNs
"""
assert all(batch[i][0].size(0) == 3 for i in range(len(batch)))
inputs, labels = [], []
for i in range(len(batch)):
inputs.append(torch.cat([batch[i][0], grayscale(batch[i][0])], 0))
labels.append(batch[i][1])
return torch.stack(inputs), torch.LongTensor(labels)
# does not find an application yet
def apply_quaternion_gradient(model, layers):
"""
hooks real-valued gradients and transforms them into one for
quaternion gradient descent
@type model: nn.Module
"""
for n, ((_, layer), parameter) in enumerate(zip(model.named_children(), model.parameters())):
        layer_name = re.match(r"^\w+", str(layer)).group()
if layer_name in layers and len(parameter.shape) > 1 and n != 1:
parameter.register_hook(to_conj)
return model
@torch.fx.wrap
def check_shapes(x):
if x.dim() in [3, 5]:
x = torch.cat([*x.chunk()], 2).squeeze()
return x
def convert_to_quaternion(Net, verbose=False, spinor=False):
"""
    converts a real-valued initialized network to a quaternion one
@type Net: nn.Module
@type verbose: bool
@type spinor: bool
"""
last_module = len([mod for mod in Net.children()])
layers = ["Linear", "Conv1d", "Conv2d", "Conv3d",
"ConvTranspose1d", "ConvTranspose2d", "ConvTranspose3d"]
for n, (name, layer) in enumerate(Net.named_children()):
        layer_name = re.match(r"^\w+", str(layer)).group()
if n != last_module - 1:
if layer_name in layers[1:]:
params = re.findall("(?<!\w)\d+(?<=\w)", str(layer))
in_features, out_features, kernel_size, stride = \
int(params[0]), int(params[1]), (int(params[2]), int(params[3])), (int(params[4]), int(params[5]))
assert in_features % 4 == 0, "number of in_channels must be divisible by 4"
assert out_features % 4 == 0, "number of out_channels must be divisible by 4"
init_func = initialize_conv
args = (in_features // 4, out_features // 4, kernel_size)
elif layer_name == layers[0]:
params = re.findall("(?<==)\w+", str(layer))
in_features, out_features, bias = int(params[0]), int(params[1]), bool(params[2])
assert in_features % 4 == 0, "number of in_channels must be divisible by 4"
assert out_features % 4 == 0, "number of out_channels must be divisible by 4"
init_func = initialize_linear
args = (in_features // 4, out_features // 4)
else:
continue
quaternion_weight = init_func(*args)
if spinor:
weight = quaternion_weight._real_rot_repr
else:
weight = quaternion_weight._real_repr
getattr(Net, name).weight = nn.Parameter(weight)
            if getattr(Net, name).bias is not None:
getattr(Net, name).bias = nn.Parameter(torch.zeros(out_features))
traced = torch.fx.symbolic_trace(layer)
for node in traced.graph.nodes:
if node.op == 'placeholder':
with traced.graph.inserting_after(node):
new_node = traced.graph.call_function(
check_shapes, args=(node,))
if any(lay in node.name for lay in ["conv", "lin"]):
with traced.graph.inserting_before(node):
all_nodes = [node for node in traced.graph.nodes]
new_node = traced.graph.call_function(node.target,
(all_nodes[1], *node.args[1:]), node.kwargs)
node.replace_all_uses_with(new_node)
traced.graph.erase_node(node)
if node.op == 'output':
all_nodes = [node for node in traced.graph.nodes]
with traced.graph.inserting_before(node):
new_node = traced.graph.call_function(
Q, args=(node.prev,))
node.replace_all_uses_with(new_node)
traced.graph.erase_node(node)
with traced.graph.inserting_after(node):
new_node = traced.graph.output(node.prev, )
if verbose:
print("-" * 20, layer_name, "-" * 20, sep="\n")
print(torch.fx.GraphModule(layer, traced.graph))
traced.graph.lint()
setattr(Net, name, torch.fx.GraphModule(layer, traced.graph))
return Net
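# A minimal sketch of calling the converter; the toy model is made up, and the
# only constraint visible in this file is that converted layers need in/out
# channels divisible by 4 (full support also depends on Q/initialize_* from
# .functions, which are not shown here):
#
#   real_net = nn.Sequential(
#       nn.Conv2d(4, 16, kernel_size=(3, 3), stride=(1, 1)),
#       nn.Conv2d(16, 4, kernel_size=(3, 3), stride=(1, 1)),
#   )
#   qnet = convert_to_quaternion(real_net, verbose=True)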
|
StarcoderdataPython
|
1636222
|
<reponame>36000/cnn_colorflow<gh_stars>0
import numpy as np
import sys
import os
from keras.models import load_model
sys.path.append("../utilities")
import constants
from data import get_train_test
from metrics import plot_n_roc_sic
datasets_c = ['h_qq_rot_charged', 'h_gg_rot_charged', 'cp_qq_rot_charged', 'qx_qg_rot_charged', 's8_gg_rot_charged', 'zp_qq_rot_charged']
datasets_s = ['h_qq', 'h_gg', 'cp_qq', 'qx_qg', 's8_gg', 'zp_qq']
def comp_all(i, datasets = datasets_s, n = 150000):
name = 'all_' + datasets[i] + '_comps'
X_tests = []
y_yests = []
models = []
model_types = []
labels = []
sig = datasets[i]
for j in range(6):
if j == i:
continue
bg = datasets[j]
constants.SIG_H5 = os.path.join(constants.DATA_DIR, sig + '.h5')
constants.BG_H5 = os.path.join(constants.DATA_DIR, bg + '.h5')
X_train, X_test, y_train, y_test, \
_, _, sig_metadata, \
bg_metadata, _ = get_train_test(n=n)
if os.path.isfile('../best_model/' + sig + '_vs_' + bg + '_model'):
model_name = sig + '_vs_' + bg
else:
model_name = bg + '_vs_' + sig
model = load_model('../best_model/' + model_name + '_model')
X_tests.append(X_test)
y_yests.append(y_test)
models.append(model)
model_types.append(True)
labels.append(model_name)
plot_n_roc_sic(name, 'final_curves/sic_'+name, X_tests, y_yests, models, model_types, labels, True, fontfac=0.5)
plot_n_roc_sic(name, 'final_curves/roc_'+name, X_tests, y_yests, models, model_types, labels, False, fontfac=0.5)
if __name__ == '__main__':
for i in range(len(datasets_s)):
comp_all(i)
|
StarcoderdataPython
|
1763626
|
<filename>grafeas/models/vulnerability_occurrences_summary_fixable_total_by_digest.py
# coding: utf-8
"""
grafeas.proto
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v1beta1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class VulnerabilityOccurrencesSummaryFixableTotalByDigest(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'resource': 'V1beta1Resource',
'severity': 'VulnerabilitySeverity',
'fixable_count': 'str',
'total_count': 'str'
}
attribute_map = {
'resource': 'resource',
'severity': 'severity',
'fixable_count': 'fixableCount',
'total_count': 'totalCount'
}
def __init__(self, resource=None, severity=None, fixable_count=None, total_count=None): # noqa: E501
"""VulnerabilityOccurrencesSummaryFixableTotalByDigest - a model defined in Swagger""" # noqa: E501
self._resource = None
self._severity = None
self._fixable_count = None
self._total_count = None
self.discriminator = None
if resource is not None:
self.resource = resource
if severity is not None:
self.severity = severity
if fixable_count is not None:
self.fixable_count = fixable_count
if total_count is not None:
self.total_count = total_count
@property
def resource(self):
"""Gets the resource of this VulnerabilityOccurrencesSummaryFixableTotalByDigest. # noqa: E501
The affected resource. # noqa: E501
:return: The resource of this VulnerabilityOccurrencesSummaryFixableTotalByDigest. # noqa: E501
:rtype: V1beta1Resource
"""
return self._resource
@resource.setter
def resource(self, resource):
"""Sets the resource of this VulnerabilityOccurrencesSummaryFixableTotalByDigest.
The affected resource. # noqa: E501
:param resource: The resource of this VulnerabilityOccurrencesSummaryFixableTotalByDigest. # noqa: E501
:type: V1beta1Resource
"""
self._resource = resource
@property
def severity(self):
"""Gets the severity of this VulnerabilityOccurrencesSummaryFixableTotalByDigest. # noqa: E501
The severity for this count. SEVERITY_UNSPECIFIED indicates total across all severities. # noqa: E501
:return: The severity of this VulnerabilityOccurrencesSummaryFixableTotalByDigest. # noqa: E501
:rtype: VulnerabilitySeverity
"""
return self._severity
@severity.setter
def severity(self, severity):
"""Sets the severity of this VulnerabilityOccurrencesSummaryFixableTotalByDigest.
The severity for this count. SEVERITY_UNSPECIFIED indicates total across all severities. # noqa: E501
:param severity: The severity of this VulnerabilityOccurrencesSummaryFixableTotalByDigest. # noqa: E501
:type: VulnerabilitySeverity
"""
self._severity = severity
@property
def fixable_count(self):
"""Gets the fixable_count of this VulnerabilityOccurrencesSummaryFixableTotalByDigest. # noqa: E501
The number of fixable vulnerabilities associated with this resource. # noqa: E501
:return: The fixable_count of this VulnerabilityOccurrencesSummaryFixableTotalByDigest. # noqa: E501
:rtype: str
"""
return self._fixable_count
@fixable_count.setter
def fixable_count(self, fixable_count):
"""Sets the fixable_count of this VulnerabilityOccurrencesSummaryFixableTotalByDigest.
The number of fixable vulnerabilities associated with this resource. # noqa: E501
:param fixable_count: The fixable_count of this VulnerabilityOccurrencesSummaryFixableTotalByDigest. # noqa: E501
:type: str
"""
self._fixable_count = fixable_count
@property
def total_count(self):
"""Gets the total_count of this VulnerabilityOccurrencesSummaryFixableTotalByDigest. # noqa: E501
The total number of vulnerabilities associated with this resource. # noqa: E501
:return: The total_count of this VulnerabilityOccurrencesSummaryFixableTotalByDigest. # noqa: E501
:rtype: str
"""
return self._total_count
@total_count.setter
def total_count(self, total_count):
"""Sets the total_count of this VulnerabilityOccurrencesSummaryFixableTotalByDigest.
The total number of vulnerabilities associated with this resource. # noqa: E501
:param total_count: The total_count of this VulnerabilityOccurrencesSummaryFixableTotalByDigest. # noqa: E501
:type: str
"""
self._total_count = total_count
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(VulnerabilityOccurrencesSummaryFixableTotalByDigest, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, VulnerabilityOccurrencesSummaryFixableTotalByDigest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
StarcoderdataPython
|
3395391
|
<filename>python_lambda_logging/lambda_logging.py
"""Lambda logging decorator to standarize logging."""
import logging
def setup_lambda_logger():
r"""
A utility function for configuring python logging for use in lambda functions using the format.
%(levelname)s RequestId: %(aws_request_id)s\t%(message)s\n
"""
logger = logging.getLogger()
for handler in logger.handlers:
logformat = '%(levelname)s RequestId: %(aws_request_id)s\t%(message)s\n'
handler.setFormatter(logging.Formatter(logformat))
logger.setLevel(logging.INFO)
return logger
def logged_handler(logger):
"""
A decorator that wraps a lambda_handler.
This logs the function name, event, return value and any exception if one is raised.
"""
def decorator(function):
def wrapper(*args, **kwargs):
event = args[0]
context = args[1]
function_arn = 'arn:unknown'
function_ver = 'ver:unknown'
try:
if context and hasattr(context, 'invoked_function_arn'):
function_arn = context.invoked_function_arn
if context and hasattr(context, 'function_version'):
function_ver = context.function_version
except TypeError:
pass
logger.info("Function: %s - %s", function_arn, function_ver)
if event:
logger.info("Event: %s", str(event))
try:
result = function(*args, **kwargs)
logger.info("Return Value: %s", str(result))
return result
except Exception:
if context and hasattr(context, 'invoked_function_arn'):
logger.error("There was an unexpected exception raised in %s", context.invoked_function_arn)
else:
logger.error("There was an unexpected exception raised")
raise
return wrapper
return decorator
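# A minimal sketch of wiring both helpers inside a Lambda module; the handler
# body is illustrative only:
#
#   logger = setup_lambda_logger()
#
#   @logged_handler(logger)
#   def lambda_handler(event, context):
#       return {"statusCode": 200}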
|
StarcoderdataPython
|
6484137
|
<gh_stars>1-10
import unittest
import utils
# Built-in string searching.
class Solution:
def rotateString(self, a, b):
"""
:type a: str
:type b: str
:rtype: bool
"""
if len(a) != len(b):
return False
a += a
# See CPython fast search
# https://github.com/python/cpython/blob/master/Objects/stringlib/fastsearch.h
return b in a
class Test(unittest.TestCase):
def test(self):
cases = utils.load_test_json(__file__).test_cases
for case in cases:
args = str(case.args)
actual = Solution().rotateString(**case.args.__dict__)
self.assertEqual(case.expected, actual, msg=args)
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
5023270
|
<filename>cogs/todo.py
import discord
from discord.ext import commands
doob_logo = "https://cdn.discordapp.com/avatars/680606346952966177/ada47c5940b5cf8f7e12f61eefecc610.webp?size=1024"
class todo(commands.Cog):
def __init__(self, client):
self.client = client
# Gives the todo list from GitHub.
@commands.command(aliases=['board', 'whatsnext', 'update'])
async def todo(self, ctx):
embed = discord.Embed(title="Here's the link for what's up next for Doob.", description="The Todo list for Doob.", colour=discord.Color.blue())
embed.add_field(name="GitHub Issue Board", value="https://github.com/mmatt625/doob/projects/1")
embed.set_thumbnail(url=doob_logo)
await ctx.send(embed=embed)
def setup(client):
client.add_cog(todo(client))
|
StarcoderdataPython
|
6530126
|
'''
Wrapper interface for the VDB Athena backend.
All vdb operations use the following environment variable overrides:
VDB_DB: your VDB database name (default: main, stored internally as vdb_main)
VDB_BUCKET: your VDB S3 bucket name (default: vdb-demo)
You may also optionally set AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, and
AWS_DEFAULT_REGION. This is not required if you are using vdb from an instance
deployed with a role with sufficient privileges.
'''
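# Connection sketch (hypothetical bucket name): the `connect` class defined below
# is the entry point; database and bucket fall back to the VDB_DB / VDB_BUCKET
# environment variables described above.
#
#   db = connect(database="main", bucket="my-vdb-bucket")
#   rows = db.query("SHOW TABLES;")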
import gzip
import os
import re
import uuid
from datetime import datetime
from multiprocessing import Pool
from pathlib import Path
from time import sleep, time
from types import SimpleNamespace
import pandas as pd
import orjson as json
import boto3
from pyathena.connection import Connection
from pyathena.pandas.async_cursor import AsyncPandasCursor
from biograph.utils import timestamp, typed, plural, confirm, chunked
from biograph.vdb import create_table_sql
from biograph.vdb.cache import fetch_from_cache, get_table_mtime, update_table_mtime, clear_table_mtime
from biograph.vdb.filter import parser, ParseException
from biograph.tools.refhash import refhash
from biograph.tools.log import debug, log, error
class connect: # pylint: disable=too-many-lines
'''
Wrapper for the Athena VDB backend
'''
def __init__(
self,
database=None,
bucket=None,
aws_region=None,
aws_access_key_id=os.environ.get('AWS_ACCESS_KEY_ID', None),
aws_secret_access_key=os.environ.get('AWS_SECRET_ACCESS_KEY', None),
allow_db_create=True
):
''' Set up the vdb connection '''
self.aws_region = aws_region or os.environ.get('AWS_DEFAULT_REGION', 'us-west-2')
os.environ['AWS_DEFAULT_REGION'] = self.aws_region
self.s3 = boto3.resource('s3')
self.athena = boto3.client('athena')
self.database = self.get_database_name(database or os.environ.get('VDB_DB', 'main'))
self.bucket = bucket or os.environ.get('VDB_BUCKET', 'vdb-demo')
self.bucket = self.bucket.rstrip('/')
# root should never equal 'meta' or 'data' as it confuses AWS
self.path = SimpleNamespace(
# path names for VCF data, stored under self.bucket/self.path.vcf.root/
vcf=SimpleNamespace(
root=Path(f'{self.database}/vcf'), # top level local path
meta=Path(f'{self.database}/vcf/headers'), # VCF headers and metadata path
data=Path(f'{self.database}/vcf/variants'), # VCF variants path
files=Path(f'{self.database}/vcf/files'), # raw VCF files
),
# path names for study data, stored under self.bucket/self.path.study.root/
study=SimpleNamespace(
root=Path(f'{self.database}/study'), # top level local path
meta=Path(f'{self.database}/study/meta'), # study metadata
data=Path(f'{self.database}/study/variants'), # VCF variants path
merged=Path(f'{self.database}/study/merged'), # optional merged path
frozen=Path(f'{self.database}/study/_frozen'), # frozen flag, _ files are ignored
header=Path('_header'), # merged header file (relative to current study)
export=Path('_export'), # export prefix (relative to current study)
),
# path names for annotations and metadata, stored under self.bucket/self.path.annotation.root/
anno=SimpleNamespace(
root=Path(f'{self.database}/annotations'), # top level local path
meta=Path(f'{self.database}/annotations/anno_meta'), # annotation metadata path
data=Path(f'{self.database}/annotations/anno'), # actual annotations path
files=Path(f'{self.database}/annotations/files'), # raw VCF files
),
# cache paths, stored under self.bucket/self.path.results.root/
results=SimpleNamespace(
root=Path(f'{self.database}/results'), # Results root
stage=Path(f'{self.database}/results/stage'), # Athena query results stage
cache=Path(f'{self.database}/results/cache'), # VDB query cache
mtime=Path(f'{self.database}/results/mtime'), # VDB partition modified times
),
ready=Path(f'{self.database}/_ready') # VDB is ready to go flag
)
# Athena table names
self.table = SimpleNamespace(
vcf=SimpleNamespace(
meta='headers', # one global headers table
data='variants', # one global variants table
),
anno=SimpleNamespace(
meta='anno_meta', # one global annotations metadata table
data='anno', # one global annotations data table
),
study=SimpleNamespace(
meta='study_meta', # study metadata
data='study_variants', # study variants
merged='study_merged', # optional merged table
),
)
self.cursor = Connection(
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
region_name=self.aws_region,
schema_name=self.database,
s3_staging_dir=f"s3://{self.bucket}/{self.path.results.stage}/",
cursor_class=AsyncPandasCursor
).cursor(max_workers=10)
# Create Athena tables if needed.
if allow_db_create:
self.create_tables()
# __init__() complete
# input validation methods
@staticmethod
def validate_aid(aid):
'''
If aid is a valid UUID, return it lowercased.
If aid is invalid, raise SystemExit.
'''
try:
return str(uuid.UUID(aid)).lower()
except (RuntimeError, ValueError):
raise SystemExit(f"Malformed aid '{aid}'. Must be of the form: {uuid.uuid4()}")
@staticmethod
def validate_study_name(study_name):
''' Ensure a well-formed study name '''
if len(study_name) > 64:
raise SystemExit('Study names must be <= 64 characters')
if not re.match(r'^[a-zA-Z0-9_]+$', study_name):
raise SystemExit(f"Study names may only consist of alphanumerics or _: '{study_name}'")
if study_name in ('meta', 'data'):
raise SystemExit(f"'{study_name}' is not a valid study name.")
return study_name
@staticmethod
def validate_sample_name(sample_name):
''' Ensure a well-formed sample name '''
if len(sample_name) > 64:
raise SystemExit('Sample names must be <= 64 characters')
if not re.match(r'^[a-zA-Z0-9_-]+$', sample_name):
raise SystemExit(f"Sample names may only consist of alphanumerics, -, or _: '{sample_name}'")
return sample_name
@staticmethod
def get_database_name(db_name):
''' Ensure a well-formed database name. If valid, db_name is returned with vdb_ prepended to it. '''
# Don't accidentally stack vdb_s
if db_name.lower().startswith('vdb_'):
db_name = db_name[4:]
# The actual max db name length is 64 characters, but leave room for vdb_
if len(db_name) > 60:
raise SystemExit('Database names must be <= 60 characters')
if not re.match(r'^[a-z0-9_-]+$', db_name):
raise SystemExit(f"Database names may only consist of lowercase letters, numbers, - or _: '{db_name}'")
if db_name.endswith(('-', '_')):
raise SystemExit(f"Database name must not end with - or _: '{db_name}'")
return f"vdb_{db_name}"
@staticmethod
def validate_wildcard(wildcard):
''' Ensure a reasonable wildcard '''
if len(wildcard) > 64:
raise SystemExit('Wildcards must be <= 64 characters')
if not re.match(r'^[a-zA-Z0-9*_-]+$', wildcard):
raise SystemExit(f"Wildcards may only consist of alphanumerics, -, _, or *: '{wildcard}'")
return wildcard
@staticmethod
def validate_format_field(format_field):
''' Ensure a well-formed format field '''
if len(format_field) > 64:
raise SystemExit('FORMAT fields must be <= 64 characters')
if not re.match(r'^[a-zA-Z0-9]+$', format_field):
raise SystemExit(f"FORMAT fields may only consist of alphanumerics: '{format_field}'")
return format_field
def quoted_sample_list(self, items):
''' Return a SQL friendly string quoting every sample name in a sequence '''
return ','.join([f"'{self.validate_sample_name(i)}'" for i in items])
def quoted_format_list(self, items):
''' Return a SQL friendly string quoting every format field in a sequence '''
return ','.join([f"'{self.validate_format_field(i)}'" for i in items])
def quoted_aid_list(self, items):
''' Return a SQL friendly string quoting all aids in a sequence '''
return ','.join([f"'{self.validate_aid(i)}'" for i in items])
@staticmethod
def scalar(result):
'''
        For db results that boil down to a single scalar, just return the single value.
        If there are no results, return None.
'''
if result:
return result[0][0]
return None
def query(self, query, params=None, cache=True, block=True):
''' Execute query_with_id and strip the query_id '''
return self.query_with_id(query, params, cache, block)[1]
def query_with_id(self, query, params=None, cache=True, block=True):
'''
Execute a query and return a tuple of (query_id, all_rows).
If cache == True, attempt to fetch from the cache, falling back a direct query
If block == False, immediately return (query_id, future)
'''
if cache:
return fetch_from_cache(self, query, params=params, block=block)
try:
query_id, future = self.cursor.execute(query, params)
if block:
result = future.result()
if result.state != 'SUCCEEDED':
raise SystemExit(result.state_change_reason)
return query_id, result.fetchall()
# async error handling is up to the caller
return query_id, future
except Exception as e:
raise SystemExit(f"Query failed:\n{query}\n{e}")
@staticmethod
def collect_futures(futures):
'''
Collect all results and pending query IDs.
Removes completed jobs from futures and requeues throttled requests.
'''
results = []
pending = []
for i, f in enumerate(futures):
query_id, future = f
if future.done():
futures.pop(i)
result = future.result()
if result.state != 'SUCCEEDED':
if any(err in result.state_change_reason for err in ['ThrottlingException', 'SlowDown']):
sleep(1)
pending.append(query_id)
else:
raise RuntimeError(result.state_change_reason)
results.append(result.fetchall())
sleep(0.1)
else:
pending.append(query_id)
return (results, pending)
def athena_query_status(self, pending):
'''
Return the status of a list of Athena jobs as a dict of { 'STATE': [ list of query ids ] }
'''
ret = {'QUEUED': [], 'RUNNING': [], 'SUCCEEDED': [], 'FAILED': [], 'CANCELLED': []}
status = self.athena.batch_get_query_execution(QueryExecutionIds=pending)
if status and 'QueryExecutions' in status:
for job in status['QueryExecutions']:
ret[job['Status']['State']].append(job['QueryExecutionId'])
return ret
def parallel_query(self, query, iterate_param, const_params=None, cache=True, parallel=10, complain_time=30):
'''
Run up to parallel queries at a time.
iterate_param is a dict with one key pointing to a list of param values to substitute.
        const_params is optional and is constant for every query.
        No attempt is made to do combinatorial expansion of iterate_param; it should contain
        only one key (e.g. 'chrom') pointing to a list of values.
Raises SystemExit on any query failure. Returns all results and blocks until done.
If complain_time seconds pass and queries are still queued on Athena, log it.
'''
merged_params = []
for k in iterate_param:
for v in iterate_param[k]:
if const_params:
merged_params.append({k: v, **const_params})
else:
merged_params.append({k: v})
results = []
futures = []
last_log = datetime.now()
for params in merged_params:
try:
futures.append(self.query_with_id(query, params=params, cache=cache, block=False))
except Exception as e:
raise RuntimeError(f"Query failed:\n{query}\n{e}")
# Don't let too many futures accumulate
while len(futures) >= parallel:
new_results, pending = self.collect_futures(futures)
for r in new_results:
results.append(r)
if pending:
if (datetime.now() - last_log).total_seconds() > complain_time:
last_log = datetime.now()
queued = len(self.athena_query_status(pending)['QUEUED'])
if queued:
log(f"{queued} job{plural(queued)} queued / {len(pending) - queued} running / {len(results)} completed / {len(merged_params) - len(results)} to go")
sleep(1)
while futures:
new_results, pending = self.collect_futures(futures)
for r in new_results:
results.append(r)
if pending:
if (datetime.now() - last_log).total_seconds() > complain_time:
last_log = datetime.now()
queued = len(self.athena_query_status(pending)['QUEUED'])
if queued:
log(f"{queued} job{plural(queued)} queued / {len(pending) - queued} running / {len(results)} completed / {len(merged_params) - len(results)} to go")
sleep(1)
return results
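    # Example call shape (hypothetical chrom list and aid), mirroring how this
    # class fans a per-chromosome query out elsewhere:
    #
    #   results = db.parallel_query(
    #       "SELECT count(*) FROM variants WHERE chrom = %(chrom)s AND aid = %(aid)s;",
    #       iterate_param={"chrom": ["1", "2", "X"]},
    #       const_params={"aid": "6bc81054-d0e6-4b2a-92d1-82dd2950a33d"},
    #   )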
def parallel_queries(self, queries, cache=True, parallel=10, complain_time=30):
'''
Run up to parallel queries at a time. No parameters are allowed.
This is useful for running several precomposed queries in parallel.
Raises SystemExit on any query failure. Returns all results and blocks until done.
If complain_time seconds pass and queries are still queued on Athena, log it.
'''
results = []
futures = []
last_log = datetime.now()
for query in queries:
try:
futures.append(self.query_with_id(query, cache=cache, block=False))
except Exception as e:
raise RuntimeError(f"Query failed:\n{query}\n{e}")
# Don't let too many futures accumulate
while len(futures) >= parallel:
new_results, pending = self.collect_futures(futures)
for r in new_results:
results.append(r)
if pending:
if (datetime.now() - last_log).total_seconds() > complain_time:
last_log = datetime.now()
queued = len(self.athena_query_status(pending)['QUEUED'])
if queued:
log(f"{queued} job{plural(queued)} queued / {len(pending) - queued} running / {len(results)} completed / {len(queries) - len(results)} to go")
sleep(1)
while futures:
new_results, pending = self.collect_futures(futures)
for r in new_results:
results.append(r)
if pending:
if (datetime.now() - last_log).total_seconds() > complain_time:
last_log = datetime.now()
queued = len(self.athena_query_status(pending)['QUEUED'])
if queued:
log(f"{queued} job{plural(queued)} queued / {len(pending) - queued} running / {len(results)} completed / {len(queries) - len(results)} to go")
sleep(1)
return results
def parallel_query_template(self, query_template, iterate_param, const_params=None, cache=True, parallel=10, complain_time=30):
'''
Run up to parallel queries at a time using a template.
iterate_param is a dict with a single k pointing to a list of values
to substitute and inject into the query_template using string
substitution. This is necessary for IN (list...) constructs where
simple parameter substition can't be used.
const_params is optional and are constant for every query.
No attempt is made to do combinatorial expansion of iterate_param; it should contain
only one key (eg. 'chrom') pointing to a list of values.
Raises SystemExit on any query failure. Returns all results and blocks until done.
If complain_time seconds pass and queries are still queued on Athena, log it.
'''
queries = []
for k in iterate_param:
for v in iterate_param[k]:
if isinstance(v, str):
queries.append(query_template.replace(f"%({k})s", v))
                elif isinstance(v, float):
                    queries.append(query_template.replace(f"%({k})f", str(v)))
                elif isinstance(v, int):
                    queries.append(query_template.replace(f"%({k})d", str(v)))
else:
raise SystemExit(f"parallel_query_template: Unknown type: {v}")
results = []
futures = []
last_log = datetime.now()
for query in queries:
try:
futures.append(self.query_with_id(query, params=const_params, cache=cache, block=False))
except Exception as e:
raise RuntimeError(f"Query failed:\n{query}\n{e}")
# Don't let too many futures accumulate
while len(futures) >= parallel:
new_results, pending = self.collect_futures(futures)
for r in new_results:
results.append(r)
if pending:
if (datetime.now() - last_log).total_seconds() > complain_time:
last_log = datetime.now()
queued = len(self.athena_query_status(pending)['QUEUED'])
if queued:
log(f"{queued} job{plural(queued)} queued / {len(pending) - queued} running / {len(results)} completed / {len(queries) - len(results)} to go")
sleep(1)
while futures:
new_results, pending = self.collect_futures(futures)
for r in new_results:
results.append(r)
if pending:
if (datetime.now() - last_log).total_seconds() > complain_time:
last_log = datetime.now()
queued = len(self.athena_query_status(pending)['QUEUED'])
if queued:
log(f"{queued} job{plural(queued)} queued / {len(pending) - queued} running / {len(results)} completed / {len(queries) - len(results)} to go")
sleep(1)
return results
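    # Example call shape (hypothetical sample names): plain string substitution is
    # used here because an IN (...) list cannot be expressed with bound parameters.
    #
    #   results = db.parallel_query_template(
    #       "SELECT aid FROM headers WHERE sample_name IN (%(names)s);",
    #       iterate_param={"names": ["'HG002','HG003'", "'HG004'"]},
    #   )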
def query_pandas(self, query, params=None, cache=True, block=True):
''' Execute query_pandas_with_id and strip the query_id '''
return self.query_pandas_with_id(query, params, cache, block)[1]
def query_pandas_with_id(self, query, params=None, cache=True, block=True):
'''
Execute a query and return the entire result as a pandas dataframe.
Note: this bypasses the database fetch and uses the CSV result directly,
which is faster for larger query results that still fit in memory, but
introduces the overhead of converting to pandas.
Fetch from the cache if cache == True.
'''
if cache:
return fetch_from_cache(self, query, params=params, output='pandas', block=block)
try:
query_id, future = self.cursor.execute(query, params)
if not block:
return query_id, future
return query_id, future.result().as_pandas()
except Exception as e:
raise SystemExit(f"Query failed:\n{query}\n{e}")
def query_fetch_csv(self, query, out_csv, params=None, cache=True, block=True):
''' Execute query_fetch_csv and strip the query_id '''
return self.query_fetch_csv_with_id(query, out_csv, params, cache, block)[1]
def query_fetch_csv_with_id(self, query, out_csv, params=None, cache=True, block=True):
'''
Execute a query and save the results to csv.
Fetch from the cache if cache == True.
'''
if cache:
debug(f"cache == {cache}, fetch_from_cache({query}, {params}, {out_csv}, {block})")
return fetch_from_cache(self, query, params=params, output=out_csv, block=block)
try:
debug(f"cursor.execute({query}, {params}")
query_id, future = self.cursor.execute(query, params)
if not block:
return query_id, future
future.result()
self.fetch_result_to_csv(query_id, out_csv)
except Exception as e:
raise SystemExit(f"Query failed:\n{query}\n{e}")
return query_id, out_csv
def fetch_result_to_csv(self, query_id, out_csv):
''' Fetch Athena CSV results to a file '''
debug(f"{self.path.results.stage}/{query_id}.csv to {out_csv}")
return query_id, self.download_file(f"{self.path.results.stage}/{query_id}.csv", out_csv)
def upload_to_s3(self, local_file, dest_path):
''' Upload a single file to S3 '''
self.s3.meta.client.upload_file(
Filename=str(local_file),
Bucket=self.bucket,
Key=str(dest_path)
)
@staticmethod
def upload_to_s3_parallel(args):
''' Helper to allow parallel upload via Pool.map() '''
boto3.resource('s3').meta.client.upload_file(
Filename=args['local_file'],
Bucket=args['bucket'],
Key=args['dest_path']
)
@staticmethod
def s3_cp_parallel(args):
''' Helper to allow parallel copy via Pool.map() '''
boto3.resource('s3').meta.client.copy(
{'Bucket': args['bucket'], 'Key': args['src_path']},
args['bucket'],
args['dest_path']
)
def sync_to_s3(self, local_path, dest_path=None, parallel=True):
''' Recursively copy the contents of a local path to S3 '''
if not dest_path:
dest_path = self.path.vcf.root
skip = len(str(local_path)) + 1
queue = []
for (path, _, files) in os.walk(local_path):
for f in files:
queue.append({
"local_file": str(Path(path) / f),
"dest_path": str(Path(dest_path) / path[skip:] / f),
"bucket": self.bucket
})
if parallel:
with Pool() as p:
p.map(connect.upload_to_s3_parallel, queue)
else:
for t in queue:
self.upload_to_s3(t["local_file"], t["dest_path"])
def download_s3_path(self, s3_path, out_path):
'''
Download an arbitrary s3_path from any bucket. Returns the local
filename saved under out_path.
'''
parts = Path(s3_path).parts
out_file = str(Path(out_path) / parts[-1])
self.s3.meta.client.download_file(
Bucket=parts[1],
Key='/'.join(parts[2:]),
Filename=out_file
)
return out_file
def download_file(self, prefix, out, full_path=False):
'''
Download an arbitrary prefix from the current s3 bucket to out.
If out is a directory, save to the original filename in that location.
If full_path is True, save the whole prefix locally, creating directories as needed.
'''
prefix = Path(prefix)
out_path = Path(out)
if out_path.is_dir():
if full_path:
out = out_path / prefix
out.parent.mkdir(parents=True, exist_ok=True)
else:
out = out_path / prefix.parts[-1]
debug(f"{prefix} -> {out}")
self.s3.meta.client.download_file(
Bucket=self.bucket,
Key=str(prefix),
Filename=str(out)
)
return out
def download_fileobj(self, prefix, out):
''' Download an arbitrary prefix from s3 to a filehandle '''
self.s3.meta.client.download_fileobj(
Bucket=self.bucket,
Key=prefix,
Fileobj=out
)
return out
def download_gz_fh(self, prefix):
''' Download an arbitrary prefix from s3 and return an open filehandle, gzip on the fly '''
return gzip.GzipFile(fileobj=self.s3.Object(self.bucket, prefix).get()["Body"])
def download_aid(self, aid, out, dest_path, full_path=False):
''' Download an aid from s3 to local '''
obj = self.ls(str(dest_path), f"aid={aid}")
if obj:
for f in obj:
self.download_file(f, str(out), full_path)
else:
raise SystemExit(f"Could not find aid {aid}")
def ls(self, prefix, filt=None):
''' List everything at a prefix with an optional filter '''
objects = []
for obj in self.s3.Bucket(self.bucket).objects.filter(Prefix=str(prefix)):
if filt is None or str(filt) in obj.key:
objects.append(obj.key)
return objects
def s3_path_exists(self, prefix):
''' Returns True if any object with the given prefix exists, otherwise False. '''
return bool(list(self.s3.Bucket(self.bucket).objects.filter(Prefix=prefix).limit(1)))
def s3_rm_recursive(self, prefix, filt=None):
''' Remove all objects matching the prefix '''
prefix = str(prefix)
if not prefix or len(prefix) < 3:
raise SystemExit(f'Refusing s3_rm_recursive() without a proper prefix ({prefix})')
if filt:
count = 0
for obj in self.s3.Bucket(self.bucket).objects.filter(Prefix=str(prefix)):
if filt in obj.key:
obj.delete()
count += 1
return count
# This method is much faster when the exact prefix is known
resp = self.s3.Bucket(self.bucket).objects.filter(Prefix=str(prefix)).delete()
if resp:
return len(resp[0]['Deleted'])
return 0
def s3_cp_recursive(self, src_prefix, dest_prefix):
''' Recursively copy all objects from src_prefix to dest_prefix '''
count = 0
for obj in self.ls(src_prefix):
count += 1
self.s3.meta.client.copy(
{'Bucket': self.bucket, 'Key': obj},
self.bucket,
f"{dest_prefix.rstrip('/')}/{obj[len(src_prefix):]}"
)
return count
def s3_cp_aid(self, aid, dest_prefix):
'''
Copy an aid to a new prefix. Similar to s3_cp_recursive() but drops the build=.../ partition.
vdb_rob/vcf/variants/sample_name=VDB004/build=GRCh37/aid=xxx/yyy.parquet
-> vdb_rob/vcf/variants/sample_name=VDB004/aid=xxx/yyy.parquet
'''
count = 0
queue = []
for obj in self.ls(self.path.vcf.data, f"/aid={aid}"):
count += 1
dest_obj = Path(obj)
src_len = len(self.path.vcf.data.parts)
queue.append(
{
'bucket': self.bucket,
'src_path': obj,
'dest_path': f"{dest_prefix.rstrip('/')}/{'/'.join(dest_obj.parts[src_len:src_len+1] + dest_obj.parts[src_len+2:])}"
}
)
with Pool() as p:
p.map(connect.s3_cp_parallel, queue)
return count
def delete_aid(self, aids, aid_type="vcf"):
''' Delete an aid and drop relevant database partitions '''
if aid_type == "vcf":
s3_path = self.path.vcf.root
tables = (self.table.vcf.meta, self.table.vcf.data)
elif aid_type == "anno":
s3_path = self.path.anno.root
tables = (self.table.anno.meta, self.table.anno.data)
else:
raise SystemExit(f"Unknown aid_type: {aid_type}")
if isinstance(aids, str):
aids = [aids]
for aid in aids:
self.validate_aid(aid)
log(f"Deleting {aid_type} data")
for aid in aids:
if not self.s3_rm_recursive(s3_path, f"/aid={aid}"):
if len(aids) == 1:
raise SystemExit(f"No aid found in {aid_type}: {aid}")
# If bulk deleting, just complain
log(f"No such aid: {aid}")
continue
debug(f"Dropping partitions")
for table in tables:
update_table_mtime(self, table)
self.parallel_query(
f"ALTER TABLE `{table}` DROP IF EXISTS PARTITION (aid=%(aid)s);",
iterate_param={'aid': aids}
)
@staticmethod
def get_study_path(study_name, base):
''' Return the correct s3 path for a given study and base path '''
return str(base / Path(f'study_name={study_name}'))
def create_tables(self):
''' Create all necessary VDB tables '''
db_exists = self.database in [d[0] for d in self.query("SHOW DATABASES;")]
if db_exists:
if self.ls(self.path.ready):
return
if not confirm(f"VDB database {self.database} does not exist in AWS region {self.aws_region}. Create it? n/Y: ", default=True):
raise SystemExit("Aborted.")
log(f"Initializing new VDB '{self.database}' at s3://{self.bucket}/{self.database}/")
self.query(f"CREATE DATABASE IF NOT EXISTS `{self.database}`;")
for table, s3path in (
(self.table.vcf.data, self.path.vcf.data),
(self.table.vcf.meta, self.path.vcf.meta),
(self.table.study.data, self.path.study.data),
(self.table.study.meta, self.path.study.meta),
(self.table.study.merged, self.path.study.merged),
(self.table.anno.data, self.path.anno.data),
(self.table.anno.meta, self.path.anno.meta)
):
update_table_mtime(self, table)
self.query(
create_table_sql(table),
params={
"location": f"s3://{self.bucket}/{s3path}/"
}
)
self.s3.Object(self.bucket, str(self.path.ready)).put(
Body=json.dumps({"tables_created_on": timestamp()})
)
def get_annotation_query(self, study_name, anno):
'''
Return a query suffix for annotations, or None if no annotation is requested.
TODO: expand this to include anno version and optional aid
'''
if not anno:
return None
aid = self.query(
f"""
SELECT am.aid
FROM {self.table.anno.meta} AS am, {self.table.study.meta} AS sm
WHERE
am.anno_name = %(anno)s
AND sm.key = 'build'
AND am.build = sm.value
AND study_name = %(study_name)s
;
""",
params={"study_name": study_name, "anno": anno}
)
if not aid:
raise SystemExit(f"There is no annotation named {anno} with a matching reference build.")
if len(aid) > 1:
raise SystemExit(f"There are multiple matching annotations for {anno}. Please specify a version or aid.")
if anno == "Ensembl":
return f"a.aid = '{self.scalar(aid)}' AND a.feature = 'gene'"
return f"a.aid = '{self.scalar(aid)}'"
def get_current_study_checkpoint(self, study_name):
''' Get the most recent checkpoint for this study '''
self.assert_study_exists(study_name)
checkpoint = self.scalar(self.query(
f"SELECT max(checkpoint) FROM {self.table.study.data} WHERE study_name = %(study_name)s ;",
params={"study_name": study_name}
))
if pd.isna(checkpoint):
return 0
return checkpoint
def get_study_chroms(self, study_name, checkpoint):
''' Fetch all chroms in a study at a given checkpoint '''
return [chrom[0] for chrom in self.query(
f"""
SELECT DISTINCT(chrom)
FROM {self.table.study.data}
WHERE study_name = %(study_name)s
AND checkpoint = %(checkpoint)d
ORDER BY chrom
;
""",
params={"study_name": study_name, "checkpoint": checkpoint}
)]
@staticmethod
def get_merge_partition(study_name, checkpoint):
''' Return the partition to be used for merged variants '''
return f"study_name={study_name}/checkpoint={checkpoint}"
def merge_study(self, study_name, force_merge=False, anno_name=None, checkpoint=None, square_off=None, format_fields=None): # pylint: disable=too-many-statements
''' Merge all samples in this study at the given checkpoint '''
self.assert_study_is_unfrozen(study_name)
if checkpoint is None:
checkpoint = self.get_current_study_checkpoint(study_name)
elif checkpoint > self.get_current_study_checkpoint(study_name):
raise SystemExit(f"Requested checkpoint {checkpoint} does not exist in study {study_name}.")
count = self.scalar(
self.query(
f"SELECT count(*) FROM {self.table.study.data} WHERE study_name = %(study_name)s AND checkpoint = %(checkpoint)d ;",
params={"study_name": study_name, "checkpoint": checkpoint}
)
)
if not count:
raise SystemExit(f"No variants found in study {study_name}")
if format_fields:
# unique fields only
format_fields = set(format_fields)
# GT is mandatory
format_fields.add('GT')
partition = self.get_merge_partition(study_name, checkpoint)
if square_off:
if format_fields:
samples = f"""
transform_values(
map(array['{square_off}'], array[element_at(m.samples, '{square_off}')]),
(k, v) -> map_filter(v, (k, v) -> k in ({self.quoted_format_list(format_fields)}) )
)
"""
else:
samples = f"map(array['{square_off}'], array[element_at(m.samples, '{square_off}')])"
export_location = f"{self.path.study.merged}/{self.path.study.export}/{partition}/_sample_name={square_off}/"
else:
if format_fields:
samples = f"transform_values(m.samples, (k, v) -> map_filter(v, (k, v) -> k in ({self.quoted_format_list(format_fields)})))"
else:
samples = "m.samples"
export_location = f"{self.path.study.merged}/{self.path.study.export}/{partition}/_sample_name=ALL_SAMPLES/"
# annotations are exported separately
annotation_query = self.get_annotation_query(study_name, anno_name)
if annotation_query:
export_location = f"{export_location}_anno={anno_name}/"
# Only need to merge once for each checkpoint, since they can't be changed.
if force_merge or not self.s3_path_exists(f"{self.path.study.merged}/{partition}"):
self.s3_rm_recursive(f"{self.path.study.merged}/{partition}/")
self.s3_rm_recursive(export_location, partition)
log(f"Merging variants for checkpoint {checkpoint}")
self.merge_study_variants(study_name, checkpoint)
header_path = f"{self.path.study.merged}/{self.path.study.export}/{partition}/{self.path.study.header}"
if force_merge or not self.s3_path_exists(header_path):
log("Merging headers")
self.merge_study_headers(study_name, checkpoint, header_path)
# Reuse the existing export if possible
if self.s3_path_exists(export_location):
log("No updates since the previous export, reusing existing merge.")
return (header_path, export_location)
chroms = self.get_study_chroms(study_name, checkpoint)
debug(chroms)
# Create a temporary TSV table
table_id = f"{study_name}_merge_{str(hex(int(time()*10000000))[8:])}"
self.query(f"DROP TABLE IF EXISTS {table_id};")
self.query(
f"""
CREATE EXTERNAL TABLE {table_id} (
`pos` bigint,
`varid` string,
`ref` string,
`alt` string,
`qual` float,
`filt` string,
`info` string,
`samples` string
)
PARTITIONED BY (`chrom` STRING)
ROW FORMAT DELIMITED
FIELDS TERMINATED BY '\\t'
ESCAPED BY '\\\\'
LINES TERMINATED BY '\\n'
NULL DEFINED AS ''
LOCATION %(location)s
;
""",
params={
"location": f"s3://{self.bucket}/{export_location}",
}
)
if annotation_query:
log(f"Annotating variants with {anno_name}")
# partition on chrom, samples are JSON
self.parallel_query(
f"""
INSERT INTO "{table_id}"
(chrom, pos, varid, ref, alt, qual, filt, info, samples)
WITH annospan AS
(
SELECT anno.*, spanpos
FROM anno
CROSS JOIN unnest(sequence(pos/100, varend/100)) span(spanpos)
WHERE pos <= 999999999
AND varend >= 0
AND chrom = %(chrom)s
)
SELECT
m.chrom AS chrom
,m.pos + 1 AS pos
,array_join(array_agg(a.varid),';') AS varid
,m.ref AS ref
,m.alt AS alt
,arbitrary(m.qual) AS qual
,arbitrary(array_join(m.filters, ';')) AS filt
,arbitrary(array_join(zip_with(map_keys(m.infos), map_values(m.infos), (v1, v2) -> concat(v1, '=', v2)), ';')) AS info
,arbitrary(json_format(cast({samples} AS JSON))) AS samples
FROM
{self.table.study.merged} as m
LEFT JOIN annospan AS a
ON {annotation_query}
AND m.chrom = a.chrom
AND m.pos >= a.pos
AND m.varend <= a.varend
AND m.pos / 100 = a.spanpos
WHERE
m.chrom = %(chrom)s
AND study_name = %(study_name)s
AND checkpoint = %(checkpoint)d
AND m.pos BETWEEN 0 AND 999999999
GROUP BY
m.chrom, m.pos, m.ref, m.alt
;
""",
iterate_param={"chrom": chroms},
const_params={"study_name": study_name, "checkpoint": checkpoint},
)
else:
# partition on chrom, samples are JSON
log(f"Writing variants")
self.parallel_query(
f"""
INSERT INTO "{table_id}"
(chrom, pos, ref, alt, qual, filt, info, samples)
SELECT
m.chrom,
m.pos + 1,
m.ref,
m.alt,
m.qual,
array_join(m.filters, ';'),
array_join(zip_with(map_keys(m.infos), map_values(m.infos), (v1, v2) -> concat(v1, '=', v2)), ';'),
json_format(CAST({samples} AS json))
FROM
{self.table.study.merged} m
WHERE
chrom = %(chrom)s
AND study_name = %(study_name)s
AND checkpoint = %(checkpoint)d
;
""",
iterate_param={"chrom": chroms},
const_params={"study_name": study_name, "checkpoint": checkpoint}
)
# The table only exists to generate the TSV, so drop it.
self.query(f"DROP TABLE IF EXISTS {table_id};")
return (header_path, export_location)
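    # Example call shape (hypothetical study/sample/annotation names): merge the
    # latest checkpoint, annotate, and square off to a single sample while keeping
    # only the GT and DP per-sample fields.
    #
    #   header_path, export_location = db.merge_study(
    #       "my_study", anno_name="Ensembl", square_off="HG002",
    #       format_fields=["GT", "DP"],
    #   )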
def merge_study_headers(self, study_name, checkpoint, header_path):
'''
Merge VCF headers in the given study and save to header_path.
Returns the merged header up to (but not including) the sample column names.
'''
study_headers = self.query(
f"""
SELECT refhash, header FROM {self.table.vcf.meta} WHERE aid IN (
SELECT DISTINCT aid
FROM {self.table.study.data}
WHERE study_name = %(study_name)s
AND checkpoint = %(checkpoint)d
)
;
""",
params={
"study_name": study_name, "checkpoint": checkpoint
}
)
contigs = []
for line in study_headers[0][1].splitlines():
if line.startswith('##contig='):
contigs.append(line)
headers = []
the_date = datetime.today()
headers.append(f'##fileformat=VCFv4.1')
headers.append(f'##fileDate={the_date.year}{the_date.month:02}{the_date.day:02}')
headers.append(f'''##source="Spiral Genetics VDB",description="biograph vdb study export {study_name} --checkpoint {checkpoint}"''')
study_meta = self.get_metadata_from_study(study_name)
for chkpt in [c for c in sorted(study_meta) if c.startswith('checkpoint ')]:
_, rev = chkpt.split()
if int(rev) > checkpoint:
continue
headers.append(f'''##checkpoint="{chkpt}: {study_meta[chkpt]}"''')
# contigs must appear in the original order
for contig in contigs:
headers.append(contig)
# Merge all INFO, FORMAT, FILTER, and ALT lines
old_headers = set()
for header in study_headers:
for line in header[1].splitlines():
if line.startswith(('##source=', '##contig=', '##fileDate=', '##fileformat=', '#CHROM')):
continue
old_headers.add(line)
# Add in computed fields
old_headers.add('##INFO=<ID=N_MISS,Number=1,Type=Integer,Description="Number of samples missing this variant">')
old_headers.add('##INFO=<ID=F_MISS,Number=1,Type=Float,Description="Fraction of samples missing this variant">')
for line in sorted(old_headers):
headers.append(line)
headers.append(
f'#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\t'
)
header = '\n'.join(headers)
self.s3.Object(self.bucket, header_path).put(
Body=header
)
return header
def merge_study_variants(self, study_name, checkpoint):
''' Merge variants in a study. This only needs to be done once per checkpoint. '''
partition = self.get_merge_partition(study_name, checkpoint)
self.s3_rm_recursive(f"{self.table.study.merged}/{partition}")
update_table_mtime(self, self.table.study.merged, partition=partition)
self.query(f"ALTER TABLE {self.table.study.merged} DROP IF EXISTS PARTITION (study_name='{study_name}', checkpoint={checkpoint});")
study_variants_mtime = get_table_mtime(self, self.table.study.data, auto_update=True, partition=partition)
# INFO['N_MISS'] == number of individuals missing this variant
# INFO['F_MISS'] == N_MISS / samples_in_study
samples_in_study = self.scalar(self.query(
f"""
SELECT COUNT(DISTINCT sample_name)
FROM {self.table.study.data}
WHERE study_name = %(study_name)s
AND checkpoint = %(checkpoint)d
;
""",
params={"study_name": study_name, "checkpoint": checkpoint}
))
chroms = self.get_study_chroms(study_name, checkpoint)
update_table_mtime(self, self.table.study.merged, partition=partition, ts=study_variants_mtime)
self.query(f"""ALTER TABLE {self.table.study.merged} ADD IF NOT EXISTS PARTITION (study_name='{study_name}', checkpoint={checkpoint});""")
# QUAL is the maximum value for any sample
# FILT is a sorted array of distinct filter entries from all samples
# INFO is an aggregate of all unique info map entries from all samples
# with the proper NS count and N_MISS + F_MISS added
# FORMAT does not exist; it is later derived from SAMPLES
# SAMPLES is a map of sample names to all sample fields
#
# Counting NS as count(DISTINCT(sample_name)) instead of count(sample)
# is necessary since multiple calls at the same site in the same
# individual would be counted multiple times, but subsequently collapsed
# into a single sample entry.
the_query = f"""
INSERT INTO "{self.table.study.merged}"
(spans, reflen, chrom, pos, varend, varid, ref, alt, qual, filters, infos, samples, study_name, checkpoint)
SELECT
arbitrary(spans),
arbitrary(reflen),
arbitrary(chrom),
arbitrary(pos),
arbitrary(varend),
arbitrary(varid),
arbitrary(ref),
arbitrary(alt),
max(qual),
array_sort(array_distinct(array_agg(filt))),
map_concat(
map_union(map_filter(info, (k, v) -> k != 'NS')),
map(ARRAY['NS'], ARRAY[CAST(count(DISTINCT(sample_name)) AS VARCHAR)]),
map(
ARRAY['N_MISS', 'F_MISS'],
ARRAY[
CAST({samples_in_study} - count(DISTINCT(sample_name)) AS VARCHAR),
CAST(round(({samples_in_study} - count(DISTINCT(sample_name))) / {samples_in_study}.0, 5) AS VARCHAR)
]
)
),
map_agg(sample_name, sample),
%(study_name)s,
%(checkpoint)d
FROM
{self.table.study.data}
WHERE
chrom = %(chrom)s
AND study_name = %(study_name)s
AND checkpoint = %(checkpoint)d
GROUP BY
chrom, pos, ref, alt
"""
# Run the chroms in parallel
self.parallel_query(
the_query,
iterate_param={"chrom": chroms},
const_params={"study_name": study_name, "checkpoint": checkpoint},
)
update_table_mtime(self, self.table.study.merged, partition=partition, ts=study_variants_mtime)
def study_freeze(self, study_name):
''' Freeze a study '''
self.assert_study_exists(study_name)
self.s3.Object(self.bucket, f"{self.get_study_path(study_name, self.path.study.meta)}/{self.path.study.frozen}").put(
Body=json.dumps({"frozen_on": timestamp()})
)
def study_unfreeze(self, study_name):
''' Unfreeze a study '''
self.assert_study_exists(study_name)
self.s3.Object(self.bucket, f"{self.get_study_path(study_name, self.path.study.meta)}/{self.path.study.frozen}").delete()
def study_is_frozen(self, study_name):
''' Check if a study is frozen '''
self.assert_study_exists(study_name)
return self.s3_path_exists(f"{self.get_study_path(study_name, self.path.study.meta)}/{self.path.study.frozen}")
def assert_study_is_unfrozen(self, study_name):
''' Raise if the current study is frozen '''
if self.study_is_frozen(study_name):
raise SystemExit(f"The study '{study_name}' is frozen and cannot be altered.")
def study_exists(self, study_name):
''' Returns True if study exists, otherwise False. '''
return self.s3_path_exists(f"{self.get_study_path(self.validate_study_name(study_name), self.path.study.meta)}/")
def assert_study_exists(self, study_name, must_exist=True):
''' Check if a study exists. If must_exist is True or False, raise if the expectation is not met. '''
exists = self.study_exists(study_name)
if must_exist == exists:
return exists
if must_exist:
raise SystemExit(f"No such study '{study_name}'.")
raise SystemExit(f"Study '{study_name}' already exists.")
def create_study(self, study_name):
        ''' Create a new study. Raises SystemExit if a study with this name already exists. '''
self.assert_study_exists(study_name, must_exist=False)
update_table_mtime(self, self.table.study.meta)
self.add_metadata_to_study(study_name, 'created_on', timestamp(), 'timestamp')
def add_metadata_to_study(self, study_name, key, value, dtype='str'):
''' Create a new tiny tsv for this metadata entry '''
partition = f"study_name={study_name}"
update_table_mtime(self, self.table.study.meta, partition=partition)
self.s3.Object(self.bucket, f"{self.get_study_path(study_name, self.path.study.meta)}/{key}").put(
Body=f'{key}\t{value}\t{dtype}\n'
)
self.query(f"ALTER TABLE `{self.table.study.meta}` ADD IF NOT EXISTS PARTITION (study_name='{study_name}');")
def remove_metadata_from_study(self, study_name, key):
''' Delete a metadata entry '''
partition = f"study_name={study_name}"
update_table_mtime(self, self.table.study.meta, partition=partition)
self.s3.Object(self.bucket, f"{self.get_study_path(study_name, self.path.study.meta)}/{key}").delete()
def get_metadata_from_study(self, study_name, key=None):
'''
Get all metadata from a study as a dict.
If key is specified, return only the value for that key.
'''
if key:
debug(f"query {study_name} for {key}")
row = self.query(
f"""
SELECT value, dtype
FROM {self.table.study.meta}
WHERE study_name = %(study_name)s
AND key = %(key)s
;
""",
params={"study_name": study_name, "key": key},
)
if not row:
debug(f"{key} not found")
return {}
value, dtype = row[0]
debug(f"{typed(dtype, value)}")
return typed(dtype, value)
ret = {}
debug(f"query {study_name}")
for row in self.query(
f"""
SELECT key, value, dtype
FROM {self.table.study.meta}
WHERE study_name = %(study_name)s
;
""",
params={"study_name": study_name},
):
key, value, dtype = row
ret[key] = typed(dtype, value)
debug(ret)
return ret
def get_study_sample_names(self, study_name, checkpoint=None):
'''
Return a sorted list of all sample names from the latest checkpoint for this study
'''
if checkpoint is None:
checkpoint = self.get_current_study_checkpoint(study_name)
return [
sample_name[0] for sample_name in self.query(
f"""
SELECT DISTINCT sample_name
FROM {self.table.study.data}
WHERE study_name = %(study_name)s
AND checkpoint = %(checkpoint)d
ORDER BY sample_name ASC
;
""",
params={"study_name": study_name, "checkpoint": checkpoint}
)
]
def delete_study(self, study_name, checkpoint=None):
'''
Remove all objects under the given study prefix and drop all associated tables.
If checkpoint is given, only delete the given checkpoint.
Returns the number of objects deleted from S3.
'''
self.assert_study_is_unfrozen(study_name)
if checkpoint:
partition = f"study_name={study_name}/checkpoint={checkpoint}"
if not (self.get_metadata_from_study(study_name, f"checkpoint {checkpoint}") or self.s3_path_exists(f"{self.path.study.data}/{partition}")):
raise SystemExit(f"No such checkpoint {checkpoint} for study {study_name}")
log(f"Removing S3 data for '{study_name}' at checkpoint {checkpoint}")
else:
partition = f"study_name={study_name}"
log(f"Removing S3 data for '{study_name}'")
# NOTE: trailing / is important here so we don't delete studies with the same name prefix
count = self.s3_rm_recursive(self.path.study.root, f"/{partition}/")
# drop the tables
if checkpoint:
log(f"Dropping SQL partitions for '{study_name}' at checkpoint {checkpoint}")
for table in (self.table.study.meta, self.table.study.data, self.table.study.merged):
update_table_mtime(self, table)
update_table_mtime(self, table, partition=f"study_name={study_name}")
clear_table_mtime(self, table, partition=partition)
for table in (self.table.study.data, self.table.study.merged):
self.query(
f"ALTER TABLE `{table}` DROP IF EXISTS PARTITION (study_name=%(study_name)s, checkpoint=%(checkpoint)d);",
params={"study_name": study_name, "checkpoint": checkpoint}
)
self.remove_metadata_from_study(study_name, f"checkpoint {checkpoint}")
else:
log(f"Dropping SQL partitions for '{study_name}'")
for table in (self.table.study.meta, self.table.study.data, self.table.study.merged):
update_table_mtime(self, table)
update_table_mtime(self, table, partition=partition)
self.query(f"ALTER TABLE `{table}` DROP IF EXISTS PARTITION (study_name='{study_name}');")
return count
def find_all_matching_vcf_aids(self, items):
'''
Turn a list of potentially redundant aids, sample names, and wildcard
globs into a sorted list of (sample_name, [aids]) tuples for matching VCFs.
'''
debug(items)
if not isinstance(items, list):
items = [items]
aids = set()
samples = set()
wildcards = set()
for item in items:
try:
aids.add(self.validate_aid(item))
except (SystemExit, ValueError):
if '*' in item:
wildcards.add(self.validate_wildcard(item))
else:
samples.add(self.validate_sample_name(item))
if samples:
for aid in self.query(f"SELECT aid FROM {self.table.vcf.meta} WHERE sample_name IN ({self.quoted_sample_list(samples)}) ;"):
aids.add(aid[0])
if wildcards:
reply = self.parallel_query(
f"SELECT aid FROM {self.table.vcf.meta} WHERE sample_name LIKE %(wildcard)s ;",
iterate_param={"wildcard": [wc.replace('*', '%') for wc in wildcards]}
)
for r in reply:
for aid in r:
if not aid:
continue
aids.add(aid[0])
debug("samples:", samples)
debug("wildcards:", wildcards)
debug("aids:", aids)
if not aids:
return None
return self.query(
f"""
SELECT sample_name, cast(array_agg(DISTINCT(aid)) AS JSON)
FROM {self.table.vcf.meta}
WHERE aid IN ({self.quoted_aid_list(aids)})
GROUP BY sample_name
ORDER BY sample_name ASC
;
""")
def find_all_matching_study_aids(self, study_name, checkpoint, items):
'''
Turn a list of potentially redundant aids, sample names, and wildcard
globs into a sorted list of (sample_name, [aids]) tuples for matching
study variants.
'''
debug(items)
if not isinstance(items, list):
items = [items]
aids = set()
samples = set()
wildcards = set()
for item in items:
try:
aids.add(self.validate_aid(item))
except (SystemExit, ValueError):
if '*' in item:
wildcards.add(self.validate_wildcard(item))
else:
samples.add(self.validate_sample_name(item))
if samples:
for aid in self.query(
f"""
SELECT DISTINCT(aid)
FROM {self.table.study.data}
WHERE study_name = %(study_name)s
AND checkpoint = %(checkpoint)d
AND sample_name IN ({self.quoted_sample_list(samples)})
;
""",
params={"study_name": study_name, "checkpoint": checkpoint}):
aids.add(aid[0])
if wildcards:
reply = self.parallel_query(
f"""
SELECT DISTINCT(aid)
FROM {self.table.study.data}
WHERE study_name = %(study_name)s
AND checkpoint = %(checkpoint)d
AND sample_name LIKE %(wildcard)s ;
""",
iterate_param={"wildcard": [wc.replace('*', '%') for wc in wildcards]},
const_params={"study_name": study_name, "checkpoint": checkpoint}
)
for r in reply:
for aid in r:
if not aid:
continue
aids.add(aid[0])
debug("samples:", samples)
debug("wildcards:", wildcards)
debug("aids:", aids)
if not aids:
return None
return self.query(
f"""
SELECT sample_name, cast(array_agg(DISTINCT(aid)) AS JSON)
FROM {self.table.study.data}
WHERE study_name = %(study_name)s
AND aid IN ({self.quoted_aid_list(aids)})
GROUP BY sample_name
ORDER BY sample_name ASC
;
""",
params={"study_name": study_name})
def check_matching_refnames(self, aids):
'''
Ensure that all aids use the same refname.
Returns the refname, or raises if more than one is found.
'''
reply = self.query(
f"SELECT aid, refname FROM {self.table.vcf.meta} WHERE aid IN ({self.quoted_aid_list(aids)});",
)
        # e.g. [('f6f8fee8-c488-4154-9286-988c124498e2', 'grch38'), ('da5279e9-9ccc-4a40-bf3b-40fad12b98c1', 'hs37d5')]
if len({r[1] for r in reply}) > 1:
log(f"All variants in a study must be called against the same reference:")
for ref in reply:
log(f" {ref[0]} -> {ref[1]}")
raise SystemExit("\nAborted.")
return reply[0][1]
def add_to_study(self, study_name, items):
'''
Add VCF variants to a study
'''
self.assert_study_is_unfrozen(study_name)
samples = self.find_all_matching_vcf_aids(items)
if not samples:
raise SystemExit(f"No matching VCFs found.")
debug(samples)
# [('HG002', ['6bc81054-d0e6-4b2a-92d1-82dd2950a33d']),
# ('HG003',
# ['cabceac9-932f-42aa-8043-7a93dacc13bf',
# '9649df68-5b62-4993-8e67-89de88694606']),
# ('HG004', ['59890231-b506-4098-acb0-c51bf270536d'])]
#
# -> {
# '6bc81054-d0e6-4b2a-92d1-82dd2950a33d': 'HG002',
# 'cabceac9-932f-42aa-8043-7a93dacc13bf': 'HG003',
# '9649df68-5b62-4993-8e67-89de88694606': 'HG003',
# '59890231-b506-4098-acb0-c51bf270536d': 'HG004'
# }
aids = {aid:sample for sample, aids in samples for aid in aids}
debug(aids)
log(f"Matching VCFs:")
for sample in samples:
log(f" {sample[0]}: {', '.join(sample[1])}")
log("")
vcf_ref = self.check_matching_refnames(aids.keys())
vcf_build = refhash(lookup=vcf_ref).build()
study_ref = self.get_metadata_from_study(study_name, 'refname')
if not study_ref:
debug(f"{study_name} has no ref, set to {vcf_ref}")
self.add_metadata_to_study(study_name, 'refname', vcf_ref)
self.add_metadata_to_study(study_name, 'build', vcf_build)
elif study_ref != vcf_ref:
raise SystemExit(f"Study {study_name} uses reference {study_ref}, but the specified VCFs use {vcf_ref}.")
checkpoint = self.get_current_study_checkpoint(study_name)
in_study = {a[0] for a in self.query(
f"""
SELECT DISTINCT aid
FROM {self.table.study.data}
WHERE study_name = %(study_name)s
AND checkpoint = {checkpoint}
;
""",
params={"study_name": study_name}
)}
if set(aids.keys()).intersection(in_study):
log(f"The following VCFs are already in this study at checkpoint {checkpoint} and will be skipped:")
for aid in set(aids.keys()).intersection(in_study):
log(f" {aids[aid]}: {aid}")
aids.pop(aid)
log("")
if not aids:
raise SystemExit("Nothing left to import.")
count = self.scalar(
self.query(
f"SELECT count(*) FROM {self.table.vcf.data} WHERE aid IN ({self.quoted_aid_list(aids.keys())});"
)
)
if not count:
raise SystemExit(f"No variants found.")
log(f"Adding {count:,} variants from {len(aids)} VCF{plural(len(aids))} to study {study_name}")
new_checkpoint = checkpoint + 1
update_table_mtime(self, self.table.study.data, partition=f"study_name={study_name}")
# carry previous rev forward
# TODO: make this a parallel query
if checkpoint:
self.query(
f"""
INSERT INTO "{self.table.study.data}"
(spans, reflen, chrom, pos, varend, varid, ref, alt, qual, filt, info, sample, study_name, checkpoint, sample_name, aid)
SELECT
spans,
reflen,
chrom,
pos,
varend,
varid,
ref,
alt,
qual,
filt,
info,
sample,
%(study_name)s,
%(new_checkpoint)d,
sample_name,
aid
FROM {self.table.study.data}
WHERE study_name = %(study_name)s
AND checkpoint = %(old_checkpoint)d
;
""",
params={"study_name": study_name, "old_checkpoint": checkpoint, "new_checkpoint": new_checkpoint}
)
# Create partitions pointing to the variants. This saves the time and cost of making a copy.
queries = []
parts = []
for aid, sample_name in aids.items():
parts.append(
f"""(study_name='{study_name}', checkpoint={new_checkpoint}, sample_name='{sample_name}', aid='{aid}') LOCATION 's3://{self.bucket}/{self.path.vcf.data}/sample_name={sample_name}/build={vcf_build}/aid={aid}/'""")
# Keep the query to a reasonable size
if len(parts) > 100:
queries.append(
f"""ALTER TABLE {self.table.study.data} ADD IF NOT EXISTS PARTITION {' PARTITION '.join(parts)};""")
parts = []
if parts:
queries.append(
f"""ALTER TABLE {self.table.study.data} ADD IF NOT EXISTS PARTITION {' PARTITION '.join(parts)};""")
parts = []
# Add partitions in parallel
self.parallel_queries(queries)
# Since we no longer copy data, create a placeholder for the path on s3
self.s3.Object(self.bucket, f"{self.get_study_path(study_name, self.path.study.data)}/checkpoint={new_checkpoint}/_study_add").put(
Body=json.dumps(aids)
)
# Add metadata
self.add_metadata_to_study(study_name, f"checkpoint {new_checkpoint}", f"added {'; '.join([f'{v}: {k}' for k, v in aids.items()])}")
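    # Example call shape (hypothetical study and sample names): items may mix
    # sample names, aids, and wildcards, and each add creates a new checkpoint.
    #
    #   db.create_study("my_study")
    #   db.add_to_study("my_study", ["HG002", "HG00*"])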
def copy_from_study(self, src_study, src_checkpoint, dest_study, items):
'''
Copy variants from one study to another
'''
self.assert_study_is_unfrozen(dest_study)
self.assert_study_exists(src_study)
if src_checkpoint:
if src_checkpoint > self.get_current_study_checkpoint(src_study):
raise SystemExit(f"No such checkpoint {src_checkpoint} for study {src_study}")
else:
src_checkpoint = self.get_current_study_checkpoint(src_study)
dest_ref = self.get_metadata_from_study(dest_study, 'refname')
src_ref = self.get_metadata_from_study(src_study, 'refname')
if not all([src_checkpoint, src_ref]):
raise SystemExit(f"Study {src_study} has no variants.")
if not dest_ref:
debug(f"{dest_study} has no ref, set to {src_ref}")
self.add_metadata_to_study(dest_study, 'refname', src_ref)
self.add_metadata_to_study(dest_study, 'build', refhash(lookup=src_ref).build())
if dest_ref and src_ref != dest_ref:
raise SystemExit(f"Studies use different references ({src_study}:{src_ref} vs. {dest_study}:{dest_ref})")
samples = self.find_all_matching_study_aids(src_study, src_checkpoint, items)
if not samples:
raise SystemExit(f"No matching VCFs found.")
debug(samples)
# [('HG002', ['6bc81054-d0e6-4b2a-92d1-82dd2950a33d']),
# ('HG003',
# ['cabceac9-932f-42aa-8043-7a93dacc13bf',
# '9649df68-5b62-4993-8e67-89de88694606']),
# ('HG004', ['59890231-b506-4098-acb0-c51bf270536d'])]
#
# -> {
# '6bc81054-d0e6-4b2a-92d1-82dd2950a33d': 'HG002',
# 'cabceac9-932f-42aa-8043-7a93dacc13bf': 'HG003',
# '9649df68-5b62-4993-8e67-89de88694606': 'HG003',
# '59890231-b506-4098-acb0-c51bf270536d': 'HG004'
# }
aids = {aid:sample for sample, aids in samples for aid in aids}
debug(aids)
log(f"Matching variants from {src_study}:{src_checkpoint}")
for sample in samples:
log(f" {sample[0]}: {', '.join(sample[1])}")
log("")
dest_checkpoint = self.get_current_study_checkpoint(dest_study)
if dest_checkpoint:
in_study = {a[0] for a in self.query(
f"""
SELECT DISTINCT aid
FROM {self.table.study.data}
WHERE study_name = %(study_name)s
AND checkpoint = {dest_checkpoint}
;
""",
params={"study_name": dest_study}
)}
if set(aids.keys()).intersection(in_study):
log(f"The following VCFs are already in this study at checkpoint {dest_checkpoint} and will be skipped:")
for aid in set(aids.keys()).intersection(in_study):
log(f" {aids[aid]}: {aid}")
aids.pop(aid)
log("")
if not aids:
raise SystemExit("Nothing left to import.")
count = self.scalar(
self.query(
f"""
SELECT count(*)
FROM {self.table.study.data}
WHERE study_name = %(study_name)s
AND checkpoint = %(checkpoint)d
AND aid IN ({self.quoted_aid_list(aids.keys())})
;
""",
params={"study_name": src_study, "checkpoint": src_checkpoint}
)
)
if not count:
raise SystemExit(f"No variants found.")
log(f"Adding {count:,} variants from {len(aids)} VCF{plural(len(aids))} to study {dest_study}")
new_checkpoint = dest_checkpoint + 1
update_table_mtime(self, self.table.study.data, partition=f"study_name={dest_study}")
# carry previous rev forward
# TODO: make this a parallel query
self.query(
f"""
INSERT INTO "{self.table.study.data}"
(spans, reflen, chrom, pos, varend, varid, ref, alt, qual, filt, info, sample, study_name, checkpoint, sample_name, aid)
SELECT
spans,
reflen,
chrom,
pos,
varend,
varid,
ref,
alt,
qual,
filt,
info,
sample,
%(dest_study)s,
%(new_checkpoint)d,
sample_name,
aid
FROM {self.table.study.data}
WHERE study_name = %(dest_study)s
AND checkpoint = %(old_checkpoint)d
;
""",
params={"dest_study": dest_study, "old_checkpoint": dest_checkpoint, "new_checkpoint": new_checkpoint}
)
# insert new variants
self.parallel_query(
f"""
INSERT INTO "{self.table.study.data}"
(spans, reflen, chrom, pos, varend, varid, ref, alt, qual, filt, info, sample, study_name, checkpoint, sample_name, aid)
SELECT
spans,
reflen,
chrom,
pos,
varend,
varid,
ref,
alt,
qual,
filt,
info,
sample,
%(dest_study)s,
%(new_checkpoint)d,
sample_name,
aid
FROM {self.table.study.data}
WHERE study_name = %(src_study)s
AND checkpoint = %(src_checkpoint)d
AND aid = %(aid)s
;
""",
iterate_param={"aid": aids.keys()},
const_params={"src_study": src_study, "src_checkpoint": src_checkpoint, "dest_study": dest_study, "new_checkpoint": new_checkpoint}
)
self.add_metadata_to_study(dest_study, f"checkpoint {new_checkpoint}", f"added {'; '.join([f'{v}: {k}' for k, v in aids.items()])} from study {src_study} checkpoint {src_checkpoint}")
def sample_missingness(self, study_name, the_filter, checkpoint):
'''
Return a filter clause for sample missingness
'''
reply = self.query(
f"""
WITH uv AS (
SELECT count(*) AS total FROM {self.table.study.merged}
WHERE study_name = %(study_name)s
AND checkpoint = %(checkpoint)d
)
SELECT sample_name from uv, (
SELECT sample_name, COUNT(*) as ct
FROM {self.table.study.data}
WHERE study_name = %(study_name)s
AND checkpoint = %(checkpoint)d
GROUP BY sample_name
ORDER BY sample_name ASC
)
WHERE 1 - (ct / cast(total AS double)) {the_filter[len('S_MISS'):]}
;
""",
params={"study_name": study_name, "checkpoint": checkpoint}
)
if not reply:
raise SystemExit("That filter would eliminate all samples, aborting.")
return f"sample_name IN ({self.quoted_sample_list([s[0] for s in reply])})"
def filter_study(self, study_name, the_filter, exclude=False): # pylint: disable=too-many-statements
'''
Create a new study checkpoint after applying a filter.
If exclude is True, exclude variants that match the_filter.
If exclude is False, include variants that match the_filter.
'''
self.assert_study_is_unfrozen(study_name)
current_checkpoint = self.get_current_study_checkpoint(study_name)
new_checkpoint = current_checkpoint + 1
missing = False
try:
# Missingness filters are more restricted. They can only be applied
# one at a time, with a simple comparison like F_MISS > 0.2
if '_MISS' in the_filter.upper():
missing = True
# validate the filter first
filter_clause = parser(the_filter, parser_type='missingness')
# missingness requires merge
self.merge_study(study_name, checkpoint=current_checkpoint)
if 'S_MISS' in filter_clause:
prefix = ""
postfix = ""
filter_term = f'{"NOT" if exclude else ""} ( {self.sample_missingness(study_name, filter_clause, current_checkpoint)} )'
else:
                    # NOTE: the inverted exclude is intentional here, since we're inside an EXCEPT clause.
prefix = f"""
WITH uv AS (
SELECT chrom, pos, ref, alt FROM {self.table.study.data}
WHERE study_name = '{study_name}'
AND checkpoint = {current_checkpoint}
EXCEPT
SELECT chrom, pos, ref, alt FROM {self.table.study.merged}
WHERE study_name = '{study_name}'
AND checkpoint = {current_checkpoint}
AND {"" if exclude else "NOT"} ( {filter_clause} )
)
"""
postfix = f"""
RIGHT JOIN uv ON
uv.chrom = sv.chrom
AND uv.pos = sv.pos
AND uv.ref = sv.ref
AND uv.alt = sv.alt
"""
# All other conditions should be True for the INSERT INTO
filter_term = '1=1'
else:
prefix = ""
postfix = ""
filter_term = f'{"NOT" if exclude else ""} ( {parser(the_filter)} )'
log("Applying filter")
update_table_mtime(self, self.table.study.data, partition=f"study_name={study_name}")
# Create partitions
potential_aids = self.query(
f"""
SELECT aid, sample_name
FROM {self.table.study.data} sv
WHERE study_name = %(study_name)s
AND checkpoint = {current_checkpoint}
GROUP BY aid, sample_name
;
""",
params={"study_name": study_name}
)
queries = []
parts = []
for (aid, sample_name) in potential_aids:
parts.append(
f"""(study_name='{study_name}', checkpoint={new_checkpoint}, sample_name='{sample_name}', aid='{aid}')""")
# Keep the query to a reasonable size
if len(parts) > 100:
queries.append(
f"""ALTER TABLE {self.table.study.data} ADD IF NOT EXISTS PARTITION {' PARTITION '.join(parts)};""")
parts = []
if parts:
queries.append(
f"""ALTER TABLE {self.table.study.data} ADD IF NOT EXISTS PARTITION {' PARTITION '.join(parts)};""")
parts = []
# Add partitions in parallel
self.parallel_queries(queries)
# Athena has a 100 writer limit, but leave some headroom and chunk INSERTs by aid
queries = []
for chunk in chunked([a[0] for a in potential_aids], 95):
queries.append(f"""
INSERT INTO "{self.table.study.data}"
(spans, reflen, chrom, pos, varend, varid, ref, alt, qual, filt, info, sample, study_name, checkpoint, sample_name, aid)
{prefix}
SELECT
spans,
reflen,
sv.chrom,
sv.pos,
varend,
varid,
sv.ref,
sv.alt,
qual,
filt,
info,
sample,
'{study_name}',
{new_checkpoint},
sample_name,
aid
FROM {self.table.study.data} sv
{postfix}
WHERE study_name = '{study_name}'
AND checkpoint = {current_checkpoint}
AND aid IN ({self.quoted_aid_list(chunk)})
AND {filter_term}
;
""")
self.parallel_queries(queries)
self.add_metadata_to_study(study_name, f"checkpoint {new_checkpoint}", f"{'exclude' if exclude else 'include'} {'missingness ' if missing else ''}{the_filter}")
except ParseException as err:
            error(f"Could not parse {'missingness ' if missing else ''}filter:\n")
error(err.syntax)
raise SystemExit(err.msg)
update_table_mtime(self, self.table.study.data, partition=f"study_name={study_name}")
reply = self.query(
f"""
SELECT checkpoint, count(*), array_sort(array_agg(DISTINCT(sample_name)))
FROM {self.table.study.data}
WHERE study_name = %(study_name)s
and checkpoint >= {current_checkpoint}
GROUP BY checkpoint
ORDER BY checkpoint
;
""",
params={"study_name": study_name}
)
if len(reply) == 1:
log(f"This filter removed all variants from the study. Rolling back to previous checkpoint.")
self.delete_study(study_name, new_checkpoint)
elif reply[0][1] == reply[1][1]:
log(f"Study {study_name} variants: no change ({reply[0][1]})")
else:
log(f"Study {study_name}:")
log(f" variants: {reply[0][1]} -> {reply[1][1]}")
if reply[0][2] != reply[1][2]:
log(f" samples: {reply[0][2]} -> {reply[1][2]}")
def add_vcf_partitions(self, sample_name, build, aid):
''' Add new partitions to the vcf tables '''
for table in (self.table.vcf.meta, self.table.vcf.data):
update_table_mtime(self, table)
self.query(f"""ALTER TABLE {table} ADD IF NOT EXISTS PARTITION (sample_name='{sample_name}', build='{build}', aid='{aid}');""")
def add_anno_partitions(self, build, anno_name, version, aid):
''' Add new partitions to the anno tables '''
for table in (self.table.anno.meta, self.table.anno.data):
update_table_mtime(self, table)
self.query(f"""ALTER TABLE {table} ADD IF NOT EXISTS PARTITION (build='{build}', anno_name='{anno_name}', version='{version}', aid='{aid}');""")
def get_anno_variant_count(self, aid):
''' Return the count of variants for the annotation with the given aid '''
return self.scalar(self.query(
f"""SELECT count(*) FROM {self.table.anno.data} WHERE aid = %(aid)s""",
params={"aid": aid}
))
def get_vcf_variant_count(self, aid):
''' Return the count of variants with the given aid '''
return self.scalar(self.query(
f"""SELECT count(*) FROM {self.table.vcf.data} WHERE aid = %(aid)s""",
params={"aid": aid}
))
|
StarcoderdataPython
|
8095674
|
<reponame>rafarbop/Python<gh_stars>0
# Desafio 44 Curso em Video Python
# By Rafabr
import os,time,sys
from estrutura_modelo import cabecalho,rodape
cabecalho(44,"Valor de Produto com Diversas Meios de Pagamentos")
try:
valor_normal = float(input("Informe o preço normal do produto(Em R$ - Ex.: 20,44) : ").replace(',','.'))
meio_pag = int(input('''Informe qual o meio de pagamento:
1 - À vista em Dinheiro ou PIX - Desconto de 10%
2 - À vista no Cartão de Crédito ou Débito - Desconto de 5%
3 - Cartão de Crédito em 2 parcelas - Sem Desconto
4 - Cartão de Crédito em 3 ou mais parcelas - Juros de 20%
: '''))
print()
except ValueError:
print('Voçe não digitou valores válidos!\nUse virgulas para casa decimais!')
time.sleep(0.5)
sys.exit()
if valor_normal<0:
print('Voçe digitou valores negativos!')
time.sleep(0.5)
sys.exit()
if meio_pag not in [1,2,3,4]:
print('Voçe digitou uma condição de pagamento inválida!')
time.sleep(0.5)
sys.exit()
if meio_pag == 1:
print(f'Com o meio de pagamento informado voçe terá um Desconto de 10%\n\nO valor do Produto será R$ {(valor_normal*0.9):.2f}'.replace('.',','))
elif meio_pag == 2:
print(f'Com o meio de pagamento informado voçe terá um Desconto de 5%\n\nO valor do Produto será R$ {(valor_normal*0.95):.2f}'.replace('.',','))
elif meio_pag == 3:
print(f'Com o meio de pagamento informado voçe pagará o valor normal do Produto\n\nO valor do Produto será R$ {valor_normal:.2f}'.replace('.',','))
elif meio_pag == 4:
print(f'Com o meio de pagamento informado voçe pagará Juros de 20%\n\nO valor do Produto será R$ {(valor_normal*1.2):.2f}'.replace('.',','))
rodape()
|
StarcoderdataPython
|
5127354
|
<filename>expression/extra/result/__init__.py<gh_stars>100-1000
from .catch import catch
from .pipeline import pipeline
from .traversable import sequence, traverse
__all__ = ["catch", "sequence", "traverse", "pipeline"]
|
StarcoderdataPython
|
12856216
|
<reponame>moyogo/spacy<filename>spacy/tests/website/test_home.py
from __future__ import unicode_literals
import pytest
import spacy
import os
try:
xrange
except NameError:
xrange = range
@pytest.fixture()
def token(doc):
return doc[0]
@pytest.mark.models
def test_load_resources_and_process_text():
from spacy.en import English
nlp = English()
doc = nlp(u'Hello, world. Here are two sentences.')
@pytest.mark.models
def test_get_tokens_and_sentences(doc):
token = doc[0]
sentence = next(doc.sents)
assert token is sentence[0]
assert sentence.text == 'Hello, world.'
@pytest.mark.models
def test_use_integer_ids_for_any_strings(nlp, token):
hello_id = nlp.vocab.strings['Hello']
hello_str = nlp.vocab.strings[hello_id]
assert token.orth == hello_id == 3125
assert token.orth_ == hello_str == 'Hello'
def test_get_and_set_string_views_and_flags(nlp, token):
assert token.shape_ == 'Xxxxx'
for lexeme in nlp.vocab:
if lexeme.is_alpha:
lexeme.shape_ = 'W'
elif lexeme.is_digit:
lexeme.shape_ = 'D'
elif lexeme.is_punct:
lexeme.shape_ = 'P'
else:
lexeme.shape_ = 'M'
assert token.shape_ == 'W'
def test_export_to_numpy_arrays(nlp, doc):
from spacy.attrs import ORTH, LIKE_URL, IS_OOV
attr_ids = [ORTH, LIKE_URL, IS_OOV]
doc_array = doc.to_array(attr_ids)
assert doc_array.shape == (len(doc), len(attr_ids))
assert doc[0].orth == doc_array[0, 0]
assert doc[1].orth == doc_array[1, 0]
assert doc[0].like_url == doc_array[0, 1]
assert list(doc_array[:, 1]) == [t.like_url for t in doc]
@pytest.mark.models
def test_word_vectors(nlp):
doc = nlp("Apples and oranges are similar. Boots and hippos aren't.")
apples = doc[0]
oranges = doc[2]
boots = doc[6]
hippos = doc[8]
assert apples.similarity(oranges) > boots.similarity(hippos)
@pytest.mark.models
def test_part_of_speech_tags(nlp):
from spacy.parts_of_speech import ADV
def is_adverb(token):
return token.pos == spacy.parts_of_speech.ADV
# These are data-specific, so no constants are provided. You have to look
# up the IDs from the StringStore.
NNS = nlp.vocab.strings['NNS']
NNPS = nlp.vocab.strings['NNPS']
def is_plural_noun(token):
return token.tag == NNS or token.tag == NNPS
def print_coarse_pos(token):
print(token.pos_)
def print_fine_pos(token):
print(token.tag_)
@pytest.mark.models
def test_syntactic_dependencies():
def dependency_labels_to_root(token):
'''Walk up the syntactic tree, collecting the arc labels.'''
dep_labels = []
while token.head is not token:
dep_labels.append(token.dep)
token = token.head
return dep_labels
@pytest.mark.models
def test_named_entities():
def iter_products(docs):
for doc in docs:
for ent in doc.ents:
if ent.label_ == 'PRODUCT':
yield ent
def word_is_in_entity(word):
return word.ent_type != 0
def count_parent_verb_by_person(docs):
        counts = defaultdict(lambda: defaultdict(int))
for doc in docs:
for ent in doc.ents:
if ent.label_ == 'PERSON' and ent.root.head.pos == VERB:
counts[ent.orth_][ent.root.head.lemma_] += 1
return counts
def test_calculate_inline_mark_up_on_original_string():
def put_spans_around_tokens(doc, get_classes):
'''Given some function to compute class names, put each token in a
span element, with the appropriate classes computed.
All whitespace is preserved, outside of the spans. (Yes, I know HTML
won't display it. But the point is no information is lost, so you can
calculate what you need, e.g. <br /> tags, <p> tags, etc.)
'''
output = []
template = '<span classes="{classes}">{word}</span>{space}'
for token in doc:
if token.is_space:
output.append(token.orth_)
else:
output.append(
template.format(
classes=' '.join(get_classes(token)),
word=token.orth_,
space=token.whitespace_))
string = ''.join(output)
string = string.replace('\n', '')
string = string.replace('\t', ' ')
return string
@pytest.mark.models
def test_efficient_binary_serialization(doc):
from spacy.tokens.doc import Doc
byte_string = doc.to_bytes()
open('moby_dick.bin', 'wb').write(byte_string)
nlp = spacy.en.English()
for byte_string in Doc.read_bytes(open('moby_dick.bin', 'rb')):
doc = Doc(nlp.vocab)
doc.from_bytes(byte_string)
@pytest.mark.models
def test_multithreading(nlp):
texts = [u'One document.', u'...', u'Lots of documents']
# .pipe streams input, and produces streaming output
iter_texts = (texts[i % 3] for i in xrange(100000000))
for i, doc in enumerate(nlp.pipe(iter_texts, batch_size=50, n_threads=4)):
assert doc.is_parsed
if i == 100:
break
|
StarcoderdataPython
|
12836872
|
<filename>tests/conftest.py
import pytest
import factory
import asyncio
from cuve.order_service.db import transaction, tables
from cuve.order_service.db.helpers import async_create_database
from cuve.order_service.app import application_factory
from cuve.order_service.config import load_config, ConfigSchema
def pytest_addoption(parser):
parser.addoption('--config', action="store",
default='./etc/config/development.yml')
parser.addoption('--createdb', action="store_true",
default=False)
def pytest_configure(config):
    """ Create database if '--createdb' option provided
"""
if not config.getoption('--createdb'):
return
loop = asyncio.get_event_loop()
config = load_config(ConfigSchema, config.getoption('--config'))
loop.run_until_complete(async_create_database(loop, config['database']))
@pytest.fixture
def client(request, loop, test_client):
config = load_config(ConfigSchema, request.config.getoption('--config'))
app = application_factory(config, loop)
client = test_client(app)
return loop.run_until_complete(client)
@pytest.fixture
def app(client):
""" Shortcut for accessing application behind test client
"""
return client.server.app
@pytest.fixture(autouse=True, scope='function')
def transaction_auto_rollback(app):
""" Autoused fixture creating savepoint before every
test and rollbacking changes after test finishes
"""
pass
#
# Factories for fake table records
#
@pytest.fixture
def company_factory(app):
""" Creates factory for creating fake companies
"""
class CompanyFactory(factory.Factory):
name = factory.Faker('company')
        phone = factory.Faker('phone_number')
description = factory.Faker('bs')
async def factory(*args, **kwargs):
fake = CompanyFactory.stub(*args, **kwargs)
ins_stmt = tables.company.insert().values(**fake.__dict__)
        async with transaction(app) as trans:
            company_id = await trans.connection.scalar(ins_stmt)
            await trans.commit()
            sel_stmt = tables.company.select().where(
                tables.company.id == company_id)
            select_result = await trans.connection.execute(sel_stmt)
            return await select_result.fetch_one()
return factory
@pytest.fixture
def account_factory(app, company_factory):
""" Creates factory for creating fake companies
"""
pass
@pytest.fixture
def software_factory(app, company_factory):
""" Creates factory for creating fake software
"""
pass
@pytest.fixture
def software_order_factory(app, account_factory):
""" Creates factory for creating fake software orders
"""
pass
|
StarcoderdataPython
|
4977315
|
<reponame>Ascend/modelzoo
#!/usr/bin/python
#encoding=utf-8
#
# BSD 3-Clause License
#
# Copyright (c) 2017 xxxx
# All rights reserved.
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ============================================================================
#
import os
import sys
import copy
import time
import yaml
import argparse
import numpy as np
from apex import amp
import torch
import torch.nn as nn
import torch.npu
sys.path.append('./')
from models.model_ctc import *
#from warpctc_pytorch import CTCLoss # use built-in nn.CTCLoss
from utils.data_loader import Vocab, SpeechDataset, SpeechDataLoader
supported_rnn = {'nn.LSTM':nn.LSTM, 'nn.GRU': nn.GRU, 'nn.RNN':nn.RNN}
supported_activate = {'relu':nn.ReLU, 'tanh':nn.Tanh, 'sigmoid':nn.Sigmoid}
parser = argparse.ArgumentParser(description='cnn_lstm_ctc')
parser.add_argument('--conf', default='conf/ctc_config.yaml' , help='conf file with argument of LSTM and training')
parser.add_argument('--device_id', default='0', type=str, help='device id')
parser.add_argument('--apex', action='store_true', help='User apex for mixed precision training')
parser.add_argument('--loss_scale', default=128.0, type=float, help='loss scale using in amp, default -1 means dynamic')
parser.add_argument('--opt_level', default='O2', type=str, help='loss scale using in amp, default -1 means dynamic')
def run_epoch(epoch_id, model, data_iter, loss_fn, device, optimizer=None, print_every=20, is_training=True):
if is_training:
model.train()
else:
model.eval()
total_loss = 0
total_tokens = 0
total_errs = 0
cur_loss = 0
for i, data in enumerate(data_iter):
start_time = time.time()
inputs, input_sizes, targets, target_sizes, utt_list = data
inputs = inputs.to(device)
input_sizes = input_sizes.to(device)
targets = targets.to(device)
target_sizes = target_sizes.to(device)
out = model(inputs)
out_len, batch_size, _ = out.size()
input_sizes = (input_sizes * out_len).long()
loss = loss_fn(out, targets, input_sizes, target_sizes)
loss /= batch_size
cur_loss += loss.item()
total_loss += loss.item()
prob, index = torch.max(out, dim=-1)
batch_errs, batch_tokens = model.compute_wer(index.transpose(0,1).cpu().numpy(), input_sizes.cpu().numpy(), targets.cpu().numpy(), target_sizes.cpu().numpy())
total_errs += batch_errs
total_tokens += batch_tokens
if is_training:
optimizer.zero_grad()
if args.apex:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
#nn.utils.clip_grad_norm_(model.parameters(), 400)
optimizer.step()
step_time = time.time() - start_time
if (i + 1) % print_every == 0 and is_training:
print('Epoch = %d, step = %d, time = %.4f, cur_loss = %.4f, total_loss = %.4f, total_wer = %.4f' % (epoch_id,
i+1, step_time, cur_loss / print_every, total_loss / (i+1), total_errs / total_tokens ))
cur_loss = 0
average_loss = total_loss / (i+1)
training = "Train" if is_training else "Valid"
print("Epoch %d %s done, total_loss: %.4f, total_wer: %.4f" % (epoch_id, training, average_loss, total_errs / total_tokens))
return 1-total_errs / total_tokens, average_loss
class Config(object):
batch_size = 4
dropout = 0.1
def main(args,conf):
opts = Config()
for k, v in conf.items():
setattr(opts, k, v)
print('{:50}:{}'.format(k, v))
device = torch.device('npu:' + args.device_id) if opts.use_gpu else torch.device('cpu')
torch.manual_seed(opts.seed)
np.random.seed(opts.seed)
if opts.use_gpu:
torch.npu.set_device(device)
torch.npu.manual_seed(opts.seed)
#Data Loader
vocab = Vocab(opts.vocab_file)
train_dataset = SpeechDataset(vocab, opts.train_scp_path, opts.train_lab_path, opts)
dev_dataset = SpeechDataset(vocab, opts.valid_scp_path, opts.valid_lab_path, opts)
train_loader = SpeechDataLoader(train_dataset, batch_size=opts.batch_size, shuffle=opts.shuffle_train, num_workers=opts.num_workers, drop_last=True, pin_memory=True)
dev_loader = SpeechDataLoader(dev_dataset, batch_size=opts.batch_size, shuffle=False, num_workers=opts.num_workers, drop_last=True, pin_memory=True)
#Define Model
rnn_type = supported_rnn[opts.rnn_type]
rnn_param = {"rnn_input_size":opts.rnn_input_size, "rnn_hidden_size":opts.rnn_hidden_size, "rnn_layers":opts.rnn_layers,
"rnn_type":rnn_type, "bidirectional":opts.bidirectional, "batch_norm":opts.batch_norm}
num_class = vocab.n_words
opts.output_class_dim = vocab.n_words
drop_out = opts.drop_out
add_cnn = opts.add_cnn
cnn_param = {}
channel = eval(opts.channel)
kernel_size = eval(opts.kernel_size)
stride = eval(opts.stride)
padding = eval(opts.padding)
pooling = eval(opts.pooling)
activation_function = supported_activate[opts.activation_function]
cnn_param['batch_norm'] = opts.batch_norm
cnn_param['activate_function'] = activation_function
cnn_param["layer"] = []
for layer in range(opts.layers):
layer_param = [channel[layer], kernel_size[layer], stride[layer], padding[layer]]
if pooling is not None:
layer_param.append(pooling[layer])
else:
layer_param.append(None)
cnn_param["layer"].append(layer_param)
model = CTC_Model(add_cnn=add_cnn, cnn_param=cnn_param, rnn_param=rnn_param, num_class=num_class, drop_out=drop_out)
model = model.to(device)
num_params = 0
for name, param in model.named_parameters():
num_params += param.numel()
print("Number of parameters %d" % num_params)
for idx, m in enumerate(model.children()):
print(idx, m)
#Training
init_lr = opts.init_lr
num_epoches = opts.num_epoches
end_adjust_acc = opts.end_adjust_acc
decay = opts.lr_decay
weight_decay = opts.weight_decay
batch_size = opts.batch_size
params = { 'num_epoches':num_epoches, 'end_adjust_acc':end_adjust_acc, 'mel': opts.mel, 'seed':opts.seed,
'decay':decay, 'learning_rate':init_lr, 'weight_decay':weight_decay, 'batch_size':batch_size,
'feature_type':opts.feature_type, 'n_feats': opts.feature_dim }
print(params)
loss_fn = nn.CTCLoss(reduction='sum')
optimizer = torch.optim.Adam(model.parameters(), lr=init_lr, weight_decay=weight_decay)
if args.apex:
model, optimizer = amp.initialize(model, optimizer, opt_level=args.opt_level, loss_scale=args.loss_scale)
#visualization for training
# from visdom import Visdom
# viz = Visdom()
# if add_cnn:
# title = opts.feature_type + str(opts.feature_dim) + ' CNN_LSTM_CTC'
# else:
# title = opts.feature_type + str(opts.feature_dim) + ' LSTM_CTC'
# viz_opts = [dict(title=title+" Loss", ylabel = 'Loss', xlabel = 'Epoch'),
# dict(title=title+" Loss on Dev", ylabel = 'DEV Loss', xlabel = 'Epoch'),
# dict(title=title+' CER on DEV', ylabel = 'DEV CER', xlabel = 'Epoch')]
# viz_window = [None, None, None]
count = 0
learning_rate = init_lr
loss_best = 1000
loss_best_true = 1000
adjust_rate_flag = False
stop_train = False
adjust_time = 0
acc_best = 0
start_time = time.time()
loss_results = []
dev_loss_results = []
dev_cer_results = []
while not stop_train:
if count >= num_epoches:
break
count += 1
if adjust_rate_flag:
learning_rate *= decay
adjust_rate_flag = False
for param in optimizer.param_groups:
param['lr'] *= decay
print("Start training epoch: %d, learning_rate: %.5f" % (count, learning_rate))
train_acc, loss = run_epoch(count, model, train_loader, loss_fn, device, optimizer=optimizer, print_every=opts.verbose_step, is_training=True)
loss_results.append(loss)
acc, dev_loss = run_epoch(count, model, dev_loader, loss_fn, device, optimizer=None, print_every=opts.verbose_step, is_training=False)
print("loss on dev set is %.4f" % dev_loss)
dev_loss_results.append(dev_loss)
dev_cer_results.append(acc)
#adjust learning rate by dev_loss
if dev_loss < (loss_best - end_adjust_acc):
loss_best = dev_loss
loss_best_true = dev_loss
adjust_rate_count = 0
model_state = copy.deepcopy(model.state_dict())
op_state = copy.deepcopy(optimizer.state_dict())
elif (dev_loss < loss_best + end_adjust_acc):
adjust_rate_count += 1
if dev_loss < loss_best and dev_loss < loss_best_true:
loss_best_true = dev_loss
model_state = copy.deepcopy(model.state_dict())
op_state = copy.deepcopy(optimizer.state_dict())
else:
adjust_rate_count = 10
if acc > acc_best:
acc_best = acc
best_model_state = copy.deepcopy(model.state_dict())
best_op_state = copy.deepcopy(optimizer.state_dict())
print("adjust_rate_count:"+str(adjust_rate_count))
print('adjust_time:'+str(adjust_time))
if adjust_rate_count == 10:
adjust_rate_flag = True
adjust_time += 1
adjust_rate_count = 0
if loss_best > loss_best_true:
loss_best = loss_best_true
model.load_state_dict(model_state)
optimizer.load_state_dict(op_state)
if adjust_time == 8:
stop_train = True
time_used = (time.time() - start_time) / 60
print("epoch %d done, cv acc is: %.4f, time_used: %.4f minutes" % (count, acc, time_used))
# x_axis = range(count)
# y_axis = [loss_results[0:count], dev_loss_results[0:count], dev_cer_results[0:count]]
# for x in range(len(viz_window)):
# if viz_window[x] is None:
# viz_window[x] = viz.line(X = np.array(x_axis), Y = np.array(y_axis[x]), opts = viz_opts[x],)
# else:
# viz.line(X = np.array(x_axis), Y = np.array(y_axis[x]), win = viz_window[x], update = 'replace',)
print("End training, best dev loss is: %.4f, acc is: %.4f" % (loss_best, acc_best))
model.load_state_dict(best_model_state)
optimizer.load_state_dict(best_op_state)
save_dir = os.path.join(opts.checkpoint_dir, opts.exp_name)
if not os.path.exists(save_dir):
os.makedirs(save_dir)
best_path = os.path.join(save_dir, 'ctc_best_model.pth')
params['epoch']=count
torch.save(CTC_Model.save_package(model, optimizer=optimizer, epoch=params, loss_results=loss_results, dev_loss_results=dev_loss_results, dev_cer_results=dev_cer_results), best_path)
if __name__ == '__main__':
args = parser.parse_args()
try:
config_path = args.conf
conf = yaml.safe_load(open(config_path, 'r'))
except:
print("No input config or config file missing, please check.")
sys.exit(1)
main(args,conf)
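
# Example invocation (illustrative sketch; the script filename is an assumption,
# the flags and values come from the argparse options and defaults defined above):
#
#   python train_ctc.py --conf conf/ctc_config.yaml --device_id 0 \
#       --apex --opt_level O2 --loss_scale 128.0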
|
StarcoderdataPython
|
4913436
|
#!/usr/bin/python
import roslib
roslib.load_manifest('PathTask')
import rospy
from std_msgs.msg import String
import time
from threading import Thread
from Robosub.msg import HighLevelControl, ModuleEnableMsg
from SubImageRecognition.msg import ImgRecObject
class PathTask:
MOTOR_COMMAND = 'Command'
MOTOR_FORWARD = 'Forward'
MOTOR_MANUAL = 'Manual'
MOTOR_OFFSET = 'Offset'
MOTOR_STRAFE = 'Straf'
MOTOR_TURN = 'Turn'
SCALE_FORWARD = 0.0017
SCALE_STRAFE = 0.01
SCALE_TURN = 1 / 180.0
SUCCESS_GOAL = 10
TH_ROT = 1
TH_X = 50
TH_Y = 50
def __init__(self):
self.can_turn = True
self.direction = 'right'
self.enabled = False
self.last_motor_change = 0
self.paths = []
self.pub_high_level_motor_controller = rospy.Publisher(
'High_Level_Motion', HighLevelControl)
self.pub_task_complete = rospy.Publisher(
'Task_Completion', String)
self.sub_image_recognition = rospy.Subscriber(
'img_rec/paths', ImgRecObject, self.image_recognition_cb)
self.sub_module_enable = rospy.Subscriber(
'Module_Enable', ModuleEnableMsg, self.module_enable_cb)
self.sub_path_direction = rospy.Subscriber(
'Path_Direction', String, self.path_direction_cb)
self.success_counter = 0
self.thread = Thread(target=self.motor_watcher)
self.thread.daemon = True
self.thread.start()
def align_to_path(self, path):
did_something = False
if path.center_x > self.TH_X or path.center_x < -self.TH_X:
self.publish_motor(self.MOTOR_STRAFE, path.center_x * self.SCALE_STRAFE)
did_something |= True
else:
self.publish_motor(self.MOTOR_STRAFE, 0)
if path.center_y > self.TH_Y or path.center_y < -self.TH_Y:
self.publish_motor(self.MOTOR_FORWARD, path.center_y * self.SCALE_FORWARD)
did_something |= True
else:
self.publish_motor(self.MOTOR_FORWARD, 0)
if self.can_turn and (path.rotation > self.TH_ROT or path.rotation < -self.TH_ROT):
self.publish_motor(self.MOTOR_TURN, path.rotation * self.SCALE_TURN)
did_something |= True
else:
self.publish_motor(self.MOTOR_TURN, 0)
if did_something:
self.success_counter = 0
self.last_motor_change = time.time()
else:
self.success_counter += 1
if self.success_counter >= self.SUCCESS_GOAL:
if self.can_turn:
self.task_complete(True)
else:
self.can_turn = True
self.success_counter = 0
def image_recognition_cb(self, path):
if not self.enabled:
return
if len(self.paths) and path.id == 0:
self.align_to_path(self.select_correct_path())
self.paths = []
self.paths.append(path)
def module_enable_cb(self, msg):
if msg.Module == 'PathTask':
self.can_turn = False
self.enabled = msg.State
self.paths = []
self.success_counter = 0
self.last_motor_change = 0
if not self.enabled:
self.stop_motors()
rospy.loginfo("PathTask Disabled")
else:
rospy.loginfo("PathTask Enabled")
def motor_watcher(self):
while True:
if self.enabled and self.last_motor_change:
if time.time() - self.last_motor_change > 3:
self.task_complete(False)
time.sleep(1)
def path_direction_cb(self, msg):
if msg.data in ('left', 'right'):
self.direction = msg.data
else:
print('[PathTask] Invalid path direction received: ' + msg.data)
def publish_motor(self, direction, value, motion_type=None):
if not motion_type:
if direction == self.MOTOR_TURN:
motion_type = self.MOTOR_MANUAL
else:
motion_type = self.MOTOR_COMMAND
msg = HighLevelControl()
msg.Direction = direction
msg.Value = value
msg.MotionType = motion_type
self.pub_high_level_motor_controller.publish(msg)
def select_correct_path(self):
if len(self.paths) == 1:
return self.paths[0]
elif self.direction == 'left':
best = (9999, None)
for path in self.paths:
if path.center_x < best[0]:
best = (path.center_x, path)
else:
best = (-9999, None)
for path in self.paths:
if path.center_x > best[0]:
best = (path.center_x, path)
        return best[1]  # return the selected path object, not the (center_x, path) tuple
def task_complete(self, result):
self.enabled = False
        result = 'PathTask ' + ('Success' if result else 'Failure')
self.pub_task_complete.publish(String(result))
self.stop_motors()
def stop_motors(self):
self.publish_motor(self.MOTOR_FORWARD, 0)
self.publish_motor(self.MOTOR_STRAFE, 0)
self.publish_motor(self.MOTOR_TURN, 0)
self.last_motor_change = 0
if __name__ == '__main__':
rospy.init_node('PathTask')
path_task = PathTask()
rospy.spin()
|
StarcoderdataPython
|
3292071
|
text = '[ Статистика ]<br>Система:<br> Процессор:<br>'
for idx, cpu in enumerate(psutil.cpu_percent(interval=1, percpu=True)):
text += '  Ядро №'+str(idx+1)+': '+str(cpu)+'%<br>'
text += '  Температура: '+str(int(open('/sys/class/thermal/thermal_zone0/temp','r').read())/1000)+' °С\n'
mem = psutil.virtual_memory()
MB = 1024 * 1024
text += ' ОЗУ:<br>  Всего: '+str(int(mem.total / MB))+'MB<br>  Использовано: '+str(int((mem.total - mem.available) / MB))+'MB<br>  Свободно: '+str(int(mem.available / MB))+'MB<br>  Использовано ботом: '+str(int(psutil.Process().memory_info().vms / MB))+'MB<br> '
end_time = time.monotonic()
text += 'Бот:<br>  Время работы: '+str(datetime.timedelta(seconds=end_time - start_time))
text += '\n  Обращений: '+str(uses_kb)
apisay(text,pack['toho'])
|
StarcoderdataPython
|
5078436
|
from .base import CMakeToolchainBase
class CMakeAndroidToolchain(CMakeToolchainBase):
pass
|
StarcoderdataPython
|
3415709
|
<reponame>miroslavkrysl/kiv-bit-rsa
"""Definition of signature."""
from __future__ import annotations
from typing import Type
from kiv_bit_rsa.hash import Hash
from kiv_bit_rsa.rsa import Key, Rsa
from kiv_bit_rsa.sign.signable import Signable
class Signature:
"""An object signature.
:py:class:`Signature` holds the signed hash (with an RSA
cipher encryption key) and info about the hash algorithm.
"""
def __init__(self,
hash_class: Type[Hash],
digest_cipher: bytes):
"""Initialize a signature of an object.
:param hash_class: The class used for hash.
:param digest_cipher: Encrypted hash digest.
"""
self._hash_class = hash_class
self._digest_cipher = digest_cipher
@classmethod
def sign(cls,
signable: Signable,
hash_class: Type[Hash],
key: Key) -> Signature:
"""Create a signature of object `signable`.
:param signable: The signable object to sign.
:param hash_class: The class used for hash.
:param key: The encryption key.
:return: The signature of object `signable`.
"""
h = signable.hash(hash_class)
digest_cipher = Rsa().encrypt(h.to_bytes(), key)
return Signature(hash_class, digest_cipher)
def verify(self,
signable: Signable,
key: Key) -> bool:
"""Verify the signable object against this signature.
:param key: The decryption key.
:param signable: The signable object to verify.
:return: True if the contents of the file match the signature.
"""
h = signable.hash(self._hash_class)
digest = Rsa().decrypt(self._digest_cipher, key)
return digest == h.to_bytes()
@property
def hash_method(self):
"""Get the hash method."""
return self._hash_class
@property
def hash_cipher(self):
"""Get the hash cipher"""
return self._digest_cipher
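
# Example flow (illustrative sketch only; the concrete Hash subclass, the
# Signable implementation and how the RSA key pair is obtained all come from
# other parts of the kiv_bit_rsa package and are assumed here):
#
#   signature = Signature.sign(document, SomeHash, private_key)  # sign with the private key
#   assert signature.verify(document, public_key)                # verify with the public key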
|
StarcoderdataPython
|
377589
|
<filename>webauthn/helpers/bytes_to_base64url.py<gh_stars>100-1000
from base64 import urlsafe_b64encode
def bytes_to_base64url(val: bytes) -> str:
"""
Base64URL-encode the provided bytes
"""
return urlsafe_b64encode(val).decode("utf-8").replace("=", "")
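

if __name__ == "__main__":
    # Minimal round-trip sketch (not part of the library API): pad the string
    # back to a multiple of 4 before decoding, since the padding was stripped.
    from base64 import urlsafe_b64decode

    raw = b"\x01\x02\xfa\xff"
    encoded = bytes_to_base64url(raw)
    padded = encoded + "=" * (-len(encoded) % 4)
    assert urlsafe_b64decode(padded) == raw
    print(encoded)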
|
StarcoderdataPython
|
6468188
|
<gh_stars>1-10
from __future__ import absolute_import
import numpy as np
import morphs
from morphs.data.derivative import (
f_poly,
p0_poly,
fit_derivative,
_main,
find_max_order,
)
import pytest
from click.testing import CliRunner
@pytest.mark.run(order=0)
def test_f_poly():
x = np.linspace(1, 128)
assert f_poly(x, [-5, 0, -0.5, 1]).shape == x.shape
x.reshape((-1, 1))
assert f_poly(x, [-5, 0, -0.5, 1]).shape == x.shape
x = np.array([1, 128])
temp = f_poly(x, [0, 1])
assert temp[0] == np.exp(-1), "centering not working"
assert temp[1] == np.exp(1), "centering not working"
def test_fit_derivative():
pair_df = morphs.load.pop_pair_df()
morphs.data.parse.morph_dim(pair_df)
for block_path, block_group in pair_df.groupby("block_path"):
for morph_dim, morph_dim_group in block_group.groupby("morph_dim"):
for order in range(8):
p0, bounds = p0_poly(order)
popt, pcov = fit_derivative(morph_dim_group, p0, bounds=bounds)
assert len(popt) == order + 1
break
break
@pytest.mark.run(order=3)
def test_gen_cli_derivative_dict():
runner = CliRunner()
assert not morphs.paths.DERIVATIVE_PKL.exists()
result = runner.invoke(_main, ["--parallel", "--max_order=5"])
assert result.exit_code == 0
assert morphs.paths.DERIVATIVE_PKL.exists()
result2 = runner.invoke(_main, ["--parallel", "--max_order=7"])
assert result2.exit_code == 0
assert "max order incremented!" in result2.output
dd = morphs.load.derivative_dict()
assert find_max_order(dd) == 6
def test_load_derivative_dict():
dd = morphs.load.derivative_dict()
assert morphs.paths.DERIVATIVE_PKL.exists()
assert len(dd) > 0
for block in dd:
assert len(dd[block]) == 24
|
StarcoderdataPython
|
5115971
|
<filename>Main_Window.py
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'Main_Window.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets, QtSql
from PyQt5.QtWidgets import QMainWindow, QFileDialog, QDialog, QInputDialog, QMessageBox, QLineEdit
from PyQt5.QtGui import QIcon
import sqlite3
from CopyPasteExcel import copy_paste
from Macros import Ui_Macro_Dialog as Macro_Dialog
import os
def create_connection(database):
db = QtSql.QSqlDatabase.addDatabase("QSQLITE")
db.setDatabaseName(database)
if not db.open():
print("Cannot open database")
print(
"Unable to establish a database connection.\n"
"This example needs SQLite support. Please read "
"the Qt SQL driver documentation for information "
"how to build it.\n\n"
"Click Cancel to exit."
)
return False
query = QtSql.QSqlQuery()
if not query.exec_(
"""CREATE TABLE IF NOT EXISTS Macros (
"id" INTEGER PRIMARY KEY AUTOINCREMENT,
"title" TEXT NOT NULL,
"description" TEXT)"""
):
print(query.lastError().text())
return False
return True
class FileEdit(QLineEdit):
    """ Custom subclass of QLineEdit to allow users to drag & drop for file selection (instead of browsing) """
def __init__(self, parent):
super(FileEdit, self).__init__(parent)
self.setDragEnabled(True)
def dragEnterEvent(self, event):
data = event.mimeData()
urls = data.urls()
if urls and urls[0].scheme() == 'file':
event.acceptProposedAction()
def dragMoveEvent(self, event):
data = event.mimeData()
urls = data.urls()
if urls and urls[0].scheme() == 'file':
event.acceptProposedAction()
def dropEvent(self, event):
data = event.mimeData()
urls = data.urls()
if urls and urls[0].scheme() == 'file':
filepath = str(urls[0].path())[1:]
# any file type here
self.setText(filepath)
# BELOW IS CODE FROM: https://www.reddit.com/r/learnpython/comments/97z5dq/pyqt5_drag_and_drop_file_option/e4cv39x/
# WHEN YOU HAVE TIME, REFACTOR ABOVE CODE WITH REGEX TO ONLY OPEN EXCEL FILES (anything ending with .xl...)
# if filepath[-4:].upper() in [".txt", ".x"]:
# self.setText(filepath)
# else:
# dialog = QMessageBox()
# dialog.setWindowTitle("Error: Invalid File")
# dialog.setText("Only Excel files are accepted")
# dialog.setIcon(QMessageBox.Warning)
# dialog.exec_()
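            # One possible sketch for the extension check mentioned above (not
            # wired in; the accepted suffix pattern is an assumption):
            #
            #   import re
            #   if re.search(r"\.xl\w*$", filepath, re.IGNORECASE):
            #       self.setText(filepath)
            #   else:
            #       ...show the warning dialog above...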
class Ui_MainWindow(QMainWindow):
def __init__(self, parent=None):
super(Ui_MainWindow, self).__init__(parent)
self.initUI(MainWindow)
def initUI(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(550, 500)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.label_select_excel_files = QtWidgets.QLabel(self.centralwidget)
self.label_select_excel_files.setGeometry(QtCore.QRect(210, 70, 131, 21))
font = QtGui.QFont()
font.setPointSize(14)
self.label_select_excel_files.setFont(font)
self.label_select_excel_files.setAlignment(QtCore.Qt.AlignCenter)
self.label_select_excel_files.setObjectName("label_select_excel_files")
self.Frame_fileimport = QtWidgets.QFrame(self.centralwidget)
self.Frame_fileimport.setGeometry(QtCore.QRect(20, 100, 511, 100))
self.Frame_fileimport.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.Frame_fileimport.setFrameShadow(QtWidgets.QFrame.Raised)
self.Frame_fileimport.setObjectName("frame_fileimport")
self.Label_copyfrom = QtWidgets.QLabel(self.Frame_fileimport)
self.Label_copyfrom.setGeometry(QtCore.QRect(10, 10, 79, 35))
self.Label_copyfrom.setObjectName("Label_copyfrom")
self.Label_destination = QtWidgets.QLabel(self.Frame_fileimport)
self.Label_destination.setGeometry(QtCore.QRect(10, 50, 71, 31))
self.Label_destination.setObjectName("Label_destination")
self.textEdit_copyfrom = FileEdit(self.Frame_fileimport)
self.textEdit_copyfrom.setGeometry(QtCore.QRect(90, 20, 319, 21))
self.textEdit_copyfrom.setObjectName("textEdit_copyfrom")
self.textEdit_destination = FileEdit(self.Frame_fileimport)
self.textEdit_destination.setGeometry(QtCore.QRect(90, 60, 319, 21))
self.textEdit_destination.setObjectName("textEdit_destination")
self.Button_browse_copyfrom = QtWidgets.QPushButton(self.Frame_fileimport)
self.Button_browse_copyfrom.setGeometry(QtCore.QRect(410, 10, 91, 41))
self.Button_browse_copyfrom.setObjectName("Button_browse_copyfrom")
self.Button_browse_copyfrom.clicked.connect(lambda: self.open_excel_file(self.textEdit_copyfrom)) # Added by me (browse func for copy file)
self.Button_browse_destination = QtWidgets.QPushButton(self.Frame_fileimport)
self.Button_browse_destination.setGeometry(QtCore.QRect(410, 50, 91, 41))
self.Button_browse_destination.setObjectName("Button_browse_destination")
self.Button_browse_destination.clicked.connect(lambda: self.open_excel_file(self.textEdit_destination)) # Added by me (browse func for destination file)
self.line = QtWidgets.QFrame(self.centralwidget)
self.line.setGeometry(QtCore.QRect(0, 220, 550, 5))
self.line.setFrameShape(QtWidgets.QFrame.HLine)
self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line.setObjectName("line")
self.verticalLayoutWidget = QtWidgets.QWidget(self.centralwidget)
self.verticalLayoutWidget.setGeometry(QtCore.QRect(380, 250, 151, 151))
self.verticalLayoutWidget.setObjectName("verticalLayoutWidget")
self.verticalLayout = QtWidgets.QVBoxLayout(self.verticalLayoutWidget)
self.verticalLayout.setContentsMargins(0, 0, 0, 0)
self.verticalLayout.setObjectName("verticalLayout")
self.Label_macros = QtWidgets.QLabel(self.centralwidget)
self.Label_macros.setGeometry(QtCore.QRect(135, 230, 120, 16))
self.Label_macros.setAlignment(QtCore.Qt.AlignCenter)
self.Label_macros.setObjectName("Label_macros")
self._model = QtSql.QSqlTableModel(MainWindow) # Added SQL Table model
self.model.setTable("Macros")
self.model.select()
self.listView_macros = QtWidgets.QListView(self.centralwidget)
self.listView_macros.setGeometry(QtCore.QRect(20, 250, 350, 150))
self.listView_macros.setObjectName("tableWidget_macros")
self.listView_macros.setModel(self.model)
self.listView_macros.setModelColumn(self.model.record().indexOf("title"))
self.Button_new_macro = QtWidgets.QPushButton(self.verticalLayoutWidget)
self.Button_new_macro.setObjectName("Button_new_macro")
self.verticalLayout.addWidget(self.Button_new_macro)
self.Button_new_macro.clicked.connect(self.new_macro) # Create new entry in macro list
self.Button_edit_macro = QtWidgets.QPushButton(self.verticalLayoutWidget)
self.Button_edit_macro.setObjectName("Button_edit_macro")
self.verticalLayout.addWidget(self.Button_edit_macro)
self.Button_edit_macro.clicked.connect(self.edit_macro) # Edit selected entry in macro list
self.Button_remove_macro = QtWidgets.QPushButton(self.verticalLayoutWidget)
self.Button_remove_macro.setObjectName("Button_remove_macro")
self.verticalLayout.addWidget(self.Button_remove_macro)
self.Button_remove_macro.clicked.connect(self.remove_macro) # Remove selected entry in macro list
self.Button_Run = QtWidgets.QPushButton(self.centralwidget)
self.Button_Run.setGeometry(QtCore.QRect(120, 410, 311, 51))
self.Button_Run.setObjectName("Button_Run")
# self.Button_Run.clicked.connect(self.refresh)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 550, 22))
self.menubar.setObjectName("menubar")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.Button_new_macro.setText(_translate("MainWindow", "New"))
self.Button_edit_macro.setText(_translate("MainWindow", "Edit"))
self.Button_remove_macro.setText(_translate("MainWindow", "Remove"))
# self.listView_macros.setSortingEnabled(False)
self.Label_macros.setText(_translate("MainWindow", "Macros"))
self.Button_Run.setText(_translate("MainWindow", "RUN"))
self.Label_copyfrom.setText(_translate("MainWindow", "Copy From:"))
self.Label_destination.setText(_translate("MainWindow", "Destination:"))
self.Button_browse_destination.setText(_translate("MainWindow", "Browse"))
self.Button_browse_copyfrom.setText(_translate("MainWindow", "Browse"))
self.label_select_excel_files.setText(_translate("MainWindow", "Select Excel Files"))
@property
def model(self):
return self._model
@QtCore.pyqtSlot()
def new_macro(self):
d = Macro_Dialog()
if d.exec_() == QtWidgets.QDialog.Accepted:
r = self.model.record()
r.setValue("title", d.title)
r.setValue("description", d.description)
if self.model.insertRecord(self.model.rowCount(), r):
self.model.select()
@QtCore.pyqtSlot()
def edit_macro(self):
ixs = self.listView_macros.selectionModel().selectedIndexes()
if ixs:
d = Macro_Dialog(self.model, ixs[0].row())
d.exec_()
@QtCore.pyqtSlot()
def remove_macro(self):
ixs = self.listView_macros.selectionModel().selectedIndexes()
if ixs:
reply = QMessageBox.warning(self, "Remove Macro?",
"Remove Macro?",
QMessageBox.Yes | QMessageBox.No)
if reply == QMessageBox.Yes:
self.model.removeRow(ixs[0].row())
self.model.select()
def _add_table(self, columns):
pass
# row_pos = self.tableWidget_macros.count()
# last_row = self.tableWidget_macros.
# self.tableWidget_macros.insertItem()
#
# for i, col in enumerate(columns):
# self.tableWidget_macros.setitem
def open_excel_file(self, textEdit):
""" open file browser and get path to designated copy or destination file """
fname = QFileDialog.getOpenFileName(self, "Open file")
if fname[0]:
file = open(fname[0], 'r')
with file:
text = file.name # << Saves file PATH to textEdit next to it
textEdit.setText(text)
def run(self):
""" """
copy_wb_path = self.textEdit_copyfrom.text()
        destination_wb_path = self.textEdit_destination.text()
rule = [["B2", "B2"], ["C2:C4", "D2:D4"]] # TEST rule
pass
if __name__ == "__main__":
import sys
database_name = "Macros_db" # ":memory:"
app = QtWidgets.QApplication(sys.argv)
if not create_connection(database_name):
sys.exit(app.exec_())
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.initUI(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
# ----------- TEST ----------
# open_excel_file()
|
StarcoderdataPython
|
4802275
|
from flask import g, request
from app import ApiException, ApiResult, db
from app.api import bp
from app.api.auth import token_auth
from app.data_service import DataServiceException, logs
from app.models import Log, LogSchema
# CREATE LOG
@bp.route("/logs", methods=["POST"])
@token_auth.login_required
def create_log():
json_data = request.get_json() or {}
try:
log = logs.create_log(json_data, g.current_user)
except DataServiceException as dse:
raise ApiException(dse.message, dse.status)
return ApiResult({"message":f"{log} created."}, 201)
# READ ALL LOGS
@bp.route("/logs", methods=["GET"])
@token_auth.login_required
def get_logs():
log_list = logs.read_logs(g.current_user)
return ApiResult(LogSchema(many=True).dump(log_list))
# READ LOG
@bp.route("/logs/<int:id>", methods=["GET"])
@token_auth.login_required
def get_log(id):
try:
log = logs.read_log(id, user=g.current_user)
except DataServiceException as e:
raise ApiException(e.message, e.status)
return ApiResult(LogSchema().dump(log))
# UPDATE LOG
@bp.route("/logs/<int:id>", methods=["PUT"])
@token_auth.login_required
def update_log(id):
json_data = request.get_json() or {}
try:
log = logs.update_log(id, json_data, g.current_user)
except DataServiceException as dse:
raise ApiException(f"{dse.message} Cannot update log.", dse.status)
return ApiResult({"message":f"{log} updated."})
# DELETE LOG
@bp.route("/logs/<int:id>", methods=["DELETE"])
@token_auth.login_required
def delete_log(id):
try:
log = logs.delete_log(id, g.current_user)
except DataServiceException as dse:
raise ApiException(dse.message, dse.status)
return ApiResult({"message": f"{log} deleted."})
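

if __name__ == "__main__":
    # Illustrative client-side sketch only -- this module is a Flask blueprint
    # and is not meant to be run directly. The base URL, the token value and
    # the exact payload fields expected by the Log schema are assumptions.
    import requests

    base = "http://localhost:5000/api"
    headers = {"Authorization": "Bearer <token>"}

    # Create a log, then list the logs visible to the authenticated user.
    requests.post(f"{base}/logs", json={"title": "example"}, headers=headers)
    print(requests.get(f"{base}/logs", headers=headers).json())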
|
StarcoderdataPython
|
339229
|
"""`get_entropy` code comes from https://github.com/paulbrodersen/entropy_estimators/blob/master/entropy_estimators/continuous.py"""
import numpy as np
from scipy.spatial import KDTree
from scipy.special import gamma, digamma
def get_entropy(x, k=1, norm='max', min_dist=0., workers=1):
"""
Code source: https://github.com/paulbrodersen/entropy_estimators/blob/master/entropy_estimators/continuous.py
Estimates the entropy H of a random variable x (in nats) based on
the kth-nearest neighbour distances between point samples.
@reference:
<NAME>., & <NAME>. (1987). Sample estimate of the entropy of a random vector. Problemy Peredachi Informatsii, 23(2), 9–16.
Arguments:
----------
x: (n, d) ndarray
n samples from a d-dimensional multivariate distribution
k: int (default 1)
kth nearest neighbour to use in density estimate;
imposes smoothness on the underlying probability distribution
norm: 'euclidean' or 'max'
p-norm used when computing k-nearest neighbour distances
min_dist: float (default 0.)
minimum distance between data points;
smaller distances will be capped using this value
workers: int (default 1)
number of workers to use for parallel processing in query;
-1 uses all CPU threads
Returns:
--------
h: float
entropy H(X)
"""
n, d = x.shape
if norm == 'max': # max norm:
p = np.inf
log_c_d = 0 # volume of the d-dimensional unit ball
elif norm == 'euclidean': # euclidean norm
p = 2
log_c_d = (d/2.) * np.log(np.pi) -np.log(gamma(d/2. +1))
else:
raise NotImplementedError("Variable 'norm' either 'max' or 'euclidean'")
kdtree = KDTree(x)
# query all points -- k+1 as query point also in initial set
# distances, _ = kdtree.query(x, k + 1, eps=0, p=norm)
distances, _ = kdtree.query(x, k + 1, eps=0, p=p, workers=workers)
distances = distances[:, -1]
# enforce non-zero distances
distances[distances < min_dist] = min_dist
sum_log_dist = np.sum(np.log(2*distances)) # where did the 2 come from? radius -> diameter
h = -digamma(k) + digamma(n) + log_c_d + (d / float(n)) * sum_log_dist
return h
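

if __name__ == "__main__":
    # Quick sanity check (illustrative): for d i.i.d. standard normal
    # dimensions the true differential entropy is 0.5 * d * log(2 * pi * e),
    # so the kNN estimate should land close to that value for large n.
    rng = np.random.default_rng(0)
    d = 2
    samples = rng.standard_normal((10000, d))
    analytic = 0.5 * d * np.log(2 * np.pi * np.e)
    estimate = get_entropy(samples, k=3, norm='euclidean')
    print("analytic: %.3f nats, kNN estimate: %.3f nats" % (analytic, estimate))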
|
StarcoderdataPython
|
9610775
|
<reponame>fukuball/fuku-ml
# encoding=utf8
import os
import numpy as np
import FukuML.Utility as utility
import FukuML.MLBase as ml
import FukuML.DecisionTree as decision_tree
import FukuML.LinearRegression as linear_regression
class Regression(ml.Learner):
# too slow for high dimension data, can't do digits multi classifier
def __init__(self):
"""init"""
self.status = 'empty'
self.train_X = []
self.train_Y = []
self.W = []
self.data_num = 0
self.data_demension = 0
self.test_X = []
self.test_Y = []
self.feature_transform_mode = ''
self.feature_transform_degree = 1
self.run_t = 40
self.decision_tree = []
self.alpha = []
def load_train_data(self, input_data_file=''):
self.status = 'load_train_data'
if (input_data_file == ''):
input_data_file = os.path.normpath(os.path.join(os.path.join(os.getcwd(), os.path.dirname(__file__)), "dataset/pocket_pla_binary_train.dat"))
else:
if (os.path.isfile(input_data_file) is not True):
print("Please make sure input_data_file path is correct.")
return self.train_X, self.train_Y
self.train_X, self.train_Y = utility.DatasetLoader.load(input_data_file)
return self.train_X, self.train_Y
def load_test_data(self, input_data_file=''):
if (input_data_file == ''):
input_data_file = os.path.normpath(os.path.join(os.path.join(os.getcwd(), os.path.dirname(__file__)), "dataset/pocket_pla_binary_test.dat"))
else:
if (os.path.isfile(input_data_file) is not True):
print("Please make sure input_data_file path is correct.")
return self.test_X, self.test_Y
self.test_X, self.test_Y = utility.DatasetLoader.load(input_data_file)
if (self.feature_transform_mode == 'polynomial') or (self.feature_transform_mode == 'legendre'):
self.test_X = self.test_X[:, 1:]
self.test_X = utility.DatasetLoader.feature_transform(
self.test_X,
self.feature_transform_mode,
self.feature_transform_degree
)
return self.test_X, self.test_Y
def set_param(self, run_t):
self.run_t = run_t
return self.run_t
def init_W(self, mode='normal'):
if (self.status != 'load_train_data') and (self.status != 'train'):
print("Please load train data first.")
return self.W
self.status = 'init'
self.data_num = len(self.train_Y)
self.data_demension = len(self.train_X[0])
self.decision_tree = [None] * self.run_t
self.alpha = [0.0] * self.run_t
self.W = np.zeros(self.data_demension)
return self.W
def score_function(self, x, W):
score = 0.0
for i, weak_learner in enumerate(self.decision_tree):
predict_string = np.array(list(map(str, x)))
predict_string = ' '.join(predict_string[1:])
prediction = weak_learner.prediction(predict_string, 'future_data')
score = score + (self.alpha[i] * prediction['prediction'])
return score
def error_function(self, y_prediction, y_truth):
error = (y_prediction - y_truth) ** 2
return error
def calculate_avg_error(self, X, Y, W):
return super(Regression, self).calculate_avg_error(X, Y, W)
def calculate_test_data_avg_error(self):
return super(Regression, self).calculate_test_data_avg_error()
def calculate_alpha_s(self, weak_learner, s):
alpha = 0.0
new_s = s
data_num = len(weak_learner.train_Y)
X = []
for i in range(data_num):
predict_string = np.array(list(map(str, weak_learner.train_X[i])))
predict_string = ' '.join(predict_string[1:]) + ' ' + str(weak_learner.train_Y[i])
prediction = weak_learner.prediction(predict_string, 'test_data')
X.append([float(prediction['prediction'])])
X = np.array(X)
linear = linear_regression.LinearRegression()
linear.status = 'load_train_data'
linear.train_X = X
linear.train_Y = weak_learner.train_Y - s
linear.set_param()
linear.init_W()
linear.train()
alpha = linear.W[0]
new_s = s + alpha * np.ravel(X)
return alpha, new_s
def train(self):
if (self.status != 'init'):
print("Please load train data and init W first.")
return self.W
self.status = 'train'
s = np.array([0] * self.data_num)
for t in range(self.run_t):
# np.random.choice(np.arange(self.data_num), self.data_num, p=(u/sum(u)))
print("Round " + str(t + 1))
decision_tree_c = decision_tree.CART()
decision_tree_c.status = 'load_train_data'
decision_tree_c.train_X = self.train_X
decision_tree_c.train_Y = self.train_Y - s
decision_tree_c.set_param(learn_type='regression', tree_height_limit=3)
decision_tree_c.init_W()
decision_tree_c.train()
alpha, s = self.calculate_alpha_s(decision_tree_c, s)
self.decision_tree[t] = decision_tree_c
self.alpha[t] = alpha
return self.W
def prediction(self, input_data='', mode='test_data'):
return super(Regression, self).prediction(input_data, mode)
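

if __name__ == "__main__":
    # Minimal usage sketch for the gradient-boosted regression learner defined
    # above, using the bundled default dataset; run_t is kept small so the
    # demo finishes quickly (results on the default binary dataset are only
    # illustrative).
    learner = Regression()
    learner.load_train_data()
    learner.load_test_data()
    learner.set_param(run_t=5)
    learner.init_W()
    learner.train()
    print("train avg error:", learner.calculate_avg_error(learner.train_X, learner.train_Y, learner.W))
    print("test avg error:", learner.calculate_test_data_avg_error())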
|
StarcoderdataPython
|
11225
|
<reponame>BarracudaPff/code-golf-data-pythpn
problem_type = "segmentation"
dataset_name = "synthia_rand_cityscapes"
dataset_name2 = None
perc_mb2 = None
model_name = "resnetFCN"
freeze_layers_from = None
show_model = False
load_imageNet = True
load_pretrained = False
weights_file = "weights.hdf5"
train_model = True
test_model = True
pred_model = False
debug = True
debug_images_train = 50
debug_images_valid = 50
debug_images_test = 50
debug_n_epochs = 2
batch_size_train = 2
batch_size_valid = 2
batch_size_test = 2
crop_size_train = (512, 512)
crop_size_valid = None
crop_size_test = None
resize_train = None
resize_valid = None
resize_test = None
shuffle_train = True
shuffle_valid = False
shuffle_test = False
seed_train = 1924
seed_valid = 1924
seed_test = 1924
optimizer = "rmsprop"
learning_rate = 0.0001
weight_decay = 0.0
n_epochs = 1000
save_results_enabled = True
save_results_nsamples = 5
save_results_batch_size = 5
save_results_n_legend_rows = 1
earlyStopping_enabled = True
earlyStopping_monitor = "val_jaccard"
earlyStopping_mode = "max"
earlyStopping_patience = 50
earlyStopping_verbose = 0
checkpoint_enabled = True
checkpoint_monitor = "val_jaccard"
checkpoint_mode = "max"
checkpoint_save_best_only = True
checkpoint_save_weights_only = True
checkpoint_verbose = 0
plotHist_enabled = True
plotHist_verbose = 0
LRScheduler_enabled = True
LRScheduler_batch_epoch = "batch"
LRScheduler_type = "poly"
LRScheduler_M = 75000
LRScheduler_decay = 0.1
LRScheduler_S = 10000
LRScheduler_power = 0.9
TensorBoard_enabled = True
TensorBoard_histogram_freq = 1
TensorBoard_write_graph = True
TensorBoard_write_images = False
TensorBoard_logs_folder = None
norm_imageNet_preprocess = True
norm_fit_dataset = False
norm_rescale = 1
norm_featurewise_center = False
norm_featurewise_std_normalization = False
norm_samplewise_center = False
norm_samplewise_std_normalization = False
norm_gcn = False
norm_zca_whitening = False
cb_weights_method = None
da_rotation_range = 0
da_width_shift_range = 0.0
da_height_shift_range = 0.0
da_shear_range = 0.0
da_zoom_range = 0.5
da_channel_shift_range = 0.0
da_fill_mode = "constant"
da_cval = 0.0
da_horizontal_flip = True
da_vertical_flip = False
da_spline_warp = False
da_warp_sigma = 10
da_warp_grid_size = 3
da_save_to_dir = False
|
StarcoderdataPython
|
11272871
|
<gh_stars>0
from setuptools import setup
with open("README.md", "r") as fh:
readme = fh.read()
setup(name='calculaHashDadosAbertos',
version='0.0.5',
url='https://github.com/masuta16/calculaHash',
license='MIT License',
author='<NAME>',
long_description=readme,
long_description_content_type="text/markdown",
author_email='<EMAIL>',
keywords='Dados Abertos',
description=u'Retorna dados de hash dos ultimos n dias da biblioteca de dados abertos do governo',
packages=['calculaHashDadosAbertos'],
install_requires=['requests','pandas','datetime'],)
|
StarcoderdataPython
|
3373799
|
<gh_stars>0
"""
These functions are used to keep the median element from a stream of numbers,
here represented by a list of numbers, using heaps.
The function medianMaintnance always keeps the median and also keeps the sum of
all the medians whenever a new number is added.
The two other functions are helper functions to keep the heaps leveled.
"""
import heapq
def make_heaps_even(low_size, high_size, heap_low, heap_high):
"""
    This function keeps the heaps even; the difference in the heap sizes can't be more than one.
    :param low_size: the size of the low heap
    :param high_size: the size of the high heap
    :param heap_low: heap that stores all the elements that are smaller than the median
    :param heap_high: heap that stores all the elements that are bigger than the median
:return low_size, high_size: the updated size of the heaps
"""
if(low_size > high_size +1):
move_num = heapq.heappop(heap_low)
heapq.heappush(heap_high, -move_num)
low_size -= 1
high_size += 1
#print 'moving', -move_num, 'from low to high heap'
if (high_size > low_size +1):
move_num = heapq.heappop(heap_high)
heapq.heappush(heap_low, -move_num)
high_size -= 1
low_size += 1
#print 'moving', move_num, 'from high to low heap'
return low_size, high_size
def get_median(low_size, high_size, heap_low, heap_high):
"""
    This function returns the median element.
    If the low heap is bigger, the median is the biggest element in that heap and
    the function will return it.
    If the high heap is bigger, the median is the smallest element in that
    heap and we will return it.
    If the heaps are equal, we will return the biggest element from the low heap.
    :param low_size: the size of the low heap
    :param high_size: the size of the high heap
    :param heap_low: heap that stores all the elements that are smaller than the median
    :param heap_high: heap that stores all the elements that are bigger than the median
:return median: the median element from the heaps
"""
if(low_size < high_size):
temp_median = heapq.heappop(heap_high)
heapq.heappush(heap_high,temp_median)
#print 'high is bigger, median is ', temp_median
return temp_median
else:
temp_median = heapq.heappop(heap_low)
heapq.heappush(heap_low,temp_median)
#print 'low is bigger or equal, median is ', temp_median
return -temp_median
def medianMaintnance(numbers_list):
"""
    This function returns the sum of all the median elements,
    calculating the median after each element is added.
    The function uses 2 heaps:
    high heap - keeps all the numbers that are bigger than the median,
    and can return the smallest number in O(1) time (a regular heap from heapq).
    low heap - keeps all the numbers that are smaller than the median,
    and can return the biggest number in O(1) time
    (all the numbers are stored with negated values in a heapq heap).
    :param numbers_list: a list of numbers
    :return median_sum: the sum of all the median elements over time
"""
heap_low = []
heap_high = []
median = 0
median_sum = 0
h_low_size = 0
h_high_size = 0
for num in numbers_list:
#print num
if(num < median):
# push to low heap'
heapq.heappush(heap_low,-num)
h_low_size += 1
else:
# push to high heap'
heapq.heappush(heap_high, num)
h_high_size+=1
# make heaps even and get the new median
h_low_size, h_high_size = make_heaps_even(h_low_size, h_high_size, heap_low, heap_high)
median = get_median(h_low_size, h_high_size, heap_low, heap_high)
#print 'median = ', median
median_sum += median
return median_sum
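# Worked example (illustrative, not part of the original file): for the stream
# [3, 1, 2] the running medians after each insertion are 3, 1 and 2, so
# medianMaintnance([3, 1, 2]) returns 3 + 1 + 2 = 6.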
numbers_list = [1369,831,5283,1477,3932,2632,5179,1645,5714,1183,982,6846,4154,1595,5426,6412,9160,1746,3382,8375,8279,1500]
print(medianMaintnance(numbers_list))
|
StarcoderdataPython
|
1728697
|
from rest_framework import permissions
from ..utils import is_admin
class ReadOnly(permissions.BasePermission):
def has_permission(self, request, view):
return request.method in permissions.SAFE_METHODS
class IsAdminUserOrReadOnly(permissions.IsAdminUser):
def has_permission(self, request, view):
# is_admin = super(IsAdminUserOrReadOnly, self).has_permission(request, view)
# Python3:
is_admin = super().has_permission(request, view)
return request.method in permissions.SAFE_METHODS or is_admin
class IsPrjLeadOrAdminOrReadOnly(permissions.BasePermission):
"""A custom perission class that will only allow the creel project lead or a
site administrator access the endpoint (for creating, updating or
deleting creel design objects).
TODO: add Crew or readonly to allow field crew to collect data but
not alter creel design tables.
"""
def has_object_permission(self, request, view, obj):
if request.method in permissions.SAFE_METHODS:
return True
if hasattr(obj, "creel"):
lead_or_crew = obj.creel.prj_ldr == request.user
else:
lead_or_crew = obj.prj_ldr == request.user
return lead_or_crew or is_admin(request.user)
|
StarcoderdataPython
|
1672432
|
from project.sports_car import SportsCar
print(SportsCar())
sc = SportsCar()
print(sc.drive())
|
StarcoderdataPython
|
12807751
|
<filename>dialogue_ope/airdialogue_model_transformer/models/modules.py
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict
import numpy as np
import torch as th
import torch.nn as nn
from parlai.utils.torch import neginf
from parlai.agents.transformer.modules import TransformerGeneratorModel, TransformerEncoder
def universal_sentence_embedding(sentences, mask, sqrt=True):
"""
Perform Universal Sentence Encoder averaging
(https://arxiv.org/abs/1803.11175).
This is really just sum / sqrt(len).
:param Tensor sentences: an N x T x D of Transformer outputs. Note this is
the exact output of TransformerEncoder, but has the time axis first
:param ByteTensor: an N x T binary matrix of paddings
:return: an N x D matrix of sentence embeddings
:rtype Tensor:
"""
# need to mask out the padded chars
sentence_sums = th.bmm(
sentences.permute(0, 2, 1),
mask.float().unsqueeze(-1)).squeeze(-1)
divisor = mask.sum(dim=1).view(-1, 1).float()
if sqrt:
divisor = divisor.sqrt()
sentence_sums /= divisor
return sentence_sums
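# Shape sketch (illustrative, not from the original file): for a batch of 2
# sequences of length 5 with hidden size 8,
#   universal_sentence_embedding(th.randn(2, 5, 8), th.ones(2, 5, dtype=th.bool))
# returns a 2 x 8 tensor of sentence embeddings (sum over time / sqrt(length)).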
class EndToEndModel(TransformerGeneratorModel):
def __init__(self, opt, dictionary, agenttype):
super().__init__(opt, dictionary)
self.encoder = ContextKnowledgeEncoder(self.encoder, opt, dictionary,
agenttype)
self.decoder = ContextKnowledgeDecoder(self.decoder, agenttype)
self.agenttype = agenttype
def reorder_encoder_states(self, encoder_out, indices):
# ck_attn is used for ticket classification
enc, mask, ck_attn, intent_out, name_out = encoder_out
if not th.is_tensor(indices):
indices = th.LongTensor(indices).to(enc.device)
enc = th.index_select(enc, 0, indices)
mask = th.index_select(mask, 0, indices)
if self.agenttype == 'agent':
intent_out = th.index_select(intent_out, 0, indices)
name_out = th.index_select(name_out, 0, indices)
ck_attn = th.index_select(ck_attn, 0, indices)
else:
intent_out = None
ck_attn = None
name_out = None
return enc, mask, ck_attn, intent_out, name_out
def reorder_decoder_incremental_state(self, incremental_state,
inds):
"""
Reorder the decoder incremental state.
See ``TorchGeneratorModel.reorder_decoder_incremental_state`` for a
description.
Here, incremental_state is a dict whose keys are layer indices and whose
values
are dicts containing the incremental state for that layer.
"""
return {
idx: layer.reorder_incremental_state(incremental_state[idx], inds)
for idx, layer in enumerate(self.decoder.transformer.layers)
}
class ClassificationHead(nn.Module):
def __init__(self, dim, out=3):
"""
3 classes: book, cancel, change
"""
super().__init__()
self.linear = nn.Linear(dim, dim)
self.attn_wei = nn.Linear(dim, 1)
self.softmax = nn.Softmax(dim=1)
self.act = nn.Tanh()
self.final = nn.Linear(dim, out)
def forward(self, x, mask):
x = self.linear(x)
x = self.act(x)
attn = self.attn_wei(x).squeeze(-1)
attn.masked_fill_(~mask, neginf(x.dtype))
attn = self.softmax(attn)
x = th.einsum('btd,bt->bd', x, attn)
x = self.final(x)
return x
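# Shape sketch (illustrative): ClassificationHead(512) maps an [N, T, 512]
# tensor of token states plus an [N, T] boolean mask to [N, 3] class logits via
# masked attention pooling followed by a linear layer.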
class MultiTokenClassificationHead(nn.Module):
def __init__(self, dim, embeddings, out=10):
super().__init__()
self.linear = nn.Linear(dim, out * dim)
self.attn_wei = nn.Linear(dim, 1)
self.act = nn.Tanh()
self.softmax = nn.Softmax(dim=1)
self.proj = nn.Linear(dim, dim)
self.embeddings = embeddings.weight
self.out = out
def forward(self, x, mask):
# import ipdb; ipdb.set_trace()
# x: N x T x D
N, T, D = x.shape
x = self.linear(x).view(N, T, self.out, D)
x = self.act(x)
attn = self.attn_wei(x).squeeze(-1)
attn.masked_fill_(~mask[:, :, None], neginf(x.dtype))
attn = self.softmax(attn)
x = th.einsum('btod,bto->bod', x, attn)
x = self.proj(x)
x = th.einsum('bod,vd->bov', x, self.embeddings)
return x
class ContextKnowledgeEncoder(nn.Module):
"""
Knowledge here can be customer intent or tickets+reservations
"""
def __init__(self, transformer, opt, dictionary, agenttype):
super().__init__()
# The transformer takes care of most of the work, but other modules
# expect us to have an embeddings available
self.embeddings = transformer.embeddings
self.embed_dim = transformer.embeddings.embedding_dim
self.transformer = transformer
self.knowledge_transformer = TransformerEncoder(
embedding=self.embeddings,
n_heads=opt['n_heads'],
n_layers=opt['n_layers_knowledge'],
embedding_size=opt['embedding_size'],
ffn_size=opt['ffn_size'],
vocabulary_size=len(dictionary),
padding_idx=transformer.padding_idx,
learn_positional_embeddings=opt['learn_positional_embeddings'],
embeddings_scale=opt['embeddings_scale'],
reduction_type=transformer.reduction_type,
n_positions=transformer.n_positions,
activation=opt['activation'],
variant=opt['variant'],
output_scaling=opt['output_scaling'],
)
self.agenttype = agenttype
if self.agenttype == 'agent':
self.intent_head = ClassificationHead(opt['embedding_size'])
self.name_head = MultiTokenClassificationHead(opt['embedding_size'],
self.embeddings,
opt.get('name_vec_len'))
self.reservation_transformer = TransformerEncoder(
embedding=self.embeddings,
n_heads=opt['n_heads'],
n_layers=opt['n_layers_knowledge'],
embedding_size=opt['embedding_size'],
ffn_size=opt['ffn_size'],
vocabulary_size=len(dictionary),
padding_idx=transformer.padding_idx,
learn_positional_embeddings=opt['learn_positional_embeddings'],
embeddings_scale=opt['embeddings_scale'],
reduction_type=transformer.reduction_type,
n_positions=transformer.n_positions,
activation=opt['activation'],
variant=opt['variant'],
output_scaling=opt['output_scaling'],
)
self.know_use_project = nn.Linear(opt['embedding_size'],
opt['embedding_size'])
def forward(self, src_tokens, know_tokens, ck_mask, res_tokens=None):
# encode the context, pretty basic
context_encoded, context_mask = self.transformer(src_tokens)
# make all the knowledge into a 2D matrix to encode
# knowledge is intent for customer and tickets for agent
N, K, Tk = know_tokens.size()
know_flat = know_tokens.reshape(-1, Tk)
know_encoded, know_mask = self.knowledge_transformer(know_flat)
if self.agenttype == 'customer':
ck_attn = None
intent_out = None
name_out = None
cs_encoded = know_encoded
cs_mask = know_mask
elif self.agenttype == 'agent':
# import ipdb; ipdb.set_trace()
# compute our sentence embeddings for context and knowledge
context_use = universal_sentence_embedding(context_encoded, context_mask)
know_use = universal_sentence_embedding(know_encoded, know_mask)
# remash it back into the shape we need
know_use = know_use.reshape(N, K, self.embed_dim)
# project before calculate attn
know_use_proj = self.know_use_project(know_use)
ck_attn = th.bmm(know_use_proj, context_use.unsqueeze(-1)).squeeze(-1)
ck_attn /= np.sqrt(self.embed_dim)
# fill with near -inf
ck_attn.masked_fill_(~ck_mask, neginf(context_encoded.dtype))
# Compute context knowledge attn prob
ck_prob = nn.functional.softmax(ck_attn, dim=-1)
_, cs_ids = ck_attn.max(1)
# pick the true chosen sentence. remember that TransformerEncoder outputs
# (batch, time, embed)
# but because know_encoded is a flattened, it's really
# (N * K, T, D)
# We need to compute the offsets of the chosen_sentences
cs_offsets = th.arange(N, device=cs_ids.device) * K + cs_ids
cs_encoded = know_encoded[cs_offsets]
# but padding is (N * K, T)
cs_mask = know_mask[cs_offsets]
# compute reservation embeddings
res_encoded, res_mask = self.reservation_transformer(res_tokens)
# finally, concatenate it all
cs_encoded = th.cat([know_use, cs_encoded, res_encoded], dim=1)
cs_mask = th.cat([ck_mask, cs_mask, res_mask], dim=1)
# intent prediction
intent_out = self.intent_head(context_encoded, context_mask)
name_out = self.name_head(context_encoded, context_mask)
# finally, concatenate it all
full_enc = th.cat([cs_encoded, context_encoded], dim=1)
full_mask = th.cat([cs_mask, context_mask], dim=1)
# also return the knowledge selection mask for the loss
return full_enc, full_mask, ck_attn, intent_out, name_out
class ContextKnowledgeDecoder(nn.Module):
def __init__(self, transformer, agenttype):
super().__init__()
self.transformer = transformer
self.agenttype = agenttype
def forward(self, input, encoder_state, incr_state=None):
# our CK Encoder returns an extra output which the Transformer decoder
# doesn't expect (the knowledge selection mask). Just chop it off
encoder_output, encoder_mask, _, _, _ = encoder_state
return self.transformer(input, (encoder_output, encoder_mask), incr_state)
|
StarcoderdataPython
|
6492970
|
#!/usr/bin/env python
import os
from setuptools import setup
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(name='django-adaptors',
version='0.2.5',
description='Convert CSV/XML files into python object or django model',
author='<NAME>',
author_email='<EMAIL>',
long_description=read('README.txt'),
license="BSD",
keywords="CSV XML Django adaptor",
packages=['adaptor'],
install_requires=[
'Django>=1.4',
],
extras_require={
'XML': ['lxml>=2.3.4']
},
classifiers=[
"Development Status :: 3 - Alpha",
"Topic :: Utilities",
"License :: OSI Approved :: BSD License",
])
|
StarcoderdataPython
|
376417
|
import datetime
import pandas as pd
#uses a set name and data model of component attribute changes to generate set#attribute.xml based on template
#string, Table -> Beautifulsoup
def makeAttributeXML(currentSet,compmodel):
from UserInterface.ProjectSQLiteHandler import ProjectSQLiteHandler
from PyQt5 import QtWidgets
soup = readTemplateAttributeXML()
#fillSetInfo the soup to reflect the model
#for each row in model
compName =''
compTag = ''
compAttr=''
compValue=''
for i in range(compmodel.rowCount()):
compName = ' '.join([compName, compmodel.data(compmodel.index(i,2))])
compTag = ' '.join([compTag, '.'.join(compmodel.data(compmodel.index(i,3)).split('.')[:-1])])
compAttr = ' '.join([compAttr, compmodel.data(compmodel.index(i,3)).split('.')[-1]])
compValue = ' '.join([compValue, compmodel.data(compmodel.index(i, 4))])
tag = soup.find('compName')
tag.attrs['value'] = compName.lstrip()
tag = soup.find('compTag')
tag.attrs['value'] = compTag.lstrip()
tag = soup.find('compAttr')
tag.attrs['value'] = compAttr.lstrip()
tag = soup.find('compValue')
tag.attrs['value']= compValue.lstrip()
#fillSetInfo the set information
handler = ProjectSQLiteHandler()
dataTuple = handler.cursor.execute("SELECT set_name, date_start, date_end, timestep, component_names from setup where set_name = '" + currentSet.lower() + "'").fetchone()
tag = soup.find('setupTag')
tag.attrs['value'] = "componentNames runTimeSteps timeStep"
tag = soup.find('setupAttr')
tag.attrs['value']= "value value value"
tag = soup.find('setupValue')
df = compmodel.parent().window().findChild(QtWidgets.QWidget, 'setupDialog').model.data.fixed
tag.attrs['value'] = " ".join([dataTuple[4],timeStepsToInteger(dataTuple[1],dataTuple[2],df),str(dataTuple[3])])
return soup
#write a soup to xml file
#BeautifulSoup, String, String -> None
def writeAttributeXML(soup,saveDir,setName):
import os
# write combined xml file
if not os.path.exists(saveDir):
os.makedirs(saveDir)
f = open(os.path.join(saveDir,setName), "w")
f.write(soup.prettify())
f.close()
return
#dataframe, integer - > datetime
def integerToTimeIndex(df, i):
d = pd.to_datetime(df.index[int(i)]).date()
return d
def timeStepsToInteger(d1,d2,df):
d1 = datetime.datetime.strptime(d1, '%Y-%m-%d')
d2 = datetime.datetime.strptime(d2, '%Y-%m-%d')
#look in the dataframe to find the position of d1 and d2
#where do we get the dataframe
if (d1.date() > pd.to_datetime(df.index[0]).date())| (d2.date() < pd.to_datetime(df.last_valid_index()).date()):
d1 = pd.to_datetime(df[d1:].first_valid_index())
d2 = pd.to_datetime(df[:d2].last_valid_index())
v1 = df.index.get_loc(d1)
v2 = df.index.get_loc(d2)
return ' '.join([str(v1),str(v2)])
return 'all'
#->Soup
def readTemplateAttributeXML():
from bs4 import BeautifulSoup
import os
# xml templates are in the model/resources/descriptor folder
here = os.path.dirname(os.path.realpath(__file__))
# pull xml from project folder
resourcePath = os.path.join(here, '../GBSModel/Resources/Setup')
# get list of component prefixes that correspond to componentDescriptors
# read the xml file
infile_child = open(os.path.join(resourcePath, 'projectSetAttributes.xml'), "r") # open
contents_child = infile_child.read()
infile_child.close()
soup = BeautifulSoup(contents_child, 'xml') # turn into soup
parent = soup.childOf.string # find the name of parent. if 'self', no parent file
while parent != 'self': # continue to iterate if there are parents
fileName = parent + '.xml'
infile_child = open(fileName, "r")
contents_child = infile_child.read()
infile_child.close()
soup2 = BeautifulSoup(contents_child, 'xml')
# find parent. if 'self' then no parent
parent = soup2.childOf.string
for child in soup2.component.findChildren(): # for each tag under component
# check to see if this is already a tag. If it is, it is a more specific implementation, so don't add
# from parent file
if soup.component.find(child.name) is None:
soup.component.append(child)
return soup
|
StarcoderdataPython
|
11281939
|
<filename>hearbeat_fritz.py
# -*- coding: utf-8 -*-
"""
Created on Wed May 22 17:35:40 2019
@author: BIG1KOR
"""
from imageai.Detection import VideoObjectDetection
#%%
import os
import cv2
#%%
execution_path = os.path.join(os.getcwd())
#%%
detector = VideoObjectDetection()
detector.setModelTypeAsYOLOv3()
detector.setModelPath(os.path.join(execution_path , "models\\yolo.h5"))
detector.loadModel()
video_path = detector.detectObjectsFromVideo(input_file_path=os.path.join( execution_path, "data\\traffic-mini.mp4"),
output_file_path=os.path.join(execution_path, "traffic_mini_detected_1")
, frames_per_second=29, log_progress=True)
print(video_path)
#%%
|
StarcoderdataPython
|
3591903
|
#!/usr/bin/python
"""
Appendix E: Cell Methods
To be imported into cf.py upon initialization of a CF Checker class.
"""
cell_methods16 = {
"point",
"sum",
"mean",
"maximum",
"minimum",
"mid_range",
"standard_deviation",
"variance",
"mode",
"median",
"sum_of_squares",
}
cell_methods17 = cell_methods16.union(
{ # returns new set with elements from both
"maximum_absolute_value",
"minimum_absolute_value",
"mean_absolute_value",
"mean_of_upper_decile",
"range",
"root_mean_square",
}
)
|
StarcoderdataPython
|
4967266
|
<reponame>dokipen/trac-announcer-plugin<filename>announcer/subscribers/ticket_groups.py
# -*- coding: utf-8 -*-
#
# Copyright (c) 2008, <NAME>
# Copyright (c) 2009-2010, <NAME>
# Copyright (c) 2010, <NAME>
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the <ORGANIZATION> nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
import re
from trac.core import Component, implements
from trac.ticket import model
from trac.web.chrome import add_warning
from trac.config import ListOption
from announcer.api import IAnnouncementSubscriber, istrue
from announcer.api import IAnnouncementPreferenceProvider
from announcer.api import _
from announcer.util.settings import BoolSubscriptionSetting
class JoinableGroupSubscriber(Component):
implements(IAnnouncementSubscriber, IAnnouncementPreferenceProvider)
joinable_groups = ListOption('announcer', 'joinable_groups', [],
"""Joinable groups represent 'opt-in' groups that users may
freely join.
The name of the groups should be a simple alphanumeric string. By
        adding the group name preceded by @ (such as @sec for the sec group)
to the CC field of a ticket, everyone in that group will receive an
announcement when that ticket is changed.
""")
def subscriptions(self, event):
if event.realm != 'ticket':
return
if event.category not in ('changed', 'created', 'attachment added'):
return
settings = self._settings()
cc = event.target['cc'] or ''
        for chunk in re.split(r'\s|,', cc):
chunk = chunk.strip()
if chunk.startswith('@'):
member = None
grp = chunk[1:]
for member in settings[grp].get_subscriptions():
self.log.debug(
"JoinableGroupSubscriber added '%s (%s)' " \
"because of opt-in to group: %s"%(member[1], \
member[2] and 'authenticated' or \
'not authenticated', grp))
yield member
if member is None:
self.log.debug("JoinableGroupSubscriber found " \
"no members for group: %s."%grp)
def get_announcement_preference_boxes(self, req):
if req.authname == "anonymous" and 'email' not in req.session:
return
if self.joinable_groups:
yield "joinable_groups", _("Group Subscriptions")
def render_announcement_preference_box(self, req, panel):
settings = self._settings()
if req.method == "POST":
for grp, setting in settings.items():
setting.set_user_setting(req.session,
value=req.args.get('joinable_group_%s'%grp), save=False)
req.session.save()
groups = {}
for grp, setting in settings.items():
groups[grp] = setting.get_user_setting(req.session.sid)[1]
data = dict(joinable_groups = groups)
return "prefs_announcer_joinable_groups.html", data
def _settings(self):
settings = {}
for grp in self.joinable_groups:
settings[grp[1:]] = BoolSubscriptionSetting(
self.env, 'group_%s'%grp[1:])
return settings
|
StarcoderdataPython
|
11364421
|
import argparse
from displ.pwscf.parseScf import final_coordinates_from_scf
def with_coordinates(pw_in_path, positions_type, atom_symbols, atom_positions):
"""Return a string giving a new input file, which is the same as the one at
`pw_in_path` except that the ATOMIC_POSITIONS block is replaced by the one
specified by the other parameters of this function.
`positions_type`, `atom_symbols`, and `atom_positions` have the same
meaning as the return values from `final_coordinates_from_scf()`.
Assumes that there are no whitespace lines in the ATOMIC_POSITIONS block
(not sure whether this is allowed by QE).
"""
with open(pw_in_path, 'r') as fp:
in_lines = fp.readlines()
out_lines = []
in_atomic_block = False
atom_count = 0
for i, line in enumerate(in_lines):
if 'ATOMIC_POSITIONS' in line:
out_lines.append("ATOMIC_POSITIONS {}\n".format(positions_type))
in_atomic_block = True
elif in_atomic_block:
sym, pos = atom_symbols[atom_count], atom_positions[atom_count]
pos_line = " {} {} {} {}\n".format(sym, str(pos[0]), str(pos[1]),
str(pos[2]))
out_lines.append(pos_line)
atom_count += 1
if atom_count == len(atom_symbols):
in_atomic_block = False
else:
out_lines.append(line)
return ''.join(out_lines)
def set_relaxed_coordinates(pw_in_paths, relax_path):
positions_type, atom_symbols, atom_positions = final_coordinates_from_scf(relax_path)
for path in pw_in_paths:
relaxed_input = with_coordinates(path, positions_type, atom_symbols,
atom_positions)
with open(path, 'w') as fp:
fp.write(relaxed_input)
def _main():
parser = argparse.ArgumentParser("Set coordinates to relaxed value",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("relaxed_output", type=str,
help="Path to pw.x relaxed output text file")
parser.add_argument("input_to_change", type=str,
help="Path to pw.x input file to change to relaxed coordinates")
args = parser.parse_args()
set_relaxed_coordinates([args.input_to_change], args.relaxed_output)
if __name__ == "__main__":
_main()
|
StarcoderdataPython
|
4824620
|
<filename>PacoteDownload/Mundo 2 do curso/while/desafio 64.py
x=int(input('digite seu número: '))
c=999
soma=0
n_entradas=0
while x!=999:
if x != 999:
soma = soma+x
n_entradas += 1
x = int(input('digite seu número: '))
print('A soma é {} e foram {} entradas.'.format(soma,n_entradas))
|
StarcoderdataPython
|
3211194
|
<reponame>sampotter/pyvista
import pytest
import pyvista as pv
def test_compare_images_two_plotters(sphere, tmpdir):
filename = str(tmpdir.mkdir("tmpdir").join('tmp.png'))
pl1 = pv.Plotter()
pl1.add_mesh(sphere)
arr1 = pl1.screenshot(filename)
im1 = pv.read(filename)
pl2 = pv.Plotter()
pl2.add_mesh(sphere)
assert not pv.compare_images(pl1, pl2)
assert not pv.compare_images(arr1, pl2)
assert not pv.compare_images(im1, pl2)
assert not pv.compare_images(filename, pl2)
assert not pv.compare_images(arr1, pl2, use_vtk=True)
with pytest.raises(TypeError):
pv.compare_images(im1, pl1.ren_win)
|
StarcoderdataPython
|
4989757
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generating and running arithmetic programs with if and repeat statements.
We use a list of statements to represent a program. Each statement is a list of
an operator and two operands. The standard ops in a program are +, -, *,
if-statements, and a special "repeat" op ("r") that acts as a repeat block in
the program.
The +, -, and * ops update a variable by modifying it. The first operand
indicates which variable is being updated. The second operand indicates
by how much to modify the variable.
In the repeat op, the first operand indicates the number of repetitions and the
second op indicates how many statements to repeat.
"""
import random
from absl import logging # pylint: disable=unused-import
from ipagnn.datasets.control_flow_programs.program_generators import constants
REPEAT_OP = "r"
IF_OP = "i"
ELSE_OP = "e"
PLACEHOLDER_OP = "_"
def generate_python_source(length, config):
"""Generates Python code according to the config."""
statements, unused_hole_statement_index = _generate_statements(length, config)
return _to_python_source(statements, config)
def generate_python_source_and_partial_python_source(length, config):
"""Generates Python code according to the config."""
statements, hole_statement_index = _generate_statements(length, config)
partial_statements = statements.copy()
partial_statements[hole_statement_index] = _placeholder_statement()
return (_to_python_source(statements, config),
_to_python_source(partial_statements, config))
def _placeholder_statement():
return (PLACEHOLDER_OP, 0, 0)
def _generate_statements(length, config):
"""Generates a list of statements representing a control flow program.
Args:
length: The number of statements to generate.
config: The ArithmeticRepeatsConfig specifying the properties of the program
to generate.
Returns:
A list of statements, each statement being a 3-tuple (op, operand, operand),
as well as the index of a statement to replace with a hole.
"""
max_value = config.base ** config.num_digits - 1
statements = []
nesting_lines_remaining = []
nesting_instructions = []
num_repeats = 0
num_ifs = 0
hole_candidates = []
instruction = None
for statement_index in range(length):
if instruction is None:
current_nesting = len(nesting_lines_remaining)
nesting_permitted = (config.max_nesting is None
or current_nesting < config.max_nesting)
too_many_repeats = (config.max_repeat_statements is not None
and num_repeats > config.max_repeat_statements)
repeat_permitted = nesting_permitted and not (
too_many_repeats
or statement_index == length - 1 # Last line of program.
or 1 in nesting_lines_remaining # Last line of another block.
)
too_many_ifs = (config.max_if_statements is not None
and num_ifs > config.max_if_statements)
if_permitted = nesting_permitted and not (
too_many_ifs
or statement_index == length - 1 # Last line of program.
or 1 in nesting_lines_remaining # Last line of another block.
)
ifelse_permitted = nesting_permitted and not (
too_many_ifs
or statement_index >= length - 3 # Need 4 lines for if-else.
or 1 in nesting_lines_remaining # Last line of another block.
or 2 in nesting_lines_remaining # 2nd-to-last line of another block.
or 3 in nesting_lines_remaining # 3rd-to-last line of another block.
)
op_random = random.random()
is_repeat = repeat_permitted and op_random < config.repeat_probability
is_if = if_permitted and (
config.repeat_probability
< op_random
< config.repeat_probability + config.if_probability)
is_ifelse = ifelse_permitted and (
config.repeat_probability + config.if_probability
< op_random
< (config.repeat_probability
+ config.if_probability
+ config.ifelse_probability))
# statements_remaining_* includes current statement.
statements_remaining_in_program = length - statement_index
statements_remaining_in_block = min(
[statements_remaining_in_program] + nesting_lines_remaining)
if config.max_block_size:
max_block_size = min(config.max_block_size,
statements_remaining_in_block)
else:
max_block_size = statements_remaining_in_block
if is_repeat:
num_repeats += 1
repetitions = random.randint(2, config.max_repetitions)
# num_statements includes current statement.
num_statements = random.randint(2, max_block_size)
nesting_lines_remaining.append(num_statements)
nesting_instructions.append(None)
# -1 is to not include current statement.
statement = (REPEAT_OP, repetitions, num_statements - 1)
elif is_if:
num_ifs += 1
# num_statements includes current statement.
num_statements = random.randint(2, max_block_size)
nesting_lines_remaining.append(num_statements)
nesting_instructions.append(None)
threshold = random.randint(0, max_value) # "if v0 > {threshold}:"
# -1 is to not include current statement.
statement = (IF_OP, threshold, num_statements - 1)
elif is_ifelse:
num_ifs += 1
# num_statements includes current statement.
num_statements = random.randint(4, max_block_size)
# Choose a statement to be the else statement.
else_statement_index = random.randint(2, num_statements - 2)
nesting_lines_remaining.append(else_statement_index)
nesting_instructions.append(
("else", num_statements - else_statement_index))
threshold = random.randint(0, max_value) # "if v0 > {threshold}:"
# -1 is to not include current statement.
statement = (IF_OP, threshold, else_statement_index - 1)
else:
op = random.choice(config.ops)
variable_index = 0 # "v0"
operand = random.randint(0, max_value)
statement = (op, variable_index, operand)
hole_candidates.append(statement_index)
else: # instruction is not None
if instruction[0] == "else":
# Insert an else block.
num_statements = instruction[1]
nesting_lines_remaining.append(num_statements)
nesting_instructions.append(None)
# -1 is to not include current statement.
statement = (ELSE_OP, 0, num_statements - 1)
else:
raise ValueError("Unexpected instruction", instruction)
instruction = None
statements.append(statement)
# Decrement nesting.
for nesting_index in range(len(nesting_lines_remaining)):
nesting_lines_remaining[nesting_index] -= 1
while nesting_lines_remaining and nesting_lines_remaining[-1] == 0:
nesting_lines_remaining.pop()
instruction = nesting_instructions.pop()
assert 0 not in nesting_lines_remaining
hole_statement_index = random.choice(hole_candidates)
return statements, hole_statement_index
def _select_counter_variable(used_variables, config):
del config # Unused.
num_variables = 10 # TODO(dbieber): num_variables is hardcoded.
max_variable = num_variables - 1
allowed_variables = (
set(range(1, max_variable + 1)) - set(used_variables))
return random.choice(list(allowed_variables))
def _to_python_source(statements, config):
"""Convert statements into Python source code.
Repeat statements are rendered as while loops with a counter variable that
tracks the number of iterations remaining.
Args:
statements: A list of statements. Each statement is a triple containing
(op, operand, operand).
config: An ArithmeticRepeatsConfig.
Returns:
Python source code representing the program.
"""
lines = []
nesting_lines_remaining = []
used_variables = []
for statement in statements:
op, operand1, operand2 = statement
indent = constants.INDENT_STRING * len(nesting_lines_remaining)
if op is REPEAT_OP:
# num_statements doesn't include current statement.
repetitions, num_statements = operand1, operand2
variable_index = _select_counter_variable(used_variables, config)
line1 = f"{indent}v{variable_index} = {repetitions}"
line2 = f"{indent}while v{variable_index} > 0:"
# +1 is for current statement.
nesting_lines_remaining.append(num_statements + 1)
used_variables.append(variable_index)
line3_indent = constants.INDENT_STRING * len(nesting_lines_remaining)
line3 = f"{line3_indent}v{variable_index} -= 1"
lines.extend([line1, line2, line3])
elif op is IF_OP:
# num_statements doesn't include current statement.
threshold, num_statements = operand1, operand2
lines.append(f"{indent}if v0 > {threshold}:")
# +1 is for current statement.
nesting_lines_remaining.append(num_statements + 1)
used_variables.append(None)
elif op is ELSE_OP:
lines.append(f"{indent}else:")
# +1 is for current statement.
num_statements = operand2
nesting_lines_remaining.append(num_statements + 1)
used_variables.append(None)
elif op is PLACEHOLDER_OP:
lines.append(f"{indent}_ = 0")
else:
variable_index, operand = operand1, operand2
line = f"{indent}v{variable_index} {op} {operand}"
lines.append(line)
# Decrement nesting.
for nesting_index in range(len(nesting_lines_remaining)):
nesting_lines_remaining[nesting_index] -= 1
while nesting_lines_remaining and nesting_lines_remaining[-1] == 0:
nesting_lines_remaining.pop()
used_variables.pop()
return "\n".join(lines)
|
StarcoderdataPython
|
189989
|
"""# Shoelace Widget Functionality
Provides the ShoelaceWidget and ShoelaceWidgetGenerator
"""
from ..shoelace_component import ShoelaceComponent
class ShoelaceWidget(ShoelaceComponent): # pylint: disable=too-few-public-methods
"""Your Shoelace Widgets should inherits this"""
|
StarcoderdataPython
|
269614
|
# NOTE: `ix` is assumed to be an already-built Whoosh index; the directory name
# below is only illustrative.
from whoosh import index
ix = index.open_dir("indexdir")
searcher = ix.searcher()
from whoosh.qparser import QueryParser
qp = QueryParser("title", schema=ix.schema)
q = qp.parse(u"felipe")
with ix.searcher() as s:
results = s.search(q)
len(results)
|
StarcoderdataPython
|
5062099
|
import os,sys
# Add the repository directory, as the project root, to sys.path.
VNPY_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..' ))
if VNPY_ROOT not in sys.path:
sys.path.append(VNPY_ROOT)
print(f'append {VNPY_ROOT} into sys.path')
import asyncio
from vnpy.api.eastmoney_api.eastmoney import EastMoneyBackend, URL_ROOT
if __name__ == '__main__':
loop = asyncio.get_event_loop()
debug = False
username = "xxx"
password = "<PASSWORD>"
if debug:
backend = EastMoneyBackend(browser_url=None, debug=True)
else:
backend = EastMoneyBackend()
if username is None or password is None:
print("err_msg", "无效的登录信息")
if username is not None:
task = backend.login(username, password, max_retries=10)
result = loop.run_until_complete(task)
else:
result = True
print('登录完成')
print('validatekey:{}'.format(backend.validatekey))
print('cookies:{}'.format(backend.cookies))
|
StarcoderdataPython
|
12855933
|
<reponame>rodrigoviannini/meus_Primeiros_Codigos<filename>007 - Intro List Comprehension.py/016 - Maior.py
"""
List Comprehension Aninhada
OBJ: Encontrar o maior ou os maiores números de uma lista e imprimir outra lista
"""
listaGenerica = [1, 2, 3, 4, 1, 2, 3, 4, 10, 10, 10, 5, 3, -4]
listaMaior = [x for x in listaGenerica if not False in [True if x >= y else False for y in listaGenerica]]
print(listaMaior)
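# expected output (worked by hand): [10, 10, 10]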
|
StarcoderdataPython
|
1742727
|
<gh_stars>0
"""This module contains the general information for AdaptorEthInterruptProfile ManagedObject."""
from ...ucscentralmo import ManagedObject
from ...ucscentralcoremeta import UcsCentralVersion, MoPropertyMeta, MoMeta
from ...ucscentralmeta import VersionMeta
class AdaptorEthInterruptProfileConsts():
MODE_INTX = "intx"
MODE_MSI = "msi"
MODE_MSI_X = "msi-x"
class AdaptorEthInterruptProfile(ManagedObject):
"""This is AdaptorEthInterruptProfile class."""
consts = AdaptorEthInterruptProfileConsts()
naming_props = set([])
mo_meta = MoMeta("AdaptorEthInterruptProfile", "adaptorEthInterruptProfile", "eth-int", VersionMeta.Version111a, "InputOutput", 0xff, [], ["admin", "ls-config-policy", "ls-network", "ls-server-policy"], [u'adaptorHostEthIfProfile', u'adaptorUsnicConnDef'], [], ["Get", "Set"])
prop_meta = {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version111a, MoPropertyMeta.INTERNAL, None, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"coalescing_time": MoPropertyMeta("coalescing_time", "coalescingTime", "uint", VersionMeta.Version111a, MoPropertyMeta.READ_WRITE, 0x2, None, None, None, [], ["0-65535"]),
"coalescing_type": MoPropertyMeta("coalescing_type", "coalescingType", "string", VersionMeta.Version111a, MoPropertyMeta.READ_WRITE, 0x4, None, None, None, [], []),
"count": MoPropertyMeta("count", "count", "ushort", VersionMeta.Version111a, MoPropertyMeta.READ_WRITE, 0x8, None, None, None, [], ["1-514"]),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, 0x10, 0, 256, None, [], []),
"mode": MoPropertyMeta("mode", "mode", "string", VersionMeta.Version111a, MoPropertyMeta.READ_WRITE, 0x20, None, None, None, ["intx", "msi", "msi-x"], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, 0x40, 0, 256, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version111a, MoPropertyMeta.READ_WRITE, 0x80, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
}
prop_map = {
"childAction": "child_action",
"coalescingTime": "coalescing_time",
"coalescingType": "coalescing_type",
"count": "count",
"dn": "dn",
"mode": "mode",
"rn": "rn",
"status": "status",
}
def __init__(self, parent_mo_or_dn, **kwargs):
self._dirty_mask = 0
self.child_action = None
self.coalescing_time = None
self.coalescing_type = None
self.count = None
self.mode = None
self.status = None
ManagedObject.__init__(self, "AdaptorEthInterruptProfile", parent_mo_or_dn, **kwargs)
|
StarcoderdataPython
|
8093962
|
<filename>others/solution/1620.py<gh_stars>0
n, m = map(int, input().split())
pokemon_dictonary1 = {}
for i in range(n):
pokemon_name = input()
pokemon_dictonary1[pokemon_name] = f'{i+1}'
pokemon_dictonary2 = {v:k for k, v in pokemon_dictonary1.items()}
for j in range(m):
problem = input()
if not problem.isnumeric():
print(pokemon_dictonary1[problem])
else:
print(pokemon_dictonary2[problem])
|
StarcoderdataPython
|
11285365
|
SCALAR_ENTRY = 'scalar'
SCALARS_ENTRY = 'scalars'
IMAGE_ENTRY = 'image'
PLOT_ENTRY = 'plot'
LOG_ENTRY_TYPES = []
class LogEntry:
def __init__(self, value, data_type):
self.value = value
self.data_type = data_type
def __repr__(self):
return 'LogEntry(\n %s\n)' % self.value.__repr__().replace('\n', '\n ')
|
StarcoderdataPython
|
3280349
|
<filename>test/integration_tests/test_models.py
import torch
from torchtext.models import ROBERTA_BASE_ENCODER, ROBERTA_LARGE_ENCODER, XLMR_BASE_ENCODER, XLMR_LARGE_ENCODER
from ..common.assets import get_asset_path
from ..common.parameterized_utils import nested_params
from ..common.torchtext_test_case import TorchtextTestCase
class TestModels(TorchtextTestCase):
@nested_params(
[
("xlmr.base.output.pt", "XLMR base Model Comparison", XLMR_BASE_ENCODER),
("xlmr.large.output.pt", "XLMR base Model Comparison", XLMR_LARGE_ENCODER),
(
"roberta.base.output.pt",
"Roberta base Model Comparison",
ROBERTA_BASE_ENCODER,
),
(
"roberta.large.output.pt",
"Roberta base Model Comparison",
ROBERTA_LARGE_ENCODER,
),
],
[True, False],
)
def test_model(self, model_args, is_jit):
"""Verify pre-trained XLM-R and Roberta models in torchtext produce
the same output as the reference implementation within fairseq
"""
expected_asset_name, test_text, model_bundler = model_args
expected_asset_path = get_asset_path(expected_asset_name)
transform = model_bundler.transform()
model = model_bundler.get_model()
model = model.eval()
if is_jit:
transform = torch.jit.script(transform)
model = torch.jit.script(model)
model_input = torch.tensor(transform([test_text]))
actual = model(model_input)
expected = torch.load(expected_asset_path)
torch.testing.assert_close(actual, expected)
|
StarcoderdataPython
|
164369
|
import time
import logging
from ..data_asset import DataAsset
from ..dataset import Dataset
from great_expectations.exceptions import GreatExpectationsError
logger = logging.getLogger(__name__)
class DataAssetProfiler(object):
@classmethod
def validate(cls, data_asset):
return isinstance(data_asset, DataAsset)
class DatasetProfiler(object):
@classmethod
def validate(cls, dataset):
return isinstance(dataset, Dataset)
@classmethod
def add_expectation_meta(cls, expectation):
if not "meta" in expectation:
expectation["meta"] = {}
expectation["meta"][str(cls.__name__)] = {
"confidence": "very low"
}
return expectation
@classmethod
def add_meta(cls, expectation_suite, batch_kwargs=None):
if not "meta" in expectation_suite:
expectation_suite["meta"] = {}
class_name = str(cls.__name__)
expectation_suite["meta"][class_name] = {
"created_by": class_name,
"created_at": time.time(),
}
if batch_kwargs is not None:
expectation_suite["meta"][class_name]["batch_kwargs"] = batch_kwargs
new_expectations = [cls.add_expectation_meta(
exp) for exp in expectation_suite["expectations"]]
expectation_suite["expectations"] = new_expectations
return expectation_suite
@classmethod
def profile(cls, data_asset, run_id=None):
if not cls.validate(data_asset):
raise GreatExpectationsError("Invalid data_asset for profiler; aborting")
expectation_suite = cls._profile(data_asset)
batch_kwargs = data_asset.get_batch_kwargs()
expectation_suite = cls.add_meta(expectation_suite, batch_kwargs)
validation_results = data_asset.validate(expectation_suite, run_id=run_id, result_format="SUMMARY")
return expectation_suite, validation_results
@classmethod
def _profile(cls, dataset):
raise NotImplementedError
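# Minimal usage sketch (illustrative; the names are hypothetical): a concrete
# profiler subclasses DatasetProfiler and implements _profile, then is applied
# to a dataset:
#   suite, results = MyProfiler.profile(my_dataset)
# where my_dataset is a great_expectations Dataset.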
|
StarcoderdataPython
|
6523948
|
<gh_stars>0
"""
Compress to show string and number behind
"""
def compressedString(message):
# catch empty or single
if len(message) <= 1:
return message
# use a stack and counter
last_alpha = ""
count = 1
return_string = ""
for msg in message:
if last_alpha != msg:
# append to return string
return_string += last_alpha
if count > 1:
return_string += str(count)
last_alpha = msg
count = 1
else:
count += 1
# last append
return_string += last_alpha
if count > 1:
return_string += str(count)
return return_string
# driver
print(compressedString("abbbbccccc"))
|
StarcoderdataPython
|
3504240
|
<filename>setup.py
#!/usr/bin/env python3
# Copyright (C) 2020 <NAME> <<EMAIL>>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import rpki_ov_checker
version = rpki_ov_checker.__version__
import codecs
import os
import sys
from os.path import abspath, dirname, join
from setuptools import setup, find_packages
here = abspath(dirname(__file__))
def parse_requirements(filename):
""" load requirements from a pip requirements file """
lineiter = (line.strip() for line in open(filename))
return [line for line in lineiter if line and not line.startswith("#")]
with codecs.open(join(here, 'README.md'), encoding='utf-8') as f:
README = f.read()
if sys.argv[-1] == 'publish':
os.system('python3 setup.py sdist upload')
print("You probably want to also tag the version now:")
print((" git tag -a %s -m 'version %s'" % (version, version)))
print(" git push --tags")
sys.exit()
install_reqs = parse_requirements('requirements.txt')
reqs = install_reqs
setup(
name='rpki-ov-checker',
version=version,
maintainer="<NAME>",
maintainer_email='<EMAIL>',
url='https://github.com/job/rpki-ov-checker',
description='RPKI Origin Validation checker',
long_description=README,
long_description_content_type="text/markdown",
license='ISCL',
keywords='rpki prefix routing networking',
setup_requires=reqs,
install_requires=reqs,
classifiers=[
'Intended Audience :: Developers',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: System :: Networking',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 3 :: Only'
],
packages=find_packages(exclude=['tests', 'tests.*']),
entry_points={'console_scripts':
['rpki-ov-checker = rpki_ov_checker.checker:main']},
)
|
StarcoderdataPython
|
5087818
|
from typing import List
class Solution:
def maxSatisfaction(self, satisfaction: List[int]) -> int:
satisfaction.sort()
best,esum,total = 0,0,0
for x in reversed(satisfaction):
esum += x
total += esum
if total>best:
best = total
elif esum<0:
break
return best
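# Example (worked by hand): Solution().maxSatisfaction([-1, -8, 0, 5, -9]) == 14,
# achieved by cooking the dishes [-1, 0, 5] in that order (-1*1 + 0*2 + 5*3).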
|
StarcoderdataPython
|
9624009
|
from __future__ import absolute_import, division, print_function, unicode_literals
from cechomesh import Color, ColorList, even_color_spread
from echomesh.util.TestCase import TestCase
class ColorListTest(TestCase):
def setUp(self):
self.cl = ColorList()
def assertResult(self, s):
self.assertEqual(str(self.cl), s)
def test_single(self):
self.cl.append('red')
self.assertResult('[red]')
def test_issue(self):
cl = ColorList(['red', 'white'])
self.assertEqual(str(cl), '[red, white]')
    def test_issue2(self):
cl = ColorList(['red', 'white', 'green', 'blue'])
self.assertEqual(str(cl), '[red, white, green, blue]')
def test_single_hsb(self):
self.cl = ColorList()
self.cl.append('red')
self.assertResult('[red]')
def test_empty(self):
self.assertResult('[]')
def test_append(self):
self.cl.append('red')
self.assertResult('[red]')
self.assertRaises(ValueError, self.cl.append, 'glug')
self.assertResult('[red]')
def test_sort(self):
self.cl.extend(['green', 'red', 'blue'])
self.cl.sort()
self.assertResult('[blue, green, red]')
def test_combine(self):
self.cl.extend(['black', 'white', 'red', 'blue', 'green'])
self.cl.combine(['white', 'white', 'blue', 'green', 'red'])
self.assertResult('[white, white, magenta, cyan, yellow]')
def test_combine_columns(self):
self.cl = ColorList(['yellow', 'white', 'red', 'blue', 'green'],
columns=2)
self.cl.combine(ColorList(['yellow', 'white', 'black', 'green', 'red'],
columns=3))
self.assertResult(
'[yellow, white, black,'
' yellow, magenta, black,'
' green, black, black], columns=3')
def test_combine_columns2(self):
self.cl = ColorList(['yellow', 'white', 'red', 'blue', 'green'],
columns=3)
self.cl.combine(ColorList(['yellow', 'white', 'red', 'green', 'coral'],
columns=2))
self.assertResult(
'[yellow, white, red, magenta, green, black, coral, black, black]'
', columns=3')
def test_combine_columns4(self):
self.cl = ColorList()
self.cl.combine(ColorList(['yellow', 'white', 'red', 'green', 'coral'],
columns=2))
self.assertResult('[yellow, white, red, green, coral], columns=2')
def test_count(self):
self.cl.extend(['green', 'red', 'blue', 'red', 'pink'])
self.assertEqual(self.cl.count('green'), 1)
self.assertEqual(self.cl.count('yellow'), 0)
self.assertEqual(self.cl.count('red'), 2)
def test_index(self):
self.cl.extend(['green', 'red', 'blue', 'red', 0x303030])
self.assertEqual(self.cl.index('green'), 0)
self.assertEqual(self.cl.index('red'), 1)
self.assertRaises(ValueError, self.cl.index, 'yellow')
def test_insert(self):
self.cl.extend(['green', 'red', 'blue', 'red'])
self.cl.insert(2, 'pink')
self.assertResult('[green, red, pink, blue, red]')
def test_slice1(self):
self.cl[:] = ['green', 'red', 'blue', 'red']
self.assertResult('[green, red, blue, red]')
def test_slice2(self):
self.cl[:] = ['green', 'red', 'blue', 'red']
self.cl[1:4:2] = ['pink', 'orange']
self.assertResult('[green, pink, blue, pink]')
def test_getslice1(self):
self.cl.extend(['green', 'red', 'blue', 'red'])
self.cl = self.cl[:]
self.assertResult('[green, red, blue, red]')
    def test_getslice2(self):
self.cl.extend(['green', 'red', 'blue', 'red'])
self.cl = self.cl[1::2]
self.assertResult('[red, red]')
def test_del(self):
self.cl.extend(['green', 'red', 'blue', 'red'])
del self.cl[2]
self.assertResult('[green, red, red]')
def test_columns(self):
cl = ColorList(['green', 'red', 'blue'], columns=8)
cl.columns = 4
self.assertEqual(
cl, ColorList(['green', 'red', 'blue', 'black'], columns=4))
self.cl = ColorList(['green', 'red', 'blue', 'yellow', 'orange'],
columns=3)
self.assertEqual(self.cl, self.cl)
cl = ColorList(['green', 'red', 'blue', 'yellow', 'orange'], columns=2)
self.assertNotEqual(self.cl, cl)
self.cl.columns = 2
self.assertEqual(
self.cl, ColorList(['green', 'red', 'yellow', 'orange'], columns=2))
self.cl.columns = 3
self.assertEqual(
self.cl, ColorList(
['green', 'red', 'black', 'yellow', 'orange', 'black'],
columns=3))
def test_contains(self):
self.cl.extend(['green', 'red', 'blue', 'red'])
self.assertTrue('red' in self.cl)
self.assertFalse('pink' in self.cl)
def test_add(self):
self.cl.extend(['green', 'red', 'blue', 'red'])
self.cl = self.cl + ['yellow', 'pink']
self.assertResult('[green, red, blue, red, yellow, pink]')
def test_radd(self):
self.cl.extend(['yellow', 'pink'])
self.cl = ['green', 'red', 'blue', 'red'] + self.cl
self.assertResult('[green, red, blue, red, yellow, pink]')
def test_iadd(self):
self.cl.extend(['green', 'red', 'blue', 'red'])
self.cl += ['yellow', 'pink']
self.assertResult('[green, red, blue, red, yellow, pink]')
def test_pop(self):
self.cl.extend(['green', 'red', 'blue', 'red'])
self.assertEqual(self.cl.pop(), Color('red'))
self.assertEqual(self.cl, ColorList(['green', 'red', 'blue']))
self.assertEqual(self.cl.pop(0), Color('green'))
self.assertEqual(self.cl, ColorList(['red', 'blue']))
self.assertRaises(IndexError, self.cl.pop, 3)
def test_remove(self):
self.cl.extend(['green', 'red', 'blue', 'red'])
self.cl.remove('red')
self.assertEqual(self.cl, ColorList(['green', 'blue', 'red']))
self.cl.remove('green')
self.assertEqual(self.cl, ColorList(['blue', 'red']))
self.assertRaises(ValueError, self.cl.remove, 'green')
def test_reverse(self):
self.cl += ['green', 'red', 'blue', 'red']
self.cl.reverse()
self.assertEqual(self.cl, ColorList(['red', 'blue', 'red', 'green']))
def test_reversed(self):
self.cl += ['green', 'red', 'blue', 'red']
cl = reversed(self.cl)
self.assertEqual(cl, ColorList(['red', 'blue', 'red', 'green']))
def test_mul(self):
self.cl += ['green', 'red']
self.cl = self.cl * 3
self.assertEqual(
self.cl,
ColorList(['green', 'red', 'green', 'red', 'green', 'red']))
def test_rmul(self):
self.cl += ['green', 'red']
self.cl = 3 * self.cl
self.assertEqual(
self.cl,
ColorList(['green', 'red', 'green', 'red', 'green', 'red']))
def test_imul(self):
self.cl += ['green', 'red']
self.cl *= 3
self.assertEqual(
self.cl,
ColorList(['green', 'red', 'green', 'red', 'green', 'red']))
def test_scale0(self):
self.cl += ['green', 'red']
self.cl.scale(0)
self.assertResult('[black, black]')
def test_scale1(self):
self.cl += ['green', 'red']
self.cl.scale(1)
self.assertResult('[green, red]')
def test_scale2(self):
self.cl += ['white']
self.cl.scale(0.5)
self.assertResult('[grey 50]')
def test_spread1(self):
self.cl = even_color_spread(2, 'black', 'white')
self.assertResult('[black, white]')
def test_spread2(self):
self.cl = even_color_spread(3, 'black', 'white', 'green')
self.assertResult('[black, white, green]')
def test_spread3(self):
self.cl = even_color_spread(5, 'black', 'white')
self.assertResult('[black, grey 25, grey 50, grey 75, white]')
def test_spread4(self):
self.cl = even_color_spread(5, 'white', 'red')
self.assertResult('[white, '
'[red=1.000, green=0.750, blue=0.750], '
'[red=1.000, green=0.500, blue=0.500], '
'[red=1.000, green=0.250, blue=0.250], '
'red]')
def test_spread5(self):
self.cl = even_color_spread(10, 'black', 'white', 'red', 'yellow')
self.assertResult('[black, dark grey, grey 66.7, white, '
'[red=1.000, green=0.667, blue=0.667], '
'[red=1.000, green=0.333, blue=0.333], red, '
'[red=1.000, green=0.333, blue=0.000], '
'[red=1.000, green=0.667, blue=0.000], yellow]')
def test_spread6(self):
self.cl = even_color_spread(5, 'black', 'white', 'red')
self.assertResult('[black, grey 50, white, '
'[red=1.000, green=0.500, blue=0.500], red]')
|
StarcoderdataPython
|
1754788
|
import tensorflow as tf
import sys
import numpy as np
from PIL import Image
import cv2
import os, os.path
# storage locations for the trained graph and labels are set in train.sh ##
# Disable tensorflow compilation warnings
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
import tensorflow as tf
image_path = sys.argv[1]
# the image path is given on the command line as an argument when the script is called
##CROPPING
#multiple cascades: https://github.com/Itseez/opencv/tree/master/data/haarcascades
#https://github.com/Itseez/opencv/blob/master/data/haarcascades/haarcascade_frontalface_default.xml
face_cascade = cv2.CascadeClassifier('faces.xml')
#https://github.com/Itseez/opencv/blob/master/data/haarcascades/haarcascade_eye.xml
eye_cascade = cv2.CascadeClassifier('eye.xml')
nfaces_detected = 0
# note the dependency on the format of the filename
img = cv2.imread(image_path)
height = img.shape[0]
width = img.shape[1]
size = height * width
#???
# if size > (500^2):
# r = 500.0 / img.shape[1]
# dim = (500, int(img.shape[0] * r))
# img2 = cv2.resize(img, dim, interpolation = cv2.INTER_AREA)
# img = img2
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
#faces = face_cascade.detectMultiScale(gray, 1.3, 5)
faces = face_cascade.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=3, minSize=(15, 15), flags = cv2.CASCADE_SCALE_IMAGE )
nface_within_pic = 0
for (x,y,w,h) in faces:
face_with_eyes_detected = 0
imgCrop = img[y:y+h,x:x+w]
#cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)
roi_gray = gray[y:y+h, x:x+w]
roi_color = img[y:y+h, x:x+w]
#eyes = eye_cascade.detectMultiScale(roi_gray)
eyes = face_cascade.detectMultiScale(roi_gray, scaleFactor=1.3, minNeighbors=3, minSize=(5, 5), flags = cv2.CASCADE_SCALE_IMAGE )
eyesn = 0
for (ex,ey,ew,eh) in eyes:
#cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),2)
eyesn = eyesn +1
# allow detection if only one 1 eye for sideways face profile ?
# No, always assume a frontal profile since that's the haar detection profile we chose above
# if eyesn >= 1:
if eyesn >= 1:
face_with_eyes_detected = 1
#cv2.imshow('img',imgCrop)
if face_with_eyes_detected > 0:
cv2.imwrite('face'+str(nface_within_pic)+'.jpg', imgCrop)
print("Image has been processed and cropped")
nface_within_pic += 1
nfaces_detected += 1
##CROPPING ENDS
#CHOOSE BIGGEST FACE
filenames= ['face%d.jpg'%(i,) for i in range(nfaces_detected)]
sizes = [Image.open(f, 'r').size for f in filenames]
largest= max(sizes)
index= sizes.index(largest)
imagefile= filenames[index]
print(imagefile+ " is the largest face, so we will id it.")
# read the image file
image_data = tf.gfile.FastGFile(imagefile, 'rb').read()
# load the labels from the file into an array
label_lines = [line.rstrip() for line
in tf.gfile.GFile("tf_files/retrained_labels.txt")]
# !! each label is on its own line -> no change needed in retrain.py -> it only looks wrong in the Windows editor !!
# load the graph that was trained in train.sh -> call retrain.py
with tf.gfile.FastGFile("tf_files/retrained_graph.pb", 'rb') as f:
    graph_def = tf.GraphDef()  # The graph_def is a saved copy of a TensorFlow graph; object initialization
graph_def.ParseFromString(f.read()) #Parse serialized protocol buffer data into variable
_ = tf.import_graph_def(graph_def, name='') # import a serialized TensorFlow GraphDef protocol buffer, extract objects in the GraphDef as tf.Tensor
#https://github.com/Hvass-Labs/TensorFlow-Tutorials/blob/master/inception.py ; ab zeile 276
with tf.Session() as sess:
softmax_tensor = sess.graph.get_tensor_by_name('final_result:0')
    # return: Tensor("final_result:0", shape=(?, 4), dtype=float32); string name defined in retrain.py, line 1064
predictions = sess.run(softmax_tensor, \
{'DecodeJpeg/contents:0': image_data})
    # returns the prediction values in an array:
top_k = predictions[0].argsort()[-len(predictions[0]):][::-1]
    # sort order: circle -> 0, plus -> 1, square -> 2, triangle -> 3; example returned array [3 1 2 0] -> sorted by strongest match first
# output
for node_id in top_k:
human_string = label_lines[node_id]
score = predictions[0][node_id]
print('%s (score = %.2f)' % (human_string, score))
|
StarcoderdataPython
|
5191365
|
<filename>metriq/errors.py
__all__ = ["MetriqError"]
from tea_client.errors import TeaClientError
MetriqError = TeaClientError
|
StarcoderdataPython
|
3373188
|
#!/usr/bin/env python
import os
from setuptools import setup, find_packages
setup(name='fslks',
version='0.0.1-SNAPSHOT',
author='<NAME>, <NAME>',
author_email='<EMAIL>',
description='Implementation of Few-short Learning with the Kitchen Sink for Consumer Health Answer Generation',
license='MIT',
keywords='tensorflow deep-learning machine-learning question-answering few-shot-learning',
      long_description=open(os.path.join(os.path.dirname(__file__), 'README.md')).read(),
install_requires=open(os.path.join(os.path.dirname(__file__), 'requirements.txt')).read(),
packages=find_packages()
)
|
StarcoderdataPython
|
8016488
|
<reponame>jackton1/pyrollbar
__all__ = ['add_to']
import logging
import sys
from typing import Callable, Optional, Type, Union
from fastapi import APIRouter, FastAPI, __version__
from fastapi.routing import APIRoute
try:
from fastapi import Request, Response
except ImportError:
# Added in FastAPI v0.51.0
from starlette.requests import Request
from starlette.responses import Response
import rollbar
from .utils import fastapi_min_version, get_installed_middlewares, has_bare_routing
from rollbar.contrib.asgi.integration import integrate
from rollbar.contrib.starlette.requests import store_current_request
from rollbar.lib._async import RollbarAsyncError, try_report
log = logging.getLogger(__name__)
@fastapi_min_version('0.41.0')
@integrate(framework_name=f'fastapi {__version__}')
def add_to(app_or_router: Union[FastAPI, APIRouter]) -> Optional[Type[APIRoute]]:
"""
Adds RollbarLoggingRoute handler to the router app.
This is the recommended way for integration with FastAPI.
    As an alternative to using middleware, the handler can fill in
    more data in the payload (e.g. the request body).
app_or_router: FastAPI app or router
Note: The route handler must be added before adding user routes
Requirements: FastAPI v0.41.0+
Example usage:
from fastapi import FastAPI
from rollbar.contrib.fastapi import add_to as rollbar_add_to
app = FastAPI()
rollbar_add_to(app)
"""
if not has_bare_routing(app_or_router):
log.error(
            'RollbarLoggingRoute must be added to a bare router'
' (before adding routes). See docs for more details.'
)
return None
installed_middlewares = get_installed_middlewares(app_or_router)
if installed_middlewares:
log.warning(
f'Detected middleware installed {installed_middlewares}'
' while loading Rollbar route handler.'
            ' This can cause duplicate occurrences.'
)
if isinstance(app_or_router, FastAPI):
_add_to_app(app_or_router)
elif isinstance(app_or_router, APIRouter):
_add_to_router(app_or_router)
else:
log.error('Error adding RollbarLoggingRoute to application.')
return None
return RollbarLoggingRoute
class RollbarLoggingRoute(APIRoute):
def get_route_handler(self) -> Callable:
router_handler = super().get_route_handler()
async def rollbar_route_handler(request: Request) -> Response:
try:
store_current_request(request)
return await router_handler(request)
except Exception:
# FastAPI requires the `python-multipart` package to parse the content
if not request._stream_consumed:
await request.body()
await request.form()
exc_info = sys.exc_info()
try:
await try_report(exc_info, request)
except RollbarAsyncError:
log.warning(
'Failed to report asynchronously. Trying to report synchronously.'
)
rollbar.report_exc_info(exc_info, request)
raise
return rollbar_route_handler
def _add_to_app(app):
app.router.route_class = RollbarLoggingRoute
def _add_to_router(router):
router.route_class = RollbarLoggingRoute
|
StarcoderdataPython
|
11260213
|
#built in user_model in django.
from django.contrib.auth import get_user_model
#usercreationform is a built in library to create users by django. check docs.
from django.contrib.auth.forms import UserCreationForm
class UserCreateForm(UserCreationForm):
class Meta:
fields = ("username", "email", "<PASSWORD>", "<PASSWORD>")
model = get_user_model()
    # changing the default labels on the form
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields["username"].label = "Display name"
self.fields["email"].label = "Email address"
|
StarcoderdataPython
|
1935813
|
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2018 GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
import logging
from openquake.baselib import sap, datastore, general
from openquake.commonlib import logs
from openquake.engine import engine
from openquake.server import dbserver
from requests import Session
CHUNKSIZE = 4*1024**2 # 4 MB
# NB: it is really difficult to test this automatically, so it is only
# tested manually
def login(host, username, password):
session = Session()
login_url = host + '/accounts/ajax_login/'
session_resp = session.post(
login_url, data={"username": username, "password": password},
timeout=10)
assert session_resp.status_code == 200, 'Login failed'
return session
@sap.Script
def importcalc(host, calc_id, username, password):
"""
Import a remote calculation into the local database
"""
logging.basicConfig(level=logging.INFO)
if '/' in host.split('//', 1)[1]:
sys.exit('Wrong host ending with /%s' % host.rsplit('/', 1)[1])
calc_url = '/'.join([host, 'v1/calc', str(calc_id)])
dbserver.ensure_on()
job = logs.dbcmd('get_job', calc_id)
if job is not None:
sys.exit('There is already a job #%d in the local db' % calc_id)
datadir = datastore.get_datadir()
session = login(host, username, password)
status = session.get('%s/status' % calc_url)
if 'Log in to an existing account' in status.text:
sys.exit('Could not login')
json = status.json()
if json["parent_id"]:
sys.exit('The job has a parent (#%(parent_id)d) and cannot be '
'downloaded' % json)
resp = session.get('%s/datastore' % calc_url, stream=True)
assert resp.status_code == 200, resp.status_code
fname = '%s/calc_%d.hdf5' % (datadir, calc_id)
down = 0
with open(fname, 'wb') as f:
logging.info('%s -> %s', calc_url, fname)
for chunk in resp.iter_content(CHUNKSIZE):
f.write(chunk)
down += len(chunk)
general.println('Downloaded {:,} bytes'.format(down))
print()
logs.dbcmd('import_job', calc_id, json['calculation_mode'],
json['description'], json['owner'], json['status'],
json['parent_id'], datadir)
with datastore.read(calc_id) as dstore:
engine.expose_outputs(dstore)
logging.info('Imported calculation %d successfully', calc_id)
importcalc.arg('host', 'remote host (ex. https://oq1.wilson.openquake.org/)')
importcalc.arg('calc_id', 'calculation ID', type=int)
importcalc.arg('username', 'user name')
importcalc.arg('password', '<PASSWORD>')
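# Hedged usage note (illustration only; the exact entry point depends on how this
# sap.Script is registered elsewhere): given the arguments declared above, a run
# takes the form
#   importcalc <host> <calc_id> <username> <password>
# e.g. importcalc https://oq1.wilson.openquake.org 1234 alice s3cr3t
# where the calculation ID and credentials are placeholder values.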
|
StarcoderdataPython
|
5164900
|
<reponame>stanwood/traidoo-api
import datetime
import pytest
from model_bakery import baker
from items.models import Item
from products.models import Product
@pytest.mark.django_db
def test_get_only_available_products(client_anonymous, traidoo_region):
product_1 = baker.make(Product, region=traidoo_region)
baker.make(Product, region=traidoo_region)
tomorrow = datetime.datetime.utcnow().date() + datetime.timedelta(days=1)
baker.make(Item, quantity=1, product=product_1, latest_delivery_date=tomorrow)
response = client_anonymous.get(f"/products?is_available=True")
assert response.json()["count"] == 1
assert response.json()["results"][0]["id"] == product_1.id
@pytest.mark.django_db
def test_do_not_return_products_when_quantity_is_0(client_anonymous, traidoo_region):
product_1 = baker.make(Product, region=traidoo_region)
baker.make(Product, region=traidoo_region)
tomorrow = datetime.datetime.utcnow().date() + datetime.timedelta(days=1)
baker.make(Item, quantity=0, product=product_1, latest_delivery_date=tomorrow)
response = client_anonymous.get(f"/products?is_available=True")
assert response.json()["count"] == 0
@pytest.mark.django_db
def test_do_not_return_expired_products(client_anonymous, traidoo_region):
product_1 = baker.make(Product, region=traidoo_region)
baker.make(Product, region=traidoo_region)
today = datetime.datetime.utcnow().date()
baker.make(Item, quantity=1, product=product_1, latest_delivery_date=today)
response = client_anonymous.get(f"/products?is_available=True")
assert response.json()["count"] == 0
@pytest.mark.django_db
def test_get_only_not_available_products(client_anonymous, traidoo_region):
product_1, product_2 = baker.make(Product, region=traidoo_region, _quantity=2)
tomorrow = datetime.datetime.utcnow().date() + datetime.timedelta(days=1)
baker.make(Item, quantity=1, product=product_1, latest_delivery_date=tomorrow)
response = client_anonymous.get(f"/products?is_available=False")
assert response.json()["count"] == 1
assert response.json()["results"][0]["id"] == product_2.id
@pytest.mark.django_db
def test_get_all_products(client_anonymous, traidoo_region):
product_1 = baker.make(Product, region=traidoo_region)
baker.make(Product, region=traidoo_region)
baker.make(Item, quantity=1, product=product_1)
response = client_anonymous.get(f"/products")
assert response.json()["count"] == 2
@pytest.mark.django_db
def test_do_not_count_expired_items(client_anonymous, traidoo_region):
product = baker.make(Product, region=traidoo_region)
today = datetime.datetime.utcnow().date()
tomorrow = today + datetime.timedelta(days=1)
yesterday = today - datetime.timedelta(days=1)
baker.make(Item, quantity=1, product=product, latest_delivery_date=today)
baker.make(Item, quantity=1, product=product, latest_delivery_date=yesterday)
baker.make(Item, quantity=1, product=product, latest_delivery_date=tomorrow)
response = client_anonymous.get(f"/products")
assert response.json()["count"] == 1
assert response.json()["results"][0]["itemsAvailable"] == 1
|
StarcoderdataPython
|
4930850
|
import os
from flask import Flask
from api import blueprint as api_blueprint
from client import blueprint as client_blueprint
def create_app(testing: bool = False) -> Flask:
app = Flask(__name__)
app.register_blueprint(api_blueprint)
app.register_blueprint(client_blueprint)
app.secret_key = os.environ.get('FLASK_SECRET_KEY', 'Unsafe Secret')
app.config['TESTING'] = testing
app.config['GHIBLI_API_HOST'] = os.environ.get(
'GHIBLI_API_HOST',
'https://ghibliapi.herokuapp.com'
)
return app
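

# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Serves the factory-built app locally; the host and port values are assumptions.
if __name__ == "__main__":
    app = create_app(testing=False)
    app.run(host="127.0.0.1", port=5000)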
|
StarcoderdataPython
|
6426338
|
from pypadre import _name, _version
from pypadre.core.model.code.code_mixin import PipIdentifier
PACKAGE_ID = PipIdentifier(pip_package=_name.__name__, version=_version.__version__)
|
StarcoderdataPython
|
4973993
|
<reponame>nagapavan525/nbdev_project<gh_stars>0
# AUTOGENERATED! DO NOT EDIT! File to edit: 00_core.ipynb (unless otherwise specified).
__all__ = ['greetings']
# Cell
def greetings():
return "Hello world"
|
StarcoderdataPython
|
3403533
|
import locale
import os
from pathlib import Path
import click
from tqdm import tqdm # type: ignore
from kaleidoscope.gallery import generate_gallery_ini, generate_album_ini
from kaleidoscope.generator import generate, DefaultListener
from kaleidoscope.reader import read_gallery
gallery_path = "."
@click.group()
@click.option('--gallery', type=click.Path())
@click.pass_context
def cli(ctx, gallery):
locale.setlocale(locale.LC_ALL, '')
if gallery is not None:
global gallery_path
gallery_path = gallery
@cli.command()
def build():
"""Build gallery."""
gallery = read_gallery(gallery_path)
output_path = os.path.join(gallery_path, "output")
generate(gallery, output_path, ProgressReporter())
@cli.command(name='init-gallery')
def init_gallery():
"""Generate gallery configuration file."""
generate_gallery_ini(Path(gallery_path))
@cli.command(name='init-album')
@click.argument('directory',
type=click.Path(exists=True, file_okay=False, dir_okay=True))
def init_album(directory):
"""Generate album configuration file with list of photos."""
generate_album_ini(Path(gallery_path).joinpath(directory))
class ProgressReporter(DefaultListener):
"""Reports progress of gallery generation to a user."""
def __init__(self):
self._progressbar = None
def starting_album(self, album, photos_to_process):
print("Generating album " + album.title)
if photos_to_process > 0:
self._progressbar = tqdm(desc="Resizing", unit="photo",
total=photos_to_process)
def resizing_photo(self, photo):
self._progressbar.update(1)
def finishing_album(self):
if self._progressbar:
self._progressbar.close()
self._progressbar = None
if __name__ == '__main__':
cli()
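# Hedged CLI examples (illustrative module name and paths, not from the original file):
#   python cli.py --gallery /path/to/gallery init-gallery
#   python cli.py --gallery /path/to/gallery init-album albums/summer-trip
#   python cli.py --gallery /path/to/gallery build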
|
StarcoderdataPython
|
164337
|
<filename>tests/api/views/test_s3bucket.py
import json
from unittest.mock import patch
from botocore.exceptions import ClientError
from model_mommy import mommy
import pytest
from rest_framework import status
from rest_framework.reverse import reverse
from controlpanel.api.models import UserS3Bucket
from tests.api.fixtures.es import BUCKET_HITS_AGGREGATION
@pytest.fixture
def bucket():
return mommy.make('api.S3Bucket', name='test-bucket-1')
@pytest.fixture(autouse=True)
def models(bucket):
mommy.make('api.S3Bucket')
mommy.make('api.S3Bucket', is_data_warehouse=True)
mommy.make('api.AppS3Bucket', s3bucket=bucket)
mommy.make('api.UserS3Bucket', s3bucket=bucket)
def test_list(client):
response = client.get(reverse('s3bucket-list'))
assert response.status_code == status.HTTP_200_OK
assert len(response.data['results']) == 3
response = client.get(reverse('s3bucket-list') + '?is_data_warehouse=true')
assert len(response.data['results']) == 1
def test_detail(client, bucket):
response = client.get(reverse('s3bucket-detail', (bucket.id,)))
assert response.status_code == status.HTTP_200_OK
expected_s3bucket_fields = {
'id',
'url',
'name',
'arn',
'apps3buckets',
'users3buckets',
'created_by',
'is_data_warehouse',
'location_url',
}
assert set(response.data) == expected_s3bucket_fields
apps3bucket = response.data['apps3buckets'][0]
expected_apps3bucket_fields = {'id', 'url', 'app', 'access_level'}
assert set(apps3bucket) == expected_apps3bucket_fields
expected_app_fields = {
'id',
'url',
'name',
'description',
'slug',
'repo_url',
'iam_role_name',
'created_by',
}
assert set(apps3bucket['app']) == expected_app_fields
users3bucket = response.data['users3buckets'][0]
expected_users3bucket_fields = {
'id',
'user',
'access_level',
'is_admin'
}
assert set(users3bucket) == expected_users3bucket_fields
expected_user_fields = {
'auth0_id',
'url',
'username',
'name',
'email',
}
assert set(users3bucket['user']) == expected_user_fields
def test_delete(client, bucket):
response = client.delete(reverse('s3bucket-detail', (bucket.id,)))
assert response.status_code == status.HTTP_204_NO_CONTENT
response = client.get(reverse('s3bucket-detail', (bucket.id,)))
assert response.status_code == status.HTTP_404_NOT_FOUND
def test_create(client, superuser, aws):
data = {'name': 'test-bucket-123'}
response = client.post(reverse('s3bucket-list'), data)
assert response.status_code == status.HTTP_201_CREATED
assert response.data['created_by'] == superuser.auth0_id
assert not response.data['is_data_warehouse']
aws.create_bucket.assert_called()
users3bucket = UserS3Bucket.objects.get(
user_id=superuser.auth0_id,
s3bucket_id=response.data['id'],
)
assert users3bucket.user.auth0_id == superuser.auth0_id
assert response.data['id'] == users3bucket.s3bucket.id
assert UserS3Bucket.READWRITE == users3bucket.access_level
assert users3bucket.is_admin
EXISTING_BUCKET_NAME = object()
@pytest.mark.parametrize(
"name",
[
EXISTING_BUCKET_NAME,
'ab',
'127.0.0.1',
'__test_bucket__',
'badenv-bucketname',
'bucketname',
],
ids=[
'name-exists',
'name-too-short',
'name-like-ipaddr',
'name-invalid-start-end-chars',
'name-invalid-prefix',
'name-no-prefix',
],
)
def test_create_bad_request(client, bucket, name):
if name is EXISTING_BUCKET_NAME:
name = bucket.name
response = client.post(reverse('s3bucket-list'), {"name": name})
assert response.status_code == status.HTTP_400_BAD_REQUEST
def test_update(client, bucket):
data = {'name': 'test-bucket-updated'}
response = client.put(
reverse('s3bucket-detail', (bucket.id,)),
json.dumps(data),
content_type='application/json',
)
assert response.status_code == status.HTTP_200_OK
assert response.data['name'] == data['name']
@pytest.mark.skip(reason="Needs to move to test_aws")
def test_aws_error_existing_ignored(client, aws):
e = type('BucketAlreadyOwnedByYou', (ClientError,), {})
aws.create_bucket.side_effect = e({}, 'Foo')
data = {'name': f'test-bucket-123'}
response = client.post(reverse('s3bucket-list'), data)
assert response.status_code == status.HTTP_201_CREATED
aws.create_bucket.assert_called()
def test_access_logs(client, bucket, elasticsearch):
elasticsearch.search.return_value = BUCKET_HITS_AGGREGATION
response = client.get(reverse('s3bucket-access-logs', (bucket.id,)))
assert response.status_code == status.HTTP_200_OK
assert len(response.data) == 2
assert response.data[0]['accessed_by'] == 'sentencing-policy-model'
assert response.data[0]['count'] == 11
assert response.data[0]['type'] == 'app'
assert response.data[1]['accessed_by'] == 'foobar'
assert response.data[1]['count'] == 3
assert response.data[1]['type'] == 'user'
|
StarcoderdataPython
|
9717593
|
<filename>modules/gmaps.py
from geopy.geocoders import Nominatim
from geopy.distance import geodesic
import openrouteservice
from openrouteservice import convert
from pyrogram import Client
import time
import json
import sys
sys.path.append(sys.path[0] + "/..")
from utils.get_config import *
from gtts import gTTS
config = get_config_file("config.json")
api_geopy = config["api_geopy"]
"""
query => the two places to compute the distance between, with a comma as separator.
client, message => data used to talk to pyrogram in sendMessage.
Formats the input, runs the function that computes the distance between the two places and returns the result via message.
"""
def execute_km(query,client,message):
addresses = query.split(',')
km = distanza(addresses[0],addresses[1])
if(km == "None"):
result = "__Error 404: not found__"
else:
result = "La distanza tra i due luoghi è di " + str(km) + " km."
return sendMessage(client,message,result)
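# Hedged example of the query format expected by execute_km (place names are
# illustrative only):
#   execute_km("Roma, Milano", client, message)
# replies with the straight-line distance between the two places in km.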
def execute_route(query,client,message):
    # use the '/' character as a separator to extract the transport mode, then ',' to extract the two places
try:
first_split = query.split('/')
mode = first_split[0]
addresses = first_split[1].split(',')
except:
return sendMessage(client,message,"__Errore formato__\nprova /help mappe.__")
route = directions(client,message,addresses[0],addresses[1],mode)
result = route
return sendMessage(client,message,result)
"""
address => the address whose location is being looked up.
client, message => data used to talk to pyrogram in send_location.
Given an address, returns its geographic position via message using Telegram's native APIs.
It is also used as a helper function in 'distanza'; in that case it only returns the array with the two coordinate pairs.
"""
@Client.on_message()
def showmaps(address,client,message):
geolocate = Nominatim(user_agent="my-tg-app")
location = geolocate.geocode(address,timeout=10000)
coordinates = []
try:
coordinates.append(location.latitude)
coordinates.append(location.longitude)
except:
return sendMessage(client,message,"__Error 404: not found__")
try:
client.send_location(get_chat(message),coordinates[0],coordinates[1],reply_to_message_id=get_id_msg(message))
except:
return coordinates
"""
address1 => the first place.
address2 => the second place.
Given a pair of geographic coordinates, the straight-line distance between the two places is computed in km.
"""
def distanza(address1,address2):
try:
coord1 = showmaps(address1,client = None,message = None)
coord2 = showmaps(address2,client = None,message = None)
except:
return "None"
departure = (coord1[0],coord1[1])
arrive = (coord2[0],coord2[1])
result = geodesic(departure,arrive).miles
result = (result * 1.609344)
return round(result,2)
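# Hedged side note (illustration only): geopy distance objects also expose .km,
# so the miles-to-km conversion above could be written directly as, e.g.:
#   geodesic((45.4642, 9.1900), (41.9028, 12.4964)).km
# using approximate coordinates for Milan and Rome.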
@Client.on_message()
def directions(client,message,address1,address2,query):
coord1 = showmaps(address1,client = None,message = None)
coord2 = showmaps(address2,client = None,message = None)
coord1 = coord1[::-1]
coord2 = coord2[::-1]
coords = ((coord1[0],coord1[1]),(coord2[0],coord2[1]))
client_geopy = openrouteservice.Client(key = api_geopy)
    # dictionary with the three transport modes supported by the function
modes = { 'macchina': 'driving-car', 'piedi': 'foot-walking', 'bicicletta':'cycling-road'}
if query in modes:
profile = modes[query]
try:
travel = client_geopy.directions(coords,profile=profile,format='json',preference = 'fastest',units='km',language="it")
except:
return "__Destinazione troppo lontana__"
client_geopy = openrouteservice.Client(key = api_geopy)
dis_time = travel['routes'][0]['summary']
distanza = dis_time['distance']
distanza = round(distanza,2)
time_travel = round((float(dis_time['duration']) / 60),2)
if(time_travel > 60):
time_travel = str(round(time_travel / 60,2)) + " ore."
else:
time_travel = str(time_travel) + " minuti."
steps = travel['routes'][0]['segments'][0]["steps"]
istruzioni = ""
for item in steps:
if float(item["distance"]) < 1:
tragitto = int((float(item["distance"]) * 1000))
tragitto = "Tra " + str(tragitto) + " metri "
else:
tragitto = round(item["distance"],2)
tragitto = "Tra " + str(tragitto) + " km "
if "Arrivo" in item["instruction"]:
istruzioni += item["instruction"] + "\n"
else:
istruzioni += tragitto + item["instruction"] + "\n"
tts = gTTS(istruzioni,lang="it")
tts.save("istruzioni.mp3")
client.send_document(get_chat(message),document = "istruzioni.mp3",caption = "Istruzioni per raggiungere la destinazione con: " + query, reply_to_message_id=get_id_msg(message))
result = "La tua destinazione si trova a " + str(distanza) + " km raggiungibile in circa " + str(time_travel)
return result
|
StarcoderdataPython
|
4959965
|
# -*- coding: utf-8 -*-
from collections import OrderedDict
from gluon import current
from gluon.storage import Storage
def config(settings):
"""
Cumbria County Council extensions to the Volunteer Management template
- branding
- support Donations
- support Assessments
"""
T = current.T
settings.base.system_name = T("Support Cumbria")
settings.base.system_name_short = T("Support Cumbria")
# Theme
settings.base.theme = "CCC"
settings.base.theme_layouts = "CCC"
settings.base.theme_config = "CCC"
# PrePopulate data
settings.base.prepopulate += ("CCC",)
settings.base.prepopulate_demo = ("CCC/Demo",)
# Authentication settings
# Do new users need to verify their email address?
settings.auth.registration_requires_verification = True
# Do new users need to be approved by an administrator prior to being able to login?
# - varies by path (see register() in controllers.py)
#settings.auth.registration_requires_approval = True
settings.auth.registration_requests_organisation = True
# Required for access to default realm permissions
settings.auth.registration_link_user_to = ["staff"]
settings.auth.registration_link_user_to_default = ["staff"]
# -------------------------------------------------------------------------
# L10n (Localization) settings
settings.L10n.languages = OrderedDict([
("en-gb", "English"),
])
# Default Language
settings.L10n.default_language = "en-gb"
# Uncomment to Hide the language toolbar
settings.L10n.display_toolbar = False
# Security Policy
# http://eden.sahanafoundation.org/wiki/S3AAA#System-widePolicy
# 1: Simple (default): Global as Reader, Authenticated as Editor
# 2: Editor role required for Update/Delete, unless record owned by session
# 3: Apply Controller ACLs
# 4: Apply both Controller & Function ACLs
# 5: Apply Controller, Function & Table ACLs
# 6: Apply Controller, Function, Table ACLs and Entity Realm
# 7: Apply Controller, Function, Table ACLs and Entity Realm + Hierarchy
# 8: Apply Controller, Function, Table ACLs, Entity Realm + Hierarchy and Delegations
settings.security.policy = 7 # Organisation-ACLs
# Consent Tracking
settings.auth.consent_tracking = True
# Record Approval
settings.auth.record_approval = True
settings.auth.record_approval_required_for = ("org_organisation",
)
# -------------------------------------------------------------------------
# Comment/uncomment modules here to disable/enable them
# Modules menu is defined in modules/eden/menu.py
#settings.modules.update([
settings.modules = OrderedDict([
# Core modules which shouldn't be disabled
("default", Storage(
name_nice = T("Home"),
restricted = False, # Use ACLs to control access to this module
access = None, # All Users (inc Anonymous) can see this module in the default menu & access the controller
module_type = None # This item is not shown in the menu
)),
("admin", Storage(
name_nice = T("Administration"),
#description = "Site Administration",
restricted = True,
access = "|1|", # Only Administrators can see this module in the default menu & access the controller
module_type = None # This item is handled separately for the menu
)),
("appadmin", Storage(
name_nice = T("Administration"),
#description = "Site Administration",
restricted = True,
module_type = None # No Menu
)),
("errors", Storage(
name_nice = T("Ticket Viewer"),
#description = "Needed for Breadcrumbs",
restricted = False,
module_type = None # No Menu
)),
#("sync", Storage(
# name_nice = T("Synchronization"),
# #description = "Synchronization",
# restricted = True,
# access = "|1|", # Only Administrators can see this module in the default menu & access the controller
# module_type = None # This item is handled separately for the menu
#)),
#("tour", Storage(
# name_nice = T("Guided Tour Functionality"),
# module_type = None,
#)),
#("translate", Storage(
# name_nice = T("Translation Functionality"),
# #description = "Selective translation of strings based on module.",
# module_type = None,
#)),
("gis", Storage(
name_nice = T("Map"),
#description = "Situation Awareness & Geospatial Analysis",
restricted = True,
module_type = None,
)),
("pr", Storage(
name_nice = T("Person Registry"),
#description = "Central point to record details on People",
restricted = True,
access = "|1|", # Only Administrators can see this module in the default menu (access to controller is possible to all still)
module_type = None,
)),
("org", Storage(
name_nice = T("Organizations"),
#description = 'Lists "who is doing what & where". Allows relief agencies to coordinate their activities',
restricted = True,
module_type = None,
)),
("hrm", Storage(
name_nice = T("Personnel"),
#description = "Human Resources Management",
restricted = True,
module_type = None,
)),
#("vol", Storage(
# name_nice = T("Volunteers"),
# #description = "Human Resources Management",
# restricted = True,
# module_type = 2,
#)),
("cms", Storage(
name_nice = T("Content Management"),
#description = "Content Management System",
restricted = True,
module_type = None,
)),
("doc", Storage(
name_nice = T("Documents"),
#description = "A library of digital resources, such as photos, documents and reports",
restricted = True,
module_type = None,
)),
("msg", Storage(
name_nice = T("Messaging"),
#description = "Sends & Receives Alerts via Email & SMS",
restricted = True,
        # The user-visible functionality of this module isn't normally required. Rather, its main purpose is to be accessed from other modules.
module_type = None,
)),
#("cr", Storage(
# name_nice = T("Shelters"),
# #description = "Tracks the location, capacity and breakdown of victims in Shelters",
# restricted = True,
# module_type = 10
#)),
("dc", Storage(
name_nice = T("Assessments"),
#description = "Data collection tool",
restricted = True,
module_type = None,
)),
("project", Storage(
name_nice = T("Projects"),
#description = "Tasks for Contacts",
restricted = True,
module_type = None,
)),
("supply", Storage(
name_nice = T("Supply Chain Management"),
#description = "Used within Inventory Management, Request Management and Asset Management",
restricted = True,
module_type = None, # Not displayed
)),
#("inv", Storage(
# name_nice = T("Warehouses"),
# #description = "Receiving and Sending Items",
# restricted = True,
# module_type = None,
#)),
("req", Storage(
name_nice = T("Requests"),
#description = "Manage requests for supplies, assets, staff or other resources. Matches against Inventories where supplies are requested.",
restricted = True,
module_type = None,
)),
])
settings.search.filter_manager = False
settings.ui.filter_clear = False
settings.cms.richtext = True
settings.hrm.event_course_mandatory = False
settings.pr.hide_third_gender = False
#settings.project.task_priority_opts = {1: T("Low"),
# 2: T("Medium"),
# 3: T("High"),
# }
#settings.project.task_status_opts = {1: T("New"),
# 2: T("In-Progress"),
# 3: T("Closed"),
# }
# Now using req_need, so unused:
#settings.req.req_type = ("People",)
# -------------------------------------------------------------------------
def ccc_realm_entity(table, row):
"""
Assign a Realm Entity to records
"""
if current.auth.s3_has_role("ADMIN"):
# Use default rules
return 0
tablename = table._tablename
if tablename in (#"hrm_training_event",
"project_task",
#"req_need",
):
# Use the Org of the Creator
db = current.db
new_row = db(table.id == row.id).select(table.created_by,
limitby = (0, 1),
).first()
user_id = new_row.created_by
utable = db.auth_user
otable = current.s3db.org_organisation
query = (utable.id == user_id) & \
(utable.organisation_id == otable.id)
org = db(query).select(otable.pe_id,
limitby = (0, 1),
).first()
if org:
return org.pe_id
# Use default rules
return 0
settings.auth.realm_entity = ccc_realm_entity
# -------------------------------------------------------------------------
def ccc_rheader(r):
"""
Custom rheaders
"""
if r.representation != "html":
# RHeaders only used in interactive views
return None
# Need to use this format as otherwise req_match?viewing=org_office.x
# doesn't have an rheader
from s3 import s3_rheader_resource, s3_rheader_tabs
tablename, record = s3_rheader_resource(r)
if record is None:
# List or Create form: rheader makes no sense here
return None
from gluon import DIV, TABLE, TR, TH
T = current.T
if tablename == "hrm_training_event":
T = current.T
tabs = [(T("Basic Details"), None),
(T("Participants"), "participant"),
]
rheader_tabs = s3_rheader_tabs(r, tabs)
table = r.table
location_id = table.location_id
date_field = table.start_date
rheader = DIV(TABLE(TR(TH("%s: " % T("Date")),
date_field.represent(record.start_date),
),
TR(TH("%s: " % location_id.label),
location_id.represent(record.location_id),
)),
rheader_tabs)
elif tablename == "org_organisation":
T = current.T
tabs = [(T("Basic Details"), None),
#(T("Offices"), "office"),
(T("Key Locations"), "facility"),
#(T("Locations Served"), "location"),
(T("Volunteers"), "human_resource"),
]
rheader_tabs = s3_rheader_tabs(r, tabs)
from s3 import s3_fullname
table = r.table
rheader = DIV(TABLE(TR(TH("%s: " % T("Name")),
record.name,
)),
rheader_tabs)
elif tablename == "pr_group":
T = current.T
tabs = [(T("Basic Details"), None),
# 'Person' allows native tab breakout
#(T("Members"), "group_membership"),
(T("Members"), "person"),
#(T("Locations"), "group_location"),
#(T("Skills"), "competency"),
]
rheader_tabs = s3_rheader_tabs(r, tabs)
from s3 import s3_fullname
table = r.table
rheader = DIV(TABLE(TR(TH("%s: " % T("Name")),
record.name,
)),
rheader_tabs)
elif tablename == "pr_person":
T = current.T
tabs = [(T("Basic Details"), None),
(T("Address"), "address"),
(T("Contacts"), "contacts"),
# Included in Contacts tab:
#(T("Emergency Contacts"), "contact_emergency"),
]
get_vars_get = r.get_vars.get
has_role = current.auth.s3_has_role
if get_vars_get("donors") or \
has_role("DONOR", include_admin=False):
# Better on main form using S3SQLInlineLink
#tabs.append((T("Goods / Services"), "item"))
pass
elif get_vars_get("groups") or \
has_role("GROUP_ADMIN", include_admin=False):
# Better as menu item, to be able to access tab(s)
#tabs.append((T("Group"), "group"))
pass
else:
tabs.append((T("Additional Information"), "additional"))
# Better on main form using S3SQLInlineLink
#tabs.append((T("Skills"), "competency"))
if has_role("ORG_ADMIN"):
tabs.insert(1, (T("Affiliation"), "human_resource"))
rheader_tabs = s3_rheader_tabs(r, tabs)
from s3 import s3_fullname
table = r.table
rheader = DIV(TABLE(TR(TH("%s: " % T("Name")),
s3_fullname(record),
)),
rheader_tabs)
elif tablename == "req_need":
if not current.auth.s3_has_role("ORG_ADMIN"):
# @ToDo: Button to Apply (rheader or rfooter)
return None
T = current.T
tabs = [(T("Basic Details"), None),
#(T("Items"), "need_item"),
#(T("Skills"), "need_skill"),
(T("People"), "need_person"),
(T("Invite"), "assign"),
]
rheader_tabs = s3_rheader_tabs(r, tabs)
table = r.table
location_id = table.location_id
date_field = table.date
rheader = DIV(TABLE(TR(TH("%s: " % date_field.label),
date_field.represent(record.date),
),
TR(TH("%s: " % location_id.label),
location_id.represent(record.location_id),
)),
rheader_tabs)
return rheader
# -------------------------------------------------------------------------
def customise_auth_user_resource(r, tablename):
"""
Hook in custom auth_user_register_onaccept for use when Agency/Existing Users are Approved
"""
from templates.CCC.controllers import auth_user_register_onaccept
current.s3db.configure("auth_user",
register_onaccept = auth_user_register_onaccept,
)
settings.customise_auth_user_resource = customise_auth_user_resource
# -------------------------------------------------------------------------
def customise_auth_user_controller(**attr):
if current.request.args(0) == "register":
# Not easy to tweak the URL in the login form's buttons
from gluon import redirect, URL
redirect(URL(c="default", f="index",
args="register",
vars=current.request.get_vars))
return attr
settings.customise_auth_user_controller = customise_auth_user_controller
# -------------------------------------------------------------------------
def customise_cms_post_resource(r, tablename):
from gluon import URL
from s3 import S3SQLCustomForm, S3SQLInlineComponent, S3TextFilter
#from templates.CCC.controllers import cms_post_list_layout
current.response.s3.crud_strings[tablename] = Storage(
label_create = T("Add Information"),
# title_display = T("Guide Details"),
title_list = "",
# title_update = T("Edit Guide"),
# #title_upload = T("Import Guides"),
# label_list_button = T("List Guides"),
# label_delete_button = T("Delete Guide"),
# msg_record_created = T("Guide added"),
# msg_record_modified = T("Guide updated"),
# msg_record_deleted = T("Guide deleted"),
# msg_list_empty = T("No Guides currently registered")
)
s3db = current.s3db
#f = s3db.cms_post.series_id
#f.label = T("Category")
#f.readable = f.writable = True
s3db.configure("cms_post",
create_next = URL(args="datalist"),
crud_form = S3SQLCustomForm(#"series_id",
"title",
"body",
S3SQLInlineComponent(
"document",
label = T("Attachment"),
#multiple = False,
fields = [("", "file")],
),
),
list_fields = [#"series_id",
"title",
"body",
"date",
"document.file",
],
#list_layout = cms_post_list_layout,
filter_widgets = [S3TextFilter(["title",
#"series_id",
],
#formstyle = text_filter_formstyle,
label = "",
_placeholder = T("Search"),
),
],
)
settings.customise_cms_post_resource = customise_cms_post_resource
# -----------------------------------------------------------------------------
def customise_cms_post_controller(**attr):
s3 = current.response.s3
# Custom prep
standard_prep = s3.prep
def prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
else:
result = True
if r.method == "datalist":
# Filter out system posts
from s3 import FS
r.resource.add_filter(FS("post_module.module") == None)
return result
s3.prep = prep
s3.dl_no_header = True
#attr["dl_rowsize"] = 2
return attr
settings.customise_cms_post_controller = customise_cms_post_controller
# -------------------------------------------------------------------------
def customise_doc_document_resource(r, tablename):
from gluon import IS_IN_SET, URL
from s3 import S3SQLCustomForm, S3TextFilter
#from templates.CCC.controllers import doc_document_list_layout
current.response.s3.crud_strings[tablename] = Storage(
label_create = T("Add Document"),
# title_display = T("Guide Details"),
title_list = "",
# title_update = T("Edit Guide"),
# #title_upload = T("Import Guides"),
# label_list_button = T("List Guides"),
# label_delete_button = T("Delete Guide"),
# msg_record_created = T("Guide added"),
# msg_record_modified = T("Guide updated"),
# msg_record_deleted = T("Guide deleted"),
# msg_list_empty = T("No Guides currently registered")
)
s3db = current.s3db
# Filtered components
s3db.add_components("doc_document",
doc_document_tag = ({"name": "document_type",
"joinby": "document_id",
"filterby": {"tag": "document_type"},
"multiple": False,
},
),
)
# Individual settings for specific tag components
components_get = s3db.resource(tablename).components.get
document_type = components_get("document_type")
f = document_type.table.value
f.requires = IS_IN_SET(["Emergency Plan",
"Contact Information",
"Risk Assessment",
"Guidance Document",
"Map",
"Other",
])
f = s3db.doc_document.organisation_id
user = current.auth.user
organisation_id = user and user.organisation_id
if organisation_id:
f.default = organisation_id
else:
f.readable = f.writable = True
s3db.configure("doc_document",
create_next = URL(args="datalist"),
crud_form = S3SQLCustomForm("organisation_id",
(T("Type"), "document_type.value"),
(T("Document Name"), "name"),
"file",
"date",
"comments",
),
list_fields = ["organisation_id",
"document_type.value",
"name",
"file",
"date",
"comments",
],
#list_layout = doc_document_list_layout,
filter_widgets = [S3TextFilter(["name",
"organisation_id",
],
#formstyle = text_filter_formstyle,
label = "",
_placeholder = T("Search"),
),
],
)
settings.customise_doc_document_resource = customise_doc_document_resource
# -----------------------------------------------------------------------------
def customise_doc_document_controller(**attr):
current.response.s3.dl_no_header = True
return attr
settings.customise_doc_document_controller = customise_doc_document_controller
# -------------------------------------------------------------------------
def customise_hrm_competency_resource(r, tablename):
s3db = current.s3db
table = s3db.hrm_competency
table.competency_id.readable = table.competency_id.writable = False
table.organisation_id.readable = table.organisation_id.writable = False
s3db.configure("hrm_competency",
list_fields = ["skill_id",
"comments",
],
)
settings.customise_hrm_competency_resource = customise_hrm_competency_resource
# -------------------------------------------------------------------------
def customise_hrm_human_resource_resource(r, tablename):
from s3 import S3OptionsFilter, S3SQLCustomForm, S3TextFilter
from s3layouts import S3PopupLink
s3db = current.s3db
# Filtered components
s3db.add_components("hrm_human_resource",
hrm_human_resource_tag = ({"name": "job_title",
"joinby": "human_resource_id",
"filterby": {"tag": "job_title"},
"multiple": False,
},
),
)
table = s3db.hrm_human_resource
#f = table.job_title_id
#f.label = T("Role")
#f.comment = S3PopupLink(c = "hrm",
# f = "job_title",
# label = T("New Job Title"),
# title = T("Role"),
# tooltip = T("The volunteer's role"),
# )
if r.controller == "default":
# Personal Profile
list_fields = ["job_title.value",
]
current.response.s3.crud_strings[tablename] = Storage(
label_create = T("New Affiliation"),
title_display = T("Affiliation Details"),
title_list = T("Affiliations"),
title_update = T("Edit Affiliation"),
#title_upload = T("Import Affiliations"),
label_list_button = T("List Affiliations"),
label_delete_button = T("Delete Affiliation"),
msg_record_created = T("Affiliation added"),
msg_record_modified = T("Affiliation updated"),
msg_record_deleted = T("Affiliation deleted"),
msg_list_empty = T("No Affiliations currently registered")
)
else:
list_fields = ["person_id",
(T("Role"), "job_title.value"),
(T("Skills"), "person_id$competency.skill_id"),
(T("Email"), "email.value"),
(T("Mobile Phone"), "phone.value"),
]
current.response.s3.crud_strings[tablename] = Storage(
label_create = T("New Volunteer"),
title_display = T("Volunteer Details"),
title_list = T("Volunteers"),
title_update = T("Edit Volunteer"),
#title_upload = T("Import Volunteers"),
label_list_button = T("List Volunteers"),
label_delete_button = T("Delete Volunteer"),
msg_record_created = T("Volunteer added"),
msg_record_modified = T("Volunteer updated"),
msg_record_deleted = T("Volunteer deleted"),
msg_list_empty = T("No Volunteers currently registered")
)
filter_fields = ["person_id$first_name",
"person_id$middle_name",
"person_id$last_name",
"job_title.value",
"comments",
"person_id$competency.skill_id$name",
]
gtable = s3db.gis_location
districts = current.db((gtable.level == "L3") & (gtable.L2 == "Cumbria")).select(gtable.id,
gtable.name,
cache = s3db.cache)
districts = {d.id:d.name for d in districts}
filter_widgets = [S3TextFilter(filter_fields,
#formstyle = text_filter_formstyle,
label = "",
_placeholder = T("Search"),
),
S3OptionsFilter("person_id$person_location.location_id",
label = T("Locations Served"),
options = districts,
),
S3OptionsFilter("person_id$competency.skill_id"),
]
if current.auth.s3_has_role("ADMIN"):
filter_fields.insert(0, "organisation_id$name")
filter_widgets.append(S3OptionsFilter("organisation_id"))
list_fields.insert(0, "organisation_id")
else:
f = table.organisation_id
f.readable = f.writable = False
f.comment = None # No Create
s3db.configure("hrm_human_resource",
crud_form = S3SQLCustomForm("organisation_id",
(T("Role"), "job_title.value"),
"person_id",
"comments",
),
list_fields = list_fields,
filter_widgets = filter_widgets,
)
settings.customise_hrm_human_resource_resource = customise_hrm_human_resource_resource
# -------------------------------------------------------------------------
#def customise_hrm_job_title_resource(r, tablename):
# current.response.s3.crud_strings[tablename] = Storage(
# label_create = T("New Role"),
# title_display = T("Role Details"),
# title_list = T("Roles"),
# title_update = T("Edit Role"),
# #title_upload = T("Import Roles"),
# label_list_button = T("List Roles"),
# label_delete_button = T("Delete Role"),
# msg_record_created = T("Role added"),
# msg_record_modified = T("Role updated"),
# msg_record_deleted = T("Role deleted"),
# msg_list_empty = T("No Roles currently registered")
# )
#settings.customise_hrm_job_title_resource = customise_hrm_job_title_resource
# -------------------------------------------------------------------------
def hrm_training_event_postprocess(form):
"""
Create Site based on other fields
"""
training_event_id = form.vars.id
db = current.db
s3db = current.s3db
etable = s3db.hrm_training_event
ettable = s3db.hrm_event_tag
ftable = s3db.org_facility
# Load record
left = ettable.on((ettable.training_event_id == training_event_id) & \
(ettable.tag == "venue_name")
)
training_event = db(etable.id == training_event_id).select(etable.location_id,
etable.site_id,
ettable.value,
left = left,
limitby = (0, 1)
).first()
venue_name = training_event[ettable.value]
location_id = training_event[etable.location_id]
site_id = training_event[etable.site_id]
if site_id:
facility = db(ftable.site_id == site_id).select(ftable.id,
limitby = (0, 1)
).first()
facility.update_record(name = venue_name,
location_id = location_id,
)
else:
record = {"name": venue_name,
"location_id": location_id,
}
facility_id = ftable.insert(**record)
record["id"] = facility_id
s3db.update_super(ftable, record)
db(etable.id == training_event_id).update(site_id = record["site_id"])
# -------------------------------------------------------------------------
def customise_hrm_training_event_resource(r, tablename):
from gluon import IS_EMAIL, IS_EMPTY_OR, IS_IN_SET, IS_NOT_EMPTY, IS_URL
from s3 import IS_UTC_DATETIME, \
S3SQLInlineLink, S3LocationSelector, \
S3OptionsFilter, S3SQLCustomForm, S3TextFilter, \
s3_phone_requires
current.response.s3.crud_strings[tablename] = Storage(
label_create = T("New Event"),
title_display = T("Event Details"),
title_list = T("Events"),
title_update = T("Edit Event"),
#title_upload = T("Import Events"),
label_list_button = T("List Events"),
label_delete_button = T("Delete Event"),
msg_record_created = T("Event added"),
msg_record_modified = T("Event updated"),
msg_record_deleted = T("Event deleted"),
msg_list_empty = T("No Events currently registered")
)
s3db = current.s3db
# Filtered components
s3db.add_components("hrm_training_event",
hrm_event_tag = ({"name": "venue_name",
"joinby": "training_event_id",
"filterby": {"tag": "venue_name"},
"multiple": False,
},
{"name": "contact_name",
"joinby": "training_event_id",
"filterby": {"tag": "contact_name"},
"multiple": False,
},
{"name": "contact_tel",
"joinby": "training_event_id",
"filterby": {"tag": "contact_tel"},
"multiple": False,
},
{"name": "contact_email",
"joinby": "training_event_id",
"filterby": {"tag": "contact_email"},
"multiple": False,
},
{"name": "contact_web",
"joinby": "training_event_id",
"filterby": {"tag": "contact_web"},
"multiple": False,
},
),
)
# Individual settings for specific tag components
components_get = s3db.resource(tablename).components.get
venue_name = components_get("venue_name")
f = venue_name.table.value
f.requires = IS_NOT_EMPTY()
contact_tel = components_get("contact_tel")
f = contact_tel.table.value
f.requires = IS_EMPTY_OR(s3_phone_requires)
contact_email = components_get("contact_email")
f = contact_email.table.value
f.requires = IS_EMAIL()
contact_web = components_get("contact_web")
f = contact_web.table.value
f.requires = IS_EMPTY_OR(IS_URL())
table = s3db.hrm_training_event
table.name.readable = table.name.writable = True
table.comments.comment = None
table.start_date.requires = IS_UTC_DATETIME()
table.site_id.represent = s3db.org_SiteRepresent(show_type = False)
f = table.location_id
f.readable = f.writable = True
        f.widget = S3LocationSelector(levels = ("L3",),
                                      required_levels = ("L3",),
show_address = True)
#gtable = s3db.gis_location
#districts = current.db((gtable.level == "L3") & (gtable.L2 == "Cumbria")).select(gtable.id,
# gtable.name,
# cache = s3db.cache)
#districts = {d.id:d.name for d in districts}
#f = s3db.hrm_event_location.location_id
#f.requires = IS_IN_SET(districts)
#f.widget = None
list_fields = ["start_date",
"name",
"site_id",
"location_id$L3",
"location_id$addr_street",
]
filter_widgets = [S3TextFilter(["name",
"comments",
],
#formstyle = text_filter_formstyle,
label = "",
_placeholder = T("Search"),
),
]
auth = current.auth
if auth.s3_has_role("ADMIN"):
filter_widgets.append(S3OptionsFilter("organisation_id",
label = T("Organization")))
list_fields.insert(0, (T("Organization"), "organisation_id"))
else:
f = table.organisation_id
f.default = auth.user.organisation_id
f.readable = f.writable = False
s3db.configure("hrm_training_event",
crud_form = S3SQLCustomForm((T("Event name"), "name"),
(T("Event description"), "comments"),
(T("Starts"), "start_date"),
(T("Ends"), "end_date"),
(T("Lead Organization"), "organisation_id"),
#S3SQLInlineLink("location",
# field = "location_id",
# label = T("Tick the area(s) which this event relates to"),
# ),
(T("Venue name"), "venue_name.value"),
"location_id",
(T("Contact Name"), "contact_name.value"),
(T("Telephone"), "contact_tel.value"),
(T("Email"), "contact_email.value"),
(T("Website"), "contact_web.value"),
postprocess = hrm_training_event_postprocess,
),
filter_widgets = filter_widgets,
list_fields = list_fields,
subheadings = {"name": T("Event Information"),
#"link_defaultlocation": T("Event Coverage"),
"venue_name_value": T("Venue"),
"contact_name_value": T("Contact Information"),
},
)
settings.customise_hrm_training_event_resource = customise_hrm_training_event_resource
# -----------------------------------------------------------------------------
def customise_hrm_training_event_controller(**attr):
attr["rheader"] = ccc_rheader
return attr
settings.customise_hrm_training_event_controller = customise_hrm_training_event_controller
# -------------------------------------------------------------------------
def customise_org_facility_resource(r, tablename):
from s3 import S3SQLCustomForm, S3SQLInlineLink
s3db = current.s3db
s3db.org_site_facility_type.facility_type_id.label = T("Type")
crud_form = S3SQLCustomForm("name",
"code",
S3SQLInlineLink(
"facility_type",
label = T("Type"),
field = "facility_type_id",
#widget = "groupedopts",
cols = 3,
),
#"organisation_id",
"location_id",
"opening_times",
"contact",
"phone1",
"phone2",
"email",
"website",
#S3SQLInlineComponent(
# "status",
# label = T("Status"),
# fields = ["last_contacted"],
# multiple = False,
#),
"obsolete",
"comments",
)
s3db.configure(tablename,
crud_form = crud_form,
)
settings.customise_org_facility_resource = customise_org_facility_resource
# -------------------------------------------------------------------------
def customise_org_organisation_resource(r, tablename):
from gluon import IS_EMAIL, IS_EMPTY_OR, IS_IN_SET, IS_URL
from s3 import S3OptionsFilter, S3SQLCustomForm, S3SQLInlineComponent, S3SQLInlineLink, S3TextFilter
s3db = current.s3db
# Filtered components
s3db.add_components("org_organisation",
pr_contact = ({"name": "email",
"joinby": "pe_id",
"multiple": False,
"filterby": {"contact_method": "EMAIL",
},
},
{"name": "facebook",
"joinby": "pe_id",
"multiple": False,
"filterby": {"contact_method": "FACEBOOK",
},
},
{"name": "twitter",
"joinby": "pe_id",
"multiple": False,
"filterby": {"contact_method": "TWITTER",
},
},
{"name": "sm_other",
"joinby": "pe_id",
"multiple": False,
"filterby": {"contact_method": "OTHER",
},
},
),
org_organisation_tag = ({"name": "sm_other_type",
"joinby": "organisation_id",
"multiple": False,
"filterby": {"tag": "sm_other_type",
},
},
),
)
# Individual settings for specific tag components
components_get = s3db.resource(tablename).components.get
email = components_get("email")
f = email.table.value
f.requires = IS_EMPTY_OR(IS_EMAIL())
facebook = components_get("facebook")
f = facebook.table.value
f.requires = IS_EMPTY_OR(IS_URL())
#twitter = components_get("twitter")
#f = twitter.table.value
#f.requires = IS_EMPTY_OR(None)
#sm_other = components_get("sm_other")
#f = sm_other.table.value
#f.requires = IS_EMPTY_OR(None)
gtable = s3db.gis_location
districts = current.db((gtable.level == "L3") & (gtable.L2 == "Cumbria")).select(gtable.id,
gtable.name,
cache = s3db.cache)
districts = {d.id:d.name for d in districts}
f = s3db.org_organisation_location.location_id
f.requires = IS_EMPTY_OR(IS_IN_SET(districts))
f.widget = None
s3db.configure("org_organisation",
crud_form = S3SQLCustomForm((T("Name of Organization"), "name"),
S3SQLInlineLink("organisation_type",
field = "organisation_type_id",
label = T("Type"),
),
S3SQLInlineLink("location",
field = "location_id",
label = T("District"),
),
S3SQLInlineComponent(
"email",
name = "email",
label = T("Email"),
multiple = False,
fields = [("", "value")],
#filterby = {"field": "contact_method",
# "options": "EMAIL",
# },
),
S3SQLInlineComponent(
"facebook",
name = "facebook",
label = T("Facebook"),
multiple = False,
fields = [("", "value")],
#filterby = {"field": "contact_method",
# "options": "FACEBOOK",
# },
),
S3SQLInlineComponent(
"twitter",
name = "twitter",
label = T("Twitter"),
multiple = False,
fields = [("", "value")],
#filterby = {"field": "contact_method",
# "options": "TWITTER",
# },
),
S3SQLInlineComponent(
"sm_other",
name = "sm_other",
label = T("SM Other"),
multiple = False,
fields = [("", "value")],
#filterby = {"field": "contact_method",
# "options": "OTHER",
# },
),
(T("Please Specify"), "sm_other_type.value"),
"website",
"comments",
),
list_fields = ["name",
(T("Type"), "organisation_organisation_type.organisation_type_id"),
],
filter_widgets = [S3TextFilter(["name",
"comments",
],
#formstyle = text_filter_formstyle,
label = "",
_placeholder = T("Search"),
),
S3OptionsFilter("organisation_organisation_type.organisation_type_id",
label = T("Type"),
),
S3OptionsFilter("organisation_location.location_id",
label = T("Locations Served"),
),
],
)
settings.customise_org_organisation_resource = customise_org_organisation_resource
# -----------------------------------------------------------------------------
def customise_org_organisation_controller(**attr):
attr["rheader"] = ccc_rheader
return attr
settings.customise_org_organisation_controller = customise_org_organisation_controller
# -------------------------------------------------------------------------
def customise_org_organisation_location_resource(r, tablename):
from gluon import IS_EMPTY_OR, IS_IN_SET
s3db = current.s3db
gtable = s3db.gis_location
districts = current.db((gtable.level == "L3") & (gtable.L2 == "Cumbria")).select(gtable.id,
gtable.name,
cache = s3db.cache)
districts = {d.id:d.name for d in districts}
f = s3db.org_organisation_location.location_id
f.requires = IS_EMPTY_OR(IS_IN_SET(districts))
f.widget = None
settings.customise_org_organisation_location_resource = customise_org_organisation_location_resource
# -------------------------------------------------------------------------
def customise_pr_group_resource(r, tablename):
from gluon import IS_EMPTY_OR, IS_INT_IN_RANGE, IS_NOT_EMPTY
from s3 import IS_INT_AMOUNT, S3OptionsFilter, S3SQLCustomForm, S3SQLInlineLink, S3TextFilter, s3_phone_requires
s3db = current.s3db
# Filtered components
s3db.add_components("pr_group",
pr_group_tag = ({"name": "volunteers",
"joinby": "group_id",
"filterby": {"tag": "volunteers"},
"multiple": False,
},
{"name": "transport",
"joinby": "group_id",
"filterby": {"tag": "transport"},
"multiple": False,
},
{"name": "skills_details",
"joinby": "group_id",
"filterby": {"tag": "skills_details"},
"multiple": False,
},
{"name": "contact_name",
"joinby": "group_id",
"filterby": {"tag": "contact_name"},
"multiple": False,
},
{"name": "contact_number",
"joinby": "group_id",
"filterby": {"tag": "contact_number"},
"multiple": False,
},
),
)
# Individual settings for specific tag components
components_get = s3db.resource(tablename).components.get
integer_represent = IS_INT_AMOUNT.represent
volunteers = components_get("volunteers")
f = volunteers.table.value
f.represent = integer_represent
f.requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, None))
contact_name = components_get("contact_name")
f = contact_name.table.value
f.requires = IS_NOT_EMPTY()
f.comment = T("Contact must not be listed as a leader")
contact_number = components_get("contact_number")
f = contact_number.table.value
f.requires = s3_phone_requires
s3db.configure("pr_group",
crud_form = S3SQLCustomForm("name",
(T("Approximate Number of Volunteers"), "volunteers.value"),
(T("Mode of Transport"), "transport.value"),
S3SQLInlineLink("skill",
field = "skill_id",
label = T("Volunteer Offer"),
),
(T("Please specify details"), "skills_details.value"),
S3SQLInlineLink("location",
field = "location_id",
label = T("Where would you be willing to volunteer?"),
),
(T("Emergency Contact Name"), "contact_name.value"),
(T("Emergency Contact Number"), "contact_number.value"),
"comments",
),
list_fields = ["name",
(T("# Volunteers"), "volunteers.value"),
(T("Mode of Transport"), "transport.value"),
# Not working:
#(T("Leaders"), "group_membership.person_id"),
(T("Locations"), "group_location.location_id"),
(T("Skills"), "group_competency.skill_id"),
(T("Skills Details"), "skill_details.value"),
"comments",
],
filter_widgets = [S3TextFilter(["name",
"group_membership.person_id$first_name",
"group_membership.person_id$middle_name",
"group_membership.person_id$last_name",
"group_location.location_id",
"group_competency.skill_id",
"skills_details.value",
"comments",
],
#formstyle = text_filter_formstyle,
label = "",
_placeholder = T("Search"),
),
S3OptionsFilter("group_location.location_id",
label = T("Locations Served"),
),
S3OptionsFilter("group_competency.skill_id",
label = T("Skill"),
),
],
)
settings.customise_pr_group_resource = customise_pr_group_resource
# -----------------------------------------------------------------------------
def customise_pr_group_controller(**attr):
s3 = current.response.s3
# Custom prep
standard_prep = s3.prep
def prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
else:
result = True
if r.component_name == "person":
s3.crud_strings["pr_person"] = Storage(
label_create = T("New Member"),
title_display = T("Member Details"),
title_list = T("Members"),
title_update = T("Edit Member"),
#title_upload = T("Import Members"),
label_list_button = T("List Members"),
label_delete_button = T("Delete Member"),
msg_record_created = T("Member added"),
msg_record_modified = T("Member updated"),
msg_record_deleted = T("Member deleted"),
msg_list_empty = T("No Members currently registered")
)
r.component.configure(list_fields = ["first_name",
"middle_name",
"last_name",
(T("Email"), "email.value"),
(T("Mobile Phone"), "phone.value"),
"comments",
],
)
return result
s3.prep = prep
attr["rheader"] = ccc_rheader
# Allow components with components (i.e. persons) to breakout from tabs
#attr["native"] = True
# Custom postp
standard_postp = s3.postp
def postp(r, output):
# Call standard postp
if callable(standard_postp):
output = standard_postp(r, output)
if r.component_name == "person":
# Include get_vars on Action Buttons to configure crud_form/crud_strings appropriately
from gluon import URL
from s3 import S3CRUD
read_url = URL(c="pr", f="person", args=["[id]", "read"],
vars = {"groups": 1})
update_url = URL(c="pr", f="person", args=["[id]", "update"],
vars = {"groups": 1})
S3CRUD.action_buttons(r,
read_url = read_url,
update_url = update_url,
)
return output
s3.postp = postp
return attr
settings.customise_pr_group_controller = customise_pr_group_controller
# -------------------------------------------------------------------------
def customise_pr_group_location_resource(r, tablename):
from gluon import IS_EMPTY_OR, IS_IN_SET
s3db = current.s3db
gtable = s3db.gis_location
districts = current.db((gtable.level == "L3") & (gtable.L2 == "Cumbria")).select(gtable.id,
gtable.name,
cache = s3db.cache)
districts = {d.id:d.name for d in districts}
f = s3db.pr_group_location.location_id
f.requires = IS_EMPTY_OR(IS_IN_SET(districts))
f.widget = None
settings.customise_pr_group_location_resource = customise_pr_group_location_resource
# -------------------------------------------------------------------------
def customise_pr_group_membership_resource(r, tablename):
from s3 import S3AddPersonWidget, S3SQLCustomForm
current.response.s3.crud_strings[tablename] = Storage(
label_create = T("Add Leader"),
title_display = T("Leader Details"),
title_list = T("Leaders"),
title_update = T("Edit Leader"),
#title_upload = T("Import Leaders"),
label_list_button = T("List Leaders"),
label_delete_button = T("Delete Leader"),
msg_record_created = T("Leader added"),
msg_record_modified = T("Leader updated"),
msg_record_deleted = T("Leader deleted"),
msg_list_empty = T("No Leaders currently registered")
)
s3db = current.s3db
table = s3db.pr_group_membership
table.person_id.widget = S3AddPersonWidget(controller="pr")
s3db.configure("pr_group_membership",
crud_form = S3SQLCustomForm("person_id",
"comments",
),
list_fields = ["person_id",
(T("Phone"), "person_id$phone.value"),
(T("Email"), "person_id$email.value"),
"comments",
],
)
settings.customise_pr_group_membership_resource = customise_pr_group_membership_resource
# -------------------------------------------------------------------------
def customise_pr_person_resource(r, tablename):
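# Persons: add filtered tag components and pick the CRUD form fields by user type (Donor, Group Admin, or individual Volunteer).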
from gluon import IS_EMPTY_OR, IS_IN_SET
from s3 import S3SQLCustomForm, S3SQLInlineLink
s3db = current.s3db
# Filtered components
s3db.add_components("pr_person",
pr_person_tag = ({"name": "organisation",
"joinby": "person_id",
"filterby": {"tag": "organisation"},
"multiple": False,
},
{"name": "organisation_type",
"joinby": "person_id",
"filterby": {"tag": "organisation_type"},
"multiple": False,
},
{"name": "items_details",
"joinby": "person_id",
"filterby": {"tag": "items_details"},
"multiple": False,
},
{"name": "skills_details",
"joinby": "person_id",
"filterby": {"tag": "skills_details"},
"multiple": False,
},
{"name": "delivery",
"joinby": "person_id",
"filterby": {"tag": "delivery"},
"multiple": False,
},
{"name": "availability",
"joinby": "person_id",
"filterby": {"tag": "availability"},
"multiple": False,
},
),
)
# Individual settings for specific tag components
components_get = s3db.resource(tablename).components.get
organisation_type = components_get("organisation_type")
f = organisation_type.table.value
f.requires = IS_EMPTY_OR(IS_IN_SET([T("Business Donor"),
T("Individual Donor"),
T("Public Sector Organization"),
T("Voluntary Sector Organization"),
]))
delivery = components_get("delivery")
f = delivery.table.value
f.requires = IS_EMPTY_OR(IS_IN_SET(("Y", "N")))
f.represent = lambda v: T("yes") if v == "Y" else T("no")
from s3 import S3TagCheckboxWidget
f.widget = S3TagCheckboxWidget(on="Y", off="N")
f.default = "N"
get_vars_get = r.get_vars.get
has_role = current.auth.s3_has_role
if get_vars_get("donors") or \
has_role("DONOR", include_admin=False):
# Donor
crud_fields = ["first_name",
"middle_name",
"last_name",
"date_of_birth",
(T("Gender"), "gender"),
(T("Name of Organization"), "organisation.value"),
(T("Type of Organization"), "organisation_type.value"),
S3SQLInlineLink("item",
field = "item_id",
label = T("Goods / Services"),
),
(T("Details"), "items_details.value"),
(T("Are you able to Deliver?"), "delivery.value"),
S3SQLInlineLink("location",
field = "location_id",
label = T("Where would you be willing to deliver?"),
),
(T("Length of time the offer is available?"), "availability.value"),
"comments",
]
elif get_vars_get("groups") or \
r.function == "group" or \
has_role("GROUP_ADMIN", include_admin=False):
# Group Admin
# Skills are recorded at the Group level
crud_fields = ["first_name",
"middle_name",
"last_name",
"date_of_birth",
(T("Gender"), "gender"),
"comments",
]
else:
# Individual Volunteer: Reserve or Organisation
crud_fields = ["first_name",
"middle_name",
"last_name",
"date_of_birth",
(T("Gender"), "gender"),
S3SQLInlineLink("skill",
field = "skill_id",
label = T("Volunteer Offer"),
),
(T("Skills Details"), "skills_details.value"),
S3SQLInlineLink("location",
field = "location_id",
label = T("Where would you be willing to operate?"),
),
"comments",
]
s3db.configure("pr_person",
crud_form = S3SQLCustomForm(*crud_fields),
)
settings.customise_pr_person_resource = customise_pr_person_resource
# -----------------------------------------------------------------------------
def customise_pr_person_controller(**attr):
s3db = current.s3db
# Custom Component
s3db.add_components("pr_person",
pr_group = {"link": "pr_group_membership",
"joinby": "person_id",
"key": "group_id",
"actuate": "replace",
"multiple": False,
},
)
# Custom Method
from templates.CCC.controllers import personAdditional
s3db.set_method("pr", "person",
method = "additional",
action = personAdditional)
s3 = current.response.s3
# Custom prep
standard_prep = s3.prep
def prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
else:
result = True
if r.component_name == "group_membership":
r.resource.components._components["group_membership"].configure(listadd = False,
list_fields = [(T("Name"), "group_id$name"),
"group_id$comments",
],
)
get_vars_get = r.get_vars.get
has_role = current.auth.s3_has_role
if get_vars_get("reserves") or \
has_role("RESERVE", include_admin=False):
# Reserve Volunteers
from s3 import FS, S3OptionsFilter, S3TextFilter
resource = r.resource
# Only include Reserves
db = current.db
mtable = db.auth_membership
gtable = db.auth_group
query = (gtable.uuid == "RESERVE") & \
(gtable.id == mtable.group_id)
reserves = db(query).select(mtable.user_id)
reserves = [m.user_id for m in reserves]
resource.add_filter(FS("user.id").belongs(reserves))
gtable = s3db.gis_location
districts = current.db((gtable.level == "L3") & (gtable.L2 == "Cumbria")).select(gtable.id,
gtable.name,
cache = s3db.cache)
districts = {d.id:d.name for d in districts}
resource.configure(list_fields = ["first_name",
"middle_name",
"last_name",
(T("Skills"), "competency.skill_id"),
(T("Email"), "email.value"),
(T("Mobile Phone"), "phone.value"),
],
filter_widgets = [S3TextFilter(["first_name",
"middle_name",
"last_name",
"comments",
"competency.skill_id$name",
],
#formstyle = text_filter_formstyle,
label = "",
_placeholder = T("Search"),
),
S3OptionsFilter("person_location.location_id",
label = T("Locations Served"),
options = districts,
),
S3OptionsFilter("competency.skill_id",
),
],
)
s3.crud_strings[r.tablename] = Storage(
label_create = T("New Reserve Volunteer"),
title_display = T("Reserve Volunteer Details"),
title_list = T("Reserve Volunteers"),
title_update = T("Edit Reserve Volunteer"),
#title_upload = T("Import Reserve Volunteers"),
label_list_button = T("List Reserve Volunteers"),
label_delete_button = T("Delete Reserve Volunteer"),
msg_record_created = T("Reserve Volunteer added"),
msg_record_modified = T("Reserve Volunteer updated"),
msg_record_deleted = T("Reserve Volunteer deleted"),
msg_list_empty = T("No Reserve Volunteers currently registered")
)
elif get_vars_get("donors") or \
has_role("DONOR", include_admin=False):
# Donors
from s3 import FS, S3OptionsFilter, S3TextFilter
resource = r.resource
# Only include Donors
db = current.db
mtable = db.auth_membership
gtable = db.auth_group
query = (gtable.uuid == "DONOR") & \
(gtable.id == mtable.group_id)
donors = db(query).select(mtable.user_id)
donors = [d.user_id for d in donors]
resource.add_filter(FS("user.id").belongs(donors))
resource.configure(list_fields = [# @ToDo: Add Organisation freetext
"first_name",
"middle_name",
"last_name",
(T("Goods / Services"), "person_item.item_id"),
(T("Email"), "email.value"),
(T("Mobile Phone"), "phone.value"),
],
filter_widgets = [S3TextFilter(["first_name",
"middle_name",
"last_name",
"comments",
# @ToDo: Add Items
#"competency.skill_id$name",
],
#formstyle = text_filter_formstyle,
label = "",
_placeholder = T("Search"),
),
S3OptionsFilter("person_item.item_id",
),
],
)
s3.crud_strings[r.tablename] = Storage(
label_create = T("New Donor"),
title_display = T("Donor Details"),
title_list = T("Donors"),
title_update = T("Edit Donor"),
#title_upload = T("Import Donors"),
label_list_button = T("List Donors"),
label_delete_button = T("Delete Donor"),
msg_record_created = T("Donor added"),
msg_record_modified = T("Donor updated"),
msg_record_deleted = T("Donor deleted"),
msg_list_empty = T("No Donors currently registered")
)
elif get_vars_get("groups") or \
has_role("GROUP_ADMIN", include_admin=False):
# Group Members
s3.crud_strings[r.tablename] = Storage(
label_create = T("New Member"),
title_display = T("Member Details"),
title_list = T("Members"),
title_update = T("Edit Member"),
#title_upload = T("Import Members"),
label_list_button = T("List Members"),
label_delete_button = T("Delete Member"),
msg_record_created = T("Member added"),
msg_record_modified = T("Member updated"),
msg_record_deleted = T("Member deleted"),
msg_list_empty = T("No Members currently registered")
)
else:
# Organisation Volunteers
# (only used for hrm/person profile)
s3.crud_strings[r.tablename] = Storage(
label_create = T("New Volunteer"),
title_display = T("Volunteer Details"),
title_list = T("Volunteers"),
title_update = T("Edit Volunteer"),
#title_upload = T("Import Volunteers"),
label_list_button = T("List Volunteers"),
label_delete_button = T("Delete Volunteer"),
msg_record_created = T("Volunteer added"),
msg_record_modified = T("Volunteer updated"),
msg_record_deleted = T("Volunteer deleted"),
msg_list_empty = T("No Volunteers currently registered")
)
return result
s3.prep = prep
# Custom postp
standard_postp = s3.postp
def postp(r, output):
# Call standard postp
if callable(standard_postp):
output = standard_postp(r, output)
# Include get_vars on Action Buttons to configure crud_form/crud_strings appropriately
from gluon import URL
from s3 import S3CRUD
read_url = URL(c="pr", f="person", args=["[id]", "read"],
vars = r.get_vars)
update_url = URL(c="pr", f="person", args=["[id]", "update"],
vars = r.get_vars)
S3CRUD.action_buttons(r,
read_url = read_url,
update_url = update_url,
)
return output
s3.postp = postp
# Hide the search box on component tabs, as confusing & not useful
attr["dtargs"] = {"dt_searching": False,
}
attr["rheader"] = ccc_rheader
return attr
settings.customise_pr_person_controller = customise_pr_person_controller
# -------------------------------------------------------------------------
def customise_pr_person_location_resource(r, tablename):
from gluon import IS_EMPTY_OR, IS_IN_SET
from s3 import S3Represent
s3db = current.s3db
gtable = s3db.gis_location
districts = current.db((gtable.level == "L3") & (gtable.L2 == "Cumbria")).select(gtable.id,
gtable.name,
cache = s3db.cache)
districts = {d.id:d.name for d in districts}
f = s3db.pr_person_location.location_id
f.represent = S3Represent(options = districts)
f.requires = IS_EMPTY_OR(IS_IN_SET(districts))
f.widget = None
settings.customise_pr_person_location_resource = customise_pr_person_location_resource
# -------------------------------------------------------------------------
def project_task_create_onaccept(form):
"""
When a Task is created:
* Notify OrgAdmins
"""
from gluon import URL
from s3 import s3_fullname
form_vars_get = form.vars.get
task_id = form_vars_get("id")
# Lookup the Author details
db = current.db
s3db = current.s3db
ttable = s3db.project_task
otable = s3db.org_organisation
utable = db.auth_user
query = (ttable.id == task_id) & \
(ttable.created_by == utable.id)
user = db(query).select(utable.first_name,
utable.last_name,
utable.organisation_id,
limitby = (0, 1)
).first()
fullname = s3_fullname(user)
# Lookup the ORG_ADMINs
gtable = db.auth_group
mtable = db.auth_membership
query = (gtable.uuid == "ORG_ADMIN") & \
(gtable.id == mtable.group_id) & \
(mtable.user_id == utable.id) & \
(utable.organisation_id == user.organisation_id)
org_admins = db(query).select(utable.email)
# Construct Email message
system_name = settings.get_system_name_short()
subject = "%s: Message sent from %s" % \
(system_name,
fullname,
)
url = "%s%s" % (settings.get_base_public_url(),
URL(c="project", f="task", args=[task_id]))
message = "%s has sent you a Message on %s\n\nSubject: %s\nMessage: %s\n\nYou can view the message here: %s" % \
(fullname,
system_name,
form_vars_get("name"),
form_vars_get("description") or "",
url,
)
# Send message to each
send_email = current.msg.send_email
for admin in org_admins:
send_email(to = admin.email,
subject = subject,
message = message,
)
# -------------------------------------------------------------------------
def customise_project_task_resource(r, tablename):
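# Project tasks are presented as "Messages": relabel fields, notify OrgAdmins on create and hide admin-only fields from other users.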
from s3 import S3OptionsFilter, S3SQLCustomForm, S3TextFilter
current.response.s3.crud_strings[tablename] = Storage(
label_create = T("New Message"),
title_display = T("Message Details"),
title_list = T("Messages"),
title_update = T("Edit Message"),
#title_upload = T("Import Messages"),
label_list_button = T("List Messages"),
label_delete_button = T("Delete Message"),
msg_record_created = T("Message added"),
msg_record_modified = T("Message updated"),
msg_record_deleted = T("Message deleted"),
msg_list_empty = T("No Messages currently created")
)
s3db = current.s3db
table = s3db.project_task
table.name.label = T("Subject")
table.description.label = T("Message")
if current.auth.s3_has_role("ORG_ADMIN"):
# @ToDo: Filter Assigned To to just OrgAdmins?
pass
else:
# f = table.priority
# f.default = 1
# f.readable = f.writable = False
# f = table.status
# f.default = 1
# f.readable = f.writable = False
# table.pe_id.readable = table.pe_id.writable = False
table.comments.readable = table.comments.writable = False
s3db.configure("project_task",
# Can simply replace the default one
create_onaccept = project_task_create_onaccept,
crud_form = S3SQLCustomForm("name",
"description",
#"priority",
#"status",
#"pe_id",
"comments",
),
list_fields = [#"priority",
#"status",
#"pe_id",
"created_by",
"name",
],
filter_widgets = [S3TextFilter(["name",
"description",
"comments",
],
#formstyle = text_filter_formstyle,
label = "",
_placeholder = T("Search"),
),
#S3OptionsFilter("priority",
# options = settings.get_project_task_priority_opts(),
# cols = 3,
# ),
#S3OptionsFilter("status",
# options = settings.get_project_task_status_opts(),
# cols = 3,
# ),
],
)
settings.customise_project_task_resource = customise_project_task_resource
# -----------------------------------------------------------------------------
def customise_project_task_controller(**attr):
if current.auth.s3_has_role("ORG_ADMIN"):
# @ToDo: Default filter to hide Closed messages
pass
else:
s3 = current.response.s3
# Custom prep
standard_prep = s3.prep
def prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
else:
result = True
if r.method not in ("create", "read", "update"):
from gluon import redirect
redirect(r.url(method="create"))
else:
current.messages.UPDATE = "Edit"
# Don't attempt to load comments
s3.rfooter = None
return result
s3.prep = prep
# Custom postp
standard_postp = s3.postp
def postp(r, output):
# Call standard postp
if callable(standard_postp):
output = standard_postp(r, output)
if r.method == "read" and "buttons" in output:
output["buttons"].pop("list_btn")
return output
s3.postp = postp
attr["rheader"] = None
return attr
settings.customise_project_task_controller = customise_project_task_controller
# -------------------------------------------------------------------------
def req_need_organisation_onaccept(form):
"""
Set the realm of the parent req_need to that of the organisation
"""
db = current.db
s3db = current.s3db
rntable = s3db.req_need
otable = s3db.org_organisation
form_vars_get = form.vars.get
need_id = form_vars_get("need_id")
organisation_id = form_vars_get("organisation_id")
if not need_id or not organisation_id:
rnotable = s3db.req_need_organisation
record_id = form_vars_get("id")
record = db(rnotable.id == record_id).select(rnotable.need_id,
rnotable.organisation_id,
limitby = (0, 1),
).first()
need_id = record.need_id
organisation_id = record.organisation_id
org = db(otable.id == organisation_id).select(otable.pe_id,
limitby = (0, 1),
).first()
realm_entity = org.pe_id
db(rntable.id == need_id).update(realm_entity = realm_entity)
# -------------------------------------------------------------------------
def customise_req_need_resource(r, tablename):
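# Opportunities (req_need): add tag components, configure date/location widgets and, for non-admins, restrict contacts to people in their own organisation.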
from s3 import IS_ONE_OF, IS_UTC_DATETIME, S3CalendarWidget, S3DateTime, \
S3LocationSelector, S3SQLCustomForm, S3SQLInlineComponent, \
S3OptionsFilter, S3TextFilter, s3_comments_widget
s3db = current.s3db
# Filtered components
s3db.add_components("req_need",
req_need_tag = ({"name": "age_restrictions",
"joinby": "need_id",
"filterby": {"tag": "age_restrictions"},
"multiple": False,
},
{"name": "practical_info",
"joinby": "need_id",
"filterby": {"tag": "practical_info"},
"multiple": False,
},
{"name": "parking",
"joinby": "need_id",
"filterby": {"tag": "parking"},
"multiple": False,
},
{"name": "bring",
"joinby": "need_id",
"filterby": {"tag": "bring"},
"multiple": False,
},
),
)
# Individual settings for specific tag components
components_get = s3db.resource(tablename).components.get
practical_info = components_get("practical_info")
f = practical_info.table.value
f.widget = lambda f, v: \
s3_comments_widget(f, v, _placeholder = "including directions to location of the opportunity")
table = s3db.req_need
table.name.label = T("Description")
f = table.date
f.label = T("Start Date")
f.represent = lambda dt: S3DateTime.datetime_represent(dt, utc=True)
f.requires = IS_UTC_DATETIME()
f.widget = S3CalendarWidget(timepicker = True)
table.end_date.readable = table.end_date.writable = True
table.location_id.widget = S3LocationSelector(levels = ("L3",),
required_levels = ("L3",),
show_address = True)
current.response.s3.crud_strings[tablename] = Storage(
label_create = T("New Opportunity"),
title_display = T("Opportunity Details"),
title_list = T("Opportunities"),
title_update = T("Edit Opportunity"),
#title_upload = T("Import Opportunities"),
label_list_button = T("List Opportunities"),
label_delete_button = T("Delete Opportunity"),
msg_record_created = T("Opportunity added"),
msg_record_modified = T("Opportunity updated"),
msg_record_deleted = T("Opportunity deleted"),
msg_list_empty = T("No Opportunities currently registered")
)
person_id = s3db.req_need_contact.person_id
person_id.comment = None # No Create
filter_widgets = [S3TextFilter(["name",
"comments",
],
#formstyle = text_filter_formstyle,
label = "",
_placeholder = T("Search"),
),
S3OptionsFilter("location_id$L3",
label = T("District"),
),
S3OptionsFilter("need_skill.skill_id"),
]
list_fields = ["date",
"end_date",
"location_id",
#(T("Opportunity"), "name"),
"name",
"need_contact.person_id",
#(T("Phone"), "need_contact.person_id$phone.value"),
#(T("Email"), "need_contact.person_id$email.value"),
"need_skill.skill_id",
"need_skill.quantity",
]
auth = current.auth
if auth.s3_has_role("ADMIN"):
filter_widgets.insert(-2, S3OptionsFilter("need_organisation.organisation_id"))
list_fields.insert(0, "need_organisation.organisation_id")
else:
organisation_id = auth.user.organisation_id
f = s3db.req_need_organisation.organisation_id
f.default = organisation_id
# Needs to be in the form
#f.readable = f.writable = False
f.requires = s3db.org_organisation_requires(updateable=True)
f.comment = None # No Create
# Dropdown, not Autocomplete
person_id.widget = None
# Filtered to people affiliated with this Org
db = current.db
hrtable = s3db.hrm_human_resource
persons = db(hrtable.organisation_id == organisation_id).select(hrtable.person_id)
persons = [p.person_id for p in persons]
person_id.requires = IS_ONE_OF(db, "pr_person.id",
person_id.represent,
orderby = "pr_person.first_name",
sort = True,
filterby = "id",
filter_opts = persons,
)
s3db.configure("req_need",
# Needs a custom handler as default handler only supports default forms
#copyable = True,
crud_form = S3SQLCustomForm("need_organisation.organisation_id",
"date",
"end_date",
"location_id",
"name",
"need_contact.person_id",
S3SQLInlineComponent("need_skill",
label = "",
fields = ["skill_id", "quantity"],
multiple = False,
),
(T("Age Restrictions"), "age_restrictions.value"),
(T("Practical Information"), "practical_info.value"),
(T("Parking Options"), "parking.value"),
(T("What to Bring"), "bring.value"),
"comments",
),
filter_widgets = filter_widgets,
list_fields = list_fields,
)
s3db.configure("req_need_organisation",
onaccept = req_need_organisation_onaccept,
)
settings.customise_req_need_resource = customise_req_need_resource
# -----------------------------------------------------------------------------
def customise_req_need_controller(**attr):
#s3 = current.response.s3
# Custom prep
#standard_prep = s3.prep
#def prep(r):
# # Call standard prep
# if callable(standard_prep):
# result = standard_prep(r)
# else:
# result = True
# if r.method == "read":
# # Show the Contact's Phone & Email
# # @ToDo: Do this only for Vols whose Application has been successful
# # @ToDo: Create custom version of this which bypasses ACLs since
# # - Will fail for normal Vols as they can't see other Vols anyway
# # - Also failing for OrgAdmin as the user-added Phone is in the Personal PE not the Org's
# s3db = current.s3db
# s3db.req_need_contact.person_id.represent = s3db.pr_PersonRepresentContact(show_email = True,
# show_link = False,
# )
# return result
#s3.prep = prep
attr["rheader"] = ccc_rheader
return attr
settings.customise_req_need_controller = customise_req_need_controller
# -------------------------------------------------------------------------
def customise_req_need_person_resource(r, tablename):
current.response.s3.crud_labels["DELETE"] = "Remove"
s3db = current.s3db
s3db.req_need_person.person_id.represent = s3db.pr_PersonRepresent(show_link=True)
s3db.configure("req_need_person",
# Don't add people here (they are either invited or apply)
listadd = False,
)
settings.customise_req_need_person_resource = customise_req_need_person_resource
# -------------------------------------------------------------------------
def customise_supply_person_item_resource(r, tablename):
s3db = current.s3db
f = s3db.supply_person_item.item_id
# No Hyperlink for Items (don't have permissions anyway)
f.represent = s3db.supply_ItemRepresent()
# Dropdown, not Autocomplete
f.widget = None
settings.customise_supply_person_item_resource = customise_supply_person_item_resource
# END =========================================================================
|
StarcoderdataPython
|
34659
|
<reponame>DeadCodeProductions/dead
#!/usr/bin/env python3
import copy
import hashlib
import logging
import os
import random
import re
import subprocess
import sys
import tempfile
import time
from multiprocessing import Pool
from pathlib import Path
from typing import Any, Dict, Optional, cast
import requests
import bisector
import builder
import checker
import database
import generator
import init
import parsers
import patchdatabase
import preprocessing
import reducer
import repository
import utils
def get_llvm_github_commit_author(rev: str) -> Optional[str]:
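# Scrape the GitHub page of an LLVM commit to find the author's GitHub username (None if it cannot be determined).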
html = requests.get(
"https://github.com/llvm/llvm-project/commit/" + rev
).content.decode()
p = re.compile(r'.*\/llvm\/llvm-project\/commits\?author=(.*)".*')
for l in html.split("\n"):
l = l.strip()
if m := p.match(l):
return m.group(1)
return None
def get_all_bisections(ddb: database.CaseDatabase) -> list[str]:
res = ddb.con.execute("select distinct bisection from cases")
return [r[0] for r in res]
def _run() -> None:
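# Generation pipeline: keep producing interesting cases (optionally in parallel), bisect/reduce them as configured, and store them in the database or as tar files in the output directory.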
scenario = utils.get_scenario(config, args)
counter = 0
output_directory = (
Path(args.output_directory).absolute() if args.output_directory else None
)
parallel_generator = (
gnrtr.parallel_interesting_case(config, scenario, args.cores, start_stop=True)
if args.parallel_generation
else None
)
pipeline_components = (
["Generator<" + "parallel>" if args.parallel_generation else "single>"]
+ (["Bisector"] if args.bisector else [])
+ (
["Reducer<Only New>"]
if args.reducer is None
else (["Reducer<Always>"] if args.reducer == True else [])
)
)
print("Pipeline:", " -> ".join(pipeline_components), file=sys.stderr)
last_update_time = time.time()
while True:
if args.amount and args.amount != 0:
if counter >= args.amount:
break
if args.update_trunk_after_X_hours is not None:
if (
time.time() - last_update_time
) / 3600 > args.update_trunk_after_X_hours:
logging.info("Updating repositories...")
last_update_time = time.time()
known: Dict[str, list[int]] = dict()
for i, s in enumerate(scenario.target_settings):
cname = s.compiler_config.name
if cname not in known:
known[cname] = []
known[cname].append(i)
for cname, l in known.items():
repo = repository.Repo.repo_from_setting(
scenario.target_settings[l[0]]
)
old_trunk_commit = repo.rev_to_commit("trunk")
repo.pull()
new_trunk_commit = repo.rev_to_commit("trunk")
for i in l:
if scenario.target_settings[i].rev == old_trunk_commit:
scenario.target_settings[i].rev = new_trunk_commit
# Time db values
generator_time: Optional[float] = None
generator_try_count: Optional[int] = None
bisector_time: Optional[float] = None
bisector_steps: Optional[int] = None
reducer_time: Optional[float] = None
if parallel_generator:
case = next(parallel_generator)
else:
time_start_gen = time.perf_counter()
case = gnrtr.generate_interesting_case(scenario)
time_end_gen = time.perf_counter()
generator_time = time_end_gen - time_start_gen
generator_try_count = gnrtr.try_counter
if args.bisector:
try:
time_start_bisector = time.perf_counter()
bisect_worked = bsctr.bisect_case(case)
time_end_bisector = time.perf_counter()
bisector_time = time_end_bisector - time_start_bisector
bisector_steps = bsctr.steps
if not bisect_worked:
continue
except bisector.BisectionException as e:
print(f"BisectionException: '{e}'", file=sys.stderr)
continue
except AssertionError as e:
print(f"AssertionError: '{e}'", file=sys.stderr)
continue
except builder.BuildException as e:
print(f"BuildException: '{e}'", file=sys.stderr)
continue
if args.reducer is not False:
if (
args.reducer
or case.bisection
and case.bisection in get_all_bisections(ddb)
):
try:
time_start_reducer = time.perf_counter()
worked = rdcr.reduce_case(case)
time_end_reducer = time.perf_counter()
reducer_time = time_end_reducer - time_start_reducer
except builder.BuildException as e:
print(f"BuildException: {e}")
continue
if not output_directory:
case_id = ddb.record_case(case)
ddb.record_timing(
case_id,
generator_time,
generator_try_count,
bisector_time,
bisector_steps,
reducer_time,
)
else:
h = abs(hash(str(case)))
path = output_directory / Path(f"case_{counter:08}-{h:019}.tar")
logging.debug("Writing case to {path}...")
case.to_file(path)
counter += 1
def _absorb() -> None:
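# Absorb case tar files into the database: a single file directly, or every *.tar in a directory via a process pool.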
def read_into_db(file: Path) -> None:
# Why another db here?
# https://docs.python.org/3/library/sqlite3.html#sqlite3.threadsafety
# “Threads may share the module, but not connections.”
# Of course we are using multiple processes here, but the processes
# are a copy of each other and who knows how things are implemented,
# so better be safe than sorry and create a new connection,
# especially when the next sentence is:
# "However, this may not always be true."
# (They may just refer to the option of having sqlite compiled with
# SQLITE_THREADSAFE=0)
db = database.CaseDatabase(config, config.casedb)
case = utils.Case.from_file(config, file)
db.record_case(case)
if Path(args.absorb_object).is_file():
read_into_db(Path(args.absorb_object))
exit(0)
pool = Pool(10)
absorb_directory = Path(args.absorb_object).absolute()
paths = [p for p in absorb_directory.iterdir() if p.match("*.tar")]
len_paths = len(paths)
len_len_paths = len(str(len_paths))
print("Absorbing... ", end="", flush=True)
status_str = ""
counter = 0
start_time = time.perf_counter()
for _ in pool.imap_unordered(read_into_db, paths):
counter += 1
print("\b" * len(status_str), end="", flush=True)
delta_t = time.perf_counter() - start_time
status_str = f"{{: >{len_len_paths}}}/{len_paths} {delta_t:.2f}s".format(
counter
)
print(status_str, end="", flush=True)
print("")
def _tofile() -> None:
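# Write the case with the given ID from the database to ./case_<id>.tar.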
case_pre = ddb.get_case_from_id(args.case_id)
if not case_pre:
print(f"Found no case for ID {args.case_id}")
exit(1)
else:
case = case_pre
print(f"Saving case to ./case_{args.case_id}.tar")
case.to_file(Path(f"./case_{args.case_id}.tar"))
def _rereduce() -> None:
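# Reduce the code in args.code_path with respect to the marker and settings of an existing case.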
with open(args.code_path, "r") as f:
rereduce_code = f.read()
case = ddb.get_case_from_id_or_die(args.case_id)
print(f"Re-reducing code with respect to Case {args.case_id}", file=sys.stderr)
res = rdcr.reduce_code(
rereduce_code,
case.marker,
case.bad_setting,
case.good_settings,
preprocess=False,
)
print(res)
def _report() -> None:
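# Produce a bug report for a case: bisect and reduce it if necessary, sanity-check it (optionally against freshly pulled trunk), verify the bisection commit and print a GCC- or LLVM-style report; the final source is also saved to case.txt.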
pre_check_case = ddb.get_case_from_id(args.case_id)
if not pre_check_case:
print("No case with this ID.", file=sys.stderr)
exit(1)
else:
case = pre_check_case
if not case.bisection:
print("Case is not bisected. Starting bisection...", file=sys.stderr)
start_time = time.perf_counter()
worked = bsctr.bisect_case(case)
bisector_time = time.perf_counter() - start_time
if worked:
ddb.update_case(args.case_id, case)
g_time, gtc, b_time, b_steps, r_time = ddb.get_timing_from_id(args.case_id)
b_time = bisector_time
b_steps = bsctr.steps
ddb.record_timing(args.case_id, g_time, gtc, b_time, b_steps, r_time)
else:
print("Could not bisect case. Aborting...", file=sys.stderr)
exit(1)
# check for reduced and massaged code
if not case.reduced_code:
print("Case is not reduced. Starting reduction...", file=sys.stderr)
if rdcr.reduce_case(case):
ddb.update_case(args.case_id, case)
else:
print("Could not reduce case. Aborting...", file=sys.stderr)
exit(1)
massaged_code, _, _ = ddb.get_report_info_from_id(args.case_id)
if massaged_code:
case.reduced_code = massaged_code
bad_setting = case.bad_setting
bad_repo = repository.Repo(
bad_setting.compiler_config.repo, bad_setting.compiler_config.main_branch
)
is_gcc: bool = bad_setting.compiler_config.name == "gcc"
# Last sanity check
cpy = copy.deepcopy(case)
cpy.code = cast(str, case.reduced_code)
print("Normal interestingness test...", end="", file=sys.stderr, flush=True)
if not chkr.is_interesting(cpy, preprocess=False):
print("\nCase is not interesting! Aborting...", file=sys.stderr)
exit(1)
else:
print("OK", file=sys.stderr)
# Check against newest upstream
if args.pull:
print("Pulling Repo...", file=sys.stderr)
bad_repo.pull()
print("Interestingness test against main...", end="", file=sys.stderr)
cpy.bad_setting.rev = bad_repo.rev_to_commit(f"{bad_repo.main_branch}")
if not chkr.is_interesting(cpy, preprocess=False):
print(
"\nCase is not interesting on main! Might be fixed. Stopping...",
file=sys.stderr,
)
exit(0)
else:
print("OK", file=sys.stderr)
# Use newest main in report
case.bad_setting.rev = cpy.bad_setting.rev
# Check if bisection commit is what it should be
print("Checking bisection commit...", file=sys.stderr)
marker_prefix = utils.get_marker_prefix(case.marker)
bisection_setting = copy.deepcopy(cpy.bad_setting)
bisection_setting.rev = cast(str, cpy.bisection)
prebisection_setting = copy.deepcopy(bisection_setting)
repo = repository.Repo.repo_from_setting(bisection_setting)
prebisection_setting.rev = repo.rev_to_commit(f"{case.bisection}~")
bis_set = builder.find_alive_markers(
cpy.code, bisection_setting, marker_prefix, bldr
)
rebis_set = builder.find_alive_markers(
cpy.code, prebisection_setting, marker_prefix, bldr
)
if cpy.marker not in bis_set or cpy.marker in rebis_set:
print("Bisection commit is not correct! Aborting...", file=sys.stderr)
exit(1)
# Choose same opt level and newest version
possible_good_compiler = [
gs for gs in case.good_settings if gs.opt_level == bad_setting.opt_level
]
good_setting = utils.get_latest_compiler_setting_from_list(
bad_repo, possible_good_compiler
)
# Replace markers
source = cpy.code.replace(cpy.marker, "foo").replace(
utils.get_marker_prefix(cpy.marker), "bar"
)
bad_setting_tag = bad_setting.rev + " (trunk)"
bad_setting_str = f"{bad_setting.compiler_config.name}-{bad_setting_tag} -O{bad_setting.opt_level}"
tmp = bad_repo.rev_to_tag(good_setting.rev)
if not tmp:
good_setting_tag = good_setting.rev
else:
good_setting_tag = tmp
good_setting_str = f"{good_setting.compiler_config.name}-{good_setting_tag} -O{good_setting.opt_level}"
def to_collapsed(
s: str, is_gcc: bool, summary: str = "Output", open: bool = False
) -> str:
if is_gcc:
s = (
"--------- OUTPUT ---------\n"
+ s
+ "\n---------- END OUTPUT ---------\n"
)
else:
sopen = "open" if open else ""
s = (
f"<details {sopen}><summary>{summary}</summary><p>\n"
+ s
+ "\n</p></details>"
)
return s
def to_code(code: str, is_gcc: bool, stype: str = "") -> str:
if not is_gcc:
return f"\n```{stype}\n" + code.rstrip() + "\n```"
return code
def print_cody_str(s: str, is_gcc: bool) -> None:
s = "`" + s + "`"
print(s)
def to_cody_str(s: str, is_gcc: bool) -> str:
if not is_gcc:
s = "`" + s + "`"
return s
def replace_rand(code: str) -> str:
# Replace .file with case.c
ex = re.compile(r"\t\.file\t(\".*\")")
m = ex.search(code)
if m:
res = m.group(1)
return code.replace(res, '"case.c"')
return code
def replace_file_name_IR(ir: str) -> str:
head = "; ModuleID = 'case.c'\n" + 'source_filename = "case.c"\n'
tail = ir.split("\n")[2:]
ir = head + "\n".join(tail)
return ir
def keep_only_main(code: str) -> str:
lines = list(code.split("\n"))
first = 0
for i, line in enumerate(lines):
if "main:" in line:
first = i
break
last = first + 1
ex = re.compile(".*.cfi_endproc")
for i, line in enumerate(lines[last:], start=last):
if ex.match(line):
last = i
break
return "\n".join(lines[first:last])
def prep_asm(asm: str, is_gcc: bool) -> str:
asm = replace_rand(asm)
asm = keep_only_main(asm)
asm = to_code(asm, is_gcc, "asm")
asm = to_collapsed(asm, is_gcc, summary="Reduced assembly")
return asm
def prep_IR(ir: str) -> str:
ir = replace_file_name_IR(ir)
ir = to_code(ir, False, "ll")
ir = to_collapsed(ir, False, summary="Emitted IR")
return ir
print(
f"Dead Code Elimination Regression at -O{bad_setting.opt_level} (trunk vs. {good_setting_tag.split('-')[-1]}) {args.case_id}"
)
print("---------------")
print(to_cody_str(f"cat case.c #{args.case_id}", is_gcc))
print(to_code(source, is_gcc, "c"))
print(
f"`{bad_setting_str}` can not eliminate `foo` but `{good_setting_str}` can.\n"
)
# Compile
if is_gcc:
case.bad_setting.add_flag("-emit-llvm")
good_setting.add_flag("-emit-llvm")
asm_bad = builder.get_asm_str(source, case.bad_setting, bldr)
asm_good = builder.get_asm_str(source, good_setting, bldr)
print_cody_str(f"{bad_setting_str} -S -o /dev/stdout case.c", is_gcc)
print(prep_asm(asm_bad, is_gcc))
print()
print_cody_str(f"{good_setting_str} -S -o /dev/stdout case.c", is_gcc)
print(prep_asm(asm_good, is_gcc))
print()
print(
"Bisects to: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h="
+ str(case.bisection)
)
print()
print("----- Build information -----")
print(f"----- {bad_setting_tag}")
print(
builder.get_verbose_compiler_info(bad_setting, bldr).split("lto-wrapper\n")[
-1
]
)
print(f"\n----- {good_setting_tag}")
print(
builder.get_verbose_compiler_info(good_setting, bldr).split(
"lto-wrapper\n"
)[-1]
)
else:
print("Target: `x86_64-unknown-linux-gnu`")
ir_bad = builder.get_llvm_IR(source, case.bad_setting, bldr)
ir_good = builder.get_llvm_IR(source, good_setting, bldr)
asm_bad = builder.get_asm_str(source, case.bad_setting, bldr)
asm_good = builder.get_asm_str(source, good_setting, bldr)
print("\n------------------------------------------------\n")
print_cody_str(
f"{bad_setting_str} [-emit-llvm] -S -o /dev/stdout case.c", is_gcc
)
print(prep_IR(ir_bad))
print()
print(prep_asm(asm_bad, is_gcc))
print()
print("\n------------------------------------------------\n")
print_cody_str(
f"{good_setting_str} [-emit-llvm] -S -o /dev/stdout case.c", is_gcc
)
print()
print(prep_IR(ir_good))
print()
print(prep_asm(asm_good, is_gcc))
print("\n------------------------------------------------\n")
print("### Bisection")
bisection_setting = copy.deepcopy(case.bad_setting)
bisection_setting.rev = cast(str, case.bisection)
print(f"Bisected to: {case.bisection}")
author = get_llvm_github_commit_author(cast(str, case.bisection))
if author:
print(f"Committed by: @{author}")
print("\n------------------------------------------------\n")
bisection_asm = builder.get_asm_str(source, bisection_setting, bldr)
bisection_ir = builder.get_llvm_IR(source, bisection_setting, bldr)
print(
to_cody_str(
f"{bisection_setting.report_string()} [-emit-llvm] -S -o /dev/stdout case.c",
is_gcc,
)
)
print(prep_IR(bisection_ir))
print()
print(prep_asm(bisection_asm, is_gcc))
print("\n------------------------------------------------\n")
prebisection_setting = copy.deepcopy(bisection_setting)
prebisection_setting.rev = bad_repo.rev_to_commit(f"{bisection_setting.rev}~")
print(f"Previous commit: {prebisection_setting.rev}")
print(
"\n"
+ to_cody_str(
f"{prebisection_setting.report_string()} [-emit-llvm] -S -o /dev/stdout case.c",
is_gcc,
)
)
prebisection_asm = builder.get_asm_str(source, prebisection_setting, bldr)
prebisection_ir = builder.get_llvm_IR(source, prebisection_setting, bldr)
print()
print(prep_IR(prebisection_ir))
print()
print(prep_asm(prebisection_asm, is_gcc))
with open("case.txt", "w") as f:
f.write(source)
print("Saved case.txt...", file=sys.stderr)
def _diagnose() -> None:
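# Print a diagnostic overview of a case: its key values plus interestingness and sanity checks for the original, preprocessed, reduced and massaged code, and a re-check of the bisection commit.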
width = 50
def ok_fail(b: bool) -> str:
if b:
return "OK"
else:
return "FAIL"
def nice_print(name: str, value: str) -> None:
print(("{:.<" f"{width}}}").format(name), value)
if args.case_id:
case = ddb.get_case_from_id_or_die(args.case_id)
else:
case = utils.Case.from_file(config, Path(args.file))
repo = repository.Repo(
case.bad_setting.compiler_config.repo,
case.bad_setting.compiler_config.main_branch,
)
def sanitize_values(
config: utils.NestedNamespace,
case: utils.Case,
prefix: str,
chkr: checker.Checker,
) -> None:
empty_body_code = chkr._emtpy_marker_code_str(case)
with tempfile.NamedTemporaryFile(suffix=".c") as tf:
with open(tf.name, "w") as f:
f.write(empty_body_code)
res_comp_warnings = checker.check_compiler_warnings(
config.gcc.sane_version,
config.llvm.sane_version,
Path(tf.name),
case.bad_setting.get_flag_str(),
10,
)
nice_print(
prefix + "Sanity: compiler warnings",
ok_fail(res_comp_warnings),
)
res_use_ub_san = checker.use_ub_sanitizers(
config.llvm.sane_version,
Path(tf.name),
case.bad_setting.get_flag_str(),
10,
10,
)
nice_print(
prefix + "Sanity: undefined behaviour", ok_fail(res_use_ub_san)
)
res_ccomp = checker.verify_with_ccomp(
config.ccomp,
Path(tf.name),
case.bad_setting.get_flag_str(),
10,
)
nice_print(
prefix + "Sanity: ccomp",
ok_fail(res_ccomp),
)
def checks(case: utils.Case, prefix: str) -> None:
nice_print(
prefix + "Check marker", ok_fail(chkr.is_interesting_wrt_marker(case))
)
nice_print(prefix + "Check CCC", ok_fail(chkr.is_interesting_wrt_ccc(case)))
nice_print(
prefix + "Check static. annotated",
ok_fail(chkr.is_interesting_with_static_globals(case)),
)
res_empty = chkr.is_interesting_with_empty_marker_bodies(case)
nice_print(prefix + "Check empty bodies", ok_fail(res_empty))
if not res_empty:
sanitize_values(config, case, prefix, chkr)
print(("{:=^" f"{width}}}").format(" Values "))
nice_print("Marker", case.marker)
nice_print("Code lenght", str(len(case.code)))
nice_print("Bad Setting", str(case.bad_setting))
same_opt = [
gs for gs in case.good_settings if gs.opt_level == case.bad_setting.opt_level
]
nice_print(
"Newest Good Setting",
str(utils.get_latest_compiler_setting_from_list(repo, same_opt)),
)
checks(case, "")
cpy = copy.deepcopy(case)
if not (
code_pp := preprocessing.preprocess_csmith_code(
case.code, utils.get_marker_prefix(case.marker), case.bad_setting, bldr
)
):
print("Code could not be preprocessed. Skipping perprocessed checks")
else:
cpy.code = code_pp
checks(cpy, "PP: ")
if case.reduced_code:
cpy = copy.deepcopy(case)
cpy.code = case.reduced_code
checks(cpy, "Reduced: ")
if args.case_id:
massaged_code, _, _ = ddb.get_report_info_from_id(args.case_id)
if massaged_code:
cpy.code = massaged_code
checks(cpy, "Massaged: ")
if case.bisection:
cpy = copy.deepcopy(case)
nice_print("Bisection", case.bisection)
cpy.bad_setting.rev = case.bisection
prev_rev = repo.rev_to_commit(case.bisection + "~")
nice_print("Bisection prev commit", prev_rev)
bis_res_og = chkr.is_interesting(cpy, preprocess=False)
cpy.bad_setting.rev = prev_rev
bis_prev_res_og = chkr.is_interesting(cpy, preprocess=False)
nice_print(
"Bisection test original code", ok_fail(bis_res_og and not bis_prev_res_og)
)
cpy = copy.deepcopy(case)
if cpy.reduced_code:
cpy.code = cpy.reduced_code
cpy.bad_setting.rev = case.bisection
bis_res = chkr.is_interesting(cpy, preprocess=False)
cpy.bad_setting.rev = prev_rev
bis_prev_res = chkr.is_interesting(cpy, preprocess=False)
nice_print(
"Bisection test reduced code", ok_fail(bis_res and not bis_prev_res)
)
if case.reduced_code:
print(case.reduced_code)
def _check_reduced() -> None:
"""Check code against every good and bad setting of a case.
Args:
Returns:
None:
"""
def ok_fail(b: bool) -> str:
if b:
return "OK"
else:
return "FAIL"
def nice_print(name: str, value: str) -> None:
width = 100
print(("{:.<" f"{width}}}").format(name), value)
with open(args.code_path, "r") as f:
new_code = f.read()
case = ddb.get_case_from_id_or_die(args.case_id)
prefix = utils.get_marker_prefix(case.marker)
bad_alive = builder.find_alive_markers(new_code, case.bad_setting, prefix, bldr)
nice_print(f"Bad {case.bad_setting}", ok_fail(case.marker in bad_alive))
for gs in case.good_settings:
good_alive = builder.find_alive_markers(new_code, gs, prefix, bldr)
nice_print(f"Good {gs}", ok_fail(case.marker not in good_alive))
case.code = new_code
case.reduced_code = new_code
nice_print("Check", ok_fail(chkr.is_interesting(case, preprocess=False)))
# Useful when working with watch -n 0 to see that something happened
print(random.randint(0, 1000))
def _cache() -> None:
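# Compiler cache maintenance: 'clean' removes unfinished build directories, 'stats' prints how many cached clang and GCC builds exist.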
if args.what == "clean":
print("Cleaning...")
for c in Path(config.cachedir).iterdir():
if not (c / "DONE").exists():
try:
os.rmdir(c)
except FileNotFoundError:
print(c, "spooky. It just disappeared...")
except OSError:
print(c, "is not empty but also not done!")
print("Done")
elif args.what == "stats":
count_gcc = 0
count_clang = 0
for c in Path(config.cachedir).iterdir():
if c.name.startswith("clang"):
count_clang += 1
else:
count_gcc += 1
tot = count_gcc + count_clang
print("Amount compilers:", tot)
print("Amount clang: {} {:.2f}%".format(count_clang, count_clang / tot * 100))
print("Amount GCC: {} {:.2f}%".format(count_gcc, count_gcc / tot * 100))
def _asm() -> None:
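# Save the assembly of the case code (and reduced code, if present) for the bad, newest good and bisection settings as .s files.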
def save_wrapper(name: str, content: str) -> None:
utils.save_to_file(Path(name + ".s"), content)
print(f"Saving {name + '.s'}...")
case = ddb.get_case_from_id_or_die(args.case_id)
bad_repo = repository.Repo(
case.bad_setting.compiler_config.repo,
case.bad_setting.compiler_config.main_branch,
)
same_opt = [
gs for gs in case.good_settings if gs.opt_level == case.bad_setting.opt_level
]
good_setting = utils.get_latest_compiler_setting_from_list(bad_repo, same_opt)
asmbad = builder.get_asm_str(case.code, case.bad_setting, bldr)
asmgood = builder.get_asm_str(case.code, good_setting, bldr)
save_wrapper("asmbad", asmbad)
save_wrapper("asmgood", asmgood)
if case.reduced_code:
reducedasmbad = builder.get_asm_str(case.reduced_code, case.bad_setting, bldr)
reducedasmgood = builder.get_asm_str(case.reduced_code, good_setting, bldr)
save_wrapper("reducedasmbad", reducedasmbad)
save_wrapper("reducedasmgood", reducedasmgood)
if case.bisection:
bisection_setting = copy.deepcopy(case.bad_setting)
bisection_setting.rev = case.bisection
asmbisect = builder.get_asm_str(case.code, bisection_setting, bldr)
save_wrapper("asmbisect", asmbisect)
if case.reduced_code:
reducedasmbisect = builder.get_asm_str(
case.reduced_code, bisection_setting, bldr
)
save_wrapper("reducedasmbisect", reducedasmbisect)
print(case.marker)
def _get() -> None:
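# Print one stored field of a case: original/reduced/massaged code, bisection, bug report link or fixing revision.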
# Code is printed with end="" so that no extra trailing newline is added.
case_id: int = int(args.case_id)
if args.what in ["ocode", "rcode", "bisection"]:
case = ddb.get_case_from_id_or_die(args.case_id)
if args.what == "ocode":
print(case.code, end="")
return
elif args.what == "rcode":
print(case.reduced_code, end="")
return
elif args.what == "bisection":
print(case.bisection, end="")
return
else:
mcode, link, fixed = ddb.get_report_info_from_id(case_id)
if args.what == "link":
print(link)
return
elif args.what == "fixed":
print(fixed)
return
elif args.what == "mcode":
print(mcode, end="")
return
logging.warning(
"Whoops, this should not have"
" happened because the parser forces "
"`what` to only allow some strings."
)
return
def _set() -> None:
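# Update one stored field of a case, re-checking interestingness (and the bisection for massaged code) before saving; 'null' clears a field where supported.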
case_id: int = int(args.case_id)
case = ddb.get_case_from_id_or_die(case_id)
mcode, link, fixed = ddb.get_report_info_from_id(case_id)
repo = repository.Repo(
case.bad_setting.compiler_config.repo,
case.bad_setting.compiler_config.main_branch,
)
if args.what == "ocode":
with open(args.var, "r") as f:
new_code = f.read()
case.code = new_code
if chkr.is_interesting(case):
ddb.update_case(case_id, case)
else:
logging.critical(
"The provided code is not interesting wrt to the case. Will not save!"
)
exit(1)
return
elif args.what == "rcode":
if args.var == "null":
print("Old reduced_code:")
print(case.reduced_code)
case.reduced_code = None
ddb.update_case(case_id, case)
return
with open(args.var, "r") as f:
rcode = f.read()
old_code = case.code
case.code = rcode
if chkr.is_interesting(case):
case.code = old_code
case.reduced_code = rcode
ddb.update_case(case_id, case)
else:
logging.critical(
"The provided code is not interesting wrt to the case. Will not save!"
)
exit(1)
return
elif args.what == "bisection":
if args.var == "null":
print("Old bisection:", case.bisection)
case.bisection = None
ddb.update_case(case_id, case)
return
# Also acts as check that the given rev is ok
rev = repo.rev_to_commit(args.var)
# Just in case someone accidentally overrides things...
logging.info(f"Previous bisection for case {case_id}: {case.bisection}")
case.bisection = rev
ddb.update_case(case_id, case)
return
elif args.what == "link":
if args.var == "null":
print("Old link:", link)
ddb.record_reported_case(case_id, mcode, None, fixed)
return
tmp: str = args.var
tmp = tmp.strip()
ddb.record_reported_case(case_id, mcode, tmp, fixed)
return
elif args.what == "fixed":
if args.var == "null":
print("Old fixed:", fixed)
ddb.record_reported_case(case_id, mcode, link, None)
return
rev = repo.rev_to_commit(args.var)
case.bad_setting.rev = rev
if not chkr.is_interesting(case):
ddb.record_reported_case(case_id, mcode, link, rev)
print("Fixed")
else:
logging.critical(f"Case {case_id} was not fixed by {args.var}! Not saving!")
exit(1)
return
elif args.what == "mcode":
if args.var == "null":
print("Old massaged code:")
print(mcode)
ddb.record_reported_case(case_id, None, link, fixed)
return
if not case.bisection:
logging.fatal(
"Can not save massaged code to a case that is not bisected. Bad things could happen. Stopping..."
)
exit(1)
with open(args.var, "r") as f:
new_mcode = f.read()
old_bisection = case.bisection
case.code = new_mcode
if chkr.is_interesting(case):
print("Checking bisection...")
if not bsctr.bisect_case(case, force=True):
logging.critical("Checking bisection failed...")
exit(1)
if case.bisection != old_bisection:
logging.critical(
"Bisection of provided massaged code does not match the original bisection!"
)
exit(1)
ddb.record_reported_case(case_id, new_mcode, link, fixed)
else:
logging.critical("The provided massaged code is not interesting!")
exit(1)
return
logging.warning(
"Whoops, this should not have"
" happened because the parser forces "
"`what` to only allow some strings."
)
return
def _build() -> None:
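# Build the requested revisions of the chosen compiler project (optionally with extra patches) and print the result of each build.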
compiler_config = utils.get_compiler_config(config, args.project)
additional_patches: list[Path] = []
if args.add_patches:
additional_patches = [Path(patch).absolute() for patch in args.add_patches]
for rev in args.rev:
print(
bldr.build(
compiler_config,
rev,
additional_patches=additional_patches,
force=args.force,
)
)
def _reduce() -> None:
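# Reduce the given case IDs; record the reduction time only when the run took long enough to indicate real work.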
for i, case_id in enumerate(args.case_id):
print(f"Reducing {case_id}. Done {i}/{len(args.case_id)}", file=sys.stderr)
pre_case = ddb.get_case_from_id(case_id)
if not pre_case:
if len(args.case_id) == 1:
print(f"Case ID {case_id} is not known. Aborting...", file=sys.stderr)
exit(1)
else:
print(f"Case ID {case_id} is not known. Continuing...", file=sys.stderr)
continue
else:
case = pre_case
start_time = time.perf_counter()
if rdcr.reduce_case(case, force=args.force):
ddb.update_case(case_id, case)
reducer_time = time.perf_counter() - start_time
# If the reduction takes less than 5 seconds,
# we can assume that the reduction was already done
if reducer_time > 5.0:
gtime, gtc, b_time, b_steps, _ = ddb.get_timing_from_id(case_id)
ddb.record_timing(case_id, gtime, gtc, b_time, b_steps, reducer_time)
else:
print(f"{case_id} failed...", file=sys.stderr)
print("Done")
def _bisect() -> None:
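# Bisect the given case IDs; record the bisection time and step count only when the run took long enough to indicate real work.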
for i, case_id in enumerate(args.case_id):
print(f"Bisecting {case_id}. Done {i}/{len(args.case_id)}", file=sys.stderr)
pre_case = ddb.get_case_from_id(case_id)
if not pre_case:
if len(args.case_id) == 1:
print(f"Case ID {case_id} is not known. Aborting...", file=sys.stderr)
exit(1)
else:
print(f"Case ID {case_id} is not known. Continuing...", file=sys.stderr)
continue
else:
case = pre_case
start_time = time.perf_counter()
if bsctr.bisect_case(case, force=args.force):
ddb.update_case(case_id, case)
bisector_time = time.perf_counter() - start_time
# if the bisection took less than 5 seconds
# we can assume that it was already bisected
if bisector_time > 5.0:
gtime, gtc, _, _, rtime = ddb.get_timing_from_id(case_id)
ddb.record_timing(
case_id, gtime, gtc, bisector_time, bsctr.steps, rtime
)
else:
print(f"{case_id} failed...", file=sys.stderr)
print("Done", file=sys.stderr)
def _edit() -> None:
if "EDITOR" not in os.environ:
print("Did not find EDITOR variable. Using nano...", file=sys.stderr)
subprocess.run(["nano", config.config_path])
else:
subprocess.run(os.environ["EDITOR"].split(" ") + [config.config_path])
def _unreported() -> None:
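# List not-yet-reported cases grouped by bisection commit, with optional filters for compiler, opt level, reduction state and a specific good compiler version.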
query = """
WITH exclude_bisections AS (
select distinct bisection from reported_cases join cases on cases.case_id = reported_cases.case_id
where fixed_by is not NULL
or bug_report_link is not NULL
)
"""
if args.good_version or args.OX_only:
query += f"""
,concrete_good AS (
select case_id from good_settings join compiler_setting on good_settings.compiler_setting_id = compiler_setting.compiler_setting_id
where 1
"""
if args.good_version:
gcc_repo = repository.Repo(config.gcc.repo, config.gcc.main_branch)
llvm_repo = repository.Repo(config.llvm.repo, config.llvm.main_branch)
try:
rev = gcc_repo.rev_to_commit(args.good_version)
except Exception:
rev = llvm_repo.rev_to_commit(args.good_version)
query += f" and rev = '{rev}'"
query += ")"
query += """
select MIN(cases.case_id), bisection, count(bisection) as cnt from cases
join compiler_setting on cases.bad_setting_id = compiler_setting.compiler_setting_id
"""
if args.good_version:
query += "\njoin concrete_good on cases.case_id = concrete_good.case_id\n"
if args.reduced or args.not_reduced:
query += "\nleft join reported_cases on cases.case_id = reported_cases.case_id"
query += """
where bisection not in exclude_bisections
"""
if args.clang_only:
query += "\nand compiler = 'clang'"
elif args.gcc_only:
query += "\nand compiler = 'gcc'"
if args.OX_only:
query += f" and opt_level = '{args.OX_only}'"
query += "\ngroup by bisection"
if args.reduced:
query += "\n having reduced_code_sha1 is not null "
elif args.not_reduced:
query += "\n having reduced_code_sha1 is null "
query += "\norder by cnt desc"
res = ddb.con.execute(query).fetchall()
if not res:
return
if res[-1][1] is None:
res = res[:-1]
if args.id_only:
for case_id, _, _ in res:
print(case_id)
else:
print("{: <8} {: <45} {}".format("ID", "Bisection", "Count"))
print("{:-<64}".format(""))
for case_id, bisection, count in res:
print("{: <8} {: <45} {}".format(case_id, bisection, count))
print("{:-<64}".format(""))
print("{: <8} {: <45} {}".format("ID", "Bisection", "Count"))
def _reported() -> None:
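# List reported cases (those with a bug report link), optionally with their good compiler settings, filtered by compiler.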
query = """
with rep as (
select cases.case_id, bisection, bug_report_link, compiler from cases
join compiler_setting on bad_setting_id = compiler_setting_id
left join reported_cases on cases.case_id = reported_cases.case_id
where bug_report_link is not null order by cases.case_id
)
select rep.case_id, bisection, bug_report_link
"""
if args.good_settings:
query += """, compiler_setting.compiler, compiler_setting.rev, compiler_setting.opt_level
from rep
left join good_settings on rep.case_id = good_settings.case_id
left join compiler_setting on good_settings.compiler_setting_id = compiler_setting.compiler_setting_id
"""
else:
query += " from rep"
query += " where 1 "
if args.clang_only or args.llvm_only:
query += " and compiler = 'clang'"
elif args.gcc_only:
query += " and compiler = 'gcc'"
query += " order by rep.case_id"
if not (res := ddb.con.execute(query).fetchall()):
return
if args.id_only:
for case_id, _, _ in res:
print(case_id)
elif args.good_settings:
gcc_repo = repository.Repo(config.gcc.repo, config.gcc.main_branch)
llvm_repo = repository.Repo(config.llvm.repo, config.llvm.main_branch)
print(
"{: <8} {: <45} {: <45} {}".format(
"ID", "Bisection", "Good Settings", "Link"
)
)
last_case_id = -1
for case_id, bisection, link, name, rev, opt_level in res:
if name == "gcc":
maybe_tag = gcc_repo.rev_to_tag(rev)
else:
maybe_tag = llvm_repo.rev_to_tag(rev)
nice_rev = maybe_tag if maybe_tag else rev
comp_str = f"{name}-{nice_rev} -O{opt_level}"
if last_case_id != case_id:
last_case_id = case_id
print("{:-<155}".format(""))
print(
"{: <8} {: <45} {: <45} {}".format(
case_id, bisection, comp_str, link
)
)
else:
print("{: <8} {: <45} {: <45} {}".format("", "", comp_str, ""))
print("{:-<155}".format(""))
print(
"{: <8} {: <45} {: <45} {}".format(
"ID", "Bisection", "Good Settings", "Link"
)
)
else:
print("{: <8} {: <45} {}".format("ID", "Bisection", "Link"))
print("{:-<110}".format(""))
for case_id, bisection, link in res:
print("{: <8} {: <45} {}".format(case_id, bisection, link))
print("{:-<110}".format(""))
print("{: <8} {: <45} {}".format("ID", "Bisection", "Link"))
def _findby() -> None:
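# Find case IDs by bug report link, fixing revision, a case tar file, or a plain source file (matched via SHA1 of the code).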
if args.what == "link":
link_query = "SELECT case_id FROM reported_cases WHERE bug_report_link = ?"
res = ddb.con.execute(link_query, (args.var.strip(),)).fetchall()
for r in res:
print(r[0])
return
elif args.what == "fixed":
query = "SELECT case_id FROM reported_cases WHERE fixed_by = ?"
res = ddb.con.execute(query, (args.var.strip(),)).fetchall()
for r in res:
print(r[0])
return
elif args.what == "case":
case = utils.Case.from_file(config, Path(args.var))
code_sha1 = hashlib.sha1(case.code.encode("utf-8")).hexdigest()
# Try if we have any luck with just using code
code_query = "SELECT cases.case_id FROM cases LEFT OUTER JOIN reported_cases ON cases.case_id = reported_cases.case_id WHERE code_sha1 = ? OR reduced_code_sha1 = ? OR massaged_code_sha1 = ?"
res_ocode = ddb.con.execute(
code_query, (code_sha1, code_sha1, code_sha1)
).fetchall()
possible = set([i[0] for i in res_ocode])
if case.reduced_code:
rcode_sha1 = hashlib.sha1(case.reduced_code.encode("utf-8")).hexdigest()
res_ocode = ddb.con.execute(
code_query, (rcode_sha1, rcode_sha1, rcode_sha1)
).fetchall()
possible.update([i[0] for i in res_ocode])
if case.bisection:
other = ddb.con.execute(
"SELECT case_id FROM cases WHERE marker = ? AND bisection = ?",
(case.marker, case.bisection),
).fetchall()
else:
other = ddb.con.execute(
"SELECT case_id FROM cases WHERE marker = ?", (case.marker)
).fetchall()
if len(possible) > 0:
possible = possible.intersection([i[0] for i in other])
else:
possible = set([i[0] for i in other])
for i in possible:
print(i)
return
elif args.what == "code":
with open(args.var, "r") as f:
code = f.read()
code_sha1 = hashlib.sha1(code.encode("utf-8")).hexdigest()
res = ddb.con.execute(
"SELECT cases.case_id FROM cases LEFT OUTER JOIN reported_cases ON cases.case_id = reported_cases.case_id WHERE code_sha1 = ? OR reduced_code_sha1 = ? OR massaged_code_sha1 = ?",
(code_sha1, code_sha1, code_sha1),
).fetchall()
for i in res:
print(i[0])
return
return
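# Entry point: build the shared infrastructure once (patch database, builder,
# checker, case generator, reducer, bisector and the case database), then
# dispatch to the handler that matches the chosen subcommand.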
if __name__ == "__main__":
config, args = utils.get_config_and_parser(parsers.main_parser())
patchdb = patchdatabase.PatchDB(config.patchdb)
bldr = builder.Builder(config, patchdb, args.cores)
chkr = checker.Checker(config, bldr)
gnrtr = generator.CSmithCaseGenerator(config, patchdb, args.cores)
rdcr = reducer.Reducer(config, bldr)
bsctr = bisector.Bisector(config, bldr, chkr)
ddb = database.CaseDatabase(config, config.casedb)
if args.sub == "run":
_run()
elif args.sub == "get":
_get()
elif args.sub == "set":
_set()
elif args.sub == "absorb":
_absorb()
elif args.sub == "tofile":
_tofile()
elif args.sub == "rereduce":
_rereduce()
elif args.sub == "report":
_report()
elif args.sub == "diagnose":
if not args.case_id and not args.file:
print("Need a file or a case id to work with", file=sys.stderr)
_diagnose()
elif args.sub == "checkreduced":
_check_reduced()
elif args.sub == "cache":
_cache()
elif args.sub == "asm":
_asm()
elif args.sub == "build":
_build()
elif args.sub == "reduce":
_reduce()
elif args.sub == "bisect":
_bisect()
elif args.sub == "edit":
_edit()
elif args.sub == "unreported":
_unreported()
elif args.sub == "reported":
_reported()
elif args.sub == "findby":
_findby()
elif args.sub == "init":
init.main()
gnrtr.terminate_processes()
# === Next file (repository: pagreene/grip) ===
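# Integration-test driver for a GRIP server: connects to the endpoint given on
# the command line, requires the `test_graph` graph to be empty, then loads every
# tests/ot_*.py module and runs its test_* functions against a freshly created
# graph, reporting how many tests passed.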
from __future__ import absolute_import, print_function, unicode_literals
import os
import sys
import imp
from glob import glob
import traceback
BASE = os.path.dirname(os.path.abspath(__file__))
TESTS = os.path.join(BASE, "tests")
GRIPQL = os.path.join(os.path.dirname(BASE), "gripql", "python")
GRAPH = "test_graph"
sys.path.append(GRIPQL)
import gripql # noqa: E402
if __name__ == "__main__":
server = sys.argv[1]
if len(sys.argv) > 2:
tests = sys.argv[2:]
else:
tests = []
conn = gripql.Connection(server)
if GRAPH in conn.listGraphs():
        vertex_count = int(list(conn.graph(GRAPH).query().V().count())[0]['count'])
        print(vertex_count)
        if vertex_count != 0:
print("Need to start with empty DB: %s" % (GRAPH))
sys.exit()
correct = 0
total = 0
for a in glob(os.path.join(TESTS, "ot_*.py")):
name = os.path.basename(a)[:-3]
if len(tests) == 0 or name[3:] in tests:
mod = imp.load_source('test.%s' % name, a)
for f in dir(mod):
if f.startswith("test_"):
func = getattr(mod, f)
if callable(func):
try:
print("Running: %s %s " % (name, f[5:]))
conn.addGraph(GRAPH)
e = func(conn.graph(GRAPH))
if len(e) == 0:
correct += 1
print("Passed: %s %s " % (name, f[5:]))
else:
print("Failed: %s %s " % (name, f[5:]))
for i in e:
print("\t- %s" % (i))
except Exception as e:
print("Crashed: %s %s %s" % (name, f[5:], e))
traceback.print_exc()
total += 1
conn.deleteGraph(GRAPH)
print("Passed %s out of %s" % (correct, total))
if correct != total:
sys.exit(1)
# === Next file (repository: multirotorsociety/SAFMC-19-D2-Autonomous-Drone) ===
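# Raspberry Pi camera demo: continuously captures 426x240 frames, isolates the
# green parts of each frame, finds the largest green contour and draws its
# centroid plus the (dX, dY) offset from the frame centre on a preview window,
# presumably to help steer the drone toward a green target.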
from picamera.array import PiRGBArray
from picamera import PiCamera
import cv2
import time
import numpy as np
import imutils
from PIL import Image
def image_convert_to_perc_green(img):
b_channel = np.array(img[:,:,0]).astype('float')
g_channel = np.array(img[:,:,1]).astype('float')
r_channel = np.array(img[:,:,2]).astype('float')
bgr_channel = np.add((np.add(b_channel, g_channel)), r_channel)
img_rec_green = np.divide(g_channel, bgr_channel)
img_rec_green = img_rec_green * 255
img_rec_green = np.floor(img_rec_green).astype('uint8')
return img_rec_green
def image_grab_green_channel(img):
g = img.copy()
# set blue and red channels to 0
g[:, :, 0] = 0
g[:, :, 2] = 0
g = cv2.cvtColor(g, cv2.COLOR_BGR2GRAY)
return g
def image_grab_green_hsv(img):
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv, (36, 25, 25), (70, 255,255))
imask = mask>0
green = np.zeros_like(img, np.uint8)
green[imask] = img[imask]
green = cv2.cvtColor(green, cv2.COLOR_BGR2GRAY)
return green
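# Of the three green-extraction helpers above, only image_grab_green_hsv is used
# below; the HSV range (36, 25, 25)-(70, 255, 255) roughly covers green hues.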
# initialize the camera and grab a reference to the raw camera capture
camera = PiCamera()
camera.resolution = (426, 240)
camera.framerate = 32
rawCapture = PiRGBArray(camera, size=(426, 240))
# allow the camera to warmup
time.sleep(0.1)
# capture frames from the camera
for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
# grab the raw NumPy array representing the image, then initialize the timestamp
# and occupied/unoccupied text
image = frame.array
height, width = image.shape[:2]
centre = (int(width/2), int(height/2))
output = image.copy()
cv2.circle(output, centre, 3, (255, 255, 255), -1)
#image = image_grab_green(image)
#image = image_convert_to_perc_green(image)
image = image_grab_green_hsv(image)
#gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(image, (27, 27), 0)
thresh = cv2.threshold(blurred, 75, 255, cv2.THRESH_BINARY)[1]
cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
# loop over the contours
try:
#for c in cnts:
# compute the center of the contour
c = cnts[0]
M = cv2.moments(c)
cX = int(M["m10"] / M["m00"])
cY = int(M["m01"] / M["m00"])
# draw the contour and center of the shape on the image
cv2.drawContours(output, [c], -1, (0, 255, 0), 2)
cv2.circle(output, (cX, cY), 3, (255, 255, 255), -1)
cv2.putText(output, "center", (cX - 20, cY - 20),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
cv2.line(output, centre, (cX, cY), (255,0,0), 2)
dX = cX - centre[0]
dY = centre[1] - cY
cv2.putText(output, ("(" + str(dX) + ", " + str(dY) + " )"), (centre[0] - 20, centre[1] - 20),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
except:
pass
# show the frame
cv2.imshow("thresh", thresh)
cv2.imshow("preview", output)
#img2 = Image.fromarray(frame, 'RGB')
#img2.show()
key = cv2.waitKey(1) & 0xFF
# clear the stream in preparation for the next frame
rawCapture.truncate(0)
# if the `q` key was pressed, break from the loop
if key == ord("q"):
break
cv2.destroyAllWindows()
camera.close()
# === Next file: strava.cz canteen menu scraper ===
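# Scraper for strava.cz (a Czech school-canteen ordering system): logs in with
# the credentials given on the command line, fetches the order overview page and
# prints either today's selected meals (simple mode) or a checkbox-style table
# of the next few days' orders.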
import requests
from bs4 import BeautifulSoup
import sys
import datetime
number_of_listings = 5
simple = True
# Args
if (len(sys.argv) < 4):
print("Usage : python strava.py jmeno heslo jidelna")
sys.exit(1)
# Start the session
session = requests.Session()
# Create the payload
payload = {'uzivatel' : sys.argv[1],
'heslo' : sys.argv[2],
'zarizeni' : sys.argv[3]
}
# Post the payload to the site to log in
s = session.post("https://www.strava.cz/Strava/Stravnik/prihlaseni", data=payload)
# Navigate to the next page and scrape the data
s = session.get('https://www.strava.cz/Strava/Stravnik/Objednavky')
#Parse
soup = BeautifulSoup(s.text, 'html.parser')
res = soup.find_all(class_="objednavka-obalka objednavka-obalka-jednotne")
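# Each "objednavka-obalka" block corresponds to one day's order. display_simple
# prints only today's selected meal names; display_table prints a checkbox-style
# listing ([x]/[ ]/[-]) for the next `number_of_listings` days.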
def display_simple():
# For the first `number_of_listings` listings
for x in res[:number_of_listings]:
day = x.find("div").find("div").text.split('\n')[1].split('\r')[0].strip()
# Only today
        if(int(day.split(' ')[2].strip()[:-1]) == int(datetime.datetime.now().strftime("%m")) and int(day.split(' ')[1].strip()[:-1]) == int(datetime.datetime.now().strftime("%d"))):
pass
else:
continue
# Find all the foods
foods = x.find_all(class_="objednavka-jidla-obalka")[0].find_all(class_="objednavka-jidlo-obalka")
for food in foods:
# Find the values
food_name = food.find(class_="objednavka-jidlo-nazev").text
food_type = food.find(class_="objednavka-jidlo-popis").text
food_value = food.find(class_="objednavka-jidlo-zmena").contents[1].contents[3].attrs["value"]
# Remove this if you need to
# This just removes the soup entry
if(food_type == "Polévka"):
continue
# Turn the value from text to markdown-like text
if food_value == "zaskrtnuto":
print((food_name).strip())
def display_table():
# For the first `number_of_listings` listings
for x in res[:number_of_listings]:
# Get the day and print
day = x.find("div").find("div").text.split('\n')[1].split('\r')[0].lstrip()
print(day)
# Find all the foods
foods = x.find_all(class_="objednavka-jidla-obalka")[0].find_all(class_="objednavka-jidlo-obalka")
for food in foods:
# Find the values
food_name = food.find(class_="objednavka-jidlo-nazev").text
food_type = food.find(class_="objednavka-jidlo-popis").text
food_value = food.find(class_="objednavka-jidlo-zmena").contents[1].contents[3].attrs["value"]
# Remove this if you need to
# This just removes the soup entry
if(food_type == "Polévka"):
continue
# Turn the value from text to markdown-like text
if food_value == "zaskrtnuto":
food_value = "[x]"
elif food_value == "nezaskrtnuto":
food_value = "[ ]"
else:
food_value = "[-]"
# Strip in case of leading/trailing spaces and print
print((food_value + " " + food_type + " - " + food_name).lstrip().rstrip())
# Empty line for cleanness
print()
if(simple):
display_simple()
else:
display_table()