id (stringlengths 1-265) | text (stringlengths 6-5.19M) | dataset_id (stringclasses, 7 values)
---|---|---|
3338726 | <reponame>vedaldi/dynamic-video-depth
import os
import torch.multiprocessing as mp
from .scripts.preprocess.universal import preprocess
from . import train
import numpy as np
from skimage.transform import resize as imresize
gaps = [1, 2, 4, 6, 8]
def load_index(out_dir):
index_path = os.path.join(out_dir, "preproc", "index.npz")
return np.load(index_path)
def load_frame(out_dir, fid, index=None, dry_run=False):
if index is None:
index = load_index(out_dir)
depth_dir = os.path.join(
out_dir,
"testscene_flow_motion_field_universal_sequence_default",
"epoch0020_test",
)
H = index["height"]
W = index["width"]
scale = index["scale"]
i = np.nonzero(index["fids"] == fid)[0].item()
batch_path = os.path.join(depth_dir, f"batch{i:04d}.npz")
if not dry_run:
batch = np.load(batch_path)
x = batch["depth"].squeeze(0)
x = np.transpose(x, (1, 2, 0))
x = imresize(x, (H, W), preserve_range=True).astype(np.float32)
x = np.transpose(x, (2, 0, 1)) / scale
else:
if not os.path.exists(batch_path):
raise FileNotFoundError()
x = True
return x
def load(out_dir, dry_run=False):
try:
depth = []
index = load_index(out_dir)
for i, fid in enumerate(index["fids"]):
depth.append(load_frame(out_dir, fid, index=index, dry_run=dry_run))
if not dry_run:
depth = np.concatenate(depth)
return {"depth": depth, "fids": index["fids"]}
except FileNotFoundError:
if dry_run:
return None
raise
def run(
dataloader,
out_dir,
resume=False,
rescale_depth_using_masked_region=False,
use_motion_mask=False,
):
preproc_dir = os.path.join(out_dir, "preproc")
checkpoint_dir = os.path.join(out_dir, "checkpoints", "0")
test_script_path = os.path.join(
os.path.dirname(__file__), "experiments/universal/test_cmd.txt"
)
preprocess(
dataloader,
preproc_dir,
gaps=gaps,
rescale_depth_using_masked_region=rescale_depth_using_masked_region,
resume=resume,
)
# fmt: off
args = [
"--net", "scene_flow_motion_field",
"--dataset", "universal_sequence",
"--data_dir", preproc_dir,
"--log_time",
"--epoch_batches", "2000",
"--epoch", "20",
"--lr", "1e-6",
"--html_logger",
"--vali_batches", "150",
"--batch_size", "1",
"--optim", "adam",
"--vis_batches_vali", "4",
"--vis_every_vali", "1",
"--vis_every_train", "1",
"--vis_batches_train", "5",
"--vis_at_start",
"--tensorboard",
"--gpu", "0",
"--save_net", "1",
"--workers", "4",
"--one_way",
"--loss_type", "l1",
"--l1_mul", "0",
"--acc_mul", "1",
"--disp_mul", "1",
"--warm_sf", "5",
"--scene_lr_mul", "1000",
"--repeat", "1",
"--flow_mul", "1",
"--sf_mag_div", "100",
"--time_dependent",
"--gaps", ','.join([str(gap) for gap in gaps]),
"--midas",
"--use_disp",
"--full_logdir", checkpoint_dir,
"--test_template", test_script_path,
"--resume", "-1" if resume else "0", # resume from the last epoch
"--force_overwrite",
]
# fmt: on
if use_motion_mask:
args.append("--use_motion_seg")
# mp.set_start_method('spawn', force=True)
train.main(args=args)
| StarcoderdataPython |
41265 | <filename>package/niflow/ants/brainextraction/__init__.py<gh_stars>0
from .__about__ import __version__
from .workflows.brainextraction import init_brain_extraction_wf
| StarcoderdataPython |
3295876 | # -*- coding: utf-8 -*-
"""Top-level package for YooKassa API Python Client Library."""
from yookassa.configuration import Configuration
from yookassa.payment import Payment
from yookassa.receipt import Receipt
from yookassa.refund import Refund
from yookassa.settings import Settings
from yookassa.webhook import Webhook
__author__ = "YooMoney"
__email__ = '<EMAIL>'
__version__ = '2.1.2'
| StarcoderdataPython |
1719316 | import os
from pathlib import Path
import cv2, imutils
import time
import numpy as np
import pyshine as ps
import argparse
import subprocess
from skimage.metrics import structural_similarity as compare_ssim
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QFileDialog
from PyQt5.QtGui import QImage
import ocr
import setROI
captureDevice = 1
saveDir = 'D:/IMG/cvCapture'
w_frame = 1024
h_frame = 576
diff_target = 0.9
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(int(w_frame*1.25), int(h_frame*1.25))  # resize() expects ints, not floats
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.gridLayout_2 = QtWidgets.QGridLayout(self.centralwidget)
self.gridLayout_2.setObjectName("gridLayout_2")
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.label = QtWidgets.QLabel(self.centralwidget)
self.label.setText("")
self.label.setPixmap(QtGui.QPixmap("images/H.png"))
self.label.setObjectName("label")
self.horizontalLayout.addWidget(self.label)
self.gridLayout = QtWidgets.QGridLayout()
self.gridLayout.setObjectName("gridLayout")
self.verticalSlider = QtWidgets.QSlider(self.centralwidget)
self.verticalSlider.setOrientation(QtCore.Qt.Vertical)
self.verticalSlider.setObjectName("verticalSlider")
self.gridLayout.addWidget(self.verticalSlider, 0, 0, 1, 1)
self.verticalSlider_2 = QtWidgets.QSlider(self.centralwidget)
self.verticalSlider_2.setOrientation(QtCore.Qt.Vertical)
self.verticalSlider_2.setObjectName("verticalSlider_2")
self.gridLayout.addWidget(self.verticalSlider_2, 0, 1, 1, 1)
self.label_2 = QtWidgets.QLabel(self.centralwidget)
self.label_2.setAlignment(QtCore.Qt.AlignCenter)
self.label_2.setObjectName("label_2")
self.gridLayout.addWidget(self.label_2, 1, 0, 1, 1)
self.label_3 = QtWidgets.QLabel(self.centralwidget)
self.label_3.setAlignment(QtCore.Qt.AlignCenter)
self.label_3.setObjectName("label_3")
self.gridLayout.addWidget(self.label_3, 1, 1, 1, 1)
self.horizontalLayout.addLayout(self.gridLayout)
self.gridLayout_2.addLayout(self.horizontalLayout, 0, 0, 1, 2)
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.pushButton = QtWidgets.QPushButton(self.centralwidget)
self.pushButton.setObjectName("pushButton")
self.horizontalLayout_2.addWidget(self.pushButton)
self.pushButton_3 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_3.setObjectName("pushButton_3")
self.horizontalLayout_2.addWidget(self.pushButton_3)
self.pushButton_2 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_2.setObjectName("pushButton_2")
self.horizontalLayout_2.addWidget(self.pushButton_2)
self.gridLayout_2.addLayout(self.horizontalLayout_2, 1, 0, 1, 1)
spacerItem = QtWidgets.QSpacerItem(313, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout_2.addItem(spacerItem, 1, 1, 1, 1)
MainWindow.setCentralWidget(self.centralwidget)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
self.verticalSlider.valueChanged['int'].connect(self.brightness_value)
self.verticalSlider_2.valueChanged['int'].connect(self.blur_value)
self.pushButton_2.clicked.connect(self.loadImage)
self.pushButton.clicked.connect(self.setRoiPart)
self.pushButton_3.clicked.connect(self.setRoiZone)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
# Added code here
self.filename = 'Snapshot '+str(time.strftime("%Y-%b-%d at %H.%M.%S %p"))+'.png' # Will hold the image address location
self.tmp = None # Will hold the temporary image for display
self.tmpPrevious = None #
self.brightness_value_now = 0 # Updated brightness value
self.blur_value_now = 0 # Updated blur value
self.fps=0
self.started = False
self.grayCurrent = 0
self.grayPrevious = 0
self.ocrText = ''
def loadImage(self):
if self.started:
self.started=False
self.pushButton_2.setText('Start')
else:
self.started=True
self.pushButton_2.setText('Stop')
cam = True # True for webcam
if cam:
vid = cv2.VideoCapture(captureDevice)
else:
vid = cv2.VideoCapture('video.mp4')
cnt=0
frames_to_count=20
st = 0
fps=0
while(vid.isOpened()):
img, self.image = vid.read()
self.image = imutils.resize(self.image ,height = 1080 )
if cnt == frames_to_count:
try: # To avoid divide by 0 we put it in try except
print(frames_to_count/(time.time()-st),'FPS')
self.fps = round(frames_to_count/(time.time()-st))
st = time.time()
cnt=0
except:
pass
cnt+=1
self.checkDiff(self.image)
self.update()
key = cv2.waitKey(1) & 0xFF
if self.started==False:
break
print('Loop break')
def setPhoto(self,image):
self.tmp = image
image = imutils.resize(image,width=w_frame)
frame = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = QImage(frame, frame.shape[1],frame.shape[0],frame.strides[0],QImage.Format_RGB888)
self.label.setPixmap(QtGui.QPixmap.fromImage(image))
def brightness_value(self,value):
self.brightness_value_now = value
print('Brightness: ',value)
self.update()
def blur_value(self,value):
self.blur_value_now = value
print('Blur: ',value)
self.update()
def changeBrightness(self,img,value):
hsv = cv2.cvtColor(img,cv2.COLOR_BGR2HSV)
h,s,v = cv2.split(hsv)
lim = 255 - value
v[v>lim] = 255
v[v<=lim] += value
final_hsv = cv2.merge((h,s,v))
img = cv2.cvtColor(final_hsv,cv2.COLOR_HSV2BGR)
return img
def changeBlur(self,img,value):
kernel_size = (value+1,value+1) # +1 is to avoid 0
img = cv2.blur(img,kernel_size)
return img
def update(self):
img = self.changeBrightness(self.image,self.brightness_value_now)
img = self.changeBlur(img,self.blur_value_now)
self.setPhoto(img)
def checkDiff(self,_img):
gray = cv2.cvtColor(_img, cv2.COLOR_BGR2GRAY)
img = _img
if self.tmpPrevious is not None:
diff_frame = gray - self.tmpPrevious
diff_frame -= diff_frame.min()
#disp_frame = np.uint8(255.0*diff_frame/float(diff_frame.max()))
(score, diff) = compare_ssim(gray, self.tmpPrevious, full=True)
diff = (diff * 255).astype("uint8")
#print("SSIM: {}%".format(score*100))
_text ='{0:.2f}'.format(score)
text = f'Diff: {_text} / {diff_target}'
if score < diff_target:
self.savePhoto()
img = ps.putBText(_img,text,text_offset_x=250,text_offset_y=100,vspace=20,hspace=10, font_scale=1.0,background_RGB=(222,20,10),text_RGB=(255,255,255))
else:
img = ps.putBText(_img,text,text_offset_x=250,text_offset_y=100,vspace=20,hspace=10, font_scale=1.0,background_RGB=(10,20,222),text_RGB=(255,255,255))
else:
self.tmpPrevious = gray
self.tmpPrevious = gray
self.setPhoto(img)
def savePhoto(self):
""" This function will save the image """
_image,Part,Zone = ocr.main(self.image)
self.filename = Part+'_'+Zone+'_'+str(time.strftime("%Y-%b-%d_%H_%M_%S"))+'.jpg'
cv2.imwrite(os.path.join(saveDir, self.filename),_image,[int(cv2.IMWRITE_JPEG_QUALITY), 100])
print('Image saved as:',self.filename)
def setRoiPart(self):
if self.started:
self.started=False
self.pushButton_2.setText('ROI Set')
cv2.imwrite('_T.jpg',self.tmp,[int(cv2.IMWRITE_JPEG_QUALITY), 100])
self.started=setROI.main(0)
self.pushButton_2.setText('Stop')
print('Image saved as:',self.filename)
else:
self.started=True
self.pushButton_2.setText('Stop')
def setRoiZone(self):
if self.started:
self.started=False
self.pushButton_2.setText('ROI Set')
cv2.imwrite('_T.jpg',self.tmp,[int(cv2.IMWRITE_JPEG_QUALITY), 100])
self.started=setROI.main(1)
self.pushButton_2.setText('Stop')
print('Image saved as:',self.filename)
else:
self.started=True
self.pushButton_2.setText('Stop')
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "PyShine video process"))
self.pushButton_2.setText(_translate("MainWindow", "Start"))
self.label_2.setText(_translate("MainWindow", "Brightness"))
self.label_3.setText(_translate("MainWindow", "Blur"))
self.pushButton.setText(_translate("MainWindow", "Set ROI Part"))
self.pushButton_3.setText(_translate("MainWindow", "Set ROI Zone"))
if __name__ == "__main__":
import sys
Path(saveDir).mkdir(parents=True, exist_ok=True)
Path(saveDir+"/ocr").mkdir(parents=True, exist_ok=True)
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
| StarcoderdataPython |
14413 | <filename>django-openstack/django_openstack/syspanel/views/instances.py
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2011 Fourth Paradigm Development, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django import template
from django import http
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.shortcuts import render_to_response
from django.utils.translation import ugettext as _
import datetime
import logging
from django.contrib import messages
from django_openstack import api
from django_openstack import forms
from django_openstack.dash.views import instances as dash_instances
from openstackx.api import exceptions as api_exceptions
TerminateInstance = dash_instances.TerminateInstance
RebootInstance = dash_instances.RebootInstance
LOG = logging.getLogger('django_openstack.syspanel.views.instances')
def _next_month(date_start):
y = date_start.year + (date_start.month + 1)/13
m = ((date_start.month + 1)%13)
if m == 0:
m = 1
return datetime.date(y, m, 1)
def _current_month():
today = datetime.date.today()
return datetime.date(today.year, today.month,1)
def _get_start_and_end_date(request):
try:
date_start = datetime.date(int(request.GET['date_year']), int(request.GET['date_month']), 1)
except:
today = datetime.date.today()
date_start = datetime.date(today.year, today.month,1)
date_end = _next_month(date_start)
datetime_start = datetime.datetime.combine(date_start, datetime.time())
datetime_end = datetime.datetime.combine(date_end, datetime.time())
if date_end > datetime.date.today():
datetime_end = datetime.datetime.utcnow()
return (date_start, date_end, datetime_start, datetime_end)
@login_required
def usage(request):
(date_start, date_end, datetime_start, datetime_end) = _get_start_and_end_date(request)
service_list = []
usage_list = []
max_vcpus = max_gigabytes = 0
total_ram = 0
if date_start > _current_month():
messages.error(request, 'No data for the selected period')
date_end = date_start
datetime_end = datetime_start
else:
try:
service_list = api.service_list(request)
except api_exceptions.ApiException, e:
LOG.error('ApiException fetching service list in instance usage',
exc_info=True)
messages.error(request,
'Unable to get service info: %s' % e.message)
for service in service_list:
if service.type == 'nova-compute':
max_vcpus += service.stats['max_vcpus']
max_gigabytes += service.stats['max_gigabytes']
total_ram += settings.COMPUTE_HOST_RAM_GB
try:
usage_list = api.usage_list(request, datetime_start, datetime_end)
except api_exceptions.ApiException, e:
LOG.error('ApiException fetching usage list in instance usage'
' on date range "%s to %s"' % (datetime_start,
datetime_end),
exc_info=True)
messages.error(request, 'Unable to get usage info: %s' % e.message)
dateform = forms.DateForm()
dateform['date'].field.initial = date_start
global_summary = {'max_vcpus': max_vcpus, 'max_gigabytes': max_gigabytes,
'total_active_disk_size': 0, 'total_active_vcpus': 0,
'total_active_ram_size': 0}
for usage in usage_list:
# FIXME: api needs a simpler dict interface (with iteration) - anthony
# NOTE(mgius): Changed this on the api end. Not too much neater, but
# at least its not going into private member data of an external
# class anymore
#usage = usage._info
for k in usage._attrs:
v = usage.__getattr__(k)
if type(v) in [float, int]:
if not k in global_summary:
global_summary[k] = 0
global_summary[k] += v
max_disk_tb = used_disk_tb = available_disk_tb = 0
max_disk_tb = global_summary['max_gigabytes'] / float(1000)
used_disk_tb = global_summary['total_active_disk_size'] / float(1000)
available_disk_tb = (global_summary['max_gigabytes'] / float(1000) - \
global_summary['total_active_disk_size'] / float(1000))
used_ram = global_summary['total_active_ram_size'] / float(1024)
avail_ram = total_ram - used_ram
ram_unit = "GB"
if total_ram > 999:
ram_unit = "TB"
total_ram /= float(1024)
used_ram /= float(1024)
avail_ram /= float(1024)
return render_to_response(
'syspanel_usage.html',{
'dateform': dateform,
'usage_list': usage_list,
'global_summary': global_summary,
'available_cores': global_summary['max_vcpus'] - global_summary['total_active_vcpus'],
'available_disk': global_summary['max_gigabytes'] - global_summary['total_active_disk_size'],
'max_disk_tb': max_disk_tb,
'used_disk_tb': used_disk_tb,
'available_disk_tb': available_disk_tb,
'total_ram': total_ram,
'used_ram': used_ram,
'avail_ram': avail_ram,
'ram_unit': ram_unit,
'external_links': settings.EXTERNAL_MONITORING,
}, context_instance = template.RequestContext(request))
@login_required
def tenant_usage(request, tenant_id):
(date_start, date_end, datetime_start, datetime_end) = _get_start_and_end_date(request)
if date_start > _current_month():
messages.error(request, 'No data for the selected period')
date_end = date_start
datetime_end = datetime_start
dateform = forms.DateForm()
dateform['date'].field.initial = date_start
usage = {}
try:
usage = api.usage_get(request, tenant_id, datetime_start, datetime_end)
except api_exceptions.ApiException, e:
LOG.error('ApiException getting usage info for tenant "%s"'
' on date range "%s to %s"' % (tenant_id,
datetime_start,
datetime_end))
messages.error(request, 'Unable to get usage info: %s' % e.message)
running_instances = []
terminated_instances = []
if hasattr(usage, 'instances'):
now = datetime.datetime.now()
for i in usage.instances:
# this is just a way to phrase uptime in a way that is compatible
# with the 'timesince' filter. Use of local time intentional
i['uptime_at'] = now - datetime.timedelta(seconds=i['uptime'])
if i['ended_at']:
terminated_instances.append(i)
else:
running_instances.append(i)
return render_to_response('syspanel_tenant_usage.html', {
'dateform': dateform,
'usage': usage,
'instances': running_instances + terminated_instances,
'tenant_id': tenant_id,
}, context_instance = template.RequestContext(request))
@login_required
def index(request):
for f in (TerminateInstance, RebootInstance):
_, handled = f.maybe_handle(request)
if handled:
return handled
instances = []
try:
instances = api.server_list(request)
except Exception as e:
LOG.error('Unspecified error in instance index', exc_info=True)
messages.error(request, 'Unable to get instance list: %s' % e.message)
# We don't have any way of showing errors for these, so don't bother
# trying to reuse the forms from above
terminate_form = TerminateInstance()
reboot_form = RebootInstance()
return render_to_response('syspanel_instances.html', {
'instances': instances,
'terminate_form': terminate_form,
'reboot_form': reboot_form,
}, context_instance=template.RequestContext(request))
@login_required
def refresh(request):
for f in (TerminateInstance, RebootInstance):
_, handled = f.maybe_handle(request)
if handled:
return handled
instances = []
try:
instances = api.server_list(request)
except Exception as e:
messages.error(request, 'Unable to get instance list: %s' % e.message)
# We don't have any way of showing errors for these, so don't bother
# trying to reuse the forms from above
terminate_form = TerminateInstance()
reboot_form = RebootInstance()
return render_to_response('_syspanel_instance_list.html', {
'instances': instances,
'terminate_form': terminate_form,
'reboot_form': reboot_form,
}, context_instance=template.RequestContext(request))
| StarcoderdataPython |
3357626 | #!flask/bin/python
from app import app
import os
app.run(host=os.getenv('IP', '0.0.0.0'), port=int(os.getenv('PORT', 8080)), debug=True)
| StarcoderdataPython |
1718550 | import numpy as np
from numba import jit

@jit
def fast_auc(y_true, y_prob):
y_true = np.asarray(y_true)
y_true = y_true[np.argsort(y_prob)]
nfalse = 0
auc = 0
n = len(y_true)
for i in range(n):
y_i = y_true[i]
nfalse += (1 - y_i)
auc += y_i * nfalse
auc /= (nfalse * (n - nfalse))
return auc
def eval_auc(preds, dtrain):
labels = dtrain.get_label()
    return 'auc', fast_auc(labels, preds), True
| StarcoderdataPython |
1696231 | Huffman([a1,f1], [a2,f2], …, [an,fn])
    if n = 1 then
        code[a1] ← ""
    else
        let fi, fj be the 2 smallest f's
        Huffman([ai, fi+fj], [a1,f1], …, [an,fn])   # omits the original [ai,fi] and [aj,fj]
        code[aj] ← code[ai] + "0"
        code[ai] ← code[ai] + "1"
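
# A minimal runnable Python sketch of the recursive scheme above (my interpretation of the
# pseudocode; the symbol/frequency pairs in the commented example call are illustrative).
def huffman(pairs):
    # pairs: list of [symbol, frequency]; returns dict mapping symbol -> codeword
    code = {}

    def rec(items):
        if len(items) == 1:
            code[items[0][0]] = ""
            return
        items = sorted(items, key=lambda sf: sf[1])
        (ai, fi), (aj, fj) = items[0], items[1]
        rec([[ai, fi + fj]] + items[2:])  # merge the two smallest; omit the original ai, aj entries
        code[aj] = code[ai] + "0"
        code[ai] = code[ai] + "1"

    rec(list(pairs))
    return code

# Example: huffman([["a", 45], ["b", 13], ["c", 12], ["d", 16], ["e", 9], ["f", 5]])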
| StarcoderdataPython |
1660426 | <reponame>aaditep/aadioptimize<gh_stars>0
import numpy.matlib as mat
import numpy as np
N =1
def initDE(N_p,lb,ub,prob):
"""
Initializes paramaters for differential evolution
Paramaters
----------
N_p : int
Number of population
lb : int
lower bound of the search space
ub : int
upper bound of the search space
prob : function
The objective function
Returns
-------
lb : numpy.ndarray
Returns the lower bound as a numpy array
ub : numpy.ndarray
Returns the upper bound as a numpy array
f : numpy.ndarray
Returns vector for fitness function
fu : numpy.ndarray
Returns empty vector for the trial vectors' fitness function values
D : int
Returns the amount of decision variables for crossover process
U : numpy.ndarray
Returns matrix for trial solution
P : numpy.ndarray
Returns randomly generated matrix of target vectors
"""
lb = np.full(N_p,lb)
ub = np.full(N_p,ub)
f = np.zeros((N_p,1)) #empty vector for fitness function
fu = np.zeros((N_p,1))#newly created trial vector
D = len(lb) # Determining amount of decision variables
U = np.zeros((N_p,D)) #Matrix for storing trial solutions
#Initial random population
P = mat.repmat(lb,N_p,1)+mat.repmat((ub-lb),N_p,1)*np.random.rand(len(ub-lb),N_p)
for p in np.arange(N_p):
f[p]=prob(P[p,])
return lb,ub,f,fu,D,U,P
#This function starts the mutation process and generates a donorvector
def mutation(i,N_p,t,T,P,N_vars,F_min,F_const):
"""
Function that generates a donor vector. If there are >= 3 search variables, the
adaptive scaling factor is used; otherwise the constant one is. It generates
candidates for the donor vector by randomly choosing rows from the initial matrix,
but not the i-th element.
Parameters
----------
i : int
Number of the row in matrix
N_p : int
Number of population
t : int
Iteration index
T : int
Total number of iterations
N_vars : int
Number of search variables
F_min : optional (float,int)
The minimum value of scaling factor. Used when N_vars >= 3
F_const : optional (float,int)
The constant value of scaling factor
Returns
-------
V : numpy.ndarray
The donor vector
"""
#Adaptive scaling factor
if N_vars >= 3:
F=F_min*2**np.exp(1-(T/(T+1-t)))
else:
F = F_const
#candidates are assigned without the i-th element
candidates= np.delete(np.arange(N_p), np.where(np.arange(N_p)==i))
#3 target vectors are picked out randomly for the donorvector generator
cand_rand=np.random.choice(candidates,3,replace= False)
X1=P[cand_rand[0],]
X2=P[cand_rand[1],]
X3=P[cand_rand[2],]
#Donorvctor generator
V= X1 + F*(X2-X3)
return V
#this function evaluates donor vector and uses parts of it which fit better
def crossover(f,P_c_min,P_c_max,i,D,V,P,U):
"""
Crossover function for differential evolution. This function uses an adaptive crossover rate.
The minimum and maximum of the range are set by the user. It decides whether or not to use the donor vector's
j-th element in the U matrix.
Parameters
----------
f : numpy.ndarray
The fitness function array
P_c_min : optional(float/integer)
Minimum crossover rate value for adaptive crossover rate
P_c_max : optional(float/integer)
Maximum crossover rate value for adaptive crossover rate
i : int
Row number
D : int
The amount of decision variables for crossover process
V : numpy.ndarray
The donor vector
P : numpy.ndarray
Matrix of initial target vectors
U : numpy.ndarray
Matrix of trial solutions
Returns
-------
U : numpy.ndarray
Retruns the U matrix with new trial solutions.
"""
#ADAPTIVE Crossover
if f[i] < np.mean(f):
P_c = P_c_min + (P_c_max-P_c_min)*((f[i]-np.mean(f))/(np.max(f)-np.mean(f)))
else:
P_c = P_c_min
delta = np.random.randint(0,D-1)
for j in np.arange(D):
if np.random.uniform(0,1) <= P_c or delta == j:
U[i,j] = V[j]
else:
U[i,j]=P[i,j]
return U
#this function bounds the vector and replaces the old target vector with new if better
def boundgreed(N,j,U,P,f,fu,ub,lb,prob):
"""
This function bounds the vector elements according to the bounds set by the user. If a bound is
violated, the element is replaced by either the lower or the upper bound. Then greedy selection is performed.
First the objective function is evaluated with the new vector. Then it is compared to the initial or the last
objective function value. If the new value is smaller, the initial or last target vector matrix's rows
are replaced by the new vector.
Parameters
----------
N : int
Number of evaluations counter
j : int
Row index
U : numpy.ndarray
Matrix of trial vectors
P : numpy.ndarray
Matrix of target vectors
f : numpy.ndarray
Target vectors' Fitness function array.
fu : numpy.ndarray
Trial vectors' Fitness function array.
ub : numpy.ndarray
Upperbound
lb : numpy.ndarray
Lowerbound
prob : function
The objective function
Returns
-------
N : int
Updated evaluation counter
f : numpy.ndarray
New trial vectors' fitness function value that will be used in next iteration
P : numpy.ndarray
New trial vector matrix that will be used in next iteration
"""
U[j]=np.minimum(U[j], ub)
U[j]=np.maximum(U[j], lb)
##
fu[j]=prob(U[j])
N = N+1
if fu[j] < f[j]:
P[j]= U[j]
f[j]=fu[j]
return N,f,P
#distance from known location
def distance(known_loc,found_loc,N_vars,):
"""
Function that uses the Pythagorean theorem to calculate the distance between the found point
and a known location. NB: this function is not used in the main program, so it must
be called separately.
Parameters
----------
known_loc : numpy.ndarray
Known location that is given by the user
found_loc : numpy.ndarray
Found location with the program
N_vars : int
Number of search variables
Returns
-------
dist : float
Returns the distance between the points.
"""
undersqrt=np.zeros(N_vars)
for i in (np.arange(N_vars)):
undersqrt[i] =(known_loc[i]-found_loc[i])**2
dist = np.sqrt(sum(undersqrt))
return dist
def main(N,N_p,T,lb,ub,prob,N_vars,F_min,F_const,P_c_min,P_c_max):
"""
Differential evolution optimizer. It takes all the parameters and uses them to try to find the
global optimum of the objective function. The number of evaluations of the
fitness function is 1+(N_p*T).
Parameters
----------
N : int
Number of evaluations counter
N_p : int
Number of population
T : int
Number of iterations
lb : int
Lower bound of search space
ub : TYPE
Upper bound of search space
prob : function
Function for objective function
N_vars : int
Number of search variables
F_min : optional (int/float)
Minimum value for the scaling factor
F_const : optional (int/float)
Constant value for the scaling factor
P_c_min : optional (int/float)
Minimum value of Crossover rate
P_c_max : optional (int/float)
Maximum value of crossover rate
Raises
------
Exception
Raised when the population size (N_p) is less than 4
Returns
-------
N : int
Number of fitness-function evaluations performed
best_of_f : numpy.ndarray
Returns the best value of the objective function
globopt : numpy.ndarray
Returns global optimum location
"""
lb,ub,f,fu,D,U,P = initDE(N_p,lb,ub,prob)
if N_p < 4:
raise Exception("Sorry, there must be atleast a population of 4. Reccomended 20")
for t in np.arange(T):
for i in np.arange(N_p):
V = mutation(i,N_p,t,T,P,N_vars,F_min,F_const)
U=crossover(f,P_c_min,P_c_max,i,D,V,P,U)
for j in np.arange(N_p):
N,f,P = boundgreed(N,j,U,P,f,fu,ub,lb,prob)
#if N == 500:
#break
best_of_f= min(f)
globopt = P[f.argmin()]
return N,best_of_f, globopt[:N_vars]
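# Illustrative usage sketch (not part of the original module): the objective function
# and all parameter values below are assumptions chosen for demonstration only.
if __name__ == "__main__":
    def sphere(x):
        # Sum of squares; global optimum of 0 at the origin.
        return float(np.sum(np.asarray(x) ** 2))

    n_evals, best_f, best_x = main(N=1, N_p=20, T=50, lb=-5, ub=5, prob=sphere,
                                   N_vars=2, F_min=0.2, F_const=0.7,
                                   P_c_min=0.1, P_c_max=0.9)
    print("evaluations:", n_evals, "best f:", best_f, "location:", best_x)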
| StarcoderdataPython |
3217065 | # In this program we take CSVs that are prepared with a search name - either a Fund name / ISIN / Stock ticker
# and use that to search either Investing.com (InvestPy) or Yahoo Finance (with pandas URL) to get historical
# price data. Then we plot graphs using matplotlib, and present these in PDF using ReportLab.
import investpy
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from matplotlib.ticker import MaxNLocator, LinearLocator
from urllib.error import HTTPError
from time import sleep
import os
import textwrap
import pickle # Use pickle module to save complex Python objects to the disk. If all objects can be handled by json,
# you can use json module, which gives a human-readable file. However in this case, we have dataframes.
# You can read dfs into json, but they must be assigned as json first. Simpler to use pickle here.
# ReportLab imports
from reportlab.platypus import SimpleDocTemplate, PageTemplate, Frame, Flowable, Paragraph, Table, TableStyle, Spacer, KeepTogether # Platypus contains the flowable classes
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle # StyleSheet is a set of default style we can use, and ParagraphStyle can customise them
from reportlab.lib.enums import TA_JUSTIFY, TA_CENTER # Import text alignment & justify constants here
from reportlab.lib import colors
from reportlab.pdfbase import pdfmetrics # Used to register fonts
from reportlab.pdfbase.ttfonts import TTFont # Used for creating TrueType font object
from io import BytesIO # IO streams are file-like objects that live in python memory, rather than on disk. Much faster, and less files!
from svglib.svglib import svg2rlg # Library for converting SVG image files into other formats (e.g. ReportLab graphics.)
# Read csv file of trades for a tax year. CSVs must be prepared with a search name col that works on Investing.com or Yahoo.
equities_DF = pd.read_csv('./Investments CSV Example.csv', sep=',', header=0, index_col=0).dropna(how='all')
# Convert date strings to datetime objects.
equities_DF['Bought'] = pd.to_datetime(equities_DF['Bought'], format='%d/%m/%Y')
equities_DF['Sold'] = pd.to_datetime(equities_DF['Sold'], format='%d/%m/%Y')
# Calculate time buffer so we get a bit of extra price data before and after trade.
equities_DF['Buffer'] = (equities_DF['Sold'] - equities_DF['Bought']) * 0.2
# Create column for time interval between Bought and Sold
equities_DF['Interval'] = equities_DF['Sold'] - equities_DF['Bought']
# Create search-date columns for 'bought' and 'sold' with added buffer
equities_DF['Bought_search'] = equities_DF['Bought'] - equities_DF['Buffer']
equities_DF['Sold_search'] = equities_DF['Sold'] + equities_DF['Buffer']
# Create two Epoch timestamp (ts) columns for bought and sold dates.
equities_DF['Bought_ts'] = ((equities_DF['Bought_search'] - pd.Timestamp('1970-01-01')) // pd.Timedelta('1s')).astype('Int64') # Int64 is a special pandas type that supports nullable Ints.
equities_DF['Sold_ts'] = ((equities_DF['Sold_search'] - pd.Timestamp('1970-01-01')) // pd.Timedelta('1s')).astype('Int64')
# Create master dictionary for holding name of equity and df of historical prices
prices = {}
# Function for fetching historical price data
def fetch_prices():
consecutive_trades = '' # Variable for checking consecutive trades (i.e. where two different purchases were sold together)
consecutive_trades_I = '' # Another variable for checking consecutive trades (but in second part of code)
for equity in enumerate(equities_DF.index):
# Add fund/share as dict key, and add sub-dict with key as 'Investpy fund name' / 'ISIN' / 'Ticker' and search name as value.
if equity[1] not in prices:
prices[equity[1]] = equities_DF.iloc[equity[0], 7:10].dropna().to_dict()
consecutive_trades = equity[1]
elif equity[1] == consecutive_trades: # If a consecutive buy exists, add the date of that buy.
prices[equity[1]]['Additional buy'] = equities_DF.iloc[equity[0], 1]
consecutive_trades = equity[1]
# Set default search country as UK, unless 'USD' found.
country = 'United Kingdom'
if 'USD' in equity[1]:
country = 'United States'
elif 'CAD' in equity[1]:
country = 'Canada'
# Retrieve historic fund/share prices
# First check what type of search we need to do: using Fund Name, ISIN, or ticker
if equity[1] == consecutive_trades_I: # Skip the additional buys if they are part of same sell transaction.
print(f'{equity[0]}. Additional buy for {equity[1]} - skipped.')
continue
elif 'InvestPy Fund Name' in prices[equity[1]]:
search_name = prices[equity[1]]['InvestPy Fund Name'] # Get value that we use to search InvestPy or Yahoo.
try: # Add a df of historical price data to 'Price History' key
prices[equity[1]]['Price History'] = investpy.get_fund_historical_data(fund=search_name,
country=country, # Below converts datetime to string format for searching
from_date=equities_DF.iloc[equity[0], -4].strftime('%d/%m/%Y'),
to_date=equities_DF.iloc[equity[0], -3].strftime('%d/%m/%Y'),
interval='Daily')
print(f'{equity[0]}. Retrieved fund price data for {equity[1]}.')
except RuntimeError:
print(RuntimeError, f'CHECK! InvestPy did not find price data for {equity[1]}')
elif 'Stock Ticker' in prices[equity[1]]:
search_name = prices[equity[1]]['Stock Ticker']
try:
prices[equity[1]]['Price History'] = investpy.get_stock_historical_data(stock=search_name,
country=country,
from_date=equities_DF.iloc[equity[0], -4].strftime('%d/%m/%Y'),
to_date=equities_DF.iloc[equity[0], -3].strftime('%d/%m/%Y'),
interval='Daily')
print(f'{equity[0]}. Retrieved stock price data for {equity[1]}.')
except RuntimeError: # If InvestPy fails, try Yahoo Finance.
prices[equity[1]]['Price History'] = pd.read_csv(f'https://query1.finance.yahoo.com/v7/finance/download/{search_name}?period1={equities_DF.iloc[equity[0], -2]}&period2={equities_DF.iloc[equity[0], -1]}&interval=1d&events=history', index_col='Date')
# Yahoo Finance data not downloaded as datetime objects - convert these:
prices[equity[1]]['Price History'].index = pd.to_datetime(prices[equity[1]]['Price History'].index, format='%Y-%m-%d')
print(f'{equity[0]}. Retrieved stock price data for {equity[1]} from YF.')
sleep(1) # Ensures we don't overload Yahoo with requests.
except HTTPError:
print('CHECK! Yahoo Finance request failed for', equity[1])
elif 'ISIN for Yahoo Finance' in prices[equity[1]]:
search_name = prices[equity[1]]['ISIN for Yahoo Finance']
try:
prices[equity[1]]['Price History'] = pd.read_csv(f'https://query1.finance.yahoo.com/v7/finance/download/{search_name}?period1={equities_DF.iloc[equity[0], -2]}&period2={equities_DF.iloc[equity[0], -1]}&interval=1d&events=history', index_col='Date')
prices[equity[1]]['Price History'].index = pd.to_datetime(prices[equity[1]]['Price History'].index, format='%Y-%m-%d') # Convert index to datetime
print(f'{equity[0]}. Retrieved fund price data for {equity[1]} using ISIN.')
sleep(1)
except HTTPError:
try: # Some ISIN numbers require a '.L' on the end to be found on Yahoo for some reason.
prices[equity[1]]['Price History'] = pd.read_csv(f'https://query1.finance.yahoo.com/v7/finance/download/{search_name}.L?period1={equities_DF.iloc[equity[0], -2]}&period2={equities_DF.iloc[equity[0], -1]}&interval=1d&events=history', index_col='Date')
prices[equity[1]]['Price History'].index = pd.to_datetime(prices[equity[1]]['Price History'].index, format='%Y-%m-%d') # Convert index to datetime
print(f'{equity[0]}. Retrieved fund price data for {equity[1]} using ISIN.')
sleep(1)
except HTTPError:
print('CHECK! Yahoo Finance request failed for', equity[1])
except Exception as UnknownError:
print('Unknown error for', equity[1], UnknownError)
else: # I couldn't find this equity on Investing.com or Yahoo Finance so we just skip it.
print(f'{equity[0]}. No price data for this equity - skipped.')
consecutive_trades_I = equity[1] # Overwrite this var to check for consecutives.
# Now correct price data which is in £ not pennies: Some funds randomly change from £s to pennies midway through dataset.
try: # Correct values which are < max value divided by 100.
prices[equity[1]]['Price History'].loc[prices[equity[1]]['Price History']['Close'] < prices[equity[1]]['Price History']['Close'].max() / 100, ['Open', 'High', 'Low', 'Close']] *= 100
except KeyError:
print(KeyError, 'This equity had no price data')
# Fetch the prices if not found already:
if not os.path.isfile('./prices_2019-20.pkl'):
fetch_prices()
# Save prices dictionary to disk, so I don't have to retrieve price data everytime.
# Highest protocol ensures the correct compatibility with my Python version. This is a binary encoding, hence 'wb'.
def save_prices(prices_dict, filename):
with open(filename, 'wb') as filepath:
pickle.dump(prices_dict, filepath, pickle.HIGHEST_PROTOCOL)
# Save the prices to file (can # out so it doesn't run everytime):
if not os.path.isfile('./prices_2019-20.pkl'):
save_prices(prices, 'prices_2019-20.pkl')
# Read pickle file into Python again.
def load_prices(filename):
with open(filename, 'rb') as file:
prices = pickle.load(file)
return prices
# Load the prices data
if os.path.isfile('./prices_2019-20.pkl'):
load_prices('prices_2019-20.pkl')
###------------------------MATPLOTLIB PLOTTING SECTION------------------------###
# Create overview of trades in subplots. Create fig handle and axs 2D numpy array containing all 20 axes.
def overview_plots():
fig, axs = plt.subplots(nrows=4, ncols=6, figsize=(12, 6), tight_layout=True)
fig.suptitle(f'Historical Price Data for Investments Sold in XXXX-XX')
# Set accuracy of Tick labels to be used, depending on Buy-Sell time interval
monthYear = mdates.DateFormatter('%b-%y')
dayMonthYear = mdates.DateFormatter('%d/%m/%y')
# ax.flat is an attribute of ax that gives an iterator where the 4x6 array is flattened to a 1D array. Allows us to loop through.
for ax, (equity_name, equity) in zip(axs.flat, prices.items()):
if equity.get('Price History') is not None:
ax.set_title("\n".join(textwrap.wrap(equity_name, 45)), fontsize=6, wrap=True) # Use textwrap to split string according to the char limit (45), then join with /n.
ax.plot(equity['Price History'].index, equity['Price History']['Close'], color='blue', linewidth=1)
ax.tick_params(labelsize=4)
ax.set_xlabel('Date', fontsize=5)
ax.set_ylabel('Price', fontsize=5)
locator = MaxNLocator(nbins='auto') # Create an automatic tick spacer
numticks = LinearLocator(numticks=6) # Create a linear tick spacer of set no. of ticks
ax.yaxis.set_major_locator(locator)
ax.xaxis.set_major_locator(numticks)
# We use the 'Interval' column to determine what Tick formatting accuracy we should use on the graphs.
interval = equities_DF.loc[equity_name, 'Interval']
if isinstance(interval, pd.Series): # Where we have consecutive trades, we have 2 values in a series.
interval = equities_DF.loc[equity_name, 'Interval'][0]
if interval < pd.Timedelta(60, 'days'):
ax.xaxis.set_major_formatter(dayMonthYear)
ax.tick_params(axis='x', labelrotation=30)
else:
ax.xaxis.set_major_formatter(monthYear)
# Define buy and sold dates
bought_date = equities_DF.loc[equity_name, 'Bought']
sold_date = equities_DF.loc[equity_name, 'Sold']
if isinstance(bought_date, pd.Series):
bought_date = bought_date[0]
sold_date = sold_date[0]
# Try to annotate Buy and Sell arrows
bought_ycoord = prices[equity_name]['Price History'].loc[bought_date, 'Close']
sold_ycoord = prices[equity_name]['Price History'].loc[sold_date, 'Close']
if not pd.isna([bought_ycoord, sold_ycoord]).any():
ax.annotate('Bought', (bought_date, bought_ycoord), xycoords='data', fontsize=5, fontweight='semibold', color='orange', xytext=(-15, -25), textcoords='offset points', arrowprops={'arrowstyle': '->'})
ax.annotate('Sold', (sold_date, sold_ycoord), xycoords='data', fontsize=5, fontweight='semibold', color='red', xytext=(-15, -25), textcoords='offset points', arrowprops={'arrowstyle': '->'})
else:
pass
else:
continue
overview_plots()
##########################################################################################################
###------------------------------------------ PDF Production ------------------------------------------###
##########################################################################################################
# Using ReportLab, you can either layout the PDF using a canvas, and painting it with static content, such as
# strings, lines, drawings, logos etc. Or you you can use Flowables which is a list of items or content that we want to add to the PDF.
# These are easily styled with margins, paragraph style etc., and so are great for report content that's used repeatedly.
# Flowables are appended one after the other, a bit like typing in a Word Doc. Whereas, static elements are drawn in a fixed location.
# Normally flowables are appended to the story list, which is then used to build the final PDF.
# Mixing static content, and flowables can be a bit messy though. The way to do it is to use PageTemplate, which draws on static
# content, and also has a Frame that holds the flowables. You assign that template to the PDF before building it.
# First, define function that draws static content. i.e. content that is in the same position for every page.
# This function is used later in drawOn argument, and MUST include (canvas, doc) args
def draw_static_elements(canvas, pdf_doc):
canvas.saveState() # saveState saves current font, graphics transform for later recall by the next restoreState
# TrueType (.ttf) fonts are those used on Mac and PC systems, as opposed to Type1 fonts developed by Adobe in their PDFs.
# Must use a font with .ttc, .ttf, .otf format. ReportLab searches through your computer for them. 'Font Suitcase' not usable unfortunately
pdfmetrics.registerFont(TTFont('Euphemia', 'EuphemiaCAS.ttc'))
canvas.setFont('Euphemia', 10)
# Draw string at fixed location (top-left corner)
canvas.drawString(30, 810, f'Report generated on {pd.to_datetime("today"):%d/%m/%Y}')
# Reset font, graphic settings back to what they were before this function ran
canvas.restoreState()
# Define function to rescale drawing objects
def scale_to_fit(drawing, pdf_doc):
"""This function scales the drawing object to fit within the margin width of the pdf SampleDocTemplate"""
max_width = pdf_doc.width
scale_factor = max_width / drawing.width
# Not sure why, but width and height attributes don't actually change the size, but they must be changed to help the positioning during pdf build.
drawing.width *= scale_factor
drawing.height *= scale_factor
drawing.scale(scale_factor, scale_factor) # This actually scales the image by changing transform attr. Two args: scale_x, scale_y
drawing.hAlign = 'RIGHT'
return drawing
class Line(Flowable): # Inherits attributes from Flowable class, so it can be appended to story.
def __init__(self, width, height=0): # Only need to specify width to draw a line.
Flowable.__init__(self)
self.width = width
self.height = height
def __repr__(self):
return f"Line with width={self.width}"
def draw(self):
"""Use canvas.line method. Provide two X,Y pairs for start and end of line."""
self.canv.line(0, self.height, self.width, self.height)
line = Line(438) # 438 is the approx width of the text in the PDF
# SET UP PDF READY FOR TAKING FIGURES #
# The simple doc template sets up our document. You can specify page size, margins etc
pdf = SimpleDocTemplate('Report Preview.pdf', topMargin=57, bottomMargin=35, author='<NAME>', showBoundary=False)
# Create Frame for holding flowables. Frame object is used by the platypus modules. Args: x,y (bottom left),
frame = Frame(pdf.leftMargin, pdf.bottomMargin, pdf.width, pdf.height, showBoundary=False)
# Add Frame to the page template and call on template to draw static objects
template = PageTemplate(frames=[frame], onPage=draw_static_elements)
# Add the template to the simple doc
pdf.addPageTemplates(template)
# Get the preset paragraph/text styles
styles = getSampleStyleSheet()
# TrueType (.ttf) fonts are those used on Mac and PC systems, as opposed to Type1 fonts developed by Adobe in their PDFs.
# Must use a font with .ttc, .ttf, .otf format. ReportLab searches through your computer for them. 'Font Suitcase' not usable unfortunately
pdfmetrics.registerFont(TTFont('Palatino Linotype', 'Palatino Linotype.ttf'))
# Create custom paragraph style
styles.add(ParagraphStyle(name='MainTitle', fontName='Palatino Linotype', underlineWidth=1, fontSize=16, alignment=TA_CENTER))
styles.add(ParagraphStyle(name='EquityHeading', fontName='Palatino Linotype', fontSize=12, alignment=TA_JUSTIFY))
styles.add(ParagraphStyle(name='Body', fontName='Palatino Linotype', fontSize=10, alignment=TA_JUSTIFY))
# Define story list for holding flowables
story = list()
# Add a paragraph to the pdf story with the title. </u> is XML for underline.
story.append(Paragraph('<u>HL Fund and Share Account Trades: Tax Year XXXX-XX</u>', style=styles['MainTitle']))
# Add a blank line. If font size is 10, then height=12 adds a blank line.
story.append(Spacer(5, 30))
# In loop below, recreate individual, larger figures for each equity.
# Set accuracy of Tick labels to be used, depending on Buy-Sell interval
monthYear = mdates.DateFormatter('%b-%y')
dayMonthYear = mdates.DateFormatter('%d-%b-%y')
# Create historical price plots. Each plot will be saved in-memory to BytesIO object to be put into PDF document
for equity_name, equity in prices.items():
if equity.get('Price History') is not None:
fig, ax = plt.subplots(figsize=(7, 4), tight_layout=True)
ax.plot(equity['Price History'].index, equity['Price History']['Close'], color='blue', linewidth=1)
ax.grid(color='lightgrey', linestyle='-', linewidth=0.5)
ax.tick_params(labelsize=8)
ax.set_xlabel('Date', fontsize=11)
ax.set_ylabel('Price', fontsize=11)
locator = MaxNLocator(nbins='auto')
numticks = LinearLocator(numticks=8)
ax.yaxis.set_major_locator(locator)
ax.xaxis.set_major_locator(numticks)
# Use the Interval column to determine what Tick formatting accuracy we should use on the graphs.
interval = equities_DF.loc[equity_name, 'Interval']
if isinstance(interval, pd.Series):
interval = equities_DF.loc[equity_name, 'Interval'][0]
if interval < pd.Timedelta(60, 'days'):
ax.xaxis.set_major_formatter(dayMonthYear)
else:
ax.xaxis.set_major_formatter(monthYear)
# Try annotate Buy and Sell arrows
bought_date = equities_DF.loc[equity_name, 'Bought']
sold_date = equities_DF.loc[equity_name, 'Sold']
if isinstance(bought_date, pd.Series):
bought_date = bought_date[0]
sold_date = sold_date[0]
bought_ycoord = prices[equity_name]['Price History'].loc[bought_date, 'Close']
sold_ycoord = prices[equity_name]['Price History'].loc[sold_date, 'Close']
if not pd.isna([bought_ycoord, sold_ycoord]).any():
try:
ax.annotate('Bought', (bought_date, bought_ycoord), xycoords='data', fontsize=10, fontweight='semibold', color='orange', xytext=(-15, -70), textcoords='offset points', arrowprops={'arrowstyle': '->'})
ax.annotate('Sold', (sold_date, sold_ycoord), xycoords='data', fontsize=10, fontweight='semibold', color='red', xytext=(-15, -70), textcoords='offset points', arrowprops={'arrowstyle': '->'})
except KeyError:
print(KeyError, equity_name)
else:
pass
# ------------------------------- PDF construction ------------------------------- #
# Create Bytes object (binary object) to save figure within Python. This avoids having to save file on disk
chart = BytesIO()
fig.savefig(chart, format='svg')
# Set the current position of the file handle (like a cursor).
# '0' sets cursor at beginning of file. So when we read file, we read from the start.
chart.seek(0)
# svg2rlg takes a SVG file and converts it to ReportLab graphic. Returns a drawing object. This can be directly appended to story.
chartRL = svg2rlg(chart)
chartRL = scale_to_fit(chartRL, pdf)
# Define equity text to be appended later
equityText = Paragraph(f'{equity_name}:', style=styles['EquityHeading'])
# Define profit/loss number as float, and set green/pink colour for gain/loss. Also define return % number.
profit_loss = equities_DF.loc[equity_name, 'Profit/Loss']
return_pc = equities_DF.loc[equity_name, 'Return %']
if isinstance(profit_loss, pd.Series):
profit_loss = float(profit_loss[0].replace('£', '').replace(',', ''))
return_pc = return_pc[0]
else:
profit_loss = float(profit_loss.replace('£', '').replace(',', ''))
if profit_loss > 0:
profit_loss_color = colors.palegreen
else:
profit_loss_color = colors.pink
# Define table data. Each element of list is a row.
table_data = [['', 'Bought', 'Sold', 'Profit/Loss', 'Return %'],
[equity_name, bought_date.strftime('%d/%m/%Y'), sold_date.strftime('%d/%m/%Y'), '£' + str(profit_loss), return_pc]]
table = Table(table_data)
# Set table style. (From cell) (To cell) (col, row)
table.setStyle(TableStyle([('FONTNAME', (0, 0), (-1, -1), 'Palatino Linotype'),
('FONTSIZE', (0, 0), (-1, -1), 9),
('INNERGRID', (0, 0), (-1, -1), 1, colors.grey),
('BACKGROUND', (3, -1), (4, -1), profit_loss_color)]))
# Use KeepTogether flowable to ensure line, spacer, chartRL etc. flowables all stay together for each equity.
story.append(KeepTogether([line, Spacer(5, 6),
equityText, Spacer(5, 4),
chartRL, Spacer(5, 2),
table, Spacer(5, 30)]))
else:
continue
# Close all plots
plt.close('all')
# Build pdf. Can also annotate page numbers, logos onto pages. Building pdf creates Canvas object, can be accessed by .canv
# Build pdf can also take onPage static drawing functions. Haven't tried this yet
pdf.build(story)
| StarcoderdataPython |
33665 | import kfserving
from typing import List, Union
import numpy as np
class Predictor(): # pylint:disable=too-few-public-methods
def __init__(self, clf: kfserving.KFModel):
self.clf = clf
def predict_fn(self, arr: Union[np.ndarray, List]) -> np.ndarray:
instances = []
for req_data in arr:
if isinstance(req_data, np.ndarray):
instances.append(req_data.tolist())
else:
instances.append(req_data)
resp = self.clf.predict({"instances": instances})
return np.array(resp["predictions"])
| StarcoderdataPython |
3283597 | # General
name = "COVIDNext50_NewData"
gpu = False
batch_size = 64
n_threads = 20
random_seed = 1337
# Model
# Model weights path
# weights = "./experiments/ckpts/<model.pth>"
weights = './experiments/COVIDNext50_NewData_F1_92.98_step_10800.pth'
# Optimizer
lr = 1e-4
weight_decay = 1e-3
lr_reduce_factor = 0.7
lr_reduce_patience = 5
# Data
# train_imgs = "/data/ssd/datasets/covid/COVIDxV2/data/train"
# train_labels = "/data/ssd/datasets/covid/COVIDxV2/data/train_COVIDx.txt"
# val_imgs = "/data/ssd/datasets/covid/COVIDxV2/data/test"
# val_labels = "/data/ssd/datasets/covid/COVIDxV2/data/test_COVIDx.txt"
train_imgs = "assets/covid19newdata/train"
train_labels = "assets/covid19newdata/train_COVIDx.txt"
val_imgs = "assets/covid19newdata/train"
val_labels = "assets/covid19newdata/test_COVIDx.txt"
# Categories mapping
mapping = {
'normal': 0,
'pneumonia': 1,
'COVID-19': 2
}
# Loss weights order follows the order in the category mapping dict
loss_weights = [0.05, 0.05, 1.0]
width = 256
height = 256
n_classes = len(mapping)
# Training
epochs = 300
log_steps = 1
eval_steps = 1
ckpts_dir = "./experiments/ckpts"
| StarcoderdataPython |
1781459 | import pytest
# pytest_addoption 可以让用户注册一个自定义的命令行参数,方便用户将数据传递给 pytest
def pytest_addoption(parser):
parser.addoption(
"--cmdopt", action="store",
default="None",
type=list,  # the type can be int, str, float, list, etc.; if not specified, pytest treats the received value as str
# choices= ['python', 'java', 'c++'],#
help="将自定义命令行参数 ’--cmdopt' 添加到 pytest 配置中"
)
# read the value of the custom option from the config object
@pytest.fixture(scope="session") # scope 有session、module、class、function
def cmdopt(request):
return request.config.getoption("--cmdopt")
# any fixture or test case can then call cmdopt to obtain the passed-in value
# print the value of the custom option
@pytest.fixture(autouse=True)
def fix_1(cmdopt):
print('\n value of --cmdopt:', cmdopt)
if __name__ == '__main__':
# pass the custom option
pytest.main(['-s', '--cmdopt=98k'])
'''
Run from the command line: pytest test24-sys-argv.py --cmdopt=abcd
'''
| StarcoderdataPython |
3281440 | <reponame>soraros/nutils
import functools, numpy, operator
from nutils.testing import TestCase
from nutils import expression_v2, function, mesh, sample
class SerializedOps:
def from_int(self, v): return '{}i'.format(v)
def from_float(self, v): return '{}f'.format(v)
def scope(self, array): return 'scope({})'.format(array)
def mean(self, array): return 'mean({})'.format(array)
def jump(self, array): return 'jump({})'.format(array)
def add(self, *args): return 'add({})'.format(', '.join(('neg({})' if negate else '{}').format(arg) for negate, arg in args))
def multiply(self, *args): return 'mul({})'.format(', '.join(map(str, args)))
def divide(self, numerator, denominator): return 'div({}, {})'.format(numerator, denominator)
def power(self, base, exponent): return 'pow({}, {})'.format(base, exponent)
def get_element(self, array, axis, item): return 'get({}, {}, {})'.format(array, axis, item)
def transpose(self, array, axes): return 'transpose({})'.format(', '.join(map(str, (array, *axes))))
def trace(self, array, axis1, axis2): return 'trace({}, {}, {})'.format(array, axis1, axis2)
def get_variable(self, name, ndim):
if name.startswith('a') and all('0' <= i <= '9' for i in name[1:]):
if len(name) != ndim + 1:
return expression_v2._InvalidDimension(len(name) - 1)
else:
return name, tuple(map(int, name[1:]))
def call(self, name, ngenerates, arg):
if not name.startswith('f') or not all('0' <= i <= '9' for i in name[1:]):
return None
gen_sh = tuple(map(int, name[1:]))
if len(gen_sh) != ngenerates:
return expression_v2._InvalidDimension(len(gen_sh))
return 'call({}, {})'.format(name, str(arg)), gen_sh
class Parser(TestCase):
def setUp(self):
super().setUp()
self.parser = expression_v2._Parser(SerializedOps())
def mkasserts(self, parse):
def assertParses(expression, desired_result, desired_indices, *desired_shape):
with self.subTest('without-spaces'):
s_expression = expression_v2._Substring(expression)
actual_result, actual_shape, actual_indices, summed_indices = parse(s_expression)
self.assertEqual(actual_result, desired_result)
self.assertEqual(actual_shape, desired_shape)
self.assertEqual(actual_indices, desired_indices)
with self.subTest('with-spaces'):
s_expression = expression_v2._Substring(' ' + expression + ' ')
actual_result, actual_shape, actual_indices, summed_indices = parse(s_expression)
self.assertEqual(actual_result, desired_result)
self.assertEqual(actual_shape, desired_shape)
self.assertEqual(actual_indices, desired_indices)
def assertRaises(message, expression, markers, check_trim=True):
s_expression = expression_v2._Substring(expression)
with self.assertRaises(expression_v2.ExpressionSyntaxError) as cm:
parse(s_expression)
self.assertEqual(str(cm.exception), message + '\n' + expression + '\n' + markers)
if check_trim:
expression_spaces = ' ' + expression + ' '
s_expression = expression_v2._Substring(expression_spaces)
with self.assertRaises(expression_v2.ExpressionSyntaxError) as cm:
parse(s_expression)
self.assertEqual(str(cm.exception), message + '\n' + expression_spaces + '\n ' + markers)
return assertParses, assertRaises
def test_parse_expression(self):
assertParses, assertRaises = self.mkasserts(self.parser.parse_expression)
assertParses('1', '1i', '')
assertParses('-1 + 2', 'add(neg(1i), 2i)', '')
assertParses('- 1 + a2_i a2_i + 2', 'add(neg(1i), trace(mul(a2, a2), 0, 1), 2i)', '')
assertParses('a2_i + a23_ij a3_j + a2_i', 'add(a2, trace(mul(a23, a3), 1, 2), a2)', 'i', 2)
assertParses('a2_i + a23_ij a3_j + a2_i', 'add(a2, trace(mul(a23, a3), 1, 2), a2)', 'i', 2)
assertParses('a012_ijk + a021_ikj + a102_jik + a120_jki + a201_kij + a210_kji', 'add(a012, transpose(a021, 0, 2, 1), transpose(a102, 1, 0, 2), transpose(a120, 2, 0, 1), transpose(a201, 1, 2, 0), transpose(a210, 2, 1, 0))', 'ijk', 0, 1, 2)
assertParses('-2^2', 'add(neg(pow(2i, 2i)))', '') # https://en.wikipedia.org/wiki/Order_of_operations#Unary_minus_sign
assertRaises(
'Index i of the first term [^] is missing in the third term [~].',
'a2_i + a2_i + 3 + a2_i',
'^^^^ ~')
assertRaises(
'Index i of the second term [~] is missing in the first term [^].',
'1 + a2_i + 3',
'^ ~~~~')
assertRaises(
'Index i has length 2 in the first term [^] but length 3 in the fourth term [~].',
'a23_ij + a23_ij + a23_ij + a32_ij',
'^^^^^^ ~~~~~~')
def test_parse_fraction(self):
assertParses, assertRaises = self.mkasserts(self.parser.parse_fraction)
assertParses('1 / 2', 'div(1i, 2i)', '')
assertParses('2 a2_i / 2 a2_j a2_j', 'div(mul(2i, a2), trace(mul(2i, a2, a2), 0, 1))', 'i', 2)
assertRaises(
'Repeated fractions are not allowed. Use parentheses if necessary.',
'1 / 2 / 3',
'^^^^^^^^^')
assertRaises(
'The denominator must have dimension zero.',
'1 / a2_i',
' ^^^^')
assertRaises(
'Index i occurs more than twice.',
'a2_i / a22_ii',
'^^^^^^^^^^^^^')
assertRaises(
'Index i occurs more than twice.',
'a22_ii / a22_ii',
'^^^^^^^^^^^^^^^')
def test_parse_term(self):
assertParses, assertRaises = self.mkasserts(self.parser.parse_term)
assertParses('1 a2_i a2_j', 'mul(1i, a2, a2)', 'ij', 2, 2)
assertParses('a2_i a23_ij a3_j', 'trace(trace(mul(a2, a23, a3), 0, 1), 0, 1)', '')
assertParses('a2_i a3_j a3_j', 'trace(mul(a2, a3, a3), 1, 2)', 'i', 2)
assertRaises(
'Numbers are only allowed at the start of a term.',
'1 1',
' ^')
assertRaises(
'Index i is assigned to axes with different lengths: 2 and 3.',
'1 a2_i a3_i a',
'^^^^^^^^^^^^^')
assertRaises(
'Index i occurs more than twice.',
'1 a22_ii a2_i a',
'^^^^^^^^^^^^^^^')
assertRaises(
'Index i occurs more than twice.',
'1 a22_ii a22_ii a',
'^^^^^^^^^^^^^^^^^')
def test_parse_power_number(self):
assertParses, assertRaises = self.mkasserts(lambda s: self.parser.parse_power(s, allow_number=True))
assertParses('1^2', 'pow(1i, 2i)', '')
assertParses('1^-2', 'pow(1i, -2i)', '')
assertParses('a2_i^2', 'pow(a2, 2i)', 'i', 2)
assertRaises(
'The exponent must have dimension zero.',
'a^(a2_i)',
' ^^^^^^')
assertRaises(
'Index i occurs more than twice.',
'a2_i^(a22_ii)',
'^^^^^^^^^^^^^')
assertRaises(
'Index i occurs more than twice.',
'a2_i^(a22_ii)',
'^^^^^^^^^^^^^')
assertRaises(
'Unexpected whitespace before `^`.',
'a ^2',
' ^')
assertRaises(
'Unexpected whitespace after `^`.',
'a^ 2',
' ^')
assertRaises(
'Expected a number, variable, scope, mean, jump or function call.',
'^2',
'^')
def test_parse_power_nonumber(self):
assertParses, assertRaises = self.mkasserts(lambda s: self.parser.parse_power(s, allow_number=False))
assertParses('a2_i^2', 'pow(a2, 2i)', 'i', 2)
assertParses('a23_ij^-2', 'pow(a23, -2i)', 'ij', 2, 3)
assertRaises(
'The exponent must have dimension zero.',
'a^(a2_i)',
' ^^^^^^')
assertRaises(
'Unexpected whitespace before `^`.',
'a ^2',
' ^')
assertRaises(
'Unexpected whitespace after `^`.',
'a^ 2',
' ^')
assertRaises(
'Expected a variable, scope, mean, jump or function call.',
'^2',
'^')
assertRaises(
'Expected an int.',
'a^2_i',
' ^^^')
assertRaises(
'Expected an int or scoped expression.',
'a^',
' ^')
assertRaises(
'Expected an int or scoped expression.',
'a^a2_i',
' ^^^^')
assertRaises(
'Repeated powers are not allowed. Use parentheses if necessary.',
'a^a^a',
'^^^^^')
def test_parse_variable(self):
assertParses, assertRaises = self.mkasserts(lambda s: self.parser.parse_item(s, allow_number=False))
assertParses('a22_ij', 'a22', 'ij', 2, 2)
assertParses('a222_iji', 'trace(a222, 0, 2)', 'j', 2)
assertParses('a2_0', 'get(a2, 0, 0)', '')
assertParses('a23_1i', 'get(a23, 0, 1)', 'i', 3)
assertRaises(
'No such variable: `unknown`.',
'unknown_i',
'^^^^^^^')
assertRaises(
'Expected 1 index for variable `a2` but got 2.',
'a2_ij',
'^^^^^')
assertRaises(
'Expected 2 indices for variable `a22` but got 1.',
'a22_i',
'^^^^^')
assertRaises(
'Index i occurs more than twice.',
'a222_iii',
'^^^^^^^^')
assertRaises(
'Index of axis with length 2 out of range.',
'a23_3i',
' ^')
assertRaises(
'Symbol `$` is not allowed as index.',
'a234_i$j',
' ^')
def test_parse_call(self):
assertParses, assertRaises = self.mkasserts(lambda s: self.parser.parse_item(s, allow_number=False))
assertParses('f(a2_i + a2_i)', 'call(f, add(a2, a2))', 'i', 2)
assertParses('f(a2_i (a3_j + a3_j))', 'call(f, mul(a2, scope(add(a3, a3))))', 'ij', 2, 3)
assertParses('f62_mi(a256_ilm)', 'trace(trace(call(f62, a256), 2, 3), 0, 2)', 'l', 5)
assertParses('f42_ij(a34_ki)', 'trace(call(f42, a34), 1, 2)', 'kj', 3, 2)
assertParses('f32_ij(a34_ik)', 'trace(call(f32, a34), 0, 2)', 'kj', 4, 2)
assertParses('f23_i0(a2_k)', 'get(call(f23, a2), 2, 0)', 'ki', 2, 2)
assertParses('f23_1j(a2_k)', 'get(call(f23, a2), 1, 1)', 'kj', 2, 3)
assertRaises(
'Expected a number, variable, scope, mean, jump or function call.',
'f()',
' ^')
assertRaises(
'No such function: `g`.',
'g(a)',
'^')
assertRaises(
'Index i occurs more than twice.',
'f2_i(a22_ii)',
'^^^^^^^^^^^^')
assertRaises(
'Index i occurs more than twice.',
'f22_ii(a2_i)',
'^^^^^^^^^^^^')
assertRaises(
'Index i occurs more than twice.',
'f22_ii(a22_ii)',
'^^^^^^^^^^^^^^')
assertRaises(
'Index of axis with length 2 out of range.',
'f2_2(a)',
' ^')
assertRaises(
'Expected 2 indices for axes generated by function `f23` but got 1.',
'f23_j(a4_i)',
'^^^^^^^^^^^')
assertRaises(
'Symbol `$` is not allowed as index.',
'f234_i$j(a)',
' ^')
def test_parse_item_number(self):
assertParses, assertRaises = self.mkasserts(lambda s: self.parser.parse_item(s, allow_number=True))
assertRaises(
'Expected a number, variable, scope, mean, jump or function call.',
' ',
'^^^', check_trim=False)
assertRaises(
'Expected a number, variable, scope, mean, jump or function call.',
'1a',
'^^')
assertRaises(
'Expected a number, variable, scope, mean, jump or function call. '
'Hint: the operators `+`, `-` and `/` must be surrounded by spaces.',
'1+a',
'^^^')
assertRaises(
'Expected a number, variable, scope, mean, jump or function call. '
'Hint: the operators `+`, `-` and `/` must be surrounded by spaces.',
'1-a',
'^^^')
assertRaises(
'Expected a number, variable, scope, mean, jump or function call. '
'Hint: the operators `+`, `-` and `/` must be surrounded by spaces.',
'1/a',
'^^^')
def test_parse_item_nonumber(self):
assertParses, assertRaises = self.mkasserts(lambda s: self.parser.parse_item(s, allow_number=False))
assertRaises(
'Expected a variable, scope, mean, jump or function call.',
' ',
'^^^', check_trim=False)
assertRaises(
'Numbers are only allowed at the start of a term.',
'1',
'^')
assertRaises(
'Numbers are only allowed at the start of a term.',
'1a',
'^^')
assertRaises(
'Expected a variable, scope, mean, jump or function call.',
'f[a]',
'^^^^')
assertRaises(
'Expected a variable, scope, mean, jump or function call.',
'f{a}',
'^^^^')
assertRaises(
'Expected a variable, scope, mean, jump or function call.',
'f<a>',
'^^^^')
assertRaises(
'Expected a variable, scope, mean, jump or function call.',
'<a>',
'^^^')
def test_parse_scope(self):
assertParses, assertRaises = self.mkasserts(lambda s: self.parser.parse_item(s, allow_number=False))
assertParses('(1)', 'scope(1i)', '')
assertParses('(1 + a)', 'scope(add(1i, a))', '')
assertRaises(
'Unclosed `(`.',
'(1',
'^ ~', check_trim=False)
assertRaises(
'Parenthesis `(` closed by `]`.',
'(1]',
'^ ~')
assertRaises(
'Parenthesis `(` closed by `]`.',
'(1])',
'^ ~')
assertRaises(
'Unexpected symbols after scope.',
'(1)spam',
' ^^^^')
def test_parse_mean(self):
assertParses, assertRaises = self.mkasserts(lambda s: self.parser.parse_item(s, allow_number=False))
assertParses('{1 + 2}', 'mean(add(1i, 2i))', '')
assertParses('{(a2_i)}', 'mean(scope(a2))', 'i', 2)
def test_parse_jump(self):
assertParses, assertRaises = self.mkasserts(lambda s: self.parser.parse_item(s, allow_number=False))
assertParses('[1 + 2]', 'jump(add(1i, 2i))', '')
assertParses('[(a2_i)]', 'jump(scope(a2))', 'i', 2)
def test_parse_signed_int(self):
assertParses, assertRaises = self.mkasserts(self.parser.parse_signed_int)
assertParses('1', '1i', '')
assertParses('-1', '-1i', '')
assertRaises(
'Expected an int.',
'',
'^', check_trim=False)
assertRaises(
'Expected an int.',
' ',
'^^^', check_trim=False)
assertRaises(
'Expected an int.',
'a',
'^')
def test_parse_unsigned_int(self):
assertParses, assertRaises = self.mkasserts(self.parser.parse_unsigned_int)
assertParses('1', '1i', '')
assertParses('2', '2i', '')
assertParses('34', '34i', '')
assertRaises(
'Expected an int.',
'',
'^', check_trim=False)
assertRaises(
'Expected an int.',
' ',
'^^^', check_trim=False)
assertRaises(
'Expected an int.',
'a',
'^')
assertRaises(
'Expected an int.',
'-1',
'^^')
def test_parse_unsigned_float(self):
assertParses, assertRaises = self.mkasserts(self.parser.parse_unsigned_float)
assertParses('1', '1.0f', '')
assertParses('1.0', '1.0f', '')
assertParses('1.', '1.0f', '')
assertParses('0.1', '0.1f', '')
assertParses('1e-1', '0.1f', '')
assertParses('1.0e-1', '0.1f', '')
assertParses('.1e-1', '0.01f', '')
assertRaises(
'Expected a float.',
'',
'^', check_trim=False)
assertRaises(
'Expected a float.',
' ',
'^^^', check_trim=False)
assertRaises(
'Expected a float.',
'a',
'^')
assertRaises(
'Expected a float.',
'-1.2',
'^^^^')
class Namespace(TestCase):
def test_set_int(self):
ns = expression_v2.Namespace()
ns.a = 1
def test_set_float(self):
ns = expression_v2.Namespace()
ns.a = 1.2
def test_set_complex(self):
ns = expression_v2.Namespace()
ns.a = 1+1j
def test_set_numpy(self):
ns = expression_v2.Namespace()
ns.a = numpy.array([1, 2])
def test_set_numpy_indices(self):
ns = expression_v2.Namespace()
a = numpy.array([1, 2])
with self.assertRaisesRegex(AttributeError, '^Cannot assign an array to an attribute with an underscore.'):
ns.a_i = a
def test_set_expression_0d(self):
ns = expression_v2.Namespace()
ns.a = '1.2'
def test_set_expression_1d(self):
ns = expression_v2.Namespace()
ns.a = numpy.array([1, 2])
ns.b_i = '2 a_i'
def test_set_expression_2d(self):
ns = expression_v2.Namespace()
ns.a = numpy.array([1, 2])
ns.b_ij = 'a_i a_j'
ns.c_ji = 'a_i a_j'
def test_set_expression_invalid_indices(self):
ns = expression_v2.Namespace()
ns.a = numpy.array([1, 2])
with self.assertRaisesRegex(AttributeError, '^Only lower case latin characters are allowed as indices.'):
ns.b_α = 'a_α'
def test_set_expression_duplicate_indices(self):
ns = expression_v2.Namespace()
ns.a = numpy.array([1, 2])
with self.assertRaisesRegex(AttributeError, '^All indices must be unique.'):
ns.b_ii = 'a_i a_i'
def test_set_expression_missing_indices(self):
ns = expression_v2.Namespace()
ns.a = numpy.array([1, 2])
with self.assertRaisesRegex(AttributeError, '^Index i of the expression is missing in the namespace attribute.'):
ns.b_j = 'a_i a_j'
with self.assertRaisesRegex(AttributeError, '^Index j of the namespace attribute is missing in the expression.'):
ns.b_ij = 'a_i'
def test_set_function(self):
ns = expression_v2.Namespace()
ns.f = lambda a: a**2
self.assertAlmostEqual(('f(2)' @ ns).eval(), 4)
def test_set_function_unexpected_indices(self):
ns = expression_v2.Namespace()
with self.assertRaisesRegex(AttributeError, '^Cannot assign a function to an attribute with an underscore.'):
ns.f_i = lambda a: function.stack([a, a], axis=-1)
def test_set_other(self):
ns = expression_v2.Namespace()
with self.assertRaisesRegex(AttributeError, '^Cannot assign an object of type'):
ns.a = object()
def test_eval(self):
ns = expression_v2.Namespace()
ns.a = numpy.array([1, 2])
ns.b = numpy.array([1, 2, 3])
self.assertEqual(('1' @ ns).eval().tolist(), 1)
self.assertEqual(('a_i' @ ns).eval().tolist(), [1, 2])
self.assertEqual(('a_i b_j' @ ns).eval().export('dense').tolist(), [[1, 2, 3], [2, 4, 6]])
self.assertEqual(('b_j a_i' @ ns).eval().export('dense').tolist(), [[1, 2, 3], [2, 4, 6]])
def test_eval_tuple_list(self):
ns = expression_v2.Namespace()
self.assertEqual(sample.eval_integrals(*(('1', '2', '3') @ ns)), (1, 2, 3))
self.assertEqual(sample.eval_integrals(*(['1', '2', '3'] @ ns)), (1, 2, 3))
def test_define_for_0d(self):
ns = expression_v2.Namespace()
topo, ns.t = mesh.line(numpy.linspace(0, 1, 3), bnames=['past', 'future'])
ns.define_for('t', gradient='∂t', normal='nt', jacobians=['dt', 'dtb'])
ns.basis = topo.basis('spline', degree=1)
self.assertAlmostEqual(topo.integral('dt' @ ns, degree=2).eval(), 1)
self.assertAlmostEqual(topo.integral('∂t(t^2) dt' @ ns, degree=2).eval(), 1)
self.assertAlmostEqual(topo.boundary['future'].integral('nt dtb' @ ns, degree=2).eval(), 1)
self.assertAlmostEqual(topo.boundary['past'].integral('nt dtb' @ ns, degree=2).eval(), -1)
with self.assertRaisesRegex(ValueError, 'The curl can only be defined for a geometry with shape'):
ns.define_for('t', curl='curl')
def test_define_for_1d(self):
ns = expression_v2.Namespace()
topo, ns.x = mesh.rectilinear([numpy.linspace(0, 1, 3), numpy.linspace(0, 1, 5)])
ns.define_for('x', gradient='∇', normal='n', jacobians=['dV', 'dS'])
self.assertAlmostEqual(topo.integral('dV' @ ns, degree=2).eval(), 1)
self.assertAlmostEqual(topo.integral('∇_i(x_i^2) dV' @ ns, degree=2).eval(), 2)
self.assertAlmostEqual(topo.boundary['right'].integral('n_0 dS' @ ns, degree=2).eval(), 1)
self.assertAlmostEqual(topo.boundary['bottom'].integral('n_1 dS' @ ns, degree=2).eval(), -1)
with self.assertRaisesRegex(ValueError, 'The curl can only be defined for a geometry with shape'):
ns.define_for('x', curl='curl')
def test_define_for_3d(self):
ns = expression_v2.Namespace()
topo, ns.X = mesh.rectilinear([numpy.linspace(-1, 1, 3)]*3)
ns.x, ns.y, ns.z = ns.X
ns.define_for('X', gradient='∇', curl='curl')
ns.δ = function.eye(3)
ns.ε = function.levicivita(3)
ns.f = function.Array.cast([['x', '-z', 'y'], ['0', 'x z', '0']] @ ns)
smpl = topo.sample('gauss', 5)
assertEvalAlmostEqual = lambda *args: self.assertAllAlmostEqual(*(smpl(f).as_evaluable_array.eval() for f in args))
assertEvalAlmostEqual('curl_ij(y δ_j0 - x δ_j1 + z δ_j2)' @ ns, '-2 δ_i2' @ ns)
assertEvalAlmostEqual('curl_ij(-x^2 δ_j1)' @ ns, '-2 x δ_i2' @ ns)
assertEvalAlmostEqual('curl_ij((x δ_j0 - z δ_j1 + y δ_j2) δ_k0 + x z δ_j1 δ_k1)' @ ns, '2 δ_i0 δ_k0 - x δ_i0 δ_k1 + z δ_i2 δ_k1' @ ns)
assertEvalAlmostEqual('curl_ij(∇_j(x y + z))' @ ns, function.zeros((3,)))
def test_copy(self):
ns1 = expression_v2.Namespace()
ns1.a = 1
ns2 = ns1.copy_()
self.assertAlmostEqual(ns2.a.eval(), 1)
| StarcoderdataPython |
3224928 | <gh_stars>1-10
#!python
from more_or_less import PageOfHeight
from more_or_less.fixed_size_screen import FixedSizeScreen
from more_or_less.input import Input
from more_or_less.more_page_builder import MorePageBuilder
from more_or_less.output import Output
from more_or_less.page_builder import StopOutput
from more_or_less.wrapped_page import WrappedPage
from unittest.mock import Mock
import unittest
class TestUtil(unittest.TestCase):
def assertIsPageOfType(self, page, page_type):
''' assertIsInstance, but will first strip page-wrappers '''
page = _skip_page_wrappers(page)
self.assertIsInstance(page, page_type)
def assertIsPageOfHeight(self, page, height):
self.assertIsPageOfType(page, PageOfHeight)
self.assertEqual(height, page.height)
def assertIsFullscreenPage(self, page, screen_height=1000):
self.assertIsPageOfHeight(page, _page_height_for_screen(screen_height))
def get_more_page_builder(self, output=None, input=None, plugins=None, screen_height=1000):
return MorePageBuilder(
input=input or Mock(Input),
output=output or Mock(Output),
screen_dimensions=FixedSizeScreen(height=screen_height),
plugins=plugins,
)
class TestMorePageBuilder(TestUtil):
def test_build_first_page_returns_page_of_screen_height_minus_one(self):
screen_height = 10
builder = self.get_more_page_builder(screen_height=screen_height)
page = builder.build_first_page()
self.assertIsPageOfHeight(page, screen_height - 1)
def test_build_next_page_prompts_user_for_action(self):
input = Mock(Input)
input.get_character.return_value = ' '
builder = self.get_more_page_builder(input=input)
builder.build_next_page()
input.get_character.assert_called_once_with('--More--')
def test_returns_full_screen_page_if_user_presses_space(self):
screen_height = 10
input = Mock(Input)
builder = self.get_more_page_builder(input=input, screen_height=10)
input.get_character.return_value = ' '
page = builder.build_next_page()
self.assertIsFullscreenPage(page, screen_height)
def test_returns_one_line_page_if_user_presses_enter(self):
input = Mock(Input)
builder = self.get_more_page_builder(input=input)
input.get_character.return_value = '\r'
page = builder.build_next_page()
self.assertIsPageOfHeight(page, 1)
def test_enter_works_both_on_newline_and_carriage_return(self):
input = Mock(Input)
builder = self.get_more_page_builder(input=input)
input.get_character.return_value = '\n'
page = builder.build_next_page()
self.assertIsPageOfHeight(page, 1)
def test_stops_output_if_user_presses_q(self):
input = Mock(Input)
builder = self.get_more_page_builder(input=input)
input.get_character.return_value = 'q'
with self.assertRaises(StopOutput):
builder.build_next_page()
def test_stops_output_if_user_presses_Q(self):
input = Mock(Input)
builder = self.get_more_page_builder(input=input)
input.get_character.return_value = 'Q'
with self.assertRaises(StopOutput):
builder.build_next_page()
def test_stops_output_on_ctrl_c(self):
input = Mock(Input)
builder = self.get_more_page_builder(input=input)
input.get_character.side_effect = KeyboardInterrupt
with self.assertRaises(StopOutput):
builder.build_next_page()
def test_ignores_unexpected_user_input(self):
input = Mock(Input)
builder = self.get_more_page_builder(input=input)
input.get_character.side_effect = ['a', 'b', 'c', '\r']
builder.build_next_page()
self.assertEqual(4, input.get_character.call_count)
def test_user_can_enter_count_before_enter(self):
input = Mock(Input)
builder = self.get_more_page_builder(input=input)
input.get_character.side_effect = ['5', '\n']
page = builder.build_next_page()
self.assertIsPageOfHeight(page, 5)
def test_count_becomes_the_new_default_for_enter(self):
input = Mock(Input)
builder = self.get_more_page_builder(input=input)
input.get_character.side_effect = ['5', '\n']
builder.build_next_page()
input.get_character.side_effect = ['\n']
second_page = builder.build_next_page()
self.assertIsPageOfHeight(second_page, 5)
def test_can_specify_count_bigger_than_10(self):
input = Mock(Input)
builder = self.get_more_page_builder(input=input)
input.get_character.side_effect = ['5', '0', '0', '\n']
page = builder.build_next_page()
self.assertIsPageOfHeight(page, 500)
def test_user_can_enter_count_before_space(self):
input = Mock(Input)
builder = self.get_more_page_builder(input=input)
input.get_character.side_effect = ['5', ' ']
page = builder.build_next_page()
self.assertIsPageOfHeight(page, 5)
def test_count_does_not_become_the_new_default_for_space(self):
input = Mock(Input)
screen_height = 666
builder = self.get_more_page_builder(input=input, screen_height=screen_height)
input.get_character.side_effect = ['5', ' ']
builder.build_next_page()
input.get_character.side_effect = [' ']
second_page = builder.build_next_page()
self.assertIsFullscreenPage(second_page, screen_height)
def _page_height_for_screen(screen_height):
height_reserved_for_more_prompt = 1
return screen_height - height_reserved_for_more_prompt
def _skip_page_wrappers(page):
while isinstance(page, WrappedPage):
page = page.wrapped_page
    return page
 | StarcoderdataPython
86266 | <filename>clickhouse_manager/config.py<gh_stars>10-100
#!/usr/bin/env python
# -*- coding: utf-8 -*-
class Config(object):
config = None
def __init__(self, config):
self.config = config
def __str__(self):
return str(self.config)
def __getitem__(self, item):
return self.config[item]
def interactive(self):
return self.config['app']['interactive']
def dry(self):
return self.config['app']['dry']
def log_file(self):
return self.config['app']['log-file']
def log_level(self):
return self.config['app']['log-level']
def pid_file(self):
return self.config['app']['pid_file']
def ch_config_folder(self):
return self.config['manager']['config-folder']
def ch_config_file(self):
return self.config['manager']['config.xml']
def ch_config_user_file(self):
return self.config['manager']['user.xml']
def ssh_username(self):
return self.config['ssh']['username']
def ssh_password(self):
return self.config['ssh']['password']
def ssh_port(self):
return self.config['ssh']['port']
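
# Minimal usage sketch of the configuration layout implied by the accessors
# above. The concrete paths and values below are placeholders for illustration,
# not taken from a real ClickHouse deployment.
if __name__ == '__main__':
    sample = {
        'app': {
            'interactive': False,
            'dry': True,
            'log-file': '/var/log/clickhouse-manager.log',
            'log-level': 'INFO',
            'pid_file': '/var/run/clickhouse-manager.pid',
        },
        'manager': {
            'config-folder': '/etc/clickhouse-server',
            'config.xml': 'config.xml',
            'user.xml': 'user.xml',
        },
        'ssh': {'username': 'root', 'password': '', 'port': 22},
    }
    cfg = Config(sample)
    print(cfg.log_level(), cfg.ssh_port())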
| StarcoderdataPython |
109022 | import openturns as ot
class UBoolean:
"""
Wrapper class of ot.Bernoulli to enable boolean operator
"""
def __init__(self, confidence=ot.Bernoulli()):
self.confidence = confidence
def __str__(self):
return "UBoolean(" + str(self.confidence.getP()) + ")"
def __and__(self, other):
if isinstance(other, UBoolean):
return UBoolean(ot.Bernoulli(self.confidence.getP() * other.confidence.getP()))
return NotImplemented
    def __or__(self, other):
        if isinstance(other, UBoolean):
            # P(a or b) = P(a) + P(b) - P(a)P(b), assuming independent operands
            p_a = self.confidence.getP()
            p_b = other.confidence.getP()
            return UBoolean(ot.Bernoulli(p_a + p_b - p_a * p_b))
        return NotImplemented
def not_op(self):
return UBoolean(ot.Bernoulli(1 - self.confidence.getP()))
def value_with_confidence(self, threshold):
if self.exist(threshold):
p = self.confidence.getP()
if p >= threshold:
return True
elif p < threshold:
return False
raise RuntimeError("No value exist for " + str(self) + "with a confidence of " + str(threshold))
def exist(self, threshold):
if isinstance(threshold, (int, float)):
p = self.confidence.getP()
return p >= threshold or (1-p) >= threshold
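
# Minimal usage sketch (illustrative probabilities; assumes openturns is
# installed): combine two uncertain booleans and read the result back with a
# confidence threshold.
if __name__ == "__main__":
    a = UBoolean(ot.Bernoulli(0.9))   # "true" with 90% confidence
    b = UBoolean(ot.Bernoulli(0.8))   # "true" with 80% confidence
    print(a & b)                                  # ~UBoolean(0.72) under independence
    print(a | b)                                  # ~UBoolean(0.98) under independence
    print((a & b).value_with_confidence(0.7))     # True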
| StarcoderdataPython |
29028 | # encoding: utf-8
"""
@author: BrikerMan
@contact: <EMAIL>
@blog: https://eliyar.biz
@version: 1.0
@license: Apache Licence
@file: test.py.py
@time: 2019-01-25 14:43
"""
import unittest
from tests import *
from kashgari.utils.logger import init_logger
init_logger()
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
1762275 | <reponame>MarlonRF/tcc_files
# - Title:
#  Lattes XML Parser
# - Description:
#  Converts data from the XML files downloaded from Lattes CVs into Pandas DataFrames.
#  Part of an undergraduate thesis in Applied and Computational Mathematics at IME/USP.
# - Author:
#  <NAME>
# - Advisor:
#  <NAME> vel Lejbman
# -------- Libraries -------- #
import os
import zipfile
import numpy as np
import pandas as pd
from glob import glob
import xml.etree.ElementTree as ET
from lxml import etree
import collections, re
# -------- Functions -------- #
def listarArquivos(diretorio: str="padrão", extensao: str="zip") -> list:
"""
    Lists the files with the given extension found under a directory (recursively).
    Parameters
    ----------
    diretorio : str
        Path to the directory containing the ZIP/XML files.
        Defaults to the "\dados\xml_zip" folder relative to this file.
    extensao : str
        File extension to look for.
    Returns
    -------
    lista
        List of paths to the files with the given extension.
"""
lista = []
if diretorio =="padrão":
diretorio = os.path.dirname(os.path.abspath(__file__)) +'\\dados\\xml_zip'
else:
pass
padrão = "*."+extensao
for dir,_,_ in os.walk(diretorio):
lista.extend(glob(os.path.join(dir,padrão)))
return lista
def abreZip(arquivo: str = '', diretorio_temp: str = '\\dados\\xml_zip\\temp') -> None:
    """
    Opens a ZIP archive and extracts the curriculo.xml file into the given directory.
    Parameters
    ----------
    arquivo : str
        Path to the ZIP file.
    diretorio_temp : str
        Directory where the extracted XML is saved.
    Returns
    -------
    None:
        Returns nothing.
    """
diretorio_temp = os.path.dirname(os.path.abspath(__file__)) +diretorio_temp
print(diretorio_temp)
    # "with" guarantees the file is opened and then closed after being read.
with zipfile.ZipFile(arquivo, 'r') as zip_ref:
zip_ref.extractall(diretorio_temp)
print("ok")
def lerXML(arquivo: str = '\\dados\\xml_zip\\temp\\curriculo.xml', deleta: bool = True):
"""
    Reads the curriculo.xml file from the temp folder.
    Parameters
    ----------
    arquivo : str
        Path to the curriculo.xml file.
    deleta : bool
        Delete the file after reading.
    Returns
    -------
    tree, root
        ElementTree and root element of the parsed XML.
"""
local = os.path.dirname(os.path.abspath(__file__))
arquivo = local+arquivo
tree = ET.parse(arquivo)
root = tree.getroot()
if deleta == True:
os.remove(arquivo)
return tree, root
def estruturaXML(arquivo: str = '\\dados\\xml_zip\\temp\\curriculo.xml'):
"""
    Prints the structure of a Lattes CV XML file.
    Parameters
    ----------
    arquivo : str
        Path to the curriculo.xml file.
    Returns
    -------
    None (prints the structure to stdout)
    """
    #abreZip(caminho,arquivo, pasta)
    x = etree.parse(arquivo) # Parse the XML file
    xml = etree.tostring(x, pretty_print=True) # Pretty-print the XML
    raiz = etree.fromstring(xml) # Get the root of the XML document
    arvore = etree.ElementTree(raiz) # Get the element tree of the XML document
    arvore_bonita = collections.OrderedDict() # Empty ordered dictionary
    for tag in raiz.iter():
        caminho = re.sub('\[[0-9]+\]', '', arvore.getpath(tag)) # Strip list indices such as [1] from the path
        if caminho not in arvore_bonita:
            arvore_bonita[caminho] = []
        if len(tag.keys()) > 0:
            arvore_bonita[caminho].extend(atributo for atributo in tag.keys() # Add attributes that were not recorded yet
                                    if atributo not in arvore_bonita[caminho])
    for caminho, atributos in arvore_bonita.items():
        indent = int(caminho.count('/') - 1) # Indentation level given by the '/' count
        espaço=' ' * indent
        a = espaço +','
        print('{0}({1}) {2} : [{3}]'.format(espaço, indent, caminho.split('/')[-1], ', '.join(f'{atributo}' # Print the results
                                    for atributo in atributos) if len(atributos) > 0 else '-'))
def tagsAtributos(tag: str, root, subnivel=False) -> list:
"""
    Collects the child tags and attributes of the elements matching `tag`.
    Parameters
    ----------
    tag : str
        Tag name to look for.
    root : root XML
        Root object returned by lerXML().
    Returns
    -------
    Tags and Attributes : list
        Returns two lists with the tags and the attributes.
"""
info = [elemento for elemento in root.iter(tag)]
if subnivel == True:
tags=[child.tag for child in info[0]]
atributos=[child.attrib for child in info[0]]
else:
tags=[child.tag for child in info]
atributos=[child.attrib for child in info]
return tags, atributos
def AbributoXMLparaDf(tag:str, root, subnivel=False):
"""
    Converts the attributes of the matching tags into a DataFrame.
    Parameters
    ----------
    tag : str
        Tag of the data of interest.
    root : root XML
        Root object returned by lerXML().
    Returns
    -------
    DataFrame : pd.DataFrame
        DataFrame with the attributes of the matching tags.
"""
tags, atributos = tagsAtributos(tag, root, subnivel)
return pd.DataFrame(atributos)
def artigos(root):
"""
    Collects the article data from a parsed Lattes CV.
    Parameters
    ----------
    root : root XML
        Root object returned by lerXML().
    Returns
    -------
    DataFrame : pd.DataFrame
        DataFrame with the basic and detailed article data plus the researcher's name.
    """
    basicos = AbributoXMLparaDf("DADOS-BASICOS-DO-ARTIGO",root) # Collects the basic article data from the XML file
    detalhes = AbributoXMLparaDf("DETALHAMENTO-DO-ARTIGO",root) # Collects the detailed article data from the XML file
    completo = pd.concat([basicos, detalhes], axis=1, join="inner") # Concatenates both sets of data
    dados_gerais = [prod.attrib for prod in root.iter('DADOS-GERAIS')][0] # Collects the researcher's general data from the XML file
    nome_completo = dados_gerais['NOME-COMPLETO'] # Gets the researcher's full name
    completo['autor_ifusp']=nome_completo # Adds the researcher's name to the articles dataframe
return completo
# The functions below were not used in this work, but they do work!
# They will be used and improved in future work
def orientacaoMestrado(root):
basicos = AbributoXMLparaDf('DADOS-BASICOS-DE-ORIENTACOES-CONCLUIDAS-PARA-MESTRADO', root)
detalhes = AbributoXMLparaDf('DETALHAMENTO-DE-ORIENTACOES-CONCLUIDAS-PARA-MESTRADO', root)
completo = pd.concat([basicos, detalhes], axis=1, join="inner")
dados_gerais = [prod.attrib for prod in root.iter('DADOS-GERAIS')][0]
nome_completo = dados_gerais['NOME-COMPLETO']
completo['autor_ifusp']=nome_completo
return completo
def orientacaoDoutorado(root):
basicos = AbributoXMLparaDf('DADOS-BASICOS-DE-ORIENTACOES-CONCLUIDAS-PARA-DOUTORADO', root)
detalhes = AbributoXMLparaDf('DETALHAMENTO-DE-ORIENTACOES-CONCLUIDAS-PARA-DOUTORADO', root)
completo = pd.concat([basicos, detalhes], axis=1, join="inner")
dados_gerais = [prod.attrib for prod in root.iter('DADOS-GERAIS')][0]
nome_completo = dados_gerais['NOME-COMPLETO']
completo['autor_ifusp']=nome_completo
return completo
def participacaoDoutorado(root):
basicos = AbributoXMLparaDf('DADOS-BASICOS-DA-PARTICIPACAO-EM-BANCA-DE-DOUTORADO', root)
detalhes = AbributoXMLparaDf('DETALHAMENTO-DA-PARTICIPACAO-EM-BANCA-DE-DOUTORADO', root)
completo = pd.concat([basicos, detalhes], axis=1, join="inner")
dados_gerais = [prod.attrib for prod in root.iter('DADOS-GERAIS')][0]
nome_completo = dados_gerais['NOME-COMPLETO']
completo['autor_ifusp']=nome_completo
return completo
def participacaoMestrado(root):
basicos = AbributoXMLparaDf('DADOS-BASICOS-DA-PARTICIPACAO-EM-BANCA-DE-MESTRADO', root)
detalhes = AbributoXMLparaDf('DETALHAMENTO-DA-ORIENTACAO-EM-ANDAMENTO-DE-MESTRADO', root)
completo = pd.concat([basicos, detalhes], axis=1, join="inner")
dados_gerais = [prod.attrib for prod in root.iter('DADOS-GERAIS')][0]
nome_completo = dados_gerais['NOME-COMPLETO']
completo['autor_ifusp']=nome_completo
return completo
def participacaoQualificacao(root):
basicos = AbributoXMLparaDf('DETALHAMENTO-DA-PARTICIPACAO-EM-BANCA-DE-EXAME-QUALIFICACAO', root)
detalhes = AbributoXMLparaDf('DADOS-BASICOS-DA-PARTICIPACAO-EM-BANCA-DE-EXAME-QUALIFICACAO', root)
completo = pd.concat([basicos, detalhes], axis=1, join="inner")
dados_gerais = [prod.attrib for prod in root.iter('DADOS-GERAIS')][0]
nome_completo = dados_gerais['NOME-COMPLETO']
completo['autor_ifusp']=nome_completo
return completo
def participacaoApresentacao(root):
basicos = AbributoXMLparaDf('DADOS-BASICOS-DA-APRESENTACAO-DE-TRABALHO', root)
detalhes = AbributoXMLparaDf('DETALHAMENTO-DA-APRESENTACAO-DE-TRABALHO', root)
completo = pd.concat([basicos, detalhes], axis=1, join="inner")
dados_gerais = [prod.attrib for prod in root.iter('DADOS-GERAIS')][0]
nome_completo = dados_gerais['NOME-COMPLETO']
completo['autor_ifusp']=nome_completo
return completo
def participacaoBancaConcurso(root):
basicos = AbributoXMLparaDf('DADOS-BASICOS-DA-BANCA-JULGADORA-PARA-CONCURSO-PUBLICO', root)
detalhes = AbributoXMLparaDf('DETALHAMENTO-DA-BANCA-JULGADORA-PARA-CONCURSO-PUBLICO', root)
completo = pd.concat([basicos, detalhes], axis=1, join="inner")
dados_gerais = [prod.attrib for prod in root.iter('DADOS-GERAIS')][0]
nome_completo = dados_gerais['NOME-COMPLETO']
completo['autor_ifusp']=nome_completo
return completo
def participacaoBancaLDocencia(root):
basicos = AbributoXMLparaDf('DADOS-BASICOS-DA-BANCA-JULGADORA-PARA-LIVRE-DOCENCIA', root)
detalhes = AbributoXMLparaDf('DETALHAMENTO-DA-BANCA-JULGADORA-PARA-LIVRE-DOCENCIA', root)
completo = pd.concat([basicos, detalhes], axis=1, join="inner")
dados_gerais = [prod.attrib for prod in root.iter('DADOS-GERAIS')][0]
nome_completo = dados_gerais['NOME-COMPLETO']
completo['autor_ifusp']=nome_completo
return completo
def participacaoBancaTitular(root):
basicos = AbributoXMLparaDf('DADOS-BASICOS-DA-BANCA-JULGADORA-PARA-PROFESSOR-TITULAR', root)
detalhes = AbributoXMLparaDf('DETALHAMENTO-DA-BANCA-JULGADORA-PARA-PROFESSOR-TITULAR', root)
completo = pd.concat([basicos, detalhes], axis=1, join="inner")
dados_gerais = [prod.attrib for prod in root.iter('DADOS-GERAIS')][0]
nome_completo = dados_gerais['NOME-COMPLETO']
completo['autor_ifusp']=nome_completo
return completo
def organizacaoEvento(root):
basicos = AbributoXMLparaDf('DADOS-BASICOS-DA-ORGANIZACAO-DE-EVENTO', root)
detalhes = AbributoXMLparaDf('DETALHAMENTO-DA-ORGANIZACAO-DE-EVENTO', root)
completo = pd.concat([basicos, detalhes], axis=1, join="inner")
dados_gerais = [prod.attrib for prod in root.iter('DADOS-GERAIS')][0]
nome_completo = dados_gerais['NOME-COMPLETO']
completo['autor_ifusp']=nome_completo
return completo
def trabalhoPublicado(root):
basicos = AbributoXMLparaDf('DADOS-BASICOS-DO-TRABALHO', root)
detalhes = AbributoXMLparaDf('DETALHAMENTO-DO-TRABALHO', root)
completo = pd.concat([basicos, detalhes], axis=1, join="inner")
dados_gerais = [prod.attrib for prod in root.iter('DADOS-GERAIS')][0]
nome_completo = dados_gerais['NOME-COMPLETO']
completo['autor_ifusp']=nome_completo
return completo
def orientacaoOutras(root):
basicos = AbributoXMLparaDf('DADOS-BASICOS-DE-OUTRAS-ORIENTACOES-CONCLUIDAS', root)
detalhes = AbributoXMLparaDf('DETALHAMENTO-DE-OUTRAS-ORIENTACOES-CONCLUIDAS', root)
completo = pd.concat([basicos, detalhes], axis=1, join="inner")
dados_gerais = [prod.attrib for prod in root.iter('DADOS-GERAIS')][0]
nome_completo = dados_gerais['NOME-COMPLETO']
completo['autor_ifusp']=nome_completo
return completo
def orientacaoOutras(root):
basicos = AbributoXMLparaDf('DADOS-BASICOS-DE-OUTRAS-ORIENTACOES-CONCLUIDAS', root)
detalhes = AbributoXMLparaDf('DETALHAMENTO-DE-OUTRAS-ORIENTACOES-CONCLUIDAS', root)
completo = pd.concat([basicos, detalhes], axis=1, join="inner")
dados_gerais = [prod.attrib for prod in root.iter('DADOS-GERAIS')][0]
nome_completo = dados_gerais['NOME-COMPLETO']
completo['autor_ifusp']=nome_completo
return completo
# -------- Program execution -------- #
lista_xmls = listarArquivos() # Lists the zipped XML files in the directory
lista = [] # Empty list to store the collected data
for xml in lista_xmls: # Iterates over the file paths, collecting data from the CVs
    abreZip(xml) # Opens the ZIP file and extracts "curriculo.xml" into a "temp" folder
    tree, root = lerXML() # Reads the XML file
    a = artigos(root) # Collects the article data from the CV
    lista.append(a) # Appends the data to the list
df_artigos = pd.concat(lista).reset_index() # Concatenates the article data into a single DataFrame
local = os.path.dirname(os.path.abspath(__file__)) # Folder where this script is located
df_artigos.to_csv(local+'\dados\coletas\df_lattes.csv', index = False) # Saves the collected data to a CSV file
print("-- End of program --")
| StarcoderdataPython |
3344104 | from db import modles
def register(name,password):
obj=modles.Student.get_obj_by_name(name)
if obj:
return False,'student name has exist'
else:
modles.Student(name,password)
return True,'register successfully!'
def choose_school(name,type):
obj=modles.Student.get_obj_by_name(type)
if obj.school:
        return False,'You have already chosen a campus: %s'%obj.school
    else:
        obj.choose_school(name)
        return True,'Selection successful, school: %s'%name
def get_school_and_courses(type):
obj=modles.Student.get_obj_by_name(type)
return obj.school,obj.courses
def get_school_courses(school_name):
obj=modles.School.get_obj_by_name(school_name)
return obj.courses
def choose_course(course_name,type):
obj=modles.Student.get_obj_by_name(type)
obj.add_course(course_name)
obj_course=modles.Course.get_obj_by_name(course_name)
obj_course.add_student_name(type)
def check_score(type):
obj=modles.Student.get_obj_by_name(type)
    return obj.score
 | StarcoderdataPython
1630334 | from setuptools import setup
setup(name='sukima',
version='0.1',
description='A framework for GPT generation utilities for providing a basic API.',
url='https://github.com/harubaru/sukima',
author='<NAME>',
license='BSD2',
    packages=['sukima'])
 | StarcoderdataPython
1671909 | <reponame>rickavmaniac/masonite
from ..app import App
from ..request import Request
from ..response import Response
class ResponseMiddleware:
def __init__(self, request: Request, app: App, response: Response):
self.request = request
self.app = app
self.response = response
def after(self):
if self.request.redirect_url:
self.response.redirect(self.request.redirect_url, status=302)
self.request.reset_redirections()
if self.app.has("Session") and self.response.is_status(200):
try:
self.app.make("Session").driver("memory").reset(flash_only=True)
except Exception:
pass
| StarcoderdataPython |
125622 | <gh_stars>10-100
from git_gopher.CommandInterface import CommandInterface
from git_gopher.NoTagsException import NoTagsException
from git_gopher.HistoryCommandRunner import HistoryCommandRunner
from git_gopher.GitDataGetter import GitDataGetter
class CheckoutTag(CommandInterface):
def __init__(self, hist_command_runner: HistoryCommandRunner, git_data_getter: GitDataGetter):
self._hist_command_runner = hist_command_runner
self._git_data_getter = git_data_getter
def run(self):
try:
tag = self._git_data_getter.get_tag_name(preview='echo "git checkout {2}"')
except NoTagsException:
print("No tags exist for this repository")
return
if tag:
return self._hist_command_runner.run(['git', 'checkout', tag])
| StarcoderdataPython |
3210558 | <filename>appengine_module/gae_ts_mon/test/shared_test.py
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
import gae_ts_mon
from infra_libs.ts_mon import shared
from testing_utils import testing
class SharedTest(testing.AppengineTestCase):
def setUp(self):
super(SharedTest, self).setUp()
shared.reset_for_unittest()
def tearDown(self):
shared.reset_for_unittest()
self.assertEqual([], list(shared.global_metrics_callbacks))
super(SharedTest, self).tearDown()
def test_register_global_metrics(self):
metric = gae_ts_mon.GaugeMetric('test')
shared.register_global_metrics([metric])
self.assertEqual(['test'], list(shared.global_metrics))
shared.register_global_metrics([metric])
self.assertEqual(['test'], list(shared.global_metrics))
shared.register_global_metrics([])
self.assertEqual(['test'], list(shared.global_metrics))
def test_register_global_metrics_callback(self):
shared.register_global_metrics_callback('test', 'callback')
self.assertEqual(['test'], list(shared.global_metrics_callbacks))
shared.register_global_metrics_callback('nonexistent', None)
self.assertEqual(['test'], list(shared.global_metrics_callbacks))
shared.register_global_metrics_callback('test', None)
self.assertEqual([], list(shared.global_metrics_callbacks))
def test_get_instance_entity(self):
entity = shared.get_instance_entity()
# Save the modification, make sure it sticks.
entity.task_num = 42
entity.put()
entity2 = shared.get_instance_entity()
self.assertEqual(42, entity2.task_num)
# Make sure it does not pollute the default namespace.
self.assertIsNone(shared.Instance.get_by_id(entity.key.id()))
| StarcoderdataPython |
62630 | import os
from os import path
from eiffel_loop.scons.c_library import LIBRARY_INFO
from eiffel_loop.package import TAR_GZ_SOFTWARE_PACKAGE
from eiffel_loop.package import SOFTWARE_PATCH
info = LIBRARY_INFO ('source/id3.getlib')
print 'is_list', isinstance (info.configure [0], list)
print 'url', info.url
print info.configure
print 'test_data', info.test_data
pkg = TAR_GZ_SOFTWARE_PACKAGE (info.url, info.c_dev, info.extracted)
patch = SOFTWARE_PATCH (info.patch_url, info.c_dev, info.extracted)
patch.apply ()
# create links to `include' and `test_dir'
| StarcoderdataPython |
155302 | <filename>tests/test_refresh_token.py
from django.contrib.auth import get_user_model
from django.utils import timezone
from .testCases import RelayTestCase, DefaultTestCase
from graphql_auth.constants import Messages
class RefreshTokenTestCaseMixin:
def setUp(self):
self.user = self.register_user(
email="<EMAIL>", username="foo", verified=True, archived=False
)
def test_refresh_token(self):
query = self.get_login_query()
executed = self.make_request(query)
self.assertTrue(executed["refreshToken"])
query = self.get_verify_query(executed["refreshToken"])
executed = self.make_request(query)
self.assertTrue(executed["success"])
self.assertTrue(executed["refreshToken"])
self.assertTrue(executed["payload"])
self.assertFalse(executed["errors"])
def test_invalid_token(self):
query = self.get_verify_query("invalid_token")
executed = self.make_request(query, raw=True)
self.assertIsNotNone(executed["errors"])
class RefreshTokenTestCase(RefreshTokenTestCaseMixin, DefaultTestCase):
def get_login_query(self):
return """
mutation {
tokenAuth(email: "<EMAIL>", password: <PASSWORD>" )
{ refreshToken, success, errors }
}
""" % (
self.default_password
)
def get_verify_query(self, token):
return """
mutation {
refreshToken(refreshToken: "%s" )
{ success, errors, refreshToken, payload }
}
""" % (
token
)
class RefreshTokenRelayTestCase(RefreshTokenTestCaseMixin, RelayTestCase):
def get_login_query(self):
return """
mutation {
tokenAuth(input:{ email: "<EMAIL>", password: <PASSWORD>" })
{ refreshToken, refreshExpiresIn, success, errors }
}
""" % (
self.default_password
)
def get_verify_query(self, token):
return """
mutation {
refreshToken(input: {refreshToken: "%s"} )
{ success, errors, refreshToken, refreshExpiresIn, payload }
}
""" % (
token
)
| StarcoderdataPython |
1620350 | <reponame>ejguan/data<gh_stars>0
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import io
import os
import unittest
import warnings
import expecttest
import torchdata
from _utils._common_utils_for_test import check_hash_fn, create_temp_dir
from torchdata.datapipes.iter import (
EndOnDiskCacheHolder,
FileOpener,
HttpReader,
IterableWrapper,
OnDiskCacheHolder,
S3FileLister,
S3FileLoader,
)
class TestDataPipeRemoteIO(expecttest.TestCase):
def setUp(self):
self.temp_dir = create_temp_dir()
def tearDown(self):
try:
self.temp_dir.cleanup()
except Exception as e:
warnings.warn(f"TestDataPipeRemoteIO was not able to cleanup temp dir due to {e}")
def test_http_reader_iterdatapipe(self):
file_url = "https://raw.githubusercontent.com/pytorch/data/main/LICENSE"
expected_file_name = "LICENSE"
expected_MD5_hash = "bb9675028dd39d2dd2bf71002b93e66c"
http_reader_dp = HttpReader(IterableWrapper([file_url]))
# Functional Test: test if the Http Reader can download and read properly
reader_dp = http_reader_dp.readlines()
it = iter(reader_dp)
path, line = next(it)
self.assertEqual(expected_file_name, os.path.basename(path))
self.assertTrue(b"BSD" in line)
# Reset Test: http_reader_dp has been read, but we reset when calling check_hash()
check_cache_dp = http_reader_dp.check_hash({file_url: expected_MD5_hash}, "md5", rewind=False)
it = iter(check_cache_dp)
path, stream = next(it)
self.assertEqual(expected_file_name, os.path.basename(path))
self.assertTrue(io.BufferedReader, type(stream))
# __len__ Test: returns the length of source DataPipe
self.assertEqual(1, len(http_reader_dp))
def test_on_disk_cache_holder_iterdatapipe(self):
tar_file_url = "https://raw.githubusercontent.com/pytorch/data/main/test/_fakedata/csv.tar.gz"
expected_file_name = os.path.join(self.temp_dir.name, "csv.tar.gz")
expected_MD5_hash = "42cd45e588dbcf64c65751fbf0228af9"
tar_hash_dict = {expected_file_name: expected_MD5_hash}
tar_file_dp = IterableWrapper([tar_file_url])
with self.assertRaisesRegex(RuntimeError, "Expected `OnDiskCacheHolder` existing"):
_ = tar_file_dp.end_caching()
def _filepath_fn(url):
filename = os.path.basename(url)
return os.path.join(self.temp_dir.name, filename)
tar_cache_dp = tar_file_dp.on_disk_cache(
filepath_fn=_filepath_fn,
hash_dict=tar_hash_dict,
hash_type="md5",
)
# DataPipe Constructor
tar_cache_dp = HttpReader(tar_cache_dp)
# Start iteration without `end_caching`
with self.assertRaisesRegex(RuntimeError, "Please call"):
_ = list(tar_cache_dp)
# Both filepath_fn and same_filepath_fn are set
with self.assertRaisesRegex(ValueError, "`filepath_fn` is mutually"):
_ = tar_cache_dp.end_caching(mode="wb", filepath_fn=_filepath_fn, same_filepath_fn=True)
tar_cache_dp = tar_cache_dp.end_caching(mode="wb", same_filepath_fn=True)
# File doesn't exist on disk
self.assertFalse(os.path.exists(expected_file_name))
path = list(tar_cache_dp)[0]
# File is cached to disk
self.assertTrue(os.path.exists(expected_file_name))
self.assertEqual(expected_file_name, path)
self.assertTrue(check_hash_fn(expected_file_name, expected_MD5_hash))
# Modify the downloaded file to trigger downloading again
with open(expected_file_name, "w") as f:
f.write("0123456789abcdef")
self.assertFalse(check_hash_fn(expected_file_name, expected_MD5_hash))
path = list(tar_cache_dp)[0]
self.assertTrue(check_hash_fn(expected_file_name, expected_MD5_hash))
# Call `end_caching` again
with self.assertRaisesRegex(RuntimeError, "`end_caching` can only be invoked once"):
_ = tar_cache_dp.end_caching()
# Multiple filepaths
def _gen_filepath_fn(tar_path):
for i in range(3):
yield os.path.join(os.path.dirname(tar_path), "csv", f"{i}.csv")
# DataPipe Constructor
file_cache_dp = OnDiskCacheHolder(tar_cache_dp, filepath_fn=_gen_filepath_fn)
file_cache_dp = FileOpener(file_cache_dp, mode="rb")
# Functional API
file_cache_dp = file_cache_dp.load_from_tar()
def _csv_filepath_fn(csv_path):
return os.path.join(self.temp_dir.name, "csv", os.path.basename(csv_path))
# Read and decode
def _read_and_decode(x):
return x.read().decode()
file_cache_dp = file_cache_dp.map(fn=_read_and_decode, input_col=1)
file_cache_dp = EndOnDiskCacheHolder(file_cache_dp, mode="w", filepath_fn=_csv_filepath_fn, skip_read=True)
cached_it = iter(file_cache_dp)
for expected_csv_path in _gen_filepath_fn(expected_file_name):
# File doesn't exist on disk
self.assertFalse(os.path.exists(expected_csv_path))
csv_path = next(cached_it)
# File is cached to disk
self.assertTrue(os.path.exists(expected_csv_path))
self.assertEqual(expected_csv_path, csv_path)
# Cache decompressed archive but only check root directory
root_dir = "temp"
file_cache_dp = OnDiskCacheHolder(
tar_cache_dp, filepath_fn=lambda tar_path: os.path.join(os.path.dirname(tar_path), root_dir)
)
file_cache_dp = FileOpener(file_cache_dp, mode="rb").load_from_tar()
file_cache_dp = file_cache_dp.end_caching(
mode="wb",
filepath_fn=lambda file_path: os.path.join(self.temp_dir.name, root_dir, os.path.basename(file_path)),
)
cached_it = iter(file_cache_dp)
for i in range(3):
expected_csv_path = os.path.join(self.temp_dir.name, root_dir, f"{i}.csv")
# File doesn't exist on disk
self.assertFalse(os.path.exists(expected_csv_path))
csv_path = next(cached_it)
# File is cached to disk
self.assertTrue(os.path.exists(expected_csv_path))
self.assertEqual(expected_csv_path, csv_path)
def test_s3_io_iterdatapipe(self):
# sanity test
file_urls = ["s3://ai2-public-datasets"]
try:
s3_lister_dp = S3FileLister(IterableWrapper(file_urls))
s3_loader_dp = S3FileLoader(IterableWrapper(file_urls))
except ModuleNotFoundError:
warnings.warn(
"S3 IO datapipes or C++ extension '_torchdata' isn't built in the current 'torchdata' package"
)
return
# S3FileLister: different inputs
input_list = [
[["s3://ai2-public-datasets"], 71], # bucket without '/'
[["s3://ai2-public-datasets/"], 71], # bucket with '/'
[["s3://ai2-public-datasets/charades"], 18], # folder without '/'
[["s3://ai2-public-datasets/charades/"], 18], # folder without '/'
[["s3://ai2-public-datasets/charad"], 18], # prefix
[
[
"s3://ai2-public-datasets/charades/Charades_v1",
"s3://ai2-public-datasets/charades/Charades_vu17",
],
12,
], # prefixes
[["s3://ai2-public-datasets/charades/Charades_v1.zip"], 1], # single file
[
[
"s3://ai2-public-datasets/charades/Charades_v1.zip",
"s3://ai2-public-datasets/charades/Charades_v1_flow.tar",
"s3://ai2-public-datasets/charades/Charades_v1_rgb.tar",
"s3://ai2-public-datasets/charades/Charades_v1_480.zip",
],
4,
], # multiple files
[
[
"s3://ai2-public-datasets/charades/Charades_v1.zip",
"s3://ai2-public-datasets/charades/Charades_v1_flow.tar",
"s3://ai2-public-datasets/charades/Charades_v1_rgb.tar",
"s3://ai2-public-datasets/charades/Charades_v1_480.zip",
"s3://ai2-public-datasets/charades/Charades_vu17",
],
10,
], # files + prefixes
]
for input in input_list:
s3_lister_dp = S3FileLister(IterableWrapper(input[0]), region="us-west-2")
self.assertEqual(sum(1 for _ in s3_lister_dp), input[1], f"{input[0]} failed")
# S3FileLister: prefixes + different region
file_urls = [
"s3://aft-vbi-pds/bin-images/111",
"s3://aft-vbi-pds/bin-images/222",
]
s3_lister_dp = S3FileLister(IterableWrapper(file_urls), region="us-east-1")
self.assertEqual(sum(1 for _ in s3_lister_dp), 2212, f"{input} failed")
# S3FileLister: incorrect inputs
input_list = [
[""],
["ai2-public-datasets"],
["s3://"],
["s3:///bin-images"],
]
for input in input_list:
with self.assertRaises(ValueError, msg=f"{input} should raise ValueError."):
s3_lister_dp = S3FileLister(IterableWrapper(input), region="us-east-1")
for _ in s3_lister_dp:
pass
# S3FileLoader: loader
input = [
"s3://charades-tar-shards/charades-video-0.tar",
"s3://charades-tar-shards/charades-video-1.tar",
] # multiple files
s3_loader_dp = S3FileLoader(input, region="us-west-2")
self.assertEqual(sum(1 for _ in s3_loader_dp), 2, f"{input} failed")
input = [["s3://aft-vbi-pds/bin-images/100730.jpg"], 1]
s3_loader_dp = S3FileLoader(input[0], region="us-east-1")
self.assertEqual(sum(1 for _ in s3_loader_dp), input[1], f"{input[0]} failed")
# S3FileLoader: incorrect inputs
input_list = [
[""],
["ai2-public-datasets"],
["s3://"],
["s3:///bin-images"],
["s3://ai2-public-datasets/bin-image"],
]
for input in input_list:
with self.assertRaises(ValueError, msg=f"{input} should raise ValueError."):
s3_loader_dp = S3FileLoader(input, region="us-east-1")
for _ in s3_loader_dp:
pass
# integration test
input = [["s3://charades-tar-shards/"], 10]
s3_lister_dp = S3FileLister(IterableWrapper(input[0]), region="us-west-2")
s3_loader_dp = S3FileLoader(s3_lister_dp, region="us-west-2")
self.assertEqual(sum(1 for _ in s3_loader_dp), input[1], f"{input[0]} failed")
if __name__ == "__main__":
unittest.main()
| StarcoderdataPython |
3215199 | import os, sys
import SiEPIC
try:
import siepic_tools
except:
pass
# import xml before lumapi (SiEPIC.lumerical), otherwise XML doesn't work:
from xml.etree import cElementTree
import math
from SiEPIC.utils import arc, arc_xy, arc_wg, arc_to_waveguide, points_per_circle
import pya
from SiEPIC.utils import get_technology, get_technology_by_name
# Import KLayout Python API methods:
# Box, Point, Polygon, Text, Trans, LayerInfo, etc
from pya import *
import pya
#import numpy as np
MODULE_NUMPY = False
op_tag = "" #operation tag which defines whether we are loading library in script or GUI env
try:
# import pya from klayout
import pya
if("Application" in str(dir(pya))):
from SiEPIC.utils import get_technology_by_name
op_tag = "GUI"
#import pya functions
else:
raise ImportError
except:
import klayout.db as pya
from zeropdk import Tech
op_tag = "script"
    lyp_filepath = os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), "klayout_Layers_GSiP.lyp")
print(lyp_filepath)
from pya import Box, Point, Polygon, Text, Trans, LayerInfo, \
PCellDeclarationHelper, DPoint, DPath, Path, ShapeProcessor, \
Library, CellInstArray
path = os.path.dirname(os.path.abspath(__file__))
def linspace_without_numpy(low, high, length):
step = ((high-low) * 1.0 / length)
return [low+i*step for i in range(length)]
def pin(w,pin_text, trans, LayerPinRecN, dbu, cell):
"""
    w: Waveguide Width, e.g., 500 (in dbu)
    pin_text: Pin Text, e.g., "pin1"
    trans: e.g., trans = Trans(0, False, 0, 0) - first number is 0, 1, 2, or 3.
    LayerPinRecN: PinRec layer, e.g., layout.layer(TECHNOLOGY['PinRec'])
    dbu: database unit of the layout, e.g., layout.dbu
    cell: cell into which the pin path and label are drawn
"""
# Create the pin, as short paths:
from SiEPIC._globals import PIN_LENGTH
pin = trans*Path([Point(-PIN_LENGTH/2, 0), Point(PIN_LENGTH/2, 0)], w)
cell.shapes(LayerPinRecN).insert(pin)
text = Text (pin_text, trans)
shape = cell.shapes(LayerPinRecN).insert(text)
shape.text_size = w*0.8
print("Done drawing the layout for - pin" )
| StarcoderdataPython |
from django.conf.urls import url, include
from . import views
app_name = 'Article'
urlpatterns = [
url (r'^create/',views.CreateArticle,name="CreateArticle"),
url (r'^home/',views.home ,name="Home"),
url (r'^(?P<pk>[0-9]+)/addLike/',views.AddLike ,name="AddLike"),
    url (r'^(?P<pk>[0-9]+)/addDislike/',views.AddDislike ,name="AddDislike")
]
| StarcoderdataPython |
1684975 | <filename>adminmgr/media/code/A3/task2/BD_135_703_2371_6tOSZiH.py
import findspark
findspark.init()
from pyspark import SparkConf,SparkContext
from pyspark.streaming import StreamingContext
from pyspark.sql import Row,SQLContext
import sys
import requests
def func(rdd):
sorted_rdd = rdd.sortBy(lambda x: (-x[1],x[0])).filter(lambda y: y[0] !='')
s_list=sorted_rdd.collect()
if(s_list!=[]):
print(s_list[0][0],s_list[1][0],s_list[2][0],s_list[3][0],s_list[4][0],sep=",")
def func2(line):
hashtag=line.split(";")[7]
if(',' in hashtag):
return hashtag.split(",")
return [hashtag]
conf1=SparkConf()
conf1.setAppName("BigData")
sc1=SparkContext(conf=conf1)
sscp=StreamingContext(sc1,int(sys.argv[2]))
sscp.checkpoint("/checkpoint_BIGDATA")
dataStream1=sscp.socketTextStream("localhost",9009)
hashtags=dataStream1.window(int(sys.argv[1]),1).flatMap(func2).map(lambda h : (h,1)).reduceByKey(lambda x,y:int(x)+int(y))
hashtags.foreachRDD(func)
sscp.start()
sscp.awaitTermination(60)
sscp.stop()
| StarcoderdataPython |
1778538 | import io
import re
from urllib.parse import urlparse
import requests
from fair_test import FairTest, FairTestEvaluation
class MetricTest(FairTest):
metric_path = 'i2-fair-vocabularies-resolve'
applies_to_principle = 'I2'
title = 'Metadata uses resolvable FAIR Vocabularies'
description = """Maturity Indicator to test if the linked data metadata uses terms that resolve to linked (FAIR) data.
One predicate from each hostname is tested, the test is successful if more than 60% of the hostnames resolve to RDF."""
author = 'https://orcid.org/0000-0002-1501-1082'
metric_version = '0.1.0'
topics = ['metadata', 'linked data', 'advanced compliance']
test_test={
'https://w3id.org/ejp-rd/fairdatapoints/wp13/dataset/c5414323-eab1-483f-a883-77951f246972': 1,
'https://doi.org/10.1594/PANGAEA.908011': 1,
'http://example.com': 0,
}
def evaluate(self, eval: FairTestEvaluation):
g = eval.retrieve_metadata(eval.subject)
if not isinstance(g, (list, dict)) and len(g) > 1:
eval.info(f'Successfully found and parsed RDF metadata available at {eval.subject}. It contains {str(len(g))} triples')
else:
eval.failure(f"No RDF metadata found at the subject URL {eval.subject}")
return eval.response()
domains_tested = []
domains_resolving = 0
for s, p, o in g.triples((None, None, None)):
result = urlparse(str(p))
if result.netloc in domains_tested:
continue
eval.info(f"Testing URI {str(p)} for the domain {result.netloc}")
domains_tested.append(result.netloc)
g_test = eval.retrieve_metadata(str(p))
if not isinstance(g_test, (list, dict)) and len(g_test) > 0:
domains_resolving += 1
else:
eval.warn(f"URI not resolving to RDF: {str(p)} (we consider the domain {result.netloc} does not resolve to Linked Data)")
eval.info(f"{str(domains_resolving)} URLs resolving, in {len(domains_tested)} domains tested: {', '.join(domains_tested)}")
# Success if more than 60% of domains resolves
percent_resolves = domains_resolving / len(domains_tested)
if percent_resolves >= 0.6:
eval.success(f"{str(percent_resolves*100)}% of the domains URL used by predicates resolves to RDF")
else:
eval.failure(f"Only {str(percent_resolves*100)}% of the domains URL used by predicates resolves to RDF (60% required). Make sure you are using URIs that resolves to RDF as predicates.")
return eval.response()
| StarcoderdataPython |
1634000 | #!/usr/bin/env python
# A logFileParser class to parse VistA FileMan Schema log files and generate
# the FileMan Schema and dependencies among packages.
#---------------------------------------------------------------------------
# Copyright 2012 The Open Source Electronic Health Record Agent
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#----------------------------------------------------------------
from builtins import range
from builtins import object
from future.utils import iteritems
import glob
import re
import os
import os.path
import sys
import subprocess
import re
import csv
import codecs
import argparse
from datetime import datetime, date, time
from CrossReference import CrossReference, Routine, Package, Global, PlatformDependentGenericRoutine
from CrossReference import FileManField, FileManFile, FileManFieldFactory
from CrossReference import LocalVariable, GlobalVariable, NakedGlobal, MarkedItem, LabelReference
from LogManager import logger
NAME_LOC_TYPE_REGEX = re.compile("(?P<Name>^[^ ].*) +(?P<Loc>[^ ]*;[^ ]*) +(?P<Type>[^ ]+.*$)")
NAME_LOC_REGEX = re.compile("(?P<Name>^[^ ].*) +(?P<Loc>[^ ]*;[^ ]*$)")
NAME_TYPE_REGEX = re.compile("(?P<Name>^[^ ].*) +(?P<Type>[^ ]+.*$)")
POINTER_TO_REGEX = re.compile("^POINTER TO .* \(#(?P<File>[.0-9]+)\)")
UNDEFINED_POINTER = re.compile("POINTER[ *]+ TO AN UNDEFINED FILE")
SUBFILE_REGEX = re.compile("Multiple #(?P<File>[.0-9]+)")
FILE_REGEX = re.compile("^ +(?P<File>[0-9\.]+) +")
POINTED_TO_BY_VALUE_REGEX = re.compile("field \(#(?P<fieldNo>[0-9.]+)\) (of the .*? sub-field \(#(?P<subFieldNo>[0-9.]+)\))?.*of the (?P<Name>.*) File \(#(?P<FileNo>[0-9.]+)\)$")
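# Illustration (invented sample line, not from a real DD listing): for a field
# listing line such as "NAME                0;1        FREE TEXT",
# NAME_LOC_TYPE_REGEX captures Name="NAME..." (space-padded, strip before use),
# Loc="0;1" (global subscript;piece) and Type="FREE TEXT". NAME_LOC_REGEX and
# NAME_TYPE_REGEX cover lines where the type or the location column is absent.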
class IDDSectionParser(object):
def __init__(self):
pass
def onSectionStart(self, line, section, Global, CrossReference):
pass
def onSectionEnd(self, line, section, Global, CrossReference):
pass
def parseLine(self, line, Global, CrossReference):
pass
class DescriptionSectionParser(IDDSectionParser):
def __init__(self):
self._lines=None
self._curLine = None
self._section = IDataDictionaryListFileLogParser.DESCRIPTION_SECTION
def onSectionStart(self, line, section, Global, CrossReference):
self._lines=[]
self._curLine = ""
def onSectionEnd(self, line, section, Global, CrossReference):
if self._curLine:
self._lines.append(self._curLine)
Global.setDescription(self._lines)
def parseLine(self, line, Global, CrossReference):
if not line.strip(): # assume this is the paragraph break
if not self._curLine:
self._lines.append(self._curLine)
self._curLine = ""
else:
self._curLine += " " + line.strip()
#===============================================================================
# A class to parse Field # section in Data Dictionary schema log file output
#===============================================================================
class FileManFieldSectionParser(IDDSectionParser):
DEFAULT_VALUE_INDENT = 32
DEFAULT_NAME_INDENT = 14
MAXIMIUM_TYPE_START_INDEX = 50
#Dictionary
StringTypeMappingDict = {"COMPUTED":FileManField.FIELD_TYPE_COMPUTED,
"BOOLEAN COMPUTED":FileManField.FIELD_TYPE_COMPUTED,
"COMPUTED POINTER":FileManField.FIELD_TYPE_COMPUTED,
"COMPUTED DATE":FileManField.FIELD_TYPE_DATE_TIME,
"DATE":FileManField.FIELD_TYPE_DATE_TIME,
"NUMBER":FileManField.FIELD_TYPE_NUMBER,
"SET":FileManField.FIELD_TYPE_SET,
"FREE TEXT":FileManField.FIELD_TYPE_FREE_TEXT,
"WORD-PROCESSING":FileManField.FIELD_TYPE_WORD_PROCESSING,
"VARIABLE POINTER":FileManField.FIELD_TYPE_VARIABLE_FILE_POINTER,
"MUMPS":FileManField.FIELD_TYPE_MUMPS}
FieldAttributesInfoList = [["""(Required)""", "_isRequired"],
["""(audited)""", "_isAudited"],
["""(Add New Entry without Asking)""", "_isAddNewEntryWithoutAsking"],
["""(Multiply asked)""", "_isMultiplyAsked"],
["""(Key field)""", "_isKeyField"],
["""(NOWRAP)""", "_isNoWrap"],
['''(IGNORE "|")''', "_ignorePipe"]]
#'''Short Descr:''',
#'''Estimated Case Length (HOURS:''',
#'''Uniqueness Index:''')
#'''Description:''',
FileFieldCaptionList = ('''LAST EDITED BY:''',
'''LOSSES:''',
'''GROUP:''',
'''TOTAL DIALYSIS PATIENT REMAINING:''',
'''NOTES:''',
'''PATIENTS COMPLETING INITIAL TRAINING:''',
'''HELP-PROMPT:''',
'''SCREEN ON FILE:''',
'''EXECUTABLE HELP:''',
'''COMPLICATIONS WITHIN 24 HOURS:''',
'''REQUEST ENTERED BY:''',
'''IDENTIFIED BY:''',
'''DESIGNATED BEDS IN GENERAL PURPOSE UNITS:''',
'''DIALYSIS CENTER TREATMENTS:''',
'''TECHNICAL DESCR:''',
'''CROSS-REFERENCE:''',
'''RECORD INDEXES:''',
'''AUDIT CONDITION:''',
'''SECONDARY KEY:''',
'''DIALYSIS/BEDS/FACILITIES:''',
'''SUM:''',
'''AUDIT:''',
'''HOME (SELF) DIALYSIS TRAINING:''',
'''ALGORITHM:''',
'''DELETE TEST:''',
'''LAST EDITED:''',
'''SCREEN:''',
'''INDEXED BY:''',
'''HOME DIALYSIS:''',
'''MUMPS CODE:''',
'''DESCRIPTION:''',
'''ADDITIONS DURING REPORTING PERIOD:''',
'''EXPLANATION:''',
'''FIELD INDEX:''',
'''PRE-LOOKUP:''',
'''PRIMARY KEY:''',
'''OUTPUT TRANSFORM:''',
'''LAYGO TEST:''',
'''INPUT TRANSFORM:''')
def __init__(self):
self._lines=None
self._section = IDataDictionaryListFileLogParser.FILEMAN_FIELD_SECTION
self._curFile = None
self._field = None
self._isSubFile = False
def onSectionStart(self, line, section, Global, CrossReference):
self._lines = []
result = DataDictionaryListFileLogParser.FILEMAN_FIELD_START.search(line)
assert result
fileNo = result.group('FileNo')
fieldNo = result.group("FieldNo")
self._isSubFile = float(fileNo) != float(Global.getFileNo())
if self._isSubFile:
self._curFile = Global.getSubFileByFileNo(fileNo)
assert self._curFile, "Could not find subFile [%s] in file [%s] line [%s]" % (fileNo, Global.getFileNo(), line)
else:
self._curFile = Global
restOfLineStart = line.find("," + fieldNo) + len(fieldNo)
startIdent = self.DEFAULT_NAME_INDENT
defaultIdentLevel = self.__getDefaultIndentLevel__(self._curFile, self.DEFAULT_NAME_INDENT)
if restOfLineStart > defaultIdentLevel:
logger.warning("FileNo: %s, FieldNo: %s, line: %s, may not be a valid field no, %d, %d" %
(fileNo, fieldNo, line, restOfLineStart, defaultIdentLevel))
try:
floatValue = float(fieldNo)
except ValueError:
logger.error("invalid fieldNo %s" % fieldNo)
fieldNo = line[line.find(",")+1:defaultIdentLevel]
floatValue = float(fieldNo)
restOfLine = line[line.find("," + fieldNo) + len(fieldNo)+1:].strip()
result = NAME_LOC_TYPE_REGEX.search(restOfLine)
fName, fType, fLocation = None, None, None
if result:
fName = result.group('Name').strip()
fLocation = result.group('Loc').strip()
if fLocation == ";":
fLocation = None
fType = result.group('Type').strip()
else:
# handle three cases, 1. no location info 2. no type info 3. Both
if restOfLine.find(";") != -1: # missing type info
logger.warn("Missing Type information [%s]" % line)
result = NAME_LOC_REGEX.search(restOfLine)
if result:
fName = result.group('Name').strip()
fLocation = result.group('Loc').strip()
else:
logger.error("Could not parse [%s]" % restOfLine)
return
else: # missing location, assume at least two space seperate name and type
result = NAME_TYPE_REGEX.search(restOfLine)
if result:
fName = result.group('Name').strip()
fType = result.group('Type').strip()
else:
logger.warn("Guessing Name: %s at line [%s]" % (restOfLine.strip(), line))
stripedType = ""
if fType:
stripedType = self.__stripFieldAttributes__(fType)
if stripedType:
self.__createFieldByType__(fieldNo, stripedType, fName, fLocation, line, Global, CrossReference)
else:
self._field = FileManFieldFactory.createField(fieldNo, fName, FileManField.FIELD_TYPE_NONE, fLocation)
self._curFile.addFileManField(self._field)
if stripedType:
self.__parseFieldAttributes__(fType)
def onSectionEnd(self, line, section, Global, CrossReference):
if not self._lines:
pass
#elif self._isSubFilePointer and self._pointedToSubFile:
# self.__parsingSubFileDescription__()
elif self._field.isVariablePointerType():
self.__parsingVariablePointer__(Global, CrossReference)
elif self._field.isSetType():
self.__parsingSetTypeDetails__(Global)
# this is to parse the field details part
self.__parseFieldDetails__()
# this is to find out how many subfileds in the schema file
#self.__findTotalSubFileds__()
self.__resetVar__()
def parseLine(self, line, Global, CrossReference):
if not self._lines:
self._lines=[]
self._lines.append(line)
def __parseFieldDetails__(self):
if not self._lines:
return
curCaption = None
curValues = None
for line in self._lines:
found = False
for caption in self.FileFieldCaptionList:
result = re.search(" +%s ?(?P<Value>.*)" % caption, line)
if result:
if curCaption:
self._field.addProp(curCaption, curValues)
curCaption = caption
curValues = []
value = result.group('Value')
if value:
curValues.append(value.strip())
else:
curValues.append("")
found = True
break
if not found and curCaption:
if not curValues: curValues = []
curValues.append(line.strip())
if curCaption:
self._field.addProp(curCaption, curValues)
def __findTotalSubFileds__(self):
if not self._lines:
pass
indentValue = self.__getDefaultIndentLevel__(self._curFile, self.DEFAULT_NAME_INDENT)
for line in self._lines:
result = re.search("^ {%d,%d}(?P<Name>[A-Z][^:]+):" % (self.DEFAULT_NAME_INDENT, indentValue), line)
if result:
name = result.group('Name')
if name.startswith("SCREEN ON FILE "):
name = "SCREEN ON FILE"
def __getDefaultIndentLevel__(self, pointedToSubFile, startIndent):
retValue = startIndent
startFile = pointedToSubFile
while not startFile.isRootFile():
startFile = startFile.getParentFile()
retValue += 2
return retValue
def __parsingSubFileDescription__(self):
description = None
index = 0
desPos = -1
indentValue = self.__getDefaultIndentLevel__(self._pointedToSubFile,
self.DEFAULT_VALUE_INDENT)
for index in range(len(self._lines)):
if desPos == -1:
desPos = self._lines[index].find("DESCRIPTION:")
else:
if re.search("^ {%d,%d}[^ ]" % (self.DEFAULT_VALUE_INDENT, indentValue), self._lines[index]):
if not description: description = []
description.append(self._lines[index].strip())
else:
break
self._pointedToSubFile.setDescription(description)
def __parsingSetTypeDetails__(self, Global):
index, detailList, found = 0, None, False
indentValue = self.__getDefaultIndentLevel__(self._curFile,
self.DEFAULT_VALUE_INDENT)
for index in range(len(self._lines)):
if not found:
result = re.search("^ {%d,%d}(?P<Detail>[^ ]+.*)" % (self.DEFAULT_VALUE_INDENT, indentValue),
self._lines[index])
if result:
if not detailList: detailList = []
detailList.append(result.group('Detail').strip())
found = True
continue
else:
result = re.search("^ {%d,%d}(?P<Detail>[^ ]+.*)" % (self.DEFAULT_VALUE_INDENT, indentValue),
self._lines[index])
if result:
detailList.append(result.group('Detail').strip())
else:
break
self._field.setSetMembers(detailList)
def __parsingVariablePointer__(self, Global, CrossReference):
index, fileList, found = 0, None, False
indentValue = self.__getDefaultIndentLevel__(self._curFile,
self.DEFAULT_NAME_INDENT)
for index in range(len(self._lines)):
if not found:
if re.search("^ {%d,%d}FILE ORDER PREFIX LAYGO MESSAGE$" % (self.DEFAULT_NAME_INDENT, indentValue),
self._lines[index]):
found = True
continue
else:
if re.search("^ {%d,}$" % indentValue, self._lines[index]):
break
else:
result = FILE_REGEX.search(self._lines[index])
if result:
filePointedTo = CrossReference.getGlobalByFileNo(result.group('File'))
if not filePointedTo:
# log an warning for now, will handle this case later
logger.warning("INVALID File! File is %s, Global is %s" %
(result.group('File'), Global))
continue
if not fileList: fileList = []
fileList.append(filePointedTo)
self._field.setPointedToFiles(fileList)
def __createFieldByType__(self, fieldNo, fType, fName, fLocation, line, Global, CrossReference):
result = UNDEFINED_POINTER.search(fType)
if result:
self._field = FileManFieldFactory.createField(fieldNo, fName,
FileManField.FIELD_TYPE_FILE_POINTER, fLocation)
return
result = POINTER_TO_REGEX.search(fType)
if result:
fileNo = result.group('File')
filePointedTo = CrossReference.getGlobalByFileNo(fileNo)
self._field = FileManFieldFactory.createField(fieldNo, fName,
FileManField.FIELD_TYPE_FILE_POINTER,
fLocation)
if not filePointedTo:
logger.warning("Could not find file pointed to [%s], [%s], line:[%s]" %
(fileNo, self._curFile, line))
else:
self._field.setPointedToFile(filePointedTo)
return
# deal with file pointer to subFiles
result = SUBFILE_REGEX.search(fType)
if result:
# create a field for sub file type
self._field = FileManFieldFactory.createField(fieldNo, fName,
FileManField.FIELD_TYPE_SUBFILE_POINTER,
fLocation)
fileNo = result.group('File')
subFile = Global.getSubFileByFileNo(fileNo)
if not subFile: # this is a new subfile
subFile = FileManFile(fileNo, fName, self._curFile)
self._curFile.addFileManSubFile(subFile)
if self._isSubFile:
Global.addFileManSubFile(subFile)
self._field.setPointedToSubFile(subFile)
CrossReference.addFileManSubFile(subFile)
return
for (key, value) in iteritems(self.StringTypeMappingDict):
if fType.startswith(key):
self._field = FileManFieldFactory.createField(fieldNo, fName, value, fLocation)
break
if not self._field:
# double check the loc and type
if line.find(fType) > self.MAXIMIUM_TYPE_START_INDEX:
fType = line[self.MAXIMIUM_TYPE_START_INDEX:]
if fLocation:
fLocation = line[line.find(fLocation):self.MAXIMIUM_TYPE_START_INDEX]
self.__createFieldByType__(fieldNo, fType, fName, fLocation, line, Global, CrossReference)
assert self._field, "Could not find the right type for %s, %s, %s, %s, %s" % (fType, fLocation, fieldNo, line, self._curFile.getFileNo())
def __stripFieldAttributes__(self, fType):
outType = fType
for nameAttr in self.FieldAttributesInfoList:
if outType.find(nameAttr[0]) != -1:
outType = outType.replace(nameAttr[0], "")
return outType.strip()
def __parseFieldAttributes__(self, fType):
for nameAttr in self.FieldAttributesInfoList:
if fType.find(nameAttr[0]) != -1:
fType = fType.replace(nameAttr[0], "")
self._field.__setattr__(nameAttr[1], True)
        fType = fType.strip()
self._field.setTypeName(fType)
def __resetVar__(self):
self._lines = None
self._isSubFile = False
self._curFile = None
self._field = None
#===============================================================================
# A class to parse Pointed To By section in Data Dictionary schema log file output
#===============================================================================
class PointedToBySectionParser(IDDSectionParser):
POINTED_TO_BY_VALUE_INDEX = 15
def __init__(self):
self._global = None
self._section = IDataDictionaryListFileLogParser.POINTED_TO_BY_SECTION
def onSectionStart(self, line, section, Global, CrossReference):
assert self._section == section
self._global = Global
self.parseLine(line, Global, CrossReference)
def onSectionEnd(self, line, section, Global, CrossReference):
assert self._section == section
self._global = None
def parseLine(self, line, Global, CrossReference):
assert self._global
strippedLine = line.rstrip(" ")
if not strippedLine:
return
value = strippedLine[self.POINTED_TO_BY_VALUE_INDEX:]
result = POINTED_TO_BY_VALUE_REGEX.search(value)
if result:
fileManNo = result.group("FileNo")
fieldNo = result.group('fieldNo')
subFileNo = result.group('subFieldNo')
pointedByGlobal = CrossReference.getGlobalByFileNo(fileManNo)
if pointedByGlobal:
self._global.addPointedToByFile(pointedByGlobal, fieldNo, subFileNo)
else:
logger.warning("Could not find global based on %s, %s" %
(fileManNo, result.group("Name")))
else:
logger.warning("Could not parse pointer reference [%s] in file [%s]" %
(line, self._global.getFileNo()))
class IDataDictionaryListFileLogParser(object):
# Enum for section value
DESCRIPTION_SECTION = 1
COMPILED_CROSS_REFERENCE_ROUTINE_SECTION = 2
FILE_SCREEN_SECTION = 3
SPECIAL_LOOKUP_ROUTINE_SECTION = 4
POST_SELECTION_ACTION_SECTION = 5
DD_ACCESS_SECTION = 6
RD_ACCESS_SECTION = 7
WR_ACCESS_SECTION = 8
DEL_ACCESS_SECTION = 9
LAYGO_ACCESS_SECTION = 10
AUDIT_ACCESS_SECTION = 11
IDENTIFIED_BY_SECTION = 12
POINTED_TO_BY_SECTION = 13
A_FIELD_IS_SECTION = 14
TRIGGERED_BY_SECTION = 15
CROSS_SECTION = 16
REFERENCED_BY_SECTION = 17
INDEXED_BY_SECTION = 18
PRIMARY_KEY_SECTION = 19
FILEMAN_FIELD_SECTION = 20
FILES_POINTED_TO_SECTION = 21
FILE_RECORD_INDEXED_SECTION = 22
SUBFILE_RECORD_INDEXED_SECTION = 23
INPUT_TEMPLATE_SECTION = 24
PRINT_TEMPLATE_SECTION = 25
SORT_TEMPLATE_SECTION = 26
FORM_BLOCKS_SECTION = 27
#===============================================================================
# A class to parse Data Dictionary log file output and generate Global/Package dependencies
#===============================================================================
class DataDictionaryListFileLogParser(IDataDictionaryListFileLogParser):
# this is the global member
DESCRIPTION_START = re.compile(r"^-{254,254}$")
COMPILED_CROSS_REFERENCE_ROUTINE_START = re.compile("^COMPILED CROSS-REFERENCE ROUTINE:")
FILE_SCREEN_START = re.compile("^FILE SCREEN \(SCR-node\) :")
SPECIAL_LOOKUP_ROUTINE_START = re.compile("^SPECIAL LOOKUP ROUTINE :")
POST_SELECTION_ACTION_START = re.compile("^POST-SELECTION ACTION +:")
DD_ACCESS_START = re.compile("^ +DD ACCESS:")
RD_ACCESS_START = re.compile("^ +RD ACCESS:")
WR_ACCESS_START = re.compile("^ +WR ACCESS:")
DEL_ACCESS_START = re.compile("^ +DEL ACCESS:")
LAYGO_ACCESS_START = re.compile("^ +LAYGO ACCESS:")
AUDIT_ACCESS_START = re.compile("^ +AUDIT ACCESS:")
IDENTIFIED_BY_START = re.compile("^IDENTIFIED BY:")
POINTED_TO_BY_START = re.compile("^POINTED TO BY: ")
A_FIELD_IS_START = re.compile("^A FIELD IS$")
TRIGGERED_BY_START = re.compile("^TRIGGERED BY :")
CROSS_START = re.compile("^CROSS$")
REFERENCED_BY_START = re.compile("^REFERENCED BY:")
INDEXED_BY_START = re.compile("^INDEXED BY: ")
PRIMARY_KEY_START = re.compile("^PRIMARY KEY: ")
FILEMAN_FIELD_START = re.compile("(?P<FileNo>^[.0-9]+),(?P<FieldNo>[.0-9]+)")
FILES_POINTED_TO_START = re.compile("^ +FILES POINTED TO +FIELDS$")
FILE_RECORD_INDEXED_START = re.compile("^File #[.0-9]+$")
SUBFILE_RECORD_INDEXED_START = re.compile("^Subfile #[.0-9]+$")
INPUT_TEMPLATE_START = re.compile("^INPUT TEMPLATE\(S\):$")
PRINT_TEMPLATE_START = re.compile("^PRINT TEMPLATE\(S\):$")
SORT_TEMPLATE_START = re.compile("^SORT TEMPLATE\(S\):$")
FORM_BLOCKS_START = re.compile("^FORM\(S\)/BLOCK\(S\):$")
def __init__(self, CrossReference):
assert CrossReference
self._crossRef = CrossReference
self._curSect = None
self._curParser = None
self._curGlobal = None
self._sectionHeaderRegEx = dict()
self._sectionParserDict = dict()
self.__initSectionHeaderRegEx__()
self.__initSectionParser__()
def __initSectionHeaderRegEx__(self):
self._sectionHeaderRegEx[self.DESCRIPTION_START] = self.DESCRIPTION_SECTION
self._sectionHeaderRegEx[self.COMPILED_CROSS_REFERENCE_ROUTINE_START] = self.COMPILED_CROSS_REFERENCE_ROUTINE_SECTION
self._sectionHeaderRegEx[self.FILE_SCREEN_START] = self.FILE_SCREEN_SECTION
self._sectionHeaderRegEx[self.SPECIAL_LOOKUP_ROUTINE_START] = self.SPECIAL_LOOKUP_ROUTINE_SECTION
self._sectionHeaderRegEx[self.POST_SELECTION_ACTION_START] = self.POST_SELECTION_ACTION_SECTION
self._sectionHeaderRegEx[self.DD_ACCESS_START] = self.DD_ACCESS_SECTION
self._sectionHeaderRegEx[self.RD_ACCESS_START] = self.RD_ACCESS_SECTION
self._sectionHeaderRegEx[self.WR_ACCESS_START] = self.WR_ACCESS_SECTION
self._sectionHeaderRegEx[self.DEL_ACCESS_START] = self.DEL_ACCESS_SECTION
self._sectionHeaderRegEx[self.LAYGO_ACCESS_START] = self.LAYGO_ACCESS_SECTION
self._sectionHeaderRegEx[self.AUDIT_ACCESS_START] = self.AUDIT_ACCESS_SECTION
self._sectionHeaderRegEx[self.IDENTIFIED_BY_START] = self.IDENTIFIED_BY_SECTION
self._sectionHeaderRegEx[self.POINTED_TO_BY_START] = self.POINTED_TO_BY_SECTION
self._sectionHeaderRegEx[self.A_FIELD_IS_START] = self.A_FIELD_IS_SECTION
self._sectionHeaderRegEx[self.TRIGGERED_BY_START] = self.TRIGGERED_BY_SECTION
self._sectionHeaderRegEx[self.CROSS_START] = self.CROSS_SECTION
self._sectionHeaderRegEx[self.REFERENCED_BY_START] = self.REFERENCED_BY_SECTION
self._sectionHeaderRegEx[self.INDEXED_BY_START] = self.INDEXED_BY_SECTION
self._sectionHeaderRegEx[self.PRIMARY_KEY_START] = self.PRIMARY_KEY_SECTION
# START OF FILEMAN FIELD SECTION
self._sectionHeaderRegEx[self.FILEMAN_FIELD_START] = self.FILEMAN_FIELD_SECTION
self._sectionHeaderRegEx[self.FILES_POINTED_TO_START] = self.FILES_POINTED_TO_SECTION
self._sectionHeaderRegEx[self.FILE_RECORD_INDEXED_START] = self.FILE_RECORD_INDEXED_SECTION
self._sectionHeaderRegEx[self.SUBFILE_RECORD_INDEXED_START] = self.SUBFILE_RECORD_INDEXED_SECTION
self._sectionHeaderRegEx[self.INPUT_TEMPLATE_START] = self.INPUT_TEMPLATE_SECTION
self._sectionHeaderRegEx[self.PRINT_TEMPLATE_START] = self.PRINT_TEMPLATE_SECTION
self._sectionHeaderRegEx[self.SORT_TEMPLATE_START] = self.SORT_TEMPLATE_SECTION
self._sectionHeaderRegEx[self.FORM_BLOCKS_START] = self.FORM_BLOCKS_SECTION
def __initSectionParser__(self):
self._sectionParserDict[self.POINTED_TO_BY_SECTION] = PointedToBySectionParser()
self._sectionParserDict[self.DESCRIPTION_SECTION] = DescriptionSectionParser()
self._sectionParserDict[self.FILEMAN_FIELD_SECTION] = FileManFieldSectionParser()
def getCrossReference(self):
return self._crossRef
#===========================================================================
# pass the log file and get all routines ready
#===========================================================================
def parseAllDataDictionaryListLog(self, dirName, pattern):
dataDictionaryLogFiles = os.path.join(dirName, pattern)
allFiles = glob.glob(dataDictionaryLogFiles)
for logFileName in allFiles:
logger.info("Start parsing log file [%s]" % logFileName)
self.__parseDataDictionaryLogFile__(logFileName)
def __parseDataDictionaryLogFile__(self, logFileName):
if not os.path.exists(logFileName):
logger.error("File: %s does not exist" % logFileName)
return
logFileHandle = codecs.open(logFileName, 'r', encoding='ISO-8859-1', errors='ignore')
baseName = os.path.basename(logFileName)
fileNo = baseName[:-len(".schema")]
self._curGlobal = self._crossRef.getGlobalByFileNo(fileNo)
if not self._curGlobal:
logger.warning("Could not find global based on file# %s" % fileNo)
return
for line in logFileHandle:
# handle the empty line
line = line.rstrip("\r\n")
if not line: # ignore the empty line
continue
section = self.__isSectionHeader__(line)
if section:
if self._curSect and self._curParser:
self._curParser.onSectionEnd(line, self._curSect, self._curGlobal, self._crossRef)
self._curSect = section
self._curParser = self._sectionParserDict.get(self._curSect)
if self._curParser:
self._curParser.onSectionStart(line, self._curSect, self._curGlobal, self._crossRef)
elif self._curSect and self._curParser:
self._curParser.parseLine(line, self._curGlobal, self._crossRef)
def __isSectionHeader__(self, curLine):
for (regex, section) in iteritems(self._sectionHeaderRegEx):
if regex.search(curLine):
return section
return None
def parseDataDictionaryLogFile(crossRef, fileSchemaDir):
logger.progress("Parse data dictionary logfile")
DDFileParser = DataDictionaryListFileLogParser(crossRef)
DDFileParser.parseAllDataDictionaryListLog(fileSchemaDir, "*.schema")
DDFileParser.parseAllDataDictionaryListLog(fileSchemaDir, ".*.schema")
return DDFileParser
def createDataDictionaryAugumentParser():
parser = argparse.ArgumentParser(add_help=False) # no help page
    argGroup = parser.add_argument_group("Data Dictionary Parser Arguments")
argGroup.add_argument('-fs', '--fileSchemaDir', required=True,
help='VistA File Man Schema log Directory')
return parser
| StarcoderdataPython |
1687220 | # -*- coding: utf-8 -*-
# Copyright 2015-2016 Yelp
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Merging errors, picking the best one, and displaying it."""
import json
from .ids import _make_time_sort_key
def _pick_error(log_interpretation):
"""Pick most recent error from a dictionary possibly containing
step, history, and task interpretations. Returns None if there
are no errors.
"""
def yield_errors():
for log_type in ('step', 'history', 'task'):
errors = log_interpretation.get(log_type, {}).get('errors')
for error in errors or ():
yield error
# looks like this is only available from history logs
container_to_attempt_id = log_interpretation.get(
'history', {}).get('container_to_attempt_id')
errors = _merge_and_sort_errors(yield_errors(), container_to_attempt_id)
if errors:
return errors[0]
else:
return None
def _merge_and_sort_errors(errors, container_to_attempt_id=None):
"""Merge errors from one or more lists of errors and then return
them, sorted by recency.
    Optionally pass in *container_to_attempt_id* (a map from container ID to
    task attempt ID), which is used when sorting errors by recency.

    We allow None in place of an error list.
"""
sort_key = _make_time_sort_key(container_to_attempt_id)
key_to_error = {}
for error in errors:
key = sort_key(error)
key_to_error.setdefault(key, {})
key_to_error[key].update(error)
return [error for key, error in
sorted(key_to_error.items(), reverse=True)]
def _format_error(error):
"""Return string to log/print explaining the given error."""
# it's just sad if we error while trying to explain an error
try:
return _format_error_helper(error)
except:
return json.dumps(error, indent=2, sort_keys=True)
def _format_error_helper(error):
"""Return string to log/print explaining the given error."""
result = ''
hadoop_error = error.get('hadoop_error')
if hadoop_error:
result += hadoop_error.get('message', '')
if hadoop_error.get('path'):
result += '\n\n(from %s)' % _describe_source(hadoop_error)
# for practical purposes, there's always a hadoop error with a message,
# so don't worry too much about spacing.
task_error = error.get('task_error')
if task_error:
if hadoop_error:
result += '\n\ncaused by:\n\n%s' % (task_error.get('message', ''))
else:
result += task_error.get('message', '')
if task_error.get('path'):
result += '\n\n(from %s)' % _describe_source(task_error)
split = error.get('split')
if split and split.get('path'):
result += '\n\nwhile reading input from %s' % _describe_source(split)
return result
def _describe_source(d):
"""return either '<path>' or 'line N of <path>' or 'lines M-N of <path>'.
"""
path = d.get('path') or ''
if 'num_lines' in d and 'start_line' in d:
if d['num_lines'] == 1:
return 'line %d of %s' % (d['start_line'] + 1, path)
else:
return 'lines %d-%d of %s' % (
d['start_line'] + 1, d['start_line'] + d['num_lines'], path)
else:
return path
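# Illustrative example (hypothetical error dict; shown as a comment because this module
# is normally imported as part of the package and uses relative imports):
#
#   error = {
#       'hadoop_error': {'message': 'Task attempt failed',
#                        'path': 'hdfs:///logs/syslog'},
#       'split': {'path': 'hdfs:///input/part-00000',
#                 'start_line': 0, 'num_lines': 2},
#   }
#   _format_error(error)
#
# would return roughly:
#
#   Task attempt failed
#
#   (from hdfs:///logs/syslog)
#
#   while reading input from lines 1-2 of hdfs:///input/part-00000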
| StarcoderdataPython |
119861 | # Copyright (c) 2017, MD2K Center of Excellence
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import csv
import os
import json
import uuid
import argparse
import pytz
import pickle
from datetime import datetime
from cerebralcortex.core.datatypes.datastream import DataPoint
from cerebralcortex.core.datatypes.datastream import DataStream
from cerebralcortex.cerebralcortex import CerebralCortex
import cerebralcortex.cerebralcortex
CC_CONFIG_PATH = '/home/vagrant/CerebralCortex-DockerCompose/cc_config_file/cc_vagrant_configuration.yml'
DATA_DIR='/home/vagrant/mperf_data'
UUID_MAPPING = '/home/vagrant/mperf_ids.txt'
parser = argparse.ArgumentParser(description='CerebralCortex '
'Qualtrics data importer')
parser.add_argument("-c", "--cc-config", help="Path to file containing the "
"CerebralCortex configuration", required=True)
parser.add_argument("-d", "--data-dir", help="Path to dir containing the "
"qualtrics data" , required=True)
parser.add_argument("-u", "--user-mappings", help="Path to the file containing "
" the user id to uuid mappings", required=True)
args = vars(parser.parse_args())
if args['cc_config']:
CC_CONFIG_PATH = args['cc_config']
if args['data_dir']:
DATA_DIR = args['data_dir']
if args['user_mappings']:
UUID_MAPPING = args['user_mappings']
files_to_process=[]
FILE_NAME = 'Daily.tob.quantity.d.csv'
FILE_METADATA='metadata/daily.tob.d.non_mitre.json'
files_to_process.append((FILE_NAME,FILE_METADATA))
FILE_NAME = None
FILE_METADATA = None
FILE_NAME = 'Daily.alc.quantity.d.csv'
FILE_METADATA='metadata/daily.alc.d.non_mitre.json'
files_to_process.append((FILE_NAME,FILE_METADATA))
FILE_NAME = None
FILE_METADATA = None
FILE_NAME = 'Daily.sleep.d.csv'
FILE_METADATA='metadata/daily.sleep.d.non_mitre.json'
files_to_process.append((FILE_NAME,FILE_METADATA))
FILE_NAME = None
FILE_METADATA = None
FILE_NAME = 'Daily.total.pa.d.csv'
FILE_METADATA='metadata/daily.total.pa.d.non_mitre.json'
files_to_process.append((FILE_NAME,FILE_METADATA))
FILE_NAME = None
FILE_METADATA = None
'''
# Below are the list of filenames
FILE_NAME = 'Daily.tob.quantity.d.mitre.csv'
FILE_METADATA='metadata/daily.tob.d.json'
files_to_process.append((FILE_NAME,FILE_METADATA))
FILE_NAME = None
FILE_METADATA = None
FILE_NAME = 'Daily.stress.d.csv'
FILE_METADATA='metadata/daily.stress.d.json'
files_to_process.append((FILE_NAME,FILE_METADATA))
FILE_NAME = None
FILE_METADATA = None
FILE_NAME = 'Daily.anxiety.d.csv'
FILE_METADATA='metadata/daily.anxiety.d.json'
files_to_process.append((FILE_NAME,FILE_METADATA))
FILE_NAME = None
FILE_METADATA = None
FILE_NAME = 'Daily.alc.quantity.d.mitre.csv'
FILE_METADATA='metadata/daily.alc.d.json'
files_to_process.append((FILE_NAME,FILE_METADATA))
FILE_NAME = None
FILE_METADATA = None
IRB_D = 'Daily.irb.d.csv'
IRB_D_METADATA='metadata/daily.irb.d.json'
files_to_process.append((IRB_D,IRB_D_METADATA))
ITP_D = 'Daily.itp.d.csv'
ITP_D_METADATA='metadata/daily.itp.d.json'
files_to_process.append((ITP_D,ITP_D_METADATA))
FILE_NAME = 'Daily.pos.affect.d.csv'
FILE_METADATA='metadata/daily.pos.affect.d.json'
files_to_process.append((FILE_NAME,FILE_METADATA))
FILE_NAME = None
FILE_METADATA = None
FILE_NAME = 'Daily.neg.affect.d.csv'
FILE_METADATA='metadata/daily.neg.affect.d.json'
files_to_process.append((FILE_NAME,FILE_METADATA))
FILE_NAME = None
FILE_METADATA = None
FILE_NAME = 'Daily.ocb.d.csv'
FILE_METADATA='metadata/daily.ocb.d.json'
files_to_process.append((FILE_NAME,FILE_METADATA))
FILE_NAME = None
FILE_METADATA = None
FILE_NAME = 'Daily.cwb.d.csv'
FILE_METADATA='metadata/daily.cwb.d.json'
files_to_process.append((FILE_NAME,FILE_METADATA))
FILE_NAME = None
FILE_METADATA = None
FILE_NAME = 'Daily.sleep.d.mitre.csv'
FILE_METADATA='metadata/daily.sleep.d.json'
files_to_process.append((FILE_NAME,FILE_METADATA))
FILE_NAME = None
FILE_METADATA = None
FILE_NAME = 'Daily.total.pa.d.mitre.csv'
FILE_METADATA='metadata/daily.total.pa.d.json'
files_to_process.append((FILE_NAME,FILE_METADATA))
FILE_NAME = None
FILE_METADATA = None
FILE_NAME = 'Daily.neuroticism.d.csv'
FILE_METADATA='metadata/daily.neuroticism.d.json'
files_to_process.append((FILE_NAME,FILE_METADATA))
FILE_NAME = None
FILE_METADATA = None
FILE_NAME = 'Daily.conscientiousness.d.csv'
FILE_METADATA='metadata/daily.conscientiousness.d.json'
files_to_process.append((FILE_NAME,FILE_METADATA))
FILE_NAME = None
FILE_METADATA = None
FILE_NAME = 'Daily.extraversion.d.csv'
FILE_METADATA='metadata/daily.extraversion.d.json'
files_to_process.append((FILE_NAME,FILE_METADATA))
FILE_NAME = None
FILE_METADATA = None
FILE_NAME = 'Daily.agreeableness.d.csv'
FILE_METADATA='metadata/daily.agreeableness.d.json'
files_to_process.append((FILE_NAME,FILE_METADATA))
FILE_NAME = None
FILE_METADATA = None
FILE_NAME = 'Daily.openness.d.csv'
FILE_METADATA='metadata/daily.openness.d.json'
files_to_process.append((FILE_NAME,FILE_METADATA))
FILE_NAME = None
FILE_METADATA = None
# End list of file names
'''
# Map that contains the user
user_id_mappings={}
# Timezone in which all times are recorded
centraltz=pytz.timezone('US/Central')
easterntz=pytz.timezone('US/Eastern')
pacifictz=pytz.timezone('US/Pacific')
# CC intialization
CC = CerebralCortex(CC_CONFIG_PATH)
def parse_userid_mappings():
f = open_data_file(UUID_MAPPING)
if f is None:return
for line in f:
splits = line.split()
uuid = splits[1]
username = splits[0]
username_splts = username.split('_')
username = username_splts[1]
user_id_mappings[username] = uuid
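# Expected layout of the UUID mapping file (an assumption inferred from the parsing
# above; the prefix and ids below are illustrative): whitespace-separated pairs per
# line, first token "<prefix>_<participant id>", second token the CerebralCortex uuid:
#
#   mperf_5123  a1b2c3d4-0000-0000-0000-000000000000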
def open_data_file(filename):
fp = os.path.join(DATA_DIR,filename)
if os.path.exists(fp):
return open(fp, newline='')
else:
print('File not found %s' % fp)
def process_feature(file_path, metadata_path):
f = open_data_file(file_path)
mf = open(metadata_path)
if f is None:return
reader = csv.reader(f)
count = 0
feature_data = {}
start_column_number = 3
for row in reader:
if count == 0:
header_row = row
count +=1
continue
# handling corrupt data, some user id's are NA
if row[0] not in user_id_mappings:continue
user_id = user_id_mappings[row[0]]
ems_start_time_str = row[1] + ' 12:00:00'
ems_start_time = datetime.strptime(ems_start_time_str, '%Y%m%d %H:%M:%S')
qualtrics_start_time = datetime.strptime(row[3], '%m/%d/%Y %H:%M')
if len(user_id) == 4 and int(user_id[0]) == 5: # all 5xxx users are incentral
ems_start_time = centraltz.localize(ems_start_time)
qualtrics_start_time = centraltz.localize(qualtrics_start_time)
elif len(user_id) == 4 and int(user_id[0]) == 1: # all 1xxx users are east
ems_start_time = easterntz.localize(ems_start_time)
qualtrics_start_time = easterntz.localize(qualtrics_start_time)
elif len(user_id) == 4 and int(user_id[0]) == 9: # all 9xxx users are west
ems_start_time = pacifictz.localize(ems_start_time)
qualtrics_start_time = pacifictz.localize(qualtrics_start_time)
else:
ems_start_time = centraltz.localize(ems_start_time)
qualtrics_start_time = centraltz.localize(qualtrics_start_time)
# handling the different format of the IGTB file
if 'IGTB' not in file_path:
end_time = datetime.strptime(row[4], '%m/%d/%Y %H:%M')
else:
            # assumption: IGTB rows carry no separate end time, so fall back to the
            # Qualtrics recorded start time
            end_time = datetime(year=qualtrics_start_time.year, month=qualtrics_start_time.month,
                                day=qualtrics_start_time.day, hour=qualtrics_start_time.hour,
                                minute=qualtrics_start_time.minute)
start_column_number = 2
if 'IGTB' not in file_path:
end_time = centraltz.localize(end_time)
utc_offset = ems_start_time.utcoffset().total_seconds() * 1000
        # DataPoint expects the offset in milliseconds; utcoffset() is already
        # negative for timezones west of UTC, so no extra sign flip is applied here.
sample = row[6:]
values = []
for val in sample:
if 'yes' in val or 'no' in val:# Check for Daily.tob.d.mitre.csv
continue
if 'NA' in val:
values.append(float('Nan'))
else:
values.append(float(val))
ems_dp = DataPoint(start_time=ems_start_time, end_time=end_time,
offset=utc_offset, sample=values)
q_dp = DataPoint(start_time=qualtrics_start_time, end_time=end_time,
offset=utc_offset, sample=values)
if user_id not in feature_data:
feature_data[user_id] = []
feature_data[user_id].append((q_dp, ems_dp))
metadata = mf.read()
metadata = json.loads(metadata)
metadata_name = metadata['name']
for user in feature_data:
output_stream_id = str(uuid.uuid3(uuid.NAMESPACE_DNS, str(
metadata_name + user + file_path)))
q_dps = [dp[0] for dp in feature_data[user]]
q_ds = DataStream(identifier=output_stream_id, owner=user,
name=metadata_name,
data_descriptor= metadata['data_descriptor'],
execution_context=metadata['execution_context'],
annotations= metadata['annotations'],
stream_type=1,
data=q_dps)
ems_stream_name = \
metadata_name.replace('data_qualtrics','data_qualtrics_ems')
output_stream_id = str(uuid.uuid3(uuid.NAMESPACE_DNS, str(
ems_stream_name + user + file_path)))
ems_dps = [dp[1] for dp in feature_data[user]]
ems_ds = DataStream(identifier=output_stream_id, owner=user,
name=ems_stream_name,
data_descriptor= metadata['data_descriptor'],
execution_context=metadata['execution_context'],
annotations= metadata['annotations'],
stream_type=1,
data=ems_dps)
try:
CC.save_stream(q_ds, localtime=True)
except Exception as e:
print(e)
try:
CC.save_stream(ems_ds, localtime=True)
except Exception as e:
print(e)
f.close()
mf.close()
def main():
parse_userid_mappings()
# processing ALC_D
# ID","StartDate","EndDate","RecordedDate","SurveyType","alc_status","alc.quantity.d"
for feature in files_to_process:
print("PROCESSING %s %s"%(feature[0], feature[1]))
process_feature(feature[0], feature[1])
if __name__ == '__main__':
main()
| StarcoderdataPython |
3211085 | <reponame>Howardhuang98/Blog
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
@File : 6051.py
@Contact : <EMAIL>
@Modify Time : 2022/4/30 22:31
------------
"""
from typing import List
class Solution:
def countUnguarded(self, m: int, n: int, guards: List[List[int]], walls: List[List[int]]) -> int:
        # 1 = unguarded (so far), 'w' = wall, 'g' = guard, 0 = guarded
        grid = [[1 for i in range(n)] for j in range(m)]
        for x, y in walls:
            grid[x][y] = 'w'
        for x, y in guards:
            grid[x][y] = 'g'
        # Cast a ray from every guard in each of the four cardinal directions.
        # Both walls and other guards block the line of sight.
        for x, y in guards:
            for dx, dy in ((1, 0), (-1, 0), (0, 1), (0, -1)):
                look_x, look_y = x + dx, y + dy
                while 0 <= look_x < m and 0 <= look_y < n and grid[look_x][look_y] not in ('w', 'g'):
                    grid[look_x][look_y] = 0
                    look_x += dx
                    look_y += dy
        # Wall and guard cells themselves are never counted as unguarded.
        for x, y in walls:
            grid[x][y] = 0
        for x, y in guards:
            grid[x][y] = 0
        for i in range(m):
            print(grid[i])
        return sum(sum(row) for row in grid)
if __name__ == '__main__':
s = Solution()
print(s.countUnguarded(m=4, n=6, guards=[[0, 0], [1, 1], [2, 3]], walls=[[0, 1], [2, 2], [1, 4]]))
| StarcoderdataPython |
1622380 | from wcloud import app, db
if __name__ == '__main__':
app.run(debug=app.config['DEBUG'])
| StarcoderdataPython |
5481 | import os
class StressedNetConfig:
def __init__(self,
synaptic_environmental_constraint=0.8,
group_environmental_constraint=0.6,
stress_factor=0.8,
save_folder=os.path.expanduser("~/.nervous/models/")):
self._synaptic_environmental_constraint = synaptic_environmental_constraint
self._group_environmental_constraint = group_environmental_constraint
self._stress_factor = stress_factor
self._save_folder = save_folder
self._sanitize()
def _sanitize(self):
        # each value must lie in the range [0., 1.) as the messages below state
        if not 0. <= self._group_environmental_constraint < 1.:
            raise ValueError("Group environmental constraint has to be in the range [0. - 1.)")
        if not 0. <= self._synaptic_environmental_constraint < 1.:
            raise ValueError("Synaptic environmental constraint has to be in the range [0. - 1.)")
        if not 0. <= self._stress_factor < 1.:
            raise ValueError("Stress factor has to be in the range [0. - 1.)")
if not os.path.exists(self._save_folder):
os.makedirs(self._save_folder)
@property
def synaptic_environmental_constraint(self):
return self._synaptic_environmental_constraint
@synaptic_environmental_constraint.setter
def synaptic_environmental_constraint(self, value):
self._synaptic_environmental_constraint = value
self._sanitize()
@property
def group_environmental_constraint(self):
return self._group_environmental_constraint
@group_environmental_constraint.setter
def group_environmental_constraint(self, value):
self._group_environmental_constraint = value
self._sanitize()
@property
def stress_factor(self):
return self._stress_factor
@stress_factor.setter
def stress_factor(self, value):
self._stress_factor = value
self._sanitize()
@property
def save_folder(self):
return self._save_folder
@save_folder.setter
def save_folder(self, value):
self._save_folder = value
self._sanitize()
def __getitem__(self, item):
if item == "self":
raise ValueError("Hahaha")
return self.__dict__[item]
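# Minimal usage sketch (illustrative only): the property setters re-run _sanitize(),
# so out-of-range values raise immediately.
#
#   config = StressedNetConfig(stress_factor=0.5)
#   config.stress_factor = 0.9   # ok, re-validated by the setter
#   config.stress_factor = 1.5   # raises ValueError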
| StarcoderdataPython |
1762952 | <filename>RocketStaging.py
from scipy import *
from RocketParameters import *
from Reference import *
import sys
################################################################################
# ROCKET STAGING
################################################################################
def rocket_mass(g0, f_inert, dV, isp, m_pay): #Calculates the total rocket mass
stage2 = initial_mass(m_pay, f_inert[1], dV[1], g0, isp[1])
rocketMass = initial_mass(stage2, f_inert[0], dV[0], g0, isp[0])
return(rocketMass)
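# Background note (assumption; initial_mass() itself lives in RocketParameters and is not
# shown here): a common closed form for sizing one stage follows from the ideal rocket
# equation dV = g0 * isp * ln(m0 / mf) and the inert-mass fraction
# f = m_inert / (m_inert + m_prop). With R = exp(dV / (g0 * isp)):
#     m0 = m_pay * (1 - f) * R / (1 - f * R)
# rocket_mass() applies such a relation stage by stage, sizing the upper stage first and
# treating its total mass as the payload of the stage below it.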
def split_deltaV(g0, f_inert, dV_needed, isp): # calculates the split deltaV
massList = [] # Will hold the list of masses
splitList = []
x = 0.2
while x <= 0.8:
v0 = dV_needed * x
v1 = dV_needed * (1 - x)
altDV = [v0, v1]
mass = rocket_mass(g0, f_inert, altDV, isp, m_pay)
massList.append(mass)
splitList.append(x)
x += 0.01
minMass = min(massList)
minIndex = massList.index(minMass)
split = splitList[minIndex]
return(split)
print(rocket_mass(g0, f_inert, dV, isp, m_pay))
| StarcoderdataPython |
1716177 | # Test file in Python style
# sync-start:content_after_start __examples__/content_after_start/a.js
# sync-start:content_after_start __examples__/content_after_start/c.js
code = 1
# sync-end:content_after_start | StarcoderdataPython |
3210358 | import configparser
import logging
import os
import time
import json
import requests
import pygame
from logging.handlers import RotatingFileHandler
from datetime import datetime
import sys
config = configparser.ConfigParser()
config.read('config.ini')
auth_key = config['general'].get('auth_key')
device_uid = config['general'].get('device_uid')
force_playback_only = config['general'].getboolean('force_playback_only')
interval = config['device'].getint('interval')
sample_file = config['device'].get('sampleFile')
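# A config.ini along these lines is assumed (section/key names are taken from the reads
# above; the values are placeholders):
#
#   [general]
#   auth_key = <your auth key>
#   device_uid =
#   force_playback_only = false
#
#   [device]
#   interval = 60
#   sampleFile = sample.mp3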
logging.basicConfig(
handlers=[RotatingFileHandler(filename='liarbird.log', mode='a', maxBytes=10000000, backupCount=10)],
level=10,
format='%(asctime)s %(levelname)-6s %(lineno)d %(name)-6s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
logging.debug(auth_key)
logging.debug(device_uid)
logging.debug(interval)
logging.debug(sample_file)
if __name__ == '__main__':
internet_connected = True
try:
logging.info('testing internet connectivity')
request = requests.get("http://google.com", timeout=5)
except (requests.ConnectionError, requests.Timeout):
internet_connected = False
try:
if internet_connected and not force_playback_only:
logging.info("internet connection found. running in configuration mode")
if not device_uid:
logging.info('no device identifier set - registering device')
response = requests.post("https://us-central1-liarbird-1df1e.cloudfunctions.net/registerDevice", data={ "authKey": auth_key })
if response.status_code != 200:
logging.error(response)
else:
logging.debug(response.text)
json_response = json.loads(response.text)
config.set('general', 'device_uid', json_response['uid'])
device_uid = json_response['uid']
logging.info('updating config.ini')
config.write(open('config.ini', 'w'))
if device_uid:
logging.info('fetching config')
response = requests.post("https://us-central1-liarbird-1df1e.cloudfunctions.net/getConfiguration", data={ "authKey": auth_key, "uid": device_uid })
if response.status_code != 200:
# failed request
logging.error(response)
else:
logging.info('config retrieved from server')
logging.debug(response.text)
response_data = json.loads(response.text)
if 'playbackFrequency' in response_data:
config.set('device', 'interval', response_data['playbackFrequency'])
config.write(open('config.ini', 'w'))
if 'sampleFile' in response_data:
config.set('device', 'sampleFile', response_data['sampleFile'])
config.write(open('config.ini', 'w'))
if 'sampleUri' in response_data:
logging.info('fetching sample')
response = requests.get(response_data["sampleUri"])
config.write(open('config.ini', 'w'))
logging.info('writing sample to disk')
open(response_data["sampleFile"], 'wb').write(response.content)
else:
logging.info("NO internet connection found. running in playback mode")
if not sample_file:
logging.error("missing sample file!")
elif not interval:
logging.error("missing interval!")
else:
logging.info("running as normal")
pygame.mixer.init()
while True:
logging.info("starting playback of sample_file")
pygame.mixer.music.load(sample_file)
pygame.mixer.music.play()
time.sleep(interval * 60)
except (IOError, SystemExit):
logging.error('IOError or SystemExit')
raise
except KeyboardInterrupt:
logging.error('Ctrl+C Interrupt')
print("Crtl+C Pressed. Shutting down.")
| StarcoderdataPython |
144229 | # -*- coding: utf-8 -*-
import pytest
import barbacoa
@pytest.fixture
def hub():
return barbacoa.hub
| StarcoderdataPython |
1707167 | # Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from mo.front.common.partial_infer.multi_box_prior import multi_box_prior_infer_mxnet
from mo.utils.graph import Node
from mo.utils.ir_reader.extender import Extender
class PriorBox_extender(Extender):
op = 'PriorBox'
@staticmethod
def extend(op: Node):
op['V10_infer'] = True
attrs = ['min_size', 'max_size', 'aspect_ratio', 'variance', 'fixed_ratio', 'fixed_size', 'density']
for attr in attrs:
PriorBox_extender.attr_restore(op, attr)
if op.graph.graph['cmd_params'].framework == 'mxnet':
op['infer'] = multi_box_prior_infer_mxnet
op['stop_attr_upd'] = True
@staticmethod
def attr_restore(node: Node, attribute: str, value=None):
# Function to restore some specific attr for PriorBox & PriorBoxClustered layers
if not node.has_valid(attribute):
node[attribute] = [] if value is None else [value]
if isinstance(node[attribute], str):
node[attribute] = []
else:
Extender.attr_to_list(node, attribute)
| StarcoderdataPython |
3263120 | <reponame>lukaspestalozzi/Master_Semester_Project
import unittest
from tichu.utils import crange
class OtherTest(unittest.TestCase):
def test_init(self):
def trick_ends_iterative(curr, leading, next_):
# assert curr != leading
for k in crange(curr, curr, 4):
if k == leading:
return True
elif k == next_:
return False
# fed
def trick_ends_fast(curr, leading, next_):
return leading == next_ or curr < leading < next_ or next_ < curr < leading or leading < next_ < curr
# fed
for c in range(4):
for n in range(4):
if c == n:
continue
for l in range(4):
if c == l:
continue
it, fast = trick_ends_iterative(c, l, n), trick_ends_fast(c, l, n)
self.assertEqual(it, fast, f"it: {it}, fast: {fast}, curr: {c}, leading: {l}, next: {n}")
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
3360619 | <reponame>ace-ecosystem/ace2-modules<filename>tests/core_modules/conftest.py
# vim: ts=4:sw=4:et:cc=120
import pytest
from ace.system.database import DatabaseACESystem
from ace.system.threaded import ThreadedACESystem
class TestSystem(DatabaseACESystem, ThreadedACESystem):
pass
@pytest.fixture
async def system(tmpdir):
system = TestSystem(db_url="sqlite+aiosqlite://", storage_root=str(tmpdir))
await system.initialize()
await system.create_database()
await system.start()
yield system
await system.stop()
@pytest.fixture
async def root(system):
root = system.new_root()
yield root
await root.discard()
| StarcoderdataPython |
173190 | <gh_stars>1-10
'''
Module that contains UI objects such
as text, buttons and levers
'''
import pygame
'''
The interface classes below are general-purpose classes for
UI elements and text. They are often used as parent classes for
more specialized UI classes, for example classes with some
aesthetic decorations.

General classes whose names end with _active are actively
re-rendered before each blit. The purpose of that is animations,
such as moving text or text that changes color.

See the usage sketch at the bottom of this module for how these
widgets are typically driven from a pygame event loop.
'''
class text_float:
'''
Text with floating position, not actively rendered
'''
def __init__(self, text, pos, color, font, font_size):
self.text = text
self.pos = pos
self.color = color
self.font = font
self.font_size = font_size
self.text_type = pygame.font.Font(self.font, self.font_size)
self.text_render = self.text_type.render(self.text, True, self.color)
def draw(self, window):
'''Blits text to window'''
window.blit(
self.text_render,
self.pos
)
def interact_mouse(self, mouse_pos, click):
        '''For compatibility'''
pass
'''
Variables ending with _r are the actual variables used
in rendering
'''
class text_float_active:
'''
Actively rendered text with floating position
'''
def __init__(self, text, pos, color, font, font_size):
self.text = text
self.font_size = font_size
self.font = font
self.text_r = text
self.pos_r = pos
self.color_r = color
self.font_size_r = self.font_size
self.text_type = pygame.font.Font(self.font, self.font_size)
self.text_render = self.text_type.render(self.text_r, True, self.color_r)
# Updates the object __dict__ with all the local variables in init method
self.__dict__.update(locals())
def draw(self, window):
'''Rerenders text instance and blits to window'''
self.text_type = pygame.font.Font(self.font, self.font_size_r)
self.text_render = self.text_type.render(self.text_r, True, self.color_r)
window.blit(
self.text_render,
self.pos_r
)
    def interact_mouse(self, mouse_pos, click):
        '''For compatibility with widgets that react to the mouse'''
pass
class text_center(text_float):
'''
Centered text, not actively rendered
'''
def __init__(self, text, pos, color, font, font_size, window_shape):
super().__init__(text, pos, color, font, font_size)
self.pos[0] = (window_shape[0]-self.text_render.get_width())/2
class text_center_active(text_float_active):
'''
Actively rendered centered text
'''
def __init__(self, text, pos, color, font, font_size, window_shape):
super().__init__(text, pos, color, font, font_size)
self.window_shape = window_shape
self.pos[0] = (window_shape[0]-self.text_render.get_width())/2
def draw(self, window):
self.text_type = pygame.font.Font(self.font, self.font_size_r)
self.text_render = self.text_type.render(self.text_r, True, self.color_r)
self.pos_r[0] = (self.window_shape[0]-self.text_render.get_width())/2
window.blit(
self.text_render,
self.pos_r
)
class button_float_active(text_float_active):
'''
Actively rendered floating position button
'''
def __init__(self, text, pos, color, color_mouseover, font, font_size):
super().__init__(text, pos, color, font, font_size)
self.__dict__.update(locals())
self.state = False
def interact_mouse(self, mouse_pos, click):
'''Button mouse interaction'''
self.rect = pygame.Rect(
self.pos_r[0],
self.pos_r[1],
self.text_render.get_width(),
self.text_render.get_height()
)
if(self.rect.collidepoint(mouse_pos)):
self.color_r = self.color_mouseover
if(click):
self.state = True
else:
self.state = False
self.color_r = self.color
class button_center_active(text_center_active):
'''
Actively rendered centered button. pos argument should be pos = [None, y-coordinate]
'''
def __init__(self, text, pos, color, color_mouseover, font, font_size, window_shape):
super().__init__(text, pos, color, font, font_size, window_shape)
self.color_mouseover = color_mouseover
self.state = False
self.rect = pygame.Rect(
self.pos_r[0],
self.pos_r[1],
self.text_render.get_width(),
self.text_render.get_height()
)
# Multiple inheritance is too scary for me
def interact_mouse(self, mouse_pos, click):
'''Button mouse interaction'''
self.rect = pygame.Rect(
self.pos_r[0],
self.pos_r[1],
self.text_render.get_width(),
self.text_render.get_height()
)
if(self.rect.collidepoint(mouse_pos)):
self.color_r = self.color_mouseover
if(click):
self.state = True
else:
self.state = False
self.color_r = self.color
class lever_float_active(button_float_active):
'''
Levers are toggle-able buttons.
'''
def __init__(self, text, pos, color, color_mouseover, color_active, font, font_size, state):
super().__init__(text, pos, color, color_mouseover, font, font_size)
self.state = state
self.color_active = color_active
if(self.state):
self.color_r = color_active
def interact_mouse(self, mouse_pos, click):
'''Button mouse interaction'''
if(self.rect.collidepoint(mouse_pos)):
self.color_r = self.color_mouseover
if(click):
self.state = not self.state
return True
else:
if(self.state):
self.color_r = self.color_active
else:
self.color_r = self.color
return False
class lever_center_active(lever_float_active):
'''Centered lever object, pos argument should be pos=[None, y-coordinate]'''
def __init__(self, text, pos, color, color_mouseover, color_active, font, font_size, window_shape, state):
super().__init__(text, pos, color, color_mouseover, color_active, font, font_size, state)
self.pos[0] = (window_shape[0]-self.text_render.get_width())/2
self.pos_r[0] = self.pos[0]
self.rect = pygame.Rect(
self.pos_r[0],
self.pos_r[1],
self.text_render.get_width(),
self.text_render.get_height()
)
# _change_state objects simply has the name of a state stored as a string
class button_change_state(button_float_active):
'''
A button with floating position that also contains the string name
of a state.
'''
def __init__(self, text, pos, color, color_mouseover, font, font_size, next_state):
super().__init__(text, pos, color, color_mouseover, font, font_size)
self.next_state = next_state
class button_center_change_state(button_center_active):
'''Centered button containing the string name of a state'''
def __init__(self, text, pos, color, color_mouseover, font, font_size, window_shape, next_state):
super().__init__(text, pos, color, color_mouseover, font, font_size, window_shape)
self.next_state = next_state
'''
The classes defined below are specialized classes that
inherits the classes above.
'''
class decorated_button(button_float_active):
'''
Actively renderd button with a bounding box
'''
def __init__(self, text, pos, color, color_mouseover, font, font_size):
super().__init__(text, pos, color, color_mouseover, font, font_size)
self.decor_pos_r = [0,0]
self.decor_pos_r[0] = self.pos_r[0] - self.text_render.get_width()*0.05
self.decor_pos_r[1] = self.pos_r[1] - self.text_render.get_height()*0.01
self.decor_shape = [0,0]
self.decor_shape[0] = self.text_render.get_width()*1.14
self.decor_shape[1] = self.text_render.get_height()
def draw(self, window):
'''Rerender text and blit to window'''
self.text_type = pygame.font.Font(self.font, self.font_size_r)
        self.text_render = self.text_type.render(self.text_r, True, self.color_r)
window.blit(
self.text_render,
self.pos_r
)
pygame.draw.rect(
window,
self.color_r,
(self.decor_pos_r[0], self.decor_pos_r[1], self.decor_shape[0], self.decor_shape[1]),
2
)
class decorated_button_change_state(decorated_button):
'''
A button with floating position that also contains the string name
of a state. It is decorated with a square bounding box
'''
def __init__(self, text, pos, color, color_mouseover, font, font_size, next_state):
super().__init__(text, pos, color, color_mouseover, font, font_size)
self.next_state = next_state
class centered_indicator(text_float_active):
'''
Indicators are actively rendered text objects that has a show attribute. If show is True, then
the draw function will blit to window, else pass.
'''
def __init__(self, text, pos, color, font, font_size, window_shape, show=False):
super().__init__(text, pos, color, font, font_size)
self.show = show
self.window_shape = window_shape
def draw(self, window):
'''Blits if show attribute is True, else pass'''
if(self.show):
self.text_type = pygame.font.Font(self.font, self.font_size_r)
self.text_render = self.text_type.render(self.text_r, True, self.color_r)
self.pos_r[0] = (self.window_shape[0]-self.text_render.get_width())/2
window.blit(
self.text_render,
self.pos_r
)
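# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original widget set): how these widgets
# are typically driven from a pygame event loop. The window size, colors and the
# use of the default font (font=None) are illustrative assumptions only.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    pygame.init()
    shape = (640, 480)
    window = pygame.display.set_mode(shape)
    clock = pygame.time.Clock()

    button = button_center_active(
        text='Start', pos=[None, 200], color=(200, 200, 200),
        color_mouseover=(255, 255, 0), font=None, font_size=48,
        window_shape=shape)

    running = True
    while running:
        click = False
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
            elif event.type == pygame.MOUSEBUTTONDOWN:
                click = True

        # Update the widget from the mouse state, then redraw it.
        button.interact_mouse(pygame.mouse.get_pos(), click)
        if button.state:
            print('button pressed')

        window.fill((0, 0, 0))
        button.draw(window)
        pygame.display.flip()
        clock.tick(60)

    pygame.quit()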
| StarcoderdataPython |
3383776 | from collections import OrderedDict
from .metrics import format_metric_name, format_labels
from .utils import merge_dicts_ordered
def count_object_fields(object_mappings, counts=None):
if counts is None:
counts = {}
else:
counts = counts.copy()
for field, mapping in object_mappings['properties'].items():
# This field is an object, so count its fields.
if 'properties' in mapping:
counts = count_object_fields(mapping, counts=counts)
else:
field_type = mapping['type']
if field_type in counts:
counts[field_type] += 1
else:
counts[field_type] = 1
# If a field has any multifields (copies of the field with different mappings) we need
# to add their mappings as well.
if 'fields' in mapping:
for mfield, mfield_mapping in mapping['fields'].items():
mfield_type = mfield_mapping['type']
if mfield_type in counts:
counts[mfield_type] += 1
else:
counts[mfield_type] = 1
return counts
def parse_index(index, mappings, metric=None):
if metric is None:
metric = []
metric = metric + ['field', 'count']
labels = OrderedDict([('index', index)])
# In newer Elasticsearch versions, the mappings root is simply the object mappings for the whole
# document, so we can count the fields in it directly.
if 'properties' in mappings:
counts = count_object_fields(mappings)
# Older Elasticsearch versions had the concept of mapping types, so the root maps from mapping
# type to the object mappings for that type. We have to count the fields the types separately.
else:
counts = {}
for mapping_type, type_mappings in mappings.items():
counts = count_object_fields(type_mappings, counts=counts)
metrics = []
for field_type, count in counts.items():
metrics.append((metric, '', merge_dicts_ordered(labels, field_type=field_type), count))
return metrics
def parse_response(response, metric=None):
if metric is None:
metric = []
metrics = []
for index, data in response.items():
metrics.extend(parse_index(index, data['mappings'], metric=metric))
return [
(format_metric_name(*metric_name),
metric_doc,
format_labels(label_dict),
value)
for metric_name, metric_doc, label_dict, value
in metrics
]
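# Quick illustration of count_object_fields() (kept as a comment since this module uses
# relative imports; the mapping below is a made-up example):
#
#   mappings = {'properties': {
#       'title':  {'type': 'text', 'fields': {'raw': {'type': 'keyword'}}},
#       'author': {'properties': {'name': {'type': 'text'},
#                                 'age':  {'type': 'integer'}}},
#   }}
#   count_object_fields(mappings)  # -> {'text': 2, 'keyword': 1, 'integer': 1}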
| StarcoderdataPython |
3284783 | import logging
from pathlib import Path
from openpyxl import load_workbook
from .. import utils
from ..cache import Cache
__authors__ = ["zstumgoren", "Dilcia19", "ydoc5212"]
__tags__ = ["historical", "excel"]
logger = logging.getLogger(__name__)
def scrape(
data_dir: Path = utils.WARN_DATA_DIR,
cache_dir: Path = utils.WARN_CACHE_DIR,
) -> Path:
"""
Scrape data from New York.
Keyword arguments:
data_dir -- the Path were the result will be saved (default WARN_DATA_DIR)
cache_dir -- the Path where results can be cached (default WARN_CACHE_DIR)
Returns: the Path where the file is written
"""
# Request the page and save it to the cache
url = (
"https://storage.googleapis.com/bln-data-public/warn-layoffs/ny_historical.xlsx"
)
cache = Cache(cache_dir)
excel_path = cache.download("ny/source.xlsx", url)
# Open it up
workbook = load_workbook(filename=excel_path)
# Get the first sheet
worksheet = workbook.worksheets[0]
# Convert the sheet to a list of lists
row_list = []
for r in worksheet.rows:
column = [cell.value for cell in r]
row_list.append(column)
# Set the export path
data_path = data_dir / "ny.csv"
# Write out the file
utils.write_rows_to_csv(data_path, row_list)
# Return the path to the file
return data_path
if __name__ == "__main__":
scrape()
| StarcoderdataPython |
1754124 | <reponame>ruidacosta/SP500HistoryData
import sqlite3
filename = 'SP&500.csv'
conn = sqlite3.connect(':memory:')
tickers = []
def readInstruments():
first = True
with open(filename,'r') as fd:
for line in fd:
if not first:
tickers.append(str(line.replace('\r','').replace('\n','').split(';')[0]))
else:
first = False
def startDB():
	global conn
	conn = sqlite3.connect(':memory:')
def readDataFiles(ticker):
tmp = []
with open('data\\' + ticker + '.csv','r') as fd:
for line in fd:
tmp.append(tuple(line.replace('\r','').replace('\n','').split(',')))
return tmp[1:]
def createSQLiteDB():
cur = conn.cursor()
cur.execute('''Create table quotes
(symbol text,date text,open text,high text, low text,close text, volume text,adj_close text, primary key (symbol,date))''')
conn.commit()
def insertSQLiteDB(quotes_lst,ticker):
cur = conn.cursor()
cur.executemany('insert into quotes values ("'+ticker+'",?,?,?,?,?,?,?)', quotes_lst)
#'insert into quotes ("symbol","date","open","high","low","close","volume","adj_close") values ( )'
def sendToFile():
query = 'select date,symbol,open,high,low,close,volume,adj_close from quotes order by date'
cur = conn.cursor()
with open('SP500Quotes.csv','w') as fd:
fd.write(';'.join(('date','symbol','open','high','low','close','volume','adj_close')) + '\n')
for row in cur.execute(query):
fd.write(';'.join(row) + '\n')
def main():
print "Loading tickers..."
readInstruments()
#print tickers
#startDB()
print "Create database..."
createSQLiteDB()
for ticker in tickers:
print "Inserting " + ticker + " data..."
insertSQLiteDB(readDataFiles(ticker),ticker)
conn.commit()
print "Creating csv file..."
sendToFile()
print "Done."
if __name__ == '__main__':
main() | StarcoderdataPython |
3230649 | import cv2
import numpy as np
from copy import deepcopy
from registry import Registries
from .base_strategy import BaseStrategy
@Registries.strategy.register("group")
class GroupStrategy(BaseStrategy):
def __init__(self, score: object, **kwargs):
super().__init__(score, **kwargs)
def get_datas(self,
video_path: str,
group_size: int = 3,
transforms: object = None,
**kwargs):
self.group_size = group_size
self.video_cap = cv2.VideoCapture(video_path, 0)
self.datas = self.score(video_cap=self.video_cap,
group_size=group_size,
transforms=transforms)
return deepcopy(self.datas)
| StarcoderdataPython |
190387 | # Copyright (C) 2020 GreenWaves Technologies, SAS
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import logging
from graph.types import NNEdge
from graph.types.others import ReshapeParameters, StridedSliceParameters
from utils.graph import GraphView
from utils.node_id import NodeId
from ..matcher import Matcher, match_name, description, run_before, groups
LOG = logging.getLogger("nntool." + __name__)
@match_name("remove_slice")
@description("Removes slices that are doing nothing and may insert a reshape if necessary.")
@run_before('*')
@groups('symmetric', 'scaled')
class RemoveSlice(Matcher):
def _match(self, G: GraphView, set_identity: bool = True, **kwargs) -> bool:
has_modified_graph = False
for node in [node for node in G.nodes(node_classes=StridedSliceParameters)]:
if node.slice_shape != tuple(node.in_dims[0].shape):
continue
has_modified_graph = True
nid = NodeId(node)
if node.slice_shape == node.out_shape:
LOG.info(f'removing strided slice {node.name} that does nothing')
G.remove_and_reconnect(node, edge_class=NNEdge)
if G.quantization and nid in G.quantization:
del G.quantization[nid]
else:
reshape = ReshapeParameters(G.unique_name(f'{node.name}_reshape'), old_shape=node.slice_shape, shape=node.out_shape)
LOG.info(f'replacing strided slice {node.name} with reshape {reshape.name}')
G.replace_node(node, reshape)
if G.quantization and nid in G.quantization:
G.quantization[NodeId(reshape)] = G.quantization[nid]
del G.quantization[nid]
if set_identity:
self.set_identity(G)
return has_modified_graph
| StarcoderdataPython |
3259280 | import asyncio
from unittest import mock
from aioresponses import aioresponses
from django.core import mail
from django.urls import reverse
from freezegun import freeze_time
from model_bakery import baker
from glitchtip.test_utils.test_case import GlitchTipTestCase
from organizations_ext.models import OrganizationUserRole
from users.models import ProjectAlertStatus
from ..constants import MonitorType
from ..models import Monitor, MonitorCheck
from ..tasks import dispatch_checks
from ..utils import fetch_all
class UptimeTestCase(GlitchTipTestCase):
@mock.patch("glitchtip.uptime.tasks.perform_checks.run")
def test_dispatch_checks(self, mocked):
        mocked.return_value = None
test_url = "https://example.com"
with freeze_time("2020-01-01"):
mon1 = baker.make(Monitor, url=test_url, monitor_type=MonitorType.GET)
mon2 = baker.make(Monitor, url=test_url, monitor_type=MonitorType.GET)
baker.make(MonitorCheck, monitor=mon1)
self.assertEqual(mocked.call_count, 2)
with freeze_time("2020-01-02"):
baker.make(MonitorCheck, monitor=mon2)
dispatch_checks()
self.assertEqual(mocked.call_count, 3)
@aioresponses()
def test_fetch_all(self, mocked):
test_url = "https://example.com"
mocked.get(test_url, status=200)
mon1 = baker.make(Monitor, url=test_url, monitor_type=MonitorType.GET)
mocked.get(test_url, status=200)
loop = asyncio.get_event_loop()
monitors = list(Monitor.objects.all().values())
results = loop.run_until_complete(fetch_all(monitors, loop))
self.assertEqual(results[0]["id"], mon1.pk)
@aioresponses()
def test_monitor_checks_integration(self, mocked):
test_url = "https://example.com"
mocked.get(test_url, status=200)
with freeze_time("2020-01-01"):
mon = baker.make(Monitor, url=test_url, monitor_type=MonitorType.GET)
self.assertEqual(mon.checks.count(), 1)
mocked.get(test_url, status=200)
with freeze_time("2020-01-01"):
dispatch_checks()
self.assertEqual(mon.checks.count(), 1)
with freeze_time("2020-01-02"):
with self.assertNumQueries(5):
dispatch_checks()
self.assertEqual(mon.checks.count(), 2)
@aioresponses()
@mock.patch("requests.post")
def test_monitor_notifications(self, mocked, mock_post):
self.create_user_and_project()
test_url = "https://example.com"
mocked.get(test_url, status=200)
with freeze_time("2020-01-01"):
baker.make(
Monitor,
name=test_url,
url=test_url,
monitor_type=MonitorType.GET,
project=self.project,
)
baker.make(
"alerts.AlertRecipient",
alert__uptime=True,
alert__project=self.project,
recipient_type="email",
)
baker.make(
"alerts.AlertRecipient",
alert__uptime=True,
alert__project=self.project,
recipient_type="webhook",
url="https://example.com",
)
mocked.get(test_url, status=500)
with freeze_time("2020-01-02"):
dispatch_checks()
self.assertEqual(len(mail.outbox), 1)
self.assertIn("is down", mail.outbox[0].body)
mock_post.assert_called_once()
mocked.get(test_url, status=500)
with freeze_time("2020-01-03"):
dispatch_checks()
self.assertEqual(len(mail.outbox), 1)
mocked.get(test_url, status=200)
with freeze_time("2020-01-04"):
dispatch_checks()
self.assertEqual(len(mail.outbox), 2)
self.assertIn("is back up", mail.outbox[1].body)
@aioresponses()
def test_notification_default_scope(self, mocked):
""" Subscribe by default should not result in alert emails for non-team members """
self.create_user_and_project()
test_url = "https://example.com"
# user2 is an org member but not in a relevant team, should not receive alerts
user2 = baker.make("users.user")
org_user2 = self.organization.add_user(user2, OrganizationUserRole.MEMBER)
team2 = baker.make("teams.Team", organization=self.organization)
team2.members.add(org_user2)
# user3 is in team3 which should receive alerts
user3 = baker.make("users.user")
org_user3 = self.organization.add_user(user3, OrganizationUserRole.MEMBER)
self.team.members.add(org_user3)
team3 = baker.make("teams.Team", organization=self.organization)
team3.members.add(org_user3)
team3.projects.add(self.project)
baker.make(
"alerts.AlertRecipient",
alert__uptime=True,
alert__project=self.project,
recipient_type="email",
)
mocked.get(test_url, status=200)
with freeze_time("2020-01-01"):
baker.make(
Monitor,
name=test_url,
url=test_url,
monitor_type=MonitorType.GET,
project=self.project,
)
mocked.get(test_url, status=500)
with self.assertNumQueries(12):
with freeze_time("2020-01-02"):
dispatch_checks()
self.assertNotIn(user2.email, mail.outbox[0].to)
self.assertIn(user3.email, mail.outbox[0].to)
self.assertEqual(len(mail.outbox[0].to), 2)
@aioresponses()
def test_user_project_alert_scope(self, mocked):
""" User project alert should not result in alert emails for non-team members """
self.create_user_and_project()
test_url = "https://example.com"
baker.make(
"alerts.AlertRecipient",
alert__uptime=True,
alert__project=self.project,
recipient_type="email",
)
user2 = baker.make("users.user")
self.organization.add_user(user2, OrganizationUserRole.MEMBER)
baker.make(
"users.UserProjectAlert",
user=user2,
project=self.project,
status=ProjectAlertStatus.ON,
)
mocked.get(test_url, status=200)
with freeze_time("2020-01-01"):
baker.make(
Monitor,
name=test_url,
url=test_url,
monitor_type=MonitorType.GET,
project=self.project,
)
mocked.get(test_url, status=500)
with self.assertNumQueries(12):
with freeze_time("2020-01-02"):
dispatch_checks()
self.assertNotIn(user2.email, mail.outbox[0].to)
def test_heartbeat(self):
self.create_user_and_project()
with freeze_time("2020-01-01"):
monitor = baker.make(
Monitor, monitor_type=MonitorType.HEARTBEAT, project=self.project,
)
baker.make(
"alerts.AlertRecipient",
alert__uptime=True,
alert__project=self.project,
recipient_type="email",
)
url = reverse(
"heartbeat-check",
kwargs={
"organization_slug": monitor.organization.slug,
"endpoint_id": monitor.endpoint_id,
},
)
self.assertFalse(monitor.checks.exists())
self.client.post(url)
self.assertTrue(monitor.checks.filter(is_up=True).exists())
dispatch_checks()
self.assertTrue(monitor.checks.filter(is_up=True).exists())
self.assertEqual(len(mail.outbox), 0)
with freeze_time("2020-01-02"):
dispatch_checks()
self.assertEqual(len(mail.outbox), 1)
with freeze_time("2020-01-03"):
dispatch_checks() # Still down
self.assertEqual(len(mail.outbox), 1)
with freeze_time("2020-01-04"):
self.client.post(url) # Back up
self.assertEqual(len(mail.outbox), 2)
def test_heartbeat_grace_period(self):
# Don't alert users when heartbeat check has never come in
self.create_user_and_project()
baker.make(Monitor, monitor_type=MonitorType.HEARTBEAT, project=self.project)
dispatch_checks()
self.assertEqual(len(mail.outbox), 0)
| StarcoderdataPython |
3244977 | # -*- coding: utf-8 -*-
"""
Copyright [2009-2021] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
import logging
from pathlib import Path
import typing as ty
from sqlitedict import SqliteDict
from rnacentral_pipeline.databases import data
from rnacentral_pipeline.databases.generic import v1
LOGGER = logging.getLogger(__name__)
def update_entry(
context: SqliteDict, entry: ty.Dict[str, ty.Any]
) -> ty.Dict[str, ty.Any]:
prefix, raw_taxid = entry["taxonId"].split(":", 1)
taxid = int(raw_taxid)
if taxid not in context:
raise ValueError(f"Unknown tax id {taxid}")
tax_info = context[taxid]
if tax_info.replaced_by:
pid = entry["primaryId"]
updated = tax_info.replaced_by
entry["taxonId"] = f"{prefix}:{updated}"
LOGGER.info(f"Entry {pid} replaced taxid {taxid} -> {updated}")
return entry
def parse(context_file: Path, json_file: Path) -> ty.Iterable[data.Entry]:
context = SqliteDict(filename=context_file)
with json_file.open("r") as raw:
ncrnas = json.load(raw)
ncrnas["data"] = [update_entry(context, e) for e in ncrnas["data"]]
yield from v1.parse(ncrnas)
| StarcoderdataPython |
95878 | # -*- coding: utf-8 -*-
# Copyright 2018 IBM.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import unittest
from itertools import combinations, chain
import numpy as np
from parameterized import parameterized
from qiskit import QuantumCircuit, QuantumRegister
from qiskit_aqua import get_aer_backend
from qiskit import execute as q_execute
from qiskit.quantum_info import state_fidelity
from test.common import QiskitAquaTestCase
class TestMCT(QiskitAquaTestCase):
@parameterized.expand([
[1],
[2],
[3],
[4],
[5],
[6],
[7],
])
def test_mct(self, num_controls):
c = QuantumRegister(num_controls, name='c')
o = QuantumRegister(1, name='o')
allsubsets = list(chain(*[combinations(range(num_controls), ni) for ni in range(num_controls + 1)]))
for subset in allsubsets:
for mode in ['basic', 'advanced']:
qc = QuantumCircuit(o, c)
if mode == 'basic':
if num_controls <= 2:
num_ancillae = 0
else:
num_ancillae = num_controls - 2
else:
if num_controls <= 4:
num_ancillae = 0
else:
num_ancillae = 1
if num_ancillae > 0:
a = QuantumRegister(num_ancillae, name='a')
qc.add_register(a)
for idx in subset:
qc.x(c[idx])
qc.cnx(
[c[i] for i in range(num_controls)],
o[0],
[a[i] for i in range(num_ancillae)],
mode=mode
)
for idx in subset:
qc.x(c[idx])
vec = np.asarray(q_execute(qc, get_aer_backend(
'statevector_simulator')).result().get_statevector(qc, decimals=16))
vec_o = [0, 1] if len(subset) == num_controls else [1, 0]
# print(vec, np.array(vec_o + [0] * (2 ** (num_controls + num_ancillae + 1) - 2)))
f = state_fidelity(vec, np.array(vec_o + [0] * (2 ** (num_controls + num_ancillae + 1) - 2)))
self.assertAlmostEqual(f, 1)
return
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
3264628 | import functools
import os, sys
import time
import cv2
import numpy as np
import pickle
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
import tensorflow as tf
import tensorflow.contrib.slim as slim
from sklearn import mixture
from sklearn.metrics.cluster import normalized_mutual_info_score
from scipy.stats import moment
from tensorflow.linalg import logdet, trace, inv
import libs.config as cfg
import libs.nets.nets_factory as network
from libs.load_data import *
from libs.dp.merge import Gibbs_DPM_Gaussian_summary_input
from libs.dp.VI_PYMMG_functions import R_VI_PYMMG_CoC
FLAGS = tf.app.flags.FLAGS
def gmm_loss(ys, mus, gammas):
"""clustering loss L0.
Args:
y: nxd tensor: NxD
mu: nxd tensor; mus multiplied by assign index: NxD
gamma: dxdxn; precison matrix: NxDxD
"""
ll = tf.zeros([], dtype=tf.float32)
def condition(i, ys, mus, gammas, ll):
r = tf.less(i, tf.shape(ys))
return r[0]
def loop(i, ys, mus, gammas, ll):
y = tf.expand_dims(ys[i], 0) #1xD
mu = tf.expand_dims(mus[i], 0) #1xD
gamma = gammas[i] #DxD
ll = ll + tf.squeeze(tf.matmul(tf.matmul((y - mu), gamma),
tf.transpose(y - mu)))
return [i+1, ys, mus, gammas, ll]
i = 0
[i, ys, mus, gammas, ll] = tf.while_loop(condition, loop,
[i, ys, mus, gammas, ll])
return ll/tf.cast(tf.shape(ys)[0], tf.float32)
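# Hedged numpy reference for the clustering term above (illustrative only; assumes the same
# NxD / NxDxD array shapes as gmm_loss). It mirrors the TF while-loop: the mean over samples
# of the Mahalanobis-style term (y_i - mu_i) Gamma_i (y_i - mu_i)^T.
def gmm_loss_numpy_reference(ys, mus, gammas):
    total = 0.0
    for y, mu, gamma in zip(ys, mus, gammas):
        diff = (y - mu)[None, :]                # 1xD row vector
        total += float(diff @ gamma @ diff.T)   # scalar quadratic form
    return total / ys.shape[0]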
def standardize(x):
"""standardize a tensor.
Args:
x is a nxp tensor
"""
meanv, varv = tf.nn.moments(x, 0) # p
stdv = tf.sqrt(varv)
return (x - meanv)/stdv
def np_standardize(x):
"""standardize a numpy array.
Args:
x is a nxp array
"""
stdv = (moment(x, moment=2,axis=0))**0.5
meanv = np.mean(x,axis=0)
return (x - meanv)/stdv, meanv, stdv
def restore(sess, opt=0):
"""restore session with different options
Args:
opt = 1: restore from checkpoint
opt = 0: restore from pretrained initializatoin (remove fc layers)
"""
checkpoint_path = FLAGS.checkpoint_path
vars_to_restore = tf.trainable_variables()
vars_to_restore1 = vars_to_restore[:]
if FLAGS.normalize == 1 and opt == 0:
for var in vars_to_restore1:
if 'batchnorm' in var.name:
vars_to_restore.remove(var)
for var in vars_to_restore1:
if 'ip' in var.name or 'fc4' in var.name:
vars_to_restore.remove(var)
restorer = tf.train.Saver(vars_to_restore)
restorer.restore(sess, checkpoint_path)
def train():
## set the parameters for different datasets
if FLAGS.dataset == 'mnist_test':
img_height = img_width = 28
learning_rate = 0.001
Detcoef = 50
apply_network = 'lenet'
elif FLAGS.dataset == 'usps':
img_height = img_width = 16
learning_rate = 0.0001
Detcoef = 50
apply_network = 'lenet0'
elif FLAGS.dataset == 'frgc':
img_height = img_width = 32
learning_rate = 0.1
Detcoef = 20
apply_network = 'lenet'
elif FLAGS.dataset == 'ytf':
img_height = img_width = 55
learning_rate = 0.1
Detcoef = 20
apply_network = 'lenet'
elif FLAGS.dataset == 'umist':
img_height = 112
img_width = 92
learning_rate = 0.0001
Detcoef = 20
apply_network = 'dlenet'
else:
img_height = FLAGS.img_height
img_width = FLAGS.img_width
learning_rate = FLAGS.learning_rate
Detcoef = FLAGS.Detcoef
apply_network = FLAGS.network
tf.logging.set_verbosity(tf.logging.DEBUG)
with tf.Graph().as_default():
# tensor for input images
if FLAGS.is_resize:
imageip = tf.placeholder(tf.float32, [None, FLAGS.resize_height, FLAGS.resize_width, 3])
else:
imageip = tf.placeholder(tf.float32, [None, img_height, img_width, 3])
# get the embedding data from the network
_, end_points =network.get_network(apply_network, imageip, FLAGS.max_k,
weight_decay=FLAGS.weight_decay, is_training=True, reuse = False, spatial_squeeze=False)
# fc3 is the name of our embedding layer
end_net = end_points['fc3']
# normalize the embedding data
if FLAGS.normalize==0: # standardize
end_data = standardize(end_net)
elif FLAGS.normalize==1: # batch normalize
end_data = slim.batch_norm(end_net, activation_fn=None, scope='batchnorm',is_training=True)
# calculate LD the sample covaraince variance matrix of embedding data
diff_data = end_data - tf.expand_dims(tf.reduce_mean(end_data, 0),0)
cov_data = 1. / (tf.cast(tf.shape(end_data)[0], tf.float32) - 1.)*tf.matmul(tf.transpose(diff_data), diff_data)
det_loss = - logdet(cov_data)
# get the numpy data for both purpose of clustering and evaluation
_, val_end_points =network.get_network(apply_network, imageip, FLAGS.max_k,
weight_decay=FLAGS.weight_decay, is_training=False, reuse = True, spatial_squeeze=False)
val_end_data = val_end_points['fc3']
if FLAGS.normalize==1:
val_end_data = slim.batch_norm(val_end_data, activation_fn=None, scope='batchnorm',is_training=False, reuse=True)
# clustering loss
cls_mus = tf.placeholder(tf.float32, [None, FLAGS.embed_dims])
cls_Gammas = tf.placeholder(tf.float32, [None, FLAGS.embed_dims, FLAGS.embed_dims])
cluster_loss = gmm_loss(end_data, cls_mus, cls_Gammas)
# l2 regularization
penalty = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
# total loss
total_loss = cluster_loss + Detcoef*det_loss
if penalty:
l2_penalty = tf.add_n(penalty)
total_loss += l2_penalty
global_step = slim.create_global_step()
## load the data
df_path = '{}/{}.h5'.format(FLAGS.dataset_dir, FLAGS.dataset)
f = h5py.File(df_path, 'r')
## Get the data
data = list(f['data'])
label = list(f['labels'])
train_datum = load_train_data(data,label)
train_datum.center_data()
train_datum.shuffle(100)
val_data, val_truth = np.copy(train_datum.data), np.copy(train_datum.label)
## set up mini-batch steps and optimizer
batch_num = train_datum.data.shape[0]//FLAGS.batch_size
learning_rate = tf.train.inverse_time_decay(learning_rate, global_step, batch_num, 0.0001*batch_num, True)
var_list = tf.trainable_variables()
opt = tf.train.MomentumOptimizer(learning_rate = learning_rate, momentum = FLAGS.momentum)
train_opt = slim.learning.create_train_op(
total_loss, opt,
global_step=global_step,
variables_to_train=var_list,
summarize_gradients=False)
## load session
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.90)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
init_op = tf.group(
tf.global_variables_initializer(),
tf.local_variables_initializer()
)
sess.run(init_op)
## log setting and results
timestampLaunch = time.strftime("%d%m%Y") + '-' + time.strftime("%H%M%S")
# record config
if not os.path.exists(FLAGS.out_dir):
os.makedirs(FLAGS.out_dir)
if not os.path.exists(os.path.join(FLAGS.out_dir, FLAGS.dataset)):
os.makedirs(os.path.join(FLAGS.out_dir, FLAGS.dataset))
outdir = os.path.join(FLAGS.out_dir, FLAGS.dataset, timestampLaunch)
if not os.path.exists(outdir):
os.makedirs(outdir)
if FLAGS.dataset == 'umist':
max_periods = 2000
else:
max_periods = FLAGS.max_periods
# load saver and restore session
saver = tf.train.Saver(max_to_keep=3)
if FLAGS.restore_previous_if_exists:
restore(sess, 1)
else:
if FLAGS.if_initialize_from_pretrain:
restore(sess, 0)
period_cluster_l, period_det_l, period_tot_l, conv_cluster_l = [], [], [], [sys.float_info.max]
""" start the training """
print('start training the dataset of {}'.format(FLAGS.dataset))
for period in range(max_periods):
real_period = period + FLAGS.checkpoint_periods
'''Forward steps'''
## get the numpy array of embedding data for clustering
val_embed = []
if FLAGS.dataset == 'mnist_test': #10000
for s in range(10):
start = s*1000
end = (s+1)*1000
val_embed_x = sess.run(val_end_data, feed_dict={imageip:val_data[start:end]})
val_embed.append(val_embed_x)
elif FLAGS.dataset == 'usps': # 11000
for s in range(11):
start = s*1000
end = (s+1)*1000
val_embed_x = sess.run(val_end_data, feed_dict={imageip:val_data[start:end]})
val_embed.append(val_embed_x)
elif FLAGS.dataset == 'frgc': # 2462
for s in range(25):
start = s*100
end = (s+1)*100
if s == 24:
end = end - 38
val_embed_x = sess.run(val_end_data, feed_dict={imageip:val_data[start:end]})
val_embed.append(val_embed_x)
elif FLAGS.dataset == 'ytf': ##55x55; 10000
for s in range(10):
start = s*1000
end = (s+1)*1000
val_embed_x = sess.run(val_end_data, feed_dict={imageip:val_data[start:end]})
val_embed.append(val_embed_x)
elif FLAGS.dataset == 'umist': # < 2000
val_embed = sess.run(val_end_data, feed_dict={imageip:val_data})
if FLAGS.dataset != 'umist':
val_embed = np.concatenate(val_embed,axis=0)
            if FLAGS.normalize==0:
                val_embed, val_mean, val_std = np_standardize(val_embed)
            else:
                # placeholders so the np.savez call below still works when batch norm is used
                val_mean, val_std = np.zeros(val_embed.shape[1]), np.ones(val_embed.shape[1])
### use dpm to cluster the embedding data
dpgmm = mixture.BayesianGaussianMixture(n_components=FLAGS.max_k,
weight_concentration_prior=FLAGS.alpha/FLAGS.max_k,
weight_concentration_prior_type='dirichlet_process',
covariance_prior=FLAGS.embed_dims*np.identity(FLAGS.embed_dims),
covariance_type='full').fit(val_embed)
val_labels = dpgmm.predict(val_embed)
if FLAGS.onsign:
### SIGN algorithm to merge clusters
ulabels = np.unique(val_labels).tolist()
uln_l = []
ulxtx_l = []
ulxx_l = []
for ul in ulabels:
ulx = val_embed[val_labels==ul,:] #Nk x p
uln = np.sum(val_labels==ul) #Nk
ulxtx = np.matmul(ulx.T, ulx) #p x p
ulxx = np.sum(ulx, axis=0) # p
uln_l.append(uln)
ulxtx_l.append(ulxtx)
ulxx_l.append(ulxx)
uxx = np.stack(ulxx_l, axis=0) #kxp
un = np.array(uln_l) # k
uxtx = np.stack(ulxtx_l, axis=0).T # p x p x k
if FLAGS.embed_dims < 50:
Rest = Gibbs_DPM_Gaussian_summary_input(uxtx, uxx, un) # mcmc
else:
Rest = R_VI_PYMMG_CoC(uxtx, uxx, un) # variational inference
member, dp_Gammas, dp_mus = Rest['member_est'], Rest['Prec'], Rest['mu']
val_labels_new = np.copy(val_labels)
for u, ul in enumerate(ulabels):
val_labels_new[val_labels==ul] = int(member[u]) # order the cluster value with index
val_labels = np.copy(val_labels_new)
# evaluate and save the results
val_count = np.bincount(val_labels)
val_count2 = np.nonzero(val_count)
est_cls = {}
for v in val_count2[0].tolist():
est_cls[v] = []
for vv, vl in enumerate(val_labels.tolist()):
est_cls[vl].append(val_truth[vv])
## sort the labels to be used for backward
train_labels_new = np.copy(val_labels)
member1 = np.array([int(m) for m in member])
member2 = np.unique(member1)
member2.sort()
train_labels_new1 = np.copy(train_labels_new)
for mbi, mb in enumerate(member2.tolist()):
train_labels_new1[train_labels_new==mb] = mbi
train_labels_onehot = np.eye(member2.shape[0])[train_labels_new1]
else:
dp_mus = dpgmm.means_
dp_Gammas = dpgmm.precisions_.T
train_labels_onehot = np.eye(FLAGS.max_k)[val_labels]
nmi = normalized_mutual_info_score(val_labels, val_truth)
if period > 0:
print("NMI for period{} is {}".format(period,nmi))
if period >= 100:
## check if the results need to be saved using det_loss and cluster_loss
dperiod_det_loss = np.abs((period_det_l[-1] - period_det_l[-2])/period_det_l[-2])
if dperiod_det_loss <= FLAGS.epsilon:
conv_cluster_l.append(period_cluster_loss)
if conv_cluster_l[-1] < min(conv_cluster_l[:-1]):
best_nmi, best_period = nmi, real_period
saver.save(sess, os.path.join(outdir, 'ckpt'), real_period)
# save truth and labels
np.savez(os.path.join(outdir,'labels_{}.npy'.format(real_period)),
val_labels=val_labels, val_truth=val_truth,
val_mean=val_mean, val_std=val_std)
# save dpm model
with open(os.path.join(outdir, 'model_{}.pkl'.format(real_period)), 'wb') as pf:
pickle.dump(dpgmm, pf)
if period < max_periods - 1:
''' Backward steps'''
# require: train_labels_onehot:NxK; dp_mus: KxD; dp_Gammas: DxDxK
train_datum.reset() # reset data from the original order to match predicted label
period_cluster_loss, period_det_loss = 0., 0.
for step in range(batch_num):
real_step = step + real_period*batch_num
train_x, train_y = train_datum.nextBatch(FLAGS.batch_size)
start, end = step*FLAGS.batch_size, (step+1)*FLAGS.batch_size
step_labels_onehot = train_labels_onehot[start:end]
cls_mu = np.matmul(step_labels_onehot, dp_mus) # NxK x KxD=> NxD
cls_Gamma = np.matmul(dp_Gammas, step_labels_onehot.T).T # DxDxK KxN => DxDxN => NxDxD
_, dlossv, dtlossv= sess.run([train_opt, cluster_loss, det_loss],
feed_dict={imageip:train_x, cls_mus:cls_mu, cls_Gammas: cls_Gamma})
# save loss
period_cluster_loss += dlossv/batch_num
period_det_loss += dtlossv/batch_num
#print('DP loss for back step {} is {}; det loss is{}, total loss is{}'.format(real_step,
# dlossv, dtlossv, dlossv + Detcoef*dtlossv))
## shuffle train data for next batch
train_datum.shuffle(period)
val_data, val_truth = np.copy(train_datum.data), np.copy(train_datum.label)
## record the period loss
period_tot_loss = period_cluster_loss + Detcoef*period_det_loss
period_det_l.append(period_det_loss)
period_cluster_l.append(period_cluster_loss)
period_tot_l.append(period_tot_loss)
if __name__ == '__main__':
train()
| StarcoderdataPython |
3278165 | _base_ = [
'../_base_/models/deeplabv3plus_r50-d8.py', '../_base_/datasets/flood.py',
'../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py'
]
img_scale = (640, 640)
crop_size = (480, 480)
train_pipeline = [
dict(type='LoadImageFromFile_flood'),
dict(type='LoadAnnotations_flood'),
dict(type='Resize', img_scale=img_scale, ratio_range=(0.8, 1.5)),
dict(type='RandomCrop', crop_size=crop_size),
dict(type='RandomFlip', prob=0.5, direction='horizontal'),
dict(type='RandomFlip', prob=0.5, direction='vertical'),
dict(type='RandomRotate', prob=0.5, degree=180, pad_val=0,seg_pad_val=0),
# dict(type='PhotoMetricDistortion'),
# dict(type='Normalize', **img_norm_cfg),
# dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
dict(type='DefaultFormatBundle'),
dict(type='Collect_flood', keys=['img', 'gt_semantic_seg']),
]
test_pipeline = [
dict(type='LoadImageFromFile_flood'),
dict(
type='MultiScaleFlipAug',
img_scale=[(480, 480), (512, 512), (640, 640)],
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect_flood_test', keys=['img']),
])
]
data = dict(
test=dict(
pipeline=test_pipeline))
model = dict(
pretrained = None,
backbone=dict(in_channels=9,
depth=101),
decode_head=dict(num_classes=2),
auxiliary_head=dict(num_classes=2),
test_cfg=dict(mode='slide', crop_size=(480, 480), stride=(320, 320)))
# load_from = "/data/projects/Doc_NTS_Cls/models/pretrained/resnet101-5d3b4d8f.pth"
| StarcoderdataPython |
3342934 | from test_junkie.rules import Rules
class BadAfterTestRules(Rules):
def after_test(self, **kwargs):
raise Exception("Expected")
| StarcoderdataPython |
4833111 | <reponame>ahmobayen/image_processing<filename>BackEnd/model/process/deep_learning.py
from model.joint.in_app_parameters import FRAME_PARAMETERS, ALGORITHM_PARAMETERS, TRACKER_PARAMETERS
from model.joint.static_paths import path, TRAINING_PATH
from model.widespread.db_connection import SqlDatabaseConnection
from model.widespread.logger import Logger
import datetime
import numpy
import cv2 as cv
class DNNDetection:
def __init__(self):
self.logging = Logger('deep_network')
self.logging.info('Initializing DNN Detection.')
self.frame = numpy.ndarray
# Flags (All Types are considered private)
self.elements_position = [] # considered for detected elements
self.elements_category = [] # considered for any detail needs to be displayed for an element
self.elements_id = [] # assign an ID to detected elements
self.elements_agg_info = {} # all info about detected items in each frame
        self.elements_direction = {}  # considered for detected direction
        self.elements_history = []  # rolling per-frame detection history (used by direction_detection)
# initializing deep network
self.__classes = open(path.join(TRAINING_PATH, 'coco.names')).read().strip().split('\n')
self.__net = cv.dnn.readNetFromDarknet(cfgFile=path.join(TRAINING_PATH, 'yolov4-tiny.cfg'),
darknetModel=path.join(TRAINING_PATH, 'yolov4-tiny.weights'))
self.__net.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV)
if cv.ocl.haveOpenCL():
self.logging.info('OpenCL Support status: TRUE', '\n\tenabling OpenCL support ... ')
cv.ocl.setUseOpenCL(True)
self.__net.setPreferableTarget(cv.dnn.DNN_TARGET_OPENCL)
else:
self.logging.info('OpenCL Support status: FALSE')
self.__net.setPreferableTarget(cv.dnn.DNN_TARGET_CPU)
self.__layer = self.__net.getLayerNames()
self.__layer = [self.__layer[i[0] - 1] for i in self.__net.getUnconnectedOutLayers()]
self.accuracy_performance_ratio = (608, 608) if ALGORITHM_PARAMETERS['accuracy_performance_ratio'] == 3 else \
(320, 320) if ALGORITHM_PARAMETERS['accuracy_performance_ratio'] == 1 else (416, 416)
try:
self.multi_tracker = cv.MultiTracker_create()
except cv.Error as error:
self.logging.error('Tracking is not supported:', error)
self.logging.info('DNN successfully initialized')
def multi_object_detection(self, frame):
""" Using YOLO deep learning algorithm to determine objects in each frame"""
# releasing allocated lists from previous frames
self.frame = frame
self.elements_position.clear() if len(self.elements_position) != 0 else self.elements_position
self.elements_id.clear() if len(self.elements_id) != 0 else self.elements_id
self.elements_category.clear() if len(self.elements_category) != 0 else self.elements_category
self.elements_agg_info.clear() if len(self.elements_agg_info) != 0 else None
# init local variables
boxes = []
confidences = []
class_ids = []
elements = []
texts = []
# running deep learning algorithm to determine required objects
self.__net.setInput(cv.dnn.blobFromImage(image=self.frame, scalefactor=1 / 255.0,
size=self.accuracy_performance_ratio, swapRB=True, crop=False))
frame_result = numpy.vstack(self.__net.forward(self.__layer))
for output in frame_result:
scores = output[5:]
class_id = numpy.argmax(scores)
confidence = scores[class_id]
if confidence > ALGORITHM_PARAMETERS['confidence_threshold']:
x, y, w, h = output[:4] * numpy.array([self.frame.shape[1], self.frame.shape[0],
self.frame.shape[1], self.frame.shape[0]])
boxes.append([int(x - w // 2), int(y - h // 2), int(w), int(h)])
confidences.append(float(confidence))
class_ids.append(class_id)
indices = cv.dnn.NMSBoxes(boxes, confidences, ALGORITHM_PARAMETERS['confidence_threshold'],
ALGORITHM_PARAMETERS['confidence_nms_threshold'])
if len(indices) > 0:
for i in indices.flatten():
element = boxes[i][0], boxes[i][1], boxes[i][2], boxes[i][3]
elements.append(element)
texts.append("{}: {:.4f}".format(self.__classes[class_ids[i]], confidences[i]))
element_info = list(zip(elements, texts))
element_info.sort(key=lambda mob_ayn: mob_ayn[0])
for count, item in enumerate(element_info, 1):
self.elements_position.append(item[0])
self.elements_category.append(item[1])
self.elements_agg_info[count] = [item[0], item[1]]
if len(self.elements_position) != 0:
self.logging.info(f'number of detected items in frame: {len(self.elements_position)}')
return self.elements_agg_info
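    # Illustrative shape of the returned structure (hedged, values are made up):
    # {1: [(x, y, w, h), 'person: 0.8731'],
    #  2: [(x, y, w, h), 'car: 0.6402'], ...}
    # i.e. a 1-based element id mapping to [bounding box, "<class>: <confidence>"].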
def object_tracking_initialization(self, frame, elements_position):
"""Separated in purpose of possibility to make concurrent processing.
requirements of this class is frame and detected elements which must be
obtained by deep_network."""
# tracking parameters
def tracker_type():
tracker = cv.TrackerBoosting_create() if TRACKER_PARAMETERS == 'BOOSTING' else \
cv.TrackerMIL_create() if TRACKER_PARAMETERS == 'MIL' else \
cv.TrackerKCF_create() if TRACKER_PARAMETERS == 'KCF' else \
cv.TrackerTLD_create() if TRACKER_PARAMETERS == 'TLD' else \
cv.TrackerMedianFlow_create() if TRACKER_PARAMETERS == 'MEDIANFLOW' else \
                cv.TrackerMOSSE_create() if TRACKER_PARAMETERS == 'MOSSE' else \
                cv.TrackerGOTURN_create() if TRACKER_PARAMETERS == 'GOTURN' else \
                cv.TrackerCSRT_create() if TRACKER_PARAMETERS == 'CSRT' else cv.TrackerKCF_create()
return tracker
self.multi_tracker = cv.MultiTracker_create()
for element in elements_position:
self.multi_tracker.add(tracker_type(), frame, element)
def object_tracking(self, frame):
"""The main process of object tracking base on defined algorithm.
this must be used after object_tracking_initialization() method which
initialize multi tracking algorithm. """
# get updated location of objects in subsequent frames
        self.elements_agg_info.clear() if len(self.elements_agg_info) != 0 else None
success, update_position = self.multi_tracker.update(frame)
for count, position in enumerate(update_position, 0):
self.elements_position[count] = (int(position[0]), int(position[1]), int(position[2]), int(position[3]))
self.elements_agg_info[count + 1] = [self.elements_position[count], self.elements_category[count]]
return self.elements_agg_info
def direction_detection(self):
"""detecting object direction"""
def summarize_given_info(element):
position, group = element[0], element[1]
centroid = int(position[0] + 0.5 * position[2]), int(position[1] + 0.5 * position[3])
group = element[1].split(":")
return [centroid, group[0]]
def area_detection(previous_frame_slice, current_frame_slice):
for current_frame_element_counter in current_frame_slice:
if current_frame_element_counter not in self.elements_direction.keys():
current_element = summarize_given_info(current_frame[current_frame_element_counter])
for previous_frame_element_counter in previous_frame_slice:
previous_element = summarize_given_info(frame[previous_frame_element_counter])
if current_element[1] == previous_element[1] and \
                                (abs(current_frame_element_counter - previous_frame_element_counter) < 2):
x_movement = current_element[0][0] - previous_element[0][0]
y_movement = current_element[0][1] - previous_element[0][1]
if x_movement <= 5 * ALGORITHM_PARAMETERS['direction_sensitivity']:
x_direction = 'Right' if x_movement > ALGORITHM_PARAMETERS['direction_sensitivity'] \
else 'Left' if x_movement < - ALGORITHM_PARAMETERS['direction_sensitivity'] else\
'Still'
else:
continue
if y_movement <= 2 * ALGORITHM_PARAMETERS['direction_sensitivity']:
y_direction = 'Down' if y_movement > ALGORITHM_PARAMETERS['direction_sensitivity'] \
else 'Up' if y_movement < - ALGORITHM_PARAMETERS['direction_sensitivity'] else \
'Still'
else:
continue
cv.line(img=self.frame, pt1=current_element[0], pt2=previous_element[0],
color=FRAME_PARAMETERS['frame_red'], thickness=FRAME_PARAMETERS['frame_thickness'])
direction = [x_direction, y_direction]
self.elements_direction[current_frame_element_counter] = direction
break
# initialization parameters
self.elements_direction.clear() if len(self.elements_direction) != 0 else self.elements_direction
self.elements_history.append(self.elements_agg_info.copy())
# direction algorithm
element_history = self.elements_history.copy()
if self.elements_history[-1] != self.elements_history[0]:
current_frame = element_history.pop(-1)
for frame in element_history:
area_detection(frame, current_frame) if len(self.elements_direction) <= len(current_frame) else None
print(self.elements_direction)
print(self.elements_agg_info)
print('direction detected:', len(self.elements_direction))
# controlling validation of algorithm in continues situations
if len(self.elements_history) == ALGORITHM_PARAMETERS['direction_frame_sensitivity']:
self.elements_history.pop(0)
def save_to_db(self):
query_items = []
db_connection = SqlDatabaseConnection()
if self.elements_agg_info:
for list_id, elements_data in self.elements_agg_info.items():
current_time = datetime.datetime.now()
try:
if self.elements_direction:
if list_id in self.elements_direction:
                            if str(self.elements_direction[list_id]) != "['Still', 'Still']":
query_items.append((list_id, elements_data[1], str(self.elements_direction[list_id]),
current_time.strftime("%Y-%m-%d %H:%M:%S")))
else:
query_items.append((list_id, elements_data[1], '-',
current_time.strftime("%Y-%m-%d %H:%M:%S")))
finally:
db_connection.commit_query('INSERT INTO passage(id, detected_type, direction, time)'
'values (%s, %s, %s, %s)', query_items)
| StarcoderdataPython |
121579 | <gh_stars>0
import os
from abc import ABC
import torch
import core.utils as utils
from torch.optim import lr_scheduler
class BaseInpaint(torch.nn.Module, ABC):
def __init__(self, hy):
super(BaseInpaint, self).__init__()
######################
# init parameter
######################
self.hy = hy
self.is_train = hy['is_train']
self.num_train_semantic_net = hy['num_train_semantic_net']
self.which_epoch = hy['which_epoch']
self.experiment_name = hy['model_name']
self.checkpoints_dir = hy['checkpoints_dir']
if not os.path.exists(self.checkpoints_dir):
utils.mkdirs(self.checkpoints_dir)
# init net
self.gpu_ids = hy['gpu_ids']
self.init_type = hy['init_type']
self.init_gain = hy['init_gain']
# init lr
self.lr_gan = hy['lr_gen']
self.lr_dis = hy['lr_dis']
self.betas = (hy['beta1'], hy['beta2'])
# init GPU
self.gpu_ids = hy['gpu_ids']
self.num_semantic_label = self.hy['num_semantic_label']
self.input_dim = hy['input_dim']
self.mult_dis_para = hy['MultiscaleDis']
# scheduler
self.lr_policy = hy['lr_policy']
self.lr_decay_iters = hy['lr_decay_iters']
# for train
self.continue_train = hy['continue_train']
self.epoch_count = hy['epoch_count']
self.niter = hy['niter']
self.niter_decay = hy['niter_decay']
# print, display
self.print_freq = hy['print_freq']
self.display_freq = hy['display_freq']
self.save_epoch_freq = hy['save_epoch_freq']
self.output_dir = hy['output_dir']
######################
# init GPU
######################
self.device = torch.device('cuda:{}'.format(self.gpu_ids[0])) \
if len(self.gpu_ids) > 0 else torch.device('cpu')
######################
# init list
######################
self.model_names = []
self.optimizers = []
self.schedulers = []
######################
# init variable
######################
self.file_name = None
self.image_gt = None
self.mask = None
self.inv_ex_mask = None
self.semantic_label = None
self.semantic_one_hot = None
# just for Cross entropy Loss
self.target_semantic = None
self.edge = None
# inputs
self.input_edge = None
self.input_segmap = None
self.input_image = None
# output
self.fake_semantic = None
self.fake_image = None
######################
# init loss
######################
# inpaint_gen
self.loss_ig_L1 = None
self.loss_ig_perceptual = None
self.loss_ig_style = None
self.loss_ig_gan = None
self.loss_ig = None
# inpaint_dis
self.loss_id_gan = None
# semantic_gan
self.loss_sg_semantic = None
self.loss_sg_semantic_L1 = None
self.loss_sg = None
# semantic_dis
self.loss_sd_semantic = None
def print_network(self):
for model_name in self.model_names:
model = getattr(self, model_name)
utils.print_network(model)
def init_optimizer(self):
for model_name in self.model_names:
model = getattr(self, model_name)
if 'gan' in model_name:
lr = self.lr_gan
else:
lr = self.lr_dis
optimizer = torch.optim.Adam(model.parameters(), lr=lr, betas=self.betas)
setattr(self, 'optimizer_' + model_name, optimizer)
self.optimizers.append(getattr(self, 'optimizer_' + model_name))
self.schedulers.append(self.get_scheduler(optimizer=getattr(self, 'optimizer_' + model_name)))
def load_networks(self):
assert self.which_epoch >= 0, 'load epoch must > 0 !!'
for model_name in self.model_names:
if isinstance(model_name, str):
load_filename = '%s_%s_%s.pth' % (self.experiment_name, self.which_epoch, model_name)
load_path = os.path.join(self.checkpoints_dir, load_filename)
if os.path.exists(load_path):
state_dict = torch.load(load_path, map_location=str(self.device))
net = getattr(self, model_name)
if isinstance(net, torch.nn.DataParallel):
net = net.module
net.load_state_dict(state_dict['net'])
if self.is_train:
optimize = getattr(self, 'optimizer_' + model_name)
optimize.load_state_dict(state_dict['optimize'])
else:
print("{} not exists, will adapt default init net parameter!".format(model_name))
print("load [%s] successful!" % model_name)
def save_networks(self, which_epoch):
for model_name in self.model_names:
if isinstance(model_name, str):
                save_filename = '%s_%s_%s.pth' % (self.experiment_name, which_epoch, model_name)
save_path = os.path.join(self.checkpoints_dir, save_filename)
net = getattr(self, model_name)
optimize = getattr(self, 'optimizer_' + model_name)
if len(self.gpu_ids) > 0 and torch.cuda.is_available():
torch.save({'net': net.module.cpu().state_dict(), 'optimize': optimize.state_dict()}, save_path)
net.cuda(self.gpu_ids[0])
else:
                    torch.save({'net': net.cpu().state_dict(), 'optimize': optimize.state_dict()}, save_path)
@staticmethod
def set_requires_grad(nets, requires_grad=False):
"""Set requires_grad=False for all the networks to avoid unnecessary computations
Parameters:
nets (network list) -- a list of networks
requires_grad (bool) -- whether the networks require gradients or not
"""
if not isinstance(nets, list):
nets = [nets]
for net in nets:
if net is not None:
for param in net.parameters():
param.requires_grad = requires_grad
def update_learning_rate(self):
for scheduler in self.schedulers:
scheduler.step()
for i, optimizers in enumerate(self.optimizers):
lr = optimizers.param_groups[0]['lr']
print('optimizers_{} learning rate = {}'.format(str(i), str(lr)))
def get_scheduler(self, optimizer):
if self.lr_policy == 'lambda':
def lambda_rule(epoch):
lr_l = 1.0 - max(0, epoch + 1 + self.epoch_count - self.niter) / float(self.niter_decay + 1)
return lr_l
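            # Worked example (illustrative numbers, not from the config): with epoch_count=1,
            # niter=100 and niter_decay=100, epoch 150 gives
            # lr_l = 1 - max(0, 150 + 1 + 1 - 100) / 101 = 1 - 52/101 ≈ 0.485,
            # i.e. the base LR is linearly annealed to roughly 48.5% halfway through the decay phase.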
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
elif self.lr_policy == 'step':
scheduler = lr_scheduler.StepLR(optimizer, step_size=self.lr_decay_iters, gamma=0.1)
elif self.lr_policy == 'plateau':
scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01,
patience=5)
elif self.lr_policy == 'cosine':
scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=self.niter, eta_min=0)
else:
return NotImplementedError('learning rate policy [%s] is not implemented', self.lr_policy)
return scheduler
| StarcoderdataPython |
179125 | <gh_stars>1-10
import argparse
import numpy
from readers.read_blast import BlastReader
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('--test', '-t',
help='Caminho para arquivo BLAST de teste',
required=True)
arg_parser.add_argument('--ref', '-r',
help='Caminho para arquivo BLAST de referencia',
required=True)
arg_parser.add_argument('--limit', '-l',
help='Limite de sentencas para processamento',
type=int,
required=False)
ARGS = arg_parser.parse_args()
BLAST_PATH_TEST = ARGS.test
BLAST_PATH_REF = ARGS.ref
SENT_LIMIT = ARGS.limit
TIPO_ERROS = ['lex-incTrWord', 'lex-notTrWord']
def calcula_medidas():
blast_reader_test = BlastReader(BLAST_PATH_TEST)
blast_reader_ref = BlastReader(BLAST_PATH_REF)
verdadeiro_positivo = 0
falso_positivo = 0
for (sent_idx_test, error) in blast_reader_test.error_lines:
if SENT_LIMIT is None or (SENT_LIMIT is not None and sent_idx_test < SENT_LIMIT):
error_type = error[-1]
sys_idxs = error[1]
fp = 1
for (sent_idx_ref, error2) in blast_reader_ref.get_filtered_errors([error_type]):
if sent_idx_ref == sent_idx_test:
if set(sys_idxs) & set(error2[1]):
verdadeiro_positivo += 1
fp = 0
break
falso_positivo += fp
falso_negativo = 0
for (idx, error) in blast_reader_ref.error_lines:
        if SENT_LIMIT is None or (SENT_LIMIT is not None and idx < SENT_LIMIT):
error_type = error[-1]
sys_idxs = error[1]
fn = 1
for (idx2, error2) in blast_reader_test.get_filtered_errors([error_type]):
if idx2 == idx:
if set(sys_idxs) & set(error2[1]):
fn = 0
break
falso_negativo += fn
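    # precisao = precision = TP / (TP + FP); cobertura = recall = TP / (TP + FN)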
precisao = verdadeiro_positivo / (verdadeiro_positivo + falso_positivo)
cobertura = verdadeiro_positivo / (verdadeiro_positivo + falso_negativo)
print('Precisao: {:.2f}%'.format(precisao*100))
print('Cobertura: {:.2f}%'.format(cobertura*100))
def calcula_matriz_confusao():
blast_reader_test = BlastReader(BLAST_PATH_TEST)
blast_reader_ref = BlastReader(BLAST_PATH_REF)
indices_matriz = {x: TIPO_ERROS.index(x) for x in TIPO_ERROS}
indices_matriz['correto'] = len(indices_matriz)
matriz = numpy.zeros((len(TIPO_ERROS) + 1, len(TIPO_ERROS) + 1))
for (sent_idx_test, sent) in enumerate(blast_reader_test.sys_lines):
if SENT_LIMIT is None or (SENT_LIMIT is not None and sent_idx_test < SENT_LIMIT):
sent_class = blast_reader_test.get_error_messages(sent_idx_test)
sent_class_ref = blast_reader_ref.get_error_messages(sent_idx_test)
for (palavra_idx, palavra) in enumerate(sent):
                # Classification (predicted)
palavra_erros = [x for x in sent_class if palavra_idx in x[1]]
if palavra_erros:
idx_linha = indices_matriz[palavra_erros[0][-1]]
else:
idx_linha = indices_matriz['correto']
                # Reference
palavra_ref = [x for x in sent_class_ref if palavra_idx in x[1] and x[-1] in TIPO_ERROS]
if palavra_ref:
idx_coluna = indices_matriz[palavra_ref[0][-1]]
else:
idx_coluna = indices_matriz['correto']
matriz[idx_linha, idx_coluna] += 1
print(indices_matriz)
matprint(matriz)
def matprint(mat, fmt="g"):
col_maxes = [max([len(("{:"+fmt+"}").format(x)) for x in col]) for col in mat.T]
for x in mat:
for i, y in enumerate(x):
print(("{:"+str(col_maxes[i])+fmt+"}").format(y), end=" ")
print("")
if __name__ == "__main__":
calcula_medidas()
print('\n------------------------\n')
calcula_matriz_confusao()
| StarcoderdataPython |
4832833 | from curator import api as curator
from mock import patch, Mock
from . import CuratorTestCase
class TestAlias(CuratorTestCase):
def test_add_to_alias_positive(self):
alias = 'testalias'
self.create_index('dummy')
self.client.indices.put_alias(index='dummy', name=alias)
self.create_index('foo')
curator.add_to_alias(self.client, 'foo', alias=alias)
self.assertEquals(2, len(self.client.indices.get_alias(name=alias)))
def test_add_to_alias_negative(self):
alias = 'testalias'
self.create_index('dummy')
self.client.indices.put_alias(index='dummy', name=alias)
self.create_index('foo')
self.assertFalse(curator.add_to_alias(self.client, 'foo', alias="ooga"))
def test_add_to_alias_with_closed(self):
alias = 'testalias'
self.create_index('dummy')
self.client.indices.put_alias(index='dummy', name=alias)
self.create_index('foo')
curator.close_indices(self.client, 'foo')
self.assertTrue(curator.index_closed(self.client, 'foo'))
curator.add_to_alias(self.client, 'foo', alias=alias)
self.assertFalse(curator.add_to_alias(self.client, 'foo', alias=alias))
def test_add_to_alias_idx_already_in_alias(self):
alias = 'testalias'
self.create_index('dummy')
self.client.indices.put_alias(index='dummy', name=alias)
self.create_index('foo')
curator.add_to_alias(self.client, 'foo', alias=alias)
self.assertTrue(curator.add_to_alias(self.client, 'foo', alias=alias))
def test_remove_from_alias_positive(self):
alias = 'testalias'
self.create_index('dummy')
self.client.indices.put_alias(index='dummy', name=alias)
self.create_index('foo')
curator.add_to_alias(self.client, 'foo', alias=alias)
curator.remove_from_alias(self.client, 'dummy', alias=alias)
self.assertEquals(1, len(self.client.indices.get_alias(name=alias)))
def test_remove_from_alias_negative(self):
alias = 'testalias'
self.create_index('dummy')
self.client.indices.put_alias(index='dummy', name=alias)
self.assertFalse(curator.remove_from_alias(self.client, 'dummy', alias="ooga"))
def test_full_alias_add_positive(self):
alias = 'testalias'
self.create_index('dummy')
self.client.indices.put_alias(index='dummy', name=alias)
self.create_index('foo')
self.assertTrue(curator.alias(self.client, 'foo', alias=alias))
self.assertEquals(2, len(self.client.indices.get_alias(name=alias)))
def test_full_alias_add_negative(self):
alias = 'testalias'
self.create_index('dummy')
self.client.indices.put_alias(index='dummy', name=alias)
self.assertFalse(curator.alias(self.client, 'foo', alias='ooga'))
def test_full_alias_remove_positive(self):
alias = 'testalias'
self.create_index('dummy')
self.client.indices.put_alias(index='dummy', name=alias)
self.create_index('foo')
curator.add_to_alias(self.client, 'foo', alias=alias)
curator.alias(self.client, 'dummy', alias=alias, remove=True)
self.assertEquals(1, len(self.client.indices.get_alias(name=alias)))
def test_full_alias_remove_negative(self):
alias = 'testalias'
self.create_index('dummy')
self.client.indices.put_alias(index='dummy', name=alias)
self.assertFalse(curator.alias(self.client, 'dummy', alias="ooga", remove=True))
#def alias(client, indices, alias=None, remove=False):
# retval = True
# for i in indices:
# if remove:
# success = remove_from_alias(client, i, alias=alias)
# else:
# success = add_to_alias(client, i, alias=alias)
# # if we fail once, we fail completely
# if not success:
# retval = False
# return retval
class TestChangeReplicas(CuratorTestCase):
def test_index_replicas_can_be_modified(self):
self.create_index('test_index')
self.assertEquals('0', self.client.indices.get_settings(index='test_index')['test_index']['settings']['index']['number_of_replicas'])
curator.change_replicas(self.client, 'test_index', replicas=1)
self.assertEquals('1', self.client.indices.get_settings(index='test_index')['test_index']['settings']['index']['number_of_replicas'])
def test_index_replicas_untouched(self):
self.create_index('test_index')
self.assertEquals('0', self.client.indices.get_settings(index='test_index')['test_index']['settings']['index']['number_of_replicas'])
curator.change_replicas(self.client, 'test_index', replicas=0)
self.assertEquals('0', self.client.indices.get_settings(index='test_index')['test_index']['settings']['index']['number_of_replicas'])
class TestDeleteIndex(CuratorTestCase):
def test_index_will_be_deleted(self):
self.create_index('test_index')
self.assertTrue(curator.delete_indices(self.client, 'test_index'))
self.assertFalse(self.client.indices.exists('test_index'))
class TestBloomIndex(CuratorTestCase):
def test_bloom_filter_will_be_disabled(self):
self.create_index('test_index')
# Bloom filters have been removed from the 1.x branch after 1.4.0
no_more_bloom = (1, 4, 0)
version_number = curator.get_version(self.client)
if version_number < no_more_bloom:
self.assertTrue(curator.disable_bloom_filter(self.client, 'test_index'))
settings = self.client.indices.get_settings(index='test_index')
self.assertEquals('false', settings['test_index']['settings']['index']['codec']['bloom']['load'])
def test_closed_index_will_be_skipped(self):
self.create_index('test_index')
self.client.indices.close(index='test_index')
self.assertTrue(curator.disable_bloom_filter(self.client, 'test_index'))
index_metadata = self.client.cluster.state(
index='test_index',
metric='metadata',
)
self.assertEquals('close', index_metadata['metadata']['indices']['test_index']['state'])
class TestOptimizeIndex(CuratorTestCase):
def test_optimized_index_will_be_skipped(self):
self.create_index('test_index')
self.client.create(index='test_index', doc_type='log', body={'message':'TEST DOCUMENT'})
# Will only have 1 segment
self.assertTrue(curator.optimize_index(self.client, 'test_index', max_num_segments=4))
# def test_unoptimized_index_will_be_optimized(self):
# self.create_index('test_index')
# for i in range(1, 11):
# self.client.create(index='test_index', doc_type='log' + str(i), body={'message':'TEST DOCUMENT'})
# curator.optimize(self.client, "test_index", max_num_segments=10, delay=1)
# # Make sure we have more than 4 segments before the optimize
# self.assertGreater(curator.get_segmentcount(self.client, "test_index")[1], 4 )
# self.assertTrue(curator.optimize_index(self.client, 'test_index', max_num_segments=1))
# self.assertEqual(1, curator.get_segmentcount(self.client, "test_index")[1] )
| StarcoderdataPython |
1600780 | from ramda.find_last import find_last
from ramda.private.asserts import assert_equal
def positive(x):
return x > 0
def find_last_nocurry_test():
assert_equal(find_last(positive, [-2, -1, 0, 1, 2, -2]), 2)
def find_last_curry_test():
assert_equal(find_last(positive)([-2, -1, 0, 1, 2, -1, -2]), 2)
def not_found_test():
assert_equal(find_last(positive, []), None)
| StarcoderdataPython |
23550 | class Samples:
def __init__(self):
#COMMANDS
self.PP = ('Für [https://osu.ppy.sh/b/{} {} [{}]{}] (OD {}, AR {}, '
'CS {}, {}★, {}:{}) wirst du {} {}')
self.PP_FOR = ('| {}pp bekommen für {}% ')
self.PP_PRED = ('Für [https://osu.ppy.sh/b/{} {} [{}]{}] (OD {}, AR {}, '
'CS {}, {}★, {}:{}) wirst du {} {} # {}')
self.PP_PRED_IMPOSSIBLE = ('Unmöglich zu FC für dich')
self.PP_PRED_FUTURE = ('Es erwarten dich: {}pp')
self.INFO = ('Sie kannst Quelle und Information '
'[https://suroryz.github.io/surbot-osu/ hier] finden')
self.LANG_CHANGED = ('Sprache erfolgreich geändert. '
'Localizer: some HiNative guy')
#ERRORS
self.ERROR_SYNTAX = ('Sie hast etwas falsches eingegeben. '
'Kontrollieren Sie die Hilfeseite -> .info')
self.ERROR_NP_NEED = ('Sie müssen /np vorher verwenden')
self.ERROR_NO_LANGUAGE = ('Entschuldigung, aber ich kann deine/Ihre Sprache nicht in meiner Datenbank finden. '
'Versuchen Sie den ISO 639-1 Sprachen-Code zu nutzen. '
'Wenn Ihre dort nicht vorzufinden ist, können Sie das '
'[https://suroryz.github.io/surbot-osu/lang/langs hier] melden')
| StarcoderdataPython |
17914 | <reponame>ltxwanzl/ainnovation_dcim
# default_app_config = '.apps.WorkflowConfig'
| StarcoderdataPython |
1769936 | import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator
from sklearn.utils.validation import (
check_X_y,
check_array,
check_is_fitted,
check_random_state,
)
from bpr_numba import fit_bpr
from utils import (
create_user_map_table,
create_item_map_table,
create_user_map,
create_item_map,
create_data_triplets_index_only,
)
class BPR(BaseEstimator):
def __init__(
self,
n_factors=10,
n_epochs=1,
batch_size=1,
init_mean=0,
init_std_dev=0.1,
lr_all=0.005,
reg_all=0.02,
lr_bi=None,
lr_pu=None,
lr_qi=None,
reg_bi=None,
reg_pu=None,
reg_qi=None,
random_state=None,
eps=1e-5,
):
self.n_factors = n_factors
self.n_epochs = n_epochs
self.batch_size = batch_size
self.init_mean = init_mean
self.init_std_dev = init_std_dev
self.lr_all = lr_all
self.reg_all = reg_all
self.lr_bi = lr_bi
self.lr_pu = lr_pu
self.lr_qi = lr_qi
self.reg_bi = reg_bi
self.reg_pu = reg_pu
self.reg_qi = reg_qi
self.random_state = random_state
self.user_factors = None
self.item_factors = None
self.item_biases = None
self.known_users = None
self.known_items = None
self.user_map = None
self.item_map = None
self.residuals = None
self.eps = eps
def fit(self, X, y):
"""Fit the model using stochastic gradient descent.
Parameters
----------
X : ndarray shape ( m, 2 )
Columns are [ user_id, item_id ]
y : ndarray shape ( m, )
Array of 1 : relevent and 0 if not
Returns
-------
"""
X, y = check_X_y(X, y)
n_users = len(np.unique(X[:, 0]))
n_items = len(np.unique(X[:, 1]))
df = pd.DataFrame({"user_id": X[:, 0], "item_id": X[:, 1], "relevance": y})
user_map_table = create_user_map_table(df)
item_map_table = create_item_map_table(df)
self.user_map = create_user_map(df)
self.item_map = create_item_map(df)
data_triplets = create_data_triplets_index_only(
df, user_map_table, item_map_table
)
print("Data triplets created")
m = data_triplets.shape[0]
self.is_fitted_ = True
self.random_state_ = check_random_state(self.random_state)
self.lr_bi = self.lr_bi if self.lr_bi is not None else self.lr_all
self.lr_pu = self.lr_pu if self.lr_pu is not None else self.lr_all
self.lr_qi = self.lr_qi if self.lr_qi is not None else self.lr_all
self.reg_bi = self.reg_bi if self.reg_bi is not None else self.reg_all
self.reg_pu = self.reg_pu if self.reg_pu is not None else self.reg_all
self.reg_qi = self.reg_qi if self.reg_qi is not None else self.reg_all
self.batch_size = self.batch_size if self.batch_size is not None else 1
self.residuals = np.zeros(self.n_epochs)
self.known_users = set(X[:, 0])
self.known_items = set(X[:, 1])
self.user_factors = self.random_state_.normal(
loc=self.init_mean,
scale=self.init_std_dev,
size=(n_users, self.n_factors),
)
self.item_factors = self.random_state_.normal(
loc=self.init_mean,
scale=self.init_std_dev,
size=(n_items, self.n_factors),
)
self.user_biases = self.random_state_.normal(
loc=self.init_mean, scale=self.init_std_dev, size=n_users
)
self.item_biases = self.random_state_.normal(
loc=self.init_mean, scale=self.init_std_dev, size=n_items
)
(
self.user_factors,
self.item_factors,
self.item_biases,
self.residuals,
) = fit_bpr(
data_triplets=data_triplets,
initial_user_factors=self.user_factors,
initial_item_factors=self.item_factors,
initial_item_biases=self.item_biases,
lr_bi=self.lr_bi,
lr_pu=self.lr_pu,
lr_qi=self.lr_qi,
reg_bi=self.reg_bi,
reg_pu=self.reg_pu,
reg_qi=self.reg_qi,
verbose=False,
n_epochs=self.n_epochs,
batch_size=self.batch_size,
eps=self.eps,
)
if len(self.residuals) < self.n_epochs:
print(f"Converged")
return self
def predict(self, X: np.ndarray):
"""
Parameters
----------
X : array-like
Columns [ user_id, item_id ]
Returns
-------
scores : ndarray
"""
check_is_fitted(self, "is_fitted_")
X = check_array(X)
m = X.shape[0]
scores = np.zeros(m)
for i in np.arange(m):
user_id = X[i, 0]
item_id = X[i, 1]
if user_id in self.user_map and item_id in self.item_map:
u_idx = self.user_map[user_id]
i_idx = self.item_map[item_id]
scores[i] = (
np.dot(self.user_factors[u_idx, :], self.item_factors[i_idx, :])
+ self.item_biases[i_idx]
)
elif item_id in self.item_map:
i_idx = self.item_map[item_id]
scores[i] = self.item_biases[i_idx]
else:
# item not in training set
scores[i] = -np.inf
return scores
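# Hedged usage sketch (added for illustration, not part of the original module):
# fits the estimator on synthetic implicit-feedback data and scores a few pairs.
# The data shapes and hyperparameter values below are assumptions, not values
# taken from the original project.
def _example_bpr_usage():
    rng = np.random.default_rng(0)
    X = np.column_stack([rng.integers(0, 50, size=500),    # user ids
                         rng.integers(0, 100, size=500)])  # item ids
    y = rng.integers(0, 2, size=500)                        # 1 = relevant
    model = BPR(n_factors=16, n_epochs=5, lr_all=0.01, random_state=0)
    model.fit(X, y)
    return model.predict(X[:10])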
| StarcoderdataPython |
11076 | import FWCore.ParameterSet.Config as cms
from SimMuon.GEMDigitizer.muonGEMDigis_cfi import *
from SimMuon.GEMDigitizer.muonGEMPadDigis_cfi import *
from SimMuon.GEMDigitizer.muonGEMPadDigiClusters_cfi import *
muonGEMDigiTask = cms.Task(simMuonGEMDigis, simMuonGEMPadDigis, simMuonGEMPadDigiClusters)
muonGEMDigi = cms.Sequence(muonGEMDigiTask)
| StarcoderdataPython |
1622624 | from bs4 import BeautifulSoup
import requests
import os
class App:
def __init__(self):
self.userlist = []
self.headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.212 Safari/537.36"}
self.page = 1
os.system("title "+"THT IHBAR OTOMASYONU")
os.system("color F")
self.hashUser = input("'xf_user' Bilgisini giriniz: ").strip()
self.hashTfaTrust = input("'xf_tfa_trust' Bilgisini giriniz: ").strip()
self.cookies = {
'xf_user':f'{self.hashUser}',
'xf_tfa_trust':f'{self.hashTfaTrust}'
}
self.Transactions()
def ControlAccount(self):
request = requests.get("https://www.turkhackteam.org/uye/kaptantr.744109/", cookies=self.cookies, headers = self.headers)
controltext = "Giriş yap"
html = request.text
if controltext in html:
return "Giris Yapılmadı"
else:
return"Giriş Yapıldı"
    def Scraping(self):
request = requests.get("https://www.turkhackteam.org/reports/closed?page="+ str(self.page), cookies=self.cookies, headers=self.headers).text
parser = BeautifulSoup(request, 'html.parser')
urls = parser.findAll("a", {"class": "structItem-title"},href=True)
for url in urls:
file = open("rapor.txt","a",encoding='utf-8')
file.write("*"*40)
file.write("\n")
reportedLink = "https://www.turkhackteam.org"+url["href"]
request = requests.get(reportedLink, cookies=self.cookies, headers=self.headers).text
contentParser = BeautifulSoup(request, 'html.parser')
content = contentParser.find_all("header",{"class":"message-attribution message-attribution--plain"})
for item in content:
userLink = item.find('a')["href"]
userLink = "https://www.turkhackteam.org"+userLink
userSituation = item.find("span", {"class": "label label--accent"})
userSituation = userSituation is None
userName = item.find('h4',{"class":"attribution"}).text
                userSituation = {True: "İhbar Yapan", False: "İhbar Eden"}[userSituation]
text = f"{userLink} // {userName} // ({userSituation})"
file.write(reportedLink)
file.write("\n")
file.write(text)
file.write("\n")
file.write("-"*20)
file.write("\n")
file.close()
def Transactions(self):
print("""
///////////////////////////////////////////
// //
// THT Ihbar Otomasyonu //
// 1.0 //
// //
// Created By //
// Ar-Ge Team //
///////////////////////////////////////////
""")
        if self.ControlAccount() == "Giris Yapılmadı":
print("Giriş Yapılamadı. Çıkış yapmak için lütfen bir tuşa basınız.")
input()
exit()
else:
print(f"Login Control: {self.ControlAccount()}")
print("İşlem Başladı, Lütfen Bekleyiniz")
            self.Scraping()
print("İşlem Tamamlandı, Çıkış Yapmak İçin Bir tuşa Basınız.")
input()
if __name__ == '__main__':
main = App()
| StarcoderdataPython |
3237363 | <gh_stars>0
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import logging
import os
import shutil
import ssl
import sys
import tempfile
import threading
import zipfile
from contextlib import contextmanager
from pathlib import Path
from queue import Queue
from socketserver import TCPServer
from typing import IO, Any, Callable, Iterator, Mapping
from colors import green
from pants.util.dirutil import safe_delete
class InvalidZipPath(ValueError):
"""Indicates a bad zip file path."""
@contextmanager
def environment_as(**kwargs: str | None) -> Iterator[None]:
"""Update the environment to the supplied values, for example:
with environment_as(PYTHONPATH='foo:bar:baz',
PYTHON='/usr/bin/python2.7'):
subprocess.Popen(foo).wait()
"""
new_environment = kwargs
old_environment = {}
def setenv(key: str, val: str | None) -> None:
if val is not None:
os.environ[key] = val
else:
if key in os.environ:
del os.environ[key]
for key, val in new_environment.items():
old_environment[key] = os.environ.get(key)
setenv(key, val)
try:
yield
finally:
for key, val in old_environment.items():
setenv(key, val)
def _purge_env() -> None:
# N.B. Without the use of `del` here (which calls `os.unsetenv` under the hood), subprocess
# invokes or other things that may access the environment at the C level may not see the
# correct env vars (i.e. we can't just replace os.environ with an empty dict).
# See https://docs.python.org/3/library/os.html#os.unsetenv for more info.
#
# Wraps iterable in list() to make a copy and avoid issues with deleting while iterating.
for k in list(os.environ.keys()):
del os.environ[k]
def _restore_env(env: Mapping[str, str]) -> None:
for k, v in env.items():
os.environ[k] = v
@contextmanager
def hermetic_environment_as(**kwargs: str | None) -> Iterator[None]:
"""Set the environment to the supplied values from an empty state."""
old_environment = os.environ.copy()
_purge_env()
try:
with environment_as(**kwargs):
yield
finally:
_purge_env()
_restore_env(old_environment)
@contextmanager
def argv_as(args: tuple[str, ...]) -> Iterator[None]:
"""Temporarily set `sys.argv` to the supplied value."""
old_args = sys.argv
try:
sys.argv = list(args)
yield
finally:
sys.argv = old_args
@contextmanager
def temporary_dir(
root_dir: str | None = None,
cleanup: bool = True,
suffix: str | None = None,
permissions: int | None = None,
prefix: str | None = tempfile.template,
) -> Iterator[str]:
"""A with-context that creates a temporary directory.
:API: public
You may specify the following keyword args:
:param root_dir: The parent directory to create the temporary directory.
:param cleanup: Whether or not to clean up the temporary directory.
:param suffix: If not None the directory name will end with this suffix.
:param permissions: If provided, sets the directory permissions to this mode.
:param prefix: If not None, the directory name will begin with this prefix,
otherwise a default prefix is used.
"""
path = tempfile.mkdtemp(dir=root_dir, suffix=suffix, prefix=prefix)
try:
if permissions is not None:
os.chmod(path, permissions)
yield path
finally:
if cleanup:
shutil.rmtree(path, ignore_errors=True)
@contextmanager
def temporary_file_path(
root_dir: str | None = None,
cleanup: bool = True,
suffix: str | None = None,
permissions: int | None = None,
) -> Iterator[str]:
"""A with-context that creates a temporary file and returns its path.
:API: public
You may specify the following keyword args:
:param root_dir: The parent directory to create the temporary file.
:param cleanup: Whether or not to clean up the temporary file.
"""
with temporary_file(root_dir, cleanup=cleanup, suffix=suffix, permissions=permissions) as fd:
fd.close()
yield fd.name
@contextmanager
def temporary_file(
root_dir: str | None = None,
cleanup: bool = True,
suffix: str | None = None,
permissions: int | None = None,
binary_mode: bool = True,
) -> Iterator[IO]:
"""A with-context that creates a temporary file and returns a writeable file descriptor to it.
You may specify the following keyword args:
:param root_dir: The parent directory to create the temporary file.
:param cleanup: Whether or not to clean up the temporary file.
:param suffix: If suffix is specified, the file name will end with that suffix.
Otherwise there will be no suffix.
mkstemp() does not put a dot between the file name and the suffix;
if you need one, put it at the beginning of suffix.
See :py:class:`tempfile.NamedTemporaryFile`.
:param permissions: If provided, sets the file to use these permissions.
:param binary_mode: Whether file opens in binary or text mode.
"""
mode = "w+b" if binary_mode else "w+" # tempfile's default is 'w+b'
with tempfile.NamedTemporaryFile(suffix=suffix, dir=root_dir, delete=False, mode=mode) as fd:
try:
if permissions is not None:
os.chmod(fd.name, permissions)
yield fd
finally:
if cleanup:
safe_delete(fd.name)
@contextmanager
def overwrite_file_content(
file_path: str | Path,
temporary_content: bytes | str | Callable[[bytes], bytes] | None = None,
) -> Iterator[None]:
"""A helper that resets a file after the method runs.
It will read a file, save the content, maybe write temporary_content to it, yield, then
write the original content to the file.
:param file_path: Absolute path to the file to be reset after the method runs.
:param temporary_content: Content to write to the file, or a function from current content
to new temporary content.
"""
file_path = Path(file_path)
original_content = file_path.read_bytes()
try:
if temporary_content is not None:
if callable(temporary_content):
content = temporary_content(original_content)
elif isinstance(temporary_content, bytes):
content = temporary_content
else:
content = temporary_content.encode()
file_path.write_bytes(content)
yield
finally:
file_path.write_bytes(original_content)
@contextmanager
def pushd(directory: str) -> Iterator[str]:
"""A with-context that encapsulates pushd/popd."""
cwd = os.getcwd()
os.chdir(directory)
try:
yield directory
finally:
os.chdir(cwd)
@contextmanager
def open_zip(path_or_file: str | Any, *args, **kwargs) -> Iterator[zipfile.ZipFile]:
"""A with-context for zip files.
Passes through *args and **kwargs to zipfile.ZipFile.
:API: public
:param path_or_file: Full path to zip file.
:param args: Any extra args accepted by `zipfile.ZipFile`.
:param kwargs: Any extra keyword args accepted by `zipfile.ZipFile`.
:raises: `InvalidZipPath` if path_or_file is invalid.
:raises: `zipfile.BadZipfile` if zipfile.ZipFile cannot open a zip at path_or_file.
"""
if not path_or_file:
raise InvalidZipPath(f"Invalid zip location: {path_or_file}")
if "allowZip64" not in kwargs:
kwargs["allowZip64"] = True
try:
zf = zipfile.ZipFile(path_or_file, *args, **kwargs)
except zipfile.BadZipfile as bze:
# Use the realpath in order to follow symlinks back to the problem source file.
raise zipfile.BadZipfile(f"Bad Zipfile {os.path.realpath(path_or_file)}: {bze}")
try:
yield zf
finally:
zf.close()
@contextmanager
def maybe_profiled(profile_path: str | None) -> Iterator[None]:
"""A profiling context manager.
:param profile_path: The path to write profile information to. If `None`, this will no-op.
"""
if not profile_path:
yield
return
import cProfile
profiler = cProfile.Profile()
try:
profiler.enable()
yield
finally:
profiler.disable()
profiler.dump_stats(profile_path)
view_cmd = green(
"gprof2dot -f pstats {path} | dot -Tpng -o {path}.png && open {path}.png".format(
path=profile_path
)
)
logging.getLogger().info(
f"Dumped profile data to: {profile_path}\nUse e.g. {view_cmd} to render and view."
)
@contextmanager
def http_server(handler_class: type, ssl_context: ssl.SSLContext | None = None) -> Iterator[int]:
def serve(port_queue: Queue[int], shutdown_queue: Queue[bool]) -> None:
httpd = TCPServer(("", 0), handler_class)
httpd.timeout = 0.1
if ssl_context:
httpd.socket = ssl_context.wrap_socket(httpd.socket, server_side=True)
port_queue.put(httpd.server_address[1])
while shutdown_queue.empty():
httpd.handle_request()
port_queue: Queue[int] = Queue()
shutdown_queue: Queue[bool] = Queue()
t = threading.Thread(target=lambda: serve(port_queue, shutdown_queue))
t.daemon = True
t.start()
try:
yield port_queue.get(block=True)
finally:
shutdown_queue.put(True)
t.join()
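# Hedged usage sketch (added for illustration, not part of the original file):
# combines two of the context managers defined above. The environment variable
# name and file contents are arbitrary examples.
def _example_contexts_usage() -> None:
    with temporary_dir() as tmp, environment_as(EXAMPLE_VAR="1"):
        Path(tmp, "marker.txt").write_text("written inside a temporary dir")
        assert os.environ["EXAMPLE_VAR"] == "1"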
| StarcoderdataPython |
199495 | """Unit test for cleanup - cleanup node apps
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import glob
import os
import shutil
import tempfile
import unittest
import mock
import treadmill
import treadmill.runtime.runtime_base
from treadmill import cleanup
class CleanupTest(unittest.TestCase):
"""Mock test for treadmill.cleanup.Cleanup.
"""
@mock.patch('treadmill.appenv.AppEnvironment', mock.Mock(autospec=True))
@mock.patch('treadmill.watchdog.Watchdog', mock.Mock(autospec=True))
def setUp(self):
self.root = tempfile.mkdtemp()
self.cleanup_dir = os.path.join(self.root, 'cleanup')
self.cleaning_dir = os.path.join(self.root, 'cleaning')
self.cleanup_apps_dir = os.path.join(self.root, 'cleanup_apps')
self.cleanup_tombstone_dir = os.path.join(self.root, 'tombstones')
for tmp_dir in [self.cleanup_dir, self.cleaning_dir,
self.cleanup_apps_dir]:
os.mkdir(tmp_dir)
self.tm_env = mock.Mock(
root=self.root,
cleanup_dir=self.cleanup_dir,
cleaning_dir=self.cleaning_dir,
cleanup_apps_dir=self.cleanup_apps_dir,
cleanup_tombstone_dir=self.cleanup_tombstone_dir
)
self.cleanup = cleanup.Cleanup(self.tm_env)
def tearDown(self):
if self.root and os.path.isdir(self.root):
shutil.rmtree(self.root)
@mock.patch('treadmill.supervisor.control_svscan', mock.Mock())
def test__refresh_supervisor(self):
"""Check how the supervisor is being refreshed.
"""
# Access to a protected member _refresh_supervisor of a client class
# pylint: disable=W0212
self.cleanup._refresh_supervisor()
treadmill.supervisor.control_svscan.assert_called_with(
self.cleaning_dir, (
treadmill.supervisor.SvscanControlAction.alarm,
treadmill.supervisor.SvscanControlAction.nuke
)
)
@mock.patch('os.path.islink', mock.Mock())
@mock.patch('treadmill.supervisor.create_service', mock.Mock())
@mock.patch('treadmill.fs.symlink_safe', mock.Mock())
@mock.patch('treadmill.cleanup.Cleanup._refresh_supervisor', mock.Mock())
def test__add_cleanup_app(self):
"""Tests that a new cleanup app is correctly configured.
"""
# Access to a protected member _add_cleanup_app of a client class
# pylint: disable=W0212
os.path.islink.side_effect = [False, True]
self.cleanup._add_cleanup_app(
os.path.join(self.cleanup_dir, 'proid.app#0000000000001'))
treadmill.supervisor.create_service.assert_called_with(
self.cleanup_apps_dir,
name='proid.app#0000000000001',
app_run_script=mock.ANY,
userid='root',
monitor_policy={
'limit': 5,
'interval': 60,
'tombstone': os.path.join(self.cleanup_tombstone_dir,
'proid.app#0000000000001'),
'skip_path': os.path.join(self.cleanup_dir,
'proid.app#0000000000001')
},
log_run_script=None,
)
treadmill.fs.symlink_safe.assert_called_with(
os.path.join(self.cleaning_dir, 'proid.app#0000000000001'),
os.path.join(self.cleanup_apps_dir, 'proid.app#0000000000001')
)
treadmill.cleanup.Cleanup._refresh_supervisor.assert_called()
@mock.patch('os.path.islink', mock.Mock())
@mock.patch('treadmill.supervisor.create_service', mock.Mock())
def test__add_cleanup_app_exists(self):
"""Tests add app when already exists.
"""
# Access to a protected member _add_cleanup_app of a client class
# pylint: disable=W0212
os.path.islink.side_effect = [True]
self.cleanup._add_cleanup_app(
os.path.join(self.cleanup_dir, 'proid.app#0000000000001'))
treadmill.supervisor.create_service.assert_not_called()
# Disable C0103(Invalid method name)
# pylint: disable=C0103
@mock.patch('os.path.islink', mock.Mock())
@mock.patch('treadmill.supervisor.create_service', mock.Mock())
def test__add_cleanup_app_not_exists(self):
"""Tests add app when cleanup link does not exist.
"""
# Access to a protected member _add_cleanup_app of a client class
# pylint: disable=W0212
os.path.islink.side_effect = [False, False]
self.cleanup._add_cleanup_app(
os.path.join(self.cleanup_dir, 'proid.app#0000000000001'))
treadmill.supervisor.create_service.assert_not_called()
@mock.patch('treadmill.supervisor.create_service', mock.Mock())
def test__add_cleanup_app_temp(self):
"""Tests add app when cleanup link is a temp file
"""
# Access to a protected member _add_cleanup_app of a client class
# pylint: disable=W0212
self.cleanup._add_cleanup_app(
os.path.join(self.cleanup_dir, '.sdfasdfds'))
treadmill.supervisor.create_service.assert_not_called()
@mock.patch('os.path.exists', mock.Mock())
@mock.patch('treadmill.supervisor.ensure_not_supervised', mock.Mock())
@mock.patch('treadmill.fs.rm_safe', mock.Mock())
@mock.patch('treadmill.fs.rmtree_safe', mock.Mock())
@mock.patch('treadmill.cleanup.Cleanup._refresh_supervisor', mock.Mock())
def test__remove_cleanup_app(self):
"""Tests that a cleanup app is properly removed.
"""
# Access to a protected member _remove_cleanup_app of a client class
# pylint: disable=W0212
os.path.exists.side_effect = [True]
self.cleanup._remove_cleanup_app(
os.path.join(self.cleanup_dir, 'proid.app#0000000000001'))
treadmill.fs.rm_safe.assert_called_with(
os.path.join(self.cleaning_dir, 'proid.app#0000000000001')
)
treadmill.cleanup.Cleanup._refresh_supervisor.assert_called()
treadmill.supervisor.ensure_not_supervised.assert_called()
treadmill.fs.rmtree_safe.assert_called_with(
os.path.join(self.cleanup_apps_dir, 'proid.app#0000000000001')
)
# Disable C0103(Invalid method name)
# pylint: disable=C0103
@mock.patch('os.path.exists', mock.Mock())
@mock.patch('treadmill.supervisor.ensure_not_supervised', mock.Mock())
@mock.patch('treadmill.fs.rm_safe', mock.Mock())
@mock.patch('treadmill.fs.rmtree_safe', mock.Mock())
@mock.patch('treadmill.cleanup.Cleanup._refresh_supervisor', mock.Mock())
def test__remove_cleanup_app_no_link(self):
"""Tests that a cleanup app is removed even if the cleaning link
has been removed.
"""
# Access to a protected member _remove_cleanup_app of a client class
# pylint: disable=W0212
os.path.exists.side_effect = [False]
self.cleanup._remove_cleanup_app(
os.path.join(self.cleanup_dir, 'proid.app#0000000000001'))
treadmill.fs.rm_safe.assert_not_called()
treadmill.cleanup.Cleanup._refresh_supervisor.assert_not_called()
treadmill.supervisor.ensure_not_supervised.assert_not_called()
treadmill.fs.rmtree_safe.assert_called_with(
os.path.join(self.cleanup_apps_dir, 'proid.app#0000000000001')
)
@mock.patch('os.path.exists', mock.Mock())
@mock.patch('treadmill.supervisor.ensure_not_supervised', mock.Mock())
@mock.patch('treadmill.fs.rm_safe', mock.Mock())
@mock.patch('treadmill.fs.rmtree_safe', mock.Mock())
@mock.patch('treadmill.cleanup.Cleanup._refresh_supervisor', mock.Mock())
def test__remove_cleanup_app_temp(self):
"""Tests removed cleanup app when link is a temp file.
"""
# Access to a protected member _remove_cleanup_app of a client class
# pylint: disable=W0212
os.path.exists.side_effect = [False]
self.cleanup._remove_cleanup_app(
os.path.join(self.cleanup_dir, '.sdfasdfds'))
treadmill.fs.rm_safe.assert_not_called()
treadmill.cleanup.Cleanup._refresh_supervisor.assert_not_called()
treadmill.supervisor.ensure_not_supervised.assert_not_called()
treadmill.fs.rmtree_safe.assert_not_called()
@mock.patch('os.readlink', mock.Mock())
@mock.patch('os.path.exists', mock.Mock())
@mock.patch('treadmill.runtime.get_runtime', mock.Mock(
return_value=mock.Mock(
spec_set=treadmill.runtime.runtime_base.RuntimeBase)))
@mock.patch('treadmill.fs.rm_safe', mock.Mock())
def test_invoke(self):
"""Tests invoking the cleanup action.
"""
os.readlink.side_effect = [
os.path.join(self.cleanup_apps_dir, 'proid.app#0000000000001')
]
os.path.exists.side_effect = [True]
self.cleanup.invoke('test', 'proid.app#0000000000001')
mock_runtime = treadmill.runtime.get_runtime(
'test',
self.cleanup.tm_env,
os.path.join(self.cleanup_apps_dir, 'proid.app#0000000000001')
)
mock_runtime.finish.assert_called()
treadmill.fs.rm_safe.assert_called_with(
os.path.join(self.cleanup_dir, 'proid.app#0000000000001')
)
@mock.patch('os.readlink', mock.Mock())
@mock.patch('os.path.exists', mock.Mock())
@mock.patch('treadmill.runtime.get_runtime', mock.Mock(
return_value=mock.Mock(
spec_set=treadmill.runtime.runtime_base.RuntimeBase)))
@mock.patch('treadmill.fs.rm_safe', mock.Mock())
def test_invoke_not_exists(self):
"""Tests invoking the cleanup action when the app dir does not exist
anymore.
"""
os.readlink.side_effect = [
os.path.join(self.cleanup_apps_dir, 'proid.app#0000000000001')
]
os.path.exists.side_effect = [False]
self.cleanup.invoke('test', 'proid.app#0000000000001')
mock_runtime = treadmill.runtime.get_runtime(
'test',
self.cleanup.tm_env,
os.path.join(self.cleanup_apps_dir, 'proid.app#0000000000001')
)
mock_runtime.finish.assert_not_called()
treadmill.fs.rm_safe.assert_called_with(
os.path.join(self.cleanup_dir, 'proid.app#0000000000001')
)
@mock.patch('glob.glob', mock.Mock())
@mock.patch('treadmill.cleanup.Cleanup._add_cleanup_app', mock.Mock())
@mock.patch('treadmill.cleanup.Cleanup._remove_cleanup_app', mock.Mock())
def test__sync(self):
"""Tests a full sync of cleanup apps.
"""
# Access to a protected member _sync of a client class
# pylint: disable=W0212
glob.glob.side_effect = [
[
os.path.join(self.cleanup_dir, 'proid.app#0000000000002'),
os.path.join(self.cleanup_dir, 'proid.app#0000000000003')
],
[
os.path.join(self.cleanup_apps_dir, 'proid.app#0000000000001'),
os.path.join(self.cleanup_apps_dir, 'proid.app#0000000000002')
]
]
self.cleanup._sync()
treadmill.cleanup.Cleanup._add_cleanup_app.assert_has_calls([
mock.call('proid.app#0000000000003')
])
treadmill.cleanup.Cleanup._remove_cleanup_app.assert_has_calls([
mock.call('proid.app#0000000000001')
])
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
135031 | <gh_stars>1-10
from chaos_genius.controllers.config_controller import get_config_object
def get_creds(name):
return HELPER_FUNC_DICT[name](name)
def get_email_creds(name):
config_obj = get_config_object(name)
if config_obj is None:
return "", "", "", "", ""
configs = config_obj.as_dict.get('config_setting', {})
return (
configs.get('server', ''),
configs.get('port', ''),
configs.get('username', ''),
configs.get('password', ''),
configs.get('sender_email', '')
)
def get_slack_creds(name):
config_obj = get_config_object(name)
if config_obj is None:
return ""
configs = config_obj.as_dict.get('config_setting', {})
return configs.get('webhook_url', '')
HELPER_FUNC_DICT = {
"email": get_email_creds,
"slack": get_slack_creds
}
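# Hedged usage sketch (added for illustration, not part of the original module):
# `get_creds` dispatches on the config name, so callers unpack the tuple (or
# single value) that matches the channel they requested.
def _example_get_creds_usage():
    webhook_url = get_creds("slack")
    server, port, username, password, sender_email = get_creds("email")
    return webhook_url, sender_email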
| StarcoderdataPython |
3361663 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author : EXP
# -----------------------------------------------
import random
import string
from datetime import timedelta
import erb.yml as yaml
import os
PRJ_DIR = os.path.dirname(os.path.abspath(__file__))
CHARSET = 'utf-8'
SETTINGS_PATH = '%s/conf/settings.yml' % PRJ_DIR
FILES_STORAGE_DIR = '%s/files' % PRJ_DIR
class Settings :
def __init__(self, settings_path, charset) -> None:
if os.path.exists(settings_path) :
with open(settings_path, 'r', encoding=charset) as file:
context = yaml.load(file.read())
app = context.get('APPLICATION')
self.ENV = app.get('ENV')
self.DEBUG = self._to_bool(app.get('DEBUG'))
self.TESTING = self._to_bool(app.get('TESTING'))
self.PREFERRED_URL_SCHEME = app.get('PREFERRED_URL_SCHEME')
self.APPLICATION_ROOT = app.get('APPLICATION_ROOT')
self.APPLICATION_NAME = app.get('APPLICATION_NAME')
self.SERVER_NAME = app.get('SERVER_NAME')
self.LOGIN_USERNAME = app.get('LOGIN_USERNAME')
self.LOGIN_PASSWORD = app.get('LOGIN_PASSWORD')
cookie = context.get('COOKIE')
self.SECRET_KEY = cookie.get('SECRET_KEY') or self._gen_cookie_secret_key()
self.PERMANENT_SESSION_LIFETIME = timedelta(int(cookie.get('PERMANENT_SESSION_LIFETIME')))
self.SESSION_COOKIE_NAME = cookie.get('SESSION_COOKIE_NAME')
self.SESSION_COOKIE_DOMAIN = cookie.get('SESSION_COOKIE_DOMAIN')
self.SESSION_COOKIE_PATH = cookie.get('SESSION_COOKIE_PATH')
self.SESSION_COOKIE_HTTPONLY = self._to_bool(cookie.get('SESSION_COOKIE_HTTPONLY'))
self.SESSION_COOKIE_SECURE = self._to_bool(cookie.get('SESSION_COOKIE_SECURE'))
self.SESSION_COOKIE_SAMESITE = cookie.get('SESSION_COOKIE_SAMESITE')
self.SESSION_REFRESH_EACH_REQUEST = self._to_bool(cookie.get('SESSION_REFRESH_EACH_REQUEST'))
self.MAX_COOKIE_SIZE = int(cookie.get('MAX_COOKIE_SIZE'))
content = context.get('CONTENT')
self.BASEDIR = content.get('BASEDIR') or FILES_STORAGE_DIR
self.MAX_CONTENT_LENGTH = content.get('MAX_CONTENT_LENGTH')
self.JSON_AS_ASCII = self._to_bool(content.get('JSON_AS_ASCII'))
self.JSON_SORT_KEYS = self._to_bool(content.get('JSON_SORT_KEYS'))
self.JSONIFY_PRETTYPRINT_REGULAR = self._to_bool(content.get('JSONIFY_PRETTYPRINT_REGULAR'))
self.JSONIFY_MIMETYPE = content.get('JSONIFY_MIMETYPE')
self.TEMPLATES_AUTO_RELOAD = content.get('TEMPLATES_AUTO_RELOAD')
other = context.get('OTHER')
self.PROPAGATE_EXCEPTIONS = other.get('PROPAGATE_EXCEPTIONS')
self.PRESERVE_CONTEXT_ON_EXCEPTION = other.get('PRESERVE_CONTEXT_ON_EXCEPTION')
self.USE_X_SENDFILE = self._to_bool(other.get('USE_X_SENDFILE'))
self.SEND_FILE_MAX_AGE_DEFAULT = other.get('SEND_FILE_MAX_AGE_DEFAULT')
self.TRAP_BAD_REQUEST_ERRORS = other.get('TRAP_BAD_REQUEST_ERRORS')
self.TRAP_HTTP_EXCEPTIONS = self._to_bool(other.get('TRAP_HTTP_EXCEPTIONS'))
self.EXPLAIN_TEMPLATE_LOADING = self._to_bool(other.get('EXPLAIN_TEMPLATE_LOADING'))
def _gen_cookie_secret_key(self, size=32) :
return ''.join(
random.sample(string.ascii_letters + string.digits, size)
)
def _to_bool(self, val) :
if val is not None and not isinstance(val, bool) :
if val.lower() == 'false' :
val = False
elif val.lower() == 'true' :
val = True
return val
settings = Settings(SETTINGS_PATH, CHARSET)
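# Hedged usage sketch (added for illustration, not part of the original module):
# the module-level `settings` object is what a Flask app would typically load,
# e.g. via app.config.from_object(settings). The attributes read below are just
# examples of what the class exposes.
def _example_settings_usage():
    return settings.APPLICATION_NAME, settings.DEBUG, settings.BASEDIR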
| StarcoderdataPython |
3313074 | <reponame>THEMVFFINMAN/Self-Coding-Books
import zipfile, sys, os
from threading import Thread
def validateFile(fileName):
if not os.path.isfile(fileName):
print '[-] ' + fileName + ' does not exist.'
exit(0)
if not os.access(fileName, os.R_OK):
print '[-] ' + fileName + ' access denied.'
exit(0)
def validateFiles():
if len(sys.argv) == 3:
for fileName in range(1,3):
print sys.argv[fileName]
validateFile(sys.argv[fileName])
else:
print '[-] Incorrect file amount'
exit(0)
def extractFile(zFile, password):
try:
zFile.extractall(pwd=password)
print '[+] Found password: ' + password + '\n'
except:
pass
def main():
validateFiles()
zFile = zipfile.ZipFile(sys.argv[1])
passFile = open(sys.argv[2])
for line in passFile.readlines():
password = line.strip('\n')
t = Thread(target=extractFile, args=(zFile, password))
t.start()
if __name__ == '__main__':
main()
| StarcoderdataPython |
1691222 | # coding=utf-8
# Copyright 2021 The Deeplab2 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file contains code to build a ViP-DeepLab decoder.
Reference:
- [ViP-DeepLab: Learning Visual Perception with Depth-aware Video
Panoptic Segmentation](https://arxiv.org/abs/2012.05258)
"""
import tensorflow as tf
from deeplab2 import common
from deeplab2.model.decoder import panoptic_deeplab
layers = tf.keras.layers
class ViPDeepLabDecoder(layers.Layer):
"""A ViP-DeepLab decoder layer.
This layer takes low- and high-level features as input and uses a dual-ASPP
and dual-decoder structure to aggregate features for semantic and instance
segmentation. On top of the decoders, three heads are used to predict semantic
segmentation, instance center probabilities, and instance center regression
per pixel. It also has a branch to predict the next-frame instance center
regression. Different from the ViP-DeepLab paper which uses Cascade-ASPP, this
reimplementation only uses ASPP.
"""
def __init__(self,
decoder_options,
vip_deeplab_options,
bn_layer=tf.keras.layers.BatchNormalization):
"""Initializes a ViP-DeepLab decoder.
Args:
decoder_options: Decoder options as defined in config_pb2.DecoderOptions.
vip_deeplab_options: Model options as defined in
config_pb2.ModelOptions.ViPDeeplabOptions.
bn_layer: An optional tf.keras.layers.Layer that computes the
normalization (default: tf.keras.layers.BatchNormalization).
"""
super(ViPDeepLabDecoder, self).__init__(name='ViPDeepLab')
low_level_feature_keys = [
item.feature_key for item in vip_deeplab_options.low_level
]
low_level_channels_project = [
item.channels_project for item in vip_deeplab_options.low_level
]
self._semantic_decoder = panoptic_deeplab.PanopticDeepLabSingleDecoder(
high_level_feature_name=decoder_options.feature_key,
low_level_feature_names=low_level_feature_keys,
low_level_channels_project=low_level_channels_project,
aspp_output_channels=decoder_options.aspp_channels,
decoder_output_channels=decoder_options.decoder_channels,
atrous_rates=decoder_options.atrous_rates,
name='semantic_decoder',
aspp_use_only_1x1_proj_conv=decoder_options.aspp_use_only_1x1_proj_conv,
decoder_conv_type=decoder_options.decoder_conv_type,
bn_layer=bn_layer)
self._semantic_head = panoptic_deeplab.PanopticDeepLabSingleHead(
vip_deeplab_options.semantic_head.head_channels,
vip_deeplab_options.semantic_head.output_channels,
common.PRED_SEMANTIC_LOGITS_KEY,
name='semantic_head',
conv_type=vip_deeplab_options.semantic_head.head_conv_type,
bn_layer=bn_layer)
self._instance_decoder = None
self._instance_center_head = None
self._instance_regression_head = None
self._next_instance_decoder = None
self._next_instance_regression_head = None
if vip_deeplab_options.instance.enable:
if vip_deeplab_options.instance.low_level_override:
low_level_options = vip_deeplab_options.instance.low_level_override
else:
low_level_options = vip_deeplab_options.low_level
# If instance_decoder is set, use those options; otherwise reuse the
# architecture as defined for the semantic decoder.
if vip_deeplab_options.instance.HasField(
'instance_decoder_override'):
decoder_options = (vip_deeplab_options.instance
.instance_decoder_override)
low_level_feature_keys = [item.feature_key for item in low_level_options]
low_level_channels_project = [
item.channels_project for item in low_level_options
]
self._instance_decoder = panoptic_deeplab.PanopticDeepLabSingleDecoder(
high_level_feature_name=decoder_options.feature_key,
low_level_feature_names=low_level_feature_keys,
low_level_channels_project=low_level_channels_project,
aspp_output_channels=decoder_options.aspp_channels,
decoder_output_channels=decoder_options.decoder_channels,
atrous_rates=decoder_options.atrous_rates,
name='instance_decoder',
aspp_use_only_1x1_proj_conv=(
decoder_options.aspp_use_only_1x1_proj_conv),
decoder_conv_type=decoder_options.decoder_conv_type,
bn_layer=bn_layer)
self._instance_center_head = panoptic_deeplab.PanopticDeepLabSingleHead(
vip_deeplab_options.instance.center_head.head_channels,
vip_deeplab_options.instance.center_head.output_channels,
common.PRED_CENTER_HEATMAP_KEY,
name='instance_center_head',
conv_type=(
vip_deeplab_options.instance.center_head.head_conv_type),
bn_layer=bn_layer)
self._instance_regression_head = (
panoptic_deeplab.PanopticDeepLabSingleHead(
vip_deeplab_options.instance.regression_head.head_channels,
vip_deeplab_options.instance.regression_head.output_channels,
common.PRED_OFFSET_MAP_KEY,
name='instance_regression_head',
conv_type=(
vip_deeplab_options.instance.regression_head.head_conv_type),
bn_layer=bn_layer))
if vip_deeplab_options.instance.HasField('next_regression_head'):
self._next_instance_decoder = (
panoptic_deeplab.PanopticDeepLabSingleDecoder(
high_level_feature_name=decoder_options.feature_key,
low_level_feature_names=low_level_feature_keys,
low_level_channels_project=low_level_channels_project,
aspp_output_channels=decoder_options.aspp_channels,
decoder_output_channels=decoder_options.decoder_channels,
atrous_rates=decoder_options.atrous_rates,
name='next_instance_decoder',
aspp_use_only_1x1_proj_conv=(
decoder_options.aspp_use_only_1x1_proj_conv),
decoder_conv_type=decoder_options.decoder_conv_type,
bn_layer=bn_layer))
self._next_instance_regression_head = (
panoptic_deeplab.PanopticDeepLabSingleHead(
(vip_deeplab_options.instance.next_regression_head
.head_channels),
(vip_deeplab_options.instance.next_regression_head
.output_channels),
common.PRED_NEXT_OFFSET_MAP_KEY,
name='next_instance_regression_head',
conv_type=(vip_deeplab_options.instance.next_regression_head
.head_conv_type),
bn_layer=bn_layer))
self._next_high_level_feature_name = decoder_options.feature_key
def reset_pooling_layer(self):
"""Resets the ASPP pooling layers to global average pooling."""
self._semantic_decoder.reset_pooling_layer()
if self._instance_decoder is not None:
self._instance_decoder.reset_pooling_layer()
if self._next_instance_decoder is not None:
self._next_instance_decoder.reset_pooling_layer()
def set_pool_size(self, pool_size):
"""Sets the pooling size of the ASPP pooling layers.
Args:
pool_size: A tuple specifying the pooling size of the ASPP pooling layers.
"""
self._semantic_decoder.set_pool_size(pool_size)
if self._instance_decoder is not None:
self._instance_decoder.set_pool_size(pool_size)
if self._next_instance_decoder is not None:
self._next_instance_decoder.set_pool_size(pool_size)
def get_pool_size(self):
return self._semantic_decoder.get_pool_size()
@property
def checkpoint_items(self):
items = {
common.CKPT_SEMANTIC_DECODER:
self._semantic_decoder,
common.CKPT_SEMANTIC_HEAD_WITHOUT_LAST_LAYER:
self._semantic_head.conv_block,
common.CKPT_SEMANTIC_LAST_LAYER:
self._semantic_head.final_conv
}
if self._instance_decoder is not None:
instance_items = {
common.CKPT_INSTANCE_DECODER:
self._instance_decoder,
common.CKPT_INSTANCE_CENTER_HEAD_WITHOUT_LAST_LAYER:
self._instance_center_head.conv_block,
common.CKPT_INSTANCE_CENTER_HEAD_LAST_LAYER:
self._instance_center_head.final_conv,
common.CKPT_INSTANCE_REGRESSION_HEAD_WITHOUT_LAST_LAYER:
self._instance_regression_head.conv_block,
common.CKPT_INSTANCE_REGRESSION_HEAD_LAST_LAYER:
self._instance_regression_head.final_conv,
}
items.update(instance_items)
if self._next_instance_decoder is not None:
next_instance_items = {
common.CKPT_NEXT_INSTANCE_DECODER:
self._next_instance_decoder,
common.CKPT_NEXT_INSTANCE_REGRESSION_HEAD_WITHOUT_LAST_LAYER:
self._next_instance_regression_head.conv_block,
common.CKPT_NEXT_INSTANCE_REGRESSION_HEAD_LAST_LAYER:
self._next_instance_regression_head.final_conv,
}
items.update(next_instance_items)
return items
def call(self, features, next_features, training=False):
"""Performs a forward pass.
Args:
features: An input dict of tf.Tensor with shape [batch, height, width,
channels]. Different keys should point to different features extracted
by the encoder, e.g. low-level or high-level features.
next_features: An input dict of tf.Tensor similar to features. The
features are computed with the next frame as input.
training: A boolean flag indicating whether training behavior should be
used (default: False).
Returns:
A dictionary containing the results of the semantic segmentation head and
depending on the configuration also of the instance segmentation head.
"""
semantic_features = self._semantic_decoder(features, training=training)
results = self._semantic_head(semantic_features, training=training)
if self._instance_decoder is not None:
instance_features = self._instance_decoder(features, training=training)
instance_center_predictions = self._instance_center_head(
instance_features, training=training)
instance_regression_predictions = self._instance_regression_head(
instance_features, training=training)
if results.keys() & instance_center_predictions.keys():
raise ValueError('The keys of the semantic branch and the instance '
'center branch overlap. Please use unique keys.')
results.update(instance_center_predictions)
if results.keys() & instance_regression_predictions.keys():
raise ValueError('The keys of the semantic branch and the instance '
'regression branch overlap. Please use unique keys.')
results.update(instance_regression_predictions)
if self._next_instance_decoder is not None:
# We update the high level features in next_features with the concated
# features of the high level features in both features and next_features.
high_level_feature_name = self._next_high_level_feature_name
high_level_features = features[high_level_feature_name]
next_high_level_features = next_features[high_level_feature_name]
next_high_level_features = tf.concat(
[high_level_features, next_high_level_features], axis=3)
next_features[high_level_feature_name] = next_high_level_features
next_regression_features = self._next_instance_decoder(
next_features, training=training)
next_regression_predictions = self._next_instance_regression_head(
next_regression_features, training=training)
if results.keys() & next_regression_predictions.keys():
raise ValueError('The keys of the next regresion branch overlap.'
'Please use unique keys.')
results.update(next_regression_predictions)
return results
| StarcoderdataPython |
129767 | <filename>funboost/utils/dependency_packages/mongomq/utils.py
def enum(name, *sequential, **named):
values = dict(zip(sequential, range(len(sequential))), **named)
# NOTE: Yes, we *really* want to cast using str() here.
# On Python 2 type() requires a byte string (which is str() on Python 2).
# On Python 3 it does not matter, so we'll use str(), which acts as
# a no-op.
return type(str(name), (), values) | StarcoderdataPython |
78428 | <reponame>Abrosimov-a-a/dvc
from __future__ import unicode_literals
from dvc.output.s3 import OutputS3
from dvc.remote.gs import RemoteGS
class OutputGS(OutputS3):
REMOTE = RemoteGS
| StarcoderdataPython |
3319889 | <reponame>nuo010/pyefun<filename>pyefun/typeConv.py
"""
.. Hint::
类型转换
.. literalinclude:: ../../../pyefun/typeConv_test.py
:language: python
:caption: 代码示例
:linenos:
"""
from .timeBase import *
import json
def 到文本(bytes):
return str(bytes, encoding="utf-8")
def 到字节集(str):
return bytes(str, encoding='utf-8')
def 到数值(val):
return float(val)
def 到整数(val):
return int(float(val))
def 到时间(str):
return 创建日期时间(str)
def json到文本(obj):
return json.dumps(obj)
def json解析(obj):
return json.loads(obj) | StarcoderdataPython |
3210884 | <gh_stars>1-10
import pathlib
import numpy as np
__all__ = ["load_my_format"]
def load_my_format(path, callback=None, meta_override=None):
"""Loads AFM data from my format
This is the main function for loading your file format. Please
add a description here.
Parameters
----------
path: str or pathlib.Path or io.TextIOBase
path to a .tab file
callback: callable
function for progress tracking; must accept a float in
[0, 1] as an argument.
meta_override: dict
if specified, contains key-value pairs of metadata that
are used when loading the files
(see :data:`afmformats.meta.META_FIELDS`)
"""
if meta_override is None:
meta_override = {}
path = pathlib.Path(path)
# Here you would start parsing your data and metadata from `path`
# You should specify as many metadata keys as possible. See
# afmformats.meta.DEF_ALL for a list of valid keys.
metadata = {"path": path}
# Valid column names are defined in afmformats.afm_data.known_columns.
data = {"force": np.linspace(1e-9, 5e-9, 100),
"height (measured)": np.linspace(2e-6, -1e-6, 100)}
metadata.update(meta_override)
dd = {"data": data,
"metadata": metadata}
if callback is not None:
callback(1)
# You may also return a list with more items in case the file format
# contains more than one curve.
return [dd]
recipe_myf = {
"descr": "A short description",
"loader": load_my_format,
"suffix": ".myf",
"modality": "force-distance",
"maker": "designer of file format",
}
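# Hedged usage sketch (added for illustration, not part of the original
# template): loads a single curve with this loader and overrides one metadata
# value. The file name and the overridden key are illustrative assumptions.
def _example_load_my_format_usage():
    curves = load_my_format("example_data.myf",
                            meta_override={"spring constant": 0.05})
    return curves[0]["data"]["force"]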
| StarcoderdataPython |
1725363 | <gh_stars>10-100
import pytest
import virtool.users.utils
@pytest.fixture
def hmm_document():
return {
"_id": "f8666902",
"count": 4,
"length": 199,
"names": ["ORF-63", "ORF67", "hypothetical protein"],
"entries": [
{
"gi": "438000415",
"organism": "Thysanoplusia orichalcea nucleopolyhedrovirus",
"name": "hypothetical protein",
"accession": "YP_007250520.1",
},
{
"gi": "114679914",
"organism": "Leucania separata nucleopolyhedrovirus",
"name": "ORF67",
"accession": "YP_758364.1",
},
{
"gi": "209170953",
"organism": "Agrotis ipsilon multiple nucleopolyhedrovirus",
"name": "agip69",
"accession": "YP_002268099.1",
},
{
"gi": "90592780",
"organism": "Agrotis segetum nucleopolyhedrovirus",
"name": "ORF-63",
"accession": "YP_529733.1",
},
],
"total_entropy": 101.49,
"families": {"Baculoviridae": 3},
"genera": {"Alphabaculovirus": 3},
"cluster": 3463,
"mean_entropy": 0.51,
}
@pytest.fixture
def user_document():
return {
"_id": "bob",
"invalidate_sessions": False,
"last_password_change": "2017-10-06T13:00:00.000000",
"primary_group": "",
"groups": [],
"settings": {
"quick_analyze_workflow": "pathoscope_bowtie",
"show_ids": True,
"show_versions": True,
"skip_quick_analyze_dialog": True,
},
"permissions": {p: False for p in virtool.users.utils.PERMISSIONS},
"force_reset": False,
}
| StarcoderdataPython |
135245 | from smbus import SMBus
class MCPGPIO():
    __IODIR = [0x00, 0x01] # register addresses (port A, port B)
__GPPU = [0x0C, 0x0D]
__GPIO = [0x12, 0x13]
__OLAT = [0x14, 0x15]
INPUT = 1
OUTPUT = 0
INPUTPULLUP = 3
HIGH = 1
LOW = 0
def __init__(self,address = 0x20):
self.bus = SMBus(1)
self.addr = address
    def setup(self, pin, dir):
        if pin < 16:
            # Read the current direction register and update only this pin's bit,
            # so the caller's `dir` argument (INPUT/OUTPUT/INPUTPULLUP) is applied
            # instead of being overwritten by the register read.
            iodir = self.bus.read_byte_data(self.addr, self.__IODIR[int(pin/8)])
            iodir &= ~(0x01 << int(pin % 8))
            iodir |= (dir & 1) << int(pin % 8)
            self.bus.write_byte_data(self.addr, self.__IODIR[int(pin/8)], iodir)
            if (dir & 1) == 1:
                pu = self.bus.read_byte_data(self.addr, self.__GPPU[int(pin/8)])
                pu &= ~(0x01 << int(pin % 8))
                pu |= ((dir >> 1) & 1) << int(pin % 8)
                self.bus.write_byte_data(self.addr, self.__GPPU[int(pin/8)], pu)
def input(self, pin):
r = 0
if pin < 16:
gp = self.bus.read_byte_data(self.addr, self.__GPIO[int(pin/8)])
r = (gp >> int(pin%8) & 1)
return r
def output(self, pin, val):
if pin < 16:
gp = self.bus.read_byte_data(self.addr, self.__GPIO[int(pin/8)])
gp &= ~(0x01 << int(pin % 8))
gp |= (val & 1) << int(pin % 8)
self.bus.write_byte_data(self.addr, self.__GPIO[int(pin/8)], gp)
@property
def gpioa(self):
return self.bus.read_byte_data(self.addr, self.__GPIO[0])
@gpioa.setter
def gpioa(self, value):
self.bus.write_byte_data(self.addr,self.__GPIO[0], value)
@property
def gpiob(self):
return self.bus.read_byte_data(self.addr, self.__GPIO[1])
@gpiob.setter
def gpiob(self, value):
self.bus.write_byte_data(self.addr,self.__GPIO[1], value) | StarcoderdataPython |
50187 | # https://arxiv.org/pdf/1703.02910.pdf, Deep Bayesian Active Learning with Image Data
import numpy as np
from .baseline import Strategy
from ..helpers.time import timeit
class BayesianActiveLearning(Strategy):
def __init__(self, nb_forward=10, **kwargs):
super(BayesianActiveLearning, self).__init__()
self.nb_forward = nb_forward
@timeit
def evaluate_dataset(self, dataset, learner, log_time={}):
return np.stack([learner.inference(dataset, bayesian=True)['class_probabilities'] for _ in range(self.nb_forward)])
@timeit
def score_dataset(self, dataset, learner, log_time={}):
raise NotImplementedError
def return_top_indices(self, dataset, learner, top, log_time={}):
scores = self.score_dataset(dataset, learner, log_time=log_time)
sorted_idx = np.argsort(scores)
return sorted_idx[-top:]
class BayesianKLDivergence(BayesianActiveLearning):
@timeit
def score_dataset(self, dataset, learner, log_time={}):
stacked_probabilities = self.evaluate_dataset(
dataset, learner, log_time=log_time)
C, N, _ = stacked_probabilities.shape
consensus_probabilities = np.mean(stacked_probabilities, axis=0)
divergences = np.zeros((N, C))
for i in range(N):
for c in range(C):
probabilities_ic = stacked_probabilities[c, i]
probabilities_i = consensus_probabilities[i]
divergences[i, c] = np.sum(
probabilities_ic * np.log(probabilities_ic/probabilities_i))
return np.mean(divergences, axis=1)
class BayesianEntropyStrategy(BayesianActiveLearning):
@timeit
def score_dataset(self, dataset, learner, log_time={}):
stacked_probabilities = self.evaluate_dataset(
dataset, learner, log_time=log_time)
probabilities = np.mean(stacked_probabilities, axis=0)
assert len(probabilities) == len(dataset)
entropies = -np.sum(probabilities * np.log(probabilities), axis=1)
return entropies
class BayesianBALDStrategy(BayesianActiveLearning):
@timeit
def score_dataset(self, dataset, learner, log_time={}):
inference_result = learner.inference(dataset)
model_probabilities = inference_result['class_probabilities']
model_entropies = - \
np.sum(model_probabilities * np.log(model_probabilities), axis=1)
stacked_probabilities = self.evaluate_dataset(
dataset, learner, log_time=log_time)
average_entropies = - np.mean(
np.sum(stacked_probabilities * np.log(stacked_probabilities), axis=2), axis=0)
return model_entropies - average_entropies
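# Hedged usage sketch (added for illustration, not part of the original module):
# the strategies above only require a `learner` exposing
# `inference(dataset, bayesian=...)` that returns {'class_probabilities': ...}.
# The stub learner below is an assumption made purely to show the calling code.
class _StubLearner:
    def __init__(self, n_classes=3):
        self.n_classes = n_classes
    def inference(self, dataset, bayesian=False):
        probs = np.random.dirichlet(np.ones(self.n_classes), size=len(dataset))
        return {'class_probabilities': probs}
def _example_bald_usage():
    strategy = BayesianBALDStrategy(nb_forward=5)
    return strategy.return_top_indices(list(range(100)), _StubLearner(), top=10)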
| StarcoderdataPython |
86785 | <reponame>girisagar46/DjangoTrainingClass
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.core.mail import EmailMessage
from django.db.models import Q
from django.shortcuts import render, get_object_or_404
from django.template.loader import get_template
from django.utils import timezone
from django.views.generic import TemplateView
from django.shortcuts import redirect
from .models import Blog
from .forms import PostForm, CommentForm, ContactForm
from DjangoBlog.settings import DEFAULT_FROM_EMAIL, BLOG_NAME
class PostListView(TemplateView):
def get(self, request, *args, **kwargs):
blogs = Blog.objects.order_by('-created_date')
ctx = {"blogs": blogs}
return render(request, "blog/post.html", context=ctx)
class ContactView(TemplateView):
pass
# template_name = 'blog/contacts.html'
def contact_us(request):
    contact_form = ContactForm()
    if request.method == "POST":
        contact_form = ContactForm(data=request.POST)
        if contact_form.is_valid():
print("Form Valid")
name = request.POST.get('name', '')
user_email = request.POST.get('email', '')
text = request.POST.get('text', '')
context = {
'name': name,
'email': user_email,
'text': text,
}
template = get_template('contact_template.txt')
content = template.render(context=context)
email = EmailMessage(
"New comment is added in your blog.",
content,
BLOG_NAME + '',
[DEFAULT_FROM_EMAIL],
headers={'Reply-To': user_email}
)
email.send()
return redirect('contact')
else:
print("Form InValid")
return redirect('contact')
return render(request, 'blog/contacts.html', {"contact_form": contact_form})
class PostDetailView(TemplateView):
def get(self, request, *args, **kwargs):
post = get_object_or_404(Blog, pk=kwargs.get("pk"))
comments = post.comments.filter(active=True)
comment_form = CommentForm()
ctx = { "post": post, "comments":comments , "comment_form":comment_form}
return render(request, "blog/post_detail.html", context=ctx)
    @method_decorator(login_required)
def post(self, request, *args, **kwargs):
print("post called")
print(request.__dict__)
        comment_form = CommentForm(data=request.POST)
        post = get_object_or_404(Blog, pk=kwargs.get("pk"))
        if comment_form.is_valid():
            new_comment = comment_form.save(commit=False)
            new_comment.post = post
            new_comment.save()
        return redirect('post_detail', pk=post.pk)
def post_detail(request, pk):
post = get_object_or_404(Blog, pk=pk)
return render(request, 'blog/post_detail.html', {'post': post})
# class PostView(TemplateView):
# template_name = "blog/post.html"
#
# def get_context_data(self, *args, **kwargs):
# context = super(PostView, self).get_context_data(*args, **kwargs)
# print(context)
# blogs = Blog.objects.order_by('-created_date')
# context = {"blogs": blogs}
#
# return context
def search(request):
query = request.GET.get('searchQuery')
results = []
count = 0
if(query):
results = Blog.objects.filter(
Q(title__contains=query) |
Q(text__contains=query)
)
count = results.count()
return render(request, 'blog/results.html', context={'results':results, 'count':count})
@login_required
def post_new(request):
print(request.__dict__)
if request.method == "POST":
form = PostForm(request.POST)
if form.is_valid():
post = form.save(commit=False)
# post.author = User.objects.get(username=request.username)
post.author = request.user
post.published_date = timezone.now()
post.save()
return redirect('post_detail', pk=post.pk)
else:
form = PostForm()
return render(request, 'blog/post_edit.html', {'form': form}) | StarcoderdataPython |
3396819 | from .bert_for_EL_classification import BertForELClassification
__all__ = [
'BertForELClassification',
]
| StarcoderdataPython |
91808 | <reponame>rmishra1990/Hw-10-Web-Mongo-DB<filename>mars_scraping.py
from splinter import Browser
from bs4 import BeautifulSoup as bs
import time
import pandas as pd
def init_browser():
# @NOTE: Replace the path with your actual path to the chromedriver
executable_path = {"executable_path": "/usr/local/bin/chromedriver"}
return Browser("chrome", **executable_path, headless=False)
def mars_news():
mars_data={}
browser = init_browser()
#1. NASA Mars News scrape
url = 'https://mars.nasa.gov/news/'
print("Visiting news page")
browser.visit(url)
# Scrape page into Soup
html = browser.html
print("Scraping data...")
soup = bs(html, 'html.parser')
# latest article title
news_title = soup.find('div', class_='content_title').find('a').text
print("Found title..")
# latest article paragraph
news_p = soup.find('div', class_='article_teaser_body').text
mars_data['news_title'] = news_title
mars_data['news_p'] = news_p
print("Quitting browser")
browser.quit()
return mars_data
#2.scrape freatured image on nasa.gov
def featured_img():
mars_data={}
img_url = "https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars"
browser = init_browser()
browser.visit(img_url)
html = browser.html
# Scrape page into Soup
soup = bs(html, "html.parser")
# Featured image on mars
# Return results
articles=soup.find('div',class_="carousel_items").find('article')
imgtag=articles['style']
base_path='https://www.jpl.nasa.gov/'
#extract url from style
#get image URL through a string split
rel_path=imgtag.split("('", 1)[1].split("')")[0]
featured_image = base_path+ rel_path
mars_data['featured_image'] = featured_image
browser.quit()
return mars_data
#3.scrape Mars weather
def mars_weather():
mars_data={}
browser = init_browser()
weather_url = "https://twitter.com/marswxreport?lang=en"
browser.visit(weather_url)
html = browser.html
soup = bs(html, "html.parser")
weather_rel_path = soup.find("div", class_= "js-tweet-text-container")
mars_weather=weather_rel_path.find(class_="tweet-text")
myweather=''
i=0
for tag in mars_weather:
i=i+1
if i==1:
myweather=tag.string
print(myweather)
mars_data['weather'] = myweather
browser.quit()
return mars_data
#4. scrape mars facts
def mars_facts():
mars_data={}
browser = init_browser()
# scrape mars facts table
marsFacts_url = "https://space-facts.com/mars/"
browser.visit(marsFacts_url)
html = browser.html
soup = bs(html, "html.parser")
#Find Table and turn into data frame
marsFacts_table = soup.find("table",class_="tablepress-id-mars")
table_rows = marsFacts_table.find_all('tr')
res = []
for tr in table_rows:
td = tr.find_all('td')
row = [tr.text.strip() for tr in td if tr.text.strip()]
if row:
res.append(row)
#Initialize the pandas data frame
df = pd.DataFrame(res,columns=['Description','Measurement'])
mars_data['facts'] = res
browser.quit()
return mars_data
# 5. scrape mars hemisphere
def get_link_image(browser,link):
browser.visit(link)
html=browser.html
sp=bs(html,"html.parser")
tags=sp.find("div",class_="downloads").find('ul').find('li').find('a')
imglink=tags['href']
if 'jpg' in imglink:
return(imglink)
else:
return(None)
def mars_hemis():
mars_data={}
browser = init_browser()
marsurl='https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'
browser.visit(marsurl)
html=browser.html
#Find all the items
soup = bs(html, "html.parser")
#Find all links('a') on the page
items= soup.find_all("a",class_="itemLink product-item")
marsroot='https://astrogeology.usgs.gov'
i=0
hemisphere_image_urls=[]
for tag in items:
imageURL=marsroot+tag['href']
title=tag.find('h3')
if title is not None:
i=i+1
print(imageURL)
name=title.string
print(name)
print("Title is "+title.string)
#Call the function to follow the links
fullimglink=get_link_image(browser,imageURL)
if fullimglink is not None:
hemisphere_image_urls.append({"title" : name, 'img_url':fullimglink })
print("----<>-----")
#Print result dictionary
print(hemisphere_image_urls)
mars_data['hemisphere_image_urls'] = hemisphere_image_urls
browser.quit()
return mars_data
def main_mars_scrape_info():
news_result=mars_news()
print("--Lookinf for Images")
image_result=featured_img()
print("--Looking for mars weather")
weather= mars_weather()
print('-- Looking for facts on mars')
facts = mars_facts()
    print('--- Looking for Mars hemispheres')
hemisphere = mars_hemis()
marsdata={}
marsdata['news']=news_result
marsdata['featured_image']=image_result
marsdata['weather']=weather
marsdata['hemisphere']=hemisphere
marsdata['facts']=facts
return(marsdata)
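# Hedged usage sketch (added for illustration, not part of the original script):
# running the module directly could simply collect everything and print it.
if __name__ == '__main__':
    print(main_mars_scrape_info())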
| StarcoderdataPython |
109171 | <reponame>mixmasteru/CarND-Advanced-Lane-Lines
import glob
import os
import pickle
import cv2
import numpy as np
# grid counts
nx = 9
ny = 6
# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
objp = np.zeros((nx * ny, 3), np.float32)
objp[:, :2] = np.mgrid[0:nx, 0:ny].T.reshape(-1, 2)
# Arrays to store object points and image points from all the images.
objpoints = [] # 3d points in real world space
imgpoints = [] # 2d points in image plane.
# Make a list of calibration images
dirname = os.path.dirname(__file__)
path = os.path.join(dirname, '../camera_cal/*.jpg')
images = glob.glob(path)
# Step through the list and search for chessboard corners
for idx, fname in enumerate(images):
img = cv2.imread(fname)
print(fname)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Find the chessboard corners
ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None)
# If found, add object points, image points
if ret:
objpoints.append(objp)
imgpoints.append(corners)
print("corners_found")
# cv2.drawChessboardCorners(img, (nx, ny), corners, ret)
# cv2.imshow('img', img)
# cv2.waitKey(500)
path = os.path.join(dirname, '../camera_cal/calibration1.jpg')
img = cv2.imread(path)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
# save calibration
dist_pickle = {"mtx": mtx, "dist": dist}
pickle.dump(dist_pickle, open(dirname + "/../assets/mtx_dist_pickle.p", "wb"))
dst = cv2.undistort(img, mtx, dist, None, mtx)
cv2.imshow('dst', dst)
cv2.waitKey(5000)
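# Hedged usage sketch (added for illustration, not part of the original script):
# any other module can reload the pickled calibration and undistort an arbitrary
# image. The image path used below is an illustrative assumption.
def _example_undistort(image_name='test1.jpg'):
    with open(dirname + "/../assets/mtx_dist_pickle.p", "rb") as f:
        calib = pickle.load(f)
    image = cv2.imread(os.path.join(dirname, '../test_images', image_name))
    return cv2.undistort(image, calib["mtx"], calib["dist"], None, calib["mtx"])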
| StarcoderdataPython |
1760027 | import sys
from logging import getLogger
from logging import NullHandler
import grpc
import python_liftbridge.api_pb2_grpc
logger = getLogger(__name__)
logger.addHandler(NullHandler())
class BaseClient(object):
"""
Connect creates a Client connection for the given Liftbridge cluster.
"""
def __init__(self, ip_address='127.0.0.1:9292', timeout=5, tls_cert=None):
self.ip_address = ip_address
self.timeout = timeout
if tls_cert:
logger.debug(
'Creating a secure channel with address: %s' %
self.ip_address,
)
self.stub = self._secure_connect(tls_cert)
else:
logger.debug(
'Creating an insecure channel with address: %s' % self.ip_address,
)
self.channel = grpc.insecure_channel(self.ip_address)
self.stub = self._insecure_connect()
def _insecure_connect(self):
try:
grpc.channel_ready_future(
self.channel,
).result(timeout=self.timeout)
except grpc.FutureTimeoutError:
sys.exit('Error connecting to server')
else:
return python_liftbridge.api_pb2_grpc.APIStub(self.channel)
def _secure_connect(self, secure_file):
    # Hedged sketch: treat `secure_file` as a path to a PEM root certificate.
    with open(secure_file, 'rb') as f:
        credentials = grpc.ssl_channel_credentials(root_certificates=f.read())
    self.channel = grpc.secure_channel(self.ip_address, credentials)
    return python_liftbridge.api_pb2_grpc.APIStub(self.channel)
def close(self):
logger.debug('Closing channel')
self.channel.close()
def __repr__(self):
return str(self.__dict__)
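# Minimal usage sketch (assumes a Liftbridge server is reachable on the
# default address; the address and timeout values here are illustrative only).
if __name__ == '__main__':
    client = BaseClient(ip_address='127.0.0.1:9292', timeout=5)
    print(client)  # __repr__ prints the connection settings
    client.close()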
| StarcoderdataPython |
3217438 | """Contains the Mode base class."""
from typing import Any, Optional
from typing import Callable
from typing import Dict
from typing import List
from typing import Set
from typing import Tuple
from mpf.core.delays import DelayManager
from mpf.core.logging import LogMixin
from mpf.core.switch_controller import SwitchHandler
from mpf.core.events import EventHandlerKey
from mpf.core.events import QueuedEvent # pylint: disable-msg=cyclic-import,unused-import
MYPY = False
if MYPY: # pragma: no cover
from mpf.core.mode_device import ModeDevice # pylint: disable-msg=cyclic-import,unused-import
from mpf.core.player import Player # pylint: disable-msg=cyclic-import,unused-import
from mpf.core.machine import MachineController # pylint: disable-msg=cyclic-import,unused-import
# pylint: disable-msg=too-many-instance-attributes
class Mode(LogMixin):
"""Base class for a mode."""
__slots__ = ["machine", "config", "name", "path", "priority", "_active", "_starting", "_mode_start_wait_queue",
"stop_methods", "start_callback", "stop_callbacks", "event_handlers", "switch_handlers",
"mode_stop_kwargs", "mode_devices", "start_event_kwargs", "stopping", "delay", "player",
"auto_stop_on_ball_end", "restart_on_next_ball", "asset_paths"]
# pylint: disable-msg=too-many-arguments
def __init__(self, machine: "MachineController", config, name: str, path, asset_paths) -> None:
"""Initialise mode.
Args:
machine: the machine controller
config: config dict for mode
name: name of mode
path: path of mode
asset_paths: all paths to consider for assets in this mode
"""
super().__init__()
self.machine = machine # type: MachineController
self.config = config # type: ignore
self.name = name
self.path = path
self.asset_paths = asset_paths
self.priority = 0
self._active = False
self._starting = False
self._mode_start_wait_queue = None # type: Optional[QueuedEvent]
self.stop_methods = list() # type: List[Tuple[Callable[[Any], None], Any]]
self.start_callback = None # type: Optional[Callable[[], None]]
self.stop_callbacks = [] # type: List[Callable[[], None]]
self.event_handlers = set() # type: Set[EventHandlerKey]
self.switch_handlers = list() # type: List[SwitchHandler]
self.mode_stop_kwargs = dict() # type: Dict[str, Any]
self.mode_devices = set() # type: Set[ModeDevice]
self.start_event_kwargs = {} # type: Dict[str, Any]
self.stopping = False
self.delay = DelayManager(self.machine)
'''DelayManager instance for delays in this mode. Note that all delays
scheduled here will be automatically canceled when the mode stops.'''
self.player = None # type: Optional[Player]
'''Reference to the current player object.'''
self.configure_logging('Mode.' + name,
self.config['mode']['console_log'],
self.config['mode']['file_log'])
self.configure_mode_settings(config.get('mode', dict()))
self.auto_stop_on_ball_end = self.config['mode']['stop_on_ball_end']
'''Controls whether this mode is stopped when the ball ends,
regardless of its stop_events settings.
'''
self.restart_on_next_ball = self.config['mode']['restart_on_next_ball']
'''Controls whether this mode will restart on the next ball. This only
works if the mode was running when the ball ended. It's tracked per-
player in the 'restart_modes_on_next_ball' player variable.
'''
if self.config['mode']['game_mode'] and not self.config['mode']['stop_on_ball_end']:
self.raise_config_error("All game modes need to stop at ball end. If you want to set stop_on_ball_end to "
"False also set game_mode to False.", 1)
@staticmethod
def get_config_spec() -> str:
"""Return config spec for mode_settings."""
return '''
__valid_in__: mode
__allow_others__:
'''
def __repr__(self):
"""Return string representation."""
return '<Mode.{}>'.format(self.name)
@property
def active(self) -> bool:
"""Return *True* if this mode is active."""
return self._active
@active.setter
def active(self, new_active: bool):
"""Setter for _active."""
if self._active != new_active:
self._active = new_active
self.machine.mode_controller.set_mode_state(self, self._active)
def configure_mode_settings(self, config: dict) -> None:
"""Process this mode's configuration settings from a config dictionary."""
self.config['mode'] = self.machine.config_validator.validate_config(
config_spec='mode', source=config, section_name='mode')
for event in self.config['mode']['start_events']:
self.machine.events.add_handler(event=event, handler=self.start,
priority=self.config['mode']['priority'] +
self.config['mode']['start_priority'])
@property
def is_game_mode(self) -> bool:
"""Return true if this is a game mode."""
return bool(self.config['mode']['game_mode'])
def start(self, mode_priority=None, callback=None, **kwargs) -> None:
"""Start this mode.
Args:
mode_priority: Integer value of what you want this mode to run at. If you
don't specify one, it will use the "Mode: priority" setting from
this mode's configuration file.
callback: Callback to call when this mode has been started.
**kwargs: Catch-all since this mode might start from events with
who-knows-what keyword arguments.
Warning: You can safely call this method, but do not override it in your
mode code. If you want to write your own mode code by subclassing Mode,
put whatever code you want to run when this mode starts in the
mode_start method which will be called automatically.
"""
# remove argument so we do not repost this
kwargs.pop('_from_bcp', None)
self.debug_log("Received request to start")
if self.config['mode']['game_mode'] and not (self.machine.game and self.player):
self.warning_log("Can only start mode %s during a game. Aborting start.", self.name)
return
if self._active:
self.debug_log("Mode is already active. Aborting start.")
return
if self._starting:
self.debug_log("Mode already starting. Aborting start.")
return
self._starting = True
self.machine.events.post('mode_{}_will_start'.format(self.name), **kwargs)
'''event: mode_(name)_will_start
desc: Posted when a mode is about to start. The "name" part is replaced
with the actual name of the mode, so the actual event posted is
something like *mode_attract_will_start*, *mode_base_will_start*, etc.
This is posted before the "mode_(name)_starting" event.
'''
if self.config['mode']['use_wait_queue'] and 'queue' in kwargs:
self.debug_log("Registering a mode start wait queue")
self._mode_start_wait_queue = kwargs['queue']
assert isinstance(self._mode_start_wait_queue, QueuedEvent)
self._mode_start_wait_queue.wait()
if isinstance(mode_priority, int):
self.priority = mode_priority
else:
self.priority = self.config['mode']['priority']
self.start_event_kwargs = kwargs
# hook for custom code. called before any mode devices are set up
self.mode_will_start(**self.start_event_kwargs)
self._add_mode_devices()
self.debug_log("Registering mode_stop handlers")
# register mode stop events
if 'stop_events' in self.config['mode']:
for event in self.config['mode']['stop_events']:
# stop priority is +1 so if two modes of the same priority
# start and stop on the same event, the one will stop before
# the other starts
self.add_mode_event_handler(event=event, handler=self.stop,
priority=self.config['mode']['stop_priority'] + 1)
self.start_callback = callback
self.debug_log("Calling mode_start handlers")
for item in self.machine.mode_controller.start_methods:
if item.config_section in self.config or not item.config_section:
result = item.method(config=self.config.get(item.config_section, self.config),
priority=self.priority,
mode=self,
**item.kwargs)
if result:
self.stop_methods.append(result)
self._setup_device_control_events()
self.machine.events.post_queue(event='mode_{}_starting'.format(self.name),
callback=self._started, **kwargs)
'''event: mode_(name)_starting
desc: The mode called "name" is starting.
This is a queue event. The mode will not fully start until the queue is
cleared.
'''
def _started(self, **kwargs) -> None:
"""Handle result of mode_<name>_starting queue event."""
del kwargs
if self.machine.is_shutting_down:
self.info_log("Will not start because machine is shutting down.")
return
self.info_log('Started. Priority: %s', self.priority)
self.active = True
self._starting = False
for event_name in self.config['mode']['events_when_started']:
self.machine.events.post(event_name)
self.machine.events.post(event='mode_{}_started'.format(self.name), callback=self._mode_started_callback,
**self.start_event_kwargs)
'''event: mode_(name)_started
desc: Posted when a mode has started. The "name" part is replaced
with the actual name of the mode, so the actual event posted is
something like *mode_attract_started*, *mode_base_started*, etc.
This is posted after the "mode_(name)_starting" event.
'''
def _mode_started_callback(self, **kwargs) -> None:
"""Handle result of mode_<name>_started queue event."""
del kwargs
self.mode_start(**self.start_event_kwargs)
self.start_event_kwargs = dict()
if self.start_callback:
self.start_callback()
self.debug_log('Mode Start process complete.')
def stop(self, callback: Any = None, **kwargs) -> bool:
"""Stop this mode.
Args:
callback: Method which will be called once this mode has stopped. Will only be called when the mode is
running (includes currently stopping)
**kwargs: Catch-all since this mode might start from events with
who-knows-what keyword arguments.
Warning: You can safely call this method, but do not override it in your
mode code. If you want to write your own mode code by subclassing Mode,
put whatever code you want to run when this mode stops in the
mode_stop method which will be called automatically.
Returns true if the mode is running. Otherwise false.
"""
if not self._active:
return False
if callback:
self.stop_callbacks.append(callback)
# do not stop twice. only register callback in that case
if self.stopping:
# mode is still running
return True
self.machine.events.post('mode_' + self.name + '_will_stop')
'''event: mode_(name)_will_stop
desc: Posted when a mode is about to stop. The "name" part is replaced
with the actual name of the mode, so the actual event posted is
something like *mode_attract_will_stop*, *mode_base_will_stop*, etc.
This is posted immediately before the "mode_(name)_stopping" event.
'''
self.stopping = True
self.mode_stop_kwargs = kwargs
self.debug_log('Mode Stopping.')
self._remove_mode_switch_handlers()
self.delay.clear()
self.machine.events.post_queue(event='mode_' + self.name + '_stopping',
callback=self._stopped)
'''event: mode_(name)_stopping
desc: The mode called "name" is stopping. This is a queue event. The
mode won't actually stop until the queue is cleared.
'''
return True
def _stopped(self) -> None:
self.info_log('Stopped.')
self.priority = 0
self.active = False
self.stopping = False
for item in self.stop_methods:
item[0](item[1])
self.stop_methods = list()
for event_name in self.config['mode']['events_when_stopped']:
self.machine.events.post(event_name)
self.machine.events.post('mode_' + self.name + '_stopped',
callback=self._mode_stopped_callback)
'''event: mode_(name)_stopped
desc: Posted when a mode has stopped. The "name" part is replaced
with the actual name of the mode, so the actual event posted is
something like *mode_attract_stopped*, *mode_base_stopped*, etc.
'''
self.machine.events.post('clear', key=self.name)
'''event: clear
args:
key: string name of the configs to clear
desc: Posted to cause config players to clear whatever they're running
based on the key passed. Typically posted when a show or mode ends.
'''
if self._mode_start_wait_queue:
self.debug_log("Clearing wait queue")
self._mode_start_wait_queue.clear()
self._mode_start_wait_queue = None
def _mode_stopped_callback(self, **kwargs) -> None:
del kwargs
# Call the mode_stop() method before removing the devices
self.mode_stop(**self.mode_stop_kwargs)
self.mode_stop_kwargs = dict()
# Clean up the mode handlers and devices
self._remove_mode_event_handlers()
self._remove_mode_devices()
for callback in self.stop_callbacks:
callback()
self.stop_callbacks = []
def _add_mode_devices(self) -> None:
# adds and initializes mode devices which get removed at the end of the mode
for collection_name, device_class in (
iter(self.machine.device_manager.device_classes.items())):
# check if there is config for the device type
if device_class.config_section in self.config:
for device_name in self.config[device_class.config_section]:
collection = getattr(self.machine, collection_name)
# get device
device = collection[device_name]
# Track that this device was added via this mode so we
# can remove it when the mode ends.
self.mode_devices.add(device)
if not self.config['mode']['game_mode'] and not device.can_exist_outside_of_game:
raise AssertionError("Device {} cannot exist in non game-mode {}.".format(
device, self.name
))
# This lets the device know it was added to a mode
device.device_loaded_in_mode(mode=self, player=self.player)
def create_mode_devices(self) -> None:
"""Create new devices that are specified in a mode config that haven't been created in the machine-wide."""
self.debug_log("Scanning config for mode-based devices")
for collection_name, device_class in iter(self.machine.device_manager.device_classes.items()):
# check if there is config for the device type
if device_class.config_section not in self.config:
continue
for device, settings in iter(self.config[device_class.config_section].items()):
collection = getattr(self.machine, collection_name)
if device not in collection: # no existing device, create
self.debug_log("Creating mode-based device: %s",
device)
self.machine.device_manager.create_devices(
collection.name, {device: settings})
async def load_mode_devices(self) -> None:
"""Load config of mode devices."""
for collection_name, device_class in iter(self.machine.device_manager.device_classes.items()):
# check if there is config for the device type
if device_class.config_section not in self.config:
continue
for device, settings in iter(self.config[device_class.config_section].items()):
collection = getattr(self.machine, collection_name)
device = collection[device]
settings = device.prepare_config(settings, True)
settings = device.validate_and_parse_config(settings, True, "mode:" + self.name)
if device.config:
self.debug_log("Overwrite mode-based device: %s", device)
# overload
device.overload_config_in_mode(self, settings)
else:
self.debug_log("Initializing mode-based device: %s", device)
# load config
device.load_config(settings)
for collection_name, device_class in iter(self.machine.device_manager.device_classes.items()):
# check if there is config for the device type
if device_class.config_section not in self.config:
continue
for device, settings in iter(self.config[device_class.config_section].items()):
collection = getattr(self.machine, collection_name)
device = collection[device]
await device.device_added_to_mode(mode=self)
def _remove_mode_devices(self) -> None:
for device in self.mode_devices:
device.device_removed_from_mode(self)
self.mode_devices = set()
def _setup_device_control_events(self) -> None:
# registers mode handlers for control events for all devices specified
# in this mode's config (not just newly-created devices)
self.debug_log("Scanning mode-based config for device control_events")
for event, method, delay, device in (
self.machine.device_manager.get_device_control_events(
self.config)):
if not delay:
self.add_mode_event_handler(
event=event,
handler=method,
blocking_facility=device.class_label)
else:
self.add_mode_event_handler(
event=event,
handler=self._control_event_handler,
callback=method,
ms_delay=delay,
blocking_facility=device.class_label)
# get all devices in the mode
device_list = set() # type: Set[ModeDevice]
for collection in self.machine.device_manager.collections:
if self.machine.device_manager.collections[collection].config_section in self.config:
for device, _ in \
iter(self.config[self.machine.device_manager.collections[collection].config_section].items()):
device_list.add(self.machine.device_manager.collections[collection][device])
for device in device_list:
device.add_control_events_in_mode(self)
def _control_event_handler(self, callback: Callable[..., None], ms_delay: int = 0, **kwargs) -> None:
del kwargs
self.debug_log("_control_event_handler: callback: %s,", callback)
self.delay.add(ms=ms_delay, callback=callback, mode=self)
def add_mode_event_handler(self, event: str, handler: Callable, priority: int = 0, **kwargs) -> EventHandlerKey:
"""Register an event handler which is automatically removed when this mode stops.
This method is similar to the Event Manager's add_handler() method,
except this method automatically unregisters the handlers when the mode
ends.
Args:
event: String name of the event you're adding a handler for. Since
events are text strings, they don't have to be pre-defined.
handler: The method that will be called when the event is fired.
priority: An arbitrary integer value that defines what order the
handlers will be called in. The default is 0, so if you have a
handler that you want to be called first, add it here with a
priority of 1. (Or 3 or 10 or 100000.) The numbers don't matter.
They're called from highest to lowest. (i.e. priority 100 is
called before priority 1.)
**kwargs: Any additional keyword/argument pairs entered here
will be attached to the handler and called whenever that handler
is called. Note these are in addition to kwargs that could be
passed as part of the event post. If there's a conflict, the
event-level ones will win.
Returns a EventHandlerKey to the handler which you can use to later remove
the handler via ``remove_handler_by_key``. Though you don't need to
remove the handler since the whole point of this method is they're
automatically removed when the mode stops.
Note that if you do add a handler via this method and then remove it
manually, that's ok too.
"""
key = self.machine.events.add_handler(event, handler, self.priority + priority, mode=self, **kwargs)
self.event_handlers.add(key)
return key
def _remove_mode_event_handlers(self) -> None:
for key in self.event_handlers:
self.machine.events.remove_handler_by_key(key)
self.event_handlers = set()
def _remove_mode_switch_handlers(self) -> None:
for handler in self.switch_handlers:
self.machine.switch_controller.remove_switch_handler_by_key(handler)
self.switch_handlers = list()
def initialise_mode(self) -> None:
"""Initialise this mode."""
self.mode_init()
def mode_init(self) -> None:
"""User-overrideable method which will be called when this mode initializes as part of the MPF boot process."""
def mode_will_start(self, **kwargs) -> None:
"""User-overrideable method which will be called whenever this mode starts (i.e. before it becomes active)."""
def mode_start(self, **kwargs) -> None:
"""User-overrideable method which will be called whenever this mode starts (i.e. whenever it becomes active)."""
def mode_stop(self, **kwargs) -> None:
"""User-overrideable method which will be called whenever this mode stops."""
| StarcoderdataPython |
3333907 | <filename>Photo.py
class Photo:
def __init__(self, index, horizontal, tags):
self.index = index
self.horizontal = horizontal
self.tags = tags
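# Hedged usage sketch; the index/orientation/tag values are made up and only
# illustrate the constructor's argument order.
if __name__ == '__main__':
    beach = Photo(0, True, {'beach', 'cat'})
    selfie = Photo(1, False, {'beach', 'selfie'})
    print(beach.index, beach.horizontal, beach.tags & selfie.tags)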
| StarcoderdataPython |
144751 | import r_jeff_epler_1
print r_jeff_epler_1.blowup([2, 3, 5])
| StarcoderdataPython |
85730 | #!/usr/bin/env python -*- coding: utf-8 -*-
#
# Python Word Sense Disambiguation (pyWSD): SemEval REader API
#
# Copyright (C) 2014-2020 alvations
# URL:
# For license information, see LICENSE.md
import os, io
from collections import namedtuple
from BeautifulSoup import BeautifulSoup as bsoup
from pywsd.utils import remove_tags, semcor_to_synset
Instance = namedtuple('instance', 'id, lemma, word')
Term = namedtuple('term', 'id, pos, lemma, sense, type')
Word = namedtuple('word', 'id, text, sentid, paraid, term')
Answer = namedtuple('answer', 'sensekey, lemma, pos')
class SemEval2007_Coarse_WSD:
"""
Object to load data from SemEval-2007 Coarse-grain all-words WSD task.
USAGE:
>>> coarse_wsd = SemEval2007_Coarse_WSD()
>>> for inst, ans, sent, doc in coarse_wsd:
... print inst
... print inst.id, inst.lemma, inst.word
... print ans.sensekey
... break
instance(id=u'd001.s001.t001', lemma=u'editorial', word=u'editorial')
d001.s001.t001 editorial editorial
[u'editorial%1:10:00::']
"""
def __init__(self, path='data/semeval2007_coarse_grain_wsd/'):
self.path = path
self.test_file = self.path + 'eng-coarse-all-words.xml'
self.test_ans = self.path + 'dataset21.test.key'
def fileids(self):
""" Returns files from SemEval2007 Coarse-grain All-words WSD task. """
return [os.path.join(self.path,i) for i in os.listdir(self.path)]
def sents(self, filename=None):
"""
Returns the file, line by line. Use test_file if no filename specified.
"""
filename = filename if filename else self.test_file
with io.open(filename, 'r') as fin:
for line in fin:
yield line.strip()
def get_answers(self):
"""
Returns a {(key,value), ...} dictionary of {(instance_id,Answer),...)}
>>> coarse_wsd = SemEval2007_Coarse_WSD()
>>> inst2ans = coarse_wsd.get_answers()
>>> for inst in inst2ans:
... print inst, inst2ans[inst]
... break
"""
inst2ans = {}
with io.open(self.test_ans, 'r') as fin:
for line in fin:
line, _, lemma = line.strip().rpartition(' !! ')
lemma, pos = lemma[6:].split('#')
textid, _, line = line.partition(' ')
instid, _, line = line.partition(' ')
sensekey = line.split()
# What to do if there is no synset to convert to...
# synsetkey = [semcor_to_synset(i) for i in sensekey]
inst2ans[instid] = Answer(sensekey, lemma, pos)
return inst2ans
def yield_sentences(self):
test_file = io.open(self.test_file, 'r').read()
inst2ans = self.get_answers()
for text in bsoup(test_file).findAll('text'):
if not text:
continue
textid = text['id']
context_doc = " ".join([remove_tags(i) for i in
str(text).split('\n') if remove_tags(i)])
for sent in text.findAll('sentence'):
context_sent = " ".join([remove_tags(i) for i in
str(sent).split('\n') if remove_tags(i)])
yield sent, context_sent, context_doc, inst2ans, textid
def test_instances(self):
"""
Returns the test instances from SemEval2007 Coarse-grain WSD task.
>>> coarse_wsd = SemEval2007_Coarse_WSD()
>>> inst2ans = coarse_wsd.get_answers()
>>> for inst in inst2ans:
... print inst, inst2ans[inst]
... break
d004.s073.t013 answer(sensekey=[u'pointer%1:06:01::', u'pointer%1:06:00::', u'pointer%1:10:00::'], lemma=u'pointer', pos=u'n')
"""
for sent, context_sent, context_doc, inst2ans, textid in self.yield_sentences():
for instance in sent.findAll('instance'):
instid = instance['id']
lemma = instance['lemma']
word = instance.text
inst = Instance(instid, lemma, word)
yield inst, inst2ans[instid], unicode(context_sent), unicode(context_doc)
def sentences(self):
"""
Returns the instances by sentences, and yields a list of tokens,
similar to the pywsd.semcor.sentences.
>>> coarse_wsd = SemEval2007_Coarse_WSD()
>>> for sent in coarse_wsd.sentences():
>>> for token in sent:
>>> print token
>>> break
>>> break
word(id=None, text=u'Your', offset=None, sentid=0, paraid=u'd001', term=None)
"""
for sentid, ys in enumerate(self.yield_sentences()):
sent, context_sent, context_doc, inst2ans, textid = ys
instances = {}
for instance in sent.findAll('instance'):
instid = instance['id']
lemma = instance['lemma']
word = instance.text
instances[instid] = Instance(instid, lemma, word)
tokens = []
for i in sent: # Iterates through BeautifulSoup object.
if str(i).startswith('<instance'): # BeautifulSoup.Tag
instid = sent.find('instance')['id']
inst = instances[instid]
answer = inst2ans[instid]
term = Term(instid, answer.pos, inst.lemma, answer.sensekey,
type='open')
tokens.append(Word(instid, inst.word,
sentid, textid, term))
else: # if BeautifulSoup.NavigableString
tokens+=[Word(None, w, sentid, textid, None)
for w in i.split()]
yield tokens
def __iter__(self):
""" Iterator function, duck-type of test_instances() """
return self.sentences()
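# Hedged usage sketch (Python 2, mirroring the class docstring; it needs the
# SemEval-2007 coarse-grained data to exist under the default `path`).
if __name__ == '__main__':
    coarse_wsd = SemEval2007_Coarse_WSD()
    for inst, ans, sent, doc in coarse_wsd.test_instances():
        print inst.id, inst.lemma, ans.sensekey
        break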
| StarcoderdataPython |
139725 | import os
import re
import dgl
import numpy as np
from data import *
def get_edgelists(edgelist_expression, directory):
if "," in edgelist_expression:
return edgelist_expression.split(",")
files = os.listdir(directory)
compiled_expression = re.compile(edgelist_expression)
return [filename for filename in files if compiled_expression.match(filename)]
def construct_graph(training_dir, edges, nodes, target_node_type, heterogeneous=True):
if heterogeneous:
print("Getting relation graphs from the following edge lists : {} ".format(edges))
edgelists, id_to_node = {}, {}
for i, edge in enumerate(edges):
edgelist, id_to_node, src, dst = parse_edgelist(os.path.join(training_dir, edge), id_to_node, header=True)
if src == target_node_type:
src = 'target'
if dst == target_node_type:
dst = 'target'
edgelists[(src, 'relation{}'.format(i), dst)] = edgelist
print("Read edges for relation{} from edgelist: {}".format(i, os.path.join(training_dir, edge)))
# reverse edge list so that relation is undirected
edgelists[(dst, 'reverse_relation{}'.format(i), src)] = [(b, a) for a, b in edgelist]
# get features for target nodes
features, new_nodes = get_features(id_to_node[target_node_type], os.path.join(training_dir, nodes))
print("Read in features for target nodes")
# handle target nodes that have features but don't have any connections
# if new_nodes:
# edgelists[('target', 'relation'.format(i+1), 'none')] = [(node, 0) for node in new_nodes]
# edgelists[('none', 'reverse_relation{}'.format(i + 1), 'target')] = [(0, node) for node in new_nodes]
# add self relation
edgelists[('target', 'self_relation', 'target')] = [(t, t) for t in id_to_node[target_node_type].values()]
g = dgl.heterograph(edgelists)
print(
"Constructed heterograph with the following metagraph structure: Node types {}, Edge types{}".format(
g.ntypes, g.canonical_etypes))
print("Number of nodes of type target : {}".format(g.number_of_nodes('target')))
g.nodes['target'].data['features'] = features
id_to_node = id_to_node[target_node_type]
else:
sources, sinks, features, id_to_node = read_edges(os.path.join(training_dir, edges[0]),
os.path.join(training_dir, nodes))
# add self relation
all_nodes = sorted(id_to_node.values())
sources.extend(all_nodes)
sinks.extend(all_nodes)
g = dgl.graph((sources, sinks))
if features:
g.ndata['features'] = np.array(features).astype('float32')
print('read graph from node list and edge list')
features = g.ndata['features']
return g, features, id_to_node
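# Hedged wiring example; the directory, file names and target node type are
# placeholders -- substitute whatever the calling training script actually uses.
if __name__ == '__main__':
    training_dir = './training-data'
    edges = get_edgelists('relation*', training_dir)
    g, features, id_to_node = construct_graph(
        training_dir, edges, 'features.csv', 'TransactionID', heterogeneous=True)
    print(g)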
| StarcoderdataPython |
1662197 | <gh_stars>1-10
"""
355. shuttleInBuildings
https://www.lintcode.com/problem/shuttleinbuildings/description?_from=contest&&fromId=103
dp
"""
import sys
class Solution:
"""
@param heights: the heights of buildings.
@param k: the vision.
@param x: the energy to spend of the first action.
@param y: the energy to spend of the second action.
@return: the minimal energy to spend.
"""
def shuttleInBuildings(self, heights, k, x, y):
# write your code here.
stack = []
n = len(heights)
first_highest = [-1] * n
for i in range(n):
while stack and heights[stack[-1]] < heights[i]:
idx = stack.pop()
if i - idx <= k:
first_highest[i] = idx
stack.append(i)
dp = [sys.maxsize] * (n)
dp[0] = 0
for i in range(1,n):
dp[i] = min(dp[i],dp[i - 1] + y)
if i >= 2:
dp[i] = min(dp[i],dp[i - 2] + y)
if first_highest[i] != -1:
dp[i] = min(dp[i],dp[first_highest[i]] + x)
return dp[n-1] | StarcoderdataPython |
3239463 | <gh_stars>1-10
###############################################################################
#
# Copyright (C) 2021 - Skinok
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from PyQt5 import QtWidgets
from PyQt5 import QtGui
from PyQt5 import QtCore
from numpy import NaN
from pyqtgraph.dockarea import DockArea, Dock
import sys, os
sys.path.append('../finplot')
import finplot as fplt
import backtrader as bt
# Ui made with Qt Designer
import strategyTesterUI
import strategyResultsUI
import indicatorParametersUI
import loadDataFilesUI
# Import Chart lib
import finplotWindow
import datetime
import qdarkstyle
import pandas as pd
import functools
class UserInterface:
#########
#
#########
def __init__(self,controller):
self.controller = controller
# It does not finish by a "/"
self.current_dir_path = os.path.dirname(os.path.realpath(__file__))
# Qt
self.app = QtWidgets.QApplication([])
self.win = QtWidgets.QMainWindow()
# All dock area of each time frame
self.dockAreaTimeframes = {}
self.dock_charts= {}
self.dock_rsi= {}
self.dock_stochastic= {}
self.dock_stochasticRsi= {}
self.fpltWindow = {}
self.timeFramePB = {}
# Resize windows properties
self.win.resize(1600,1100)
self.win.setWindowTitle("Skinok Backtrader UI v0.3")
# Set width/height of QSplitter
self.app.setStyleSheet(qdarkstyle.load_stylesheet_pyqt5())
pass
def initialize(self):
# Docks
self.createMainDocks()
# Create all buttons above the chart window
self.createControlPanel()
self.createUIs()
# Enable run button
self.strategyTesterUI.runBacktestPB.setEnabled(False)
self.strategyTesterUI.initialize()
pass
#########
# Create all chart dock for ONE timeframe
#########
def createChartDock(self, timeframe):
# this needs to be changed later
self.current_timeframe = timeframe
# Each time frame has its own dock area
self.dockAreaTimeframes[timeframe] = DockArea()
self.stackedCharts.addWidget(self.dockAreaTimeframes[timeframe])
# Create Chart widget
self.dock_charts[timeframe] = Dock("dock_chart_"+timeframe, size = (1000, 500), closable = False, hideTitle=True)
self.dockAreaTimeframes[timeframe].addDock(self.dock_charts[timeframe])
# Create indicator docks (RSI / Stochastic / Stochastic RSI)
self.dock_rsi[timeframe] = Dock("RSI", size = (1000, 120), closable = False, hideTitle=True)
self.dockAreaTimeframes[timeframe].addDock(self.dock_rsi[timeframe], position='bottom', relativeTo=self.dock_charts[timeframe])
self.dock_rsi[timeframe].hide()
self.dock_stochastic[timeframe] = Dock("Stochastic", size = (1000, 120), closable = False, hideTitle=True)
self.dockAreaTimeframes[timeframe].addDock(self.dock_stochastic[timeframe], position='bottom', relativeTo=self.dock_charts[timeframe])
self.dock_stochastic[timeframe].hide()
self.dock_stochasticRsi[timeframe] = Dock("Stochastic Rsi", size = (1000, 120), closable = False, hideTitle=True)
self.dockAreaTimeframes[timeframe].addDock(self.dock_stochasticRsi[timeframe], position='bottom', relativeTo=self.dock_charts[timeframe])
self.dock_stochasticRsi[timeframe].hide()
# Create finplot Window
self.fpltWindow[timeframe] = finplotWindow.FinplotWindow(self.dockAreaTimeframes[timeframe], self.dock_charts[timeframe], self)
self.fpltWindow[timeframe].createPlotWidgets(timeframe)
self.fpltWindow[timeframe].show()
# Create timeframe button
self.timeFramePB[timeframe] = QtWidgets.QRadioButton(self.controlPanel)
self.timeFramePB[timeframe].setText(timeframe)
self.timeFramePB[timeframe].setCheckable(True)
self.timeFramePB[timeframe].setMaximumWidth(100)
self.timeFramePB[timeframe].toggled.connect(lambda: self.toogleTimeframe(timeframe) )
self.timeFramePB[timeframe].toggle()
self.controlPanelLayout.insertWidget(0,self.timeFramePB[timeframe])
# init checked after connecting the slot
if self.darkmodeCB.isChecked():
self.dark_mode_toggle()
pass
#########
# Create all main window docks
#########
def createMainDocks(self):
self.dockArea = DockArea()
self.win.setCentralWidget(self.dockArea)
# Create Stacked widget
self.dock_stackedCharts = Dock("dock_stackedCharts", size = (1000, 500), closable = False, hideTitle=True )
self.dockArea.addDock(self.dock_stackedCharts, position='above')
self.stackedCharts = QtWidgets.QStackedWidget(self.dock_stackedCharts)
self.dock_stackedCharts.addWidget( self.stackedCharts, 1 , 0 )
# Create Strategy Tester Tab
self.dock_strategyTester = Dock("Strategy Tester", size = (200, 500), closable = False, hideTitle=True)
self.dockArea.addDock(self.dock_strategyTester, position='left')
# Create Strategy Results Tab
self.dock_strategyResultsUI = Dock("Strategy Tester", size = (1000, 250), closable = False, hideTitle=True)
self.dockArea.addDock(self.dock_strategyResultsUI, position='bottom')
pass
#########
# Create all dock contents
#########
def createUIs(self):
self.createStrategyTesterUI()
self.createTradesUI()
self.createLoadDataFilesUI()
#self.createOrdersUI()
self.createSummaryUI()
self.createActions()
self.createMenuBar()
pass
#########
# Quick menu actions
#########
def createActions(self):
# Indicators
#self.indicatorsActionGroup = QtWidgets.QActionGroup(self.win)
# Ichimoku
#self.addIchimokuAction = QtWidgets.QAction(QtGui.QIcon(""),"Add Ichimoku", self.indicatorsActionGroup)
#self.addIchimokuAction.triggered.connect( self.addIndicator )
#self.indicatorsActionGroup.addAction(self.addIchimokuAction)
# Data sources
self.backtestDataActionGroup = QtWidgets.QActionGroup(self.win)
self.openCSVAction = QtWidgets.QAction(QtGui.QIcon(""),"Open CSV File", self.backtestDataActionGroup)
self.openCSVAction.triggered.connect( self.loadDataFileUI.show )
#self.DataSourceAction = QAction(QtWidgets.QIcon(""),"Choose Data Source", self.toolbar)
#self.DataSourceAction.triggered.connect( self.l )
#self.toolbar.addAction(self.addIchimokuAction)
# Options
self.optionsActionGroup = QtWidgets.QActionGroup(self.win)
#self.darkModeAction = QtWidgets.QAction(QtGui.QIcon(""),"Switch Color Mode", self.optionsActionGroup)
#self.darkModeAction.triggered.connect( self.dark_mode_toggle )
#self.optionsActionGroup.addAction(self.darkModeAction)
pass
#########
# UI : main window menu bar
#########
def createMenuBar(self):
self.menubar = self.win.menuBar()
#self.indicatorsMenu = self.menubar.addMenu("Indicators")
#self.indicatorsMenu.addActions(self.indicatorsActionGroup.actions())
self.backtestDataMenu = self.menubar.addMenu("Backtest Data")
self.backtestDataMenu.addActions(self.backtestDataActionGroup.actions())
self.optionsMenu = self.menubar.addMenu("Options")
self.optionsMenu.addActions(self.optionsActionGroup.actions())
pass
#########
# Strategy results : trades tab
#########
def createTradesUI(self):
self.tradeTableWidget = QtWidgets.QTableWidget(self.strategyResultsUI.TradesGB)
self.tradeTableWidget.setColumnCount(7)
labels = [ "Trade id","Direction", "Date Open", "Date Close", "Price", "Commission", "Profit Net" ]
self.tradeTableWidget.setHorizontalHeaderLabels( labels )
self.tradeTableWidget.verticalHeader().setVisible(False)
self.tradeTableWidget.horizontalHeader().setStretchLastSection(True)
self.tradeTableWidget.horizontalHeader().setSectionResizeMode(QtWidgets.QHeaderView.Stretch)
#self.tradeTableWidget.setStyleSheet("alternate-background-color: #AAAAAA;background-color: #CCCCCC;")
self.tradeTableWidget.setAlternatingRowColors(True)
self.tradeTableWidget.setSortingEnabled(True)
self.tradeTableWidget.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)
self.tradeTableWidget.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
self.strategyResultsUI.ResultsTabWidget.widget(0).layout().addWidget(self.tradeTableWidget)
def fillTradesUI(self, trades):
# Delete all previous results by settings row count to 0
self.tradeTableWidget.setRowCount(0)
for key, values in trades:
self.tradeTableWidget.setRowCount(len(values[0]))
row = 0
for trade in values[0]:
if not trade.isopen:
# Trade id
item = QtWidgets.QTableWidgetItem( str(trade.ref) )
item.setTextAlignment(QtCore.Qt.AlignCenter)
self.tradeTableWidget.setItem(row,0,item)
item = QtWidgets.QTableWidgetItem( "Buy" if trade.long else "Sell" )
item.setTextAlignment(QtCore.Qt.AlignCenter)
self.tradeTableWidget.setItem(row,1,item)
item = QtWidgets.QTableWidgetItem( str(bt.num2date(trade.dtopen)) )
item.setTextAlignment(QtCore.Qt.AlignCenter)
self.tradeTableWidget.setItem(row,2,item)
item = QtWidgets.QTableWidgetItem( str(bt.num2date(trade.dtclose)) )
item.setTextAlignment(QtCore.Qt.AlignCenter)
self.tradeTableWidget.setItem(row,3,item)
item = QtWidgets.QTableWidgetItem( str(trade.price) )
item.setTextAlignment(QtCore.Qt.AlignCenter)
self.tradeTableWidget.setItem(row,4,item)
item = QtWidgets.QTableWidgetItem( str(trade.commission) )
item.setTextAlignment(QtCore.Qt.AlignCenter)
self.tradeTableWidget.setItem(row,5,item)
item = QtWidgets.QTableWidgetItem( str(trade.pnlcomm) )
item.setTextAlignment(QtCore.Qt.AlignCenter)
self.tradeTableWidget.setItem(row,6,item)
row += 1
pass
#########
# Strategy results : Order tab
#########
def createOrdersUI(self):
self.orderTableWidget = QtWidgets.QTableWidget(self.dock_orders)
self.orderTableWidget.setColumnCount(8)
labels = [ "Order ref" , "Direction", "Date Open", "Date Close", "Execution Type", "Size", "Price", "Profit" ]
self.orderTableWidget.setHorizontalHeaderLabels( labels )
self.orderTableWidget.horizontalHeader().setStretchLastSection(True)
self.orderTableWidget.horizontalHeader().setSectionResizeMode(QtWidgets.QHeaderView.Stretch)
self.orderTableWidget.setStyleSheet("alternate-background-color: #AAAAAA;background-color: #CCCCCC;")
self.orderTableWidget.setAlternatingRowColors(True)
self.orderTableWidget.setSortingEnabled(True)
self.orderTableWidget.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)
self.orderTableWidget.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
self.dock_orders.addWidget(self.orderTableWidget)
pass
def fillOrdersUI(self, orders):
self.orderTableWidget.setRowCount(len(orders))
for i in range(len(orders)):
order = orders[i]
self.orderTableWidget.setItem(i,0,QtWidgets.QTableWidgetItem( str(order.ref ) ))
self.orderTableWidget.setItem(i,1,QtWidgets.QTableWidgetItem( "Buy" if order.isbuy() else "Sell"))
self.orderTableWidget.setItem(i,2,QtWidgets.QTableWidgetItem( str(bt.num2date(order.created.dt)) ))
self.orderTableWidget.setItem(i,3,QtWidgets.QTableWidgetItem( str(bt.num2date(order.executed.dt)) ))
self.orderTableWidget.setItem(i,4,QtWidgets.QTableWidgetItem( str(order.exectype) ))
self.orderTableWidget.setItem(i,5,QtWidgets.QTableWidgetItem( str(order.size ) ))
self.orderTableWidget.setItem(i,6,QtWidgets.QTableWidgetItem( str(order.price ) ))
self.orderTableWidget.setItem(i,7,QtWidgets.QTableWidgetItem( str(order.executed.pnl) ))
pass
#########
# UI parameters for testing stategies
#########
def createLoadDataFilesUI(self):
self.loadDataFileUI = loadDataFilesUI.LoadDataFilesUI(self.controller, self.win)
self.loadDataFileUI.hide()
pass
#########
# UI parameters for testing stategies
#########
def createStrategyTesterUI(self):
self.strategyTesterUI = strategyTesterUI.StrategyTesterUI(self.controller)
self.dock_strategyTester.addWidget(self.strategyTesterUI)
self.strategyResultsUI = strategyResultsUI.StrategyResultsUI(self.controller)
self.dock_strategyResultsUI.addWidget(self.strategyResultsUI)
#
self.strategyTesterUI.startingCashLE.setText(str(self.controller.cerebro.broker.cash))
validator = QtGui.QDoubleValidator(-9999999, 9999999, 6, self.strategyTesterUI.startingCashLE)
validator.setLocale(QtCore.QLocale("en"))
self.strategyTesterUI.startingCashLE.setValidator( validator )
self.strategyTesterUI.startingCashLE.textChanged.connect( self.controller.cashChanged )
pass
#########
# Strategy results : Summary UI
#########
def createSummaryUI(self):
self.summaryTableWidget = QtWidgets.QTableWidget(self.strategyResultsUI.SummaryGB)
self.summaryTableWidget.setColumnCount(2)
self.summaryTableWidget.verticalHeader().hide()
self.summaryTableWidget.horizontalHeader().hide()
self.summaryTableWidget.setShowGrid(False)
self.strategyResultsUI.SummaryGB.layout().addWidget(self.summaryTableWidget)
pass
def fillSummaryUI(self, brokerCash, brokerValue, tradeAnalysis):
# Delete all previous rows
self.summaryTableWidget.setRowCount(0)
self.summaryTableWidget.setRowCount(8)
self.summaryTableWidget.setItem(0,0,QtWidgets.QTableWidgetItem("Cash"))
self.summaryTableWidget.setItem(0,1,QtWidgets.QTableWidgetItem(str(brokerCash)))
self.summaryTableWidget.setItem(1,0,QtWidgets.QTableWidgetItem("Value"))
self.summaryTableWidget.setItem(1,1,QtWidgets.QTableWidgetItem(str(brokerValue)))
# if there are some trades
if len(tradeAnalysis) > 1:
self.summaryTableWidget.setItem(2,0,QtWidgets.QTableWidgetItem("Profit total"))
self.summaryTableWidget.setItem(2,1,QtWidgets.QTableWidgetItem(str(tradeAnalysis["pnl"]["net"]["total"])))
self.summaryTableWidget.setItem(3,0,QtWidgets.QTableWidgetItem("Number of trades"))
self.summaryTableWidget.setItem(3,1,QtWidgets.QTableWidgetItem(str(tradeAnalysis["total"]["total"])))
self.summaryTableWidget.setItem(4,0,QtWidgets.QTableWidgetItem("Won"))
self.summaryTableWidget.setItem(4,1,QtWidgets.QTableWidgetItem(str(tradeAnalysis["won"]['total'])))
self.summaryTableWidget.setItem(5,0,QtWidgets.QTableWidgetItem("Lost"))
self.summaryTableWidget.setItem(5,1,QtWidgets.QTableWidgetItem(str(tradeAnalysis["lost"]['total'])))
self.summaryTableWidget.setItem(6,0,QtWidgets.QTableWidgetItem("Long"))
self.summaryTableWidget.setItem(6,1,QtWidgets.QTableWidgetItem(str(tradeAnalysis["long"]["total"])))
self.summaryTableWidget.setItem(7,0,QtWidgets.QTableWidgetItem("Short"))
self.summaryTableWidget.setItem(7,1,QtWidgets.QTableWidgetItem(str(tradeAnalysis["short"]["total"])))
self.summaryTableWidget.horizontalHeader().setStretchLastSection(True)
self.summaryTableWidget.horizontalHeader().setSectionResizeMode(QtWidgets.QHeaderView.Stretch)
self.summaryTableWidget.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)
pass
#########
# Show all
#########
def show(self):
#self.fpltWindow[timeframe].show() # prepares plots when they're all setup
self.win.show()
self.app.exec_()
pass
#########
# Get strategy running progress bar
#########
def getProgressBar(self):
return self.strategyTesterUI.runningStratPB
#########
# Draw chart
#########
def drawChart(self, data, timeframe):
self.fpltWindow[timeframe].setChartData(data)
self.fpltWindow[timeframe].updateChart()
pass
#########
# Draw orders on chart
#########
def setOrders(self, orders):
for timeframe,fpltWindow in self.fpltWindow.items():
fpltWindow.drawOrders(orders)
pass
#########
# Draw PnL chart
# A Python expert could optimize this function waaaaay better
# But anyway... it works...
#########
def displayPnL(self, pnl_dataframe):
# draw charts
for timeframe, fpltwindow in self.fpltWindow.items():
fpltwindow.drawPnL(pnl_dataframe)
self.togglePnLWidget()
pass
#########
# Control panel overlay on top/above of the finplot window
#########
def createControlPanel(self):
self.controlPanel = QtWidgets.QWidget(self.dock_stackedCharts)
self.dock_stackedCharts.addWidget(self.controlPanel,0,0)
self.controlPanelLayout = QtWidgets.QHBoxLayout(self.controlPanel)
'''
panel.symbol = QtWidgets.QComboBox(panel)
[panel.symbol.addItem(i+'USDT') for i in 'BTC ETH XRP DOGE BNB SOL ADA LTC LINK DOT TRX BCH'.split()]
panel.symbol.setCurrentIndex(1)
layout.addWidget(panel.symbol, 0, 0)
panel.symbol.currentTextChanged.connect(change_asset)
layout.setColumnMinimumWidth(1, 30)
panel.interval = QtWidgets.QComboBox(panel)
[panel.interval.addItem(i) for i in '1d 4h 1h 30m 15m 5m 1m'.split()]
panel.interval.setCurrentIndex(6)
layout.addWidget(panel.interval, 0, 2)
panel.interval.currentTextChanged.connect(change_asset)
layout.setColumnMinimumWidth(3, 30)
'''
# Reset
self.ResetPB = QtWidgets.QPushButton(self.controlPanel)
self.ResetPB.setText("Reset")
self.ResetPB.setCheckable(True)
self.ResetPB.setMaximumWidth(100)
self.ResetPB.toggled.connect(self.resetChart)
self.controlPanelLayout.addWidget(self.ResetPB)
# Spacer
spacer = QtWidgets.QSpacerItem(50,20,QtWidgets.QSizePolicy.Minimum)
self.controlPanelLayout.addSpacerItem(spacer)
# SMA
self.SmaPB = QtWidgets.QPushButton(self.controlPanel)
self.SmaPB.setText("SMA")
self.SmaPB.setCheckable(True)
self.SmaPB.setMaximumWidth(100)
self.SmaPB.toggled.connect(self.addSma)
self.controlPanelLayout.addWidget(self.SmaPB)
# EMA
self.EmaPB = QtWidgets.QPushButton(self.controlPanel)
self.EmaPB.setText("EMA")
self.EmaPB.setCheckable(True)
self.EmaPB.setMaximumWidth(100)
self.EmaPB.toggled.connect(self.addEma)
self.controlPanelLayout.addWidget(self.EmaPB)
# Spacer
spacer = QtWidgets.QSpacerItem(50,20,QtWidgets.QSizePolicy.Minimum)
self.controlPanelLayout.addSpacerItem(spacer)
# RSI
self.RsiPB = QtWidgets.QPushButton(self.controlPanel)
self.RsiPB.setText("RSI")
self.RsiPB.setCheckable(True)
self.RsiPB.setMaximumWidth(100)
self.RsiPB.toggled.connect(self.toogleRsi)
self.controlPanelLayout.addWidget(self.RsiPB)
# Stochastic
self.StochasticPB = QtWidgets.QPushButton(self.controlPanel)
self.StochasticPB.setText("Stochastic")
self.StochasticPB.setCheckable(True)
self.StochasticPB.setMaximumWidth(100)
self.StochasticPB.toggled.connect(self.toogleStochastic)
self.controlPanelLayout.addWidget(self.StochasticPB)
# Stochastic RSI
self.StochasticRsiPB = QtWidgets.QPushButton(self.controlPanel)
self.StochasticRsiPB.setText("Stochastic RSI")
self.StochasticRsiPB.setCheckable(True)
self.StochasticRsiPB.setMaximumWidth(100)
self.StochasticRsiPB.toggled.connect(self.toogleStochasticRsi)
self.controlPanelLayout.addWidget(self.StochasticRsiPB)
# Ichimoku
self.IchimokuPB = QtWidgets.QPushButton(self.controlPanel)
self.IchimokuPB.setText("Ichimoku")
self.IchimokuPB.setCheckable(True)
self.IchimokuPB.setMaximumWidth(100)
self.IchimokuPB.toggled.connect(self.toogleIchimoku)
self.controlPanelLayout.addWidget(self.IchimokuPB)
# Spacer
spacer = QtWidgets.QSpacerItem(50,20,QtWidgets.QSizePolicy.Minimum)
self.controlPanelLayout.addSpacerItem(spacer)
# Dark mode
self.darkmodeCB = QtWidgets.QCheckBox(self.controlPanel)
self.darkmodeCB.setText('Dark mode')
self.darkmodeCB.toggled.connect(self.dark_mode_toggle)
self.darkmodeCB.setChecked(True)
self.controlPanelLayout.addWidget(self.darkmodeCB)
# Volumes
self.volumesCB = QtWidgets.QCheckBox(self.controlPanel)
self.volumesCB.setText('Volumes')
self.volumesCB.toggled.connect(self.volumes_toggle)
# init checked after connecting the slot
self.volumesCB.setChecked(False)
self.controlPanelLayout.addWidget(self.volumesCB)
# Spacer
self.controlPanelLayout.insertSpacerItem(0, QtWidgets.QSpacerItem( 0,0, hPolicy=QtWidgets.QSizePolicy.Expanding, vPolicy=QtWidgets.QSizePolicy.Preferred) )
return self.controlPanel
#########
# Toggle another UI theme
#########
def dark_mode_toggle(self):
for key,window in self.fpltWindow.items():
window.activateDarkMode(self.darkmodeCB.isChecked())
pass
##########
# INDICATORS
##########
def toogleTimeframe(self, timeframe):
if self.timeFramePB[timeframe].isChecked():
print("Display " + timeframe)
self.current_timeframe = timeframe
index = self.stackedCharts.indexOf( self.dockAreaTimeframes[timeframe])
self.stackedCharts.setCurrentIndex( index )
self.togglePnLWidget()
pass
def togglePnLWidget(self):
# hide all PnL windows & Show the good one
for tf, fpltWindow in self.fpltWindow.items():
if tf != self.current_timeframe:
fpltWindow.hidePnL()
else:
fpltWindow.showPnL()
def resetChart(self):
self.fpltWindow[self.current_timeframe].resetChart()
self.fpltWindow[self.current_timeframe].updateChart()
pass
# On chart indicators
def addSma(self):
# Show indicator parameter dialog
paramDialog = indicatorParametersUI.IndicatorParametersUI()
paramDialog.setWindowFlags(QtCore.Qt.CustomizeWindowHint)
paramDialog.setTitle("SMA Indicator parameters")
paramDialog.addParameter("SMA Period", 14)
paramDialog.addParameter("Plot width", 1)
paramDialog.addParameterColor("Plot color", "#FFFF00")
paramDialog.adjustSize()
if (paramDialog.exec() == QtWidgets.QDialog.Accepted ):
period = paramDialog.getValue("SMA Period")
width = paramDialog.getValue("Plot width")
qColor = paramDialog.getColorValue("Plot color")
self.fpltWindow[self.current_timeframe].drawSma( period, qColor, width)
pass
# On chart indicators
def addEma(self):
# Show indicator parameter dialog
paramDialog = indicatorParametersUI.IndicatorParametersUI()
paramDialog.setWindowFlags(QtCore.Qt.CustomizeWindowHint)
paramDialog.setTitle("EMA Indicator parameters")
paramDialog.addParameter("EMA Period", 9)
paramDialog.addParameter("Plot width", 1)
paramDialog.addParameterColor("Plot color", "#FFFF00")
paramDialog.adjustSize()
if (paramDialog.exec() == QtWidgets.QDialog.Accepted ):
period = paramDialog.getValue("EMA Period")
width = paramDialog.getValue("Plot width")
qColor = paramDialog.getColorValue("Plot color")
self.fpltWindow[self.current_timeframe].drawEma( period, qColor, width )
pass
# indicators in external windows
def toogleRsi(self):
if self.RsiPB.isChecked():
# Show indicator parameter dialog
paramDialog = indicatorParametersUI.IndicatorParametersUI()
paramDialog.setWindowFlags(QtCore.Qt.CustomizeWindowHint)
paramDialog.setTitle("RSI Indicator parameters")
paramDialog.addParameter("RSI Period", 14)
paramDialog.addParameterColor("Plot color", "#FFFF00")
paramDialog.adjustSize()
if (paramDialog.exec() == QtWidgets.QDialog.Accepted ):
period = paramDialog.getValue("RSI Period")
qColor = paramDialog.getColorValue("Plot color")
self.fpltWindow[self.current_timeframe].drawRsi( period, qColor )
self.dock_rsi[self.current_timeframe].show()
else:
# Cancel
self.RsiPB.setChecked(False)
self.dock_rsi[self.current_timeframe].hide()
else:
self.dock_rsi[self.current_timeframe].hide()
pass
def toogleStochastic(self):
if self.StochasticPB.isChecked():
# Show indicator parameter dialog
paramDialog = indicatorParametersUI.IndicatorParametersUI()
paramDialog.setWindowFlags(QtCore.Qt.CustomizeWindowHint)
paramDialog.setTitle("Stochastic Indicator parameters")
paramDialog.addParameter("Stochastic Period K", 14)
paramDialog.addParameter("Stochastic Smooth K", 3)
paramDialog.addParameter("Stochastic Smooth D", 3)
paramDialog.adjustSize()
if (paramDialog.exec() == QtWidgets.QDialog.Accepted ):
period = paramDialog.getValue("Stochastic Period K")
smooth_k = paramDialog.getValue("Stochastic Smooth K")
smooth_d = paramDialog.getValue("Stochastic Smooth D")
self.fpltWindow[self.current_timeframe].drawStochastic( period, smooth_k, smooth_d )
self.dock_stochastic[self.current_timeframe].show()
else:
# Cancel
self.StochasticPB.setChecked(False)
self.dock_stochastic[self.current_timeframe].hide()
else:
self.dock_stochastic[self.current_timeframe].hide()
pass
def toogleStochasticRsi(self):
if self.StochasticRsiPB.isChecked():
# Show indicator parameter dialog
paramDialog = indicatorParametersUI.IndicatorParametersUI()
paramDialog.setWindowFlags(QtCore.Qt.CustomizeWindowHint)
paramDialog.setTitle("Stochastic Indicator parameters")
paramDialog.addParameter("Stochastic Rsi Period K", 14)
paramDialog.addParameter("Stochastic Rsi Smooth K", 3)
paramDialog.addParameter("Stochastic Rsi Smooth D", 3)
paramDialog.adjustSize()
if (paramDialog.exec() == QtWidgets.QDialog.Accepted ):
period = paramDialog.getValue("Stochastic Rsi Period K")
smooth_k = paramDialog.getValue("Stochastic Rsi Smooth K")
smooth_d = paramDialog.getValue("Stochastic Rsi Smooth D")
self.fpltWindow[self.current_timeframe].drawStochasticRsi( period, smooth_k, smooth_d)
self.dock_stochasticRsi[self.current_timeframe].show()
else:
# Cancel
self.StochasticRsiPB.setChecked(False)
self.dock_stochasticRsi[self.current_timeframe].hide()
else:
self.dock_stochasticRsi[self.current_timeframe].hide()
pass
# On chart indicators
def toogleIchimoku(self):
self.fpltWindow[self.current_timeframe].setIndicator("Ichimoku", self.IchimokuPB.isChecked() )
pass
def volumes_toggle(self):
self.fpltWindow[self.current_timeframe].setIndicator("Volumes", self.volumesCB.isChecked())
pass
#########
# Obsolete (Strategy results : transactions tab)
#########
def createTransactionsUI(self, trades):
self.transactionTableWidget = QtWidgets.QTableWidget(self.dock_trades)
self.transactionTableWidget.setRowCount(len(trades))
self.transactionTableWidget.setColumnCount(4)
labels = [ "Date","Size", "Price", "Value" ]
self.transactionTableWidget.setHorizontalHeaderLabels( labels )
row = 0
for date,values in trades:
#for trade in trades:
self.transactionTableWidget.setItem(row,0,QtWidgets.QTableWidgetItem( date.strftime("%Y/%m/%d %H:%M:%S") ))
self.transactionTableWidget.setItem(row,1,QtWidgets.QTableWidgetItem( str(values[0][0]) ))
self.transactionTableWidget.setItem(row,2,QtWidgets.QTableWidgetItem( str(values[0][1]) ))
self.transactionTableWidget.setItem(row,3,QtWidgets.QTableWidgetItem( str(values[0][2]) ))
row += 1
self.transactionTableWidget.horizontalHeader().setStretchLastSection(True)
self.transactionTableWidget.horizontalHeader().setSectionResizeMode(QtWidgets.QHeaderView.Stretch)
self.transactionTableWidget.setStyleSheet("alternate-background-color: #AAAAAA;background-color: #CCCCCC;")
self.transactionTableWidget.setAlternatingRowColors(True)
self.transactionTableWidget.setSortingEnabled(True)
self.transactionTableWidget.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)
self.transactionTableWidget.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
self.dock_transactions.addWidget(self.transactionTableWidget)
pass
def fillStrategyParameters(self, strategy):
# Rest widget rows
for indexRow in range(self.strategyTesterUI.parametersLayout.rowCount()):
self.strategyTesterUI.parametersLayout.removeRow(0)
# Insert parameters
row = 0
for parameterName, parameterValue in strategy.params._getitems():
label = QtWidgets.QLabel(parameterName)
lineEdit = QtWidgets.QLineEdit(str(parameterValue))
# Save the parameter to inject it in the addStrategy method
self.controller.strategyParametersSave(parameterName, parameterValue)
# Connect the parameter changed slot
lineEdit.textChanged.connect(functools.partial(self.controller.strategyParametersChanged, lineEdit, parameterName, parameterValue))
self.strategyTesterUI.parametersLayout.addRow(label, lineEdit )
row = row + 1
pass
# Parameter box size
self.strategyTesterUI.parametersLayout.update()
self.strategyTesterUI.parametersScrollArea.adjustSize()
pass
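# Hedged sketch of how a controller object is expected to drive this class
# (the controller attributes used above -- cerebro, cashChanged,
# strategyParametersChanged -- are assumed to exist on it):
#
#   ui = UserInterface(controller)
#   ui.initialize()
#   ui.createChartDock("M15")
#   ui.drawChart(price_dataframe, "M15")
#   ui.show()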
| StarcoderdataPython |
3360499 | <gh_stars>1-10
"""
Setup the package.
"""
from setuptools import find_packages, setup
with open('README.md') as read_me:
long_description = read_me.read()
with open('requirements.txt') as f:
requirements = f.read().splitlines()
setup(
version='0.2.0',
name='eos-name-generator',
description='Python package for generating random names that follow `EOS` naming conventions',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/Alladin9393/name-generator',
license='MIT',
author='Alladin9393',
author_email='<EMAIL>',
packages=find_packages(),
install_requires=requirements,
entry_points={
'console_scripts': [
'eos-name-generator = cli.entrypoint:cli',
],
},
include_package_data=True,
classifiers=[
'Operating System :: OS Independent',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
)
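# Hedged usage note -- installing the package wires up the CLI declared in
# entry_points above:
#
#   pip install .
#   eos-name-generator --help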
| StarcoderdataPython |
3370215 | <filename>ibis/spark/tests/conftest.py
import pytest
from ibis.tests.all.conftest import get_spark_testing_client
@pytest.fixture(scope='session')
def client(data_directory):
pytest.importorskip('pyspark')
return get_spark_testing_client(data_directory)
@pytest.fixture(scope='session')
def simple(client):
return client.table('simple')
@pytest.fixture(scope='session')
def struct(client):
return client.table('struct')
@pytest.fixture(scope='session')
def nested_types(client):
return client.table('nested_types')
@pytest.fixture(scope='session')
def complicated(client):
return client.table('complicated')
| StarcoderdataPython |
18368 | <filename>charybde/parsers/dump_parser.py
from bz2 import BZ2File
from pathlib import Path
from queue import Queue
from threading import Thread
from typing import Any, Callable, Dict, Iterator, List, Tuple
from xmltodict import parse as xmltodict_parse
def parse(dump: Path) -> Iterator[Dict[str, Any]]:
def filter(path: List[Tuple[str, Dict[str, str]]], item: Dict[str, Any]) -> bool:
return (
len(path) == 2
and path[1][0] == "page"
and item["ns"] == "0"
and "redirect" not in item
)
queue: Queue = Queue()
thread = Thread(target=_parse_dump, args=(dump, queue, filter))
thread.start()
while True:
item = queue.get()
if item is None:
break
yield item
def _parse_dump(
dump: Path,
output_queue: Queue,
filter_callable: Callable[[List[Tuple[str, Dict[str, str]]], Dict[str, Any]], bool],
) -> None:
def handler(path: List[Tuple[str, Dict[str, str]]], item: Dict[str, Any]) -> bool:
if filter_callable(path, item):
output_queue.put_nowait(item)
return True
with BZ2File(str(dump)) as fh:
xmltodict_parse(fh, item_depth=2, item_callback=handler)
output_queue.put(None)
| StarcoderdataPython |
4810255 | <reponame>spsatuva/spsatuva<filename>img-resize.py
#
# Usage: this script converts all of the images located in _img/<folder>
# to responsive sizes and places them into assets/img/<folder>. Options
# beyond the default behavior are documented by running this script with
# the --help or -h options.
#
# Requirements: the pillow image manipulation package
# install by running in the command line:
# python -m pip install pillow
#
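# Example invocations (illustrative only; the flag values below are suggestions,
# and the defaults work with no arguments at all):
#   python img-resize.py
#   python img-resize.py --in-dir _img --out-dir assets/img --convert jpg --quality 70
#   python img-resize.py --clean --deep
#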
from __future__ import print_function
from __future__ import division
from builtins import input
import sys
import os
import shutil
from glob import glob
import argparse
is_win = 'win' in sys.platform
try:
from PIL import Image
except:
print("Cannot import PIL. Try:")
print("\npython -m pip install Pillow\n")
if is_win:
print("If you need permission to install, open cmd as administrator")
else:
print("If you need permission, instead run")
print("\nsudo python -m pip install Pillow\n")
print("If you run python with a command other than 'python', replace 'python' with the command you use above.\n")
exit(1)
class colors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
if is_win:
HEADER = ''
OKBLUE = ''
OKGREEN = ''
WARNING = ''
FAIL = ''
ENDC = ''
BOLD = ''
UNDERLINE = ''
resize_widths = {
"placehold" : 230,
"thumb" : 535,
"thumb@2x" : 535 * 2,
"xs" : 575,
"sm" : 767,
"md" : 991,
"lg" : 1999,
"self" : -1
}
def make_dir(save_dir):
if not os.path.exists(save_dir):
os.makedirs(save_dir)
def update(msg):
print(colors.OKBLUE + msg + colors.ENDC)
def warn(msg):
msg_lines = msg.split("\n")
print(" " + colors.WARNING + "WARNING: " + msg_lines[0] + colors.ENDC)
for msg_line in msg_lines[1:]:
print(" " + colors.WARNING + " " + msg_line + colors.ENDC)
def error(msg):
msg_lines = msg.split("\n")
print('\n' + colors.FAIL + "ERROR: " + msg_lines[0] + colors.ENDC)
for msg_line in msg_lines[1:]:
print(colors.FAIL + " " + msg_line + colors.ENDC)
print()
def save(img, fn, format, quality):
print(" " + colors.OKGREEN + "Saving: " + fn + colors.ENDC)
img.save(fn, format, optimize=True, quality=quality)
def find_images(folder):
# Find images in root folder
images = [i for i in glob(os.path.join(folder, "*.jpg")) +
glob(os.path.join(folder, "*.png")) if os.path.isfile(i)]
# Recursively find images in sub-folders
folders = [f for f in glob(os.path.join(folder, "*")) if os.path.isdir(f)]
for folder in folders:
images = images + find_images(folder)
return images
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--convert", type=str, action='store',
help="The format to convert all images to. Either JPEG or PNG. Default = None",
default=None)
parser.add_argument("--quality", type=int, action='store',
help="The quality to use with JPEG compression. Valid range is 1 - 100.",
default=70)
parser.add_argument("--out-dir", type=str, action='store', dest='out_dir',
help="The root directory to send resized pictures to.",
default=os.path.join("assets", "img"))
parser.add_argument("--in-dir", type=str, action='store', dest='in_dir',
help="The root directory to pull pictures from.",
default="_img")
parser.add_argument("--clean", action='store_true', default=False,
help="Include this to clean the output directory (assets/img) by default.")
parser.add_argument("--deep", action='store_true', default=False,
help="Include this to overwrite all images.")
args = parser.parse_args()
in_dir = args.in_dir
out_dir = args.out_dir
convert = args.convert
quality = args.quality
clean = args.clean
deep = args.deep
if clean:
if out_dir in ['.', './']:
warn("You are attempting to remove everything in your current directory!!!!")
elif out_dir == in_dir:
warn("You are attempting to remove the input directory!!!")
else:
warn("You are attempting to remove the directory {}".format(out_dir))
ans = input("Are you sure you want to do this? [yes / no]: ")
if ans.lower() in ['yes', 'y']:
# Delete only folders that exist in in_dir
in_dir_folder_names = [os.path.basename(f) for f in glob(os.path.join(in_dir, '*')) if os.path.isdir(f)]
out_dir_folders = [os.path.join(out_dir, f) for f in in_dir_folder_names]
for f in out_dir_folders:
warn("Removing folder: {}".format(f))
shutil.rmtree(f)
# Check for spelling error
if convert is None:
pass
elif convert.lower() in ['jpg', 'jpeg']:
convert = 'JPEG'
elif convert.lower() in ['png']:
convert = 'PNG'
elif convert is not None:
error("Value passed to --convert not valid. Use {} --help for more info.".format(sys.argv[0]))
exit()
# Range checking
if quality > 100 or quality < 1:
error("Value passed to --quality not valid. Use {} --help for more info.".format(sys.argv[0]))
exit()
# Find all images in the input directory
image_names = find_images(in_dir)
images = [Image.open(i) for i in image_names]
for name, img in zip(image_names, images):
folder_name = os.path.dirname(name)
if folder_name != in_dir:
folder_name = folder_name.replace(in_dir + '/', '')
        # Check if we are scanning the root directory, in which case we want
        # those files to go to the root of the output directory
if folder_name == in_dir or folder_name + '/' == in_dir:
save_dir = out_dir
else:
save_dir = os.path.join(out_dir, folder_name)
make_dir(save_dir)
update("\nProcessing {}".format(name))
if img.format not in ['JPEG', 'PNG', 'MPO']:
warn("Image is required to be either JPEG or PNG format.\nImage is detected to be of format: {}\nSkipping...".format(img.format))
continue
width = img.width
height = img.height
aspect_ratio = float(width) / float(height)
# Sort by image size small to large
key_val_list = [(key, resize_widths[key]) for key in sorted(resize_widths,
key=resize_widths.get)]
for width_name, resize_width in key_val_list:
filename, ext = os.path.splitext(name)
if convert is None:
ext = ext[1:]
elif img.format == 'JPEG' or convert == 'JPEG' or img.format == 'MPO':
ext = 'jpg'
elif img.format == 'PNG' or convert == 'PNG':
ext = 'png'
if width_name == 'self':
save_name = "{}.{}".format(os.path.join(save_dir, os.path.basename(filename)), ext)
else:
save_name = "{}_{}.{}".format(os.path.join(save_dir, os.path.basename(filename)),
width_name, ext)
# If we don't want to overwrite images, skip ones that already exist
if not deep and os.path.exists(save_name):
update("Skipping existing file...")
continue
# Compute necessary height given aspect ratio
resize_height = int(resize_width / aspect_ratio)
# Check if we are upscaling, in which case a new image isn't necessary
if resize_width >= width:
warn("Upscaling detected! Format {} requires an image of width at least {}px\nSaving at normal resolution...".format(width_name, resize_width))
resize_width = width
resize_height = height
if width_name == 'self':
resize_width = width
resize_height = height
resized_img = img.resize((resize_width, resize_height))
# Check if we need to convert the image before saving
if convert is not None:
if convert == 'JPEG':
resized_img = resized_img.convert('RGB')
elif convert == 'PNG':
resized_img = resized_img.convert('RGBA')
save(resized_img, save_name, convert, quality)
else:
save(resized_img, save_name, img.format, quality)
if __name__ == "__main__":
main()
| StarcoderdataPython |
1789142 | <gh_stars>10-100
"""Basic tests of the sambuilder module."""
import pytest
from samwell import sam
from samwell.sam.sambuilder import SamBuilder
def test_add_pair_all_fields() -> None:
builder = SamBuilder()
builder.add_pair(
name="q1",
chrom="chr1",
bases1="ACGTG",
quals1=[20, 21, 22, 23, 24],
start1=10000,
cigar1="5M",
strand1="+",
bases2="GCGC",
quals2=[30, 31, 32, 33],
start2=10200,
cigar2="4M",
strand2="-",
attrs={"aa": "Hello", "bb": 42}
)
recs = builder.to_sorted_list()
assert len(recs) == 2
for rec in recs:
assert rec.query_name == "q1"
assert rec.reference_name == "chr1"
assert rec.is_paired
assert abs(rec.template_length) == 204
assert rec.get_tag("aa") == "Hello"
assert rec.get_tag("bb") == 42
if rec.is_read1:
assert rec.reference_start == 10000
assert not rec.is_reverse
assert rec.query_sequence == "ACGTG"
assert list(rec.query_qualities) == [20, 21, 22, 23, 24]
assert rec.cigarstring == "5M"
else:
assert rec.reference_start == 10200
assert rec.is_reverse
assert rec.query_sequence == "GCGC"
assert list(rec.query_qualities) == [30, 31, 32, 33]
assert rec.cigarstring == "4M"
def test_add_pair_minimal() -> None:
builder = SamBuilder(r1_len=10, r2_len=5, base_quality=25)
r1, r2 = builder.add_pair(chrom="chr1", start1=1000, start2=1200)
assert r1.query_name == r2.query_name
assert r1.reference_name == r2.reference_name == "chr1"
assert r1.reference_start == 1000
assert r2.reference_start == 1200
assert not r1.is_reverse
assert r2.is_reverse
assert len(r1.query_sequence) == len(r1.query_qualities) == 10
assert len(r2.query_sequence) == len(r2.query_qualities) == 5
assert r1.cigarstring == "10M"
assert r2.cigarstring == "5M"
assert r1.get_tag("RG") == builder.rg_id()
assert r2.get_tag("RG") == builder.rg_id()
def test_add_pair_mix_and_match() -> None:
builder = SamBuilder(r1_len=100, r2_len=100, base_quality=30)
r1, r2 = builder.add_pair(chrom="chr1", start1=500, start2=700, cigar1="75M", cigar2="9M1I30M")
assert len(r1.query_sequence) == len(r1.query_qualities) == 75
assert len(r2.query_sequence) == len(r2.query_qualities) == 40
r1, r2 = builder.add_pair(chrom="chr1", start1=500, start2=700,
bases1="ACGTGCATGC", bases2="ACGAC")
assert len(r1.query_sequence) == len(r1.query_qualities) == 10
assert len(r2.query_sequence) == len(r2.query_qualities) == 5
assert r1.cigarstring == "10M"
assert r2.cigarstring == "5M"
r1, r2 = builder.add_pair(chrom="chr1", start1=500, start2=700,
quals1=[30] * 20, quals2=[20] * 10)
assert len(r1.query_sequence) == len(r1.query_qualities) == 20
assert len(r2.query_sequence) == len(r2.query_qualities) == 10
assert r1.cigarstring == "20M"
assert r2.cigarstring == "10M"
# Now what if we provide multiple values that are inconsistent
with pytest.raises(ValueError, match="not length compatible"):
builder.add_pair(chrom="chr1", start1=10, start2=99, bases1="ACGTG", cigar1="10M")
with pytest.raises(ValueError, match="not length compatible"):
builder.add_pair(chrom="chr1", start1=10, start2=99, bases1="ACGTG", quals1=[2, 2])
with pytest.raises(ValueError, match="not length compatible"):
builder.add_pair(chrom="chr1", start1=10, start2=99, quals1=[2, 2], cigar1="5M")
def test_unmapped_reads() -> None:
builder = SamBuilder()
r1, r2 = builder.add_pair(chrom="chr1", start1=1000)
assert not r1.is_unmapped
assert r1.mate_is_unmapped
assert r2.is_unmapped
assert not r2.mate_is_unmapped
for rec in r1, r2:
assert rec.reference_name == "chr1"
assert rec.reference_start == 1000
assert rec.next_reference_name == "chr1"
assert rec.next_reference_start == 1000
r1, r2 = builder.add_pair(chrom="chr1", start2=2000)
assert r1.is_unmapped
assert not r1.mate_is_unmapped
assert not r2.is_unmapped
assert r2.mate_is_unmapped
for rec in r1, r2:
assert rec.reference_name == "chr1"
assert rec.reference_start == 2000
assert rec.next_reference_name == "chr1"
assert rec.next_reference_start == 2000
r1, r2 = builder.add_pair(chrom=sam.NO_REF_NAME)
assert r1.is_unmapped
assert r1.mate_is_unmapped
assert r2.is_unmapped
assert r2.mate_is_unmapped
for rec in r1, r2:
assert rec.reference_name is None
assert rec.reference_start == sam.NO_REF_POS
assert rec.next_reference_name is None
assert rec.next_reference_start == sam.NO_REF_POS
def test_invalid_strand() -> None:
with pytest.raises(ValueError, match="strand"):
SamBuilder().add_pair(chrom="chr1", start1=100, start2=200, strand1="F", strand2="R")
def test_proper_pair() -> None:
builder = SamBuilder()
# Regular innies
for rec in builder.add_pair(chrom="chr1", start1=5000, start2=5200, strand1="+", strand2="-"):
assert rec.is_proper_pair
for rec in builder.add_pair(chrom="chr1", start1=5200, start2=5000, strand1="-", strand2="+"):
assert rec.is_proper_pair
# Outies
for rec in builder.add_pair(chrom="chr1", start1=5000, start2=5200, strand1="-", strand2="+"):
assert not rec.is_proper_pair
for rec in builder.add_pair(chrom="chr1", start1=5200, start2=5000, strand1="+", strand2="-"):
assert not rec.is_proper_pair
# Unmapped
for rec in builder.add_pair(chrom="chr1", start1=5000, strand1="+"):
assert not rec.is_proper_pair
for rec in builder.add_pair(chrom="chr1", start2=5000, strand2="+"):
assert not rec.is_proper_pair
for rec in builder.add_pair():
assert not rec.is_proper_pair
def test_sorting() -> None:
builder = SamBuilder()
builder.add_pair(chrom="chr1", start1=5000, start2=4700, strand1="-", strand2="+")
builder.add_pair(chrom="chr1", start1=4000, start2=4300)
builder.add_pair(chrom="chr5", start1=4000, start2=4300)
builder.add_pair(chrom="chr2", start1=4000, start2=4300)
last_ref_id = -1
last_start = -1
for rec in builder.to_sorted_list():
ref_id = rec.reference_id
start = rec.reference_start
assert ref_id > last_ref_id or (ref_id == last_ref_id and start >= last_start)
last_ref_id = ref_id
last_start = start
def test_custom_sd() -> None:
builder1 = SamBuilder()
builder2 = SamBuilder(sd=[{"SN": "hi", "LN": 999}, {"SN": "bye", "LN": 888}])
builder1.add_pair(chrom="chr1", start1=200, start2=400)
builder2.add_pair(chrom="hi", start1=200, start2=400)
with pytest.raises(ValueError, match="not a valid chromosome name"):
builder1.add_pair(chrom="hi", start1=200, start2=400)
with pytest.raises(ValueError, match="not a valid chromosome name"):
builder2.add_pair(chrom="chr1", start1=200, start2=400)
def test_custom_rg() -> None:
builder = SamBuilder(rg={"ID": "novel", "SM": "custom_rg", "LB": "foo", "PL": "ILLUMINA"})
for rec in builder.add_pair(chrom="chr1", start1=100, start2=200):
assert rec.get_tag("RG") == "novel"
| StarcoderdataPython |
1748370 | <gh_stars>0
# The MIT License (MIT)
# Copyright (c) 2021 by the xcube development team and contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import warnings
from typing import List
import click
from xcube.constants import LOG
from xcube.cli.common import (cli_option_quiet,
cli_option_verbosity,
configure_cli_output)
from xcube.webapi.defaults import (DEFAULT_PORT,
DEFAULT_ADDRESS,
DEFAULT_UPDATE_PERIOD,
DEFAULT_TILE_CACHE_SIZE,
DEFAULT_TILE_COMP_MODE)
__author__ = "<NAME> (Brockmann Consult GmbH)"
VIEWER_ENV_VAR = 'XCUBE_VIEWER_PATH'
CONFIG_ENV_VAR = 'XCUBE_SERVE_CONFIG_FILE'
BASE_ENV_VAR = 'XCUBE_SERVE_BASE_DIR'
@click.command(name='serve')
@click.argument('cube', nargs=-1)
@click.option('--address', '-A', metavar='ADDRESS', default=DEFAULT_ADDRESS,
help=f'Service address. Defaults to {DEFAULT_ADDRESS!r}.')
@click.option('--port', '-P', metavar='PORT', default=DEFAULT_PORT, type=int,
help=f'Port number where the service will listen on. Defaults to {DEFAULT_PORT}.')
@click.option('--prefix', metavar='PREFIX',
help='Service URL prefix. May contain template patterns such as "${version}" or "${name}". '
'For example "${name}/api/${version}". Will be used to prefix all API operation routes '
'and in any URLs returned by the service.')
@click.option('--revprefix', 'reverse_prefix', metavar='REVPREFIX',
help='Service reverse URL prefix. May contain template patterns such as "${version}" or "${name}". '
'For example "${name}/api/${version}". Defaults to PREFIX, if any. Will be used only in URLs '
'returned by the service e.g. the tile URLs returned by the WMTS service.')
@click.option('--update', '-u', 'update_period', metavar='PERIOD', type=float,
default=DEFAULT_UPDATE_PERIOD,
help='Service will update after given seconds of inactivity. Zero or a negative value will '
'disable update checks. '
f'Defaults to {DEFAULT_UPDATE_PERIOD!r}.')
@click.option('--styles', '-S', metavar='STYLES', default=None,
help='Color mapping styles for variables. '
'Used only, if one or more CUBE arguments are provided and CONFIG is not given. '
'Comma-separated list with elements of the form '
'<var>=(<vmin>,<vmax>) or <var>=(<vmin>,<vmax>,"<cmap>")')
@click.option('--config', '-c', 'config_file', metavar='CONFIG', default=None,
help='Use datasets configuration file CONFIG. '
'Cannot be used if CUBES are provided. '
'If not given and also CUBES are not provided, '
f'the configuration may be given by environment variable {CONFIG_ENV_VAR}.')
@click.option('--base-dir', '-b', 'base_dir', metavar='BASE_DIR', default=None,
help='Base directory used to resolve relative dataset paths in CONFIG '
'and relative CUBES paths. '
f'Defaults to value of environment variable {BASE_ENV_VAR}, if given, '
'otherwise defaults to the parent directory of CONFIG.')
@click.option('--tilecache', 'tile_cache_size', metavar='SIZE', default=DEFAULT_TILE_CACHE_SIZE,
help=f'In-memory tile cache size in bytes. '
f'Unit suffixes {"K"!r}, {"M"!r}, {"G"!r} may be used. '
f'Defaults to {DEFAULT_TILE_CACHE_SIZE!r}. '
f'The special value {"OFF"!r} disables tile caching.')
@click.option('--tilemode', 'tile_comp_mode', metavar='MODE', default=None, type=int,
help='Tile computation mode. '
'This is an internal option used to switch between different tile '
f'computation implementations. Defaults to {DEFAULT_TILE_COMP_MODE!r}.')
@click.option('--show', '-s', is_flag=True,
help=f"Run viewer app. Requires setting the environment variable {VIEWER_ENV_VAR} "
f"to a valid xcube-viewer deployment or build directory. "
f"Refer to https://github.com/dcs4cop/xcube-viewer for more information.")
@cli_option_quiet
@cli_option_verbosity
@click.option('--traceperf', 'trace_perf', is_flag=True,
help="Log extra performance diagnostics"
" using log level DEBUG.")
@click.option('--aws-prof', 'aws_prof', metavar='PROFILE',
help="To publish remote CUBEs, use AWS credentials from section "
"[PROFILE] found in ~/.aws/credentials.")
@click.option('--aws-env', 'aws_env', is_flag=True,
help="To publish remote CUBEs, use AWS credentials from environment "
"variables AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY")
def serve(cube: List[str],
address: str,
port: int,
prefix: str,
reverse_prefix: str,
update_period: float,
styles: str,
config_file: str,
base_dir: str,
tile_cache_size: str,
tile_comp_mode: int,
show: bool,
quiet: bool,
verbosity: int,
trace_perf: bool,
aws_prof: str,
aws_env: bool):
"""
Serve data cubes via web service.
    Serves data cubes via a RESTful API and an OGC WMTS 1.0 RESTful and KVP interface.
The RESTful API documentation can be found at https://app.swaggerhub.com/apis/bcdev/xcube-server.
"""
configure_cli_output(quiet=quiet, verbosity=verbosity)
from xcube.cli.common import parse_cli_kwargs
import os.path
if config_file and cube:
raise click.ClickException("CONFIG and CUBES cannot be used at the same time.")
if not config_file and not cube:
config_file = os.environ.get(CONFIG_ENV_VAR)
if styles:
styles = parse_cli_kwargs(styles, "STYLES")
if (aws_prof or aws_env) and not cube:
raise click.ClickException(
"AWS credentials are only valid in combination with given CUBE argument(s).")
if config_file and not os.path.isfile(config_file):
raise click.ClickException(
f"Configuration file not found: {config_file}")
base_dir = base_dir or os.environ.get(BASE_ENV_VAR,
config_file and os.path.dirname(
config_file)) or '.'
if not os.path.isdir(base_dir):
raise click.ClickException(f"Base directory not found: {base_dir}")
from xcube.version import version
from xcube.webapi.defaults import SERVER_NAME, SERVER_DESCRIPTION
LOG.info(f'{SERVER_NAME}: {SERVER_DESCRIPTION}, version {version}')
if show:
_run_viewer()
from xcube.webapi.app import new_application
application = new_application(route_prefix=prefix, base_dir=base_dir)
from xcube.webapi.service import Service
service = Service(application,
prefix=reverse_prefix or prefix,
port=port,
address=address,
cube_paths=cube,
styles=styles,
config_file=config_file,
base_dir=base_dir,
tile_cache_size=tile_cache_size,
tile_comp_mode=tile_comp_mode,
update_period=update_period,
trace_perf=trace_perf,
aws_prof=aws_prof,
aws_env=aws_env)
service.start()
return 0
def _run_viewer():
import subprocess
import threading
import webbrowser
import os
viewer_dir = os.environ.get(VIEWER_ENV_VAR)
if viewer_dir is None:
raise click.UsageError('Option "--show": '
f"In order to run the viewer, "
f"set environment variable {VIEWER_ENV_VAR} "
f"to a valid xcube-viewer deployment or build directory.")
if not os.path.isdir(viewer_dir):
raise click.UsageError('Option "--show": '
f"Viewer path set by environment variable {VIEWER_ENV_VAR} "
f"must be a directory: " + viewer_dir)
def _run():
LOG.info("Starting web server...")
with subprocess.Popen(['python', '-m', 'http.server', '--directory', viewer_dir],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE):
LOG.info("Opening viewer...")
webbrowser.open("http://localhost:8000/index.html")
threading.Thread(target=_run, name="xcube-viewer-runner").start()
def main(args=None):
serve.main(args=args)
if __name__ == '__main__':
main()
| StarcoderdataPython |
3378344 | <filename>utils/loader.py
from .dataset import NFDataset
from torch.utils.data import DataLoader
import torch
import numpy as np
def get_loaders(
train_dir,
train_maskdir,
val_dir,
val_maskdir,
img_shape,
batch_size,
cnn_mode,
num_workers=4,
pin_memory=True,
):
train_ds = NFDataset(
image_dir=train_dir,
mask_dir=train_maskdir,
img_shape=img_shape,
cnn_mode=cnn_mode,
)
train_loader = DataLoader(
train_ds,
batch_size=batch_size,
num_workers=num_workers,
pin_memory=pin_memory,
shuffle=True,
)
val_ds = NFDataset(
image_dir=val_dir,
mask_dir=val_maskdir,
img_shape=img_shape,
cnn_mode=cnn_mode,
)
val_loader = DataLoader(
val_ds,
batch_size=batch_size,
num_workers=num_workers,
pin_memory=pin_memory,
shuffle=False,
)
return train_loader, val_loader
def load_npy(npy_file, batch_size):
dataset = torch.from_numpy(np.load(npy_file))
data_loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size)
return data_loader
| StarcoderdataPython |
107350 | <reponame>CXPhoenix/python-cli-game
"""
This is a tutorial module for Text Game.
"""
import os
import sys
import time
class TextGame:
def __init__(self, playerName: str='player'):
self.playerName = playerName
self.scene = {}
self.clearCmd = ''
self.playerRecord = {}
# detect system
if sys.platform.startswith('win32'):
self.clearCmd = 'cls'
elif sys.platform.startswith('darwin'):
self.clearCmd = 'clear'
else:
raise Exception('This game just support Windows and MacOS system..')
""" about scene
Scene's format:
scene = {
sceneName:{
description -> str,
options -> str-list,
hiddenCondition -> list,
actions -> dict : {
option -> str : go_to_scene -> str
}
},
...
}
"""
def setScene(self, sceneName: str, description: str, options: list, hiddenCondition: list=[]) -> None:
self.scene[sceneName] = {
'description': description,
'options': options,
'hiddenCondition': hiddenCondition,
'actions': {},
}
def setMultiScenesByList(self, multiScene: list) -> None:
for ms in multiScene:
self.scene[ms.get('sceneName')] = {
'description': ms.get('description'),
'options': ms.get('options',[]),
'hiddenCondition': ms.get('hiddenCondition',[]),
'actions': ms.get('actions',{}),
}
def setMultiScenesByDict(self, multiScene: dict) -> None:
self.scene.update(multiScene)
def getFormatScene(self, sceneName: str, description: str, options: list, hiddenCondition: list=[], actions: dict={}) -> dict:
return {
'sceneName': sceneName,
'description': description,
'options': options,
'hiddenCondition': hiddenCondition,
'actions': actions
}
def getFormatMultiScene(self, *formatScene: dict) -> dict:
scene = {}
for fs in formatScene:
if fs.get('sceneName'):
scene.update({
fs.get('sceneName'):{
                        'description': fs.get('description', ''),
'options': fs.get('options', []),
'hiddenCondition': fs.get('hiddenCondition', []),
'actions': fs.get('actions', {}),
}
})
return scene
def getSceneInfo(self, sceneName: str) -> dict:
return self.scene.get(sceneName, {})
def getAllScenesName(self) -> list:
return [name for name in self.scene]
def setOptionAction(self, sceneName: str, option: str, action: str) -> None:
if self.scene.get(action):
self.scene[sceneName]['actions'].update({option:action})
def setOptionActions(self, sceneName: str, optionActions: dict) -> None:
        self.scene[sceneName]['actions'].update(optionActions)
def getSceneOptions(self, sceneName: str) -> list:
return self.scene.get(sceneName).get('options')
    def getSceneOptionAction(self, sceneName: str, option: str) -> str:
        if not self.scene.get(sceneName, {}).get('actions'):
            raise Exception(f"The scene {sceneName} has no actions detail..")
        return self.scene[sceneName]['actions'].get(option)
    def showScene(self, sceneName: str) -> str:
        sceneDesc = self.scene.get(sceneName).get('description')
        sceneOptions = self.scene.get(sceneName).get('options')
        if self.scene.get(sceneName).get('readed'):
            TextGame.showString(sceneDesc, 0)
        else:
            TextGame.showString(sceneDesc)
            self.scene[sceneName]['readed'] = True
        return TextGame.userInputOptions(sceneOptions)
""" about screen """
@staticmethod
def clearScreen() -> None:
if sys.platform.startswith('win32'):
os.system('cls')
elif sys.platform.startswith('darwin'):
os.system('clear')
else:
raise Exception('This game just support Windows and MacOS system..')
@staticmethod
def screenWait(milisecond: int=100) -> None:
time.sleep(milisecond*0.001)
@staticmethod
def showString(string: str, timer: int= 50) -> None:
s = ''
if timer > 0:
timer = timer*0.001
for ss in string:
TextGame.clearScreen()
s += ss
print(s)
time.sleep(timer)
else:
TextGame.clearScreen()
print(string)
@staticmethod
def userInputOptions(options: list) -> str:
for value, option in enumerate(options, start=1):
print(f"{value}. {option}")
try:
choose = input(": ")
            return options[int(choose) - 1]
except (IndexError, ValueError):
return choose
""" about player"""
def setPlayerName(self, playerName: str) -> None:
self.playerName = playerName
def getPlayerName(self) -> str:
return self.playerName
def setPlayerActionRecord(self, sceneName: str, action: str):
        self.playerRecord[sceneName] = action
 | StarcoderdataPython |
1735512 | <reponame>ezequieljsosa/sndg-web
import json
import os
from tqdm import tqdm
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "sndg.settings")
import django
django.setup()
from bioresources.models import BioProject, Assembly, Structure, Expression, ResourceRelation
mappings = {}
assembly_nuc = {}
dbmap = {"gds": Expression, "bioproject": BioProject, "nuccore": Assembly, "structure": Structure}
with open("/home/eze/workspace/genomica-arg/data/scopus_ncbi_assemblies.json2") as h:
lineas = [x for x in h.readlines() if x.strip()]
for l in tqdm(lineas):
data = json.loads(l)
if data["from_db"] == "nuccore":
assembly_nuc[data["from_id"]] = data["assembly_id"]
else:
t = Assembly.objects.get(external_ids__identifier=data["acc"])
dbmap = {"gds": Expression, "bioproject": BioProject, "nuccore": Assembly, "structure": Structure}
for p in dbmap[data["from_db"]].objects.filter(external_ids__identifier=data["from_id"],
external_ids__type="identifier"):
                ResourceRelation.objects.get_or_create(source=p, target=t, role=data["from_db"] + "_assembly")
# df = pd.read_table("/home/eze/workspace/genomica-arg/data/scopus_ncbi_links.txt", sep="\t",
# names=["doi", "pmc", "ncbi_id", "link", "ids"], index_col=False)
#
# for _, r in df.iterrows():
# k = r["link"].split("_")[1] + "|" + str(r["doi"] + "|" + str(r["ncbi_id"]))
# if r["link"].split("_")[1] == "nuccore":
# seqid = r["ids"].split(",")[0]
# if seqid in assembly_nuc:
# mappings[k] = assembly_nuc[seqid]
# else:
# print("seq sin assembly...")
#
# else:
# mappings[k] = r["ids"].split(",")[0]
#
#
# for k, v in tqdm(mappings.items()):
#
# db, doi, ncbi_id = k.split("|")
# if db == "sra":
# continue
# try:
# p = Publication.objects.get(doi=doi)
# except ObjectDoesNotExist:
# try:
# p = Publication.objects.get(pubmed_id=ncbi_id)
# except:
# print ((doi,ncbi_id) )
#
#
# rs = list(dbmap[db].objects.filter(external_ids__identifier=v, external_ids__type="identifier"))
# if not rs:
# print ("NO!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!1")
# for r in rs:
# ResourceRelation.objects.get_or_create(source =p, target =r, role=p.type + "_" + r.type)
| StarcoderdataPython |