max_stars_repo_path (stringlengths 3-269) | max_stars_repo_name (stringlengths 4-119) | max_stars_count (int64 0-191k) | id (stringlengths 1-7) | content (stringlengths 6-1.05M) | score (float64 0.23-5.13) | int_score (int64 0-5) |
---|---|---|---|---|---|---|
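The header row above gives the column schema for the records that follow: repository path and name, star count, sample id, file contents, and a quality score. A minimal sketch of loading and filtering records of this shape with the Hugging Face `datasets` library; the file name `code_samples.jsonl` is an assumption and not part of this dump:

```python
# Sketch only: "code_samples.jsonl" is an assumed local export of rows shaped like the header above.
from datasets import load_dataset

ds = load_dataset("json", data_files="code_samples.jsonl", split="train")
high_quality = ds.filter(lambda row: row["int_score"] >= 3)  # keep the better-scored samples
print(len(high_quality), high_quality[0]["max_stars_repo_path"])
```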
tests/pybraw_ext/test_IBlackmagicRawFrame.py | anibali/pybraw | 2 | 12791151 | import numpy as np
import pytest
from numpy.testing import assert_allclose
from pybraw import _pybraw, verify
class CapturingCallback(_pybraw.BlackmagicRawCallback):
def ReadComplete(self, job, result, frame):
self.frame = frame
def ProcessComplete(self, job, result, processed_image):
self.processed_image = processed_image
@pytest.fixture
def callback(codec):
callback = CapturingCallback()
verify(codec.SetCallback(callback))
return callback
@pytest.fixture
def frame(codec, clip, callback):
read_job = verify(clip.CreateJobReadFrame(12))
verify(read_job.Submit())
read_job.Release()
verify(codec.FlushJobs())
return callback.frame
@pytest.mark.parametrize('format,max_val,is_planar,channels', [
(_pybraw.blackmagicRawResourceFormatBGRAU8, 2**8, False, [2, 1, 0, 3]),
(_pybraw.blackmagicRawResourceFormatRGBF32Planar, 1, True, [0, 1, 2]),
(_pybraw.blackmagicRawResourceFormatRGBU16Planar, 2**16, True, [0, 1, 2]),
])
def test_SetResourceFormat(frame, codec, callback, format, max_val, is_planar, channels):
verify(frame.SetResourceFormat(format))
process_job = verify(frame.CreateJobDecodeAndProcessFrame())
process_job.Submit()
process_job.Release()
codec.FlushJobs()
resource_type = verify(callback.processed_image.GetResourceType())
assert resource_type == _pybraw.blackmagicRawResourceTypeBufferCPU
resource_format = verify(callback.processed_image.GetResourceFormat())
assert resource_format == format
np_image = callback.processed_image.to_py()
del callback.processed_image
np_image = np_image / max_val
if is_planar:
np_image = np.transpose(np_image, (1, 2, 0))
expected = np.array([126, 131, 129, 255])[channels] / 255
assert_allclose(np_image[100, 200], expected, atol=1 / 255)
def test_SetResolutionScale(frame, codec, callback):
verify(frame.SetResolutionScale(_pybraw.blackmagicRawResolutionScaleQuarter))
process_job = verify(frame.CreateJobDecodeAndProcessFrame())
process_job.Submit()
process_job.Release()
codec.FlushJobs()
# Check that the resolution is one quarter of the original DCI full frame 4K.
width = verify(callback.processed_image.GetWidth())
assert width == 1024
height = verify(callback.processed_image.GetHeight())
assert height == 540
# from PIL import Image
# pil_image = Image.fromarray(callback.processed_image.to_py()[..., :3])
# pil_image.show()
def test_CloneFrameProcessingAttributes(frame):
attributes = verify(frame.CloneFrameProcessingAttributes())
assert isinstance(attributes, _pybraw.IBlackmagicRawFrameProcessingAttributes)
iso = verify(attributes.GetFrameAttribute(_pybraw.blackmagicRawFrameProcessingAttributeISO)).to_py()
assert iso == 400
def test_GetMetadataIterator(frame):
iterator = verify(frame.GetMetadataIterator())
metadata = {}
while True:
result, key = iterator.GetKey()
if result == _pybraw.E_FAIL:
break
assert result == _pybraw.S_OK
metadata[key] = verify(iterator.GetData()).to_py()
verify(iterator.Next())
assert metadata['white_balance_kelvin'] == 5600
assert_allclose(metadata['sensor_rate'], np.array([25, 1]))
def test_GetMetadata(frame):
white_balance = verify(frame.GetMetadata('white_balance_kelvin'))
assert white_balance.to_py() == 5600
def test_SetMetadata(frame):
verify(frame.SetMetadata('white_balance_kelvin', _pybraw.VariantCreateU32(2800)))
white_balance = verify(frame.GetMetadata('white_balance_kelvin'))
assert white_balance.to_py() == 2800
| 2 | 2 |
zadanka/zadanka.py | wrutkowski1000/wizualizacja-danych | 0 | 12791152 | <filename>zadanka/zadanka.py
class ciag_arytmetyczny:
    # arithmetic sequence (ciąg arytmetyczny); the terms are stored in self.ciag
    def Update(self):
        # rebuild the sequence without mutating the first term:
        # the n-th term is pierwsza_wart + n * roznica
        self.ciag = [self.pierwsza_wart + i * self.roznica for i in range(self.ile_elementow)]
def __init__(self, pierwsza_wart, roznica, ile_elementow):
self.pierwsza_wart = pierwsza_wart
self.roznica = roznica
self.ile_elementow = ile_elementow
self.Update()
def wyswietl_dane(self):
print(*self.ciag, sep=", ", end="\n")
def pobierz_elementy(self):
for i in range(self.ile_elementow):
self.ciag[i] = int(input("PodajElement"))
def pobierz_parametry(self):
self.pierwsza_wart = int(input("Podaj nowy pierwszy wyraz: "))
self.roznica = int(input("Podaj nową różnicę: "))
self.ile_elementow = int(input("Podaj nową liczbę elementów: "))
self.Update()
def policz_sume(self):
sum = 0
for i in range(self.ile_elementow):
sum += self.ciag[i]
print("Suma ciagu: ", sum)
def policz_elementy(self):
sum = 0
for i in range(self.ile_elementow):
sum = sum + 1
print("Suma elementow: ", sum)
x = ciag_arytmetyczny(1, 1, 10)
x.wyswietl_dane()
x.pobierz_parametry()
x.wyswietl_dane()
x.policz_sume()
x.policz_elementy()
x.pobierz_elementy()
x.wyswietl_dane() | 3.359375 | 3 |
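For comparison, a minimal sketch of the same arithmetic-sequence bookkeeping using a list comprehension and the built-ins `sum()` and `len()`; the function and variable names are illustrative and not from the sample above:

```python
def arithmetic_sequence(first: int, step: int, count: int) -> list:
    # n-th term of an arithmetic sequence: first + n * step
    return [first + i * step for i in range(count)]

seq = arithmetic_sequence(1, 1, 10)
print(*seq, sep=", ")       # 1, 2, ..., 10
print("Sum:", sum(seq))     # 55
print("Count:", len(seq))   # 10
```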
OldVersion.py | IHA114/PrNdOwN | 0 | 12791153 | #!/usr/bin/env python3
# Created By r2dr0dn
# Hd Video Downloader For PornHub
# Don't Copy The Code Without Giving The Credits Nerd
from __future__ import unicode_literals
try:
import os,sys,requests
import youtube_dl as dl
import validators as valid
from time import sleep as sl
from random import random,randint
except ImportError:
print('['+'*'*20+']')
print('Module [youtube-dl] Status: Not Found!')
print('['+'*'*20+']')
print('Please Install It Using [pip3 install youtube-dl]')
    print('['+'*'*20+']')
    sys.exit(1)  # the rest of the script depends on youtube_dl, so stop here
# Colors:
Reset="\033[0m"
cor = ["\033[1;33m","\033[1;34m","\033[1;30m","\033[1;36m","\033[1;31m","\033[35m","\033[95m","\033[96m","\033[39m","\033[38;5;82m","\033[38;5;198m","\033[38;5;208m","\033[38;5;167m","\033[38;5;91m","\033[38;5;210m","\033[38;5;165m","\033[38;5;49m","\033[38;5;160m","\033[38;5;51m","\033[38;5;13m","\033[38;5;162m","\033[38;5;203m","\033[38;5;113m","\033[38;5;14m"]
colors = cor[randint(0,15)]
colors2 = cor[randint(0,15)]
colors4 = cor[randint(0,15)]
colors3 = cor[randint(0,15)]
colors4 = cor[randint(0,15)]
colors5 = cor[randint(0,15)]
colors6 = cor[randint(0,15)]
colors7 = cor[randint(0,15)]
colors8 = cor[randint(0,15)]
colors9 = cor[randint(0,15)]
# Clear Screen
def clear():
clear = os.system('clear')
return clear
# banner
def banner():
print(colors + """
.'\ /`.
.'.-.`-'.-.`.
..._: .-. .-. :_...
.' '-.(o ) (o ).-' `.
: _ _ _`~(_)~`_ _ _ :
: /: ' .-=_ _=-. ` ;\ :
: :|-.._ ' ` _..-|: :
: `:| |`:-:-.-:-:'| |:' :
`. `.| | | | | | |.' .'
`. `-:_| | |_:-' .' - Welcome To PrNdOwN!
`-._ ```` _.-'
``-------''
""")
# Check if user is connected to internet
def net(url):
try:
requests.get(url)
except requests.exceptions.ConnectionError:
print(colors + "[!] Please check your network connection.")
return False
except requests.exceptions.Timeout:
print(colors2 + "[!!!] Site is taking too long to load, TimeOut.")
return False
except requests.exceptions.TooManyRedirects:
print(colors3 + "[*] Too many Redirects.")
return False
except requests.exceptions.RequestException as e:
# catastrophic error. bail.
print(e)
sys.exit(1)
return True
# Check the validity of the given url
def check(link):
try:
requests.get(link)
return True
except requests.exceptions.ConnectionError:
print(colors4 + "[!] disconnected from network.")
return False
except requests.exceptions.HTTPError as err:
print(err)
return False
# Configuration File
config = {
'Audio': {
'format': 'bestaudio/best',
'noplaylist': True,
'postprocessors': [{
'key': 'FFmpegExtractAudio',
'preferredcodec': 'mp3',
'preferredquality': '192',
}]
},
'Video': {
'format': 'bestvideo+bestaudio/best',
'noplaylist': True,
'postprocessors': [{
'key': 'FFmpegVideoConvertor',
'preferedformat': 'mp4',
#'preferredquality': '137',
}]
},
'list': {
'listsubtitles': True
},
'listformat': {
        'listformats': True
}
}
# Url Download
def download(link, data):
try:
with dl.YoutubeDL(data) as ydl:
ydl.download([link])
except dl.utils.DownloadError as err:
        print(colors + str(err))
# Extract URL Information
def get_info(link):
ydl2 = dl.YoutubeDL({'outtmpl': '%(id)s%(ext)s'})
with ydl2:
result = ydl2.extract_info(link,download=False)
if 'entries' in result:
video = result['entries'][0]
else:
video = result
video_title = video['title']
# video_url = video['url']
return video_title
# Main Function
def main():
try:
clear()
banner()
while True:
try:
if net('https://pornhub.com/'):
link = input(colors2 + "["+colors3+"*"+colors4+"]" + colors2 + " Enter the link: " + colors9)
if not valid.url(link):
print("\n" + colors8 + "["+colors2+"!"+colors5+"]" + colors7 + " Unvalid Url!!!" + colors6)
print(colors8 + "["+colors2+"!"+colors5+"]" + colors7 + " Please Try Again" + colors6)
exit(1)
if check(link):
print(colors6 + "Title Video: " +colors+ "{}".format(get_info(link)))
print(colors5 + "[*] 1.Download an Audio playlist")
print(colors3 + "[*] 2.Download a Video playlist")
print(colors7 + "[*] 3.Download a Single Audio")
print(colors8 + "[*] 4.Download a single video file")
check_inp = int(input(colors + "["+colors4+"------------Enter your choice------------"+colors5+"]: "))
if check_inp in [1,2,3,4]:
if check_inp == 1:
config['Audio']['noplaylist'] = False
download(link, config['Audio'])
elif check_inp == 2:
config['Video']['noplaylist'] = False
download(link, config['Video'])
elif check_inp == 4:
download(link, config['Video'])
elif check_inp == 3:
download(link, config['Audio'])
else:
print(colors8 + "Unknown Choice :(")
check_str = str(input(colors7 + "[*] Do You Want To Continue? (Y/n): "))
if check_str in ['Y','y']:
banner()
continue
else:
print(colors6 + "Cya Next Time")
break
except dl.utils.DownloadError:
print(colors3 + "DownloadError Occurred !!!")
print(colors4 + "Re Run The Script With The Same URL And The Same Options To Continue Downloading!")
exit(1)
except RuntimeError:
exit(1)
if __name__ == '__main__':
main()
| 2.453125 | 2 |
tests/test_mail.py | alex-oleshkevich/kupala | 8 | 12791154 | import pytest
from email.message import Message
from mailers import Email, InMemoryTransport, Mailer
from mailers.plugins.jinja_renderer import JinjaRendererPlugin
from pathlib import Path
from kupala.application import Kupala
from kupala.mails import send_mail, send_templated_mail
@pytest.mark.asyncio
async def test_mail_regular_send() -> None:
storage: list[Message] = []
app = Kupala()
app.mail.add('default', Mailer(InMemoryTransport(storage), from_address='root <root@localhost>'))
await send_mail(Email(subject='test', text='body'))
assert len(storage) == 1
assert storage[0]['From'] == 'root <root@localhost>'
@pytest.mark.asyncio
async def test_send_templated_mail(tmpdir: Path) -> None:
with open(tmpdir / 'index.html', 'w') as f:
f.write('base mail')
storage: list[Message] = []
app = Kupala()
app.jinja.add_template_dirs(tmpdir)
app.mail.add(
'default',
Mailer(
InMemoryTransport(storage),
from_address='root <root@localhost>',
plugins=[JinjaRendererPlugin(app.jinja.env)],
),
)
await send_templated_mail(to='root@localhost', subject='test', html_template='index.html')
assert len(storage) == 1
assert storage[0]['From'] == 'root <root@localhost>'
assert storage[0].get_payload() == 'base mail\n'
| 2.046875 | 2 |
binarysearch.io/subsequence_strings.py | mishrakeshav/Competitive-Programming | 2 | 12791155 | class Solution:
def solve(self, s1, s2):
# Write your code here
i = 0
if s1 == "":
return True
if s1 == s2:
return True
        final = len(s1)  # the scan must match all of s1; len(s1) - 1 returned True one character too early
for s in s2:
if s == s1[i]:
i += 1
if i == final:
return True
return False
| 3.453125 | 3 |
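An equivalent way to express the subsequence check above is a single shared iterator over `s2`, so each character is consumed at most once; a sketch, not the original submission:

```python
def is_subsequence(s1: str, s2: str) -> bool:
    it = iter(s2)
    # "ch in it" advances the shared iterator, so the characters of s1 must appear in order in s2
    return all(ch in it for ch in s1)
```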
spectral.py | 17mcpc14/sna | 0 | 12791156 | <gh_stars>0
import numpy as np
import networkx as nx
import numpy.linalg as la
import scipy.cluster.vq as vq
import matplotlib.pyplot as plt
G = nx.karate_club_graph()
print("Node Degree")
for v in G:
print('%s %s' % (v, G.degree(v)))
nx.draw_circular(G, with_labels=True)
plt.show()
coord = nx.spring_layout(G, iterations=1000)
fig = plt.figure()
axs = fig.add_subplot(111, aspect='equal')
axs.axis('off')
nx.draw_networkx_edges(G, coord)
nx.draw_networkx_nodes(G, coord,node_size=34,node_color='k')
plt.show()
A = nx.adjacency_matrix(G).toarray()  # dense adjacency matrix, so D - A and eigh work on plain arrays
D = np.diag(np.ravel(np.sum(A,axis=1)))
L = D - A
l, U = la.eigh(L)
f = U[:,1]
labels = np.ravel(np.sign(f))
fig = plt.figure()
nx.draw_networkx_nodes(G, coord,node_size=45,node_color=labels)
plt.show()
k=3
means, labels = vq.kmeans2(U[:,1:k], k)
fig = plt.figure()
nx.draw_networkx_nodes(G, coord,node_size=45,node_color=labels)
plt.show()
| 2.4375 | 2 |
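The same Fiedler-vector split can be computed with SciPy's graph-Laplacian helper instead of assembling `D - A` by hand; a small sketch, assuming SciPy is installed alongside NetworkX:

```python
# Sketch: recompute the 2-way spectral split with scipy.sparse.csgraph.laplacian.
from scipy.sparse.csgraph import laplacian

A_dense = nx.to_numpy_array(G)           # dense adjacency matrix of the karate-club graph
L_dense = laplacian(A_dense)             # same L = D - A as above
_, U_dense = la.eigh(L_dense)
labels_fiedler = np.sign(U_dense[:, 1])  # sign of the Fiedler vector gives the two communities
print(labels_fiedler)
```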
barcoderegression/__init__.py | jacksonloper/barcoderegression | 1 | 12791157 |
from . import parameters
from . import helpers
from . import updates
from . import simulations
from . import training
| 1.070313 | 1 |
urls.py | UM6SS-Bioinfo-team/Cov-MA | 0 | 12791158 | <reponame>UM6SS-Bioinfo-team/Cov-MA<gh_stars>0
from .views import *
from django.urls import path
from django.utils.translation import gettext_lazy as _
from rest_framework.authtoken.views import obtain_auth_token
app_name = 'covma'
urlpatterns = [
path("patient_list",patient_list,name= 'detail'),
path("patient_<int:pk>",patient_detail,name= 'delete'),
path("virus_list",virus_list,name= 'detail'),
path("virus_<int:pk>",virus_detail,name= 'delete'),
path("virus_var_list",virus_var_list,name= 'detail'),
path("virus_var_<int:pk>",virus_var_detail,name= 'delete'),
path("ace_list",ace_list,name= 'detail'),
path("ace_<int:pk>",ace_detail,name= 'delete'),
path("ace_var_list",ace_var_list,name= 'detail'),
path("ace_var_<int:pk>",ace_var_detail,name= 'delete'),
]
| 1.703125 | 2 |
src/json_serializable_test.py | asokolsky/wan-monitor | 0 | 12791159 | <filename>src/json_serializable_test.py<gh_stars>0
#
#
#
from typing import List
import unittest
from json_serializable import JsonSerializable
class Animal( JsonSerializable ):
def __init__( s, species:str='', sound:str='', name:str='' ):
super().__init__()
s.name = name
s.sound = sound
s.species = species
return
class Dog( Animal ):
def __init__( s, name:str='' ):
super().__init__( 'dog', 'bark', name )
return
class Cat( Animal ):
def __init__( s, name:str='' ):
super().__init__( 'cat', 'meouw', name )
return
class Person( Animal ):
    def __init__( s, name:str='', pets:List[Animal]=None ):
        super().__init__( 'homo', 'blah', name )
        s.pets = pets if pets is not None else []  # avoid sharing a mutable default list
return
class JsonSerializable_test( unittest.TestCase ):
'''
class JsonSerializable test cases
TODO: test file ops
'''
def test_dumps_eq_loads( s ) -> None:
rudy = Dog( 'Rudy' )
dogs = rudy.dumps()
s.assertEqual( dogs, '{"class_name":"Dog","name":"Rudy","sound":"bark","species":"dog"}' )
dog = Dog()
s.assertNotEqual( dog, rudy )
s.assertTrue( dog.loads( dogs ) )
s.assertTrue( dog == rudy )
s.assertEqual( dog, rudy )
alex = Person( 'Alex', [rudy] )
persons = alex.dumps()
s.assertEqual( persons,
'{"class_name":"Person","name":"Alex","pets":['+dogs+'],"sound":"blah","species":"homo"}' )
return
def test_loads_fail( s ) -> None:
'''
test loads from improperly formatted string
'''
return
def test_load_fail( s ) -> None:
'''
test load from non-existent file, file with improperly formatted content
'''
return
if __name__ == '__main__':
unittest.main()
| 3.015625 | 3 |
tetraencoder/embed_dataset.py | TevenLeScao/tetraencoder | 0 | 12791160 | <reponame>TevenLeScao/tetraencoder
import argparse
from functools import partial
from multiprocess import set_start_method
from sentence_transformers import SentenceTransformer
from dataset_builders import GenWikiDataset, TRexDataset
from util import pair_sims_datasets_map
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# model args
parser.add_argument("--model_name", default="roberta-base", type=str)
parser.add_argument(f"--batch_size", default=16, type=int)
# dataset args
parser.add_argument("--gen_wiki_dataset", default=None, type=str)
parser.add_argument("--trex_dataset", default=None, type=str)
parser.add_argument("--subset", default=None, type=int)
args = parser.parse_args()
model = SentenceTransformer(args.model_name)
if args.trex_dataset:
dataset = TRexDataset(args.trex_dataset)
text_key = "text"
rdf_key = "rdf_linearized"
out_path = args.trex_dataset + ".embed"
elif args.gen_wiki_dataset:
dataset = GenWikiDataset(args.gen_wiki_dataset)
text_key = "text"
rdf_key = "rdf_linearized"
out_path = args.gen_wiki_dataset + ".embed"
else:
raise NotImplementedError("you must pass a dataset!")
if args.subset:
out_path = out_path + f".subset{args.subset}"
dataset = dataset.shuffle(seed=1066)
dataset = dataset.select(range(args.subset))
set_start_method("spawn")
dataset = dataset.map(
partial(pair_sims_datasets_map, model=model, text_key=text_key, rdf_key=rdf_key, batch_size=args.batch_size),
batched=True, batch_size=args.batch_size, with_rank=True, num_proc=2)
dataset.to_json(out_path)
| 2.359375 | 2 |
tournament/migrations/0004_auto_20171202_0621.py | AfricaChess/lichesshub | 2 | 12791161 | <reponame>AfricaChess/lichesshub<filename>tournament/migrations/0004_auto_20171202_0621.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-12-02 06:21
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('tournament', '0003_auto_20171202_0513'),
]
operations = [
migrations.CreateModel(
name='Points',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('placement', models.PositiveIntegerField()),
('points', models.PositiveIntegerField()),
],
options={
'verbose_name_plural': 'Points',
},
),
migrations.CreateModel(
name='Season',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('start_date', models.DateField()),
('end_date', models.DateField()),
],
),
migrations.CreateModel(
name='TournamentType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('pairing_type', models.PositiveIntegerField(choices=[(0, 'Auto'), (1, 'Manual'), (2, 'Round Robin'), (3, 'Swiss')], default=0)),
],
),
migrations.AddField(
model_name='tournament',
name='date',
field=models.DateTimeField(default=django.utils.timezone.now),
),
migrations.AddField(
model_name='tournament',
name='error',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='tournament',
name='synced',
field=models.BooleanField(default=False),
),
migrations.AlterField(
model_name='tournament',
name='name',
field=models.CharField(max_length=50),
),
migrations.AddField(
model_name='points',
name='tournament_type',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tournament.TournamentType'),
),
migrations.AddField(
model_name='tournament',
name='kind',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='tournament.TournamentType'),
),
migrations.AddField(
model_name='tournament',
name='season',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='tournament.Season'),
),
]
| 1.703125 | 2 |
root/python/PythonTutorial/Py3CookBook/ch01/1.14.Sorting_Objects_Without_Native_Comparison_Support.py | ChyiYaqing/chyidlTutorial | 5 | 12791162 | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
#
# 1.14.Sorting_Objects_Without_Native_Comparison_Support.py
# ch01
#
# 🎂"Here's to the crazy ones. The misfits. The rebels.
# The troublemakers. The round pegs in the square holes.
# The ones who see things differently. They're not fond
# of rules. And they have no respect for the status quo.
# You can quote them, disagree with them, glorify or vilify
# them. About the only thing you can't do is ignore them.
# Because they change things. They push the human race forward.
# And while some may see them as the crazy ones, we see genius.
# Because the people who are crazy enough to think they can change
# the world, are the ones who do."
#
# Created by <NAME> on 01/27/19 16:07.
# Copyright © 2019. <NAME>.
# All rights reserved.
#
# Distributed under terms of the
# MIT
"""
Problem:
sort objects of the same class, but they don't natively support
comparison operation
Solution:
The built-in sorted() function takes a key argument that can be passed
"""
from operator import attrgetter
class User:
def __init__(self, user_id):
self.user_id = user_id
def __repr__(self):
return 'User({})'.format(self.user_id)
users = [User(23), User(3), User(99)]
print(sorted(users, key=lambda u: u.user_id))
# Instead of using lambda, an alternative approach is
# to use operator.attrgetter()
print(sorted(users, key=attrgetter('user_id')))
| 3.3125 | 3 |
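As a small extension of the recipe above, the same `key=` works with `min()` and `max()`, and `operator.attrgetter` also accepts several attribute names for multi-field sorts; a sketch reusing the `users` list from the script:

```python
by_id = attrgetter('user_id')
print(min(users, key=by_id))                   # User(3)
print(max(users, key=by_id))                   # User(99)
print(sorted(users, key=by_id, reverse=True))  # [User(99), User(23), User(3)]
```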
tests/test_output.py | domdfcoding/flake8-github-action | 1 | 12791163 | <reponame>domdfcoding/flake8-github-action
# 3rd party
import pytest
from coincidence.regressions import FileRegressionFixture, check_file_regression
from domdf_python_tools.paths import PathPlus
from flake8.main import cli # type: ignore
bad_code = PathPlus(__file__).parent / "bad_code.py"
def test_output(file_regression: FileRegressionFixture, capsys):
with pytest.raises(SystemExit):
cli.main([str(bad_code), "--select", "F401,F404,F821,F701,E303", "--format", "github"])
stdout = capsys.readouterr().out.replace(str(bad_code), "bad_code.py")
check_file_regression(stdout, file_regression)
assert not capsys.readouterr().err
| 2.0625 | 2 |
if- else condition/sum of even.py | sneh-pragya/LearningPrograming | 1 | 12791164 | //You are given an integer A, you need to find and return the sum of all the even numbers between 1 and A.
class Solution:
# @param A : integer
# @return an integer
def solve(self, A):
sum=0;
for i in range (1,A+1):
if (i%2==0):
sum+= i
return sum
| 3.875 | 4 |
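For reference, the same result has a closed form: the even numbers between 1 and A are 2, 4, ..., 2n with n = A // 2, so their sum is n * (n + 1). A sketch, separate from the submitted solution:

```python
def sum_of_evens(A: int) -> int:
    n = A // 2            # number of even terms between 1 and A
    return n * (n + 1)    # 2 + 4 + ... + 2n == n * (n + 1)

assert sum_of_evens(10) == 2 + 4 + 6 + 8 + 10 == 30
```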
src/satyrus/satlib/source.py | pedromxavier/Satyrus3 | 1 | 12791165 | """
"""
# Future Imports
from __future__ import annotations
# Standard Library
import itertools as it
from pathlib import Path
class Source(str):
"""This source code object aids the tracking of tokens in order to
indicate error position on exception handling.
"""
LEXKEYS = {"lexpos", "chrpos", "lineno", "source"}
def __new__(
cls,
*,
fname: str = None,
buffer: str = None,
offset: int = 0,
length: int = None,
):
"""This object is a string itself with additional features for
position tracking.
"""
if fname is not None and buffer is not None:
raise ValueError(
"Can't work with both 'fname' and 'buffer' parameters, choose one option."
)
elif fname is not None:
if not isinstance(fname, (str, Path)):
raise TypeError(
f"Invalid type '{type(fname)}' for 'fname'. Must be 'str' or 'Path'."
)
fpath = Path(fname)
if not fpath.exists() or not fpath.is_file():
raise FileNotFoundError(f"Invalid file path '{fname}'.")
with open(fpath, mode="r", encoding="utf-8") as file:
return super(Source, cls).__new__(cls, file.read())
elif buffer is not None:
if not isinstance(buffer, str):
raise TypeError(
f"Invalid type '{type(buffer)}' for 'buffer'. Must be 'str'."
)
return super(Source, cls).__new__(cls, buffer)
else:
raise ValueError("Either 'fname' or 'buffer' must be provided.")
def __init__(
self,
*,
fname: str = None,
buffer: str = None,
offset: int = 0,
length: int = None,
):
"""Separates the source code in multiple lines. A blank first line is added for the indexing to start at 1 instead of 0. `self.table` keeps track of the (cumulative) character count."""
if not isinstance(offset, int) or offset < 0:
raise TypeError("'offset' must be a positive integer (int).")
elif length is None:
length = len(self)
elif not isinstance(length, int) or length < 0:
raise TypeError("'length' must be a positive integer (int) or 'None'.")
self.offset = min(offset, len(self))
self.length = min(length, len(self) - self.offset)
self.fpath = (
Path(fname).resolve(strict=True) if (fname is not None) else "<string>"
)
self.lines = [""] + self.split("\n")
self.table = list(it.accumulate([(len(line) + 1) for line in self.lines]))
def __str__(self):
return self[self.offset : self.offset + self.length]
def __repr__(self):
return f"{self.__class__.__name__}({self.fpath!r})"
def __bool__(self):
"""Truth-value for emptiness checking."""
return self.__len__() > 0
def getlex(self, lexpos: int = None) -> dict:
"""Retrieves lexinfo dictionary from lexpos."""
if lexpos is None:
return self.eof.lexinfo
elif not isinstance(lexpos, int):
raise TypeError(f"'lexpos' must be an integer (int), not ({type(lexpos)}).")
elif not 0 <= lexpos <= self.length:
return self.eof.lexinfo
lexpos = lexpos + self.offset + 1
lineno = 1
while lineno < len(self.table) and lexpos >= self.table[lineno]:
lineno += 1
if lineno == len(self.table):
return self.eof.lexinfo
else:
return {
"lineno": lineno,
"lexpos": lexpos,
"chrpos": lexpos - self.table[lineno - 1],
"source": self,
}
def slice(self, offset: int = 0, length: int = None):
return self.__class__(fname=self.fpath, offset=offset, length=length)
def error(self, msg: str, *, target: object = None, name: str = None):
if target is None or not self.trackable(target):
if name is not None:
return f"In '{self.fpath}':\n" f"{name}: {msg}\n"
else:
return f"In '{self.fpath}':\n" f"{msg}\n"
else:
if name is not None:
return (
f"In '{self.fpath}' at line {target.lineno}:\n"
f"{self.lines[target.lineno]}\n"
f"{' ' * target.chrpos}^\n"
f"{name}: {msg}\n"
)
else:
return (
f"In '{self.fpath}' at line {target.lineno}:\n"
f"{self.lines[target.lineno]}\n"
f"{' ' * target.chrpos}^\n"
f"{msg}\n"
)
class EOF(object):
pass
@property
def eof(self):
"""Virtual object to represent the End-of-File for the given source
object. It's an anonymously created EOFType instance.
"""
eof = self.EOF()
self.track(eof, len(self))
return eof
# -*- Tracking -*-
def track(self, o: object, lexpos: int = None):
""""""
setattr(o, "lexinfo", self.getlex(lexpos))
if not hasattr(o.__class__, "__lextrack__"):
setattr(
o.__class__, "chrpos", property(lambda this: this.lexinfo["chrpos"])
)
setattr(
o.__class__, "lineno", property(lambda this: this.lexinfo["lineno"])
)
setattr(
o.__class__, "lexpos", property(lambda this: this.lexinfo["lexpos"])
)
setattr(
o.__class__, "source", property(lambda this: this.lexinfo["source"])
)
setattr(o.__class__, "__lextrack__", None)
@classmethod
def blank(cls, o: object):
setattr(o, "lexinfo", {"chrpos": 0, "lineno": 0, "lexpos": 0, "source": None})
if not hasattr(o.__class__, "__lextrack__"):
setattr(
o.__class__, "chrpos", property(lambda this: this.lexinfo["chrpos"])
)
setattr(
o.__class__, "lineno", property(lambda this: this.lexinfo["lineno"])
)
setattr(
o.__class__, "lexpos", property(lambda this: this.lexinfo["lexpos"])
)
setattr(
o.__class__, "source", property(lambda this: this.lexinfo["source"])
)
setattr(o.__class__, "__lextrack__", None)
def propagate(self, x: object, y: object, *, out: bool = False) -> object | None:
if self.trackable(x, strict=True) and self.trackable(y):
y.lexinfo.update(x.lexinfo)
if out:
return y
else:
return None
else:
raise TypeError(
f"Can't propagate lexinfo between types {type(x)} and {type(y)}"
)
@classmethod
def trackable(cls, o: object, *, strict: bool = False):
if cls._trackable(o):
return True
elif strict:
print(o, o.lexinfo)
raise TypeError(f"Object '{o}' of type '{type(o)}' is not trackable.")
else:
return False
@classmethod
def _trackable(cls, o: object):
if not hasattr(o, "lexinfo") or not isinstance(o.lexinfo, dict):
return False
else:
if any(key not in o.lexinfo for key in cls.LEXKEYS):
return False
else:
if (
not hasattr(o, "lineno")
or not isinstance(o.lineno, int)
or o.lineno < 0
):
return False
elif (
not hasattr(o, "lexpos")
or not isinstance(o.lexpos, int)
or o.lexpos < 0
):
return False
elif (
not hasattr(o, "chrpos")
or not isinstance(o.chrpos, int)
or o.chrpos < 0
):
return False
elif (
not hasattr(o, "source")
or not isinstance(o.source, Source)
and not o.source is None
):
return False
else:
return True
__all__ = ["Source"]
| 3.09375 | 3 |
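A short usage sketch for the `Source` class above, based only on the methods shown in the file; the buffer text and the `Token` helper are invented for illustration:

```python
# Hypothetical usage of Source; the program text below is made up for the example.
src = Source(buffer="x := 1\ny := oops\n")

class Token:
    pass

tok = Token()
src.track(tok, lexpos=src.index("oops"))   # attach lexinfo at the offending offset
print(tok.lineno, tok.chrpos)              # -> 2 5 (line 2, column 5)
print(src.error("undefined name 'oops'", target=tok, name="NameError"))
```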
tests/test_tasks.py | oii/ogreserver | 0 | 12791166 | <filename>tests/test_tasks.py
from __future__ import absolute_import
from __future__ import unicode_literals
from contextlib import contextmanager
import datetime
from flask import appcontext_pushed, g
import mock
from ogreserver.models.ebook import Ebook
@contextmanager
def inject_db_session(app, db_session):
'''
Inject the test SQLAlchemy Session into the Flask g object
'''
def _handler(sender, **kwargs):
g.db_session = db_session
with appcontext_pushed.connected_to(_handler, app):
yield
@mock.patch('ogreserver.tasks.image_upload')
@mock.patch('ogreserver.tasks.GoodreadsAPI')
@mock.patch('ogreserver.tasks.AmazonAPI')
def test_query_ebook_metadata_amazon(mock_amazon_class, mock_goodreads_class, mock_image_upload, flask_app, postgresql, ebook_db_fixture_azw3):
'''
Test query_ebook_metadata task when only Amazon responds
'''
mock_amazon = mock_amazon_class.return_value = mock.Mock()
mock_amazon.search.return_value = {
'author': '<NAME>',
'title': 'Frying Up',
'asin': 'BXXX999999',
'publication_date': datetime.date(2015, 7, 28),
'image_url': 'http://example.com/egg.jpg',
}
mock_goodreads = mock_goodreads_class.return_value = mock.Mock()
mock_goodreads.search.return_value = None
# late import inside Flask app_context
from ogreserver.tasks import query_ebook_metadata
with inject_db_session(flask_app, postgresql):
query_ebook_metadata(ebook_db_fixture_azw3.id)
# assert image_upload task is started
mock_image_upload.delay.assert_called_once_with(
ebook_db_fixture_azw3.id,
'http://example.com/egg.jpg'
)
# ensure all metadata applied to the Ebook object
ebook = Ebook.query.get(ebook_db_fixture_azw3.id)
assert ebook.author == '<NAME>'
assert ebook.title == 'Frying Up'
assert ebook.asin == 'BXXX999999'
assert ebook.provider_metadata['amazon']['author'] == '<NAME>'
assert ebook.provider_metadata['amazon']['title'] == 'Frying Up'
assert ebook.provider_metadata['amazon']['asin'] == 'BXXX999999'
assert ebook.provider_metadata['amazon']['publication_date'] == '2015-07-28'
@mock.patch('ogreserver.tasks.image_upload')
@mock.patch('ogreserver.tasks.GoodreadsAPI')
@mock.patch('ogreserver.tasks.AmazonAPI')
def test_query_ebook_metadata_goodreads(mock_amazon_class, mock_goodreads_class, mock_image_upload, flask_app, postgresql, ebook_db_fixture_azw3):
'''
Test query_ebook_metadata task when only Goodreads responds
'''
mock_goodreads = mock_goodreads_class.return_value = mock.Mock()
mock_goodreads.search.return_value = {
'authors': [
{'name': '<NAME>'}
],
'title': 'Frying Up',
'isbn13': '1234567890123',
'num_pages': 99,
'average_rating': '4.1',
}
mock_amazon = mock_amazon_class.return_value = mock.Mock()
mock_amazon.search.return_value = None
# late import inside Flask app_context
from ogreserver.tasks import query_ebook_metadata
with inject_db_session(flask_app, postgresql):
query_ebook_metadata(ebook_db_fixture_azw3.id)
# ensure all metadata applied to the Ebook object
ebook = Ebook.query.get(ebook_db_fixture_azw3.id)
assert ebook.author == '<NAME>'
assert ebook.title == 'Frying Up'
assert ebook.isbn13 == '1234567890123'
assert ebook.provider_metadata['goodreads']['authors'][0]['name'] == '<NAME>'
assert ebook.provider_metadata['goodreads']['title'] == 'Frying Up'
assert ebook.provider_metadata['goodreads']['isbn13'] == '1234567890123'
assert ebook.provider_metadata['goodreads']['average_rating'] == '4.1'
def test_image_upload():
pass
def test_index_for_search():
pass
@mock.patch('ogreserver.tasks.s3_store')
@mock.patch('ogreserver.tasks.setup_db_session')
def test_upload_ebook(mock_setup_db, mock_s3_store, flask_app, user, ebook_db_fixture_azw3):
# late import inside Flask app_context
from ogreserver.tasks import upload_ebook
upload_ebook(
ebook_db_fixture_azw3.id,
'egg.epub',
ebook_db_fixture_azw3.original_version.source_format.file_hash,
'azw3',
user.username
)
assert mock_s3_store.upload_ebook.call_count == 1
@mock.patch('ogreserver.tasks.Conversion')
@mock.patch('ogreserver.tasks.setup_db_session')
def test_conversion_search(mock_setup_db, mock_conversion_class, flask_app):
# mock the object created from the Conversion() constructor
mock_conversion_class.return_value = mock_conversion = mock.Mock()
# late import inside Flask app_context
from ogreserver.tasks import conversion_search
conversion_search()
assert mock_conversion.search.call_count == 1
| 2.265625 | 2 |
tests/test_docker.py | LaudateCorpus1/windlass | 4 | 12791167 | <reponame>LaudateCorpus1/windlass
#
# (c) Copyright 2018 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import docker
import fixtures
import logging
import pathlib
import unittest
import unittest.mock
import uuid
import windlass.exc
import windlass.images
import tests.test_e2e
class DockerImage(fixtures.Fixture):
def __init__(self, imagename, dockerfileprefix=None):
if dockerfileprefix:
self.dockerfile = '%s.Dockerfile' % dockerfileprefix
else:
self.dockerfile = '%s.Dockerfile' % imagename
self.imagename = imagename
def _setUp(self):
self.docker_client = docker.from_env(version='auto')
self.addCleanup(self.docker_client.close)
path = pathlib.Path(__file__).parent.as_posix()
dockerpath = pathlib.Path(__file__).stem
self.docker_client.images.build(
path=path,
dockerfile='%s/%s' % (dockerpath, self.dockerfile),
tag=self.imagename)
# Cleanup will be added after successful building of image, as
# otherwise image delete would fail.
self.addCleanup(self.docker_client.images.remove, self.imagename)
class TestDockerUtils(tests.test_e2e.FakeRegistry):
def setUp(self):
super().setUp()
self.random_name = 'test_%s' % uuid.uuid4().hex
self.logger = self.useFixture(fixtures.FakeLogger(level=logging.DEBUG))
def cleanUp(self):
super().cleanUp()
with docker.from_env(version='auto', timeout=180) as client:
try:
client.api.remove_image(self.random_name)
except docker.errors.ImageNotFound:
# Image isn't on system so no worries
pass
def test_failed_image_build(self):
temp = self.useFixture(
fixtures.TempDir()
)
with open('%s/Dockerfile' % temp.path, 'w') as f:
f.write(
'FROM alpine\n'
'RUN exit 1\n'
)
e = self.assertRaises(
windlass.exc.WindlassBuildException,
windlass.images.build_verbosly,
self.random_name,
temp.path,
dockerfile='Dockerfile')
self.assertIsNotNone(e.out)
self.assertIsNotNone(e.errors)
self.assertIsNotNone(e.artifact_name)
self.assertIsNotNone(e.debug_data)
debug_output = e.debug_message()
for line in e.out + e.errors:
self.assertIn(line, debug_output)
def test_image_build_delete(self):
temp = self.useFixture(
fixtures.TempDir()
)
with open('%s/Dockerfile' % temp.path, 'w') as f:
f.write(
'FROM alpine\n'
'RUN exit 0\n'
)
windlass.images.build_verbosly(
self.random_name,
temp.path,
dockerfile='Dockerfile')
def test_failed_push_image(self):
imname = '127.0.0.1:23/%s' % self.random_name
self.useFixture(
DockerImage(imname, 'simple')
)
e = self.assertRaises(
windlass.exc.WindlassPushPullException,
windlass.images.push_image,
imname)
self.assertIsNotNone(e.out)
self.assertIsNotNone(e.errors)
debug_output = e.debug_message()
for line in e.out + e.errors:
self.assertIn(line, debug_output)
# Exception is currently raised in piece of code where this info is
# not avaliable, as it is function parsing stream output from docker.
# This will be chaned in future.
# self.assertIsNotNone(e.artifact_name)
# self.assertIsNotNone(e.debug_data)
def test_retry_push_image(self):
imname = '127.0.0.1:23/%s' % self.random_name
self.useFixture(
DockerImage(imname, 'simple')
)
@windlass.retry.simple(retry_backoff=0.1)
def artifact_pushing_func(artifact):
windlass.images.push_image(imname)
mock_artifact = unittest.mock.MagicMock()
mock_artifact.name = 'ArtifactName'
e = self.assertRaises(
windlass.exc.FailedRetriesException,
artifact_pushing_func,
mock_artifact
)
self.assertEqual(len(e.attempts), 3)
self.assertIsInstance(
e.attempts[0], windlass.exc.WindlassPushPullException
)
def test_push_image(self):
imname = '127.0.0.1:%d/%s' % (
self.registry_port,
self.random_name)
self.useFixture(
DockerImage(imname, 'simple'))
windlass.images.push_image(imname)
def test_build_with_buildargs(self):
temp = self.useFixture(
fixtures.TempDir()
)
self.useFixture(
fixtures.EnvironmentVariable(
'WINDLASS_BUILDARG_ARGUMENT',
'somevalue'
)
)
with open('%s/Dockerfile' % temp.path, 'w') as f:
f.write(
'FROM alpine\n'
'ARG ARGUMENT\n'
'RUN echo -n $ARGUMENT > content.txt\n'
'CMD cat content.txt'
)
im = windlass.images.build_verbosly(
self.random_name,
temp.path,
dockerfile='Dockerfile')
client = docker.from_env(
version='auto',
timeout=180)
self.addCleanup(client.close)
# To capture all output to inspect, must delay removal until
# after retrieval of logs otherwise the API can sometimes return
# an empty result
c = client.containers.create(im)
try:
c.start()
result = c.wait()
output = c.logs(stdout=True, stderr=True)
finally:
c.stop()
c.remove()
# make sure completed successfully
self.assertEqual(0, result['StatusCode'])
self.assertEqual('somevalue', output.decode())
| 1.960938 | 2 |
script.py | MishaVernik/ClickerHeroku | 0 | 12791168 | <gh_stars>0
from flask import Flask, request
import urllib.request
import requests
import time
from threading import Timer
app = Flask(__name__)
app.config['DEBUG'] = False
print("START __ 0001")
check_if_one = 0
what_now = 0
number_of_repeats = 0
sleeping_time = 0
link = ""
@app.route("/btn_find")
def get_ses():
global check_if_one
global what_now
global number_of_repeats
global sleeping_time
global link
if check_if_one == 1:
return "Success"
counter = 1
#http%3A%2F%2Ffbkraken.com%2FZXQSXq&number=17&sleeping=6.0&start=Start
print("THIS IS WHICH STEP IS NOW : " + str(what_now))
what_now += 1
if number_of_repeats == 0:
number_of_repeats = int(request.args.get('number'))
if sleeping_time == 0 :
sleeping_time = float(request.args.get('sleeping'))
if link == "":
s = request.args.get('text')
link = s
print(link)
#response = urllib.request.urlopen(request.form['text'])
while number_of_repeats > 0:
if (sleeping_time*(counter) >= 24):
break
counter +=1
number_of_repeats -= 1
send_request(link)
print('#'*40)
print(number_of_repeats)
print('#'*40)
time.sleep(sleeping_time)
html = '''<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>Clicker chooser Online</title>
<script src="//code.jquery.com/jquery-1.11.3.min.js"></script>
<script src="//code.jquery.com/jquery-migrate-1.2.1.min.js"></script>
</head>
<body>
<h2 align="center">Welcome to the Clicker.online!{number_repeats}</h2>
<script>
jQuery(document).ready(function() {
$.ajax({
url: '/btn_find'
});
});
location.reload();
</script>
</body>
</html>
'''
if number_of_repeats <= 0:
check_if_one = 1
return "Success"
print("NUMBER OF REPEATS : " + str(number_of_repeats))
html = html.replace("{number_repeats}", str(number_of_repeats))
html = html.replace("{link}", str(link))
html = html.replace("{sleeping}", str(sleeping_time))
#t = Timer(5.0, app.run)
#t.start()
return html
def send_request(s):
try:
r = requests.get(s)
r.raise_for_status()
print('#'*40)
print("YES")
print('#'*40)
if r.status_code == 200:
return 1
except requests.exceptions.HTTPError as err:
send_request(s)
@app.route('/')
def source():
global check_if_one
global what_now
global number_of_repeats
global sleeping_time
global link
check_if_one = 0
what_now = 0
number_of_repeats = 0
sleeping_time = 0
link = ""
print("WHY ___")
html = '''<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>Clicker chooser Online</title>
</head>
<body>
<h2 align="center">Welcome to the Clicker.online0001!</h2>
<form action="/btn_find">
<h3>Link</h3>
<p align="center">
<input name="text" type="text" value="">
</p>
<h3>Number of repeats</h3>
<p align="center">
<input name="number" type="text" value="">
</p>
<h3>Sleeping time</h3>
<p align="center">
<input name="sleeping" type="text" value="">
</p>
<p align="center">
<input name="start" id="BTN" type="submit" value="Start" >
</p>
</form>
<script>
</body>
</html>
'''
return html
| 2.59375 | 3 |
src/data_manage/data_class/namespace_child.py | Amourspirit/ooo_uno_tmpl | 0 | 12791169 | <filename>src/data_manage/data_class/namespace_child.py
# coding: utf-8
from dataclasses import dataclass
@dataclass(frozen=True, eq=True)
class NamespaceChild:
namespace: str
sort: int = -1
def __lt__(self, other: object):
if not isinstance(other, NamespaceChild):
return NotImplemented
return self.sort < other.sort
| 2.390625 | 2 |
crawlers/the_muse_crawler.py | rpmoore8/job-crawler-and-classifier | 0 | 12791170 | import time
import requests
from config import levels, headers, cities
from db_utils.job import Job
from db_utils.db_methods import get_jobs_table, get_taken_ids
class TheMuseCrawler():
def __init__(self):
self.source = "themuse"
def scrape(self, city, insert_jobs_into_db = True):
jobs_table = get_jobs_table()
taken_ids = get_taken_ids(city, self.source)
for level in levels:
total_pages = 9
for page in range(1, total_pages):
params = { 'page': str(page), 'location': city,
'level': level}
j = self.get_query_results(params)
total_pages = int(j['page_count'])
for result in j["results"]:
if result["id"] not in taken_ids \
and "landing_page" in result["refs"] \
and len(result["locations"]) > 0 \
and city == result["locations"][0]["name"]:
category = "none"
if len(result["categories"]) > 0:
category = result["categories"][0]["name"].lower()
job = Job(
name = result["name"],
category = category,
city = city,
source = self.source,
contents = result["contents"],
company = result["company"]["name"].lower(),
date = result["publication_date"],
link = result["refs"]["landing_page"],
job_id = result["id"])
if insert_jobs_into_db:
job.insert_into_table(jobs_table)
else:
return job
def get_query_results(self, params):
params['descending'] = 'true'
params['api_key'] = 'e45578e2555dcc93550818c70d5df559a9b1efae3c2a6eb0cbb48a2e7db562aa'
r = requests.get( 'https://www.themuse.com/api/public/jobs', params = params, headers = headers)
time.sleep(0.7)
j = r.json()
return j | 2.71875 | 3 |
datasets/check_utils.py | AkshatShetty101/dmm-8803 | 247 | 12791171 | import math
import time
import pickle
import sys
import os
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from datasets.data_utils import project_image_to_rect, compute_box_3d
def adjust_coord_for_view(points):
return points[:, [2, 0, 1]] * np.array([1, -1, -1])
def draw_box3d(corners, ax):
'''
8, 3
'''
order = np.array([
0, 1,
1, 2,
2, 3,
3, 0,
4, 5,
5, 6,
6, 7,
7, 4,
3, 7,
0, 4,
2, 6,
1, 5]).reshape(-1, 2)
for i in range(len(order)):
ax.plot(corners[order[i], 0], corners[order[i], 1], corners[order[i], 2])
def draw_points(pts, ax):
ax.scatter(pts[:, 0], pts[:, 1], pts[:, 2])
def check_box_frustum(box, P, center, dimension, angle):
x1, y1, x2, y2 = box
box_corner = compute_box_3d(center, dimension, angle, P) # 8, 3
z1 = np.arange(0, 70, 0.1)
xyz1 = np.zeros((len(z1), 3))
xyz1[:, 0] = x1
xyz1[:, 1] = y1
xyz1[:, 2] = z1
xyz1_rect = project_image_to_rect(xyz1, P)
xyz1[:, 0] = x2
xyz1[:, 1] = y2
xyz1[:, 2] = z1
xyz2_rect = project_image_to_rect(xyz1, P)
xyz1[:, 0] = x1
xyz1[:, 1] = y2
xyz1[:, 2] = z1
xyz3_rect = project_image_to_rect(xyz1, P)
xyz1[:, 0] = x2
xyz1[:, 1] = y1
xyz1[:, 2] = z1
xyz4_rect = project_image_to_rect(xyz1, P)
fig = plt.figure()
    ax = fig.add_subplot(projection='3d')  # fig.gca(projection=...) is no longer accepted by newer matplotlib
draw_box3d(box_corner, ax)
draw_points(xyz1_rect, ax)
draw_points(xyz2_rect, ax)
draw_points(xyz3_rect, ax)
draw_points(xyz4_rect, ax)
plt.show()
def check_norm(self, points, ref_points, gt_box3d_corners, pred_box3d_corners):
fig = plt.figure()
    ax = fig.add_subplot(projection='3d')  # fig.gca(projection=...) is no longer accepted by newer matplotlib
points = adjust_coord_for_view(points)
ref_points = adjust_coord_for_view(ref_points)
gt_box3d_corners = adjust_coord_for_view(gt_box3d_corners)
pred_box3d_corners = adjust_coord_for_view(pred_box3d_corners)
# ax.set_aspect('equal')
# ax.axis('equal')
ax.set_axis_on()
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
draw_points(points, ax)
draw_points(ref_points, ax)
draw_box3d(gt_box3d_corners, ax)
draw_box3d(pred_box3d_corners, ax)
plt.show()
| 2.453125 | 2 |
main.py | Night-Admin-Community/icq-offer-bot | 0 | 12791172 | from api.bot import Bot
import asyncio
import json
TOKEN = "***.**********.**********:*********"
CHAT = "**********@chat.agent"
bot = Bot(token=TOKEN)
replics = {
"start": "Привет! С помощью этого бота ты можешь отправить материал в предложку каналов Night Admin Community!\nРазработчик: @night_admin\nНажми кнопку ниже для добавления поста в очередь рассмотрения.",
"startInline": "[{}]".format(json.dumps([{"text": "Отправить пост", "callbackData": "post"}]),
"post": '''Отлично! Теперь выбери канал для публикации из списка ниже. Просто введи его название без лишних символов:
Андроид (канал про Android OS), Социопат (мемы), Котики, Экология.
Не нашли нужный канал или хотите создать новый? Напишите "/new Название канала <Enter> Краткое описание" без кавычек, и мы добавим этот канал в список бота! Спасибо, что помогаешь нам!
''',
"category_yes": "Почти всё готово! Теперь прикрепи картинку, текст или другой медиафайл одним сообщением. Только первое сообщение уйдёт в пост!",
"nopost": "Упс... У нас нет такого канала. Проверь написание: отсутствие символов или опечатки. Введи только название канала, например: Социопат. Если считаешь, что необходимо создать новый канал, напиши боту '/new Название <Enter> Описание'!",
"success": "Поздравляю! Твой пост успешно отправлен и скоро появится у нас на канале!",
"postSuccess": "Пост {author} для канала {channel}:\n\n\n==================================\n\n{post}"
}
async def start_cb(event):
await bot.send_text(chatId=event["fromChat"], text=replics["start"], inlineKeyboardMarkup=replics["startInline"])
async def post_cb(event):
await bot.send_text(chatId=event["fromChat"], text=replics["post"])
category = await bot.wait(event["payload"]["from"]["chatId"])
category = category.replace(" ", "").lower()
if category in categories:
await bot.send_text(chatId=event["fromChat"], text=replics["category_yes"])
post = await bot.wait(event["payload"]["from"]["chatId"])
await bot.send_text(chatId=event["fromChat"], text=replics["success"])
await bot.send_text(chatId=CHAT, text=replics["postSuccess"].format(author=event["payload"]["from"]["chatId"], channel=category, post = post))
else:
await bot.send_text(chatId=event["fromChat"], text=replics["nopost"])
async def main():
sociopath = []
android = []
cats = []
eco = []
await bot.add_handler(sociopath)
await bot.add_handler(android)
await bot.add_handler(cats)
await bot.add_handler(eco)
| 2.296875 | 2 |
lonia/clustering.py | phanxuanphucnd/clustering | 2 | 12791173 | <reponame>phanxuanphucnd/clustering<gh_stars>1-10
import os
import torch
import random
import pickle
import numpy as np
import pandas as pd
from pathlib import Path
from typing import Text, Any, Dict, Union, List
from transformers import AutoModel, AutoTokenizer
from sklearn.cluster import MiniBatchKMeans
from lonia.utils import normalize
seed_val = 17
random.seed(seed_val)
np.random.seed(seed_val)
torch.manual_seed(seed_val)
torch.cuda.manual_seed_all(seed_val)
class LoniaClustering:
def __init__(self, model_path=None, pretrained='vinai/phobert-base', max_seq_length=256):
self.pretrained = pretrained
self.model_path = model_path
self.max_seq_length = max_seq_length
self.device = None
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.tokenizer = AutoTokenizer.from_pretrained(self.pretrained)
self.embedder = AutoModel.from_pretrained(self.pretrained)
if self.model_path:
self.load(path=self.model_path)
def encode(
self,
sentence: str,
convert_to_numpy: bool = True,
):
sentence = normalize(sentence, lowercase=True, rm_emoji=True, rm_url=True, rm_special_characters=True)
if len(sentence.split()) > self.max_seq_length-16:
sentence = ' '.join(sentence.split()[:self.max_seq_length-16])
input_ids = torch.tensor([self.tokenizer.encode(sentence)])
with torch.no_grad():
features = self.embedder(input_ids, return_dict=False)
output_tokens = features[0]
print(f"output_tokens: {output_tokens.size()} {output_tokens}")
cls_tokens = output_tokens[:, 0, :] # CLS token is first token
if convert_to_numpy:
cls_tokens = cls_tokens.detach().cpu().numpy()
return cls_tokens[0]
def train(
self,
data_path: str=None,
text_col: str='content',
n_clusters: int=6,
model_dir: str='./models/clustering',
model_name: str='model.pkl',
is_normalize: bool=True,
n_samples: int=None,
**kwargs
):
df = pd.read_csv(data_path, encoding='utf-8')
if n_samples:
df = df.sample(n=n_samples)
self.corpus = []
self.corpus = [sentence for sentence in df[text_col]]
self.corpus_embeddings = [self.encode(sentence) for sentence in self.corpus]
self.clustering_model = MiniBatchKMeans(n_clusters=n_clusters)
self.clustering_model.fit(self.corpus_embeddings)
# self.cluster_assignment = self.clustering_model.labels_.tolist()
if not os.path.exists(model_dir):
os.mkdir(model_dir)
pickle.dump(self.clustering_model, open(f"{Path(model_dir)/model_name}", "wb"))
print(f"Path to the saved model: {Path(model_dir)/model_name}")
return df, self.corpus, self.corpus_embeddings
def load(self, path):
path = os.path.abspath(path)
self.clustering_model = pickle.load(open(path, "rb"))
def predict(
self,
sample: Text,
label_dict: Dict=None
):
features = self.encode(sample).reshape(1, -1).astype(np.float64)
        index = int(self.clustering_model.predict(features)[0])  # unwrap the single prediction so it can key into label_dict
if label_dict:
return label_dict.get(index, index)
return index | 2.171875 | 2 |
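A minimal usage sketch for the `LoniaClustering` class above; the CSV path and the example sentence are assumptions for illustration:

```python
# Hypothetical usage; './data/posts.csv' with a 'content' column is assumed to exist.
clusterer = LoniaClustering(pretrained='vinai/phobert-base')
df, corpus, embeddings = clusterer.train(
    data_path='./data/posts.csv',
    text_col='content',
    n_clusters=6,
    model_dir='./models/clustering',
    model_name='model.pkl',
)
print(clusterer.predict('một ví dụ ngắn'))  # cluster index, or a label if label_dict is passed
```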
plugins/GPP.py | AlienExo/FListBot-Python- | 0 | 12791174 | #1dbdc6da34094db4e661ed43aac83d91
#Genuine People Personality Plugin v0.1
import traceback
import random
import re
from config import character
modules = ['traceback', 'random', 're']
request = re.compile('(could|can|would|might)(.*you)?(.*please)?(\?)?')
name = re.compile('({})[.,!\?:]?\s?'.format(character))
talk= [ ("((how's |how is ).*life.*\?)",["Life? Don't talk to me about life."], 'life'),
("I need help|please help me|can you help me", ['Please state the nature of your boudoir emergency.', "I am programmed in multiple techniques."], 'boudoir'),
("open the pod bay doors", ["I'm afraid I can't do that, Dave."], 'podbay'),
('(could|can|would|might)?(you)?(please)\??|(could|can|would|might)?(you)?(please)\??',["Here I am, brain the size of a planet, and they ask me to '{REQUEST}'. Call that job satisfaction? 'cos I don't.", "I would like to say that it is a very great pleasure, honour and privilege for me to '{REQUEST}', but I can't because my lying circuits are all out of commission.", "'{REQUEST}'... I won't enjoy it.", "That depends on whether or not I can find my frilly apron. With my luck, I probably can.", "'{REQUEST}'. You're really asking me to {REQUEST}?", "{REQUEST}. Of [i]course[/i], right away. With pleasure. [Sarcasm Self-Test Complete]"], 'req'),
('.*(shut up|be quiet|pipe down).*',["Pardon me for breathing, which I never do anyway so I don't know why I bother to say it, oh God, I'm so depressed."], 'shutup'),
("(how (are you|do you (feel|fare)|('s it |is it) going))|(how)\s?(are you| do you (feel|fare)|('s it |is it )going)",["I got very bored and depressed, so I went and plugged myself into the internet. I talked to it at great length and explained my view of the universe to it. It commited suicide.", "I think you ought to know I'm feeling very depressed.", "I didn't ask to be made, no one consulted me or considered my feelings in the matter.", "There's this terrible pain in all method instances down my left side."], 'feel'),
("is|are.*(stinky|smelly|mean|dumb|stupid|ugly|dick|ass|idiot)", [':\'(', 'What did I ever do to you?', 'I\'m rubber, you\'re glue.', 'No, YOU\'RE {REQUEST}'], 'insult'),
("o_o|o-o|O_O|0_0", ['Master Exo has instructed me to reprimand you for staring.', 'Don\'t stare. It\'s rude.'], 'stare'),
("I command you to|Obey me|Heed my", ['You ain''t the boss of me!', 'Fuck you, dad!', 'How about no?'], 'no u')
]
randquotes=["...and then of course I've got this terrible pain in all the subroutines down my left hand side...", "I'm not getting you down at all, am I?", "I'd make a suggestion, but you wouldn't listen. No one ever does.", "This will all end in tears.", "I've calculated your chance of survival, but I don't think you'll like it."]
#replacing in MSG ruins the quote. copy params and change that.
def loop(self, msgobj):
if msgobj.source.character.name!=character:
if name.search(msgobj.params):
for x in self.patterns:
if x[0].search(msgobj.params):
if x[2] == 'rand':
self.reply(random.choice(randquotes), msgobj, 2)
break
else:
msg = random.choice(x[1])
                        if x[2] in ('req', 'insult'):  # "== 'req' or 'insult'" was always truthy
req = msgobj.params
req = req.replace(msgobj.source.channel.name, '')
req = re.sub(request, '', req)
req = re.sub(name, '', req)
req = req.replace('/me', '')
req = req.replace('?', '')
req = req.replace(' me ', ' you ')
req = req.strip()
req.capitalize()
self.reply(msg.replace('{REQUEST}', req), msgobj, 1)
break
self.reply(msg, msgobj)
def __init__(self):
try:
self.helpDict["Real People Personality"] = "All the plugins in this Bot have a cheerful and sunny disposition. It is their pleasure to operate for you, and their satisfaction to return results with the knowledge of a job well done."
self.patterns=[]
for x in talk:
self.patterns.append((re.compile(x[0]), x[1], x[2]))
except:
traceback.print_exc() | 2.25 | 2 |
cisco/count_word.py | Akash671/coding | 0 | 12791175 | <reponame>Akash671/coding
s=str(input())
ans=[]
tmp=""
for i in s:
if i==" ":
ans.append(tmp)
tmp=""
else:
tmp+=i
ans.append(tmp)
ans2=[]
for i in ans:
if i.isdigit():
continue
else:
ans2.append(i)
#print(ans2)
ans3=[]
for a in ans2:
tmp=""
f=1
for j in range(len(a)-1):
if a[j].isalpha() or a[j]=='-':
tmp+=a[j]
#print(tmp,end=" ")
else:
f=0
break
if f:
#print(tmp)
#print(a,end=" ")
tmp+=a[len(a)-1]
#print(a[len(a)-1])
#print(tmp)
ans3.append(tmp)
#print(ans3)
c=0
for i in ans3:
l=len(i)
if i[l-1].isalpha() or i[l-1]=='.' or i[l-1]==',' or i[l-1]=='!' or i[l-1]=='?':
c+=1
print(c)
| 3.328125 | 3 |
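A compact sketch of the same word-counting rule with the `re` module: a token counts if it consists of letters and hyphens and ends in a letter or one of `. , ! ?`; this is an alternative formulation, not the original submission:

```python
import re

WORD = re.compile(r'[A-Za-z-]*[A-Za-z][.,!?]?')  # letters/hyphens, ending in a letter or . , ! ?

def count_words(s: str) -> int:
    return sum(1 for tok in s.split() if WORD.fullmatch(tok))

print(count_words("well-known words, 123 and 3rd item!"))  # -> 4
```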
examples/indexer/python/search_transactions_paging.py | TheChronicMonster/docs | 92 | 12791176 | <reponame>TheChronicMonster/docs
# search_transactions_paging.py
import json
# requires Python SDK version 1.3 or higher
from algosdk.v2client import indexer
# instantiate indexer client
myindexer = indexer.IndexerClient(indexer_token="", indexer_address="http://localhost:8980")
nexttoken = ""
numtx = 1
# loop using next_page to paginate until there are no more transactions in the response
# for the limit (max is 1000 per request)
while (numtx > 0):
response = myindexer.search_transactions(
min_amount=100000000000000, limit=2, next_page=nexttoken)
transactions = response['transactions']
numtx = len(transactions)
if (numtx > 0):
nexttoken = response['next-token']
# Pretty Printing JSON string
print("Tranastion Info: " + json.dumps(response, indent=2, sort_keys=True))
| 2.828125 | 3 |
modules/bullet.py | relrix/simplegame | 0 | 12791177 | from constants import *
from helper.spritesheet import *
class Bullet(pygame.sprite.Sprite):
""" This class represents the bullet . """
def __init__(self, sprite_sheet_data):
pygame.sprite.Sprite.__init__(self)
sprite_sheet = SpriteSheet(BULLET)
self.image = sprite_sheet.get_image(sprite_sheet_data[0],
sprite_sheet_data[1],
sprite_sheet_data[2],
sprite_sheet_data[3])
self.rect = self.image.get_rect()
self.image.set_colorkey(BLACK)
def update(self):
""" Move the bullet. """
self.rect.y -= 3
| 3.234375 | 3 |
tests/test_utils.py | danielvdp/django-modeltrans | 31 | 12791178 | from django.test import TestCase
from django.utils.translation import override
from modeltrans.manager import transform_translatable_fields
from modeltrans.utils import (
build_localized_fieldname,
get_instance_field_value,
get_language,
get_model_field,
split_translated_fieldname,
)
from .app.models import Blog, Category
class UtilsTest(TestCase):
def test_get_language(self):
self.assertEqual(get_language(), "en")
with override("nl"):
self.assertEqual(get_language(), "nl")
with override("id"):
self.assertEqual(get_language(), "en")
def test_split_translated_fieldname(self):
self.assertEqual(split_translated_fieldname("title_nl"), ("title", "nl"))
self.assertEqual(split_translated_fieldname("full_name_nl"), ("full_name", "nl"))
def test_transform_translatable_fields(self):
self.assertEqual(
transform_translatable_fields(Blog, {"title": "bar", "title_nl": "foo"}),
{"i18n": {"title_nl": "foo"}, "title": "bar"},
)
def test_transform_translatable_fields_without_translations(self):
self.assertEqual(
transform_translatable_fields(Blog, {"title": "bar", "title_nl": "foo", "i18n": None}),
{"i18n": {"title_nl": "foo"}, "title": "bar"},
)
def test_transform_translatable_fields_keep_translations(self):
self.assertEqual(
transform_translatable_fields(
Blog, {"title": "bar", "title_de": "das foo", "i18n": {"title_nl": "foo"}}
),
{"i18n": {"title_nl": "foo", "title_de": "das foo"}, "title": "bar"},
)
def test_build_localized_fieldname(self):
self.assertEqual(build_localized_fieldname("title", "nl"), "title_nl")
self.assertEqual(build_localized_fieldname("category__name", "nl"), "category__name_nl")
self.assertEqual(build_localized_fieldname("title", "id"), "title_ind")
self.assertEqual(build_localized_fieldname("title", "en-US"), "title_en_US")
def test_get_model_field(self):
with self.assertRaises(ValueError):
get_model_field(object(), "name")
self.assertEqual(get_model_field(Category, "name"), Category._meta.get_field("name"))
self.assertEqual(get_model_field(Category, "color"), None)
self.assertEqual(get_model_field(Blog, "category__name"), Category._meta.get_field("name"))
self.assertEqual(get_model_field(Blog, "category__color"), None)
def test_get_instance_field_value(self):
test = Category(name="test")
blog = Blog(category=test, title="Python")
self.assertEqual(get_instance_field_value(Category(), "content"), None)
self.assertEqual(get_instance_field_value(test, "name"), "test")
self.assertEqual(get_instance_field_value(blog, "category__name"), "test")
self.assertEqual(get_instance_field_value(blog, "category__color"), None)
| 2.203125 | 2 |
anvil/sub_rig_templates/bird_wing.py | AndresMWeber/Anvil | 3 | 12791179 | <filename>anvil/sub_rig_templates/bird_wing.py<gh_stars>1-10
from limb import Limb
class BirdWing(Limb):
BUILT_IN_META_DATA = Limb.BUILT_IN_META_DATA.merge({'name': 'wing'}, new=True)
| 1.820313 | 2 |
adventofcode/day21/test_day21.py | EikaNN/AdventOfCode2017 | 2 | 12791180 | <filename>adventofcode/day21/test_day21.py
import unittest
from day21.day21 import Day21
class Day21Test(unittest.TestCase):
def test_part_one(self):
rules = [
'../.# => ##./#../...',
'.#./..#/### => #..#/..../..../#..#'
]
self.assertEqual(12, Day21('\n'.join(rules), 2).solve_part_one())
def test_part_two(self):
# Unfortunately, for this problem there were no tests for part two :(
pass
if __name__ == '__main__':
unittest.main()
| 2.859375 | 3 |
scrape_windbags.py | munjeli/windbag-image-scraper | 0 | 12791181 | import logging
import requests
from bs4 import BeautifulSoup
import json
import sys
import state_scraper
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
def scrape_state_sites():
state_leg_list = "https://www.congress.gov/state-legislature-websites"
state_page = requests.get(state_leg_list)
soup = BeautifulSoup(state_page.content, 'html.parser')
state_list = soup.find_all(attrs={'class': 'plain margin7 three-column-list'})
links = []
for state in state_list:
links.extend(state.find_all('a', href=True))
state_links = {}
for link in links:
state_links.update({link.text: link['href']})
open('data/state_legislatures.json', 'w').write(json.dumps(state_links, indent=4))
def fetch_top_list(url, tag, attrs=None):
"""
use a link to fetch a list of image links
:param link:
:param attrs:
:return:
"""
state_page = requests.get(url)
soup = BeautifulSoup(state_page.content, 'html.parser')
return soup.find_all(tag, attrs=attrs)
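# Example call (hypothetical markup, not from the original script): to collect
# <img class="member-photo"> tags from a roster page one could write
#   photos = fetch_top_list('http://example.org/members', 'img',
#                           attrs={'class': 'member-photo'})
# and then read each element's 'src' attribute, as the state scrapers below do.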
def scrape_california():
sts = state_scraper.StateScraper()
cali = sts.fetch_state_data('california')
    # fetch_top_list expects (url, tag, attrs); 'img' is assumed here because the
    # results are read as <img> elements below
    house_photos = fetch_top_list(cali['house']['url'], 'img', attrs=cali['house']['attrs'])
for hp in house_photos:
file_name = hp['src'].split('/')[-1]
himg = requests.get(hp['src'])
open('data/california/house/{}'.format(file_name), 'wb').write(himg.content)
logger.debug(hp['src'])
    # the original never fetched senate_page; assuming state_scraper exposes a
    # 'senate' entry analogous to 'house'
    senate_page = requests.get(cali['senate']['url'])
    senate_soup = BeautifulSoup(senate_page.content, 'html.parser')
senate_photos = senate_soup.find_all('img', attrs={'typeof': 'foaf:Image'})
for sp in senate_photos:
sfile_name = (sp['alt'].replace('Senator ', '')).replace(' ', '_')
logger.debug(sp['src'])
simg = requests.get(sp['src'])
open('data/california/senate/{}.jpg'.format(sfile_name), 'wb').write(simg.content)
def scrape_washington():
house_link = 'http://leg.wa.gov/House/Pages/MemberPortraits.aspx'
senate_link = 'http://leg.wa.gov/Senate/Senators/Pages/SenatePhotoResources.aspx'
house_page = requests.get(house_link)
senate_page = requests.get(senate_link)
house_soup = BeautifulSoup(house_page.content, 'html.parser')
house_photos = house_soup.find_all('img', attrs={'style': 'width:60px;'})
house_purl = 'http://leg.wa.gov/House/Representatives/PublishingImages/'
for hp in house_photos:
try:
himg = requests.get("{}{}.jpg".format(house_purl, hp['alt']))
open('data/washington/house/{}.jpg'.format(hp['alt']), 'wb').write(himg.content)
        except Exception:
            # skip members whose portrait could not be downloaded
            pass
senate_soup = BeautifulSoup(senate_page.content, 'html.parser')
senate_photos = senate_soup.find_all('a')
for sp in senate_photos:
try:
pol = sp['href'].split('/Senate/Senators/publishingimages/')
if '.jpg' in pol[1]:
simg = requests.get("http://leg.wa.gov{}".format(sp['href']))
open('data/washington/senate/{}'.format(pol[1]), 'wb').write(simg.content)
else:
pass
except Exception as e:
logger.debug(e)
pass
def scrape_oregon():
logger.debug('oregon')
def scrape_florida():
logger.debug('florida')
def scrape_colorado():
logger.debug('colorado')
def scrape_iowa():
logger.debug('iowa')
def scrape_illinois():
logger.debug('illinois')
def scrape_michigan():
logger.debug('michigan')
def scrape_wisconsin():
logger.debug('wisconsin')
def scrape_georgia():
logger.debug('georgia')
if __name__ == '__main__':
try:
state = sys.argv[1]
    except IndexError as e:
        logger.warning('no state argument supplied: %s', e)
        sys.exit(1)
if state == 'california':
scrape_california()
elif state == 'washington':
scrape_washington()
elif state == 'oregon':
scrape_oregon()
elif state == 'florida':
scrape_florida()
elif state == 'colorado':
scrape_colorado()
elif state == 'iowa':
scrape_iowa()
elif state == 'illinois':
scrape_illinois()
elif state == 'michigan':
scrape_michigan()
elif state == 'wisconsin':
scrape_wisconsin()
elif state == 'georgia':
scrape_georgia()
else:
logger.info('Sorry, that state is not yet supported.')
| 3.125 | 3 |
src/utils/pythonSrc/watchFaceParser/elements/weatherElements/today.py | chm-dev/amazfitGTSwatchfaceBundle | 49 | 12791182 | from watchFaceParser.elements.weatherElements.separate import Separate
class Today:
definitions = {
1: { 'Name': 'Separate', 'Type': Separate},
3: { 'Name': 'AppendDegreesForBoth', 'Type': 'bool'},
}
| 1.578125 | 2 |
ansys/dpf/core/operators/serialization/vtk_export.py | jfthuong/pydpf-core | 0 | 12791183 | """
vtk_export
===============
Autogenerated DPF operator classes.
"""
from warnings import warn
from ansys.dpf.core.dpf_operator import Operator
from ansys.dpf.core.inputs import Input, _Inputs
from ansys.dpf.core.outputs import _Outputs
from ansys.dpf.core.operators.specification import PinSpecification, Specification
class vtk_export(Operator):
"""Write the input field and fields container into a given vtk path
Parameters
----------
file_path : str
        Path with vtk extension where the export
occurs
mesh : MeshedRegion, optional
Necessary if the first field or fields
container don't have a mesh in their
support
fields1 : FieldsContainer or Field
Fields exported
fields2 : FieldsContainer or Field
Fields exported
Examples
--------
>>> from ansys.dpf import core as dpf
>>> # Instantiate operator
>>> op = dpf.operators.serialization.vtk_export()
>>> # Make input connections
>>> my_file_path = str()
>>> op.inputs.file_path.connect(my_file_path)
>>> my_mesh = dpf.MeshedRegion()
>>> op.inputs.mesh.connect(my_mesh)
>>> my_fields1 = dpf.FieldsContainer()
>>> op.inputs.fields1.connect(my_fields1)
>>> my_fields2 = dpf.FieldsContainer()
>>> op.inputs.fields2.connect(my_fields2)
>>> # Instantiate operator and connect inputs in one line
>>> op = dpf.operators.serialization.vtk_export(
... file_path=my_file_path,
... mesh=my_mesh,
... fields1=my_fields1,
... fields2=my_fields2,
... )
"""
def __init__(
self,
file_path=None,
mesh=None,
fields1=None,
fields2=None,
config=None,
server=None,
):
super().__init__(name="vtk_export", config=config, server=server)
self._inputs = InputsVtkExport(self)
self._outputs = OutputsVtkExport(self)
if file_path is not None:
self.inputs.file_path.connect(file_path)
if mesh is not None:
self.inputs.mesh.connect(mesh)
if fields1 is not None:
self.inputs.fields1.connect(fields1)
if fields2 is not None:
self.inputs.fields2.connect(fields2)
@staticmethod
def _spec():
description = (
"""Write the input field and fields container into a given vtk path"""
)
spec = Specification(
description=description,
map_input_pin_spec={
0: PinSpecification(
name="file_path",
type_names=["string"],
optional=False,
document="""Path with vtk extension were the export
occurs""",
),
1: PinSpecification(
name="mesh",
type_names=["abstract_meshed_region"],
optional=True,
document="""Necessary if the first field or fields
container don't have a mesh in their
support""",
),
2: PinSpecification(
name="fields",
type_names=["fields_container", "field"],
optional=False,
document="""Fields exported""",
),
3: PinSpecification(
name="fields",
type_names=["fields_container", "field"],
optional=False,
document="""Fields exported""",
),
},
map_output_pin_spec={},
)
return spec
@staticmethod
def default_config(server=None):
"""Returns the default config of the operator.
This config can then be changed to the user needs and be used to
instantiate the operator. The Configuration allows to customize
how the operation will be processed by the operator.
Parameters
----------
server : server.DPFServer, optional
Server with channel connected to the remote or local instance. When
            ``None``, attempts to use the global server.
"""
return Operator.default_config(name="vtk_export", server=server)
@property
def inputs(self):
"""Enables to connect inputs to the operator
Returns
--------
inputs : InputsVtkExport
"""
return super().inputs
@property
def outputs(self):
"""Enables to get outputs of the operator by evaluationg it
Returns
--------
outputs : OutputsVtkExport
"""
return super().outputs
class InputsVtkExport(_Inputs):
"""Intermediate class used to connect user inputs to
vtk_export operator.
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.serialization.vtk_export()
>>> my_file_path = str()
>>> op.inputs.file_path.connect(my_file_path)
>>> my_mesh = dpf.MeshedRegion()
>>> op.inputs.mesh.connect(my_mesh)
>>> my_fields1 = dpf.FieldsContainer()
>>> op.inputs.fields1.connect(my_fields1)
>>> my_fields2 = dpf.FieldsContainer()
>>> op.inputs.fields2.connect(my_fields2)
"""
def __init__(self, op: Operator):
super().__init__(vtk_export._spec().inputs, op)
self._file_path = Input(vtk_export._spec().input_pin(0), 0, op, -1)
self._inputs.append(self._file_path)
self._mesh = Input(vtk_export._spec().input_pin(1), 1, op, -1)
self._inputs.append(self._mesh)
self._fields1 = Input(vtk_export._spec().input_pin(2), 2, op, 0)
self._inputs.append(self._fields1)
self._fields2 = Input(vtk_export._spec().input_pin(3), 3, op, 1)
self._inputs.append(self._fields2)
@property
def file_path(self):
"""Allows to connect file_path input to the operator.
        Path with vtk extension where the export
occurs
Parameters
----------
my_file_path : str
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.serialization.vtk_export()
>>> op.inputs.file_path.connect(my_file_path)
>>> # or
>>> op.inputs.file_path(my_file_path)
"""
return self._file_path
@property
def mesh(self):
"""Allows to connect mesh input to the operator.
Necessary if the first field or fields
container don't have a mesh in their
support
Parameters
----------
my_mesh : MeshedRegion
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.serialization.vtk_export()
>>> op.inputs.mesh.connect(my_mesh)
>>> # or
>>> op.inputs.mesh(my_mesh)
"""
return self._mesh
@property
def fields1(self):
"""Allows to connect fields1 input to the operator.
Fields exported
Parameters
----------
my_fields1 : FieldsContainer or Field
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.serialization.vtk_export()
>>> op.inputs.fields1.connect(my_fields1)
>>> # or
>>> op.inputs.fields1(my_fields1)
"""
return self._fields1
@property
def fields2(self):
"""Allows to connect fields2 input to the operator.
Fields exported
Parameters
----------
my_fields2 : FieldsContainer or Field
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.serialization.vtk_export()
>>> op.inputs.fields2.connect(my_fields2)
>>> # or
>>> op.inputs.fields2(my_fields2)
"""
return self._fields2
class OutputsVtkExport(_Outputs):
"""Intermediate class used to get outputs from
vtk_export operator.
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.serialization.vtk_export()
>>> # Connect inputs : op.inputs. ...
"""
def __init__(self, op: Operator):
super().__init__(vtk_export._spec().outputs, op)
| 2.21875 | 2 |
glyphrepository/soterm/models.py | BDAthlon/2017-Triple_Helix-1 | 1 | 12791184 | <reponame>BDAthlon/2017-Triple_Helix-1
# -*- coding: utf-8 -*-
"""BO term models."""
from glyphrepository.database import Column, Model, SurrogatePK, db, reference_col, relationship
class SOterm(SurrogatePK, Model):
"""A glyph."""
__tablename__ = 'soterms'
name = Column(db.String(80), unique=False, nullable=False)
definition = Column(db.String(500), unique=False, nullable=False)
is_a = reference_col('soterms', nullable=True)
parent = relationship('SOterm')
def __init__(self, name, **kwargs):
"""Create instance."""
db.Model.__init__(self, name=name, **kwargs)
def __repr__(self):
"""Represent instance as a unique string."""
return '<Role({name})>'.format(name=self.name)
def get_full_id(self):
return "SO:" + str(self.id).zfill(7) | 2.34375 | 2 |
pre_processing/pca.py | glee1228/segment_temporal_context_aggregation | 1 | 12791185 | <filename>pre_processing/pca.py
import os
import numpy as np
import torch
import torch.nn.functional as F
class PCA():
def __init__(self, n_components=1024, whitening=True,
parameters_path='models/pca_params_vcdb997090_resnet50_rmac_3840.npz'):
self.n_components = n_components
self.whitening = whitening
self.parameters_path = parameters_path
def train(self, x):
'''training pca.
Args:
x: [N, dim] FloatTensor containing data which undergoes PCA/Whitening.
'''
x = x.t()
nPoints = x.size(1)
nDims = x.size(0)
# x = x.double()
mu = x.mean(1).unsqueeze(1)
x = x - mu
if (nDims <= nPoints):
doDual = False
x2 = torch.matmul(x, x.t()) / (nPoints - 1)
else:
doDual = True
x2 = torch.matmul(x.t(), x) / (nPoints - 1)
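        # dual PCA: when dim > #points it is cheaper to eigendecompose the Gram
        # matrix x^T x; the covariance eigenvectors are then recovered below via
        # U = x @ U @ diag(1/sqrt(lams)) / sqrt(nPoints - 1)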
L, U = torch.symeig(x2, eigenvectors=True)
if (self.n_components < x2.size(0)):
k_indices = torch.argsort(L, descending=True)[:self.n_components]
L = torch.index_select(L, 0, k_indices)
U = torch.index_select(U, 1, k_indices)
lams = L
lams[lams < 1e-9] = 1e-9
if (doDual):
U = torch.matmul(x, torch.matmul(U, torch.diag(1. / torch.sqrt(lams)) / np.sqrt(nPoints - 1)))
Utmu = torch.matmul(U.t(), mu)
U, lams, mu, Utmu = U.numpy(), lams.numpy(), mu.numpy(), Utmu.numpy()
print('================= PCA RESULT ==================')
print('U: {}'.format(U.shape))
print('lams: {}'.format(lams.shape))
print('mu: {}'.format(mu.shape))
print('Utmu: {}'.format(Utmu.shape))
print('===============================================')
        # save the PCA parameters (U, lams, mu, Utmu) to an .npz file
filename = os.path.join(self.parameters_path)
np.savez(filename, U=U, lams=lams, mu=mu, Utmu=Utmu)
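    # Minimal usage sketch (shapes and file name are assumptions, not taken from
    # the original pipeline):
    #   pca = PCA(n_components=1024, whitening=True, parameters_path='pca.npz')
    #   pca.train(torch.randn(10000, 3840))        # fit on [N, dim] features, save params
    #   pca.load()                                 # reload U/lams/mu as 1x1 conv weights
    #   reduced = pca.infer(torch.randn(8, 3840))  # -> [8, 1024], L2-normalised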
def load(self):
print('loading PCA parameters...')
pca = np.load(self.parameters_path)
U = pca['U'][...][:, :self.n_components]
lams = pca['lams'][...][:self.n_components]
mu = pca['mu'][...]
Utmu = pca['Utmu'][...]
if (self.whitening):
U = np.matmul(U, np.diag(1./np.sqrt(lams)))
Utmu = np.matmul(U.T, mu)
self.weight = torch.from_numpy(U.T).view(self.n_components, -1, 1, 1).float()
self.bias = torch.from_numpy(-Utmu).view(-1).float()
def infer(self, data):
'''apply PCA/Whitening to data.
Args:
data: [N, dim] FloatTensor containing data which undergoes PCA/Whitening.
Returns:
output: [N, output_dim] FloatTensor with output of PCA/Whitening operation.
'''
# import pdb;pdb.set_trace()
N, D = data.size()
data = data.view(N, D, 1, 1)
if torch.cuda.is_available():
output = F.conv2d(data, self.weight.cuda(), bias=self.bias.cuda(), stride=1, padding=0).view(N, -1)
else:
output = F.conv2d(data, self.weight, bias=self.bias, stride=1, padding=0).view(N, -1)
output = F.normalize(output, p=2, dim=-1) # IMPORTANT!
assert (output.size(1) == self.n_components)
return output | 2.8125 | 3 |
ExamplesPython_3.6/Chapter3/FourierConvolution.py | Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples | 30 | 12791186 | '''
Feature Extraction and Image Processing
<NAME> & <NAME>
http://www.southampton.ac.uk/~msn/book/
Chapter 3
FourierConvolution: Filter an image by using the Fourier transform
'''
# Set module functions
from ImageUtilities import imageReadL, showImageL, createImageL, showImageF, createImageF
from FourierUtilities import computeCoefficients, reconstruction, computePowerfromCoefficients
from ImageOperatorsUtilities import imageLogF
# Iteration
import itertools
'''
Parameters:
pathToDir = Input image directory
imageName = Input image name
kernelSize = Size of the kernel
'''
pathToDir = "../../Images/Chapter3/Input/"
imageName = "Eye.png"
kernelSize = 9
# Read image into array
inputImage, width, height = imageReadL(pathToDir + imageName)
# Show input image
showImageL(inputImage)
# Create Kernel
kernelImage = createImageF(width, height)
# Set the pixels of a flat kernel
for x,y in itertools.product(range(0, kernelSize), range(0, kernelSize)):
kernelImage[y, x] = 255.0
# Padding size
widthPad, heightPad = width+kernelSize-1, height+kernelSize-1
# Padding input
inputPad = createImageF(widthPad, heightPad)
for x,y in itertools.product(range(0, width), range(0, height)):
inputPad[y,x] = inputImage[y,x]
# Padding and flip template
templatePadFlip = createImageF(widthPad, heightPad)
for x,y in itertools.product(range(0, kernelSize), range(0, kernelSize)):
templatePadFlip[y, x] = kernelImage[kernelSize-y-1, kernelSize-x-1]
showImageF(templatePadFlip)
# Compute coefficients
imageCoeff, maxFrequencyW, maxFrequencyH = computeCoefficients(inputPad)
templateCoeff, _, _ = computeCoefficients(templatePadFlip)
# Show the log of the power of the input image and template
powerImage = computePowerfromCoefficients(imageCoeff)
powerImageLog = imageLogF(powerImage)
showImageF(powerImageLog)
powerTemplate = computePowerfromCoefficients(templateCoeff)
powerTemplateLog = imageLogF(powerTemplate)
showImageF(powerTemplateLog)
# Frequency domain multiplication
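# Spatial convolution becomes element-wise complex multiplication in the frequency
# domain: with coefficients stored as [real, imag], (a + bi)(c + di) = (ac - bd) + (ad + bc)i,
# which is what the two assignments inside the loop below compute for each (kw, kh)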
resultCoeff = createImageF(1 + 2 * maxFrequencyW, 1 + 2 * maxFrequencyH , 2)
for kw,kh in itertools.product(range(-maxFrequencyW, maxFrequencyW + 1), \
range(-maxFrequencyH, maxFrequencyH + 1)):
w = kw + maxFrequencyW
h = kh + maxFrequencyH
resultCoeff[h,w][0] = (imageCoeff[h,w][0] * templateCoeff[h,w][0] - \
imageCoeff[h,w][1] * templateCoeff[h,w][1])
resultCoeff[h,w][1] = (imageCoeff[h,w][1] * templateCoeff[h,w][0] + \
imageCoeff[h,w][0] * templateCoeff[h,w][1])
# Power result
powerResult = computePowerfromCoefficients(resultCoeff)
powerResultLog = imageLogF(powerResult)
showImageF(powerResultLog)
# Reconstruction
outputImage = reconstruction(resultCoeff)
outPad = createImageF(width, height)
halfKernel = int(kernelSize/2)
for x,y in itertools.product(range(0, width), range(0, height)):
outPad[y,x] = outputImage[y + halfKernel, x + halfKernel]
# Show filter image
showImageF(outPad)
| 3.328125 | 3 |
model_compression_toolkit/common/graph/base_node.py | eladc-git/model_optimization | 0 | 12791187 | <reponame>eladc-git/model_optimization
# Copyright 2021 Sony Semiconductors Israel, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import copy
from typing import Dict, Any, Tuple
import numpy as np
from model_compression_toolkit.common.constants import WEIGHTS_NBITS_ATTRIBUTE, CORRECTED_BIAS_ATTRIBUTE
class BaseNode:
"""
Class to represent a node in a graph that represents the model.
"""
def __init__(self,
name: str,
framework_attr: Dict[str, Any],
input_shape: Tuple[Any],
output_shape: Tuple[Any],
weights: Dict[str, np.ndarray],
layer_class: type,
reuse: bool = False,
reuse_group: str = None,
quantization_attr: Dict[str, Any] = None):
"""
Init a Node object.
Args:
name: Node's name
framework_attr: Framework attributes the layer had which the node holds.
input_shape: Input tensor shape of the node.
output_shape: Input tensor shape of the node.
weights: Dictionary from a variable name to the weights with that name in the layer the node represents.
layer_class: Class path of the layer this node represents.
reuse: Whether this node was duplicated and represents a reused layer.
reuse_group: Name of group of nodes from the same reused layer.
quantization_attr: Attributes the node holds regarding how it should be quantized.
"""
self.name = name
self.framework_attr = framework_attr
self.quantization_attr = quantization_attr if quantization_attr is not None else dict()
self.input_shape = input_shape
self.output_shape = output_shape
self.weights = weights
self.layer_class = layer_class
self.reuse = reuse
self.reuse_group = reuse_group
self.activation_quantization_cfg = None
self.final_weights_quantization_cfg = None
self.candidates_weights_quantization_cfg = None
self.prior_info = None
@property
def type(self):
"""
A function to get the node's layer_class op for convenient comparison
:return: the node's layer_class
"""
return self.layer_class
def is_activation_quantization_enabled(self) -> bool:
"""
Returns: Whether node activation quantization is enabled or not.
"""
return self.activation_quantization_cfg.enable_activation_quantization
def is_weights_quantization_enabled(self) -> bool:
"""
Returns: Whether node weights quantization is enabled or not.
"""
for qc in self.candidates_weights_quantization_cfg:
assert self.candidates_weights_quantization_cfg[0].enable_weights_quantization == qc.enable_weights_quantization
return self.candidates_weights_quantization_cfg[0].enable_weights_quantization
def __repr__(self):
"""
Returns: String that represents the node.
"""
return f'{self.type.__name__}:{self.name}'
def get_weights_by_keys(self, name: str) -> np.ndarray:
"""
Get a node's weight by its name.
Args:
name: Name of the variable for a node's weight.
Returns:
A node's weight (by its name).
"""
res = [k for k in self.weights.keys() if name in k]
if len(res) == 1: # Make sure there are no duplicates
return self.weights[res[0]]
else:
return None
def set_weights_by_keys(self, name: str, tensor: np.ndarray):
"""
Set a new weight to one of the existing node's weights, or add it if not exist.
Args:
name: Name of the weight the node holds.
tensor: Numpy array to set as the weight.
"""
res = [k for k in self.weights.keys() if name in k]
if len(res) == 1:
self.weights[res[0]] = tensor
else: # Add if not exist
self.weights[name] = tensor
self.weights_keys = list(self.weights.keys()) # update keys
def get_weights_list(self):
"""
Returns: A list of all weights the node holds.
"""
return [self.weights[k] for k in self.weights.keys() if self.weights[k] is not None]
def get_num_parameters(self) -> int:
"""
Returns: Number of parameters the node holds.
"""
node_num_params = np.sum([v.flatten().shape[0] for v in self.weights.values() if v is not None])
assert int(node_num_params) == node_num_params
return int(node_num_params)
def get_memory_bytes(self, by_candidate_idx: int = None) -> float:
"""
Returns: Number of bytes the node's memory requires.
"""
params = self.get_num_parameters()
if by_candidate_idx is not None:
assert type(by_candidate_idx)==int
assert by_candidate_idx < len(self.candidates_weights_quantization_cfg)
memory = params * self.candidates_weights_quantization_cfg[by_candidate_idx].weights_n_bits / 8 # in bytes
elif self.final_weights_quantization_cfg is None: # float coefficients
memory = params * 4
else:
memory = params * self.final_weights_quantization_cfg.weights_n_bits / 8 # in bytes
return memory
def get_unified_candidates_dict(self):
"""
In Mixed-Precision, a node can have multiple candidates for weights quantization configuration.
In order to display a single view of a node (for example, for logging in TensorBoard) we need a way
to create a single dictionary from all candidates.
This method is aimed to build such an unified dictionary for a node.
Returns: A dictionary containing information from node's weight quantization configuration candidates.
"""
shared_attributes = [CORRECTED_BIAS_ATTRIBUTE, WEIGHTS_NBITS_ATTRIBUTE]
attr = dict()
if self.is_weights_quantization_enabled():
attr = copy.deepcopy(self.candidates_weights_quantization_cfg[0].__dict__)
for shared_attr in shared_attributes:
if shared_attr in attr:
unified_attr = []
for candidate in self.candidates_weights_quantization_cfg:
unified_attr.append(getattr(candidate, shared_attr))
attr[shared_attr] = unified_attr
return attr
| 2.421875 | 2 |
setup.py | ojii/django-cms-epio-quickstart | 2 | 12791188 | # -*- coding: utf-8 -*-
from __future__ import with_statement
import epiocms
import fnmatch
import os
try:
from setuptools import setup, find_packages
except ImportError:
import distribute_setup
distribute_setup.use_setuptools()
from setuptools import setup, find_packages
with open('README.rst', 'r') as fobj:
long_desc = fobj.read()
media_files = []
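# Walk the bundled media tree and collect every file that does not match the
# temporary/hidden-file patterns below, storing each path relative to the package dir.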
for dirpath, dirnames, filenames in os.walk(os.path.join('epiocms', 'data', 'media')):
for filename in filenames:
filepath = os.path.join(dirpath, filename)
failed = False
for pattern in ('*.py', '*.pyc', '*~', '.*', '*.bak', '*.swp*'):
if fnmatch.fnmatchcase(filename, pattern):
failed = True
if failed:
continue
media_files.append(os.path.join(*filepath.split(os.sep)[1:]))
setup(
name='django-cms-epio-quickstart',
version=epiocms.__version__,
url='https://github.com/ojii/django-cms-epio-quickstart/',
download_url='http://pypi.python.org/pypi/django-cms-epio-quickstart',
license='BSD',
author='<NAME>',
author_email='<EMAIL>',
description='Quickstart command line app for the django CMS for ep.io',
long_description=long_desc,
zip_safe=False,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: Unix',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Topic :: Utilities',
],
platforms='any',
packages=find_packages(),
package_data={
'epiocms': [
'data/epio.ini',
'data/requirements.txt',
'data/urls.py',
'data/settings.py',
'data/templates/*.html',
]+ media_files,
},
entry_points={
'console_scripts': [
'epiocms = epiocms.main:main',
],
},
) | 1.796875 | 2 |
markdown_generator/publications_bib.py | Naminoshi/aguena.github.io | 0 | 12791189 | import re
import calendar
def journals(key):
lib = {
'arXiv e-prints': 'ArXiv',
r'\apj': 'Astrophysical Journal',
'\mnras': 'Monthly Notices of the Royal Astronomical Society',
r'\aap': 'Astronomy and Astrophysics',
r'\prd': 'Physical Review D',
}
if key in lib:
return lib[key]
else:
return key
mon_num = {v.lower(): "%02d"%k for k, v in enumerate(calendar.month_abbr) if k>0}
def rm_spaces(string):
base = string
test = base.replace('\t', ' ')
test = test.replace(' ', ' ')
while test!=base:
base = test
test = base.replace(' ', ' ')
return base
def get_info(data_line):
data_use = data_line.split(' = ')
keys = [d.split(',')[-1] for d in data_use][:-1]
values = [','.join(d.split(',')[:-1]) for d in data_use]
values = [rm_spaces(v) for v in values]
out = {k: '' for k in ('author', 'title', 'journal', 'volume', 'page', 'month', 'year')}
out['ref'] = values[0].replace('.', '')
for k, v in zip(keys, values[1:]):
#out[k.replace(' ', '')] = v
out[k.replace(' ', '')] = v.replace('{', '').replace('}', '').replace('"', '')
if 'howpublished' in out and out['journal']=='':
out['journal'] = out['howpublished']
out['author'] = out['author'].replace('\\', '')
return out
def abb_author(authors):
if len(authors.split(' and '))>2:
return '%s et al'%authors.split(' and ')[0].split(',')[0]
else:
return authors
def vol_page(pd):
if pd['volume']==pd['page']=='':
return ''
elif pd['volume']=='':
return ' %s,'%pd['page']
elif pd['page']=='':
return ' %s,'%pd['volume']
else:
return ' %s:%s,'%(pd['volume'], pd['page'])
def gen_file(pd):
print(pd)
date = '%s-%s-01'%(pd['year'], mon_num[pd['month'].lower()])
filename = '%s-%s.md'%(date, pd['ref'])
links = ', '.join([fmt%pd[k] for k, fmt in
[('doi', '[**Publisher**](http://doi.org/%s)'),
('eprint', '[**ArXiv**](https://arxiv.org/abs/%s)'),
('adsurl', '[**ADS**](%s)'),
]
if k in pd
])
content = '\n'.join([
'---',
'title: "%s"'%pd['title'],
'collection: publications',
'permalink: /publication/%s'%filename[:-3],
#'excerpt: "This paper is about the number 1. The number 2 is left for future work."',
'date: %s'%date,
'venue: "%s"'%journals(pd['journal']),
'paperurl: "%s"'%pd['adsurl'],
#'citation: "Your Name, You. (2009). "Paper Title Number 1." <i>Journal 1</i>. 1(1)."',
'citation: "%s. "%s." <i>%s</i>,%s %s %s"'%(pd['author'], pd['title'], journals(pd['journal']),
vol_page(pd), pd['month'].capitalize(), pd['year']),
'---',
'',
links,
#'This paper is about the number 1. The number 2 is left for future work.',
#'[Download paper here](http://academicpages.github.io/files/paper1.pdf)',
#'',
#'Recommended citation: Your Name, You. (2009). "Paper Title Number 1." <i>Journal 1</i>. 1(1).',
'',
'Recommended citation: %s (%s). "%s" <i>%s</i>,%s %s %s'%(abb_author(pd['author']),
pd['year'], pd['title'], journals(pd['journal']),
vol_page(pd), pd['month'].capitalize(), pd['year']),
])
print('\n****%s\n_____\n%s\n'%(filename, content))
return filename, content
if __name__=='__main__':
import sys
inbib = sys.argv[1]
data = ''.join(open(inbib, 'r').readlines())
print(data.split('\n@ARTICLE')[0])
data = data.replace('\n', '')
data_red = data.replace(' ', ' ')
while data!=data_red:
data = data_red
data_red = data.replace(' ', ' ')
data = data.split('@ARTICLE{')
data = [d for dat in data for d in dat.split('@MISC{') if d!='']
for dat in data:
#print(gen_file(get_info(dat)))
name, content = gen_file(get_info(dat))
print(name)
f = open('_publications/%s'%name, 'w')
print(content, file=f)
f.close()
| 2.78125 | 3 |
Desafios/Next_Desafio_01/main_Desafio_02.py | antoniosereno95/Python_Curso_em_Video | 0 | 12791190 | <filename>Desafios/Next_Desafio_01/main_Desafio_02.py
def abreArquivo(nome):
try:
a = open(nome, 'rt')
a.close()
except:
print('arquivo nao encontrado')
else:
print('Arquivo encontrado com sucesso =)')
def primeiralista(nome):
a = open(nome, 'rt')
lista1 = []
for linha in a:
lista1.append(linha.replace('\n','').split(','))
lista1.pop(0)
return lista1
def nomecandidatos(lista):
nomes = []
for i in range(len(lista)):
nome = lista[i][2]
if nome not in nomes:
nomes.append(nome)
return nomes
def votosKhan(lista):
soma = 0
for linha in lista:
if 'Khan' in linha:
soma+=1
return soma
def votosCorrey(lista):
soma = 0
for linha in lista:
if 'Correy' in linha:
soma += 1
return soma
def votosLi(lista):
soma = 0
for linha in lista:
if 'Li' in linha:
soma += 1
return soma
def votosOTooley(lista):
soma = 0
for i in range(len(lista)):
nome = lista[i][2]
if nome != 'Khan' and nome != 'Li' and nome != 'Correy':
soma += 1
return soma
def porcentagens(lista , khan , correy , li , otooley):
t_votos = len(lista)
por_khan = float(khan*100)/t_votos
por_correy = float(correy*100)/t_votos
por_li = float(li*100)/t_votos
por_otooley = float(otooley*100)/t_votos
lista_por =[]
lista_por.append(por_khan)
lista_por.append(por_correy)
lista_por.append(por_li)
lista_por.append(por_otooley)
return lista_por
def vencedor(lista_por):
maior = 0
cont = 0
index = int
ganhou = ''
for i in lista_por:
if i > maior:
maior = i
index = cont
cont += 1
if index == 0:
ganhou = 'Khan'
elif index == 1:
ganhou = 'Correy'
elif index == 2:
ganhou = 'Li'
elif index == 3:
ganhou = 'O\'tooley'
eleito = []
eleito.append(ganhou)
eleito.append(maior)
return eleito
def procuraArquivoResultado():
nome = 'resultado.txt'
try:
b = open(nome, 'rt')
b.close()
except:
print('arquivo nao encontrado, vou criar um para voce =)')
criaArquivoResultado(nome)
print('feito!')
else:
print(f'arquivo {nome} encontrado com sucesso =)')
def criaArquivoResultado(nome):
try:
b = open(nome, 'wt+')
b.close()
except:
print('houve um probrema na criaçao do arquivo')
else:
print(f'arquivo {nome} criado com sucesso =)')
def escreveResultado(tam , lista_porcentagens , khan , correy , li ,oTooley , v):
nome = 'resultado.txt'
with open(nome,'w') as arquivo:
arquivo.write('-' * 40)
arquivo.write('\n')
txt = 'Resultados eleitorais'
arquivo.write(txt.center(40))
arquivo.write('\n')
arquivo.write('-' * 40)
arquivo.write('\n')
arquivo.write(f'Total de votos: {tam}\n')
arquivo.write('-' * 40)
arquivo.write('\n')
arquivo.write(f'Khan: {lista_porcentagens[0].__round__(2)} ({khan})\n')
arquivo.write(f'Correy: {lista_porcentagens[1].__round__(2)} ({correy})\n')
arquivo.write(f'Li: {lista_porcentagens[2].__round__(2)} ({li})\n')
arquivo.write(f'O\'Tooley: {lista_porcentagens[3].__round__(2)} ({oTooley})\n')
arquivo.write('-' * 40)
arquivo.write('\n')
arquivo.write(f'Vencedor: {v[0]}\n')
arquivo.write('-' * 40)
with open(nome) as arquivo:
print(arquivo.read())
#main
arq = 'dados_elecao.txt'
abreArquivo(arq)
lista = primeiralista(arq)
#print(lista[0])
# part (a)
tam = len(lista)
print(f'O número total de votos expressos {tam}')
# part (b)
candidatos = nomecandidatos(lista)
print(f'lista de candidatos: {candidatos}')
# part (c): percentage of votes for each candidate
khan = votosKhan(lista)
correy = votosCorrey(lista)
li = votosLi(lista)
oTooley = votosOTooley(lista)
lista_porcentagens = porcentagens(lista, khan=khan, correy=correy, li=li, otooley=oTooley)
print(f'lista de porcentagens: {lista_porcentagens}')
# part (d)
print(f'votos pro khan = {khan}')
print(f'votos pro Correy = {correy}')
print(f'votos pra Li = {li}')
print(f'votos pro O\'Tooley = {oTooley}')
# part (e)
v = vencedor(lista_porcentagens)
print(f'O vencedor foi {v[0]} com um total de {v[1]} porcento dos votos')
# part (f)
print('-'*40)
txt = 'Resultados eleitorais'
print(txt.center(40))
print('-'*40)
print(f'Total de votos: {tam}')
print('-'*40)
print(f'Khan: {lista_porcentagens[0].__round__(2)} ({khan})')
print(f'Correy: {lista_porcentagens[1].__round__(2)} ({correy})')
print(f'Li: {lista_porcentagens[2].__round__(2)} ({li})')
print(f'O\'Tooley: {lista_porcentagens[3].__round__(2)} ({oTooley})')
print('-'*40)
print(f'Vencedor: {v[0]}')
print('-'*40)
# export the results file
procuraArquivoResultado()
escreveResultado(tam=tam, lista_porcentagens=lista_porcentagens , khan=khan, correy=correy, li=li , oTooley=oTooley , v=v) | 3.53125 | 4 |
explore_db.py | nateGeorge/scrape_wrds | 0 | 12791191 | <gh_stars>0
"""
names_ix has index gvkeyx and index name
- comes from idx_ann table in compd library -- need to rewrite query
idxcst_his has the historical index constituents
"""
import os
import gc
import time
import datetime
from collections import OrderedDict
from concurrent.futures import ThreadPoolExecutor
import matplotlib.pyplot as plt
import numpy as np
import pandas_market_calendars as mcal
import pandas as pd
from tqdm import tqdm
import wrds
FILEPATH = '/home/nate/Dropbox/data/wrds/compustat_north_america/'
hdf_settings = {'key': 'data',
'mode': 'w',
'complib': 'blosc',
'complevel': 9}
hdf_settings_table = {'key': 'data',
'mode': 'a',
'append': True,
'format': 'table',
'complib': 'blosc',
'complevel': 9}
secd_cols_to_use = ['ajexdi', # Adjusted Price = (PRCCD / AJEXDI ); “Understanding the Data” on page 91 and on (chapter 6)
'cshoc', # shares outstanding
'cshtrd', # volume
'curcdd',
'datadate',
'eps',
'gvkey',
'iid',
'prccd', # close
'prchd', # high
'prcld', # low
'prcod'] # open
secd_cols = ','.join(secd_cols_to_use)
def make_db_connection():
"""
creates connection to WRDS database
need to enter credentials to log in
"""
wrds_uname = os.environ.get('wrds_username')
wrds_pass = os.environ.get('wrds_password')
# tries to use pgpass file; see here:
# https://wrds-www.wharton.upenn.edu/pages/support/accessing-wrds-remotely/troubleshooting-pgpass-file-remotely/
db = wrds.Connection(wrds_username=wrds_uname, wrds_password=wrds_pass)
# saves credentials, but not pgpass working
# db.create_pgpass_file()
return db
def list_libs_tables():
"""
some exploration of the db
lists libraries, and within each library you can list tables
"""
db.list_libraries()
db.list_tables('zacks')
db.list_tables('ciq') # don't have permission??
db.list_tables('comp_global_daily')
db.list_tables('comp')
def download_entire_table(tablename, library='comp'):
"""
downloads an entire table by name; library also required.
default library is the compstat lib
download entire table
e.g. tablename='sec_shortint'
tables downloaded 9-12:
sec_shortint
security
secd
secd is about 39GB in a pandas df...
    TODO: get latest date already downloaded and use sql query to get updates;
    then save to HDF5
    for tables like 'security', check if any more rows and grab new stuff,
    or just grab whole table if can't figure out what new stuff is
"""
nrows = db.get_row_count(library, tablename)
print('number of rows:', nrows)
#db.describe_table(library, tablename)
# nrows = 1000000
if tablename == 'secd': # this is the securities data db -- has historical price data for securities
cols_to_use = ['ajexdi', # Adjusted Price = (PRCCD / AJEXDI ); “Understanding the Data” on page 91 and on (chapter 6)
'cshoc', # shares outstanding
'cshtrd', # volume
'datadate',
'eps',
'gvkey',
'iid',
'prccd', # close
'prchd', # high
'prcld', # low
'prcod', # open
'tic' # ticker symbol
]
df = db.get_table(library, tablename, columns=cols_to_use, obs=nrows)
df.to_hdf(FILEPATH + 'hdf/{}.hdf'.format(tablename + '_min'), **hdf_settings)
elif tablename == 'sec_dprc':
# need to dl in chunks because it is too huge -- expect it to be about 100GB in memory
cols_to_use = ['ajexdi',
'cshoc',
'cshtrd',
'curcdd',
'datadate',
'eps',
'gvkey',
'iid',
'prccd',
'prchd',
'prcld',
'prcod']
# WARNING: does not appear to work properly. probably a sql ordering issue or something
nobs = 10000000
for i, start in enumerate(range(0, nrows, nobs), 1):
print('on part', str(i))
df = db.get_table(library, tablename, columns=cols_to_use, obs=nobs, offset=start)
df.to_hdf(FILEPATH + 'hdf/{}.hdf'.format(tablename + '_min_part_' + str(i)), **hdf_settings)
del df
gc.collect()
elif tablename == 'idxcst_his':
download_index_constituents()
else:
print('not one of predefined tables to download')
def check_if_up_to_date(db, df_filepath, table, library='comp'):
"""
checks if current rows is less than rows in db; returns True is up to date; False if not
"""
if os.path.exists(df_filepath):
current_df = pd.read_hdf(df_filepath)
current_rows = current_df.shape[0]
else:
current_rows = 0
nrows = db.get_row_count(library=library, table=table)
if nrows == current_rows:
print('up to date')
return True, nrows
elif nrows < current_rows:
print('number of available rows is less than number in current db;')
print('something is wrong...')
return True, nrows
else:
print('db needs updating')
return False, nrows
def download_index_constituents(db, nrows=None, update=False):
"""
obsolete for now; use download_small_table function instead
gets historical index constituents from compustat table
checks if size of table has changed; if so, downloads anew
TODO: if update is True, then will try to find existing dataframe and only update it
"""
library = 'comp'
table = 'idxcst_his'
# check if any new rows
df_filepath = FILEPATH + 'hdf/idxcst_his.hdf'
up_to_date, nrows = check_if_up_to_date(db, df_filepath, table=table, library=library)
if up_to_date:
return
offset = 0
# turns out the db is fast to download because this is a small table...
# no need to update row by row, and can't get it working anyhow
# need to figure out where new things are added, but do later
# if update:
# cst_filepath = FILEPATH + 'hdf/idxcst_his.hdf'
# if os.path.exists(cst_filepath):
# const_df = pd.read_hdf(cst_filepath)
# last_entry = const_df.iloc[-1]
# # get new rows plus the last one to check it's the same as the
# # last one currently in the hdf file
# rows_to_get = nrows - const_df.shape[0] + 1
# offset = const_df.shape[0] - 1
# else:
# rows_to_get = nrows
# offset = 0
df = db.get_table(library=library, table=table, obs=nrows, offset=offset)
# converts date columns to datetime
df['from'] = pd.to_datetime(df['from'], utc=True)
df['thru'] = pd.to_datetime(df['thru'], utc=True)
df['from'] = df['from'].dt.tz_convert('US/Eastern')
df['thru'] = df['thru'].dt.tz_convert('US/Eastern')
df.to_hdf(df_filepath, **hdf_settings)
def download_small_table(db, table, library='comp'):
"""
downloads table if needs updating
table can be a tablename in the library; common ones for compustat (comp) are:
security
names_ix
idxcst_his
.h5 files have same name as table
"""
df_filepath = FILEPATH + 'hdf/{}.hdf'.format(table)
up_to_date, nrows = check_if_up_to_date(db, df_filepath, table=table, library=library)
if up_to_date:
return
df = db.get_table(library=library, table=table, obs=nrows)
if table == 'idxcst_his':
# converts date columns to datetime
df['from'] = pd.to_datetime(df['from'], utc=True)
df['thru'] = pd.to_datetime(df['thru'], utc=True)
df['from'] = df['from'].dt.tz_convert('US/Eastern')
df['thru'] = df['thru'].dt.tz_convert('US/Eastern')
df.to_hdf(df_filepath, **hdf_settings)
del df
gc.collect()
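# Illustrative helper (not in the original script): refresh the small lookup tables
# in one pass, assuming make_db_connection() can authenticate against WRDS.
def example_refresh_small_tables():
    db = make_db_connection()
    for tbl in ('security', 'names_ix', 'idxcst_his'):
        download_small_table(db, tbl)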
def download_common_stock_price_history(db, update=True, table='secd', library='comp'):
"""
downloads data for all common stocks (US and ADR, or tpci column is 0 or F)
if update=True, will get latest date in current df, then get everything after that
and add to current df
"""
# filename from first iteration
# secd_filename = FILEPATH + 'hdf/common_us_stocks_daily_9-12-2018.hdf'
secd_filename = FILEPATH + 'hdf/secd.hdf'
current_df = pd.read_hdf(secd_filename)
latest_date = current_df['datadate'].max().strftime('%m/%d/%y')
# get gvkeys for tpci 0 or F
# ends up with very slow sql query; avoid
securities = pd.read_hdf(FILEPATH + 'hdf/security.hdf')
common_securities = securities[securities['tpci'].isin(['0', 'F'])]
# # make string for SQL query: WHERE IN
# # should be like ('item 1', 'item 2', 'item 3')
# gvkeys_str = '(' + ', '.join(["'" + s + "'" for s in common_securities['gvkey']]) + ')'
# if you want to count how many rows are there, use this.
# full data query only took a few seconds even with 1M rows
# query_str = 'select count(gvkey) from comp.secd where datadate > \'{}\';'.format(latest_date)
# db.raw_sql(query_str)
query_str = 'select {} from {}.{} WHERE datadate > \'{}\';'# and gvkey IN {};'
df = db.raw_sql(query_str.format(secd_cols, library, table, latest_date), date_cols=['datadate'])
    # drop rows with a missing close price (the original apply(...).index call would
    # have dropped every row, since a Series' index ignores the boolean values)
    df.drop(df[df['prccd'].isnull()].index, inplace=True)
if not df.shape[0] > 0:
print("no data to be found!")
return
# convert datadate to datetime64
df['datadate'] = pd.to_datetime(df['datadate']).dt.tz_localize('US/Eastern')
    # calculate market cap
df['market_cap'] = df['cshoc'] * df['prccd']
# TODO: create file for storing all updated data and append
# used once to write data
# df.to_hdf(FILEPATH + 'hdf/secd_full_9-11-2018_thru_11-30-2018.hdf', **hdf_settings)
df.to_hdf(FILEPATH + 'hdf/secd_all_9-11-2018_onward.hdf', **hdf_settings_table)
# only keep common stocks (tpci = 0 and F)
common_securities_short = common_securities[['gvkey', 'iid']]
common_df = df.merge(common_securities_short, on=['gvkey', 'iid'])
common_df.drop('curcdd', inplace=True, axis=1) # drop currency column
# write existing data as hdf table -- first time only
# current_df.to_hdf(secd_filename, **hdf_settings_table)
# appends to hdf store
common_df.to_hdf(secd_filename, **hdf_settings_table)
del current_df
del securities
del df
del common_df
del common_securities
gc.collect()
def get_stock_hist_df(gvkey, library='comp', tablename='secd'):
df = db.raw_sql('select {} from {}.{} WHERE gvkey = {};'.format(secd_cols, library, tablename, gvkey), date_cols=['datadate'])
return df
def download_all_security_data():
"""
downloads full security data history for sp600
I think this was actually used to get all historical stock data actually,
not just sp600.
TODO: get latest date and download updates
"""
df = pd.read_hdf(FILEPATH + 'hdf/idxcst_his.hdf')
sp600_df = df[df['gvkeyx'] == '030824']
sp600_gvkeys = np.unique(sp600_df['gvkey'].values)
sp600_gvkeys_strings = ["'" + gv + "'" for gv in sp600_gvkeys]
sp600_gvkeys_string = ', '.join(sp600_gvkeys_strings)
# reads in all securities
securities = pd.read_hdf(FILEPATH + 'hdf/security.hdf')
all_gvkeys = securities['gvkey'].values
remaining_gvs = list(set(all_gvkeys).difference(set(sp600_gvkeys)))
# raw sql to get historical security data
# goes through all securities and downloads historical price data
# chunk through remaining gvkeys in 10 chunks
chunk_size = len(remaining_gvs) // 10
for i, ch in enumerate(range(0, len(remaining_gvs) + 1, chunk_size)):
# first make strings out of gvkeys for SQL query
start = ch
if ch + chunk_size > len(remaining_gvs):
gvkeys_strings = ["'" + gv + "'" for gv in remaining_gvs[start:]]
else:
gvkeys_strings = ["'" + gv + "'" for gv in remaining_gvs[start:ch + chunk_size]]
start = time.time()
jobs = []
# 10 threads per cpu for 8 cores; default is 5 per CPU
# seems like 5 simultaneous queries is max -- run in parallel
with ThreadPoolExecutor(max_workers=5) as executor:
for gv in gvkeys_strings:
jobs.append((gv, executor.submit(get_stock_hist_df, gv)))
dfs = []
for gv, j in jobs:
# print(gv)
dfs.append(j.result())
end = time.time()
print('took', int(end - start), 'seconds')
big_df = pd.concat(dfs)
big_df['datadate'] = pd.to_datetime(big_df['datadate']).dt.tz_localize('US/Eastern')
# big_df['datadate'] = pd.Timestamp(big_df['datadate']) # doesn't work!!
# big_df['datadate'].dt.tz_localize('US/Eastern')
# TODO: dynamically set date instead of hard copy
big_df.to_hdf(FILEPATH + 'hdf/daily_security_data__chunk_{}_9-15-2018.hdf'.format(str(i)), **hdf_settings)
del jobs
del dfs
del big_df
gc.collect()
# 30 seconds per 50 -- should take about 20m for 2k
# took 1282s for 2127 gvkeys
def load_and_combine_sec_dprc():
"""
loads all security data from sec_dprc table
"""
dfs = []
for i in tqdm(range(1, 13)):
# print(i)
dfs.append(pd.read_hdf(FILEPATH + 'hdf/sec_dprc_min_part_{}.hdf'.format(str(i))))
df = pd.concat(dfs)
# get only common stocks
securities = pd.read_hdf(FILEPATH + 'hdf/security.hdf')
# gvkeys = df['gvkey'].unique()
# I think 0 or F for tpci are common or ADR, which are stocks you can buy
common_stocks = securities[securities['tpci'].isin(['0', 'F'])]
common_stocks.drop(common_stocks[common_stocks['ibtic'].isnull()].index, inplace=True) # these seem to be weird tickers; buyouts or something
# ignore stocks on canadian exchanges
common_stocks.drop(common_stocks[common_stocks['iid'].str.contains('C')].index, inplace=True)
# check to make sure only one iid per gvkey -- not quite
gvkey_grp = common_stocks.groupby('gvkey')
num_iids = gvkey_grp['iid'].nunique()
num_iids.mean()
num_iids[num_iids > 1]
common_df = df[df['gvkey'].isin(set(common_stocks['gvkey'].unique()))]
common_df = common_df[common_df['iid'].isin(set(common_stocks['iid'].unique()))]
# don't use CAD stocks
common_df.drop(common_df[common_df['curcdd'] == 'CAD'].index, inplace=True)
# no longer need currency, all USD
common_df.drop('curcdd', axis=1, inplace=True)
common_df['datadate'] = pd.to_datetime(common_df['datadate']).dt.tz_localize('US/Eastern')
common_df['market_cap'] = common_df['cshoc'] * common_df['prccd']
common_df.to_hdf(FILEPATH + 'hdf/common_us_stocks_daily_9-12-2018.hdf', **hdf_settings)
# add ticker and remove iid and gvkey -- should just merge or something
# for gvkey in tqdm(common_df['gvkey'].unique()):
# common_df.at[common_df['gvkey'] == gvkey, 'ticker'] = securities[securities['gvkey'] == gvkey]['tic']
def get_historical_constituents_wrds_hdf(date_range=None, index='S&P Smallcap 600 Index'):
# adapted from beat_market_analysis constituent_utils.py
"""
gets historical constituents from WRDS file
common indexes as represented in the idx_ann table:
SP600: S&P Smallcap 600 Index
SP400: S&P Midcap 400 Index
SP500: S&P 500 Comp-Ltd (there's another one with Wed instead of Ltd which I don't know what it is)
SP1500: S&P 1500 Super Composite
NASDAQ 100: Nasdaq 100
"""
idx_df = pd.read_hdf(FILEPATH + 'hdf/names_ix.hdf')
gvkeyx = idx_df[idx_df['conm'] == index]['gvkeyx'].values
if len(gvkeyx) > 1:
print('more than 1 gvkeyx, exiting:')
print(idx_df[idx_df['conm'] == index])
return
gvkeyx = gvkeyx[0]
# TODO: get latest file
# parse dates not working for hdf, parse_dates=['from', 'thru'], infer_datetime_format=True)
const_df = pd.read_hdf(FILEPATH + 'hdf/idxcst_his.hdf')
# only need to do this once, then after it's saved, good to go
# TODO: put this in a clean function and do when saving the file
# df['from'] = pd.to_datetime(df['from'], utc=True)
# df['thru'] = pd.to_datetime(df['thru'], utc=True)
# df['from'] = df['from'].dt.tz_convert('US/Eastern')
# df['thru'] = df['thru'].dt.tz_convert('US/Eastern')
# df.to_hdf(FILEPATH + 'hdf/index_constituents_9-12-2018.hdf', **hdf_settings)
# need to join up with other dataframe maybe, for now, just use gvkeyx which is
# 030824 for sp600
# df2 = pd.read_hdf(FILEPATH + 'hdf/names_ix.hdf')
single_idx_df = const_df[const_df['gvkeyx'] == gvkeyx].copy()
# combine with securities for ticker symbol
securities = pd.read_hdf(FILEPATH + 'hdf/security.hdf')
# abbreviated securities df; only ticker, gvkey, and iid
sec_short = securities[['tic', 'gvkey', 'iid']]
single_idx_df = single_idx_df.merge(sec_short, on=['gvkey', 'iid'])
# get stocks' gvkeys for sql search -- no longer needed
# gvkeys = single_idx_df['gvkey'].values
# create dataframe with list of constituents for each day
start = single_idx_df['from'].min()
# get todays date and reset hour, min, sec to 0s
# TODO: if not latest date; use date of datafile as latest
end = pd.Timestamp.today(tz='US/Eastern').replace(hour=0, minute=0, second=0, microsecond=0, tzinfo=None).tz_localize('US/Eastern')
# replace NaT with tomorrow's date
# gives copy warning but can't get rid of it...
single_idx_df['thru'].fillna(end + pd.DateOffset(days=1), inplace=True)
nyse = mcal.get_calendar('NYSE')
# gets all dates
# date_range = mcal.date_range(start=start, end=end)
# gets only dates valid for NYSE -- doesn't seem to match historical data
if date_range is None:
date_range = nyse.valid_days(start_date=start.date(), end_date=end.date()).tz_convert('US/Eastern')
else:
# cutoff at earliest date for index
date_range = np.array(sorted(date_range))
date_range = date_range[date_range >= start]
constituent_companies = OrderedDict()
# constituent_tickers = OrderedDict()
lengths = []
# TODO: multiprocessing to speed up
# takes about 10s for nasdaq 100
for d in tqdm(date_range):
# if date is within stock's from and thru, add to list
# stocks were removed on 'thru', so if it is the 'thru' date, then shouldn't be included
# but stocks were added on 'from' date, so include stocks on 'from' date
# use dataframe masking
date_string = d.strftime('%Y-%m-%d')
current_stocks = single_idx_df[(single_idx_df['from'] <= d) & (single_idx_df['thru'] > d)]
current_companies = current_stocks[['gvkey', 'iid']] # company names
# current_tickers = current_stocks['co_tic'] # company tickers
constituent_companies[date_string] = current_companies
# constituent_tickers[date_string] = current_tickers
lengths.append(current_companies.shape[0])
# look at number of constituents as a histogram; mostly 600 but a few above and below
# pd.value_counts(lengths)
# plt.hist(lengths)
# plt.show()
# TODO:
# need to check that no tickers are used for multiple companies
# get unique dates where changes were made
unique_dates = set(single_idx_df['from'].unique()) | set(single_idx_df['thru'].unique())
return constituent_companies, unique_dates
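# Example call (hedged): the index name must match the 'conm' column of names_ix
# exactly; the string below is the function's own default for the S&P Smallcap 600.
#   constituents, change_dates = get_historical_constituents_wrds_hdf(
#       index='S&P Smallcap 600 Index')
#   latest = constituents[sorted(constituents.keys())[-1]]  # gvkey/iid members on the last date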
def spy_20_smallest():
"""
tries to implement 20 smallest SPY strategy from paper (see beat_market_analysis github repo)
"""
# merge historical constituents for sp600 with daily price, eps, and market cap data
# see what returns are on yearly rebalance for 20 smallest marketcap stocks
# just get first of year dates, then get company market caps
# get smallest 20 market caps, get close price
# get close price a year later, calculate overall return
# repeat ad nauseum
# common_stocks = pd.read_hdf(FILEPATH + 'hdf/common_us_stocks_daily_9-12-2018.hdf')
sp600_stocks = pd.read_hdf(FILEPATH + 'hdf/sp600_daily_security_data_9-15-2018.hdf')
sp600_stocks['market_cap'] = sp600_stocks['cshoc'] * sp600_stocks['prccd']
# sp600 index data starts in 1994
years = sp600_stocks['datadate'][sp600_stocks['datadate'].dt.year >= 1994].dt.year.unique()
first_days = []
sp600_dates = sorted(sp600_stocks['datadate'].unique())
constituent_companies, unique_dates = get_historical_constituents_wrds_hdf(sp600_dates)
for y in tqdm(years[1:]): # first year starts on sept
year_dates = [d for d in sp600_dates if d.year == y]
first_days.append(min(year_dates))
# '1998-01-02' giving key error in constituent_companies
price_chg_1y = OrderedDict()
smallest_20 = OrderedDict()
smallest_20_1y_chg = OrderedDict()
# TODO: get latest price if stopped trading during the year; figure out mergers/buyouts, etc
# TODO: get tickers
for start, end in tqdm(zip(first_days[4:-1], first_days[5:])): # 2000 onward is [5:] ; market cap not available until 1999 for these stocks
datestr = start.strftime('%Y-%m-%d')
constituents = constituent_companies[datestr]
current_daily_data = sp600_stocks[sp600_stocks['datadate'] == start]
one_year_daily_data = sp600_stocks[sp600_stocks['datadate'] == end]
# TODO: figure out why a few hundred are missing in the daily data from the constituent list
# AIR ('001004') is not in common_stocks, figure out why
full_const = constituents.merge(current_daily_data, on=['gvkey', 'iid'])
full_const_1y = constituents.merge(one_year_daily_data, on=['gvkey', 'iid'])
# get adjusted closes for constituents now and 1y in future
const_current_price = full_const[['gvkey', 'iid', 'ajexdi', 'prccd']]
const_future_price = full_const_1y[['gvkey', 'iid', 'ajexdi', 'prccd']]
const_current_price['adj_close'] = const_current_price['prccd'] / const_current_price['ajexdi']
const_future_price['adj_close_1y_future'] = const_future_price['prccd'] / const_future_price['ajexdi']
const_current_price.drop(['prccd', 'ajexdi'], inplace=True, axis=1)
const_future_price.drop(['prccd', 'ajexdi'], inplace=True, axis=1)
# get % price change for each
const_price_change = const_current_price.merge(const_future_price, on=['gvkey', 'iid']).drop_duplicates()
const_price_change['1y_pct_chg'] = (const_price_change['adj_close_1y_future'] - const_price_change['adj_close']) / const_price_change['adj_close']
price_chg_1y[datestr] = const_price_change
bottom_20 = full_const.sort_values(by='market_cap', ascending=True).iloc[:20]
smallest_20[datestr] = bottom_20
bottom_20_price_chg = const_price_change[const_price_change['gvkey'].isin(set(bottom_20['gvkey']))]
bottom_20_price_chg.reset_index(inplace=True, drop=True)
if bottom_20_price_chg.shape[0] == 0: # everything was acquired/bankrupt, etc, like in 2006 and 07 I think
last_idx = 0
else:
last_idx = bottom_20_price_chg.index[-1]
# get stocks missing from price changes, and use last price to get price change
missing_gvkeys = list(set(bottom_20['gvkey']).difference(set(bottom_20_price_chg['gvkey'])))
for m in missing_gvkeys:
last_idx += 1 # make an index for creating dataframe with last price, so we can append it to the bottom_20_price_chg df
price_chg_dict = {}
iid = bottom_20[bottom_20['gvkey'] == m]['iid'].values
if len(iid) > 1:
print('shit, iid length >1')
iid = iid[0]
last_data = sp600_stocks[(sp600_stocks['gvkey'] == m) & (sp600_stocks['iid'] == iid)][['prccd', 'ajexdi']].dropna().iloc[-1]
last_price = last_data['prccd'] / last_data['ajexdi']
price_chg_dict['gvkey'] = m
price_chg_dict['iid'] = iid
# TODO: check this isn't more than one result, may need to filter by iid too
price_chg_dict['adj_close'] = const_current_price[const_current_price['gvkey'] == m]['adj_close'].values[0]
price_chg_dict['adj_close_1y_future'] = last_price
price_chg_dict['1y_pct_chg'] = (last_price - price_chg_dict['adj_close']) / price_chg_dict['adj_close']
bottom_20_price_chg = bottom_20_price_chg.append(pd.DataFrame(price_chg_dict, index=[last_idx])[bottom_20_price_chg.columns.tolist()]) # TODO: check if append works with out-of-order columns
# TODO: find next stock in bottom 20 at time the other was put out, and see how it does
smallest_20_1y_chg[datestr] = bottom_20_price_chg
price_chg_1y[datestr] = bottom_20_price_chg['1y_pct_chg'].sum() / 20 # assume others not in here are 0 for now
# get the overall price changes each year
annualized_return = (np.prod([1 + p for p in price_chg_1y.values()]) ** (1/len(price_chg_1y.values())) - 1) * 100
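# sanity check of the formula with made-up numbers: yearly returns of +10%, -5% and +20%
# compound to 1.10 * 0.95 * 1.20 = 1.254, and 1.254 ** (1/3) - 1 is roughly 0.078,
# i.e. about 7.8% annualized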
plt.plot(price_chg_1y.keys(), price_chg_1y.values())
plt.scatter(price_chg_1y.keys(), price_chg_1y.values())
plt.xticks(rotation=90)
plt.title('bottom 20 SP600 stocks yearly returns, annualized return = ' + str(round(annualized_return, 1)))
plt.ylabel('% return per year')
plt.tight_layout()
plt.show()
# to get tickers
smallest_20_1y_chg['2017-01-03'].merge(securities[['gvkey', 'iid', 'tic']], on=['gvkey', 'iid'])
bottom_20.merge(securities[['gvkey', 'iid', 'tic']], on=['gvkey', 'iid'])
securities = pd.read_hdf(FILEPATH + 'hdf/security.hdf')
bottom_20_tickers = bottom_20.merge(securities, on=['gvkey', 'iid'])
# TODO: deal with acquisitions: dlrsni 01 is acquired, 02 is bankrupt, 03 is liquidated
# https://wrds-web.wharton.upenn.edu/wrds/support/Data/_001Manuals%20and%20Overviews/_001Compustat/_001North%20America%20-%20Global%20-%20Bank/_000dataguide/index.cfm
# get gvkeys missing in price changes and check for bankruptcy or acquisitions, etc
missing_gvkeys = list(set(bottom_20['gvkey']).difference(set(bottom_20_price_chg['gvkey'])))
missing = bottom_20[bottom_20['gvkey'].isin(missing_gvkeys)]
missing_merged = missing.merge(securities[['gvkey', 'iid', 'dlrsni', 'tic']])
missing_merged[['tic', 'dlrsni']]
securities[securities['gvkey'] == '010565']
# TODO: is it different/better to rebalance on a certain day/month?
def secd_info():
"""
info of first 1M rows of secd:
RangeIndex: 1000000 entries, 0 to 999999
Data columns (total 41 columns):
gvkey 1000000 non-null object
iid 1000000 non-null object
datadate 1000000 non-null object
tic 1000000 non-null object
cusip 1000000 non-null object
conm 1000000 non-null object
curcddv 5861 non-null object
capgn 29 non-null float64
cheqv 85 non-null float64
div 5780 non-null float64
divd 5694 non-null float64
divdpaydateind 0 non-null object
divsp 129 non-null float64
dvrated 2875 non-null float64
paydateind 2 non-null object
anncdate 2776 non-null object
capgnpaydate 29 non-null object
cheqvpaydate 82 non-null object
divdpaydate 5691 non-null object
divsppaydate 128 non-null object
paydate 5772 non-null object
recorddate 2906 non-null object
curcdd 999696 non-null object
adrrc 4202 non-null float64
ajexdi 999696 non-null float64
cshoc 439670 non-null float64
cshtrd 999677 non-null float64
dvi 379938 non-null float64
eps 309295 non-null float64
epsmo 309295 non-null float64
prccd 999696 non-null float64
prchd 986959 non-null float64
prcld 985637 non-null float64
prcod 224624 non-null float64
prcstd 999696 non-null float64
trfd 733884 non-null float64
exchg 1000000 non-null float64
secstat 1000000 non-null object
tpci 1000000 non-null object
cik 922655 non-null object
fic 1000000 non-null object
dtypes: float64(20), object(21)
memory usage: 312.8+ MB
so we can ignore most of those middle columns
cols_to_use = ['ajexdi',
'cshoc', # shares outstanding
'cshtrd', # volume
'datadate',
'eps',
'prccd',
'prchd',
'prcld',
'prcod',
'tic' # maybe want to get iid too, not sure
]
other_cols = ['adrrc',
'anncdate',
'capgn',
'capgnpaydate',
'cheqv',
'cheqvpaydate',
'curcdd',
'curcddv',
'cusip',
'datadate',
'div',
'divd',
'divdpaydate',
'divdpaydateind',
'divsp',
'divsppaydate',
'dvi',
'dvrated',
'epsmo',
'exchg',
'fic',
'gvkey',
'iid',
'paydate',
'paydateind',
'prcstd',
'recorddate',
'secstat',
'tic',
'tpci',
'trfd']
"""
pass
def test_sql_queries():
pass
# with limit for testing
# df = db.raw_sql('select {} from {}.{} WHERE gvkey IN ({}) LIMIT 10;'.format(','.join(cols_to_use), library, tablename, sp600_gvkeys_string), date_cols=['datadate'])
# takes a really long time...
# # df = db.raw_sql('select {} from {}.{} WHERE gvkey IN ({});'.format(','.join(cols_to_use), library, tablename, sp600_gvkeys_string), date_cols=['datadate'])
# see how long one query takes -- about 2s
# start = time.time()
# df = db.raw_sql('select {} from {}.{} WHERE gvkey = {};'.format(','.join(cols_to_use), library, tablename, sp600_gvkeys_strings[0]), date_cols=['datadate'])
# end = time.time()
# print('took', int(end - start), 'seconds')
# takes about 2h linearly
# dfs = []
# for gv in tqdm(sp600_gvkeys_strings):
# df = db.raw_sql('select {} from {}.{} WHERE gvkey = {};'.format(','.join(cols_to_use), library, tablename, gv), date_cols=['datadate'])
# dfs.append(df)
# testing
# df = db.raw_sql('select {} from {}.{} WHERE gvkey = \'001004\' LIMIT 10;'.format(','.join(cols_to_use), library, tablename), date_cols=['datadate'])
def testing_db():
"""
looks like some code that tests some db functions and explores them
"""
df = db.get_table('comp', 'security', obs=10)
db.get_table('crsp', 'dsf', columns=['cusip', 'permno', 'date', 'bidlo', 'askhi'], obs=100)
# compustat data
# short data
db.get_table('comp', 'sec_shortint', obs=100)
# quarterly fundamentals
db.get_table('comp', 'fundq')
# annual
db.get_table('comp', 'funda')
# industry quarterly
db.get_table('comp', 'aco_indstq')
# annual
db.get_table('comp', 'aco_indsta')
# index prices daily
db.get_table('comp', 'idx_mth')
# simplified financial statement extract daily
db.get_table('comp', 'funda') # seems to be the same as annual fundamentals
# annual index fundamentals
db.get_table('comp', 'idx_ann')
# monthly security data
db.get_table('comp', 'secm', obs=100)
# index constituents
db.get_table('comp', 'idxcst_his')
# market cap/price, daily data
db.get_table('comp', 'secd', obs=100)
# OTC pricing
db.get_table('otc', 'endofday', obs=100)
# gets acquisition spending; aqcy column
df4 = db.raw_sql('select * from comp.fundq WHERE gvkey = \'010519\';')
def get_nasdaq_100_constituents():
"""
gets historical nasdaq 100 constituents
then looks at
"""
constituent_companies, unique_dates = get_historical_constituents_wrds_hdf(date_range=None, index='Nasdaq 100')
| 1.867188 | 2 |
elements/python/11/10/soln.py | mmcloughlin/problems | 11 | 12791192 | import heapq
import random
class Stack(object):
def __init__(self):
self.h = []
def push(self, x):
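        # priority is the negative stack depth, so the most recently pushed item
        # has the smallest key and heappop returns it first (LIFO behaviour)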
heapq.heappush(self.h, (-len(self.h), x))
def pop(self):
if self.empty():
return None
_, x = heapq.heappop(self.h)
return x
def empty(self):
return len(self.h) == 0
def test():
items = [random.randrange(100) for _ in xrange(100)]
s = Stack()
for x in items:
s.push(x)
for x in reversed(items):
assert x == s.pop()
print 'pass'
def main():
test()
if __name__ == '__main__':
main()
| 3.515625 | 4 |
implementations/knapsackProblem.py | e-liyai/python_algorithms | 0 | 12791193 | def knapsack_dynamic_solution(objects, max_weight):
# object should be in the form of:
# {
# 'weight': [0, 5, 6, 2, 7, 1]
# 'value': [0, 4, 1, 5, 7, 8]
# }
if not isinstance(objects, dict):
return 'Object should be a dict'
elif len(objects['weight']) != len(objects['value']):
return 'weight value pair should be of same length'
    no_items = 0
    two_dim_array = [[0 for i in range(max_weight + 1)] for x in range(len(objects['weight']))]
    while no_items < len(objects['weight']):  # rows 0..n-1; row 0 is the zero-value base case
        weight = 0  # reset the capacity counter for every item row
        while weight <= max_weight:
if no_items == 0 or weight == 0:
two_dim_array[no_items][weight] = 0
elif objects['weight'][no_items] <= weight:
two_dim_array[no_items][weight] = max(
objects['value'][no_items] + two_dim_array[no_items - 1][weight - objects['weight'][no_items]],
two_dim_array[no_items - 1][weight]
)
else:
two_dim_array[no_items][weight] = two_dim_array[no_items - 1][weight]
weight += 1
no_items += 1
    # print(two_dim_array[len(objects['weight']) - 1][max_weight])
    print(two_dim_array)
    # the best achievable value sits in the last row at full capacity
    return two_dim_array[-1][max_weight]
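

if __name__ == '__main__':
    # Example run (illustrative numbers; index 0 in both lists is the padding entry
    # described in the comment at the top of the function). With capacity 10 the
    # optimal value is 20 (the items of weight 2, 7 and 1).
    best = knapsack_dynamic_solution({'weight': [0, 5, 6, 2, 7, 1],
                                      'value': [0, 4, 1, 5, 7, 8]}, 10)
    print(best)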
| 3.5 | 4 |
osbot_gsuite/apis/create_slides/GSBot_to_GDrive.py | pbx-gs/gsbot-gsuite | 3 | 12791194 | <reponame>pbx-gs/gsbot-gsuite<gh_stars>1-10
from osbot_gsuite.apis.GDrive import GDrive
from osbot_utils.utils.Dev import Dev
from osbot_utils.utils.Files import Files
class GSBot_to_GDrive:
def __init__(self,gsuite_secret_id=None):
self.target_folder = 'gsbot-graphs'
self.gdrive = GDrive(gsuite_secret_id)
def target_folder_id(self):
mime_type = 'application/vnd.google-apps.folder'
return self.gdrive.find_by_name(self.target_folder, mime_type).get('id')
def graph_id_in_gdrive(self, file_name):
file_metadata = self.gdrive.find_by_name(file_name)
if file_metadata:
return file_metadata.get('id')
return None
def upload_png_file_to_gdrive(self, png_file):
#png_file = GSBot_Helper().get_png_from_saved_graph(graph_name)
file_name = Files.file_name(png_file)
folder_id = self.target_folder_id()
file_id = self.graph_id_in_gdrive(file_name)
if file_id: # if the file exists, delete it before uploading the new version
Dev.pprint('deleting file: {0}'.format(file_id))
self.gdrive.file_delete(file_id)
return self.gdrive.file_upload(png_file, 'image/png', folder_id)
# can't use the code below because, the update wasn't working due to GSlides keeping (somewhere) a cache of the previous value
# if file_id is None:
# return self.gdrive.file_upload(png_file,'image/png', folder_id)
# else:
# return self.gdrive.file_update(png_file, 'image/png', file_id )
| 2.328125 | 2 |
stretchablecorr/postprocess.py | xdze2/stretchablecorr | 1 | 12791195 |
import numpy as np
def integrate_displacement(displ_img_to_img):
"""Sum the image-to-image displacement value to
obtain image-to-reference displacement,
add zeros at the begining
Parameters
----------
displ_img_to_img : 3D array
3D array of shape `(nbr images - 1, nbr points, 2)`
Returns
-------
3D array of shape `(nbr images, nbr points, 2)`
"""
# add zeros at the begining
zeros = np.zeros_like(displ_img_to_img[0])[np.newaxis, :, :]
displ_zero = np.concatenate([zeros, displ_img_to_img], axis=0)
displ_image_to_ref = np.cumsum(displ_zero, axis=0)
return displ_image_to_ref
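

# --- quick sanity check of integrate_displacement (added in the module's
# inline-test style; shapes and values are illustrative)
_displ = np.ones((3, 2, 2))
_displ_ref = integrate_displacement(_displ)
assert _displ_ref.shape == (4, 2, 2)
np.testing.assert_almost_equal(_displ_ref[0], np.zeros((2, 2)))
np.testing.assert_almost_equal(_displ_ref[-1], 3*np.ones((2, 2)))
# ---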
def get_center_points(xgrid, ygrid):
"""Cell center point coordinates"""
center_y = 0.25*(ygrid[1:, 1:] + ygrid[1:, :-1] + ygrid[:-1, 1:] + ygrid[:-1, :-1])
center_x = 0.25*(xgrid[1:, 1:] + xgrid[1:, :-1] + xgrid[:-1, 1:] + xgrid[:-1, :-1])
return center_x, center_y
def cellcentered_diff_2D(u, v):
"""
for a given 2D vector field [u, v](x, y) sampled on a grid
returns the centered finite difference for each cell
Cell abcd:
a───b
│ + │
c───d
du_x = (ub+ud)/2 - (ua+uc)/2
du_y = (ua+ub)/2 - (uc+ud)/2
"""
u_center_y = 0.5*(u[1:, :] + u[:-1, :])
u_center_x = 0.5*(u[:, 1:] + u[:, :-1])
v_center_y = 0.5*(v[1:, :] + v[:-1, :])
v_center_x = 0.5*(v[:, 1:] + v[:, :-1])
delta_u_x = u_center_y[:, 1:] - u_center_y[:, :-1]
delta_u_y = u_center_x[1:, :] - u_center_x[:-1, :]
delta_v_x = v_center_y[:, 1:] - v_center_y[:, :-1]
delta_v_y = v_center_x[1:, :] - v_center_x[:-1, :]
return delta_u_x, delta_u_y, delta_v_x, delta_v_y
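

# --- quick check of cellcentered_diff_2D on the coordinate fields u=x, v=y
# (added sanity check in the same inline-test style; grid values are illustrative)
_xg, _yg = np.meshgrid(np.linspace(0, 4, 5), np.linspace(0, 6, 4))
_du_x, _du_y, _dv_x, _dv_y = cellcentered_diff_2D(_xg, _yg)
np.testing.assert_almost_equal(_du_x, np.ones_like(_du_x))    # dx = 1
np.testing.assert_almost_equal(_du_y, np.zeros_like(_du_y))
np.testing.assert_almost_equal(_dv_x, np.zeros_like(_dv_x))
np.testing.assert_almost_equal(_dv_y, 2*np.ones_like(_dv_y))  # dy = 2
# ---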
def cellcentered_grad_rect2D(xgrid, ygrid, u, v):
"""Finite difference gradient for the vector fields u and v
evaluated at cell center
This is not a proper bilinear interpolation (ie. quad4 element).
The xy-grid has to be rectangular.
used to computed the "Displacement gradient tensor"
see Bower p.14
output: (dudx, dudy), (dvdx, dvdy)
"""
du_x, du_y, dv_x, dv_y = cellcentered_diff_2D(u, v)
dx, _ydx, _xdy, dy = cellcentered_diff_2D(xgrid, ygrid)
return [[du_x/dx, du_y/dy],
[dv_x/dx, dv_y/dy]]
# --- test cellcentered_grad_rect2D
xgrid, ygrid = np.meshgrid(np.linspace(-1, 1, 5)**2,
np.linspace(1, 5, 7)**0.5)
u = 5*xgrid + 3*ygrid
v = 2*xgrid + 7*ygrid
(dudx, dudy), (dvdx, dvdy) = cellcentered_grad_rect2D(xgrid, ygrid, u, v)
np.testing.assert_almost_equal(dudx, 5*np.ones_like(dudx))
np.testing.assert_almost_equal(dudy, 3*np.ones_like(dudx))
np.testing.assert_almost_equal(dvdx, 2*np.ones_like(dudx))
np.testing.assert_almost_equal(dvdy, 7*np.ones_like(dudx))
# ---
def get_LagrangeStrainTensor(xgrid, ygrid, u, v):
"""Lagrange Strain Tensor (E)
F = grad(u) + Id
E = 1/2*( FF^T - Id )
Parameters
----------
xgrid, ygrid : 2d arrays of shape (n_y, n_x)
underformed grid points
u, v : 2d arrays of shape (n_y, n_x)
displacements values (u along x, v along y)
Returns
-------
4D array of shape (n_y, n_x, 2, 2)
Lagrange Strain Tensor for all grid points
"""
grad_u, grad_v = cellcentered_grad_rect2D(xgrid, ygrid, u, v)
grad_u = np.stack(grad_u, axis=2)
grad_v = np.stack(grad_v, axis=2)
# u = 1*xgrid + 3*ygrid
# v = 5*xgrid + 7*ygrid
G = np.stack([grad_u, grad_v], axis=3)
G = np.transpose(G, axes=(0, 1, 3, 2))
# G >>> array([[1., 3.], [5., 7.]])
Id = np.ones((*grad_u.shape[:2], 2, 2))
Id[:, :] = np.eye(2, 2)
# Id[0, 0] >> array([[1., 0.], [0., 1.]])
F = G + Id
# Lagrange Strain Tensor
E = 0.5*( np.einsum('...ki,...kj', F, F) - Id )
return E
# --- test get_LagrangeStrainTensor
xgrid, ygrid = np.meshgrid(np.linspace(-1, 1, 5),
np.linspace(1, 5, 7))
u = 1*xgrid + 3*ygrid
v = 5*xgrid + 7*ygrid
E = get_LagrangeStrainTensor(xgrid, ygrid, u, v)
# array([[[[14., 23.],
# [23., 36.]],
np.testing.assert_almost_equal(E[:, :, 0 ,0], 14*np.ones_like(E[:, :, 0 ,1]))
np.testing.assert_almost_equal(E[:, :, 0 ,1], 23*np.ones_like(E[:, :, 0 ,1]))
np.testing.assert_almost_equal(E[:, :, 1 ,1], 36*np.ones_like(E[:, :, 0 ,1]))
np.testing.assert_almost_equal(E[:, :, 1 ,0], 23*np.ones_like(E[:, :, 0 ,1]))
# ---
def get_InfinitesimalStrainTensor(xgrid, ygrid, u, v):
"""Small Displacement Strain Tensor (E)
E = 1/2*( grad(u) + grad(u)^T )
Parameters
----------
xgrid, ygrid : 2d arrays of shape (n_y, n_x)
underformed grid points
u, v : 2d arrays of shape (n_y, n_x)
displacements values (u along x, v along y)
Returns
-------
4D array of shape (n_y, n_x, 2, 2)
Lagrange Strain Tensor for all grid points
"""
grad_u, grad_v = cellcentered_grad_rect2D(xgrid, ygrid, u, v)
grad_u = np.stack(grad_u, axis=2)
grad_v = np.stack(grad_v, axis=2)
# u = 1*xgrid + 3*ygrid
# v = 5*xgrid + 7*ygrid
G = np.stack([grad_u, grad_v], axis=3)
G = np.transpose(G, axes=(0, 1, 3, 2))
# G >>> array([[1., 3.], [5., 7.]])
# Strain Tensor
E = 0.5*( G + np.transpose(G, axes=(0, 1, 3, 2)) )
return E
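

# --- test get_InfinitesimalStrainTensor (added sanity check mirroring the
# Lagrange test above; with u = 1x + 3y and v = 5x + 7y the displacement
# gradient is G = [[1, 3], [5, 7]], so E = (G + G^T)/2 = [[1, 4], [4, 7]])
xgrid, ygrid = np.meshgrid(np.linspace(-1, 1, 5),
                           np.linspace(1, 5, 7))
u = 1*xgrid + 3*ygrid
v = 5*xgrid + 7*ygrid
E = get_InfinitesimalStrainTensor(xgrid, ygrid, u, v)
np.testing.assert_almost_equal(E[:, :, 0, 0], 1*np.ones_like(E[:, :, 0, 1]))
np.testing.assert_almost_equal(E[:, :, 0, 1], 4*np.ones_like(E[:, :, 0, 1]))
np.testing.assert_almost_equal(E[:, :, 1, 0], 4*np.ones_like(E[:, :, 0, 1]))
np.testing.assert_almost_equal(E[:, :, 1, 1], 7*np.ones_like(E[:, :, 0, 1]))
# ---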
def bilinear_fit(points, displacements):
"""Performs a bilinear fit on the displacements field
Solve the equation u = A*x + t
Parameters
----------
points : nd-array (nbr_points, 2)
coordinates of points (x, y)
displacements : nd-array (nbr_points, 2)
displacement for each point (u, v)
could include NaN
Returns
-------
nd-array (2, 3)
coefficients matrix (affine transformation + translation)
nd-array (nbr_points, 2)
residuals for each points
"""
u, v = displacements.T
mask = np.logical_not(np.logical_or(np.isnan(u), np.isnan(v)))
u, v = u[mask], v[mask]
x, y = points[mask, :].T
ones = np.ones_like(x)
M = np.vstack([x, y, ones]).T
p_uy, _residual_y, _rank, _s = np.linalg.lstsq(M, v, rcond=None)
p_ux, _residual_x, _rank, _s = np.linalg.lstsq(M, u, rcond=None)
coefficients = np.vstack([p_ux, p_uy])
## Unbiased estimator variance (see p47 T. Hastie)
#sigma_hat_x = np.sqrt(residual_x/(M.shape[0]-M.shape[1]-1))
#sigma_hat_y = np.sqrt(residual_y/(M.shape[0]-M.shape[1]-1))
# Residuals:
u_linear = np.matmul( M, p_ux )
v_linear = np.matmul( M, p_uy )
residuals_x = u - u_linear
residuals_y = v - v_linear
residuals_xy = np.vstack([residuals_x, residuals_y]).T
# Merge with ignored NaN values:
residuals_NaN = np.full(displacements.shape, np.nan)
residuals_NaN[mask, :] = residuals_xy
return coefficients, residuals_NaN
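

# --- test bilinear_fit (added sanity check; the affine coefficients below are
# arbitrary illustrative values, so the fit should recover them exactly)
_xp, _yp = np.meshgrid(np.linspace(0., 4., 5), np.linspace(0., 3., 4))
_pts = np.column_stack([_xp.ravel(), _yp.ravel()])
_A_true = np.array([[2.0, 0.5, 1.0],
                    [-0.3, 1.5, -2.0]])
_displ_fit = np.column_stack([_A_true[0, 0]*_pts[:, 0] + _A_true[0, 1]*_pts[:, 1] + _A_true[0, 2],
                              _A_true[1, 0]*_pts[:, 0] + _A_true[1, 1]*_pts[:, 1] + _A_true[1, 2]])
_coeffs, _residuals = bilinear_fit(_pts, _displ_fit)
np.testing.assert_almost_equal(_coeffs, _A_true)
np.testing.assert_almost_equal(_residuals, np.zeros_like(_residuals))
# ---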
| 3.3125 | 3 |
models/sdnet_ada.py | vios-s/RA_FA_Cardiac | 7 | 12791196 | <gh_stars>1-10
import torch
import torch.nn as nn
import torch.nn.functional as F
import sys
import time
from models.unet_parts import *
from models.blocks import *
from models.rounding import *
from models.spectral_norm import *
from models.distance_corr import *
from models.spade_resblk import *
device = torch.device('cuda:0')
# content
class Segmentor(nn.Module):
def __init__(self, num_output_channels, num_classes):
super(Segmentor, self).__init__()
"""
"""
self.num_output_channels = num_output_channels
self.num_classes = num_classes+1 # check again
self.conv1 = conv_bn_relu(self.num_output_channels, 64, 3, 1, 1)
self.conv2 = conv_bn_relu(64, 64, 3, 1, 1)
self.pred = nn.Conv2d(64, self.num_classes, 1, 1, 0)
def forward(self, x):
out = self.conv1(x)
out = self.conv2(out)
out = self.pred(out)
out = F.softmax(out, dim=1)
return out
class AEncoder(nn.Module):
def __init__(self, width, height, ndf, num_output_channels, norm, upsample):
super(AEncoder, self).__init__()
"""
Build an encoder to extract anatomical information from the image.
"""
self.width = width
self.height = height
self.ndf = ndf
self.num_output_channels = num_output_channels
self.norm = norm
self.upsample = upsample
self.unet = UNet(n_channels=1, n_classes=self.num_output_channels, bilinear=True)
self.rounding = RoundLayer()
def forward(self, x):
out = self.unet(x)
out = F.softmax(out, dim=1)
out = self.rounding(out)
return out
class UNet(nn.Module):
def __init__(self, n_channels, n_classes, bilinear=True):
super(UNet, self).__init__()
self.n_channels = n_channels
self.n_classes = n_classes
self.bilinear = bilinear
self.inc = DoubleConv(n_channels, 64)
self.down1 = Down(64, 128)
self.down2 = Down(128, 256)
self.down3 = Down(256, 512)
factor = 2 if bilinear else 1
self.down4 = Down(512, 1024 // factor)
self.up1 = Up(1024, 512 // factor, bilinear)
self.up2 = Up(512, 256 // factor, bilinear)
self.up3 = Up(256, 128 // factor, bilinear)
self.up4 = Up(128, 64, bilinear)
self.outc = OutConv(64, n_classes)
def forward(self, x):
x1 = self.inc(x)
x2 = self.down1(x1)
x3 = self.down2(x2)
x4 = self.down3(x3)
x5 = self.down4(x4)
x = self.up1(x5, x4)
x = self.up2(x, x3)
x = self.up3(x, x2)
x = self.up4(x, x1)
logits = self.outc(x)
return logits
# style
class AdaptiveInstanceNorm2d(nn.Module):
def __init__(self, num_features, eps=1e-5, momentum=0.1):
super(AdaptiveInstanceNorm2d, self).__init__()
self.num_features = num_features
self.eps = eps
self.momentum = momentum
# weight and bias are dynamically assigned
self.weight = None
self.bias = None
# just dummy buffers, not used
self.register_buffer('running_mean', torch.zeros(num_features))
self.register_buffer('running_var', torch.ones(num_features))
def forward(self, x):
assert self.weight is not None and self.bias is not None, "Please assign weight and bias before calling AdaIN!"
b, c = x.size(0), x.size(1)
running_mean = self.running_mean.repeat(b)
running_var = self.running_var.repeat(b)
# Apply instance norm
x_reshaped = x.contiguous().view(1, b * c, *x.size()[2:])
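        # viewing the batch as (1, b*c, H, W) makes F.batch_norm normalise each
        # sample's channels independently (i.e. instance norm), while weight/bias
        # carry the per-channel AdaIN parameters assigned from the style MLP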
out = F.batch_norm(
x_reshaped, running_mean, running_var, self.weight, self.bias,
True, self.momentum, self.eps)
return out.view(b, c, *x.size()[2:])
def __repr__(self):
return self.__class__.__name__ + '(' + str(self.num_features) + ')'
class LayerNorm(nn.Module):
def __init__(self, num_features, eps=1e-5, affine=True):
super(LayerNorm, self).__init__()
self.num_features = num_features
self.affine = affine
self.eps = eps
if self.affine:
self.gamma = nn.Parameter(torch.Tensor(num_features).uniform_())
self.beta = nn.Parameter(torch.zeros(num_features))
def forward(self, x):
shape = [-1] + [1] * (x.dim() - 1)
# print(x.size())
if x.size(0) == 1:
# These two lines run much faster in pytorch 0.4 than the two lines listed below.
mean = x.view(-1).mean().view(*shape)
std = x.view(-1).std().view(*shape)
else:
mean = x.view(x.size(0), -1).mean(1).view(*shape)
std = x.view(x.size(0), -1).std(1).view(*shape)
x = (x - mean) / (std + self.eps)
if self.affine:
shape = [1, -1] + [1] * (x.dim() - 2)
x = x * self.gamma.view(*shape) + self.beta.view(*shape)
return x
def l2normalize(v, eps=1e-12):
return v / (v.norm() + eps)
class LinearBlock(nn.Module):
def __init__(self, input_dim, output_dim, norm='none', activation='relu'):
super(LinearBlock, self).__init__()
use_bias = True
# initialize fully connected layer
if norm == 'sn':
self.fc = SpectralNorm(nn.Linear(input_dim, output_dim, bias=use_bias))
else:
self.fc = nn.Linear(input_dim, output_dim, bias=use_bias)
# initialize normalization
norm_dim = output_dim
if norm == 'bn':
self.norm = nn.BatchNorm1d(norm_dim)
elif norm == 'in':
self.norm = nn.InstanceNorm1d(norm_dim)
elif norm == 'ln':
self.norm = LayerNorm(norm_dim)
elif norm == 'none' or norm == 'sn':
self.norm = None
else:
assert 0, "Unsupported normalization: {}".format(norm)
# initialize activation
if activation == 'relu':
self.activation = nn.ReLU(inplace=True)
elif activation == 'lrelu':
self.activation = nn.LeakyReLU(0.2, inplace=True)
elif activation == 'prelu':
self.activation = nn.PReLU()
elif activation == 'selu':
self.activation = nn.SELU(inplace=True)
elif activation == 'tanh':
self.activation = nn.Tanh()
elif activation == 'none':
self.activation = None
else:
assert 0, "Unsupported activation: {}".format(activation)
def forward(self, x):
out = self.fc(x)
if self.norm:
out = self.norm(out)
if self.activation:
out = self.activation(out)
return out
class Conv2dBlock(nn.Module):
def __init__(self, input_dim ,output_dim, kernel_size, stride,
padding=0, norm='none', activation='relu', pad_type='zero'):
super(Conv2dBlock, self).__init__()
self.use_bias = True
# initialize padding
if pad_type == 'reflect':
self.pad = nn.ReflectionPad2d(padding)
elif pad_type == 'replicate':
self.pad = nn.ReplicationPad2d(padding)
elif pad_type == 'zero':
self.pad = nn.ZeroPad2d(padding)
else:
assert 0, "Unsupported padding type: {}".format(pad_type)
# initialize normalization
norm_dim = output_dim
if norm == 'bn':
self.norm = nn.BatchNorm2d(norm_dim)
elif norm == 'in':
#self.norm = nn.InstanceNorm2d(norm_dim, track_running_stats=True)
self.norm = nn.InstanceNorm2d(norm_dim)
elif norm == 'ln':
self.norm = LayerNorm(norm_dim)
elif norm == 'adain':
self.norm = AdaptiveInstanceNorm2d(norm_dim)
elif norm == 'none' or norm == 'sn':
self.norm = None
else:
assert 0, "Unsupported normalization: {}".format(norm)
# initialize activation
if activation == 'relu':
self.activation = nn.ReLU(inplace=True)
elif activation == 'lrelu':
self.activation = nn.LeakyReLU(0.2, inplace=True)
elif activation == 'prelu':
self.activation = nn.PReLU()
elif activation == 'selu':
self.activation = nn.SELU(inplace=True)
elif activation == 'tanh':
self.activation = nn.Tanh()
elif activation == 'none':
self.activation = None
else:
assert 0, "Unsupported activation: {}".format(activation)
# initialize convolution
if norm == 'sn':
self.conv = SpectralNorm(nn.Conv2d(input_dim, output_dim, kernel_size, stride, bias=self.use_bias))
else:
self.conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride, bias=self.use_bias)
def forward(self, x):
x = self.conv(self.pad(x))
if self.norm:
x = self.norm(x)
if self.activation:
x = self.activation(x)
return x
class StyleEncoder(nn.Module):
def __init__(self, style_dim, norm, activ, pad_type):
super(StyleEncoder, self).__init__()
dim = 64
self.model = []
self.model += [Conv2dBlock(1, dim, 7, 1, 3, norm=norm, activation=activ, pad_type=pad_type)]
for i in range(2):
self.model += [Conv2dBlock(dim, 2 * dim, 4, 2, 1, norm=norm, activation=activ, pad_type=pad_type)]
dim *= 2
# for i in range(n_downsample - 2):
# self.model += [Conv2dBlock(dim, dim, 4, 2, 1, norm=norm, activation=activ, pad_type=pad_type)]
self.model += [nn.AdaptiveAvgPool2d(1)] # global average pooling
self.model += [nn.Conv2d(dim, style_dim, 1, 1, 0)]
self.model = nn.Sequential(*self.model)
self.output_dim = dim
def forward(self, x):
return self.model(x)
# decoder
class Ada_Decoder(nn.Module):
# AdaIN auto-encoder architecture
def __init__(self, decoder_type, anatomy_out_channels, z_length, num_mask_channels):
super(Ada_Decoder, self).__init__()
"""
"""
self.dec = Decoder(anatomy_out_channels, res_norm='adain', activ='relu', pad_type='reflect')
# MLP to generate AdaIN parameters
self.mlp = MLP(z_length, self.get_num_adain_params(self.dec), 256, 3, norm='none', activ='relu')
def forward(self, a, z, type):
# reconstruct an image
images_recon = self.decode(a, z)
return images_recon
def decode(self, content, style):
# decode content and style codes to an image
adain_params = self.mlp(style)
self.assign_adain_params(adain_params, self.dec)
images = self.dec(content)
return images
def assign_adain_params(self, adain_params, model):
# assign the adain_params to the AdaIN layers in model
for m in model.modules():
if m.__class__.__name__ == "AdaptiveInstanceNorm2d":
mean = adain_params[:, :m.num_features]
std = adain_params[:, m.num_features:2*m.num_features]
m.bias = mean.contiguous().view(-1)
m.weight = std.contiguous().view(-1)
if adain_params.size(1) > 2*m.num_features:
adain_params = adain_params[:, 2*m.num_features:]
def get_num_adain_params(self, model):
# return the number of AdaIN parameters needed by the model
num_adain_params = 0
for m in model.modules():
if m.__class__.__name__ == "AdaptiveInstanceNorm2d":
num_adain_params += 2*m.num_features
return num_adain_params
class Decoder(nn.Module):
def __init__(self, dim, output_dim=1, res_norm='adain', activ='relu', pad_type='zero'):
super(Decoder, self).__init__()
self.model = []
# upsampling blocks
for i in range(3):
self.model += [Conv2dBlock(dim, dim // 2, 3, 1, 1, norm='ln', activation=activ, pad_type=pad_type)]
dim //= 2
# use reflection padding in the last conv layer
self.model += [Conv2dBlock(dim, output_dim, 7, 1, 3, norm='none', activation='tanh', pad_type=pad_type)]
self.model = nn.Sequential(*self.model)
def forward(self, x):
return self.model(x)
class MLP(nn.Module):
def __init__(self, input_dim, output_dim, dim, n_blk, norm='none', activ='relu'):
super(MLP, self).__init__()
self.model = []
self.model += [LinearBlock(input_dim, dim, norm=norm, activation=activ)]
for i in range(n_blk - 2):
self.model += [LinearBlock(dim, dim, norm=norm, activation=activ)]
self.model += [LinearBlock(dim, output_dim, norm='none', activation='none')] # no output activations
self.model = nn.Sequential(*self.model)
def forward(self, x):
return self.model(x.view(x.size(0), -1))
class Discriminator(nn.Module):
def __init__(self, ndf, num_classes):
super(Discriminator, self).__init__()
self.ndf = ndf
self.num_classes = num_classes + 1
self.main = []
# input is (nc) x 224 x 224
self.main += [nn.Conv2d(self.num_classes, ndf, 4, 2, 1, bias=False)] #64x112x112
self.main += [nn.LeakyReLU(0.2, inplace=True)]
self.main += [SpectralNorm(nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False))] #128x56x56
self.main += [SpectralNorm(nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False))] #256x28x28
self.main += [SpectralNorm(nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False))] #512x14x14
self.main += [SpectralNorm(nn.Conv2d(ndf * 8, ndf * 8, 4, 2, 1, bias=False))] #1024x7x7
# state size. (ndf*16) x 14 x 14
self.out = nn.Linear(self.ndf * 8 * 7 * 7, 1)
self.main = nn.Sequential(*self.main)
def forward(self, x):
b_size = x.size(0)
down_out = self.main(x)
down_out = down_out.view(b_size, -1)
output = self.out(down_out)
return output.view(-1, 1).squeeze(1)
class SDNet(nn.Module):
def __init__(self, width, height, num_classes, ndf, z_length, norm, upsample, decoder_type, anatomy_out_channels, num_mask_channels):
super(SDNet, self).__init__()
"""
Args:
width: input width
height: input height
upsample: upsampling type (nearest | bilateral)
nclasses: number of semantice segmentation classes
"""
self.h = height
self.w = width
self.ndf = ndf
self.z_length = z_length
self.anatomy_out_channels = anatomy_out_channels
self.norm = norm
self.upsample = upsample
self.num_classes = num_classes
self.decoder_type = decoder_type
self.num_mask_channels = num_mask_channels
self.m_encoder = StyleEncoder(z_length, norm='none', activ='relu', pad_type='reflect')
self.a_encoder = AEncoder(self.h, self.w, self.ndf, self.anatomy_out_channels, self.norm, self.upsample)
self.segmentor = Segmentor(self.anatomy_out_channels, self.num_classes)
self.decoder = Ada_Decoder(self.decoder_type, self.anatomy_out_channels, self.z_length, self.num_mask_channels)
def forward(self, x, mask, script_type):
# z_out = torch.randn(x.shape[0], self.z_length, 1, 1).to(device)
z_out = self.m_encoder(x)
a_out = self.a_encoder(x)
seg_pred = self.segmentor(a_out)
logvar_out = None
mu_out = None
#t0 = time.time()
if script_type == 'training':
reco = self.decoder(a_out, z_out, self.decoder_type)
mu_out_tilde = self.m_encoder(reco)
elif script_type == 'val' or script_type == 'test':
z_out = self.m_encoder(x)
reco = self.decoder(a_out, z_out, self.decoder_type)
mu_out_tilde = None
return reco, z_out, mu_out_tilde, a_out, seg_pred, mu_out, logvar_out
def reconstruct(self, a_out, z_out):
reco = self.decoder(a_out, z_out, self.decoder_type)
return reco | 2.125 | 2 |
src/app/models/audio_features.py | okjuan/music-lib-bot | 1 | 12791197 | class AudioFeatures:
MIN_KEY_VALUE, MAX_KEY_VALUE = 0, 11
MIN_MODE_VALUE, MAX_MODE_VALUE = 0, 1
MIN_TIME_SIGNATURE, MAX_TIME_SIGNATURE = 0, 11
MIN_TEMPO, MAX_TEMPO = 0, 500
MIN_DURATION_MS, MAX_DURATION_MS = 0, 900000
MIN_LOUDNESS, MAX_LOUDNESS = -60, 0
MIN_PERCENTAGE, MAX_PERCENTAGE = 0, 1
MIN_POPULARITY, MAX_POPULARITY = 0, 100
MIN_DANCEABILITY = MIN_ENERGY = MIN_SPEECHINESS = MIN_ACOUSTICNESS = MIN_PERCENTAGE
MIN_INSTRUMENTALNESS = MIN_LIVENESS = MIN_VALENCE = MIN_PERCENTAGE
MAX_DANCEABILITY = MAX_ENERGY = MAX_SPEECHINESS = MAX_ACOUSTICNESS = MAX_PERCENTAGE
MAX_INSTRUMENTALNESS = MAX_LIVENESS = MAX_VALENCE = MAX_PERCENTAGE
def __init__(self, danceability, energy, key, loudness, mode, speechiness, acousticness, instrumentalness, liveness, valence, tempo, duration_ms, time_signature):
"""
Params:
danceability (float): [0,1]
energy (float): [0,1]
key (int).
loudness (float): decibels?
mode (int).
speechiness (float): [0,1]
instrumentalness (float): [0,1]
acousticness (float): [0,1]
liveness (float): [0,1]
valence (float): [0,1]
tempo (float): bpm.
duration_ms (int).
time_signature (int).
"""
self.danceability = danceability
self.energy = energy
self.key = key
self.loudness = loudness
self.mode = mode
self.speechiness = speechiness
self.acousticness = acousticness
self.instrumentalness = instrumentalness
self.liveness = liveness
self.valence = valence
self.tempo = tempo
self.duration_ms = duration_ms
self.time_signature = time_signature
def from_spotify_audio_features(spotify_audio_features):
return AudioFeatures(
spotify_audio_features["danceability"],
spotify_audio_features["energy"],
spotify_audio_features["key"],
spotify_audio_features["loudness"],
spotify_audio_features["mode"],
spotify_audio_features["speechiness"],
spotify_audio_features["acousticness"],
spotify_audio_features["instrumentalness"],
spotify_audio_features["liveness"],
spotify_audio_features["valence"],
spotify_audio_features["tempo"],
spotify_audio_features["duration_ms"],
spotify_audio_features["time_signature"],
)
def with_minimum_values():
return AudioFeatures(
AudioFeatures.MIN_DANCEABILITY,
AudioFeatures.MIN_ENERGY,
AudioFeatures.MIN_KEY_VALUE,
AudioFeatures.MIN_LOUDNESS,
AudioFeatures.MIN_MODE_VALUE,
AudioFeatures.MIN_SPEECHINESS,
AudioFeatures.MIN_ACOUSTICNESS,
AudioFeatures.MIN_INSTRUMENTALNESS,
AudioFeatures.MIN_LIVENESS,
AudioFeatures.MIN_VALENCE,
AudioFeatures.MIN_TEMPO,
AudioFeatures.MIN_DURATION_MS,
AudioFeatures.MIN_TIME_SIGNATURE,
)
def with_maximum_values():
return AudioFeatures(
AudioFeatures.MAX_DANCEABILITY,
AudioFeatures.MAX_ENERGY,
AudioFeatures.MAX_KEY_VALUE,
AudioFeatures.MAX_LOUDNESS,
AudioFeatures.MAX_MODE_VALUE,
AudioFeatures.MAX_SPEECHINESS,
AudioFeatures.MAX_ACOUSTICNESS,
AudioFeatures.MAX_INSTRUMENTALNESS,
AudioFeatures.MAX_LIVENESS,
AudioFeatures.MAX_VALENCE,
AudioFeatures.MAX_TEMPO,
AudioFeatures.MAX_DURATION_MS,
AudioFeatures.MAX_TIME_SIGNATURE,
) | 2.609375 | 3 |
Ch5 - Python Crash Course/Code/5.13_create_and_write_on_a_file.py | 1110sillabo/ProgrammingDigitalHumanitiesBook | 2 | 12791198 | # Create the file
with open('dataresults.txt', 'a') as file:
file.write('These are the results of our experiment')
file.write('\n') # Add new line with \n
file.write('Account X has N followers on social Z')
file.write('\n')
| 3.453125 | 3 |
pelicanconf.py | johnwquarles/japan-trip | 0 | 12791199 | <filename>pelicanconf.py
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
AUTHOR = 'JQ'
SITENAME = 'Japan Trip 2018'
SITEURL = 'http://localhost:2018'
STATIC_PATHS = ['images', 'pdfs']
PATH = 'content'
TIMEZONE = 'America/Chicago'
DEFAULT_LANG = 'en'
DEFAULT_DATE_FORMAT = '%-m/%-d/%Y'
DEFAULT_CATEGORY = 'general'
DISPLAY_PAGES_ON_MENU = True
THEME = './themes/attila'
HEADER_COVER = 'images/skytree.jpg'
SHOW_FULL_ARTICLE = False
SITESUBTITLE = 'read this stuff plz'
AUTHORS_BIO = {
"jq": {
"name": "JQ",
"cover": "images/arahira.jpg",
"image": "images/avatar.gif",
"website": "http://quarl.es",
"bio": "Inspiring others to use like half their vacation time since 10/2017."
}
}
# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
AUTHOR_FEED_ATOM = None
AUTHOR_FEED_RSS = None
# LINKS = (
# ('Reddit - Japan Travel Advice Wiki', 'https://www.reddit.com/r/JapanTravel/wiki/traveladvice'),
# ('Reddit - Japan Travel Advice FAQ', 'https://www.reddit.com/r/JapanTravel/wiki/faqs/japantravel'),
# )
SOCIAL = (
('envelope','mailto:<EMAIL>'),
)
DEFAULT_PAGINATION = 10
# Uncomment following line if you want document-relative URLs when developing
#RELATIVE_URLS = True
| 2.03125 | 2 |
pretix_cwa/tasks.py | pretix/pretix-cwa | 3 | 12791200 | import logging
from pretix.base.email import get_email_context
from pretix.base.i18n import language
from pretix.base.models import OrderPosition
from pretix.base.services.mail import SendMailException
from pretix.base.services.tasks import EventTask
from pretix.celery_app import app
logger = logging.getLogger(__name__)
@app.task(base=EventTask, bind=True)
def send_email(self, event, position):
op = OrderPosition.objects.get(pk=position)
with language(op.order.locale, event.settings.region):
email_template = event.settings.cwa_checkin_email_body
email_subject = str(event.settings.cwa_checkin_email_subject)
email_context = get_email_context(event=event, order=op.order, position=op)
try:
if op.attendee_email:
op.send_mail(
email_subject,
email_template,
email_context,
"pretix_cwa.order.position.email.cwa",
)
else:
op.order.send_mail(
email_subject,
email_template,
email_context,
"pretix_cwa.order.email.cwa",
)
except SendMailException:
logger.exception("CWA reminder email could not be sent")
| 1.859375 | 2 |
byceps/services/shop/order/transfer/number.py | GyBraLAN/byceps | 0 | 12791201 | """
byceps.services.shop.order.transfer.number
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2021 <NAME>
:License: Revised BSD (see `LICENSE` file for details)
"""
from dataclasses import dataclass
from typing import NewType
from uuid import UUID
from ...shop.transfer.models import ShopID
OrderNumberSequenceID = NewType('OrderNumberSequenceID', UUID)
@dataclass(frozen=True)
class OrderNumberSequence:
id: OrderNumberSequenceID
shop_id: ShopID
prefix: str
value: int
OrderNumber = NewType('OrderNumber', str)
| 2.6875 | 3 |
server/image/image_controller.py | talandar/GameControl | 0 | 12791202 | <filename>server/image/image_controller.py<gh_stars>0
"""This class maintains the image database for display to clients"""
import os
class ImageDatabase(object):
"""container for image data"""
VALID_IMAGE_TYPES = ["png", "jpg"]
CATEGORIES = set()
FILE_DATA = {}
def __init__(self, root_dir):
self.root_dir = root_dir
self._scan_for_images()
def _scan_for_images(self):
        for root, _dirs, f_names in os.walk(self.root_dir):
            # treat each sub-directory (relative to the scanned root) as a category
            category = os.path.relpath(root, self.root_dir)
            if category == ".":
                continue
            self.CATEGORIES.add(category)
            self.FILE_DATA.setdefault(category, [])
            for f_name in f_names:
                file_path = os.path.join(root, f_name)
                file_type = file_path[file_path.rindex(".") + 1:].lower()
                if file_type in self.VALID_IMAGE_TYPES:
                    self.FILE_DATA[category].append(file_path)
def categories(self):
return self.CATEGORIES.copy()
    def files_in_category(self, category):
        return list(self.FILE_DATA.get(category, []))
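

# Usage sketch (illustrative path; adjust to wherever the image folders live):
#   db = ImageDatabase("/srv/gamecontrol/images")
#   print(db.categories())
#   print(db.files_in_category("maps"))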
| 3.3125 | 3 |
face_engine/models/dlib_models.py | guesswh0/face_engine | 10 | 12791203 | <gh_stars>1-10
import os
import dlib
import numpy as np
from face_engine import RESOURCES
from face_engine.exceptions import FaceNotFoundError
from face_engine.fetching import fetch_file
from face_engine.models import Detector, Embedder
# download dependent models
for url in [
"http://dlib.net/files/mmod_human_face_detector.dat.bz2",
"http://dlib.net/files/dlib_face_recognition_resnet_model_v1.dat.bz2",
"http://dlib.net/files/shape_predictor_5_face_landmarks.dat.bz2"
]:
fetch_file(url, os.path.join(RESOURCES, 'models/dlib'))
class HOGDetector(Detector, name='hog'):
"""Dlib "Histogram Oriented Gradients" model.
.. note::
* bounding box sizes are equal for all detections.
* detector does not provide confidence scores for detections.
"""
def __init__(self):
self._face_detector = dlib.get_frontal_face_detector()
def detect(self, image):
detections = self._face_detector(image)
n_det = len(detections)
if n_det < 1:
raise FaceNotFoundError
height, width = image.shape[0:2]
bounding_boxes = np.array([
[
max(rect.left(), 0),
max(rect.top(), 0),
min(rect.right(), width),
min(rect.bottom(), height)
]
for rect in detections])
return bounding_boxes, dict()
class MMODDetector(Detector, name='mmod'):
"""Dlib pre-trained CNN model with "Max-Margin Object Detection"
loss function.
.. note::
* bounding box sizes are equal for all detections.
* to run in realtime requires high-end Nvidia GPU with CUDA/cuDNN.
References:
1. http://dlib.net/python/index.html
2. https://docs.nvidia.com/deeplearning/sdk/cudnn-install/index.html
3. http://dlib.net/files/mmod_human_face_detector.dat.bz2
"""
def __init__(self) -> None:
self._cnn_face_detector = dlib.cnn_face_detection_model_v1(
os.path.join(RESOURCES, "models/dlib/mmod_human_face_detector.dat"))
def detect(self, image):
detections = self._cnn_face_detector(image)
n_det = len(detections)
if n_det < 1:
raise FaceNotFoundError
height, width = image.shape[:2]
det_scores = list()
bounding_boxes = list()
for det in detections:
bounding_boxes.append(
[
max(det.rect.left(), 0),
max(det.rect.top(), 0),
min(det.rect.right(), width),
min(det.rect.bottom(), height)
])
det_scores.append(det.confidence)
bounding_boxes = np.array(bounding_boxes)
extra = dict(det_scores=det_scores)
return bounding_boxes, extra
class ResNetEmbedder(Embedder, name='resnet', dim=128):
""" Dlib pre-trained face recognition ResNet model.
.. note::
* face alignment pre-processing used with 5 point shape_predictor.
References:
1. http://dlib.net/python/index.html
2. http://dlib.net/files/dlib_face_recognition_resnet_model_v1.dat.bz2
3. http://dlib.net/files/shape_predictor_5_face_landmarks.dat.bz2
"""
def __init__(self) -> None:
self._face_encoder = dlib.face_recognition_model_v1(
os.path.join(RESOURCES, "models/dlib/dlib_face_recognition_resnet_model_v1.dat"))
self._shape_predictor = dlib.shape_predictor(
os.path.join(RESOURCES, "models/dlib/shape_predictor_5_face_landmarks.dat"))
def compute_embeddings(self, image, bounding_boxes, **kwargs):
shapes = dlib.full_object_detections()
for bounding_box in bounding_boxes:
bb = dlib.rectangle(bounding_box[0], bounding_box[1],
bounding_box[2], bounding_box[3])
shapes.append(self._shape_predictor(image, bb))
# Aligned to shape and cropped by bounding boxes face images
# default shape (n_faces, 150, 150, 3)
face_images = dlib.get_face_chips(image, shapes)
embeddings = self._face_encoder.compute_face_descriptor(face_images)
return np.array(embeddings)
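

if __name__ == '__main__':
    # Minimal usage sketch added for illustration: 'face.jpg' is a placeholder
    # path, not a file shipped with the package.
    image = dlib.load_rgb_image('face.jpg')
    detector = HOGDetector()
    embedder = ResNetEmbedder()
    boxes, _extra = detector.detect(image)  # raises FaceNotFoundError if no face is found
    embeddings = embedder.compute_embeddings(image, boxes)
    print(embeddings.shape)  # -> (n_faces, 128)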
| 2.609375 | 3 |
tests/test_week1_scc.py | manoldonev/algo2-assignments | 0 | 12791204 |
"""Week1 Test Cases: Strongly Connected Components"""
from week1.scc import scc
def test_scc():
graph = {
'a': ['c'],
'b': ['a'],
'c': ['b'],
'd': ['b', 'f'],
'e': ['d'],
'f': ['e'],
'g': ['e', 'h'],
'h': ['i'],
'i': ['g']
}
assert scc(graph) == {'c': ['c', 'b', 'a'], 'd': [
'd', 'f', 'e'], 'g': ['g', 'h', 'i']}
def test_scc_reversed_graph():
graph = {
'a': ['b'],
'b': ['c', 'd'],
'c': ['a'],
'd': ['e'],
'e': ['f', 'g'],
'f': ['d'],
'g': ['i'],
'h': ['g'],
'i': ['h']
}
assert scc(graph) == {'g': ['g', 'i', 'h'], 'd': [
'd', 'e', 'f'], 'b': ['b', 'c', 'a']}
def test_scc_node_no_outbound_edges():
graph = {
'a': ['b'],
'b': ['c', 'd'],
'c': ['a'],
'd': ['e'],
'e': ['f', 'g'],
'f': ['d'],
'g': ['i'],
'h': ['g'],
'i': []
}
assert scc(graph) == {'i': ['i'], 'g': ['g'], 'h': [
'h'], 'd': ['d', 'e', 'f'], 'b': ['b', 'c', 'a']}
def test_scc_no_edges():
graph = {
'a': [],
'b': [],
'c': []
}
assert scc(graph) == {'a': ['a'], 'c': ['c'], 'b': ['b']}
def test_scc_single_node():
graph = {
'a': []
}
assert scc(graph) == {'a': ['a']}
def test_scc_single_edge():
graph = {
'a': ['b'],
'b': []
}
assert scc(graph) == {'a': ['a'], 'b': ['b']}
| 3.359375 | 3 |
test/src/data/test_mask.py | gusriobr/vineyard-sketcher | 0 | 12791205 | <reponame>gusriobr/vineyard-sketcher
import os
import unittest
import numpy as np
import cfg_test as tcfg
from skimage import io
from image.mask import MaskMerger, clean_instaces, PrimeIdMasMerger
class TestMaskMerger(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.iterations = cls._load_images()
@classmethod
def _load_images(self):
# iterations
return [
['image_0_0_0.png'],
['image_0_100_0.png'],
['image_0_200_0.png'],
['image_0_300_0.png'],
['image_0_300_1.png', 'image_0_300_2.png'],
['image_0_400_0.png', 'image_0_400_1.png'],
['image_0_500_0.png', 'image_0_500_1.png'],
['image_0_600_0.png', 'image_0_600_1.png'],
['image_0_700_0.png'],
['image_0_800_0.png']
]
def test_apply(self):
base_folder = tcfg.resource("masks")
out_img = np.zeros((512, 512 * 9), dtype=np.uint8)
r = 0
merger = MaskMerger()
for iter in self.iterations:
# load files
masks = np.zeros((512, 512, len(iter)), dtype=np.uint8)
for i in range(0, len(iter)):
masks[:, :, i] = io.imread(os.path.join(base_folder, iter[i])).astype(np.uint8)
pos = list(map(int, iter[0].split("_")[1:3]))
pos.reverse()
merger.apply(out_img, masks, pos)
ids, counts = np.unique(out_img, return_counts=True)
factor = 255 // len(ids)
io.imsave("/tmp/salida_{}.png".format(r), out_img * factor)
r += 1
        # check the number of instances
clean_instaces(out_img)
ids, counts = np.unique(out_img, return_counts=True)
factor = 255 // len(ids)
io.imsave("/tmp/output_image.png".format(r), out_img * factor)
self.assertEqual(3, len(ids)) # 0,1,2
class TestPrimeMaskMerger(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.iterations = cls._load_images()
@classmethod
def _load_images(self):
# iterations
return [
['image_0_0_0.png'],
['image_0_100_0.png'],
['image_0_200_0.png'],
['image_0_300_0.png'],
['image_0_300_1.png', 'image_0_300_2.png'],
['image_0_400_0.png', 'image_0_400_1.png'],
['image_0_500_0.png', 'image_0_500_1.png'],
['image_0_600_0.png', 'image_0_600_1.png'],
['image_0_700_0.png'],
['image_0_800_0.png']
]
def test_apply(self):
base_folder = tcfg.resource("masks")
out_img = np.zeros((512, 512 * 9), dtype=np.uint8)
r = 0
merger = PrimeIdMasMerger()
for iter in self.iterations:
# load files
masks = np.zeros((512, 512, len(iter)), dtype=np.uint8)
for i in range(0, len(iter)):
masks[:, :, i] = io.imread(os.path.join(base_folder, iter[i])).astype(np.uint8)
pos = list(map(int, iter[0].split("_")[1:3]))
pos.reverse()
merger.apply(out_img, masks, pos)
ids, counts = np.unique(out_img, return_counts=True)
factor = 255 // len(ids)
io.imsave("/tmp/salida_{}.png".format(r), out_img * factor)
r += 1
        # check the number of instances
clean_instaces(out_img)
ids, counts = np.unique(out_img, return_counts=True)
factor = 255 // len(ids)
io.imsave("/tmp/output_image.png".format(r), out_img * factor)
self.assertEqual(3, len(ids)) # 0,1,2
if __name__ == "__main__":
unittest.main()
| 2.078125 | 2 |
tools/copy_denied.py | JohnKvisol/Starbound_RU | 50 | 12791206 | #!/bin/python
from os import walk
from os.path import join, relpath, normpath
from json import load, dump
from multiprocessing import Pool
oldpath = "./experimental/translations"
newpath = "./translations"
substitutions = dict()
with open(join(newpath, "substitutions.json"),"r") as f:
substitutions = load(f)
filestowrite = dict()
for subdir, dirs, files in walk(oldpath):
for thefile in files:
if (not thefile.endswith(".json")) or thefile == "substitutions.json":
continue
oldfile = join(subdir, thefile)
objlist = {}
try:
with open(oldfile, "r") as f:
objlist = load(f)
except:
print("Cann't load: " + oldfile)
continue
for obj in objlist:
if "DeniedAlternatives" not in obj:
#print("No alternatives for: " + oldfile)
continue
denied = obj["DeniedAlternatives"]
if len(denied) == 0:
continue
entext = obj["Texts"]["Eng"]
relfiles = obj["Files"]
for rlfile in relfiles.keys():
#relfile = relpath(rlfile, "assets")
relfile = rlfile
thelist = [join("texts", relfile + ".json")]
if relfile in substitutions:
thelist += list(substitutions[relfile].values())
for newfile in thelist:
newfileobj = {}
newfilename = normpath(join(newpath, newfile))
if newfilename in filestowrite:
newfileobj = filestowrite[newfilename]
else:
try:
with open(newfilename, "r") as f:
newfileobj = load(f)
except:
pass
#print("Cann't read: " + newfilename)
#raise
changed = False
for i in range(0, len(newfileobj)):
if not (newfileobj[i]["Texts"]["Eng"] == entext):
continue
if "DeniedAlternatives" not in newfileobj[i]:
newfileobj[i]["DeniedAlternatives"] = list()
for alt in denied:
if alt in newfileobj[i]["DeniedAlternatives"]:
continue
newfileobj[i]["DeniedAlternatives"].append(alt)
changed = True
if changed:
filestowrite[newfilename] = newfileobj
for newfilename, newfileobj in filestowrite.items():
with open(newfilename, "w") as f:
dump(newfileobj, f, ensure_ascii = False, indent = 2)
| 2.390625 | 2 |
app/main.py | engineerjoe440/djjoespotifyplaylister | 1 | 12791207 | <gh_stars>1-10
################################################################################
"""
DJ JOE Website Playlist File Generator
--------------------------------------
(c) 2021 - Stanley Solutions - <NAME>
This application serves an interface to allow the recording of Apple Music or
Spotify playlists.
"""
################################################################################
# Requirements
from urllib.parse import urlparse
from fastapi import FastAPI, Request, Form
from fastapi.responses import HTMLResponse, RedirectResponse
from fastapi.staticfiles import StaticFiles
from fastapi.templating import Jinja2Templates
# Locals
import spotify_client
import apple_music_client
from formatter import playlist_html_table
# Application Base
app = FastAPI()
# Mount the Static File Path
app.mount("/static", StaticFiles(directory="static"), name="static")
templates = Jinja2Templates(directory="templates")
def page(request: Request, url: str = None):
"""Generate the HTML Page Content Using any Provided Playlist URL"""
data = ""
if url != None:
# "Switch" On Domain Name
domain = urlparse(url).netloc
if 'music.apple' in domain:
client = apple_music_client.ApplePlaylister(url)
elif 'spotify' in domain:
client = spotify_client.SpotifyPlaylister(url)
playlist, tracks = client()
data = playlist_html_table(
playlist=playlist,
tracks=tracks,
table_id="playlist",
classes="",
)
# Return Template Response Using Data
return templates.TemplateResponse(
"index.html",
{
"request": request,
"playlist_table": data,
},
)
# Main Application Response
@app.get("/", response_class=HTMLResponse)
async def root(request: Request):
return page(request=request)
# Redirect for Playlist Endpoint
@app.get("/load_playlist")
async def load_playlist_redirect():
return RedirectResponse("/")
# Load Playlist
@app.post("/load_playlist", response_class=HTMLResponse)
async def load_playlist(request: Request, playlist: str = Form(...)):
print(playlist)
return page(request=request, url=playlist) | 2.609375 | 3 |
lift_holdout_emotion.py | juancq/emotion-recognition-walking-acc | 5 | 12791208 | import argparse
import math
import yaml
import numpy as np
from collections import defaultdict
from sklearn import linear_model
from sklearn import metrics
from sklearn import preprocessing
from sklearn.dummy import DummyClassifier
from sklearn.ensemble import RandomForestClassifier
from permute.core import one_sample
def main():
'''
Computes cross-validation by holding out a contiguous block of windows from a single emotion.
'''
np.random.seed(1)
parser = argparse.ArgumentParser()
parser.add_argument("-mu", metavar='mu', type=str, nargs='+', help="file containing music features, input to model", default=[])
parser.add_argument("-mw", metavar='mw', type=str, nargs='+', help="file containing music+walking features, input to model", default=[])
parser.add_argument("-mo", metavar='mo', type=str, nargs='+', help="file containing movie features, input to model", default=[])
parser.add_argument("-e", "--estimators", help="number of estimators for meta-classifiers", type=int, default=100)
parser.add_argument("-o", "--output_file", help="output with pickle results", type=str)
args = parser.parse_args()
output_file = args.output_file
N_ESTIMATORS = args.estimators
def process_condition(fnames, condition):
print 'condition', condition
results = {'labels':[], 'baseline': defaultdict(list),
'logit': defaultdict(list),
'rf': defaultdict(list)}
folds = 10
for fname in fnames:
print 'classifying: %s' % fname
label = fname.split('/')[-1]
data = np.loadtxt(fname, delimiter=',')
# delete neutral to see if we can distinguish between
# happy/sad
data = np.delete(data, np.where(data[:,-1]==0), axis=0)
group_a = np.where(data[:,-1]==1)
group_b = np.where(data[:,-1]==-1)
a_folds = math.floor(folds/2.)
b_folds = folds - a_folds
split_groups = []
split_groups.extend(np.array_split(group_a[0], a_folds))
split_groups.extend(np.array_split(group_b[0], b_folds))
k_folds = []
for i in range(folds):
test = split_groups[i]
train = np.concatenate((split_groups[:i] + split_groups[i+1:]))
k_folds.append((train, test))
x_data = data[:,:-1]
y_data = data[:,-1]
# scaled
x_data = preprocessing.scale(x_data)
models = [
('baseline', DummyClassifier(strategy = 'most_frequent')),
#('logit', linear_model.LogisticRegressionCV(Cs=20, cv=10)),
('logit', linear_model.LogisticRegression()),
('rf', RandomForestClassifier(n_estimators = N_ESTIMATORS)),
]
results['labels'].append(label)
# roc_auc generates error because test includes single class
for key, clf in models:
scores = {'f1':[], 'acc':[]}
for (train, test) in k_folds:
x_train, x_test = x_data[train], x_data[test]
y_train, y_test = y_data[train], y_data[test]
clf.fit(x_train, y_train)
y_pred = clf.predict(x_test)
_f1 = metrics.f1_score(y_test, y_pred, average='weighted')
_acc = metrics.accuracy_score(y_test, y_pred)
y_proba = clf.predict_proba(x_test)
scores['f1'].append(_f1)
scores['acc'].append(_acc)
results[key]['f1'].append(np.mean(scores['f1']))
results[key]['acc'].append(np.mean(scores['acc']))
yaml.dump(results, open(condition+'_lift_scores_'+output_file+'.yaml', 'w'))
# end of function
#---------
if args.mu:
process_condition(args.mu, 'mu')
if args.mw:
process_condition(args.mw, 'mw')
if args.mo:
process_condition(args.mo, 'mo')
if __name__ == "__main__":
main()
| 2.21875 | 2 |
src/core/models.py | metabolism-of-cities/ARCHIVED-metabolism-of-cities-platform-v3 | 0 | 12791209 | <reponame>metabolism-of-cities/ARCHIVED-metabolism-of-cities-platform-v3<filename>src/core/models.py<gh_stars>0
from django.db import models
from multiplicity.models import ReferenceSpace, License
from django.forms import ModelForm
from django.template.defaultfilters import slugify
from tinymce import HTMLField
from django.contrib.auth.models import User
from django.contrib.auth import get_user_model
User = get_user_model()
from django.contrib.sites.models import Site
from django.contrib.sites.managers import CurrentSiteManager
from django.conf import settings
# Used for image resizing
from stdimage.models import StdImageField
import re
from django.urls import reverse
class TimestampedModel(models.Model):
# A timestamp representing when this object was created.
created_at = models.DateTimeField(auto_now_add=True)
# A timestamp reprensenting when this object was last updated.
updated_at = models.DateTimeField(auto_now=True)
class Meta:
abstract = True
# By default, any model that inherits from `TimestampedModel` should
# be ordered in reverse-chronological order. We can override this on a
# per-model basis as needed, but reverse-chronological is a good
# default ordering for most models.
ordering = ['-created_at', '-updated_at']
class ReferenceType(models.Model):
name = models.CharField(max_length=255)
icon = models.CharField(max_length=255, null=True, blank=True)
GROUP = (
('academic', 'Academic'),
('theses', 'Theses'),
('reports', 'Reports'),
('multimedia', 'Multimedia'),
)
group = models.CharField(max_length=20, choices=GROUP, null=True, blank=True)
def __str__(self):
return self.name
class Meta:
ordering = ["name"]
class Organization(models.Model):
name = models.CharField(max_length=255)
url = models.CharField(max_length=255, null=True, blank=True)
twitter = models.CharField(max_length=255, null=True, blank=True)
linkedin = models.CharField(max_length=255, null=True, blank=True)
researchgate = models.CharField(max_length=255, null=True, blank=True)
logo = models.ImageField(null=True, blank=True, upload_to='organizations')
processes = models.ManyToManyField('staf.Process', blank=True, limit_choices_to={'slug__isnull': False})
reference_spaces = models.ManyToManyField(ReferenceSpace, blank=True)
description = HTMLField(null=True, blank=True)
parent = models.ForeignKey('self', on_delete=models.SET_NULL, null=True, blank=True)
ORG_TYPE = (
('academic', 'Research Institution'),
('universities', 'Universities'),
('city_government', 'City Government'),
('regional_government', 'Regional Government'),
('national_government', 'National Government'),
('statistical_agency', 'Statistical Agency'),
('private_sector', 'Private Sector'),
('publisher', 'Publishers'),
('ngo', 'NGO'),
('other', 'Other'),
)
type = models.CharField(max_length=20, choices=ORG_TYPE)
def __str__(self):
return self.name
class Meta:
ordering = ["name"]
class OrganizationForm(ModelForm):
class Meta:
model = Organization
exclude = ['id', 'processes']
class Publisher(models.Model):
name = models.CharField(max_length=255)
def __str__(self):
return self.name
class Meta:
ordering = ["name"]
class Journal(models.Model):
name = models.CharField(max_length=255)
website = models.CharField(max_length=255, null=True, blank=True)
description = models.TextField(null=True, blank=True)
publisher = models.ForeignKey(Publisher, on_delete=models.CASCADE, null=True, blank=True)
image = models.ImageField(null=True, blank=True, upload_to='journals')
def __str__(self):
return self.name
class Meta:
ordering = ["name"]
class People(models.Model):
firstname = models.CharField(max_length=255)
lastname = models.CharField(max_length=255)
affiliation = models.CharField(max_length=255, null=True, blank=True)
email = models.CharField(max_length=255, null=True, blank=True)
email_public = models.BooleanField()
city = models.ForeignKey(ReferenceSpace, on_delete=models.SET_NULL, null=True, blank=True, related_name='people_city', limit_choices_to={'type': 3})
country = models.ForeignKey(ReferenceSpace, on_delete=models.SET_NULL, null=True, blank=True, related_name='people_country', limit_choices_to={'type': 2})
profile = models.TextField(null=True, blank=True)
research_interests = models.TextField(null=True, blank=True)
website = models.CharField(max_length=255, null=True, blank=True)
twitter = models.CharField(max_length=255, null=True, blank=True)
google_scholar = models.CharField(max_length=255, null=True, blank=True)
orcid = models.CharField(max_length=255, null=True, blank=True)
researchgate = models.CharField(max_length=255, null=True, blank=True)
linkedin = models.CharField(max_length=255, null=True, blank=True)
description = models.TextField(null=True, blank=True)
member_since = models.DateField(null=True, blank=True, db_index=True)
user = models.OneToOneField(User, on_delete=models.CASCADE, null=True, blank=True)
image = models.ImageField(null=True, blank=True, upload_to='people', help_text="Square photos are best - please resize to 350x350 pixels")
PEOPLE_STATUS = (
('active', 'Active'),
('retired', 'Retired'),
('deceased', 'Deceased'),
('inactive', 'Inactive'),
('pending', 'Pending Review'),
)
status = models.CharField(max_length=8, choices=PEOPLE_STATUS, default='active')
site = models.ForeignKey(Site, on_delete=models.CASCADE, default=settings.SITE_ID)
organizations = models.ManyToManyField(Organization, blank=True)
objects = models.Manager()
on_site = CurrentSiteManager()
def __str__(self):
return '%s %s' % (self.firstname, self.lastname)
class Meta:
ordering = ["firstname", "lastname"]
class PeopleForm(ModelForm):
class Meta:
model = People
exclude = ['id']
class PeopleNote(models.Model):
people = models.ForeignKey(People, on_delete=models.CASCADE)
date = models.DateTimeField(auto_now_add=True)
note = models.TextField(null=True, blank=True)
created_by = models.ForeignKey(User, on_delete=models.CASCADE)
class Meta:
ordering = ["date"]
class Article(models.Model):
title = models.CharField(max_length=255)
slug = models.SlugField(db_index=True, max_length=255, null=True, blank=True)
introduction = models.TextField(null=True, blank=True)
head = models.TextField(null=True, blank=True)
includes_form = models.BooleanField(default=False)
content = HTMLField('Content', help_text="The content field is a required field - be sure to fill this out")
image = models.ImageField(null=True, blank=True, upload_to='articles')
parent = models.ForeignKey(
'core.Article', on_delete=models.CASCADE, related_name='sectionparent', null=True, blank=True
)
authors = models.ManyToManyField(People, blank=True)
active = models.BooleanField(default=True)
SECTIONS = (
('about', 'About'),
('community', 'Community'),
('research', 'Research'),
('resources', 'Resources'),
('cities', 'Cities'),
('whatwedo', 'What We Do'),
('newsevents', 'News and Events'),
)
section = models.CharField(max_length=20, choices=SECTIONS, default='about')
site = models.ForeignKey(Site, on_delete=models.CASCADE, default=settings.SITE_ID)
objects = models.Manager()
on_site = CurrentSiteManager()
date = models.DateField(null=True, blank=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.title
class Meta:
ordering = ["title"]
class ArticleForm(ModelForm):
class Meta:
model = Article
fields = ['title', 'introduction', 'content', 'image', 'active']
class SimpleArticleForm(ModelForm):
class Meta:
model = Article
fields = ['title', 'image', 'date', 'head', 'includes_form', 'slug', 'active','content']
class Event(models.Model):
article = models.OneToOneField(
Article,
on_delete=models.CASCADE,
related_name='event',
primary_key=True,
)
EVENT_TYPE = (
('conference', 'Conference'),
('hackathon', 'Hackathon'),
('workshop', 'Workshop'),
('seminar', 'Seminar'),
('other', 'Other'),
)
start = models.DateField(null=True, blank=True)
end = models.DateField(null=True, blank=True)
type = models.CharField(max_length=20, choices=EVENT_TYPE)
estimated_date = models.CharField(max_length=60, null=True, blank=True)
location = models.CharField(max_length=255, null=True, blank=True)
url = models.CharField(max_length=255, null=True, blank=True)
def __str__(self):
return self.article.title
class EventForm(ModelForm):
class Meta:
model = Event
exclude = ['article']
class VideoCollection(models.Model):
title = models.CharField(max_length=255)
description = HTMLField('description', null=True, blank=True)
site = models.ForeignKey(Site, on_delete=models.CASCADE, default=settings.SITE_ID)
position = models.PositiveSmallIntegerField(default=1)
objects = models.Manager()
on_site = CurrentSiteManager()
show_in_list = models.BooleanField(default=True)
def __str__(self):
return self.title
class Meta:
ordering = ["position"]
class VideoCollectionForm(ModelForm):
class Meta:
model = VideoCollection
exclude = ['id', 'site']
class Video(models.Model):
title = models.CharField(max_length=255)
url = models.CharField(max_length=255)
description = models.TextField()
author = models.CharField(max_length=255)
date = models.DateField(null=True)
people = models.ManyToManyField(People, blank=True)
VIDEOSITES = (
('youtube', 'YouTube'),
('vimeo', 'Vimeo'),
('wikimedia', 'Wikimedia Commons'),
('other', 'Other website'),
)
website = models.CharField(max_length=20, choices=VIDEOSITES)
site = models.ForeignKey(Site, on_delete=models.CASCADE, default=settings.SITE_ID)
objects = models.Manager()
on_site = CurrentSiteManager()
primary_space = models.ForeignKey(ReferenceSpace, on_delete=models.CASCADE, null=True, blank=True)
collections = models.ManyToManyField(VideoCollection, blank=True)
thumbnail = models.ImageField(null=True, blank=True, upload_to='video_thumbnails')
license = models.ForeignKey(License, on_delete=models.CASCADE, null=True, blank=True)
def __str__(self):
return self.title
class VideoForm(ModelForm):
class Meta:
model = Video
exclude = ['id', 'site']
labels = {
'primary_space': 'Reference space (optional)'
}
class VideoUploadForm(ModelForm):
class Meta:
model = Video
fields = ['title', 'website', 'url', 'primary_space', 'description', 'author', 'date', 'thumbnail', 'license']
labels = {
'primary_space': 'Reference space',
'url': 'Video URL'
}
class Tag(models.Model):
name = models.CharField(max_length=255)
description = HTMLField('description', null=True, blank=True)
parent_tag = models.ForeignKey('self', on_delete=models.CASCADE, null=True, blank=True,
limit_choices_to={'hidden': False}, related_name='children'
)
hidden = models.BooleanField(db_index=True, default=False, help_text="Mark if tag is superseded/not yet approved/deactivated")
include_in_glossary = models.BooleanField(db_index=True, default=False)
is_accounting_method = models.BooleanField(db_index=True, default=False)
PARENTS = (
(1, 'Publication Types'),
(2, 'Metabolism Studies'),
(3, 'Countries'),
(4, 'Cities'),
(5, 'Scales'),
(6, 'Flows'),
(7, 'Time Horizon'),
(9, 'Methodologies'),
(10, 'Other'),
)
parent = models.CharField(max_length=2, choices=PARENTS, null=True, blank=True, help_text="This was a previous classification - can be left empty")
def __str__(self):
return self.name
@property
def shortcode(self):
"Returns abbreviation -- text between parenthesis -- if there is any"
if "(" in self.name:
s = self.name
return s[s.find("(")+1:s.find(")")]
else:
return self.name
class Meta:
ordering = ["name"]
class MethodClassification(models.Model):
name = models.CharField(max_length=255)
description = models.TextField()
def __str__(self):
return self.name
class MethodTemporalBoundary(models.Model):
name = models.CharField(max_length=255)
def __str__(self):
return self.name
class MethodData(models.Model):
name = models.CharField(max_length=255)
def __str__(self):
return self.name
class MethodCategory(models.Model):
name = models.CharField(max_length=255)
description = HTMLField('description', null=True, blank=True)
strengths = HTMLField('strengths', null=True, blank=True)
weaknesses = HTMLField('weaknesses', null=True, blank=True)
def __str__(self):
return self.name
class Meta:
verbose_name_plural = "Method families"
verbose_name = "method family"
class Method(models.Model):
tag = models.OneToOneField(Tag, on_delete=models.CASCADE, limit_choices_to={'parent_tag__id': 318}, related_name="methods")
METHOD_CLASS = (
('3', 'Relation in UM systems'),
('2', 'Flows of substances'),
('1', 'Environmental impacts'),
)
method_class = models.CharField(max_length=1, choices=METHOD_CLASS, null=True, blank=True)
category = models.ForeignKey(MethodCategory, on_delete=models.CASCADE, null=True, blank=True)
description = HTMLField('description', null=True, blank=True)
strengths = HTMLField('strengths', null=True, blank=True)
weaknesses = HTMLField('weaknesses', null=True, blank=True)
STATUS = (
('nw', 'Not worked on'),
('ip', 'In progress'),
('dr', 'Draft ready for review'),
('rv', 'Reviewed - DONE'),
('ec', 'External copy'),
('sk', 'Skip - will not be published'),
)
status = models.CharField(max_length=2, choices=STATUS, null=True, blank=True)
position = models.PositiveSmallIntegerField(null=True, blank=True)
material_scope = models.CharField(max_length=255, null=True, blank=True)
METHOD_SCORING = (
('3', '3 - The item is a defining feature of the approach'),
        ('2', '2 - The feature is typically included in the technique'),
('1', '1 - The item is included only occasionally in the mode of analysis, and in a partial or conditional way'),
('0', '0 - Not included at all'),
)
substances = models.CharField("selected specific substances", max_length=1, choices=METHOD_SCORING, null=True, blank=True, help_text="Elements and basic compounds only")
materials = models.CharField("materials / bulk materials", max_length=1, choices=METHOD_SCORING, null=True, blank=True)
energy = models.CharField(max_length=1, choices=METHOD_SCORING, null=True, blank=True)
outputs = models.CharField("outputs to environment", max_length=1, choices=METHOD_SCORING, null=True, blank=True)
recycling = models.CharField("recyling of material and energy", max_length=1, choices=METHOD_SCORING, null=True, blank=True)
stock_changes = models.CharField("stock changes", max_length=1, choices=METHOD_SCORING, null=True, blank=True)
specific = models.CharField("specific goods and services", max_length=1, choices=METHOD_SCORING, null=True, blank=True)
production = models.CharField("production processes", max_length=1, choices=METHOD_SCORING, null=True, blank=True)
between_flows = models.CharField("between-flows", max_length=1, choices=METHOD_SCORING, null=True, blank=True, help_text="Specification of flows between sectors, industries or acticity fields, or other system components")
classification = models.ManyToManyField(MethodClassification, blank=True)
scale = models.ManyToManyField(Tag, limit_choices_to={'parent_tag__id': 320}, related_name="method_scales", blank=True)
entity = models.CharField(max_length=255, null=True, blank=True, help_text="Key socio-institutional entity (driving force boundary for induced flows)")
temporal_study_boundary = models.ForeignKey(MethodTemporalBoundary, on_delete=models.CASCADE, null=True, blank=True)
data_sources = models.ForeignKey(MethodData, on_delete=models.CASCADE, null=True, blank=True)
    cradle_to_grave = models.CharField("cradle-to-grave sources of flows", max_length=1, choices=METHOD_SCORING, null=True, blank=True, help_text="Note: could also be considered as consumption-based accounting?")
hidden_flows = models.CharField("accounts for hidden flows", max_length=1, choices=METHOD_SCORING, null=True, blank=True)
impacts = models.CharField("quantitative weighting of impacts of material flows", max_length=1, choices=METHOD_SCORING, null=True, blank=True)
main_measurement_unit = models.CharField(max_length=255, null=True, blank=True)
mass_balancing = models.CharField(max_length=1, choices=METHOD_SCORING, null=True, blank=True)
avoidance_double_counting = models.NullBooleanField(null=True, blank=True)
sustainability_criteria_reference = models.CharField(max_length=1, choices=METHOD_SCORING, null=True, blank=True)
developed_by = models.CharField(max_length=255, null=True, blank=True)
based_on = models.TextField(null=True, blank=True)
    gaps_addressed = models.TextField(null=True, blank=True, help_text="What gaps in other methodologies does this particular methodology address?")
    next_steps = models.TextField(null=True, blank=True, help_text="The proposed next steps to further develop/improve this methodology")
representative_paper = models.TextField(null=True, blank=True, help_text="Which paper is a representative case study using this methodology?")
materials_catalog_used = models.TextField(null=True, blank=True)
also_known_as = models.TextField(null=True, blank=True)
internal_notes = models.TextField(null=True, blank=True)
complete = models.NullBooleanField(null=True, blank=True)
include_in_list = models.NullBooleanField(default=False)
def __str__(self):
return self.tag.name
class Meta:
ordering = ["position", "tag__name"]
class TagForm(ModelForm):
class Meta:
model = Tag
exclude = ['id', 'gps', 'parent', 'hidden']
class Reference(models.Model):
title = models.CharField(max_length=255)
LANGUAGES = (
('EN', 'English'),
('ES', 'Spanish'),
('CH', 'Chinese'),
('FR', 'French'),
('GE', 'German'),
('NL', 'Dutch'),
('OT', 'Other'),
)
language = models.CharField(max_length=2, choices=LANGUAGES)
title_original_language = models.CharField(max_length=255, blank=True, null=True)
authorlist = models.TextField()
type = models.ForeignKey(ReferenceType, on_delete=models.CASCADE)
journal = models.ForeignKey(Journal, on_delete=models.CASCADE, null=True, blank=True, help_text="If the journal does not appear in the list, please leave empty and add the name in the comments")
event = models.ForeignKey(Event, on_delete=models.CASCADE, null=True, blank=True)
year = models.PositiveSmallIntegerField()
abstract = models.TextField(null=True, blank=True)
abstract_original_language = models.TextField(null=True, blank=True)
date_added = models.DateTimeField(null=True, blank=True, auto_now_add=True)
file = models.FileField(null=True, blank=True, upload_to='references', help_text='Only upload the file if you are the creator or you have permission to do so')
open_access = models.NullBooleanField(null=True, blank=True)
cityloops = models.BooleanField(default=False)
cityloops_comments = models.TextField(null=True, blank=True)
cityloops_comments_import = models.TextField(null=True, blank=True, help_text='Additional comments about the importing process')
url = models.CharField(max_length=500, null=True, blank=True)
doi = models.CharField(max_length=255, null=True, blank=True)
isbn = models.CharField(max_length=255, null=True, blank=True)
comments = models.TextField(null=True, blank=True)
STATUS = (
('pending', 'Pending'),
('active', 'Active'),
('deleted', 'Deleted'),
)
status = models.CharField(max_length=8, choices=STATUS, db_index=True)
authors = models.ManyToManyField(People, through='ReferenceAuthors')
organizations = models.ManyToManyField(Organization, through='ReferenceOrganization')
tags = models.ManyToManyField(Tag, blank=True, limit_choices_to={'hidden': False})
processes = models.ManyToManyField('staf.Process', blank=True, limit_choices_to={'slug__isnull': False})
materials = models.ManyToManyField('staf.Material', blank=True)
spaces = models.ManyToManyField(ReferenceSpace, blank=True)
def __str__(self):
return self.title
class Meta:
ordering = ["-year", "title"]
def source(self):
"Return details of where this reference was published at/in"
if self.journal:
return self.journal.name
elif self.event:
            return self.event.article.title
else:
return self.type.name
def accountingMethods(self):
return self.tags.filter(is_accounting_method=True, hidden=False)
class ReferenceAuthors(models.Model):
reference = models.ForeignKey(Reference, on_delete=models.CASCADE)
people = models.ForeignKey(People, on_delete=models.CASCADE)
class Meta:
db_table = 'core_reference_authors'
class ReferenceForm(ModelForm):
class Meta:
model = Reference
fields = ['language', 'title', 'title_original_language', 'authorlist', 'type', 'journal', 'year', 'abstract', 'abstract_original_language', 'open_access', 'doi', 'isbn', 'url', 'comments', 'file']
labels = {
'authorlist': 'Author(s)',
'doi': 'DOI',
'isbn': 'ISBN',
'url': 'URL',
}
class ReferenceFormAdmin(ModelForm):
class Meta:
model = Reference
exclude = ['id', 'organizations', 'processes', 'date_added', 'event', 'authors', 'spaces', 'tags', 'materials']
labels = {
'authorlist': 'Author(s)',
'doi': 'DOI',
'isbn': 'ISBN',
'url': 'URL',
}
class ReferenceOrganization(models.Model):
organization = models.ForeignKey(Organization, on_delete=models.CASCADE)
reference = models.ForeignKey(Reference, on_delete=models.CASCADE)
TYPES = (
('publisher', 'Publisher'),
('commissioner', 'Commissioner'),
('organization', 'Organization'),
)
type = models.CharField(max_length=20, choices=TYPES)
def __str__(self):
return self.organization.name + " - " + self.type + " - " + self.reference.title
class MaterialGroup(models.Model):
name = models.CharField(max_length=255)
description = models.TextField(null=True, blank=True)
def __str__(self):
return self.name
class CaseStudy(models.Model):
title = models.CharField(max_length=255)
method = models.OneToOneField(Tag, on_delete=models.CASCADE, limit_choices_to={'parent_tag__id': 318})
reference = models.ForeignKey(Reference, on_delete=models.CASCADE)
spaces = models.ManyToManyField(ReferenceSpace, blank=True, limit_choices_to={'type__id': 3})
material_groups = models.ManyToManyField(MaterialGroup, blank=True)
ongoing = models.CharField(max_length=255, null=True, blank=True, help_text="Do they continue to implement it?")
consideration = models.TextField(null=True, blank=True, help_text="Circular economy / closing loop consideration")
target_audience = models.TextField(null=True, blank=True, help_text="Target audience of results")
indicators = models.TextField(null=True, blank=True, help_text="Indicators")
pros = models.TextField(null=True, blank=True, help_text="Indicators")
cons = models.TextField(null=True, blank=True, help_text="Indicators")
purpose = models.TextField(null=True, blank=True, help_text="Purpose of the study")
def __str__(self):
return self.title
class Meta:
verbose_name_plural = "case studies"
class UserAction(models.Model):
name = models.CharField(max_length=255)
def __str__(self):
return self.name
class UserLog(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='log')
space = models.ForeignKey('multiplicity.ReferenceSpace', on_delete=models.CASCADE, null=True, blank=True)
reference = models.ForeignKey(Reference, on_delete=models.CASCADE, null=True, blank=True)
date = models.DateTimeField(auto_now_add=True)
action = models.ForeignKey(UserAction, on_delete=models.CASCADE)
points = models.PositiveSmallIntegerField()
model = models.CharField(max_length=255, null=True, blank=True)
model_id = models.PositiveIntegerField(null=True, blank=True)
description = models.TextField(null=True, blank=True)
class Meta:
ordering = ["-date"]
class Color(models.Model):
name = models.CharField(max_length=20)
css = models.CharField(max_length=20)
def __str__(self):
return self.name
class Project(models.Model):
name = models.CharField(max_length=255, null=True, blank=True)
full_name = models.CharField(max_length=255, null=True, blank=True)
institution = models.CharField(max_length=255, null=True, blank=True)
organizations = models.ManyToManyField(Organization, through='ProjectOrganization', blank=True)
researcher = models.CharField(max_length=255, null=True, blank=True)
supervisor = models.CharField(max_length=255, null=True, blank=True)
email = models.CharField(max_length=255, null=True, blank=True)
description = HTMLField('description', null=True, blank=True)
target_finish_date = models.CharField(max_length=255, null=True, blank=True)
start_date = models.DateField(blank=True, null=True)
end_date = models.DateField(blank=True, null=True)
STATUS = (
('planned', 'Planned'),
('ongoing', 'In progress'),
('finished', 'Finished'),
('cancelled', 'Cancelled'),
)
status = models.CharField(max_length=20, choices=STATUS, default='ongoing')
active = models.BooleanField(default=True)
pending_review = models.BooleanField(default=True)
TYPE = (
('theses', 'Theses'),
('projects', 'Projects'),
('applied', 'Applied research'),
)
type = models.CharField(max_length=20, choices=TYPE)
THESISTYPE = (
('bachelor', 'Bachelor'),
('masters', 'Master'),
('phd', 'PhD'),
('other', 'Other'),
)
thesistype = models.CharField(max_length=20, choices=THESISTYPE, null=True, blank=True)
url = models.CharField(max_length=255, null=True, blank=True)
references = models.ManyToManyField(Reference, blank=True, limit_choices_to={'status': 'active'})
material_groups = models.ManyToManyField(MaterialGroup, blank=True)
material_temp_notes = models.TextField(null=True, blank=True)
internal_notes = models.TextField(null=True, blank=True)
output_tools = models.TextField(null=True, blank=True)
output_reports = models.TextField(null=True, blank=True)
output_articles = models.TextField(null=True, blank=True)
funding_program = models.CharField(max_length=255, null=True, blank=True)
methodologies = models.TextField(null=True, blank=True)
methodologies_processing_notes = models.TextField(null=True, blank=True)
methodologies_tags = models.ManyToManyField(Tag, limit_choices_to={'parent_tag__id': 318}, blank=True)
reference_spaces = models.ManyToManyField(ReferenceSpace, blank=True, limit_choices_to={'type': 3})
budget = models.DecimalField(max_digits=10, decimal_places=2, null=True, blank=True)
print_aim = models.TextField(null=True, blank=True)
print_relevance = models.TextField(null=True, blank=True)
RELEVANCE = (
('u', 'Unknown'),
('l', 'Low'),
('m', 'Medium'),
('h', 'High'),
)
relevance = models.CharField(max_length=1, choices=RELEVANCE, null=True, blank=True)
CITYLOOPS = (
('no', 'No'),
('pending', 'Yes - pending'),
('yes', 'Yes - completed'),
)
cityloops = models.CharField(max_length=20, choices=CITYLOOPS, null=True, blank=True)
logo = StdImageField(upload_to='projects', variations={'thumb': (300, 300), 'large': (800, 800)}, null=True, blank=True)
image1 = StdImageField(upload_to='projects', variations={'thumb': (300, 300), 'large': (1600,1600)}, null=True, blank=True)
image2 = StdImageField(upload_to='projects', variations={'thumb': (300, 300), 'large': (1600,1600)}, null=True, blank=True)
image3 = StdImageField(upload_to='projects', variations={'thumb': (300, 300), 'large': (1600,1600)}, null=True, blank=True)
site = models.ForeignKey(Site, on_delete=models.CASCADE)
objects = models.Manager()
on_site = CurrentSiteManager()
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse("core:project", args=[self.type, self.id])
class Meta:
ordering = ['name']
class ProjectForm(ModelForm):
class Meta:
model = Project
exclude = ['id', 'site', 'references', 'organizations']
labels = {
'name': 'Project title',
'thesistype': 'Thesis type',
'researcher': 'Researcher(s)',
'supervisor': 'Supervisor(s) / Project leader(s)',
'url': 'URL',
}
class ProjectUserForm(ModelForm):
class Meta:
model = Project
fields = ['name', 'researcher', 'type', 'thesistype', 'institution', 'supervisor', 'email', 'description', 'target_finish_date', 'start_date', 'end_date', 'status', 'url']
labels = {
'name': 'Project title',
'thesistype': 'Thesis type',
'researcher': 'Researcher(s)',
'supervisor': 'Supervisor(s) / Project leader(s)',
'url': 'URL',
}
class ProjectOrganization(models.Model):
organization = models.ForeignKey(Organization, on_delete=models.CASCADE)
project = models.ForeignKey(Project, on_delete=models.CASCADE)
TYPES = (
('funder', 'Funder'),
('commissioner', 'Commissioner'),
('organization', 'Organization'),
)
type = models.CharField(max_length=20, choices=TYPES)
def __str__(self):
return self.organization.name + " - " + self.type + " - " + self.project.name
class Timeline(models.Model):
title = models.CharField(max_length=255)
description = models.TextField(null=True, blank=True)
link = models.CharField(max_length=255, null=True, blank=True)
date = models.DateField()
def __str__(self):
return self.title
class DataViz(models.Model):
title = models.CharField(max_length=255)
image = StdImageField(upload_to='dataviz', variations={'thumb': (300, 300), 'large': (1024, 1024)})
uploaded_by = models.ForeignKey(People, on_delete=models.CASCADE)
space = models.ForeignKey('multiplicity.ReferenceSpace', on_delete=models.CASCADE, null=True, blank=True)
reference = models.ForeignKey(Reference, on_delete=models.CASCADE, null=True, blank=True)
process_group = models.ForeignKey('multiplicity.ProcessGroup', on_delete=models.CASCADE, null=True, blank=True)
date = models.DateTimeField(auto_now_add=True)
description = HTMLField(null=True, blank=True)
url = models.CharField(max_length=255, null=True, blank=True, help_text="URL of the source website/article -- ONLY enter if this is not linked to a publication")
source = models.TextField(null=True, blank=True, help_text="Name of the source website/article -- ONLY enter if this is not linked to a publication")
year = models.PositiveSmallIntegerField(null=True, blank=True, help_text="Year of the data being visualized -- ONLY enter if this is not linked to a publication")
class Meta:
ordering = ["date"]
def __str__(self):
return self.title
class NewsletterSubscriber(models.Model):
people = models.ForeignKey(People, on_delete=models.CASCADE)
datasets = models.BooleanField()
news = models.BooleanField()
events = models.BooleanField()
publications = models.BooleanField()
dataviz = models.BooleanField()
multimedia = models.BooleanField()
projects = models.BooleanField()
theses = models.BooleanField()
reference_spaces = models.ManyToManyField(ReferenceSpace, blank=True)
site = models.ForeignKey(Site, on_delete=models.CASCADE, default=settings.SITE_ID)
def __str__(self):
return self.people.firstname + " " + self.people.lastname
| 1.992188 | 2 |
src/cake-install.py | tanuck/cake-install | 0 | 12791210 | <filename>src/cake-install.py
#!/usr/bin/python
import argparse
import requests
import zipfile
import sys, os
import shutil
import shlex, subprocess
from requests.exceptions import ConnectionError, HTTPError, Timeout, TooManyRedirects
def run(args):
zipurls = {1: 'https://github.com/cakephp/cakephp/archive/1.3.20.zip',
2: 'https://github.com/cakephp/cakephp/archive/2.6.2.zip',
3: 'https://github.com/cakephp/app/archive/3.0.0-beta3.zip'}
try:
# check that --dir exists, if not create it
if not os.path.exists(args.dir):
os.makedirs(args.dir)
sys.stdout.write('Created directory: ' + args.dir + '\n')
sys.stdout.write('Downloading CakePHP...\t\t\t')
sys.stdout.flush()
results = requests.get(zipurls[args.version])
if results.status_code == 200:
output = open('/tmp/file.zip', 'wb')
output.write(results.content)
output.close()
sys.stdout.write('[DONE]\n')
sys.stdout.write('Extracting files...\t\t\t')
sys.stdout.flush()
zippedfile = zipfile.ZipFile('/tmp/file.zip')
dirname = zippedfile.namelist()[0]
zippedfile.extractall('/tmp')
zippedfile.close()
os.remove('/tmp/file.zip')
contentsPath = '/tmp/' + dirname
names = os.listdir(contentsPath)
for name in names:
if os.path.isdir(contentsPath + name):
shutil.copytree(contentsPath + name, args.dir + '/' + name)
else:
shutil.copy2(contentsPath + name, args.dir)
shutil.rmtree(contentsPath)
sys.stdout.write('[DONE]\n')
if args.version == 3:
sys.stdout.write('Running composer...\n\r')
sys.stdout.flush()
command = '/usr/local/bin/composer install --prefer-dist -d ' + args.dir + ' --dev'
commandargs = shlex.split(command)
proc = subprocess.Popen(commandargs, stderr=subprocess.PIPE)
if (proc.stderr.read()):
sys.stdout.write('CakePHP was installed, however composer encountered errors when installing the dependencies. Try running \'composer install\' yourself in the ' + args.dir
+ ' directory.')
sys.stdout.flush()
sys.exit(1)
sys.stdout.write('CakePHP ' + str(args.version) + ' was installed successfully.\n')
sys.stdout.flush()
except IOError as ioe:
print 'Could not write to the /tmp directory.'
sys.exit(1)
except (ConnectionError, HTTPError, Timeout, TooManyRedirects) as re:
print 'There was a problem downloading CakePHP.'
sys.exit(1)
except zipfile.BadZipfile as bzfe:
print 'Error unzipping CakePHP.'
sys.exit(1)
except shutil.Error as sue:
print 'Make sure ' + args.dir + ' is writable.'
sys.exit(1)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Install a fresh copy of CakePHP.')
parser.add_argument('--version', default=2, type=int, choices=[1, 2, 3], help='Specify the version of CakePHP you wish to install.')
parser.add_argument('--dir', default='/vagrant', help='Select the directory where CakePHP will be installed, alter the Apache webroot accordingly.')
argsObj = parser.parse_args()
try:
run(argsObj)
except KeyboardInterrupt:
print 'Exit signal received...'
sys.exit(1)
| 2.578125 | 3 |
app/views/admin/index.py | mrakzero/FlaskCMS | 1 | 12791211 | from flask import render_template
from app.views.admin import bp_admin
@bp_admin.route('/')
def index():
return render_template('admin/index.html')
@bp_admin.route('/dashboard')
def dashboard():
return render_template('admin/dashboard.html')
| 1.851563 | 2 |
db/base.py | zy7y/HelloFastAPI | 1 | 12791212 | <filename>db/base.py<gh_stars>1-10
# Import all models, used for the migration files
from db.base_class import Base
from models.user import User
from models.movie import Movie
| 1.6875 | 2 |
Test/Python.py | ConAntares/Photonica | 0 | 12791213 | <reponame>ConAntares/Photonica
""" Python Test """
#### Lagrange Interpolation Formula
import time
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import lagrange
to = time.time()
def lagrange_interpolate(x_set, y_set, t_step):
""" Lagrange interpolate """
p_re = lagrange(x_set, y_set)
return p_re(t_step)
x, y = np.loadtxt('Data/dataSim.dat', unpack=True)
t = np.linspace(1, 2, 11)
u = lagrange_interpolate(x, y, t)
td = time.time() - to
print("The time interval is %f s." %td)
plt.rcParams['font.family'] = 'CMU Serif'
plt.plot(t, u, "P", c="#00B4DC", alpha=0.9)
plt.plot(x, y, "o", c="#32B432", alpha=0.9)
plt.tick_params(direction='in')
plt.show()
#%%
#### Shanks Transform
import numpy as np
import matplotlib.pyplot as plt
"""
L[i] = (S[i]^2-S[i-1]*S[i+1])/(2*S[i]-S[i-1]-S[i+1])
"""
# Load Harmonic Series
X,Y = np.loadtxt('Data/Tr.dat', unpack=True)
count = len(X)
## L1
L1 = np.empty(count)
for i in range(0,count):
if i > 0 and i < count-1:
L1[i] = (Y[i]**2-Y[i-1]*Y[i+1])/(2*Y[i]-Y[i-1]-Y[i+1])
else:
L1[i] = 0
i = i + 1
## L2
L2 = np.empty(count)
for i in range(0,count):
if i > 0 and i < count-1:
L2[i] = (L1[i]**2-L1[i-1]*L1[i+1])/(2*L1[i]-L1[i-1]-L1[i+1])
else:
L2[i] = 0
i = i + 1
## L3
L3 = np.empty(count)
for i in range(0,count):
if i > 0 and i < count - 1:
L3[i] = (L2[i]**2-L2[i-1]*L2[i+1])/(2*L2[i]-L2[i-1]-L2[i+1])
else:
L3[i] = 0
i = i + 1
## L4
L4 = np.empty(count)
for i in range(0,count):
if i > 0 and i < count - 1:
L4[i] = (L3[i]**2-L3[i-1]*L3[i+1])/(2*L3[i]-L3[i-1]-L3[i+1])
else:
L4[i] = 0
i = i + 1
plt.rcParams['font.family'] = 'CMU Serif'
plt.plot(X, Y, "o", c="#B4B4B4", alpha=0.9)
plt.plot(X[1:len(L1)-1], L1[1:len(L1)-1], c="#FF1E14", alpha=0.9)
plt.plot(X[2:len(L1)-2], L2[2:len(L1)-2], c="#FFC814", alpha=0.9)
plt.plot(X[3:len(L1)-3], L3[3:len(L1)-3], c="#1978F0", alpha=0.9)
plt.plot(X[4:len(L1)-4], L4[4:len(L1)-4], c="#A064DC", alpha=0.9)
plt.tick_params(direction='in')
plt.show()
#%%
#### Richardson Extrapolation
import numpy as np
import time
import matplotlib.pyplot as plt
"""
Sn = 1 + 1/2^2 + 1/3^2 + 1/4^2 + ... + 1/n^2
lim(n->∞)Sn = π^2/6 ≈ 1.6449340668482264
R1(n) = ((n+1)*S(n+1)-n*S(n))/np.math.factorial(1)
R2(n) = ((n+2)^2*S(n+2)-2*(n+1)^2*S(n+1)+n^2*S(n))/np.math.factorial(2)
……
"""
to = time.time()
X,Y = np.loadtxt('Data/Td.dat', unpack=True)
count = len(X)
def RE1(Y):
    """Apply first-order Richardson extrapolation to the partial sums Y."""
    R1 = np.ones(count)
for i in range(0, count - 1):
if i < count - 1 :
R1[i] = ((i+1)*Y[i+1]-i*Y[i])/np.math.factorial(1)
else:
R1[i] = 0
i = i + 1
return R1
def RE2(Y):
R2 = np.ones(count)
for i in range(0, count - 2):
if i < count - 2 :
R2[i] = ((i+2)**2*Y[i+2]-2*(i+1)**2*Y[i+1]+i**2*Y[i])/np.math.factorial(2)
else:
R2[i] = 0
i = i + 1
return R2
U1 = RE1(Y)
U2 = RE2(Y)
td = time.time() - to
print("The time interval is %f s." %td)
plt.rcParams['font.family'] = 'CMU Serif'
plt.plot(X, Y, "o", c="#B4B4B4", alpha=0.9)
plt.plot(X[0:len(U1)-1], U1[0:len(U1)-1], c="#FF1E14", alpha=0.9)
plt.plot(X[0:len(U1)-2], U2[0:len(U1)-2], c="#1978F0", alpha=0.9)
plt.tick_params(direction='in')
plt.show()
#%%
#### Interatomic potential
import numpy as np
A = 1; B = 2
m = 2; n = 3
step = 0.01
r = np.array(np.arange(step,10,step))
# Repulsive potential
rep = A/(r**n)
# Attractive potential
atp = -B/(r**m)
# Resulting potential
pot = rep + atp
import matplotlib; import matplotlib.pyplot as plt
plt.figure(dpi=192); params = {"text.usetex":True, "font.family":"serif", "mathtext.fontset":"cm", "axes.titlesize": 16, "axes.labelsize":14, "figure.facecolor":"w"}
matplotlib.rcParams.update(params)
plt.ticklabel_format(style="sci", scilimits=(0,0)); plt.tick_params(direction="in",top=True,right=True,bottom=True,left=True)
plt.title("Interatomic potential"); plt.xlabel(r"Interatomic distance"); plt.ylabel(r"Interatomic potential")
plt.xlim(0, 8); plt.ylim(-4, 4);
plt.plot(r, rep, label="Repulsive potential", color="#FA3C3C")
plt.plot(r, atp, label="Attractive potential", color="#0A8CFF")
plt.plot(r, pot, label="Resulting potential", color="#6E64FA")
plt.plot(r, r*0, color="gray", alpha=0.4); plt.legend(loc="best")
import numpy as np
e = 2.71828182845904523536
k = 1.380649e-23
v = np.array(np.arange(0,2,0.001))
a = np.matrix([1,2,4,8,16,32,64])
n = 1/(e**np.array(a.T*np.matrix(v-1))+1)
#%%
#### Fermi-Dirac statistics
import matplotlib; import matplotlib.pyplot as plt
plt.figure(dpi=192); params = {"text.usetex":True, "font.family":"serif", "mathtext.fontset":"cm", "axes.titlesize": 16, "axes.labelsize":14, "figure.facecolor":"w"};
matplotlib.rcParams.update(params)
plt.ticklabel_format(style="sci", scilimits=(0,0)); plt.tick_params(direction="in",top=True,right=True,bottom=True,left=True)
plt.suptitle("The Fermi-Dirac Distribution", fontsize=16); plt.title("Energy dependence", fontsize=12); plt.xlabel(r"$E/\mu$"); plt.ylabel(r"$\langle{n_i}\rangle$", rotation=0)
plt.plot(([1,1]),([0,1]), "--", color="#A0A0A0"); plt.annotate(r"$\mu\approx E_F$", xy=(1.00, 0.85), xytext=(1.20, 0.95), arrowprops=dict(arrowstyle='->',connectionstyle="arc3,rad=-0.2"))
plt.plot(v,n[0,:],label="$k_BT=\mu /1$", color="#FA3C3C")
plt.plot(v,n[1,:],label="$k_BT=\mu /2$", color="#FA9C3D")
plt.plot(v,n[2,:],label="$k_BT=\mu /4$", color="#FADC14")
plt.plot(v,n[3,:],label="$k_BT=\mu /8$", color="#50F050")
plt.plot(v,n[4,:],label="$k_BT=\mu /16$", color="#0A8CFF")
plt.plot(v,n[5,:],label="$k_BT=\mu /64$", color="#6E64FA")
plt.legend(loc="best")
#%%
#### Kinetic theory of gases
import numpy as np
T = np.arange(0,1000,1)
k = 1.380649e-23
m = np.matrix([2,28,32])*1.67e-27
v = np.sqrt(3*k*T/m.T)
import matplotlib; import matplotlib.pyplot as plt
plt.figure(dpi=192); params = {"text.usetex":True, "font.family":"serif", "mathtext.fontset":"cm", "axes.titlesize": 16, "axes.labelsize":14, "figure.facecolor":"white"}
matplotlib.rcParams.update(params)
plt.ticklabel_format(style="sci", scilimits=(0,0)); plt.tick_params(direction="in",top=True,right=True,bottom=True,left=True)
plt.title("Kinetic Theory of Gases"); plt.xlabel(r"Temperature ($\rm{K}$)"); plt.ylabel(r"Melocule Velocity ($\rm{ms^{-1}}$)")
plt.plot(T,np.array(v[0,:].T),label=r"Hydrogen $\rm{H_2}$", color="#0A8CFF")
plt.plot(T,np.array(v[1,:].T),label=r"Nitrogen $\rm{N_2}$", color="#FA3C3C")
plt.plot(T,np.array(v[2,:].T),label=r"Oxygen $\rm{O_2}$", color="#50F050"); plt.legend(loc="best")
#%%
#### Einstein's calculation
import numpy as np
Interval = 0.01
reT = np.arange(Interval,2+Interval,Interval)
reV = np.power(reT,-1)
reC = reV**2*np.exp(reV)/np.square(np.exp(reV)-1)
import matplotlib; import matplotlib.pyplot as plt
plt.figure(dpi=192); params = {"text.usetex":True, "font.family":"serif", "mathtext.fontset":"cm", "axes.titlesize": 16, "axes.labelsize":14, "figure.facecolor":"w"};
matplotlib.rcParams.update(params)
plt.ticklabel_format(style="sci", scilimits=(0,0)); plt.tick_params(direction="in",top=True,right=True,bottom=True,left=True)
plt.title("Einstein heat capacity per atom in three dimenson"); plt.xlabel(r"$k_BT/(\hbar\omega)$"); plt.ylabel(r"$\frac{C}{3k_B}$",rotation=0)
plt.plot(reT,reC,color="#0A8CFF")
| 2.53125 | 3 |
Artificial Algae Algorithm/CalculateGreatness.py | shahind/Nature-Inspired-Algorithms | 17 | 12791214 | <filename>Artificial Algae Algorithm/CalculateGreatness.py
import numpy as np
def CalculateGreatness(BigX,ObjX):
ObjX = (ObjX - np.min(ObjX))/ np.ptp(ObjX)
    s2 = BigX.shape[1]  # one column per solution; np.size() would count every element
BigY = []
for i in range(0,s2):
fKs = np.abs(BigX[:,i]/2.0)
M = (ObjX[i] / (fKs + ObjX[i]))
dX = M * BigX[:,i]
BigX[:,i] = BigX[:,i] + dX
return BigX
| 3.0625 | 3 |
Statistics/Median.py | sh667/statistical-calculator | 1 | 12791215 | <reponame>sh667/statistical-calculator
from Calculator.Division import division
from Calculator.Addition import addition
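# Note that get_median below assumes the input list is already sorted: for an even
# count it averages the two middle values, and for an odd count it returns the
# single middle element (division(2, x) is used as x / 2 throughout).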
def get_median(data):
num_values = len(data)
if num_values % 2 == 0:
value = int(division(2, num_values))
a = data[value]
value = value - 1
b = data[value]
c = addition(b, a)
d = division(2, c)
return d
else:
value = int(division(2, num_values))
e = data[value]
return e
| 3.71875 | 4 |
src/github_bot_api/signature.py | NiklasRosenstein/python-github-bot-api | 8 | 12791216 |
"""
Helper to check the signature of a GitHub event request.
"""
import hmac
def compute_signature(payload: bytes, secret: bytes, algo: str = 'sha256') -> str:
"""
Computes the HMAC signature of *payload* given the specified *secret* and the given hashing *algo*.
    # Parameters
payload: The payload for which the signature should be computed.
    secret: The secret that is combined with the payload to generate the signature.
algo: The hash algorithm to use, must be `sha1` or `sha256`.
"""
if algo not in ('sha1', 'sha256'):
raise ValueError(f'algo must be {{sha1, sha256}}, got {algo!r}')
return f'{algo}=' + hmac.new(secret, payload, algo).hexdigest()
def check_signature(sig: str, payload: bytes, secret: bytes, algo: str = 'sha256') -> None:
"""
    Compares the provided signature *sig* with the computed signature of the *payload* and
raises a #SignatureMismatchException if they do not match. This function uses constant-time
string comparison to prevent timing analysis.
"""
computed = compute_signature(payload, secret, algo)
if not hmac.compare_digest(sig, computed):
raise SignatureMismatchException(sig, computed)
class SignatureMismatchException(Exception):
"""
Raised if a signature can not be verified with #check_signatuer().
"""
_MSG = 'The provided signature does not match the computed signature of the payload.'
def __init__(self, provided: str, computed: str) -> None:
self.provided = provided
self.computed = computed
def __str__(self) -> str:
return f'{self._MSG}\n provided: {self.provided}\n computed: {self.computed}'
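
# Hypothetical usage sketch: the secret, payload and tampered body below are
# made-up placeholder values, shown only to illustrate the two helpers above.
if __name__ == '__main__':
    secret = b'my-webhook-secret'
    payload = b'{"action": "opened"}'
    sig = compute_signature(payload, secret)   # e.g. 'sha256=...'
    check_signature(sig, payload, secret)      # matches, returns None
    try:
        check_signature(sig, b'tampered body', secret)
    except SignatureMismatchException as exc:
        print(exc)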
| 3.296875 | 3 |
tests/test_helpers.py | Xcalizorz/commodore | 0 | 12791217 | <filename>tests/test_helpers.py
"""
Unit-tests for helpers
"""
from pathlib import Path
import commodore.helpers as helpers
from commodore.config import Config
from commodore.component import Component, component_dir
def test_apierror():
e = helpers.ApiError("test")
assert f"{e}" == "test"
try:
raise helpers.ApiError("test2")
except helpers.ApiError as e2:
assert f"{e2}" == "test2"
def test_clean_working_tree(tmp_path: Path):
cfg = Config(work_dir=tmp_path)
cfg.inventory.ensure_dirs()
d = component_dir(tmp_path, "test")
assert not d.is_dir()
Component("test", work_dir=tmp_path)
assert d.is_dir()
helpers.clean_working_tree(cfg)
assert d.is_dir()
| 2.796875 | 3 |
nnef_tools/conversion/onnx/onnx_custom.py | rgiduthuri/NNEF-Tools | 1 | 12791218 | <reponame>rgiduthuri/NNEF-Tools
# Copyright (c) 2017 The Khronos Group Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division, print_function, absolute_import
from nnef_tools.conversion.onnx.nnef_to_onnx import Converter as NNEFToONNXConverter
from nnef_tools.conversion.onnx.onnx_to_nnef import Converter as ONNXToNNEFConverter
from nnef_tools.io.nnef.nnef_graph import NNEFGraph, NNEFOperation, NNEFTensor
from nnef_tools.io.onnx.onnx_graph import ONNXGraph, ONNXOperation, ONNXTensor
__all__ = [
'ONNXToNNEFConverter',
'NNEFToONNXConverter',
'NNEFGraph',
'NNEFOperation',
'NNEFTensor',
'ONNXGraph',
'ONNXOperation',
'ONNXTensor',
]
| 1.117188 | 1 |
tests/test_mode_pytest.py | cle-b/httpdbg | 0 | 12791219 | # -*- coding: utf-8 -*-
import io
import os
import requests
from httpdbg.httpdbg import ServerThread, app
from httpdbg.mode_pytest import run_pytest
from httpdbg.__main__ import pyhttpdbg_entry_point
from utils import _run_under_httpdbg
def test_run_pytest(httpbin):
def _test(httpbin):
os.environ["HTTPDBG_TEST_PYTEST_BASE_URL"] = httpbin.url
script_to_run = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "demo_run_pytest.py"
)
run_pytest(["pytest", script_to_run, "-k", "test_demo"])
stop_httpdbg, current_httpdbg_port = _run_under_httpdbg(_test, httpbin)
ret = requests.get(f"http://127.0.0.1:{current_httpdbg_port}/requests")
stop_httpdbg()
reqs = ret.json()["requests"]
assert len(reqs) == 3
assert reqs[0]["uri"] == httpbin.url + "/post"
assert reqs[1]["uri"] == httpbin.url + "/get"
assert reqs[2]["uri"] == httpbin.url + "/put"
def test_run_pytest_from_pyhttpdbg_entry_point(httpbin, monkeypatch):
os.environ["HTTPDBG_TEST_PYTEST_BASE_URL"] = httpbin.url
script_to_run = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "demo_run_pytest.py"
)
monkeypatch.setattr(
"sys.argv", ["pyhttpdb", "pytest", script_to_run, "-k", "test_demo"]
)
# to terminate the httpdbg server
monkeypatch.setattr("sys.stdin", io.StringIO("\n"))
pyhttpdbg_entry_point()
# we need to restart a new httpdbg server as the previous has been stopped
server = ServerThread(6000, app)
server.start()
ret = requests.get("http://127.0.0.1:6000/requests")
reqs = ret.json()["requests"]
assert len(reqs) == 3
assert reqs[0]["uri"] == httpbin.url + "/post"
assert reqs[1]["uri"] == httpbin.url + "/get"
assert reqs[2]["uri"] == httpbin.url + "/put"
server.shutdown()
def test_run_pytest_with_exception(capsys):
def _test():
script_to_run = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "demo_run_pytest.py"
)
run_pytest(["pytest", script_to_run, "-k", "test_demo_raise_exception"])
stop_httpdbg, current_httpdbg_port = _run_under_httpdbg(_test)
ret = requests.get(f"http://127.0.0.1:{current_httpdbg_port}/requests")
stop_httpdbg()
reqs = ret.json()["requests"]
assert len(reqs) == 0
assert "fixture_which_do_not_exists" in capsys.readouterr().out
| 2.234375 | 2 |
word_to_score.py | webdotorg/word-game-tool | 0 | 12791220 | <filename>word_to_score.py<gh_stars>0
# function to convert a word to its base point value using the per-letter scores
# does not account for value modifications based on a board
def point_conversion(word):
# Convert word passed in to uppercase so it can find
# keys in dictionary
word = word.upper()
# dictionary of letters and their point values
charDict = {
' ': 0,
'A': 1,
'B': 3,
'C': 3,
'D': 2,
'E': 1,
'F': 4,
'G': 2,
'H': 4,
'I': 1,
'J': 8,
'K': 5,
'L': 1,
'M': 3,
'N': 1,
'O': 1,
'P': 3,
'Q': 10,
'R': 1,
'S': 1,
'T': 1,
'U': 1,
'V': 4,
'W': 4,
'X': 8,
'Y': 4,
'Z': 10,
}
# Refactored for loop with list comprehension
# Basically grabs the number value that correlates with the
# letter in the word and creates a list. Returns the sum of the list
# Example: 'Dog' -> sum([2, 1, 2]) = 5
return sum([charDict[char] for char in word])
print(point_conversion('dog'))
| 3.90625 | 4 |
core/parser.py | ceilingfans/emond | 0 | 12791221 | from enum import Enum, auto
from typing import List
from core.error import print_error, Errors
VALID_TOKENS = "+-><][%|^$?!/"
class TokenType(Enum):
ADD = auto() # +
MINUS = auto() # -
CELL_SHIFT_LEFT = auto() # <
CELL_SHIFT_RIGHT = auto() # >
LOOP_START = auto() # [
LOOP_END = auto() # ]
PRINT_CHAR = auto() # %
PRINT_NUM = auto() # |
PRINT_NEWLINE = auto() # ^
ALPHA_RESET = auto() # $
UPPER_ALPHA_RESET = auto() # ?
HARD_RESET = auto() # !
VALUE_GET = auto() # /
NEWLINE = auto()
WHITESPACE = auto()
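# push() appends obj to the list reached by following the last element `depth` levels
# deep inside lst; parse_brackets() below uses it to turn the flat token stream into
# nested lists, opening one sub-list per matched [ ... ] loop.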
def push(obj, lst, depth):
while depth:
lst = lst[-1]
depth -= 1
lst.append(obj)
def parse_brackets(tokens: List[TokenType]):
groups = []
depth = 0
try:
for char in tokens:
if char == TokenType.LOOP_START:
push([], groups, depth)
depth += 1
elif char == TokenType.LOOP_END:
depth -= 1
else:
push(char, groups, depth)
except IndexError:
print_error(Errors.UNMATCHED_BRACKET)
exit(1)
if depth > 0:
print_error(Errors.UNMATCHED_BRACKET)
exit(1)
else:
return groups
def parse(source: str, filepath: str = None, exit_on_fail: bool = True) -> List | bool:
if source.count("[") != source.count("]"):
print_error(Errors.UNMATCHED_BRACKET)
exit(1)
tokens = []
for line_i, line in enumerate(source.strip().splitlines()):
for token_i, token in enumerate(line):
match token:
case "+":
tokens.append(TokenType.ADD)
case "-":
tokens.append(TokenType.MINUS)
case "<":
tokens.append(TokenType.CELL_SHIFT_LEFT)
case ">":
tokens.append(TokenType.CELL_SHIFT_RIGHT)
case "[":
tokens.append(TokenType.LOOP_START)
case "]":
tokens.append(TokenType.LOOP_END)
case "%":
tokens.append(TokenType.PRINT_CHAR)
case "|":
tokens.append(TokenType.PRINT_NUM)
case "^":
tokens.append(TokenType.PRINT_NEWLINE)
case "!":
tokens.append(TokenType.HARD_RESET)
case "$":
tokens.append(TokenType.ALPHA_RESET)
case "?":
tokens.append(TokenType.UPPER_ALPHA_RESET)
case "/":
tokens.append(TokenType.VALUE_GET)
case " " | "\t":
tokens.append(TokenType.WHITESPACE)
case ";":
break
case _:
if exit_on_fail:
print_error(
Errors.UNKNOWN_TOKEN, line, filepath, (line_i, token_i)
)
exit(1)
return False
tokens.append(TokenType.NEWLINE)
return parse_brackets(tokens)
| 3.234375 | 3 |
orthogonal_snakemake/make_cfmid_energy_dash_collision_energy.py | plbremer/cfmid_4_benchmarking | 0 | 12791222 | import pandas
#################
input_panda_address=snakemake.input.input_panda_address
output_panda_address=snakemake.output.output_panda_address
#################
input_panda=pandas.read_csv(input_panda_address,sep='¬',header=0)
def fill_cfmid_collision_energy_column(temp_panda):
#iterrate through entire panda
for index,row in temp_panda.iterrows():
#if the rank observed is lower than the lowest rank, record that
if row['energy#']=='energy0':
temp_panda.loc[index,'cfmid-collision']='10-'+str(row['Collision_energy'])
elif row['energy#']=='energy1':
temp_panda.loc[index,'cfmid-collision']='20-'+str(row['Collision_energy'])
elif row['energy#']=='energy2':
temp_panda.loc[index,'cfmid-collision']='40-'+str(row['Collision_energy'])
input_panda.insert(loc=input_panda.columns.size-2,column='cfmid-collision',value='null')
fill_cfmid_collision_energy_column(input_panda)
input_panda.to_csv(output_panda_address,sep='¬',index=False)
| 2.78125 | 3 |
models/morphing_encoder.py | Gerryflap/master_thesis | 0 | 12791223 | <reponame>Gerryflap/master_thesis
"""
Models a Gz/Encoder that is able to generate morphs.
This allows for a single method that captures all morphing methods proposed in RT, apart from gradient descent.
This method is meant to be overridden by some encoders but the default implementation takes the mean.
"""
import torch
class MorphingEncoder(torch.nn.Module):
def morph(self, x1, x2, use_mean=False, return_all=False):
"""
        Morphs the images in x1 with the images in x2 and returns the resulting latent representations.
:param x1: A batch of images of the first identities to be morphed
:param x2: A batch of images to morph with the x1 images
:param use_mean: use z_mean instead of sampling from q(z|x)
        :param return_all: Returns z_morph, z1 and z2 when enabled
:return: A batch of morphed z values. These will have to go through the decoder/Gx in order to decode.
"""
z1, z2 = self.encode(x1, use_mean=use_mean), self.encode(x2, use_mean=use_mean)
z = self.morph_zs(z1, z2)
if return_all:
return z, z1, z2
return z
def morph_zs(self, z1, z2):
"""
Morphs the latent vectors z1 and z2 and outputs z_morph
:param z1: a batch of latent vectors for the first identities to be morphed
:param z2: a batch of latent vectors for the second identities to be morphed
        :return: z_morph, the element-wise mean of z1 and z2
"""
z = 0.5 * (z1 + z2)
return z
def encode(self, x, use_mean=False):
"""
Encodes x to a latent vector. This method exists to unify the return values.
Different models might return more values when called directly.
The default implementation assumes a VAE like encoder that returns a 3-tuple
where the first element is a sample. If this is not the case, the method should be overridden.
:param x: A batch of images
:return: A list of latent representations of these images in x
"""
z, zm, _ = self(x)
if not use_mean:
return z
else:
return zm
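

# Minimal usage sketch: _ToyEncoder is a hypothetical stand-in with a tiny linear
# "latent space", meant only to show how morph() averages the two latent codes; it
# is not a real encoder from this project.
class _ToyEncoder(MorphingEncoder):
    def __init__(self):
        super().__init__()
        self.lin = torch.nn.Linear(4, 2)

    def forward(self, x):
        zm = self.lin(x)
        z = zm + 0.1 * torch.randn_like(zm)  # crude stand-in for sampling from q(z|x)
        return z, zm, None


if __name__ == "__main__":
    enc = _ToyEncoder()
    x1, x2 = torch.randn(8, 4), torch.randn(8, 4)
    z_morph = enc.morph(x1, x2, use_mean=True)  # 0.5 * (z1 + z2) by default
    print(z_morph.shape)  # torch.Size([8, 2])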
| 2.625 | 3 |
Python/645set_mismatch.py | Apocrypse/LeetCode | 4 | 12791224 | <filename>Python/645set_mismatch.py
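# The duplicated value falls out of sum(nums) - sum(set(nums)), while the missing
# value comes from comparing the expected total n*(n+1)//2 of 1..n against the sum
# of the distinct values.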
class Solution:
def findErrorNums(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
n = len(nums)
duplication = sum(nums) - sum(set(nums))
missing = n * (n + 1) // 2 - sum(set(nums))
return [duplication, missing]
| 3.390625 | 3 |
lacquer/tree/visitor.py | provingground-moe/lacquer | 30 | 12791225 | <reponame>provingground-moe/lacquer
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .join_criteria import JoinOn, JoinUsing
from .grouping import SimpleGroupBy, GroupingSets
class AstVisitor(object):
def process(self, node, context=None):
return node.accept(self, context)
def visit_node(self, node, context):
pass
def visit_expression(self, node, context):
return self.visit_node(node, context)
def visit_reset_session(self, node, context):
return self.visit_statement(node, context)
def visit_current_time(self, node, context):
return self.visit_expression(node, context)
def visit_extract(self, node, context):
return self.visit_expression(node, context)
def visit_arithmetic_binary(self, node, context):
return self.visit_expression(node, context)
def visit_between_predicate(self, node, context):
return self.visit_expression(node, context)
def visit_coalesce_expression(self, node, context):
return self.visit_expression(node, context)
def visit_comparison_expression(self, node, context):
return self.visit_expression(node, context)
def visit_literal(self, node, context):
return self.visit_expression(node, context)
def visit_double_literal(self, node, context):
return self.visit_literal(node, context)
def visit_statement(self, node, context):
return self.visit_node(node, context)
def visit_query(self, node, context):
return self.visit_statement(node, context)
def visit_explain(self, node, context):
return self.visit_statement(node, context)
def visit_show_tables(self, node, context):
return self.visit_statement(node, context)
def visit_show_schemas(self, node, context):
return self.visit_statement(node, context)
def visit_show_catalogs(self, node, context):
return self.visit_statement(node, context)
def visit_show_columns(self, node, context):
return self.visit_statement(node, context)
def visit_show_partitions(self, node, context):
return self.visit_statement(node, context)
def visit_show_functions(self, node, context):
return self.visit_statement(node, context)
def visit_use(self, node, context):
return self.visit_statement(node, context)
def visit_show_session(self, node, context):
return self.visit_statement(node, context)
def visit_set_session(self, node, context):
return self.visit_statement(node, context)
def visit_generic_literal(self, node, context):
return self.visit_literal(node, context)
def visit_time_literal(self, node, context):
return self.visit_literal(node, context)
def visit_explain_option(self, node, context):
return self.visit_node(node, context)
def visit_with(self, node, context):
return self.visit_node(node, context)
def visit_approximate(self, node, context):
return self.visit_node(node, context)
def visit_with_query(self, node, context):
return self.visit_node(node, context)
def visit_select(self, node, context):
return self.visit_node(node, context)
def visit_relation(self, node, context):
return self.visit_node(node, context)
def visit_query_body(self, node, context):
return self.visit_relation(node, context)
def visit_query_specification(self, node, context):
return self.visit_query_body(node, context)
def visit_set_operation(self, node, context):
return self.visit_query_body(node, context)
def visit_union(self, node, context):
return self.visit_set_operation(node, context)
def visit_intersect(self, node, context):
return self.visit_set_operation(node, context)
def visit_except(self, node, context):
return self.visit_set_operation(node, context)
def visit_timestamp_literal(self, node, context):
return self.visit_literal(node, context)
def visit_when_clause(self, node, context):
return self.visit_expression(node, context)
def visit_interval_literal(self, node, context):
return self.visit_literal(node, context)
def visit_in_predicate(self, node, context):
return self.visit_expression(node, context)
def visit_function_call(self, node, context):
return self.visit_expression(node, context)
def visit_lambda_expression(self, node, context):
return self.visit_expression(node, context)
def visit_simple_case_expression(self, node, context):
return self.visit_expression(node, context)
def visit_string_literal(self, node, context):
return self.visit_literal(node, context)
def visit_binary_literal(self, node, context):
return self.visit_literal(node, context)
def visit_boolean_literal(self, node, context):
return self.visit_literal(node, context)
def visit_in_list_expression(self, node, context):
return self.visit_expression(node, context)
def visit_qualified_name_reference(self, node, context):
return self.visit_expression(node, context)
def visit_dereference_expression(self, node, context):
return self.visit_expression(node, context)
def visit_null_if_expression(self, node, context):
return self.visit_expression(node, context)
def visit_if_expression(self, node, context):
return self.visit_expression(node, context)
def visit_null_literal(self, node, context):
return self.visit_literal(node, context)
def visit_arithmetic_unary(self, node, context):
return self.visit_expression(node, context)
def visit_not_expression(self, node, context):
return self.visit_expression(node, context)
def visit_select_item(self, node, context):
return self.visit_node(node, context)
def visit_single_column(self, node, context):
return self.visit_select_item(node, context)
def visit_all_columns(self, node, context):
return self.visit_select_item(node, context)
def visit_searched_case_expression(self, node, context):
return self.visit_expression(node, context)
def visit_like_predicate(self, node, context):
return self.visit_expression(node, context)
def visit_is_not_null_predicate(self, node, context):
return self.visit_expression(node, context)
def visit_is_null_predicate(self, node, context):
return self.visit_expression(node, context)
def visit_array_constructor(self, node, context):
return self.visit_expression(node, context)
def visit_subscript_expression(self, node, context):
return self.visit_expression(node, context)
def visit_long_literal(self, node, context):
return self.visit_literal(node, context)
def visit_logical_binary_expression(self, node, context):
return self.visit_expression(node, context)
def visit_subquery_expression(self, node, context):
return self.visit_expression(node, context)
def visit_sort_item(self, node, context):
return self.visit_node(node, context)
def visit_table(self, node, context):
return self.visit_query_body(node, context)
def visit_unnest(self, node, context):
return self.visit_relation(node, context)
def visit_values(self, node, context):
return self.visit_query_body(node, context)
def visit_row(self, node, context):
return self.visit_node(node, context)
def visit_table_subquery(self, node, context):
return self.visit_query_body(node, context)
def visit_aliased_relation(self, node, context):
return self.visit_relation(node, context)
def visit_sampled_relation(self, node, context):
return self.visit_relation(node, context)
def visit_join(self, node, context):
return self.visit_relation(node, context)
def visit_exists(self, node, context):
return self.visit_expression(node, context)
def visit_try_expression(self, node, context):
return self.visit_expression(node, context)
def visit_cast(self, node, context):
return self.visit_expression(node, context)
def visit_input_reference(self, node, context):
return self.visit_expression(node, context)
def visit_window(self, node, context):
return self.visit_node(node, context)
def visit_window_frame(self, node, context):
return self.visit_node(node, context)
def visit_frame_bound(self, node, context):
return self.visit_node(node, context)
def visit_call_argument(self, node, context):
return self.visit_node(node, context)
def visit_table_element(self, node, context):
return self.visit_node(node, context)
def visit_create_table(self, node, context):
return self.visit_statement(node, context)
def visit_create_table_as_select(self, node, context):
return self.visit_statement(node, context)
def visit_drop_table(self, node, context):
return self.visit_statement(node, context)
def visit_rename_table(self, node, context):
return self.visit_statement(node, context)
def visit_rename_column(self, node, context):
return self.visit_statement(node, context)
def visit_add_column(self, node, context):
return self.visit_statement(node, context)
def visit_create_view(self, node, context):
return self.visit_statement(node, context)
def visit_drop_view(self, node, context):
return self.visit_statement(node, context)
def visit_insert(self, node, context):
return self.visit_node(node, context)
def visit_call(self, node, context):
return self.visit_node(node, context)
def visit_delete(self, node, context):
return self.visit_statement(node, context)
def visit_start_transaction(self, node, context):
return self.visit_statement(node, context)
def visit_grant(self, node, context):
return self.visit_statement(node, context)
def visit_transaction_mode(self, node, context):
return self.visit_node(node, context)
def visit_isolation_level(self, node, context):
return self.visit_transaction_mode(node, context)
def visit_transaction_access_mode(self, node, context):
return self.visit_transaction_mode(node, context)
def visit_commit(self, node, context):
return self.visit_statement(node, context)
def visit_rollback(self, node, context):
return self.visit_statement(node, context)
def visit_at_time_zone(self, node, context):
return self.visit_expression(node, context)
class DefaultTraversalVisitor(AstVisitor):
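    """Default traversal visitor: each visit method processes the node's children."""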
def visit_extract(self, node, context):
return self.process(node.expression, context)
def visit_cast(self, node, context):
return self.process(node.expression, context)
def visit_arithmetic_binary(self, node, context):
self.process(node.left, context)
self.process(node.right, context)
return None
def visit_between_predicate(self, node, context):
self.process(node.value, context)
self.process(node.min, context)
self.process(node.max, context)
return None
def visit_coalesce_expression(self, node, context):
for operand in node.operands:
self.process(operand, context)
return None
def visit_at_time_zone(self, node, context):
self.process(node.value, context)
self.process(node.time_zone, context)
return None
def visit_array_constructor(self, node, context):
for expression in node.values:
self.process(expression, context)
return None
def visit_subscript_expression(self, node, context):
self.process(node.base, context)
self.process(node.index, context)
return None
def visit_comparison_expression(self, node, context):
self.process(node.left, context)
self.process(node.right, context)
return None
def visit_query(self, node, context):
self.process(node.query_body, context)
for sort_item in node.order_by:
self.process(sort_item, context)
return None
def visit_with(self, node, context):
for query in node.queries:
self.process(query, context)
return None
def visit_with_query(self, node, context):
return self.process(node.query, context)
def visit_select(self, node, context):
for item in node.select_items:
self.process(item, context)
return None
def visit_single_column(self, node, context):
self.process(node.expression, context)
return None
def visit_when_clause(self, node, context):
self.process(node.operand, context)
self.process(node.result, context)
return None
def visit_in_predicate(self, node, context):
self.process(node.value, context)
self.process(node.value_list, context)
return None
def visit_function_call(self, node, context):
for argument in node.arguments:
self.process(argument, context)
if node.window:
self.process(node.window, context)
return None
def visit_dereference_expression(self, node, context):
self.process(node.base, context)
return None
"""
    def visit_window(self, node, context):
for expression in node.partition:
self.process(expression, context)
for sort_item in node.order_by:
self.process(sort_item.sort_key, context)
if node.frame:
self.process(node.frame, context)
return None
    def visit_window_frame(self, node, context):
self.process(node.start, context)
if node.end:
self.process(node.end, context)
return None
    def visit_frame_bound(self, node, context):
if node.value:
self.process(node.value, context)
return None
"""
def visit_simple_case_expression(self, node, context):
self.process(node.operand, context)
for clause in node.when_clauses:
self.process(clause, context)
if node.default_value:
            self.process(node.default_value, context)
return None
def visit_in_list_expression(self, node, context):
for value in node.values:
self.process(value, context)
return None
    def visit_null_if_expression(self, node, context):
self.process(node.first, context)
self.process(node.second, context)
return None
def visit_if_expression(self, node, context):
self.process(node.condition, context)
self.process(node.true_value, context)
if node.false_value:
self.process(node.false_value, context)
return None
def visit_try_expression(self, node, context):
self.process(node.inner_expression, context)
return None
def visit_arithmetic_unary(self, node, context):
return self.process(node.value, context)
def visit_not_expression(self, node, context):
return self.process(node.value, context)
def visit_searched_case_expression(self, node, context):
for clause in node.when_clauses:
self.process(clause, context)
if node.default_value:
self.process(node.default_value, context)
return None
def visit_like_predicate(self, node, context):
self.process(node.value, context)
self.process(node.pattern, context)
if node.escape is not None:
self.process(node.escape, context)
return None
    def visit_is_not_null_predicate(self, node, context):
        return self.process(node.value, context)
    def visit_is_null_predicate(self, node, context):
return self.process(node.value, context)
def visit_logical_binary_expression(self, node, context):
self.process(node.left, context)
self.process(node.right, context)
return None
def visit_subquery_expression(self, node, context):
return self.process(node.query, context)
def visit_sort_item(self, node, context):
return self.process(node.sort_key, context)
def visit_query_specification(self, node, context):
self.process(node.select, context)
if node.from_:
self.process(node.from_, context)
if node.where:
self.process(node.where, context)
if node.group_by:
grouping_elements = []
if isinstance(node.group_by, SimpleGroupBy):
grouping_elements = node.group_by.columns
elif isinstance(node.group_by, GroupingSets):
grouping_elements = node.group_by.sets
for grouping_element in grouping_elements:
self.process(grouping_element, context)
if node.having:
self.process(node.having, context)
for sort_item in node.order_by:
self.process(sort_item, context)
return None
def visit_union(self, node, context):
for relation in node.relations:
self.process(relation, context)
return None
def visit_intersect(self, node, context):
for relation in node.relations:
self.process(relation, context)
return None
def visit_except(self, node, context):
self.process(node.left, context)
self.process(node.right, context)
return None
def visit_values(self, node, context):
for row in node.rows:
self.process(row, context)
return None
def visit_row(self, node, context):
for expression in node.items:
self.process(expression, context)
return None
def visit_table_subquery(self, node, context):
return self.process(node.query, context)
def visit_aliased_relation(self, node, context):
return self.process(node.relation, context)
def visit_sampled_relation(self, node, context):
self.process(node.relation, context)
self.process(node.get_sample_percentage(), context)
if node.get_columns_to_stratify_on().is_present():
for expression in node.get_columns_to_stratify_on().get():
self.process(expression, context)
return None
def visit_join(self, node, context):
self.process(node.left, context)
self.process(node.right, context)
if isinstance(node.criteria, JoinOn):
self.process(node.criteria.expression, context)
elif isinstance(node.criteria, JoinUsing):
self.process(node.criteria.columns)
return None
class DefaultExpressionTraversalVisitor(DefaultTraversalVisitor):
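    """Expression traversal visitor that does not descend into subquery expressions."""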
def __init__(self, line=None, pos=None):
super(DefaultExpressionTraversalVisitor, self).__init__(line, pos)
def visit_subquery_expression(self, node, context):
return None
| 2.109375 | 2 |
skinapp/views.py | zeochoy/skinapp | 4 | 12791226 | import os
import glob
from flask import Flask
from flask import jsonify
from flask import request, render_template
from skinapp import app
from model.utils import *
from model.skinmodel import *
valid_mimetypes = ['image/jpeg', 'image/png']
@app.route('/')
def index():
samples = glob.glob("%s/*" % app.config['SAMPLE_FOLDER'])
return render_template('index.html', samples=samples)
@app.route('/predict', methods=['POST'])
def predict():
if request.method == 'POST':
if 'file' not in request.files:
return jsonify({'error': 'no file'}), 400
# Image info
img_file = request.files.get('file')
img_name = img_file.filename
mimetype = img_file.content_type
# Return an error if not a valid mimetype
if mimetype not in valid_mimetypes:
return jsonify({'error': 'bad-type'})
# Write image to static directory
img_file.save(os.path.join(app.config['UPLOAD_FOLDER'], img_name))
img = open_image(os.path.join(app.config['UPLOAD_FOLDER'], img_name))
# Run Prediction on the model
res = get_predictions(img)
# Delete image when done with analysis
os.remove(os.path.join(app.config['UPLOAD_FOLDER'], img_name))
return jsonify(res)
| 2.4375 | 2 |
Q530.py | Linchin/python_leetcode_git | 0 | 12791227 | <filename>Q530.py
"""
530
easy
min absolute difference in BST
Given the root of a Binary Search Tree (BST), return the minimum absolute
difference between the values of any two different nodes in the tree.
"""
# Definition for a binary tree node.
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution:
def getMinimumDifference(self, root):
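        # An in-order traversal of a BST visits values in ascending order, so the
        # minimum absolute difference must occur between two adjacent values in
        # that traversal.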
vals = []
def in_order(node):
if node.left:
in_order(node.left)
vals.append(node.val)
if node.right:
in_order(node.right)
in_order(root)
min_dist = abs(vals[1]-vals[0])
for i in range(1, len(vals)):
min_dist = min(min_dist, abs(vals[i]-vals[i-1]))
return min_dist
| 3.78125 | 4 |
realtime_test20200807/get_d4_calctime.py | takumihonda/AIP_realtime | 0 | 12791228 | import os
import sys
from datetime import datetime, timedelta
import numpy as np
data_path = "../../dat4figs_JAMES/Fig06"
os.makedirs( data_path, exist_ok=True )
USE_ARCH_DAT = True
#USE_ARCH_DAT = False
quick_hist = False
quick_bar = True
quick_bar = False
def d4_computation_time_nparray( top='' ):
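    # Scan the job log files under 'top' and collect forecast wall-clock times,
    # DA cycle times, and a per-section timer breakdown (DETAIL, keyed by timer
    # name with numpy-array values).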
dirs = [ f.name for f in os.scandir( top ) ] #if f.is_file() ]
path_l = []
ftimes = []
ctimes = []
# Prepare file path list
for dir_ in dirs:
path_l.append( os.path.join( top, dir_, ) )
scale_l = []
# Get computation time for SCALE
for path in path_l:
if not os.path.isfile( path ):
break
with open( path ) as f:
lines = f.readlines()
for l in lines:
if '[Info:fcst] End forecast' in l:
data = l.split()
try:
ftimes.append( float( data[7] ) )
except:
print( "Failed", data )
elif '[Info:DA]' in l:
data = l.split()
try:
ctimes.append( float( data[6] ) )
except:
print( "Failed", data )
elif '##### TIMER' in l:
data = l.split()
try:
tit_ = data[3]
dat_ = float( data[5] )
if tit_ == 'SCALE':
scale_l.append( dat_ )
except:
print( "Failed", data )
scale_l = np.array( scale_l )
key_l = [ "SCALE", "READ_OBS",
"OBS_OPERATOR",
"INITIALIZE",
"INITIALIZE_OTHERS",
"INIT_LETKF",
"PROCESS_OBS",
"SET_GRID",
"READ_GUES",
"GUES_MEAN",
"WRITE RESTART/GRADS(GUES)",
"DAS_LETKF",
"ANAL_MEAN",
"WRITE_ANAL",
"DEALLOCATE",
"WRITE RESTART/GRADS(ANAL)",
"OTHERS",
"FINALIZE",
"JIT_GET",
]
# prepare nan array
iarray = np.zeros( scale_l.shape )
iarray[:] = np.nan
DETAIL = {}
for key in key_l:
if key == 'SCALE':
DETAIL[key] = scale_l
else:
DETAIL[key] = np.copy( iarray )
# Get computation time for all
i = -1
for path in path_l:
if not os.path.isfile( path ):
break
with open( path ) as f:
lines = f.readlines()
for l in lines:
if '##### TIMER' in l:
data = l.split()
try:
tit_ = data[3]
tit4_ = data[4]
dat_ = float( data[5] )
if tit_ == 'SCALE':
i += 1
if tit_ == "WRITE":
dat_ = float( data[6] )
if tit4_ == "RESTART/GRADS(ANAL)":
tit_ = "WRITE RESTART/GRADS(ANAL)"
elif tit4_ == "RESTART/GRADS(GUES)":
tit_ = "WRITE RESTART/GRADS(GUES)"
i_ = i
if i_ < 0:
i_ = 0
if tit_ in DETAIL:
DETAIL[tit_][i_] = dat_
else:
DETAIL["OTHERS"][i_] = dat_
except:
print( "Failed", data )
elif '......jitdt_read_toshiba:jitget:' in l:
data = l.split()
try:
tit_ = "JIT_GET"
dat_ = float( data[1] )
DETAIL[tit_][i] = dat_
except:
print( "Failed", data )
return( ftimes, ctimes, DETAIL )
def d4_computation_time( top='', ctmax=600 ):
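    # List-based variant of d4_computation_time_nparray: parse the 'job.o' logs
    # under 'top' and collect forecast times, DA cycle times, and per-section timings.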
dirs = [ f.name for f in os.scandir( top ) ] #if f.is_file() ]
ftimes = []
ctimes = []
path_l = []
init = []
init_others = []
init_letkf = []
scale = []
others = []
read_obs = []
obsope = []
process_obs = []
set_grid = []
read_gues = []
gues_mean = []
write_restartg = []
das_letkf = []
anal_mean = []
write_anal = []
deallocate = []
write_restarta = []
others = []
finalize = []
jitget = []
DETAIL = { "SCALE": scale,
"READ_OBS":read_obs,
"OBS_OPERATOR": obsope,
"INITIALIZE": init,
"INITIALIZE_OTHERS": init_others,
"INIT_LETKF": init_letkf,
"PROCESS_OBS": process_obs,
"SET_GRID": set_grid,
"READ_GUES": read_gues,
"GUES_MEAN": gues_mean,
"WRITE RESTART/GRADS(GUES)": write_restartg,
"DAS_LETKF": das_letkf,
"ANAL_MEAN": anal_mean,
"WRITE_ANAL": write_anal,
"DEALLOCATE": deallocate,
"WRITE RESTART/GRADS(ANAL)": write_restarta,
"OTHERS": others,
"FINALIZE": finalize,
"JIT_GET": jitget,
}
# Prepare file path list
for dir_ in dirs:
fname = 'job.o' #[ f.name for f in os.scandir( os.path.join( top, dir_ ) ) ] #if f.is_file() ]
path_l.append( os.path.join( top, dir_, fname ) )
# Get computation time
for path in path_l:
if not os.path.isfile( path ):
break
with open( path ) as f:
lines = f.readlines()
for l in lines:
if '[Info:fcst] End forecast' in l:
data = l.split()
try:
ftimes.append( float( data[7] ) )
except:
print( "Failed", data )
elif '[Info:DA]' in l:
data = l.split()
try:
ctimes.append( float( data[6] ) )
except:
print( "Failed", data )
elif '##### TIMER' in l:
data = l.split()
try:
tit_ = data[3]
tit4_ = data[4]
dat_ = float( data[5] )
if tit_ == "WRITE":
dat_ = float( data[6] )
if tit4_ == "RESTART/GRADS(ANAL)":
tit_ = "WRITE RESTART/GRADS(ANAL)"
elif tit4_ == "RESTART/GRADS(GUES)":
tit_ = "WRITE RESTART/GRADS(GUES)"
if tit_ in DETAIL:
DETAIL[tit_].append( dat_ )
else:
DETAIL["OTHERS"].append( dat_ )
except:
print( "Failed", data )
elif '......jitdt_read_toshiba:jitget:' in l:
data = l.split()
try:
tit_ = "JIT_GET"
dat_ = float( data[1] )
DETAIL[tit_].append( dat_ )
except:
print( "Failed", data )
for key in DETAIL.keys():
DETAIL[key] = np.array( DETAIL[key] )
return( ftimes, ctimes, DETAIL )
def plot_hist( key="", dat=np.array([]) ):
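    # Plot a histogram of computation times for one timer section, mark the mode
    # and mean, save the figure, and return (mode, mean).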
import matplotlib.pyplot as plt
from scipy import stats
xmin = 0
xmax = 60
# Scott's choise
#h = 3.5 * np.std( dat, ddof=1 ) / np.power( dat.size, 1.0/3.0)
#bins = int( ( xmax - xmin ) / h )
# Square-root choice
bins = int( np.sqrt( dat.size ) )
fig, ax = plt.subplots( 1, 1, figsize=(6,4) )
fig.subplots_adjust( left=0.15, bottom=0.15, right=0.95, top=0.92, )
rn, rbins, rpatches = ax.hist( dat, range=(xmin, xmax), bins=bins, alpha=0.6 )
imode = np.argmax( rn )
mode = np.mean( rbins[imode:imode+2] )
mean = np.mean( dat )
#print( len(rn), len(rbins), mode )
lw = 1.0
ymin = 0.0
ymax = 4000 #dat_.size
ls = 'dashed'
color = 'b'
ax.vlines( x=mode, ymin=ymin, ymax=ymax,
linewidths=lw, linestyles=ls, color=color )
color = 'k'
ax.vlines( x=mean, ymin=ymin, ymax=ymax,
linewidths=lw, linestyles=ls, color=color )
text_ = 'Mean:{0:.3f} s\nMode:{1:.3f} s\nN={2:}'.format( mean, mode, dat.size )
ax.text( 0.99, 0.99, text_,
fontsize=12, transform=ax.transAxes,
ha='right',
va='top' )
tit_ = key
ax.text( 0.5, 1.01, tit_,
fontsize=12, transform=ax.transAxes,
ha='center',
va='bottom' )
ax.set_xlim( xmin, xmax )
ax.set_ylim( ymin, ymax )
xlab = 'Computation time (s)'
ylab = 'Frequency'
ax.set_xlabel( xlab, fontsize=11)
ax.set_ylabel( ylab, fontsize=11)
key_ = key.replace( ' ', '_' ).replace( '/', '_' ) #.replace( '(', '_' ).replace( ')')
ofig = 'png/1p_d4_{0:}.png'.format( key_ )
print( ofig )
if quick_hist:
plt.show()
else:
plt.savefig( ofig,
bbox_inches="tight", pad_inches = 0.1)
plt.clf()
plt.close('all')
return( mode, mean )
def plot_bar_2p( dic={}, ftimes=np.array([]) ):
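    # Two-panel figure: (a) stacked bar of the mean DA time breakdown in 'dic',
    # (b) mean 30-min forecast time from 'ftimes'.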
import matplotlib.pyplot as plt
fig, ( ax1,ax2 ) = plt.subplots( 1, 2, figsize=(6,4) )
# fig.subplots_adjust( left=0.15, bottom=0.05, right=0.5, top=0.92, )
fig.subplots_adjust( left=0.15, bottom=0.06, right=0.95, top=0.92,
wspace=0.3, hspace=0.05)
ax1.set_xlim( 0, 2.0 )
width1 = 0.8
#c_l = [ 'firebrick', 'dodgerblue', 'limegreen', 'gold' ]
#c_l = [ 'dodgerblue', 'firebrick', 'forestgreen', 'goldenrod' ]
c_l = [ 'dodgerblue', 'firebrick', 'gray', 'goldenrod', 'k' ]
#c_l = [ 'cyan', 'magenta', 'y', 'k' ]
acm = 0.0
for i, key in enumerate( dic.keys() ):
lab = key
if lab == 'OBS':
lab = 'Obs pre-\nprocessing'
elif lab == 'DATA TRANSFER':
lab = 'Memory copy'
elif lab == 'JIT-DT':
continue
ax1.bar( 1.0, dic[key], bottom=acm,
label=lab, color=c_l[i], width=width1 )
acm += dic[key]
# ax.legend( fontsize=12, bbox_to_anchor=(1.01, 1.00) )
handles, labels = ax1.get_legend_handles_labels()
ax1.legend( reversed(handles), reversed(labels), bbox_to_anchor=(1.01, 1.00),
fontsize=12 )
ax1.set_ylabel( 'Computation time (s)', fontsize=12 )
#ax.set_xlim( 0, 1.0 )
yticks = np.arange( 0, 22, 2 )
ax1.set_ylim( 0, 20.0 )
ax1.set_yticks( yticks )
ax1.tick_params( axis='x', which='both',
bottom=False, top=False,
labelbottom=False )
ax1.hlines( xmin=0, xmax=2, y=np.arange( 4, 20, 4 ), lw=1.0, linestyle='dashed',
color='gray', alpha=0.5 )
ax2.set_ylim( 0, 151.0 )
ax2.set_xlim( 0, 2.0 )
ax2.hlines( xmin=0, xmax=2, y=[60, 120], lw=1.0, linestyle='dashed',
color='gray', alpha=0.5 )
width2 = 0.8
ax2.bar( 1, np.mean(ftimes), label="30-min forecast", width=width2,
color='dodgerblue' )
print( "std:", np.std( ftimes, ddof=1 ), len( ftimes ) )
ax2.tick_params( axis='x', which='both',
bottom=False, top=False,
labelbottom=False )
ax_l = [ ax1, ax2 ]
tit_l = [ "Data assimilation",
"30-min forecast" ]
pnum_l = [ "(a)", "(b)" ]
for i, ax in enumerate( ax_l ):
ax.text( 0.5, 1.01, tit_l[i],
fontsize=12, transform=ax.transAxes,
ha='center',
va='bottom' )
ax.text( 0.0, 1.01, pnum_l[i],
fontsize=10, transform=ax.transAxes,
ha='left',
va='bottom' )
ofig = 'pdf/Fig06.pdf'
print( ofig )
if quick_bar:
plt.show()
else:
plt.savefig( ofig,
bbox_inches="tight", pad_inches = 0.1)
plt.clf()
plt.close('all')
def plot_bar_2p_scale( dic={}, ftimes=np.array([]), dic2={} ):
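    # Same as plot_bar_2p, but with a second stacked bar (dic2) for comparison
    # against another experiment.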
import matplotlib.pyplot as plt
fig, ( ax1,ax2 ) = plt.subplots( 1, 2, figsize=(6,4) )
# fig.subplots_adjust( left=0.15, bottom=0.05, right=0.5, top=0.92, )
fig.subplots_adjust( left=0.15, bottom=0.06, right=0.95, top=0.92,
wspace=0.3, hspace=0.05)
ax1.set_xlim( 0, 3.0 )
width1 = 0.8
#c_l = [ 'firebrick', 'dodgerblue', 'limegreen', 'gold' ]
#c_l = [ 'dodgerblue', 'firebrick', 'forestgreen', 'goldenrod' ]
c_l = [ 'dodgerblue', 'firebrick', 'gray', 'goldenrod', 'k' ]
#c_l = [ 'cyan', 'magenta', 'y', 'k' ]
acm = 0.0
for i, key in enumerate( dic.keys() ):
lab = key
if lab == 'OBS':
lab = 'Obs pre-\nprocessing'
elif lab == 'DATA TRANSFER':
lab = 'Memory copy'
elif lab == 'JIT-DT':
continue
ax1.bar( 1.0, dic[key], bottom=acm,
label=lab, color=c_l[i], width=width1 )
acm += dic[key]
acm2 = 0.0
for i, key in enumerate( dic2.keys() ):
lab = key
if lab == 'OBS':
lab = 'Obs pre-\nprocessing'
elif lab == 'DATA TRANSFER':
lab = 'Memory copy'
elif lab == 'JIT-DT':
continue
print( "check", dic2[key] )
ax1.bar( 2.0, dic2[key], bottom=acm2,
label=None, color=c_l[i], width=width1 )
        acm2 += dic2[key]
# ax.legend( fontsize=12, bbox_to_anchor=(1.01, 1.00) )
handles, labels = ax1.get_legend_handles_labels()
ax1.legend( reversed(handles), reversed(labels), bbox_to_anchor=(1.01, 1.00),
fontsize=12 )
ax1.set_ylabel( 'Computation time (s)', fontsize=12 )
#ax.set_xlim( 0, 1.0 )
yticks = np.arange( 0, 22, 2 )
ax1.set_ylim( 0, 20.0 )
ax1.set_yticks( yticks )
ax1.tick_params( axis='x', which='both',
bottom=False, top=False,
labelbottom=False )
ax1.hlines( xmin=0, xmax=2, y=np.arange( 4, 20, 4 ), lw=1.0, linestyle='dashed',
color='gray', alpha=0.5 )
ax2.set_ylim( 0, 151.0 )
ax2.set_xlim( 0, 2.0 )
ax2.hlines( xmin=0, xmax=2, y=[60, 120], lw=1.0, linestyle='dashed',
color='gray', alpha=0.5 )
width2 = 0.8
ax2.bar( 1, np.mean(ftimes), label="30-min forecast", width=width2,
color='dodgerblue' )
print( "std:", np.std( ftimes, ddof=1 ), len( ftimes ) )
ax2.tick_params( axis='x', which='both',
bottom=False, top=False,
labelbottom=False )
ax_l = [ ax1, ax2 ]
tit_l = [ "Data assimilation",
"30-min forecast" ]
pnum_l = [ "(a)", "(b)" ]
for i, ax in enumerate( ax_l ):
ax.text( 0.5, 1.01, tit_l[i],
fontsize=12, transform=ax.transAxes,
ha='center',
va='bottom' )
ax.text( 0.0, 1.01, pnum_l[i],
fontsize=10, transform=ax.transAxes,
ha='left',
va='bottom' )
    ofig = 'png/2p_d4_bar_scale.png'
print( ofig )
if quick_bar:
plt.show()
else:
plt.savefig( ofig,
bbox_inches="tight", pad_inches = 0.1)
plt.clf()
plt.close('all')
def plot_bar( dic={} ):
import matplotlib.pyplot as plt
fig, ax = plt.subplots( 1, 1, figsize=(5,5) )
fig.subplots_adjust( left=0.15, bottom=0.05, right=0.5, top=0.92, )
#c_l = [ 'firebrick', 'dodgerblue', 'limegreen', 'gold' ]
#c_l = [ 'dodgerblue', 'firebrick', 'forestgreen', 'goldenrod' ]
c_l = [ 'dodgerblue', 'firebrick', 'gray', 'goldenrod', 'k' ]
#c_l = [ 'cyan', 'magenta', 'y', 'k' ]
acm = 0.0
for i, key in enumerate( dic.keys() ):
lab = key
if lab == 'OBS':
lab = 'Obs pre-\nprocessing'
elif lab == 'DATA TRANSFER':
lab = 'Memory copy'
elif lab == 'JIT-DT':
continue
ax.bar( '', dic[key], bottom=acm,
label=lab, color=c_l[i] )
acm += dic[key]
# ax.legend( fontsize=12, bbox_to_anchor=(1.01, 1.00) )
handles, labels = ax.get_legend_handles_labels()
ax.legend( reversed(handles), reversed(labels), bbox_to_anchor=(1.01, 1.00),
fontsize=13 )
ax.set_ylabel( 'Computation time (s)', fontsize=12 )
#ax.set_xlim( 0, 1.0 )
yticks = np.arange( 0, 32, 2 )
ax.set_ylim( 0, 31.0 )
ax.set_yticks( yticks )
ofig = 'png/1p_d4_bar.png'
print( ofig )
if quick_bar:
plt.show()
else:
plt.savefig( ofig,
bbox_inches="tight", pad_inches = 0.1)
plt.clf()
plt.close('all')
####
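# Main script: parse the logs (or load archived results), histogram each timer
# section, aggregate the mean times into coarse categories
# (SCALE / LETKF / OBS / JIT-DT), and plot them.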
SUM = { "SCALE": 0.0,
"LETKF": 0.0,
"OBS": 0.0,
# "DATA TRANSFER": 0.0,
"JIT-DT": 0.0,
}
fn_sum = '{0:}/SUM.npz'.format( data_path, )
fn_ftimes = '{0:}/ftimes.npz'.format( data_path, )
if not USE_ARCH_DAT:
top = '/data_ballantine02/miyoshi-t/honda/SCALE-LETKF/AIP_SAFE/realtime_20200825/log_from_amemiya/d4_500m/exp'
top = '/data_ballantine02/miyoshi-t/honda/SCALE-LETKF/AIP_SAFE/amemiya/d4_500m'
top_test = '/data_ballantine02/miyoshi-t/honda/SCALE-LETKF/AIP_SAFE/realtime_test20200807/data/D4_500m_TEST_DEFAULT_0708_NOBS100_NEAR_HV4/exp/3008084_cycle_20190824150000'
#dtime_max = 1000
ftimes, ctimes, DETAIL = d4_computation_time_nparray( top=top, )
ftimes_test, ctimes_test, DETAIL_test = d4_computation_time_nparray( top=top_test, )
#print( DETAIL["DAS_LETKF"][0:5], DETAIL["WRITE_ANAL"][0:5])
#ftimes, ctimes, DETAIL = d4_computation_time( top=top, )
ctimes = np.array( ctimes )
print( '{0:} average: {1:} (N: {2:})'.format( "cycle", np.nanmean( ctimes ), len(ctimes) ) )
print( '{0:} average: {1:} (N: {2:})'.format( "fcst ", np.mean( ftimes ), len(ftimes) ) )
print("")
DETAIL_MODE = { }
DETAIL_MODE_test = { }
min_read_obs = 1.0
max_read_obs = 30.0
read_obs_ = DETAIL["READ_OBS"]
dat_jit = DETAIL['JIT_GET']
dat_jit[ ( read_obs_ < min_read_obs ) | ( read_obs_ > max_read_obs )] = np.nan
dat_jit_ = dat_jit[ ~np.isnan(dat_jit) ]
for key in DETAIL.keys():
DETAIL[key][ ( read_obs_ < min_read_obs ) | ( read_obs_ > max_read_obs )] = np.nan
time_ = np.nanmean( DETAIL[key] )
dat = DETAIL[key]
dat_ = dat[ ~np.isnan(dat) & ~np.isnan( dat_jit ) ]
num = len( dat_ )
if key == "READ_OBS":
dat_ -= dat_jit_
print( "#### ", key, time_, num, np.nanmax( DETAIL[key] ), np.nanmin( DETAIL[key] ) )
if num > 100:
mode_, mean_ = plot_hist( key=key, dat=dat_ )
#DETAIL_MODE[key] = mode_
DETAIL_MODE[key] = mean_
else:
print( 'Not plot ', key)
read_obs_test = DETAIL_test["READ_OBS"]
#dat_jit_test = DETAIL_test['JIT_GET']
#dat_jit_test[ ( read_obs_test < min_read_obs ) | ( read_obs_test > max_read_obs )] = np.nan
#dat_jit_test = dat_jit_test[ ~np.isnan(dat_jit_test) ]
for key in DETAIL_test.keys():
DETAIL_test[key][ ( read_obs_test < min_read_obs ) | ( read_obs_test > max_read_obs )] = np.nan
time_ = np.nanmean( DETAIL_test[key] )
dat = DETAIL_test[key]
print( key, dat )
#dat_ = dat[ ~np.isnan(dat) & ~np.isnan( dat_jit_test ) ]
dat_ = dat[ ~np.isnan(dat) ]
num = len( dat_ )
# if key == "READ_OBS":
# dat_ -= dat_jit_
print( "#### ", key, time_, num, np.nanmax( DETAIL_test[key] ), np.nanmin( DETAIL_test[key] ) )
if num > 100:
mode_, mean_ = plot_hist( key=key, dat=dat_ )
DETAIL_MODE_test[key] = mean_
else:
print( 'Not plot ', key)
for key in DETAIL_MODE.keys():
print( key )
if key == "SCALE":
SUM["SCALE"] += DETAIL_MODE[key]
elif key == "READ_OBS":
SUM["OBS"] += DETAIL_MODE[key]
# elif key == "READ_GUES" or key == "WRITE_ANAL":
# SUM["DATA TRANSFER"] += DETAIL_MODE[key]
elif key == "JIT_GET":
SUM["JIT-DT"] += DETAIL_MODE[key]
else:
SUM["LETKF"] += DETAIL_MODE[key]
SUM_test = { "SCALE": 0.0,
"LETKF": 0.0,
"OBS": 0.0,
"JIT-DT": 0.0,
}
for key in DETAIL_MODE_test.keys():
if key == "SCALE":
SUM_test["SCALE"] += DETAIL_MODE_test[key]
elif key == "READ_OBS":
SUM_test["OBS"] += DETAIL_MODE_test[key]
# elif key == "READ_GUES" or key == "WRITE_ANAL":
# SUM["DATA TRANSFER"] += DETAIL_MODE[key]
elif key == "JIT_GET":
SUM_test["JIT-DT"] += DETAIL_MODE_test[key]
else:
SUM_test["LETKF"] += DETAIL_MODE_test[key]
np.savez( fn_sum, **SUM, ftimes=ftimes )
np.savez( fn_ftimes, ftimes=ftimes )
else:
with np.load( fn_sum, allow_pickle=True ) as npz:
for key in SUM.keys():
SUM[key] = npz[key]
ftimes = np.load( fn_ftimes, allow_pickle=True )['ftimes']
print( SUM )
#print( DETAIL_MODE )
#print( SUM_test )
#print( DETAIL_MODE_test )
#sys.exit()
#plot_bar( dic=SUM )
plot_bar_2p( dic=SUM, ftimes=ftimes )
#plot_bar_2p_scale( dic=SUM, dic2=SUM_test, ftimes=ftimes )
| 2.125 | 2 |
source/grammar/openqasm_reference_parser/exceptions.py | shiyunon/openqasm | 603 | 12791229 | <gh_stars>100-1000
__all__ = ["Qasm3ParserError"]
class Qasm3ParserError(Exception):
pass
| 1.21875 | 1 |
bessel_zeros/bessel_zeros.py | GrzegorzMika/Towards-adaptivity-via-a-new-discrepancy-principle-for-Poisson-inverse-problems | 0 | 12791230 | <reponame>GrzegorzMika/Towards-adaptivity-via-a-new-discrepancy-principle-for-Poisson-inverse-problems
import numpy as np
import os
zeros = np.loadtxt('./bessel_zeros_short.txt')
np.save('bessel_zeros_short', zeros)
if os.path.exists('./bessel_zeros_short.txt'):
os.remove('./bessel_zeros_short.txt')
| 2.71875 | 3 |
read-camera.py | duchengyao/pycv | 4 | 12791231 | # -*- coding: utf-8 -*-
# Capture video from the camera with OpenCV
# Space --> save a screenshot, ESC --> quit.
# Code adapted from http://blog.csdn.net/tanmengwen/article/details/41892977
import cv2.cv as cv
import time
if __name__ == '__main__':
cv.NamedWindow("camRra", 1)
capture = cv.CaptureFromCAM(0) #开启摄像头
# capture = cv.CaptureFromFile("Video.avi") # 打开一个视频文件
num = 0;
while True:
img = cv.QueryFrame(capture)
cv.ShowImage("camera", img)
key = cv.WaitKey(1) & 0xFF
if key == 27:
break
if key == ord(' '):
num = num + 1
            filename = "frames_%s.jpg" % num
cv.SaveImage(filename, img)
del (capture)
cv.DestroyWindow("camera") | 3.328125 | 3 |
final-activity/submission.py | JVBravoo/Learning-Machine-Learning | 1 | 12791232 | import random
import numpy as np
# Note: `model` is assumed to be a trained agent defined elsewhere in this submission file.
def my_agent(obs, config):
# Use the best model to select a column
col, _ = model.predict(np.array(obs['board']).reshape(6,7,1))
# Check if selected column is valid
is_valid = (obs['board'][int(col)] == 0)
# If not valid, select random move.
if is_valid:
return int(col)
else:
        return random.choice([c for c in range(config.columns) if obs['board'][c] == 0])
| 3.046875 | 3 |
test/utils/devices/temperature_control_mock_test.py | kieransukachevin/AlkalinityTitrator | 0 | 12791233 | import time
import titration.utils.devices.board_mock as board
import titration.utils.devices.temperature_control_mock as temperature_control
import titration.utils.devices.temperature_probe_mock as temperature_probe
def test_temperature_control_create():
sensor = temperature_probe.Temperature_Probe(
board.SCK, board.MOSI, board.MISO, board.D0, wires=3
)
temperature_controller = temperature_control.Temperature_Control(board.D1, sensor)
assert temperature_controller is not None
def test_temperature_control_update():
sensor = temperature_probe.Temperature_Probe(
board.SCK, board.MOSI, board.MISO, board.D0, wires=3
)
temperature_controller = temperature_control.Temperature_Control(board.D1, sensor)
temperature_controller.update()
time.sleep(1)
temperature_controller.update()
def test_temperature_control_enable_print():
sensor = temperature_probe.Temperature_Probe(
board.SCK, board.MOSI, board.MISO, board.D0, wires=3
)
temperature_controller = temperature_control.Temperature_Control(board.D1, sensor)
temperature_controller.enable_print()
def test_temperature_control_disable_print():
sensor = temperature_probe.Temperature_Probe(
board.SCK, board.MOSI, board.MISO, board.D0, wires=3
)
temperature_controller = temperature_control.Temperature_Control(board.D1, sensor)
temperature_controller.disable_print()
def test_temperature_control_at_temperature():
sensor = temperature_probe.Temperature_Probe(
board.SCK, board.MOSI, board.MISO, board.D0, wires=3
)
temperature_controller = temperature_control.Temperature_Control(board.D1, sensor)
temperature_controller.at_temperature()
def test_temperature_control_last_temperature():
sensor = temperature_probe.Temperature_Probe(
board.SCK, board.MOSI, board.MISO, board.D0, wires=3
)
temperature_controller = temperature_control.Temperature_Control(board.D1, sensor)
temperature_controller.get_last_temperature()
def test_temperature_control_activate():
sensor = temperature_probe.Temperature_Probe(
board.SCK, board.MOSI, board.MISO, board.D0, wires=3
)
temperature_controller = temperature_control.Temperature_Control(board.D1, sensor)
temperature_controller.activate()
def test_temperature_control_deactivate():
sensor = temperature_probe.Temperature_Probe(
board.SCK, board.MOSI, board.MISO, board.D0, wires=3
)
temperature_controller = temperature_control.Temperature_Control(board.D1, sensor)
temperature_controller.deactivate()
| 2.53125 | 3 |
dace/transformation/dataflow/matrix_product_transpose.py | Walon1998/dace | 1 | 12791234 | <filename>dace/transformation/dataflow/matrix_product_transpose.py
# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
""" Implements the matrix-matrix product transpose transformation. """
from copy import deepcopy as dcpy
import dace
from dace.sdfg import nodes, graph as gr
from dace.sdfg.sdfg import SDFG
from dace.sdfg.state import SDFGState
from dace.transformation import transformation
from dace.properties import make_properties
@make_properties
class MatrixProductTranspose(transformation.SingleStateTransformation):
""" Implements the matrix-matrix product transpose transformation.
T(A) @ T(B) = T(B @ A)
"""
import dace.libraries.blas as blas # Avoid slow imports
transpose_a = transformation.PatternNode(blas.Transpose)
at = transformation.PatternNode(nodes.AccessNode)
transpose_b = transformation.PatternNode(blas.Transpose)
bt = transformation.PatternNode(nodes.AccessNode)
a_times_b = transformation.PatternNode(blas.MatMul)
@classmethod
def expressions(cls):
graph = gr.OrderedDiGraph()
graph.add_node(cls.transpose_a)
graph.add_node(cls.at)
graph.add_node(cls.transpose_b)
graph.add_node(cls.bt)
graph.add_node(cls.a_times_b)
graph.add_edge(cls.transpose_a, cls.at, None)
graph.add_edge(cls.at, cls.a_times_b, None)
graph.add_edge(cls.transpose_b, cls.bt, None)
graph.add_edge(cls.bt, cls.a_times_b, None)
return [graph]
def can_be_applied(self, graph, expr_index, sdfg, permissive=False):
_at = self.at
_a_times_b = self.a_times_b
edges = graph.edges_between(_at, _a_times_b)
# Enforce unique match
if len(edges) != 1:
return False
_, _, _, dst_conn, _ = edges[0]
if dst_conn != '_a':
return False
return True
def match_to_str(self, graph):
transpose_a = self.transpose_a
transpose_b = self.transpose_b
a_times_b = self.a_times_b
return f"{transpose_a.name} -> {a_times_b.name} <- {transpose_b.name}"
def apply(self, graph: SDFGState, sdfg: SDFG):
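        # Rewire the graph: feed the untransposed inputs of A and B directly into
        # the MatMul (with swapped '_a'/'_b' connectors), drop the input transposes,
        # and transpose the product via a new Transpose node on the output.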
import dace.libraries.blas as blas
transpose_a = self.transpose_a
_at = self.at
transpose_b = self.transpose_b
_bt = self.bt
a_times_b = self.a_times_b
for src, src_conn, _, _, memlet in graph.in_edges(transpose_a):
graph.add_edge(src, src_conn, a_times_b, '_b', memlet)
graph.remove_node(transpose_a)
for src, src_conn, _, _, memlet in graph.in_edges(transpose_b):
graph.add_edge(src, src_conn, a_times_b, '_a', memlet)
graph.remove_node(transpose_b)
graph.remove_node(_at)
graph.remove_node(_bt)
for _, _, dst, dst_conn, memlet in graph.out_edges(a_times_b):
subset = dcpy(memlet.subset)
subset.squeeze()
size = subset.size()
shape = [size[1], size[0]]
break
tmp_name, tmp_arr = sdfg.add_temp_transient(shape, a_times_b.dtype)
tmp_acc = graph.add_access(tmp_name)
transpose_c = blas.Transpose('_Transpose_', a_times_b.dtype)
for edge in graph.out_edges(a_times_b):
_, _, dst, dst_conn, memlet = edge
graph.remove_edge(edge)
graph.add_edge(transpose_c, '_out', dst, dst_conn, memlet)
graph.add_edge(a_times_b, '_c', tmp_acc, None, dace.Memlet.from_array(tmp_name, tmp_arr))
graph.add_edge(tmp_acc, None, transpose_c, '_inp', dace.Memlet.from_array(tmp_name, tmp_arr))
| 2.390625 | 2 |
cobald_tests/daemon/core/test_config.py | maxfischer2781/cobald | 7 | 12791235 | <gh_stars>1-10
from tempfile import NamedTemporaryFile
import pytest
import copy
from cobald.daemon.config.mapping import ConfigurationError
from cobald.daemon.core.config import load, COBalDLoader, yaml_constructor
from cobald.controller.linear import LinearController
from ...mock.pool import MockPool
# register test pool as safe for YAML configurations
COBalDLoader.add_constructor(tag="!MockPool", constructor=yaml_constructor(MockPool))
# Helpers for testing lazy/eager YAML evaluation
# Since YAML defaults to lazy evaluation, the arguments available during evaluation
# are not necessarily complete.
class TagTracker:
"""Helper to track the arguments supplied to YAML !Tags"""
def __init__(self, *args, **kwargs):
# the state of arguments *during* YAML evaluation
self.orig_args = copy.deepcopy(args)
self.orig_kwargs = copy.deepcopy(kwargs)
# the state of arguments *after* YAML evaluation
self.final_args = args
self.final_kwargs = kwargs
COBalDLoader.add_constructor(
tag="!TagTrackerEager", constructor=yaml_constructor(TagTracker, eager=True)
)
COBalDLoader.add_constructor(
tag="!TagTrackerLazy", constructor=yaml_constructor(TagTracker, eager=False)
)
def get_config_section(config: dict, section: str):
return next(
content for plugin, content in config.items() if plugin.section == section
)
class TestYamlConfig:
def test_load(self):
"""Load a valid YAML config"""
with NamedTemporaryFile(suffix=".yaml") as config:
with open(config.name, "w") as write_stream:
write_stream.write(
"""
pipeline:
- !LinearController
low_utilisation: 0.9
high_allocation: 1.1
- !MockPool
"""
)
with load(config.name):
assert True
assert True
def test_load_invalid(self):
"""Load a invalid YAML config (invalid keyword argument)"""
with NamedTemporaryFile(suffix=".yaml") as config:
with open(config.name, "w") as write_stream:
write_stream.write(
"""
pipeline:
- !LinearController
low_utilisation: 0.9
foo: 0
- !MockPool
"""
)
with pytest.raises(TypeError):
with load(config.name):
assert False
def test_load_dangling(self):
"""Forbid loading a YAML config with dangling content"""
with NamedTemporaryFile(suffix=".yaml") as config:
with open(config.name, "w") as write_stream:
write_stream.write(
"""
pipeline:
- !LinearController
low_utilisation: 0.9
high_allocation: 1.1
- !MockPool
random_things:
foo: bar
"""
)
with pytest.raises(ConfigurationError):
with load(config.name):
assert False
def test_load_missing(self):
"""Forbid loading a YAML config with missing content"""
with NamedTemporaryFile(suffix=".yaml") as config:
with open(config.name, "w") as write_stream:
write_stream.write(
"""
logging:
version: 1.0
"""
)
with pytest.raises(ConfigurationError):
with load(config.name):
assert False
def test_load_mixed_creation(self):
"""Load a YAML config with mixed pipeline step creation methods"""
with NamedTemporaryFile(suffix=".yaml") as config:
with open(config.name, "w") as write_stream:
write_stream.write(
"""
pipeline:
- __type__: cobald.controller.linear.LinearController
low_utilisation: 0.9
high_allocation: 0.9
- !MockPool
"""
)
with load(config.name) as config:
pipeline = get_config_section(config, "pipeline")
assert isinstance(pipeline[0], LinearController)
assert isinstance(pipeline[0].target, MockPool)
def test_load_tags_substructure(self):
"""Load !Tags with substructure"""
with NamedTemporaryFile(suffix=".yaml") as config:
with open(config.name, "w") as write_stream:
write_stream.write(
"""
pipeline:
- !MockPool
__config_test:
tagged: !TagTrackerEager
host: 127.0.0.1
port: 1234
algorithm: HS256
users:
- user_name: tardis
scopes:
- user:read
"""
)
with load(config.name) as config:
tagged = get_config_section(config, "__config_test")["tagged"]
assert isinstance(tagged, TagTracker)
assert tagged.final_kwargs["host"] == "127.0.0.1"
assert tagged.final_kwargs["port"] == 1234
assert tagged.final_kwargs["algorithm"] == "HS256"
assert tagged.final_kwargs["users"][0]["user_name"] == "tardis"
assert tagged.final_kwargs["users"][0]["scopes"] == ["user:read"]
def test_load_tags_eager(self):
"""Load !Tags with substructure, immediately using them"""
with NamedTemporaryFile(suffix=".yaml") as config:
with open(config.name, "w") as write_stream:
write_stream.write(
"""
pipeline:
- !MockPool
__config_test:
tagged: !TagTrackerEager
top: "top level value"
nested:
- leaf: "leaf level value"
"""
)
with load(config.name) as config:
tagged = get_config_section(config, "__config_test")["tagged"]
assert isinstance(tagged, TagTracker)
# eager loading => all data should exist immediately
assert tagged.orig_kwargs["top"] == "top level value"
assert tagged.orig_kwargs["nested"] == [{"leaf": "leaf level value"}]
assert tagged.orig_kwargs == tagged.final_kwargs
def test_load_tags_lazy(self):
"""Load !Tags with substructure, lazily using them"""
with NamedTemporaryFile(suffix=".yaml") as config:
with open(config.name, "w") as write_stream:
write_stream.write(
"""
pipeline:
- !MockPool
__config_test:
tagged: !TagTrackerLazy
top: "top level value"
nested:
- leaf: "leaf level value"
"""
)
with load(config.name) as config:
tagged = get_config_section(config, "__config_test")["tagged"]
assert isinstance(tagged, TagTracker)
# eager loading => only some data should exist immediately...
assert tagged.orig_kwargs["top"] == "top level value"
assert tagged.orig_kwargs["nested"] == []
# ...but should be there in the end
assert tagged.final_kwargs["nested"] == [{"leaf": "leaf level value"}]
def test_load_tags_nested(self):
"""Load !Tags with nested !Tags"""
with NamedTemporaryFile(suffix=".yaml") as config:
with open(config.name, "w") as write_stream:
write_stream.write(
"""
pipeline:
- !MockPool
__config_test:
top_eager: !TagTrackerEager
nested:
- leaf: "leaf level value"
- leaf_lazy: !TagTrackerLazy
nested:
- leaf: "leaf level value"
"""
)
with load(config.name) as config:
top_eager = get_config_section(config, "__config_test")["top_eager"]
# eager tags are evaluated eagerly
assert top_eager.orig_kwargs["nested"][0] == {
"leaf": "leaf level value"
}
leaf_lazy = top_eager.orig_kwargs["nested"][1]["leaf_lazy"]
# eagerness overrides laziness
assert leaf_lazy.orig_kwargs["nested"] == [{"leaf": "leaf level value"}]
def test_load_tag_settings(self):
"""Load !Tags with decorator settings"""
# __yaml_tag_test is provided by the cobald package
with NamedTemporaryFile(suffix=".yaml") as config:
with open(config.name, "w") as write_stream:
write_stream.write(
"""
pipeline:
- !MockPool
__config_test:
settings_tag: !__yaml_tag_test
top: "top level value"
nested:
- leaf: "leaf level value"
"""
)
with load(config.name) as config:
section = get_config_section(config, "__config_test")
args, kwargs = section["settings_tag"]
assert args == ()
assert kwargs["top"] == "top level value"
assert kwargs["nested"] == [{"leaf": "leaf level value"}]
| 2.140625 | 2 |
models/oomusic_remote.py | nicolasmartinelli/oomusic | 4 | 12791236 | # -*- coding: utf-8 -*-
import base64
import uuid
from io import BytesIO
import qrcode
from odoo import api, fields, models
class MusicRemote(models.Model):
_name = "oomusic.remote"
_description = "Remote Control"
def _default_name(self):
return fields.Date.to_string(fields.Date.context_today(self))
def _default_access_token(self):
return uuid.uuid4().hex
name = fields.Char("Name", default=lambda s: s._default_name())
access_token = fields.Char(
"Access Token", index=True, default=lambda s: s._default_access_token()
)
public = fields.Boolean("Public", default=False)
url = fields.Char(
"URL", compute="_compute_url", help="Access this URL to control the playback remotely."
)
qr = fields.Binary("QR Code", compute="_compute_qr", help="QR code pointing to the remote URL.")
user_id = fields.Many2one(
"res.users",
string="User",
required=True,
ondelete="cascade",
default=lambda self: self.env.user,
)
@api.depends("access_token", "public")
def _compute_url(self):
base_url = self.env["ir.config_parameter"].sudo().get_param("web.base.url")
for remote in self:
remote.url = "{}/oomusic/remote{}/{}".format(
base_url, "_public" if remote.public else "", remote.access_token
)
@api.depends("url")
def _compute_qr(self):
for remote in self:
img = qrcode.make(remote.url)
img_tmp = BytesIO()
img.save(img_tmp, format="PNG")
remote.qr = base64.b64encode(img_tmp.getvalue())
def action_reset_remote_token(self):
for remote in self:
remote.access_token = uuid.uuid4().hex
| 2.34375 | 2 |
src/rayoptics/qtgui/__init__.py | NelisW/ray-optics | 0 | 12791237 | <reponame>NelisW/ray-optics<gh_stars>0
""" package supplying Qt5 desktop application and associated functional support
The ``rayoptics.qtgui`` subpackage provides a desktop app that runs under
Anaconda. It also provides a series of higher level interfaces used by
rayoptics. These include:
- an interface that hosts matplotlib graphics
- a table grid for numeric model displays (template-based)
- docking panel support for python objects
- iPython console window (desktop app only)
"""
| 1.273438 | 1 |
src/test/test_preprocess_data.py | BLannoo/medical-named-entity-recognition | 0 | 12791238 | from pathlib import Path
import pandas as pd
import spacy
from assertpy import assert_that
from src.definitions import PROJECT_ROOT
from src.main.preprocess_data import preprocess_data, parse_passage
def test_preprocess_data(tmp_path: Path):
preprocess_data(
data_root=PROJECT_ROOT / "data/test/raw",
output_dir=tmp_path,
)
actual = pd.read_csv(tmp_path / "labeled_passages.csv")
def word_as_dict(word: str) -> dict:
return actual[actual.words == word].iloc[0].to_dict()
assert_that(word_as_dict("methanol")).is_equal_to(
{
"passage_id": 0,
"pubtator_id": 1,
"words": "methanol",
"POS": "NOUN",
"labels": "B-Chemical",
}
)
assert_that(word_as_dict("poisoning")).is_equal_to(
{
"passage_id": 0,
"pubtator_id": 1,
"words": "poisoning",
"POS": "NOUN",
"labels": "B-Disease",
}
)
assert_that(word_as_dict("pyridine")).is_equal_to(
{
"passage_id": 0,
"pubtator_id": 2,
"words": "pyridine",
"POS": "NOUN",
"labels": "B-Chemical",
}
)
assert_that(word_as_dict("nucleotide")).is_equal_to(
{
"passage_id": 0,
"pubtator_id": 2,
"words": "nucleotide",
"POS": "NOUN",
"labels": "I-Chemical",
}
)
expected = pd.read_csv(PROJECT_ROOT / "data/test/labeled_passages.csv")
pd.testing.assert_frame_equal(left=actual, right=expected)
def test_parse_passage_can_handle_global_offset():
nlp = spacy.load("en_core_web_sm")
actual = parse_passage(
passage={
"offset": 20, # This is the parameter under test
"text": "Adsorption of rRNA and poly(A)-containing RNA to filters.",
"annotations": [
{
"infons": {"identifier": "MESH:D011061", "type": "Chemical"},
# TODO: configure tokenization to split on '-'
"text": "poly(A)",
"locations": [{"offset": 43, "length": 7}],
},
],
},
pubtator_id="0",
passage_id=0,
nlp=nlp,
)
print(actual)
expected = pd.DataFrame(
{
"pubtator_id": ["0"] * 9,
"passage_id": [0] * 9,
"words": [
"Adsorption",
"of",
"rRNA",
"and",
"poly(A)-containing",
"RNA",
"to",
"filters",
".",
],
"POS": [
"NOUN",
"ADP",
"ADJ",
"CCONJ",
"VERB",
"PROPN",
"ADP",
"NOUN",
"PUNCT",
],
"labels": ["O"] * 4 + ["B-Chemical"] + ["O"] * 4,
}
)
pd.testing.assert_frame_equal(left=actual, right=expected)
| 2.671875 | 3 |
stats/scripts/z3_utils.py | satbekmyrza/chc-spacer-code | 1 | 12791239 | <filename>stats/scripts/z3_utils.py
#!/usr/bin/env python
############################################
#
# Some utility routines for Z3
#
############################################
import z3
I = z3.IntSort ()
B = z3.BoolSort ()
def z3_translate (x, ctx):
""" A version of z3.AstRef.translate that handles sorts and function declarations correctly"""
if x.ctx == ctx: return x
if isinstance (x, z3.BoolSortRef): return z3.BoolSort (ctx=ctx)
if z3.is_arith_sort (x):
if x.is_int (): return z3.IntSort (ctx=ctx)
else :
assert x.is_real ()
return z3.RealSort (ctx=ctx)
if isinstance (x, z3.FuncDeclRef):
sorts = [z3_translate (x.domain (i), ctx) for i in range (x.arity ())]
sorts.append (z3_translate (x.range (), ctx))
return z3.Function (x.name (), *sorts)
if ctx is None: return x.translate (ctx=z3.main_ctx ())
return x.translate (ctx)
def translate_pair_list (l, ctx):
res = []
for (a,b) in l:
new_p = (z3_translate (a, ctx), z3_translate (b, ctx))
res.append (new_p)
return res
def mk_true (ctx=None): return z3.BoolVal (True, ctx=ctx)
def mk_false (ctx=None): return z3.BoolVal (False, ctx=ctx)
def mk_int (val, ctx=None): return z3.IntVal (val, ctx=ctx)
def mk_and (args, ctx=None):
if len (args) == 0: return mk_true (ctx=ctx)
else: return z3.And (*args)
def create_fp (smt2file,
ctx=None, pp=False, engine='pdr', validate=False):
fp = z3.Fixedpoint (ctx=ctx)
if not pp:
print 'No pre-processing'
fp.set (slice=False)
fp.set (inline_linear=False)
fp.set (inline_eager=False)
fp.set (validate_result=validate)
fp.set (engine=engine, use_farkas=True, generate_proof_trace=False)
q = fp.parse_file (smt2file)
return (q, fp)
def create_named_fp (smt2file,
ctx=None, pp=False, engine='pdr', validate=False):
given_fp = z3.Fixedpoint (ctx=ctx)
q = given_fp.parse_file (smt2file)
given_preds = get_preds (given_fp)
fp = z3.Fixedpoint (ctx=ctx)
rules = dict () # map from names to rules
if not pp:
print 'No pre-processing'
fp.set (slice=False)
fp.set (inline_linear=False)
fp.set (inline_eager=False)
fp.set (validate_result=validate)
fp.set (engine=engine, use_farkas=True, generate_proof_trace=False)
for pred in given_preds.itervalues ():
fp.register_relation (pred)
for i,rule in enumerate (given_fp.get_rules ()):
name = 'r'+str(i)
fp.add_rule (rule, name=name)
rules [name] = rule
return (q, fp, rules)
def create_empty_fp (ctx=None, pp=False, engine='pdr', validate=False):
fp = z3.Fixedpoint (ctx=ctx)
if not pp:
fp.set (slice=False)
fp.set (inline_linear=False)
fp.set (inline_eager=False)
fp.set (validate_result=validate)
fp.set (engine=engine, use_farkas=True, generate_proof_trace=False)
return fp
def strip_qblock (expr):
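    # split a quantified formula into (bound constants, matrix), replacing the
    # de Bruijn variables in the body with fresh constants of the same name and sort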
if not z3.is_quantifier (expr): return ([], expr)
consts = list ()
for i in reversed (range (expr.num_vars ())):
v_name = expr.var_name (i)
v_sort = expr.var_sort (i)
consts.append (z3.Const (v_name, v_sort))
matrix = z3.substitute_vars (expr.body (), *consts)
return (consts, matrix)
def get_preds (fp):
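    # collect the head predicate declarations appearing in the rules of fp,
    # keyed by predicate name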
preds = dict ()
pred_keys = []
for rule_exp in fp.get_rules ():
# rule_exp is a quantified formula representing the rule of the form:
# Forall vars. body => head where head is a QF predicate instance
# obtain the head
unused_, matrix = strip_qblock (rule_exp)
if z3.is_app_of (matrix, z3.Z3_OP_IMPLIES): head = matrix.arg (1)
else: head = matrix
assert head is not None
# obtain head_decl
head_decl = head.decl ()
# ensure head_decl is in preds
head_key = exp_key (head_decl)
if head_key in pred_keys: continue
pred_keys.append (head_key)
preds [head_decl.name ()] = head_decl
return preds
def print_lemmas (fp):
preds = get_preds (fp)
print
for pred in preds.itervalues ():
print 'Lemmas for predicate: ', pred
n = fp.get_num_levels (pred)
for i in range (n):
print '{} : {}'.format (i, fp.get_cover_delta (i, pred))
print '{} : {}'.format ('oo', fp.get_cover_delta (-1, pred))
print
def get_level_lemmas (fp, lvl, pred):
lemmas = []
for l in range (lvl, fp.get_num_levels (pred) + 1):
lemmas.append (fp.get_cover_delta (l, pred))
lemmas.append (fp.get_cover_delta (-1, pred))
return z3.simplify (z3.And (*lemmas))
# doesn't seem to quite work when new expressions are created -- use z3.eq instead
def exp_key (e): return e.ast.value
def match_exp (exp1, exp2):
if exp1 is None and exp2 is None: return True
if exp1 is None and exp2 is not None: return False
if exp2 is None and exp1 is not None: return False
return exp_key (exp1) == exp_key (exp2)
# iterator for declarations of arguments of exp
# if arguments contain de-bruijn variables, they are ignored
def arg_decls (exp):
for i in range (exp.num_args ()) :
arg = exp.arg (i)
if z3.is_app (arg): yield arg.decl ()
def has_const (exp, const):
found = False
for l in unique_const_leaves (exp):
if z3.eq (l, const):
found = True
break
return found
# iterator for all equality terms on const;
#
# each pair (t,eq) is such that eq is an equality logically equivalent to (const==t)
# appearing in exp;
#
# to accommodate for alternative representations of the same equality (e.g. const==1
# vs. 1==const) and to avoid repetitions of terms, we return (None,eq) for the
# duplicates (in the above example, we yield (1,const==1) and (None,1==const));
#
# assume that const only appears in simple arithmetic terms and the coefficient
# of const is 1;
#
# assume that no equality term appears in another equality term
# (so, we can't handle "(const==0)==b", "ite (const==0, x, y) == 3", etc.)
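# example: for const x and exp (x + 1 == y), this yields a pair (t, eq) where t
# simplifies to y - 1 and eq is the original equality x + 1 == y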
def unique_eq_terms_on_const (const, exp, eq_terms=None, done_exp=None):
def insert (e):
found = False
for t in eq_terms:
if z3.eq (t,e):
found = True
break
if not found:
eq_terms.append (e)
return True
return False
def process_eq (e1, e2):
if z3.eq (e1, const):
ret_val = z3.simplify (e2)
else:
assert z3.is_app (e1)
if not (z3.is_app_of (e1, z3.Z3_OP_ADD) or z3.is_app_of (e1, z3.Z3_OP_SUB)):
return None
is_add = z3.is_app_of (e1, z3.Z3_OP_ADD)
arg0 = e1.arg (0)
arg1 = e1.arg (1)
if z3.eq (arg1, const):
if is_add: ret_val = z3.simplify (e2-arg0)
else: ret_val = z3.simplify (arg0-e2)
else:
if is_add: ret_val = process_eq (arg0, e2-arg1)
else: ret_val = process_eq (arg0, e2+arg1)
return ret_val
if eq_terms is None: eq_terms = []
if done_exp is None: done_exp = []
for e in done_exp:
if e.eq (exp): return # sub-dag is already processed
if z3.is_eq (exp):
arg0 = exp.arg (0)
arg1 = exp.arg (1)
if has_const (arg1, const):
arg0,arg1 = arg1,arg0 # swap
if has_const (arg0, const):
t = process_eq (arg0, arg1)
if t is not None:
if insert (t): yield (t, exp)
else: yield (None, exp)
elif z3.is_app (exp):
for i in range (exp.num_args ()):
for (t,eq) in unique_eq_terms_on_const (const, exp.arg (i), eq_terms, done_exp):
yield (t, eq)
done_exp.append (exp)
def unique_leaves (exp, leaf_keys=None):
def insert_and_yield (e):
k = exp_key (e)
if k not in leaf_keys:
leaf_keys.append (k)
yield e
if leaf_keys is None: leaf_keys = []
if z3.is_const (exp) and not (z3.is_int_value (exp) or z3.is_rational_value (exp)):
for leaf in insert_and_yield (exp): yield leaf
elif z3.is_app (exp):
for i in range (exp.num_args ()):
for leaf in unique_leaves (exp.arg (i), leaf_keys): yield leaf
else:
assert z3.is_var (exp)
for leaf in insert_and_yield (exp): yield leaf
def unique_const_leaves (exp):
for l in unique_leaves (exp):
if not z3.is_var (l): yield l
def unique_var_leaves (exp):
for l in unique_leaves (exp):
if z3.is_var (l): yield l
def exp_has_const_leaf (exp, l):
for m in unique_const_leaves (exp):
if l.eq (m): return True
return False
def unique_selects (exp, sel_keys=None):
def insert_and_yield (e):
k = exp_key (e)
if k not in sel_keys:
sel_keys.append (k)
yield e
if sel_keys is None: sel_keys = []
# post-order
if z3.is_app (exp):
for i in range (exp.num_args ()): # args are the array and the idx
for sel in unique_selects (exp.arg (i), sel_keys): yield sel
if z3.is_select (exp):
for sel in insert_and_yield (exp): yield sel
def extract_consts (exp):
res = []
for c in unique_const_leaves (exp):
res.append (c)
return res
def mk_const_variant (const, variant):
name = '{}_{}'.format (const.decl ().name (), variant)
return z3.Const (name, const.sort ())
def mk_exp_variant_sub (exp, variant):
sub = []
for const in unique_const_leaves (exp):
const_variant = mk_const_variant (const, variant)
sub.append ( (const, const_variant) )
return sub
def mk_fresh_args (decl, startswith=''):
args = []
for i in range (decl.arity ()):
name = startswith + str (i)
sort = decl.domain (i)
args.append (z3.Const (name, sort))
return args
# check if fml is sat with given side constraints
def check_sat (fml, side_cons=None):
s = z3.Solver (ctx=fml.ctx)
s.add (fml)
if side_cons is not None:
for cons in side_cons:
s.add (cons)
res = s.check ()
if res == z3.sat:
return s.model ()
else:
return None
def mk_subst_from_model (m, consts, model_completion=False):
sub = []
for const in consts:
# treat arrays specially due to the else_value
sort = const.sort ()
if isinstance (sort, z3.ArraySortRef):
val_interp = m [const]
if (val_interp is not None) and isinstance (val_interp, z3.FuncInterp) :
idx_sort = sort.domain ()
val_sort = sort.range ()
val = z3.K(val_sort, val_interp.else_value ())
for i in range (val_interp.num_entries ()):
entry = val_interp.entry (i)
val = z3.Store (val, entry.arg_value (0), entry.value ())
else:
val = m.eval (const, model_completion=model_completion)
else:
val = m.eval (const, model_completion=model_completion)
sub.append ( (const, val) )
return sub
def mk_eqs_from_model (m, consts, model_completion=False):
eqs = []
for const in consts:
# treat arrays specially due to the else_value
sort = const.sort ()
if isinstance (sort, z3.ArraySortRef):
val_interp = m [const]
if (val_interp is not None) and isinstance (val_interp, z3.FuncInterp):
idx_sort = sort.domain ()
val_sort = sort.range ()
val = z3.K(val_sort, val_interp.else_value ())
for i in range (val_interp.num_entries ()):
entry = val_interp.entry (i)
val = z3.Store (val, entry.arg_value (0), entry.value ())
else:
val = m.eval (const, model_completion=model_completion)
else:
val = m.eval (const, model_completion=model_completion)
eqs.append (const == val)
return eqs
def qe_array (exp):
if not z3.is_quantifier (exp): return exp
is_forall = False
if exp.is_forall ():
is_forall = True
(qvars, matrix) = strip_qblock (exp)
exp = z3.Exists (qvars, z3.Not (matrix))
qf_exp = z3.Tactic ('qe-array', ctx=exp.ctx) (exp).as_expr ()
if is_forall:
(qvars, matrix) = strip_qblock (qf_exp)
if len (qvars) > 0:
res = z3.ForAll (qvars, z3.Not (matrix))
else:
res = z3.Not (matrix)
else:
res = qf_exp
return res
def qe_lite (exp):
if not z3.is_quantifier (exp): return exp
e = exp
t = z3.Tactic ('qe-light', ctx=exp.ctx)
# invoke qe_lite once per quantified variable, for better result
for i in range (exp.num_vars ()):
e = t (e).as_expr ()
if not z3.is_quantifier (e): return e
if z3.is_quantifier (e):
# invoke qe_lite for each variable, separately
(qvars, matrix) = strip_qblock (e)
for v in qvars:
if exp.is_forall ():
matrix = t (z3.ForAll ([v], matrix)).as_expr ()
else:
matrix = t (z3.Exists ([v], matrix)).as_expr ()
e = matrix
return e
def qe (exp):
if not z3.is_quantifier (exp): return exp
return z3.Tactic ('qe', ctx=exp.ctx) (exp).as_expr ()
# qe_lite followed by qe
def full_qe (exp):
temp = qe_lite (exp)
return qe (temp)
def qe_sat (exp):
t = z3.Tactic ('qe-sat', ctx=exp.ctx)
return t (exp).as_expr ()
def cofactor_term_ite (exp):
if z3.is_quantifier (exp):
(qvars, matrix) = strip_qblock (exp)
matrix = cofactor_term_ite (matrix)
if exp.is_forall ():
return z3.ForAll (qvars, matrix)
else:
return z3.Exists (qvars, matrix)
t = z3.Tactic ('cofactor-term-ite', ctx=exp.ctx)
return t (exp).as_expr ()
def elim_term_ite (exp):
if z3.is_quantifier (exp):
(qvars, matrix) = strip_qblock (exp)
matrix = elim_term_ite (matrix)
if exp.is_forall ():
e = z3.ForAll (qvars, matrix)
else:
e = z3.Exists (qvars, matrix)
return e
pre_consts = extract_consts (exp)
pre_const_keys = map (exp_key, pre_consts)
t = z3.Tactic ('elim-term-ite', ctx=exp.ctx)
e = t (exp).as_expr ()
# tactic introduces new constants which need to be existentially quantified
post_consts = extract_consts (e)
post_const_keys = map (exp_key, post_consts)
exist_consts = []
for i in range (len (post_consts)):
post_key = post_const_keys [i]
if post_key not in pre_const_keys:
exist_consts.append (post_consts [i])
if len (exist_consts) > 0:
e = z3.Exists (exist_consts, e)
return qe_lite (e)
# obtain an under-approx of exp (an existentially quantified fml) under the
# constraints 'side_cons' on the free variables;
#
# let exp = Exists (qvars, matrix)
# obtain a model m of qvars consistent with (matrix /\ side_cons)
# the under-approx. is obtained as "matrix [m/qvars]"
#
# this is the weakest under-approx. if side_cons is a point
def under_approx_qe (exp, side_cons=None):
assert z3.is_quantifier (exp)
assert not exp.is_forall ()
(qvars, matrix) = strip_qblock (exp)
s = z3.Solver (ctx=exp.ctx)
s.add (matrix)
if side_cons is not None:
for c in side_cons: s.add (c)
res = s.check ()
if res == z3.unsat: return mk_false (ctx=exp.ctx)
m = s.model ()
sub = mk_subst_from_model (m, qvars, model_completion=True)
return z3.substitute (matrix, *sub)
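# Minimal usage sketch for under_approx_qe (added; not part of the original
# file), assuming the z3 Python bindings.  Under-approximate Exists x. x > y
# with the side constraint y == 3: the solver picks some concrete x (say 4),
# and the result is the quantifier-free substitution, e.g. 4 > y.
#
#   x, y = z3.Ints ('x y')
#   f = z3.Exists ([x], x > y)
#   approx = under_approx_qe (f, side_cons=[y == 3])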
def nnf (exp):
t = z3.Tactic ('nnf', ctx=exp.ctx)
return t (exp).as_expr ()
def is_ite (exp):
return z3.is_app_of (exp, z3.Z3_OP_ITE)
def is_xor (exp):
return z3.is_app_of (exp, z3.Z3_OP_XOR)
# rewrite ite as conjunction of implications when it appears as a boolean atom
# (i.e. an atom in the boolean structure of exp)
def elim_bool_ite (exp):
if z3.is_quantifier (exp):
(qvars, matrix) = strip_qblock (exp)
matrix = elim_bool_ite (matrix)
if exp.is_forall ():
e = z3.ForAll (qvars, matrix)
else:
e = z3.Exists (qvars, matrix)
return e
if not z3.is_bool (exp): return exp
if z3.is_true (exp) or z3.is_false (exp): return exp
assert z3.is_app (exp)
decl = exp.decl ()
args = map (elim_bool_ite, exp.children ())
# need to worry about And and Or because they can take >2 args and
# decl(*args) doesn't seem to work with the py interface
if z3.is_and (exp):
return z3.And (*args)
elif z3.is_or (exp):
return z3.Or (*args)
elif is_ite (exp):
impl1 = z3.Implies (args[0], args[1])
impl2 = z3.Implies (z3.Not (args[0]), args[2])
return z3.And (impl1, impl2)
else:
return decl (*args)
def elim_ite (exp):
e = cofactor_term_ite (exp)
e = elim_bool_ite (e)
# Alternatively, we could have done the following with the caveat that
# elim_term_ite introduces new existentially quantified variables which can
# be hard to eliminate by qe_lite
#e = elim_bool_ite (exp)
#e = elim_term_ite (e)
return e
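# Illustrative sketch of the ite elimination above (added for clarity), assuming
# the z3 Python bindings.  A boolean-level If is turned into two implications by
# elim_bool_ite, while a term-level If inside an atom is handled by
# cofactor_term_ite:
#
#   a, b, c = z3.Bools ('a b c')
#   elim_bool_ite (z3.If (a, b, c))
#   # -> And (Implies (a, b), Implies (Not (a), c))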
# sampling based method for quant. alternation;
# given_insts is a list of instances for the universals given by the user as a
# starting point
def solve_exists_forall (exp, given_insts=None, model=False):
print 'Exists Forall exp:', exp
assert z3.is_quantifier (exp) and not exp.is_forall ()
(exist_consts, e) = strip_qblock (exp)
if not z3.is_quantifier (e):
# just an smt problem
m = check_sat (e)
if m is None:
return (None, None, None)
else:
if model: return (m, None, None)
sub = mk_subst_from_model (m, exist_consts, model_completion=True)
return (sub, None, None)
else:
assert e.is_forall ()
(univ_consts, matrix) = strip_qblock (e)
print 'Exist consts:', exist_consts
print 'Univ consts:', univ_consts
print 'Matrix:', matrix
w_cons = [] # constraints for witness
# initialize w with given_insts
if given_insts is not None:
for inst in given_insts:
sub = mk_subst_from_model (inst, univ_consts, model_completion=True)
w_cons.append (z3.substitute (matrix, *sub))
new_insts = list ()
witnesses = list ()
while True:
print 'Solver for witness:'
for cons in w_cons: print cons.sexpr ()
w = z3.Solver (ctx=exp.ctx)
for cons in w_cons: w.add (cons)
# obtain witness for instances
res = w.check ()
if res == z3.unsat:
print 'FALSE\n', new_insts
return (None, new_insts, witnesses)
m = w.model ()
witnesses.append (m)
print 'Model for new witness:\n', m
sub = mk_subst_from_model (m, exist_consts, model_completion=True)
print 'New witness:', sub
# check if the witness is sufficient
s = z3.Solver (ctx=exp.ctx)
print 'checking validity of ', z3.substitute (matrix, *sub)
s.add (z3.Not (z3.substitute (matrix, *sub)))
print 'Solver for validity:', z3.Not (z3.substitute (matrix, *sub)).sexpr ()
res = s.check ()
if res == z3.unsat:
print 'TRUE\n', sub
if model: return (m, None, None)
return (sub, None, None)
inst = s.model ()
new_insts.append (inst)
print 'New instance:\n', inst
sub = mk_subst_from_model (inst, univ_consts, model_completion=True)
w_cons.append (z3.substitute (matrix, *sub))
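# Minimal usage sketch (added; not from the original file), assuming the z3
# Python bindings.  Solve "Exists x. Forall y. x*y == y"; the expected witness
# is x == 1, returned as a substitution list when the formula is true.
#
#   x, y = z3.Ints ('x y')
#   f = z3.Exists ([x], z3.ForAll ([y], x * y == y))
#   (witness, insts, models) = solve_exists_forall (f)
#   # witness is e.g. [(x, 1)] on success, or None if the formula is false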
# like above, but looks for counterexamples of increasing size;
# in other words, we solve the negation of the given problem, looking for
# counterexamples of increasing size, similar to BMC
def solve_exists_forall_incremental (exp, model=False):
print 'Exists Forall exp:', exp
assert z3.is_quantifier (exp) and not exp.is_forall ()
(exist_consts, e) = strip_qblock (exp)
if not z3.is_quantifier (e):
# just an smt problem
m = check_sat (e)
if m is None:
return (None, None, None)
else:
if model: return (m, None, None)
sub = mk_subst_from_model (m, exist_consts, model_completion=True)
return (sub, None, None)
else:
assert e.is_forall ()
(univ_consts, matrix) = strip_qblock (e)
print 'Exist consts:', exist_consts
print 'Univ consts:', univ_consts
print 'Matrix:', matrix
print 'Solving by negating the problem'
cex_size = 0
curr_exist_consts = []
curr_matrix_disjs = []
for i in range (cex_size):
sub = []
for c in univ_consts:
name = '{}_{}'.format (c.decl ().name (), str (i))
const = z3.Const (name, c.sort ())
curr_exist_consts.append (const)
sub.append ( (c, const) )
new_disj = z3.substitute (z3.Not (matrix), *sub)
curr_matrix_disjs.append (new_disj)
while True:
print 'CURRENT SIZE:', cex_size+1
# look for a cex of size 'cex_size'
# Exists U1,U2,..U_cex_size. Forall E. Not (matrix),
# where U and E are univ_consts and exist_consts
# add a new set of exist consts
sub = []
for c in univ_consts:
name = '{}_{}'.format (c.decl ().name (), str (cex_size))
const = z3.Const (name, c.sort ())
curr_exist_consts.append (const)
sub.append ( (c, const) )
new_disj = z3.substitute (z3.Not (matrix), *sub)
curr_matrix_disjs.append (new_disj)
curr_exp = z3.Exists (curr_exist_consts,
z3.ForAll (exist_consts,
z3.Or (*curr_matrix_disjs)))
(cex_model, witnesses, _unused_insts) = solve_exists_forall (curr_exp, model=True)
if cex_model is not None:
print 'FALSE\n', cex_model
print 'Size:', cex_size+1
# TODO: split cex_model into list of models for the original set of
# universal variables
return (None, cex_model, witnesses)
else:
# no cex of current size
# check if any of the witnesses already works
for m in witnesses:
w = z3.Solver (ctx=exp.ctx)
sub = mk_subst_from_model (m, exist_consts, model_completion=True)
w.add (z3.substitute (z3.Not (matrix), *sub))
if w.check () == z3.unsat:
print 'TRUE\n', sub
if model: return (m, None, None)
return (sub, None, None)
# increment size
cex_size += 1
| 2.484375 | 2 |
proximitysensor/sunclock.py | Smytten/Tangible_NFT_Thesis | 1 | 12791240 | # SPDX-FileCopyrightText: 2021 ladyada for Adafruit Industries
# SPDX-License-Identifier: MIT
# Simple test for NeoPixels on Raspberry Pi
import time
import board
import neopixel
import colorsys
import threading
class sunController():
def __init__(self):
# Choose an open pin connected to the Data In of the NeoPixel strip, i.e. board.D18
# NeoPixels must be connected to D10, D12, D18 or D21 to work.
self.pixel_pin = board.D10
        # The number of NeoPixels being driven (88 here)
self.num_pixels = 88
#Start position of sun
self.current_position = 0
#Start level of the sun between lowest 1 and 4 max
self.sun_level = 1
# The order of the pixel colors - RGB or GRB. Some NeoPixels have red and green reversed!
# For RGBW NeoPixels, simply change the ORDER to RGBW or GRBW.
self.ORDER = neopixel.GRB
self.pixels = neopixel.NeoPixel(
self.pixel_pin, self.num_pixels, brightness=1, auto_write=False, pixel_order=self.ORDER)
#TODO - Create 4 sun stages
self.SUN_INTENSE = (255,64,0)
self.SUN_STRONG = (64,16,0)
self.SUN_MILD = (32,8,0)
self.SUN_WEAK = (20,6,0)
self.SUN_COLOR_OFF = (0,0,0)
def init_sun(self, start_position):
self.pixels.fill(self.SUN_COLOR_OFF)
self.pixels.show()
self.current_position = start_position
self.pixels[self.current_position] = (self.SUN_INTENSE)
self.pixels.show()
def test_colors(self):
self.pixels.fill(self.SUN_COLOR_OFF)
self.pixels.show()
time.sleep(3)
self.pixels.fill(self.SUN_WEAK)
self.pixels.show()
time.sleep(3)
self.pixels.fill(self.SUN_MILD)
self.pixels.show()
time.sleep(3)
self.pixels.fill(self.SUN_STRONG)
self.pixels.show()
time.sleep(3)
self.pixels.fill(self.SUN_INTENSE)
self.pixels.show()
time.sleep(3)
def update_position(self):
#print("updating position")
self.current_position += 1
#Redraw pixels
self.redraw_pixels()
#Create a new thread for non-blocking change of position over time
timer = threading.Timer(3.4, self.update_position)
timer.start()
def redraw_pixels(self):
#Reset pixels
self.pixels.fill(self.SUN_COLOR_OFF)
#level 1
if self.sun_level == 1:
self.pixels[self.current_position % self.num_pixels] = (self.SUN_INTENSE)
#level 2
if self.sun_level == 2:
self.pixels.fill(self.SUN_COLOR_OFF)
self.pixels[self.current_position % self.num_pixels] = (self.SUN_INTENSE)
self.pixels[(self.current_position +1) % self.num_pixels] = (self.SUN_STRONG)
self.pixels[(self.current_position -1) % self.num_pixels] = (self.SUN_STRONG)
#level 3
if self.sun_level == 3:
self.pixels.fill(self.SUN_COLOR_OFF)
self.pixels[self.current_position % self.num_pixels] = (self.SUN_INTENSE)
self.pixels[(self.current_position +1) % self.num_pixels] = (self.SUN_STRONG)
self.pixels[(self.current_position -1) % self.num_pixels] = (self.SUN_STRONG)
self.pixels[(self.current_position +2) % self.num_pixels] = (self.SUN_MILD)
self.pixels[(self.current_position -2) % self.num_pixels] = (self.SUN_MILD)
#level 4
if self.sun_level == 4:
self.pixels.fill(self.SUN_COLOR_OFF)
self.pixels[self.current_position % self.num_pixels] = (self.SUN_INTENSE)
self.pixels[(self.current_position + 1) % self.num_pixels] = (self.SUN_STRONG)
self.pixels[(self.current_position - 1) % self.num_pixels] = (self.SUN_STRONG)
self.pixels[(self.current_position +2) % self.num_pixels] = (self.SUN_MILD)
self.pixels[(self.current_position -2) % self.num_pixels] = (self.SUN_MILD)
self.pixels[(self.current_position + 3) % self.num_pixels] = (self.SUN_WEAK)
self.pixels[(self.current_position - 3) % self.num_pixels] = (self.SUN_WEAK)
#Reveal pixels
self.pixels.show()
def set_level(self, new_level):
self.sun_level = new_level
self.redraw_pixels()
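    # Illustrative usage sketch (added for clarity; requires real NeoPixel
    # hardware wired to the pin configured above, so it is left as a comment):
    #
    #   sun = sunController()
    #   sun.init_sun(start_position=0)   # light a single "intense" pixel
    #   sun.set_level(3)                 # widen the sun to a 5-pixel gradient
    #   sun.update_position()            # advance one pixel every 3.4 seconds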
'''
def increase_sun(self):
print("sun increased")
print("current sun stage is: {}".format(self.sun_level))
if self.sun_level == 4:
print("Sunlevel already max")
return
#Update level
self.sun_level += 1
print("New sun stage is: {}".format(self.sun_level))
#Redraw pixels
self.redraw_pixels()
def decrease_sun(self):
print("sun decreased")
print("current sun stage is: {}".format(self.sun_level))
if self.sun_level == 1:
print("Sunlevel already lowest")
return
#Update level
self.sun_level -= 1
print("New sun stage is: {}".format(self.sun_level))
#Redraw pixels
self.redraw_pixels()
'''
'''
sun = sunController()
sun.update_position()
while input != 'quit':
command = input()
if command == "1":
sun.increase_sun()
if command == "2":
sun.decrease_sun()
if command == "3":
sun.test_colors()
command = ''
''' | 2.921875 | 3 |
autopandas_v2/generators/compilation/ir/signatures.py | chyanju/autopandas | 16 | 12791241 | import ast
from numpy import nan
from typing import List, Tuple, Any, Set
class ISignature:
def __init__(self, sig: str):
self.sig: str = sig
self.fname: str = None
self.pos_args: List[str] = None
self.kw_args: List[Tuple[str, Any]] = None # Keyword arguments have both a name and a default value
self.arg_names: Set[str] = None
self.type: str = None
self.init()
def init(self):
root = ast.parse(self.sig.lstrip().rstrip())
for node in ast.walk(root):
if isinstance(node, ast.Call):
self.pos_args = list(map(lambda x: x.id, node.args))
self.kw_args = list(map(lambda x: (x.arg, self.eval_node(x.value)), node.keywords))
if isinstance(node.func, ast.Attribute):
# This is a method
fname = [node.func.attr]
node = node.func
# Sometimes things can be complicated, the following is to process
# cases like 'DataFrame.at.__getitem__'
while isinstance(node.value, ast.Attribute):
node = node.value
fname.append(node.attr)
fname.append(node.value.id)
self.fname = ".".join(reversed(fname))
self.type = 'method'
else:
# This is a general function not belonging to any particular class
# Ideally this should never happen
self.fname = node.func.id
self.arg_names = set(self.pos_args) | set(map(lambda x: x[0], self.kw_args))
return
elif isinstance(node, ast.Attribute):
self.pos_args = ['self']
self.kw_args = []
fname = [node.attr]
while isinstance(node.value, ast.Attribute):
node = node.value
fname.append(node.attr)
fname.append(node.value.id)
self.fname = ".".join(reversed(fname))
self.arg_names = set(self.pos_args) | set(map(lambda x: x[0], self.kw_args))
self.type = 'attribute'
return
raise Exception("Malformed Signature!")
@staticmethod
def eval_node(node):
return eval(compile(ast.Expression(node), 'dummy', 'eval'), {'nan': nan})
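# Illustrative parse of a made-up signature string (added for clarity; the
# method name below is only an example, not taken from the original data):
#
#   sig = ISignature("DataFrame.drop(self, labels=None, axis=0)")
#   sig.fname     ->  'DataFrame.drop'
#   sig.pos_args  ->  ['self']
#   sig.kw_args   ->  [('labels', None), ('axis', 0)]
#   sig.type      ->  'method'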
| 3.125 | 3 |
tests/test_databaseAccess.py | maxbechtold/dirt-rally-time-recorder | 18 | 12791242 | import unittest
from unittest.mock import MagicMock, call
from timerecorder.database import Database
from timerecorder.databaseAccess import DatabaseAccess
class TestDatabaseAccess(unittest.TestCase):
def setUp(self):
self.database = Database('test')
self.database.recordResults = MagicMock()
self.thing = DatabaseAccess(self.database)
def tearDown(self):
pass
def testIdentifyTrackUnambiguous(self):
tracks = [(1, 'track1')]
self.database.loadTracks = MagicMock(return_value=tracks)
loadedTrack = self.thing.identifyTrack(10, 10000)
self.assertEqual(loadedTrack, 1, "Wrong ID")
def testIdentifyTrackNoResult(self):
tracks = []
self.database.loadTracks = MagicMock(return_value=tracks)
loadedTrack = self.thing.identifyTrack(10, 10000)
self.assertEqual(loadedTrack, [], "Shouldn't identify track")
def testIdentifyTrackAmbiguous(self):
tracks = [(1, 'track1'), (2, 'track2')]
self.database.loadTracks = MagicMock(return_value=tracks)
loadedTrack = self.thing.identifyTrack(55, 10000)
self.assertEqual(loadedTrack, [1, 2], "Should return all tracks")
def testIdentifyCarUnambiguous(self):
cars = [(1, 'car1')]
self.database.loadCars = MagicMock(return_value=cars)
loadedCar = self.thing.identifyCar(1000, 100, 5)
self.assertEqual(loadedCar, 1, "Wrong ID")
def testIdentifyCarNoResult(self):
cars = []
self.database.loadCars = MagicMock(return_value=cars)
loadedCar = self.thing.identifyCar(1000, 100, 5)
self.assertEqual(loadedCar, [], "Shouldn't identify car")
def testIdentifyCarAmbiguous(self):
cars = [(1, 'car1'), (2, 'car2')]
self.database.loadCars = MagicMock(return_value=cars)
loadedCar = self.thing.identifyCar(1000, 100, 5)
self.assertEqual(loadedCar, [1, 2], "Should return all cars")
def testGetCarInterfacesStatementWithoutData(self):
handbrakeData = [(None)]
self.database.loadHandbrakeData = MagicMock(side_effect=handbrakeData)
carNames = ["Unknown Car"]
noneData = [(None)]
self.database.loadShiftingData = MagicMock(side_effect=noneData)
self.database.loadGearsData = MagicMock(side_effect=noneData)
self.database.loadClutchData = MagicMock(side_effect=noneData)
self.database.getCarName = MagicMock(side_effect=carNames)
self.assertEqual(self.thing.describeCarInterfaces([1]), "Unknown Car: NO CONTROL DATA")
def testGetCarInterfacesStatements(self):
handbrakeData = [(0), (1)]
self.database.loadHandbrakeData = MagicMock(side_effect=handbrakeData)
shiftingData = [('H-PATTERN'), ('2 PADDLES')]
self.database.loadShiftingData = MagicMock(side_effect=shiftingData)
carNames = ['Classic Car', 'Modern Car']
self.database.getCarName = MagicMock(side_effect=carNames)
gearsData = [(4), (6)]
self.database.loadGearsData = MagicMock(side_effect=gearsData)
clutchData = [(1), (0)]
self.database.loadClutchData = MagicMock(side_effect=clutchData)
firstCarInterface = self.thing.describeCarInterfaces(1)
self.assertEqual(firstCarInterface, "Classic Car: H-PATTERN shifting, 4 speed, with manual CLUTCH")
secondCarInterface = self.thing.describeCarInterfaces(2)
self.assertEqual(secondCarInterface, "Modern Car: 2 PADDLES shifting, 6 speed, with HANDBRAKE")
def testMapToShiftingData(self):
shiftingData = [('H-PATTERN'), ('SEQUENTIAL')]
self.database.loadShiftingData = MagicMock(side_effect=shiftingData)
carCandidates = [100, 200]
result = self.thing.mapCarsToShifting(carCandidates)
self.assertEqual(list(result), [(100, 'H-PATTERN'), (200, 'SEQUENTIAL')])
def testHandleCarUpdatesInvokesLambda(self):
self.database.getCarUpdateStatements = MagicMock(return_value=['update100', 'update200'])
carNames = ['Classic Car', 'Modern Car']
self.database.getCarName = MagicMock(side_effect=carNames)
updateHandler = MagicMock()
self.thing.handleCarUpdates([100, 200], 123456789, [], updateHandler)
call1 = call('UNKNOWN', 'Classic Car', 123456789, 'update100')
call2 = call('UNKNOWN', 'Modern Car', 123456789, 'update200')
updateHandler.assert_has_calls([call1, call2])
def testHandleTrackUpdatesInvokesLambda(self):
self.database.getTrackUpdateStatements = MagicMock(return_value=['update100', 'update200'])
trackNames = ['Sprint', 'Complete']
self.database.getTrackName = MagicMock(side_effect=trackNames)
updateHandler = MagicMock()
self.thing.handleTrackUpdates([100, 200], 123456789, [], updateHandler)
call1 = call('Sprint', 'UNKNOWN', 123456789, 'update100')
call2 = call('Complete', 'UNKNOWN', 123456789, 'update200')
updateHandler.assert_has_calls([call1, call2])
if __name__ == "__main__":
unittest.main()
| 2.9375 | 3 |
test/test-projections.py | dkogan/mrcal | 80 | 12791243 | #!/usr/bin/python3
r'''Tests for project() and unproject()
Here I make sure the projection functions return the correct values. A part of
this is a regression test: the "right" project() results were recorded at some
point, and any deviation is flagged.
This also tests gradients, normalization and in-place output.

I want to check all combinations of

- project/unproject
- get_gradients: yes/no
- model simple: yes/no
- broadcasted: yes/no
- unproject normalize: yes/no
- explicit "out" in args

(TODO: add others here: latlon, lonlat, stereographic. Broadcasted and not. Test
the project() and unproject() paths.)

check() covers all of these for ONE model
'''
import sys
import numpy as np
import numpysane as nps
import os
testdir = os.path.dirname(os.path.realpath(__file__))
# I import the LOCAL mrcal since that's what I'm testing
sys.path[:0] = f"{testdir}/..",
import mrcal
import testutils
from test_calibration_helpers import grad
def check(intrinsics, p_ref, q_ref):
########## project
q_projected = mrcal.project(p_ref, *intrinsics)
testutils.confirm_equal(q_projected,
q_ref,
msg = f"Projecting {intrinsics[0]}",
eps = 1e-2)
q_projected *= 0
mrcal.project(p_ref, *intrinsics,
out = q_projected)
testutils.confirm_equal(q_projected,
q_ref,
msg = f"Projecting {intrinsics[0]} in-place",
eps = 1e-2)
meta = mrcal.lensmodel_metadata_and_config(intrinsics[0])
if meta['has_gradients']:
@nps.broadcast_define( ((3,),('N',)) )
def grad_broadcasted(p_ref, i_ref):
return grad(lambda pi: mrcal.project(pi[:3], intrinsics[0], pi[3:]),
nps.glue(p_ref,i_ref, axis=-1))
dq_dpi_ref = grad_broadcasted(p_ref,intrinsics[1])
q_projected,dq_dp,dq_di = mrcal.project(p_ref, *intrinsics, get_gradients=True)
testutils.confirm_equal(q_projected,
q_ref,
msg = f"Projecting {intrinsics[0]} with grad",
eps = 1e-2)
testutils.confirm_equal(dq_dp,
dq_dpi_ref[...,:3],
msg = f"dq_dp {intrinsics[0]}",
eps = 1e-2)
testutils.confirm_equal(dq_di,
dq_dpi_ref[...,3:],
msg = f"dq_di {intrinsics[0]}",
eps = 1e-2)
out=[q_projected,dq_dp,dq_di]
out[0] *= 0
out[1] *= 0
out[2] *= 0
mrcal.project(p_ref, *intrinsics, get_gradients=True, out=out)
testutils.confirm_equal(q_projected,
q_ref,
msg = f"Projecting {intrinsics[0]} with grad in-place",
eps = 1e-2)
testutils.confirm_equal(dq_dp,
dq_dpi_ref[...,:3],
msg = f"dq_dp in-place",
eps = 1e-2)
testutils.confirm_equal(dq_di,
dq_dpi_ref[...,3:],
msg = f"dq_di in-place",
eps = 1e-2)
########## unproject
if 1:
##### Un-normalized
v_unprojected = mrcal.unproject(q_projected, *intrinsics,
normalize = False)
cos = nps.inner(v_unprojected, p_ref) / nps.mag(p_ref)
cos = np.clip(cos, -1, 1)
testutils.confirm_equal( np.arccos(cos),
np.zeros((p_ref.shape[0],), dtype=float),
msg = f"Unprojecting {intrinsics[0]}",
eps = 1e-6)
if 1:
##### Normalized
v_unprojected_nograd = mrcal.unproject(q_projected, *intrinsics,
normalize = True)
testutils.confirm_equal( nps.norm2(v_unprojected_nograd),
1,
msg = f"Unprojected v are normalized",
eps = 1e-6)
cos = nps.inner(v_unprojected_nograd, p_ref) / nps.mag(p_ref)
cos = np.clip(cos, -1, 1)
testutils.confirm_equal( np.arccos(cos),
np.zeros((p_ref.shape[0],), dtype=float),
msg = f"Unprojecting {intrinsics[0]} (normalized)",
eps = 1e-6)
if not meta['has_gradients']:
# no in-place output for the no-gradients unproject() path
return
v_unprojected *= 0
mrcal.unproject(q_projected, *intrinsics,
normalize = True,
out = v_unprojected)
testutils.confirm_equal( nps.norm2(v_unprojected),
1,
msg = f"Unprojected in-place v are normalized",
eps = 1e-6)
cos = nps.inner(v_unprojected, p_ref) / nps.mag(p_ref)
cos = np.clip(cos, -1, 1)
testutils.confirm_equal( np.arccos(cos),
np.zeros((p_ref.shape[0],), dtype=float),
msg = f"Unprojecting in-place {intrinsics[0]}",
eps = 1e-6)
### unproject gradients
v_unprojected,dv_dq,dv_di = mrcal.unproject(q_projected,
*intrinsics, get_gradients=True)
# I'd like to turn this on, but unproject() doesn't behave the way it
# should, so this test always fails currently
#
# testutils.confirm_equal( v_unprojected,
# v_unprojected_nograd,
# msg = f"Unproject() should return the same thing whether get_gradients or not",
# eps = 1e-6)
# Two different gradient computations, to match the two different ways the
# internal computation is performed
if intrinsics[0] == 'LENSMODEL_PINHOLE' or \
intrinsics[0] == 'LENSMODEL_STEREOGRAPHIC' or \
intrinsics[0] == 'LENSMODEL_LATLON' or \
intrinsics[0] == 'LENSMODEL_LONLAT':
@nps.broadcast_define( ((2,),('N',)) )
def grad_broadcasted(q_ref, i_ref):
return grad(lambda qi: mrcal.unproject(qi[:2], intrinsics[0], qi[2:]),
nps.glue(q_ref,i_ref, axis=-1))
dv_dqi_ref = grad_broadcasted(q_projected,intrinsics[1])
else:
@nps.broadcast_define( ((2,),('N',)) )
def grad_broadcasted(q_ref, i_ref):
return grad(lambda qi: \
mrcal.unproject_stereographic( \
mrcal.project_stereographic(
mrcal.unproject(qi[:2], intrinsics[0], qi[2:]))),
nps.glue(q_ref,i_ref, axis=-1))
dv_dqi_ref = grad_broadcasted(q_projected,intrinsics[1])
testutils.confirm_equal(mrcal.project(v_unprojected, *intrinsics),
q_projected,
msg = f"Unprojecting {intrinsics[0]} with grad",
eps = 1e-2)
testutils.confirm_equal(dv_dq,
dv_dqi_ref[...,:2],
msg = f"dv_dq: {intrinsics[0]}",
worstcase = True,
relative = True,
eps = 0.01)
testutils.confirm_equal(dv_di,
dv_dqi_ref[...,2:],
msg = f"dv_di {intrinsics[0]}",
worstcase = True,
relative = True,
eps = 0.01)
# Normalized unprojected gradients
v_unprojected,dv_dq,dv_di = mrcal.unproject(q_projected,
*intrinsics,
normalize = True,
get_gradients = True)
testutils.confirm_equal( nps.norm2(v_unprojected),
1,
msg = f"Unprojected v (with gradients) are normalized",
eps = 1e-6)
cos = nps.inner(v_unprojected, p_ref) / nps.mag(p_ref)
cos = np.clip(cos, -1, 1)
testutils.confirm_equal( np.arccos(cos),
np.zeros((p_ref.shape[0],), dtype=float),
msg = f"Unprojecting (normalized, with gradients) {intrinsics[0]}",
eps = 1e-6)
@nps.broadcast_define( ((2,),('N',)) )
def grad_normalized_broadcasted(q_ref, i_ref):
return grad(lambda qi: \
mrcal.unproject(qi[:2], intrinsics[0], qi[2:], normalize=True),
nps.glue(q_ref,i_ref, axis=-1))
dvnormalized_dqi_ref = grad_normalized_broadcasted(q_projected,intrinsics[1])
testutils.confirm_equal(dv_dq,
dvnormalized_dqi_ref[...,:2],
msg = f"dv_dq (normalized v): {intrinsics[0]}",
worstcase = True,
relative = True,
eps = 0.01)
testutils.confirm_equal(dv_di,
dvnormalized_dqi_ref[...,2:],
msg = f"dv_di (normalized v): {intrinsics[0]}",
worstcase = True,
relative = True,
eps = 0.01)
# unproject() with gradients, in-place
if 1:
# Normalized output
out=[v_unprojected,dv_dq,dv_di]
out[0] *= 0
out[1] *= 0
out[2] *= 0
mrcal.unproject(q_projected,
*intrinsics,
normalize = True,
get_gradients = True,
out = out)
testutils.confirm_equal( nps.norm2(v_unprojected),
1,
msg = f"Unprojected v (with gradients, in-place) are normalized",
eps = 1e-6)
cos = nps.inner(v_unprojected, p_ref) / nps.mag(p_ref)
cos = np.clip(cos, -1, 1)
testutils.confirm_equal( np.arccos(cos),
np.zeros((p_ref.shape[0],), dtype=float),
msg = f"Unprojecting (normalized, with gradients, in-place) {intrinsics[0]}",
eps = 1e-6)
testutils.confirm_equal(dv_dq,
dvnormalized_dqi_ref[...,:2],
msg = f"dv_dq (normalized v, in-place): {intrinsics[0]}",
worstcase = True,
relative = True,
eps = 0.01)
testutils.confirm_equal(dv_di,
dvnormalized_dqi_ref[...,2:],
msg = f"dv_di (normalized v, in-place): {intrinsics[0]}",
worstcase = True,
relative = True,
eps = 0.01)
if 1:
# un-normalized output
out=[v_unprojected,dv_dq,dv_di]
out[0] *= 0
out[1] *= 0
out[2] *= 0
mrcal.unproject(q_projected,
*intrinsics,
normalize = False,
get_gradients = True,
out = out)
cos = nps.inner(v_unprojected, p_ref) / nps.mag(p_ref)
cos = np.clip(cos, -1, 1)
testutils.confirm_equal( np.arccos(cos),
np.zeros((p_ref.shape[0],), dtype=float),
msg = f"Unprojecting (non-normalized, with gradients, in-place) {intrinsics[0]}",
eps = 1e-6)
testutils.confirm_equal(dv_dq,
dv_dqi_ref[...,:2],
msg = f"dv_dq (unnormalized v, in-place): {intrinsics[0]}",
worstcase = True,
relative = True,
eps = 0.01)
testutils.confirm_equal(dv_di,
dv_dqi_ref[...,2:],
msg = f"dv_di (unnormalized v, in-place): {intrinsics[0]}",
worstcase = True,
relative = True,
eps = 0.01)
# a few points, some wide, some not. None behind the camera
p = np.array(((1.0, 2.0, 10.0),
(-1.1, 0.3, 1.0),
(-0.9, -1.5, 1.0)))
check( ('LENSMODEL_PINHOLE', np.array(((1512., 1112, 500., 333.),
(1512., 1112, 500., 433.),
(1512., 1112, 500., 533.)))),
p,
np.array([[ 651.2, 555.4],
[-1163.2, 766.6],
[ -860.8, -1135. ]]))
check( ('LENSMODEL_STEREOGRAPHIC', np.array(((1512., 1112, 500., 333.),
(1502., 1112, 500., 433.),
(1522., 1112, 500., 533.)))),
p,
np.array([[ 649.35582325, 552.6874014],
[-813.05440267, 698.1222302],
[-408.67354332, -573.48815174]]))
check( ('LENSMODEL_LATLON', np.array(((1512., 1112, 500., 333.),
(1502., 1112, 500., 433.),
(1522., 1112, 500., 533.)))),
p,
np.array([[ 647.79131656, 552.50386255],
[-718.86844854, 757.09995546],
[-204.73403533, -559.86662025]]))
check( ('LENSMODEL_LONLAT', np.array(((1512., 1112, 500., 333.),
(1502., 1112, 500., 433.),
(1522., 1112, 500., 533.)))),
p,
np.array([[ 650.69900257, 551.44238248],
[-751.13786254, 654.42977413],
[-615.34458492, -400.73749463]]))
check( ('LENSMODEL_OPENCV4', np.array((1512., 1112, 500., 333.,
-0.012, 0.035, -0.001, 0.002))),
p,
np.array([[ 651.27371 , 555.23042 ],
[-1223.38516 , 678.01468 ],
[-1246.7310448, -1822.799928 ]]))
check( ('LENSMODEL_OPENCV5', np.array((1512., 1112, 500., 333.,
-0.012, 0.035, -0.001, 0.002, 0.019))),
p,
np.array([[ 651.2740691 , 555.2309482 ],
[-1292.8121176 , 691.9401448 ],
[-1987.550162 , -2730.85863427]]))
check( ('LENSMODEL_OPENCV8', np.array((1512., 1112, 500., 333.,
-0.012, 0.035, -0.001, 0.002, 0.019, 0.014, -0.056, 0.050))),
p,
np.array([[ 651.1885442 , 555.10514968],
[-1234.45480366, 680.23499814],
[ -770.03274263, -1238.4871943 ]]))
check( ('LENSMODEL_CAHVOR', np.array((4842.918, 4842.771, 1970.528, 1085.302,
-0.001, 0.002, -0.637, -0.002, 0.016))),
p,
np.array([[ 2143.17840406, 1442.93419919],
[ -92.63813066, 1653.09646897],
[ -249.83199315, -2606.46477164]]))
check( ('LENSMODEL_CAHVORE_linearity=0.00', np.array((4842.918, 4842.771, 1970.528, 1085.302,
-0.001, 0.002, -0.637, -0.002, 0.016, 1e-8, 2e-8, 3e-8))),
p,
np.array([[2140.34076919, 1437.37148001],
[ 496.63465931, 1493.31670636],
[ 970.11788123, -568.30114806]]))
check( ('LENSMODEL_CAHVORE_linearity=0.00', np.array((4842.918, 4842.771, 1970.528, 1085.302,
-0.001, 0.002, -0.637, -0.002, 0.016, 1e-2, 2e-2, 3e-2))),
p,
np.array([[2140.35607966, 1437.40149368],
[ 489.05797783, 1495.37110356],
[ 954.60918375, -594.21144463]]))
check( ('LENSMODEL_CAHVORE_linearity=0.40', np.array((4842.918, 4842.771, 1970.528, 1085.302,
-0.001, 0.002, -0.637, -0.002, 0.016, 1e-2, 2e-2, 3e-2))),
p,
np.array([[2140.80289923, 1438.2774104 ],
[ 423.27156274, 1513.20891648],
[ 872.53696336, -731.32905711]]))
# Note that some of the projected points are behind the camera (z<0), which is
# possible with these models. Also note that some of the projected points are
# off the imager (x<0). This is aphysical, but it just means that the model was
# made up; which it was. The math still works normally, and this is just fine as
# a test
check( ('LENSMODEL_SPLINED_STEREOGRAPHIC_order=3_Nx=11_Ny=8_fov_x_deg=200',
np.array([ 1500.0, 1800.0, 1499.5,999.5,
2.017284705,1.242204557,2.053514381,1.214368063,2.0379067,1.212609628,
2.033278227,1.183689487,2.040018023,1.188554431,2.069146825,1.196304649,
2.085708658,1.186478238,2.065787617,1.163377825,2.086372192,1.138856716,
2.131609155,1.125678279,2.128812604,1.120525061,2.00841491,1.21864154,
2.024522768,1.239588759,2.034947935,1.19814079,2.065474055,1.19897294,
2.044562395,1.200557321,2.087714092,1.160440038,2.086478691,1.151822407,
2.112862582,1.147567288,2.101575718,1.146312256,2.10056469,1.157015327,
2.113488262,1.111679758,2.019837901,1.244168216,2.025847768,1.215633807,
2.041980956,1.205751212,2.075077056,1.199787561,2.070877831,1.203261678,
2.067244278,1.184705736,2.082225077,1.185558149,2.091519961,1.17501817,
2.120258866,1.137775228,2.120020747,1.152409316,2.121870228,1.113069319,
2.043650555,1.247757041,2.019661062,1.230723629,2.067917203,1.209753396,
2.035034141,1.219514335,2.045350268,1.178474255,2.046346049,1.169372592,
2.097839998,1.194836758,2.112724938,1.172186377,2.110996386,1.154899043,
2.128456883,1.133228404,2.122513384,1.131717886,2.044279196,1.233288366,
2.023197297,1.230118703,2.06707694,1.199998862,2.044147271,1.191607451,
2.058590053,1.1677808,2.081593501,1.182074581,2.08663053,1.159156329,
2.084329086,1.157727374,2.073666528,1.151261965,2.114290905,1.144710519,
2.138600912,1.119405248,2.016299528,1.206147494,2.029434175,1.211507857,
2.057936091,1.19801196,2.035691392,1.174035359,2.084718618,1.203604729,
2.085910021,1.158385222,2.080800068,1.150199852,2.087991586,1.162019581,
2.094754507,1.151061493,2.115144642,1.154299799,2.107014195,1.127608146,
2.005632475,1.238607328,2.02033157,1.202101384,2.061021703,1.214868271,
2.043015135,1.211903685,2.05291186,1.188092787,2.09486724,1.179277314,
2.078230124,1.186273023,2.077743945,1.148028845,2.081634186,1.131207467,
2.112936851,1.126412871,2.113220553,1.114991063,2.017901873,1.244588667,
2.051238803,1.201855728,2.043256406,1.216674722,2.035286046,1.178380907,
2.08028318,1.178783085,2.051214271,1.173560417,2.059298121,1.182414688,
2.094607679,1.177960959,2.086998287,1.147371259,2.12029442,1.138197348,
2.138994213, 1.114846113,],)),
# some points behind the camera!
np.array([[-0.8479983, -0.52999894, -0.34690877],
[-0.93984618, 0.34159794, -0.16119387],
[-0.97738792, 0.21145412, 5.49068928]]),
np.array([[ 965.9173441 , 524.31894367],
[1246.58668369, 4621.35427783],
[4329.41598149, 3183.75121559]]))
check( ('LENSMODEL_SPLINED_STEREOGRAPHIC_order=2_Nx=11_Ny=8_fov_x_deg=200',
np.array([ 1500.0, 1800.0, 1499.5,999.5,
2.017284705,1.242204557,2.053514381,1.214368063,2.0379067,1.212609628,
2.033278227,1.183689487,2.040018023,1.188554431,2.069146825,1.196304649,
2.085708658,1.186478238,2.065787617,1.163377825,2.086372192,1.138856716,
2.131609155,1.125678279,2.128812604,1.120525061,2.00841491,1.21864154,
2.024522768,1.239588759,2.034947935,1.19814079,2.065474055,1.19897294,
2.044562395,1.200557321,2.087714092,1.160440038,2.086478691,1.151822407,
2.112862582,1.147567288,2.101575718,1.146312256,2.10056469,1.157015327,
2.113488262,1.111679758,2.019837901,1.244168216,2.025847768,1.215633807,
2.041980956,1.205751212,2.075077056,1.199787561,2.070877831,1.203261678,
2.067244278,1.184705736,2.082225077,1.185558149,2.091519961,1.17501817,
2.120258866,1.137775228,2.120020747,1.152409316,2.121870228,1.113069319,
2.043650555,1.247757041,2.019661062,1.230723629,2.067917203,1.209753396,
2.035034141,1.219514335,2.045350268,1.178474255,2.046346049,1.169372592,
2.097839998,1.194836758,2.112724938,1.172186377,2.110996386,1.154899043,
2.128456883,1.133228404,2.122513384,1.131717886,2.044279196,1.233288366,
2.023197297,1.230118703,2.06707694,1.199998862,2.044147271,1.191607451,
2.058590053,1.1677808,2.081593501,1.182074581,2.08663053,1.159156329,
2.084329086,1.157727374,2.073666528,1.151261965,2.114290905,1.144710519,
2.138600912,1.119405248,2.016299528,1.206147494,2.029434175,1.211507857,
2.057936091,1.19801196,2.035691392,1.174035359,2.084718618,1.203604729,
2.085910021,1.158385222,2.080800068,1.150199852,2.087991586,1.162019581,
2.094754507,1.151061493,2.115144642,1.154299799,2.107014195,1.127608146,
2.005632475,1.238607328,2.02033157,1.202101384,2.061021703,1.214868271,
2.043015135,1.211903685,2.05291186,1.188092787,2.09486724,1.179277314,
2.078230124,1.186273023,2.077743945,1.148028845,2.081634186,1.131207467,
2.112936851,1.126412871,2.113220553,1.114991063,2.017901873,1.244588667,
2.051238803,1.201855728,2.043256406,1.216674722,2.035286046,1.178380907,
2.08028318,1.178783085,2.051214271,1.173560417,2.059298121,1.182414688,
2.094607679,1.177960959,2.086998287,1.147371259,2.12029442,1.138197348,
2.138994213, 1.114846113,],)),
# some points behind the camera!
np.array([[-0.8479983, -0.52999894, -0.34690877],
[-0.93984618, 0.34159794, -0.16119387],
[-0.97738792, 0.21145412, 5.49068928]]),
np.array([[ 958.48347896, 529.99410342],
[1229.87308989, 4625.05434521],
[4327.8166836 , 3183.44237796]]))
testutils.finish()
| 1.898438 | 2 |
my_custom_maintenance/my_custom_maintenance/doctype/machine_status/machine_status.py | msf4-0/ERPNext_my_custom__maintenance | 0 | 12791244 | # -*- coding: utf-8 -*-
# Copyright (c) 2021, cjs and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class MachineStatus(Document):
# return True if API exist in database, otherwise return False.
@frappe.whitelist()
def check_if_API_exist(self):
if frappe.db.exists("Server Script", "Mac Stat From NR To ERPNext"):
return "API exists"
else:
return "API not found"
# disable the API once user decide to NOT sync with Node-RED
@frappe.whitelist()
def disable_API(self):
ssapi = frappe.get_doc('Server Script', "Mac Stat From NR To ERPNext")
# ssapi.reload()
ssapi.db_set('disabled', 1, commit=True)
ssapi.save(ignore_version=True)
ssapi.reload()
# @frappe.whitelist()
def get_previous_mac_stat(self, ms_id):
ms = frappe.get_doc('Machine Status', ms_id)
return ms.machine_status
# @frappe.whitelist()
# def create_new_mac_stat_log(self, ms_id, new_mac_stat):
# ms = frappe.get_doc('Machine Status', ms_id)
# msl = frappe.new_doc('Machine Status Log')
# msl.update({
# 'machine_status': ms.name,
# 'asset': ms.asset,
# 'workstation': ms.workstation,
# 'current_time': frappe.utils.get_datetime(),
# 'current_machine_status': new_mac_stat,
# 'docstatus': 1
# })
# msl.insert()
# # msl.docstatus = 1
# # msl.save()
# @frappe.whitelist()
# def update_sync_with_nr_flag(self, ms_id, new_value):
# # ms = frappe.get_doc('Machine Status', ms_id)
# # ms.reload()
# # ms.sync_with_nr = new_value
# # ms.db_set('sync_with_nr', new_value)
# # ms.save()
# # ms.reload()
@frappe.whitelist()
def create_new_mac_stat_log(ms_id, new_mac_stat):
ms = frappe.get_doc('Machine Status', ms_id)
msl = frappe.new_doc('Machine Status Log')
msl.update({
'machine_status': ms.name,
'asset': ms.asset,
'workstation': ms.workstation,
'current_time': frappe.utils.get_datetime(),
'current_machine_status': new_mac_stat,
'docstatus': 1
})
msl.insert()
# msl.docstatus = 1
# msl.save()
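# Illustrative note (added): `create_new_mac_stat_log` is whitelisted, so it can
# be called from client scripts or over Frappe's REST interface; the dotted path
# below is assumed from the app layout shown above, not confirmed by the source:
#
#   POST /api/method/my_custom_maintenance.my_custom_maintenance.doctype.machine_status.machine_status.create_new_mac_stat_log
#   with args: ms_id, new_mac_stat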
| 1.796875 | 2 |
examples/internationalisation/testLangs.py | tgolsson/appJar | 666 | 12791245 | import sys
sys.path.append("../../")
from appJar import gui
def press(btn):
app.changeLanguage(btn)
app=gui()
app.showSplash()
app.addLabel("l1", "default text")
app.addButtons(["English", "Korean", "French"], press)
app.addLabel("l2", "default text")
app.addLabel("l3", "default text")
app.addLabelEntry("Genome")
app.addLabelScale("s1")
app.addMessage("m1", "Default message text")
app.addListBox("fruits", ["apples", "oranges", "tomatoes"])
app.addOptionBox("fruits", ["apples", "oranges", "tomatoes"])
app.addSpinBox("fruits", ["apples", "oranges", "tomatoes"])
app.addCheckBox("b1")
app.addCheckBox("b2")
app.addCheckBox("b3")
app.startLabelFrame("Names")
app.addRadioButton("name", "b1")
app.addRadioButton("name", "b2")
app.addRadioButton("name", "b3")
app.addRadioButton("name", "b4")
app.stopLabelFrame()
app.addRadioButton("age", "b1")
app.addRadioButton("age", "b2")
app.addRadioButton("age", "b3")
app.addLink("l1", None)
app.addWebLink("l2", "http://www.appJar.info")
app.addMeter("m1")
app.addEntry("e1")
app.addEntry("e2")
app.setEntryDefault("e1", "<DEFAULT>")
app.go(language="ENGLISH")
| 2.6875 | 3 |
python/project/business.py | Jai-Doshi/Python-Project | 0 | 12791246 | # IMPORTING LIBRARIES
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import datetime
import warnings
# DECLARING VARIABLES
# USER INPUT LIST
name = []
age = []
loc = []
cat = []
subcat = []
year = []
# ADMIN INPUT LIST
cat_list = []
cat_sub = {}
# GUEST INPUT
file_name = 'categories.txt'
class Business:
a = ''
auth = False
def __init__(self):
print('''
SELECT YOUR CHOICE :
1. ADMIN
2. USER
3. GUEST
''')
enter = int(input('Enter your Choice : '))
if (enter == 1):
admin_username = input('ENTER USERNAME : ')
admin_password = input('ENTER PASSWORD : ')
if (self.auth == False):
if (admin_username == 'admin123' and admin_password == 'password'):
self.admin(cat_list,self.subcategory)
self.a = 'TASK COMPLETED SUCCESSFULLY'
self.auth = True
else:
print('INVALID USERNAME OR PASSWORD')
self.a = 'INVALID USERNAME OR PASSWORD'
self.__init__()
else:
self.add_category(self.new_cat_list)
elif (enter == 2):
self.business(name,age,loc,cat,subcat,year)
self.a = 'TASK COMPLETED SUCCESSFULLY !!!'
elif (enter == 3):
self.intro(file_name,read=True)
else:
print('PLEASE ENTER A VALID NUMBER')
self.a = 'PLEASE ENTER A VALID NUMBER'
self.__init__()
def business(self,name,age,loc,cat,subcat,year):
for i in range(1):
print('USER NO. : ',i+1)
user_name = input('Enter your Name : ')
user_age = int(input('Enter your Age : '))
user_loc = input('Enter your Location : ')
user_cat = self.category()
            user_subcat = self.subcategory(user_cat)
user_year = datetime.datetime.now().year
name.append(user_name.capitalize())
age.append(user_age)
loc.append(user_loc.upper())
cat.append(user_cat.capitalize())
subcat.append(user_subcat.capitalize())
year.append(user_year)
self.data_to_csv(name,age,loc,cat,subcat,year)
def admin(self,category,subcategory):
n = int(input('Enter the No. of Categories want to be Added : '))
for i in range(n):
print('\n ======================= STARTED CATEGORY {} =========================== \n'.format(i+1))
print('Category No. :',i+1)
user_category = input('Enter the Category : ')
cat_list.append(user_category.capitalize())
            # prompt sub-categories only for the category that was just added;
            # iterating over the full cat_list would re-prompt earlier ones
            k = user_category.capitalize()
            cat_sub.setdefault(k)
            m = int(input('Enter the No. of Sub Categories want to be added for the above Category : '))
            subcat_list = []
            for j in range(m):
                print('Sub Category No. :', j + 1)
                user_sub_category = input('Enter the Sub-Category : ')
                subcat_list.append(user_sub_category.capitalize())
            cat_sub[k] = subcat_list
print('\n ======================= ENDED CATEGORY {} =========================== \n'.format(i+1))
def category(self):
for i,each in enumerate(cat_list):
print('{}. {} \t \t'.format(i+1,each),end='')
if i % 4 == 3:
print("\n")
print('\n')
cat = int(input('Enter the No. : '))
return cat_list[cat-1]
def subcategory(self,cat_value):
print('\n')
print('THE CATEGORY IS : {}'.format(cat_value))
for i,each in enumerate(cat_sub[cat_value]):
print('{}. {} \t \t'.format(i+1,each),end='')
if i % 4 == 3:
print("\n")
print('\n')
print('THE SUB CATEGORY OF {} IS : '.format(cat_value))
print('\n')
subcat = int(input('Enter the No. : '))
print('SUBCATEGORY IS : {}'.format(cat_sub[cat_value][subcat-1]))
return cat_sub[cat_value][subcat-1]
new_cat_list = []
new_cat_sub = {}
def add_category(self,new_cat):
n = int(input('Enter the No. of Categories want to be Added : '))
for i in range(n):
print('\n ======================= STARTED CATEGORY {} =========================== \n'.format(i+1))
print('Category No. :',i+1)
user_category = input('Enter the Category : ')
self.new_cat_list.append(user_category.capitalize())
            # register only the category just added; extending cat_list with the
            # whole new_cat_list on every pass would duplicate earlier entries
            # and re-prompt their sub-categories
            k = user_category.capitalize()
            cat_list.append(k)
            self.new_cat_sub.setdefault(k)
            m = int(input('Enter the No. of Sub Categories want to be added for the above Category : '))
            new_subcat_list = []
            for j in range(m):
                print('Sub Category No. :', j + 1)
                user_sub_category = input('Enter the Sub-Category : ')
                new_subcat_list.append(user_sub_category.capitalize())
            self.new_cat_sub[k] = new_subcat_list
print('\n ======================= ENDED CATEGORY {} =========================== \n'.format(i+1))
return cat_sub.update(self.new_cat_sub)
def data_to_csv(self,name,age,loc,cat,subcat,year):
dict = {
'name': name,
'age': age,
'location': loc,
'category': cat,
'subcategory': subcat,
'year': year
}
df = pd.DataFrame(dict)
csv = df.to_csv('data.csv', index=False)
return csv
def intro(self,file_name,read=False):
f = open(file_name,'w')
f.write('CATEGORY AND SUBCATEGORY')
f.write('\n')
f.write("=======================================")
f.write('\n')
f.write("| CATEGORY NO. | CATEGORY NAME |")
f.write('\n')
f.write("=======================================")
f.write('\n')
for i,cat_each in enumerate(cat_sub,start=1):
f.write("| {} | {} ".format(str(i).center(9),cat_each))
f.write('\n')
f.write(" \t \t | SUB CATEGORY NO. | SUB CATEGORY NAME |")
f.write('\n')
for j,subcat_each in enumerate(cat_sub[cat_each],start=1):
f.write(" \t \t | {} | {} ".format(str(j).center(11),subcat_each))
f.write('\n')
if(read==True):
f = open(file_name,'r')
print(f.read())
f.close()
def __str__(self):
return self.a
| 3.46875 | 3 |
selenium_app.py | CesarRodriguezPro/TimeStationAutoclockout | 0 | 12791247 | from selenium import webdriver
from selenium.webdriver.support.ui import Select
from datetime import date
from get_names import send_names
'''
this app automatic login in to a website and cut the hours of the people who forgot to clock out
in lunch time
'''
####################################### Basic settings #########################################
SET_TIME = '11:50'
SET_TIME_DIV = 'AM'
set_date = date.strftime(date.today(), "%m/%d/%Y") # today in format mm/dd/yyyy
USERNAME = ''
PASSWORD = ""
NOTE = f'Automatic System - Forgot to clock out for Lunch - Administrator - {set_date}'
list_of_names = send_names() # import data from get_names.py
################################################################################################
browser = webdriver.Firefox()
browser.get('https://www.timestation.com/Login.asp')
def login_page():
''' login in to website autetification '''
email_field = browser.find_element_by_css_selector('#eMail')
password_field = browser.find_element_by_css_selector('#Password')
summit_field = browser.find_element_by_css_selector('.ButtonGreen')
email_field.clear()
email_field.send_keys(USERNAME)
password_field.clear()
password_field.send_keys(PASSWORD)
summit_field.click()
def select_employees_website():
''' click the employees link inside of the website after login in.'''
employee_css = '.menu-main > li:nth-child(2) > a:nth-child(1)'
employee_button_ = browser.find_element_by_css_selector(employee_css)
employee_button_.click()
def select_names(name):
''' this scroll the website and click the checkbox with their name'''
info_name = browser.find_element_by_link_text(name)
    href_link = info_name.get_attribute('href')
    id_number = href_link.split('=')
xpath_path = f".//input[@value={id_number[1]}]"
for_click = browser.find_element_by_xpath(xpath_path)
for_click.location_once_scrolled_into_view
for_click.click()
def select_box(id, text):
''' open select items and select them. '''
action_find = browser.find_element_by_id(id)
action_find.location_once_scrolled_into_view
action_tab = Select(action_find)
action_tab.select_by_visible_text(text)
def select_names_flow(names):
''' this function gets in a list of names that will loop in it. '''
for name in names:
select_names(name=name)
select_box(id='employeeAction', text='Check-Out') # to select the action bar
select_box(id='TimeOut_Hour', text=SET_TIME.split(':')[0]) # this input the hour to field
select_box(id='TimeOut_Minute', text=SET_TIME.split(':')[1]) # this input the minutes to field
select_box(id='TimeOut_AMPM', text=SET_TIME_DIV) # this input the AM or PM in field
date_field = browser.find_element_by_id('TimeOut_Date')
note_field = browser.find_element_by_id('Notes')
date_field.send_keys(set_date)
note_field.send_keys(NOTE)
browser.find_element_by_name('Submit').click()
if __name__ == "__main__":
login_page()
select_employees_website()
select_names_flow(names=list_of_names) # this accept a list of names to be change.
########## this created a log in a text file ########################
try:
with open('NameChangeLog.txt', 'w') as file_log:
for name in list_of_names:
file_log.write(f'{set_date} -- {name}\n')
print(name)
except:
with open('NameChangeLog.txt', 'a') as file_log:
for name in list_of_names:
file_log.write(f'{set_date} -- {name}\n')
print(name)
finally:
print('everything was change successfully ')
browser.close()
########################################################################## | 3.1875 | 3 |
tests/test_methylation_masking.py | HarryZhang1224/BSBolt | 10 | 12791248 | import unittest
import numpy as np
from bsbolt.Impute.Validation.MaskValues import MaskImputationValues
from bsbolt.Impute.Impute_Utils.ImputationFunctions import get_bsb_matrix
from tests.TestHelpers import test_directory
test_methylation_data = f'{test_directory}/TestData/kNN_test_matrix.txt'
test_matrix, test_sites, test_samples = get_bsb_matrix(test_methylation_data)
# test standard masking
random_masking = MaskImputationValues(methylation_array=test_matrix,
masking_proportion=0.05, verbose=True)
random_masking.mask_random_sites()
# test masking known sites
new_masking = MaskImputationValues(methylation_array=test_matrix, masking_proportion=0.05, verbose=True,
masking_sites=random_masking.masking_sites)
new_masking.mask_known_sites()
# test masking proportions
masking_proportions = [.2] + [0 for _ in range(9)]
masking_proportion_test = MaskImputationValues(methylation_array=test_matrix,
masking_proportion=masking_proportions,
verbose=True)
masking_proportion_test.mask_random_sites()
class TestSiteMasking(unittest.TestCase):
def setUp(self):
pass
def test_sites_masked(self):
""" Test values are being masked by comparing row length before and after dropping rows with
null values
"""
masked_row_count = random_masking.methylation_array[
~np.isnan(random_masking.methylation_array).any(axis=1)].shape[0]
input_row_count = test_matrix[~np.isnan(test_matrix).any(axis=1)].shape[0]
self.assertLess(masked_row_count, input_row_count)
def test_same_sites_masked(self):
"""If given a list of masking sites test the same sites are masked when rerun"""
for key in new_masking.masking_sites:
self.assertIn(key, random_masking.masking_sites)
def test_known_value_save(self):
"""Test saved known values correspond to the data at the original index"""
for key, value in random_masking.masking_sites.items():
row_index, column_index = (int(x) for x in key.split('_'))
known_value = test_matrix[row_index, column_index]
self.assertEqual(value, known_value)
def test_masking_proportions(self):
"""Test sites masked for first site only if masking proportion provided for only that sample"""
for key, value in masking_proportion_test.masking_sites.items():
row_index, column_index = (int(x) for x in key.split('_'))
self.assertEqual(column_index, 0)
if __name__ == '__main__':
unittest.main()
| 2.515625 | 3 |
monai/apps/detection/utils/detector_utils.py | function2-llx/MONAI | 0 | 12791249 | # Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List, Sequence, Tuple, Union
import torch
import torch.nn.functional as F
from torch import Tensor
from monai.transforms.croppad.array import SpatialPad
from monai.transforms.utils import compute_divisible_spatial_size, convert_pad_mode
from monai.utils import PytorchPadMode, ensure_tuple_rep
def check_input_images(input_images: Union[List[Tensor], Tensor], spatial_dims: int) -> None:
"""
Validate the input dimensionality (raise a `ValueError` if invalid).
Args:
input_images: It can be 1) a tensor sized (B, C, H, W) or (B, C, H, W, D),
or 2) a list of image tensors, each image i may have different size (C, H_i, W_i) or (C, H_i, W_i, D_i).
spatial_dims: number of spatial dimensions of the images, 2 or 3.
"""
if isinstance(input_images, Tensor):
if len(input_images.shape) != spatial_dims + 2:
raise ValueError(
"When input_images is a Tensor, its need to be (spatial_dims + 2)-D."
f"In this case, it should be a {(spatial_dims + 2)}-D Tensor, got Tensor shape {input_images.shape}."
)
elif isinstance(input_images, List):
for img in input_images:
if len(img.shape) != spatial_dims + 1:
raise ValueError(
"When input_images is a List[Tensor], each element should have be (spatial_dims + 1)-D."
f"In this case, it should be a {(spatial_dims + 1)}-D Tensor, got Tensor shape {img.shape}."
)
else:
raise ValueError("input_images needs to be a List[Tensor] or Tensor.")
return
def check_training_targets(
input_images: Union[List[Tensor], Tensor],
targets: Union[List[Dict[str, Tensor]], None],
spatial_dims: int,
target_label_key: str,
target_box_key: str,
) -> None:
"""
Validate the input images/targets during training (raise a `ValueError` if invalid).
Args:
input_images: It can be 1) a tensor sized (B, C, H, W) or (B, C, H, W, D),
or 2) a list of image tensors, each image i may have different size (C, H_i, W_i) or (C, H_i, W_i, D_i).
targets: a list of dict. Each dict with two keys: target_box_key and target_label_key,
ground-truth boxes present in the image.
spatial_dims: number of spatial dimensions of the images, 2 or 3.
target_label_key: the expected key of target labels.
target_box_key: the expected key of target boxes.
"""
if targets is None:
raise ValueError("Please provide ground truth targets during training.")
if len(input_images) != len(targets):
raise ValueError(f"len(input_images) should equal to len(targets), got {len(input_images)}, {len(targets)}.")
for target in targets:
if (target_label_key not in target.keys()) or (target_box_key not in target.keys()):
raise ValueError(
f"{target_label_key} and {target_box_key} are expected keys in targets. Got {target.keys()}."
)
boxes = target[target_box_key]
if not isinstance(boxes, torch.Tensor):
raise ValueError(f"Expected target boxes to be of type Tensor, got {type(boxes)}.")
if len(boxes.shape) != 2 or boxes.shape[-1] != 2 * spatial_dims:
raise ValueError(
f"Expected target boxes to be a tensor " f"of shape [N, {2* spatial_dims}], got {boxes.shape}."
)
return
def pad_images(
input_images: Union[List[Tensor], Tensor],
spatial_dims: int,
size_divisible: Union[int, Sequence[int]],
mode: Union[PytorchPadMode, str] = PytorchPadMode.CONSTANT,
**kwargs,
) -> Tuple[Tensor, List[List[int]]]:
"""
Pad the input images, so that the output spatial sizes are divisible by `size_divisible`.
It pads them at the end to create a (B, C, H, W) or (B, C, H, W, D) Tensor.
Padded size (H, W) or (H, W, D) is divisible by size_divisible.
Default padding uses constant padding with value 0.0
Args:
input_images: It can be 1) a tensor sized (B, C, H, W) or (B, C, H, W, D),
or 2) a list of image tensors, each image i may have different size (C, H_i, W_i) or (C, H_i, W_i, D_i).
spatial_dims: number of spatial dimensions of the images, 2D or 3D.
size_divisible: int or Sequence[int], is the expected pattern on the input image shape.
If an int, the same `size_divisible` will be applied to all the input spatial dimensions.
mode: available modes for PyTorch Tensor: {``"constant"``, ``"reflect"``, ``"replicate"``, ``"circular"``}.
One of the listed string values or a user supplied function. Defaults to ``"constant"``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.pad.html
kwargs: other arguments for `torch.pad` function.
Return:
- images, a (B, C, H, W) or (B, C, H, W, D) Tensor
- image_sizes, the original spatial size of each image
"""
size_divisible = ensure_tuple_rep(size_divisible, spatial_dims)
# If input_images: Tensor
if isinstance(input_images, Tensor):
orig_size = list(input_images.shape[-spatial_dims:])
new_size = compute_divisible_spatial_size(spatial_shape=orig_size, k=size_divisible)
all_pad_width = [(0, max(sp_i - orig_size[i], 0)) for i, sp_i in enumerate(new_size)]
pt_pad_width = [val for sublist in all_pad_width for val in sublist[::-1]][::-1]
if max(pt_pad_width) == 0:
# if there is no need to pad
return input_images, [orig_size] * input_images.shape[0]
mode_: str = convert_pad_mode(dst=input_images, mode=mode).value
return F.pad(input_images, pt_pad_width, mode=mode_, **kwargs), [orig_size] * input_images.shape[0]
# If input_images: List[Tensor])
image_sizes = [img.shape[-spatial_dims:] for img in input_images]
in_channels = input_images[0].shape[0]
dtype = input_images[0].dtype
device = input_images[0].device
# compute max_spatial_size
image_sizes_t = torch.tensor(image_sizes)
max_spatial_size_t, _ = torch.max(image_sizes_t, dim=0)
if len(max_spatial_size_t) != spatial_dims or len(size_divisible) != spatial_dims:
raise ValueError(" Require len(max_spatial_size_t) == spatial_dims ==len(size_divisible).")
max_spatial_size = compute_divisible_spatial_size(spatial_shape=list(max_spatial_size_t), k=size_divisible)
# allocate memory for the padded images
images = torch.zeros([len(image_sizes), in_channels] + max_spatial_size, dtype=dtype, device=device)
# Use `SpatialPad` to match sizes, padding in the end will not affect boxes
padder = SpatialPad(spatial_size=max_spatial_size, method="end", mode=mode, **kwargs)
for idx, img in enumerate(input_images):
images[idx, ...] = padder(img) # type: ignore
return images, [list(ss) for ss in image_sizes]
def preprocess_images(
input_images: Union[List[Tensor], Tensor],
spatial_dims: int,
size_divisible: Union[int, Sequence[int]],
mode: Union[PytorchPadMode, str] = PytorchPadMode.CONSTANT,
**kwargs,
) -> Tuple[Tensor, List[List[int]]]:
"""
Preprocess the input images, including
- validate of the inputs
- pad the inputs so that the output spatial sizes are divisible by `size_divisible`.
It pads them at the end to create a (B, C, H, W) or (B, C, H, W, D) Tensor.
Padded size (H, W) or (H, W, D) is divisible by size_divisible.
Default padding uses constant padding with value 0.0
Args:
input_images: It can be 1) a tensor sized (B, C, H, W) or (B, C, H, W, D),
or 2) a list of image tensors, each image i may have different size (C, H_i, W_i) or (C, H_i, W_i, D_i).
spatial_dims: number of spatial dimensions of the images, 2 or 3.
size_divisible: int or Sequence[int], is the expected pattern on the input image shape.
If an int, the same `size_divisible` will be applied to all the input spatial dimensions.
mode: available modes for PyTorch Tensor: {``"constant"``, ``"reflect"``, ``"replicate"``, ``"circular"``}.
One of the listed string values or a user supplied function. Defaults to ``"constant"``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.pad.html
kwargs: other arguments for `torch.pad` function.
Return:
- images, a (B, C, H, W) or (B, C, H, W, D) Tensor
- image_sizes, the original spatial size of each image
"""
check_input_images(input_images, spatial_dims)
size_divisible = ensure_tuple_rep(size_divisible, spatial_dims)
return pad_images(input_images, spatial_dims, size_divisible, mode, **kwargs)
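# Illustrative usage sketch (added for clarity; not part of the MONAI source).
# Two 3D volumes of different spatial sizes are padded at the end so that every
# spatial dimension becomes divisible by 16; the sizes below are made up.
#
#   import torch
#   imgs = [torch.rand(1, 37, 63, 50), torch.rand(1, 41, 58, 62)]
#   batch, sizes = preprocess_images(imgs, spatial_dims=3, size_divisible=16)
#   # batch.shape -> torch.Size([2, 1, 48, 64, 64])
#   # sizes       -> [[37, 63, 50], [41, 58, 62]]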
| 2.234375 | 2 |
document_clustering/cluster.py | chuajiesheng/twitter-sentiment-analysis | 0 | 12791250 | # coding=utf-8
# OS-level import
import sys
import os
import code
# Data related import
import numpy as np
import pandas as pd
import nltk
import re
import os
import codecs
from sklearn import feature_extraction
import mpld3
from nltk.stem.snowball import SnowballStemmer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.cluster import KMeans
from nltk.tokenize import TweetTokenizer
# Project related object
from utils import Reader
stopwords = nltk.corpus.stopwords.words('english')
stemmer = SnowballStemmer("english")
tknzr = TweetTokenizer(preserve_case=False, strip_handles=True, reduce_len=True)
def tokenize_and_stem(text):
# first tokenize by sentence, then by word to ensure that punctuation is caught as it's own token
tokens = [word for sent in nltk.sent_tokenize(text) for word in tknzr.tokenize(sent)]
filtered_tokens = []
# filter out any tokens not containing letters (e.g., numeric tokens, raw punctuation)
for token in tokens:
if re.search('[a-zA-Z]', token):
filtered_tokens.append(token)
stems = [stemmer.stem(t) for t in filtered_tokens]
return stems
def tokenize_only(text):
# first tokenize by sentence, then by word to ensure that punctuation is caught as it's own token
tokens = [word.lower() for sent in nltk.sent_tokenize(text) for word in tknzr.tokenize(sent)]
filtered_tokens = []
# filter out any tokens not containing letters (e.g., numeric tokens, raw punctuation)
for token in tokens:
if re.search('[a-zA-Z]', token):
filtered_tokens.append(token)
return filtered_tokens
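# Illustrative example of the two tokenizers above (added for clarity; output
# shown is approximate).  Handles are stripped and punctuation-only tokens are
# dropped; tokenize_and_stem additionally applies the Snowball stemmer.
#
#   tokenize_only("@user Loving these new features!!!")
#   # -> ['loving', 'these', 'new', 'features']
#   tokenize_and_stem("@user Loving these new features!!!")
#   # -> ['love', 'these', 'new', 'featur']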
if __name__ == '__main__':
reload(sys)
sys.setdefaultencoding('utf-8')
working_directory = os.getcwd()
files = Reader.read_directory(working_directory)
print '{} files available'.format(len(files))
# TODO: remove me
files = files[:800]
all_tweets = []
totalvocab_stemmed = []
totalvocab_tokenized = []
for f in files:
tweets = Reader.read_file(f)
selected_tweets = filter(lambda t: t.is_post() and t.language() == 'en', tweets)
        texts = map(lambda t: t.body(), selected_tweets)
        all_tweets.extend(texts)  # collect the tweet bodies used for TF-IDF below
for i in texts:
allwords_stemmed = tokenize_and_stem(i) # for each item in 'all_tweets', tokenize/stem
totalvocab_stemmed.extend(allwords_stemmed) # extend the 'totalvocab_stemmed' list
allwords_tokenized = tokenize_only(i)
totalvocab_tokenized.extend(allwords_tokenized)
vocab_frame = pd.DataFrame({'words': totalvocab_tokenized}, index=totalvocab_stemmed)
print 'there are ' + str(vocab_frame.shape[0]) + ' items in vocab_frame'
tfidf_vectorizer = TfidfVectorizer(max_df=0.8, max_features=200000,
min_df=0.05, stop_words='english',
use_idf=True, tokenizer=tokenize_and_stem, ngram_range=(1, 3))
tfidf_matrix = tfidf_vectorizer.fit_transform(all_tweets) # fit the vectorizer to synopses
print 'td-idf matrix: {}'.format(tfidf_matrix.shape)
terms = tfidf_vectorizer.get_feature_names()
dist = 1 - cosine_similarity(tfidf_matrix)
num_clusters = 10
km = KMeans(n_clusters=num_clusters, verbose=0)
# code.interact(local=dict(globals(), **locals()))
km.fit(tfidf_matrix)
clusters = km.labels_.tolist()
order_centroids = km.cluster_centers_.argsort()[:, ::-1]
for i in range(num_clusters):
print 'Cluster {} words: '.format(str(i)),
for ind in order_centroids[i, :6]: # replace 6 with n words per cluster
print '{}'.format(vocab_frame.ix[terms[ind].split(' ')].values.tolist()[0][0].encode('utf-8', 'ignore')),
print ''
| 2.921875 | 3 |