max_stars_repo_path (stringlengths 3-269) | max_stars_repo_name (stringlengths 4-119) | max_stars_count (int64 0-191k) | id (stringlengths 1-7) | content (stringlengths 6-1.05M) | score (float64 0.23-5.13) | int_score (int64 0-5) |
---|---|---|---|---|---|---|
iot/rmq/config.py | aabdulwahed/Sitechain_paper_performanceAnalysis | 0 | 12797051 | <filename>iot/rmq/config.py<gh_stars>0
import os
import sys
from ConfigParser import ConfigParser
def configLoader(filename):
if os.path.exists(filename):
_config = ConfigParser()
_config.read(filename)
return _config._sections
else:
return False
| 2.453125 | 2 |
Engine/Metrics.py | teridax5/Minesweeper | 0 | 12797052 | from random import sample
n = 9
class MineMap:
def __init__(self, size=9, mine_coefficient=0.1):
self.size = size
self.field = [[0 for _ in range(self.size)] for _ in range(self.size)]
self.mines = int((self.size**2) * mine_coefficient)
self.shuffle()
def shuffle(self):
counter = self.mines
for idx1 in range(self.size):
for idx2 in range(self.size):
if counter > 0:
self.field[idx1][idx2] = '*'
counter -= 1
epoch = 1000
while epoch > 0:
idx1, idx2, idx3, idx4 = sample([num for num in
range(self.size)], 4)
self.field[idx1][idx2], self.field[idx3][idx4] = \
self.field[idx3][idx4], self.field[idx1][idx2]
epoch -= 1
def checking(self):
idx1 = 0
idx2 = 0
# searching neighbours
while idx1 in range(self.size) and idx2 in range(self.size):
neighbours = []
if idx1-1 >= 0:
if idx2 - 1 >= 0:
neighbours.append([idx1-1, idx2-1])
if idx2 + 1 < self.size:
neighbours.append([idx1-1, idx2+1])
neighbours.append([idx1-1, idx2])
if idx1+1 < self.size:
if idx2 - 1 >= 0:
neighbours.append([idx1+1, idx2-1])
if idx2 + 1 < self.size:
neighbours.append([idx1+1, idx2+1])
neighbours.append([idx1+1, idx2])
if idx2 - 1 >= 0:
neighbours.append([idx1, idx2-1])
if idx2 + 1 < self.size:
neighbours.append([idx1, idx2+1])
# checking neighbours
if self.field[idx1][idx2] == '*':
for neighbour in neighbours:
n_idx1, n_idx2 = neighbour[0], neighbour[1]
if self.field[n_idx1][n_idx2] != '*':
self.field[n_idx1][n_idx2] += 1
idx2 += 1
if idx2 > self.size-1:
idx2 = 0
idx1 += 1
return self.field
def __repr__(self):
text = ''
for string in self.field:
text += ' '.join([str(elem) for elem in string])
text += '\n'
return text
def __int__(self):
return self.mines
def main():
new_map = MineMap()
new_map.checking()
print(new_map, '\n', int(new_map), sep='')
if __name__ == '__main__':
main()
| 3.125 | 3 |
tests/test_typing.py | kazh98/fpmlib | 0 | 12797053 | #!/usr/bin/env python3
import unittest
from fpmlib.typing import *
| 1.179688 | 1 |
reachy_pyluos_hal/device.py | pollen-robotics/reachy_pyluos_hal | 0 | 12797054 | """Device type annotation."""
from typing import Union
from .dynamixel import DynamixelMotor
from .fan import Fan
from .force_sensor import ForceSensor
from .joint import Joint
from .orbita import OrbitaActuator
Device = Union[Fan, Joint, DynamixelMotor, ForceSensor, OrbitaActuator]
| 1.578125 | 2 |
problems/linked-lists/linked_list_cycle.py | andrenbrandao/algorithm-problems | 0 | 12797055 | <reponame>andrenbrandao/algorithm-problems
"""
LeetCode 141. Linked List Cycle
https://leetcode.com/problems/linked-list-cycle/
"""
# O(n) time
# O(n) memory
from typing import Optional
# Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
def hasCycle(self, head: Optional[ListNode]) -> bool:
currentNode = head
visitedNodes = {}
while currentNode is not None:
if visitedNodes.get(currentNode) is not None:
return True
visitedNodes[currentNode] = True
currentNode = currentNode.next
return False
| 3.59375 | 4 |
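The solution above trades O(n) extra memory for simplicity by remembering every visited node. A constant-memory alternative, not part of the original file, is Floyd's tortoise-and-hare cycle detection; a minimal sketch could look like this:

```python
# Hypothetical alternative to the dictionary-based solution above:
# Floyd's two-pointer cycle detection uses O(1) extra memory.
class SolutionConstantSpace:
    def hasCycle(self, head) -> bool:
        slow = fast = head
        while fast is not None and fast.next is not None:
            slow = slow.next           # advances one node per step
            fast = fast.next.next      # advances two nodes per step
            if slow is fast:           # the pointers can only meet inside a cycle
                return True
        return False
```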
setup.py | nitros12/pyumlgen | 0 | 12797056 | <filename>setup.py
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
with open(path.join(here, "README.md")) as f:
long_desc = f.read()
setup(
name="pyumlgen",
version="0.1.6",
description="Generate UML diagrams with type information from python modules",
author="<NAME>",
packages=find_packages(),
entry_points={
"console_scripts": [
"pyumlgen=pyumlgen:main"
]
}
)
| 1.515625 | 2 |
tests/unit/shipping/discount_tests.py | dimka665/django-oscar | 0 | 12797057 | from decimal import Decimal as D
from django.test import TestCase
from nose.plugins.attrib import attr
import mock
from oscar.apps.shipping import methods
from oscar.apps.shipping.models import OrderAndItemCharges
@attr('shipping')
class TestStandardMethods(TestCase):
def setUp(self):
self.non_discount_methods = [
methods.Free(),
methods.FixedPrice(D('10.00'), D('10.00')),
OrderAndItemCharges(price_per_order=D('5.00'),
price_per_item=D('1.00'))]
def test_have_is_discounted_property(self):
for method in self.non_discount_methods:
self.assertFalse(method.is_discounted)
class TestDiscountingMethodsWithoutTax(TestCase):
def setUp(self):
self.base_method = methods.FixedPrice(D('10.00'))
offer = mock.Mock()
offer.shipping_discount = mock.Mock(
return_value=D('5.00'))
self.method = methods.TaxExclusiveOfferDiscount(
self.base_method, offer)
def test_delegates_properties_onto_wrapped_method(self):
self.assertFalse(self.method.is_tax_known)
self.assertEqual(
self.method.charge_excl_tax_before_discount, D('10.00'))
self.assertEqual(self.method.code, self.base_method.code)
self.assertEqual(self.method.name, self.base_method.name)
self.assertEqual(self.method.description,
self.base_method.description)
def test_discounts_charge(self):
self.assertEqual(self.method.charge_excl_tax, D('5.00'))
def test_correctly_sets_tax(self):
self.method.tax = D('2.00')
self.assertTrue(self.method.is_tax_known)
self.assertEqual(self.method.charge_incl_tax, D('7.00'))
| 2.171875 | 2 |
django_comments/django_comments/views.py | anistark/django-react-comments | 6 | 12797058 | <gh_stars>1-10
from django.shortcuts import render_to_response
# from django.template import RequestContext
from django.views.decorators.csrf import csrf_exempt
from comment_react.models import Comments
from django.http import HttpResponse
import json
import urllib2
from django.http import QueryDict
@csrf_exempt
def home(request):
# context_instance = RequestContext(request)
if request.method == 'GET':
return render_to_response('index.html')
elif request.method == 'POST':
queryDict = QueryDict()
queryDict = request.POST
a = dict(queryDict.lists())
comment = str(a.get('text')).split("'")[1].decode('utf-8')
author = str(a.get('author')).split("'")[1].decode('utf-8')
print 'comment - ' + comment
print 'author - ' + author
p1 = Comments(user_id=author,
post_id='1',
comment=comment)
p1.save()
return render_to_response('index.html')
@csrf_exempt
def getComments(request):
data = Comments.objects.all()
comments = []
for obj in data:
commentCurr = {
"author": obj.user_id,
"text": urllib2.unquote(obj.comment.replace('+', ' ')),
"post_id": obj.post_id,
}
comments.append(commentCurr)
comments.append({'status_code': 'success'})
return HttpResponse(json.dumps(comments), content_type="application/json")
| 2.203125 | 2 |
model.py | jouleffect/SEIRD-Epidemics-Simulator | 0 | 12797059 | # Network
import numpy as np
import pandas as pd
import simulator
import random
from igraph import *
import matplotlib.pyplot as plt
class Network():
"""docstring for Network"""
def __init__(self, simulator):
# Generate a random graph
self.g = Graph.Erdos_Renyi(simulator.num_nodi,simulator.p_link)
# Initialize the time-step vectors and the epidemic-state vectors
self.t_state = np.zeros((simulator.num_nodi,1))
self.e_state = np.zeros((simulator.num_nodi,1),dtype=np.int8)
# random initial assignment of the exposed nodes
np.put(self.e_state,np.random.choice(range(simulator.num_nodi*1), simulator.exp0, replace=False),1)
self.states = {} # Dictionary of states
self.data = pd.DataFrame(columns=["index","days","exposed","infected","severe infected","recovered","dead","susceptible","total"]) # States table
def update_states(self,i,simulator): # Update the states
"""List of states:
- Susceptible = 0
- Exposed = 1
- Infected = 2
- Severe Infected = 3
- Recovered = 4
- Dead = 5
"""
# Copy the epidemic states from the state arrays into the dictionary
self.states = { 'exposed':np.where(np.copy(self.e_state)==1,self.e_state,0),
'infected':np.where(np.copy(self.e_state)==2,self.e_state,0),
'recovered':np.where(np.copy(self.e_state)==4,self.e_state,0),
'severe_infected':np.where(np.copy(self.e_state)==3,self.e_state,0),
'dead':np.where(np.copy(self.e_state)==5,self.e_state,0),
'susceptible':(simulator.num_nodi - np.count_nonzero(np.copy(self.e_state))),
'total_cases':np.count_nonzero(np.copy(self.e_state)) }
# Insert the total of each epidemic state into the dataframe
self.data.loc[i,:] = [i, i*simulator.dt_state,np.count_nonzero(self.states['exposed']), np.count_nonzero(self.states['infected']),
np.count_nonzero(self.states['severe_infected']), np.count_nonzero(self.states['recovered']),
np.count_nonzero(self.states['dead']), self.states['susceptible'], self.states['total_cases']]
#print(self.data)
def plot(self,i,simulator): # Create the plots
plt.clf()
ax = plt.gca()
self.data.plot(x = 'days', y = 'susceptible', kind = 'line', color = 'cyan', ax = ax)
self.data.plot(x = 'days', y = 'exposed', kind = 'line', color = 'yellow', ax = ax)
self.data.plot(x = 'days', y = 'infected', kind = 'line', color = 'blue', ax = ax)
self.data.plot(x = 'days', y = 'severe infected', kind = 'line', color = 'magenta', ax = ax)
self.data.plot(x = 'days', y = 'recovered', kind = 'line', color = 'green', ax = ax)
self.data.plot(x = 'days', y = 'dead', kind = 'line', color = 'brown', ax = ax)
plt.title('link_p: {}; exp0: {}; t_inc: {}; t_inf: {}\n alpha: {}; beta: {}; gamma: {}'.format(simulator.p_link, simulator.exp0,simulator.t_exp,simulator.t_inf,simulator.alfa,simulator.beta,simulator.gamma))
plt.xlabel('Time (days)')
plt.ylabel('Number of nodes')
plt.savefig('./plots/states.png')
def update_nodes(self,i,simulator): # Update the network nodes (remove dead nodes and isolate severely infected nodes)
pass
def get_new_cases(self,i,simulator): # New cases (update the nodes that spread the epidemic)
# Find the neighbours of the exposed, infected and severely infected nodes
# Compute the probability that the neighbours get infected at rate alpha
# Exposed nodes
n_exp = np.array(np.nonzero(self.states['exposed'])[0])
# Infected nodes
n_inf = np.array(np.nonzero(self.states['infected'])[0])
# Severely infected nodes
n_g_inf = np.array(np.nonzero(self.states['severe_infected'])[0])
# Recovered nodes
n_rec = np.array(np.nonzero(self.states['recovered'])[0])
# Dead nodes
n_dead = np.array(np.nonzero(self.states['dead'])[0])
new_cases = []
# Loop over the exposed, infected and severely infected nodes and find their susceptible neighbours, which get infected at rate alpha
contaggiosi = np.concatenate((n_exp,n_inf,n_g_inf), axis=None)
for x in contaggiosi:
for n in self.g.neighbors(x):
Rand = np.random.random()
# Condition for becoming a new exposed case (passes the probability check, is not already contagious, recovered or dead, and is not a duplicate)
if (Rand<simulator.alfa) and (n not in contaggiosi) and (n not in n_rec) and (n not in n_dead) and (n not in new_cases):
new_cases.append(n)
#print(new_cases)
return new_cases
| 2.96875 | 3 |
shortener/admin.py | shubhamnishad97/URL-Shortener | 7 | 12797060 | from django.contrib import admin
# Register your models here.
from .models import shortenedUrl
admin.site.register(shortenedUrl) | 1.34375 | 1 |
binilla/windows/filedialog.py | delan/binilla | 1 | 12797061 | <reponame>delan/binilla<gh_stars>1-10
'''
This module is our wrapper for filedialog so we can use a better one
if it is available.
The filepicker for Tkinter on Linux is just... ouch.
So, this is the alternative solution.
'''
import sys
from pathlib import Path
USE_TK_DIALOG = True
if "linux" in sys.platform:
import subprocess
import re
from os.path import join
def _fix_output(output):
'''
Removes miscellaneous stdout output that can happen with Mesa drivers.
Only accept absolute paths that start with the root separator.
'''
return list(filter(lambda a : a.startswith("/"), output.split("\n")))
# Name of the command of the native filedialog if we have one.
DIALOG_NAME = ""
# Used for getting the width and height of the primary monitor from the
# xrandr command output. Tests here: https://regex101.com/r/clpmtZ/1
SCREEN_RES_REGEX = re.compile(r'primary (\d+)x(\d+)')
screen_resolution = (0,0,)
# Try to get the primary screen resolution from xrandr.
try:
screen_resolution = SCREEN_RES_REGEX.search(
subprocess.run(
"xrandr",
capture_output=True, universal_newlines=True).stdout
).group(1,2)
screen_resolution = (
int(screen_resolution[0]),
int(screen_resolution[1]),
)
except Exception:
print("Couldn't retrieve screen resolution.")
# Figure out what native file dialog we can use.
try:
# Yad is the best, please have yad.
if subprocess.run(["yad", "--help"], capture_output=True).returncode == 0:
DIALOG_NAME = "yad"
except Exception:
try:
# kdialog is second best, give us that.
if subprocess.run("kdialog", capture_output=True).returncode == 0:
DIALOG_NAME = "kdialog"
except Exception:
try:
# This one is nice. But it has a tendency to keep opening the
# recent files folder. And I don't like that >:P
if subprocess.run("zenity", capture_output=True).returncode == 255:
DIALOG_NAME = "zenity"
except Exception:
pass
# These are the functions to wrap zenity and yad.
if DIALOG_NAME in ("yad", "zenity"):
# Construct the common arguments for the calling of zenity or yad.
ZENITY_COMMON_ARGS = [
"--file-selection",
"--separator=\n",
]
# If any of these arguments are present zenity won't open.
if DIALOG_NAME == "yad":
ZENITY_COMMON_ARGS.extend([
"--on-top",
"--mouse",
])
# Yad likes to open with a really small window size. Work around that.
if screen_resolution[0] and screen_resolution[1]:
ZENITY_COMMON_ARGS.extend([
"--width=%d" % (int(screen_resolution[0]/1.5)),
"--height=%d" % (int(screen_resolution[1]/1.5)),
])
def _parse_file_filters(the_filters):
'''
Parses the tkinter file filters into a set of filters for zenity.
'''
# Filters look like "name (extension)" to users.
# Filters get a * prepended so they actually work.
return list(map(lambda a : '--file-filter=%s (%s) | *%s' %
(a[0], a[1], a[1].lstrip('*')), the_filters))
def askopenfilename(
title="Open file", initialdir=str(Path.cwd()),
filetypes=(('All', '*'),), **kw):
'''
Tkinter style wrapper for zenity --file-selection.
Arguments listed at the top are the only ones actually accounted for.
The rest are discarded.
'''
res = subprocess.run(
[DIALOG_NAME,
"--title=%s" % (title),
*ZENITY_COMMON_ARGS,
"--filename=%s/" % (initialdir),
*_parse_file_filters(filetypes)],
capture_output=True, universal_newlines=True)
try:
return _fix_output(res.stdout)[0]
except IndexError:
return ""
def askopenfilenames(
title="Open files", initialdir=str(Path.cwd()),
filetypes=(('All', '*'),), **kw):
'''
Tkinter style wrapper for zenity --file-selection.
Arguments listed at the top are the only ones actually accounted for.
The rest are discarded.
'''
res = subprocess.run(
[DIALOG_NAME,
"--title=%s" % (title),
*ZENITY_COMMON_ARGS,
# Get multiple items, put them on different lines for parsing.
"--multiple",
"--filename=%s/%s" % (initialdir, filetypes[0][1]),
*_parse_file_filters(filetypes)],
capture_output=True, universal_newlines=True)
return _fix_output(res.stdout)
def askdirectory(
title="Choose folder", initialdir=str(Path.cwd()), **kw):
'''
Tkinter style wrapper for zenity --file-selection.
Arguments listed at the top are the only ones actually accounted for.
The rest are absolutely trashed.
'''
res = subprocess.run(
[DIALOG_NAME,
"--title=%s" % (title),
*ZENITY_COMMON_ARGS,
# Get a directory.
"--directory",
"--filename=%s/" % (initialdir)],
capture_output=True, universal_newlines=True)
try:
return _fix_output(res.stdout)[0]
except IndexError:
return ""
def asksaveasfilename(
title="Open file", initialdir=str(Path.cwd()),
filetypes=(('All', '*'),),
defaultextension="", **kw):
'''
Tkinter style wrapper for zenity --file-selection.
Arguments listed at the top are the only ones actually accounted for.
The rest are discarded.
'''
res = subprocess.run(
[DIALOG_NAME,
"--title=%s" % (title),
*ZENITY_COMMON_ARGS,
# Start in save mode.
"--save", "--confirm-overwrite",
"--filename=%s/%s" % (initialdir, defaultextension),
*_parse_file_filters(filetypes)],
capture_output=True, universal_newlines=True)
try:
return _fix_output(res.stdout)[0]
except IndexError:
return ""
USE_TK_DIALOG = False
# These are the functions used for kdialog.
if DIALOG_NAME == "kdialog":
# capture_output to hide it from our terminal.
if subprocess.run("kdialog", capture_output=True).returncode != 0:
# Hacky way to jump into the except block.
raise ValueError
def _parse_file_filters(the_filters):
'''
Parses the tkinter file filters into a set of filters for kdialog.
'''
# This sucks right now.
# We can't get file type descriptions into kdialog.
# Still, anything better than the default filedialog
# from tkinter on Linux.
# This joins all the filters like so: "*.mp3|*.ogg|*.wav"
# If we weren't supplying * as a filter everywhere I would have
# done a "thing1 thing2 thing3 ( *.ext1 *.ext2 *.ext3 )" filter.
# kdialog sadly isn't the nicest thing ever. But we have something at
# least.
return "|".join(map(lambda a : "*%s" % a[1].lstrip('*'), the_filters))
def askopenfilename(
title="Open file", initialdir=str(Path.cwd()),
filetypes=(('All', '*'),), **kw):
'''
Tkinter style wrapper for kdialog --getopenfilename.
Arguments listed at the top are the only ones actually accounted for.
The rest are discarded.
'''
res = subprocess.run(
[DIALOG_NAME,
"--title", str(title),
"--getopenfilename",
str(initialdir), _parse_file_filters(filetypes)],
capture_output=True, universal_newlines=True)
try:
return _fix_output(res.stdout)[0]
except IndexError:
return ""
def askopenfilenames(
title="Open files", initialdir=str(Path.cwd()),
filetypes=(('All', '*'),), **kw):
'''
Tkinter style wrapper for kdialog --getopenfilename.
Arguments listed at the top are the only ones actually accounted for.
The rest are discarded.
'''
res = subprocess.run(
[DIALOG_NAME,
"--title", str(title),
# Get multiple items, put them on different lines for parsing.
"--multiple", "--separate-output",
"--getopenfilename",
str(initialdir), _parse_file_filters(filetypes)],
capture_output=True, universal_newlines=True)
return _fix_output(res.stdout)
def askdirectory(
title="Choose folder", initialdir=str(Path.cwd()), **kw):
'''
Tkinter style wrapper for kdialog --getexistingdirectory.
Arguments listed at the top are the only ones actually accounted for.
The rest are absolutely trashed.
'''
res = subprocess.run(
[DIALOG_NAME,
"--title", str(title),
"--getexistingdirectory",
str(initialdir)],
capture_output=True, universal_newlines=True)
try:
return _fix_output(res.stdout)[0]
except IndexError:
return ""
def asksaveasfilename(
title="Open file", initialdir=str(Path.cwd()),
filetypes=(('All', '*'),),
defaultextension="", **kw):
'''
Tkinter style wrapper for kdialog --getsavefilename.
Arguments listed at the top are the only ones actually accounted for.
The rest are discarded.
'''
res = subprocess.run(
[DIALOG_NAME,
"--title", str(title),
"--getsavefilename",
# Joining these causes the extension to appear in the name box
join(str(initialdir), defaultextension),
_parse_file_filters(filetypes)],
capture_output=True, universal_newlines=True)
try:
return _fix_output(res.stdout)[0]
except IndexError:
return ""
USE_TK_DIALOG = False
if DIALOG_NAME and not USE_TK_DIALOG:
print("Using native %s for filedialogs." % DIALOG_NAME)
# Fallback for Linux, default for mac and Windows.
if USE_TK_DIALOG:
if "linux" in sys.platform:
from tkinter import messagebox
error = ("No supported native filedialog package installed.",
"The default tkinter filedialog for Linux does not work\n"
"properly with symlinks.\n\n"
"Please install either yad, kdialog, or zenity.\n"
"(These suggestions are ordered based on how well they work.)")
print("\n".join(error))
def no_native_file_dialog_error():
'''
Only exists if there is no native file dialog.
Displays an error warning the user when called.
'''
messagebox.showerror(error[0], error[1])
from tkinter.filedialog import ( askopenfilename, askopenfilenames,
askdirectory, asksaveasfilename )
del sys
| 2.5625 | 3 |
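For reference, the `_parse_file_filters` helpers above turn Tkinter-style filter tuples into command-line filter arguments. A small standalone restatement (hypothetical filter values, not from the module) of what the two variants produce:

```python
# Hypothetical restatement of the filter construction shown above.
filetypes = (('Images', '*.png'), ('All', '*'))
zenity_filters = ['--file-filter=%s (%s) | *%s' % (name, ext, ext.lstrip('*'))
                  for name, ext in filetypes]
# -> ['--file-filter=Images (*.png) | *.png', '--file-filter=All (*) | *']
kdialog_filter = "|".join("*%s" % ext.lstrip('*') for _, ext in filetypes)
# -> '*.png|*'
```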
main.py | sagaragarwal94/TwitterScraper | 15 | 12797062 | from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from future import standard_library
import twitter_scraper as twitter_scraper
import json
import codecs
standard_library.install_aliases()
def main():
def build_list(t, tweet_list):
tweet_list.append(
{
"username": t.username,
"retweet": t.retweets,
"tweet": t.text,
"mentions": t.mentions,
"hashtags": t.hashtags,
"date": t.date.__str__()
}
)
return tweet_list
def print_to_file(data, filename):
try:
with codecs.open(filename + '.json', 'a', 'utf-8') as f:
f.write(data)
return True
except BaseException as e:
print(e)
search_term = '@meshivammathur'
search_params = twitter_scraper.scraper.SearchParams().set_username(search_term).set_max_tweets(400)
tweets = twitter_scraper.scraper.Scraper.get_tweets(search_params)
t_list = []
for tweet in tweets:
t_list = build_list(tweet, t_list)
json_data = json.dumps(t_list, indent=4)
print_to_file(json_data, search_term)
if __name__ == '__main__':
main()
| 2.796875 | 3 |
sample_app/_get_banned_users.py | encyphered/accelbyte-python-sdk | 0 | 12797063 | import yaml
import click
from accelbyte_py_sdk.api.iam import admin_get_banned_users_v3
from ._utils import login_as as login_as_internal
@click.command()
@click.argument("active_only", type=bool)
@click.argument("ban_type")
@click.argument("offset", type=int)
@click.argument("limit", type=int)
@click.option("--namespace")
@click.option("--doc", type=bool)
@click.option("--login_as", type=click.Choice(["client", "user"], case_sensitive=False))
def get_banned_users(
active_only,
ban_type,
limit,
offset,
namespace,
doc,
login_as,
):
login_as_internal(login_as)
if doc:
click.echo(admin_get_banned_users_v3.__doc__)
result, error = admin_get_banned_users_v3(
active_only=active_only,
ban_type=ban_type,
offset=offset,
limit=limit,
namespace=namespace,
)
if error:
raise Exception(str(error))
click.echo("Get banned users success.")
click.echo(yaml.safe_dump(result.to_dict()))
| 1.890625 | 2 |
2015/08.py | bernikr/advent-of-code | 1 | 12797064 | from aocd import get_data
def part1(a):
return sum(len(l) - len(l.encode('utf-8').decode('unicode_escape')) + 2 for l in a)
def part2(a):
return sum(len(l.encode('unicode_escape').decode('utf-8').replace('"', '\\"')) - len(l) + 2 for l in a)
if __name__ == '__main__':
data = get_data(day=8, year=2015)
inp = data.splitlines()
print(part1(inp))
print(part2(inp))
| 3 | 3 |
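The two one-liners above lean on Python's escape-codec round-trips to measure string-literal overhead. A tiny worked example (hypothetical line, not real puzzle input) of the same arithmetic:

```python
# Hypothetical illustration of the length bookkeeping in part1/part2 above.
line = r'"aaa\"aaa"'                                      # 10 characters of "code"
decoded = line.encode('utf-8').decode('unicode_escape')   # resolves \" -> ", 9 chars left
assert len(line) - len(decoded) + 2 == 3                  # part 1: 10 code chars vs 7 in memory
encoded = line.encode('unicode_escape').decode('utf-8').replace('"', '\\"')
assert len(encoded) - len(line) + 2 == 6                  # part 2: 16 re-encoded chars vs 10
```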
textscroll_animation.py | mfinkle/circuitpython_animationextras | 0 | 12797065 | from adafruit_led_animation.animation import Animation
import adafruit_framebuf
class TextScroll(Animation):
def __init__(self, grid_object, speed, text, color, font_name='font5x8.bin', name=None):
self._text = text
self._font_name = font_name
self._frame = 0
# We're only using the frame buffer for on/off information, not color
self._buffer = bytearray(grid_object.width * grid_object.height)
self._fb = adafruit_framebuf.FrameBuffer(self._buffer, grid_object.width, grid_object.height, buf_format=adafruit_framebuf.MVLSB)
super().__init__(grid_object, speed, color, name=name)
on_cycle_complete_supported = True
def _get_color(self, x, y):
return self.color
def draw(self):
self._fb.fill(0x000000)
self._fb.text(self._text, self.pixel_object.width - self._frame, 0, 0xFFFFFF, font_name=self._font_name)
# Cheating to get the character width
char_width = self._fb._font.font_width
for y in range(self.pixel_object.height):
for x in range(self.pixel_object.width):
self.pixel_object[x, y] = self._get_color(x, y) if self._fb.pixel(x, y) else (0, 0, 0)
self._frame += 1
if self._frame >= len(self._text) * (char_width + 1) + self.pixel_object.width:
# Cycle completes after text scrolls completely out of view on the display
self.cycle_complete = True
self._frame = 0
def reset(self):
self._frame = 0 | 3.03125 | 3 |
models/cnn_gru/cnn_gru.py | RyanMokarian/Solar_Irradiance_Forecast_using_Conv3_LSTM_GRU_Attention | 4 | 12797066 | import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, models, activations
from models.cnn_gru.cnn import CNN, Encoder
class CnnGru(tf.keras.Model):
def __init__(self, seq_len):
super().__init__()
self.seq_len = seq_len
self.cnn = CNN()
ip_dims = self.cnn.compute_output_shape((None,None,None,5))[-1]
self.encoder = Encoder(self.cnn,ip_dims)
self.flatten = layers.Flatten()
self.drop = layers.Dropout(0.3)
self.fc1 = layers.Dense(128)
self.fc2 = layers.Dense(4)
def call(self, x, training=False):
x,_ = self.encoder(x)
x = self.flatten(x)
x = self.drop(x,training=training)
x = self.fc1(x)
x = tf.nn.relu(x)
x = self.fc2(x)
return x | 2.84375 | 3 |
config.py | mplewis/trianglebot | 0 | 12797067 | <reponame>mplewis/trianglebot
import re
from os import environ
# URL to the Groupme API endpoint
API_URL = 'https://api.groupme.com/v3/bots/post'
# URL to the Google Sheets JSON API endpoint
SHEET_BASE_URL = ('https://spreadsheets.google.com/feeds/cells/%s/%s'
'/public/values?alt=json')
SHEET_ID_MATCHER = r'spreadsheets\/d\/(.+)\/'
# Set by Heroku variables
COMMAND_PREFIX = environ['BOT_NAME'] + ' '
BOT_ID = environ['GROUPME_BOT_ID']
WORKSHEET_KEYS_TO_SCRAPE = environ['WORKSHEET_KEYS'].split(',')
INDEX_REDIRECT_URL = 'https://github.com/mplewis/trianglebot'
SHEET_KEY = re.search(SHEET_ID_MATCHER, environ['DOCUMENT_URL']).group(1)
| 2.390625 | 2 |
chia/types/spend_bundle_conditions.py | nur-azhar/chia-blockchain | 1 | 12797068 | <reponame>nur-azhar/chia-blockchain
from chia_rs import Spend, SpendBundleConditions
__all__ = ["Spend", "SpendBundleConditions"]
| 0.960938 | 1 |
helper/f2g_omim_mapping.py | PEDIA-Charite/PEDIA-workflow | 9 | 12797069 | '''Create an omim mapping based on information from Face2Gene using the
Face2Gene library.'''
from pprint import pprint
import json
from lib.api.face2gene import Face2Gene
from lib.model.config import ConfigManager
config_data = ConfigManager()
f2g_session = Face2Gene(config=config_data)
s_list = f2g_session.browse_all_syndromes()
with open("f2g_library_dump.json", "w") as f2g_dump:
json.dump(s_list, f2g_dump, indent=4)
| 2.71875 | 3 |
tests/test_shadow_maya_system.py | rBrenick/shadow-maya | 1 | 12797070 | from maya import cmds
from base import MayaBaseTestCase
import shadow_maya.shadow_maya_system as system
class TestShadowMayaSystem(MayaBaseTestCase):
def test_system(self):
"""Test system in some fashion"""
return True
| 1.984375 | 2 |
aqua_bot/aqua_comandos.py | OHomemParede/aqua_bot_project | 0 | 12797071 | from googletrans import LANGUAGES
from googletrans import Translator
translator = Translator()
async def ajuda(message, comandos :dict):
msg = "```\n"
for c in comandos.keys():
msg += comandos[c][1]+'\n'
msg += "```"
await message.channel.send(msg)
async def traduz(message, _):
msg = message.content.strip().lower().split()
if len(msg)<4:
return Exception
cod1 = msg[-1]
cod2 = msg[-2]
if (len(cod1) > 2 and cod1 in list(LANGUAGES.values())):
for k in LANGUAGES.keys():
if LANGUAGES[k] == cod1:
cod1 = k
elif (len(cod1) == 2 and cod1 not in list(LANGUAGES.keys())):
return Exception
if (len(cod2) > 2 and cod2 in list(LANGUAGES.values())):
for k in LANGUAGES.keys():
if LANGUAGES[k] == cod2:
cod2 = k
elif (len(cod2) == 2 and cod2 not in list(LANGUAGES.keys())):
return Exception
msg = ' '.join(msg[1:-2])
out = translator.translate(text=msg, dest=cod1, src=cod2).text
await message.channel.send(out)
async def linguas(message, _):
msg = "```\n"
for k in LANGUAGES.keys():
msg += str(k)+' - '+str(LANGUAGES[k])+'\n'
msg += "```"
await message.channel.send(msg)
| 2.921875 | 3 |
src/ape/managers/accounts.py | unparalleled-js/ape | 210 | 12797072 | from typing import Dict, Iterator, List, Type
from dataclassy import dataclass
from pluggy import PluginManager # type: ignore
from ape.api.accounts import AccountAPI, AccountContainerAPI, TestAccountAPI
from ape.types import AddressType
from ape.utils import cached_property, singledispatchmethod
from .config import ConfigManager
from .converters import ConversionManager
from .networks import NetworkManager
@dataclass
class AccountManager:
"""
The ``AccountManager`` is a container of containers for
:class:`~ape.api.accounts.AccountAPI` objects.
All containers must subclass :class:`~ape.api.accounts.AccountContainerAPI`
and are treated as singletons.
Import the accounts manager singleton from the root ``ape`` namespace.
Usage example::
from ape import accounts # "accounts" is the AccountManager singleton
my_accounts = accounts.load("dev")
"""
config: ConfigManager
converters: ConversionManager
plugin_manager: PluginManager
network_manager: NetworkManager
@cached_property
def containers(self) -> Dict[str, AccountContainerAPI]:
"""
The list of all :class:`~ape.api.accounts.AccountContainerAPI` instances
across all installed plugins.
Returns:
dict[str, :class:`~ape.api.accounts.AccountContainerAPI`]
"""
containers = {}
data_folder = self.config.DATA_FOLDER
data_folder.mkdir(exist_ok=True)
for plugin_name, (container_type, account_type) in self.plugin_manager.account_types:
# Ignore containers that contain test accounts.
if issubclass(account_type, TestAccountAPI):
continue
accounts_folder = data_folder / plugin_name
accounts_folder.mkdir(exist_ok=True)
containers[plugin_name] = container_type(accounts_folder, account_type, self.config)
return containers
@property
def aliases(self) -> Iterator[str]:
"""
All account aliases from every account-related plugin. The "alias"
is part of the :class:`~ape.api.accounts.AccountAPI`. Use the
account alias to load an account using method
:meth:`~ape.managers.accounts.AccountManager.load`.
Returns:
Iterator[str]
"""
for container in self.containers.values():
yield from container.aliases
def get_accounts_by_type(self, type_: Type[AccountAPI]) -> List[AccountAPI]:
"""
Get a list of accounts by their type.
Args:
type_ (Type[:class:`~ape.api.accounts.AccountAPI`]): The type of account
to get.
Returns:
List[:class:`~ape.api.accounts.AccountAPI`]
"""
accounts_with_type = []
for account in self:
if isinstance(account, type_):
self._inject_provider(account)
accounts_with_type.append(account)
return accounts_with_type
def __len__(self) -> int:
"""
The number of accounts managed by all account plugins.
Returns:
int
"""
return sum(len(container) for container in self.containers.values())
def __iter__(self) -> Iterator[AccountAPI]:
for container in self.containers.values():
for account in container:
self._inject_provider(account)
yield account
def __repr__(self) -> str:
return "[" + ", ".join(repr(a) for a in self) + "]"
@cached_property
def test_accounts(self) -> List[TestAccountAPI]:
"""
Accounts generated from the configured test mnemonic. These accounts
are also the subject of a fixture available in the ``test`` plugin called
``accounts``. Configure these accounts, such as the mnemonic and / or
number-of-accounts using the ``test`` section of the `ape-config.yaml` file.
Usage example::
def test_my_contract(accounts):
# The "accounts" fixture uses the AccountsManager.test_accounts()
sender = accounts[0]
receiver = accounts[1]
...
Returns:
List[:class:`~ape.api.accounts.TestAccountAPI`]
"""
accounts = []
for plugin_name, (container_type, account_type) in self.plugin_manager.account_types:
if not issubclass(account_type, TestAccountAPI):
continue
container = container_type(None, account_type, self.config)
for account in container:
self._inject_provider(account)
accounts.append(account)
return accounts
def load(self, alias: str) -> AccountAPI:
"""
Get an account by its alias.
Raises:
IndexError: When there is no local account with the given alias.
Returns:
:class:`~ape.api.accounts.AccountAPI`
"""
if alias == "":
raise ValueError("Cannot use empty string as alias!")
for account in self:
if account.alias and account.alias == alias:
self._inject_provider(account)
return account
raise IndexError(f"No account with alias '{alias}'.")
@singledispatchmethod
def __getitem__(self, account_id) -> AccountAPI:
raise NotImplementedError(f"Cannot use {type(account_id)} as account ID.")
@__getitem__.register
def __getitem_int(self, account_id: int) -> AccountAPI:
"""
Get an account by index. For example, when you do the CLI command
``ape accounts list --all``, you will see a list of enumerated accounts
by their indices. Use this method as a quicker, ad-hoc way to get an
account from that index. **NOTE**: It is generally preferred to use
:meth:`~ape.managers.accounts.AccountManager.load` or
:meth:`~ape.managers.accounts.AccountManager.__getitem_str`.
Returns:
:class:`~ape.api.accounts.AccountAPI`
"""
for idx, account in enumerate(self.__iter__()):
if account_id == idx:
self._inject_provider(account)
return account
raise IndexError(f"No account at index '{account_id}'.")
@__getitem__.register
def __getitem_str(self, account_str: str) -> AccountAPI:
"""
Get an account by address.
Raises:
IndexError: When there is no local account with the given address.
Returns:
:class:`~ape.api.accounts.AccountAPI`
"""
account_id = self.converters.convert(account_str, AddressType)
for container in self.containers.values():
if account_id in container:
account = container[account_id]
self._inject_provider(account)
return account
raise IndexError(f"No account with address '{account_id}'.")
def __contains__(self, address: AddressType) -> bool:
"""
Determine if the given address matches an account in ``ape``.
Args:
address (:class:`~ape.types.AddressType`): The address to check.
Returns:
bool: ``True`` when the given address is found.
"""
return any(address in container for container in self.containers.values())
def _inject_provider(self, account: AccountAPI):
if self.network_manager.active_provider is not None:
account.provider = self.network_manager.active_provider
| 2.140625 | 2 |
11-20/Euler_12.py | Drudoo/Euler | 0 | 12797073 | <reponame>Drudoo/Euler<gh_stars>0
# Highly divisible triangular number
# ------------------------------------------------- #
# Return triangle number, return number of factors.
# While factors less than 500, get the triangle number,
# then the factors of that number.
# ------------------------------------------------- #
def t(n):
return (n*(n+1))/2
def tau(n):
sqrt,t=int(n**0.5),0
for factor in range(1, sqrt+1):
if n % factor == 0:
t+=2
if sqrt*sqrt==n:
t -=1
return t
result,d,num,_tau=0,0,0,0
while _tau < 500:
_t=t(d)
_tau=tau(_t)
if result < _tau:
result = _tau
num=d
d+=1
print "Project Euler #12:"
print "The value of the first triangle number is", t(num)
| 3.359375 | 3 |
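The `tau` helper above counts divisors by pairing each factor found up to the square root of n with its complement, subtracting one when n is a perfect square. A quick Python 3 sanity check of that idea (values chosen for illustration only):

```python
# Hypothetical Python 3 restatement of the divisor-counting idea used by tau() above.
def count_divisors(n):
    root = int(n ** 0.5)
    count = 0
    for factor in range(1, root + 1):
        if n % factor == 0:
            count += 2            # counts factor and n // factor as a pair
    if root * root == n:
        count -= 1                # a perfect square's root was counted twice
    return count

assert count_divisors(28) == 6    # 1, 2, 4, 7, 14, 28 -- t(7) = 28
assert count_divisors(36) == 9    # perfect-square case: 1, 2, 3, 4, 6, 9, 12, 18, 36
```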
src/models/base.py | armavox/vae-cancer-nodules | 0 | 12797074 | # Code provided by:
# @misc{Subramanian2020,
# author = {<NAME>},
# title = {PyTorch-VAE},
# year = {2020},
# publisher = {GitHub},
# journal = {GitHub repository},
# howpublished = {\url{https://github.com/AntixK/PyTorch-VAE}}
# }
from torch import nn
from abc import abstractmethod
from typing import List, Any, TypeVar
Tensor = TypeVar("torch.tensor")
class BaseVAE(nn.Module):
def __init__(self) -> None:
super(BaseVAE, self).__init__()
def encode(self, input: Tensor) -> List[Tensor]:
raise NotImplementedError
def decode(self, input: Tensor) -> Any:
raise NotImplementedError
def sample(self, batch_size: int, current_device: int, **kwargs) -> Tensor:
raise RuntimeWarning()
def generate(self, x: Tensor, **kwargs) -> Tensor:
raise NotImplementedError
def embed(self, x: Tensor, **kwargs) -> Tensor:
raise NotImplementedError
@abstractmethod
def forward(self, *inputs: Tensor) -> Tensor:
pass
@abstractmethod
def loss_function(self, *inputs: Any, **kwargs) -> Tensor:
pass
| 2.46875 | 2 |
pymelet/point_stat/point_stat_continuous.py | wqshen/pymelet | 2 | 12797075 | # -*- coding: utf-8 -*-
# @Author: wqshen
# @Email: <EMAIL>
# @Date: 2020/6/10 14:43
# @Last Modified by: wqshen
import numpy as np
from logzero import logger
from .point_stat_base import PointStatBase
class ContinuousVariableVerification(PointStatBase):
def __init__(self, forecast=None, obs=None, fcsterr=None, group=None):
if (forecast is None or obs is None) and fcsterr is None:
raise Exception("Initialize failed, check forecast and obs and fcsterr values.")
elif forecast is not None and obs is not None and fcsterr is not None:
logger.warning("You give forecast, obs and fcsterr, but the fcsterr will be ignored.")
fcsterr = None
self._available_score = ['N', 'ME', 'ME2', 'MSE', 'RMSE', 'ESTDEV', 'BCMSE', 'MAE', 'IQR', 'MAD', 'EPCT']
if fcsterr is not None:
self._error = fcsterr[~np.isnan(fcsterr)]
if forecast is None:
forecast = fcsterr + obs
if obs is None:
obs = forecast - fcsterr
# Not Available, 'BAGSS', 'ANOM_CORR'
self._available_score += ['FBAR', 'OBAR', 'FSTDEV', 'OSTDEV', 'PR_CORR', 'SP_CORR', 'KT_CORR', 'MBIAS', ]
super(ContinuousVariableVerification, self).__init__(forecast, obs, group)
@property
def FBAR(self):
"""**The sample mean forecast, FBAR**"""
return self.mean_forecast(self._f)
@staticmethod
def mean_forecast(forecast):
r"""**The sample mean forecast, FBAR**
the sample mean forecast (FBAR) is defined as,
.. math::
\bar{f} = \frac{1}{n}\sum_{i=1}^{n}f_i
Returns
------
numpy.ndarray, the sample mean forecast (FBAR)
"""
return np.average(forecast)
@property
def OBAR(self):
"""**The sample mean observation, OBAR**"""
return self.mean_observation(self._o)
@staticmethod
def mean_observation(obs):
r"""**The sample mean observation, OBAR**
the sample mean observation (OBAR) is defined as,
.. math::
\bar{o} = \frac{1}{n}\sum_{i=1}^{n}o_i
Returns
-------
numpy.ndarray, the sample mean observation (OBAR)
"""
return np.average(obs)
@property
def FSTDEV(self):
"""**The forecast standard deviation (FSTDEV)**"""
return self.forecast_standard_deviation(self._f)
@staticmethod
def forecast_standard_deviation(forecast):
r"""**The forecast standard deviation (FSTDEV)**
The sample variance of the forecasts is defined as
.. math::
s^{2}_{f} = \frac{1}{T-1}\sum_{i=1}^{T}(f_i - \bar{f})^2
The forecast standard deviation, FSTDEV, is defined as
.. math::
s_{f} = \sqrt{s^{2}_{f}}
Returns
-------
numpy.ndarray, the forecast standard deviation (FSTDEV)
"""
return np.std(forecast)
@property
def OSTDEV(self):
r"""**The observed standard deviation (OSTDEV)**"""
return self.observation_standard_deviation(self._o)
@staticmethod
def observation_standard_deviation(obs):
r"""**The observed standard deviation (OSTDEV)**
The sample variance of the observations is defined as
.. math::
s^{2}_{o} = \frac{1}{T-1}\sum_{i=1}^{T}(o_i - \bar{o})^2
The observed standard deviation, OSTDEV, is defined as
.. math::
s_{o} = \sqrt{s^{2}_{o}}
Returns
-------
numpy.ndarray, the observed standard deviation (OSTDEV)
"""
return np.std(obs)
@property
def PR_CORR(self):
r"""**The Pearson correlation coefficient ( :math:`r` , PR_CORR)**"""
return self.pearson_correlation_coefficient(self._f, self._o)
@staticmethod
def pearson_correlation_coefficient(forecast, obs):
r"""**The Pearson correlation coefficient ( :math:`r` , PR_CORR)**
The Pearson correlation coefficient, **r**,
measures the strength of linear association between the forecasts and observations.
The Pearson correlation coefficient is defined as:
.. math::
r = \frac{\sum^{T}_{i=1}(f_i - \bar{f})(o_i - \bar{o})}{\sqrt{\sum{(f_i - \bar{f})^2}}\sqrt{\sum{(o_i - \bar{o})^2}}}
r can range between -1 and 1;
a value of 1 indicates perfect correlation and
a value of -1 indicates perfect negative correlation.
A value of 0 indicates that the forecasts and observations are not correlated.
Returns
-------
numpy.ndarray, the Pearson correlation coefficient (PR_CORR)
"""
return np.corrcoef(forecast, obs)[1, 0]
@property
def SP_CORR(self):
r"""**The Spearman rank correlation coefficient ( :math:`\rho_s` , SP_CORR)**"""
return self.spearman_rank_correlation_cofficient(self._f, self._o)
@staticmethod
def spearman_rank_correlation_cofficient(forecast, obs):
r"""**The Spearman rank correlation coefficient ( :math:`\rho_s` , SP_CORR)**
The Spearman rank correlation coefficient ( :math:`\rho_s` ) is a robust measure of association
that is based on the ranks of the forecast and observed values rather than the actual values.
That is, the forecast and observed samples are ordered from smallest to largest
and rank values (from 1 to **n**, where **n** is the total number of pairs) are assigned.
The pairs of forecast-observed ranks are then used to compute a correlation coefficient,
analogous to the Pearson correlation coefficient, **r**.
A simpler formulation of the Spearman-rank correlation is based on differences
between each of the pairs of ranks (denoted as :math:`d_i` ):
.. math::
\rho_s = 1 - \frac{6}{n(n^2 - 1)}\sum^{n}_{i=1}d^{2}_{i}
Like **r**, the Spearman rank correlation coefficient ranges between -1 and 1;
a value of 1 indicates perfect correlation and
a value of -1 indicates perfect negative correlation.
A value of 0 indicates that the forecasts and observations are not correlated.
Returns
-------
numpy.ndarray, the Spearman correlation coefficient (SP_CORR)
"""
from scipy.stats import spearmanr
return spearmanr(forecast, obs)
@property
def KT_CORR(self):
r"""**Kendall's Tau statistic ( :math:`\tau` , KT_CORR)**"""
return self.kendall_tau_statistic(self._f, self._o)
@staticmethod
def kendall_tau_statistic(forecast, obs):
r"""**Kendall's Tau statistic ( :math:`\tau` , KT_CORR)**
Kendall's Tau statistic ( :math:`\tau` ) is a robust measure of the level of association
between the forecast and observation pairs. It is defined as
.. math::
\tau = \frac{N_C - N_D}{n(n-1)/2}
where NC is the number of "concordant" pairs and ND is the number of "discordant" pairs.
Concordant pairs are identified by comparing each pair with all other pairs in the sample;
this can be done most easily by ordering all of the ( :math:`f_i, o_i` ) pairs
according to :math:`f_i`, in which case the :math:`o_i`, values won't necessarily be in order.
The number of concordant matches of a particular pair with other pairs is computed by
counting the number of pairs (with larger values)
for which the value of oi for the current pair is exceeded (that is, pairs for which
the values of **f** and **o** are both larger than the value for the current pair).
Once this is done, Nc is computed by summing the counts for all pairs.
The total number of possible pairs is :math:`n(n-1)/2`; thus, the number of discordant pairs is :math:`N_D = n(n-1)/2 - N_C`.
Like **r** and :math:`\rho_s` , Kendall's Tau ( :math:`\tau` ) ranges between -1 and 1;
a value of 1 indicates perfect association (concordance) and
a value of -1 indicates perfect negative association.
A value of 0 indicates that the forecasts and observations are not associated.
Returns
-------
numpy.ndarray, Kendall's Tau statistic ( :math:`\tau` , KT_CORR)
"""
from scipy.stats import kendalltau
return kendalltau(forecast, obs)
@property
def ME(self):
"""**The Mean Error (ME)**"""
return self.mean_error(self.error)
@staticmethod
def mean_error(error):
r"""**The Mean Error (ME)**
The Mean Error, ME, is a measure of overall bias for continuous variables;
in particular ME = Bias. It is defined as
.. math::
ME = \frac{1}{n}\sum^{n}_{i=1}(f_i - o_i) = \bar{f} - \bar{o}
A perfect forecast has ME = 0.
Returns
-------
numpy.ndarray, The Mean Error (ME)
"""
return np.average(error)
@property
def ME2(self):
"""**The Mean Error Squared** (ME2)"""
return self.mean_error_squared(self.error)
@staticmethod
def mean_error_squared(error):
"""**The Mean Error Squared** (ME2)
The Mean Error Squared, ME2, is provided to give a complete breakdown of MSE
in terms of squared Bias plus estimated variance of the error,
as detailed below in the section on BCMSE. It is defined as ME2 = ME2.
A perfect forecast has ME2 = 0.
Returns
-------
numpy.ndarray, The Mean Error (ME)
"""
return np.square(np.average(error))
@property
def MBIAS(self):
"""**Multiplicative bias (MBIAS)**"""
return self.multiplicative_bias(self._f, self._o)
@staticmethod
def multiplicative_bias(forecast, error):
r"""**Multiplicative bias (MBIAS)**
Multiplicative bias is simply the ratio of the means of the forecasts and the observations:
.. math::
MBIAS = \frac{\bar{f}}{\bar{o}}
Returns
-------
numpy.ndarray, Multiplicative bias (MBIAS)
"""
return np.average(forecast) / np.average(error)
@property
def MSE(self):
"""**Mean-squared error (MSE)**"""
return self.mean_squared_error(self.error)
@staticmethod
def mean_squared_error(error):
r"""**Mean-squared error (MSE)**
MSE measures the average squared error of the forecasts. Specifically,
.. math::
MSE = \frac{1}{n}\sum{(f_i - o_i)^2}
Returns
-------
numpy.ndarray, Mean-squared error (MSE)
"""
return np.average(error ** 2)
@property
def RMSE(self):
"""**Root-mean-squared error (RMSE)**"""
return self.root_mean_squared_error(self.error)
@staticmethod
def root_mean_squared_error(error):
"""**Root-mean-squared error (RMSE)**
RMSE is simply the square root of the MSE, :math:`RMSE = \sqrt{MSE}`
Returns
-------
numpy.ndarray, Root-mean-squared error (RMSE)
"""
return np.sqrt(np.average(error ** 2))
@property
def ESTDEV(self):
"""**Standard deviation of the error** (ESTDEV)"""
return self.standard_deviation_of_error(self.error)
@staticmethod
def standard_deviation_of_error(error):
"""**Standard deviation of the error** (ESTDEV)
Returns
-------
numpy.ndaray, Standard deviation of the error
"""
return np.std(error)
@property
def BCMSE(self):
"""**Bias-Corrected MSE (BCMSE)**"""
return self.bias_corrected_mse(self.error)
@staticmethod
def bias_corrected_mse(error):
r"""**Bias-Corrected MSE (BCMSE)**
MSE and RMSE are strongly impacted by large errors.
They also are strongly impacted by large bias (ME) values.
MSE and RMSE can range from 0 to infinity.
A perfect forecast would have MSE = RMSE = 0.
MSE can be re-written as,
.. math::
MSE = (\bar{f} - \bar{o})^2 + s^{2}_{f} + s^{2}_{o} -2 s_f s_o r_{fo}
where :math:`\bar{f} - \bar{o} = ME` and :math:`s^{2}_{f} + s^{2}_{o} -2 s_f s_o r_{fo}` is
the estimated variance of the error, :math:`s^{2}_{fo}` . Thus, :math:`MSE = ME^2 + s^{2}_{f-o}`
To understand the behavior of MSE, it is important to examine both of the terms of MSE,
rather than examining MSE alone. Moreover, MSE can be strongly influenced by ME,
as shown by this decomposition.
The standard deviation of the error, :math:`s_{f-o}` , is
.. math::
s_{f-o}=\sqrt{s^{2}_{f-o}}=\sqrt{s^{2}_{f} + s^{2}_{o} -2 s_f s_o r_{fo}}
Note that the square of the standard deviation of the error (ESTDEV2) is
sometimes called the "Bias-corrected MSE" (BCMSE)
because it removes the effect of overall bias from the forecast-observation squared differences.
Returns
-------
numpy.ndarray, Bias-Corrected MSE (BCMSE)
"""
return np.square(np.std(error))
@property
def MAE(self):
"""**Mean Absolute Error (MAE)**"""
return self.mean_absolute_error(self.error)
@staticmethod
def mean_absolute_error(error):
r"""**Mean Absolute Error (MAE)**
The Mean Absolute Error (MAE) is defined as :math:`MAE = \frac{1}{n}\sum{|f_i - o_i|}`
MAE is less influenced by large errors and also does not depend on the mean error.
A perfect forecast would have MAE = 0.
Returns
-------
numpy.ndarray, Mean Absolute Error (MAE)
"""
return np.average(np.abs(error))
@property
def IQR(self):
""""**Inter Quartile Range of the Errors (IQR)**"""
return self.inter_quartile_range_of_errors(self.error)
@staticmethod
def inter_quartile_range_of_errors(error):
r"""**Inter Quartile Range of the Errors (IQR)**
The Inter Quartile Range of the Errors (IQR) is the difference
between the 75th and 25th percentiles of the errors. It is defined as
.. math::
IQR = p_{75} (f_i - o_i) - p_{25}(f_i - o_i)
IQR is another estimate of spread, similar to standard error,
but is less influenced by large errors and also does not depend on the mean error.
A perfect forecast would have IQR = 0.
Returns
-------
nupmy.ndarray, Inter Quartile Range of the Errors (IQR)
"""
return np.percentile(error, 75) - np.percentile(error, 25)
@property
def MAD(self):
"""Median Absolute Deviation (MAD)"""
return self.median_absolute_deviation(self.error)
@staticmethod
def median_absolute_deviation(error):
"""Median Absolute Deviation (MAD)
The Median Absolute Deviation (MAD) is defined as :math:`MAD=median|f_i - o_i|`
MAD is an estimate of spread, similar to standard error,
but is less influenced by large errors and also does not depend on the mean error.
A perfect forecast would have MAD = 0.
Returns
-------
numpy.ndarray, Median Absolute Deviation (MAD)
"""
return np.median(np.abs(error))
@property
def BAGSS(self):
"""Bias Adjusted Gilbert Skill Score (BAGSS)"""
return self.bias_adjusted_gilbert_skill_score(self._f, self._o)
@staticmethod
def bias_adjusted_gilbert_skill_score(forecast, obs):
"""Bias Adjusted Gilbert Skill Score (BAGSS)
The Bias Adjusted Gilbert Skill Score (BAGSS) is the Gilbert Skill Score,
but with the contingency table counts adjusted to eliminate
as much bias in the forecast as possible.
For details, see `Brill and Messinger, 2009. <https://www.adv-geosci.net/16/137/2008/>`_
Returns
-------
Not implemented
numpy.ndarray, Bias Adjusted Gilbert Skill Score (BAGSS)
"""
return
@property
def EPCT(self):
"""Percentiles (0.1, 0.25, 0.5, 0.75, 0.9) of the errors"""
return self.percentile_errors(self.error)
@staticmethod
def percentile_errors(error):
"""Percentiles of the errors
Percentiles of the errors provide more information about the distribution of errors
than can be obtained from the mean and standard deviations of the errors.
Percentiles are computed by ordering the errors from smallest to largest
and computing the rank location of each percentile in the ordering,
and matching the rank to the actual value.
Percentiles can also be used to create box plots of the errors.
The 0.10th, 0.25th, 0.50th, 0.75th, and 0.90th quantile values of the errors are computed.
Returns
-------
numpy.ndarray, Percentiles of the errors
"""
quantiles = np.array([0.1, 0.25, 0.5, 0.75, 0.9])
return np.quantile(error, quantiles)
@property
def ANOM_CORR(self):
"""The Anomaly correlation coefficient (ANOM_CORR)"""
return self.anomaly_correlation_coefficient(self._f, self._o, None)
@staticmethod
def anomaly_correlation_coefficient(forecast, obs, climate):
r"""The Anomaly correlation coefficient (ANOM_CORR)
The Anomaly correlation coefficient is equivalent to the Pearson correlation coefficient,
except that both the forecasts and observations are first adjusted according to a climatology value.
The anomaly is the difference between the individual forecast or observation and the typical situation,
as measured by a climatology (**c**) of some variety.
It measures the strength of linear association between the forecast anomolies and observed anomalies.
The Anomaly correlation coefficient is defined as:
.. math::
Anomaly Correlation = \frac{\sum{(f_i - c)(o_i - c)}} {\sqrt{\sum{(f_i - c)^2}} \sqrt{\sum{(o_i - c)^2}}}
Anomaly correlation can range between -1 and 1;
- a value of 1 indicates perfect correlation and
- a value of -1 indicates perfect negative correlation.
- A value of 0 indicates that the forecast and observed anomalies are not correlated.
Returns
-------
Not implemented
"""
return
def list_score(self):
"""list all available score"""
return {k: np.round(getattr(self, k), self.round) for k in self._available_score}
| 2.453125 | 2 |
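Several of the docstrings above rely on the decomposition MSE = ME^2 + BCMSE (squared bias plus error variance). A small numpy check with made-up numbers, assuming the same population-variance convention (`np.std` with ddof=0) used by the class:

```python
# Hypothetical numeric check (synthetic data) of the MSE decomposition described above.
import numpy as np

forecast = np.array([2.0, 3.5, 1.0, 4.0])
obs = np.array([1.5, 3.0, 2.0, 3.0])
error = forecast - obs

mse = np.average(error ** 2)        # mean squared error
me = np.average(error)              # mean error (bias)
bcmse = np.square(np.std(error))    # bias-corrected MSE = population variance of the error

assert np.isclose(mse, me ** 2 + bcmse)
```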
sir/SIR_continuous_reinfected.py | xinyushi/SIR.Model | 0 | 12797076 | import numpy as np
from scipy.integrate import solve_ivp
import matplotlib.pyplot as plt
from numpy.random import randint, rand
from sir import *
def SIR_continuous_reinfected(b,k,time,ii,r):
"""
Simulates continuous SIR model
ii = initial percentage of infected
time = Days of simulation
b = probability that people become infectious
k = probability that people recover
r = reinfection probability
returns sol from solve_ivp
"""
def SIR(t, X):
#The main set of equations
Y = np.zeros((3))
Y[0] = -b * X[0] * X[2]
Y[1] = k * X[2] - r * X[1]
Y[2] = b * X[0] * X[2] - (k * X[2]) + r * X[1]
return Y
t_eval = np.linspace(0, time, time)
sol1 = solve_ivp(SIR, [0, time], [1-ii, 0, ii], method='RK45', t_eval=t_eval) # solve the equation
return sol1
## Discrete
class Person_reinfection(Person):
"""
An agent representing a person.
By default, a person is susceptible but not infectious. They can become infectious through exposure, via the infection method.
Status: 0 = susceptible 1 = infected 2 = removed
"""
def __init__(self,startpos=None):
self.status = 0
if startpos==None:
self.pos = np.random.rand(2)
else:
self.pos = np.array(startpos)
self.reinfection=1
def reinfectionrate(self):
return self.reinfection
def immunization(self,p):
q=self.reinfection-p
if q<0:
q=0
self.reinfection=q
def count_susceptible(pop):
"""
counts number of susceptible
"""
return sum(p.is_susceptible() for p in pop)
def count_infected(pop):
"""
counts number of infected
"""
return sum(p.is_infected() for p in pop)
def count_removed(pop):
"""
counts number of removed
"""
return sum(p.is_removed() for p in pop)
def SIR_discrete_reinfection(N,ii,b,T,k):
"""
Simulates discrete SIR model
N = Total number of people
ii = initial percentage of infected
b = number of contacts per day
T = Days of simulation
k = probability that people recover
returns list of s,i,r
"""
pop = [Person_reinfection() for i in range(N)]
initial_infection = randint(N,size=int(N*ii))
for i in initial_infection:
pop[i].infection()
counts_susceptible = [count_susceptible(pop)]
counts_infected = [count_infected(pop)]
counts_removed = [count_removed(pop)]
for t in range(T):
# update the population
for i in range(N):
if pop[i].is_infected():
# person i infected all their contacts
contacts = randint(N, size=b)
for j in contacts:
if not pop[j].is_removed():
pop[j].infection()
#if rand() < p:
# pop[j].infection()
if pop[j].is_removed():
if rand()<pop[j].reinfectionrate():
pop[j].infection()
if rand()< k:
pop[i].remove()
pop[i].immunization(rand())
# add to our counts
counts_susceptible.append(count_susceptible(pop))
counts_infected.append(count_infected(pop))
counts_removed.append(count_removed(pop))
return np.array([counts_susceptible,counts_infected,counts_removed])
| 3.3125 | 3 |
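A minimal usage sketch for the continuous model above (parameter values are illustrative assumptions, not taken from the repository):

```python
# Hypothetical driver for SIR_continuous_reinfected(); the rows of sol.y follow the
# state ordering used inside SIR(): susceptible, recovered, infected.
sol = SIR_continuous_reinfected(b=0.5, k=0.1, time=160, ii=0.01, r=0.02)
susceptible, recovered, infected = sol.y
plt.plot(sol.t, susceptible, label='susceptible')
plt.plot(sol.t, infected, label='infected')
plt.plot(sol.t, recovered, label='recovered')
plt.xlabel('Time (days)')
plt.legend()
plt.show()
```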
bloom/editor/operations/flip.py | thomasrogers03/bloom | 9 | 12797077 | <reponame>thomasrogers03/bloom
# Copyright 2020 <NAME>
# SPDX-License-Identifier: Apache-2.0
from .. import map_objects
from ..undo_stack import SimpleUndoableOperation, UndoStack
class Flip:
def __init__(
self, undo_stack: UndoStack, map_object: map_objects.EmptyObject, part: str
):
self._undo_stack = undo_stack
self._map_object = map_object
self._part = part
def flip(self):
self._map_object.invalidate_geometry()
with self._undo_stack.property_change(
"Sprite/Wall Flip", self._map_object, self._part, "xflip", "yflip"
):
if self._stat.xflip and self._stat.yflip:
self._stat.xflip = 0
elif self._stat.xflip:
self._stat.xflip = 1
self._stat.yflip = 1
elif self._stat.yflip:
self._stat.yflip = 0
else:
self._stat.xflip = 1
@property
def _stat(self):
return self._map_object.get_stat_for_part(self._part)
| 2.453125 | 2 |
icrv=club/trial.py | IamWafula/Kiranja | 1 | 12797078 | print("Enter the first number:")
f_num = int(input())
print("Enter the second number:")
s_num = int(input())
subtraction = f_num - s_num
print ("The value is:")
print ( subtraction ) | 3.984375 | 4 |
Algorithm/Easy/1-500/365Count1inBinary.py | MartinYan623/Lint-Code | 0 | 12797079 | <filename>Algorithm/Easy/1-500/365Count1inBinary.py<gh_stars>0
class Solution:
"""
@param: num: An integer
@return: An integer
"""
def countOnes(self, num):
# write your code here
"""
count=0
while num!=0:
num = num & (num-1)
count+=1
return count
"""
count = 0
for i in range(0, 32):
if num & 1:
count = count + 1
num = num >> 1
return count
| 3.453125 | 3 |
simulator/web/lset.py | ondiiik/meteoink | 2 | 12797080 | <reponame>ondiiik/meteoink<filename>simulator/web/lset.py
from config import location
from log import dump_exception
def page(web):
try:
i = int(web.args['idx'])
args = web.args['name'], float(web.args['lat']), float(web.args['lon'])
location[i].name, location[i].lat, location[i].lon = args
location[i].flush()
except Exception as e:
dump_exception('WEB error:', e)
yield web.index
| 2.1875 | 2 |
rabbit_tools/base.py | andrzejandrzej/rabbit-tools | 0 | 12797081 | import argparse
import logging
import re
import sys
from collections import Sequence
from pyrabbit import Client
from pyrabbit.http import HTTPError
from rabbit_tools.config import (
Config,
ConfigFileMissingException,
)
logger = logging.getLogger(__name__)
class StopReceivingInput(Exception):
"""
Raised when no queues are left, or user typed a quitting command.
"""
class RabbitToolBase(object):
"""
    Base class providing AMQP communication, parsing of user input
    and applying an action to the chosen queues. Tools implemented
    by concrete classes give the user a faster (and usually more
    convenient) way to manipulate AMQP queues than the GUI, API
    or other command-line tools shipped with RabbitMQ.
    Most importantly, a concrete class names the method of the
    PyRabbit client instance that will be used to manipulate the
    chosen queues.
    Subclasses have to implement the attributes:
     * description (str) - description of the tool, shown in the
       script's help (run with the -h argument);
     * client_method_name (str) - name of a method of PyRabbit's
       client instance, which will be used to manipulate queues.
    Other attributes that may be overridden in a subclass:
     * do_remove_chosen_numbers (bool) - set to True if successfully
       manipulated queues are expected to disappear from the list,
       so the numbers associated with them should not be bound to
       other names, which avoids wrong selections (as when deleting
       queues);
     * queue_not_affected_msg (str) - message logged when an action
       was unsuccessful for the current queue;
     * queues_affected_msg (str) - message logged to show which
       queues were successfully affected by an action;
     * no_queues_affected_msg (str) - message logged when none of
       the chosen queues could be affected.
    Note that some messages end with a dot and others do not,
    because they are formatted differently. If not overridden in
    a subclass, the default messages are logged.
    ** Usage
    There are two ways of using Rabbit Tools. The first is to pass
    one or more known queue names (separated by spaces), or the
    string "all" to choose all queues. The other way is to run a
    tool with no arguments: the current list of queues is shown and
    the tool waits for input from the user. Each queue has a number
    associated with it, so in this mode the user chooses queues by
    these numbers.
    Additional convenience comes from:
    * the config file, so options like the API address or user
      credentials do not have to be given every time;
    * the ability to choose ranges of queue numbers.
    ** Choosing queues in the interactive mode:
    In this mode a tool runs in a loop until the user quits or there
    are no queues left. In each iteration of the loop the list of
    available queues is shown; each queue has a number assigned, so
    the user inputs that number, not the whole name. Input can be
    a single number, a list of numbers or a range.
    In a list of numbers, the numbers are separated by a space,
    a comma, or both (the number of spaces before and after the
    comma does not matter):
    1, 2 3 , 4 (will choose numbers: 1, 2, 3, 4)
    A range is given as two numbers (the beginning and the end of
    the range) separated by a dash (-). The numbers and the dash
    may be separated by one or more spaces:
    2 - 5 (will choose: 2, 3, 4, 5)
    IMPORTANT: a list and a range of numbers should not be mixed
    in one input.
"""
config_section = 'rabbit_tools'
client_method_name = NotImplemented
description = NotImplemented
args = {
'queue_name': {
            'help': 'Name of one or more queues (separated by space) / '
'"all" to choose all queues.',
'nargs': '*',
},
}
queue_not_affected_msg = 'Queue not affected'
queues_affected_msg = 'Queues affected'
no_queues_affected_msg = 'No queues have been affected.'
quitting_commands = ['q', 'quit', 'exit', 'e']
choose_all_commands = ['a', 'all']
# set to True, if queues are deleted after an action,
# and the associated number should not be shown anymore
do_remove_chosen_numbers = False
single_choice_regex = re.compile(r'^\d+$')
range_choice_regex = re.compile(r'^(\d+)[ ]*-[ ]*(\d+)$')
multi_choice_regex = re.compile(r'^((\d+)*[ ]*,?[ ]*){2,}$')
multi_choice_inner_regex = re.compile(r'\b(\d+)\b')
def __init__(self):
try:
self.config = Config(self.config_section)
except ConfigFileMissingException:
sys.exit('Config file has not been found. Use the "rabbit_tools_config" command'
' to generate it.')
self._parsed_args = self._get_parsed_args()
self._vhost = self.config['vhost']
self.client = self._get_client(**self.config)
self._method_to_call = getattr(self.client, self.client_method_name)
self._chosen_numbers = set()
def _get_parsed_args(self):
parser = argparse.ArgumentParser(description=self.description)
for arg_name, arg_opts in self.args.iteritems():
parser.add_argument(arg_name, **arg_opts)
return parser.parse_args()
def _get_client(self, host, port, user, password, **kwargs):
api_url = self._get_api_url(host, port)
cl = Client(api_url, user, password)
return cl
@staticmethod
def _get_api_url(host, port):
return '{0}:{1}'.format(host, str(port))
def _yield_queue_list(self):
return (x['name'] for x in self.client.get_queues(self._vhost))
def _get_queue_mapping(self):
queue_names = list(self._yield_queue_list())
if not queue_names:
raise StopReceivingInput
full_range = range(1, len(queue_names) + len(self._chosen_numbers) + 1)
if self.do_remove_chosen_numbers:
output_range = set(full_range) - self._chosen_numbers
else:
output_range = full_range
return dict(zip(output_range, queue_names))
@staticmethod
def _get_user_input(mapping):
if mapping:
for nr, queue in mapping.iteritems():
print '[{}] {}'.format(nr, queue)
            user_input = raw_input("Queue number ('all' to choose all / 'q' to quit): ")
user_input = user_input.strip().lower()
return user_input
else:
logger.info('No more queues to choose.')
raise StopReceivingInput
def _parse_input(self, user_input):
if user_input in self.quitting_commands:
raise StopReceivingInput
if user_input in self.choose_all_commands:
return 'all'
single_choice = self.single_choice_regex.search(user_input)
if single_choice:
return [int(single_choice.group(0))]
range_choice = self.range_choice_regex.search(user_input)
if range_choice:
return range(int(range_choice.group(1)), int(range_choice.group(2))+1)
multi_choice = self.multi_choice_regex.search(user_input)
if multi_choice:
raw_numbers = multi_choice.group(0)
return map(int, self.multi_choice_inner_regex.findall(raw_numbers))
logger.error('Input could not be parsed.')
return None
@staticmethod
def _get_selected_mapping(mapping, parsed_input):
if isinstance(parsed_input, Sequence):
selected_mapping = {nr: mapping[nr] for nr in parsed_input if nr in mapping}
if not selected_mapping:
logger.error('No queues were selected.')
return None
return selected_mapping
elif parsed_input == 'all':
return mapping
return None
def make_action_from_args(self, all_queues, queue_names):
if len(queue_names) == 1 and queue_names[0] in self.choose_all_commands:
chosen_queues = all_queues
else:
chosen_queues = queue_names
affected_queues = []
for queue in chosen_queues:
try:
self._method_to_call(self._vhost, queue)
except HTTPError as e:
if e.status == 404:
logger.error("Queue %r does not exist.", queue)
else:
logger.warning("%s: %r.", self.queue_not_affected_msg, queue)
else:
affected_queues.append(queue)
        if affected_queues:
            logger.info("%s: %s", self.queues_affected_msg, ', '.join(affected_queues))
        else:
            logger.warning(self.no_queues_affected_msg)
def make_action(self, chosen_queues):
affected_queues = []
chosen_numbers = []
for queue_number, queue_name in chosen_queues.iteritems():
try:
self._method_to_call(self._vhost, queue_name)
except HTTPError as e:
if e.status == 404:
logger.error("Queue %r does not exist.", queue_name)
chosen_numbers.append(queue_number)
else:
logger.warning("%s: %r.", self.queue_not_affected_msg, queue_name)
else:
affected_queues.append(queue_name)
chosen_numbers.append(queue_number)
if affected_queues:
logger.info("%s: %s.", self.queues_affected_msg, ', '.join(affected_queues))
else:
logger.warning(self.no_queues_affected_msg)
return chosen_numbers
def run(self):
queue_names = self._parsed_args.queue_name
if queue_names:
all_queues = self._yield_queue_list()
self.make_action_from_args(all_queues, queue_names)
else:
while True:
try:
mapping = self._get_queue_mapping()
user_input = self._get_user_input(mapping)
parsed_input = self._parse_input(user_input)
except StopReceivingInput:
print 'bye'
break
if parsed_input:
selected_mapping = self._get_selected_mapping(mapping, parsed_input)
if selected_mapping:
self._chosen_numbers.update(self.make_action(selected_mapping))
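# Added illustration (not part of the original tool): the three input forms
# described in the class docstring, classified with the class's own regexes.
# The sample strings below are hypothetical.
def _input_parsing_examples():
    samples = ['3', '2 - 5', '1, 2 3 , 4']
    results = []
    for text in samples:
        if RabbitToolBase.single_choice_regex.search(text):
            kind = 'single number'
        elif RabbitToolBase.range_choice_regex.search(text):
            kind = 'range of numbers'
        elif RabbitToolBase.multi_choice_regex.search(text):
            kind = 'list of numbers'
        else:
            kind = 'unrecognised'
        results.append((text, kind))
    return results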
| 2.71875 | 3 |
showcase/migrations/0001_initial.py | aurthurm/my-site | 0 | 12797082 | # Generated by Django 2.1.1 on 2018-09-13 18:15
from django.db import migrations, models
import django.utils.timezone
import showcase.models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Portfolio',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255, verbose_name='title')),
('slug', models.SlugField(help_text="Used to build the category's URL.", max_length=255, unique=True, verbose_name='slug')),
('content', models.TextField(blank=True, verbose_name='content')),
('image', models.ImageField(blank=True, help_text='Used for illustration.', upload_to=showcase.models.image_upload_to_dispatcher, verbose_name='image')),
('image_caption', models.TextField(blank=True, help_text="Image's caption.", verbose_name='caption')),
('creation_date', models.DateTimeField(default=django.utils.timezone.now, verbose_name='creation date')),
],
),
]
| 1.921875 | 2 |
jump.py | shelper/leetcode | 0 | 12797083 | def jump(steps, method=1):
if method == 1:
if len(steps) <= 1:
return True
for i in range(len(steps)):
if i == 0:
cur_cover = steps[0]
else:
cur_cover = max(cur_cover, i + steps[i])
if cur_cover <= i:
return False
if cur_cover >= len(steps) - 1:
return True
return False
elif method == 2:
cover = 0
i = 0
        while i <= cover:
cover = max(cover, i + steps[i])
i += 1
if cover >= len(steps) - 1:
return True
return False
elif method == 3:
step_num = 0
furthest = steps[0]
cur_cover = steps[0]
if cur_cover > len(steps) - 1:
return step_num
for i in range(len(steps)):
if furthest < i: # if the furthest i can go < i, means i can never reach i
return None
furthest = max(
furthest, i + steps[i]
) # the furthest i can go with current i
if (
i == cur_cover
): # if i reaches the previous limit, check to see if i need to update the limit
if i < len(steps) - 1:
step_num += 1 # each switch of limit means one step further.
cur_cover = (
furthest
) # the new limte is the furtheest curret i can go,
if cur_cover > len(steps) - 1:
return step_num
else:
return step_num
"""
Three quantities drive this problem:
1. the furthest index reachable so far
2. the index currently being scanned
3. the range the current index may iterate over (the cover reached by the previous jump)
"""
steps = [2, 2, 1, 0, 4]
print(jump(steps, 3))
| 3.984375 | 4 |
w02/e25.py | Luccifer/PythonCoruseraHSE | 1 | 12797084 | # List of squares
def list_of_squares(num):
i = 1
squares = []
while i**2 <= num:
squares.append(i**2)
i += 1
return squares
if __name__ == '__main__':
num = int(input())
print(*list_of_squares(num))
| 4.0625 | 4 |
pogoiv/poke_data_error.py | tmwilder/pogoiv | 17 | 12797085 | class PokeDataError(ValueError):
pass | 1.09375 | 1 |
BlogPosts/Average_precision/average_precision_post_code.py | markgraves/roamresearch | 190 | 12797086 | <gh_stars>100-1000
from copy import copy
from collections import OrderedDict
import numpy as np
import pandas as pd
from sklearn.metrics import average_precision_score, auc, roc_auc_score
from sklearn.metrics import precision_recall_curve
from sklearn.linear_model import LogisticRegressionCV
from sklearn.cross_validation import StratifiedShuffleSplit
import matplotlib.pyplot as plt
import matplotlib
from IPython.display import display, HTML
matplotlib.style.use('../../src/roam.mplstyle')
def generate_data_and_constant_predictions(n, frac_positive):
"""
Generates data in a fixed positive:negative ratio, and returns the
data and scores from a dummy model that predicts 0.5 for all examples.
Parameters
----------
n : int
Number of examples
frac_positive : float
Fraction of the examples that are positive
Returns
-------
observations : list
Consisting of (frac_positive * n) 1s, and (n - (frac_positive * n)) 0s
constant_predictions : list
Same length as observations
"""
n_positive = int(frac_positive * n)
n_negative = n - n_positive
observations = [1 for _ in range(n_positive)] + \
[0 for _ in range(n_negative)]
constant_predictions = [0.5 for _ in range(n_positive + n_negative)]
return observations, constant_predictions
def plot_recall_precision_from_predictions(true, scores, **kwargs):
"""
Computes precision and recall from some observations and scores assigned
to them, and plots a precision-recall curve.
Parameters
----------
true : list
Must be binary (i.e. 1s and 0s).
scores : list
Consisting of floats.
kwargs : optional
See plot_axes.
"""
p, r, thresholds = precision_recall_curve(true, scores)
plot_recall_precision(p, r, **kwargs)
def plot_recall_precision(p, r, **kwargs):
"""
Plots a precision-recall graph from a series of operating points.
Parameters
----------
p : list
Precision.
    r : list
Recall.
kwargs : optional
See plot_axes.
Returns
-------
"""
fig, ax = plt.subplots(1, 1, figsize=(7, 4))
plot_axes(ax, p, r, legend_text='IAP', **kwargs)
plt.show()
def plot_axes(
ax, y, x,
interpolation=None,
marker_size=30,
title=None,
legend_text='Area'):
"""
Plots a graph on axes provided.
Parameters
----------
ax : matplotlib axes
y : list
x : list
interpolation : None (default) or string ['linear', 'step']
marker_size : float (default: 30)
title : None or string
legend_text : string (default: 'Area')
Text to include on the legend before showing the area. Only used
if interpolation is not None.
"""
ax.scatter(x, y, marker='o', linewidths=0, s=marker_size, clip_on=False)
# Show first and last points more visably
ax.scatter([x[i] for i in [0, -1]], [y[i] for i in [0, -1]],
marker='x', linewidths=2, s=100, clip_on=False)
ax.set_xlim((-0.05, 1.05))
ax.set_ylim((-0.08, 1.08))
ax.set_xlabel('Recall')
ax.set_ylabel('Precision')
if title is not None:
ax.set_title(title, fontsize=20)
if interpolation is not None:
if interpolation == 'linear':
ax.plot(x, y)
area = auc(x, y)
ax.fill_between(x, 0, y, alpha=0.2,
label='{} = {:5.4f}'.format(legend_text, area))
leg = ax.legend()
leg.get_frame().set_linewidth(0.0)
elif interpolation == 'step':
p_long = [v for v in y for _ in (0, 1)][:-1]
r_long = [v for v in x for _ in (0, 1)][1:]
ax.plot(r_long, p_long)
area = auc_using_step(x, y)
ax.fill_between(r_long, 0, p_long, alpha=0.2,
label='{} = {:5.4f}'.format(legend_text, area))
leg = ax.legend()
leg.get_frame().set_linewidth(0.0)
else:
print("Interpolation value of '{}' not recognised. "
"Choose from 'linear', 'quadrature'.".format(interpolation))
def compare_recall_precisions_from_predictions(true, score_dict, **kwargs):
"""
Show two graphs side-by-side for two different sets of scores, against the
same true observations.
Parameters
----------
true : list
score_dict : dict
Consisting of `{name: scores}` where `name` is a string and
`scores` is a list of floats.
kwargs : optional
See plot_axes.
"""
pr = OrderedDict()
for name, score in score_dict.items():
p, r, threshold = precision_recall_curve(true, score)
pr[name] = [p, r]
compare_recall_precision_graph(pr, **kwargs)
def compare_recall_precision_graph(pr_dict, title=None, **kwargs):
"""
Parameters
----------
pr_dict : dict
Consisting of `{name: pr}` where `name` is a string and
`pr` is a tuple of precision and recall values.
title : string
kwargs : optional
See plot_axes.
"""
fig, ax = plt.subplots(1, 2, figsize=(15, 4))
for side, (name, [p, r]) in enumerate(pr_dict.items()):
plot_axes(ax[side], p, r, title=name, legend_text='IAP', **kwargs)
if title is not None:
fig.suptitle(title, fontsize=20, y=1.05)
plt.show()
def operating_points(ranking):
"""
Computes lists of precision and recall from an ordered list of observations.
Parameters
----------
ranking : list
Entries should be binary (0 or 1) and in descending order
(i.e. top-ranked is first).
Returns
-------
precision : list
recall : list
"""
precision, recall = list(), list()
for pos in range(len(ranking)):
p, r = precision_recall_from_ranking(ranking, pos)
precision.append(p)
recall.append(r)
return precision, recall
def precision_recall_from_ranking(ranking, position):
"""
Computes the precision and recall of a particular assignment of labelled
observations to a positive and negative class, where the positive class
comes first in the list, and the negative class comes second, and the
split point is specified.
Parameters
----------
ranking : list
Ordered list of binary observations.
position : int
Position to split the list into positive and negative.
Returns
-------
precision : float
recall : float
"""
if position == 0:
precision = 1.0
recall = 0.0
else:
ranking = np.array(ranking)
precision = (ranking[:position] == 1).sum() / position
recall = (ranking[:position] == 1).sum() / (ranking == 1).sum()
return precision, recall
def auc_using_step(recall, precision):
return sum([(recall[i] - recall[i+1]) * precision[i]
for i in range(len(recall) - 1)])
def roam_average_precision(y_true, y_score, sample_weight=None):
precision, recall, thresholds = precision_recall_curve(
y_true, y_score, sample_weight=sample_weight)
return auc_using_step(recall, precision)
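# Added illustration (not part of the original post code): on a toy example the
# step-wise area should match scikit-learn's average_precision_score, which is
# computed with the same step-wise sum.
def _toy_average_precision_check():
    y_true = np.array([0, 0, 1, 1])
    y_score = np.array([0.1, 0.4, 0.35, 0.8])
    return (average_precision_score(y_true, y_score),
            roam_average_precision(y_true, y_score))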
def generate_positive_semi_definite_matrix(n_dim):
"""
Creates a positive semi-definite matrix.
Parameters
----------
n_dim : int
Returns
-------
np.array : (n_dim, n_dim)
"""
cov = np.random.randn(n_dim, n_dim)
return np.dot(cov, cov.T)
def subsample(X, y, frac_positive):
"""
Subsamples a feature matrix and target vector to ensure that a specified
fraction of the target values are positive.
Parameters
----------
X : np.array (n, m)
y : np.array (n, )
frac_positive : float
Returns
-------
X : np.array (n', m)
Some subset of the rows of the input X (i.e. n' <= n)
y : np.array (n', )
Some subset of the rows of the input y (i.e. n' <= n)
"""
positive_idx = np.arange(len(y))[y == 1]
negative_idx = np.arange(len(y))[y == 0]
num_positive = int(frac_positive * len(negative_idx))
positive_idx = np.random.choice(positive_idx, size=num_positive, replace=False)
indices_to_use = np.concatenate([positive_idx, negative_idx])
np.random.shuffle(indices_to_use)
return X[indices_to_use], y[indices_to_use]
def generate_continuous_data_and_targets(
n_dim,
n_samples,
mixing_factor=0.025,
frac_positive=0.1):
"""
Generates a multivariate Gaussian-distributed dataset and a response
variable that is conditioned on a weighted sum of the data.
Parameters
----------
n_dim : int
n_samples : int
mixing_factor : float
'Squashes' the weighted sum into the linear regime of a sigmoid.
Smaller numbers squash closer to 0.5.
Returns
-------
X : np.array
(n_samples, n_dim)
y : np.array
(n_samples, )
"""
cov = generate_positive_semi_definite_matrix(n_dim)
X = np.random.multivariate_normal(
mean=np.zeros(n_dim),
cov=cov,
size=n_samples)
weights = np.random.randn(n_dim)
y_probs = sigmoid(mixing_factor * np.dot(X, weights))
y = np.random.binomial(1, p=y_probs)
X, y = subsample(X, y, frac_positive)
return X, y
def sigmoid(x):
"""
Computes sigmoid(x) for some activation x.
Parameters
----------
x : float
Returns
-------
sigmoid(x) : float
"""
return 1 / (1 + np.exp(-x))
def train_model_and_evaluate(n_dim=50, n_samples=10000, frac_positive=0.05,
mixing_factor=0.025):
"""
Generates some data and trains a logistic regression model.
Parameters
----------
n_dim : int
Number of dimensions for the training data.
n_samples : int
Number of observations.
frac_positive : float
mixing_factor : float
Numbers nearer to 0 make the task more challenging.
Returns
-------
y : np.array (n_test, )
True observed values in the test set.
y_scores : np.array (n_test, )
Model predictions of the test samples.
roc_auc : float
ROC AUC score on the test data
"""
X, y = generate_continuous_data_and_targets(
n_dim=n_dim, n_samples=n_samples, frac_positive=frac_positive,
mixing_factor=mixing_factor)
splits = StratifiedShuffleSplit(y, test_size=0.3, random_state=42)
train_idx, test_idx = list(splits)[0]
lr = LogisticRegressionCV()
lr.fit(X[train_idx], y[train_idx])
y_scores = lr.predict_proba(X[test_idx])[:, 1]
roc_auc = roc_auc_score(y[test_idx], y_scores)
return y[test_idx], y_scores, roc_auc
| 3.015625 | 3 |
Queens.py | mtxrii/NQueens | 0 | 12797087 | def placeQueen(B, i, j):
B[i][j] += 1
B[i][0] = j
down = j
# vertical downwards queen line of sight
for k in range(len(B)):
if k != i:
B[k][down] -= 1
# diagonal downwards queen line of sight
for l in range(len(B)):
rightClear = (i + l < len(B)) and (j + l < len(B))
leftClear = (i + l < len(B)) and (j - l > 0)
right = j + l
left = j - l
if rightClear:
B[i + l][right] -= 1
if leftClear:
B[i + l][left] -= 1
def removeQueen(B, i, j):
B[i][j] -= 1
B[i][0] = 0
down = j
# vertical downwards queen line of sight
for k in range(len(B)):
if k != i:
B[k][down] += 1
# diagonal downwards queen line of sight
for l in range(len(B)):
rightClear = (i + l < len(B)) and (j + l < len(B))
leftClear = (i + l < len(B)) and (j - l > 0)
right = j + l
left = j - l
if rightClear:
B[i + l][right] += 1
if leftClear:
B[i + l][left] += 1
def findSolutions(B, i, mode):
n = len(B)-1
v = False
if mode == "verbose":
v = True
printer = "("
if i > n:
if v:
for e in range(len(B)):
printer += str(B[e][0])
if e != n:
printer += ", " #if not at end, keep adding separators to text
printer += ")"
print(printer)
B[0][0] += 1
return 1
else:
        for j in range(1, len(B)):  # column 0 holds bookkeeping, so queens go in columns 1..n
if B[i][j] == 0:
placeQueen(B, i, j)
findSolutions(B, i+1, mode)
removeQueen(B, i, j)
return B[0][0]
def printBoard(B):
sol = None
if B[0][1] == 1:
sol = findSolutions(B, 1, "verbose")
else:
sol = findSolutions(B, 1, "")
print(str((len(B) - 1)) + "-Queens has " + str(sol) + " solutions")
def main(args):
usage = "Usage: Queens [-v] number \nOption: -v verbose output, print all solutions"
if len(args) < 1:
print(usage)
return
ns = "" # string that holds n
v = None
if len(args) == 1:
v = False
ns = args[0]
else:
if not args[0] == "-v":
print(usage)
return
v = True
ns = args[1]
n = 0
try:
n = int(ns)
except ValueError:
print(usage)
return
# Parsing finished.
# Create board & run other methods.
B = []
for n1 in range(n+1):
B_mini = []
for n2 in range(n+1):
B_mini.append(0)
B.append(B_mini)
# Put a T/F (1/0) in B[0][1] to indicate mode
if v:
B[0][1] = 1
else:
B[0][1] = 0
printBoard(B)
if __name__ == "__main__":
# execute only if run as a script
args = input("Queens ").split(" ")
main(args) | 3.46875 | 3 |
src/extendable_pydantic/__init__.py | lmignon/pydantic-ext | 0 | 12797088 | <gh_stars>0
"""A lib to define pydantic models extendable at runtime."""
# shortcut to main used class
from .main import ExtendableModelMeta
from .version import __version__
| 1.179688 | 1 |
ScratchServer.py | manab/MugbotActionDesigner | 1 | 12797089 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
import socketserver
except:
import SocketServer as socketserver
import signal
import socket
import serial
import os
import json
import sys
HOST, PORT = '0.0.0.0', 51234
serial = serial.Serial('/dev/ttyACM0', 57600)
class ScratchHandler(socketserver.BaseRequestHandler):
def setup(self):
os.system('/home/pi/mugbot-talk-1.1.sh ' + 'スクラッチとの接続を開始しました &')
# for speak in English
# os.system('espeak -ven+f3 -k5 -s150 "Scratch connection established" &')
def handle(self):
while True:
self.data = self.request.recv(1024).strip()
if len(self.data) == 0:
break
json_obj = json.loads(self.data)
action = json_obj['action']
arg = json_obj['arg']
if action == 'face_y':
arg = min(max(int(arg) + 95, 80), 110)
serial.write((str(arg) + 'y').encode())
elif action == 'face_x':
arg = min(max(int(arg) + 90, 5), 175)
serial.write((str(arg) + 'x').encode())
elif action == 'eye':
arg = min(max(int(arg), 0), 255)
serial.write((str(arg) + 'z').encode())
elif action == 'speech':
serial.write('t'.encode())
if sys.version_info.major == 2:
arg = arg.encode('utf-8')
os.system('/home/pi/mugbot-talk-1.1.sh ' + arg + ' &')
# for speak in English
# os.system('espeak -ven+f3 -k5 -s150 ' + '"' + arg +'" &')
serial.write('n'.encode())
else:
print('Unknown Command')
class ScratchServer(socketserver.ThreadingTCPServer):
def server_bind(self):
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
self.socket.bind(self.server_address)
if __name__ == '__main__':
signal.signal(signal.SIGINT, signal.SIG_DFL)
server = ScratchServer((HOST, PORT), ScratchHandler)
server.serve_forever()
| 2.578125 | 3 |
bytesink.py | hdb3/BGPstream | 0 | 12797090 | <gh_stars>0
# bytesink.py
from logger import trace, info, show, warn, error
from basemessage import WireMessage
from nullsink import NullSink
class Sink:
def __init__(self,source):
self.input_type = WireMessage
self.iter = source
def run(self):
trace("")
n = 0
s = 0
_max = 0
_min = None
for msg in self.iter:
if n == 0:
info("message type = %s" % str(type(msg)))
n += 1
s += len(msg)
if len(msg) > _max:
_max = len(msg)
if not _min or _min > len(msg):
_min = len(msg)
show("%d messages read" % n)
show("%d bytes read" % s)
show("%d = average message size" % int(s/n))
show("%d = minimum message size" % _min)
show("%d = maximum message size" % _max)
| 2.4375 | 2 |
mycroft/tests/logic/test_run_actions.py | Yelp/mycroft | 50 | 12797091 | # -*- coding: utf-8 -*-
import pytest
from boto.dynamodb2.fields import HashKey
from boto.dynamodb2.fields import RangeKey
from boto.dynamodb2.fields import GlobalAllIndex
from boto.dynamodb2.table import Table
from mycroft.models.aws_connections import get_avro_schema
from mycroft.models.etl_records import ETLRecords
from mycroft.logic.run_actions import _parse_runs
from mycroft.logic.run_actions import list_runs_by_job_id
from tests.models.test_abstract_records import dynamodb_connection # noqa
from tests.models.test_abstract_records import NAME_TO_SCHEMA
from tests.models.test_etl_record import FakeETLRecord
from tests.data.etl_record import SAMPLE_JOB_ID
from tests.data.etl_record import SAMPLE_RECORD_JOBS
BASE_DICT = {
'hash_key': None,
'data_date': None,
'etl_status': None,
'et_runtime': None,
'et_starttime': None,
'load_runtime': None,
'load_starttime': None,
'redshift_id': None,
's3_path': None,
'updated_at': None,
'run_by': None,
'job_id': None,
'etl_error': None,
'additional_arguments': None,
}
class TestRunActions(object):
@pytest.yield_fixture(scope='module') # noqa
def etl_records(self, dynamodb_connection):
avro_schema = get_avro_schema('mycroft/avro/etl_record.json')
index_job_id = GlobalAllIndex(
ETLRecords.INDEX_JOB_ID_AND_DATA_DATE,
parts=[HashKey('job_id'), RangeKey('data_date')])
table = Table.create(
'ETLRecords',
schema=NAME_TO_SCHEMA['etl_records'],
connection=dynamodb_connection,
global_indexes=[index_job_id])
etl_records = ETLRecords(persistence_object=table, avro_schema_object=avro_schema)
for job in SAMPLE_RECORD_JOBS:
assert etl_records.put(**job)
yield etl_records
assert table.delete()
def test__parse_runs_empty_run(self):
empty_runs = [FakeETLRecord(BASE_DICT)]
result = _parse_runs(empty_runs)
assert result['runs'][0] == BASE_DICT
def test_list_runs_by_job_id(self, etl_records):
return_value = list_runs_by_job_id(SAMPLE_JOB_ID, etl_records)
expected_count = len([job for job in SAMPLE_RECORD_JOBS
if job['job_id'] == SAMPLE_JOB_ID])
assert len(return_value['runs']) == expected_count
@pytest.mark.parametrize("job_id", ['y', '..', '!', '', '_'])
def test_list_runs_by_job_id_bad_job_id(self, job_id):
with pytest.raises(ValueError) as e:
list_runs_by_job_id(job_id, None)
assert e.exconly().startswith("ValueError: invalid job_id")
| 1.8125 | 2 |
accounts/forms.py | shivBoy77/Syk_toW | 0 | 12797092 | from django import forms
from django.contrib.auth.models import Group
from django.contrib.auth.forms import ReadOnlyPasswordHashField
from django.core.exceptions import ValidationError
from django.contrib.auth import authenticate
from .models import User, Profile
from django.contrib import messages
from phonenumber_field.formfields import PhoneNumberField
from phonenumber_field.widgets import PhoneNumberPrefixWidget
from django.core.validators import RegexValidator
class UserAdminCreationForm(forms.ModelForm):
"""A form for creating new users. Includes all the required
fields, plus a repeated password."""
password1 = forms.CharField(label='Password', widget=forms.PasswordInput)
password2 = forms.CharField(
label='Password confirmation', widget=forms.PasswordInput)
class Meta:
model = User
fields = ('email', 'username')
def clean_username(self):
username = self.cleaned_data.get('username').lower()
try:
User.objects.get(username__exact=username)
except User.DoesNotExist:
return username
raise forms.ValidationError("This username is already taken.")
def clean_password2(self):
# Check that the two password entries match
password1 = self.cleaned_data.get("password1")
password2 = self.cleaned_data.get("password2")
if password1 and password2 and password1 != password2:
raise ValidationError("Passwords don't match")
return password2
def clean_email(self):
email = self.cleaned_data['email'].lower()
try:
account = User.objects.get(email=email)
except User.DoesNotExist:
return email
raise forms.ValidationError(f"Email {email} is already in use.")
def save(self, commit=True):
# Save the provided password in hashed format
user = super().save(commit=False)
user.set_password(self.cleaned_data["password2"])
if commit:
user.save()
return user
class UserAdminChangeForm(forms.ModelForm):
"""A form for updating users. Includes all the fields on
the user, but replaces the password field with admin's
password hash display field.
"""
password = ReadOnlyPasswordHashField()
class Meta:
model = User
fields = ('__all__')
def clean_password(self):
# Regardless of what the user provides, return the initial value.
# This is done here, rather than on the field, because the
# field does not have access to the initial value
return self.initial["password"]
class UserProfileForm(forms.ModelForm):
GENDER = (
('Male', 'Male'),
('Female', 'Female'),
('Other', 'Other'),
)
gender = forms.ChoiceField(
label='Gender', choices=GENDER, widget=forms.RadioSelect, required=False)
date_of_birth = forms.DateField(widget=forms.DateInput(
attrs={'type': 'date'}), required=False)
phonenumber = PhoneNumberField(
widget = PhoneNumberPrefixWidget(initial='IN')
)
class Meta:
model = Profile
fields = ['first_name', 'last_name', 'phonenumber', 'country', 'avatar', 'address', 'gender',
'date_of_birth', 'pincode', 'language', 'location', 'website', 'bio']
widgets = {
'first_name': forms.TextInput(attrs={'placeholder': 'your first name'}),
'last_name': forms.TextInput(attrs={'placeholder': 'your last name'}),
'email': forms.EmailInput(attrs={'placeholder': 'you <EMAIL>'}),
'country': forms.TextInput(attrs={'placeholder': 'country you where you live'}),
'address': forms.TextInput(attrs={'placeholder': 'your address where you live'}),
'pincode': forms.TextInput(attrs={'placeholder': 'pincode'}),
'language': forms.TextInput(attrs={'placeholder': 'language'}),
'location': forms.TextInput(attrs={'placeholder': 'location'}),
'bio': forms.TextInput(attrs={'placeholder': 'about you'}),
'website': forms.TextInput(attrs={'placeholder': 'your website url e.g. https://your_website.com'}),
}
| 2.53125 | 3 |
smart_heating/models.py | spiegelm/smart-heating-server | 0 | 12797093 | """
Copyright 2016 <NAME>, <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from abc import ABCMeta, abstractmethod
from django.core import validators
from django.db import models
alpha_numeric_validator = validators.RegexValidator(r'^[0-9a-zA-Z]+$', 'Only alphanumeric characters are allowed.')
rfid_validator = alpha_numeric_validator
class Model(models.Model):
"""
Base class for all models.
"""
__metaclass__ = ABCMeta
class Meta:
abstract = True
def __repr__(self):
fields_string = ', '.join(['%s:"%s"' % (field.name, getattr(self, field.name)) for field in self._meta.fields])
return '<%s(%s)>' % (self.__class__._meta.object_name, fields_string)
def __str__(self):
return str(self.pk)
@abstractmethod
def get_recursive_pks(self):
"""
Returns a list of primary keys of all recursive parents.
Used to determine the URL of an object.
"""
pass
class Residence(Model):
"""
Represents a residence.
"""
rfid = models.CharField(primary_key=True, max_length=100, validators=[rfid_validator])
class Meta:
ordering = ('rfid',)
def get_recursive_pks(self):
pks = [self.pk]
return pks
class User(Model):
"""
Represents a user.
"""
imei = models.CharField(primary_key=True, max_length=100, validators=[alpha_numeric_validator])
name = models.CharField(max_length=100)
residence = models.ForeignKey('Residence', related_name='users')
class Meta:
ordering = ('imei',)
def get_recursive_pks(self):
pks = self.residence.get_recursive_pks()
pks.append(self.pk)
return pks
class Room(Model):
"""
Represents a room.
"""
# id is automatically generated if no other primary_key is defined
name = models.CharField(max_length=100)
residence = models.ForeignKey('Residence', related_name='rooms')
class Meta:
ordering = ('name',)
def get_recursive_pks(self):
pks = self.residence.get_recursive_pks()
pks.append(self.pk)
return pks
class Thermostat(Model):
"""
Represents a thermostat.
"""
rfid = models.CharField(primary_key=True, max_length=100, validators=[rfid_validator])
room = models.ForeignKey('Room', related_name='thermostats')
name = models.CharField(max_length=100, blank=False)
class Meta:
ordering = ('rfid',)
def get_recursive_pks(self):
pks = self.room.get_recursive_pks()
pks.append(self.pk)
return pks
class Temperature(Model):
"""
Represents a temperature.
"""
datetime = models.DateTimeField(primary_key=True)
value = models.FloatField()
thermostat = models.ForeignKey('Thermostat', related_name='temperatures')
class Meta:
ordering = ('datetime',)
def get_recursive_pks(self):
pks = self.thermostat.get_recursive_pks()
assert (self.pk == self.datetime)
pks.append(self.datetime.isoformat())
return pks
class ThermostatMetaEntry(Model):
"""
    Represents a thermostat meta entry containing signal strength, uptime and battery level.
"""
id = models.AutoField(primary_key=True)
datetime = models.DateTimeField()
rssi = models.IntegerField(null=True)
uptime = models.IntegerField(null=True)
battery = models.IntegerField(null=True)
thermostat = models.ForeignKey('Thermostat', related_name='meta_entries')
class Meta:
unique_together = ('thermostat', 'datetime')
ordering = ('datetime',)
def get_recursive_pks(self):
pks = self.thermostat.get_recursive_pks()
pks.append(self.pk)
return pks
class Device(Model):
"""
Base class for a physical device with an RFID number and MAC address.
"""
__metaclass__ = ABCMeta
rfid = models.CharField(primary_key=True, max_length=100, validators=[rfid_validator])
mac = models.CharField(max_length=17, unique=True)
class Meta:
abstract = True
def get_recursive_pks(self):
return [self.pk]
class RaspberryDevice(Device):
"""
Represents a physical Raspberry Pi device.
"""
@property
def residence(self):
residences = Residence.objects.filter(rfid=self.rfid)
assert (0 <= len(residences) <= 1)
if len(residences) > 0:
return residences[0]
else:
return None
@property
def thermostat_devices(self):
"""
:return: Thermostat devices associated to the Raspberry Pi.
"""
residence = self.residence
if residence is None:
return None
rooms = Room.objects.filter(residence=residence)
room_pks = [room.pk for room in rooms]
thermostats = Thermostat.objects.filter(room__in=room_pks)
thermostat_rfids = [thermostat.rfid for thermostat in thermostats]
thermostat_devices = ThermostatDevice.objects.filter(rfid__in=thermostat_rfids)
return thermostat_devices
class ThermostatDevice(Device):
"""
Represents a physical thermostat device.
"""
@property
def thermostat(self):
thermostats = Thermostat.objects.filter(rfid=self.rfid)
assert (0 <= len(thermostats) <= 1)
if len(thermostats) > 0:
return thermostats[0]
else:
return None
class TimetableEntry(Model):
"""
Base class for a weekly timetable entry.
"""
__metaclass__ = ABCMeta
MONDAY = 0
TUESDAY = 1
WEDNESDAY = 2
THURSDAY = 3
FRIDAY = 4
SATURDAY = 5
SUNDAY = 6
DAY_IN_WEEK_CHOICES = [
(MONDAY, 'Monday'),
(TUESDAY, 'Tuesday'),
(WEDNESDAY, 'Wednesday'),
(THURSDAY, 'Thursday'),
(FRIDAY, 'Friday'),
(SATURDAY, 'Saturday'),
(SUNDAY, 'Sunday'),
]
day = models.CharField(max_length=3, choices=DAY_IN_WEEK_CHOICES)
time = models.TimeField()
class Meta:
abstract = True
class HeatingTableEntry(TimetableEntry):
"""
Represents an entry of a heating schedule.
"""
class Meta:
unique_together = ('day', 'time', 'thermostat')
ordering = ('day', 'time')
temperature = models.FloatField(validators=[validators.MinValueValidator(5), validators.MaxValueValidator(30)])
thermostat = models.ForeignKey(Thermostat, related_name='heating_table_entries')
def get_recursive_pks(self):
pks = self.thermostat.get_recursive_pks()
pks.append(self.pk)
return pks
class OccupancyPredictionEntry(TimetableEntry):
"""
Represents an user occupancy prediction entry.
This is a stub and is intended to be used in future work.
"""
class Meta:
unique_together = ('day', 'time', 'user')
ordering = ('day', 'time')
user = models.ForeignKey(User)
def get_recursive_pks(self):
pks = self.user.get_recursive_pks()
pks.append(self.pk)
return pks
| 2.296875 | 2 |
frozenlake.py | alexandrefch/q-learning-python | 1 | 12797094 | <gh_stars>1-10
import gym
from agent import *
from progressBar import *
from matplotlib.pyplot import *
class Frozenlake():
"""
A class to manage a frozen lake from gym lib and make an agent play.
...
Attributes
----------
setting : Dict()
        Dictionary of all game settings
    agent : Agent()
        Agent object that will play and learn
    env : Env()
        The gym environment object
    Methods
    -------
    reset():
        Reset the gym environment
    play(is_training=True):
        Make the agent play until the end of the game
    train():
        Make the agent play for the configured number of training rounds
    test():
        Make the agent play 1000 games to gather statistics
"""
def __init__(self, game_setting, ai_setting):
"""
        Create the agent from ai_setting and set up the gym environment
        Parameters
        ----------
        game_setting : Dict()
            Dictionary of all game settings
        ai_setting : Dict()
            Dictionary of all agent settings
"""
game_setting["lake_type"] = "FrozenLake-v1" if game_setting["lake_size"] == "4x4" else "FrozenLake8x8-v1"
self.cell_count = 16 if game_setting["lake_size"] == "4x4" else 64
ai_setting["state_count"] = self.cell_count
if(ai_setting["q_learning_type"] == "simple"):
self.agent = AgentSimpleQLearn(ai_setting)
else:
self.agent = AgentDoubleQLearn(ai_setting)
self.setting = game_setting
self.setting["lake_type"] = "FrozenLake-v1" if game_setting["lake_size"] == "4x4" else "FrozenLake8x8-v1"
self.env = gym.make(self.setting["lake_type"])
self.reset()
def reset(self):
"""
Reset gym environment
"""
self.env.reset()
def play(self, is_training=True):
"""
Make agent play until end of the game
Parameters
----------
        is_training : boolean
            True if the agent should learn and update its Q-table
"""
while True:
action = self.agent.next_step()
new_state, _, is_done, _ = self.env.step(action)
reward = 0
is_win = new_state == self.cell_count - 1
if is_done:
reward = self.setting["reward"] if is_win else self.setting["punish"]
self.agent.update_table(new_state,reward,is_training)
if is_done:
return new_state == self.cell_count - 1
def train(self):
"""
        Make the agent play for the configured number of training rounds
"""
wins_rate = [0]
rounds = [0]
win_amount = 0
        for count in progressbar(range(self.setting["round_to_train"]), "Training", 33):
self.reset()
is_win = self.play()
if is_win:
win_amount += 1
if (count+1) % int(self.setting["round_to_train"] / 100) == 0:
wins_rate.append(win_amount*100/count)
rounds.append(count)
figure()
plot(rounds,wins_rate)
title("Win rate over training time")
ylim(0,100)
xlim(0,self.setting["round_to_train"])
draw()
def test(self):
"""
        Make the agent play 1000 games to gather statistics
"""
win_amount = 0
for _ in progressbar(range(1000),"Test",33):
self.reset()
is_win = self.play(False)
if is_win:
win_amount += 1
return win_amount * 100 / 1000
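# Added usage sketch (illustrative only): the dictionary keys shown here are
# the ones read by this file; the agent classes in agent.py may require extra
# hyperparameters (e.g. learning rate, epsilon), which are assumptions not
# covered here.
def example_run():
    game_setting = {
        "lake_size": "4x4",
        "reward": 1.0,
        "punish": -1.0,
        "round_to_train": 10000,
    }
    ai_setting = {
        "q_learning_type": "simple",
        # ...plus whatever hyperparameters the agent classes expect
    }
    lake = Frozenlake(game_setting, ai_setting)
    lake.train()
    print("Win rate over 1000 test games: %.1f%%" % lake.test())
    show()  # display the training curve drawn in train()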
| 3.34375 | 3 |
src/main.py | axelbr/jax-estimators | 0 | 12797095 | """
Adapted Code from https://github.com/AtsushiSakai/PythonRobotics
"""
from functools import partial
from estimation import ExtendedKalmanFilter, KalmanFilter
import jax.numpy as jnp
import numpy as np
from jax import jacfwd, jit
import matplotlib.pyplot as plt
from src.environments import DiffDriveRobot
from util import plot, History
def _motion_model(x, u, w, dt=0.01):
return jnp.array([
x[0] + x[3] * jnp.cos(x[2]) * dt,
x[1] + x[3] * jnp.sin(x[2]) * dt,
x[2] + u[1] * dt,
u[0]
]) + w
def _observation_model(x, v):
H = jnp.array([[1, 0, 0, 0],
[0, 1, 0, 0]])
return H @ x + v
def controller(x):
v = 1.0 # [m/s]
yawrate = 0.1 # [rad/s]
u = jnp.array([v, yawrate])
return u
def main():
env = DiffDriveRobot()
z = env.reset()
x_hat = jnp.zeros(4) # [x, y, yaw, velocity]
x_cov = jnp.eye(4)
Q = jnp.diag(jnp.array([
0.1, # variance of location on x-axis
0.1, # variance of location on y-axis
jnp.deg2rad(0.5), # variance of yaw angle
0.1 # variance of velocity
])) ** 2 # predict state covariance
R = jnp.diag(jnp.array([2, 2])) ** 2 # Observation x,y position covariance
filter = ExtendedKalmanFilter(
process_model=_motion_model,
observation_model=_observation_model,
process_noise_covariance=Q,
observation_noise_covariance=R
)
filter = jit(filter)
history = History()
history.update(x=x_hat, z=z, x_hat=x_hat, covariance=x_cov)
for t in range(5000):
print(t)
u = controller(x_hat) # [velocity, yaw_rate]
obs, _, _, info = env.step(u)
x_hat, x_cov = filter(x=x_hat, P=x_cov, u=u, z=obs)
history.update(x=info['x'], z=obs, x_hat=x_hat, covariance=x_cov)
if t % 100 == 0:
plot(data=history)
if __name__ == '__main__':
main() | 2.359375 | 2 |
setup.py | DNKonanov/uni_cli | 0 | 12797096 | <reponame>DNKonanov/uni_cli
import setuptools
setuptools.setup(
name="uniqpy",
version="0.1.3",
author="<NAME>",
author_email="<EMAIL>",
description="UNIQUAC-based tool for multicomponent VLEs",
long_description="uniqpy",
long_description_content_type="",
url="https://github.com/DNKonanov/uni_cli",
project_urls={
"Bug Tracker": "https://github.com/DNKonanov/uni_cli",
},
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires=">=3.7",
include_package_data=True,
packages=['uniqpy'],
install_requires=[
'numpy',
'scipy'
],
entry_points={
'console_scripts': [
'uniqpy=uniqpy.uni_cli:main'
]
}
)
| 1.382813 | 1 |
examples/plot_logreg_timings.py | Badr-MOUFAD/celer | 0 | 12797097 | """
==================================================================
Compare LogisticRegression solver with sklearn's liblinear backend
==================================================================
"""
import time
import warnings
import numpy as np
from numpy.linalg import norm
import matplotlib.pyplot as plt
from sklearn import linear_model
from libsvmdata import fetch_libsvm
from celer import LogisticRegression
warnings.filterwarnings("ignore", message="Objective did not converge")
warnings.filterwarnings("ignore", message="Liblinear failed to converge")
X, y = fetch_libsvm("news20.binary")
C_min = 2 / norm(X.T @ y, ord=np.inf)
C = 20 * C_min
def pobj_logreg(w):
return np.sum(np.log(1 + np.exp(-y * (X @ w)))) + 1. / C * norm(w, ord=1)
pobj_celer = []
t_celer = []
for n_iter in range(10):
t0 = time.time()
clf = LogisticRegression(
C=C, solver="celer-pn", max_iter=n_iter, tol=0).fit(X, y)
t_celer.append(time.time() - t0)
w_celer = clf.coef_.ravel()
pobj_celer.append(pobj_logreg(w_celer))
pobj_celer = np.array(pobj_celer)
pobj_libl = []
t_libl = []
for n_iter in np.arange(0, 50, 10):
t0 = time.time()
clf = linear_model.LogisticRegression(
C=C, solver="liblinear", penalty='l1', fit_intercept=False,
max_iter=n_iter, random_state=0, tol=1e-10).fit(X, y)
t_libl.append(time.time() - t0)
w_libl = clf.coef_.ravel()
pobj_libl.append(pobj_logreg(w_libl))
pobj_libl = np.array(pobj_libl)
p_star = min(pobj_celer.min(), pobj_libl.min())
plt.close("all")
fig = plt.figure(figsize=(4, 2), constrained_layout=True)
plt.semilogy(t_celer, pobj_celer - p_star, label="Celer-PN")
plt.semilogy(t_libl, pobj_libl - p_star, label="liblinear")
plt.legend()
plt.xlabel("Time (s)")
plt.ylabel("objective suboptimality")
plt.show(block=False)
| 2.53125 | 3 |
test/test_definition.py | illingwo/sam-web-client | 0 | 12797098 | #! /usr/bin/env python
import testbase
import unittest
import samweb_client
import samweb_cli
import time,os
defname = 'test-project'
class TestDefinition(testbase.SamdevTest):
def test_descDefinition_DefNotFound(self):
fake_def_name = 'doesnotexist_%d' % time.time()
self.assertRaises(samweb_client.exceptions.DefinitionNotFound, self.samweb.descDefinition, fake_def_name)
self.assertRaises(samweb_client.exceptions.DefinitionNotFound, self.samweb.descDefinitionDict, fake_def_name)
def test_descDefinition(self):
output = self.samweb.descDefinition(defname)
assert defname in output
d = self.samweb.descDefinitionDict(defname)
assert d['defname'] == defname
def test_snapshot(self):
output = self.samweb.takeSnapshot(defname)
self.assertEquals(int(output),1)
def test_create_rename_delete_definition(self):
defname = 'samweb_client_test_def_%s_%d' % (os.getpid(), int(time.time()))
self.samweb.createDefinition(defname, "file_name dummy", "illingwo", "samdev")
d = self.samweb.descDefinition(defname)
assert defname in d
d = self.samweb.descDefinitionDict(defname)
assert defname == d["defname"]
defname2 = defname + '_2'
self.samweb.modifyDefinition(defname,defname=defname2)
d = self.samweb.descDefinitionDict(defname2)
assert defname2 == d["defname"]
self.samweb.deleteDefinition(defname2)
class TestDefinitionCommands(testbase.SAMWebCmdTest):
def test_takeSnapshot(self):
cmdline = '-e samdev take-snapshot %s' % defname
self.check_cmd_return(cmdline.split())
assert "1\n" == self.stdout
if __name__ == '__main__':
unittest.main()
| 2.3125 | 2 |
utils_nlp/models/gensen/utils.py | gohanlon/nlp | 4,407 | 12797099 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
"""Minibatching utilities."""
import itertools
import operator
import os
import pickle
import numpy as np
import torch
from sklearn.utils import shuffle
from torch.autograd import Variable
# Change to python3+.
# from itertools import zip
class DataIterator(object):
"""Data Iterator."""
@staticmethod
def _trim_vocab(vocab, vocab_size):
"""Discard start, end, pad and unk tokens if already present.
Args:
            vocab(dict): Word-count dictionary mapping words to frequencies.
            vocab_size(int): The size of the vocabulary.
        Returns:
            word2id(dict): Word to index mapping.
            id2word(dict): Index to word mapping.
"""
if "<s>" in vocab:
del vocab["<s>"]
if "<pad>" in vocab:
del vocab["<pad>"]
if "</s>" in vocab:
del vocab["</s>"]
if "<unk>" in vocab:
del vocab["<unk>"]
word2id = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
id2word = {0: "<s>", 1: "<pad>", 2: "</s>", 3: "<unk>"}
sorted_word2id = sorted(
vocab.items(), key=operator.itemgetter(1), reverse=True
)
if vocab_size != -1:
sorted_words = [x[0] for x in sorted_word2id[:vocab_size]]
else:
sorted_words = [x[0] for x in sorted_word2id]
for ind, word in enumerate(sorted_words):
word2id[word] = ind + 4
for ind, word in enumerate(sorted_words):
id2word[ind + 4] = word
return word2id, id2word
def construct_vocab(
self, sentences, vocab_size, lowercase=False, charlevel=False
):
"""Create vocabulary.
Args:
sentences(list): The list of sentences.
vocab_size(int): The size of vocabulary.
            lowercase(bool): Whether to lowercase the sentences.
            charlevel(bool): Whether to build a character-level vocabulary
                (if False, sentences are split on whitespace).
        Returns:
            word2id(dict): Word to index mapping.
            id2word(dict): Index to word mapping.
"""
vocab = {}
for sentence in sentences:
if isinstance(sentence, str):
if lowercase:
sentence = sentence.lower()
if not charlevel:
sentence = sentence.split()
for word in sentence:
if word not in vocab:
vocab[word] = 1
else:
vocab[word] += 1
word2id, id2word = self._trim_vocab(vocab, vocab_size)
return word2id, id2word
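# Added illustrative sketch (not part of the original module): building a small
# vocabulary with the helper above; ids 0-3 are always reserved for "<s>",
# "<pad>", "</s>" and "<unk>", and more frequent words get smaller ids.
def _construct_vocab_example():
    word2id, id2word = DataIterator().construct_vocab(
        ["a b a", "b c"], vocab_size=-1, lowercase=True
    )
    return word2id, id2word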
class BufferedDataIterator(DataIterator):
"""Multi Parallel corpus data iterator."""
def __init__(
self,
src,
trg,
src_vocab_size,
trg_vocab_size,
tasknames,
save_dir,
buffer_size=1e6,
lowercase=False,
seed=0,
):
"""Initialize params.
Args:
src(list): source dataset.
trg(list): target dataset.
src_vocab_size(int): The size of source vocab.
trg_vocab_size(int): The size of target vocab.
tasknames(list): The list of task names.
save_dir(str): The saving dir.
buffer_size(float): Buffer size.
            lowercase(bool): Whether to lowercase the data.
"""
self.seed = seed
self.fname_src = src
self.fname_trg = trg
self.src_vocab_size = src_vocab_size
self.trg_vocab_size = trg_vocab_size
self.tasknames = tasknames
self.save_dir = save_dir
self.buffer_size = buffer_size
self.lowercase = lowercase
# Open a list of file pointers to all the files.
self.f_src = [
open(fname, "r", encoding="utf-8") for fname in self.fname_src
]
self.f_trg = [
open(fname, "r", encoding="utf-8") for fname in self.fname_trg
]
# Initialize dictionaries that contain sentences & word mapping dicts
self.src = [
{"data": [], "word2id": None, "id2word": None}
for i in range(len(self.fname_src))
]
self.trg = [
{"data": [], "word2id": None, "id2word": None}
for i in range(len(self.fname_trg))
]
self.build_vocab()
"""Reset file pointers to the start after reading the file to
build vocabularies."""
for idx in range(len(self.src)):
self._reset_filepointer(idx)
for idx in range(len(self.src)):
self.fetch_buffer(idx)
def _reset_filepointer(self, idx):
"""Reset file pointer.
Args:
idx(int): Index used to reset file pointer.
"""
self.f_src[idx] = open(self.fname_src[idx], "r", encoding="utf-8")
self.f_trg[idx] = open(self.fname_trg[idx], "r", encoding="utf-8")
def fetch_buffer(self, idx, reset=True):
"""Fetch sentences from the file into the buffer.
Args:
idx(int): Index used to fetch the sentences.
            reset(bool): Whether to clear the current buffer first.
"""
# Reset the contents of the current buffer.
if reset:
self.src[idx]["data"] = []
self.trg[idx]["data"] = []
# Populate buffer
for src, trg in zip(self.f_src[idx], self.f_trg[idx]):
if len(self.src[idx]["data"]) == self.buffer_size:
break
if self.lowercase:
self.src[idx]["data"].append(src.lower().split())
self.trg[idx]["data"].append(trg.lower().split())
else:
self.src[idx]["data"].append(src.split())
self.trg[idx]["data"].append(trg.split())
# Sort sentences by decreasing length (hacky bucketing)
self.src[idx]["data"], self.trg[idx]["data"] = zip(
*sorted(
zip(self.src[idx]["data"], self.trg[idx]["data"]),
key=lambda x: len(x[0]),
reverse=True,
)
)
"""If buffer isn't full after reading the contents of the file,
cycle around. """
if len(self.src[idx]["data"]) < self.buffer_size:
assert len(self.src[idx]["data"]) == len(self.trg[idx]["data"])
# Cast things to list to avoid issue with calling .append above
self.src[idx]["data"] = list(self.src[idx]["data"])
self.trg[idx]["data"] = list(self.trg[idx]["data"])
self._reset_filepointer(idx)
self.fetch_buffer(idx, reset=False)
def build_vocab(self):
"""Build a memory efficient vocab."""
# Construct common source vocab.
# Check if save directory exists.
if not os.path.exists(self.save_dir):
raise ValueError("Could not find save dir : %s" % self.save_dir)
# Check if a cached vocab file exists.
if os.path.exists(os.path.join(self.save_dir, "src_vocab.pkl")):
vocab = pickle.load(
open(os.path.join(self.save_dir, "src_vocab.pkl"), "rb")
)
word2id, id2word = vocab["word2id"], vocab["id2word"]
# If not, compute the vocab from scratch and store a cache.
else:
word2id, id2word = self.construct_vocab(
itertools.chain.from_iterable(self.f_src),
self.src_vocab_size,
self.lowercase,
)
pickle.dump(
{"word2id": word2id, "id2word": id2word},
open(os.path.join(self.save_dir, "src_vocab.pkl"), "wb"),
)
for corpus in self.src:
corpus["word2id"], corpus["id2word"] = word2id, id2word
# Do the same for the target vocabulary.
if os.path.exists(os.path.join(self.save_dir, "trg_vocab.pkl")):
vocab = pickle.load(
open(os.path.join(self.save_dir, "trg_vocab.pkl"), "rb")
)
for idx, (corpus, fname) in enumerate(zip(self.trg, self.f_trg)):
word2id, id2word = (
vocab[self.tasknames[idx]]["word2id"],
vocab[self.tasknames[idx]]["id2word"],
)
corpus["word2id"], corpus["id2word"] = word2id, id2word
else:
trg_vocab_dump = {}
for idx, (corpus, fname) in enumerate(zip(self.trg, self.f_trg)):
word2id, id2word = self.construct_vocab(
fname, self.trg_vocab_size, self.lowercase
)
corpus["word2id"], corpus["id2word"] = word2id, id2word
trg_vocab_dump[self.tasknames[idx]] = {}
trg_vocab_dump[self.tasknames[idx]]["word2id"] = word2id
trg_vocab_dump[self.tasknames[idx]]["id2word"] = id2word
pickle.dump(
trg_vocab_dump,
open(os.path.join(self.save_dir, "trg_vocab.pkl"), "wb"),
)
def shuffle_dataset(self, idx):
"""Shuffle current buffer."""
self.src[idx]["data"], self.trg[idx]["data"] = shuffle(
self.src[idx]["data"],
self.trg[idx]["data"],
random_state=self.seed,
)
def get_parallel_minibatch(
self, corpus_idx, index, batch_size, max_len_src, max_len_trg
):
"""Prepare minibatch.
Args:
corpus_idx(int): Corpus Index.
index(int): Index.
batch_size(int): Batch Size.
            max_len_src(int): Max length for source sentences.
            max_len_trg(int): Max length for target sentences.
Returns: minibatch of src-trg pairs(dict).
"""
src_lines = [
["<s>"] + line[: max_len_src - 2] + ["</s>"]
for line in self.src[corpus_idx]["data"][
index : index + batch_size
]
]
trg_lines = [
["<s>"] + line[: max_len_trg - 2] + ["</s>"]
for line in self.trg[corpus_idx]["data"][
index : index + batch_size
]
]
"""Sort sentences by decreasing length within a minibatch for
`torch.nn.utils.packed_padded_sequence`"""
src_lens = [len(line) for line in src_lines]
sorted_indices = np.argsort(src_lens)[::-1]
sorted_src_lines = [src_lines[idx] for idx in sorted_indices]
sorted_trg_lines = [trg_lines[idx] for idx in sorted_indices]
sorted_src_lens = [len(line) for line in sorted_src_lines]
sorted_trg_lens = [len(line) for line in sorted_trg_lines]
max_src_len = max(sorted_src_lens)
max_trg_len = max(sorted_trg_lens)
# Map words to indices
input_lines_src = [
[
self.src[corpus_idx]["word2id"][w]
if w in self.src[corpus_idx]["word2id"]
else self.src[corpus_idx]["word2id"]["<unk>"]
for w in line
]
+ [self.src[corpus_idx]["word2id"]["<pad>"]]
* (max_src_len - len(line))
for line in sorted_src_lines
]
input_lines_trg = [
[
self.trg[corpus_idx]["word2id"][w]
if w in self.trg[corpus_idx]["word2id"]
else self.trg[corpus_idx]["word2id"]["<unk>"]
for w in line[:-1]
]
+ [self.trg[corpus_idx]["word2id"]["<pad>"]]
* (max_trg_len - len(line))
for line in sorted_trg_lines
]
output_lines_trg = [
[
self.trg[corpus_idx]["word2id"][w]
if w in self.trg[corpus_idx]["word2id"]
else self.trg[corpus_idx]["word2id"]["<unk>"]
for w in line[1:]
]
+ [self.trg[corpus_idx]["word2id"]["<pad>"]]
* (max_trg_len - len(line))
for line in sorted_trg_lines
]
# Cast lists to torch tensors
input_lines_src = Variable(torch.LongTensor(input_lines_src)).cuda()
input_lines_trg = Variable(torch.LongTensor(input_lines_trg)).cuda()
output_lines_trg = Variable(torch.LongTensor(output_lines_trg)).cuda()
sorted_src_lens = (
Variable(torch.LongTensor(sorted_src_lens), volatile=True)
.squeeze()
.cuda()
)
# Return minibatch of src-trg pairs
return {
"input_src": input_lines_src,
"input_trg": input_lines_trg,
"output_trg": output_lines_trg,
"src_lens": sorted_src_lens,
"type": "seq2seq",
}
class NLIIterator(DataIterator):
"""Data iterator for tokenized NLI datasets."""
def __init__(
self, train, dev, test, vocab_size, lowercase=True, vocab=None, seed=0
):
"""Initialize params.
        Each of train/dev/test is a tab-separated file of the form
premise \t hypothesis \t label.
Args:
            train(str): Path to the training dataset.
            dev(str): Path to the validation dataset.
            test(str): Path to the test dataset.
            vocab_size(int): The size of the vocabulary.
            lowercase(bool): Whether to lowercase the dataset.
            vocab(str): Path to a pickled vocabulary file, or None.
"""
self.seed = seed
self.train = train
self.dev = dev
self.test = test
self.vocab_size = vocab_size
self.lowercase = lowercase
self.vocab = vocab
self.train_lines = [
line.strip().lower().split("\t")
for line in open(self.train, encoding="utf-8")
]
self.dev_lines = [
line.strip().lower().split("\t")
for line in open(self.dev, encoding="utf-8")
]
self.test_lines = [
line.strip().lower().split("\t")
for line in open(self.test, encoding="utf-8")
]
if self.vocab is not None:
# binary mode doesn't take an encoding argument
self.vocab = pickle.load(open(self.vocab, "rb"))
self.word2id = self.vocab["word2id"]
self.id2word = self.vocab["id2word"]
self.vocab_size = len(self.word2id)
else:
self.word2id, self.id2word = self.construct_vocab(
[x[0] for x in self.train_lines]
+ [x[1] for x in self.train_lines],
self.vocab_size,
lowercase=self.lowercase,
)
# Label text to class mapping.
self.text2label = {"entailment": 0, "neutral": 1, "contradiction": 2}
self.shuffle_dataset()
def shuffle_dataset(self):
"""Shuffle training data."""
self.train_lines = shuffle(self.train_lines, random_state=self.seed)
def get_parallel_minibatch(self, index, batch_size, sent_type="train"):
"""Prepare minibatch.
Args:
index(int): Start index of the minibatch.
batch_size(int): Batch size.
sent_type(str): Type of dataset.
Returns:
dict for batch training.
"""
if sent_type == "train":
lines = self.train_lines
elif sent_type == "dev":
lines = self.dev_lines
else:
lines = self.test_lines
sent1 = [
["<s>"] + line[0].split() + ["</s>"]
for line in lines[index : index + batch_size]
]
sent2 = [
["<s>"] + line[1].split() + ["</s>"]
for line in lines[index : index + batch_size]
]
labels = [
self.text2label[line[2]]
for line in lines[index : index + batch_size]
]
sent1_lens = [len(line) for line in sent1]
sorted_sent1_indices = np.argsort(sent1_lens)[::-1]
sorted_sent1_lines = [sent1[idx] for idx in sorted_sent1_indices]
rev_sent1 = np.argsort(sorted_sent1_indices)
sent2_lens = [len(line) for line in sent2]
sorted_sent2_indices = np.argsort(sent2_lens)[::-1]
sorted_sent2_lines = [sent2[idx] for idx in sorted_sent2_indices]
rev_sent2 = np.argsort(sorted_sent2_indices)
sorted_sent1_lens = [len(line) for line in sorted_sent1_lines]
sorted_sent2_lens = [len(line) for line in sorted_sent2_lines]
max_sent1_len = max(sorted_sent1_lens)
max_sent2_len = max(sorted_sent2_lens)
sent1 = [
[
self.word2id[w] if w in self.word2id else self.word2id["<unk>"]
for w in line
]
+ [self.word2id["<pad>"]] * (max_sent1_len - len(line))
for line in sorted_sent1_lines
]
sent2 = [
[
self.word2id[w] if w in self.word2id else self.word2id["<unk>"]
for w in line
]
+ [self.word2id["<pad>"]] * (max_sent2_len - len(line))
for line in sorted_sent2_lines
]
sent1 = Variable(torch.LongTensor(sent1)).cuda()
sent2 = Variable(torch.LongTensor(sent2)).cuda()
labels = Variable(torch.LongTensor(labels)).cuda()
sent1_lens = (
Variable(torch.LongTensor(sorted_sent1_lens), requires_grad=False)
.squeeze()
.cuda()
)
sent2_lens = (
Variable(torch.LongTensor(sorted_sent2_lens), requires_grad=False)
.squeeze()
.cuda()
)
rev_sent1 = (
Variable(torch.LongTensor(rev_sent1), requires_grad=False)
.squeeze()
.cuda()
)
rev_sent2 = (
Variable(torch.LongTensor(rev_sent2), requires_grad=False)
.squeeze()
.cuda()
)
return {
"sent1": sent1,
"sent2": sent2,
"sent1_lens": sent1_lens,
"sent2_lens": sent2_lens,
"rev_sent1": rev_sent1,
"rev_sent2": rev_sent2,
"labels": labels,
"type": "nli",
}
def get_validation_minibatch(
src, trg, index, batch_size, src_word2id, trg_word2id
):
"""Prepare minibatch.
Args:
src(list): source data.
trg(list): target data.
index(int): start index of the minibatch.
batch_size(int): batch size.
src_word2id(dict): Word-to-index mapping for source.
trg_word2id(dict): Word-to-index mapping for target.
Returns:
Dict for seq2seq model.
"""
src_lines = [
["<s>"] + line + ["</s>"] for line in src[index : index + batch_size]
]
trg_lines = [
["<s>"] + line + ["</s>"] for line in trg[index : index + batch_size]
]
src_lens = [len(line) for line in src_lines]
sorted_indices = np.argsort(src_lens)[::-1]
sorted_src_lines = [src_lines[idx] for idx in sorted_indices]
sorted_trg_lines = [trg_lines[idx] for idx in sorted_indices]
sorted_src_lens = [len(line) for line in sorted_src_lines]
sorted_trg_lens = [len(line) for line in sorted_trg_lines]
max_src_len = max(sorted_src_lens)
max_trg_len = max(sorted_trg_lens)
input_lines_src = [
        [src_word2id[w] if w in src_word2id else src_word2id["<unk>"] for w in line]
+ [src_word2id["<pad>"]] * (max_src_len - len(line))
for line in sorted_src_lines
]
input_lines_trg = [
[
trg_word2id[w] if w in trg_word2id else trg_word2id["<unk>"]
for w in line[:-1]
]
+ [trg_word2id["<pad>"]] * (max_trg_len - len(line))
for line in sorted_trg_lines
]
output_lines_trg = [
[
trg_word2id[w] if w in trg_word2id else trg_word2id["<unk>"]
for w in line[1:]
]
+ [trg_word2id["<pad>"]] * (max_trg_len - len(line))
for line in sorted_trg_lines
]
    # For pytorch 0.4
with torch.no_grad():
input_lines_src = Variable(torch.LongTensor(input_lines_src)).cuda()
input_lines_trg = Variable(torch.LongTensor(input_lines_trg)).cuda()
output_lines_trg = Variable(torch.LongTensor(output_lines_trg)).cuda()
# sorted_src_lens = Variable(
# torch.LongTensor(sorted_src_lens)
# ).squeeze().cuda()
sorted_src_lens = (
Variable(torch.LongTensor(sorted_src_lens))
.view(len(sorted_src_lens))
.cuda()
)
return {
"input_src": input_lines_src,
"input_trg": input_lines_trg,
"output_trg": output_lines_trg,
"src_lens": sorted_src_lens,
"type": "seq2seq",
}
def compute_validation_loss(
config, model, train_iterator, criterion, task_idx, lowercase=False
):
"""Compute validation loss for a task.
Args:
config(dict): configuration dict.
model(MultitaskModel): model.
train_iterator(BufferedDataIterator): Multi-parallel corpus data iterator.
criterion(nn.CrossEntropyLoss): criterion function for loss.
task_idx(int): Task index.
lowercase(bool): Whether to lowercase the data.
Returns: the mean validation loss (float).
"""
val_src = config["data"]["paths"][task_idx]["val_src"]
val_trg = config["data"]["paths"][task_idx]["val_trg"]
if lowercase:
val_src = [
line.strip().lower().split()
for line in open(val_src, "r", encoding="utf-8")
]
val_trg = [
line.strip().lower().split()
for line in open(val_trg, "r", encoding="utf-8")
]
else:
val_src = [
line.strip().split()
for line in open(val_src, "r", encoding="utf-8")
]
val_trg = [
line.strip().split()
for line in open(val_trg, "r", encoding="utf-8")
]
batch_size = config["training"]["batch_size"]
losses = []
for j in range(0, len(val_src), batch_size):
minibatch = get_validation_minibatch(
val_src,
val_trg,
j,
batch_size,
train_iterator.src[task_idx]["word2id"],
train_iterator.trg[task_idx]["word2id"],
)
decoder_logit = model(minibatch, task_idx)
loss = criterion(
decoder_logit.contiguous().view(-1, decoder_logit.size(2)),
minibatch["output_trg"].contiguous().view(-1),
)
# losses.append(loss.data[0])
losses.append(loss.item())
return np.mean(losses)
# Original source: https://github.com/Maluuba/gensen
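# --- Illustrative usage (editor's sketch, not part of the original source) ---
# Names below are placeholders: `config` is the parsed config dict, `model` a
# trained MultitaskModel, and `train_iterator` a BufferedDataIterator.
#
#   criterion = torch.nn.CrossEntropyLoss()
#   val_loss = compute_validation_loss(
#       config, model, train_iterator, criterion, task_idx=0, lowercase=True
#   )
#   print("seq2seq validation loss: %.4f" % val_loss)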
| 2.6875 | 3 |
tempCodeRunnerFile.py | MuhammadTalha28/space-invader-fyp | 1 | 12797100 | <reponame>MuhammadTalha28/space-invader-fyp
game.image.load(
'space_breaker_asset\Background\stars_texture.png').convert_alpha(), (1300, 800))
| 1.554688 | 2 |
app_SQLite.py | hyhplus/FlaskFirstDemo | 0 | 12797101 | <reponame>hyhplus/FlaskFirstDemo
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sqlite3
import config
from flask import *
app = Flask(__name__)
app.config.from_object('config')
@app.before_request
def before_request():
g.db = sqlite3.connect(app.config['DATABASE'])
@app.teardown_request
def teardown_request(exception):
db = getattr(g, 'db', None)
if db is not None:
db.close()
# URL redirect
@app.route('/')
def index():
if 'user' in session:
return render_template('hello.html', name=session['user'])
else:
return redirect(url_for('login'), 302)
@app.route('/login', methods=['POST', 'GET'])
def login():
if request.method == 'POST':
name = request.form['user']
password = request.form['password']
cursor = g.db.execute('select * from users where name=? and password=?', [name, password])
if cursor.fetchone():
session['user'] = name
flash('login successfully!')
return redirect(url_for('index'))
else:
flash('No such user!', 'error')
return redirect(url_for('login'))
else:
return render_template('login.html')
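# Editor's sketch (assumption, not part of the original app): the login query
# above presumes a `users` table with `name` and `password` columns. One way to
# create and seed it with the same sqlite3 module:
#
#   with sqlite3.connect(app.config['DATABASE']) as conn:
#       conn.execute("CREATE TABLE IF NOT EXISTS users (name TEXT, password TEXT)")
#       conn.execute("INSERT INTO users VALUES (?, ?)", ("alice", "secret"))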
| 3.015625 | 3 |
backend/src/auth.py | raduschirliu/calgary-hacks-2022 | 1 | 12797102 | <filename>backend/src/auth.py
from flask import request
import jwt
import os
JWT_SECRET = os.getenv('JWT_SECRET')
JWT_AUDIENCE = os.getenv('JWT_AUDIENCE')
# Check if the request header includes a valid JWT
def verify_jwt():
authorization = request.headers.get('Authorization')
if not authorization:
print('No authorization')
return False
parts = authorization.split(' ')
if len(parts) != 2:
print('Invalid parts')
return False
if parts[0] != 'Bearer':
print('No bearer')
return False
try:
audience = [JWT_AUDIENCE]
decoded = jwt.decode(parts[1], JWT_SECRET, audience=audience, algorithms=['HS256'])
return decoded
except Exception:
print('Invalid JWT')
        return False
| 3.0625 | 3 |
patrole_tempest_plugin/tests/api/network/test_availability_zones_rbac.py | lingxiankong/patrole | 14 | 12797103 | # Copyright 2018 AT&T Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.common import utils
from tempest.lib import decorators
from patrole_tempest_plugin import rbac_rule_validation
from patrole_tempest_plugin.tests.api.network import rbac_base as base
class AvailabilityZoneExtRbacTest(base.BaseNetworkExtRbacTest):
@classmethod
def skip_checks(cls):
super(AvailabilityZoneExtRbacTest, cls).skip_checks()
if not utils.is_extension_enabled('availability_zone',
'network'):
msg = "network_availability_zone extension not enabled."
raise cls.skipException(msg)
@rbac_rule_validation.action(service="neutron",
rules=["get_availability_zone"])
@decorators.idempotent_id('3c521be8-c32e-11e8-a611-080027758b73')
def test_list_availability_zone_rbac(self):
"""List all available zones.
RBAC test for the neutron ``list_availability_zones``
function and the ``get_availability_zone`` policy
"""
admin_resources = (self.ntp_client.list_availability_zones()
["availability_zones"])
with self.override_role_and_validate_list(
admin_resources=admin_resources) as ctx:
ctx.resources = (self.ntp_client.list_availability_zones()
['availability_zones'])
| 1.726563 | 2 |
py/py_0113_non-bouncy_numbers.py | lcsm29/project-euler | 0 | 12797104 | # Solution of;
# Project Euler Problem 113: Non-bouncy numbers
# https://projecteuler.net/problem=113
#
# Working from left-to-right if no digit is exceeded by the digit to its left
# it is called an increasing number; for example, 134468. Similarly if no
# digit is exceeded by the digit to its right it is called a decreasing
# number; for example, 66420. We shall call a positive integer that is neither
# increasing nor decreasing a "bouncy" number; for example, 155349. As n
# increases, the proportion of bouncy numbers below n increases such that
# there are only 12951 numbers below one-million that are not bouncy and only
# 277032 non-bouncy numbers below 10^10. How many numbers below a googol
# (10^100) are not bouncy?
#
# by lcsm29 http://github.com/lcsm29/project-euler
import timed
def dummy(n):
pass
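# Editor's sketch (not part of the original submission): a brute-force check of
# the smaller bound quoted in the problem statement (12951 non-bouncy numbers
# below one million). The 10^100 case needs a combinatorial argument instead.
def is_bouncy(num):
    digits = str(num)
    increasing = all(a <= b for a, b in zip(digits, digits[1:]))
    decreasing = all(a >= b for a, b in zip(digits, digits[1:]))
    return not (increasing or decreasing)
def count_non_bouncy_below(limit):
    # e.g. count_non_bouncy_below(10**6) == 12951
    return sum(1 for num in range(1, limit) if not is_bouncy(num))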
if __name__ == '__main__':
n = 1000
i = 10000
prob_id = 113
timed.caller(dummy, n, i, prob_id)
| 3.5 | 4 |
airbyte-integrations/connectors/source-firebolt/source_firebolt/source.py | faros-ai/airbyte | 22 | 12797105 | <reponame>faros-ai/airbyte
#
# Copyright (c) 2022 Airbyte, Inc., all rights reserved.
#
import json
from asyncio import gather, get_event_loop
from typing import Dict, Generator
from airbyte_cdk.logger import AirbyteLogger
from airbyte_cdk.models import (
AirbyteCatalog,
AirbyteConnectionStatus,
AirbyteMessage,
AirbyteStream,
ConfiguredAirbyteCatalog,
Status,
SyncMode,
)
from airbyte_cdk.sources import Source
from firebolt.async_db import Connection as AsyncConnection
from .database import establish_async_connection, establish_connection, get_firebolt_tables
from .utils import airbyte_message_from_data, convert_type
SUPPORTED_SYNC_MODES = [SyncMode.full_refresh]
async def get_table_stream(connection: AsyncConnection, table: str) -> AirbyteStream:
"""
Get AirbyteStream for a particular table with table structure defined.
:param connection: Connection object connected to a database
:return: AirbyteStream object containing the table structure
"""
column_mapping = {}
cursor = connection.cursor()
await cursor.execute(f"SHOW COLUMNS {table}")
for t_name, c_name, c_type, nullable in await cursor.fetchall():
airbyte_type = convert_type(c_type, nullable)
column_mapping[c_name] = airbyte_type
cursor.close()
json_schema = {
"type": "object",
"properties": column_mapping,
}
return AirbyteStream(name=table, json_schema=json_schema, supported_sync_modes=SUPPORTED_SYNC_MODES)
class SourceFirebolt(Source):
def check(self, logger: AirbyteLogger, config: json) -> AirbyteConnectionStatus:
"""
Tests if the input configuration can be used to successfully connect to the integration
        e.g. if a provided Stripe API token can be used to connect to the Stripe API.
:param logger: Logging object to display debug/info/error to the logs
(logs will not be accessible via airbyte UI if they are not passed to this logger)
:param config: Json object containing the configuration of this source, content of this json is as specified in
the properties of the spec.json file
:return: AirbyteConnectionStatus indicating a Success or Failure
"""
try:
with establish_connection(config, logger) as connection:
# We can only verify correctness of connection parameters on execution
with connection.cursor() as cursor:
cursor.execute("SELECT 1")
return AirbyteConnectionStatus(status=Status.SUCCEEDED)
except Exception as e:
return AirbyteConnectionStatus(status=Status.FAILED, message=f"An exception occurred: {str(e)}")
def discover(self, logger: AirbyteLogger, config: json) -> AirbyteCatalog:
"""
Returns an AirbyteCatalog representing the available streams and fields in this integration.
For example, given valid credentials to a Postgres database,
returns an Airbyte catalog where each postgres table is a stream, and each table column is a field.
:param logger: Logging object to display debug/info/error to the logs
(logs will not be accessible via airbyte UI if they are not passed to this logger)
:param config: Json object containing the configuration of this source, content of this json is as specified in
the properties of the spec.json file
:return: AirbyteCatalog is an object describing a list of all available streams in this source.
A stream is an AirbyteStream object that includes:
- its stream name (or table name in the case of Postgres)
- json_schema providing the specifications of expected schema for this stream (a list of columns described
by their names and types)
"""
async def get_streams():
async with await establish_async_connection(config, logger) as connection:
tables = await get_firebolt_tables(connection)
logger.info(f"Found {len(tables)} available tables.")
return await gather(*[get_table_stream(connection, table) for table in tables])
loop = get_event_loop()
streams = loop.run_until_complete(get_streams())
logger.info(f"Provided {len(streams)} streams to the Aribyte Catalog.")
return AirbyteCatalog(streams=streams)
def read(
self,
logger: AirbyteLogger,
config: json,
catalog: ConfiguredAirbyteCatalog,
state: Dict[str, any],
) -> Generator[AirbyteMessage, None, None]:
"""
Returns a generator of the AirbyteMessages generated by reading the source with the given configuration,
catalog, and state.
:param logger: Logging object to display debug/info/error to the logs
(logs will not be accessible via airbyte UI if they are not passed to this logger)
:param config: Json object containing the configuration of this source, content of this json is as specified in
the properties of the spec.json file
:param catalog: The input catalog is a ConfiguredAirbyteCatalog which is almost the same as AirbyteCatalog
        returned by discover(), but in addition it has been configured in the UI. For each particular stream
        and field, extra modifications may have been applied, such as filtering out streams and/or columns,
        renaming some entities, etc.
        :param state: When Airbyte reads data from a source, it might need to keep a checkpoint cursor to resume
        replication in the future from that saved checkpoint.
        This is the object that provides the state from previous runs and avoids replicating the entire set of
        data every time.
:return: A generator that produces a stream of AirbyteRecordMessage contained in AirbyteMessage object.
"""
logger.info(f"Reading data from {len(catalog.streams)} Firebolt tables.")
with establish_connection(config, logger) as connection:
with connection.cursor() as cursor:
for c_stream in catalog.streams:
table_name = c_stream.stream.name
table_properties = c_stream.stream.json_schema["properties"]
columns = list(table_properties.keys())
# Escape columns with " to avoid reserved keywords e.g. id
escaped_columns = ['"{}"'.format(col) for col in columns]
query = "SELECT {columns} FROM {table}".format(columns=",".join(escaped_columns), table=table_name)
cursor.execute(query)
logger.info(f"Fetched {cursor.rowcount} rows from table {table_name}.")
for result in cursor.fetchall():
message = airbyte_message_from_data(result, columns, table_name)
if message:
yield message
logger.info("Data read complete.")
| 2.078125 | 2 |
responder_commons/report_builder.py | Infosecurity-LLC/responder_commons | 1 | 12797106 | <filename>responder_commons/report_builder.py
import os
import logging
import json
import copy
from benedict import benedict
from responder_commons.exc import TemplateFileNotExist
logger = logging.getLogger('responder_commons')
class Builder:
def __init__(self, translate_params):
# self.translate_collection = benedict(translate_params)
self.translate_collection = translate_params
self.tpl_name = 'default'
self.normal_thive_incident = {}
@staticmethod
def __load_tpl(tpl_name: str) -> dict:
tpl_path = os.path.join(os.getcwd(), f'templates/{tpl_name}.json')
if not os.path.exists(tpl_path):
logger.error(f'Error getting setting. Check your settings file in {tpl_path}')
raise TemplateFileNotExist(f"File {tpl_path} doesn't exist")
with open(tpl_path, encoding='utf-8') as f:
tpl = json.load(f)
return tpl
def __get_tpl(self, tpl: str = None) -> dict:
if not tpl:
            logger.debug('Template name not given; using "default"')
return self.__load_tpl("default")
        logger.debug(f'Using template {tpl}')
return self.__load_tpl(tpl_name=tpl)
def __get_recursively(self, search_obj: object, field: str, d_path: str = None) -> dict:
""" Получаем список объектов, которые нужно заменить """
paths_to_fields = {}
if isinstance(search_obj, list):
for i, value in enumerate(search_obj):
new_d_path = None
if d_path:
new_d_path = f'{d_path}[{i}]'
results = self.__get_recursively(value, field, new_d_path)
paths_to_fields = {**paths_to_fields, **results}
if isinstance(search_obj, dict):
for key, value in search_obj.items():
if field in value:
if not d_path:
paths_to_fields.update({key: value})
if d_path:
paths_to_fields.update({f'{d_path}.{key}': value})
elif isinstance(value, dict):
if d_path:
key = d_path + '.' + key
results = self.__get_recursively(value, field, key)
paths_to_fields = {**paths_to_fields, **results}
elif isinstance(value, list):
if d_path:
key = d_path + '.' + key
results = self.__get_recursively(value, field, key)
paths_to_fields = {**paths_to_fields, **results}
return paths_to_fields
def get_translate_of_word(self, word: str = None, position: str = None):
translate_list = self.translate_collection
if not word:
return word
for write in translate_list:
if isinstance(write, dict) and write['word'] == word:
if position:
                    # position may be required for the comparison
if write['position'] == position:
return write['translate']
else:
return write['translate']
if isinstance(write, str) and write in self.translate_collection:
return self.translate_collection[word]
        return word  # If no translation is found, keep the word as is
@staticmethod
def __sort_thive_list(thdict: dict):
""" Сортируем словарь-список thive """
thsort = sorted(thdict, key=lambda x: thdict[x]['order'])
def cast(value_type, value):
from datetime import datetime
if not value:
return None
if value_type == 'date':
return datetime.fromtimestamp(value / 1e3).isoformat()
if value_type == 'string':
return str(value)
            if value_type == 'number':
return int(value)
return value
def get_value(v: dict):
v.pop('order')
for value_type, value in v.items():
return cast(value_type, value)
fdict = {}
for key in thsort:
v = get_value(thdict[key])
fdict.update({key: v})
return fdict
def __normalize_thive_dict(self, data):
""" Нормализуем инцидент для респондера """
normalize_incident = copy.deepcopy(data)
normalize_incident.update({"customFields": self.__sort_thive_list(normalize_incident['customFields'])})
return normalize_incident
def __get_value_of_the_path(self, value_bank: benedict, value_path: str) -> object:
""" Получение значения по пути через точку """
_value_bank = copy.deepcopy(value_bank)
def get_value(_data, _v):
_data = _data.get(_v)
return _data
if "soc_inc>" in value_path:
""" soc_inc>{что взять из инцидента} """
v = value_path.replace("soc_inc>", '')
data_value = get_value(_value_bank, v)
return data_value
if "soc_static_field>" in value_path:
""" soc_static_field>{где}.{по какому word взять translate}"""
v = value_path.replace("soc_static_field>", '')
translate_path = v.split('.')
if translate_path[0] == "items_translate":
return self.get_translate_of_word(word=translate_path[1])
if len(translate_path) == 1:
return self.get_translate_of_word(word=v)
if "soc_inc_rows>" in value_path:
"""Отрисовать список"""
v = value_path.replace("soc_inc_rows>", '')
rows_data = get_value(_value_bank, v)
rows = []
for k, v in self.normal_thive_incident['customFields'].items():
rows.append({"left": self.get_translate_of_word(word=k, position='left'),
"right": self.get_translate_of_word(word=v, position='right')})
return rows
return value_path
@staticmethod
def __clean_format_tpl(data):
""" Очистка щаполненного шаблона от пустых блоков"""
def is_rows_null(section):
""" тут проверяем, пустой блок или нет"""
for item in section['blocks'][0]['rows']:
if item['right'] is not None:
return False
return True
from copy import deepcopy
final_format_tpl = deepcopy(data)
for i in data['categories'][0]['sections']:
if is_rows_null(i):
final_format_tpl['categories'][0]['sections'].remove(i)
return final_format_tpl
def build(self, data: dict, tpl: dict) -> dict:
if not data:
logger.warning('No data')
return {}
self.normal_thive_incident = self.__normalize_thive_dict(data)
format_data = benedict(self.normal_thive_incident)
format_tpl = benedict(copy.deepcopy(tpl))
replacement_list = self.__get_recursively(benedict(copy.deepcopy(tpl)), 'soc_inc')
for repl_key, repl_value in replacement_list.items():
format_tpl.set(repl_key, self.__get_value_of_the_path(format_data, repl_value))
replacement_list = self.__get_recursively(benedict(copy.deepcopy(tpl)), 'soc_static_field')
for repl_key, repl_value in replacement_list.items():
format_tpl.set(repl_key, self.__get_value_of_the_path(self.translate_collection, repl_value))
final_format_tpl = self.__clean_format_tpl(format_tpl)
return final_format_tpl
def translate_other_items(self, data: dict):
""" Перевод дополнительных полей """
if not data:
return None
translate_result = {}
for k, v in data.items():
translate_result[k] = self.get_translate_of_word(word=v, position=k)
return translate_result
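# --- Illustrative usage (editor's sketch; the translate entries and incident
# --- dict are placeholders, not taken from the original project) ---
#   translate = [{"word": "severity", "position": "left", "translate": "Severity"}]
#   builder = Builder(translate_params=translate)
#   # `tpl` is a template dict such as the one loaded from templates/default.json
#   report = builder.build(data=incident_from_thehive, tpl=tpl)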
| 2.03125 | 2 |
run.py | Anglelengyug/MagiChaum | 0 | 12797107 | import bot
if __name__ == '__main__':
zonbot = bot.Bot('!', pm_help = True)
zonbot.run(zonbot.token)
| 1.132813 | 1 |
mascoord/sim2yaml.py | bbrighttaer/ddcop-dynagraph | 0 | 12797108 | import argparse
import os.path
import oyaml as yaml
def parse_constraint(con_str):
# sample: (0,1):(1,1,1)
agents_str, coefficients_str = con_str.split(':')
x, y = agents_str.replace('(', '').replace(')', '').split(',')
a, b, c = coefficients_str.replace('(', '').replace(')', '').split(',')
c1 = f'{a} * var{x}^2 + {b} * var{x} * var{y} + {c} * var{y}^2'
c2 = f'{c} * var{y}^2 + {b} * var{y} * var{x} + {a} * var{x}^2'
return c1, c2
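# Worked example (editor's illustration): parse_constraint("(0,1):(1,1,1)") returns
#   ("1 * var0^2 + 1 * var0 * var1 + 1 * var1^2",
#    "1 * var1^2 + 1 * var1 * var0 + 1 * var0^2")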
def main(args):
lines_4_config = {}
with open(args.file, 'r') as f:
line = f.readline()
while line:
kv = line.split('=')
lines_4_config[kv[0]] = kv[1].strip()
line = f.readline()
yaml_dict = {
'name': args.name,
'objective': 'min',
}
# domains
domains = {}
domain_info = lines_4_config['domains'].split(' ')
agent_ids = []
for domain_str in domain_info:
agent_id, dvals = domain_str.split(':')
domains[f'd{agent_id}'] = {
'values': [int(v) for v in dvals.split(',')],
}
agent_ids.append(agent_id)
yaml_dict['domains'] = domains
# variables
variables = {}
for agent in agent_ids:
variables[f'var{agent}'] = {
'domain': f'd{agent}',
}
yaml_dict['variables'] = variables
# constraints
constraints = {}
for con in lines_4_config['cons'].split('>'):
eq1, eq2 = parse_constraint(con)
constraints[f'c{len(constraints)}'] = {
'type': 'intention',
'function': eq1,
}
constraints[f'c{len(constraints)}'] = {
'type': 'intention',
'function': eq2,
}
yaml_dict['constraints'] = constraints
# agents
agents = [f'a{agent_id}' for agent_id in agent_ids]
yaml_dict['agents'] = agents
# export to yaml
exported_file = args.file.split('/')[-1] + '.yaml'
yaml_file = os.path.join('./yaml-files', exported_file)
with open(yaml_file, 'w') as f:
yaml.dump(yaml_dict, f)
print(f'Simulation config file saved: {yaml_file}')
# create scenario file
events = [{
'id': 'w',
'delay': 1,
}]
scenarios = {'events': events}
for i, cmd in enumerate(lines_4_config['commands'].split(' ')):
cmd, agent = cmd.split(':')
if cmd == 'remove_agent': # only agent removal is supported by pydcop
events.append({
'id': f'e{i}',
'actions': {
'type': cmd,
'agent': f'a{agent}'
}
})
events.append({
'id': 'w',
'delay': 1,
})
exported_file = args.file.split('/')[-1] + '-scenario.yaml'
yaml_file = os.path.join('./yaml-files', exported_file)
with open(yaml_file, 'w') as f:
yaml.dump(scenarios, f)
print(f'Simulation scenario file saved: {yaml_file}')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Convert DynaGraph sim file to pyDCOP compatible yaml config')
parser.add_argument('-f', '--file', type=str, required=True, help='sim file path')
parser.add_argument('-n', '--name', type=str, required=True, help='DCOP name')
args = parser.parse_args()
main(args)
| 2.84375 | 3 |
modules/7.Cal_changes_in_inequality.py | YixuanZheng/Aerosol_Inequality_2019 | 3 | 12797109 | # -*- coding: utf-8 -*-
'''
This code calculates changes in the ratio between different population-weighted GDP deciles and quintiles
by <NAME> (<EMAIL>)
'''
import pandas as pd
import numpy as np
from netCDF4 import Dataset
import _env
datasets = _env.datasets
scenarios = _env.scenarios
gdp_year = 2010
sgdp_year = str(gdp_year)
idir_temp = _env.odir_root + '/sim_temperature/'
####summarize global and regional GDP changes####
gdp_year = 2010
sgdp_year = str(gdp_year)
boot_methods = ['country-lag0','country-lag1','country-lag5','year','year-blocks']
itbl_gdp_baseline = pd.read_csv(_env.odir_root + 'basic_stats' + '/Country_Basic_Stats.csv')
itbl_gdp_baseline.sort_values([sgdp_year + '_gdpcap'],inplace=True)
tot_pop = itbl_gdp_baseline[sgdp_year + '_pop'].sum()
#itbl_gdp_baseline['2010_pop_ratio'] = itbl_gdp_baseline['2010_pop']/tot_pop
itbl_gdp_baseline[sgdp_year + '_gdpsum'] = 0
#itbl_gdp_baseline['2010_popw_gdp'] = 0
itbl_gdp_baseline[sgdp_year + '_popsum'] = 0
#itbl_gdp_baseline['2010_pop_ratio_sum'] = 0
for irow, row in enumerate(itbl_gdp_baseline.index):
if irow == 0:
itbl_gdp_baseline.loc[row,sgdp_year + '_gdpsum'] = itbl_gdp_baseline.loc[row,sgdp_year + '_gdp']
itbl_gdp_baseline.loc[row, sgdp_year + '_popsum'] = itbl_gdp_baseline.loc[row,sgdp_year + '_pop']
else:
itbl_gdp_baseline.loc[row,sgdp_year + '_gdpsum'] = itbl_gdp_baseline[sgdp_year + '_gdpsum'].iloc[irow-1] + itbl_gdp_baseline.loc[row,sgdp_year + '_gdp']
itbl_gdp_baseline.loc[row, sgdp_year + '_popsum'] = itbl_gdp_baseline[sgdp_year + '_popsum'].iloc[irow-1] + itbl_gdp_baseline.loc[row,sgdp_year + '_pop']
itbl_gdp_baseline[sgdp_year + '_pop_ratio_sum'] = itbl_gdp_baseline[sgdp_year + '_popsum']/tot_pop
#deciles (<=10% and >=90%)
deciles = {}
ind10 = np.where(itbl_gdp_baseline[sgdp_year + '_pop_ratio_sum']<=0.1)[0]
deciles[10] = itbl_gdp_baseline.iloc[ind10].copy()
ind90 = np.where(itbl_gdp_baseline[sgdp_year + '_pop_ratio_sum']>=0.9)[0]
deciles[90] = itbl_gdp_baseline.iloc[ind90].copy()
#quintiles (<=20% and >=80%)
ind20 = np.where(itbl_gdp_baseline[sgdp_year + '_pop_ratio_sum']<=0.2)[0]
deciles[20] = itbl_gdp_baseline.iloc[ind20].copy()
ind80 = np.where(itbl_gdp_baseline[sgdp_year + '_pop_ratio_sum']>=0.8)[0]
deciles[80] = itbl_gdp_baseline.iloc[ind80].copy()
for ds in datasets:
scens = ['No-Aerosol']
if ds == 'ERA-Interim':
scens = ['No-Aerosol','No-Sulfate']
idir_gdp = _env.odir_root + '/gdp_' + ds + '/'
odir_summary = _env.odir_root + '/summary_' + ds + '/'
_env.mkdirs(odir_summary)
for scen in scens:
writer = pd.ExcelWriter(odir_summary + 'Deciles_and_Quintile_ratio_changes_'+ds+'_'+scen+'_Burke.xls')
otbls_ctry_GDP_stat = {}
otbls = {}
otbl_ineq = pd.DataFrame(index = boot_methods,columns = ['median_ratio','5_ratio','95_ratio','10_ratio','90_ratio','probability_reduced'])
otbls['deciles'] = otbl_ineq.copy()
otbls['quintiles'] = otbl_ineq.copy()
for b_m in boot_methods:
inc_gdp = Dataset(idir_gdp + 'GDP_Changes_Burke_' + b_m + '_' + str(gdp_year) + '_'+ds+'_'+scen+'.nc')
imtrx_gdp = inc_gdp['GDP'][:]
dec_var = {}
dec_base = {}
for perc in [10,20,80,90]:
dec = deciles[perc].copy()
dec_pop_tot = dec[sgdp_year + '_pop'].sum()
dec_gdp_tot = dec[sgdp_year + '_gdp'].sum()
dec_base[perc] = dec_gdp_tot/dec_pop_tot
ind_ctry = dec.index
imtrx_dec = imtrx_gdp[:,ind_ctry,:]
imtrx_dec_sum = dec_gdp_tot-(imtrx_dec.data).sum(axis=1)
# print(perc, np.median(imtrx_dec_sum),dec_gdp_tot,np.median(imtrx_dec_sum)/dec_gdp_tot)
dec_gdpcap = imtrx_dec_sum/dec_pop_tot
dec_var[perc] = dec_gdpcap.copy()
dec_diff = (dec_var[90]/dec_var[10]-dec_base[90]/dec_base[10])/(dec_base[90]/dec_base[10])*100
quin_diff = (dec_var[80]/dec_var[20] - dec_base[80]/dec_base[20])/(dec_base[80]/dec_base[20])*100
otbls['deciles'].loc[b_m,'median_ratio'] = np.median(dec_diff)
otbls['deciles'].loc[b_m,'5_ratio'] = np.percentile(dec_diff,5)
otbls['deciles'].loc[b_m,'95_ratio'] = np.percentile(dec_diff,95)
otbls['deciles'].loc[b_m,'10_ratio'] = np.percentile(dec_diff,10)
otbls['deciles'].loc[b_m,'90_ratio'] = np.percentile(dec_diff,90)
otbls['deciles'].loc[b_m,'probability_reduced'] = len(dec_diff[dec_diff<0])/np.size(dec_diff)
otbls['quintiles'].loc[b_m,'median_ratio'] = np.median(quin_diff)
otbls['quintiles'].loc[b_m,'5_ratio'] = np.percentile(quin_diff,5)
otbls['quintiles'].loc[b_m,'95_ratio'] = np.percentile(quin_diff,95)
otbls['quintiles'].loc[b_m,'10_ratio'] = np.percentile(quin_diff,10)
otbls['quintiles'].loc[b_m,'90_ratio'] = np.percentile(quin_diff,90)
otbls['quintiles'].loc[b_m,'probability_reduced'] = len(quin_diff[quin_diff<0])/np.size(quin_diff)
otbls['deciles'].to_excel(writer,'deciles')
otbls['quintiles'].to_excel(writer,'quintiles')
writer.save()
| 2.671875 | 3 |
typehint.py | xiaopeng163/whats-new-python3.9 | 1 | 12797110 | # from typing import List
mylist: list[int] = [1, 2, 3, 4]
print(mylist)
mylist = '1234'
print(mylist)
| 3.546875 | 4 |
commands.py | syndbg/ssh-chat | 6 | 12797111 | <filename>commands.py
class CommandsHandler:
def __init__(self, protocol):
self.protocol = protocol
self.terminal = self.protocol.terminal
def do_help(self):
public_methods = [function_name for function_name in dir(
self) if function_name.startswith('do_')]
commands = [cmd.replace('do_', '', 1) for cmd in public_methods]
self.terminal.write('Commands: ' + ' '.join(commands))
def do_echo(self, *args):
self.terminal.write(' '.join(args))
def do_whoami(self):
self.terminal.write(self.user.username)
def do_quit(self):
self.terminal.write('Bye!')
self.terminal.loseConnection()
def do_clear(self):
self.terminal.reset()
| 3.046875 | 3 |
dataspot-bokeh/dataspot/statistics/excel_importer.py | patrickdehoon/dataspot-docker | 3 | 12797112 | <filename>dataspot-bokeh/dataspot/statistics/excel_importer.py<gh_stars>1-10
from openpyxl import load_workbook
class ExcelImporter:
def __init__(self):
self.__relationships = dict()
def set_relationships(self, ws, statistic):
relationships = self.__relationships
relationships[statistic] = dict()
for row in ws.iter_rows(values_only=True):
if row[0] in relationships[statistic]:
for ind, i in enumerate(row):
if ind > 0 and i is not None and i not in relationships[statistic][row[0]]:
relationships[statistic][row[0]].append(i)
else:
relationships[statistic][row[0]] = [i for ind, i in enumerate(row) if ind > 0 and i is not None]
def get_relationships(self):
return self.__relationships
def build(self, path):
wb = load_workbook(path)
for sheet in wb.sheetnames:
ws = wb[sheet]
self.set_relationships(ws=ws, statistic=sheet)
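# --- Illustrative usage (editor's sketch; the workbook path is a placeholder) ---
#   importer = ExcelImporter()
#   importer.build(path="relationships.xlsx")
#   importer.get_relationships()
#   # -> {<sheet name>: {<first-column value>: [<other non-empty cells>, ...]}}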
| 2.59375 | 3 |
Tests/test_mesh_2d.py | robotsorcerer/LevelSetPy | 4 | 12797113 | __author__ = "<NAME>"
__copyright__ = "2021, Hamilton-Jacobi Analysis in Python"
__license__ = "Molux Licence"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Completed"
import argparse
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import os, sys
from os.path import abspath, dirname, exists, join
sys.path.append(dirname(dirname(abspath(__file__))))
from Grids import createGrid
from InitialConditions import *
from Visualization import *
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
"""
Test Implicit Functions
Lekan Molu, September 07, 2021
"""
parser = argparse.ArgumentParser(description='2D Plotter for Various Implicit Initial Conditions for the Value Function')
parser.add_argument('--delay', '-dl', type=float, default=3, help='pause time between successive updates of plots' )
args = parser.parse_args()
def levelset_viz(g, ax, fig, mesh, title='', savedict=None, fontdict=None, fc='c', ec='k'):
"""
Simultaneously visualize the level sets of a value function
on a 1X3 chart:
Chart 131: 2D Value function as a surface mesh
Chart 132: 2D Value function as colored contour levels
Chart 133: 2D Value zero - set as cyan contour.
Author: <NAME>, October 29, 2021
"""
ax[0].plot_surface(g.xs[0], g.xs[1], mesh, rstride=1, cstride=1,
cmap='viridis', edgecolor=ec, facecolor=fc)
ax[0].set_xlabel('X', fontdict=fontdict)
ax[0].set_ylabel('Y', fontdict=fontdict)
ax[0].set_zlabel('Z', fontdict=fontdict)
ax[0].set_title(f'{title}', fontdict=fontdict)
ax[1].contourf(g.xs[0], g.xs[1], mesh, colors=fc)
ax[1].set_xlabel('X', fontdict=fontdict)
ax[1].set_title(f'Contours', fontdict=fontdict)
ax[2].contour(g.xs[0], g.xs[1], mesh, levels=0, colors=fc)
ax[2].set_xlabel('X', fontdict=fontdict)
ax[2].set_ylabel('Y', fontdict=fontdict)
ax[2].grid('on')
ax[2].set_title(f'2D Zero level set', fontdict=fontdict)
fig.tight_layout()
if savedict["save"]:
plt.savefig(join(savedict["savepath"],savedict["savename"]),
bbox_inches='tight',facecolor='None')
fig.canvas.draw()
fig.canvas.flush_events()
def get_grid():
g2min = -2*np.ones((2, 1),dtype=np.float64)
g2max = +2*np.ones((2, 1),dtype=np.float64)
g2N = 51*np.ones((2, 1),dtype=np.int64)
g2 = createGrid(g2min, g2max, g2N, process=True)
return g2
def main(savedict):
# generate signed distance function for cylinder
center = np.array(([[-.5,.5]]), np.float64).T
g2 = get_grid()
# shapes generation
    axis_align, radius = 2, 1
    cylinder = shapeCylinder(g2, axis_align, center, radius)
sphere = shapeSphere(g2, center, radius=1)
sphere2 = shapeSphere(g2, center=np.array(([-0., 0.])).T, radius=1)
rect = shapeRectangleByCorners(g2)
rect2 = shapeRectangleByCorners(g2, np.array([[ -1.0, -np.inf, ]]).T, np.array([[ np.inf, -1.0 ]]).T, )
rect3 = shapeRectangleByCorners(g2, np.array([[ -1.0, -0.5, ]]).T, np.array([[ .5, 1.0 ]]).T)
rect4 = shapeRectangleByCenter(g2, np.array([[ -1.0, -0.5, ]]).T, np.array([[ .5, 1.0 ]]).T)
# Set Ops
sphere_union = shapeUnion(sphere, sphere2)
rect_union = shapeUnion(rect, rect3)
rect_comp = shapeComplement(rect2)
sph_rect_diff = shapeDifference(sphere, rect)
fig = plt.figure(figsize=(16, 9))
gs = gridspec.GridSpec(1, 3, fig)
ax = [plt.subplot(gs[i], projection='3d') for i in range(2)] + [plt.subplot(gs[2])]
savedict["savename"] = "cylinder_2d.jpg"
levelset_viz(g2, ax, fig, cylinder, title='Cylinder', savedict=savedict, fontdict={'fontsize':12, 'fontweight':'bold'})
plt.pause(args.delay)
savedict["savename"] = "sphere_2d.jpg"
plt.clf()
ax = [plt.subplot(gs[i], projection='3d') for i in range(2)] + [plt.subplot(gs[2])]
levelset_viz(g2, ax, fig, sphere, title='Sphere', savedict=savedict, fontdict={'fontsize':12, 'fontweight':'bold'})
plt.pause(args.delay)
savedict["savename"]="sphere2_2d.jpg"
plt.clf()
ax = [plt.subplot(gs[i], projection='3d') for i in range(2)] + [plt.subplot(gs[2])]
levelset_viz(g2, ax, fig, sphere2, title='Sphere, C=(-.5, .5)', savedict=savedict, fontdict={'fontsize':12, 'fontweight':'bold'})
plt.pause(args.delay)
savedict["savename"]="rect_2d.jpg"
plt.clf()
ax = [plt.subplot(gs[i], projection='3d') for i in range(2)] + [plt.subplot(gs[2])]
levelset_viz(g2, ax, fig, rect, title='Unit Square@Origin', savedict=savedict, fontdict={'fontsize':12, 'fontweight':'bold'})
plt.pause(args.delay)
savedict["savename"]="rect2_2d.jpg"
plt.clf()
ax = [plt.subplot(gs[i], projection='3d') for i in range(2)] + [plt.subplot(gs[2])]
levelset_viz(g2, ax, fig, rect2, title='Rect by Corners', savedict=savedict, fontdict={'fontsize':12, 'fontweight':'bold'})
plt.pause(args.delay)
savedict["savename"]="rect3_2d.jpg"
plt.clf()
ax = [plt.subplot(gs[i], projection='3d') for i in range(2)] + [plt.subplot(gs[2])]
levelset_viz(g2, ax, fig, rect3, title='RectCorner: [1,-0.5], W: [0.5,1.0]', savedict=savedict, fontdict={'fontsize':12, 'fontweight':'bold'})
plt.pause(args.delay)
savedict["savename"]="rect4_2d.jpg"
plt.clf()
ax = [plt.subplot(gs[i], projection='3d') for i in range(2)] + [plt.subplot(gs[2])]
levelset_viz(g2, ax, fig, rect4, title='RectCent: [1,-0.5], W: [0.5,1.0]', savedict=savedict, fontdict={'fontsize':12, 'fontweight':'bold'})
plt.pause(args.delay)
# Show Unions
savedict["savename"]="sphere_union_2d.jpg"
plt.clf()
ax = [plt.subplot(gs[i], projection='3d') for i in range(2)] + [plt.subplot(gs[2])]
levelset_viz(g2, ax, fig, sphere_union, title='Spheres+Sphere', savedict=savedict, fontdict={'fontsize':12, 'fontweight':'bold'})
plt.pause(args.delay)
savedict["savename"]="rect_union_2d.jpg"
plt.clf()
ax = [plt.subplot(gs[i], projection='3d') for i in range(2)] + [plt.subplot(gs[2])]
levelset_viz(g2, ax, fig, rect_union, title='Union of 2 Rects', savedict=savedict, fontdict={'fontsize':12, 'fontweight':'bold'})
plt.pause(args.delay)
savedict["savename"]="rect_comp_2d.jpg"
plt.clf()
ax = [plt.subplot(gs[i], projection='3d') for i in range(2)] + [plt.subplot(gs[2])]
levelset_viz(g2, ax, fig, rect_comp, title='Rect Complement', savedict=savedict, fontdict={'fontsize':12, 'fontweight':'bold'})
plt.pause(args.delay)
savedict["savename"]="sph_rect_diff_2d.jpg"
plt.clf()
ax = [plt.subplot(gs[i], projection='3d') for i in range(2)] + [plt.subplot(gs[2])]
levelset_viz(g2, ax, fig, sph_rect_diff, title='Sphere-Rect Diff', savedict=savedict, fontdict={'fontsize':12, 'fontweight':'bold'})
plt.pause(args.delay)
if __name__ == '__main__':
savedict = dict(save=True, savename='cyl_2d.jpg',\
savepath=join("..", "jpeg_dumps"))
plt.ion()
main(savedict)
| 2.296875 | 2 |
utilities/cleaner.py | asdfhkga/Neutra | 11 | 12797114 | # Module for cleaning messages from all unwanted content
import re
def clean_all(msg):
msg = clean_invite_embed(msg)
msg = clean_backticks(msg)
msg = clean_mentions(msg)
msg = clean_emojis(msg)
return msg
def clean_invite_embed(msg):
"""Prevents invites from embedding"""
return msg.replace("discord.gg/", "discord.gg/\u200b")
def clean_backticks(msg):
"""Prevents backticks from breaking code block formatting"""
return msg.replace("`", "\U0000ff40")
def clean_formatting(msg):
"""Escape formatting items in a string."""
return re.sub(r"([`*_])", r"\\\1", msg)
def clean_mentions(msg):
"""Prevent discord mentions"""
return msg.replace("@", "@\u200b")
def clean_emojis(msg):
"""Escape custom emojis."""
return re.sub(r"<(a)?:([a-zA-Z0-9_]+):([0-9]+)>", "<\u200b\\1:\\2:\\3>", msg)
| 2.71875 | 3 |
analyzers/response_size_elliptic.py | dscrobonia/sawyer | 1 | 12797115 | <gh_stars>1-10
import json
import logging
import matplotlib.font_manager
import matplotlib.pyplot as plt
import numpy as np
from sklearn.covariance import EllipticEnvelope
from sklearn.svm import OneClassSVM
log = logging.getLogger(__name__)
def analyze(data):
# Convert this to python data for us to be able to run ML algorithms
json_to_python = json.loads(data)
per_size = dict() # IP-Response size
hostlist = dict()
# Data pre-processing here:
for y in json_to_python:
hostlist[y['HOST']] = 1
if y['HOST'] in per_size:
per_size[y['HOST']].append(int(y['SIZE']))
else:
per_size[y['HOST']] = [int(y['SIZE'])]
##Data pre-processing ends here
log.debug(
"*** Printing Input to analysis - 4 (1): K-means on IP and average response size ****"
)
#####*****SIZE******####
#### Analysis #4 (1): IP address - Size of response received feature
X = np.array([[0.00, 0.00]])
for x in hostlist:
avg_size = mean(per_size[x])
log.debug(x + ": " + str(avg_size))
y = x.split(".")
ip = ""
for z in range(4):
l = len(y[z])
l = 3 - l
if (l > 0):
zero = ""
for t in range(3 - len(y[z])):
zero = zero + "0"
y[z] = zero + y[z]
ip = ip + y[z]
# log.debug( str(float(float(ip)/1000)) + ": " + str(avg_size))
le = [float(float(ip) / 1000), avg_size]
X = np.vstack([X, le])
log.info(
"******** Printing Analysis #4: IP-Address and Response Size received: Elliptic Envelope ********"
)
log.info(
"******** Check the image elliptic.png saved in the working directory ********"
)
# print kmeans.labels_
####################################
## Analysis 4 (4): Outlier-unsupervised-elliptic (Currently not working our data)#####
X1 = X
# Define "classifiers" to be used
classifiers = {
"Empirical Covariance":
EllipticEnvelope(support_fraction=1., contamination=0.261),
"Robust Covariance (Minimum Covariance Determinant)":
EllipticEnvelope(contamination=0.261),
"OCSVM":
OneClassSVM(nu=0.261, gamma=0.05)
}
colors = ['m', 'g', 'b']
legend1 = {}
legend2 = {}
# Learn a frontier for outlier detection with several classifiers
xx1, yy1 = np.meshgrid(np.linspace(-8, 28, 500), np.linspace(3, 40, 500))
xx2, yy2 = np.meshgrid(np.linspace(3, 10, 500), np.linspace(-5, 45, 500))
for i, (clf_name, clf) in enumerate(classifiers.items()):
plt.figure(1)
clf.fit(X1)
Z1 = clf.decision_function(np.c_[xx1.ravel(), yy1.ravel()])
Z1 = Z1.reshape(xx1.shape)
legend1[clf_name] = plt.contour(
xx1, yy1, Z1, levels=[0], linewidths=2, colors=colors[i])
legend1_values_list = list(legend1.values())
legend1_keys_list = list(legend1.keys())
# Plot the results (= shape of the data points cloud)
plt.figure(1) # two clusters
plt.title(
"Outlier detection on a real data set: IP-response size received:")
plt.scatter(X1[:, 0], X1[:, 1], color='black')
bbox_args = dict(boxstyle="round", fc="0.8")
plt.xlim((xx1.min(), xx1.max()))
plt.ylim((yy1.min(), yy1.max()))
plt.legend(
(legend1_values_list[0].collections[0],
legend1_values_list[1].collections[0],
legend1_values_list[2].collections[0]),
(legend1_keys_list[0], legend1_keys_list[1], legend1_keys_list[2]),
loc="upper center",
prop=matplotlib.font_manager.FontProperties(size=12))
plt.ylabel("Response size received")
plt.xlabel("Host-IP address")
##plt.show()
plt.savefig('elliptic.png')
def mean(numbers):
return float(sum(numbers)) / max(len(numbers), 1)
| 2.9375 | 3 |
muonic/analysis/muon_lifetime.py | LambdaDigamma/muonic | 3 | 12797116 | #!/usr/bin/env python
import sys
import gzip
#####################################################
#This is coincidence level 0!!
#Order of scintillators is irrelevant!
#####################################################
files = sys.argv[1:]
BIT0_4 = 31
BIT5 = 1 << 5
BIT7 = 1 << 7
# For DAQ status
BIT0 = 1 # 1 PPS interrupt pending
BIT1 = 1 << 1 # Trigger interrupt pending
BIT2 = 1 << 2 # GPS data possible corrupted
BIT3 = 1 << 3 # Current or last 1PPS rate not within range
def time_to_seconds(time, correction):
'''
    Convert an hhmmss.xxx time string into seconds since day start
'''
# print time,correction
tfields = time.split(".")
t = tfields[0]
secs_since_day_start = int(t[0:2])*3600+int(t[2:4])*60+int(t[4:6])
evt_time = secs_since_day_start + int(tfields[1])/1000.0+int(correction)/1000.0
return round(evt_time)
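# Worked example (editor's illustration):
#   time_to_seconds("123456.789", "12")
#   -> round(12*3600 + 34*60 + 56 + 0.789 + 0.012) = 45297.0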
#filename = sys.argv[1]
f = open(sys.argv[1])
for filename in files:
ch0 = float(sys.argv[2]) #0 or 1
ch1 = float(sys.argv[3]) #0 or 1
ch2 = float(sys.argv[4]) #0 or 1
ch3 = float(sys.argv[5]) #0 or 1
dpch0 = float(sys.argv[6]) #0 or 1
dpch1 = float(sys.argv[7]) #0 or 1
dpch2 = float(sys.argv[8]) #0 or 1
dpch3 = float(sys.argv[9]) #0 or 1
muon = {0:False,1:False,2:False,"Time":0.}
freq = 25e6 # 25 MHz
last_onepps = 0
last_muon = 0
wait_fe0 = False
time_ch0 = 0.
wait_fe1 = False
time_ch1 = 0.
wait_fe2 = False
time_ch2 = 0.
wait_fe3 = False
time_ch3 = 0.
muon_start = 0.
nmuons = 0
last_pulse = 0.
decay_start_time_ch0 = 0.
decay_waiting_ch0 = False
decay_start_time_ch1 = 0.
decay_waiting_ch1 = False
decay_start_time_ch2 = 0.
decay_waiting_ch2 = False
decay_start_time_ch3 = 0.
decay_waiting_ch3 = False
last_seconds = 0.
last_time = 0.
last_triggercount = 0
switched_onepps = False
last_onepps = 0
onepps_count = 0
counter = 0.0
if filename.endswith('.gz'):
f = gzip.open(filename)
else:
        f = open(filename)
for line in f:
fields = line.rstrip("\n").split(" ")
#print "fields: ",fields
#print "line: ",line
# Ignore malformed lines
if len(fields) != 16:
continue
#Ignore everything that is not trigger data
if len(fields[0]) != 8:
continue
try:
int(fields[len(fields)-1])
except ValueError:
continue
trigger_count = int(fields[0],16)
onepps_count = int(fields[9],16)
re_0 = (fields[1] >= "00")
re_1 = (fields[3] >= "00")
re_2 = (fields[5] >= "00")
re_3 = (fields[7] >= "00")
fe_0 = (fields[2] >= "00")
fe_1 = (fields[4] >= "00")
fe_2 = (fields[6] >= "00")
fe_3 = (fields[8] >= "00")
#check the single pulse for channel
if (ch0==1 and fields[1] == "00"):
continue
if (ch1==1 and fields[3] == "00"):
continue
if (ch2==1 and fields[5] == "00"):
continue
if (ch3==1 and fields[7] == "00"):
continue
if (ch0==1 and ch1==1 and (fields[1] == "00" or fields[3] == "00")):
continue
if (ch0==1 and ch2==1 and (fields[1] == "00" or fields[5] == "00")):
continue
if (ch0==1 and ch3==1 and (fields[1] == "00" or fields[7] == "00")):
continue
if (ch1==1 and ch2==1 and (fields[3] == "00" or fields[5] == "00")):
continue
if (ch1==1 and ch3==1 and (fields[3] == "00" or fields[7] == "00")):
continue
if (ch2==1 and ch3==1 and (fields[5] == "00" or fields[7] == "00")):
continue
if (ch0==1 and ch1==1 and ch2==1 and (fields[1] == "00" and fields[3] == "00" or fields[5] == "00")):
continue
if (ch1==1 and ch2==1 and ch3==1 and (fields[3] == "00" and fields[5] == "00" or fields[7] == "00")):
continue
#check for double pulse
if fields[1] >= "80":
for line in f:
fields_next = line.rstrip("\n").split(" ")
#define the different options
if 0==0:
#current analysis for one event. Note, that one event can be described by more than one line!
if last_onepps != onepps_count:
if onepps_count > last_onepps:
freq = float(onepps_count - last_onepps)
else:
freq = float(0xFFFFFFFF + onepps_count - last_onepps)
prevlast_onepps = last_onepps
last_onepps = onepps_count
switched_onepps = True
time = fields[10]
correction = fields[15]
seconds0 = time_to_seconds(time,correction)+(trigger_count - onepps_count)/freq
seconds1 = time_to_seconds(time,correction)+(trigger_count - onepps_count)/freq
seconds2 = time_to_seconds(time,correction)+(trigger_count - onepps_count)/freq
seconds3 = time_to_seconds(time,correction)+(trigger_count - onepps_count)/freq
if time == last_time and switched_onepps:
print "Correcting delayed onepps switch:",seconds0,line
seconds0 = time_to_seconds(time,correction)+(trigger_count - prevlast_onepps)/freq
seconds1 = time_to_seconds(time,correction)+(trigger_count - prevlast_onepps)/freq
seconds2 = time_to_seconds(time,correction)+(trigger_count - prevlast_onepps)/freq
seconds3 = time_to_seconds(time,correction)+(trigger_count - prevlast_onepps)/freq
else:
last_time = time
switched_onepps = False
if trigger_count < last_triggercount and not switched_onepps:
print "Correcting trigger count rollover:",seconds0,line
seconds0 += int(0xFFFFFFFF)/freq
seconds1 += int(0xFFFFFFFF)/freq
seconds2 += int(0xFFFFFFFF)/freq
seconds3 += int(0xFFFFFFFF)/freq
else:
last_triggercount = trigger_count
#if last_seconds > seconds0:
# print "Wrong event order",seconds0,line
# continue
last_seconds = seconds0
print "seconds0:",seconds0
print "decay_start_time_ch0: ",decay_start_time_ch0
print "decay_start_time_ch1: ",decay_start_time_ch1
print "decay_start_time_ch2: ",decay_start_time_ch2
print "decay_start_time_ch3: ",decay_start_time_ch3
print "difference: ",seconds0-decay_start_time_ch0
pulse_ch0 = False
pulse_ch1 = False
pulse_ch2 = False
pulse_ch3 = False
#single channel++++++++++++++++++++++++++++
if dpch0==1 and (seconds0 !=decay_start_time_ch1 ) and (seconds0 != decay_start_time_ch2) and (seconds0 != decay_start_time_ch3) :
print "dpch0"
print "Decay ch0 %10.8f microseconds"%(1e6*(seconds0 - decay_start_time_ch0),)
if decay_waiting_ch0:
if re_0:
wait_fe0 = True
time_ch0 = seconds0
if time_ch0 - seconds0 > 50e-9:
wait_f0 = False
decay_waiting_ch0 = False
if fe_0 and wait_fe0:
print "Pulse ch0",seconds0,line
pulse_ch0 = True
wait_fe0 = False
if decay_waiting_ch0 and seconds0 - decay_start_time_ch0 > 20:
print "No decay",seconds0,decay_start_time_ch0, seconds0 - decay_start_time_ch0
decay_waiting_ch0 = False
else:
decay_waiting_ch0 = False
print "Decay ch0 %10.8f ch0microseconds"%((seconds0 - decay_start_time_ch0),)
else:
decay_waiting_ch0 = False
else:
print "no decay in channel 0"
if dpch1==1 and (seconds1 != decay_start_time_ch0) and (seconds1 != decay_start_time_ch2) and (seconds1 != decay_start_time_ch3) :
if decay_waiting_ch1:
if re_1:
wait_fe1 = True
time_ch1 = seconds1
if time_ch1 - seconds1 > 50e-9:
wait_f1 = False
if fe_1 and wait_fe1:
print "Pulse ch1",seconds1,line
pulse_ch1 = True
wait_fe1 = False
if decay_waiting_ch1 and seconds1 - decay_start_time_ch1 > 20:
print "No decay",seconds1,decay_start_time_ch1,seconds1 - decay_start_time_ch1
decay_waiting_ch1 = False
else:
decay_waiting_ch1 = False
print "Decay ch1 %10.8f ch1microseconds"%((seconds1 - decay_start_time_ch1),)
else:
decay_waiting_ch1 = False
else:
print "no decay in channel 1"
if dpch2==1 and (seconds2 != decay_start_time_ch0) and (seconds2 != decay_start_time_ch1) and (seconds2 != decay_start_time_ch3):
if decay_waiting_ch2:
if re_2:
wait_fe2 = True
time_ch2 = seconds2
if time_ch2 - seconds2 > 50e-9:
wait_f2 = False
decay_waiting_ch2 = False
if fe_2 and wait_fe2:
print "Pulse ch2",seconds2,line
pulse_ch2 = True
wait_fe2 = False
if decay_waiting_ch2 and seconds2 - decay_start_time_ch2 > 20:
print "No decay",seconds2,decay_start_time_ch2, seconds2 - decay_start_time_ch2
decay_waiting_ch2 = False
else:
decay_waiting_ch2 = False
print "Decay ch2 %10.8f ch2microseconds"%((seconds2 - decay_start_time_ch2),)
else:
decay_waiting_ch2 = False
else:
print "no decay in channel 2"
if dpch3==1 and (seconds3 != decay_start_time_ch0) and (seconds3 != decay_start_time_ch1) and (seconds3 != decay_start_time_ch2) :
if decay_waiting_ch3:
if re_3:
wait_fe3 = True
seconds3 = time_to_seconds(time,correction)+(trigger_count - onepps_count)/freq
time_ch3 = seconds3
if time_ch3 - seconds3 > 50e-9:
wait_f3 = False
if fe_3 and wait_fe3:
print "Pulse ch3",seconds3,line
pulse_ch3 = True
wait_fe3 = False
if decay_waiting_ch3 and seconds3 - decay_start_time_ch3 > 20:
print "No decay",seconds3,decay_start_time_ch3,seconds3 - decay_start_time_ch3
decay_waiting_ch3 = False
else:
decay_waiting_ch3 = False
print "Decay ch3 %10.8f ch3microseconds"%((seconds3 - decay_start_time_ch3),)
else:
decay_waiting_ch3 = False
else:
print "no decay in channel 3"
if re_0 and dpch0==1:
wait_fe0 = True
time_ch0 = seconds0
if time_ch0 - seconds0 > 50e-9:
wait_f0 = False
if fe_0 and wait_fe0:
print "Pulse ch0",seconds0,line
decay_start_time_ch0 = 0
decay_start_time_ch0 = seconds0
decay_waiting_ch0 = True
pulse_ch0 = True
wait_fe0 = False
else:
decay_waiting_ch0 = False
if re_1 and dpch1==1:
wait_fe1 = True
time_ch1 = seconds1
if time_ch1 - seconds1 > 50e-9:
wait_f1 = False
if fe_1 and wait_fe1:
#print "Pulse ch1",seconds1,line
decay_start_time_ch1 = 0
decay_start_time_ch1 = seconds1
decay_waiting_ch1 = True
pulse_ch1 = True
wait_fe1 = False
else:
decay_waiting_ch1 = False
if re_2 and dpch2==1:
wait_fe2 = True
time_ch2 = seconds2
if time_ch2 - seconds2 > 50e-9:
wait_f2 = False
if fe_2 and wait_fe2:
print "Pulse ch2",seconds2,line
decay_start_time_ch2 = seconds2
decay_waiting_ch2 = True
pulse_ch2 = True
wait_fe2 = False
else:
decay_waiting_ch2 = False
if re_3 and dpch3==1:
wait_fe3 = True
time_ch3 = seconds3
if time_ch3 - seconds3 > 50e-9:
wait_f3 = False
if fe_3 and wait_fe3:
print "Pulse ch2",seconds3,line
decay_start_time_ch3 = seconds3
decay_waiting_ch3 = True
pulse_ch3 = True
wait_fe3 = False
else:
decay_waiting_ch3 = False
#seconds0=decay_start_time_ch0
#seconds1=decay_start_time_ch1
#seconds2=decay_start_time_ch2
#seconds3=decay_start_time_ch3
if fields_next[1]>= "80":
previous_fe_0 = fe_0
previous_re_1 = re_1
previous_fe_1 = fe_1
previous_re_2 = re_2
previous_fe_2 = fe_2
previous_re_3 = re_3
previous_fe_3 = fe_3
break
| 2.53125 | 3 |
tools/bzldoc/bzldoc.bzl | george-enf/enkit | 0 | 12797117 | <filename>tools/bzldoc/bzldoc.bzl<gh_stars>0
load("//tools/codegen:codegen.bzl", "codegen")
load("//tools/mdfmt:mdfmt.bzl", "mdfmt_filter")
def _bzl2yaml_impl(ctx):
args = ctx.actions.args()
for f in ctx.files.src:
args.add("--input", f)
args.add("--short_path", f.short_path)
args.add("--output", ctx.outputs.out.path)
ctx.actions.run(
inputs = ctx.files.src,
outputs = [ctx.outputs.out],
executable = ctx.executable.bzl2yaml_tool,
arguments = [args],
)
bzl2yaml = rule(
doc = """
Runs bzl2yaml to parse a bzl file into data.
""",
implementation = _bzl2yaml_impl,
output_to_genfiles = True, # so that header files can be found.
attrs = {
"src": attr.label(
allow_files = [".bzl"],
doc = "BZL file to parse.",
),
"out": attr.output(
mandatory = True,
doc = "YAML file to generate.",
),
"bzl2yaml_tool": attr.label(
executable = True,
cfg = "exec",
allow_files = True,
default = Label("//tools/bzldoc:bzl2yaml"),
doc = "The path to the bzl2yaml tool itself.",
),
},
)
def bzldoc(name, src):
"""Convert a BZL file into documentation."""
bzl2yaml(
name = "%s-bzl2yaml" % name,
src = src,
out = "%s.yaml" % name,
)
codegen(
name = "%s-md-unformatted-gen" % name,
outs = [name + ".md.unformatted"],
srcs = ["@enkit//tools/bzldoc:md.template"],
data = ["%s.yaml" % name],
visibility = ["//visibility:public"],
)
mdfmt_filter(
name = "%s-md-gen" % name,
out = name + ".md",
src = name + ".md.unformatted",
visibility = ["//visibility:public"],
)
| 2.078125 | 2 |
src/wordsim/nn/nn.py | recski/wordsim | 21 | 12797118 | from ConfigParser import ConfigParser
import logging
import os
import sys
from wordsim.models import get_models
from wordsim.nn.utils import evaluate
from wordsim.nn.data import create_datasets
from wordsim.nn.model import KerasModel
def main():
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s : " +
"%(module)s (%(lineno)s) - %(levelname)s - %(message)s")
conf = ConfigParser(os.environ)
conf.read(sys.argv[1])
vectorizers = get_models(conf)
training_data, dev_data, test_data = create_datasets(conf)
if conf.getboolean('main', 'train'):
epochs = conf.getint('training', 'epochs')
batch_size = conf.getint('training', 'batch_size')
training_data.vectorize(vectorizers)
input_size, input_dim = training_data.vectors.shape
model = KerasModel(conf, input_dim, 1) # output is a score
model.train(training_data, epochs, batch_size)
model.save()
test_data.vectorize(vectorizers)
evaluate(model, dev_data)
if __name__ == "__main__":
main()
| 2.5 | 2 |
appengine/components/components/auth/realms.py | amymariaparker2401/luci-py | 74 | 12797119 | # Copyright 2020 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
"""Utilities to work with realms_pb2 messages."""
from .proto import realms_pb2
# Currently acceptable version of Realms API. See api_version in realms.proto.
API_VERSION = 1
def merge(permissions, realms, out=None):
"""Merges multiple realms_pb2.Realms into one, fills in `api_version`.
The given list of permissions will become authoritative: if some realm uses
a permission not in the list, it will be silently dropped from the bindings.
This can potentially happen due to the asynchronous nature of realms config
updates (e.g. a role change that deletes some permissions can be committed
into the AuthDB before realms_pb2.Realms are reevaluated). Eventually the
state should converge to be 100% consistent.
Args:
permissions: a sorted list of realms_pb2.Permission with all permissions.
realms: a dict {project ID -> realms_pb2.Realms with its realms} to merge.
out: a realms_pb2.Realms to write the result into (will not be cleared!).
Returns:
`out` or a new realms_pb2.Realms if out was None.
"""
out = out or realms_pb2.Realms()
out.api_version = API_VERSION
out.permissions.extend(permissions)
# Permission name => its index in the merged realms_pb2.Realms.
perm_index = {p.name: idx for idx, p in enumerate(permissions)}
# Visit in order of project IDs.
for proj_id, proj_realms in sorted(realms.items()):
# Calculate a mapping from the permission index in `proj_realms` to
# the index in the final merged proto (or None if undefined).
old_to_new = [perm_index.get(p.name) for p in proj_realms.permissions]
# Visit all bindings in all realms.
for old_realm in proj_realms.realms:
# Relabel permission indexes, drop empty bindings that may appear.
bindings = []
for b in old_realm.bindings:
perms = sorted(
old_to_new[idx]
for idx in b.permissions
if old_to_new[idx] is not None
)
if perms:
bindings.append((perms, b.principals))
# Add the relabeled realm to the output.
assert old_realm.name.startswith(proj_id+':'), old_realm.name
new_realm = out.realms.add()
new_realm.name = old_realm.name
new_realm.bindings.extend(
realms_pb2.Binding(permissions=perms, principals=principals)
for perms, principals in sorted(bindings, key=lambda x: x[0])
)
if old_realm.HasField('data'):
new_realm.data.CopyFrom(old_realm.data)
return out
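# Illustrative call (the permission list and per-project Realms protos are
# placeholders for whatever the caller has loaded):
#
#   merged = merge(
#       permissions=sorted_permissions,   # sorted list of realms_pb2.Permission
#       realms={'proj-a': realms_a, 'proj-b': realms_b})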
| 1.875 | 2 |
tests/test_context_manager.py | LouisPi/PiPortableRecorder | 51 | 12797120 | <reponame>LouisPi/PiPortableRecorder
"""tests for Context and ContextManager objects"""
import os
import unittest
from threading import Event
from mock import patch, Mock
try:
from context_manager import ContextManager, Context, ContextError
except ImportError:
print("Absolute imports failed, trying relative imports")
os.sys.path.append(os.path.dirname(os.path.abspath('.')))
# Store original __import__
orig_import = __import__
def import_mock(name, *args, **kwargs):
if name in ['helpers'] and not kwargs:
#Have to filter for kwargs since there's a package in 'json'
#that calls __builtins__.__import__ with keyword arguments
#and we don't want to mock that call
return Mock()
return orig_import(name, *args, **kwargs)
with patch('__builtin__.__import__', side_effect=import_mock):
from context_manager import ContextManager, Context, ContextError
class TestContext(unittest.TestCase):
"""tests context class"""
def test_constructor(self):
"""Tests constructor"""
c = Context("test_context", lambda *a, **k: True)
self.assertIsNotNone(c)
def test_threading(self):
"""Tests whether threaded and non-threaded contexts behave as they should"""
c = Context("test_context", lambda *a, **k: True)
e = Event()
finished = Event()
c.signal_finished = finished.set
c.threaded = False # Need to set this flag, otherwise a validation check fails
c.activate()
assert(not e.isSet())
assert(not finished.isSet())
c.threaded = True
c.set_target(e.set)
c.activate()
finished.wait()
assert(e.isSet())
def test_targetless_threaded_context(self):
"""Tests whether a target-less threaded context fails to activate"""
c = Context("test_context", lambda *a, **k: True)
try:
c.activate()
except ContextError:
pass
else:
raise AssertionError
# After marking context as non-threaded, it should activate OK
c.threaded = False
try:
c.activate()
except:
raise AssertionError
else:
pass
class TestContextManager(unittest.TestCase):
"""tests context manager class and interaction between contexts"""
def test_constructor(self):
"""Tests constructor"""
cm = ContextManager()
self.assertIsNotNone(cm)
def test_initial_contexts(self):
"""Tests whether initial contexts are getting created"""
cm = ContextManager()
cm.init_io(Mock(), Mock()) #Implicitly creates initial contexts
for context_alias, context in cm.contexts.items():
assert(context_alias in cm.initial_contexts)
assert(context)
def test_basic_context_switching(self):
"""Tests whether basic context switching works"""
cm = ContextManager()
cm.initial_contexts = [cm.fallback_context, "test1", "test2"]
cm.init_io(Mock(), Mock())
assert(cm.current_context is None)
cm.switch_to_context(cm.fallback_context)
assert(cm.current_context == cm.fallback_context)
e1 = Event()
e2 = Event()
cm.register_context_target("test1", e1.wait)
cm.register_context_target("test2", e2.wait)
cm.switch_to_context("test1")
assert(cm.current_context == "test1")
assert(cm.get_previous_context("test1") == cm.fallback_context)
cm.switch_to_context("test2")
assert(cm.current_context == "test2")
assert(cm.get_previous_context("test2") == "test1")
cm.switch_to_context("test1")
assert(cm.current_context == "test1")
assert(cm.get_previous_context("test1") == "test2")
#Setting events so that threads exit
e1.set()
e2.set()
def test_context_switching_on_context_finish(self):
"""Tests whether basic context switching works"""
cm = ContextManager()
cm.init_io(Mock(), Mock())
cm.switch_to_context(cm.fallback_context)
e1 = Event()
c = cm.create_context("test1")
cm.register_context_target("test1", e1.wait)
cm.switch_to_context("test1")
assert(cm.current_context == "test1")
finished = Event()
def new_signal_finished():
c.event_cb(c.name, "finished")
finished.set()
with patch.object(c, 'signal_finished', side_effect=new_signal_finished) as p:
e1.set()
#Waiting for the thread to exit
finished.wait()
assert(cm.current_context == cm.fallback_context)
def test_targetless_context_switching(self):
"""Tests that switching to a target-less context fails"""
cm = ContextManager()
cm.init_io(Mock(), Mock())
cm.switch_to_context(cm.fallback_context)
cm.create_context("test1")
assert(cm.current_context == cm.fallback_context)
cm.switch_to_context("test1")
assert(cm.current_context == cm.fallback_context)
def test_failsafe_fallback_on_io_fail(self):
cm = ContextManager()
cm.fallback_context = "m"
cm.initial_contexts = ["m"]
cm.init_io(Mock(), Mock())
cm.switch_to_context(cm.fallback_context)
c1 = cm.create_context("t1")
c2 = cm.create_context("t2")
e1 = Event()
e2 = Event()
cm.register_context_target("t1", e1.wait)
cm.register_context_target("t2", e2.wait)
cm.switch_to_context("t1")
# Fucking things up - since context objects are app-accessible,
# we can't really rely on them staying the same
del c1.i
c1.signal_finished = lambda: True
del c2.i
# Both current and new contexts are fucked up
cm.switch_to_context("t2")
# Setting events so that threads exit
e1.set()
e2.set()
assert(cm.current_context == cm.fallback_context)
def test_failsafe_fallback_on_thread_fail(self):
cm = ContextManager()
cm.fallback_context = "m"
cm.initial_contexts = ["m"]
cm.init_io(Mock(), Mock())
cm.switch_to_context(cm.fallback_context)
c1 = cm.create_context("t1")
c2 = cm.create_context("t2")
e1 = Event()
e2 = Event()
cm.register_context_target("t1", e1.wait)
cm.register_context_target("t2", e2.wait)
cm.switch_to_context("t1")
# Removing
c1.set_target(None)
del c1.thread
c1.signal_finished = lambda: True
c2.set_target(None)
# Again, switcing to the fucked up context
cm.switch_to_context("t2")
# Setting events so that threads exit
e1.set()
e2.set()
assert(cm.current_context == cm.fallback_context)
if __name__ == '__main__':
unittest.main()
""" def test_left_key_exits(self):
r = Refresher(lambda: "Hello", get_mock_input(), get_mock_output(), name=r_name)
r.refresh = lambda *args, **kwargs: None
# This test doesn't actually test whether the Refresher exits
# It only tests whether the in_foreground attribute is set
# Any ideas? Maybe use some kind of "timeout" library?
def scenario():
r.keymap["KEY_LEFT"]()
assert not r.in_foreground
# If the test fails, either the assert will trigger a test failure,
# or the idle loop will just run indefinitely
# The exception thrown should protect from the latter
raise KeyboardInterrupt
with patch.object(r, 'idle_loop', side_effect=scenario) as p:
try:
r.activate()
except KeyboardInterrupt:
pass #Test succeeded
def test_shows_data_on_screen(self):
i = get_mock_input()
o = get_mock_output()
r = Refresher(lambda: "Hello", i, o, name=r_name)
def scenario():
r.refresh()
r.deactivate()
with patch.object(r, 'idle_loop', side_effect=scenario) as p:
r.activate()
#The scenario should only be called once
assert r.idle_loop.called
assert r.idle_loop.call_count == 1
assert o.display_data.called
assert o.display_data.call_count == 2 #One in to_foreground, and one in patched idle_loop
assert o.display_data.call_args_list[0][0] == ("Hello", )
assert o.display_data.call_args_list[1][0] == ("Hello", )
def test_pause_resume(self):
i = get_mock_input()
o = get_mock_output()
r = Refresher(lambda: "Hello", i, o, name=r_name, refresh_interval=0.1)
#refresh_interval is 0.1 so that _counter always stays 0
#and idle_loop always refreshes
#Doing what an activate() would do, but without a loop
r.to_foreground()
assert o.display_data.called
assert o.display_data.call_count == 1 #to_foreground calls refresh()
r.idle_loop()
assert o.display_data.call_count == 2 #not paused
r.pause()
r.idle_loop()
assert o.display_data.call_count == 2 #paused, so count shouldn't change
r.resume()
assert o.display_data.call_count == 3 #resume() refreshes the display
r.idle_loop()
assert o.display_data.call_count == 4 #should be refresh the display normally now
def test_keymap_restore_on_resume(self):
i = get_mock_input()
o = get_mock_output()
r = Refresher(lambda: "Hello", i, o, name=r_name, refresh_interval=0.1)
r.refresh = lambda *args, **kwargs: None
r.to_foreground()
assert i.set_keymap.called
assert i.set_keymap.call_count == 1
assert i.set_keymap.call_args[0][0] == r.keymap
assert "KEY_LEFT" in r.keymap
r.pause()
assert i.set_keymap.call_count == 1 #paused, so count shouldn't change
i.set_keymap(None)
assert i.set_keymap.call_args[0][0] != r.keymap
r.resume()
assert i.set_keymap.call_count == 3 #one explicitly done in the test right beforehand
assert i.set_keymap.call_args[0][0] == r.keymap
def test_set_interval(self):
i = get_mock_input()
o = get_mock_output()
r = Refresher(lambda: "Hello", i, o, name=r_name, refresh_interval=1)
assert(r.refresh_interval == 1)
assert(r.sleep_time == 0.1)
assert(r.iterations_before_refresh == 10)
# Refresh intervals up until 0.1 don't change the sleep time
r.set_refresh_interval(0.1)
assert(r.refresh_interval == 0.1)
assert(r.sleep_time == 0.1)
assert(r.iterations_before_refresh == 1)
# Refresh intervals less than 0.1 change sleep_time to match refresh interval
r.set_refresh_interval(0.01)
assert(r.refresh_interval == 0.01)
assert(r.sleep_time == 0.01)
assert(r.iterations_before_refresh == 1)
# Now setting refresh_interval to a high value
r.set_refresh_interval(10)
assert(r.refresh_interval == 10)
assert(r.sleep_time == 0.1) # Back to normal
assert(r.iterations_before_refresh == 100)
def test_update_keymap(self):
i = get_mock_input()
o = get_mock_output()
r = Refresher(lambda: "Hello", i, o, name=r_name, refresh_interval=0.1)
r.refresh = lambda *args, **kwargs: None
# We need to patch "process_callback" because otherwise the keymap callbacks
# are wrapped and we can't test equivalence
with patch.object(r, 'process_callback', side_effect=lambda keymap:keymap) as p:
keymap1 = {"KEY_LEFT": lambda:1}
r.update_keymap(keymap1)
assert(r.keymap == keymap1)
keymap2 = {"KEY_RIGHT": lambda:2}
r.update_keymap(keymap2)
keymap2.update(keymap1)
assert(r.keymap == keymap2)"""
| 2.546875 | 3 |
Script/TriangularMAtrix.py | AlessandroCaula/Programming_Alessandro_Caula | 0 | 12797121 | matrix=open("/media/alessandro/DATA/User/BIOINFORMATICA.BOLOGNA/Programming_for_Bioinformatics/Module2/Exercise/PAM250.txt","r")
matrix1=open("/media/alessandro/DATA/User/BIOINFORMATICA.BOLOGNA/Programming_for_Bioinformatics/Module2/Exercise/PAM250(1).txt","r")
rows="ARNDCQEGHILKMFPSTWYV"
cols="ARNDCQEGHILKMFPSTWYV"
def tr_matrx(matrix,rows,cols):
dict={}
scores=[]
for row in matrix:
score=[]
row.rstrip()
score=row.split()
scores.append(score)
#print(scores)
row=0
for r in scores:
col=0
for c in r:
k=rows[row]+cols[col]
dict[k]=float(scores[row][col])
col+=1
row+=1
return(dict)
print(tr_matrx(matrix,rows,cols))
def dic_matrix(file):  # function that builds the score dictionary from a substitution matrix file
aminoacid = file.readline().split()
    ami = aminoacid[3]  # index 3 because, in that file, the amino-acid letters sit at that position of the first header line
#print(ami)
couple = {}
for a in ami:
        col = file.readline().split()  # read the remaining rows one by one, starting from the first score row
#print(col)
for i in range(len(col)):
            couple[a + ami[i]] = float(col[i])  # int(col[i][:-1]) would also work, stripping the trailing dot
            # that otherwise makes int() raise an error
file.close()
return couple
print(dic_matrix(matrix1))
| 3.375 | 3 |
friendFight.py | ctguggbond/ZhihuDTW | 11 | 12797122 | import requests
import hashlib
import time
import json
from pymongo import MongoClient
headers = {
'content-type': 'application/x-www-form-urlencoded',
}
userInfo = {
'player1':{
        'uid': 'player 1 uid goes here',
        'token': 'player 1 token goes here'
    },
    'player2':{
        'uid': 'player 2 uid goes here',
        'token': 'player 2 token goes here'
}
}
session = requests.session()
roomID = -1
# count of questions answered from the local question bank
successTime = 0
# millisecond timestamp generator
nowTime = lambda:int(round(time.time() * 1000))
#mongodb
conn = MongoClient('localhost',27017)
quizSet = conn.zhdtw.quizs
intoRoomUrl = 'https://question-zh.hortor.net/question/bat/intoRoom'
leaveRoomUrl = 'https://question-zh.hortor.net/question/bat/leaveRoom'
beginFightUrl = 'https://question-zh.hortor.net/question/bat/beginFight'
findQuizUrl = 'https://question-zh.hortor.net/question/bat/findQuiz'
chooseUrl = 'https://question-zh.hortor.net/question/bat/choose'
fightResultUrl = 'https://question-zh.hortor.net/question/bat/fightResult'
# build the request signature
def genSign(params,player):
tempParams = params.copy()
tempParams['token'] = userInfo[player]['token']+userInfo[player]['uid']
tempParams = sorted(tempParams.items(), key=lambda e:e[0])
originStr = ''
for key, value in tempParams:
originStr = originStr + key + '=' + str(value)
m = hashlib.md5()
m.update(originStr.encode(encoding='utf-8'))
return m.hexdigest()
def intoRoom(player):
global roomID
params = {
'roomID' : roomID,
'uid' : userInfo[player]['uid'],
't' : nowTime()
}
params['sign'] = genSign(params,player)
resp = session.post(url=intoRoomUrl,data=params,headers=headers)
try:
jdata = json.loads(resp.text)
roomID = jdata.get('data')['roomId']
        print(player + ' entered the room successfully...')
except:
print(resp.text)
        print(player + ' failed to enter the room...')
leaveRoom(player)
def leaveRoom(player):
params = {
'roomID' : roomID,
'uid' : userInfo[player]['uid'],
't' : nowTime()
}
params['sign'] = genSign(params,player)
resp = session.post(url=leaveRoomUrl,data=params,headers=headers)
try:
jdata = json.loads(resp.text)
if jdata.get('errcode') == 0:
            print(player + ' left the room successfully...')
else:
print(jdata)
except:
print(resp.text)
        print(player + ' failed to leave the room...')
def beginFight():
params = {
'roomID' : roomID,
'uid' : userInfo['player1']['uid'],
't' : nowTime()
}
params['sign'] = genSign(params,'player1')
resp = session.post(url=beginFightUrl,data=params,headers=headers)
try:
jdata = json.loads(resp.text)
if jdata.get('errcode') == 0:
            print('Friend battle started...')
else:
print(jdata)
except:
print(resp.text)
def findQuiz(quizNum):
params = {
'roomID' : roomID,
'quizNum' : quizNum,
'uid' : userInfo['player1']['uid'],
't' : nowTime()
}
params['sign'] = genSign(params,'player1')
resp = session.post(url=findQuizUrl,data=params,headers=headers)
try:
jdata = json.loads(resp.text)
if jdata.get('errcode') == 0:
            print('Question fetched successfully...')
return jdata.get('data')
else:
print(jdata)
except:
print(resp.text)
def choose(player,quizNum,option,cfTime,magic):
params = {
'roomID' : roomID,
'uid' : userInfo[player]['uid'],
't' : nowTime(),
'option' : option,
'quizNum': quizNum,
'cfTime': cfTime,
'ccTime' : nowTime(),
'magic' : magic
}
params['sign'] = genSign(params,player)
resp = session.post(url=chooseUrl,data=params,headers=headers)
try :
jdata = json.loads(resp.text)
if jdata.get('errcode') == 0:
            print(player + ' submitted the answer successfully...')
return jdata.get('data')
else:
print(jdata)
except:
        print(player + ' failed to submit the answer...')
print(resp.text)
def fightResult(player):
params = {
'roomID' : roomID,
'type' : 0,
'uid' : userInfo[player]['uid'],
't' : nowTime()
}
params['sign'] = genSign(params,player)
resp = session.post(url=fightResultUrl,data=params,headers=headers)
try:
jdata = json.loads(resp.text)
if jdata.get('errcode') == 0:
            print(player + ' fetched the result successfully...')
return jdata.get('data')
else:
print(jdata)
except:
        print(player + ' failed to fetch the result...')
print(resp.text)
def genMagic(optionList):
optionList.sort()
originStr = optionList[0]+optionList[1]+optionList[2]+optionList[3]
m = hashlib.md5()
m.update(originStr.encode(encoding='utf-8'))
return m.hexdigest()
def startAnswer():
global successTime
for i in range(1,6):
        # delay between sending the request and receiving the data
cfTime = nowTime()
quizInfo = findQuiz(i)
cfTime = nowTime() - cfTime
time.sleep(0.1)
optionList = quizInfo['options']
quiz = quizInfo['quiz']
option = 1
        # look the question up in the local question bank
#print(quiz)
localQuiz = quizSet.find_one({'quiz':quiz})
if localQuiz:
successTime += 1
for j in range(0,4):
if(optionList[j] == localQuiz['answer']):
option = j+1
break
magic = genMagic(optionList.copy())
chooseResult = choose('player1',i,option,cfTime,magic)
choose('player2',i,2,cfTime+10,magic)
if not localQuiz:
quizModel = {}
quizModel['quiz'] = quiz
quizModel['options'] = optionList
quizModel['school'] = quizInfo['school']
quizModel['type'] = quizInfo['type']
quizModel['typeID'] = quizInfo['typeID']
quizModel['contributor'] = quizInfo['contributor']
quizModel['answer'] = optionList[chooseResult['answer']-1]
quizSet.insert_one(quizModel)
#print(optionList[chooseResult['answer']-1])
if __name__ == '__main__':
    # change i to set how many matches to play
i = 5
gameTime = 0
while(i > 0):
roomID = -1
intoRoom('player1')
intoRoom('player2')
beginFight()
startAnswer()
fightResult('player1')
fightResult('player2')
leaveRoom('player1')
leaveRoom('player2')
gameTime += 1
        print('questions answered %d / question-bank hits %d ' % (gameTime*5,successTime))
time.sleep(1)
i = i - 1
conn.close() | 2.484375 | 2 |
rp2/adc.py | fedor2018/my_upython | 0 | 12797123 | <gh_stars>0
from machine import ADC, Pin
from ntc import *
adc0 = ADC(Pin(26)) # create ADC object on ADC pin
adc1 = ADC(Pin(27)) # create ADC object on ADC pin
#adc=None, Vref=3.3, R=10000, Ro=10000.0, To=25.0, beta=3950.0, V=5 )
ntc0=NTC(adc=ADC(Pin(26)), R=3300, Ro=47000, beta=3740)
ntc1=NTC(adc=ADC(Pin(27)), R=3300, Ro=47000, beta=3740)
print ("{} V".format(ntc0.in_volt()))
print ("{} V".format(ntc1.in_volt()))
print("{} ohm".format(ntc0.r_UP()))
print("{} ohm".format(ntc1.r_UP()))
print("{} C".format(ntc0.to_temp(ntc0.r_UP())))
print("{} C".format(ntc1.to_temp(ntc1.r_UP())))
| 2.6875 | 3 |
BOJ_Solved/BOJ-21633.py | CodingLeeSeungHoon/Python_Algorithm_TeamNote | 7 | 12797124 | """
Baekjoon Online Judge 21633: Bank Transfer
"""
k = int(input())
fee = 25 + k * 0.01
fee = 2000 if fee > 2000 else fee
fee = 100 if fee < 100 else fee
print(fee) | 3.421875 | 3 |
propertiesND.py | Kruti235/109- | 0 | 12797125 | import random
import plotly.express as px
import plotly.figure_factory as ff
import statistics
dice_result = []
for i in range(0,1000):
dice1 = random.randint(1,6)
dice2 = random.randint(1,6)
dice_result.append(dice1+dice2)
mean = sum(dice_result)/len(dice_result)
print("mean of this data is {} ".format(mean))
median = statistics.median(dice_result)
print("median of this data is {} ".format(median))
mode= statistics.mode(dice_result)
print("mode of this data is {} ".format(mode))
std_deviation = statistics.stdev(dice_result)
print("stdev : {}".format(std_deviation) )
fig = ff.create_distplot([dice_result],["Result"], show_hist= False)
fig.show() | 3.28125 | 3 |
siiptool/scripts/subregion_sign.py | intel/iotg_fbu | 0 | 12797126 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2020, Intel Corporation. All rights reserved.
# SPDX-License-Identifier: BSD-2-Clause
#
"""A signing utility for creating and signing a BIOS sub-region for UEFI
"""
from __future__ import print_function
import os
import sys
import subprocess
import argparse
import uuid
import struct
import re
from pathlib import Path
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from common.siip_constants import VERSION as __version__
from common.banner import banner
import common.utilities as utils
import common.logger as logging
LOGGER = logging.getLogger("subregion_sign")
__prog__ = "subregion_sign"
TOOLNAME = "Sub-Region Signing Tool"
if sys.version_info < (3, 6):
raise Exception("Python 3.6 is the minimal version required")
class UefiSubregAuthenClass:
""" Class define EFI subreation Authentication class """
# typedef struct {
# char Name[16 bytes]; // Name of the sub-region
# EFI_GUID VendorGuid; // Vendor GUID
# SUB_REGION_VERIFICATION CertParam; // Sub-Region Certificate Parameters
# } EFI_SUB_REGION_AUTHENTICATION;
# typedef struct {
# SUB_REGION_HEADER Hdr; // Certificate Header
# UINT8 CertData[1]; // Calculated Signature
# } SUB_REGION_VERIFICATION;
# typedef struct {
# UINT32 Revision; // Revision of Signature Structure
# UINT32 Length; // Length of the Signature + Header
# EFI_GUID CertType; // Signature type
# } SUB_REGION_HEADER;
# typedef struct {
# UINT8 PublicKey[384]; // Public Key pair of the Signing Key
# UINT8 Signature[384]; // SHA384-RSA3K Signature
# } EFI_CERT_BLOCK_RSA3072_SHA384;
_StructAuthInfoFormat = "<16s16sLL16s"
_StructAuthInfoSize = struct.calcsize(_StructAuthInfoFormat)
_StructSubRegionHdrFormat = "<LL16s"
_StructSubRegionHdrSize = struct.calcsize(_StructSubRegionHdrFormat)
def __init__(self, cert_info):
""" initialization of the variables for structure """
self._valid = False
self.w_name = cert_info["name"]
self.vendor_guid = cert_info["vendor_guid"]
self.w_revision = cert_info["revision"]
self.dw_length = self._StructAuthInfoSize
self.cert_type = cert_info["cert_type"]
self.cert_data = bytes()
self.payload = bytes()
def encode(self):
""" builds structure for subregion authentication header """
self.dw_length = self._StructSubRegionHdrSize + len(self.cert_data)
uefi_subreg_authen_hdr = struct.pack(
self._StructAuthInfoFormat,
self.w_name,
self.vendor_guid.bytes_le,
self.w_revision,
self.dw_length,
self.cert_type.bytes_le,
)
self._valid = True
return uefi_subreg_authen_hdr + self.cert_data + self.payload
def dump_info(self):
""" dump the information of subregion authentication structure """
if not self._valid:
raise ValueError
print(
"EFI_SUBREGION_AUTHENTICATION.AuthInfo.Hdr.dw_length = {dw_length:08X}".format(
dw_length=self.dw_length
)
)
print(
"EFI_SUBREGION_AUTHENTICATION.AuthInfo.Hdr.w_revision = {w_revision:04X}".format(
w_revision=self.w_revision
)
)
print(
"EFI_SUBREGION_AUTHENTICATION.AuthInfo.Hdr.wCertificateType = {Vendor_guid}".format(
Vendor_guid=str(self.vendor_guid).upper()
)
)
print(
"EFI_SUBREGION_AUTHENTICATION.AuthInfo.cert_type = {cert_type}".format(
cert_type=str(self.cert_type).upper()
)
)
print(
"sizeof (EFI_SUBREGION_AUTHENTICATION.AuthInfo.cert_data) = {Size:08X}".format(
Size=len(self.cert_data)
)
)
print(
"sizeof (payload) = {Size:08X}".format(
Size=len(self.payload)
)
)
def get_certifcation_info(cl_inputs, signer):
""" returns the certifcate type passed on subregion """
# different signature type supported by tool
CERT_TYPE = {
"pkcs7": [
"4aafd29d-68df-49ee-8aa9-347d375665a7",
"smime -sign -binary -outform DER -md sha256 -nodetach -signer",
None,
],
"rsa": [
"2ee9976f-9d4c-4442-a997-8cad1c875fa1",
"dgst -binary -keyform PEM -sha384 -sign",
"rsa -pubout -modulus -noout",
],
}
# Check if openssl is installed
path = utils.check_for_tool("openssl", "version", cl_inputs.tool_path)
# Get signing type information
cert_info = CERT_TYPE.get(cl_inputs.signer_type)
# Create openSSL command 1
cmd = f'{path} {cert_info[1]} "{signer}"'
# Create openSSL command 2
if cert_info[2] is not None:
cmd2 = f"{path} {cert_info[2]}"
else:
cmd2 = cert_info[2]
certification_info = {
"revision": 0x01,
"name": cl_inputs.name.encode("utf-8"),
"vendor_guid": uuid.UUID(cl_inputs.vendor_guid),
"cert_type": uuid.UUID(cert_info[0]),
"openssl_cmd": cmd,
"openssl_cmd2": cmd2,
}
return certification_info
def build_subreg_signed_file(cert_struct, outfile):
""" build output file """
try:
with open(outfile, mode="wb") as signed_file:
signed_file.write(cert_struct)
except ValueError:
LOGGER.critical("\nCannot write payload file: %s", outfile)
sys.exit(2)
def read_file(inputfile):
""" read input file to bytes """
try:
with open(inputfile, mode="rb") as file:
sign_file = file.read()
except ValueError:
LOGGER.critical("\nCannot read payload file: %s", inputfile)
sys.exit(2)
return sign_file
def generate_signature(openssl_cmd, payload):
""" signed input file """
# Run OpenSSL command with the specified private key and capture signature from STDOUT
try:
ssl_process = subprocess.run(
openssl_cmd,
input=payload,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True,
check=True,
)
signature = ssl_process.stdout
except:
LOGGER.warning("\nCannot run openssl.")
sys.exit(1)
if ssl_process.returncode != 0:
LOGGER.critical("\nopenssl failed.")
sys.exit(1)
return signature
def create_arg_parser():
""" Parsing and validating input arguments."""
def convert_arg_line_to_args(arg_line):
for arg in arg_line.split():
if not arg.strip():
continue
yield arg
my_parser = argparse.ArgumentParser(
prog=__prog__,
description=__doc__,
conflict_handler="resolve",
fromfile_prefix_chars="@",
)
my_parser.convert_arg_line_to_args = convert_arg_line_to_args
my_parser.add_argument(
"subregion_file", help="sub region data that needs to be signed."
)
my_parser.add_argument(
"-o",
"--output",
dest="signed_file",
help="Output capsule filename.",
metavar="Filename",
default="SIGNED_OUT.bin",
)
my_parser.add_argument(
"-n",
"--name",
help="The name of the subregion being signed. Max size is 16 bytes The name is stored in signed file.",
type=chk_string_size,
metavar="subregion",
required=True,
)
my_parser.add_argument(
"-vg",
"--vendor-guid",
help="Vender GUID is one specific value given by the vendor for the sub-region being signed.\
This is required. The format is '00000000-0000-0000-0000-000000000000'",
type=chk_guid_format,
metavar="v_guid",
required=True,
)
my_parser.add_argument(
"-t",
"--signer_type",
metavar="sign_type",
required=True,
help="Type of Signing pkcs7 or rsa.",
choices=["pkcs7", "rsa"],
)
my_parser.add_argument(
"-s",
"--signer",
dest="signerfile",
required=True,
help="OpenSSL signer private certificate filename.",
)
my_parser.add_argument(
"--toolpath",
dest="tool_path",
help="Path to signtool or OpenSSL tool. "
" Optional if path to tools are already in PATH.",
default=None,
)
my_parser.add_argument(
"--show",
help="Shows information about the subregion_authentication structure "
" Optional but requires all information in order to process.",
action="store_true",
)
my_parser.add_argument(
"-v",
"--version",
help="Shows the current version of the BIOS Stitching Tool",
action="version",
version="%(prog)s {version}".format(version=__version__),
)
return my_parser
def chk_string_size(string):
""""Check the size of the string"""
max_size = 16
size = len(string.encode("utf-8"))
msg = "The size of {} is {}. The {} size must not be greter than {}".format(
string, size, string, max_size
)
if size > max_size:
raise argparse.ArgumentTypeError(str(msg))
return string
def chk_guid_format(guid):
""" check for correct formate of GUID """
# format for guid xxxxxxxx-xxxx-xxxx-xxx-xxxxxxxxxxxx where x can be A-F or 0-9
guidFormat = re.compile(
r"([a-f\d]{8}[-][a-f\d]{4}[-][a-f\d]{4}[-][a-f\d]{4}[-]{1}[a-f\d]{12}$)", re.I
)
if guidFormat.match(guid) is None:
raise argparse.ArgumentTypeError(
"File guild value is not in correct format \
(xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx where x can be A-F or 0-9)\
{}".format(guid)
)
return guid
def sign_subregion(subregion_file, signer_file, signed_file, signer_type, subregion_name, vendor_guid, show = False, tool_path = None):
# Use absolute path for openSSL
sbrgn_file = Path(subregion_file).resolve()
signer_file = Path(signer_file).resolve()
outfile = Path(signed_file).resolve()
filenames = [str(sbrgn_file), str(signer_file)]
# Verify file input file exist
status = utils.file_exist(filenames, LOGGER)
if status != 0:
sys.exit(status)
if os.path.getsize(sbrgn_file) == 0:
LOGGER.critical("size of {} subregion file must be greater than 0!".format(sbrgn_file))
sys.exit(status)
status = utils.check_key(signer_file, signer_type, LOGGER)
if status != 0:
sys.exit(status)
outfile = utils.file_not_exist(outfile, LOGGER)
parser = argparse.ArgumentParser()
parser.add_argument("name, vendor_guid, tool_path, signer_type")
cl_inputs = parser.parse_args(['name={}'.format(subregion_name)])
cl_inputs.name = subregion_name
cl_inputs.vendor_guid = vendor_guid
cl_inputs.tool_path = tool_path
cl_inputs.signer_type = signer_type
cert_info = get_certifcation_info(cl_inputs, signer_file)
uefi_subreg_authen = UefiSubregAuthenClass(cert_info)
# read input file to store into structure
payload = read_file(sbrgn_file)
uefi_subreg_authen.payload = payload
# add Vendor Guid to Payload
payload = uefi_subreg_authen.vendor_guid.bytes_le + payload
# calculate the signature store in structure
cert_data = generate_signature(cert_info["openssl_cmd"], payload)
if cert_info["openssl_cmd2"]:
# Read in the private key
payload = read_file(signer_file)
# Extract the public key modulus from private key
cert_pub = generate_signature(cert_info["openssl_cmd2"], payload)
# convert public key from bytes to string
cert_pub_string = cert_pub.decode("utf-8")
# remove word Moudlus= from the file
cert_pubkey = cert_pub_string.replace("Modulus=", "")
# remove end of line from public key
cert_pubkey = cert_pubkey.rstrip()
# Conert to hex bytes and add to signature
cert_pubkey = bytes.fromhex(cert_pubkey)
# public key and signature are packed back to back
cert_data = cert_pubkey + cert_data
uefi_subreg_authen.cert_data = cert_data
# pack structure with signature and get update size of header
uefi_signed_data = uefi_subreg_authen.encode()
if show:
uefi_subreg_authen.dump_info()
# Create output EFI subregion authentication header and signature and original file
build_subreg_signed_file(uefi_signed_data, str(outfile))
print(
"Signed {} sub-region({}) was successfully generated.".format(
subregion_name, outfile
)
)
def main():
"""Entry to script."""
parser = create_arg_parser()
args = parser.parse_args()
sign_subregion(args.subregion_file, args.signerfile, args.signed_file,
args.signer_type, args.name, args.vendor_guid, args.show, args.tool_path)
if __name__ == "__main__":
banner(TOOLNAME, __version__)
main()
| 2.15625 | 2 |
vvcatalog/templatetags/vvcatalog_tags.py | synw/django-vvcatalog | 3 | 12797127 | <gh_stars>1-10
# -*- coding: utf-8 -*-
from django import template
from django.conf import settings
from vvcatalog.conf import CURRENCY, PAGINATION
register = template.Library()
@register.simple_tag
def get_currency():
return CURRENCY
@register.simple_tag
def get_pagination():
return PAGINATION
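# Template usage sketch (assumes a template that loads this tag library; the
# surrounding markup is illustrative only):
#
#   {% load vvcatalog_tags %}
#   {{ product.price }} {% get_currency %}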
| 1.398438 | 1 |
swagger_server/models/data_utility.py | DITAS-Project/data-utility-evaluator | 0 | 12797128 | # coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from swagger_server.models.base_model_ import Model
from swagger_server import util
class DataUtility(Model):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, url: str=None, accuracy: float=None, consistency: float=None, completeness: float=None, timeliness: float=None): # noqa: E501
"""DataUtility - a model defined in Swagger
:param url: The url of this DataUtility. # noqa: E501
:type url: str
:param accuracy: The accuracy of this DataUtility. # noqa: E501
:type accuracy: float
:param consistency: The consistency of this DataUtility. # noqa: E501
:type consistency: float
:param completeness: The completeness of this DataUtility. # noqa: E501
:type completeness: float
:param timeliness: The timeliness of this DataUtility. # noqa: E501
:type timeliness: float
"""
self.swagger_types = {
'url': str,
'accuracy': float,
'consistency': float,
'completeness': float,
'timeliness': float
}
self.attribute_map = {
'url': 'URL',
'accuracy': 'accuracy',
'consistency': 'consistency',
'completeness': 'completeness',
'timeliness': 'timeliness'
}
self._url = url
self._accuracy = accuracy
self._consistency = consistency
self._completeness = completeness
self._timeliness = timeliness
@classmethod
def from_dict(cls, dikt) -> 'DataUtility':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The DataUtility of this DataUtility. # noqa: E501
:rtype: DataUtility
"""
return util.deserialize_model(dikt, cls)
@property
def url(self) -> str:
"""Gets the url of this DataUtility.
:return: The url of this DataUtility.
:rtype: str
"""
return self._url
@url.setter
def url(self, url: str):
"""Sets the url of this DataUtility.
:param url: The url of this DataUtility.
:type url: str
"""
if url is None:
raise ValueError("Invalid value for `url`, must not be `None`") # noqa: E501
self._url = url
@property
def accuracy(self) -> float:
"""Gets the accuracy of this DataUtility.
:return: The accuracy of this DataUtility.
:rtype: float
"""
return self._accuracy
@accuracy.setter
def accuracy(self, accuracy: float):
"""Sets the accuracy of this DataUtility.
:param accuracy: The accuracy of this DataUtility.
:type accuracy: float
"""
if accuracy is None:
raise ValueError("Invalid value for `accuracy`, must not be `None`") # noqa: E501
self._accuracy = accuracy
@property
def consistency(self) -> float:
"""Gets the consistency of this DataUtility.
:return: The consistency of this DataUtility.
:rtype: float
"""
return self._consistency
@consistency.setter
def consistency(self, consistency: float):
"""Sets the consistency of this DataUtility.
:param consistency: The consistency of this DataUtility.
:type consistency: float
"""
if consistency is None:
raise ValueError("Invalid value for `consistency`, must not be `None`") # noqa: E501
self._consistency = consistency
@property
def completeness(self) -> float:
"""Gets the completeness of this DataUtility.
:return: The completeness of this DataUtility.
:rtype: float
"""
return self._completeness
@completeness.setter
def completeness(self, completeness: float):
"""Sets the completeness of this DataUtility.
:param completeness: The completeness of this DataUtility.
:type completeness: float
"""
if completeness is None:
raise ValueError("Invalid value for `completeness`, must not be `None`") # noqa: E501
self._completeness = completeness
@property
def timeliness(self) -> float:
"""Gets the timeliness of this DataUtility.
:return: The timeliness of this DataUtility.
:rtype: float
"""
return self._timeliness
@timeliness.setter
def timeliness(self, timeliness: float):
"""Sets the timeliness of this DataUtility.
:param timeliness: The timeliness of this DataUtility.
:type timeliness: float
"""
if timeliness is None:
raise ValueError("Invalid value for `timeliness`, must not be `None`") # noqa: E501
self._timeliness = timeliness
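# Illustrative construction from a response payload (values are placeholders):
#
#   du = DataUtility.from_dict({
#       'URL': 'https://example.org/dataset',
#       'accuracy': 0.9, 'consistency': 0.8,
#       'completeness': 0.95, 'timeliness': 0.7})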
| 2.4375 | 2 |
py_tools/random.py | HAL-42/AlchemyCat | 8 | 12797129 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: <NAME>
@contact: <EMAIL>
@software: PyCharm
@file: random.py
@time: 2020/1/15 2:29
@desc:
"""
import numpy as np
import torch
import random
from typing import Union
__all__ = ['set_numpy_rand_seed', 'set_py_rand_seed', 'set_torch_rand_seed', 'set_rand_seed',
'set_rand_seed_according_torch']
def set_numpy_rand_seed(seed: Union[int, str]):
"""Set rand seed for numpy
Args:
seed (Union[int, str]): int seed or str, which will be hashed to get int seed
"""
if isinstance(seed, str):
seed = hash(seed)
elif not isinstance(int(seed), int):
raise ValueError(f"seed={seed} should be str or int")
seed = seed % (2**32)
np.random.seed(int(seed))
def set_torch_rand_seed(seed: Union[int, str]):
"""Set rand seed for torch on both cpu, cuda and cudnn
Args:
seed (Union[int, str]): int seed or str, which will be hashed to get int seed
"""
if isinstance(seed, str):
seed = hash(seed)
elif not isinstance(int(seed), int):
raise ValueError(f"seed={seed} should be str or int")
torch.manual_seed(int(seed))
if torch.cuda.is_available():
torch.cuda.manual_seed_all(int(seed))
torch.backends.cudnn.deterministic = True
def set_py_rand_seed(seed: Union[int, str]):
"""Set rand seed for python
Args:
seed (Union[int, str]): int seed or str, which will be hashed to get int seed
"""
if isinstance(seed, str):
seed = hash(seed)
elif not isinstance(int(seed), int):
raise ValueError(f"seed={seed} should be str or int")
random.seed(int(seed))
def set_rand_seed(seed: Union[int, str]):
"""Set rand seed for numpy, torch(cpu, cuda, cudnn), python
Args:
seed (Union[int, str]): int seed or str, which will be hashed to get int seed
"""
set_numpy_rand_seed(seed)
set_py_rand_seed(seed)
set_torch_rand_seed(seed)
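# Illustrative usage (the dataset and DataLoader setup are placeholders):
#
#   set_rand_seed(42)  # seed numpy, python and torch in the main process
#   loader = torch.utils.data.DataLoader(
#       dataset, num_workers=4,
#       worker_init_fn=lambda _: set_rand_seed_according_torch())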
def set_rand_seed_according_torch():
"""Set rand seed according to torch process's rand seed
The rand seed of non-torch libs may duplicate in several dataloader worker processes.
Use this function as dataloader's worker init function can solve this problem.
"""
seed = torch.initial_seed()
set_py_rand_seed(seed)
set_numpy_rand_seed(seed) | 2.71875 | 3 |
downloader.py | rocke97/SeedboxDownloader | 1 | 12797130 | import os
import threading
import logging
from ftplib import FTP_TLS
import socket
import time
def setInterval(interval, times = -1):
# This will be the actual decorator,
# with fixed interval and times parameter
def outer_wrap(function):
# This will be the function to be
# called
def wrap(*args, **kwargs):
stop = threading.Event()
# This is another function to be executed
# in a different thread to simulate setInterval
def inner_wrap():
i = 0
while i != times and not stop.isSet():
stop.wait(interval)
function(*args, **kwargs)
i += 1
t = threading.Timer(0, inner_wrap)
t.daemon = True
t.start()
return stop
return wrap
return outer_wrap
class PyFTPclient:
def __init__(self, host, port, login, passwd, monitor_interval = 30, directory = None):
self.host = host
self.port = port
self.login = login
        self.passwd = passwd
self.directory = directory
self.monitor_interval = monitor_interval
self.ptr = None
self.max_attempts = 15
self.waiting = True
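    # Illustrative usage (host, credentials and file names are placeholders):
    #
    #   client = PyFTPclient('ftp.example.com', 21, 'user', 'secret',
    #                        directory='downloads')
    #   client.DownloadFile('remote_file.bin', 'local_file.bin')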
def DownloadFile(self, dst_filename, local_filename = None):
res = ''
if local_filename is None:
local_filename = dst_filename
with open(local_filename, 'w+b') as f:
self.ptr = f.tell()
@setInterval(self.monitor_interval)
def monitor():
if not self.waiting:
i = f.tell()
if self.ptr < i:
logging.debug("%d - %0.1f Kb/s" % (i, (i-self.ptr)/(1024*self.monitor_interval)))
self.ptr = i
os.system('clear')
print(str(int((float(i)/float(dst_filesize)) * 100)) + '%')
else:
ftp.close()
def connect():
ftp.connect(self.host, self.port)
ftp.login(self.login, self.passwd)
ftp.prot_p()
if self.directory != None:
ftp.cwd(self.directory)
# optimize socket params for download task
ftp.sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
ftp.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, 75)
ftp.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, 60)
ftp = FTP_TLS()
ftp.set_pasv(True)
connect()
ftp.voidcmd('TYPE I')
dst_filesize = ftp.size(dst_filename)
mon = monitor()
while dst_filesize > f.tell():
try:
connect()
self.waiting = False
# retrieve file from position where we were disconnected
res = ftp.retrbinary('RETR %s' % dst_filename, f.write) if f.tell() == 0 else \
ftp.retrbinary('RETR %s' % dst_filename, f.write, rest=f.tell())
except:
self.max_attempts -= 1
if self.max_attempts == 0:
mon.set()
logging.exception('')
raise
self.waiting = True
logging.info('waiting 30 sec...')
time.sleep(30)
logging.info('reconnect')
mon.set() #stop monitor
ftp.close()
if not res.startswith('226 Transfer complete'):
logging.error('Downloaded file {0} is not full.'.format(dst_filename))
os.remove(local_filename)
return None
return 1 | 2.890625 | 3 |
src/etl/ingest.py | jackshukla7/code-20220116-sanjeetshukla | 0 | 12797131 | <gh_stars>0
"""
# ingest.py for project bmi_calculator
# Created by @<NAME> at 9:06 AM 1/16/2022 using PyCharm
"""
import logging.config
from pyspark.sql.types import StructType, StringType, StructField, IntegerType
from src.utils import column_constants
class Ingest:
logging.config.fileConfig("config/logging.conf")
def __init__(self, spark):
self.spark = spark
def ingest_json_data(self, file_path):
"""
        Read the input JSON file and return a Spark DataFrame.
        :param file_path: input file path where the data is available
        :return: DataFrame built from the input JSON file
"""
logger = logging.getLogger("Ingest")
logger.info("Ingesting data")
try:
columns = getattr(column_constants, "column_constants")
schema = StructType([StructField(columns["GENDER"], StringType(), True),
StructField(columns["HEIGHT_CM"], IntegerType(), True),
StructField(columns["WEIGHT_KG"], IntegerType(), True)])
input_file = self.spark.read.option("multiLine", "true").schema(schema).json(file_path)
return input_file.persist()
except Exception as exp:
logger.error("An error occured while ingesting data > " + str(exp))
| 3.015625 | 3 |
testproject/testapp/tests/test_handlers_data_control.py | movermeyer/django-firestone | 1 | 12797132 | <reponame>movermeyer/django-firestone<filename>testproject/testapp/tests/test_handlers_data_control.py
"""
This module tests the ``get_data``, ``get_data_item``, ``get_data_set`` and
``get_working_set`` handler methods.
"""
from firestone.handlers import BaseHandler
from firestone.handlers import ModelHandler
from firestone import exceptions
from django.test import TestCase
from django.test import RequestFactory
from django.contrib.auth.models import User
from model_mommy import mommy
def init_handler(handler, request, *args, **kwargs):
# Mimicking the initialization of the handler instance
handler.request = request
handler.args = args
handler.kwargs = kwargs
return handler
class TestModelHandlerGetDataItem(TestCase):
def setUp(self):
request = RequestFactory().get('whateverpath/')
handler = init_handler(ModelHandler(), request)
handler.model = User
self.handler = handler
# Create some model instances
users = mommy.make(User, 10)
def test_get_data_item_single_field_selection(self):
handler = self.handler
self.assertEquals(
handler.get_data_item(),
None,
)
# Item selection based on 1 field
handler.kwargs = {'id':1}
self.assertEquals(
handler.get_data_item(),
User.objects.get(id=1),
)
def test_get_data_item_double_field_selection(self):
handler = self.handler
# Item selection based on 2 fields
user = User.objects.get(id=1)
handler.kwargs = {'id': user.id, 'first_name': user.first_name}
self.assertEquals(
handler.get_data_item(),
user,
)
def test_get_data_item_object_does_not_exist(self):
handler = self.handler
# ObjectDoesNotExist becomes exceptions.Gone
handler.kwargs = {'id': 1000}
self.assertRaises(
exceptions.Gone,
handler.get_data_item,
)
def test_get_data_item_value_error(self):
handler = self.handler
# ValueError becomes exceptions.Gone
handler.kwargs = {'id': 'string'}
self.assertRaises(
exceptions.Gone,
handler.get_data_item,
)
def test_get_data_item_type_error(self):
handler = self.handler
# TypeError becomes exceptions.Gone
handler.kwargs = {'id': {'key': 'value'}}
self.assertRaises(
exceptions.Gone,
handler.get_data_item,
)
class TestModelHandlerGetDataSet(TestCase):
def setUp(self):
request = RequestFactory().get('whateverpath/')
handler = init_handler(ModelHandler(), request)
handler.model = User
self.handler = handler
# Create some model instances
users = mommy.make(User, 10)
def test_get_data_set(self):
# Testing ``ModelHandler.get_data_set``
handler = self.handler
self.assertItemsEqual(
handler.get_data_set(),
User.objects.all(),
)
class TestModelHandlerGetWorkingSet(TestCase):
def setUp(self):
request = RequestFactory().get('whateverpath/')
handler = init_handler(ModelHandler(), request)
handler.model = User
self.handler = handler
# Create some model instances
users = mommy.make(User, 10)
def test_get_working_set(self):
# Testing ``ModelHandler.get_working_set``
handler = self.handler
self.assertItemsEqual(
handler.get_working_set(),
User.objects.all(),
)
class TestModelHandlerGetData(TestCase):
def setUp(self):
request = RequestFactory().get('whateverpath/')
handler = init_handler(ModelHandler(), request)
handler.model = User
self.handler = handler
# Create some model instances
users = mommy.make(User, 10)
def test_get_data(self):
# Testing ``ModelHandler.get_data``
handler = self.handler
self.assertItemsEqual(
handler.get_data(),
User.objects.all(),
)
def test_get_data_single_field_selection(self):
handler = self.handler
handler.kwargs = {'id': 1}
self.assertEquals(
handler.get_data(),
User.objects.get(id=1),
)
def test_get_data_double_field_selection(self):
handler = self.handler
user = User.objects.get(id=1)
handler.kwargs = {'id': user.id, 'first_name': user.first_name}
self.assertEquals(
handler.get_data(),
user,
)
def test_get_data_gone(self):
handler = self.handler
handler.kwargs = {'id': 1000}
self.assertRaises(
exceptions.Gone,
handler.get_data,
)
def test_get_data_method_not_allowed(self):
# Plural DELETE method, raises ``MethodNotAllowed`` exception
handler = self.handler
handler.request = RequestFactory().delete('/')
self.assertRaises(
exceptions.MethodNotAllowed,
handler.get_data,
)
| 2.421875 | 2 |
src/utils/tiff_to_geojson.py | dataforgoodfr/batch9_geowatchlabs-3-markets | 1 | 12797133 | <filename>src/utils/tiff_to_geojson.py
import os
import rasterio
from rasterio.features import shapes
import geopandas as gpd
def convert_tiff_to_geojson(original_tiff_path, destination_geojson_path, band):
"""
Convert tiff file to geojson for GeoDataFrame handling.
Args:
original_tiff_path (str): path+name of the tiff file we want to convert
destination_geojson_path (str): path+name of the targeted geojson
band (int): tiff band you want to handle
Returns:
Upload the geojson file in the destination.
"""
data = rasterio.open(original_tiff_path).meta
c = str(data["crs"])
mask = None
with rasterio.open(original_tiff_path) as src:
image = src.read(band) # first band
results = (
{"properties": {"property": v}, "geometry": s}
for i, (s, v) in enumerate(
shapes(image, mask=mask, transform=data["transform"])
)
)
geoms = list(results)
gpd_polygonized_raster = gpd.GeoDataFrame.from_features(geoms, crs=c)
gpd_polygonized_raster.to_file(destination_geojson_path, driver="GeoJSON")
if __name__ == "__main__":
for file in os.listdir("asset/tiff"):
print("Conversion of " + file + " starting ...")
try:
if file.replace("tiff", "geojson") not in os.listdir("asset/geojson"):
                tiff_path = os.path.join(os.getcwd(), "asset/tiff", file)
geojson_path = tiff_path.replace("tiff", "geojson")
convert_tiff_to_geojson(tiff_path, geojson_path, 1)
print("Conversion of " + file + " successful !")
else:
print("File already converted in geojson !")
except Exception as e:
print("Couldn't convert file " + file + ", exception :" + e.__str__())
| 3.15625 | 3 |
examples/cp/visu/house_building_optional.py | raineydavid/docplex-examples | 2 | 12797134 | <gh_stars>1-10
# --------------------------------------------------------------------------
# Source file provided under Apache License, Version 2.0, January 2004,
# http://www.apache.org/licenses/
# (c) Copyright IBM Corp. 2015, 2016
# --------------------------------------------------------------------------
"""
This is a problem of building five houses. The masonry, roofing,
painting, etc. must be scheduled. Some tasks must necessarily take
place before others and these requirements are expressed through
precedence constraints.
There are three workers, and each worker has a given non-negative
skill level for each task. Each task requires one worker that will
have to be selected among the ones who have a non null skill level for
that task. A worker can be assigned to only one task at a time. Each
house has a deadline. The objective is to maximize the skill levels of
the workers assigned to the tasks while respecting the deadlines.
Please refer to documentation for appropriate setup of solving configuration.
"""
from docplex.cp.model import CpoModel, INTERVAL_MIN
import docplex.cp.utils_visu as visu
#-----------------------------------------------------------------------------
# Initialize the problem data
#-----------------------------------------------------------------------------
NB_HOUSES = 5
DEADLINE = 318
WORKER_NAMES = ['Joe', 'Jack', 'Jim']
NB_WORKERS = len(WORKER_NAMES)
# House building task descriptor
class BuildingTask(object):
def __init__(self, name, duration, skills):
self.name = name
self.duration = duration # Task duration
self.skills = skills # Skills of each worker for this task
# List of tasks to be executed for each house
MASONRY = BuildingTask('masonry', 35, [9, 5, 0])
CARPENTRY = BuildingTask('carpentry', 15, [7, 0, 5])
PLUMBING = BuildingTask('plumbing', 40, [0, 7, 0])
CEILING = BuildingTask('ceiling', 15, [5, 8, 0])
ROOFING = BuildingTask('roofing', 5, [6, 7, 0])
PAINTING = BuildingTask('painting', 10, [0, 9, 6])
WINDOWS = BuildingTask('windows', 5, [8, 0, 5])
FACADE = BuildingTask('facade', 10, [5, 5, 0])
GARDEN = BuildingTask('garden', 5, [5, 5, 9])
MOVING = BuildingTask('moving', 5, [6, 0, 8])
# Tasks precedence constraints (each tuple (X, Y) means X ends before start of Y)
PRECEDENCES = ( (MASONRY, CARPENTRY),
(MASONRY, PLUMBING),
(MASONRY, CEILING),
(CARPENTRY, ROOFING),
(CEILING, PAINTING),
(ROOFING, WINDOWS),
(ROOFING, FACADE),
(PLUMBING, FACADE),
(ROOFING, GARDEN),
(PLUMBING, GARDEN),
(WINDOWS, MOVING),
(FACADE, MOVING),
(GARDEN, MOVING),
(PAINTING, MOVING),
)
#-----------------------------------------------------------------------------
# Prepare the data for modeling
#-----------------------------------------------------------------------------
# Assign an index to tasks
ALL_TASKS = (MASONRY, CARPENTRY, PLUMBING, CEILING, ROOFING, PAINTING, WINDOWS, FACADE, GARDEN, MOVING)
for i in range(len(ALL_TASKS)):
ALL_TASKS[i].id = i
#-----------------------------------------------------------------------------
# Build the model
#-----------------------------------------------------------------------------
# Create model
mdl = CpoModel()
# Initialize model variable sets
total_skill = 0 # Expression computing total of skills
worker_tasks = [[] for w in range(NB_WORKERS)] # Tasks (interval variables) assigned to a each worker
desc = dict() # Map retrieving task from interval variable
# Utility function
def make_house(loc, deadline):
''' Create model elements corresponding to the building of a house
loc Identification of house location
deadline Deadline for finishing the house
'''
# Create interval variable for each task for this house
tasks = [mdl.interval_var(size=t.duration,
end=(INTERVAL_MIN, deadline),
name='H' + str(loc) + '-' + t.name) for t in ALL_TASKS]
# Add precedence constraints
for p, s in PRECEDENCES:
mdl.add(mdl.end_before_start(tasks[p.id], tasks[s.id]))
# Allocate tasks to workers
global total_skill
for t in ALL_TASKS:
allocs = []
for w in range(NB_WORKERS):
if t.skills[w] > 0:
wt = mdl.interval_var(optional=True, name="H{}-{}({})".format(loc, t.name, WORKER_NAMES[w]))
worker_tasks[w].append(wt)
allocs.append(wt)
total_skill += (t.skills[w] * mdl.presence_of(wt))
desc[wt] = t
mdl.add(mdl.alternative(tasks[t.id], allocs))
# Make houses
for h in range(NB_HOUSES):
make_house(h, DEADLINE)
# Avoid overlapping between tasks of each worker
for w in range(NB_WORKERS):
mdl.add(mdl.no_overlap(worker_tasks[w]))
# Maximize total of skills
mdl.add(mdl.maximize(total_skill))
#-----------------------------------------------------------------------------
# Solve the model and display the result
#-----------------------------------------------------------------------------
def compact(name):
# Example: H3-garden -> G3
# ^ ^
loc, task = name[1:].split('-', 1)
return task[0].upper() + loc
# Solve model
print("Solving model....")
msol = mdl.solve(FailLimit=10000, TimeLimit=10)
print("Solution: ")
msol.print_solution()
# Draw solution
if msol and visu.is_visu_enabled():
visu.timeline('Solution SchedOptional', 0, DEADLINE)
for w in range(NB_WORKERS):
visu.sequence(name=WORKER_NAMES[w])
for t in worker_tasks[w]:
wt = msol.get_var_solution(t)
if wt.is_present():
if desc[t].skills[w] == max(desc[t].skills):
# Green-like color when task is using the most skilled worker
color = 'lightgreen'
else:
# Red-like color when task does not use the most skilled worker
color = 'salmon'
visu.interval(wt, color, compact(wt.get_name()))
visu.show()
| 2.46875 | 2 |
Numero_Par.py | caibacord6/Programming-2020B | 0 | 12797135 | <reponame>caibacord6/Programming-2020B
'''
Script that reports whether a number entered by the user is EVEN.
'''
N = 0
print("Enter an integer: ")
num = int(input())
if num % 2 == 0:
    print("The number is even.")
else:
    print("The number is odd.")
| 3.90625 | 4 |
parentheses/0921_minimum_add_to_make_valid.py | MartinMa28/Algorithms_review | 0 | 12797136 | <reponame>MartinMa28/Algorithms_review
class Solution:
def minAddToMakeValid(self, S: str) -> int:
stack = []
violations = 0
if S == '':
return 0
for idx, ch in enumerate(S):
if ch == '(':
stack.append(idx)
elif ch == ')':
if len(stack) == 0:
violations += 1
else:
stack.pop()
if len(stack) > 0:
violations += len(stack)
return violations | 3.296875 | 3 |
tinyfilemanager/constants.py | pentatester/tinyfilemanager | 0 | 12797137 | from .version import __version__
CONFIG_URL = "https://tinyfilemanager.github.io/config.json"
USER_AGENT = f"pypi/tinyfilemanager/{__version__}"
| 1.242188 | 1 |
tests/test_nodes_types.py | WaylonWalker/find-kedro | 17 | 12797138 | """
This module tests the creation of pipeline nodes from various different types
and combinations of types.
"""
import textwrap
import pytest
from find_kedro import find_kedro
contents = [
(
"single_nodes",
2,
"""\
from kedro.pipeline import node
node_a_b = node(lambda x: x, "a", "b", name="a_b")
node_b_c = node(lambda x: x, "b", "c", name="b_c")
""",
),
(
"list_nodes",
2,
"""\
from kedro.pipeline import node
nodes = [
node(lambda x: x, "a", "b", name="a_b"),
node(lambda x: x, "b", "c", name="b_c")
]
""",
),
(
"set_nodes",
2,
"""\
from kedro.pipeline import node
nodes = {
node(lambda x: x, "a", "b", name="a_b"),
node(lambda x: x, "b", "c", name="b_c")
}
""",
),
(
"tuple_nodes",
2,
"""\
from kedro.pipeline import node
nodes = (
node(lambda x: x, "a", "b", name="a_b"),
node(lambda x: x, "b", "c", name="b_c")
)
""",
),
(
"pipeline_nodes",
2,
"""\
from kedro.pipeline import node, Pipeline
nodes = Pipeline([
node(lambda x: x, "a", "b", name="a_b"),
node(lambda x: x, "b", "c", name="b_c")
])
""",
),
(
"pipeline_list_nodes",
4,
"""\
from kedro.pipeline import node, Pipeline
nodes_pipeline = Pipeline([
node(lambda x: x, "a", "b", name="a_b"),
node(lambda x: x, "b", "c", name="b_c"),
])
nodes_list = [
node(lambda x: x, "a2", "b2", name="a_b2"),
node(lambda x: x, "b2", "c2", name="b_c2"),
]
""",
),
(
"pipeline_nodes_nodes",
4,
"""\
from kedro.pipeline import node, Pipeline
nodes_pipeline = Pipeline([
node(lambda x: x, "a", "b", name="a_b"),
node(lambda x: x, "b", "c", name="b_c"),
])
node_a2 = node(lambda x: x, "a2", "b2", name="a_b2")
node_b2 = node(lambda x: x, "b2", "c2", name="b_c2")
""",
),
(
"list_nodes_nodes",
4,
"""\
from kedro.pipeline import node
nodes_pipeline = [
node(lambda x: x, "a", "b", name="a_b"),
node(lambda x: x, "b", "c", name="b_c"),
]
node_a2 = node(lambda x: x, "a2", "b2", name="a_b2")
node_b2 = node(lambda x: x, "b2", "c2", name="b_c2")
""",
),
(
"dynamic_list_nodes",
100,
"""\
from kedro.pipeline import node
nodes_pipeline = [ node(lambda x: x, f"a{n}", f"a{n+1}", name=f"a{n}_a{n+1}") for n in range(100)]
""",
),
(
"dynamic_pipeline_nodes",
100,
"""\
from kedro.pipeline import node, Pipeline
nodes_pipeline = Pipeline([ node(lambda x: x, f"a{n}", f"a{n+1}", name=f"a{n}_a{n+1}") for n in range(100)])
""",
),
(
"nested_list_nodes",
4,
"""\
from kedro.pipeline import node
nodes_pipeline = [
node(lambda x: x, "a", "b", name="a_b"),
node(lambda x: x, "b", "c", name="b_c"),
[
node(lambda x: x, "a2", "b2", name="a_b2"),
node(lambda x: x, "b2", "c2", name="b_c2"),
]
]
""",
),
(
"nested_tuple_nodes",
4,
"""\
from kedro.pipeline import node
nodes_pipeline = (
node(lambda x: x, "a", "b", name="a_b"),
node(lambda x: x, "b", "c", name="b_c"),
(
node(lambda x: x, "a2", "b2", name="a_b2"),
node(lambda x: x, "b2", "c2", name="b_c2"),
)
)
""",
),
(
"nested_set_nodes",
4,
"""\
from kedro.pipeline import node
nodes_pipeline = {
node(lambda x: x, "a", "b", name="a_b"),
node(lambda x: x, "b", "c", name="b_c"),
(
node(lambda x: x, "a2", "b2", name="a_b2"),
node(lambda x: x, "b2", "c2", name="b_c2"),
)
}
""",
),
(
"function_nodes",
2,
"""\
from kedro.pipeline import Pipeline, node
def create_pipeline():
return Pipeline([
node(lambda x: x, "a", "b", name="a_b"),
node(lambda x: x, "b", "c", name="b_c"),
]
)
""",
),
(
"function_single_nodes",
4,
"""\
from kedro.pipeline import Pipeline, node
node_a_b = node(lambda x: x, "a", "b", name="a_b")
node_b_c = node(lambda x: x, "b", "c", name="b_c")
def create_pipeline():
return Pipeline([
node(lambda x: x, "fa", "fb", name="fa_fb"),
node(lambda x: x, "fb", "fc", name="fb_fc"),
]
)
""",
),
(
"function_list_nodes",
4,
"""\
from kedro.pipeline import Pipeline, node
nodes = [
node(lambda x: x, "a", "b", name="a_b"),
node(lambda x: x, "b", "c", name="b_c")
]
def create_pipeline():
return Pipeline([
node(lambda x: x, "fa", "fb", name="fa_fb"),
node(lambda x: x, "fb", "fc", name="fb_fc"),
]
)
""",
),
(
"list_create_pipeline",
2,
"""\
from kedro.pipeline import Pipeline, node
        create_pipeline = [
node(lambda x: x, "a", "b", name="a_b"),
node(lambda x: x, "b", "c", name="b_c")
]
""",
),
]
@pytest.mark.parametrize("name, num_nodes, content", contents)
def test_create_file(tmpdir, name, num_nodes, content):
p = tmpdir.mkdir("nodes").join(f"{ name }.py")
p.write(textwrap.dedent(content))
pipelines = find_kedro(directory=tmpdir, verbose=True)
assert list(pipelines.keys()) == [f"nodes.{ name }", "__default__"]
assert (
len(pipelines["__default__"].nodes) == num_nodes
), f"did not collect all nodes from { name }.py"
assert len(tmpdir.listdir()) == 1
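
# Illustrative invocation (assumes pytest is installed):
#   pytest tests/test_nodes_types.py -q
# Each parametrized case writes a temporary nodes/<name>.py file and asserts
# that find_kedro collects the expected number of nodes into "__default__".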
| 2.546875 | 3 |
bricks/tests/api/test_configfile.py | CloudBrewery/bricks-service | 0 | 12797139 | """
Tests for the API /configfiles/ methods.
"""
import datetime
import mock
from oslo.config import cfg
from bricks.common import utils
from bricks.openstack.common import timeutils
from bricks.tests.api import base
from bricks.tests.api import utils as apiutils
from bricks.tests.db import utils as dbutils
class TestListConfigFiles(base.FunctionalTest):
def test_empty(self):
data = self.get_json('/configfiles?brickconfig_uuid=1be26c0b-03f2-4d2e-ae87-c02d7f33c123')
self.assertEqual([], data['configfiles'])
def test_one(self):
ndict = dbutils.get_test_configfile()
config = self.dbapi.create_configfile(ndict)
data = self.get_json('/configfiles?brickconfig_uuid=1be26c0b-03f2-4d2e-ae87-c02d7f33c123')
self.assertEqual(config['uuid'], data['configfiles'][0]["uuid"])
self.assertNotIn('environ', data['configfiles'][0])
def test_detail(self):
cdict = dbutils.get_test_configfile()
config = self.dbapi.create_configfile(cdict)
data = self.get_json('/configfiles/detail?brickconfig_uuid=1be26c0b-03f2-4d2e-ae87-c02d7f33c123')
self.assertEqual(config['uuid'], data['configfiles'][0]["uuid"])
self.assertIn('description', data['configfiles'][0])
def test_detail_against_single(self):
cdict = dbutils.get_test_configfile()
config = self.dbapi.create_configfile(cdict)
response = self.get_json('/configfiles/%s/detail' % config['uuid'],
expect_errors=True)
self.assertEqual(404, response.status_int)
def test_many(self):
cf_list = []
for id in range(5):
ndict = dbutils.get_test_configfile(
id=id, uuid=utils.generate_uuid())
cf = self.dbapi.create_configfile(ndict)
cf_list.append(cf['uuid'])
data = self.get_json('/configfiles?brickconfig_uuid=1be26c0b-03f2-4d2e-ae87-c02d7f33c123')
self.assertEqual(len(data['configfiles']), len(cf_list))
uuids = [n['uuid'] for n in data['configfiles']]
self.assertEqual(uuids.sort(), cf_list.sort())
def test_brickconfig_filter(self):
cf_list = []
brickconfig_uuid = utils.generate_uuid()
for id in range(5):
ndict = dbutils.get_test_configfile(
id=id, uuid=utils.generate_uuid(),
brickconfig_uuid=brickconfig_uuid)
cf = self.dbapi.create_configfile(ndict)
cf_list.append(cf['uuid'])
data = self.get_json(
'/configfiles?brickconfig_uuid=%s' % brickconfig_uuid)
self.assertEqual(len(data['configfiles']), len(cf_list))
uuids = [n['uuid'] for n in data['configfiles']]
self.assertEqual(uuids.sort(), cf_list.sort())
def test_links(self):
uuid = utils.generate_uuid()
cdict = dbutils.get_test_configfile(id=1, uuid=uuid)
self.dbapi.create_configfile(cdict)
data = self.get_json('/configfiles/%s' % uuid)
self.assertIn('links', data.keys())
self.assertEqual(2, len(data['links']))
self.assertIn(uuid, data['links'][0]['href'])
self.assertTrue(self.validate_link(data['links'][0]['href']))
self.assertTrue(self.validate_link(data['links'][1]['href']))
def test_collection_links(self):
configs = []
for id in range(5):
ndict = dbutils.get_test_configfile(
id=id, uuid=utils.generate_uuid())
cf = self.dbapi.create_configfile(ndict)
configs.append(cf['uuid'])
data = self.get_json('/configfiles?limit=3&brickconfig_uuid=1be26c0b-03f2-4d2e-ae87-c02d7f33c123')
self.assertEqual(3, len(data['configfiles']))
next_marker = data['configfiles'][-1]['uuid']
self.assertIn(next_marker, data['next'])
def test_collection_links_default_limit(self):
cfg.CONF.set_override('max_limit', 3, 'api')
configs = []
for id in range(5):
ndict = dbutils.get_test_configfile(
id=id, uuid=utils.generate_uuid())
cf = self.dbapi.create_configfile(ndict)
configs.append(cf['uuid'])
data = self.get_json('/configfiles?brickconfig_uuid=1be26c0b-03f2-4d2e-ae87-c02d7f33c123')
self.assertEqual(3, len(data['configfiles']))
next_marker = data['configfiles'][-1]['uuid']
self.assertIn(next_marker, data['next'])
class TestPatch(base.FunctionalTest):
def setUp(self):
super(TestPatch, self).setUp()
cdict = dbutils.get_test_configfile()
self.dbapi.create_configfile(cdict)
def test_update_not_found(self):
uuid = utils.generate_uuid()
response = self.patch_json('/configfiles/%s' % uuid,
[{'path': '/contents',
'value': 'RUN: ls -lash',
'op': 'replace'}],
expect_errors=True,
context=self.context)
self.assertEqual(404, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['error_message'])
@mock.patch.object(timeutils, 'utcnow')
def test_replace_singular(self, mock_utcnow):
cdict = dbutils.get_test_configfile()
test_time = datetime.datetime(2000, 1, 1, 0, 0)
desc = 'foo'
mock_utcnow.return_value = test_time
response = self.patch_json('/configfiles/%s' % cdict['uuid'],
[{'path': '/description',
'value': desc, 'op': 'replace'}],
context=self.context)
self.assertEqual('application/json', response.content_type)
self.assertEqual(200, response.status_code)
result = self.get_json('/configfiles/%s' % cdict['uuid'])
self.assertEqual(desc, result['description'])
return_updated_at = timeutils.parse_isotime(
result['updated_at']).replace(tzinfo=None)
self.assertEqual(test_time, return_updated_at)
def test_remove_uuid(self):
cdict = dbutils.get_test_configfile()
response = self.patch_json('/configfiles/%s' % cdict['uuid'],
[{'path': '/uuid', 'op': 'remove'}],
expect_errors=True)
self.assertEqual(400, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['error_message'])
class TestPost(base.FunctionalTest):
@mock.patch.object(timeutils, 'utcnow')
def test_create_configfile(self, mock_utcnow):
cdict = dbutils.get_test_configfile()
test_time = datetime.datetime(2000, 1, 1, 0, 0)
mock_utcnow.return_value = test_time
response = self.post_json(
'/configfiles', cdict, context=self.context)
self.assertEqual(201, response.status_int)
result = self.get_json('/configfiles/%s' % cdict['uuid'])
self.assertEqual(cdict['uuid'], result['uuid'])
self.assertFalse(result['updated_at'])
return_created_at = timeutils.parse_isotime(result['created_at']).replace(tzinfo=None)
self.assertEqual(test_time, return_created_at)
def test_create_configfile_generate_uuid(self):
cdict = dbutils.get_test_configfile()
del cdict['uuid']
self.post_json('/configfiles', cdict, context=self.context)
result = self.get_json('/configfiles?brickconfig_uuid=1be26c0b-03f2-4d2e-ae87-c02d7f33c123')
self.assertEqual(cdict['name'],
result['configfiles'][0]['name'])
self.assertTrue(utils.is_uuid_like(result['configfiles'][0]['uuid']))
def test_create_configfile_invalid_name(self):
cdict = dbutils.get_test_configfile()
del cdict['name']
response = self.post_json('/configfiles', cdict,
expect_errors=True, context=self.context)
self.assertEqual(400, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['error_message'])
class TestDelete(base.FunctionalTest):
def test_delete_configfile(self):
cdict = dbutils.get_test_configfile()
self.dbapi.create_configfile(cdict)
self.delete('/configfiles/%s' % cdict['uuid'], context=self.context)
response = self.get_json('/configfiles/%s' % cdict['uuid'],
expect_errors=True, context=self.context)
self.assertEqual(404, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['error_message'])
def test_delete_brick_not_found(self):
uuid = utils.generate_uuid()
response = self.delete('/configfiles/%s' % uuid,
expect_errors=True, context=self.context)
self.assertEqual(404, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['error_message'])
| 2.328125 | 2 |
vindauga/utilities/hexdump.py | gabbpuy/vindauga | 5 | 12797140 | # -*- coding: utf8 -*-
import itertools
def formatLine(r):
r = list(r)
l1 = ' '.join('{:02x}'.format(c) for c in r)
l2 = ''.join(chr(c) if 32 <= c < 127 else '.' for c in r)
return l1, l2
def hexDump(data):
size, over = divmod(len(data), 16)
if over:
size += 1
offsets = range(0, size * 16, 16)
for o in offsets:
row = itertools.islice(data, o, o + 16)
yield '{:010X}: {:48} {:16}'.format(o, *formatLine(row))
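

# Minimal usage sketch (an illustrative addition, not part of the original module):
if __name__ == '__main__':
    # Print a hex/ASCII dump of a short byte string, 16 bytes per row.
    for dump_line in hexDump(b'vindauga hex dump example'):
        print(dump_line)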
| 3.25 | 3 |
contentWidget.py | yujiecong/PyQt-Zhku-Client | 0 | 12797141 | <reponame>yujiecong/PyQt-Zhku-Client<filename>contentWidget.py
# Copyright (c) 2020. Lorem ipsum dolor sit amet, consectetur adipiscing elit.
# Morbi non lorem porttitor neque feugiat blandit. Ut vitae ipsum eget quam lacinia accumsan.
# Etiam sed turpis ac ipsum condimentum fringilla. Maecenas magna.
# Proin dapibus sapien vel ante. Aliquam erat volutpat. Pellentesque sagittis ligula eget metus.
# Vestibulum commodo. Ut rhoncus gravida arcu.
from PyQt5.QtWidgets import QWidget
from Ui_Content import Ui_Content
class ContentWidget(QWidget, Ui_Content):
def __init__(self, *args, **kwargs):
super(ContentWidget, self).__init__(*args, **kwargs)
self.setupUi(self)
self.par=self.parent()
# connect
self.pushButton_3.clicked.connect(self.par.loginShow)
self.pushButton_24.clicked.connect(self.par.checkResults)
| 1.78125 | 2 |
models/notes/views.py | choeminjun/src_server_1.2 | 0 | 12797142 | <filename>models/notes/views.py<gh_stars>0
from flask import Blueprint, request, session, url_for, render_template
from werkzeug.utils import redirect
from src.models.notes.note import Note
import src.models.users.decorators as user_decorators
from src.models.users.user import User
from src.models.error_logs.error_log import Error_
import traceback
note_blueprint = Blueprint('notes', __name__)
@note_blueprint.route('/my_notes/')
@user_decorators.require_login
def user_notes():
try:
user = User.find_by_email(session['email'])
user_notes = User.get_notes(user)
user_name = user.email
return render_template('/notes/my_notes.html', user_name=user_name, user_notes=user_notes)
except:
error_msg = traceback.format_exc().split('\n')
Error_obj = Error_(error_msg=''.join(error_msg), error_location='user note reading USER:' + session['email'])
Error_obj.save_to_mongo()
return render_template('error_page.html', error_msgr='Crashed during reading your notes...')
@note_blueprint.route('/note/<string:note_id>')
def note(note_id):
try:
note = Note.find_by_id(note_id)
user = User.find_by_email(note.author_email)
try:
if note.author_email == session['email']:
author_email_is_session = True
else:
author_email_is_session = False
except:
author_email_is_session = False
finally:
return render_template('/notes/note.html', note=note,
author_email_is_session=author_email_is_session, msg_=False, user=user)
except:
error_msg = traceback.format_exc().split('\n')
try:
Error_obj = Error_(error_msg=''.join(error_msg), error_location='note reading NOTE:' + note._id)
except:
Error_obj = Error_(error_msg=''.join(error_msg), error_location='note reading NOTE:NONE')
Error_obj.save_to_mongo()
return render_template('error_page.html', error_msgr='Crashed during reading your note...')
@note_blueprint.route('/add_note', methods=['GET', 'POST'])
@user_decorators.require_login
def create_note():
try:
if request.method == 'POST':
share = request.form['inputGroupSelect01']
if share == '0':
return render_template('/notes/create_note.html',
error_msg="You did not selected an Share label. Please select an Share label.")
if share == '1':
share = True
share_only_with_users = False
else:
share = False
share_only_with_users = True
title = request.form['title']
content = request.form['content']
author_email = session['email']
author_nickname = User.find_by_email(author_email).nick_name
note_for_save = Note(title=title, content=content, author_email=author_email, shared=share,
author_nickname=author_nickname ,share_only_with_users=share_only_with_users)
note_for_save.save_to_mongo()
return redirect(url_for('.user_notes'))
return render_template('/notes/create_note.html')
except:
error_msg = traceback.format_exc().split('\n')
Error_obj = Error_(error_msg=''.join(error_msg), error_location='create_note creating note')
Error_obj.save_to_mongo()
return render_template('error_page.html', error_msgr='Crashed during saving your note...')
@note_blueprint.route('/delete_note/<string:note_id>')
@user_decorators.require_login
def delete_note(note_id):
try:
Note.find_by_id(note_id).delete()
finally:
return redirect(url_for('.user_notes'))
@note_blueprint.route('/share_note/<string:note_id>')
@user_decorators.require_login
def share_note(note_id):
try:
Note.find_by_id(note_id).share_or_unshare()
finally:
return redirect(url_for('.note', note_id=note_id, msg='Your note is shared!!', msg_=True))
@note_blueprint.route('/pub_notes/')
def notes():
try:
try:
if session['email'] is None:
return render_template('/notes/pub_notes.html', notes=Note.find_shared_notes())
else:
return render_template('/notes/pub_notes.html',
notes=Note.get_only_with_users() + Note.find_shared_notes())
except:
return render_template('/notes/pub_notes.html', notes=Note.find_shared_notes())
except:
error_msg = traceback.format_exc().split('\n')
        Error_obj = Error_(error_msg=''.join(error_msg), error_location='notes public note reading')
Error_obj.save_to_mongo()
return render_template('error_page.html', error_msgr='Crashed during reading users notes...')
@note_blueprint.route('/edit_note/<string:note_id>', methods=['GET', 'POST'])
@user_decorators.require_login
def edit_note(note_id):
try:
note = Note.find_by_id(note_id)
if request.method == 'POST':
if request.method == 'POST':
share = request.form['inputGroupSelect01']
if share == '0':
return render_template('/notes/create_note.html',
error_msg="You did not selected an Share label. Please select an Share label.")
if share == '1':
share = True
share_only_with_users = False
else:
share = False
share_only_with_users = True
title = request.form['title']
content = request.form['content']
note.shared = share
note.share_only_with_users = share_only_with_users
note.title = title
note.content = content
note.save_to_mongo()
return redirect(url_for('.note', note_id=note_id))
else:
return render_template('/notes/edit_note.html', note=note)
except:
error_msg = traceback.format_exc().split('\n')
        Error_obj = Error_(error_msg=''.join(error_msg), error_location='edit_note saving and getting input from html file')
Error_obj.save_to_mongo()
return render_template('error_page.html', error_msgr='Crashed during saving your note...')
| 2.203125 | 2 |
caos/_internal/utils/dependencies.py | caotic-co/caos | 0 | 12797143 | from caos._internal.constants import ValidDependencyVersionRegex
from caos._internal.exceptions import InvalidDependencyVersionFormat, UnexpectedError
from typing import NewType
PipReadyDependency = NewType(name="PipReadyDependency", tp=str)
def _is_dependency_name_in_wheel(dependency_name: str, wheel: str, version: str) -> bool:
wheel = wheel[:-1*len("-{}".format(version))]\
.replace("_", "-")\
.lower()
return wheel.endswith(dependency_name.replace("_", "-").lower())
def _get_dependency_version_format(dependency_name: str, version: str) -> ValidDependencyVersionRegex:
"""
Raises:
InvalidDependencyVersionFormat
"""
if ValidDependencyVersionRegex.MAJOR_MINOR_PATCH.value.match(version):
return ValidDependencyVersionRegex.MAJOR_MINOR_PATCH
if ValidDependencyVersionRegex.MAJOR_MINOR.value.match(version):
return ValidDependencyVersionRegex.MAJOR_MINOR
if ValidDependencyVersionRegex.MAJOR.value.match(version):
return ValidDependencyVersionRegex.MAJOR
if ValidDependencyVersionRegex.LATEST.value.match(version):
return ValidDependencyVersionRegex.LATEST
wheel_info = ValidDependencyVersionRegex.WHEEL.value.match(version)
if wheel_info:
wheel = wheel_info.group("wheel")
wheel_version = wheel_info.group("version")
if not _is_dependency_name_in_wheel(dependency_name=dependency_name, wheel=wheel, version=wheel_version):
raise InvalidDependencyVersionFormat(
"The dependency '{dep}' is not present in the wheel filename '{wheel}'"
.format(dep=dependency_name, wheel=version)
)
if not ValidDependencyVersionRegex.MAJOR_MINOR_PATCH.value.match(wheel_version) and \
not ValidDependencyVersionRegex.MAJOR_MINOR.value.match(wheel_version) and \
not ValidDependencyVersionRegex.MAJOR.value.match(wheel_version):
raise InvalidDependencyVersionFormat(
"\nThe version format for the wheel dependency '{dep}' is invalid. Use a 'Final release' format "
"(see https://www.python.org/dev/peps/pep-0440/#final-releases)"
.format(dep=dependency_name)
)
return ValidDependencyVersionRegex.WHEEL
if ValidDependencyVersionRegex.TARGZ.value.match(version):
return ValidDependencyVersionRegex.TARGZ
raise InvalidDependencyVersionFormat(
"\nInvalid version format for the dependency '{dep}'. Only the following formats are allowed:"
"\n - 'latest' or 'LATEST'"
"\n - Final release format (see https://www.python.org/dev/peps/pep-0440/#final-releases)"
"\n - Wheel Binary Packages (see https://www.python.org/dev/peps/pep-0491/#file-format)"
"\n - .tar.gz Packages"
.format(dep=dependency_name)
)
def generate_pip_ready_dependency(dependency_name: str, version: str) -> PipReadyDependency:
"""
Raises:
InvalidDependencyVersionFormat
UnexpectedError
"""
dependency_regex: ValidDependencyVersionRegex = _get_dependency_version_format(
dependency_name=dependency_name,
version=version
)
if dependency_regex == ValidDependencyVersionRegex.MAJOR_MINOR_PATCH: # (^|~) X.X.X
if version.startswith("~"): # Allow patch updates
return version.replace("~", "~=") # ~=X.X.X
elif version.startswith("^"): # Allow minor updates
version = version.replace("^", "")
major, minor, patch = version.split(".")
return "~={}.{}".format(major, minor) # ~=X.X
else: # Allow exact version
return "=={}".format(version) # ==X.X.X
elif dependency_regex == ValidDependencyVersionRegex.MAJOR_MINOR:
if version.startswith("~"): # Allow patch updates
version = version.replace("~", "")
major, minor = version.split(".")
return "~={}.{}.0".format(major, minor) # ~=X.X.0
elif version.startswith("^"): # Allow minor updates
version = version.replace("^", "~=")
return version # ~=X.X
else: # Allow exact version
return "=={}".format(version) # ==X.X
elif dependency_regex == ValidDependencyVersionRegex.MAJOR:
if version.startswith("~"): # Allow patch updates
version = version.replace("~", "")
return "~={}.0.0".format(version) # ~=X.0.0
elif version.startswith("^"): # Allow minor updates
version = version.replace("^", "")
return "~={}.0".format(version) # ~=X.0
else: # Allow exact version
return "=={}".format(version) # ==X
elif dependency_regex == ValidDependencyVersionRegex.LATEST:
return dependency_name.lower()
elif dependency_regex == ValidDependencyVersionRegex.WHEEL:
return version
elif dependency_regex == ValidDependencyVersionRegex.TARGZ:
return version
raise UnexpectedError("The dependency given should have thrown 'InvalidDependencyVersionFormat' but it did not")
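
# Illustrative conversions (assuming the version regexes accept the '~'/'^'
# prefixes handled above; the names and versions below are made-up examples):
#   generate_pip_ready_dependency("requests", "~2.22.0") -> "~=2.22.0"  (patch updates allowed)
#   generate_pip_ready_dependency("requests", "^2.22.0") -> "~=2.22"    (minor updates allowed)
#   generate_pip_ready_dependency("requests", "2.22.0")  -> "==2.22.0"  (exact pin)
#   generate_pip_ready_dependency("requests", "latest")  -> "requests"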
| 2.1875 | 2 |
game/views.py | lizheng3401/MetaStudio | 0 | 12797144 | import os
from django.shortcuts import render,get_object_or_404, redirect
from django.http import FileResponse
from .models import GameCategory, Game
from comment.forms import GameCommentForm,SubGCommentForm
from comment.models import SubGComment
from .forms import UploadGameForm
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
def portfllio(request):
categories = GameCategory.objects.all().order_by("name")
gameList = []
for cate in categories:
games = Game.objects.filter(category = cate.pk).order_by("-createTime")
temp = (cate,games)
gameList.append(temp)
return render(request, 'home/portfolio.html', context={'gameList': gameList})
def gameInfo(request,pk):
game = get_object_or_404(Game, pk=pk)
form = GameCommentForm()
subForm = SubGCommentForm()
c = game.gamecomment_set.all()
comments = []
for comment in c:
subComment = SubGComment.objects.filter(parentComment=comment.pk).order_by("createTime")
temp = (comment,subComment)
comments.append(temp)
context = {
'game': game,
'form': form,
'subForm': subForm,
'comments': comments,
}
return render(request, 'game/game.html', context=context)
def downloadGame(request, pk):
gameObj = get_object_or_404(Game, pk=pk)
url = BASE_DIR+str(gameObj.game.url).replace('/', '\\')
name = str(gameObj.game)
file = open(url, 'rb')
response = FileResponse(file)
response['Content-Type'] = 'application/octet-stream'
response['Content-Disposition'] = 'attachment;filename="{0}"'.format(name)
gameObj.increase_times()
return response
def uploadGame(request):
categories = GameCategory.objects.all()
if request.method == 'POST':
form = UploadGameForm(request.POST)
        game_file = request.FILES['game']
if form.is_valid():
game = form.save(commit=False)
            game.game = game_file
if 'icon' not in request.POST:
game.icon = request.FILES['icon']
if 'foreImg' not in request.POST:
game.foreImg = request.FILES['foreImg']
game.save()
return redirect('/')
else:
form = UploadGameForm()
return render(request, 'game/upload.html', context={'form': form, 'categories': categories})
def deleteGame(request, pk):
Game.objects.filter(pk=pk).delete()
return redirect("/user/")
def editGame(request, pk):
categories = GameCategory.objects.all()
game = get_object_or_404(Game, pk=pk)
if request.method == 'POST':
content = request.POST
game.name = content['name']
game.version = content['version']
game.category.pk = content['category']
game.inTro = content['inTro']
if 'icon' not in request.POST:
game.icon = request.FILES['icon']
if 'foreImg' not in request.POST:
game.foreImg = request.FILES['foreImg']
if 'game' not in request.POST:
game.game = request.FILES['game']
game.save()
return redirect("/user/")
context = {'categories': categories,'game': game}
return render(request, 'game/edit.html',context=context) | 2.15625 | 2 |
testing_tools/dummy_numa_ctl.py | TobiasWinchen/mpikat | 0 | 12797145 | #!/usr/bin/env python
#
# Dummy script to replace numactl in testing environment
#
import argparse
import subprocess
print("Using dummy numactl")
parser = argparse.ArgumentParser()
parser.add_argument("cmd", nargs="*")
args, unknown = parser.parse_known_args()
p = subprocess.Popen(args.cmd)
p.wait()
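
# Illustrative call (hypothetical command; this script stands in for
# "numactl <options> <cmd>"):
#   ./dummy_numa_ctl.py echo hello
# Any numactl-style options end up in the "unknown" list returned by
# parse_known_args() and are ignored; only the wrapped command is executed.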
| 2.09375 | 2 |
src/labeling_doccano/doccano_functions.py | rasleh/StanceVeracityDetector | 0 | 12797146 | import json
import os
from pathlib import Path
current_path = os.path.abspath(__file__)
default_raw_path = os.path.join(current_path, '../../data/datasets/twitter/raw/')
unlabeled_data_path = Path(os.path.join(os.path.abspath(__file__), '../../../data/datasets/twitter/raw/unlabeled'))
def generate_label_data(file_name: str, stance_out_name: str, claim_out_name: str):
file_path = Path(unlabeled_data_path, file_name)
stance_out_path = Path(unlabeled_data_path, stance_out_name)
claim_out_path = Path(unlabeled_data_path, claim_out_name)
with file_path.open() as data, stance_out_path.open(mode='w') as stance_out, claim_out_path.open(mode='w') as claim_out:
for line in data:
tweet_dict = json.loads(line.split('\t')[1])
source_tweet = tweet_dict[line.split('\t')[0]]
source_tweet['text'] = source_tweet['full_text']
source_tweet['labels'] = []
json.dump(source_tweet, claim_out)
claim_out.write('\n')
for tweet_id, tweet in tweet_dict.items():
if source_tweet == tweet:
continue
tweet['text'] = 'Source: {}\n\nReply: {}'.format(source_tweet['full_text'], tweet['full_text'])
tweet['labels'] = []
json.dump(tweet, stance_out)
stance_out.write('\n')
def anno_agreement_check(anno_data_file: str, agree_file: str, disagree_file: str):
anno_data_path = Path(os.path.join(default_raw_path, anno_data_file))
agree_path = Path(os.path.join(default_raw_path, agree_file))
disagree_path = Path(os.path.join(default_raw_path, disagree_file))
with anno_data_path.open(encoding='utf-8') as anno_data, agree_path.open(mode='w', encoding='utf-8') as agree_data, disagree_path.open(
mode='w', encoding='utf-8') as disagree_data:
for line in anno_data:
disagreement = False
annotations = json.loads(line)['annotations']
if len(annotations) == 1:
line = json.loads(line)
line['annotations'] = [annotations[0]['label']]
json.dump(line, agree_data)
agree_data.write('\n')
else:
user_labels = {}
for annotation in annotations:
user_labels.setdefault(annotation['user'], set()).add(annotation['label'])
for user_id_a, labels_a in user_labels.items():
for user_id_b, labels_b in user_labels.items():
if labels_a != labels_b:
disagree_data.write(line)
disagreement = True
break
if disagreement:
break
if not disagreement:
line = json.loads(line)
if user_labels:
line['annotations'] = list(user_labels[1])
if not disagreement:
print(line)
json.dump(line, agree_data)
agree_data.write('\n')
def integrate_claim_label(annotation, tweet):
veracity_map = {5: 'True', 6: 'Unverified', 7: 'False'}
    if 1 not in annotation['annotations'] and 2 not in annotation['annotations']:
err_msg = "Error in claim labels, must contain either '1' or '2', denominating 'claim'" \
" and 'non-claim' respectively. Given labels: {}"
raise RuntimeError(
err_msg.format(annotation['annotations']))
if 2 in annotation['annotations']:
tweet['Claim'] = False
else:
tweet['Claim'] = True
    if 3 not in annotation['annotations'] and 4 not in annotation['annotations']:
err_msg = "Error in claim labels, must contain either '3' or '4', denominating " \
"'verifiable' and 'subjective' respectively. Given labels: {}"
raise RuntimeError(
err_msg.format(annotation['annotations']))
if 4 in annotation['annotations']:
tweet['Verifiability'] = 'Subjective'
else:
tweet['Verifiability'] = 'Verifiable'
    if not any(label in annotation['annotations'] for label in (5, 6, 7)):
err_msg = "Error in claim labels, must contain either '5', '6' or '7', " \
"denominating 'True', 'Unverified' and 'False' respectively. Given " \
"labels: {}"
raise RuntimeError(
err_msg.format(annotation['annotations']))
for x in [5, 6, 7]:
if x in annotation['annotations']:
tweet['TruthStatus'] = veracity_map[x]
def integrate_sdqc_label(annotation, tweet):
sdqc_map = {1: 'Supporting', 2: 'Denying', 3: 'Querying', 4: 'Commenting'}
if len(annotation['annotations']) > 1:
err_msg = "{} SDQC labels found, only one allowed"
raise RuntimeError(
err_msg.format(len(annotation['annotations'])))
tweet['SDQC_Submission'] = sdqc_map[annotation['annotations'][0]]
def integrate_label_data(anno_data_path: Path, database_path: Path, label_scheme: str):
if label_scheme not in ['claim', 'sdqc']:
err_msg = "Unrecognized label scheme: {}, please use 'sdqc' or 'claim'"
raise RuntimeError(
err_msg.format(label_scheme))
with anno_data_path.open(encoding='utf-8') as labeled_data, database_path.open(encoding='utf-8') as database:
        # Read all annotations once so the file iterator is not exhausted
        # after the first database line.
        annotations = [json.loads(annotation) for annotation in labeled_data]
        data = []
        for line in database:
            not_altered = True
            tweet_dict = json.loads(line.split('\t')[1])
            for annotation in annotations:
# Data-point not yet annotated
if not annotation['annotations']:
continue
for tweet_id, tweet in tweet_dict.items():
if tweet['full_text'] == annotation['text']:
if label_scheme == 'claim':
integrate_claim_label(annotation, tweet)
if label_scheme == 'sdqc':
integrate_sdqc_label(annotation, tweet)
not_altered = False
break
if not_altered:
data.append(line)
else:
                data.append(line.split('\t')[0] + '\t' + json.dumps(tweet_dict) + '\n')
with database_path.open(mode='w', encoding='utf-8') as database:
for line in data:
database.write(line)
#anno_agreement_check(Path('test.json'), Path('agree.json'), Path('disagree.json'))
#generate_label_data(test_data, 'stance.jsonl', 'claim.jsonl')
| 2.734375 | 3 |
nlvm.py | joselynzhao/One-shot-Person-Re-ID-with-Variance-Subsampling-Method | 3 | 12797147 | <reponame>joselynzhao/One-shot-Person-Re-ID-with-Variance-Subsampling-Method
from __future__ import print_function, absolute_import
from reid.snatch import *
from reid import datasets
from reid import models
import numpy as np
import torch
import argparse
import os
from reid.utils.logging import Logger
import os.path as osp
import sys
from torch.backends import cudnn
from reid.utils.serialization import load_checkpoint
from torch import nn
import time
import math
import pickle
import time
import matplotlib.pyplot as plt
import os
import codecs
from common_tool import *
def main(args):
    # set up the dynamic (GIF) plot drawer
gd = gif_drawer()
cudnn.benchmark = True
cudnn.enabled = True
# get all the labeled and unlabeled data for training
dataset_all = datasets.create(args.dataset, osp.join(args.data_dir, args.dataset))
l_data, u_data = get_one_shot_in_cam1(dataset_all, load_path="./examples/oneshot_{}_used_in_paper.pickle".format(
dataset_all.name))
NN = len(l_data) + len(u_data)
    # compute the total number of training steps
    total_step = math.ceil(math.pow((100 / args.EF), (1 / args.q)))  # take the ceiling (or +2) so an extra round of one-shot training runs  # EUG base sampling strategy
    # total_step = math.ceil((2 * NN * args.step_s + args.yita + len(u_data)) / (args.yita + NN + len(l_data))) + 2  # big start strategy
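    # e.g. with the default EF=10 and q=1 the first formula gives
    # total_step = ceil((100 / 10) ** 1) = 10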
    # print the key settings for this training run
print("{} training begin with dataset:{},batch_size:{},epoch:{},step_size:{},max_frames:{},total_step:{},EF:{},q:{},percent_vari:{}".format(args.exp_name,args.dataset,args.batch_size,args.epoch,args.step_size,args.max_frames,total_step+1,args.EF,args.q,args.percent_vari))
    # set the output files
    # the third part should state the key parameter settings
sys.stdout = Logger(osp.join(args.logs_dir, args.dataset,args.exp_name,args.exp_order,'log'+time.strftime(".%m_%d_%H-%M-%S")+'.txt'))
data_file =codecs.open(osp.join(args.logs_dir, args.dataset,args.exp_name,args.exp_order,'data.txt'),mode='a')
if args.clock :
time_file =codecs.open(osp.join(args.logs_dir, args.dataset,args.exp_name,args.exp_order,'time.txt'),mode='a')
save_path = osp.join(args.logs_dir, args.dataset,args.exp_name,args.exp_order)
resume_step, ckpt_file = -1, ''
    if args.resume:  # used when resuming training
resume_step, ckpt_file = resume(args)
# initial the EUG algorithm
eug = EUG(model_name=args.arch, batch_size=args.batch_size, mode=args.mode, num_classes=dataset_all.num_train_ids,
data_dir=dataset_all.images_dir, l_data=l_data, u_data=u_data, save_path=save_path,
max_frames=args.max_frames)
    # initialize data before training
nums_to_select = 0
expend_nums_to_select = 0
new_train_data = l_data
step = 0
if args.resume:
step = resume_step
nums_to_select = min(math.ceil(len(u_data) * math.pow((step), args.q) * args.EF / 100), len(u_data))
step_size = []
    isout = 0  # flags whether training should end
    # record the experiment start time
exp_start = time.time()
while(not isout):
print("{} training begin with dataset:{},batch_size:{},epoch:{},step:{}/{} saved to {}.".format(args.exp_name,args.dataset,args.batch_size, args.epoch,step+1,total_step+1,save_path))
print("key parameters contain EF:{},q:{},percent_vari:{}. Nums_been_selected:{}".format(args.EF,args.q,args.percent_vari,nums_to_select))
        # start training
train_start = time.time()
eug.train(new_train_data, step, epochs=args.epoch, step_size=args.step_size, init_lr=0.1) if step != resume_step else eug.resume(ckpt_file, step)
        # start evaluation
evaluate_start = time.time()
# mAP, top1, top5, top10, top20 = 0,0,0,0,0
mAP,top1,top5,top10,top20 = eug.evaluate(dataset_all.query, dataset_all.gallery)
        # label estimation
estimate_start = time.time()
# pred_y, pred_score, label_pre, id_num = 0,0,0,0
pred_y, pred_score, label_pre, dists = eug.estimate_label()
estimate_end = time.time()
        # decide whether to exit the loop
if nums_to_select == len(u_data):
isout = 1
        # setting of nums_to_select
        new_nums_to_select = min(math.ceil(len(u_data) * math.pow((step + 1), args.q) * args.EF / 100),len(u_data))  # EUG basic exponential progressive sampling strategy
# new_nums_to_select = min(math.ceil((len(u_data)-args.yita)*(step-1)/(total_step-2))+args.yita,len(u_data)) # big start
new_expend_nums_to_select = min(len(u_data), math.ceil(new_nums_to_select / args.percent_vari))
selected_idx = eug.select_top_data_nlvm_b1(pred_score,dists, new_expend_nums_to_select,new_nums_to_select)
new_train_data, select_pre = eug.generate_new_train_data(selected_idx, pred_y)
# select_pre =0
        # print this epoch's information
data_file.write("step:{} mAP:{:.2%} top1:{:.2%} top5:{:.2%} top10:{:.2%} top20:{:.2%} nums_selected:{} expend_nums_to_select:{} selected_percent:{:.2%} label_pre:{:.2%} select_pre:{:.2%}\n".format(
int(step+1), mAP, top1, top5,top10,top20,nums_to_select, expend_nums_to_select,nums_to_select/len(u_data),label_pre,select_pre))
print(
"step:{} mAP:{:.2%} top1:{:.2%} top5:{:.2%} top10:{:.2%} top20:{:.2%} nums_selected:{} expend_nums_to_select:{} selected_percent:{:.2%} label_pre:{:.2%} select_pre:{:.2%}\n".format(
int(step+1), mAP, top1, top5, top10, top20, nums_to_select, expend_nums_to_select,nums_to_select / len(u_data), label_pre,select_pre))
if args.clock:
train_time = evaluate_start-train_start
evaluate_time = estimate_start - evaluate_start
estimate_time = estimate_end-estimate_start
epoch_time = train_time+estimate_time
time_file.write("step:{} train:{} evaluate:{} estimate:{} epoch:{}\n".format(int(step+1),train_time,evaluate_time,estimate_time,epoch_time))
if args.gdraw:
gd.draw(nums_to_select/len(u_data),top1,mAP,label_pre,select_pre)
nums_to_select = new_nums_to_select
expend_nums_to_select = new_expend_nums_to_select
step = step + 1
data_file.close()
if (args.clock):
exp_end = time.time()
exp_time = exp_end - exp_start
h, m, s = changetoHSM(exp_time)
print("experiment is over, cost %02d:%02d:%02.6f" % ( h, m, s))
time_file.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Snatch Strategy')
parser.add_argument('-d', '--dataset', type=str, default='DukeMTMC-VideoReID',choices=datasets.names()) #DukeMTMC-VideoReID \mars
parser.add_argument('-b', '--batch-size', type=int, default=16)
parser.add_argument('--epoch',type=int,default=40)
parser.add_argument('--step_size',type=int,default=30)
    parser.add_argument('--EF', type=float, default=10)  # progressive sampling coefficient
    parser.add_argument('--q', type=float, default=1)  # progressive sampling exponent
    parser.add_argument('--percent_vari', type=float, default=0.8)  # selection range based on variance
working_dir = os.path.dirname(os.path.abspath(__file__))
    parser.add_argument('--data_dir', type=str, metavar='PATH',default=os.path.join(working_dir, 'data'))  # root directory for loading the dataset
    parser.add_argument('--logs_dir', type=str, metavar='PATH',default=os.path.join(working_dir, 'logs'))  # root directory for saving logs
parser.add_argument('--exp_name',type=str,default="nlvm-b1")
parser.add_argument('--exp_order',type=str,default="1")
parser.add_argument('--resume', type=str, default=None)
    parser.add_argument('--mode', type=str, choices=["Classification", "Dissimilarity"], default="Dissimilarity")  # consider whether this should be removed
parser.add_argument('--max_frames', type=int, default=400)
    parser.add_argument('--clock',type=bool, default=True)  # whether to time the run
    parser.add_argument('--gdraw',type=bool, default=False)  # whether to plot in real time
    # the parameters below have an unclear purpose for now
parser.add_argument('-a', '--arch', type=str, default='avg_pool',choices=models.names()) #eug model_name
parser.add_argument('-i', '--iter-step', type=int, default=5)
parser.add_argument('-g', '--gamma', type=float, default=0.3)
parser.add_argument('-l', '--l', type=float)
parser.add_argument('--continuous', action="store_true")
main(parser.parse_args())
| 2.15625 | 2 |
draft/serial/serial_threaded.py | industrial-robotics-lab/omni-platform-python | 0 | 12797148 | <filename>draft/serial/serial_threaded.py
#!/usr/bin/env python3
import serial
import time
import threading
if __name__ == '__main__':
port = serial.Serial('/dev/ttyACM0', 115200)
port.flush()
start = time.time()
def process_message(msg):
print(msg)
# print(f"Time passed: {time.time() - start} secs; msg len = {len(msg)}")
def transmit():
while True:
# global start
# start = time.time()
port.write(b"Hello from Raspberry Pi!\n")
time.sleep(1)
def receive():
while True:
line = port.readline().decode('utf-8').rstrip()
process_message(line)
tx_thread = threading.Thread(target=transmit)
# rx_thread = threading.Thread(target=receive)
tx_thread.start()
# rx_thread.start()
# transmit()
receive()
tx_thread.join()
# rx_thread.join()
| 3 | 3 |
tetris/tetris.py | thejevans/tetris | 0 | 12797149 | <filename>tetris/tetris.py<gh_stars>0
__author__ = '<NAME>'
__copyright__ = 'Copyright 2020 <NAME>'
__credits__ = ['<NAME>']
__license__ = 'Apache License 2.0'
__version__ = '0.0.1'
__maintainer__ = '<NAME>'
__email__ = '<EMAIL>'
__status__ = 'Development'
"""
Docstring
"""
from typing import Dict, List, Union, Optional
import time
from pynput import keyboard
Board = Dict[str, Union[Optional[str], int, List[int], bool]]
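# A Board maps these keys (see initialize_board below): 'tetromino'
# (Optional[str]), 'rotation' (str), 'x' and 'y' (int), 'stored'
# (Optional[str]), 'stack' and 'queue' (List[int]), 'score' (int),
# and 'store_flag' (bool).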
TETROMINOES = [
[ # I
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 1, 1, 1, 1,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
],
[ # J
1, 0, 0,
1, 1, 1,
0, 0, 0,
],
[ # L
0, 0, 1,
1, 1, 1,
0, 0, 0,
],
[ # O
0, 1, 1,
0, 1, 1,
0, 0, 0,
],
[ # S
0, 1, 1,
1, 1, 0,
0, 0, 0,
],
[ # T
0, 1, 0,
1, 1, 1,
0, 0, 0,
],
[ # Z
1, 1, 0,
0, 1, 1,
0, 0, 0,
],
]
OFFSETS = [
(0, 0), (0, 0), (0, 0), (0, 0), (0, 0), # 0
(0, 0), (1, 0), (1, -1), (0, 2), (1, 2), # R
(0, 0), (0, 0), (0, 0), (0, 0), (0, 0), # 2
(0, 0), (-1, 0), (-1, -1), (0, 2), (-1, 2), # L
]
I_OFFSETS = [
(0, 0), (-1, 0), (2, 0), (-1, 0), (2, 0), # 0
(-1, 0), (0, 0), (0, 0), (0, 1), (0, -2), # R
(-1, 1), (1, 1), (-2, 1), (1, 0), (-2, 0), # 2
(0, 1), (0, 1), (0, 1), (0, -1), (0, 2), # L
]
O_OFFSETS = [
(0, 0), # 0
(0, -1), # R
(-1, -1), # 2
(-1, 0), # L
]
ROTATE_INT = {'0': 0, 'R': 1, '2': 2, 'L': 3}
INT_ROTATE = ('0', 'R', '2', 'L')
TETROMINO_INT = {'I': 0, 'J': 1, 'L': 2, 'O': 3, 'S': 4, 'T': 5, 'Z': 6}
DIRECTION_INT = {'clockwise': 1, 'counterclockwise': -1}
KEYMAP = { # TODO: change the keys to the correct strings
'up': 'up',
'down': 'down',
'left': 'left',
'right': 'right',
'clockwise': 'clockwise',
'counterclockwise': 'counterclockwise',
'store': 'store',
}
def display(board_state: Board) -> None:
"""Displays the current board state.
TODO: currently simply printing to console. update to use curses.
Args:
board_state: Dictionary containing the board state.
"""
# TODO: write this function
def true_rotate(board_state: Board, new_rotation: str = '0') -> List[int]:
"""Rotates a given tetromino clockwise and returns the rotated one.
Args:
board_state:
new_rotation:
Returns:
A list representation of the rotated tetromino
Raises:
AssertionError:
"""
# This nested function returns the new index for a given index after a
# clockwise rotation.
def rot(idx: int, dim: int) -> int:
return (dim - 1 - (idx % dim))*dim + idx//dim
tetromino_idx = TETROMINO_INT[board_state['tetromino']]
tetromino = TETROMINOES[tetromino_idx]
current_rotation_idx = ROTATE_INT[board_state['rotation']]
new_rotation_idx = ROTATE_INT[new_rotation]
iterations = (4 + new_rotation_idx - current_rotation_idx) % 4
assert(iterations != 2)
dim = int(len(tetromino)**0.5)
for _ in range(iterations):
        tetromino = [tetromino[rot(i, dim)] for i in range(len(tetromino))]
return tetromino
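
# Worked example of the clockwise index mapping above: rotating the J
# tetromino from '0' to 'R' turns
#   1 0 0        0 1 1
#   1 1 1  into  0 1 0
#   0 0 0        0 1 0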
def collision(board_state: Board) -> bool:
"""Checks if given board results in a collision.
Args:
board_state:
Returns:
True if there is a collision, False if there is not.
"""
# TODO: write this function
pass
def kick(board_state: Board,
rotated_board_state: Board,
k: int,
) -> Union[bool, Board]:
"""Translates the rotated board using the offset tables.
Returns the kicked board if there is a kick available. Otherwise, returns
False.
Args:
board_state:
rotated_board_state:
k: The offset index to use.
Returns:
"""
i_rotate = ROTATE_INT[board_state['rotation']]
f_rotate = ROTATE_INT[rotated_board_state['rotation']]
if board_state['tetromino'] == 'O':
if k != 0:
return False
i_offset = O_OFFSETS[i_rotate]
f_offset = O_OFFSETS[f_rotate]
    elif not (0 <= k <= 4):
return False
elif board_state['tetromino'] == 'I':
i_offset = I_OFFSETS[i_rotate*5 + k]
f_offset = I_OFFSETS[f_rotate*5 + k]
else:
i_offset = OFFSETS[i_rotate*5 + k]
f_offset = OFFSETS[f_rotate*5 + k]
x_kick = f_offset[0] - i_offset[0]
y_kick = f_offset[1] - i_offset[1]
kicked_board_state = rotated_board_state.copy()
kicked_board_state['x'] += x_kick
kicked_board_state['y'] += y_kick
return kicked_board_state
def rotate(board_state: Board, direction: str = 'clockwise') -> Board:
"""Attempts to rotate the current piece in the given direction.
Args:
board_state:
direction:
Returns:
A new board state.
"""
rotate_offset = DIRECTION_INT[direction]
    current_rotate = ROTATE_INT[board_state['rotation']]
new_rotate = (current_rotate + rotate_offset) % 4
rotated_board_state = board_state.copy()
rotated_board_state['rotation'] = INT_ROTATE[new_rotate]
k = 0
kicked_board_state = kick(board_state, rotated_board_state, k)
while collision(kicked_board_state):
k += 1
if kicked_board_state := kick(board_state, rotated_board_state, k):
continue
else:
return board_state
return kicked_board_state
def translate(board_state: Board, direction: str = 'down') -> Board:
"""Attempts to translate the current piece in the given direction.
Args:
board_state:
direction:
Returns:
A new board state.
"""
# TODO: write this function
pass
def store(board_state: Board) -> Board:
"""Attempt to store the current piece.
Args:
board_state:
Returns:
A new board state.
"""
# TODO: write this function
pass
def lock(board_state: Board) -> Union[bool, Board]:
"""Checks to see if the current piece should be locked.
If the piece should not be locked, returns False. Otherwise, returns the
new board state with the locked piece.
Args:
board_state:
Returns:
"""
# TODO: make sure to clear lines
# TODO: update score
# TODO: write this function
pass
def pop(board_state: Board) -> Board:
# TODO: make sure to reset the store flag
# TODO: write this function
pass
def initialize_board() -> Board:
"""
"""
board_state = {
'tetromino': None,
'rotation': '0',
'x': 0,
'y': 0,
'stored': None,
'stack': [],
'queue': [],
'score': 0,
'store_flag': False,
}
# TODO: initialize stack
# TODO: initialize queue
return board_state
def game_over(board_state: Board) -> bool:
# TODO: write this function
pass
def move(board_state: Board, key: str = 'down') -> Board:
"""Attempts to move the active piece based on the key.
Key presses allowed: 'up', 'down', 'left', 'right', 'clockwise',
'counterclockwise', 'store'.
Args:
board_state: Dictionary containing the board state.
key: A strign representation of the key pressed.
Returns:
The new board state
"""
    translations = {'up', 'down', 'left', 'right'}
    rotations = {'clockwise', 'counterclockwise'}
if key in translations:
if board_state['tetromino'] is None:
return pop(board_state)
translated_board_state = translate(board_state, key)
if locked_board_state := lock(translated_board_state):
return locked_board_state
return translate(board_state, key)
elif key in rotations:
return rotate(board_state, key)
else: # elif key == 'store':
return store(board_state)
if __name__ == '__main__':
TICK = 0.5
board = initialize_board()
with keyboard.Events() as events:
t_i = time.perf_counter()
        while not game_over(board):
event = events.get(TICK)
if event is not None:
board = move(board, KEYMAP[event])
display(board)
if (time.perf_counter() - t_i) > TICK:
t_i = time.perf_counter()
board = move(board)
display(board)
print('GAME OVER')
input("Press the <ENTER> key to continue...")
| 2.625 | 3 |
site/blog/views.py | marcus-crane/site | 2 | 12797150 | import datetime
from django.contrib.syndication.views import Feed
from django.utils import timezone
from django.urls import reverse
from django.views import generic
from .models import Post
class PostDetailView(generic.DetailView):
model = Post
queryset = Post.objects.exclude(status='D')
template_name = 'blog/detail.html'
class PostView(generic.ListView):
template_name = 'blog/list.html'
context_object_name = 'posts'
def get_queryset(self):
""" Fetch only published posts, and order by descending date """
return Post.objects.filter(
published_at__lte=timezone.now(), status="P"
).order_by('-published_at')
class RSSFeed(Feed):
title = "utf9k"
link = "/blog/"
description = "Blog posts from utf9k"
def items(self):
return Post.objects.order_by('-published_at')
def item_title(self, item):
return item.title
def item_pubdate(self, item):
return datetime.datetime.combine(item.published_at, datetime.time())
def item_description(self, item):
return item.render()
def item_link(self, item):
return reverse('blog:detail', args=[item.slug]) | 2.375 | 2 |