# coding=utf-8
# Wrong way
def open_file():
f = open('photo.jpg', 'r+')
jpgdata = f.read()
f.close()
# Correct way
def open_file_right():
with open('photo.jpg', 'r+') as f:
jpgdata = f.read()
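# Why the second form is preferred: the with-block closes the file even if
# read() raises, which the first version does not guarantee. Roughly, it is
# equivalent to this try/finally sketch (shown for illustration only):
def open_file_equivalent():
    f = open('photo.jpg', 'r+')
    try:
        jpgdata = f.read()
    finally:
        f.close()
    return jpgdata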
|
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 17 12:55:25 2022
@author: catal
"""
import os
import smtplib
import imghdr
from email.message import EmailMessage
email_address = os.environ.get("gmail_app_user")
email_password = os.environ.get("gmail_app_pass")
msg = EmailMessage()
msg['Subject'] = "wanna scene this weekend?"
msg["From"] = email_address
msg["To"] = email_address
msg.set_content("How about 6pm-10pm this Saterday, at Tension? I'll throw in some extra pussy pics ;)")
pics = ["max2DaysOld.jpg", "max3DaysOld.jpg"]
for pic in pics:
with open(pic, "rb") as f:
f_data = f.read()
f_type = imghdr.what(f.name)
f_name = f.name
msg.add_attachment(f_data, maintype='image', subtype=f_type, filename=f_name)
with smtplib.SMTP_SSL("smtp.gmail.com", 465) as smtp:
smtp.login(email_address, email_password)
smtp.send_message(msg)
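# Note: the standard-library imghdr module used above is deprecated since
# Python 3.11 and removed in 3.13. A minimal alternative sketch (an assumption,
# not part of the original script) derives the MIME type with mimetypes instead:
#
#     import mimetypes
#     mime, _ = mimetypes.guess_type(pic)  # e.g. ("image/jpeg", None)
#     maintype, subtype = (mime or "application/octet-stream").split("/", 1)
#     msg.add_attachment(f_data, maintype=maintype, subtype=subtype, filename=f_name)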
|
#!/usr/bin/python
import copy
import cPickle
import logging
import os
import subprocess
from time import time
from fetcher import fetch
from parser import parse
import settings
class SyncStamps:
def __init__(self, videos=None, playlists=None, expires=0.0):
# Avoid shared mutable default arguments: create fresh dicts per instance
self.videos = videos if videos is not None else {}
self.playlists = playlists if playlists is not None else {}
self.expires = expires
class Sync:
def __init__(self):
self._playlist_prefix = settings.get('playlist_prefix')
self._video_prefix = settings.get('video_prefix')
self.playlists = settings.get('playlists').split(',')
self._cache_path = settings.get('cache_path')
self._make_dirs('current')
self.stamps = self._make_stamps('current')
logging.info('Instantiated sync manager')
def _path(self, *items):
return os.path.join(self._cache_path, *items)
def _nuke_path(self, *where):
subprocess.call(['rm', '-rf', self._path(*where)])
def _make_stamps(self, state, original=None):
stamps_path = self._path(state, 'stamps.pickle')
if os.path.exists(stamps_path):
with open(stamps_path, 'rb') as f:
return cPickle.load(f)
elif original is None:
return SyncStamps()
else:
return copy.deepcopy(original)
def _make_dirs(self, dir):
fullpath = self._path(dir)
if not os.path.exists(fullpath):
os.makedirs(fullpath, 0755)
def refresh(self, force=False):
if time() < self.stamps.expires and not force:
logging.info('Current files good for %.1f more secs', self.get_wait())
return False
self._nuke_path('new')
self._make_dirs('new')
new_stamps = self._make_stamps('new', self.stamps)
changed = False
available = set()
for playlist in self.playlists:
changed |= self._refresh_playlist(playlist, new_stamps, available)
for video in self._find_videos(available):
changed |= self._refresh_video(video, new_stamps, set())
new_stamps.expires = time() + (float(settings.get('refresh_hours')) * 3600)
if changed:
self._swap_into_new()
else:
self._nuke_path('new')
self.stamps = new_stamps
self._save_stamps('current', new_stamps)
return changed
def _refresh_playlist(self, playlist, stamps, available):
try: return self._refresh(self._playlist_prefix,
playlist, stamps.playlists, available)
finally: self._save_stamps('new', stamps)
def _refresh_video(self, video, stamps, available):
try: return self._refresh(self._video_prefix,
video, stamps.videos, available)
finally: self._save_stamps('new', stamps)
def _refresh(self, url_prefix, item, stamps_dict, available):
try:
known_item = item in stamps_dict
stamp = None
if known_item and os.access(self._path('current', item), os.R_OK):
stamp = stamps_dict[item]
resp = fetch(url_prefix + item, stamp)
local_path = self._path('new', item)
if resp.status >= 400: # error statuses
if known_item:
del stamps_dict[item]
return known_item # changed if previously known
elif resp.status == 304: # not modified
available.add(item)
os.link(
self._path('current', item),
self._path('new', item))
stamps_dict[item] = resp.date
return False # unchanged
elif 200 <= resp.status < 300: # downloading
available.add(item)
resp.save(local_path)
stamps_dict[item] = resp.date
return True # changed
else:
raise Exception("Don't know what to do with response %s", resp.status)
except:
import traceback
logging.error('Failed to fetch %s%s. Skipping. Exception info:\n%s',
url_prefix, item, traceback.format_exc())
return False # assume unchanged
def _save_stamps(self, folder, stamps):
with open(self._path(folder, 'stamps.pickle'), 'wb') as f:
cPickle.dump(stamps, f, 0)
def _find_videos(self, playlists):
videos = set()
for filename in playlists:
playlist = parse(self._path('new', filename))
for video in playlist:
videos.add(str(video))
return videos
def _swap_into_new(self):
if os.access(self._path('current'), os.W_OK):
os.rename(self._path('current'), self._path('old'))
os.rename(self._path('new'), self._path('current'))
if os.access(self._path('old'), os.W_OK):
self._nuke_path('old')
def get_playlists(self):
result = []
for playlist in self.stamps.playlists:
result.append(self._path('current', playlist))
return result
def get_wait(self):
return max(0, self.stamps.expires - time())
if __name__ == '__main__':
print 'nippl (c) 2010, Omar Balbuena'
logging.basicConfig(level = logging.DEBUG)
sync = Sync()
sync.refresh(True)
|
#!/usr/local/bin/python3.8
# -*- coding: utf-8 -*-
import random
from typing import List, Dict
from classes.magic import Magic
class BColors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
class Person:
ACTIONS = ['Attack', 'Magic', 'Items']
def __init__(self, name, hp: int, mp: int, attack: int, defence: int,
magic: List[Magic], items: List[Dict]):
self.name = name
self.__max_hp = hp
self.hp = hp
self.__max_mp = mp
self.mp = mp
self.__attack = attack
self.defence = defence
self.magic = magic
self.items = items
@property
def max_hp(self):
return self.__max_hp
@property
def max_mp(self):
return self.__max_mp
def damage(self):
attack_low = self.__attack - 20
attack_high = self.__attack + 20
return random.randrange(attack_low, attack_high)
def take_damage(self, damage: int):
self.hp -= damage
if self.hp <= 0:
self.hp = 0
return self.hp
def reduce_mp(self, cost: int):
self.mp -= cost
def heal(self, dmg):
self.hp += dmg
if self.hp > self.__max_hp:
self.hp = self.__max_hp
def choose_action(self):
i = 1
print(f'\n{BColors.OKBLUE}{BColors.BOLD}{self.name} Turn:\n{BColors.ENDC}')
print(f'{BColors.OKBLUE}{BColors.BOLD}ACTIONS{BColors.ENDC}')
for action in Person.ACTIONS:
print(f" {i}. {action}")
i += 1
def choose_magic(self):
i = 1
print(f'{BColors.OKBLUE}{BColors.BOLD}MAGIC{BColors.ENDC}')
for magic in self.magic:
print(f" {i}. {magic.name}, (cost: {magic.mp_cost})")
i += 1
def choose_item(self):
i = 1
print(f"{BColors.OKGREEN}{BColors.BOLD}ITEMS:{BColors.ENDC}")
for item in self.items:
print(f" {i}. {item['item'].name}: {item['item'].description} (x{item['quantity']})")
i += 1
@staticmethod
def choose_target(enemies):
i = 1
alive_enemies = len([x for x in enemies if x.hp > 0])
print(f"\n{BColors.OKGREEN}{BColors.BOLD}TARGET:{BColors.ENDC}")
for enemy in enemies:
if enemy.hp != 0:
print(f" {i}. {enemy.name}")
i += 1
while True:
choice = int(input("Choose target: ")) - 1
if 0 <= choice < alive_enemies:
break
print("Invalid target number! Choose again!")
return choice
def get_stats(self):
tick = '█'
hp_ticks = int(((self.hp / self.__max_hp) * 100) / 4)
hp_bar = ''
# dynamic HP bar
for x in range(hp_ticks):
hp_bar += tick
while True:
if len(hp_bar) == 25:
break
hp_bar += ' '
# Dynamic MP bar
mp_ticks = int(((self.mp / self.__max_mp) * 100) / 10)
mp_bar = ''
for x in range(mp_ticks):
mp_bar += tick
while True:
if len(mp_bar) == 10:
break
mp_bar += ' '
# Keep HP 4 spaces
hp = str(self.hp)
if len(hp) < 2:
hp = f" {hp}"
elif len(hp) < 3:
hp = f" {hp}"
elif len(hp) < 4:
hp = f' {hp}'
# Keep MP 3 spaces
mp = str(self.mp)
if len(mp) < 2:
mp = f' {mp}'
elif len(mp) < 3:
mp = f' {mp}'
print(f' {BColors.BOLD}_________________________ __________{BColors.ENDC}')
print(f'{BColors.BOLD}{self.name}: {hp}/{self.__max_hp} '
f'|{BColors.OKGREEN}{hp_bar}{BColors.ENDC}'
f'{BColors.BOLD}| {mp}/{self.__max_mp}|{BColors.OKBLUE}{mp_bar}{BColors.ENDC}{BColors.BOLD}|'
f'{BColors.ENDC}')
def get_enemy_stats(self):
hp_bar = ''
bar_ticks = int(((self.hp / self.__max_hp) * 100) / 2)
tick = '█'
for x in range(bar_ticks):
hp_bar += tick
while True:
if len(hp_bar) == 50:
break
hp_bar += ' '
# Keep HP 4 spaces
hp = str(self.hp)
if len(hp) < 2:
hp = f" {hp}"
elif len(hp) < 3:
hp = f" {hp}"
elif len(hp) < 4:
hp = f' {hp}'
elif len(hp) < 5:
hp = f" {hp}"
print(f' {BColors.BOLD}__________________________________________________{BColors.ENDC}')
print(f'{BColors.BOLD}{self.name} {hp}/{self.__max_hp} '
f'|{BColors.FAIL}{hp_bar}{BColors.ENDC}'
f'{BColors.BOLD}|{BColors.ENDC}')
def choose_enemy_spell(self):
magic_choice = random.randrange(0, len(self.magic))
spell = self.magic[magic_choice]
magic_dmg = spell.generate_damage()
hp_breakpoint = self.hp / self.__max_hp * 100
if self.mp < spell.mp_cost or (spell.type == 'white' and hp_breakpoint > 50):
return self.choose_enemy_spell()
else:
return spell, magic_dmg
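# A minimal usage sketch (names and stat values below are made up; the Magic and
# item systems are bypassed by passing empty lists):
if __name__ == "__main__":
    hero = Person("Hero", hp=460, mp=65, attack=60, defence=34, magic=[], items=[])
    imp = Person("Imp", hp=300, mp=20, attack=45, defence=25, magic=[], items=[])
    imp.take_damage(hero.damage())
    hero.get_stats()
    imp.get_enemy_stats()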
|
import subprocess
import re
class gdb(object):
def __init__(self, python_file, cpp_file, cpp_start_line):
self.process = subprocess.Popen(["PYTHONPATH=stencil:../.. gdb python"],shell=True,stdin=subprocess.PIPE,stdout=subprocess.PIPE,stderr=subprocess.PIPE, cwd='.')
self.process.stdin.write("run " + python_file + "\n")
self.process.stdin.write("break " + cpp_file + ":" + str(cpp_start_line) + "\n")
self.process.stdin.write("run " + python_file + "\n")
self.process.stdin.write("delete 0\n")
# Read up to current position in output
def sync_pos(self):
self.process.stdin.write("echo sync375023\\n\n")
line = "\n"
while line:
line = self.process.stdout.readline()
if 'sync375023' in line:
break
line = self.process.stdout.read(len("(gdb) "))
def get_current_stack(self):
self.sync_pos()
self.process.stdin.write("back\n")
line = self.process.stdout.readline().strip()
m = re.match(r'^#([0-9]+)\s+(.*::)?([A-Za-z0-9_]+)\s+(\(.*\))? at (.*):([0-9]+)$', line)
if m and m.group(1) == '0':
result = dict()
result['stack_frame_number'] = m.group(1)
result['namespace'] = m.group(2)
result['method_name'] = m.group(3)
result['params'] = m.group(4)
result['filename'] = m.group(5)
result['line_no'] = int(m.group(6))
return result
else:
raise RuntimeError('Could not match regex on stack line:', line)
def next(self):
self.process.stdin.write("next\n")
def quit(self):
self.process.stdin.write("quit\n")
self.process.stdout.read() # Read to end
def read_expr(self, expr):
self.sync_pos()
self.process.stdin.write("print " + expr + "\n")
self.process.stdin.write("echo sentinel07501923\\n\n")
line = self.process.stdout.readline().strip()
if 'sentinel07501923' in line:
return None
else:
m = re.match(r'^\$([0-9]+)\s+=\s+(.*)$', line)
if m:
return m.group(2)
else:
raise RuntimeError('Could not match regex on expression print:', line)
if __name__ == '__main__':
gdb = gdb()
for x in range(10):
stack = gdb.get_current_stack()
print stack['line_no']
print 'x1:', gdb.read_expr('x1')
print 'x2:', gdb.read_expr('x2')
print 'x3:', gdb.read_expr('x3')
gdb.next()
gdb.quit()
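# Note: the demo block above calls gdb() with no arguments, but __init__ requires
# (python_file, cpp_file, cpp_start_line). A hypothetical invocation (file names
# and line number are made up) would look like:
#
#     session = gdb('driver.py', 'kernel.cpp', 42)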
|
"""
Realizar un programa que imprima en pantalla los números impares del 1 al 100.
Lo haremos con las diferentes opciones
* while
* for (usando “range”) y sin usar un condicional, solo con el uso del rango
"""
# while
print("======================while (impares) ======================")
i = 1
"""
Para obtener los impares para mostrarlos, ponemos la condición si la
división de entre 2 es diferente a 0 en el resto. Con eso sabemos que
es impar y lo mostramos
"""
while i <= 100:
if ( i % 2 != 0): print(i)
i += 1
# for
print("======================for (impares) ======================")
"""
En este caso aplicamos que por iteración haga + 2. Empezando de 1, que es
impar, si vamos sumando + tendremos 1, 3, 5, 7, 9, 11, 13, 15, 17,
19, 21,...
"""
for value in range(1, 101, 2):
print(value)
|
import unittest
from unittest import mock
from repeat import repeat
class TestException(Exception):
pass
class Repeat_Function_Test(unittest.TestCase):
def test_Should_CallFunctionUntilStopIterationException_When_Called(self):
func = mock.Mock(spec=[])
func.side_effect = [ [1], [2], StopIteration ]
called_history = []
for _ in repeat(func):
called_history.append(func.called)
func.called = False
self.assertEqual([True, True], called_history)
def test_Should_PropagateFunctionException_When_ExceptionRaised(self):
func = mock.Mock(spec=[])
func.side_effect = [ TestException ]
called_history = []
with self.assertRaises(TestException):
for _ in repeat(func):
pass
def test_Should_CallBeforeBeforeYielding_When_BeforeSpecified(self):
func = mock.Mock(spec=[])
before = mock.Mock(spec=[])
func.side_effect = [ 1, StopIteration ]
for _ in repeat(func, before=before):
self.assertTrue(before.called)
def test_Should_NotCallAfterBeforeYielding_When_AfterSpecified(self):
func = mock.Mock(spec=[])
after = mock.Mock(spec=[])
func.side_effect = [ 1, StopIteration ]
for _ in repeat(func, after=after):
self.assertFalse(after.called)
def test_Should_CallAfterAfterYielding_When_AfterSpecified(self):
func = mock.Mock(spec=[])
after = mock.Mock(spec=[])
func.side_effect = [ 1, StopIteration ]
for _ in repeat(func, after=after):
pass
self.assertTrue(after.called)
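# For reference, a minimal sketch of a repeat() generator consistent with the
# tests above (an assumption about repeat.repeat, not its actual implementation):
# call func until it raises StopIteration, invoking the optional before/after
# hooks around each yielded value.
#
#     def repeat(func, before=None, after=None):
#         while True:
#             if before is not None:
#                 before()
#             try:
#                 value = func()
#             except StopIteration:
#                 return
#             yield value
#             if after is not None:
#                 after()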
|
def Mmul(a, b):  # matrix product
# [m x n matrix] x [n x p matrix]
m = len(a)
n = len(a[0])
if len(b) != n:
raise ValueError('The number of columns of a does not match the number of rows of b')
p = len(b[0])
ans = [[0]*p for _ in range(m)]
for i in range(m):
for j in range(p):
for k in range(n):
ans[i][j] = (ans[i][j] + a[i][k]*b[k][j]) % MOD
return ans
def Mfactorial(n, p):  # n^p by repeated squaring, using the matrix product
if (p == 1):
return n
if (p % 2 == 0):
t = Mfactorial(n, p // 2)
return Mmul(t, t)
return Mmul(Mfactorial(n, p - 1), n)
n, k = map(int, input().split())
a = [list(map(int, input().split())) for _ in range(n)]
MOD = 10**9+7
print(sum([sum(i) % MOD for i in Mfactorial(a, k)]) % MOD)
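# Worked check (hypothetical input): for n=2, k=2 and a = [[1, 1], [1, 1]],
# Mfactorial(a, 2) = [[2, 2], [2, 2]], so the printed answer is 8.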
|
from telegram import Update
from core import CoreContext
from core.session import message_wrapper
@message_wrapper
def reset(update: Update, context: CoreContext):
if not context.user.token:
context.chat.send_message("Anda sudah keluar fitur elearning.")
return -1
context.user.token = None
context.save()
context.message.reply_text("Berhasil keluar fitur elearning, token dihapus.")
return -1
|
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
# Use 'Agg' to generate PNGs of graphs without displaying via X.
# If X is not available, uncomment the following two lines:
#import matplotlib
#matplotlib.use('Agg')
from balltracker.image import ImageStack
from balltracker.ball import ballSequence
def main():
# Basics:
# ---------------------------------------------------
name = "003_ball"
inputFiles = ImageStack(filePattern="example_projections/003_ball_%6d_img.tif")
flatFields = ImageStack(filePattern="example_projections/003_ball_%6d_ref.tif")
darkFields = ImageStack(filePattern="example_projections/003_ball_%6d_dar.tif")
outputFolder = "results"
infosInFilename = True
seq = ballSequence(inputFileStack=inputFiles, outputFolder=outputFolder, darkFileStack=darkFields, flatFileStack=flatFields)
seq.setRowAxis("x") # Name of detector's row vector axis
seq.setColAxis("y") # Name of detector's column vector axis / stage rotation axis
seq.setBeamAxis("z") # Name of beam direction axis
seq.setScanAngle(360) # Rotation stage angular coverage for the given projections.
seq.skip(2) # Only look at every n-th image. 1=all, 2=every second, ...
# Display Mode: live image.
# Show 'absorption', 'threshold' or 'edges' image. Or 'none'.
# Works only when multiprocessing is turned off.
seq.displayMode(mode='absorption')
seq.saveIntermediates(True) # Save intermediate processing steps as pictures.
seq.showDebugInfo(True) # Prints additional debug info to terminal.
# Orientation:
# ---------------------------------------------------
# The 'orientation' tag of the tiff files is obeyed.
# Further corrections can be made here if something is wrong:
seq.rotate("0") # Rotate images by "90", "180" or "270" degress.
seq.flip(horizontal=False, vertical=False)
# Multiprocessing:
# ---------------------------------------------------
# Python will automatically decide on the number of processes unless you set it.
seq.multiprocessing(True)
seq.numberOfProcesses(n=3)
# Image preprocessing:
# ---------------------------------------------------
seq.applyDarks(True) # Dark Field Correction
seq.applyRefs(True) # Flat Field Correction
seq.median(3)
seq.threshold(ratio=0.7, absolute=None) # At least one of these parameters must be 'None'
seq.patchCleanup(doCleanUp=True,
min_circle_area=(150*150),
max_patch_area=(1000*1000),
aspect_ratio_tolerance=0.15)
seq.edgeDetection('sobel') # 'sobel' (faster, thicker line) or 'canny' (slower, thinner line)
# Data reduction:
# ---------------------------------------------------
seq.binning(2)
seq.cropBorder(top=200, bottom=200, left=50, right=50)
#seq.crop(x0=100, y0=1000, x1=5000, y1=2000) # Crop overrides border crop, if defined.
seq.autoCrop(doAutoCrop=True, autoCropSize=900, autoCropBinningFactor=40)
# Cropping the ball afterwards, mostly to produce an animation:
seq.cropAndSaveCenteredBall(doCropBall=True, radius=300)
# Drift compensation:
# ---------------------------------------------------
# To shift reference picture before applying it to a projection.
# This needs a lot of RAM for full size images. Good idea to specify a drift observation ROI.
seq.driftCompensation(refsToImg=False)
# ROI region for unbinned image:
# Define distance from [top or bottom] and [left or right], and a size.
seq.driftROI(bottom=300, left=300, width=4000, height=600)
# ---------------------------------------------------
# Maximum allowed deviation from circle fit,
# in pixels, for unbinned image.
seq.circleFitTolerances(max_meanDeviation=6, max_maxDeviation=30)
# Intensity profile fit:
# ---------------------------------------------------
# Fits Beer-Lambert law to intensity profile, after circle fit.
# This fit always uses the unbinned picture.
# Fast; don't worry about computation time.
seq.fitIntensityProfile(False)
# Run the ball tracker to gather coordinates:
# ---------------------------------------------------
seq.trackBall()
seq.saveParameters(name=name, infosInFilename=infosInFilename)
seq.saveCoordinates(name=name, infosInFilename=infosInFilename)
# Or import previously saved coordinate files.
# The specifier tells which fit type shall be imported (or both).
# Alternatively, use a single 'filename=' parameter to specify an
# absolute path to a coordinate file.
#seq.importCoordinates(name=name, specifier="coordinates_circleFit", infosInFilename=infosInFilename)
#seq.importCoordinates(name=name, specifier="coordinates_intensityFit", infosInFilename=infosInFilename)
# Calculate center of rotation + axis tilt from trajectory:
# -----------------------------------------------------------
seq.calcAxisParameters(fitSecondOrderWave=False)
seq.saveCoordinates(name=name, infosInFilename=infosInFilename, withHeader=True)
seq.saveGeometryResults(name=name, infosInFilename=infosInFilename)
print("Finished in {}.".format(seq.getFormattedRuntime()))
seq.plotTrajectories(displayPlot=True, savePlot=True, saveAs=name, infosInFilename=infosInFilename)
if __name__ == '__main__':
main() |
# Generated by Django 3.1.4 on 2020-12-25 15:17
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0003_comment'),
]
operations = [
migrations.AddField(
model_name='comment',
name='email',
field=models.EmailField(default=None, max_length=254),
),
]
|
import seaborn as sns
sns.set_style("whitegrid")
penguins = sns.load_dataset("penguins")
def func(input="green"):
plot = sns.displot(penguins, x="flipper_length_mm", color=input, legend=False)
fig0 = plot.fig
fig0.set_size_inches(11, 8)
return fig0
import panel as pn
pn.extension()
select = pn.widgets.Select(value="#6082A2", options=["#a2a160", "#6082A2", "#a26061"])
interactive_func=pn.bind(func, input=select)
pn.template.FastListTemplate(
site="Panel", title="Works With The Tools You Know And Love",
sidebar=[select], main=[interactive_func],
header_background="#6082A2", accent_base_color="#6082A2"
).servable() |
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.shortcuts import render, redirect
import openpyxl
import csv
from django.urls import reverse
from django.views.generic import FormView, CreateView
from django.forms import ModelForm, HiddenInput
from alacode.models import Code, Tweet
class CodingForm(ModelForm):
class Meta:
model = Code
exclude = ['id']
widgets = {'user': HiddenInput(), 'tweet': HiddenInput()}
class CodingView(LoginRequiredMixin, CreateView):
login_url = '/login/'
form_class = CodingForm
template_name = 'alacode/coding.html'
def get_tweet(self):
self.coded = {c.tweet_id for c in Code.objects.filter(user=self.request.user)}
self.ncoded = len(self.coded)
tweets = Tweet.objects.exclude(pk__in=self.coded)
self.total = tweets.count()
if self.total == 0:
# Temporarily bump the denominator to avoid a ZeroDivisionError
self.total += 1
self.perc = 10 + int(self.ncoded / self.total * 90)
self.total -= 1
else:
self.perc = 10 + int(self.ncoded / self.total * 90)
return tweets[0]
def get_initial(self):
initial = super().get_initial()
self.tweet = self.get_tweet()
initial['tweet'] = self.tweet
initial['user'] = self.request.user
return initial
def get_success_url(self):
return reverse("alacode:index")
|
#data derived from http://www.data-compression.com/english.html
freqs = {'a': 0.080642499002080981, 'c': 0.026892340312538593, 'b': 0.015373768624831691, 'e': 0.12886234260657689, 'd': 0.043286671390026357, 'g': 0.019625534749730816, 'f': 0.024484713711692099, 'i': 0.06905550211598431, 'h': 0.060987267963718068, 'k': 0.0062521823678781188, 'j': 0.0011176940633901926, 'm': 0.025009719347800208, 'l': 0.041016761327711163, 'o': 0.073783151266212627, 'n': 0.069849754102356679, 'q': 0.0010648594165322703, 'p': 0.017031440203182008, 's': 0.063817324270355996, 'r': 0.06156572691936394, 'u': 0.027856851020401599, 't': 0.090246649949305979, 'w': 0.021192261444145363, 'v': 0.010257964235274787, 'y': 0.01806326249861108, 'x': 0.0016941732664605912, 'z': 0.0009695838238376564}
sum_f_squared = 0.0
sum_f = 0.0
for key in freqs:
sum_f += freqs[key]
sum_f_squared += freqs[key]**2
print(sum_f)
print(sum_f_squared) |
import json
import os
import re
import numpy as np
from django.conf import settings
from django.views.decorators.csrf import csrf_protect
from django.core.files.storage import FileSystemStorage
from django.shortcuts import redirect
from django.http import HttpResponse
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from PySAM import Windpower
from PySAM.ResourceTools import FetchResourceFiles
from api.models.calliope import Abstract_Tech, Run_Parameter
from api.models.configuration import Location, Technology, Tech_Param, \
Loc_Tech, Loc_Tech_Param, ParamsManager, Scenario, Scenario_Param, \
Scenario_Loc_Tech, Timeseries_Meta, Model, Model_Comment, \
Model_Favorite, User_File, Model_User
from api.tasks import task_status, upload_ts, copy_model
from taskmeta.models import CeleryTask
def validate_model_name(value):
if len(value) < 3:
raise ValidationError(f"Error: Invalid model name, too short.")
regex = re.compile(r"(<(.*)>.*?|<(.*) />|[^\w\s\(\)-])")
matched = regex.search(value)
if matched is None:
return
diff = set(value).difference(set(["(", ")", " ", "-", "_"]))
if len(diff) == 0:
raise ValidationError("Error: Invalid model name, should not contain only symbols")
result = matched.group(0)
raise ValidationError(f"Error: Invalid model name, should not contain '{result}'")
@csrf_protect
def add_model(request):
"""
Create a new model. Option to provide an existing model to copy as a new
instance. User must already have view access to the template model.
Parameters:
template_model_uuid (uuid): optional
model_name (str): required
Returns (json): Action Confirmation
Example:
POST: /api/add_model/
"""
user = request.user
model_name = request.POST["model_name"].strip()
template_model_uuid = request.POST["template_model_uuid"]
payload = {}
try:
validate_model_name(model_name)
except ValidationError as e:
payload["status"] = "Failed"
payload["message"] = str(e)
return HttpResponse(json.dumps(payload), content_type="application/json")
try:
template_model = Model.objects.get(uuid=template_model_uuid)
template_model.handle_view_access(user)
except Exception as e:
template_model = None
print("User building from blank model: {}".format(e))
# Create Model
model_name = Model.find_unique_name(model_name)
model = Model.objects.create(name=model_name)
Model_User.objects.create(user=user, model=model, can_edit=True)
comment = "{} initiated this model.".format(user.get_full_name())
Model_Comment.objects.create(model=model, comment=comment, type="version")
payload['model_uuid'] = str(model.uuid)
payload["status"] = "Added"
if template_model is not None:
try:
model.is_uploading = True
model.save()
copy_model.apply_async(
kwargs={"src_model_id": template_model.id,
"dst_model_id": model.id,
"user_id": user.id})
payload["status"] = "Submitted"
except Exception as e:
payload["status"] = "Failed"
payload["message"] = str(e)
model.delete()
return HttpResponse(json.dumps(payload), content_type="application/json")
@csrf_protect
def remove_model(request):
"""
Removes a user's access to a model. The model will still exist and
may be seen by other collaborators.
Parameters:
model_uuid (uuid): required
Returns (json): Action Confirmation
Example:
POST: /api/remove_model/
"""
user = request.user
model_uuid = request.POST["model_uuid"]
model = Model.by_uuid(model_uuid)
model.handle_view_access(request.user)
Model_User.objects.filter(model=model, user=user).hard_delete()
payload = {"message": "Dropped as collaborator."}
return HttpResponse(json.dumps(payload), content_type="application/json")
@csrf_protect
def duplicate_model(request):
"""
Duplicate a model as a view-only snapshot. Users may choose to take a
snapshot of a model to provide a retrieval checkpoint and/or begin a
forked version of their original model. A snapshot will replicate all of
its underlying data as new instances.
Parameters:
model_uuid (uuid): required
Returns (json): Action Confirmation
Example:
POST: /api/duplicate_model/
"""
user = request.user
model_uuid = request.POST["model_uuid"]
payload = {}
old_model = Model.by_uuid(model_uuid)
old_model.handle_edit_access(user)
# Create Model
model = Model.objects.create(name=old_model.name)
latest = Model.objects.filter(name=model.name).exclude(
snapshot_version=None).values_list('snapshot_version',
flat=True)
model.snapshot_version = np.max(list(latest) + [0]) + 1
model.snapshot_base = old_model
payload['model_uuid'] = str(model.uuid)
model.save()
try:
model.is_uploading = True
model.save()
copy_model.apply_async(
kwargs={"src_model_id": old_model.id,
"dst_model_id": model.id,
"user_id": user.id})
payload["status"] = "Submitted"
except Exception as e:
payload["status"] = "Failed"
payload["message"] = str(e)
model.delete()
return HttpResponse(json.dumps(payload), content_type="application/json")
@csrf_protect
def add_collaborator(request):
"""
Add a collaborator to a model. A collaborator may be:
granted edit permissions (value=1),
granted view-only permissions (value=0),
or stripped of all permissions (value=null)
Parameters:
model_uuid (uuid): required
collaborator_id (str): required
collaborator_can_edit (int): optional (0 or 1)
Returns (json): Action Confirmation
Example:
POST: /api/add_collaborator/
"""
model_uuid = request.POST["model_uuid"]
user_id = request.POST["collaborator_id"]
user = User.objects.filter(id=user_id).first()
try:
can_edit = bool(int(request.POST["collaborator_can_edit"]))
except ValueError:
can_edit = None
model = Model.by_uuid(model_uuid)
model.handle_edit_access(request.user)
if user:
message = Model_User.update(model, user, can_edit)
else:
message = "No user registered by that email."
payload = {"message": message}
return HttpResponse(json.dumps(payload), content_type="application/json")
def validate_model_comment(value):
value = value.strip()
if len(value) == 0:
raise ValidationError("Please write your comment.")
regex = re.compile(r"(<(.*)>.*?|<(.*) />)")
matched = regex.search(value)
if matched is None:
return
result = matched.group(0)
raise ValidationError(f"Invalid comment string, please remove '{result}'")
@csrf_protect
def add_model_comment(request):
"""
Add a user comment to a model's activity page
Parameters:
model_uuid (uuid): required
comment (str): required
Returns (json): Action Confirmation
Example:
POST: /api/add_model_comment/
"""
model_uuid = request.POST["model_uuid"]
comment = request.POST["comment"]
try:
validate_model_comment(comment)
except ValidationError as e:
payload = {"message": str(e)}
return HttpResponse(json.dumps(payload), content_type="application/json")
model = Model.by_uuid(model_uuid)
model.handle_edit_access(request.user)
Model_Comment.objects.create(
model=model, user=request.user, comment=comment, type="comment"
)
model.notify_collaborators(request.user)
payload = {"message": "Added comment."}
return HttpResponse(json.dumps(payload), content_type="application/json")
# ------ Locations
@csrf_protect
def update_location(request):
"""
Add or Update a location.
To update a location, must provide a location_id
Parameters:
model_uuid (uuid): required
location_id (int): optional
location_name (str): required
location_lat (float): required
location_long (float): required
location_area (float): required
location_description (str): required
Returns (json): Action Confirmation
Example:
POST: /api/update_location/
"""
model_uuid = request.POST["model_uuid"]
location_id = int(request.POST.get("location_id", 0))
location_name = request.POST["location_name"].strip()
location_lat = float(request.POST["location_lat"])
location_long = float(request.POST["location_long"])
location_area = request.POST["location_area"]
location_description = request.POST["location_description"]
model = Model.by_uuid(model_uuid)
model.handle_edit_access(request.user)
if location_area == "":
location_area = None
elif float(location_area) < 0:
location_area = None
if ((location_lat < -90) or (location_lat > 90)):
location_lat = 0
if ((location_long < -180) or (location_long > 180)):
location_long = 0
non_unique_name = True
while non_unique_name:
existing = model.locations.filter(pretty_name__iexact=location_name)
if existing:
if location_id == existing.first().id:
non_unique_name = False
else:
location_name += " COPY"
else:
non_unique_name = False
if location_id:
model.locations.filter(id=location_id).update(
pretty_name=location_name,
name=ParamsManager.simplify_name(location_name),
latitude=location_lat,
longitude=location_long,
available_area=location_area,
description=location_description,
)
# Log Activity
comment = "{} updated the location: {}.".format(
request.user.get_full_name(), location_name
)
Model_Comment.objects.create(model=model, comment=comment, type="edit")
model.notify_collaborators(request.user)
model.deprecate_runs(location_id=location_id)
payload = {
"message": "edited location",
"location_id": location_id,
"location_name": location_name,
"location_lat": location_lat,
"location_long": location_long,
"location_area": location_area,
"location_description": location_description,
}
else:
location = Location.objects.create(
model_id=model.id,
pretty_name=location_name,
name=ParamsManager.simplify_name(location_name),
latitude=location_lat,
longitude=location_long,
available_area=location_area,
description=location_description,
)
# Log Activity
comment = "{} added a location: {}.".format(
request.user.get_full_name(), location_name
)
Model_Comment.objects.create(model=model, comment=comment, type="add")
model.notify_collaborators(request.user)
payload = {
"message": "added location",
"location_id": location.id,
"location_name": location_name,
"location_lat": location_lat,
"location_long": location_long,
"location_area": location_area,
"location_description": location_description,
}
return HttpResponse(json.dumps(payload), content_type="application/json")
@csrf_protect
def delete_location(request):
"""
Delete a location. This action will cascade "delete" all instances that
refer to it.
Parameters:
model_uuid (uuid): required
location_id (int): required
Returns (json): Action Confirmation
Example:
POST: /api/delete_location/
"""
model_uuid = request.POST["model_uuid"]
location_id = request.POST["location_id"]
model = Model.by_uuid(model_uuid)
model.handle_edit_access(request.user)
locations = model.locations.filter(id=location_id)
if len(locations) > 0:
pretty_name = locations.first().pretty_name
model.deprecate_runs(location_id=location_id)
locations.delete()
# Log Activity
comment = "{} deleted the location: {}.".format(
request.user.get_full_name(), pretty_name
)
Model_Comment.objects.create(model=model,
comment=comment, type="delete")
model.notify_collaborators(request.user)
payload = {"message": "deleted location"}
return HttpResponse(json.dumps(payload), content_type="application/json")
# ------ Technologies
@csrf_protect
def add_technology(request):
"""
Add a new technology. Optionally create the technology from an existing
technology to inherit its technology-level parameters. Any override
parameters set at the node level will not be transferred.
Parameters:
model_uuid (uuid): required
technology_pretty_name (str): required
technology_type (str): required
technology_id (int): optional
Returns (json): Action Confirmation
Example:
POST: /api/add_technology/
"""
model_uuid = request.POST["model_uuid"]
technology_pretty_name = request.POST["technology_name"]
technology_id = request.POST.get("technology_id", None)
technology_type = request.POST["technology_type"]
model = Model.by_uuid(model_uuid)
model.handle_edit_access(request.user)
abstract_tech = Abstract_Tech.objects.filter(name=technology_type).first()
technology_name = ParamsManager.simplify_name(technology_pretty_name)
if technology_id is not None:
existing = Technology.objects.filter(id=technology_id).first()
existing.model.handle_view_access(request.user)
technology = existing.duplicate(model.id, technology_pretty_name)
else:
technology = Technology.objects.create(
model_id=model.id,
abstract_tech_id=abstract_tech.id,
name=technology_name,
pretty_name=technology_pretty_name,
)
Tech_Param.objects.create(
model_id=model.id,
technology_id=technology.id,
parameter_id=1,
value=technology_type,
)
Tech_Param.objects.create(
model_id=model.id,
technology_id=technology.id,
parameter_id=2,
value=technology_pretty_name,
)
# Log Activity
comment = "{} added a technology: {}.".format(
request.user.get_full_name(), technology_pretty_name
)
Model_Comment.objects.create(model=model, comment=comment, type="add")
model.notify_collaborators(request.user)
request.session["technology_id"] = technology.id
payload = {"message": "added technology", "technology_id": technology.id}
return HttpResponse(json.dumps(payload), content_type="application/json")
@csrf_protect
def delete_technology(request):
"""
Delete a technology. This action will cascade "delete" all instances that
refer to it.
Parameters:
model_uuid (uuid): required
technology_id (int): required
Returns (json): Action Confirmation
Example:
POST: /api/delete_technology/
"""
model_uuid = request.POST["model_uuid"]
technology_id = request.POST["technology_id"]
model = Model.by_uuid(model_uuid)
model.handle_edit_access(request.user)
technologies = model.technologies.filter(id=technology_id)
if len(technologies) > 0:
technology_pretty_name = technologies.first().pretty_name
model.deprecate_runs(technology_id=technology_id)
technologies.delete()
# Log Activity
comment = "{} deleted the technology: {}.".format(
request.user.get_full_name(), technology_pretty_name
)
Model_Comment.objects.create(model=model,
comment=comment, type="delete")
model.notify_collaborators(request.user)
payload = {"message": "deleted technology"}
return HttpResponse(json.dumps(payload), content_type="application/json")
@csrf_protect
def update_tech_params(request):
"""
Update the parameters for a technology. Parameter data is provided in a
form_data object which stores updates under the following keys:
'essentials', 'add', 'edit', 'delete'
Parameters:
model_uuid (uuid): required
technology_id (int): required
form_data (json): required
Returns (json): Action Confirmation
Example:
POST: /api/update_tech_params/
"""
model_uuid = request.POST["model_uuid"]
technology_id = request.POST["technology_id"]
form_data = json.loads(request.POST["form_data"])
model = Model.by_uuid(model_uuid)
model.handle_edit_access(request.user)
technology = model.technologies.filter(id=technology_id)
if len(technology) > 0:
technology.first().update(form_data)
# Log Activity
comment = "{} updated the technology: {}.".format(
request.user.get_full_name(),
technology.first().pretty_name,
)
Model_Comment.objects.create(model=model, comment=comment, type="edit")
model.notify_collaborators(request.user)
model.deprecate_runs(technology_id=technology_id)
payload = {"message": "Success."}
return HttpResponse(json.dumps(payload), content_type="application/json")
@csrf_protect
def update_favorite(request):
"""
Add a parameter as a favorite. Favorites are persisted on a model-by-model
basis. Therefore, if one user adds or removes a favorite parameter,
all collaborators on that model will experience those changes.
Parameters:
model_uuid (uuid): required
param_id (int): required
add_favorite (int): required
Returns (json): Action Confirmation
Example:
GET: /api/update_favorite/
"""
model_uuid = request.GET["model_uuid"]
add_favorite = int(request.GET["add_favorite"])
param_id = int(request.GET["param_id"])
model = Model.by_uuid(model_uuid)
model.handle_edit_access(request.user)
if add_favorite:
Model_Favorite.objects.create(model_id=model.id, parameter_id=param_id)
payload = {"message": "added favorite"}
else:
Model_Favorite.objects.filter(model_id=model.id,
parameter_id=param_id).hard_delete()
payload = {"message": "removed favorite"}
return HttpResponse(json.dumps(payload), content_type="application/json")
@csrf_protect
def convert_to_timeseries(request):
"""
Convert a static parameter into a timeseries. Note that this does not yet
assign a timeseries meta instance to the parameter instance. Any previous
data that has been configured for this parameter will be lost.
Parameters:
model_uuid (uuid): required
technology_id (int): required
param_id (int): required
Returns (json): Action Confirmation
Example:
GET: /api/convert_to_timeseries/
"""
model_uuid = request.GET["model_uuid"]
param_id = int(request.GET["param_id"])
technology_id = request.GET["technology_id"]
try:
loc_tech_id = int(request.GET["loc_tech_id"])
except Exception as e:
loc_tech_id = None
print("Technology only: {}".format(e))
model = Model.by_uuid(model_uuid)
model.handle_edit_access(request.user)
if loc_tech_id:
Loc_Tech_Param.objects.filter(
model_id=model.id, parameter_id=param_id, loc_tech_id=loc_tech_id
).hard_delete()
Loc_Tech_Param.objects.create(
model_id=model.id,
parameter_id=param_id,
loc_tech_id=loc_tech_id,
value=0,
timeseries=True,
)
payload = {"message": "added timeseries to node"}
else:
Tech_Param.objects.filter(model_id=model.id,
parameter_id=param_id,
technology_id=technology_id).hard_delete()
Tech_Param.objects.create(
model_id=model.id,
parameter_id=param_id,
technology_id=technology_id,
value=0,
timeseries=True,
)
payload = {"message": "added timeseries to technology"}
return HttpResponse(json.dumps(payload), content_type="application/json")
# ------ Location-Technologies (Nodes)
@csrf_protect
def add_loc_tech(request):
"""
Add a new node (location + technology). An argument for location_2_id is
only required for nodes with a transmission technology.
Parameters:
model_uuid (uuid): required
technology_id (int): required
location_1_id (int): required
location_2_id (int): optional
loc_tech_description (str): optional
Returns (json): Action Confirmation
Example:
POST: /api/add_loc_tech/
"""
model_uuid = request.POST["model_uuid"]
technology_id = request.POST["technology_id"]
location_1_id = request.POST["location_1_id"]
location_2_id = request.POST.get("location_2_id", None)
loc_tech_description = request.POST.get("loc_tech_description", None)
model = Model.by_uuid(model_uuid)
model.handle_edit_access(request.user)
technology = model.technologies.filter(id=technology_id).first()
location_1 = model.locations.filter(id=location_1_id).first()
location_2 = model.locations.filter(id=location_2_id).first()
if technology.abstract_tech.name != "transmission":
location_2_id = None
existing = model.loc_techs.filter(
technology=technology,
location_1=location_1,
location_2=location_2,
)
if existing.first():
loc_tech = existing.first()
else:
loc_tech = Loc_Tech.objects.create(
model=model,
technology=technology,
location_1=location_1,
location_2=location_2,
description=loc_tech_description,
)
# Log Activity
comment = "{} added a node: {} ({}) @ {}.".format(
request.user.get_full_name(),
technology.pretty_name,
technology.tag,
location_1.pretty_name,
)
Model_Comment.objects.create(model=model, comment=comment, type="add")
model.notify_collaborators(request.user)
request.session["loc_tech_id"] = loc_tech.id
payload = {"message": "added location technology",
"loc_tech_id": loc_tech.id}
return HttpResponse(json.dumps(payload), content_type="application/json")
@csrf_protect
def delete_loc_tech(request):
"""
Delete a node (location + technology). This action will cascade "delete"
all instances that refer to it.
Parameters:
model_uuid (uuid): required
loc_tech_id (int): required
Returns (json): Action Confirmation
Example:
POST: /api/delete_loc_tech/
"""
model_uuid = request.POST["model_uuid"]
loc_tech_id = request.POST["loc_tech_id"]
model = Model.by_uuid(model_uuid)
model.handle_edit_access(request.user)
loc_techs = model.loc_techs.filter(id=loc_tech_id)
# Log Activity
comment = "{} deleted the node: {} ({}) @ {}.".format(
request.user.get_full_name(),
loc_techs.first().technology.pretty_name,
loc_techs.first().technology.tag,
loc_techs.first().location_1.pretty_name,
)
Model_Comment.objects.create(model=model, comment=comment, type="delete")
model.notify_collaborators(request.user)
model.deprecate_runs(technology_id=loc_techs.first().technology_id)
loc_techs.delete()
payload = {"message": "deleted location technology"}
return HttpResponse(json.dumps(payload), content_type="application/json")
@csrf_protect
def update_loc_tech_params(request):
"""
Update the parameters for a node. Parameter data is provided in a
form_data object which stores updates under the following keys:
'add', 'edit', 'delete'
Parameters:
model_uuid (uuid): required
loc_tech_id (int): required
form_data (json): required
Returns (json): Action Confirmation
Example:
POST: /api/update_loc_tech_params/
"""
model_uuid = request.POST["model_uuid"]
loc_tech_id = request.POST["loc_tech_id"]
form_data = json.loads(request.POST["form_data"])
model = Model.by_uuid(model_uuid)
model.handle_edit_access(request.user)
loc_tech = model.loc_techs.filter(id=loc_tech_id)
if len(loc_tech) > 0:
loc_tech.first().update(form_data)
# Log Activity
comment = "{} updated the node: {} ({}) @ {}.".format(
request.user.get_full_name(),
loc_tech.first().technology.pretty_name,
loc_tech.first().technology.tag,
loc_tech.first().location_1.pretty_name,
)
Model_Comment.objects.create(model=model, comment=comment, type="edit")
model.notify_collaborators(request.user)
model.deprecate_runs(technology_id=loc_tech.first().technology_id)
payload = {"message": "Success."}
return HttpResponse(json.dumps(payload), content_type="application/json")
# ------ Scenarios
@csrf_protect
def add_scenario(request):
"""
Create a new scenario. Option to create a new scenario from an existing one
by providing an existing scenario_id. Configuration and settings will be
copied as new instances.
Parameters:
model_uuid (uuid): required
scenario_name (str): required
scenario_id (str): optional
Returns (json): Action Confirmation
Example:
POST: /api/add_scenario/
"""
model_uuid = request.POST["model_uuid"]
scenario_id = request.POST.get("scenario_id", None)
scenario_name = request.POST["scenario_name"]
model = Model.by_uuid(model_uuid)
model.handle_edit_access(request.user)
if scenario_id not in [None, '']:
existing = model.scenarios.filter(id=scenario_id).first()
scenario = existing.duplicate(scenario_name)
else:
scenario = Scenario.objects.create(model_id=model.id,
name=scenario_name)
parameters = Run_Parameter.objects.all()
for param in parameters:
if param.name == "name":
value = "{}: {}".format(model.name, scenario_name)
else:
value = param.default_value
Scenario_Param.objects.create(
scenario=scenario, run_parameter=param,
value=value, model=model
)
# Log Activity
comment = "{} added a scenario: {}.".format(
request.user.get_full_name(), scenario_name
)
Model_Comment.objects.create(model=model, comment=comment, type="add")
model.notify_collaborators(request.user)
request.session["scenario_id"] = scenario.id
payload = {"message": "added scenario", "scenario_id": scenario.id}
return HttpResponse(json.dumps(payload), content_type="application/json")
@csrf_protect
def toggle_scenario_loc_tech(request):
"""
Add/remove a node (loc_tech) to/from a scenario.
Parameters:
model_uuid (uuid): required
scenario_id (int): required
loc_tech_ids (int, comma delimited): required
add (int): required: 1-True, 0-False
Returns (json): Action Confirmation
Example:
POST: /api/toggle_scenario_loc_tech/
"""
model_uuid = request.POST["model_uuid"]
scenario_id = request.POST["scenario_id"]
loc_tech_ids = request.POST["loc_tech_ids"]
loc_tech_ids = [int(i) for i in str(loc_tech_ids).split(',')]
add = bool(int(request.POST["add"]))
model = Model.by_uuid(model_uuid)
model.handle_edit_access(request.user)
scenario = model.scenarios.filter(id=scenario_id).first()
scenario_loc_techs = Scenario_Loc_Tech.objects.filter(
model_id=model.id, scenario_id=scenario_id,
loc_tech_id__in=loc_tech_ids
)
scenario_loc_techs.delete()
if add:
slts = [Scenario_Loc_Tech(model_id=model.id, scenario_id=scenario_id,
loc_tech_id=lt) for lt in loc_tech_ids]
Scenario_Loc_Tech.objects.bulk_create(slts)
# Log Activity
comment = "{} updated the scenario: {}.".format(
request.user.get_full_name(), scenario.name
)
Model_Comment.objects.filter(model=model,
comment=comment, type="edit").hard_delete()
Model_Comment.objects.create(model=model, comment=comment, type="edit")
model.deprecate_runs(scenario_id=scenario_id)
# Return new list of active loc tech IDs
active_lts = Scenario_Loc_Tech.objects.filter(scenario_id=scenario_id)
active_lt_ids = list(active_lts.values_list("loc_tech_id", flat=True))
payload = {"active_lt_ids": active_lt_ids,
"message": "Updated scenario's location technologies"}
return HttpResponse(json.dumps(payload), content_type="application/json")
@csrf_protect
def update_scenario_params(request):
"""
Update the parameters on a scenario. Parameter data is provided in a
form_data object which stores updates under the following keys:
'add', 'edit', 'delete'
Parameters:
model_uuid (uuid): required
scenario_id (int): required
form_data (json): required
Returns (json): Action Confirmation
Example:
POST: /api/update_scenario_params/
"""
model_uuid = request.POST["model_uuid"]
scenario_id = request.POST["scenario_id"]
form_data = json.loads(request.POST["form_data"])
model = Model.by_uuid(model_uuid)
model.handle_edit_access(request.user)
scenario = model.scenarios.filter(id=scenario_id).first()
Scenario_Param.update(scenario, form_data)
# Log Activity
comment = "{} updated the scenario: {}.".format(
request.user.get_full_name(), scenario.name
)
Model_Comment.objects.create(model=model, comment=comment, type="edit")
model.notify_collaborators(request.user)
model.deprecate_runs(scenario_id=scenario_id)
payload = {"message": "Success."}
return HttpResponse(json.dumps(payload), content_type="application/json")
@csrf_protect
def delete_scenario(request):
"""
Delete a scenario. This action will cascade "delete" all instances that
refer to it.
Parameters:
model_uuid (uuid): required
scenario_id (int): required
Returns (json): Action Confirmation
Example:
POST: /api/delete_scenario/
"""
model_uuid = request.POST["model_uuid"]
scenario_id = request.POST["scenario_id"]
model = Model.by_uuid(model_uuid)
model.handle_edit_access(request.user)
scenarios = model.scenarios.filter(id=scenario_id)
if len(scenarios) > 0:
name = scenarios.first().name
scenarios.delete()
# Log Activity
comment = "{} deleted the scenario: {}.".format(
request.user.get_full_name(), name
)
Model_Comment.objects.create(model=model,
comment=comment, type="delete")
model.notify_collaborators(request.user)
payload = {"message": "deleted scenario"}
return HttpResponse(json.dumps(payload), content_type="application/json")
# ------ Timeseries
@csrf_protect
def upload_file(request):
"""
Upload a timeseries file.
Parameters:
model_uuid (uuid): required
description (str): optional
myfile (file): required
Returns: Redirect to the timeseries page for the given model
Example:
POST: /api/upload_file/
"""
model_uuid = request.POST["model_uuid"]
description = request.POST.get("file-description", None)
model = Model.by_uuid(model_uuid)
model.handle_edit_access(request.user)
if (request.method == "POST") and ("myfile" in request.FILES):
myfile = request.FILES["myfile"]
fs = FileSystemStorage()
filename = fs.save("user_files/" + myfile.name, myfile)
User_File.objects.create(filename=filename,
description=description, model=model)
return redirect("/%s/timeseries/" % model_uuid)
return redirect("/{}/timeseries/".format(model_uuid))
@csrf_protect
def delete_timeseries(request):
"""
Delete a timeseries
Parameters:
model_uuid (uuid): required
timeseries_id (int): required
Returns (json): Action Confirmation
Example:
POST: /api/delete_timeseries/
"""
model_uuid = request.POST.get("model_uuid", None)
timeseries_id = request.POST.get("timeseries_id", None)
model = Model.by_uuid(model_uuid)
model.handle_edit_access(request.user)
timeseries_meta = Timeseries_Meta.objects.filter(
model=model, id=timeseries_id
)
timeseries_meta.delete()
payload = {"message": "Success."}
return HttpResponse(json.dumps(payload), content_type="application/json")
@csrf_protect
def delete_file(request):
"""
Delete a user timeseries file
Parameters:
model_uuid (uuid): required
file_id (int): required
Returns (json): Action Confirmation
Example:
POST: /api/delete_file/
"""
model_uuid = request.POST.get("model_uuid", None)
file_id = request.POST.get("file_id", None)
model = Model.by_uuid(model_uuid)
model.handle_edit_access(request.user)
file_record = User_File.objects.filter(model=model, id=file_id)
file_record.delete()
payload = {"message": "Success."}
return HttpResponse(json.dumps(payload), content_type="application/json")
@csrf_protect
def import_timeseries(request):
"""
Import a timeseries
Parameters:
model_uuid (uuid): required
timeseries_id (int): required
Returns (json): Action Confirmation
Example:
POST: /api/import_timeseries/
"""
model_uuid = request.POST["model_uuid"]
name = request.POST["name"]
values = request.POST["timeseries"].split(',')
model = Model.by_uuid(model_uuid)
model.handle_edit_access(request.user)
Timeseries_Meta.create_ts_8760(model, name, values)
payload = {"message": "Success."}
return HttpResponse(json.dumps(payload), content_type="application/json")
@csrf_protect
def upload_timeseries(request):
"""
Build and save a clean timeseries csv from a user uploaded file.
Parameters:
model_uuid (uuid): required
file_id (int): required
timeseries_name (str): required
timestamp_col (int): required
value_col (int): required
has_header (bool): required
Returns (json): Action Confirmation
Example:
GET: /api/upload_timeseries/
"""
model_uuid = request.GET.get("model_uuid", None)
file_id = request.GET.get("file_id", None)
timeseries_name = request.GET.get("timeseries_name", None)
timestamp_col = request.GET.get("timestamp_col", None)
value_col = request.GET.get("value_col", None)
has_header = request.GET.get("has_header", None)
if has_header == "true":
has_header = True
else:
has_header = False
model = Model.by_uuid(model_uuid)
model.handle_edit_access(request.user)
file_record = User_File.objects.filter(model=model, id=file_id)
simple_filename = file_record.first().simple_filename()
payload = {}
existing = Timeseries_Meta.objects.filter(model=model,
name=timeseries_name).first()
if existing:
payload["status"] = task_status.FAILURE
payload["message"] = "Timeseries name already exists"
return HttpResponse(json.dumps(payload),
content_type="application/json")
new_meta = Timeseries_Meta.objects.create(
model=model,
name=timeseries_name,
original_filename=simple_filename,
original_timestamp_col=timestamp_col,
original_value_col=value_col,
)
try:
async_result = upload_ts.apply_async(
kwargs={
"model_uuid": model_uuid,
"timeseries_meta_id": new_meta.id,
"file_id": file_id,
"timestamp_col": timestamp_col,
"value_col": value_col,
"has_header": has_header,
}
)
upload_task = CeleryTask.objects.get(task_id=async_result.id)
new_meta.upload_task = upload_task
new_meta.is_uploading = True
new_meta.save()
payload["status"] = "Success"
# Only means that the submission of the celery task was successful.
except Exception as e:
print(e)
payload["status"] = "Failed"
payload["message"] = str(e)
if not has_header:
payload["message"] += (
" Please try checking the box, "
'"The first row of the selected CSV file is a header row."'
)
new_meta.delete()
return HttpResponse(json.dumps(payload), content_type="application/json")
@csrf_protect
def wtk_timeseries(request):
"""
Pull timeseries from WTK (PySAM)
Parameters:
lat (float): required e.g. 32.22
lon (float): required e.g. -97.83
Returns (json): Data
Example:
POST: /api/wtk_timeseries/
"""
latitude = request.POST["lat"]
longitude = request.POST["lon"]
coordinate = (longitude, latitude)
# Fetch wind resource data
wtk_fp = wtk_fetch_resource_files(coordinate)
# --- Initialize generator ---
if wtk_fp is not None:
generator = Windpower.default('WindPowerNone')
generator.Resource.assign({'wind_resource_filename': wtk_fp})
generator.execute()
generation = np.array(generator.Outputs.gen)
cf_profile = generation / generator.Farm.system_capacity
payload = {"cf_profile": list(cf_profile)}
else:
payload = {"message": "Not Found"}
return HttpResponse(json.dumps(payload), content_type="application/json")
def wtk_fetch_resource_files(coordinate):
"""Fetch wind resource data"""
wr = FetchResourceFiles(
tech='wind',
resource_year='tmy',
nrel_api_email=settings.NREL_API_EMAIL,
nrel_api_key=settings.NREL_API_KEY,
resource_dir=os.path.join(settings.DATA_STORAGE, 'wind-data')
)
wr.fetch([coordinate])
# --- Get resource data file path ---
wtk_path_dict = wr.resource_file_paths_dict
wtk_fp = wtk_path_dict[coordinate]
return wtk_fp
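# A minimal client-side sketch (an assumption, not part of this module) showing
# how the /api/add_model/ endpoint documented above could be called. The base
# URL, authenticated session, and CSRF cookie name are hypothetical.
#
#     import requests
#
#     def add_model_example(base_url, session, model_name, template_model_uuid=""):
#         resp = session.post(
#             base_url + "/api/add_model/",
#             data={"model_name": model_name,
#                   "template_model_uuid": template_model_uuid},
#             headers={"X-CSRFToken": session.cookies.get("csrftoken", "")},
#         )
#         return resp.json()  # e.g. {"status": "Added", "model_uuid": "..."}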
|
# File: ciscoesa_view.py
#
# Copyright (c) 2017-2022 Splunk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions
# and limitations under the License.
def _get_key_data(report_data):
""" Function to get key data to fetch data from report data
:param report_data: Object containing report data
:return parsed report
"""
report = dict()
# Iterating over data for each report
for key, data in report_data.items():
report[key] = dict()
# Iterating over keys in report data, to get only non-empty values
for report_key, value in data.get("data", {}).items():
if not value:
continue
elif isinstance(value, list):
for recipient_data in data["data"][report_key]:
if recipient_data["recipient"] not in report[key]:
report[key][recipient_data["recipient"]] = dict()
report[key][recipient_data["recipient"]][report_key] = recipient_data["count"]
return report
def get_ctx_result(result):
""" Function to collect information to be rendered for "get report" action
:param result: report data
:return: result containing summary, data and parameter values
"""
ctx_result = {}
param = result.get_param()
summary = result.get_summary()
data = result.get_data()
ctx_result["param"] = param
if summary:
ctx_result["summary"] = summary
if not data:
ctx_result["data"] = dict()
return ctx_result
ctx_result["data"] = _get_key_data(data[0])
return ctx_result
def display_reports(provides, all_app_runs, context):
""" Function to render HTML file to display report generated
:param provides: Action name
:param all_app_runs: Object containing summary and action_result data
:param context: Object containing container details
:return return HTML file name
"""
context["results"] = results = []
for summary, action_results in all_app_runs:
for result in action_results:
ctx_result = get_ctx_result(result)
if not ctx_result:
continue
results.append(ctx_result)
return "ciscoesa_display_reports.html"
|
from django.urls import path
from plugins.ctftime.views import CTFTimeView
app_name = 'ctftime'
urlpatterns = [
path('', CTFTimeView.as_view(), name='ctftime'),
]
|
# Generated by Django 3.0.6 on 2020-05-29 20:16
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0002_auto_20200529_1952'),
]
operations = [
migrations.AlterField(
model_name='product',
name='label',
field=models.CharField(choices=[('SALE', 'sale'), ('NEW', 'new'), ('PROMOTION', 'promotion')], max_length=9),
),
]
|
"""Contains the utilities to display things on the screen
"""
class FixedWidth():
"""Utility to display information in a tabulated manner
"""
def __init__(self, dimensions: dict):
"""
Args:
dimensions: dict containing fields as keys and column width (ints) as values
"""
self.dim = dimensions
self.rows = []
self.delimiter = ''
def get_header(self):
"""Returns the header row in a tabulated manner
Returns: str
"""
ret = ''
for k, v in self.dim.items():
ret += FixedWidth.fit(v, str(k))
ret += self.delimiter
return ret
def add_row(self, row: dict):
"""Add row to the collection that is to be displayed
Args:
row: dict with fields as keys and values as values
Returns:
"""
self.rows.append(row)
def set_delimiter(self, character):
"""Set delimiter such as '|'
Args:
character: str
Returns:
"""
self.delimiter = character
@staticmethod
def fit(size: int, txt: str):
"""Forces a txt to fit into the required size.
Long texts get truncated and appended with a '..'
Args:
size: int the size of to fit the text in
txt: str the text that needs to be resized
Returns:
"""
# Length
l = len(txt)
# dimension of field
d = size
# number of spaces to append
s = d - l if l <= d else 0
# ellipsis
e = '..' if l > d else ''
return txt[0:(l if l <= d else (d - len(e)))] + e + ' ' * s
def format(self):
"""Iterates the rows and formats them.
        Returns: str, a text blob that can be printed
"""
ret = ''
for row in self.rows:
ret += self.delimiter
for k, v in self.dim.items():
ret += FixedWidth.fit(v, str(row[k]))
ret += self.delimiter
ret += '\n'
# The following removes trailing newline
return ret.rstrip()
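# Small usage sketch of FixedWidth (only runs when this module is executed directly):
# column widths are declared up front, rows are added as dicts keyed by the same field
# names, and format() returns a printable blob. Field names and widths are illustrative.
if __name__ == '__main__':
    table = FixedWidth({'name': 12, 'status': 8})
    table.set_delimiter('|')
    table.add_row({'name': 'first-service', 'status': 'ok'})  # long value gets truncated with '..'
    table.add_row({'name': 'db', 'status': 'degraded'})
    print(table.get_header())
    print(table.format())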
|
import os
import sys
import requests
import subprocess
installer_version_url = 'https://public.api.mindsdb.com/installer/@@beta_or_release/docker___success___None'
api_response = requests.get(
installer_version_url.replace('@@beta_or_release', sys.argv[1]))
if api_response.status_code != 200:
    sys.exit(1)
installer_version = api_response.text
os.system('mkdir -p dist')
if sys.argv[1] == 'release':
    container_name = 'mindsdb'
    dockerfile_template = 'dockerfile_release.template'
elif sys.argv[1] == 'beta':
    container_name = 'mindsdb_beta'
    dockerfile_template = 'dockerfile_beta.template'
else:
    print("Expected 'release' or 'beta' as the first argument, got: {}".format(sys.argv[1]))
    sys.exit(1)
with open(dockerfile_template, 'r') as fp:
content = fp.read()
content = content.replace('@@beta_or_release', sys.argv[1])
content = content.replace('@@installer_version', installer_version)
with open('dist/Dockerfile', 'w') as fp:
fp.write(content)
command = (f"""
cd dist &&
docker build -t {container_name} . &&
docker tag {container_name} mindsdb/{container_name}:latest &&
docker tag {container_name} mindsdb/{container_name}:{installer_version} &&
docker push mindsdb/{container_name};
cd ..
""")
subprocess.run(command, shell=True, check=True) |
# Generated by Django 3.1.3 on 2021-04-06 05:32
import itertools
from django.db import migrations
from library.vcf_utils import get_variant_caller_and_version_from_vcf
def _get_variant_caller_from_vcf_file(VariantCaller, vcf_path):
variant_caller, version = get_variant_caller_and_version_from_vcf(vcf_path)
if variant_caller is None:
variant_caller = "Unknown Variant Caller"
version = -1
return VariantCaller.objects.get_or_create(name=variant_caller, version=version)[0]
def _one_off_seqauto_variant_caller(apps, schema_editor):
SampleSheetCombinedVCFFile = apps.get_model("seqauto", "SampleSheetCombinedVCFFile")
VCFFile = apps.get_model("seqauto", "VCFFile")
VCFFromSequencingRun = apps.get_model("seqauto", "VCFFromSequencingRun")
VariantCaller = apps.get_model("seqauto", "VariantCaller")
legacy_combo = SampleSheetCombinedVCFFile.objects.filter(sequencing_run__legacy=True)
legacy_vcf = VCFFile.objects.filter(sequencing_run__legacy=True)
legacy_vcf_from_runs = VCFFromSequencingRun.objects.filter(sequencing_run__legacy=True)
if legacy_combo.exists() or legacy_vcf.exists() or legacy_vcf_from_runs.exists():
legacy_gatk = VariantCaller.objects.get_or_create(name="GATK", version="2014.4-3.3.0-0-ga3711aa")[0]
legacy_combo.update(variant_caller=legacy_gatk)
legacy_vcf.update(variant_caller=legacy_gatk)
legacy_vcf_from_runs.update(variant_caller=legacy_gatk)
recent_combo = SampleSheetCombinedVCFFile.objects.filter(sequencing_run__legacy=False)
recent_vcf = VCFFile.objects.filter(sequencing_run__legacy=False)
recent_vcf_from_runs = VCFFromSequencingRun.objects.filter(sequencing_run__legacy=False)
if recent_combo.exists() or recent_vcf.exists() or recent_vcf_from_runs.exists():
# TODO: Reload from actual VCF files
for seqauto_vcf in itertools.chain(recent_combo, recent_vcf):
try:
seqauto_vcf.variant_caller = _get_variant_caller_from_vcf_file(VariantCaller, seqauto_vcf.path)
seqauto_vcf.save()
except Exception as e:
print(e)
for run_vcf in recent_vcf_from_runs:
try:
vcf_path = run_vcf.vcf.uploadedvcf.uploaded_file.path
run_vcf.variant_caller = _get_variant_caller_from_vcf_file(VariantCaller, vcf_path)
run_vcf.save()
except Exception as e:
print(e)
class Migration(migrations.Migration):
dependencies = [
('seqauto', '0017_auto_20210406_1151'),
]
operations = [
migrations.RunPython(_one_off_seqauto_variant_caller),
]
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Ironic test utilities."""
from ironic.common import states
from ironic.openstack.common import jsonutils as json
fake_info = {"foo": "bar"}
ipmi_info = json.dumps(
{
'ipmi': {
"address": "1.2.3.4",
"username": "admin",
"password": "fake",
}
})
ssh_info = json.dumps(
{
'ssh': {
"address": "1.2.3.4",
"username": "admin",
"password": "fake",
"port": 22,
"virt_type": "vbox",
"key_filename": "/not/real/file",
}
})
pxe_info = json.dumps(
{
'pxe': {
"instance_name": "fake_instance_name",
"image_source": "glance://image_uuid",
"deploy_kernel": "glance://deploy_kernel_uuid",
"deploy_ramdisk": "glance://deploy_ramdisk_uuid",
"root_gb": 100,
}
})
pxe_ssh_info = json.dumps(
dict(json.loads(pxe_info), **json.loads(ssh_info)))
pxe_ipmi_info = json.dumps(
dict(json.loads(pxe_info), **json.loads(ipmi_info)))
properties = {
"cpu_arch": "x86_64",
"cpu_num": "8",
"storage": "1024",
"memory": "4096",
}
def get_test_node(**kw):
node = {
'id': kw.get('id', 123),
'uuid': kw.get('uuid', '1be26c0b-03f2-4d2e-ae87-c02d7f33c123'),
'chassis_id': kw.get('chassis_id', 42),
'power_state': kw.get('power_state', states.NOSTATE),
'target_power_state': kw.get('target_power_state', states.NOSTATE),
'provision_state': kw.get('provision_state', states.NOSTATE),
'target_provision_state': kw.get('target_provision_state',
states.NOSTATE),
'instance_uuid': kw.get('instance_uuid',
'8227348d-5f1d-4488-aad1-7c92b2d42504'),
'driver': kw.get('driver', 'fake'),
'driver_info': kw.get('driver_info', fake_info),
'properties': kw.get('properties', properties),
'reservation': None,
'extra': kw.get('extra', {}),
'updated_at': None,
'created_at': None,
}
return node
def get_test_port(**kw):
port = {
'id': kw.get('id', 987),
'uuid': kw.get('uuid', '1be26c0b-03f2-4d2e-ae87-c02d7f33c781'),
'node_id': kw.get('node_id', 123),
'address': kw.get('address', '52:54:00:cf:2d:31'),
'extra': kw.get('extra', {}),
'created_at': kw.get('created_at'),
'updated_at': kw.get('updated_at'),
}
return port
def get_test_chassis(**kw):
chassis = {
'id': kw.get('id', 42),
'uuid': kw.get('uuid', 'e74c40e0-d825-11e2-a28f-0800200c9a66'),
'extra': kw.get('extra', {}),
'description': kw.get('description', 'data-center-1-chassis'),
'created_at': kw.get('created_at'),
'updated_at': kw.get('updated_at'),
}
return chassis
|
#!/usr/bin/env python3
from flask import Flask, redirect, render_template, request
from mini_cactpot import Result
app = Flask(__name__)
@app.context_processor
def ticket_payout():
"""
Make the values of payouts available to all templates
"""
values = [10000, 36, 720, 360, 80, 252, 108, 72, 54, 180,
72, 180, 119, 36, 306, 1080, 144, 1800, 3600]
return dict(payout=values)
@app.route("/mini-cactpot", methods=["GET"])
def index():
return render_template("index.html")
@app.route("/mini-cactpot/result", methods=["POST"])
def result():
"""
Get user inputs from POST and calculate results
"""
# Get user inputs from form and convert to list of ints
inputs = request.form.getlist("user_inputs")
inputs = [int(i) for i in inputs]
    # Redirect to index unless exactly 4 numbers were chosen (the board has 9 cells, so 5 entries must remain 0)
if inputs.count(0) != 5:
return redirect("/mini-cactpot")
# Check if user repeats numbers
for number in inputs:
if number:
if inputs.count(number) != 1:
return redirect("/mini-cactpot")
# Dictionary of lines payout and suggestion
results = Result(inputs)
results = results.calculate()
# Line number and cell ids for highlighting payout
keys = [i for i in results["suggestion"][0].keys()]
values = [i for j in results["suggestion"][0].values() for i in j]
return render_template("result.html", results=results, inputs=inputs, keys=keys, values=values)
|
import gym
import model
import exp_replay
import random
from wrappers import *
import numpy as np
import torch
import torch.nn.functional as F
from collections import namedtuple
from tqdm import tqdm
from itertools import count
class Agent():
def __init__(self,action_space,frame_history_len,env,device,buffer_size,\
epsilon_start,epsilon_decay,epsilon_min,update_every,batch_size):
self.action_space = action_space
self.frame_history_len = frame_history_len
self.env = env
self.device = device
self.policy_qnet = model.DQN(self.frame_history_len,84,84,6,1e-4).to(self.device)
self.target_qnet = model.DQN(self.frame_history_len,84,84,6,1e-4).to(self.device)
self.target_qnet.load_state_dict(self.policy_qnet.state_dict())
self.optimizer = self.policy_qnet.optimizer
self.epsilon_start = epsilon_start
self.epsilon_decay = epsilon_decay
self.epsilon_min = epsilon_min
self.batch_size = batch_size
self.buffer = exp_replay.ExperienceReplay(buffer_size)
self.update_every = update_every
def epsilon_greedy_act(self, state, eps=0.0):
#-----epsilon greedy-------------------------------------
rnd = random.random()
if rnd < eps:
return np.random.randint(self.action_space)
else:
            # set the network into evaluation mode
self.policy_qnet.eval()
with torch.no_grad():
action_values = self.policy_qnet(state.to(self.device))
#----choose best action
action = np.argmax(action_values.cpu().data.numpy())
            # switch the network back to training mode
self.policy_qnet.train()
return action
def torchify_state_dim(self,obs):
state = np.array(obs)
state = state.transpose((2, 0, 1))
state = torch.from_numpy(state).float()
return state.unsqueeze(0)
def update_gradients(self):
gamma = 0.99
if self.buffer.__len__()<self.batch_size:
return
batch = self.buffer.sample(self.batch_size)
#Preparing batch
experience = namedtuple('experience',
('state', 'action', 'next_state', 'reward','done'))
batch = experience(*zip(*batch))
        states = torch.cat(batch.state).to(self.device)
        actions = list(map(lambda a: torch.tensor([[a]], device=self.device), batch.action))
        actions = torch.cat(actions)
        rewards = torch.cat(batch.reward).to(self.device)
        next_states = torch.cat(batch.next_state).to(self.device)
        dones = list(map(lambda a: torch.tensor([a], device=self.device), batch.done))
        dones = torch.cat(dones)
        # Target = r + gamma * max_a Q_target(next_state, a); terminal transitions are masked out
        action_values = self.target_qnet(next_states).detach()
        max_action_values = action_values.max(1)[0].detach()
        target = rewards + gamma * max_action_values * (1 - dones)
        current = self.policy_qnet(states).gather(1, actions)
        target = target.reshape(self.batch_size, 1)
        loss = F.smooth_l1_loss(current, target)
self.optimizer.zero_grad()
loss.backward()
for param in self.policy_qnet.parameters():
param.grad.data.clamp_(-1.2, 1.2)
self.optimizer.step()
def train(self,max_epsiodes):
global steps
eps = self.epsilon_start
for episode in tqdm(range(max_epsiodes)):
obs = self.env.reset()
state = self.torchify_state_dim(obs)
total_reward = 0
for t in count():
action = self.epsilon_greedy_act(state,eps)
next_state,reward,done,_ = self.env.step(action)
if done:
next_state = torch.zeros(state.size())
done_flag=1
else:
next_state = self.torchify_state_dim(next_state)
done_flag=0
total_reward += reward
reward = torch.tensor([reward],device = self.device)
self.buffer.add(state,action,next_state,reward.to('cpu'),done_flag)
eps = max(eps * self.epsilon_decay, self.epsilon_min)
steps += 1
#print(self.buffer.__len__())
if steps > 10000:
self.update_gradients()
if steps%self.update_every==0:
self.target_qnet.load_state_dict(self.policy_qnet.state_dict())
state = next_state
if done:
break
if episode%10 == 0:
print("Episode no "+str(episode)+" reward = "+str(total_reward))
if __name__ == "__main__":
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
action_space = 4
frame_history_len = 4
env = gym.make("PongNoFrameskip-v4")
env = make_env(env)
steps = 0
buffer_size = 100000
epsilon_start = 1
epsilon_decay = 0.99
epsilon_min = 0.01
update_every = 1000
batch_size = 32
myagent = Agent(action_space,frame_history_len,env,device,buffer_size,\
epsilon_start,epsilon_decay,epsilon_min,update_every,batch_size)
myagent.train(180)
torch.save(myagent.policy_qnet, "saved_model")
env.close()
|
import os
import dj_database_url
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = os.getenv('SECRET_KEY', 'development_key')
DEBUG = True
ALLOWED_HOSTS = os.getenv('ALLOWED_HOSTS', '*').split(':')
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.postgres',
'game.apps.GameConfig',
'storages',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'questor.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'questor.wsgi.application'
DATABASES = {
'default': dj_database_url.config(conn_max_age=600)
}
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
AWS_STORAGE_BUCKET_NAME = 'projwoj'
AWS_S3_REGION_NAME = 'eu-central-1' # e.g. us-east-2
AWS_ACCESS_KEY_ID = os.getenv('AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = os.getenv('AWS_SECRET_ACCESS_KEY')
AWS_S3_CUSTOM_DOMAIN = f'{AWS_STORAGE_BUCKET_NAME}.s3.amazonaws.com'
STATICFILES_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'
|
#
# Copyright (c) 2020 by Delphix. All rights reserved.
#
#######################################################################################################################
"""
This module contains common functionality that is used across the plugin, such as bucket size calculation,
reading a file, writing data into a file, and the operations required in discovery. It also enables colorful
logging in the debug log. It is recommended to view the logs with the tail command so the running
command/output/exception/debug messages can be easily distinguished.
"""
#######################################################################################################################
import json
import logging
import os
import os.path
import random
import re
import time
from datetime import datetime
import db_commands.constants
from db_commands.commands import CommandFactory
from db_commands.constants import DEFAULT_CB_BIN_PATH
from internal_exceptions.plugin_exceptions import RepositoryDiscoveryError, SourceConfigDiscoveryError, FileIOError, \
UnmountFileSystemError
from utils import utilities
# Global logger object for this file
logger = logging.getLogger(__name__)
def find_binary_path(source_connection):
"""
:param source_connection: Connection for the source environment
:return: Bin path defined in environment variable '$COUCHBASE_PATH'. If it is not defined then "/opt/couchbase/bin"
"""
logger.debug("Finding Binary Path...")
binary_paths, std_err, exit_code = utilities.execute_bash(source_connection, CommandFactory.find_binary_path())
if binary_paths == "":
logger.debug("Please verify COUCHBASE_PATH is defined. Checking at default location {}".format(DEFAULT_CB_BIN_PATH))
binary_paths = DEFAULT_CB_BIN_PATH
else:
logger.debug("List of couchbase path found are {}".format(binary_paths.split(';')))
logger.debug("Finding Binary: {}".format(binary_paths))
return binary_paths
def find_shell_path(source_connection, binary_path):
"""
:param source_connection:Connection for the source environment
:param binary_path: Couchbase binary path
:return:path of cluster management utility: {couchbase-cli}
"""
logger.debug("Finding Shell Path...")
shell_path, std_err, exit_code = utilities.execute_bash(source_connection,
CommandFactory.find_shell_path(binary_path))
if shell_path == "":
message = "Shell path {}/couchbase-cli not found".format(binary_path)
raise RepositoryDiscoveryError(message)
return shell_path
def find_install_path(source_connection, binary_path):
"""
:param source_connection:Connection for the source environment
:param binary_path: Couchbase binary path
:return: path of couchbase-server, through which daemon processes can start in background
"""
logger.debug("Finding install Path...")
install_path, std_err, exit_code = utilities.execute_bash(source_connection,
CommandFactory.find_install_path(binary_path))
if install_path == "":
message = "Install path {}/couchbase-server not found".format(binary_path)
raise RepositoryDiscoveryError(message)
else:
logger.debug("couchbase-server found in directory {}".format(install_path))
return install_path
def find_version(source_connection, install_path):
""" return the couchbase version installed on the host"""
cb_version, std_err, exit_code = utilities.execute_bash(source_connection,
CommandFactory.get_version(install_path))
version = re.search(r"\d.*$", cb_version).group()
logger.debug("Couchbase version installed {}".format(version))
return version
def is_instance_present_of_gosecrets(source_connection):
""" check couchbase server is running or not"""
instance, stderr, exit_code = utilities.execute_bash(source_connection, CommandFactory.get_process())
# return true if 'gosecrets' string is present in output of get_process
return "gosecrets" in instance
def get_data_directory(source_connection, repository):
couchbase_install_path = repository.cb_install_path
couchbase_binary_path = os.path.dirname(couchbase_install_path)
couchbase_base_dir = os.path.dirname(couchbase_binary_path)
filename = "{}/etc/couchbase/static_config".format(couchbase_base_dir)
static_config, stderr, exit_code = read_file(source_connection, filename)
if not re.search(r"(?<=path_config_datadir, \").*(?=\"}\.)", static_config):
message = "Cannot find data directory"
logger.debug(message)
raise SourceConfigDiscoveryError(message)
data_directory = re.search(r"(?<=path_config_datadir, \").*(?=\"}\.)", static_config).group()
logger.debug("data_directory is {} ".format(data_directory))
return data_directory
def get_base_directory_of_given_path(binary_path):
""" Return the base directory of given path """
path = os.path.split(binary_path)[0]
return path
def get_all_bucket_list_with_size(bucket_output, bucket=None):
""" Return bucket name with ramUsed( adjust ramused value ) from bucket_output"""
logger.debug("bucket_output: {}".format(bucket_output))
additional_buffer = 10
min_size = 104857600
all_bucket_list = ""
for line in bucket_output:
bucket_name = None
ram_size = 0
if line.find(':') == -1: # find the bucket name
all_bucket_list = all_bucket_list + line + ","
elif line.find("ramUsed") != -1: # find ramUsed row in output
ram_size = int(line.split(':')[1].strip())
            # Formula used: bucket_size/2 plus 10% of that half as additional buffer (see worked example after this function)
ram_size = (ram_size) / 2 + ((ram_size / 2) * additional_buffer // 100)
if ram_size < min_size:
ram_size = min_size
all_bucket_list = all_bucket_list + str(ram_size) + ":"
all_bucket_list = all_bucket_list.strip(":")
logger.debug("All bucket list is: {}".format(all_bucket_list))
return all_bucket_list.split(":")
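# Worked example of the sizing rule above (comments only, values are illustrative):
# for a bucket whose ramUsed is 1073741824 bytes (1 GiB),
#   half          = 1073741824 / 2          = 536870912
#   10% buffer    = (536870912 * 10) // 100 = 53687091
#   adjusted size = 536870912 + 53687091    = 590558003
# which is above the 104857600-byte floor, so 590558003 is what ends up in the list;
# any bucket whose adjusted size falls below the floor is bumped up to 104857600.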
def get_stg_all_bucket_list_with_ramquota_size(bucket_output):
""" Return bucket name with ramQuota from bucket_output. It will help in VDB creation as a reference value for
bucket
"""
logger.debug("bucket_output: {}".format(bucket_output))
all_bucket_list = ""
for line in bucket_output:
bucket_name = None
if line.find(':') == -1: # find the bucket name
all_bucket_list = all_bucket_list + line + ","
elif line.find("ramQuota") != -1: # find ramQuota row in output
ram_quota = int(line.split(':')[1].strip())
all_bucket_list = all_bucket_list + str(ram_quota) + ":"
all_bucket_list = all_bucket_list.strip(":")
logger.debug("All bucket list is: {}".format(all_bucket_list))
return all_bucket_list.split(":")
def filter_bucket_name_from_output(bucket_output):
""" Filter bucket name from bucket_output. Return list of bucket names present in bucket_output"""
    output = list(filter(lambda bucket: bucket.find(":") == -1, bucket_output))
logger.debug("Bucket list: {}".format(output))
return output
def get_bucket_name_with_size(bucket_output, bucket):
""" Return `bucket_name:ramUsed` as output from bucket_output string for bucket(passed in argument) """
output = get_all_bucket_list_with_size(bucket_output, bucket)
output = ":".join(output)
bucket_info = re.search(r"{},\d+".format(bucket), output).group()
logger.debug("For Bucket {} detail is : {}".format(bucket, bucket_info))
return bucket_info
def get_bucketlist_to_namesize_list(bucket_output, bucket_list):
""" Return `bucket_name:ramUsed` as output from bucket_output string for each bucket(passed in bucket_list) """
bucket_details = []
for name in bucket_list:
bucket_details.append(get_bucket_name_with_size(bucket_output, name))
logger.debug("Buckets: {} \n details : {}".format(bucket_list, bucket_details))
return bucket_details
def sleepForSecond(sec):
# Sleep/Pause the execution for given seconds
time.sleep(sec)
def current_time():
""" Return current time in format of %Y%m%d%H%M%S'"""
curr_time = datetime.now()
return curr_time.strftime('%Y%m%d%H%M%S')
def get_value_of_key_from_json(json_obj, key):
"""return the value of key in provided json object"""
value = json.loads(json_obj)[key]
return value
def write_file(connection, content, filename):
"""Add given data into passed filename"""
logger.debug("writing data {} in file {}".format(content,filename))
try:
utilities.execute_bash(connection, CommandFactory.write_file(data=content, filename=filename))
except Exception as e:
logger.debug("Failed to Write into file")
raise FileIOError("Failed to Write into file ")
def check_file_present(connection, config_file_path):
""" return True if file is present else return False"""
try:
stdout, stderr, exit_code = utilities.execute_bash(connection, CommandFactory.check_file(config_file_path))
if stdout == "Found":
logger.debug("file path exist {}".format(config_file_path))
return True
except Exception as e:
logger.debug("File path not exist {}".format(config_file_path))
return False
def check_dir_present(connection, dir):
""" return True if directory is present else return False"""
try:
stdout, stderr, exit_code = utilities.execute_bash(connection, CommandFactory.check_directory(dir))
if stdout == "Found":
logger.debug("dir path found {} ".format(dir))
return True
except Exception as err:
logger.debug("directory path is absent: {}".format(err.message))
return False
def read_file(connection, filename):
"""read the file content and return the content"""
logger.debug("Reading file {}".format(filename))
command = CommandFactory.read_file(filename)
stdout, stderr, exit_code = utilities.execute_bash(connection, command)
return [stdout, stderr, exit_code]
# delete file
def delete_file(connection, filename):
logger.debug("Deleting file {}".format(filename))
stdout, stderr, exit_code = utilities.execute_bash(connection, CommandFactory.delete_file(filename))
return [stdout, stderr, exit_code]
# To generate the snapshot id each time using random function
def get_snapshot_id():
return random.randint(100000000, 999999999)
def unmount_file_system(rx_connection, path):
""" unmount the file system which will use in cbbackup manager after post snapshot"""
try:
utilities.execute_bash(rx_connection, CommandFactory.unmount_file_system(path))
except Exception as err:
logger.debug("error here {}".format(err.message))
raise UnmountFileSystemError(err.message)
def get_bucket_size_in_MB(bucket_size, bkt_name_size):
""" convert bkt size into MB if current bucket_size is zero"""
bkt_size_mb = 0
if bucket_size > 0:
bkt_size_mb = bucket_size
else:
bkt_size_mb = int(bkt_name_size) // 1024 // 1024
logger.debug("bkt_size_mb : {}".format(bkt_size_mb))
return bkt_size_mb
def get_sync_lock_file_name(dsource_type, dsource_name):
sync_filename = db_commands.constants.LOCK_SYNC_OPERATION
if dsource_type == "XDCR":
striped_dsource_name = dsource_name.replace(" ", "")
sync_filename = str(striped_dsource_name) + str(sync_filename)
return sync_filename
|
import logging
from selenium import webdriver
import time, re
from selenium.webdriver.support.select import Select
import threading
# Apply a semaphore so only one scraping session runs at a time
sem = threading.Semaphore(1)
def login(browser):
with open("secret.txt", "r") as f:
line = f.readline()
loginId = line.split(':')[-1].replace('\n', '').strip()
line = f.readline()
loginPwd = line.split(':')[-1].replace('\n', '').strip()
    # Enter the ID
elem = browser.find_element_by_id("loginId")
elem.clear()
elem.send_keys(loginId)
    # Enter the password
elem = browser.find_element_by_id("loginPwd")
elem.clear()
elem.send_keys(loginPwd)
browser.find_element_by_class_name("btn").click()
def kw_scraping():
sem.acquire()
browser = webdriver.Chrome()
delay = 0.5
colors = ['#FF9900', '#FFFF99', '#CCFFCC', '#CCFFFF', '#99CCFF', '#CC99FF', '#FF99CC', '#FF99CC', '#666699', '#3366FF']
with open("secret.txt", "r") as f:
line = f.readline()
loginId = line.split(':')[-1].replace('\n', '').strip()
line = f.readline()
loginPwd = line.split(':')[-1].replace('\n', '').strip()
def login():
        # URL of the assignments page on the school website
url = "https://klas.kw.ac.kr/std/lis/evltn/TaskStdPage.do"
browser.get(url)
browser.maximize_window()
        # Enter the ID
elem = browser.find_element_by_id("loginId")
elem.clear()
elem.send_keys(loginId)
browser.implicitly_wait(delay)
        # Enter the password
elem = browser.find_element_by_id("loginPwd")
elem.clear()
elem.send_keys(loginPwd)
time.sleep(delay)
        # Click the login button
browser.find_element_by_class_name("btn").click()
browser.implicitly_wait(delay)
time.sleep(delay)
login()
elem = browser.find_element_by_xpath('//*[@id="appSelectSubj"]/div[2]/div/div[2]/select')
select = Select(elem)
browser.implicitly_wait(delay)
    # Create dicts to hold the assignment information
kw_homework = {}
SubjectAndColor = {}
subjectIndex = 0
for i in select.options:
# title
select.select_by_visible_text(i.text)
# table info print
table = browser.find_element_by_xpath('//*[@id="appModule"]/div/div[3]/table')
tbodys = table.find_elements_by_tag_name("tbody")
        # check whether the element exists
SubjectAndColor[i.text] = colors[subjectIndex]
kw_homework[i.text] = []
for tbody in tbodys:
try:
tr = tbody.find_element_by_tag_name('tr')
td = tr.find_elements_by_tag_name('td')
if td[1].text == '출제된 레포트가 없습니다.':
break
                # index 1 is the title, index 2 is the deadline
temp = td[2].text
# print("td[1] + td[2] : " + td[1].text + " + " + td[2].text)
split_str = temp.split(" ")
startDate = split_str[0] + "T" + split_str[1]
endDate = split_str[3] + "T" + split_str[4]
                # Store the related fields as a single string separated by the '///' delimiter
kw_homework[i.text].append(td[1].text + "///" + startDate + "///" + endDate + "///" + colors[subjectIndex])
# print(kw_homework[i.text])
except Exception as error:
break
subjectIndex += 1
browser.quit()
sem.release()
return kw_homework, SubjectAndColor
def goToUrl(url):
browser = webdriver.Chrome()
browser.get(url)
try:
login(browser)
except Exception as error:
return
def checkId():
try:
with open("secret.txt", "r") as f:
loginId = f.readline()
loginPwd = f.readline()
        # Validate the ID -> check that the student number is 10 digits long
studentNumCheck = re.compile(r'\d{10}')
findId = studentNumCheck.search(loginId)
if len(findId.group()) != 10:
return False
except Exception as error:
print(error)
return False
return True |
def const(a, b):
def result():
return a
def zilch(b):
return b
return result()
|
import time
import gym
import matplotlib.pyplot as plt
import torch
import numpy as np
from test_agents.PGAgent.agent import Agent as PGAgent
from test_agents.PGAgent_FCModel.agent import Agent as PGAgent_FCModel
from utils import save_plot
import wimblepong
# Make the environment
env = gym.make("WimblepongVisualSimpleAI-v0")
# Define the player
player_id = 1
# Set up the player here: a PGAgent running on the selected device
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
player = PGAgent(device)
def train(episodes_per_game=100, train_episodes=500000, render=False, resume=False):
if resume:
player.load_model()
print("Training for {} started!".format(player.get_name()))
win_ratio_history, average_win_ratio_history = [], []
wins = 0
start_time = time.time()
for episode_number in range(1, train_episodes+1):
done = False
obs1 = env.reset()
rew1 = 1
while not done:
if render:
env.render()
# Get action from the agent
action1, log_act_prob = player.get_action(obs1)
prev_obs1 = obs1
# Perform the action on the environment, get new state and reward
obs1, rew1, done, info = env.step(action1)
# Store action's outcome (so that the agent can improve its policy)
player.store_outcome(prev_obs1, log_act_prob, action1, rew1, done)
player.episode_finished(episode_number)
wins = wins + 1 if rew1 == 10 else wins
if episode_number % 5 == 0:
env.switch_sides()
if episode_number % episodes_per_game == 0:
win_ratio = int((wins / episodes_per_game) * 100)
print("Episode {} over. Win ratio: {}%".format(episode_number, win_ratio))
wins = 0
# Bookkeeping (mainly for generating plots)
win_ratio_history.append(win_ratio)
if episode_number > 100:
avg = np.mean(win_ratio_history[-100:])
else:
avg = np.mean(win_ratio_history)
average_win_ratio_history.append(avg)
if episode_number % 10000 == 0:
player.save_model()
save_plot(win_ratio_history, average_win_ratio_history, player.get_name())
elapsed_time_min = round((time.time() - start_time) / 60, 2)
print("Training finished in %f minutes." % elapsed_time_min)
save_plot(win_ratio_history, average_win_ratio_history, player.get_name())
if __name__ == "__main__":
train()
|
from __future__ import print_function, division
from keras.layers import Input, Dense, Reshape, Flatten, Dropout, concatenate
from keras.layers import BatchNormalization, Activation, ZeroPadding2D
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import UpSampling2D, Conv2D
from keras.models import Sequential, Model
from keras.optimizers import Adam
from keras import backend as K
from keras.layers import Lambda
from utils.glove_loader import GloveModel
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from utils.dataset_utils import load_dataset
from PIL import Image
import math
import pandas as pd
import sys
import time
# GPU setting
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
config = tf.ConfigProto(
gpu_options = tf.GPUOptions(
visible_device_list="2", # specify GPU number
allow_growth=True)
)
set_session(tf.Session(config=config))
class DCGAN():
def __init__(self, img_path, txt_path, glove_path):
# Input shape
self.img_rows = 64
self.img_cols = 64
self.channels = 3
self.img_shape = (self.img_rows, self.img_cols, self.channels)
self.latent_dim = 100
self.embedding_dim = 300
self.img_path = img_path
self.txt_path = txt_path
self.glove_path = glove_path
optimizer_g = Adam(0.0005, 0.5)
optimizer_d = Adam(0.00005, 0.5)
# Build the GloVe model
self.glove_model = GloveModel()
self.glove_model.load(data_dir_path=self.glove_path, embedding_dim=self.embedding_dim)
# Build and compile the discriminator
self.discriminator = self.build_discriminator()
self.discriminator.compile(loss='binary_crossentropy',
optimizer=optimizer_d,
metrics=['accuracy'])
# Build the generator
self.generator = self.build_generator()
# The generator takes noise as input and generates imgs
z = Input(shape=(self.latent_dim,))
cond_input = Input(shape=(self.embedding_dim,))
img = self.generator([z, cond_input])
# For the combined model we will only train the generator
self.discriminator.trainable = False
# The discriminator takes generated images as input and determines validity
valid = self.discriminator([img, cond_input])
# The combined model (stacked generator and discriminator)
# Trains the generator to fool the discriminator
self.combined = Model([z, cond_input], valid)
self.combined.compile(loss='binary_crossentropy', optimizer=optimizer_g)
def build_generator(self):
generator_input = Input(shape=(self.latent_dim, ), name="g_input")
cond_input = Input(shape=(self.embedding_dim, ), name="cond_g_input")
cond_output = Dense(100)(cond_input)
G = concatenate([generator_input, cond_output])
G = Dense(256 * 8 * 8, activation="relu")(G)
G = Reshape((8, 8, 256))(G)
G = UpSampling2D()(G)
G = Conv2D(256, kernel_size=3, padding="same")(G)
G = BatchNormalization(momentum=0.8)(G)
G = Activation("relu")(G)
G = UpSampling2D()(G)
G = Conv2D(128, kernel_size=3, padding="same")(G)
G = BatchNormalization(momentum=0.8)(G)
G = Activation("relu")(G)
G = UpSampling2D()(G)
G = Conv2D(64, kernel_size=3, padding="same")(G)
G = BatchNormalization(momentum=0.8)(G)
G = Activation("relu")(G)
G = Conv2D(self.channels, kernel_size=3, padding="same")(G)
generator_output = Activation("tanh")(G)
generator = Model([generator_input, cond_input], generator_output)
generator.summary()
return generator
def build_discriminator(self):
discriminator_input = Input(shape=self.img_shape, name="d_input")
cond_input = Input(shape=(self.embedding_dim, ), name="cond_d_input")
D = Conv2D(64, kernel_size=3, strides=2, padding="same")(discriminator_input)
D = LeakyReLU(alpha=0.2)(D)
D = Dropout(0.25)(D)
D = Conv2D(128, kernel_size=3, strides=2, padding="same")(D)
D = ZeroPadding2D(padding=((0,1),(0,1)))(D)
D = BatchNormalization(momentum=0.8)(D)
D = LeakyReLU(alpha=0.2)(D)
D = Dropout(0.25)(D)
D = Conv2D(256, kernel_size=3, strides=1, padding="same")(D)
D = BatchNormalization(momentum=0.8)(D)
D = LeakyReLU(alpha=0.2)(D)
D = Dropout(0.25)(D)
D = Conv2D(512, kernel_size=3, strides=2, padding="same")(D)
D = BatchNormalization(momentum=0.8)(D)
D = LeakyReLU(alpha=0.2)(D)
cond_d_hidden = Dense(100)(cond_input)
cond_d_hidden = Reshape((1, 1, 100))(cond_d_hidden)
cond_d_output = Lambda(lambda x: K.tile(x, [1, 9, 9, 1]))(cond_d_hidden)
D = concatenate([D, cond_d_output], axis=-1)
D = Conv2D(512, kernel_size=3, strides=1, padding='same')(D)
D = BatchNormalization(momentum=0.8)(D)
D = LeakyReLU(alpha=0.1)(D)
D = Dropout(0.25)(D)
D = Flatten()(D)
discriminator_output = Dense(1, activation='sigmoid')(D)
discriminator = Model([discriminator_input, cond_input], discriminator_output)
discriminator.summary()
return discriminator
def train(self, epochs, batch_size=26, save_interval=20):
# load dataset
X_train, Captions, X_test, Captions_test, Labels = load_dataset(self.img_path, self.txt_path, self.img_shape)
caption_list_train = []
caption_list_test = []
for caption in Captions:
caption_list_train.append([str(caption)])
for caption in Captions_test:
caption_list_test.append([str(caption)])
df = pd.DataFrame(caption_list_train, columns=['caption'])
df.to_csv('./saved_model/caption_train.csv')
df = pd.DataFrame(caption_list_test, columns=['caption'])
df.to_csv('./saved_model/caption_test.csv')
# Adversarial ground truths
valid = np.ones((batch_size, 1))
fake = np.zeros((batch_size, 1))
batch_count = int(X_train.shape[0] / batch_size)
history = []
history_test = []
for epoch in range(epochs):
for batch_index in range(batch_count):
# ---------------------
# Train Discriminator
# ---------------------
# Select a random half of images
# idx = np.random.randint(0, X_train.shape[0], batch_size)
imgs = X_train[batch_index * batch_size:(batch_index + 1) * batch_size]
texts_input = Captions[batch_index * batch_size:(batch_index + 1) * batch_size]
texts = self.glove_model.encode_docs(texts_input)
# Sample noise and generate a batch of new images
noise = np.random.normal(0, 1, (batch_size, self.latent_dim))
gen_imgs = self.generator.predict([noise, texts])
# Train the discriminator (real classified as ones and generated as zeros)
start = time.time()
d_loss_real = self.discriminator.train_on_batch([imgs, texts], valid)
d_loss_fake = self.discriminator.train_on_batch([gen_imgs, texts], fake)
batch_time_d = time.time() - start
d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)
# ---------------------
# Train Generator
# ---------------------
# Train the generator (wants discriminator to mistake images as real)
start = time.time()
g_loss = self.combined.train_on_batch([noise, texts], valid)
batch_time_g = time.time() - start
# Plot the progress
batch_time = batch_time_d + batch_time_g
print ("%d-%d [D loss: %f, acc.: %.2f%%] [G loss: %f] [Time: %f]" % (epoch, batch_index, d_loss[0], 100*d_loss[1], g_loss, batch_time))
history.append([epoch, batch_index, d_loss[0], 100*d_loss[1], g_loss, batch_time])
# Test the model
texts_test = self.glove_model.encode_docs(Captions_test)
noise_test = np.random.normal(0, 1, (batch_size, self.latent_dim))
gen_imgs_test = self.generator.predict([noise_test, texts_test])
start = time.time()
d_loss_real_test = self.discriminator.test_on_batch([X_test, texts_test], valid)
d_loss_fake_test = self.discriminator.test_on_batch([gen_imgs_test, texts_test], fake)
batch_time_d_test = time.time() - start
d_loss_test = 0.5 * np.add(d_loss_real_test, d_loss_fake_test)
start = time.time()
g_loss_test = self.combined.test_on_batch([noise_test, texts_test], valid)
batch_time_g_test = time.time() - start
# Plot the test progress
batch_time_test = batch_time_d_test + batch_time_g_test
print ("%d (test) [D loss: %f, acc.: %.2f%%] [G loss: %f] [Time: %f]" % (epoch, d_loss_test[0], 100*d_loss_test[1], g_loss_test, batch_time_test))
history_test.append([epoch, d_loss_test[0], 100*d_loss_test[1], g_loss_test, batch_time_test])
# If at save interval => save generated image samples & training weights
if epoch % save_interval == 0:
idx = np.random.randint(0, X_train.shape[0], batch_size)
texts_input = Captions[idx]
texts = self.glove_model.encode_docs(texts_input)
self.save_imgs(epoch, texts)
self.generator.save_weights(filepath='./saved_model/generator_weights_' + str(epoch) + '.h5')
self.discriminator.save_weights(filepath='./saved_model/discriminator_weights_' + str(epoch) + '.h5')
# save weights & history
df_train = pd.DataFrame(history, columns=['epoch', 'batch', 'd_loss', 'acc', 'g_loss', 'time[sec]'])
df_train.to_csv('./saved_model/history.csv')
df_test = pd.DataFrame(history_test, columns=['epoch', 'd_loss', 'acc', 'g_loss', 'time[sec]'])
df_test.to_csv('./saved_model/history_test.csv')
self.generator.save_weights(filepath='./saved_model/generator_weights.h5')
self.discriminator.save_weights(filepath='./saved_model/discriminator_weights.h5')
def save_imgs(self, epoch, texts, batch_size=26):
noise = np.random.normal(0, 1, (batch_size, self.latent_dim))
if batch_size == 260:
texts = self.glove_model.encode_docs(texts)
gen_imgs = self.generator.predict([noise, texts])
gen_img = combine_normalized_images(gen_imgs)
img_from_normalized_img(gen_img).save("images/snapshot/%d.png" % epoch)
def load_model(self, gen_path='./saved_model/generator_weights.h5', dis_path='./saved_model/discriminator_weights.h5'):
"""
Function: load_model
This function loads a pre-trained model.
        Input: gen_path, dis_path: paths to the generator and discriminator weight files.
Output: None (pre-trained model will be loaded.)
"""
### load weights
self.generator.load_weights(gen_path)
self.discriminator.load_weights(dis_path)
def generate_image_from_text(self, text, flag=True):
### prepare an empty array
noise = np.zeros(shape=(1, self.latent_dim))
encoded_text = np.zeros(shape=(1, self.embedding_dim))
### generate sample for input data
encoded_text[0, :] = self.glove_model.encode_doc(text)
noise[0, :] = np.random.uniform(0, 1, self.latent_dim)
### predict and generate an image
generated_images = self.generator.predict([noise, encoded_text])
generated_image = generated_images[0]
        if flag is True:
            generated_image = generated_image * 127.5 + 127.5
            return Image.fromarray(generated_image.astype(np.uint8))
        return generated_image
def combine_normalized_images(generated_images):
num = generated_images.shape[0]
width = int(math.sqrt(num))
height = int(math.ceil(float(num) / width))
shape = generated_images.shape[1:]
image = np.zeros((height * shape[0], width * shape[1], shape[2]),
dtype=generated_images.dtype)
for index, img in enumerate(generated_images):
i = int(index / width)
j = index % width
image[i * shape[0]:(i + 1) * shape[0], j * shape[1]:(j + 1) * shape[1], :] = img
return image
def img_from_normalized_img(normalized_img):
image = normalized_img * 127.5 + 127.5
return Image.fromarray(image.astype(np.uint8))
def generate_mode():
img_size = (64, 64, 3)
img_path = './emoji/edited/emoji_64x64/'
txt_path = './emoji/description/detailed'
glove_path = './utils/glove.6B.300d.txt'
dcgan = DCGAN(img_path, txt_path, glove_path)
X_train, Captions, _, _, _ = load_dataset(img_path, txt_path, img_size, split_rate=0.0)
print('Loading model...')
dcgan.load_model()
iteration = 0
caption_list = []
print('Generating images...')
for image, caption in zip(X_train, Captions):
edited_image = image * 127.5 + 127.5
edited_image = Image.fromarray(edited_image.astype(np.uint8))
edited_image.save('./images/original/' + str(iteration) + '.png')
generated_image = dcgan.generate_image_from_text(caption)
generated_image.save('./images/output/' + str(iteration) + '.png')
caption_list.append([str(caption)])
iteration += 1
df = pd.DataFrame(caption_list, columns=['caption'])
df.to_csv('./images/caption.csv')
# plot all emojis
dcgan.save_imgs(epoch=5000, texts=Captions, batch_size=260)
print('Done!')
def train_mode():
img_path = './emoji/edited/emoji_64x64/'
txt_path = './emoji/description/detailed'
glove_path = './utils/glove.6B.300d.txt'
dcgan = DCGAN(img_path, txt_path, glove_path)
dcgan.train(epochs=5000, batch_size=26, save_interval=50)
if __name__ == '__main__':
if len(sys.argv) == 2:
if sys.argv[1] == '1':
generate_mode()
elif sys.argv[1] == '0':
train_mode()
else:
print("Unexpected Input Value!")
|
import csv
import json
import re
dataRows = []
headerRow = []
with open('../data/8_Parents_Meet.csv', newline='', encoding='utf-8') as csvfile:
    reader = csv.reader(csvfile, delimiter=',', quotechar='"', skipinitialspace=True)
    isHeader = True
    for row in reader:
        if not isHeader:
            age = 'young'
            if row[2] == '26 or older.':
                age = 'old'
            dataRows.append({'parents_meet': row[1], 'age': age, 'current_meet': row[3]})
        else:
            headerRow = row
            isHeader = False
print(dataRows[0])
|
from setuptools import setup, find_packages
setup(
name="iris",
version="1.0",
py_modules=['iris'],
packages=find_packages(),
python_requires='>=3',
install_requires=['click', 'newspaper3k', 'twython'],
entry_points='''
[console_scripts]
iris=iris:cli
''',
)
|
# -*- coding: utf-8 -*-
# Copyright (c) 2021, Silvio Peroni <[email protected]>
#
# Permission to use, copy, modify, and/or distribute this software for any purpose
# with or without fee is hereby granted, provided that the above copyright notice
# and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT,
# OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
# DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
# SOFTWARE.
def f(mat, name):
name_l = list(name)
uni = set()
for i in mat:
if i < len(name):
uni.add(i)
if len(uni) % 2 > 0:
uni.remove(0)
sl = list()
for i in uni:
pos = 0
for j in sl:
if j < i:
pos = pos + 1
sl.insert(pos, i)
sl_last = len(sl) - 1
for i in range(len(sl) // 2):
s = sl[i]
e = sl[sl_last]
tmp = name_l[s]
name_l[s] = name_l[e]
name_l[e] = tmp
sl_last = sl_last - 1
return "".join(name_l)
my_fn = input("Please provide your full name: ").strip().lower()
my_mat_l = [int(i) for i in input("Please provide your matriculation number: ").strip()]
print("Result:", f(my_mat_l, my_fn))
|
import torch
import torch.nn as nn
import torch.nn.functional as F
def conv3x3(in_planes, out_planes, stride=1):
"3x3 convolution with padding"
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=True)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
def __init__(self, inplanes, planes, stride=1, expansion=4):
super(Bottleneck, self).__init__()
planes = int(planes / expansion)
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=True)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=True)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * expansion, kernel_size=1, bias=True)
self.bn3 = nn.BatchNorm2d(planes * expansion)
self.relu = nn.ReLU(inplace=True)
self.stride = stride
self.expansion = expansion
if inplanes != planes * self.expansion:
self.downsample = nn.Sequential(
nn.Conv2d(inplanes, planes * self.expansion,
kernel_size=1, stride=stride, bias=True),
nn.BatchNorm2d(planes * self.expansion),
)
else:
self.downsample = None
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
def get_Bottleneck(in_c, out_c, stride):
return Bottleneck(in_c, out_c, stride=stride)
def get_BasicBlock(in_c, out_c, stride):
return BasicBlock(in_c, out_c, stride=stride)
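# Hedged smoke-test sketch: shows the expected tensor shapes for both blocks. The input
# sizes below are arbitrary illustration values, and the block only runs when this file
# is executed directly.
if __name__ == "__main__":
    x = torch.randn(1, 64, 32, 32)
    basic = get_BasicBlock(64, 64, stride=1)    # identity shortcut, shape preserved
    print(basic(x).shape)                       # torch.Size([1, 64, 32, 32])
    bottle = get_Bottleneck(64, 128, stride=2)  # 1x1 downsample shortcut, spatial size halved
    print(bottle(x).shape)                      # torch.Size([1, 128, 16, 16])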
|
import numbers
import array
real_types = [numbers.Real]
int_types = [numbers.Integral]
complex_types = [numbers.Complex]
iterable_types = [set, list, tuple, array.array]
try:
import numpy
except ImportError:
pass
else:
real_types.extend([numpy.float32, numpy.float64])
int_types.extend([numpy.int32, numpy.int64])
    complex_types.extend([numpy.complex128])
iterable_types.append(numpy.ndarray)
# use these with isinstance to test for various types that include builtins
# and numpy types (if numpy is available)
complex_or_real_types = tuple(real_types+complex_types)
real_types = tuple(real_types)
int_types = tuple(int_types)
complex_types = tuple(complex_types)
iterable_types = tuple(iterable_types)
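# Hedged, self-contained check of the intended usage: these tuples are built to be passed
# straight to isinstance(). The sample values are arbitrary and the block only runs when
# this module is executed directly.
if __name__ == '__main__':
    assert isinstance(3.0, real_types)
    assert isinstance(3, int_types)
    assert isinstance(3 + 4j, complex_types)
    assert isinstance([1, 2, 3], iterable_types)
    print("type tuples behave as expected")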
|
#!/usr/bin/env python2.7
'''A simple test for :class:`PVPositioner`'''
import time
import epics
import config
from ophyd import (PVPositioner, PVPositionerPC)
from ophyd.signal import (EpicsSignal, EpicsSignalRO)
from ophyd.device import (Component as C)
logger = None
def put_complete_test():
logger.info('--> PV Positioner, using put completion and a DONE pv')
class MyPositioner(PVPositionerPC):
'''PV positioner, put completion with a done pv'''
setpoint = C(EpicsSignal, '.VAL')
readback = C(EpicsSignalRO, '.RBV')
done = C(EpicsSignalRO, '.MOVN')
done_value = 0
pos = MyPositioner(config.motor_recs[0], name='mypos_pc_done')
pos.wait_for_connection()
high_lim = pos.setpoint.high_limit
try:
pos.check_value(high_lim + 1)
except ValueError as ex:
logger.info('Check value for single failed, as expected (%s)', ex)
else:
print('high lim is %f' % high_lim)
raise ValueError('check_value should have failed')
stat = pos.move(1, wait=False)
logger.info('--> post-move request, moving=%s', pos.moving)
while not stat.done:
logger.info('--> moving... %s error=%s', stat, stat.error)
time.sleep(0.1)
pos.move(-1, wait=True)
logger.info('--> synchronous move request, moving=%s', pos.moving)
logger.info('--> PV Positioner, using put completion and no DONE pv')
# PV positioner, put completion, no done pv
class MyPositioner(PVPositionerPC):
        '''PV positioner, put completion, no done pv'''
setpoint = C(EpicsSignal, '.VAL')
readback = C(EpicsSignalRO, '.RBV')
pos = MyPositioner(config.motor_recs[0], name='mypos_pc_nodone')
stat = pos.move(2, wait=False)
logger.info('--> post-move request, moving=%s', pos.moving)
while not stat.done:
logger.info('--> moving... %s', stat)
time.sleep(0.1)
pos.move(0, wait=True)
logger.info('--> synchronous move request, moving=%s', pos.moving)
def callback(sub_type=None, timestamp=None, value=None, **kwargs):
logger.info('[callback] [%s] (type=%s) value=%s', timestamp, sub_type,
value)
def done_moving(**kwargs):
logger.info('Done moving %s', kwargs)
def test():
global logger
loggers = ('ophyd',
)
config.setup_loggers(loggers)
logger = config.logger
fm = config.fake_motors[0]
# ensure we start at 0 for this simple test
epics.caput(fm['setpoint'], 0)
epics.caput(fm['actuate'], 1)
time.sleep(2)
if 0:
pos = PVPositioner(fm['setpoint'],
readback=fm['readback'],
act=fm['actuate'], act_val=1,
stop=fm['stop'], stop_val=1,
done=fm['moving'], done_val=1,
put_complete=False,
)
pos.subscribe(callback, event_type=pos.SUB_DONE)
pos.subscribe(callback, event_type=pos.SUB_READBACK)
logger.info('---- test #1 ----')
logger.info('--> move to 1')
pos.move(1)
logger.info('--> move to 0')
pos.move(0)
logger.info('---- test #2 ----')
logger.info('--> move to 1')
pos.move(1, wait=False)
time.sleep(0.5)
logger.info('--> stop')
pos.stop()
logger.info('--> sleep')
time.sleep(1)
logger.info('--> move to 0')
pos.move(0, wait=False, moved_cb=done_moving)
logger.info('--> post-move request, moving=%s', pos.moving)
time.sleep(2)
# m2.move(1)
put_complete_test()
if __name__ == '__main__':
test()
|
import logging
import os
import shutil
import uuid
from subprocess import CalledProcessError
from typing import List, Iterable
from django.conf import settings
from django.db import models
from django.db.models import CASCADE, Q
from django.db.models.signals import pre_delete
from django.dispatch import receiver
from django.utils.text import slugify
from django_extensions.db.models import TimeStampedModel
from model_utils.managers import InheritanceManager
from library.utils import execute_cmd
from patients.models_enums import Sex
from pedigree.ped.export_ped import write_unrelated_ped, write_trio_ped
from snpdb.models import Sample, VCF, Cohort, Trio, SuperPopulationCode, ImportStatus
from snpdb.models.models_enums import ProcessingStatus
class AbstractSomalierModel(TimeStampedModel):
status = models.CharField(max_length=1, choices=ProcessingStatus.choices, default=ProcessingStatus.CREATED)
error_exception = models.TextField(null=True, blank=True)
class Meta:
abstract = True
def get_samples(self) -> Iterable[Sample]:
raise NotImplementedError()
def get_sample_somalier_filenames(self) -> List[str]:
return [AbstractSomalierModel.sample_filename(s) for s in self.get_samples()]
def execute(self, command: List[str], **kwargs):
""" Executes code and handles saving errors """
cmd = " ".join(command)
logging.info('About to call %s', cmd)
self.status = ProcessingStatus.PROCESSING
self.save()
return_code, stdout, stderr = execute_cmd(command, **kwargs)
if return_code != 0:
self.error_exception = f"return_code: {return_code}. stdout: {stdout}, stderr: {stderr}"
self.status = ProcessingStatus.ERROR
else:
self.status = ProcessingStatus.SUCCESS
self.save()
if return_code != 0:
raise CalledProcessError(returncode=return_code, cmd=cmd, output=self.error_exception)
@staticmethod
def sample_name(sample: Sample):
# Add PK as suffix so they're all unique
return f"{slugify(sample.vcf_sample_name)}_{sample.pk}"
@staticmethod
def sample_filename(sample: Sample):
vcf_dir = sample.vcf.somaliervcfextract.get_somalier_dir()
return os.path.join(vcf_dir, AbstractSomalierModel.sample_name(sample) + ".somalier")
@staticmethod
def sample_name_to_id(sample_name: str):
""" Sample ID is stored at the end """
return sample_name.rsplit("_", 1)[-1]
@staticmethod
def media_url(file_path):
# Need to use a slash, so that later joins don't have absolute path
media_root_with_slash = os.path.join(settings.MEDIA_ROOT, "")
if not file_path.startswith(media_root_with_slash):
raise ValueError(f"'{file_path}' must start with MEDIA_ROOT: {media_root_with_slash}")
return os.path.join(settings.MEDIA_URL, file_path[len(media_root_with_slash):])
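    # Illustrative example (assumed settings values, not real configuration): with
    # MEDIA_ROOT = "/data/media" and MEDIA_URL = "/media/",
    #   media_url("/data/media/somalier/ancestry/report.html") -> "/media/somalier/ancestry/report.html"
    # The MEDIA_ROOT prefix (including the trailing slash) must be stripped first because
    # os.path.join discards everything before an absolute second argument.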
class SomalierVCFExtract(AbstractSomalierModel):
vcf = models.OneToOneField(VCF, on_delete=CASCADE)
def get_somalier_dir(self):
cfg = SomalierConfig()
return os.path.join(cfg["vcf_base_dir"], str(self.vcf.pk))
def get_samples(self) -> Iterable[Sample]:
return self.vcf.sample_set.filter(no_dna_control=False).order_by("pk")
@receiver(pre_delete, sender=SomalierVCFExtract)
def somalier_vcf_extract_pre_delete_handler(sender, instance, **kwargs): # pylint: disable=unused-argument
somalier_dir = instance.get_somalier_dir()
if os.path.exists(somalier_dir):
logging.info("Deleting %s - removing dir: %s", instance, somalier_dir)
shutil.rmtree(somalier_dir)
class SomalierSampleExtract(models.Model):
vcf_extract = models.ForeignKey(SomalierVCFExtract, on_delete=CASCADE)
sample = models.OneToOneField(Sample, on_delete=CASCADE)
ref_count = models.IntegerField(default=0)
het_count = models.IntegerField(default=0)
hom_count = models.IntegerField(default=0)
unk_count = models.IntegerField(default=0)
class SomalierAncestryRun(AbstractSomalierModel):
""" We do a run against a whole VCF """
vcf_extract = models.OneToOneField(SomalierVCFExtract, on_delete=CASCADE)
uuid = models.UUIDField(default=uuid.uuid4, editable=False) # code to hide directories in media_root
def get_report_dir(self):
cfg = SomalierConfig()
return cfg.ancestry_dir(self.uuid)
def get_samples(self) -> Iterable[Sample]:
return self.vcf_extract.get_samples()
@property
def url(self):
report_dir = self.get_report_dir()
return self.media_url(os.path.join(report_dir, "somalier-ancestry.somalier-ancestry.html"))
@receiver(pre_delete, sender=SomalierAncestryRun)
def somalier_ancestry_run_pre_delete_handler(sender, instance, **kwargs): # pylint: disable=unused-argument
report_dir = instance.get_report_dir()
if os.path.exists(report_dir):
logging.info("Deleting %s - removing dir: %s", instance, report_dir)
shutil.rmtree(report_dir)
class SomalierAncestry(TimeStampedModel):
ancestry_run = models.ForeignKey(SomalierAncestryRun, on_delete=CASCADE)
sample_extract = models.OneToOneField(SomalierSampleExtract, on_delete=CASCADE)
predicted_ancestry = models.CharField(max_length=1, choices=SuperPopulationCode.choices)
EAS_prob = models.FloatField()
AFR_prob = models.FloatField()
AMR_prob = models.FloatField()
SAS_prob = models.FloatField()
EUR_prob = models.FloatField()
class SomalierRelate(AbstractSomalierModel):
objects = InheritanceManager()
uuid = models.UUIDField(default=uuid.uuid4, editable=False) # code to hide directories in media_root
class Meta:
abstract = True
def get_samples(self) -> Iterable[Sample]:
return []
def is_joint_called_vcf(self) -> bool:
samples_qs = self.get_samples()
num_vcfs = samples_qs.order_by("vcf").distinct("vcf").count()
return num_vcfs == 1
def has_ped_file(self) -> bool:
return False
def write_ped_file(self, filename):
""" Sample IDs have to match samples provided in get_samples() """
write_unrelated_ped(filename, [AbstractSomalierModel.sample_name(s) for s in self.get_samples()])
def get_related_dir(self) -> str:
cfg = SomalierConfig()
return cfg.related_dir(self.uuid)
@property
def url(self):
return self.media_url(os.path.join(self.get_related_dir(), "somalier.html"))
class SomalierCohortRelate(SomalierRelate):
cohort = models.OneToOneField(Cohort, on_delete=CASCADE)
cohort_version = models.IntegerField()
def get_samples(self) -> Iterable[Sample]:
return self.cohort.get_samples().filter(no_dna_control=False)
class SomalierTrioRelate(SomalierRelate):
trio = models.OneToOneField(Trio, on_delete=CASCADE)
def get_samples(self) -> Iterable[Sample]:
return self.trio.get_samples()
def has_ped_file(self) -> bool:
return True
def write_ped_file(self, filename):
proband = AbstractSomalierModel.sample_name(self.trio.proband.sample)
father = AbstractSomalierModel.sample_name(self.trio.father.sample)
mother = AbstractSomalierModel.sample_name(self.trio.mother.sample)
proband_sex = Sex.UNKNOWN
if patient := self.trio.proband.sample.patient:
proband_sex = patient.sex
write_trio_ped(filename, proband, proband_sex,
father, self.trio.father_affected, mother, self.trio.mother_affected)
@receiver(pre_delete, sender=SomalierCohortRelate)
@receiver(pre_delete, sender=SomalierTrioRelate)
def somalier_relate_pre_delete_handler(sender, instance, **kwargs): # pylint: disable=unused-argument
related_dir = instance.get_related_dir()
if os.path.exists(related_dir):
logging.info("Deleting %s - removing dir: %s", instance, related_dir)
shutil.rmtree(related_dir)
class SomalierAllSamplesRelate(SomalierRelate):
def get_sample_somalier_filenames(self) -> List[str]:
cfg = SomalierConfig()
return [f"{cfg['vcf_base_dir']}/**/*.somalier"] # Wild card
def get_samples(self) -> Iterable[Sample]:
return Sample.objects.filter(import_status=ImportStatus.SUCCESS)
class SomalierRelatePairs(models.Model):
relate = models.ForeignKey(SomalierAllSamplesRelate, on_delete=CASCADE)
# Sample A always has a lower PK than B
sample_a = models.ForeignKey(Sample, on_delete=CASCADE, related_name="somalierrelatepairs_a")
sample_b = models.ForeignKey(Sample, on_delete=CASCADE, related_name="somalierrelatepairs_b")
relatedness = models.FloatField()
ibs0 = models.IntegerField()
ibs2 = models.IntegerField()
hom_concordance = models.FloatField()
hets_a = models.IntegerField()
hets_b = models.IntegerField()
hets_ab = models.IntegerField()
shared_hets = models.IntegerField()
hom_alts_a = models.IntegerField()
hom_alts_b = models.IntegerField()
shared_hom_alts = models.IntegerField()
n = models.IntegerField()
x_ibs0 = models.IntegerField()
x_ibs2 = models.IntegerField()
class Meta:
unique_together = ('sample_a', 'sample_b')
@staticmethod
def get_for_sample(sample: Sample):
return SomalierRelatePairs.objects.filter(Q(sample_a=sample) | Q(sample_b=sample))
class SomalierConfig:
def __init__(self):
self.settings = settings.SOMALIER
def _annotation_dir(self, dirname):
return os.path.join(self.settings["annotation_base_dir"], dirname)
def get_annotation(self, key):
return self._annotation_dir(self.settings["annotation"][key])
def report_dir(self, *args):
return os.path.join(self.settings["report_base_dir"], *map(str, args))
def ancestry_dir(self, subdir):
return self.report_dir("ancestry", subdir)
def related_dir(self, subdir):
return self.report_dir("related", subdir)
def get_sites(self, genome_build: 'GenomeBuild'):
sites = self.settings["annotation"]["sites"][genome_build.name]
return self._annotation_dir(sites)
def get_sites_vcf(self, genome_build: 'GenomeBuild'):
sites_name = os.path.basename(self.get_sites(genome_build))
sites_vcf_kwargs = {"name": sites_name, "genome_build": genome_build}
try:
return VCF.objects.get(**sites_vcf_kwargs)
except VCF.DoesNotExist as dne:
print(f"Expected single VCF loaded via: {sites_vcf_kwargs}")
raise dne
def __getitem__(self, key):
return self.settings[key]
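# A rough sketch of the settings.SOMALIER dict this class expects, inferred only
# from the keys accessed above (values are illustrative placeholders):
# SOMALIER = {
#     "vcf_base_dir": "/data/somalier/vcfs",
#     "report_base_dir": "/data/somalier/reports",
#     "annotation_base_dir": "/data/somalier/annotation",
#     "annotation": {
#         "sites": {"GRCh37": "sites.GRCh37.vcf.gz", "GRCh38": "sites.GRCh38.vcf.gz"},
#     },
# }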
|
from django.contrib.auth import user_logged_out
from django.contrib.auth.models import User
from django.db import models
from django.conf import settings
# Create your models here.
from django.db.models.signals import post_save
from django.dispatch import receiver
from argos.libs.clients.aniketos import AniketosClient
class UserProfile(models.Model):
user = models.OneToOneField(User)
language = models.CharField(max_length=5, default="en-us")
token = models.CharField(max_length=255, null=True)
domain = models.CharField(max_length=255, null=True)
def __unicode__(self):
return self.user.username
@receiver(post_save, sender=User)
def create_profile(sender, instance, created, **kwargs):
if created:
try:
profile, result = UserProfile.objects.get_or_create(user=instance)
except:
pass
@receiver(user_logged_out, sender=User)
def logoff_user(sender, request, user, **kwargs):
if 'argos.apps.common.auth.AniketosAuthentication' in getattr(settings, 'AUTHENTICATION_BACKENDS'):
profile = UserProfile.objects.get(user=user)
client = AniketosClient()
client.logout(profile.token)
profile.token = None
profile.save() |
import datetime
from pydantic import BaseModel
from typing import Optional
class MortgageCalculatorRequest(BaseModel):
propertyValue: Optional[float]
downPayment: Optional[float]
interestRate: Optional[float]
loanTermYears: Optional[int]
startDate: Optional[datetime.datetime]
pmi: Optional[float]
propertyInsurance: Optional[float]
monthlyHoa: Optional[float] |
'''
Copyright (c) 2017 Mark Fisher
Licensed under the MIT License, see LICENSE in the project root for full license.
This module contains an icon provider for use with a QFileSystemModel, where the icons are specified
using a JSON settings file.
The format of the JSON file should be:
{
"types" : {
"Computer" : "<icon path>",
"Desktop" : "<icon path>",
"Trashcan" : "<icon path>",
"Network" : "<icon path>",
"Drive" : "<icon path>",
"Folder" : "<icon path>",
"File" : "<icon path>"
},
"filenames" : {
"LICENSE" : "<icon path>",
"README.md" : "<icon path>",
<etc...>
},
"extensions" : {
"txt" : "<icon path>",
"jpg" : "<icon path>",
<etc...>
},
"file_default" : "<icon path>"
}
Note that null can be given for any path to not specify an icon.
Comments of the // form are allowed.
Filenames settings override extension settings.
'''
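# A minimal, purely illustrative settings file matching the schema described above:
# {
#     "types": {"Folder": "icons/folder.png", "File": null},
#     "filenames": {"README.md": "icons/readme.png"},
#     "extensions": {"txt": "icons/text.png"},
#     "file_default": "icons/file.png"
# }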
import os
from qtpy.QtCore import QFileInfo
from qtpy.QtWidgets import QFileIconProvider
from qtpy.QtGui import QIcon
from . import extended_json
class JSONFileIconProvider(QFileIconProvider):
'''
Provide icons to a QFileSystemModel based on a JSON file.
'''
def __init__(self, path):
'''
path
The path to the JSON file containing the paths to the icons to use.
'''
super(JSONFileIconProvider, self).__init__()
settings = extended_json.load_file(path)
# Icon cache for load_icon().
icons = {}
def load_icon(icon_specifier):
'''
Loads (with caching) the icon specified by the given specifier.
'''
if isinstance(icon_specifier, str):
icon_specifier = os.path.normcase(os.path.abspath(icon_specifier))
if icon_specifier not in icons:
icon = QIcon(icon_specifier)
icons[icon_specifier] = icon
else:
icon = icons[icon_specifier]
elif icon_specifier is None:
if icon_specifier not in icons:
icon = QIcon()
icons[icon_specifier] = icon
else:
icon = icons[icon_specifier]
else:
raise Exception('Unsupported icon specifier: {}.'.format(icon_specifier))
return icon
# Map JSON keys to QFileIconProvider file types.
type_map = {
'Computer' : QFileIconProvider.Computer,
'Desktop' : QFileIconProvider.Desktop,
'Trashcan' : QFileIconProvider.Trashcan,
'Network' : QFileIconProvider.Network,
'Drive' : QFileIconProvider.Drive,
'Folder' : QFileIconProvider.Folder,
'File' : QFileIconProvider.File
}
self._type_icons = {}
for type_name, icon_specifier in settings['types'].items():
self._type_icons[type_map[type_name]] = load_icon(icon_specifier)
self._filename_icons = {}
for filename, icon_specifier in settings['filenames'].items():
self._filename_icons[filename] = load_icon(icon_specifier)
self._extension_icons = {}
for extension, icon_specifier in settings['extensions'].items():
self._extension_icons[extension] = load_icon(icon_specifier)
self._file_default_icon = load_icon(settings['file_default'])
def icon(self, type_or_info):
'''
Returns the icon to use for the given file info or type.
type_or_info
Either a QFileIconProvider type enumeration, or a QFileInfo object.
'''
if isinstance(type_or_info, QFileInfo):
# called icon(info)
if type_or_info.isFile():
try:
return self._filename_icons[type_or_info.fileName()]
except KeyError:
pass
try:
return self._extension_icons[type_or_info.suffix()]
except KeyError:
pass
return self._file_default_icon
return QIcon()
else:
# called icon(type)
return self._type_icons.get(type_or_info, QIcon())
|
D = [[[0]*110 for i in range(110)] for j in range(110)]
N,L,R = map(int,input().split())
D[1][1][1] = 1
Mod = 1000000007
for i in range(2,N+1):
for j in range(1,L+1):
for k in range(1,R+1):
D[i][j][k] = (D[i-1][j][k-1] + D[i-1][j-1][k] + (i-2)*D[i-1][j][k]) %Mod
# 1. When adding the i-th building, treat it as adding the smallest building so far.
# 2. When adding the i-th building, split the count into the cases where it goes at the far left, the far right, or somewhere in the middle.
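# e.g. for input "3 2 2" the recurrence gives D[3][2][2] = 2, matching the two
# arrangements (1,3,2) and (2,3,1) of three buildings with 2 visible from the
# left and 2 visible from the right.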
print(D[N][L][R]) |
#! /usr/bin/env python
##############################################################################
# CreateJPEGKMZ.py
# A class to read EXIF data from geotageged JPEG files and
# create a KMZ File containing a quick look PNG
#
# Function for reading EXIF data modified from one
# proposed on http://stackoverflow.com/questions/765396/exif-manipulation-library-for-python
#
# Author: Dan Clewley
# Email: [email protected]
# Date: 06/06/2012
# Version: 1.0
##############################################################################
import os, glob, re, sys, csv
class CreateJPEGKMZ(object):
def createKMZFile(self, inDIR, image, inXYLocationsFile):
os.chdir(inDIR) # Change into input directory (for zipping files)
imageBaseName = re.sub('\.JPG','',image.upper())
imageBaseName = re.sub('\.jpg','',imageBaseName)
inImageFile = os.path.join(inDIR, image)
foundGeo = False
imageGeoLine = []
# Find lat long in XYLocationsFile
inXYLocations = csv.reader(open(inXYLocationsFile,'rU'))
for line in inXYLocations:
if line[0].strip() == imageBaseName:
imageGeoLine = line
foundGeo = True
if foundGeo == False:
print "No GeoInfo for: " + inImageFile
else:
# Create quicklook image (using imagemagick)
qlImage = imageBaseName + '_ql.png'
convertCommand = 'convert ' + os.path.join(inDIR, image) + ' -resize 600x400 ' + os.path.join(inDIR, qlImage)
os.system(convertCommand)
eastingDDStr = str(imageGeoLine[2])
northingDDStr = str(imageGeoLine[1])
outKMLName = imageBaseName + '_kml.kml'
outKMLFile = os.path.join(inDIR, outKMLName)
outKML = open(outKMLFile, 'w')
outKMLText = '''
<kml xmlns="http://earth.google.com/kml/2.2">
<Document id="%s">
<name>%s</name>
<Snippet></Snippet>
<Snippet></Snippet>
<Placemark>
<name>%s</name>
<description>
<a href="../%s">
<img style="width: 600px; height: 400px;" alt="Photo" src="%s"/></a>
</description><Snippet></Snippet>
<Point>
<coordinates>%s,%s</coordinates>
</Point>
</Placemark>
</Document></kml>
''' %(imageBaseName, imageBaseName, imageBaseName, image, qlImage, eastingDDStr, northingDDStr)
outKML.write(outKMLText)
outKML.close()
# Create KML archive
zipCommand = 'zip -r ' + imageBaseName + '.kmz ' + qlImage + ' ' + outKMLName
os.system(zipCommand)
os.remove(qlImage)
os.remove(outKMLName)
def run(self, inDIR, inXYLocationsFile):
os.chdir(inDIR)
jpegList = glob.glob('*.JPG')
if len(jpegList) == 0:
jpegList = glob.glob('*.jpg')
for image in jpegList:
self.createKMZFile(inDIR, image, inXYLocationsFile)
def help(self):
print '''Create KMZ file for photos where location is stored in CSV file in the form:
PhotoName, Lat, Long
Usage:
python CreateJPEGKMZ.py inDIR inXYLocationsFile'''
if __name__ == '__main__':
obj = CreateJPEGKMZ()
if len(sys.argv) >= 3:
inDIR = sys.argv[1]
inXYLocationsFile = sys.argv[2]
obj.run(inDIR, inXYLocationsFile)
else:
obj.help()
|
#!/usr/bin/env python
from tabulate import tabulate
from time import sleep
import unittest
class Instr():
def __init__(self, count):
self._count = count
def isExit(self):
return False
@property
def count(self):
return self._count
def expand(self):
expanded = [self.__class__(0)]
for _ in range(self._count -1):
expanded.append(self.__class__(0))
return expanded
class CPU(Instr):
def __repr__(self):
if self._count:
return "CPU({count})".format(count=self._count)
else:
return "CPU"
class IO(Instr):
def __repr__(self):
if self._count:
return "IO({count})".format(count=self._count)
else:
return "IO"
class EXIT(Instr):
def isExit(self):
return True
def __repr__(self):
return "EXIT"
class Program():
def __init__(self, name, instructions):
self._name = name
self._instructions = self.expand(instructions)
@property
def name(self):
return self._name
@property
def instructions(self):
return self._instructions
def expand(self, instructions):
expanded = []
for instr in instructions:
expanded.extend(instr.expand())
if not expanded[-1].isExit():
expanded.append(EXIT(0))
return expanded
def __repr__(self):
return "Program({name}, {instructions})".format(name=self._name, instructions=self._instructions)
class Cpu():
def __init__(self, mem):
self._memory = mem
self._pc = 0
# Stores the last executed instruction (that is not an exit)
self._ir = Instr(1)
# tick manually
def start(self):
op = self._memory.fetch(self._pc)
self._tick(op)
@property
def pc(self):
return self._pc
@pc.setter
def pc(self, addr):
self._pc = addr
def _tick(self, op):
print("Exec: {op}, PC={pc}".format(op=op, pc=self._pc))
sleep(1)
if not op.isExit():
self._ir = op
self._pc += 1
def __repr__(self):
return "CPU(PC={pc})".format(pc=self._pc)
class Memory():
def __init__(self):
self._memory = []
def load(self, program):
self._memory.extend(program.instructions)
def fetch(self, addr):
return self._memory[addr]
def __repr__(self):
return tabulate(enumerate(self._memory), tablefmt='psql')
class SO():
def __init__(self):
self._memory = Memory()
self._cpu = Cpu(self._memory)
# Has to load the program
def exec(self, prog):
self._memory.load(prog)
def __repr__(self):
return "{cpu}\n{mem}".format(cpu=self._cpu, mem=self._memory)
class TestStringMethods(unittest.TestCase):
def setUp(self):
self.prog1 = Program("test.exe", [CPU(5), IO(2), CPU(3)])
self.prog2 = Program("test.exe", [IO(2), CPU(3)])
self.so = SO()
def tearDown(self):
self.so = SO()
def test_cargar_un_programa(self):
self.so.exec(self.prog1)
self.assertTrue(len(self.so._memory._memory) == len(self.prog1.instructions))
def test_cargar_dos_programas(self):
self.so.exec(self.prog1)
self.so.exec(self.prog2)
expected = len(self.prog1.instructions) + len(self.prog2.instructions)
self.assertEqual(len(self.so._memory._memory), expected)
def test_ejecutar_primera_instruccion(self):
self.so.exec(self.prog1)
expectedPC = 1
expectedValorDeIR = "CPU"
self.so._cpu.start()
self.assertEqual(self.so._cpu.pc, expectedPC)
self.assertEqual(repr(self.so._cpu._ir), "CPU")
def test_al_ejecutar_un_exit_no_lo_guarda_en_ir(self):
self.so.exec(self.prog1)
self.so.exec(self.prog2)
self.so._cpu.pc = 9
self.so._cpu.start()
expectedPC = 10
expectedValorDeIR = "CPU"
self.assertEqual(self.so._cpu.pc, expectedPC)
self.assertEqual(repr(self.so._cpu._ir), "CPU")
self.so._cpu.start()
expectedPC = 11
self.assertEqual (self.so._cpu.pc, expectedPC)
self.assertEqual (repr(self.so._cpu._ir), "CPU")
if __name__ == '__main__':
unittest.main() |
"""
Code adapted from: https://github.com/dgilland/pydash
"""
import collections
def get(obj, path, default=None):
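"""
Walk `path` (a sequence of dict keys / list indexes) into a nested structure.
>>> get({'a': [{'b': 1}]}, ['a', 0, 'b'])
1
>>> get({'a': {}}, ['a', 'missing'], default='fallback')
'fallback'
"""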
current = obj
for key in path:
if isinstance(current, list) and isinstance(key, int):
if key < 0 or key >= len(current):
return default
else:
current = current[key]
continue
if current is None or key not in current:
return default
current = current[key]
return current
def in_obj(obj, path):
if len(path) == 0:
return False
if len(path) == 1:
current = obj
else:
current = get(obj, path[:-1])
key = path[-1]
if isinstance(current, list) and isinstance(key, int):
return 0 <= key < len(current)
return key in current
def insert(container, key_path, item):
"""
>>> insert({}, ['a', '1', '2', 'world'], 'hello')
{'a': {'1': {'2': {'world': 'hello'}}}}
"""
if isinstance(container, collections.OrderedDict):
gen = collections.OrderedDict
update = lambda i, k, v: i.update({k: v})
else:
gen = dict
update = lambda i, k, v: i.__setitem__(k, v)
sub_container = container
for key in key_path[:-1]:
if isinstance(key, int):
raise ValueError('No int keys allowed in deep insert')
if key not in sub_container:
update(sub_container, key, gen())
sub_container = sub_container[key]
update(sub_container, key_path[-1], item)
return container
if __name__ == '__main__':
pass |
"""Unit tests for readme.py"""
import re
from .readme import locate_badges, merge_badges, RE_MARKDOWN_IMAGE
def test_match_screenshot():
"""Tests replacing a screenshot with the module's regex"""
input_readme = """
# GodFather
A Delphi app to rename files (legacy project)

"""
expected = """
# GodFather
A Delphi app to rename files (legacy project)

"""
def replacer(match: re.Match) -> str:
"""The replacer function for the regex replacement"""
return (
match.string[match.start() : match.start("filename")]
+ "/scrnshot.png"
+ match.string[match.end("filename") : match.end()]
)
actual = RE_MARKDOWN_IMAGE.sub(replacer, input_readme)
assert actual == expected
def test_locate_badges_one_badge():
"""Tests locating badges with a single badge"""
input_readme = """# archetype-quickstart-jdk8
[](https://maven-badges.herokuapp.com/maven-central/com.github.ngeor/archetype-quickstart-jdk8)
A Maven archetype for a simple Java app, updated for Java 8.
This is effectively the same as the maven-archetype-quickstart,
"""
expected = (
"""# archetype-quickstart-jdk8
""",
[
"[](https://maven-badges.herokuapp.com/maven-central/com.github.ngeor/archetype-quickstart-jdk8)"
],
"""A Maven archetype for a simple Java app, updated for Java 8.
This is effectively the same as the maven-archetype-quickstart,
""",
)
actual = locate_badges(input_readme)
assert expected == actual
def test_locate_badges_no_badges():
"""Tests locating badges when no badges exist"""
input_readme = """# some project
Some project description that goes on for a while
and even wraps multiple lines.
Some other text second paragraph here.
"""
expected = (
"""# some project
""",
[],
"""Some project description that goes on for a while
and even wraps multiple lines.
Some other text second paragraph here.
""",
)
actual = locate_badges(input_readme)
assert expected == actual
def test_locate_and_merge_badges_one_badge():
"""Tests locating badges with a single badge and merging it back"""
input_readme = """# archetype-quickstart-jdk8
[](https://maven-badges.herokuapp.com/maven-central/com.github.ngeor/archetype-quickstart-jdk8)
A Maven archetype for a simple Java app, updated for Java 8.
This is effectively the same as the maven-archetype-quickstart,
"""
before, badges, after = locate_badges(input_readme)
merged = merge_badges(before, badges, after)
assert input_readme == merged
def test_locate_and_merge_badges_no_badges():
"""Tests locating and merging badges when no badges exist"""
input_readme = """# some project
Some project description that goes on for a while
and even wraps multiple lines.
Some other text second paragraph here.
"""
before, badges, after = locate_badges(input_readme)
merged = merge_badges(before, badges, after)
assert input_readme == merged
|
### Author: tj <[email protected]>
### Description: Scottish Consulate Techno Orbital Tracker
### Category: Other
### License: BSD 3
import sys
import os
import time
import socket
import ugfx
import buttons
import wifi
import pyb  # provides pyb.Servo used in calibrateservos()
PORT = 4533
ANY_ADDR = "0.0.0.0"
container = None
textcontainer = None
target_az = 0.0
target_el = 0.0
current_az = 0.0
current_el = 0.0
az = None
el = None
def processbuttons(data):
#buttons.is_pressed("JOY_RIGHT"):
#buttons.is_pressed("JOY_LEFT"):
#buttons.is_pressed("JOY_DOWN"):
#buttons.is_pressed("JOY_UP"):
#buttons.is_pressed("JOY_CENTER"):
#buttons.is_pressed("BTN_A"):
#buttons.is_pressed("BTN_B"):
#buttons.is_pressed("BTN_MENU"):
print("readings buttons")
def drawtext(status="Searching for long range comms..."):
global current_az
global current_el
ugfx.clear(ugfx.BLACK)
ugfx.set_default_font(ugfx.FONT_NAME)
ugfx.text(0, 5, "SATTRACKER", ugfx.GREEN)
ugfx.set_default_font(ugfx.FONT_SMALL)
ugfx.text(0, 50, status, ugfx.RED)
posstring = "AZ: {} EL: {}".format(current_az, current_el)
ugfx.set_default_font(ugfx.FONT_SMALL)
ugfx.text(0, 70, posstring, ugfx.YELLOW)
#
# ___ ___ _ ___ _____ ___
# (_-</ -_) '_\ V / _ (_-<
# /__/\___|_| \_/\___/__/
#
#
def calibrateservos():
global az
global el
az = pyb.Servo(1)
el = pyb.Servo(2)
az.angle(0)
el.angle(0)
# _ _
# ___ __ _| |_ _ _ __ _ __| |_____ _ _
# (_-</ _` | _| '_/ _` / _| / / -_) '_|
# /__/\__,_|\__|_| \__,_\__|_\_\___|_|
#
#
def sattracker():
global current_az
global current_el
global target_az
global target_el
calibrateservos()
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((ANY_ADDR, PORT))
s.listen(1)
s.settimeout(0.5)  # socket will block for at most 0.5 seconds
connected = False
while True:
status = "Searching for long range comms..."
conn, addr = s.accept()
if conn:
status = 'Connection from {}:{}'.format(addr[0], addr[1])
drawtext(status)
connected = True
else:
continue
while connected:
data = conn.recv(100).decode("utf-8") #timeouts here aren't handled
if not data:
break
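# The commands handled below look like the hamlib rotctld convention (default port 4533):
# "p" reports the current az/el and "P <az> <el>" sets a new target
# (an observation from the handlers below, not something the original comments state).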
if data == "p\n":
response = "{}\n{}\n".format(current_az, current_el)
conn.send(response)
elif data.startswith("P "):
values = data.split(" ")
if len(values) != 3:
continue
target_az = float(values[1])
target_el = float(values[2])
conn.send(" ")
az.angle(target_az)
el.angle(target_el)
current_az = target_az
current_el = target_el
drawtext(status)
elif data == "q\n":
print("close command, shutting down")
conn.close()
connected = False
break
else:
print("unknown command, closing socket")
conn.close()
connected = False
break
#if __name__ == "__main__":
buttons.init()
ugfx.init()
ugfx.clear(ugfx.BLACK)
textcontainer = ugfx.Container(0, 0, 320, 80)
container = ugfx.Container(0, 80,320,160)
drawtext()
wifi.connect()
sattracker()
|
import discord
from pymongo import MongoClient
from discord.ext.commands import Bot
from discord.ext.commands import CheckFailure
from discord.ext.commands import has_any_role
from discord.ext.commands import has_permissions
from discord.ext.commands import check
import os
import raven
from discord_util import *
rc = raven.Client(os.environ['RAVEN_DSN'], environment=os.environ['RAVEN_ENV'])
BOT_PREFIX = ("?", "!")
dbclient = MongoClient(os.environ['MONGO_IP'], int(os.environ['MONGO_PORT']))
db = dbclient[os.environ['MONGO_DB']]
client = Bot(command_prefix=BOT_PREFIX)
current_roles = []
def set_raven_ctx(ctx, rc):
c = {
'id' : ctx.message.author.id,
'user_name' : ctx.message.author.name,
'is_bot': ctx.message.author.bot,
'user_discriminator' : ctx.message.author.discriminator,
'created_at': ctx.message.author.created_at,
'message_id' : ctx.message.id,
'message_content' : ctx.message.content
}
if ctx.guild is not None:
c['guild_id'] = ctx.guild.id
c['guild_name'] = ctx.guild.name
rc.user_context(c)
def find_object(objects, name, default=None, error="The object does not exist."):
for o in objects:
if o.name.lower() == name.lower() or o.id == name:
return o
if default is not None: return default
raise CheckFailure(error)
def get_channel(name, default=None):
return find_object(client.get_all_channels(), name, default=default, error="The channel does not exist.")
def get_category(ctx, name, default=None):
return find_object(ctx.guild.categories, name, default=default, error="The category does not exist.")
def get_role(ctx, name, default=None):
return find_object(ctx.guild.roles, name, default=default, error="The role does not exist.")
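# For reference, the per-guild settings document that check_perms() and the
# commands below read/write roughly looks like (field names taken from the code,
# values illustrative):
# { '_id': <guild id>,
#   'create': {'channel': [<role names>], 'role': [<role names>]},
#   'edit':   {'channel': [...], 'role': [...]},
#   'delete': {'channel': [...], 'role': [...]},
#   'limit_everyone': <int>, 'minimalist': <bool> }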
def check_perms(ctx, action, objtype):
error = "You do not have permission to %s a %s."
result = db.settings.find_one({ '_id' : ctx.guild.id })
if result is None:
raise CheckFailure(error % (action, objtype))
if result[action] is None:
raise CheckFailure(error % (action, objtype))
if result[action][objtype] is None:
raise CheckFailure(error % (action, objtype))
u = ctx.guild.get_member(ctx.message.author.id)
for r in ctx.message.author.roles:
if r.name in result[action][objtype]: return True
raise CheckFailure(error % (action, objtype))
@client.group(pass_context=True)
async def channel(ctx):
if is_bot(ctx): return
set_raven_ctx(ctx, rc)
if ctx.invoked_subcommand is None:
await ctx.channel.send('Invalid channel command passed...')
@client.group(pass_context=True)
async def role(ctx):
if is_bot(ctx): return
set_raven_ctx(ctx, rc)
if ctx.invoked_subcommand is None:
await ctx.channel.send('Invalid role command passed...')
@client.command(pass_context=True)
@has_permissions(manage_guild=True)
async def permcheck(ctx):
try:
if is_bot(ctx): return
set_raven_ctx(ctx, rc)
result = db.settings.find_one({ '_id' : ctx.guild.id })
message = ""
if result is None:
await ctx.channel.send("No roles have been set!")
else:
if "create" in result:
if "channel" in result["create"]: message += "Create Channels: " + ", ".join(str(x) for x in result["create"]["channel"]) + "\n"
if "role" in result["create"]: message += "Create Roles: " + ", ".join(str(x) for x in result["create"]["role"]) + "\n"
if "edit" in result:
if "channel" in result["edit"]: message += "Edit Channels: " + ", ".join(str(x) for x in result["edit"]["channel"]) + "\n"
if "role" in result["edit"]: message += "Edit Roles: " + ", ".join(str(x) for x in result["edit"]["role"]) + "\n"
if "delete" in result:
if "channel" in result["delete"]: message += "Delete Channels: " + ", ".join(str(x) for x in result["delete"]["channel"]) + "\n"
if "role" in result["delete"]: message += "Delete Roles: " + ", ".join(str(x) for x in result["delete"]["role"]) + "\n"
if "limit_everyone" in result:
message += "The current role limit is " + str(result['limit_everyone']) + "\n"
await ctx.channel.send(message)
except:
rc.captureException()
@channel.command(pass_context=True)
@has_permissions(manage_guild=True)
async def remove(ctx, action, rolename):
try:
if is_bot(ctx): return
set_raven_ctx(ctx, rc)
if action in ["create", "edit", "delete"]:
db.settings.update_one({ '_id' : ctx.guild.id }, { '$pullAll' : { action + '.channel' : [rolename] } }, upsert=True)
await ctx.channel.send("Removed role '%s' from being able to %s a channel." % (rolename, action))
else:
await ctx.channel.send("Incorrect command.")
except:
rc.captureException()
@channel.command(pass_context=True)
@has_permissions(manage_guild=True)
async def add(ctx, action, rolename):
try:
if is_bot(ctx): return
set_raven_ctx(ctx, rc)
if action in ["create", "edit", "delete"]:
db.settings.update_one({ '_id' : ctx.guild.id }, { '$addToSet' : { action + '.channel' : rolename } }, upsert=True)
await ctx.channel.send("Granted role '%s' permission to be able to %s a channel." % (rolename, action))
else:
await ctx.channel.send("Incorrect command.")
except:
rc.captureException()
@role.command(pass_context=True)
@has_permissions(manage_guild=True)
async def remove(ctx, action, rolename):
try:
if is_bot(ctx): return
set_raven_ctx(ctx, rc)
if action in ["create", "edit", "delete"]:
db.settings.update_one({ '_id' : ctx.guild.id }, { '$pullAll' : { action + '.role' : [rolename] } }, upsert=True)
await ctx.channel.send("Removed role '%s' from being able to %s a role." % (rolename, action))
else:
await ctx.channel.send("Incorrect command.")
except:
rc.captureException()
@role.command(pass_context=True)
@has_permissions(manage_guild=True)
async def add(ctx, action, rolename):
try:
if is_bot(ctx): return
set_raven_ctx(ctx, rc)
if action in ["create", "edit", "delete"]:
db.settings.update_one({ '_id' : ctx.guild.id }, { '$addToSet' : { action + '.role' : rolename } }, upsert=True)
await ctx.channel.send("Granted role '%s' permission to be able to %s a role." % (rolename, action))
else:
await ctx.channel.send("Incorrect command.")
except:
rc.captureException()
@role.command(pass_context=True)
@has_permissions(manage_guild=True)
async def limit(ctx, count):
try:
if is_bot(ctx): return
set_raven_ctx(ctx, rc)
c = int(count)
if c > 0:
db.settings.update_one({ '_id' : ctx.guild.id }, { '$set' : { 'limit_everyone' : c } }, upsert=True)
await ctx.channel.send("Everyone with no role can only have %i roles now" % (c))
except (TypeError, ValueError):
rc.captureException()
await ctx.channel.send("Sorry, that did not work.")
@role.command(pass_context=True)
@has_permissions(manage_guild=True)
async def unlimited(ctx):
try:
if is_bot(ctx): return
set_raven_ctx(ctx, rc)
db.settings.update_one({ '_id' : ctx.guild.id }, { '$unset' : { 'limit_everyone' : "" } }, upsert=True)
await ctx.channel.send("Everyone can have any number of roles now")
except:
rc.captureException()
@role.command(pass_context=True)
@has_permissions(manage_guild=True)
async def minimalist(ctx, mode):
try:
if is_bot(ctx): return
set_raven_ctx(ctx, rc)
if mode == "on":
db.settings.update_one({ '_id' : ctx.guild.id }, { '$set' : { 'minimalist' : True } }, upsert=True)
await ctx.channel.send("Role hierarchy auto management is now on")
if mode == "off":
db.settings.update_one({ '_id' : ctx.guild.id }, { '$set' : { 'minimalist' : False } }, upsert=True)
await ctx.channel.send("Role hierarchy auto management is now off")
except:
rc.captureException()
@channel.command(pass_context=True)
async def create(ctx, name, channel_type="text", category=None):
try:
if is_bot(ctx): return
set_raven_ctx(ctx, rc)
check_perms(ctx, 'create', 'channel')
c = get_category(ctx, category, ctx.guild.categories[0]) if category else ctx.guild.categories[0]
if channel_type == "text": await ctx.guild.create_text_channel(name, category=c)
if channel_type == "voice": await ctx.guild.create_voice_channel(name, category=c)
if channel_type == "category": await ctx.guild.create_category_channel(name, category=c)
await ctx.channel.send("Channel '%s' is created." % (name))
except:
rc.captureException()
@channel.command(pass_context=True)
async def delete(ctx, name, reason=None):
try:
if is_bot(ctx): return
set_raven_ctx(ctx, rc)
check_perms(ctx, 'delete', 'channel')
channel = get_channel(name)
await channel.delete(reason=reason)
except:
rc.captureException()
@channel.command(pass_context=True)
async def edit(ctx, name, param, value, reason=None):
try:
if is_bot(ctx): return
set_raven_ctx(ctx, rc)
check_perms(ctx, 'edit', 'channel')
channel = get_channel(name)
if param == "category": value = get_category(ctx, value)
await channel.edit(**{ "reason" : reason, param : value })
except:
rc.captureException()
@role.command(pass_context=True)
async def create(ctx, name):
try:
if is_bot(ctx): return
set_raven_ctx(ctx, rc)
check_perms(ctx, 'create', 'role')
await ctx.guild.create_role(name=name)
except:
rc.captureException()
@role.command(pass_context=True)
async def delete(ctx, name, reason=None):
try:
if is_bot(ctx): return
set_raven_ctx(ctx, rc)
check_perms(ctx, 'delete', 'role')
role = get_role(ctx, name)
await role.delete(reason=reason)
except:
rc.captureException()
@role.command(pass_context=True)
async def edit(ctx, name, param, value, reason=None):
try:
if is_bot(ctx): return
set_raven_ctx(ctx, rc)
check_perms(ctx, 'edit', 'role')
role = get_role(ctx, name)
if param == "permissions": value = discord.Permissions(value)
if param == "color" or param == "colour": value = discord.Color(int(value, 16))
await role.edit(**{ "reason" : reason, param : value })
except:
rc.captureException()
async def on_member_update(before, after):
try:
if after.bot: return
set_raven_user(after, rc)
result = db.settings.find_one({ '_id' : after.guild.id })
if result is not None:
if 'limit_everyone' in result and len(before.roles) < len(after.roles):
thelimit = int(result['limit_everyone'])
# Because the @everyone role is included, which can't be deleted
if (len(after.roles) - 1) > thelimit:
# Now have to find the role that was added and remove it
ids = {}
for r in before.roles:
ids[r.id] = r
for r in after.roles:
if r.id not in ids:
await after.remove_roles(r, reason="Reached the role limit, which is " + str(thelimit) + " for everyone.")
if "minimalist" in result and len(before.roles) < len(after.roles):
if result["minimalist"]:
delete_roles = after.roles[1:]
for r in delete_roles:
await after.remove_roles(r, reason="Role hierarchy management is on.")
except:
rc.captureException()
client.add_listener(on_member_update, 'on_member_update')
client.add_cog(DiscordBotsOrgAPI(client))
client.run(os.environ['DISCORD_TOKEN'])
|
import re
from django.db.models.functions import Lower
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from django.views import generic
from django.utils import timezone
from juhannus.models import Event, Participant, get_midsummer_saturday
from juhannus.forms import SubmitForm
class EventView(generic.FormView):
template_name = 'juhannus/index.html'
form_class = SubmitForm
def dispatch(self, request, *args, **kwargs):
if not Event.objects.exists():
return HttpResponse("No event in db")
# use .localtime() when comparing to pytz-created datetime object
year = timezone.localtime().year
if timezone.localtime().strftime("%V") == get_midsummer_saturday(year).strftime("%V"):
# Only hit db when the week is correct
if not Event.objects.filter(year=year):
previous = Event.objects.order_by("year").last()
Event.objects.create(year=year, header=previous.header, body=previous.body)
return super().dispatch(request, *args, **kwargs)
def get_success_url(self):
return self.request.get_full_path()
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx["events"] = list(Event.objects.order_by("year").values("year"))
sort_order = "vote" if self.request.GET.get("vote") else "name"
if not self.kwargs.get("year"):
ctx['event'] = Event.objects.order_by("year").last()
else:
ctx['event'] = get_object_or_404(Event, year=self.kwargs.get("year"))
ctx["participants"] = ctx["event"].participants.order_by(sort_order if sort_order == "vote" else Lower(sort_order))
ctx["ascending"] = False if self.request.GET.get(sort_order, "").lower() == "desc" else True
if not ctx["ascending"]:
ctx["participants"] = ctx["participants"].reverse()
return ctx
def form_valid(self, form):
# print("FORM VALID", form.data.get("action"))
action = form.data.get("action")
if action == "modify" and self.request.user.is_staff:
instance = get_object_or_404(Participant, pk=form.data.get("pk"))
vote = SubmitForm(self.request.POST, instance=instance)
vote.save()
if action == "delete" and self.request.user.is_staff:
instance = get_object_or_404(Participant, pk=form.data.get("pk"))
instance.delete()
if action == "save":
vote = form.save(commit=False)
if vote.event.is_voting_available() or self.request.user.is_staff:
vote.save()
return super().form_valid(form)
def form_invalid(self, form):
# print("FORM INVALID", form.errors)
return super().form_invalid(form)
|
import json
import requests
from discord import RequestsWebhookAdapter, Webhook
from koapy.config import config
from koapy.utils.messaging.Messenger import Messenger
class DiscordWebhookMessenger(Messenger):
def __init__(self, url=None):
self._url = url or config.get_string(
"koapy.utils.messaging.discord.webhook_url"
)
assert self._url is not None and len(self._url) > 0
self._webhook = Webhook.from_url(self._url, adapter=RequestsWebhookAdapter())
def send_message(self, content):
return self._webhook.send(content)
class DoItYourselfDiscordWebhookMessenger(Messenger):
def __init__(self, url=None):
self._url = url or config.get_string(
"koapy.utils.messaging.discord.webhook_url"
)
assert self._url is not None and len(self._url) > 0
def send_message(self, content):
headers = {
"Content-Type": "application/json",
}
data = {
"content": content,
}
data = json.dumps(data)
response = requests.post(
self._url, headers=headers, data=data, params={"wait": "true"}
)
return response
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import functools
import cv2
import math
import numpy as np
import imageio
from glob import glob
import os
import copy
import shutil
import skimage.metrics
import pandas as pd
import pylab as plt
import fairseq.distributed_utils as du
import OpenEXR, Imath
from plyfile import PlyData, PlyElement
from fairseq.meters import StopwatchMeter
def get_rank():
try:
return du.get_rank()
except AssertionError:
return 0
def get_world_size():
try:
return du.get_world_size()
except AssertionError:
return 1
def parse_views(view_args):
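"""
Parse a view spec such as "0..4,7:2" into a list of ints: comma-separated ids,
with "a..b" expanding to range(a, b) and an optional ":step" suffix that
subsamples the result, e.g. "0..4,7:2" -> [0, 2, 7].
"""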
output = []
try:
xx = view_args.split(':')
ids = xx[0].split(',')
for id in ids:
if '..' in id:
a, b = id.split('..')
output += list(range(int(a), int(b)))
else:
output += [int(id)]
if len(xx) > 1:
output = output[::int(xx[-1])]
except Exception as e:
raise Exception("parse view args error: {}".format(e))
return output
def get_uv(H, W, h, w):
"""
H, W: real image (intrinsics)
h, w: resized image
"""
uv = np.flip(np.mgrid[0: h, 0: w], axis=0).astype(np.float32)
uv[0] = uv[0] * float(W / w)
uv[1] = uv[1] * float(H / h)
return uv, [float(H / h), float(W / w)]
def load_exr(path, with_alpha=True):
if not OpenEXR.isOpenExrFile(path):
return None
exr = OpenEXR.InputFile(path)
hdr = exr.header()
dw = hdr['dataWindow']
ch = hdr['channels']
if not ('R' in ch and 'G' in ch and 'B' in ch):
raise ValueError('Wrong EXR data')
if with_alpha and not 'A' in ch:
raise ValueError('EXR file doesn\'t have alpha channel')
sz = (dw.max.x - dw.min.x + 1, dw.max.y - dw.min.y + 1)
tps = {Imath.PixelType.UINT: np.uint, Imath.PixelType.HALF: np.half, Imath.PixelType.FLOAT: float}
r = np.frombuffer(exr.channel('R'), dtype=tps[ch['R'].type.v])
g = np.frombuffer(exr.channel('G'), dtype=tps[ch['G'].type.v])
b = np.frombuffer(exr.channel('B'), dtype=tps[ch['B'].type.v])
if with_alpha:
a = np.frombuffer(exr.channel('A'), dtype=tps[ch['A'].type.v])
img = np.stack((r, g, b, a)).reshape(4, sz[0]*sz[1]).T
else:
img = np.stack((r, g, b, np.ones((sz[0]*sz[1])))).reshape(4, sz[0]*sz[1]).T
img = img.reshape(sz[0], sz[1], -1).astype('float32')
return img
def load_rgb(
path,
resolution=None,
with_alpha=True,
bg_color=[1.0, 1.0, 1.0],
min_rgb=-1,
interpolation='AREA',
preprocessor=None):
# Try loading EXR file
img = load_exr(path, with_alpha)
if img is not None:
interpolation = 'linear'
# If it was not EXR, try loading LDR image
if img is None:
if with_alpha:
img = imageio.imread(path) # RGB-ALPHA
else:
img = imageio.imread(path)[:, :, :3]
img = skimage.img_as_float32(img).astype('float32')
assert img is not None, 'Input image loading failed!'
# Add alpha channel if required
if with_alpha and img.shape[-1] == 3:
mask = np.ones((img.shape[0], img.shape[1], 1))
# h, w, _ = img.shape
# circle_img = np.zeros((h, w), np.uint8)
# cv2.circle(circle_img, (w // 2, h // 2), min(w, h) // 2, 1, thickness=-1)
# mask = cv2.bitwise_and(mask, mask, mask=circle_img)[...,None]
img = np.concatenate([img, mask], -1).astype('float32')
H, W, D = img.shape
h, w = resolution
uv, ratio = get_uv(H, W, h, w)
if (h < H) or (w < W):
if interpolation.lower() == 'area': intp = cv2.INTER_AREA
elif interpolation.lower() == 'nearest': intp = cv2.INTER_NEAREST
elif interpolation.lower() == 'linear': intp = cv2.INTER_LINEAR
else: raise NotImplementedError('Given interpolation type \'{0}\' is not implemented'.format(interpolation))
img = cv2.resize(img, (w, h), interpolation=intp).astype('float32')
if preprocessor:
img = preprocessor.preprocess(img)
# this part is now in preprocessor with option 'nsvf'
# if min_rgb == -1: # 0, 1 --> -1, 1
# img[:, :, :3] -= 0.5
# img[:, :, :3] *= 2.
# img[...,:3] = np.interp(img[...,:3], (img[...,:3].min(), np.percentile(img[...,:3], 99.9)), (-1, 1))
# background blending
img[:, :, :3] = img[:, :, :3] * img[:, :, 3:] + np.asarray(bg_color)[None, None, :] * (1 - img[:, :, 3:])
img[:, :, 3] = img[:, :, 3] * (img[:, :, :3] != np.asarray(bg_color)[None, None, :]).any(-1)
img = img.transpose(2, 0, 1)
return img, uv, ratio
def load_depth(path, resolution=None, depth_plane=5):
if path is None:
return None
img = cv2.imread(path, cv2.IMREAD_UNCHANGED).astype(np.float32)
# ret, img = cv2.threshold(img, depth_plane, depth_plane, cv2.THRESH_TRUNC)
H, W = img.shape[:2]
h, w = resolution
if (h < H) or (w < W):
img = cv2.resize(img, (w, h), interpolation=cv2.INTER_NEAREST).astype('float32')
#img = cv2.resize(img, (w, h), interpolation=cv2.INTER_LINEAR)
if len(img.shape) ==3:
img = img[:,:,:1]
img = img.transpose(2,0,1)
else:
img = img[None,:,:]
return img
def load_mask(path, resolution=None):
if path is None:
return None
img = cv2.imread(path, cv2.IMREAD_GRAYSCALE).astype(np.float32)
h, w = resolution
H, W = img.shape[:2]
if (h < H) or (w < W):
img = cv2.resize(img, (w, h), interpolation=cv2.INTER_NEAREST).astype('float32')
img = img / (img.max() + 1e-7)
return img
def load_matrix(path):
lines = [[float(w) for w in line.strip().split()] for line in open(path)]
if len(lines[0]) == 2:
lines = lines[1:]
if len(lines[-1]) == 2:
lines = lines[:-1]
return np.array(lines).astype(np.float32)
def load_intrinsics(filepath, resized_width=None, invert_y=False):
try:
intrinsics = load_matrix(filepath)
if intrinsics.shape[0] == 3 and intrinsics.shape[1] == 3:
_intrinsics = np.zeros((4, 4), np.float32)
_intrinsics[:3, :3] = intrinsics
_intrinsics[3, 3] = 1
intrinsics = _intrinsics
if intrinsics.shape[0] == 1 and intrinsics.shape[1] == 16:
intrinsics = intrinsics.reshape(4, 4)
return intrinsics
except ValueError:
pass
# Get camera intrinsics
with open(filepath, 'r') as file:
f, cx, cy, _ = map(float, file.readline().split())
fx = f
if invert_y:
fy = -f
else:
fy = f
# Build the intrinsic matrices
full_intrinsic = np.array([[fx, 0., cx, 0.],
[0., fy, cy, 0],
[0., 0, 1, 0],
[0, 0, 0, 1]])
return full_intrinsic
def load_postprocessing_data(filepath):
postprocessing = {
'mean': None,
'std': None,
'min': None,
'max': None,
'prcntmin': None,
'prcntmax': None,
}
try:
with open(filepath, 'r') as file:
l = file.readline()
if len(l) > 0: postprocessing.update({'mean': np.fromstring(l, sep=', ')})
l = file.readline()
if len(l) > 0: postprocessing.update({'std': np.fromstring(l, sep=', ')})
l = file.readline()
if len(l) > 0: postprocessing.update({'min': np.fromstring(l, sep=', ')})
l = file.readline()
if len(l) > 0: postprocessing.update({'max': np.fromstring(l, sep=', ')})
l = file.readline()
if len(l) > 0: postprocessing.update({'prcntmin': np.fromstring(l, sep=', ')})
l = file.readline()
if len(l) > 0: postprocessing.update({'prcntmax': np.fromstring(l, sep=', ')})
except ValueError:
pass
return postprocessing
def unflatten_img(img, width=512):
sizes = img.size()
height = sizes[-1] // width
return img.reshape(*sizes[:-1], height, width)
def square_crop_img(img):
if img.shape[0] == img.shape[1]:
return img # already square
min_dim = np.amin(img.shape[:2])
center_coord = np.array(img.shape[:2]) // 2
img = img[center_coord[0] - min_dim // 2:center_coord[0] + min_dim // 2,
center_coord[1] - min_dim // 2:center_coord[1] + min_dim // 2]
return img
def sample_pixel_from_image(
num_pixel, num_sample,
mask=None, ratio=1.0,
use_bbox=False,
center_ratio=1.0,
width=512,
patch_size=1):
if patch_size > 1:
assert (num_pixel % (patch_size * patch_size) == 0) \
and (num_sample % (patch_size * patch_size) == 0), "size must match"
_num_pixel = num_pixel // (patch_size * patch_size)
_num_sample = num_sample // (patch_size * patch_size)
height = num_pixel // width
_mask = None if mask is None else \
mask.reshape(height, width).reshape(
height//patch_size, patch_size, width//patch_size, patch_size
).any(1).any(-1).reshape(-1)
_width = width // patch_size
_out = sample_pixel_from_image(_num_pixel, _num_sample, _mask, ratio, use_bbox, _width)
_x, _y = _out % _width, _out // _width
x, y = _x * patch_size, _y * patch_size
x = x[:, None, None] + np.arange(patch_size)[None, :, None]
y = y[:, None, None] + np.arange(patch_size)[None, None, :]
out = x + y * width
return out.reshape(-1)
if center_ratio < 1.0:
r = (1 - center_ratio) / 2.0
H, W = num_pixel // width, width
mask0 = np.zeros((H, W))
mask0[int(H * r): H - int(H * r), int(W * r): W - int(W * r)] = 1
mask0 = mask0.reshape(-1)
if mask is None:
mask = mask0
else:
mask = mask * mask0
if mask is not None:
mask = (mask > 0.0).astype('float32')
if (mask is None) or \
(ratio <= 0.0) or \
(mask.sum() == 0) or \
((1 - mask).sum() == 0):
return np.random.choice(num_pixel, num_sample)
if use_bbox:
mask = mask.reshape(-1, width)
x, y = np.where(mask == 1)
mask = np.zeros_like(mask)
mask[x.min(): x.max()+1, y.min(): y.max()+1] = 1.0
mask = mask.reshape(-1)
try:
probs = mask * ratio / (mask.sum()) + (1 - mask) / (num_pixel - mask.sum()) * (1 - ratio)
# x = np.random.choice(num_pixel, num_sample, True, p=probs)
return np.random.choice(num_pixel, num_sample, True, p=probs)
except Exception:
return np.random.choice(num_pixel, num_sample)
def colormap(dz):
return plt.cm.jet(dz)
# return plt.cm.viridis(dz)
# return plt.cm.gray(dz)
def recover_image(img, min_val=-1, max_val=1.0, width=512, bg=None, weight=None, raw=False, pprc=None, gamma=1.):
if raw: return img
pprc = pprc if pprc else Preprocessor()
if 'prcntmax' in pprc.preprocess_data:
max_val = torch.Tensor(pprc.preprocess_data['prcntmax'])[:img.size()[-1]]
sizes = img.size()
height = sizes[0] // width
img = img.float().to('cpu')
# if len(sizes) == 1 and (bg is not None):
# bg_mask = img.eq(bg)[:, None].type_as(img)
if bg is not None:
bg_mask = img.eq(bg).type_as(img)
img = pprc.preprocessInverse(img)
img = (img - min_val) / (max_val - min_val)
img = img ** (1. / gamma)
img = img.clamp(min=0, max=1)
if len(sizes) == 1:
img = torch.from_numpy(colormap(img.numpy())[:, :3])
if weight is not None:
weight = weight.float().to('cpu')
img = img * weight[:, None]
if bg is not None:
# img = img * (1 - bg_mask) + bg_mask
img = img * (1 - bg_mask) + bg_mask * bg
img = img[..., :3]
img = img.reshape(height, width, -1)
return img
def write_images(writer, images, updates):
for tag in images:
img = images[tag]
tag, dataform = tag.split(':')
writer.add_image(tag, img, updates, dataformats=dataform)
def compute_psnr(p, t):
"""Compute PSNR of model image predictions.
:param prediction: Return value of forward pass.
:param ground_truth: Ground truth.
:return: (psnr, ssim): tuple of floats
"""
ssim = skimage.metrics.structural_similarity(p, t, multichannel=True, data_range=1)
psnr = skimage.metrics.peak_signal_noise_ratio(p, t, data_range=1)
return ssim, psnr
def save_point_cloud(filename, xyz, rgb=None):
if rgb is None:
vertex = np.array([(xyz[k, 0], xyz[k, 1], xyz[k, 2]) for k in range(xyz.shape[0])],
dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4')])
else:
vertex = np.array([(xyz[k, 0], xyz[k, 1], xyz[k, 2], rgb[k, 0], rgb[k, 1], rgb[k, 2]) for k in range(xyz.shape[0])],
dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4'), ('red', 'u1'), ('green', 'u1'), ('blue', 'u1')])
# PlyData([PlyElement.describe(vertex, 'vertex')], text=True).write(filename)
# from fairseq import pdb; pdb.set_trace()
PlyData([PlyElement.describe(vertex, 'vertex')]).write(open(filename, 'wb'))
class InfIndex(object):
def __init__(self, index_list, shuffle=False):
self.index_list = index_list
self.size = len(index_list)
self.shuffle = shuffle
self.reset_permutation()
def reset_permutation(self):
if self.shuffle:
self._perm = np.random.permutation(self.index_list).tolist()
else:
self._perm = copy.deepcopy(self.index_list)
def __iter__(self):
return self
def __next__(self):
if len(self._perm) == 0:
self.reset_permutation()
return self._perm.pop()
def __len__(self):
return self.size
class Timer(StopwatchMeter):
def __enter__(self):
"""Start a new timer as a context manager"""
self.start()
return self
def __exit__(self, *exc_info):
"""Stop the context manager timer"""
self.stop()
class GPUTimer(object):
def __enter__(self):
"""Start a new timer as a context manager"""
self.start = torch.cuda.Event(enable_timing=True)
self.end = torch.cuda.Event(enable_timing=True)
self.start.record()
self.sum = 0
return self
def __exit__(self, *exc_info):
"""Stop the context manager timer"""
self.end.record()
torch.cuda.synchronize()
self.sum = self.start.elapsed_time(self.end) / 1000.
class Preprocessor:
def __init__(self): self.preprocess_data = {}
def preprocess(self, img): return img
def preprocessInverse(self, img): return img
class MinMaxPreprocessor(Preprocessor):
_percentileMin = 0
_percentileMax = 99.9
def __init__(self, preprocess_data = None):
raise NotImplementedError('Need to check implementation first!')
self.preprocess_data = {} if preprocess_data is None else preprocess_data
self.min = preprocess_data.get('min', None)
self.max = preprocess_data.get('max', None)
self.tmin = preprocess_data.get('tmin', -1)
self.tmax = preprocess_data.get('tmax', 1)
def preprocess(self, img):
raise NotImplementedError('Should the percentiles be here? does it make any sense?')
if self.min is None or self.max is None:
rgbimg = img[..., 0:3]
rgbimg = rgbimg[rgbimg < np.percentile(rgbimg, MinMaxPreprocessor._percentileMax, axis=None)]
rgbimg = rgbimg[rgbimg > np.percentile(rgbimg, MinMaxPreprocessor._percentileMin, axis=None)]
if self.min is None:
self.min = rgbimg.min()
if self.max is None:
self.max = rgbimg.max()
img[..., 0:3] = np.interp(img[..., 0:3], (self.min, self.max), (self.tmin, self.tmax))
return img
def preprocessInverse(self, img):
if self.mean is None or self.std is None:
raise ValueError('mean or std are not calculated. call preprocess() first to create them')
img[..., 0:3] = np.interp(img[..., 0:3], (self.tmin, self.tmax), (self.min, self.max))
return img
class MSTDPreprocessor(Preprocessor):
def __init__(self, preprocess_data = None):
self.preprocess_data = {} if preprocess_data is None else preprocess_data
self.mean = preprocess_data.get('mean', None)[:3]
self.std = preprocess_data.get('std', None)[:3]
self.axis = 0 if preprocess_data.get('channelwise', None) else None
def preprocess(self, img):
mean = torch.mean if torch.is_tensor(img) else np.mean
std = torch.std if torch.is_tensor(img) else np.std
if self.mean is None or self.std is None:
rgbimg = img[..., 0:3]
if self.mean is None:
self.mean = mean(rgbimg, axis=self.axis)
if self.std is None:
self.std = std(rgbimg, axis=self.axis) + 1e-5
img[..., 0:3] = (img[..., 0:3] - self.mean) / self.std
return img
def preprocessInverse(self, img):
if self.mean is None or self.std is None:
raise ValueError('mean or std are not calculated. call preprocess() first to create them')
img[..., 0:3] = img[..., 0:3] * self.std + self.mean
return img
class LogPreprocessor(Preprocessor):
def __init__(self, preprocess_data = None):
self.preprocess_data = {} if preprocess_data is None else preprocess_data
def preprocess(self, img):
log = torch.log if torch.is_tensor(img) else np.log
img[..., 0:3] = log(img[..., 0:3] + 1.)
return img
def preprocessInverse(self, img):
exp = torch.exp if torch.is_tensor(img) else np.exp
img[..., 0:3] = exp(img[..., 0:3]) - 1
return img
class NSVFPreprocessor(Preprocessor):
# original normalization -1~1. should be used with min_color = -1!!!
def __init__(self, preprocess_data = None):
self.preprocess_data = {} if preprocess_data is None else preprocess_data
def preprocess(self, img):
img[:, :, :3] -= 0.5
img[:, :, :3] *= 2.
return img
def preprocessInverse(self, img):
return img |
class DajareModel:
__text: str
__is_dajare: bool
__score: float
__reading: str
__applied_rule: str
@property
def text(self) -> str:
return self.__text
@text.setter
def text(self, text: str):
if not isinstance(text, str):
raise TypeError('invalid type')
self.__text = text
@property
def is_dajare(self) -> bool:
return self.__is_dajare
@is_dajare.setter
def is_dajare(self, is_dajare: bool):
if not isinstance(is_dajare, bool):
raise TypeError('invalid type')
self.__is_dajare = is_dajare
@property
def score(self) -> float:
return self.__score
@score.setter
def score(self, score: float):
if not isinstance(score, float):
raise TypeError('invalid type')
if not (1.0 <= score <= 5.0):
raise ValueError('score must be 1~5')
self.__score = score
@property
def reading(self) -> str:
return self.__reading
@reading.setter
def reading(self, reading: str):
if not isinstance(reading, str):
raise TypeError('invalid type')
self.__reading = reading
@property
def applied_rule(self) -> str:
return self.__applied_rule
@applied_rule.setter
def applied_rule(self, applied_rule: str):
if not isinstance(applied_rule, str):
raise TypeError('invalid type')
self.__applied_rule = applied_rule
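# Minimal usage sketch (values are illustrative only):
#   d = DajareModel()
#   d.text = 'アルミ缶の上にあるミカン'  # OK: str
#   d.score = 4.5                        # OK: float within 1.0~5.0
#   d.score = 6.0                        # would raise ValueError('score must be 1~5')
#   d.is_dajare = 'yes'                  # would raise TypeError('invalid type')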
|
from utils import CSVScraper
class VancouverPersonScraper(CSVScraper):
# https://opendata.vancouver.ca/explore/dataset/elected-officials-contact-information/
csv_url = 'https://opendata.vancouver.ca/explore/dataset/elected-officials-contact-information/download/?format=csv&timezone=America/New_York&use_labels_for_header=true&csv_separator=%3B'
many_posts_per_area = True
delimiter = ';'
corrections = {
'fax': {
'N/A': None,
},
}
|
import time
import gym.spaces
import numpy as np
import tensorflow as tf
from rlutils.replay_buffers import PyUniformReplayBuffer
from rlutils.infra.runner import TFRunner, run_func_as_main
from rlutils.tf.nn.models import EnsembleDynamicsModel
from rlutils.tf.nn.planners import RandomShooter
class PETSAgent(tf.keras.Model):
def __init__(self, obs_dim, act_dim, mlp_hidden=128, num_ensembles=5, lr=1e-3,
horizon=10, num_particles=5, num_actions=1024):
super(PETSAgent, self).__init__()
self.dynamics_model = EnsembleDynamicsModel(obs_dim=obs_dim, act_dim=act_dim, mlp_hidden=mlp_hidden,
num_ensembles=num_ensembles, lr=lr, reward_fn=None,
terminate_fn=None)
self.inference_model = self.dynamics_model.build_ts_model(horizon=horizon, num_particles=num_particles)
self.planner = RandomShooter(inference_model=self.inference_model, horizon=horizon, num_actions=num_actions)
def set_logger(self, logger):
self.logger = logger
self.dynamics_model.set_logger(logger=logger)
def log_tabular(self):
self.dynamics_model.log_tabular()
def update_model(self, data, batch_size=64, num_epochs=60, patience=None,
validation_split=0.1, shuffle=True):
self.dynamics_model.update(inputs=data, batch_size=batch_size, num_epochs=num_epochs, patience=patience,
validation_split=validation_split, shuffle=shuffle)
def act_batch(self, obs):
return self.planner.act_batch(obs)
class Runner(TFRunner):
def setup_replay_buffer(self,
replay_size):
data_spec = {
'obs': gym.spaces.Space(shape=self.env.single_observation_space.shape,
dtype=np.float32),
'act': gym.spaces.Space(shape=self.env.single_action_space.shape,
dtype=np.float32),
'next_obs': gym.spaces.Space(shape=self.env.single_observation_space.shape,
dtype=np.float32),
'rew': gym.spaces.Space(shape=None, dtype=np.float32),
'done': gym.spaces.Space(shape=None, dtype=np.float32)
}
self.replay_buffer = PyUniformReplayBuffer(data_spec=data_spec,
capacity=replay_size,
batch_size=None,
num_parallel_env=self.num_parallel_env)
def setup_agent(self, mlp_hidden=128, num_ensembles=5, lr=1e-3, horizon=10, num_particles=5, num_actions=1024):
obs_dim = self.env.single_observation_space.shape[0]
act_dim = self.env.single_action_space.shape[0]
self.agent = PETSAgent(obs_dim=obs_dim, act_dim=act_dim,
mlp_hidden=mlp_hidden,
num_ensembles=num_ensembles,
lr=lr, horizon=horizon,
num_particles=num_particles,
num_actions=num_actions)
self.agent.set_logger(self.logger)
def setup_extra(self,
start_steps,
batch_size,
num_model_epochs,
patience,
validation_split):
self.start_steps = start_steps
self.batch_size = batch_size
self.num_model_epochs = num_model_epochs
self.patience = patience
self.validation_split = validation_split
def run_one_step(self, t):
global_env_steps = self.global_step * self.num_parallel_env
if global_env_steps >= self.start_steps:
a = self.agent.act_batch(self.o).numpy()
assert not np.any(np.isnan(a)), f'NAN action: {a}'
else:
a = self.env.action_space.sample()
# Step the env
o2, r, d, _ = self.env.step(a)
self.ep_ret += r
self.ep_len += 1
# Ignore the "done" signal if it comes from hitting the time
# horizon (that is, when it's an artificial terminal signal
# that isn't based on the agent's state)
true_d = np.logical_and(d, self.ep_len != self.max_ep_len)
# Store experience to replay buffer
self.replay_buffer.add(data={
'obs': self.o,
'act': a,
'rew': r,
'next_obs': o2,
'done': true_d
})
# Super critical, easy to overlook step: make sure to update
# most recent observation!
self.o = o2
# End of trajectory handling
if np.any(d):
self.logger.store(EpRet=self.ep_ret[d], EpLen=self.ep_len[d])
self.ep_ret[d] = 0
self.ep_len[d] = 0
self.o = self.env.reset_done()
def on_epoch_end(self, epoch):
# update the model
data = self.replay_buffer.get()
self.agent.update_model(data=data, batch_size=self.batch_size, num_epochs=self.num_model_epochs,
patience=self.patience, validation_split=self.validation_split, shuffle=True)
# Log info about epoch
self.logger.log_tabular('Epoch', epoch)
self.logger.log_tabular('EpRet', with_min_and_max=True)
self.logger.log_tabular('EpLen', average_only=True)
self.logger.log_tabular('TotalEnvInteracts', self.global_step * self.num_parallel_env)
self.agent.log_tabular()
self.logger.log_tabular('Time', time.time() - self.start_time)
self.logger.dump_tabular()
def on_train_begin(self):
self.start_time = time.time()
self.o = self.env.reset()
self.ep_ret = np.zeros(shape=self.num_parallel_env)
self.ep_len = np.zeros(shape=self.num_parallel_env, dtype=np.int64)
@classmethod
def main(cls,
env_name,
steps_per_epoch=400,
epochs=200,
start_steps=2000,
num_parallel_env=2,
seed=1,
# sac args
mlp_hidden=256,
num_ensembles=3,
learning_rate=1e-3,
horizon=10,
num_particles=5,
num_actions=1024,
batch_size=256,
num_model_epochs=60,
patience=10,
validation_split=0.1,
# replay
replay_size=int(1e6),
logger_path: str = None
):
config = locals()
runner = cls(seed=seed, steps_per_epoch=steps_per_epoch // num_parallel_env, epochs=epochs,
exp_name=None, logger_path=logger_path)
runner.setup_env(env_name=env_name, num_parallel_env=num_parallel_env, frame_stack=None, wrappers=None,
asynchronous=False, num_test_episodes=None)
runner.setup_logger(config=config)
runner.setup_agent(mlp_hidden=mlp_hidden, num_ensembles=num_ensembles, lr=learning_rate,
horizon=horizon, num_particles=num_particles, num_actions=num_actions)
runner.setup_extra(start_steps=start_steps,
batch_size=batch_size,
num_model_epochs=num_model_epochs,
patience=patience,
validation_split=validation_split)
runner.setup_replay_buffer(replay_size=replay_size)
runner.run()
if __name__ == '__main__':
run_func_as_main(Runner.main)
|
class Solution:
def lengthOfLongestSubstring(self, s: str) -> int:
ans = 0
dic = {}
i = 0
for j in range(len(s)):
i = max(dic.get(s[j], 0), i)
ans = max(ans, j - i + 1)
dic[s[j]] = j + 1
return ans |
from context import vfdb_to_seqfindr
|
from dataclasses import dataclass
@dataclass()
class ExperimentPaths(object):
pose_resnet_weights_path: str
pose_2d_coco_only_weights_path: str
pedrec_2d_path: str
pedrec_2d_h36m_path: str
pedrec_2d_sim_path: str
pedrec_2d_c_path: str
pedrec_2d_h36m_sim_path: str
pedrec_2d3d_h36m_path: str
pedrec_2d3d_sim_path: str
pedrec_2d3d_h36m_sim_path: str
pedrec_2d3d_c_h36m_path: str
pedrec_2d3d_c_sim_path: str
pedrec_2d3d_c_h36m_sim_path: str
pedrec_2d3d_c_o_h36m_mebow_path: str
pedrec_2d3d_c_o_sim_path: str
pedrec_2d3d_c_o_h36m_sim_path: str
pedrec_2d3d_c_o_h36m_sim_mebow_path: str
pedrec_full_path: str
output_dir: str
coco_dir: str
tud_dir: str
sim_train_dir: str
sim_val_dir: str
h36m_train_dir: str
h36m_val_dir: str
sim_c01_dir: str
sim_c01_filename: str
sim_c01_results_filename: str
sim_c01_val_dir: str
sim_c01_val_filename: str
sim_c01_val_results_filename: str
pretrained_model_path: str = None
sim_train_filename: str = "rt_rom_01b.pkl"
sim_val_filename: str = "rt_validate_3d.pkl"
h36m_val_filename: str = "h36m_val_pedrec.pkl"
h36m_train_filename: str = "h36m_train_pedrec.pkl"
|
import random
import time
import orco
@orco.builder()
def do_something(config):
time.sleep(0.3) # Simulate computation
return random.randint(0, 10)
@orco.builder()
def make_experiment(config):
data = [do_something(x) for x in range(config["difficulty"])]
yield
time.sleep(config["difficulty"]) # Simulate computation
return sum(entry.value for entry in data)
orco.run_cli()
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
from nipy.testing import *
from nipy.core.reference.slices import bounding_box, \
zslice, yslice, xslice
from nipy.core.reference.coordinate_map import AffineTransform
# Names for a 3D axis set
names = ['xspace','yspace','zspace']
class test_Slice(TestCase):
def test_bounding_box(self):
shape = (10, 14, 16)
coordmap = AffineTransform.identity(names)
#print coordmap.affine.dtype, 'affine'
self.assertEqual(bounding_box(coordmap, shape), ([0., 9.], [0, 13], [0, 15]))
def test_box_slice():
t = xslice(5, ([0, 9], 10), ([0, 9], 10))
yield assert_almost_equal,t.affine, [[ 0., 0., 5.],
[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]]
t = yslice(4, ([0, 9], 10), ([0, 9], 10))
yield assert_almost_equal, t.affine, [[ 1., 0., 0.],
[ 0., 0., 4.],
[ 0., 1., 0.],
[ 0., 0., 1.]]
t = zslice(3, ([0, 9], 10), ([0, 9], 10))
yield assert_almost_equal, t.affine, [[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 3.],
[ 0., 0., 1.]]
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from fvcore.nn.focal_loss import sigmoid_focal_loss
from detectron2.layers.wrappers import cat
def _rezise_targets(targets, height, width):
"""
Resize the targets to the same size of logits (add height and width channels)
"""
targets = torch.stack(targets) # convert list of scalar tensors to single tensor
targets = targets.view(
targets.size(0), targets.size(1), 1, 1, # add two dummy spatial dims
)
targets = targets.expand(
targets.size(0), targets.size(1), height, width # expand to spatial size of x
)
return targets
class ImageFDALoss(nn.Module):
def __init__(self, loss_lambda, loss_type='cross_entropy', focal_gamma=2):
super().__init__()
self.loss_lambda = loss_lambda
self.loss_type = loss_type
self.focal_gamma = focal_gamma
def forward(self, img_fda_logits, targets):
losses = []
for logits in img_fda_logits:
targets_resized = _rezise_targets(targets, logits.size(-2), logits.size(-1))
if self.loss_type == "cross_entropy":
loss = F.binary_cross_entropy_with_logits(
logits, targets_resized, reduction='mean',
)
elif self.loss_type == "l2":
scores = torch.sigmoid(logits)
loss = torch.norm(scores - targets_resized, p=2, dim=1).mean()
elif self.loss_type == "focal":
loss = sigmoid_focal_loss(
logits, targets_resized, gamma=self.focal_gamma, reduction="mean"
)
else:
raise ValueError(f"Unsupported loss type \"{self.loss_type}\"")
losses.append(loss)
return sum(losses) / (len(losses) + 1e-8) * self.loss_lambda
class InstanceFDALoss(nn.Module):
def __init__(self, loss_lambda, loss_type='cross_entropy', focal_gamma=2):
super().__init__()
self.loss_lambda = loss_lambda
self.loss_type = loss_type
self.focal_gamma = focal_gamma
def forward(self, instance_fda_logits, instances):
gt_domains = []
for instances_per_img in instances:
if len(instances_per_img) > 0 and hasattr(instances_per_img, "gt_domains"):
gt_domains_per_img = instances_per_img.gt_domains.unsqueeze(-1)
gt_domains.append(gt_domains_per_img)
# Sanity check: All instances in an image should have the same domain label
assert gt_domains_per_img.unique().numel() == 1
# if there is no ground truth, there is no loss to compute
if len(gt_domains) == 0 or instance_fda_logits.shape != cat(gt_domains, dim=0).shape:
return instance_fda_logits.sum() * 0
if self.loss_type == "cross_entropy":
loss = F.binary_cross_entropy_with_logits(
instance_fda_logits, cat(gt_domains, dim=0), reduction='mean',
)
elif self.loss_type == "focal":
loss = sigmoid_focal_loss(
instance_fda_logits, cat(gt_domains, dim=0), gamma=self.focal_gamma, reduction="mean"
)
else:
raise ValueError(f"Unsupported loss type \"{self.loss_type}\"")
return loss * self.loss_lambda
|
from conans import ConanFile, CMake, tools
import os, platform
eastl_version = os.getenv('EASTL_VERSION', '0.0')
eastl_commit = os.getenv('EASTL_COMMIT', '')
class EASTLConan(ConanFile):
name = "eastl"
license = "MIT"
url = "https://github.com/BentouDev/conan-eastl"
version = eastl_version
commit = eastl_commit
description = "EASTL stands for Electronic Arts Standard Template Library. It is an extensive and robust implementation that has an emphasis on high performance."
settings = "os", "compiler", "build_type", "arch"
generators = "cmake"
exports_sources = ["eastl-source/*"]
options = {}
default_options = {}
def source(self):
if platform.system() != "Windows":
return
# This small hack might be useful to guarantee proper /MT /MD linkage in MSVC
# if the packaged project doesn't have variables to set it properly
print (' [*] Injecting conanbuildinfo.cmake...')
tools.replace_in_file("%s/CMakeLists.txt" % ("eastl-source"), "project(EASTL CXX)",
"""project(EASTL CXX)
include(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake)
conan_basic_setup()
add_definitions(-DEASTL_EABASE_DISABLED)""")
def build(self):
# Workaround for conan choosing cmake embedded in Visual Studio
if platform.system() == "Windows" and 'AZURE' in os.environ:
cmake_path = '"C:\\Program Files\\CMake\\bin\\cmake.exe"'
print (' [DEBUG] Forcing CMake : ' + cmake_path)
os.environ['CONAN_CMAKE_PROGRAM'] = cmake_path
cmake = CMake(self)
#cmake.definitions['CMAKE_CXX_COMPILER_ID'] = 'gcc'#self.settings.compiler
#cmake.definitions['CMAKE_CC_COMPILER'] = self.settings.compiler
#cmake.definitions['CMAKE_CC_COMPILER_VERSION'] = self.settings.compiler.version
#cmake.definitions['CMAKE_CXX_COMPILER_VERSION'] = self.settings.compiler.version
#cmake.definitions['EASTL_VERSION'] = self.version
#cmake.definitions['EASTL_COMMIT'] = self.commit
#cmake.definitions['EASTL_CHANNEL'] = self.channel
cmake.definitions['EASTL_BUILD_TESTS'] = True
cmake.configure(source_folder="eastl-source")
cmake.build()
def package(self):
self.copy("*.h", src="eastl-source/test/packages/EABase/include/Common/EABase", dst="include/EABase", keep_path=True)
self.copy("*.h", src="eastl-source/include", dst="include", keep_path=True)
self.copy("*.natvis", src="eastl-source/doc", dst="lib", keep_path=False)
self.copy("*.lib", dst="lib", keep_path=False)
self.copy("*.pdb", dst="lib", keep_path=False)
self.copy("*.a", dst="lib", keep_path=False)
def package_info(self):
self.cpp_info.libs = [ self.name ]
# self.cpp_info.defines = ["EASTL_EABASE_DISABLED"]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Author: Daniel E. Cook
"""
import os
import gunicorn # Do not remove this line - this is here so pipreqs imports
import click
from click import secho
from base.utils.gcloud import get_item
from base.utils.data_utils import zipdir
from base.database import (initialize_sqlite_database,
download_sqlite_database)
from base import constants
from subprocess import Popen, PIPE
# Do not remove gunicorn import
secho(f"gunicorn {gunicorn.SERVER_SOFTWARE}", fg="green")
@click.command(help="Initialize the database")
@click.argument("wormbase_version", default=constants.WORMBASE_VERSION)
def initdb(wormbase_version=constants.WORMBASE_VERSION):
initialize_sqlite_database(wormbase_version)
@click.command(help="Updates the strain table of the database")
@click.argument("wormbase_version", default=constants.WORMBASE_VERSION)
def update_strains(wormbase_version):
initialize_sqlite_database(wormbase_version, strain_only=True)
@click.command(help="Download the database (used in docker container)")
def download_db():
# Downloads the latest SQLITE database
download_sqlite_database()
@click.command(help="Update credentials")
def update_credentials():
"""
Update the credentials zip file
"""
from base.application import create_app
app = create_app()
app.app_context().push()
click.secho("Zipping env_config", fg='green')
zipdir('env_config/', 'env_config.zip')
zip_creds = get_item('credential', 'travis-ci-cred')
click.secho("Encrypting credentials", fg='green')
if os.path.exists("env_config.zip.enc"):
os.remove("env_config.zip.enc")
comm = ['travis',
'encrypt-file',
'env_config.zip',
"--org",
'--key',
zip_creds['key'],
'--iv',
zip_creds['iv']]
print(' '.join(comm))
out, err = Popen(comm, stdout=PIPE, stderr=PIPE).communicate()
secho(str(out, 'utf-8'), fg='green')
if err:
exit(secho(str(err, 'utf-8'), fg='red'))
os.remove("env_config.zip")
@click.command(help="Decrypt credentials")
def decrypt_credentials():
from base.application import create_app
app = create_app()
app.app_context().push()
click.secho("Decrypting env_config.zip.enc", fg='green')
zip_creds = get_item('credential', 'travis-ci-cred')
comm = ['travis',
'encrypt-file',
'env_config.zip.enc',
'--force',
'--key',
zip_creds['key'],
'--iv',
zip_creds['iv'],
'--decrypt']
out, err = Popen(comm, stdout=PIPE, stderr=PIPE).communicate()
click.secho(str(out, 'utf-8'), fg='green')
if err:
exit(secho(str(err, 'utf-8'), fg='red'))
click.secho("Unzipping env_config.zip", fg='green')
comm = ['unzip', '-qo', 'env_config.zip']
out, err = Popen(comm, stdout=PIPE, stderr=PIPE).communicate()
click.secho(str(out, 'utf-8'), fg='green')
if err:
exit(secho(str(err, 'utf-8'), fg='red'))
os.remove("env_config.zip")
|
def get_expected(placed, lowest, override):
partial = [p for p in placed if p <= lowest]
if override > 0:
partial = partial[:-override]
only_mine = 37 - len(placed)
lowest_cnt = only_mine + len(partial)
if lowest_cnt == 0: return 0
ret = 0.0
ret += lowest * 36.0 * only_mine / float(lowest_cnt)
for p in partial:
ret += 36.0 * (lowest - p) * 1.0 / lowest_cnt
return ret
def _main():
infile = open("codejam/test_files/Y13R5P1/A.in")
cases = int(infile.readline())
for cc in xrange(cases):
budget, bets = map(int, infile.readline().split())
placed = sorted(map(int, infile.readline().split()))
ret = 0.0
queue = [1] + placed + [p-1 for p in placed] + [p+1 for p in placed]
queue = sorted(set(queue))
seen = set(queue)
while queue:
lowest = queue.pop()
if lowest == 0: continue
needed_budget = (37 - len(placed)) * lowest
for p in placed: needed_budget += max(0, lowest - p)
if budget < needed_budget: continue
remaining_budget = budget - needed_budget
partial = len([p for p in placed if p <= lowest])
lowest_cnt = 37 - len(placed) + partial
if lowest_cnt == 0: continue
larger = [p for p in placed if p > lowest]
if larger:
next_larger = min(larger)
can_replicate = min(next_larger - lowest - 1,
remaining_budget / lowest_cnt)
else:
can_replicate = remaining_budget / lowest_cnt
if can_replicate > 0:
if (lowest + can_replicate) not in seen:
seen.add(lowest + can_replicate)
queue.append(lowest + can_replicate)
if (lowest + can_replicate - 1) not in seen:
seen.add(lowest + can_replicate - 1)
queue.append(lowest + can_replicate - 1)
for exclude in xrange(0, min(remaining_budget, partial)+1):
# import pdb; pdb.set_trace()
cand = get_expected(placed, lowest, exclude) - exclude - needed_budget
# print ('lowest = %d required_budget = %d exclude = %d ret = %.4lf' %
# (lowest, needed_budget, exclude, cand))
ret = max(ret, cand)
print 'Case #%d: %.10lf' % (cc + 1, ret)
infile.close()
if __name__ == "__main__":
_main()
|
# coding=utf-8
# Copyright 2021 The init2winit Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for datasets.py."""
from absl.testing import absltest
from absl.testing import parameterized
from init2winit.dataset_lib import data_utils
import numpy as np
desired_batch_size = 23
batch_axes = [0, 0, 3, 2]
test_names = ['default', 'NHWC', 'HWCN', 'HWNC']
image_formats = [None, 'NHWC', 'HWCN', 'HWNC']
batch_size = 13
width = 11
num_channels = 3
input_shapes = [
(batch_size, width, width, num_channels),
(batch_size, width, width, num_channels),
(width, width, num_channels, batch_size),
(width, width, batch_size, num_channels),
]
test_parameters = zip(test_names, image_formats, batch_axes, input_shapes)
class DataUtilsTest(parameterized.TestCase):
"""Unit tests for datasets.py."""
@parameterized.named_parameters(*test_parameters)
def test_padding(self, image_format, batch_axis, input_shape):
"""Test that the shape is the expected padded shape."""
batch = {'inputs': np.ones(input_shape)}
padded_batch = data_utils.maybe_pad_batch(
batch, desired_batch_size, image_format)
expected_shapes = list(input_shape)
expected_shapes[batch_axis] = desired_batch_size
self.assertEqual(padded_batch['inputs'].shape, tuple(expected_shapes))
self.assertEqual(padded_batch['weights'].shape, (desired_batch_size,))
def test_padding_seq2seq(self):
"""Test padding for sequence-to-sequence models."""
input_len_max = 25
input_len_true = 22 # true input_seq_length for each example in batch.
target_len_max = 25
target_len_true = 21 # true target_seq_length for each example in batch.
inputs_shape = (batch_size, input_len_max)
targets_shape = (batch_size, target_len_max)
batch = {'inputs': np.ones(inputs_shape), 'targets': np.ones(targets_shape)}
batch['inputs'][:, input_len_true:] = 0 # zero-pad extra inputs tokens
batch['targets'][:, target_len_true:] = 0 # zero-pad extra targets tokens
expected_inputs_shape = (desired_batch_size, input_len_max)
expected_targets_shape = (desired_batch_size, target_len_max)
expected_weights_shape = (desired_batch_size, target_len_max)
padded_batch = data_utils.maybe_pad_batch(
batch, desired_batch_size, data_format=None, mask_key='targets')
self.assertEqual(padded_batch['inputs'].shape, expected_inputs_shape)
self.assertEqual(padded_batch['targets'].shape, expected_targets_shape)
self.assertEqual(padded_batch['weights'].shape, expected_weights_shape)
batch_pad = desired_batch_size - batch_size
expected_weights_array = np.ones((desired_batch_size, target_len_max))
# pad at batch axis
expected_weights_array[-batch_pad:] = 0
        # pad at sequence_len axis
expected_weights_array[:, target_len_true:] = 0
self.assertTrue(
np.array_equal(padded_batch['weights'], expected_weights_array))
if __name__ == '__main__':
absltest.main()
|
# coding=utf-8
# Copyright 2022 The Robustness Metrics Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Reports to compute robustness scores of machine learning models."""
from typing import Text, Type
from robustness_metrics.reports import base
from robustness_metrics.reports import cifar_variants
from robustness_metrics.reports import imagenet_variants
from robustness_metrics.reports import ood_detection
from robustness_metrics.reports import synthetic_variants
def get(report_spec) -> base.Report:
"""Load the report registered under the different name.
Args:
report_spec: A specification of the report to be constructed.
Returns:
A report constructed using the given spec.
"""
return base.registry.get_instance(report_spec)
|
"""
Tests all snippets in the docs and readme like this:
```yaml
rules:
```
To exclude, use shorthand `yml`.
"""
import re
import fs
from schema import SchemaError
from organize.actions import ACTIONS
from organize.config import CONFIG_SCHEMA, load_from_string
from organize.filters import FILTERS
RE_CONFIG = re.compile(r"```yaml\n(?P<config>rules:(?:.*?\n)+?)```", re.MULTILINE)
def test_examples_are_valid():
docdir = fs.open_fs(".")
for f in docdir.walk.files(filter=["*.md"], max_depth=2):
text = docdir.readtext(f)
for match in RE_CONFIG.findall(text):
err = ""
try:
config = load_from_string(match)
CONFIG_SCHEMA.validate(config)
except SchemaError as e:
print(f"{f}:\n({match})")
err = e.autos[-1]
assert not err
def test_all_filters_documented():
docdir = fs.open_fs("docs")
filter_docs = docdir.readtext("filters.md")
for name in FILTERS.keys():
assert "## {}".format(name) in filter_docs
def test_all_actions_documented():
docdir = fs.open_fs("docs")
action_docs = docdir.readtext("actions.md")
for name in ACTIONS.keys():
assert "## {}".format(name) in action_docs
|
from .forms import ContactForm
from django.contrib import messages
from django.conf import settings
from django.shortcuts import reverse, redirect, render
from post_office import mail
def index(request):
context = {}
return render(request, 'DSSCDB/index.html', context)
def contact_us(request):
form_data = ContactForm(request.POST or None)
if form_data.is_valid():
messages.add_message(request, messages.SUCCESS, 'The message has been received. Thanks for contacting us!',
extra_tags="Received!")
_, mail_to = zip(*settings.ADMINS)
mail.send(
sender=settings.DEFAULT_FROM_MAIL,
recipients=list(mail_to),
template='contact_form_email',
context={'message': form_data.cleaned_data.get('content'),
'contact_name': form_data.cleaned_data.get('contact_name'),
'contact_email': form_data.cleaned_data.get('contact_email')
},
)
return redirect(reverse('index'))
else:
return render(request, 'DSSCDB/contact-us.html', {
'contact_form': form_data,
})
|
def add(x: int, y: int) -> int:
"""Take two integers and returns the sum of them."""
return x + y
|
from lib import action
class ConsulAclCreateAction(action.ConsulBaseAction):
def run(self, name=None, acl_type='client', rules=None):
        # rules may be None (the default); only encode when a value was given
        if rules is not None:
            rules = rules.encode('ascii', 'ignore')
acl_id = self.consul.acl.create(name=name, type=acl_type, rules=rules)
return acl_id
|
from Assistant.exts.networks import internetConnection
from Assistant.utils.exceptions import InternetException
if internetConnection() is False:
raise InternetException(
"Alice works with INTERNET, Please get connected with INTERNET."
)
import threading
from os import startfile, path
import sys
import Assistant
from Assistant import alice # noqa
from Assistant.exts.workWithFiles import DailyWorksExel
from Assistant.constants import Client
__authors__ = ("Abhinav", "Brodevil") # Both are the same person lol
tasks = DailyWorksExel(path.join(Client.ALICE_PATH, "DailyWorks.xlsx"))
DailyTasks = threading.Thread(target=alice.dailyTaskReminder, args=(tasks,))
# Running part of the Alice Program
if __name__ == "__main__":
"""
    Alice takes voice input from the user and converts it into text.
    Then, by word-to-word matching, it checks the query
    and executes the corresponding tasks.
"""
if Client.ALICE_PASSWORD is not None:
password = str()
alice.speak(
"Alice is password Protected, Kindly Please type the Password To Access Alice!"
)
while password != Client.ALICE_PASSWORD:
alice.speak(
"Incorrect Password, Access not Granted! Please Try Again."
) if password != "" else None
password = input("Enter the password of Alice : \t")
else:
alice.speak("Access Granted.")
if len(sys.argv) <= 1:
alice.intro() # Introduction of Alice
DailyTasks.start() # daily task reminding will start here using Multiprocessing
# The program will be going to run on Infinite loop
while True:
queary = alice.takeCommand().lower()
if "sleep" in queary or "break" in queary or "rest" in queary:
alice.speak(
f"Okay {Client.GENDER}! I am going for sleep, Call me any time for any help!"
)
startfile(
path.join(
Client.ALICE_PATH, "Assistant//resources//images//Ribbons.scr"
)
)
while "wake up" not in queary and "back to work" not in queary:
queary = alice.takeCommand()
else:
alice.speak(
f"{alice.goodWish} {Client.GENDER}! How May I can help you!"
)
# Logic of Program
if (
queary != "none"
and "skip this one" not in queary
or "leave this one"
or "leave that one"
):
Assistant.logic(queary)
|
from lxml import etree
from parser import Parser
from utils import tidy_dict
'''
Strip out an identified set of elements for the
triplestore/ontology definitions.
Strip out the rest of the text with its associated, namespaced xpath,
just in case.
'''
class BaseReader():
'''
parameters:
        _service_descriptors: dict mapping a "generic" key to the xpath
            for that element in the specific xml structure,
            e.g. abstract: idinfo/descript/abstract
'''
_service_descriptors = {}
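    # For illustration only (hypothetical subclass values, reusing the xpath
    # example from the class docstring), a concrete reader might define:
    #   _service_descriptors = {'abstract': 'idinfo/descript/abstract'}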
def __init__(self, response, url):
self._response = response
self._url = url
self._load_xml()
def _load_xml(self):
self.parser = Parser(self._response)
def _remap_http_method(self, original_method):
'''
return the "full" http method from some input
'''
definition = {
"HTTP GET": ['get'],
"HTTP POST": ['post']
}
for k, v in definition.iteritems():
if original_method.lower() in v:
return k
return original_method
def return_service_descriptors(self):
'''
basic service information
title
        abstract
note: what to do about keywords (thesaurus + list + type)?
keywords
'''
service_elements = {}
for k, v in self._service_descriptors.iteritems():
# v can be a list of possible xpaths where we want
# to keep all returned values from any xpath within
elems = []
xpaths = v if isinstance(v, list) else [v]
for xp in xpaths:
elems += self.parser.find(xp)
if elems:
# return everything as a list for the triplestore
service_elements[k] = [e.text if isinstance(e, etree._Element) else e for e in elems] if len(elems) > 1 \
else ([elems[0].text] if isinstance(elems[0], etree._Element) else elems)
endpoints = self.parse_endpoints()
if endpoints:
service_elements['endpoints'] = endpoints
return service_elements
def return_dataset_descriptors(self):
'''
no generic handling for this unfortunately.
'''
pass
def return_metadata_descriptors(self):
'''
no generic handling for this unfortunately.
'''
pass
def return_everything_else(self, excluded_elements):
'''
return any text value/attribute that wasn't extracted
for the main service definition or endpoint definition
or any ontology-related need
'''
return self.parser.find_nodes(excluded_elements)
def parse_service(self):
'''
main service parsing method: pull all defined elements,
pull anything else text/attribute related
returns:
dict {service: 'anything ontology-driven', remainder: 'any other text/attribute value'}
'''
service = {
"service": self.return_service_descriptors(),
"dataset": self.return_dataset_descriptors(),
"metadata": self.return_metadata_descriptors()
}
excluded = self.return_exclude_descriptors()
service['remainder'] = self.return_everything_else(excluded)
self.service = tidy_dict(service)
return self.service
def return_exclude_descriptors(self):
'''
return a list of fully qualified xpaths used for the service description,
endpoint description, etc, to flag those as "excluded" from the
rest of the xml parsing
note:
this might have certain nested structures depending on the service
'''
return []
def parse_endpoints(self):
return []
|
from django.urls import path
from . import views
urlpatterns = [
path("", views.IndexView.as_view(), name="index"),
path("login", views.LoginView.as_view(), name="login"),
path("logout", views.LogoutView.as_view(), name="logout"),
path("register", views.RegisterView.as_view(), name="register"),
# path("register", views.register, name="register"),
path("following", views.FollowingView.as_view(), name="following"),
path("edit", views.EditView.as_view(), name="edit"),
path("<str:profile_name>", views.ProfileView.as_view(), name="profile"),
]
|
"""summary cope survey fields
Revision ID: 16452fdb1334
Revises: c0394a487b8b
Create Date: 2020-05-12 14:02:39.592019
"""
from alembic import op
import sqlalchemy as sa
import rdr_service.model.utils
from sqlalchemy.dialects import mysql
from rdr_service.participant_enums import PhysicalMeasurementsStatus, QuestionnaireStatus, OrderStatus
from rdr_service.participant_enums import WithdrawalStatus, WithdrawalReason, SuspensionStatus, QuestionnaireDefinitionStatus
from rdr_service.participant_enums import EnrollmentStatus, Race, SampleStatus, OrganizationType, BiobankOrderStatus
from rdr_service.participant_enums import OrderShipmentTrackingStatus, OrderShipmentStatus
from rdr_service.participant_enums import MetricSetType, MetricsKey, GenderIdentity
from rdr_service.model.base import add_table_history_table, drop_table_history_table
from rdr_service.model.code import CodeType
from rdr_service.model.site_enums import SiteStatus, EnrollingStatus, DigitalSchedulingStatus, ObsoleteStatus
# revision identifiers, used by Alembic.
revision = '16452fdb1334'
down_revision = 'c0394a487b8b'
branch_labels = None
depends_on = None
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_rdr():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('participant_summary', sa.Column('questionnaire_on_cope_july', rdr_service.model.utils.Enum(QuestionnaireStatus), nullable=True))
op.add_column('participant_summary', sa.Column('questionnaire_on_cope_july_authored', rdr_service.model.utils.UTCDateTime(), nullable=True))
op.add_column('participant_summary', sa.Column('questionnaire_on_cope_july_time', rdr_service.model.utils.UTCDateTime(), nullable=True))
op.add_column('participant_summary', sa.Column('questionnaire_on_cope_june', rdr_service.model.utils.Enum(QuestionnaireStatus), nullable=True))
op.add_column('participant_summary', sa.Column('questionnaire_on_cope_june_authored', rdr_service.model.utils.UTCDateTime(), nullable=True))
op.add_column('participant_summary', sa.Column('questionnaire_on_cope_june_time', rdr_service.model.utils.UTCDateTime(), nullable=True))
op.add_column('participant_summary', sa.Column('questionnaire_on_cope_may', rdr_service.model.utils.Enum(QuestionnaireStatus), nullable=True))
op.add_column('participant_summary', sa.Column('questionnaire_on_cope_may_authored', rdr_service.model.utils.UTCDateTime(), nullable=True))
op.add_column('participant_summary', sa.Column('questionnaire_on_cope_may_time', rdr_service.model.utils.UTCDateTime(), nullable=True))
# ### end Alembic commands ###
def downgrade_rdr():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('participant_summary', 'questionnaire_on_cope_may_time')
op.drop_column('participant_summary', 'questionnaire_on_cope_may_authored')
op.drop_column('participant_summary', 'questionnaire_on_cope_may')
op.drop_column('participant_summary', 'questionnaire_on_cope_june_time')
op.drop_column('participant_summary', 'questionnaire_on_cope_june_authored')
op.drop_column('participant_summary', 'questionnaire_on_cope_june')
op.drop_column('participant_summary', 'questionnaire_on_cope_july_time')
op.drop_column('participant_summary', 'questionnaire_on_cope_july_authored')
op.drop_column('participant_summary', 'questionnaire_on_cope_july')
# ### end Alembic commands ###
def upgrade_metrics():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
def downgrade_metrics():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
|
import setuptools
from setuptools import setup, find_packages
import os
path = os.path.dirname(__file__)
with open(path + "/README.src.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name='jittor',
version='1.0.0',
# scripts=[],
author="Jittor Group",
author_email="[email protected]",
description="a Just-in-time(JIT) deep learning framework",
long_description=long_description,
long_description_content_type="text/markdown",
url="http://jittor.org",
# packages=setuptools.find_packages(),
python_requires='>=3.7',
packages=["jittor", "jittor.test", "jittor.models", "jittor.utils", "jittor_utils"],
package_dir={'':path+'/python'},
package_data={'': ['*', '*/*', '*/*/*','*/*/*/*','*/*/*/*/*','*/*/*/*/*/*']},
# include_package_data=True,
install_requires=[
"pybind11",
"numpy",
"tqdm",
"pillow",
"astunparse",
],
) |
from typing import Optional
from pydantic import BaseSettings
class EnvSettings(BaseSettings):
SQL_DSN: Optional[str] = "sqlite:///db.sqlite3"
class Config:
env_file = ".env"
env_file_encoding = "utf-8"
|
from django.utils import translation
class PybbMiddleware(object):
def process_request(self, request):
if request.user.is_authenticated():
profile = request.user.pybb_profile
language = translation.get_language_from_request(request)
if not profile.language:
profile.language = language
profile.save()
if profile.language and profile.language != language:
request.session['django_language'] = profile.language
translation.activate(profile.language)
request.LANGUAGE_CODE = translation.get_language()
|
import simtk.unit as u
from simtk.openmm import app
import simtk.openmm as mm
from openmoltools import gafftools, system_checker
ligand_name = "sustiva"
ligand_path = "./chemicals/%s/" % ligand_name
temperature = 300 * u.kelvin
friction = 0.3 / u.picosecond
timestep = 0.1 * u.femtosecond
prmtop = app.AmberPrmtopFile("%s/%s.prmtop" % (ligand_path, ligand_name))
inpcrt = app.AmberInpcrdFile("%s/%s.inpcrd" % (ligand_path, ligand_name))
system_prm = prmtop.createSystem(nonbondedMethod=app.NoCutoff, nonbondedCutoff=1.0*u.nanometers, constraints=None)
mol2 = gafftools.Mol2Parser("%s/%s.mol2" % (ligand_path, ligand_name))
top, xyz = mol2.to_openmm()
forcefield = app.ForceField("%s/%s.xml" % (ligand_path, ligand_name))
system_xml = forcefield.createSystem(top, nonbondedMethod=app.NoCutoff, nonbondedCutoff=1.0*u.nanometers, constraints=None)
integrator_xml = mm.LangevinIntegrator(temperature, friction, timestep)
simulation_xml = app.Simulation(top, system_xml, integrator_xml)
simulation_xml.context.setPositions(xyz)
integrator_prm = mm.LangevinIntegrator(temperature, friction, timestep)
simulation_prm = app.Simulation(prmtop.topology, system_prm, integrator_prm)
simulation_prm.context.setPositions(xyz)
checker = system_checker.SystemChecker(simulation_xml, simulation_prm)
checker.check_force_parameters()
energy0, energy1 = checker.check_energies()
abs((energy0 - energy1) / u.kilojoules_per_mole)
|
from django.urls import path, include
from rest_framework.routers import DefaultRouter
from recipe import views
# DefaultRouter will generate routes for our views (recipe.views)
router = DefaultRouter()
router.register('tags', views.TagViewSet)
router.register('ingredients', views.IngredientViewSet)
# for reverse() to look up url
app_name = 'recipe'
urlpatterns = [
path('', include(router.urls))
]
|
import os
import torch
from autoattack import AutoAttack
from torch.utils import data
from lens.models.robust_cnn_classifier import RobustCNNClassifier
from lens.utils.datasets import SingleLabelDataset
class AdversarialAttackError(BaseException):
pass
class AdversarialAttackNotPerformedError(AdversarialAttackError):
pass
class AdversarialAttackNotConsistentError(AdversarialAttackError):
pass
def check_l2_adversarial_data_consistency(x_adv, x_test, epsilon):
res = ((x_adv - x_test) ** 2).view(x_test.shape[0], -1).sum(-1).sqrt()
if (res.max().item() - epsilon) / epsilon > 0.01:
print(f"There was a problem in loading adv dataset, maximum perturbation {res.max().item()} exceeded "
f"epsilon {epsilon}, by {(res.max().item() - epsilon) / epsilon}")
raise AdversarialAttackNotConsistentError()
else:
print("Loaded adversarial data consistent")
def create_single_label_dataset(dataset: torch.utils.data.Dataset, main_classes: range, batch_size: int = 256):
test_loader = data.DataLoader(dataset, batch_size=batch_size, shuffle=False, num_workers=0)
x_test, y_test = [], []
for (x, y) in test_loader:
x_test.append(x), y_test.append(y)
x_test, y_test = torch.cat(x_test, 0), torch.cat(y_test)
y_test = y_test[:, main_classes].argmax(dim=1)
single_label_dataset = SingleLabelDataset(x_test, y_test)
return x_test, y_test, single_label_dataset
def single_label_evaluate(model: RobustCNNClassifier, dataset: SingleLabelDataset, main_classes: range,
reject: bool = False, adv: bool = False, batch_size: int = 128,
device: torch.device = torch.device("cpu")):
model.eval() # ALWAYS REMEMBER TO SET EVAL WHEN EVALUATING A RESNET
model.to(device), model.model.to(device)
outputs, labels, cons_losses, rejections = [], [], [], []
with torch.no_grad():
loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, num_workers=0, shuffle=False)
for i, loaded_data in enumerate(loader):
batch_data, batch_labels = loaded_data[0].to(device), loaded_data[1].to(device)
batch_output = model.forward(batch_data, logits=False)
if reject:
assert model.threshold is not None, "Threshold not calculated. self.calc_threshold need to be " \
"called before any forward operation when reject is set\n"
cons_loss = model.constraint_loss(batch_output, sum_reduction=False)
model.threshold = model.threshold.to(device=cons_loss.device)
batch_rejections = cons_loss > model.threshold
batch_output[batch_rejections == 1] = -1
else:
batch_rejections = torch.zeros(batch_output.shape[0])
batch_output = batch_output[:, main_classes].argmax(dim=1)
outputs.append(batch_output), labels.append(batch_labels), rejections.append(batch_rejections)
outputs, labels = torch.cat(outputs, dim=0), torch.cat(labels, dim=0)
rejections = torch.cat(rejections, dim=0)
if adv:
outputs[rejections == 1] = labels[rejections == 1]
acc_single_label = labels.eq(outputs).sum().item() / outputs.shape[0] * 100
rejection_rate = rejections.sum().item() / len(dataset) * 100
model.set_eval_main_classes(False)
model.train()
return acc_single_label, rejection_rate
def load_adversarial_data(attack_path, load_sec_eval, attack, k_to_attack, seed, device, x_test, y_test, epsilon):
if not (load_sec_eval and os.path.isfile(attack_path)):
raise AdversarialAttackNotPerformedError()
print(f"Attack {attack} against classifier constr {k_to_attack} seed {seed} already performed. "
f"Loading saved data")
adv_data = torch.load(attack_path, map_location=device)
x_adv, y_adv = adv_data
check_l2_adversarial_data_consistency(x_adv, x_test, epsilon)
single_label_dataset_adv = SingleLabelDataset(x_adv, y_adv)
return single_label_dataset_adv
def generate_adversarial_data(model: RobustCNNClassifier, dataset: SingleLabelDataset, dataset_name: str,
attack: str = "apgd-ce", epsilon: float = 0.5, batch_size: int = 128,
result_folder: str = ".", device: torch.device = torch.device("cpu")) \
-> SingleLabelDataset:
x_test, y_test = dataset.x, dataset.y
attack_path = "attack_" + attack + "_" + dataset_name + "_eps_" + str(epsilon)
attack_path = os.path.join(result_folder, attack_path)
print("Running attack " + attack + "...")
if os.path.isfile(attack_path):
x_adv, y_adv = torch.load(attack_path)
print("Attack already performed")
else:
adversary = AutoAttack(model, norm='L2', eps=epsilon, device=device)
model.eval() # REMEMBER TO SET MODEL EVAL FOR RESNET BEFORE ATTACKING IT!
model.set_eval_logits()
model.set_eval_main_classes()
adversary.attacks_to_run = [attack]
x_adv = adversary.run_standard_evaluation(x_test, y_test, bs=batch_size)
model.set_eval_main_classes(False)
model.set_eval_logits(False)
print("Finished attack")
y_adv = y_test
torch.save((x_adv, y_adv), attack_path)
single_label_dataset_adv = SingleLabelDataset(x_adv, y_adv)
return single_label_dataset_adv
|
import config
import redis
import telebot
bot = telebot.TeleBot(config.API_TOKEN)
db = redis.Redis.from_url(config.REDIS_URI)
|
# Generated by Django 2.0.7 on 2020-05-06 04:56
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('categories', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='categories',
name='subcategories',
field=models.CharField(blank=True, max_length=120),
),
]
|
import collections
import numpy as np
class QCAspect(collections.namedtuple('QCAspect', 'label units data comment doi glossary')):
"""Facilitates the storage of quantum chemical results by labeling them with basic metadata.
Attributes
----------
label : str
Official label for `data`, often qcvar. May contain spaces.
units : str
ASCII, LaTeX-like representation of units, without square brackets.
data : float or :py:class:`numpy.ndarray`
Value for `label`.
comment : str, optional
Additional notes.
doi : str, optional
Literature citation or definition DOI link.
glossary : str, optional
Extended description or definition.
"""
def __new__(cls, label, units, data, comment='', doi=None, glossary=''):
return super(QCAspect, cls).__new__(cls, label, units, data, comment, doi, glossary)
def __str__(self, label=''):
width = 40
text = []
text.append('-' * width)
text.append('{:^{width}}'.format('QCAspect ' + self.label, width=width))
if label:
            text.append('{:^{width}}'.format(label, width=width))
text.append('-' * width)
text.append('Data: {}'.format(self.data))
text.append('Units: [{}]'.format(self.units))
text.append('doi: {}'.format(self.doi))
text.append('Comment: {}'.format(self.comment))
text.append('Glossary: {}'.format(self.glossary))
text.append('-' * width)
return ('\n'.join(text))
def to_dict(self):
dicary = dict(self._asdict()) # dict, not OrderedDict
for d in ['doi', 'comment', 'glossary']:
dicary.pop(d)
if isinstance(self.data, (np.ndarray, np.number)):
            if np.issubdtype(self.data.dtype, np.complexfloating):
dicary['data'] = [dicary['data'].real.tolist(), dicary['data'].imag.tolist()]
else:
dicary['data'] = dicary['data'].tolist()
        elif isinstance(self.data, complex):
dicary['data'] = [self.data.real, self.data.imag]
return dicary
|
# Generated by Django 2.2.24 on 2022-03-06 04:17
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('appearance', '0003_auto_20210823_2114'),
]
operations = [
migrations.AddField(
model_name='theme',
name='brand_name',
field=models.CharField(blank=True, help_text='Set text Brand.', max_length=100, verbose_name='Brand name'),
),
]
|
# 2100. Свадебный обед
# solved
invitations_num = int(input())
friends_total = 2
for i in range(invitations_num):
friend_name = input()
if friend_name.find('+') != -1:
friends_total += 2
else:
friends_total += 1
if friends_total == 13:
friends_total += 1
print(friends_total * 100)
|
#!/usr/bin/env python
########################################################################.......
u"""filenav for Pythonista, version 2, by dgelessus.
This is the "slim" version of filenav 2. It consists of a single
navigable table and is thus ideal for use on an iPhone or iPod touch, or
even on an iPad in popover mode.
A separate version intended for larger screens can be found under
`full.py`. It is designed to be run in panel mode on an iPad, though it
may also be usable on large iPhones in landscape mode.
"""
from __future__ import division, print_function
import argparse # For runtime argument parsing
import sys # For runtime arguments
import ui # Guess why
from filenav import common
try:
unicode
except NameError:
unicode = str
MODE = "panel"
##MODE = "popover" # For testing on iPad
class SlimFilenavApp(common.FilenavApp):
def push_view(self, view):
return self.root.push_view(view)
def pop_view(self):
return self.root.pop_view()
def main(args):
global fnapp # Technically not necessary, but useful for testing
ap = argparse.ArgumentParser(description=__doc__)
ap.add_argument(u"dir", type=unicode, nargs="?",
help=u"initial directory, defaults to favorites list")
ns = ap.parse_args(args)
fnapp = SlimFilenavApp()
lst = fnapp.make_favs_list(common.full_path("./favorites.json"))
lst.left_button_items = (
ui.ButtonItem(
image=ui.Image.named("ionicons-close-24"),
action=(lambda sender: fnapp.close()),
),
)
fnapp.root = ui.NavigationView(lst)
fnapp.root.navigation_bar_hidden = False
fnapp.root.flex = "WH"
if MODE == "popover":
fnapp.root.height = 1000
if ns.dir:
fnapp.push_view(fnapp.make_file_list(common.FileItem(ns.dir)))
fnapp.root.present(MODE, hide_title_bar=True)
sys.exit(0)
if __name__ == "__main__":
main(sys.argv[1:]) |
# -*- coding: utf-8 -*-
# !/usr/bin/python
import ctypes
import logging
import sys
import traceback
from lazagne.config.change_privileges import list_sids, rev2self, impersonate_sid_long_handle
from lazagne.config.users import get_user_list_on_filesystem, set_env_variables, get_username_winapi
from lazagne.config.dpapi_structure import SystemDpapi, are_masterkeys_retrieved
from lazagne.config.execute_cmd import save_hives, delete_hives
from lazagne.config.write_output import print_debug, StandardOutput
from lazagne.config.constant import constant
from lazagne.config.manage_modules import get_categories, get_modules
# Useful for the Pupy project
# workaround to this error: RuntimeError: maximum recursion depth exceeded while calling a Python object
sys.setrecursionlimit(10000)
def create_module_dic():
if constant.modules_dic:
return constant.modules_dic
modules = {}
# Define a dictionary for all modules
for category in get_categories():
modules[category] = {}
# Add all modules to the dictionary
for m in get_modules():
modules[m.category][m.options['dest']] = m
constant.modules_dic = modules
return modules
def run_module(title, module):
"""
Run only one module
"""
try:
constant.st.title_info(title.capitalize()) # print title
pwd_found = module.run() # run the module
constant.st.print_output(title.capitalize(), pwd_found) # print the results
# Return value - not used but needed
yield True, title.capitalize(), pwd_found
except Exception:
error_message = traceback.format_exc()
print_debug('DEBUG', error_message)
yield False, title.capitalize(), error_message
def run_modules(module, subcategories={}, system_module=False):
"""
Run modules inside a category (could be one or multiple modules)
"""
modules_to_launch = []
# Launch only a specific module
for i in subcategories:
if subcategories[i] and i in module:
modules_to_launch.append(i)
# Launch all modules
if not modules_to_launch:
modules_to_launch = module
for i in modules_to_launch:
        # Only the current user can access the HKCU registry or use APIs that can only be run from the user's environment
if not constant.is_current_user:
if module[i].registry_used or module[i].only_from_current_user:
continue
if system_module ^ module[i].system_module:
continue
if module[i].winapi_used:
constant.module_to_exec_at_end['winapi'].append({
'title': i,
'module': module[i],
})
continue
if module[i].dpapi_used:
constant.module_to_exec_at_end['dpapi'].append({
'title': i,
'module': module[i],
})
continue
# Run module
for m in run_module(title=i, module=module[i]):
yield m
def run_category(category_selected, subcategories={}, system_module=False):
constant.module_to_exec_at_end = {
"winapi": [],
"dpapi": [],
}
modules = create_module_dic()
categories = [category_selected] if category_selected != 'all' else get_categories()
for category in categories:
for r in run_modules(modules[category], subcategories, system_module):
yield r
if not system_module:
if constant.is_current_user:
# Modules using Windows API (CryptUnprotectData) can be called from the current session
for module in constant.module_to_exec_at_end.get('winapi', []):
for m in run_module(title=module['title'], module=module['module']):
yield m
if constant.module_to_exec_at_end.get('dpapi', []):
if are_masterkeys_retrieved():
for module in constant.module_to_exec_at_end.get('dpapi', []):
for m in run_module(title=module['title'], module=module['module']):
yield m
else:
if constant.module_to_exec_at_end.get('dpapi', []) or constant.module_to_exec_at_end.get('winapi', []):
if are_masterkeys_retrieved():
# Execute winapi/dpapi modules - winapi decrypt blob using dpapi without calling CryptUnprotectData
for i in ['winapi', 'dpapi']:
for module in constant.module_to_exec_at_end.get(i, []):
for m in run_module(title=module['title'], module=module['module']):
yield m
def run_lazagne(category_selected='all', subcategories={}, password=None):
"""
Execution Workflow:
- If admin:
- Execute system modules to retrieve LSA Secrets and user passwords if possible
            - These secrets can be useful for further decryption (e.g. Wifi)
        - If a process of another user is launched, try to impersonate it (impersonating its token)
            - TO DO: if hashdump retrieved other local accounts, launch a new process using psexec techniques
- From our user:
- Retrieve all passwords using their own password storage algorithm (Firefox, Pidgin, etc.)
- Retrieve all passwords using Windows API - CryptUnprotectData (Chrome, etc.)
- If the user password or the dpapi hash is found:
            - Retrieve all passwords from an encrypted blob (Credentials files, Vaults, etc.)
- From all users found on the filesystem (e.g C:\\Users) - Need admin privilege:
- Retrieve all passwords using their own password storage algorithm (Firefox, Pidgin, etc.)
- If the user password or the dpapi hash is found:
            - Retrieve all passwords from an encrypted blob (Chrome, Credentials files, Vaults, etc.)
    To summarize:
    - Some passwords (e.g. Firefox) can be retrieved from any other user
    - CryptUnprotectData can be called only from our current session
    - A DPAPI blob can be decrypted only if we have the password or the hash of the user
"""
# Useful if this function is called from another tool
if password:
constant.user_password = password
if not constant.st:
constant.st = StandardOutput()
# --------- Execute System modules ---------
if ctypes.windll.shell32.IsUserAnAdmin() != 0:
if save_hives():
# System modules (hashdump, lsa secrets, etc.)
constant.username = 'SYSTEM'
constant.finalResults = {'User': constant.username}
constant.system_dpapi = SystemDpapi()
if logging.getLogger().isEnabledFor(logging.INFO):
constant.st.print_user(constant.username)
yield 'User', constant.username
try:
for r in run_category(category_selected, subcategories, system_module=True):
yield r
except: # Catch all kind of exceptions
pass
finally:
delete_hives()
constant.stdout_result.append(constant.finalResults)
# ------ Part used for user impersonation ------
constant.is_current_user = True
constant.username = get_username_winapi()
if not constant.username.endswith('$'):
constant.finalResults = {'User': constant.username}
constant.st.print_user(constant.username)
yield 'User', constant.username
set_env_variables(user=constant.username)
for r in run_category(category_selected, subcategories):
yield r
constant.stdout_result.append(constant.finalResults)
# Check if admin to impersonate
if ctypes.windll.shell32.IsUserAnAdmin() != 0:
# --------- Impersonation using tokens ---------
sids = list_sids()
impersonate_users = {}
impersonated_user = [constant.username]
for sid in sids:
            # Skip the current user's SIDs and do not impersonate the system user
if constant.username != sid[3] and sid[2] != 'S-1-5-18':
impersonate_users.setdefault(sid[3], []).append(sid[2])
for user in impersonate_users:
if 'service' in user.lower().strip():
continue
# Do not impersonate the same user twice
if user in impersonated_user:
continue
constant.st.print_user(user)
yield 'User', user
constant.finalResults = {'User': user}
for sid in impersonate_users[user]:
try:
set_env_variables(user, to_impersonate=True)
if impersonate_sid_long_handle(sid, close=False):
impersonated_user.append(user)
# Launch module wanted
for r in run_category(category_selected, subcategories):
yield r
rev2self()
constant.stdout_result.append(constant.finalResults)
break
except Exception:
print_debug('DEBUG', traceback.format_exc())
# --------- Impersonation browsing file system ---------
constant.is_current_user = False
# Ready to check for all users remaining
all_users = get_user_list_on_filesystem(impersonated_user=[constant.username])
for user in all_users:
# Fix value by default for user environment (APPDATA and USERPROFILE)
set_env_variables(user, to_impersonate=True)
constant.st.print_user(user)
constant.username = user
constant.finalResults = {'User': user}
yield 'User', user
# Retrieve passwords that need high privileges
for r in run_category(category_selected, subcategories):
yield r
constant.stdout_result.append(constant.finalResults)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
class PolyLine:
    def __init__(self, segments=None):
        # Avoid sharing a mutable default list across instances
        self.segments = segments if segments is not None else []
@staticmethod
def name():
return 'Poly Line'
def render(self, qp):
for seg in self.segments:
seg.render(qp)
def add_segment(self, value):
self.segments.append(value)
def get_segments(self):
return self.segments
def set_segments(self, value):
self.segments = value
|
#!/usr/bin/env python3
import socket
from IPy import IP
class DomainIPError(Exception):
pass
def get_ip(ip: str):
try:
IP(ip)
return ip
except ValueError:
try:
            return socket.gethostbyname(ip)
        except socket.error:
            # neither a valid IP nor a resolvable hostname
raise DomainIPError
def scan_port(ip: str, port: int):
s = socket.socket()
s.settimeout(2.0) # Optional timeout added. Remove this for more accurate results (or increase the parameter).
response = s.connect_ex((ip, int(port)))
s.close()
    if response == 0:
print("[+] Port", port, "is open on", ip + ".", end="\t")
sock = socket.socket()
sock.settimeout(3.0)
try:
sock.connect((ip, int(port)))
except ConnectionRefusedError:
print("[!!!] Connection refused on IP", ip +":"+str(port)+".")
return
try:
banner = sock.recv(1024)
print("[***] Banner:", str(banner.decode().strip("\n")))
sock.close()
except:
print("[---] Banner not received.")
return port
return -1
def scan(ip: str, port_range: list):
open_ports = []
try:
address = get_ip(ip)
except DomainIPError:
print("[-] Error: Problem with IP:", ip)
print("[-]\tSkipping...")
return
    if address != ip:
        print("\nScanning", ip + "/" + address + ":\n")
    else:
        print("\n[+] Scanning", ip + ":\n")
try:
index = int(port_range[0])
lastIndex = int(port_range[1])
except:
print("[-] Error: Invalid port range entered.")
exit()
while (index <= lastIndex):
open_ports.append(scan_port(address, index))
index += 1
found_port = False
for num in open_ports:
        if num != -1:
found_port = True
if not found_port:
print("[~] No open ports detected on", ip+".\n")
def main():
addresses = input("[+] Enter the target(s) IP address or domain name (separate multiple targets with \",\"): ")
ports = input("[+] Enter the target port range (example: '1-100'): ")
if ',' in addresses:
targets = addresses.split(',')
try:
ports = ports.split("-")
except:
print("[-] Error: Unsupported port format entered. Closing.")
exit()
for target in targets:
scan(target, ports)
else:
try:
ports = ports.split("-")
except:
print("[-] Error: Unsupported port format entered. Closing.")
exit()
scan(addresses, ports)
def para_main(ip_list: list, port_range: list):
for address in ip_list:
scan(address, port_range)
if __name__=="__main__":
try:
main()
except KeyboardInterrupt: # This prevents a long error message if the program is closed via ctrl+c
print() |
"""
Forecast One By One
===================
A useful feature for short-term forecast in Silverkite model family is autoregression.
Silverkite has an "auto" option for autoregression,
which automatically selects the autoregression lag orders based on the data frequency and forecast horizons.
One important rule of this "auto" option is that the minimum order of autoregression terms
is at least the forecast horizon.
For example, if the forecast horizon is 3 on a daily model,
the minimum order of autoregression is set to 3.
The "auto" option won't have an order of 2 in this case,
because the 3rd day forecast will need the 1st day's observation,
which isn't available at the current time.
Although the model can make predictions with an autoregression lag order less than the forecast horizon
via simulations, this takes longer to run and is not the preferred behavior of the "auto" option.
However, in many cases, using smaller autoregression lag orders gives more accurate forecasts.
The only barrier to using an autoregression term of order 2 in the 3-day forecast model
is the 3rd day; it can be used freely for the first 2 days.
Similarly, an autoregression term of order 1 can be used for the 1st day.
In a 3-day forecast, if the accuracy of all 3 days is important, replacing the first 2 days' models
with models that use shorter autoregression lag orders can improve the accuracy.
These observations motivate the forecast-one-by-one algorithm.
When the forecast horizon is longer than 1, the algorithm fits multiple models,
each with the "auto" option in autoregression.
For each model, the "auto" option selects the smallest available autoregression lag order
and predicts the corresponding forecast steps, improving the forecast accuracy for the early steps.
In this example, we will cover how to activate the forecast-one-by-one approach
via the ``ForecastConfig`` and the ``Forecaster`` classes.
For a detailed API reference, please see the
`~greykite.framework.templates.autogen.forecast_config.ForecastConfig` and
`~greykite.sklearn.estimator.one_by_one_estimator.OneByOneEstimator` classes.
"""
import warnings
warnings.filterwarnings("ignore")
import plotly
from greykite.common.data_loader import DataLoader
from greykite.framework.templates.autogen.forecast_config import ForecastConfig
from greykite.framework.templates.autogen.forecast_config import ModelComponentsParam
from greykite.framework.templates.forecaster import Forecaster
from greykite.framework.templates.model_templates import ModelTemplateEnum
from greykite.framework.utils.result_summary import summarize_grid_search_results
# Loads dataset into pandas DataFrame
dl = DataLoader()
df = dl.load_peyton_manning()
# %%
# The forecast-one-by-one option
# ------------------------------
#
# The forecast-one-by-one option is specified through the ``forecast_one_by_one`` parameter
# in ``ForecastConfig``.
config = ForecastConfig(
model_template=ModelTemplateEnum.SILVERKITE.name,
forecast_horizon=3,
model_components_param=ModelComponentsParam(
autoregression=dict(autoreg_dict="auto")
),
forecast_one_by_one=True
)
# %%
# The ``forecast_one_by_one`` parameter can be specified in the following ways:
#
# - **``True``**: every forecast step will be a separate model.
# The number of models equals the forecast horizon.
# In this example, 3 models will be fit with the 3 forecast steps.
# - **``False``**: the forecast-one-by-one method is turned off.
# This is the default behavior and a single model is used for all forecast steps.
# - **A list of integers**: each integer corresponds to a model and gives the
#   number of forecast steps that model covers (see the configuration sketch
#   after this list). For example, in a 7-day forecast,
#   specifying ``forecast_one_by_one=[1, 2, 4]`` will result in 3 models.
#   The first model forecasts the 1st day with forecast horizon 1;
#   the second model forecasts the 2nd and 3rd days with forecast horizon 3;
#   the third model forecasts the 4th through 7th days with forecast horizon 7.
#   In this case, the sum of the list entries must equal the forecast horizon.
# - **An integer ``n``**: every model will account for ``n`` steps, and the last
#   model will account for the remaining (fewer than ``n``) steps. For example, in a 7-day forecast,
# specifying ``forecast_one_by_one=2`` will result in 4 models,
# which is equivalent to ``forecast_one_by_one=[2, 2, 2, 1]``.
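#
# As an illustration of the list option (a sketch only; this configuration is
# constructed here but not fitted), a 7-day forecast split into 3 models could be
# specified as follows, using the same ``SILVERKITE`` template and "auto"
# autoregression settings as above.
config_list_sketch = ForecastConfig(
    model_template=ModelTemplateEnum.SILVERKITE.name,
    forecast_horizon=7,
    model_components_param=ModelComponentsParam(
        autoregression=dict(autoreg_dict="auto")
    ),
    forecast_one_by_one=[1, 2, 4]  # 3 models: step 1, steps 2-3, steps 4-7
)
# %%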
#
# .. note::
# ``forecast_one_by_one`` is activated only when there are parameters in
# the model that depend on the forecast horizon. Currently the only parameter
# that depends on forecast horizon is ``autoreg_dict="auto"``. If you do not specify
# ``autoreg_dict="auto"``, the ``forecast_one_by_one`` parameter will be ignored.
#
# .. note::
# Forecast-one-by-one fits multiple models to increase accuracy,
# which may cause the training time to increase linearly with the number of models.
# Please make sure your ``forecast_one_by_one`` parameter and forecast horizon
# result in a reasonable number of models.
#
# Next, let's run the model and look at the result.
# Runs the forecast
forecaster = Forecaster()
result = forecaster.run_forecast_config(
df=df.iloc[-365:].reset_index(drop=True), # Uses less data to speed up this example.
config=config
)
# %%
# You may see a few warnings like "The future x length is 0,
# which doesn't match the model forecast horizon 3,
# using only the model with the longest forecast horizon for prediction."
# This is expected behavior when calculating the training errors:
# the models are mapped to the forecast period only, not to the training period,
# so only the last model is used to compute the fitted values on the training period.
# You can safely ignore these warnings.
#
# Everything at the ``forecast_result`` level is the same as when forecast-one-by-one is not activated.
# For example, we can view the cross-validation results in the same way.
# Summarizes the CV results
cv_results = summarize_grid_search_results(
grid_search=result.grid_search,
decimals=1,
# The below saves space in the printed output. Remove to show all available metrics and columns.
cv_report_metrics=None,
column_order=["rank", "mean_test", "split_test", "mean_train", "split_train", "mean_fit_time", "mean_score_time", "params"])
cv_results["params"] = cv_results["params"].astype(str)
cv_results.set_index("params", drop=True, inplace=True)
cv_results.transpose()
# %%
# When you access estimator-level attributes, for example, the model summary or component plots,
# the returned result is a list of the original type, because multiple models were fit.
# The model summaries are accessed in the same way as before, and you can index into the list
# to get the summary for a single model.
# Gets the model summary list
one_by_one_estimator = result.model[-1]
summaries = one_by_one_estimator.summary()
# Prints the model summary for 1st model only
print(summaries[0])
# %%
# We can access the component plots in a similar way.
# Gets the fig list
figs = one_by_one_estimator.plot_components()
# Shows the component plot for 1st model only
plotly.io.show(figs[0])
|
#
# Copyright (c) 2012, 2013, 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from git_upstream.errors import GitUpstreamError
from git_upstream.lib.pygitcompat import Repo
from git import Git
import re
import os
import sys
try:
from git.exc import InvalidGitRepositoryError
except ImportError:
from git.errors import InvalidGitRepositoryError
class GitMixin(object):
def __init__(self, *args, **kwargs):
repo = kwargs.pop('repo', None)
if repo:
self.__repo = repo
else:
try:
self.__repo = Repo(os.environ.get('GIT_WORK_TREE',
os.path.curdir))
except InvalidGitRepositoryError:
exc_class, exc, tb = sys.exc_info()
raise GitUpstreamError("Not a git repository", tb)
self.__git = self.repo.git
super(GitMixin, self).__init__(*args, **kwargs)
@property
def repo(self):
return self.__repo
@property
def git(self):
return self.__git
def is_detached(self):
return self.git.symbolic_ref("HEAD", q=True, with_exceptions=False)
def get_name(self, sha1, pattern=None):
"""
Return a symbolic name corresponding to a SHA1
Will return reference names using the commit revision modifier strings
to identify the given SHA1. Or will return nothing if SHA1 cannot be
identified relative to any existing reference.
"""
if pattern:
return self.git.name_rev(sha1, name_only=False, refs=pattern,
with_exceptions=False)
else:
return self.git.name_rev(sha1, name_only=False,
with_exceptions=False)
def is_valid_commit(self, sha1):
"""
Check if given SHA1 refers to a commit object on a valid ref.
This can be used to test if any name or SHA1 refers to a commit
reachable by walking any of the refs under the .git/refs.
"""
# get_name will return a string if the sha1 is reachable from an
# existing reference.
return bool(self.get_name(sha1))
def check_git_version(major, minor, revision):
    """
    Check that the git version GitPython (and git-upstream) will be using is
    greater than or equal to major.minor.revision.
    """
    regex = re.compile(r"^git version ([0-9]+)\.([0-9]+)\.([0-9]+)(\.(.+))*$")
git = Git()
groups = regex.search(git.version()).groups()
if int(groups[0]) > major:
return True
elif int(groups[0]) == major:
if int(groups[1]) > minor:
return True
elif int(groups[1]) == minor:
if int(groups[2]) >= revision:
return True
return False
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for vgsl_model."""
import os
import numpy as np
import tensorflow as tf
import vgsl_input
import vgsl_model
def _testdata(filename):
return os.path.join('../testdata/', filename)
def _rand(*size):
return np.random.uniform(size=size).astype('f')
class VgslModelTest(tf.test.TestCase):
def testParseInputSpec(self):
"""The parser must return the numbers in the correct order.
"""
shape = vgsl_model._ParseInputSpec(input_spec='32,42,256,3')
self.assertEqual(
shape,
vgsl_input.ImageShape(
batch_size=32, height=42, width=256, depth=3))
# Nones must be inserted for zero sizes.
shape = vgsl_model._ParseInputSpec(input_spec='1,0,0,3')
self.assertEqual(
shape,
vgsl_input.ImageShape(
batch_size=1, height=None, width=None, depth=3))
def testParseOutputSpec(self):
"""The parser must return the correct args in the correct order.
"""
out_dims, out_func, num_classes = vgsl_model._ParseOutputSpec(
output_spec='O1c142')
self.assertEqual(out_dims, 1)
self.assertEqual(out_func, 'c')
self.assertEqual(num_classes, 142)
out_dims, out_func, num_classes = vgsl_model._ParseOutputSpec(
output_spec='O2s99')
self.assertEqual(out_dims, 2)
self.assertEqual(out_func, 's')
self.assertEqual(num_classes, 99)
out_dims, out_func, num_classes = vgsl_model._ParseOutputSpec(
output_spec='O0l12')
self.assertEqual(out_dims, 0)
self.assertEqual(out_func, 'l')
self.assertEqual(num_classes, 12)
def testPadLabels2d(self):
"""Must pad timesteps in labels to match logits.
"""
with self.test_session() as sess:
# Make placeholders for logits and labels.
ph_logits = tf.placeholder(tf.float32, shape=(None, None, 42))
ph_labels = tf.placeholder(tf.int64, shape=(None, None))
padded_labels = vgsl_model._PadLabels2d(tf.shape(ph_logits)[1], ph_labels)
# Make actual inputs.
real_logits = _rand(4, 97, 42)
real_labels = _rand(4, 85)
np_array = sess.run([padded_labels],
feed_dict={ph_logits: real_logits,
ph_labels: real_labels})[0]
self.assertEqual(tuple(np_array.shape), (4, 97))
real_labels = _rand(4, 97)
np_array = sess.run([padded_labels],
feed_dict={ph_logits: real_logits,
ph_labels: real_labels})[0]
self.assertEqual(tuple(np_array.shape), (4, 97))
real_labels = _rand(4, 100)
np_array = sess.run([padded_labels],
feed_dict={ph_logits: real_logits,
ph_labels: real_labels})[0]
self.assertEqual(tuple(np_array.shape), (4, 97))
def testPadLabels3d(self):
"""Must pad height and width in labels to match logits.
The tricky thing with 3-d is that the rows and columns need to remain
intact, so we'll test it with small known data.
"""
with self.test_session() as sess:
# Make placeholders for logits and labels.
ph_logits = tf.placeholder(tf.float32, shape=(None, None, None, 42))
ph_labels = tf.placeholder(tf.int64, shape=(None, None, None))
padded_labels = vgsl_model._PadLabels3d(ph_logits, ph_labels)
# Make actual inputs.
real_logits = _rand(1, 3, 4, 42)
# Test all 9 combinations of height x width in [small, ok, big]
real_labels = np.arange(6).reshape((1, 2, 3)) # Height small, width small
np_array = sess.run([padded_labels],
feed_dict={ph_logits: real_logits,
ph_labels: real_labels})[0]
self.assertEqual(tuple(np_array.shape), (1, 3, 4))
self.assertAllEqual(np_array[0, :, :],
[[0, 1, 2, 0], [3, 4, 5, 0], [0, 0, 0, 0]])
real_labels = np.arange(8).reshape((1, 2, 4)) # Height small, width ok
np_array = sess.run([padded_labels],
feed_dict={ph_logits: real_logits,
ph_labels: real_labels})[0]
self.assertEqual(tuple(np_array.shape), (1, 3, 4))
self.assertAllEqual(np_array[0, :, :],
[[0, 1, 2, 3], [4, 5, 6, 7], [0, 0, 0, 0]])
real_labels = np.arange(10).reshape((1, 2, 5)) # Height small, width big
np_array = sess.run([padded_labels],
feed_dict={ph_logits: real_logits,
ph_labels: real_labels})[0]
self.assertEqual(tuple(np_array.shape), (1, 3, 4))
self.assertAllEqual(np_array[0, :, :],
[[0, 1, 2, 3], [5, 6, 7, 8], [0, 0, 0, 0]])
real_labels = np.arange(9).reshape((1, 3, 3)) # Height ok, width small
np_array = sess.run([padded_labels],
feed_dict={ph_logits: real_logits,
ph_labels: real_labels})[0]
self.assertEqual(tuple(np_array.shape), (1, 3, 4))
self.assertAllEqual(np_array[0, :, :],
[[0, 1, 2, 0], [3, 4, 5, 0], [6, 7, 8, 0]])
real_labels = np.arange(12).reshape((1, 3, 4)) # Height ok, width ok
np_array = sess.run([padded_labels],
feed_dict={ph_logits: real_logits,
ph_labels: real_labels})[0]
self.assertEqual(tuple(np_array.shape), (1, 3, 4))
self.assertAllEqual(np_array[0, :, :],
[[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]])
real_labels = np.arange(15).reshape((1, 3, 5)) # Height ok, width big
np_array = sess.run([padded_labels],
feed_dict={ph_logits: real_logits,
ph_labels: real_labels})[0]
self.assertEqual(tuple(np_array.shape), (1, 3, 4))
self.assertAllEqual(np_array[0, :, :],
[[0, 1, 2, 3], [5, 6, 7, 8], [10, 11, 12, 13]])
real_labels = np.arange(12).reshape((1, 4, 3)) # Height big, width small
np_array = sess.run([padded_labels],
feed_dict={ph_logits: real_logits,
ph_labels: real_labels})[0]
self.assertEqual(tuple(np_array.shape), (1, 3, 4))
self.assertAllEqual(np_array[0, :, :],
[[0, 1, 2, 0], [3, 4, 5, 0], [6, 7, 8, 0]])
real_labels = np.arange(16).reshape((1, 4, 4)) # Height big, width ok
np_array = sess.run([padded_labels],
feed_dict={ph_logits: real_logits,
ph_labels: real_labels})[0]
self.assertEqual(tuple(np_array.shape), (1, 3, 4))
self.assertAllEqual(np_array[0, :, :],
[[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]])
real_labels = np.arange(20).reshape((1, 4, 5)) # Height big, width big
np_array = sess.run([padded_labels],
feed_dict={ph_logits: real_logits,
ph_labels: real_labels})[0]
self.assertEqual(tuple(np_array.shape), (1, 3, 4))
self.assertAllEqual(np_array[0, :, :],
[[0, 1, 2, 3], [5, 6, 7, 8], [10, 11, 12, 13]])
def testEndToEndSizes0d(self):
"""Tests that the output sizes match when training/running real 0d data.
Uses mnist with dual summarizing LSTMs to reduce to a single value.
"""
filename = _testdata('mnist-tiny')
with self.test_session() as sess:
model = vgsl_model.InitNetwork(
filename,
model_spec='4,0,0,1[Cr5,5,16 Mp3,3 Lfys16 Lfxs16]O0s12',
mode='train')
tf.initialize_all_variables().run(session=sess)
coord = tf.train.Coordinator()
tf.train.start_queue_runners(sess=sess, coord=coord)
_, step = model.TrainAStep(sess)
self.assertEqual(step, 1)
output, labels = model.RunAStep(sess)
self.assertEqual(len(output.shape), 2)
self.assertEqual(len(labels.shape), 1)
self.assertEqual(output.shape[0], labels.shape[0])
self.assertEqual(output.shape[1], 12)
# TODO(rays) Support logistic and test with Imagenet (as 0d, multi-object.)
def testEndToEndSizes1dCTC(self):
"""Tests that the output sizes match when training with CTC.
Basic bidi LSTM on top of convolution and summarizing LSTM with CTC.
"""
filename = _testdata('arial-32-tiny')
with self.test_session() as sess:
model = vgsl_model.InitNetwork(
filename,
model_spec='2,0,0,1[Cr5,5,16 Mp3,3 Lfys16 Lbx100]O1c105',
mode='train')
tf.initialize_all_variables().run(session=sess)
coord = tf.train.Coordinator()
tf.train.start_queue_runners(sess=sess, coord=coord)
_, step = model.TrainAStep(sess)
self.assertEqual(step, 1)
output, labels = model.RunAStep(sess)
self.assertEqual(len(output.shape), 3)
self.assertEqual(len(labels.shape), 2)
self.assertEqual(output.shape[0], labels.shape[0])
# This is ctc - the only cast-iron guarantee is labels <= output.
self.assertLessEqual(labels.shape[1], output.shape[1])
self.assertEqual(output.shape[2], 105)
def testEndToEndSizes1dFixed(self):
"""Tests that the output sizes match when training/running 1 data.
Convolution, summarizing LSTM with fwd rev fwd to allow no CTC.
"""
filename = _testdata('numbers-16-tiny')
with self.test_session() as sess:
model = vgsl_model.InitNetwork(
filename,
model_spec='8,0,0,1[Cr5,5,16 Mp3,3 Lfys16 Lfx64 Lrx64 Lfx64]O1s12',
mode='train')
tf.initialize_all_variables().run(session=sess)
coord = tf.train.Coordinator()
tf.train.start_queue_runners(sess=sess, coord=coord)
_, step = model.TrainAStep(sess)
self.assertEqual(step, 1)
output, labels = model.RunAStep(sess)
self.assertEqual(len(output.shape), 3)
self.assertEqual(len(labels.shape), 2)
self.assertEqual(output.shape[0], labels.shape[0])
# Not CTC, output lengths match.
self.assertEqual(output.shape[1], labels.shape[1])
self.assertEqual(output.shape[2], 12)
# TODO(rays) Get a 2-d dataset and support 2d (heat map) outputs.
if __name__ == '__main__':
tf.test.main()
|
# Method one
def divisors(n):
if n == 1:
return "{} is prime".format(n)
else:
c = [x for x in range(2,n//2+1) if n%x==0]
if len(c)==0:
return "{} is prime".format(n)
else:
return c
print(divisors(12))
print(divisors(25))
print(divisors(13))
print("------------------------------------------------")
# Method Two
def divisors_1(n):
c = [x for x in range(2,n//2+1) if n%x==0]
if len(c)==0:
return f'{n} is prime'
else:
return c
print(divisors_1(12))
print(divisors_1(25))
print(divisors_1(13))
print("------------------------------------------------")
# Shorthand for Method Two
def divisors_2(n):return f'{n} is prime' if len([x for x in range(2,n//2+1) if n%x==0]) == 0 else [x for x in range(2,n//2+1) if n%x==0]
print(divisors_2(12))
print(divisors_2(25))
print(divisors_2(13))
print("------------------------------------------------")
# Implementing with Lambda Functions
divisors_3 = lambda n: f'{n} is prime' if len([x for x in range(2,n//2+1) if n%x==0]) == 0 else [x for x in range(2,n//2+1) if n%x==0]
print(divisors_3(12))
print(divisors_3(25))
print(divisors_3(13)) |
class Solution:
def readBinaryWatch(self, num):
"""
:type num: int
:rtype: List[str]
"""
times = []
for i in range(12):
for j in range(60):
if bin(i).count('1') + bin(j).count('1') == num:
times.append('{:d}:{:02d}'.format(i, j))
return times
if __name__ == '__main__':
solution = Solution()
print(solution.readBinaryWatch(0))
print(solution.readBinaryWatch(1))
print(solution.readBinaryWatch(2))
print(solution.readBinaryWatch(3))
print(solution.readBinaryWatch(4))
print(solution.readBinaryWatch(5))
print(solution.readBinaryWatch(8))
print(solution.readBinaryWatch(9))
print(solution.readBinaryWatch(10))
else:
pass
|
import disnake
from disnake import ui, ButtonStyle
import configs
from bot import util
from bot.tickets import TicketType
class TicketOpeningInteraction(ui.View):
def __init__(self):
super().__init__(timeout=None)
@ui.button(label='Plainte', style=ButtonStyle.blurple, custom_id=configs.TICKET_COMPLAINT_ID)
async def plainte(self, button: ui.Button, interaction: disnake.Interaction):
await interaction.send("Êtes-vous sûre de vouloir ouvrir un ticket de plainte?",
view=TicketConfirmationInteraction(TicketType.COMPLAINT),
ephemeral=True)
@ui.button(label='Appel de moron', style=ButtonStyle.blurple, custom_id=configs.TICKET_MORON_ID)
async def moron(self, button: ui.Button, interaction: disnake.Interaction):
await interaction.send("Êtes-vous sûre de vouloir ouvrir un ticket d'appel de moron?",
view=TicketConfirmationInteraction(TicketType.MORON),
ephemeral=True)
class TicketConfirmationInteraction(ui.View):
def __init__(self, ticket_type: TicketType):
super().__init__(timeout=30)
self.ticket_type = ticket_type
async def interaction_check(self, interaction: disnake.Interaction) -> bool:
for child in self.children:
child.disabled = True
await interaction.response.edit_message(view=self)
return await super().interaction_check(interaction)
@ui.button(label='Oui', style=ButtonStyle.green)
async def confirm(self, button: ui.Button, interaction: disnake.Interaction):
await interaction.response.defer()
await util.create_ticket(interaction.user, self.ticket_type)
@ui.button(label='Non', style=ButtonStyle.red)
async def decline(self, button: ui.Button, interaction: disnake.Interaction):
await interaction.response.defer()
await interaction.edit_original_message(content='Vous avez annulé la création du ticket.')
class TicketCloseInteraction(ui.View):
def __init__(self):
super().__init__(timeout=None)
async def interaction_check(self, interaction: disnake.Interaction) -> bool:
for child in self.children:
child.disabled = True
await interaction.response.edit_message(view=self)
return await super().interaction_check(interaction)
@ui.button(label='Fermer', style=ButtonStyle.red, custom_id=configs.TICKET_CLOSE_ID)
async def close(self, button: ui.Button, interaction: disnake.Interaction):
await interaction.response.defer()
await util.archive_ticket(interaction.user, interaction.channel)
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convolutional Neural Network Estimator for MNIST, built with tf.layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.INFO)
class CNN():
def __init__(self, learning_rate=0.001, goal_cond=False):
# def cnn_model_fn(features, labels, mode):
with tf.variable_scope("goal_classifier"):
"""Model function for CNN."""
# Input Layer
# Reshape X to 4-D tensor: [batch_size, width, height, channels]
            # Input images are 32x32 pixels and have three color channels
self.images = tf.placeholder(
tf.float32,
shape=(None, 32, 32, 3),
name='images',
)
self.labels = tf.placeholder(tf.int64, shape=(None), name='labels')
# input_layer = tf.reshape(features["x"], [-1, 32, 32, 3])
# Convolutional Layer #1
# Computes 32 features using a 5x5 filter with ReLU activation.
# Padding is added to preserve width and height.
            # Input Tensor Shape: [batch_size, 32, 32, 3]
# Output Tensor Shape: [batch_size, 32, 32, 32]
conv1 = tf.layers.conv2d(
inputs=self.images,
filters=32,
kernel_size=[5, 5],
padding="same",
activation=tf.nn.relu)
# Pooling Layer #1
# First max pooling layer with a 2x2 filter and stride of 2
# Input Tensor Shape: [batch_size, 32, 32, 32]
# Output Tensor Shape: [batch_size, 16, 16, 32]
pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)
# Convolutional Layer #2
# Computes 64 features using a 5x5 filter.
# Padding is added to preserve width and height.
# Input Tensor Shape: [batch_size, 16, 16, 32]
# Output Tensor Shape: [batch_size, 16, 16, 64]
conv2 = tf.layers.conv2d(
inputs=pool1,
filters=64,
kernel_size=[5, 5],
padding="same",
activation=tf.nn.relu)
# conv2d/bias (DT_FLOAT) [32]
# conv2d/kernel (DT_FLOAT) [5,5,3,32]
# conv2d_1/bias (DT_FLOAT) [64]
# conv2d_1/kernel (DT_FLOAT) [5,5,32,64]
# dense/bias (DT_FLOAT) [1024]
# dense/kernel (DT_FLOAT) [4097,1024]
# dense_1/bias (DT_FLOAT) [2]
# dense_1/kernel (DT_FLOAT) [1024,2]
# Pooling Layer #2
# Second max pooling layer with a 2x2 filter and stride of 2
# Input Tensor Shape: [batch_size, 16, 16, 64]
# Output Tensor Shape: [batch_size, 8, 8, 64]
pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)
# Flatten tensor into a batch of vectors
            # Input Tensor Shape: [batch_size, 8, 8, 64]
            # Output Tensor Shape: [batch_size, 8 * 8 * 64]
pool2_flat = tf.reshape(pool2, [-1, 8 * 8 * 64])
# Dense Layer
# Densely connected layer with 1024 neurons
            # Input Tensor Shape: [batch_size, 8 * 8 * 64] (plus 1 goal unit if goal-conditioned)
# Output Tensor Shape: [batch_size, 1024]
if goal_cond:
self.goals = tf.placeholder(tf.float32, shape=(pool2_flat.shape[0], 1))
dense = tf.layers.dense(inputs=tf.concat([pool2_flat, self.goals], axis=1), units=1024, activation=tf.nn.relu)
else:
dense = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu)
# Add dropout operation; 0.6 probability that element will be kept
# dropout = tf.layers.dropout(
# inputs=dense, rate=0.4, training=mode == tf.estimator.ModeKeys.TRAIN)
# Logits layer
# Input Tensor Shape: [batch_size, 1024]
            # Output Tensor Shape: [batch_size, 2]
self.logits = tf.layers.dense(inputs=dense, units=2)
self.pred_classes = tf.argmax(input=self.logits, axis=1)
self.pred_probs = tf.nn.softmax(self.logits, name="softmax_tensor")
self.avg_pred_prob = tf.reduce_mean(tf.reduce_max(self.pred_probs, axis=1))
correct_pred = tf.equal(self.labels, self.pred_classes)
self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# # Calculate Loss (for both TRAIN and EVAL modes)
self.loss = tf.losses.sparse_softmax_cross_entropy(labels=self.labels, logits=self.logits)
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
self.train_op = optimizer.minimize(
loss=self.loss,
global_step=tf.train.get_global_step())
tf.summary.scalar('loss', self.loss)
tf.summary.scalar('avg_prob', self.avg_pred_prob)
tf.summary.scalar('accuracy', self.accuracy)
self.summary = tf.summary.merge_all()
            # return logits, predictions
#
# #
# # Configure the Training Op (for TRAIN mode)
# if mode == tf.estimator.ModeKeys.TRAIN:
# optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
# train_op = optimizer.minimize(
# loss=loss,
# global_step=tf.train.get_global_step())
# return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)
#
# # Add evaluation metrics (for EVAL mode)
# eval_metric_ops = {
# "accuracy": tf.metrics.accuracy(
# labels=labels, predictions=predictions["classes"])}
# return tf.estimator.EstimatorSpec(
# mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
def get_variables(self):
return tf.trainable_variables()
def wrap_dist(theta1, theta2):
return np.minimum(np.abs(theta1-theta2), 2*np.pi-np.abs(theta1-theta2))
def main(goal_cond):
import pickle
import os
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
cur_dir = os.path.dirname(os.path.realpath(__file__))
if goal_cond:
exp_file = 'screw_imgs'
else:
exp_file = 'goal_neg_images_180_unif'
# Load training and eval data
# goal_data = pickle.load(open(cur_dir + '/goal_images_180/goal_images_180.pkl', 'rb'))
# goal_neg_data = pickle.load(open(cur_dir + '/' + exp_file + '/' + exp_file + '.pkl', 'rb'))
# data = np.append(goal_data, goal_neg_data, axis=0)
# train_data, test_data = train_test_split(data, test_size=0.2, shuffle=True)
# num_train = train_data.shape[0]
# train_images, train_labels, train_pos = np.array(train_data[:,0].tolist()), np.array(train_data[:,1].tolist()), train_data[:,2]
# test_images, test_labels, test_pos = np.array(test_data[:,0].tolist()), np.array(test_data[:,1].tolist()), test_data[:,2]
data = pickle.load(open(cur_dir + '/screw_imgs/screw_imgs.pkl', 'rb'))
    train_data, test_data = train_test_split(data, test_size=0.2, shuffle=True)
num_train, num_test = train_data.shape[0], test_data.shape[0]
train_images, train_pos = np.array(train_data[:,0].tolist()), train_data[:,1]
test_images, test_pos = np.array(test_data[:,0].tolist()), test_data[:,1]
def batcher(batch_size=100, goal_cond=False):
i = 0
while True:
# If goal conditioned, sample goal, else goal=pi
if goal_cond:
batch_goals = np.random.uniform(0,2*np.pi, size=batch_size)
else:
batch_goals = np.full(batch_size, fill_value=np.pi)
# Wraparound logic
if i + batch_size >= num_train:
batch_pos = np.append(train_pos[i:], train_pos[:(i+batch_size) % num_train], axis=0)
if goal_cond:
rand_inds = np.random.choice(range(batch_size), size=batch_size // 2, replace=False)
batch_goals[rand_inds] = batch_pos[rand_inds]
batch_labels = (wrap_dist(batch_goals, batch_pos) < 0.15)*1
batch_images = np.append(train_images[i:], train_images[:(i+batch_size) % num_train], axis=0)
i = (i+batch_size) % num_train
# Normal get batch
else:
batch_pos = train_pos[i:i + batch_size]
# If goal_cond, ensure that half of the batch are successes
if goal_cond:
rand_inds = np.random.choice(range(batch_size), size=batch_size // 2, replace=False)
batch_goals[rand_inds] = batch_pos[rand_inds]
batch_labels = (wrap_dist(batch_goals, batch_pos) < 0.15)*1
batch_images = train_images[i:i + batch_size]
i += batch_size
yield batch_images, batch_labels, batch_goals.reshape((batch_size, 1))
# Create the CNN
cnn = CNN(goal_cond=goal_cond)
# Get batch
train_batch = batcher(batch_size=200, goal_cond=goal_cond)
# set up tf
sess = tf.InteractiveSession()
saver = tf.train.Saver()
tf.global_variables_initializer().run()
# save data
train_writer = tf.summary.FileWriter(cur_dir + '/' + exp_file + '/train_scope',
sess.graph)
test_writer = tf.summary.FileWriter(cur_dir + '/' + exp_file + '/test_scope')
pickle.dump(train_data, open(cur_dir + '/' + exp_file + '/train_scope/train_data.pkl', 'wb'))
pickle.dump(test_data, open(cur_dir + '/' + exp_file + '/test_scope/test_data.pkl', 'wb'))
# Training
for i in range(200000):
# Set up batch
image_batch, label_batch, goal_batch = train_batch.__next__()
if image_batch.shape[0] == 0:
import ipdb; ipdb.set_trace()
if goal_cond:
feed_dict = {cnn.images: image_batch, cnn.labels: label_batch, cnn.goals: goal_batch}
else:
feed_dict = {cnn.images: image_batch, cnn.labels: label_batch}
# Train step
pred_classes, pred_probs, avg_pred_prob, train_acc, loss, summary, _ = sess.run([cnn.pred_classes, cnn.pred_probs, cnn.avg_pred_prob, cnn.accuracy, cnn.loss, cnn.summary, cnn.train_op], feed_dict=feed_dict)
train_writer.add_summary(summary, i) # print to tensorboard
# Testing
if i % 1000 == 0:
# Test data
test_goals = np.full((num_test), fill_value=np.pi)
test_labels = (wrap_dist(test_goals, test_pos) < 0.15) * 1
if goal_cond:
test_feed_dict = {cnn.images: test_images, cnn.labels: test_labels, cnn.goals:np.expand_dims(test_goals, 1)}
else:
test_feed_dict = {cnn.images: test_images, cnn.labels: test_labels}
# Evaluation
test_pred_probs, test_avg_pred_prob, test_acc, test_summary = sess.run([cnn.pred_probs, cnn.avg_pred_prob, cnn.accuracy, cnn.summary], feed_dict=test_feed_dict)
print("Iter: %i, Train Loss: %f, Avg Pred Prob (Train): %f, Train Acc: %f, Test Acc: %f, Avg Pred Prob (Test): %f" %(i, loss, avg_pred_prob, train_acc, test_acc, test_avg_pred_prob))
test_writer.add_summary(test_summary, i)
# Results
plt.scatter(test_pos, test_pred_probs[:,1])
plt.xlabel('Angle of Valve (Goal=pi)')
plt.ylabel('Probability of Being a Goal Image')
plt.savefig(cur_dir + '/' + exp_file + '/test_scope/' + 'angle_x_prob_%i.png' %i)
plt.clf()
print('Graph saved as: ' + exp_file + '/test_scope/' + 'angle_x_prob_%i.png' %i)
# Save Model
saver.save(sess, cur_dir + '/' + exp_file + '/train_scope/params.ckpt')
print("Model saved in path: %s" % exp_file + '/train_scope/params.ckpt')
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser("simple_example")
parser.add_argument('--goal_cond', action='store_true', default=False)
args = parser.parse_args()
goal_cond = args.goal_cond
main(goal_cond)
|
"""Tests for the more idiomatic python client.
These are light, because exceptions, arg parsing happen serverside.
"""
import datetime
import random
import string
import unittest
import seer
class TestClient(unittest.TestCase):
def setUp(self):
self.name = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(10))
self.client = seer.Client("127.0.0.1:8080")
self.client.create_stream(self.name, 86400)
def tearDown(self):
self.client.delete_stream(self.name)
def test_create_stream(self):
name = self.name + "_new"
period = 3600
stream = self.client.create_stream(name, period)
self.assertEqual(stream.name, name)
self.assertEqual(stream.period, period)
def test_get_stream(self):
stream = self.client.get_stream(self.name)
self.assertEqual(stream.name, self.name)
self.assertEqual(stream.period, 86400)
def test_list_streams(self):
streams = self.client.list_streams(10, 1)
self.assertTrue(len(streams) <= 10)
names = [s.name for s in streams]
self.assertTrue(self.name in names)
def test_update_stream(self):
times = [
datetime.datetime(2016, 1, 1),
datetime.datetime(2016, 1, 2),
datetime.datetime(2016, 1, 3),
]
values = [10, 9, 8]
stream = self.client.update_stream(self.name, times, values)
self.assertEqual(stream.name, self.name)
self.assertEqual(stream.period, 86400)
self.assertEqual(stream.last_event_time.ToDatetime(), datetime.datetime(2016, 1, 3))
def test_get_forecast(self):
forecast = self.client.get_forecast(self.name, 100)
self.assertEqual(len(forecast.times), 100)
self.assertEqual(len(forecast.values), 100)
self.assertEqual(len(forecast.intervals[0].lower_bound), 100)
|
if 0:
import astropy.io.fits as pyfits, os
#catalog = '/u/ki/dapple/nfs12/cosmos/cosmos30.slr.matched.cat'
catalog = '/u/ki/dapple/nfs12/cosmos/cosmos30.slr.cat'
p = pyfits.open(catalog)['OBJECTS']
print p.columns
#print p.data.field('z_spec')[4000:5000]
filters = ['MEGAPRIME-0-1-u','SUBARU-10_2-1-W-J-B','SUBARU-10_2-1-W-J-V','SUBARU-10_2-1-W-S-G+','SUBARU-10_2-1-W-S-R+','SUBARU-10_2-1-W-S-I+','SUBARU-10_2-1-W-S-Z+']
col_names = 'ID MAG_APER-SUBARU-10_2-1-W-S-R+ ' + reduce(lambda x,y: x + ' ' + y,['MAG_APER-' + z + ' MAGERR_APER-' + z for z in filters])
col= ['ID','MAG_APER-SUBARU-10_2-1-W-S-R+'] + reduce(lambda x,y: x + y,[['MAG_APER-' + z , 'MAGERR_APER-' + z] for z in filters])
print col
f = ''
for i in range(len(p.data)):
line = reduce(lambda x,y: x + ' ' + y, [str(p.data.field(c)[i]) for c in col])
#print line
import string
#if string.find(line,'-99') == -1:
f += line + '\n'
o = open('COSMOS.input','w')
o.write(f)
o.close()
command = 'ldactoasc -b -i ' + catalog + ' -t OBJECTS -k ' + col_names + ' > COSMOS.input'
print command
#os.system(command)
columns = open('COSMOS.columns','w')
for name,num in [['ID','1'],['Z_S','2'],['M_0','3']]:
columns.write(name + ' ' + num + '\n')
for name,num in [['MEGAPRIME-0-1-u','4,5'],['SUBARU-10_2-1-W-J-B','6,7'],['SUBARU-10_2-1-W-J-V','8,9'],['SUBARU-10_2-1-W-S-G+','10,11'],['SUBARU-10_2-1-W-S-R+','12,13'],['SUBARU-10_2-1-W-S-I+','14,15'],['SUBARU-10_2-1-W-S-Z+','16,17']]:
columns.write(name + ' ' + num + ' AB 0.0 0.0\n')
columns.close()
if 1:
import os, cutout_bpz
command = 'python $BPZPATH/bpz.py COSMOS.input -OUTPUT COSMOS.bpz -COLUMNS COSMOS.columns -PROBS_LITE COSMOS.probs -MAG yes -PRIOR hdfn_SB -ZMAX 4.0 -MIN_RMS 0.05 -INTERP 8 -SPECTRA CWWSB_capak.list'
print command
os.system(command)
if 0:
import cutout_bpz, os
cutout_bpz.plot_res('COSMOS.bpz',os.environ['sne'] + '/photoz/COSMOS/','CWWSB_capak.list')
|
#!/usr/bin/env python
# encoding: utf-8
'''
@author: xu.peng
@file: config.py
@time: 2019/9/5 2:00 PM
@desc:
'''
data_path="/Users/wll/AI-MLTraining/mltraining/notebook/科学比赛/CCF-EAOIN/data"
train_data_text=data_path+"/Train/Train_DataSet.csv"
train_data_label=data_path+"/Train/Train_DataSet_Label.csv"
test_data_text=data_path+"/Test_DataSet.csv"
bert_data_path="/Users/wll/AI-MLTraining/bert/examples/ccf-eaoin/data"
bert_data_pred=bert_data_path+"/pred.data"
bert_data_test=bert_data_path+"/test.data"
bert_data_train=bert_data_path+"/train.data"
bert_data_val=bert_data_path+"/val.data" |
from esper.main import EsperTest
def test_esper():
# test esper without any subcommands or arguments
with EsperTest() as app:
app.run()
assert app.exit_code == 0
def test_esper_debug():
# test that debug mode is functional
argv = ['--debug']
with EsperTest(argv=argv) as app:
app.run()
assert app.debug is True
|
# -*- coding: utf-8 -*-
"""Unit test package for onlinecourses_ooo."""
|
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ProvisioningStatus(Model):
"""Represents the status of a provisioning operation.
:param total_steps: Gets the total number of steps in the provisioning
operation.
:type total_steps: int
:param current_step: Gets the current step in the provisioning operation.
:type current_step: int
:param current_operation: Possible values include: 'starting', 'creating',
'loadingData', 'completed'
:type current_operation: str or ~dynamics.customerinsights.api.models.enum
:param created_time_utc: Gets the instance created time.
:type created_time_utc: datetime
:param last_updated_time_utc: Gets the instance last updated time.
:type last_updated_time_utc: datetime
:param errors: Gets a list of provisioning errors.
:type errors:
list[~dynamics.customerinsights.api.models.InstanceProvisioningError]
"""
_attribute_map = {
'total_steps': {'key': 'totalSteps', 'type': 'int'},
'current_step': {'key': 'currentStep', 'type': 'int'},
'current_operation': {'key': 'currentOperation', 'type': 'str'},
'created_time_utc': {'key': 'createdTimeUtc', 'type': 'iso-8601'},
'last_updated_time_utc': {'key': 'lastUpdatedTimeUtc', 'type': 'iso-8601'},
'errors': {'key': 'errors', 'type': '[InstanceProvisioningError]'},
}
def __init__(self, **kwargs):
super(ProvisioningStatus, self).__init__(**kwargs)
self.total_steps = kwargs.get('total_steps', None)
self.current_step = kwargs.get('current_step', None)
self.current_operation = kwargs.get('current_operation', None)
self.created_time_utc = kwargs.get('created_time_utc', None)
self.last_updated_time_utc = kwargs.get('last_updated_time_utc', None)
self.errors = kwargs.get('errors', None)
|