max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5)
---|---|---|---|---|---|---|
test/test.py | elna4os/mlflow_flask_artifacts_logger | 1 | 12792551 | <reponame>elna4os/mlflow_flask_artifacts_logger
import unittest
import requests
from mlflow.exceptions import RestException
from mlflow.tracking import MlflowClient
class TestMLFlowArtifactsProxy(unittest.TestCase):
def testLogArtifact(self):
with open('artifacts/foo.txt', 'rb') as f1, open('artifacts/image.png', 'rb') as f2, open(
'artifacts/animation.gif', 'rb') as f3:
client = MlflowClient(tracking_uri="http://localhost:5000")
try:
experiment_id = client.create_experiment("foo")
            except RestException:  # experiment "foo" already exists
experiment = client.get_experiment_by_name("foo")
experiment_id = experiment.experiment_id
run = client.create_run(experiment_id)
run_id = run.info.run_id
print(experiment_id + ":" + run_id)
files = {'file1': f1, 'file2': f2, 'file3': f3}
data = {'run_id': run_id}
r = requests.post('http://localhost:5001/log_artifact', files=files, data=data)
print(r.text)
if __name__ == '__main__':
unittest.main()
| 2.359375 | 2 |
conftest.py | Chilipp/autodocsumm | 45 | 12792552 | <gh_stars>10-100
import os.path as osp
import sys
import pytest
from sphinx.testing.path import path
pytest_plugins = 'sphinx.testing.fixtures'
sphinx_supp = osp.abspath(osp.join(osp.dirname(__file__), "tests"))
@pytest.fixture(scope='session')
def rootdir():
return path(sphinx_supp)
sys.path.insert(0, osp.join(sphinx_supp, "test-root"))
| 1.59375 | 2 |
LNU_OS/main.py | JessyTsu1/DL_Backup | 0 | 12792553 | from process import * # data structures
from out import * # UI window
# from config import originate, target
import time
import os
if __name__ == '__main__':
run()
| 1.210938 | 1 |
plugins/newbie.py | PalashTanejaPro/cvbot | 0 | 12792554 | <filename>plugins/newbie.py
import re
from errbot import BotPlugin, re_botcmd
class Newbie(BotPlugin):
"""
List the bot rules
"""
@re_botcmd(pattern=r'newbie',
               re_cmd_name_help='newbie',
flags=re.IGNORECASE,
template='newbie.jinja2')
def newbie(self, msg, args):
"""
Show the bot rules.
"""
return {'rules': True}
| 2.359375 | 2 |
wowspy/extras.py | MaT1g3R/Warships.py | 4 | 12792555 | from enum import Enum
from typing import List, Union
class Region(Enum):
NA = 'com'
EU = 'eu'
RU = 'ru'
AS = 'asia'
def lst_of_int(id_, name):
if id_ is None:
return None
if not isinstance(id_, int) and any(not isinstance(x, int) for x in id_):
raise ValueError('{} must be an int or a list of ints'.format(name))
return ','.join([str(i) for i in id_]) if isinstance(id_, list) else id_
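# Illustrative examples (not part of the original module):
#   lst_of_int([123, 456], 'ship_id') -> '123,456'
#   lst_of_int(7, 'account_id')       -> 7
#   lst_of_int(None, 'account_id')    -> None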
l_int = Union[int, List[int]]
| 3.125 | 3 |
marbles.py | jepace/marbles | 0 | 12792556 | <gh_stars>0
# Marbles!
#
# An implementation of the classic marble board game that can be very
# frustrating!
#
from random import randint
#import tkinter
# GLOBALS
BOARDSIZE = 84 # Number of space around the main track
CENTER=98 # "Location" of the center of death.
BASE=99 # "Location" for base spots. All are 99.
HOME=100 # "Location" for home spots - 100, 101, 102, 103
HOMESIZE=4 # How big is your home?
Colors = [ "Blue", "Red", "Cyan", "Purple", "Green", "White" ]
Board = ["" for x in range(0,BOARDSIZE)]
CenterSpace = ""
MagicCircle = [ 7, 21, 35, 49, 63, 77 ] # Locations for the magic circle spaces
Base = {} # Dict of each color's base status
Home = {} # Dict of each color's home status
Marbles = {} # Dict of each color's marble locations
Players = [] # List of active players
# Marbles[color] : { location0, location1, location2, location3 }
# Start[color] : space#
Start = {
"Blue": 0,
"Red": 14,
"Cyan": 28,
"Purple": 42,
"Green": 56,
"White": 70
}
#
# Roll():
#
# Roll a die.
# Returns an int between 1 and 6
#
def Roll():
return randint(1,6)
#
# Display():
#
# Prints out the state of the board.
# XXX: This could be replaced with Tk or something else.
#
def Display():
# Color!
# ANSI color codes for the marbles
ccode={
# [magic];Attrib;FG;BGm
"Blue": "\033[1;97;44m",
"Red": "\033[1;97;41m",
"Cyan": "\033[1;97;46m",
"Purple": "\033[1;97;45m",
"Green": "\033[1;35;42m",
"White": "\033[1;31;47m",
}
# ANSI color codes for start
startColor={
"Blue": "\033[1;34;40m",
"Red": "\033[1;31;40m",
"Cyan": "\033[1;36;40m",
"Purple": "\033[1;35;40m",
"Green": "\033[1;32;40m",
"White": "\033[1;37;40m",
}
# Reset the color to default
creset="\033[m"
output = ["-" for x in range(0,BOARDSIZE)]
for i in range(0,BOARDSIZE):
space = Board[i]
if space == "":
# Use a * to indicate magic circle
if i in MagicCircle:
#output[i] = "*"
#print ("*", end="")
output[i] = chr(0x00A4) # cool circle thing
# Use a # to indicate start spaces
elif i in Start.values():
# What's this? I need to get the color given the
# value. So here's a bunch of casting black magic to
# do that.
thiscolor = list(Start.keys())[list(Start.values()).index(i)]
#output[i] = startColor[thiscolor]+"#"+creset
                #output[i] = startColor[thiscolor]+chr(0x00BB)+creset
                output[i] = startColor[thiscolor]+chr(0x033F)+creset
#print (startColor[thiscolor]+"#"+creset, end="")
elif i % 10 == 0:
output[i] = str(i // 10)
else:
#output[i] = ("-")
output[i] = chr(0x00B7) # A nice dot
#print ("-", end="")
# Occupied space
else:
# If you're on the magic circle, you get an upper case
# letter
if i in MagicCircle:
output[i] = ccode[space]+space[0].upper()+creset
#print (ccode[space]+space[0].upper()+creset, end="")
else:
output[i] = ccode[space]+space[0].lower()+creset
#print (ccode[space]+space[0].lower()+creset, end="")
for i in range(0,BOARDSIZE):
if i >=0 and i < 21:
if i == 0: print ("\t", end="")
print(output[i], end="")
if i == 20:
print()
elif i >= 21 and i < 42:
if i == 31:
if CenterSpace:
cen = ccode[CenterSpace]+CenterSpace[0].upper()+creset
else:
#cen = "-"
#cen = chr(216)
cen = chr(0x00A7) # Hurricane
print("\t%s\t %s\t %s" %(output[104-i],cen,output[i]))
else:
print("\t"+output[104-i],"\t\t ",output[i])
elif i >= 42 and i < 63:
if i == 42: print ("\t", end="")
print (output[104-i], end="") # Print it backwards
print("\n")
for p in Players:
print ("%s\t" %p, end="")
print ("Base:\t", end="")
for b in Base[p]:
if b == "":
#print ("-", end="")
print (chr(0x00B7), end="")
else:
print (ccode[b]+b[0].lower()+creset, end="")
print ("\tHome:\t", end="")
for h in Home[p]:
if h == "":
#print ("-", end="")
print (chr(0x00B7), end="")
else:
print (ccode[h]+h[0].lower()+creset, end="")
print()
#
# Setup():
#
# Gets the board ready for a new game, and assigns player colors.
# Returns: Number of Players
#
def Setup():
# Initialize the bases and colors
for c in Colors:
Base[c] = [ c, c, c, c]
Home[c] = [ "", "", "", ""]
# Where are my marbles? All your base are belong to us.
Marbles[c] = [BASE, BASE, BASE, BASE ]
robotMode = 0
Setup = 0 # Has the game been setup?
while not Setup:
try:
Setup=1
NumPlayers = int(input("How many players? "))
if NumPlayers == 0:
print ("The only way to win is not to play.")
NumPlayers = -6
robotMode = 1
elif NumPlayers >= -6 and NumPlayers <= -2:
print ("Like tears in rain.")
robotMode = 1
elif NumPlayers < 2 or NumPlayers > 6:
print ("Please enter a number between 2 and 6.")
Setup=0
        except (KeyError, TypeError, ValueError):
            print ("Please enter a number between 2 and 6.")
            Setup = 0
print ("Preparing a %d player game." %NumPlayers)
if NumPlayers == 2 or NumPlayers == -2:
Players.append("Blue")
Players.append("Purple")
elif NumPlayers == 3 or NumPlayers == -3:
Players.append("Blue")
Players.append("Cyan")
Players.append("Green")
elif NumPlayers == 4 or NumPlayers == -4:
Players.append("Blue")
Players.append("Purple")
Players.append("White")
Players.append("Cyan")
elif NumPlayers == 5 or NumPlayers == -5:
Players.append("Blue")
Players.append("Purple")
Players.append("White")
Players.append("Cyan")
Players.append("Red")
else:
Players.append("Blue")
Players.append("Purple")
Players.append("White")
Players.append("Cyan")
Players.append("Red")
Players.append("Green")
return NumPlayers
#
# Bonk!
#
# send a guy back to base
#
def Bonk(space):
if space == CENTER:
deadGuy = CenterSpace
else:
deadGuy = Board[space]
Board[space] = ""
Marbles[deadGuy].append(BASE)
Marbles[deadGuy].remove(space)
Base[deadGuy].append(deadGuy)
#
# Move(color, source, destination):
#
# Move marble of color color from source to destination.
#
def Move(color, source, destination):
global CenterSpace
moveDesc = color + ": "
# Remove marble from source
if source == CENTER:
assert CenterSpace == color
CenterSpace = ""
moveDesc += "[Center] -> "
elif source == BASE:
# Remove the marble from the base
assert Base[color].count(color) > 0
Base[color].remove(color)
# The destination is that color's start
destination = Start[color]
moveDesc += "[Base] -> "
elif source >= HOME:
Home[color][source-HOME] = ""
moveDesc += "Home[" + str(source-HOME+1) + "] -> "
else:
assert Board[source] == color
Board[source] = ""
moveDesc += "" + str(source) + " -> "
# Deal with possible destinations
if destination == CENTER:
assert CenterSpace != color
moveDesc += "[Center] "
if CenterSpace:
print ("Bonk! %s hits %s!" %(color, CenterSpace))
moveDesc += "Bonk " + CenterSpace + "!"
Bonk(CENTER)
CenterSpace = color
elif destination >= HOME:
assert Home[color][destination-HOME] != color
Home[color][destination-HOME] = color
moveDesc += "Home[" + str(destination-HOME+1) + "]"
else: # Board destination is not the center or Home
assert Board[destination] != color
moveDesc += "" + str(destination) + " "
# Deal with bonking if destination is not empty
if Board[destination]:
moveDesc += "Bonk " + Board[destination] + "!"
print ("Bonk! %s hits %s!" %(color,Board[destination]))
Bonk(destination)
Board[destination] = color
Marbles[color].remove(source)
Marbles[color].append(destination)
return moveDesc
#
# ValidMove (marble, destination, die)
#
# Check if the move from marble to destination via die is valid
# Returns True / False
#
# This is pretty much a duplicate of GetMoves() but it serves as a
# check because I was having problems. :) I should probably remove
# most of this duplicate logic from GetMoves and have it here only.
# But, you know, this is working.
#
def ValidMove(marble, destination, die, color):
# print ("[Entering] ValidMove(src=%d, dest=%d, die=%d, color=%s)" %(marble, destination, die, color))
assert die > 0 and die < 7
assert color
# Quick check to see if there's a teammate at the destination
if destination < BOARDSIZE:
if Board[destination] == color and marble != destination and die != 6:
return False
# If this marble is in Base, see if it can get out
if marble == BASE:
assert destination == Start[color]
if (die == 1 or die == 6) and (Board[Start[color]]!=color):
return True
return False
assert marble != BASE
# CENTER SPACE HANDLING
# If my roll can take me to one past the MagicCircle, then I
# can enter the Center. marble+die-1 is equal to MagicCircle+1
# Entering the Center space
if destination == CENTER:
assert marble+die-1 in MagicCircle
if CenterSpace == color:
return False
for i in range(1,die+1):
if Board[(marble+i)%BOARDSIZE] == color:
return False
return True
# Leaving the Center space
if marble == CENTER:
if die==1 and Board[destination] != color:
return True
else:
return False
assert marble != CENTER
assert destination != CENTER
# Special case of 6 in the magic circle ending where you start
if marble == destination and die == 6 and marble in MagicCircle:
return True
# MAGIC CIRCLE HANDLING
if marble in MagicCircle:
# magicStart is the index of where we are in the magic
# circle list, so we can bop around by adding die values
# to the index in that list
magicStart = MagicCircle.index(marble)
for i in range(0,die+1):
if destination-i in MagicCircle:
magicDestination = MagicCircle.index(destination-i)
# Check all the magic spaces between where I entered
# and where I exited
for j in range(magicStart, magicDestination+1):
if Board[MagicCircle[j]] == color:
if marble == destination and die == 6:
return False
return True
else:
# The destination is not in the magic circle, so walk
# back to the nearest magic circle space, checking
# that walk.
if Board[destination-i] == color:
return False
return True
assert marble not in MagicCircle
# MOVEMENT INTO HOME
myStart = Start[color]
if myStart == 0: # I have grown to hate Blue in this game
myStart = BOARDSIZE
if marble < myStart and marble+die >= myStart:
# Test the spaces between here and my final location for
# teammates
for i in range(1,die+1):
testloc = marble+i
if testloc >= myStart: # testloc is in the Home zone
testloc -= myStart # How many spaces into Home?
if testloc >= HOMESIZE: # Ran off the end of Home
return False
elif Home[color][testloc]: # somebody in the way
return False
else: # Still on the main board
if Board[testloc] == color: # Can't pass teammate
return False
# Checked all intermediate spaces, and destination space
homeloc = destination - HOME # homeloc is (potential) home space
# Move into Home
if homeloc >= 0 and homeloc < HOMESIZE:
return True
assert False
return False # Something insane happened?
# Movement WITHIN Home
if marble >= HOME:
assert marble < HOME+HOMESIZE
assert destination >= HOME
hm = Home[color] # hm means Home[color]
hp = marble-HOME # hp means Home Position
for i in range(1,die+1):
if(hp+i >= HOMESIZE):
return False
if hp+i > HOMESIZE or hm[hp+i] == color:
return False
return True
# "NORMAL" MOVEMENT
if marble not in MagicCircle and marble < BOARDSIZE and destination < BOARDSIZE:
for i in range(1,die):
if Board[(marble+i)%BOARDSIZE] == color:
return False
return True
# Catch all
assert False
return False
#
# SortMoves(myList)
#
# Used by .sorted to return lists in order
def SortMoves(sub_li):
sub_li.sort(key = lambda x: x[3])
#sub_li.sort(reverse=True,key = lambda x: x[0])
return sub_li
#
# GetMoves (color, die)
#
# Return a list of the valid player options with a die roll
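# Each returned entry is [source_space, destination_space, note, distance]
# (see the response.append(...) calls below); the list is ordered by SortMoves().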
#
def GetMoves(color,die):
assert die > 0 and die < 7
assert color in Colors
assert color in Players
# print ("[Entering] GetMoves(color=%s die=%d)" %(color,die))
# List that we'll be returning with ALL valid moves
response = []
# For each marble, figure out all possible moves
firstStart=1 # Only want to add Start once
for dude in Marbles[color]:
# print ("[] GetMoves(color=%s die=%d) - Check %d" %(color,die,dude))
note ="" # Just in case, clear out any previous note
# If this marble is in Base, see if it can get out
if dude == BASE:
if (die == 1 or die == 6) and (Board[Start[color]]!=color) and (1==firstStart):
note = "[Start"
if Board[Start[color]]:
note += " & Bonk " + Board[Start[color]]
note += "]"
if not ValidMove(dude, Start[color], die, color):
assert False
response.append([dude, Start[color], note, BOARDSIZE])
firstStart=0
continue
else:
continue
#
# Handle "regular" motion starting here:
#
# CENTER SPACE HANDLING
# If my roll can take me to one past the MagicCircle, then I
# can enter the Center. dude+die-1 is equal to MagicCircle+1
if dude+die-1 in MagicCircle and CenterSpace != color:
yep=1
for i in range(1,die+1):
if Board[dude+i] == color:
yep=0
if yep:
note = "[Center"
if CenterSpace:
note += " & Bonk " + CenterSpace
note += "]"
if not ValidMove(dude, CENTER, die, color):
assert False
distance = BOARDSIZE - 8
response.append([dude, CENTER, note, distance])
# If I'm in the center and I got a one, I can roll out to any
# magic circle space
if dude == CENTER:
if die==1:
for i in MagicCircle:
if Board[i] != color:
note = "[Magic Circle"
if Board[i]:
note += " & Bonk " + Board[i]
note += "]"
if not ValidMove(dude, i, die, color):
assert False
distance = BOARDSIZE - (i - Start[color]) % BOARDSIZE
response.append([dude, i, note, distance])
continue
assert dude != CENTER
# MAGIC CIRCLE HANDLING
# If I'm in the magic circle, I can continue normal track, or
# hop one magic circle space and then continue the normal
# track, or hop 2 magic circle spaces and then continue the
# normal track, or ...
if dude in MagicCircle:
# circleNum is the index of where we are in the magic
# circle list, so we can bop around by adding die values
# to the index in that list
circleNum = MagicCircle.index(dude)
# Lots of permutations for magic circle...
for i in range(0, die+1):
circleExit = MagicCircle[(circleNum+i)%len(MagicCircle)]
finalspot = (circleExit + (die-i))%BOARDSIZE
# Now verify that I didn't pass a teammate between dude
# and out
badMove=0
circleBlock=0
# Check magic circle spots I traversed
for mc in range(1,i+1):
if Board[MagicCircle[(circleNum+mc)%len(MagicCircle)]] == color:
# Passed through teammate
# 6 in magic circle means I can land on myself
if mc == 6:
pass
else:
badMove = 1
# Check regular spots after I left circle
for t in range(0,die-i+1): # t is number of hops out of circle
MoveToCheck = (circleExit + t)%BOARDSIZE
if Board[MoveToCheck] == color:
# Handle case where I roll a 6 and want to do
# a full revolution
if dude != MoveToCheck:
# If it is not me, then it is someone else
badMove = 1
if t==0:
# The magic circle is poisoned from
# here on out..
circleBlock = 1
continue
if circleBlock:
continue
if not badMove:
# Add this to the list
# Special processing: If the roll is a 6 in magic
# circle, that isn't bonking because it is me.
special=0
if dude == finalspot: # End where I started
special=1
note = ""
if (finalspot in MagicCircle) or (Board[finalspot]):
note += "["
if finalspot in MagicCircle:
note += "Magic Circle"
if finalspot in MagicCircle and Board[finalspot] and not special:
note += " & "
if Board[finalspot] and not special:
note += "Bonk " + Board[finalspot]
if finalspot in MagicCircle or Board[finalspot]:
note += "]"
if not ValidMove(dude, finalspot, die, color):
assert False
distance = BOARDSIZE - (finalspot - Start[color]) % BOARDSIZE
response.append([dude, finalspot, note, distance])
# MOVEMENT INTO HOME
# NB: Add special cases for Blue, with start space of 0,
# because of modulo problems.
elif (dude < Start[color] and (dude+die)%BOARDSIZE >= Start[color]) or \
(Start[color] == 0 and dude < Start[color]+BOARDSIZE and dude+die >= Start[color]+BOARDSIZE):
badMove = 0
myStart = Start[color]
if myStart == 0: # HACK for Blue with start of 0
myStart = BOARDSIZE
for i in range(1,die+1):
testloc = dude+i
if not badMove and testloc >= myStart: # testloc is in the Home zone
testloc -= myStart # How many spaces into Home?
if testloc >= HOMESIZE: # Ran off the end of Home
badMove = 1
elif Home[color][testloc]: # somebody in the way
badMove = 1
else: # Still on the main board
if Board[testloc%BOARDSIZE] == color: # Can't pass teammate
badMove = 1
# End of for i in range(1,die)
if not badMove: # Valid moves only
loc = dude+die # loc is destination space
homeloc = loc - myStart # homeloc is home space
# Move into Home
if homeloc >= 0 and homeloc < HOMESIZE:
if not ValidMove(dude, HOME+homeloc, die, color):
assert False
response.append([dude, HOME+homeloc, "[Home]", 0])
# Still on the Board
elif loc < myStart:
if Board[loc]:
note = "[Bonk " + Board[loc] + "]"
if not ValidMove(dude, loc, die, color):
assert False
distance = BOARDSIZE - (loc - Start[color]) % BOARDSIZE
response.append([dude, loc, note, distance])
# Movement WITHIN Home
elif dude >= HOME:
hm = Home[color] # hm means Home[color]
hp = dude-HOME # hp means Home Position
valid=1
for i in range(1,die+1):
if(hp+i >= HOMESIZE):
valid=0
continue
if hp+i > HOMESIZE or hm[hp+i] == color:
valid=0
continue
if valid:
if not ValidMove(dude, dude+die, die, color):
assert False
response.append([dude, dude+die, "[Home]", 0])
# "NORMAL" MOVEMENT
elif Board[(dude+die)%BOARDSIZE] != color:
selfPass = 0
for i in range(1,die):
if Board[(dude+i)%BOARDSIZE] == color:
selfPass = 1
continue
if not selfPass:
note = ""
if (dude+die)%BOARDSIZE in MagicCircle or Board[(dude+die)%BOARDSIZE]:
note += "["
if (dude+die)%BOARDSIZE in MagicCircle:
note += "Magic Circle"
if (dude+die)%BOARDSIZE in MagicCircle and Board[(dude+die)%BOARDSIZE]:
note += " & "
if Board[(dude+die)%BOARDSIZE]:
note += "Bonk " + Board[(dude+die)%BOARDSIZE]
if (dude+die)%BOARDSIZE in MagicCircle or Board[(dude+die)%BOARDSIZE]:
note += "]"
if not ValidMove(dude, (dude+die)%BOARDSIZE, die, color):
assert False
distance = BOARDSIZE - ((dude+die)%BOARDSIZE - Start[color]) % BOARDSIZE
response.append([dude, (dude+die)%BOARDSIZE, note, distance])
# Done!
# print ("[Leaving] GetMoves(color=%s die=%d) =" %(color,die),response)
return SortMoves(response)
#
# IsWinner(color)
#
# Determine if color has won. Returns True/False
#
def IsWinner(color):
win=1
for i in range(0, HOMESIZE):
if Home[color][i] != color:
win=0
break
return bool(win)
def TkSetup():
    # Experimental Tk board display; requires uncommenting "import tkinter" near the top.
    root = tkinter.Tk()
    root.title("Marbles!")
    canvas = tkinter.Canvas(root, width=200, height=200, borderwidth=0,
                            bg="black")
canvas.grid()
canvas.create_oval(100,100,200,200,fill="blue",outline="#DDD",width=4)
root.mainloop()
#
# Main
#
def Main():
GameOver = 0 # Is the game over
turnNum = 0
robotMode = 0 # A human is needed
numPlayers = Setup()
if numPlayers <= 0:
robotMode = 1
numPlayers *= -1
# TkSetup()
Display() # Show the initial game board
while not GameOver: # Main game loop
turnNum += 1
for p in range(0,numPlayers):
again=1 # Flag for when a player rolls a 6
while again:
again=0
pColor = Players[p]
myRoll = Roll()
print ("\n%s rolled: %d\n" %(pColor, myRoll))
moves = GetMoves(pColor, myRoll)
if not moves:
print ("No moves available.")
continue
GotInput = 0
selection = 0
# Red always goes for the kill
# White tried to be optimal, but sucked so now takes 1
# Cyan takes option 1
# Purple kills
# Green picks randomly from choices
# Blue is the player .. or she chooses 1
# Deckard is a replicant!
if robotMode and pColor == "Blue":
selection = 1
GotInput = 1
if pColor == "Red" or pColor == "Purple": # Blood shall flow
GotInput = 1
for i in range(0,len(moves)):
if "Bonk" in moves[i][2]:
selection = i+1
print ("Kill!", moves[i])
break
if not selection:
selection = 1
elif pColor == "Cyan" or pColor == "Purple" or pColor == "White":
# Always take the first option
selection = 1
GotInput = 1
elif pColor == "Green":
# Take a random option
selection = randint(1,len(moves))
GotInput = 1
while not GotInput:
option=1 # Counter for the user input menu
for move in moves:
strt, finish, note, distance = move
if finish >= HOME:
if strt >= HOME:
print("\t[%d] Home[%d] -> Home[%d] %s" \
%(option, strt-HOME+1, finish-HOME+1, note))
else:
print("\t[%d] %d -> Home[%d] %s" %(option, strt, finish-HOME+1, note))
elif strt == CENTER:
print("\t[%d] Center -> %d %s" %(option,finish,note))
elif strt == BASE:
print ("\t[%d] Base -> Start %s" %(option,note))
else:
if finish == CENTER:
print ("\t[%d] %d -> Center %s" %(option,strt,note))
elif finish in MagicCircle:
print ("\t[%d] %d -> %d %s" %(option, strt, finish,note))
else:
print ("\t[%d] %d -> %d %s" %(option, strt, finish, note))
option+=1
try:
selection = int(input(pColor + ": Please select an option: "))
GotInput = 1
if selection < 1 or selection > len(moves):
print ("That's not an option. Try again.")
GotInput = 0
except ValueError:
if len(moves) == 1:
selection = 1
GotInput = 1
else:
print ("Bad input")
GotInput = 0
except TypeError:
print ("Bad input")
GotInput = 0
src,dst,note,distance = moves[selection-1]
if not ValidMove(src,dst,myRoll,pColor):
print ("ERROR: ValidMove(%d, %d, %d, %s)" %(src,dst,myRoll,pColor))
return False
response = Move(pColor, src, dst)
Display()
print (response)
if myRoll == 6:
print("%s rolled a 6! Take another turn." %pColor)
again=1
if IsWinner(pColor):
print ("%s wins in %d turns!" %(pColor, turnNum))
GameOver = 1
return # We're out of here!
Main()
| 4.03125 | 4 |
The Grid Manager - Simple Calculator.py | DirkTayab/OOP-1-1 | 0 | 12792557 | from tkinter import *
window = Tk()
window.title("Simple Calculator")
window.geometry("400x300+20+10")
window.grid_columnconfigure(0, weight=1)
class MyWindow:
def __init__(self, window):
self.lbl1 = Label(window, text = "Simple Calculator")
self.lbl1.grid(row = 0, columnspan = 5, pady = (10, 20))
self.lbl2 = Label(window, text = "Enter 1st Number: ")
self.lbl2.grid(row = 2, column = 2)
self.lbl3 = Label(window, text = "Enter 2nd Number: ")
self.lbl3.grid(row = 3, column = 2)
self.txt = Entry(window, bd = 3)
self.txt.grid(row = 2, column = 3)
self.txt2 = Entry(window, bd = 3)
self.txt2.grid(row = 3, column = 3)
def add():
self.txt3.configure(state="normal")
num1 = int(self.txt.get())
num2 = int(self.txt2.get())
ans = num1 + num2
self.txt3.delete(0, END)
self.txt3.insert(END, str(ans))
self.txt3.configure(state="disabled")
return
def sub():
self.txt3.configure(state="normal")
num1 = int(self.txt.get())
num2 = int(self.txt2.get())
ans = num1 - num2
self.txt3.delete(0, END)
self.txt3.insert(END, str(ans))
self.txt3.configure(state="disabled")
return
def multiply():
self.txt3.configure(state="normal")
num1 = int(self.txt.get())
num2 = int(self.txt2.get())
ans = num1*num2
self.txt3.delete(0, END)
self.txt3.insert(END, str(ans))
self.txt3.configure(state="disabled")
return
        def divide():
            self.txt3.configure(state = "normal")
            num1 = int(self.txt.get())
            num2 = int(self.txt2.get())
            try:
                ans = num1 / num2
            except ZeroDivisionError:
                ans = "Cannot divide by zero"
            self.txt3.delete(0, END)
            self.txt3.insert(END, str(ans))
            self.txt3.configure(state = "disabled")
            return
def btnclr():
self.txt3.configure(state = "normal")
self.txt3.delete(0, END)
self.txt3.configure(state = "disabled")
self.txt.delete(0, END)
self.txt2.delete(0, END)
return
self.btn = Button(window, text = "Addition", command = add)
self.btn.grid(row = 4, column = 1, pady = 20)
self.btn2 = Button(window, text = "Subtraction", command = sub)
self.btn2.grid(row = 4, column = 2, padx = (30, 15))
self.btn3 = Button(window, text = "Multiplication", command = multiply)
self.btn3.grid(row = 4, column = 3)
self.btn4 = Button(window, text = "Division", command = divide)
self.btn4.grid(row = 4, column = 4, padx = 15)
self.lbl4 = Label(window, text = "Result: ")
self.lbl4.grid(row = 5, column = 2)
self.txt3 = Entry(window, state = "readonly")
self.txt3.grid(row = 5, column = 3)
self.btn_clr = Button(window, text="Clear", command= btnclr)
self.btn_clr.grid(row=13, column = 3, pady = 5)
mywin=MyWindow(window)
window.mainloop()
| 3.71875 | 4 |
setup.py | Anuj-singla/TOPSIS_PYTHON_PACKAGE | 2 | 12792558 | from setuptools import setup
def readme():
with open('README.md') as f:
README = f.read()
return README
setup(
name = 'TOPSIS_ANUJ_101803638', # How you named your package folder (MyLib)
packages = ['TOPSIS_ANUJ_101803638'], # Chose the same as "name"
version = '0.5', # Start with a small number and increase it with every change you make
license='MIT', # Chose a license from here: https://help.github.com/articles/licensing-a-repository
description = 'THIS PACKAGE IS TO IMPLEMENT TOPSIS', # Give a short description about your library
long_description=readme(),
long_description_content_type="text/markdown",
author = '<NAME>', # Type in your name
author_email = '<EMAIL>', # Type in your E-Mail
install_requires=[ # I get to this in a second
'pandas',
'numpy',
],
classifiers=[
'Development Status :: 3 - Alpha', # Chose either "3 - Alpha", "4 - Beta" or "5 - Production/Stable" as the current state of your package
'Intended Audience :: Developers', # Define that your audience are developers
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: MIT License', # Again, pick a license
        'Programming Language :: Python :: 3', # Specify which Python versions you want to support
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
)
| 1.859375 | 2 |
test/const.py | mikeshultz/py4byte | 0 | 12792559 | <reponame>mikeshultz/py4byte<filename>test/const.py
# flake8: noqa
SWAP_TRANSACTION = {
'blockHash': '0x3f43cba31e73252412c88d42f80b64e3c81ed99a1ef3743d14c891ed0ef54ab3',
'blockNumber': 11325697,
'from': '0x9283099A29556fCF8fFF5b2Cea2D4F67CB7A7A8b',
'gas': 203049,
'gasPrice': 69000000000,
'hash': '0x32e97818ae1732c0cdc387452e24bd3700a610070b2b59de04d69f82c3f73364',
'input': '0x38ed173900000000000000000000000000000000000000000000065a4da25d3016c00000000000000000000000000000000000000000000000000101e38e99b52f2ce48c00000000000000000000000000000000000000000000000000000000000000a00000000000000000000000009283099a29556fcf8fff5b2cea2d4f67cb7a7a8b000000000000000000000000000000000000000000000000000000005fbdf16400000000000000000000000000000000000000000000000000000000000000030000000000000000000000008207c1ffc5b6804f6024322ccf34f29c3541ae26000000000000000000000000c02aaa39b223fe8d0a0e5c4f27ead9083c756cc20000000000000000000000006b175474e89094c44da98b954eedeac495271d0f',
'nonce': 85,
'r': '0x8a4e290dd3ab0440186b84e9f8b253e9e6b04389a3f3214d9da59d7c0533ca85',
's': '0x2ab840455ab71a77cd426fab7f2cd9307e3bd0a7699a73d49e807d68ca3d0e18',
'to': '0x7a250d5630B4cF539739dF2C5dAcb4c659F2488D',
'transactionIndex': 8,
'v': 37,
'value': 0
}
SWAP_RECEIPT = {
'blockHash': '0x3f43cba31e73252412c88d42f80b64e3c81ed99a1ef3743d14c891ed0ef54ab3',
'blockNumber': 11325697,
'contractAddress': None,
'cumulativeGasUsed': 872286,
'from': '0x9283099A29556fCF8fFF5b2Cea2D4F67CB7A7A8b',
'gasUsed': 174314,
'logs': [
{
'address': '0x8207c1FfC5B6804F6024322CcF34F29c3541Ae26',
'blockHash': '0x3f43cba31e73252412c88d42f80b64e3c81ed99a1ef3743d14c891ed0ef54ab3',
'blockNumber': 11325697,
'data': '0x00000000000000000000000000000000000000000000065a4da25d3016c00000',
'logIndex': 18,
'removed': False,
'topics': [
'0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef',
'0x0000000000000000000000009283099a29556fcf8fff5b2cea2d4f67cb7a7a8b',
'0x000000000000000000000000ce2cc0513634cef3a7c9c257e294ef5e3092f185'
],
'transactionHash': '0x32e97818ae1732c0cdc387452e24bd3700a610070b2b59de04d69f82c3f73364',
'transactionIndex': 8
},
{
'address': '0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2',
'blockHash': '0x3f43cba31e73252412c88d42f80b64e3c81ed99a1ef3743d14c891ed0ef54ab3',
'blockNumber': 11325697,
'data': '0x0000000000000000000000000000000000000000000000007291d30a40c19a3b',
'logIndex': 19,
'removed': False,
'topics': [
'0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef',
'0x000000000000000000000000ce2cc0513634cef3a7c9c257e294ef5e3092f185',
'0x000000000000000000000000a478c2975ab1ea89e8196811f51a7b7ade33eb11'
],
'transactionHash': '0x32e97818ae1732c0cdc387452e24bd3700a610070b2b59de04d69f82c3f73364',
'transactionIndex': 8
},
{
'address': '0xce2Cc0513634CEf3a7C9C257E294EF5E3092f185',
'blockHash': '0x3f43cba31e73252412c88d42f80b64e3c81ed99a1ef3743d14c891ed0ef54ab3',
'blockNumber': 11325697,
'data': '0x00000000000000000000000000000000000000000001c8b8cbb210149443c7c300000000000000000000000000000000000000000000001fd2b17e29f3c7fe13',
'logIndex': 20,
'removed': False,
'topics': [
'0x1c411e9a96e071241c2f21f7726b17ae89e3cab4c78be50e062b03a9fffbbad1'
],
'transactionHash': '0x32e97818ae1732c0cdc387452e24bd3700a610070b2b59de04d69f82c3f73364',
'transactionIndex': 8
},
{
'address': '0xce2Cc0513634CEf3a7C9C257E294EF5E3092f185',
'blockHash': '0x3f43cba31e73252412c88d42f80b64e3c81ed99a1ef3743d14c891ed0ef54ab3',
'blockNumber': 11325697,
'data': '0x00000000000000000000000000000000000000000000065a4da25d3016c00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000007291d30a40c19a3b',
'logIndex': 21,
'removed': False,
'topics': [
'0xd78ad95fa46c994b6551d0da85fc275fe613ce37657fb8d5e3d130840159d822',
'0x0000000000000000000000007a250d5630b4cf539739df2c5dacb4c659f2488d',
'0x000000000000000000000000a478c2975ab1ea89e8196811f51a7b7ade33eb11'
],
'transactionHash': '0x32e97818ae1732c0cdc387452e24bd3700a610070b2b59de04d69f82c3f73364',
'transactionIndex': 8
},
{
'address': '0x6B175474E89094C44Da98b954EedeAC495271d0F',
'blockHash': '0x3f43cba31e73252412c88d42f80b64e3c81ed99a1ef3743d14c891ed0ef54ab3',
'blockNumber': 11325697,
'data': '0x0000000000000000000000000000000000000000000001032c7405f5694857e5',
'logIndex': 22,
'removed': False,
'topics': [
'0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef',
'0x000000000000000000000000a478c2975ab1ea89e8196811f51a7b7ade33eb11',
'0x0000000000000000000000009283099a29556fcf8fff5b2cea2d4f67cb7a7a8b'
],
'transactionHash': '0x32e97818ae1732c0cdc387452e24bd3700a610070b2b59de04d69f82c3f73364',
'transactionIndex': 8
},
{
'address': '0xA478c2975Ab1Ea89e8196811F51A7B7Ade33eB11',
'blockHash': '0x3f43cba31e73252412c88d42f80b64e3c81ed99a1ef3743d14c891ed0ef54ab3',
'blockNumber': 11325697,
'data': '0x00000000000000000000000000000000000000000034faa46be5604324421cca000000000000000000000000000000000000000000001759ed6d2f07716bdd40',
'logIndex': 23,
'removed': False,
'topics': ['0x1c411e9a96e071241c2f21f7726b17ae89e3cab4c78be50e062b03a9fffbbad1'],
'transactionHash': '0x32e97818ae1732c0cdc387452e24bd3700a610070b2b59de04d69f82c3f73364',
'transactionIndex': 8
},
{
'address': '0xA478c2975Ab1Ea89e8196811F51A7B7Ade33eB11',
'blockHash': '0x3f43cba31e73252412c88d42f80b64e3c81ed99a1ef3743d14c891ed0ef54ab3',
'blockNumber': 11325697,
'data': '0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000007291d30a40c19a3b0000000000000000000000000000000000000000000001032c7405f5694857e50000000000000000000000000000000000000000000000000000000000000000',
'logIndex': 24,
'removed': False,
'topics': [
'0xd78ad95fa46c994b6551d0da85fc275fe613ce37657fb8d5e3d130840159d822',
'0x0000000000000000000000007a250d5630b4cf539739df2c5dacb4c659f2488d',
'0x0000000000000000000000009283099a29556fcf8fff5b2cea2d4f67cb7a7a8b'
],
'transactionHash': '0x32e97818ae1732c0cdc387452e24bd3700a610070b2b59de04d69f82c3f73364',
'transactionIndex': 8
}
],
'logsBloom': '0x00200000400000000000000080010000000000000000000000010000000000001000000000000000000000000000000012000000080000001000000000000000000000000200000000080008000000200000000000001000000000000000000011000000000080000000000000000000000000000000000000000010000000000080000000000000004000000000000000000000000000080000004000000000000000000000000040200000100000000000000000000100000200000000000000000002000000000000080000400000000002000000001000000000000020000000200000000000000000000000000000000000000000000000000000000000',
'status': 1,
'to': '0x7a250d5630B4cF539739dF2C5dAcb4c659F2488D',
'transactionHash': '0x32e97818ae1732c0cdc387452e24bd3700a610070b2b59de04d69f82c3f73364',
'transactionIndex': 8
}
| 1.796875 | 2 |
acq4/util/prioritylock.py | campagnola/acq4 | 47 | 12792560 | <reponame>campagnola/acq4
from __future__ import print_function, division
import weakref
from threading import Lock, Thread, Event
from six.moves import queue
from .future import Future
class PriorityLock(object):
"""Mutex with asynchronous locking and priority queueing.
The purpose of this class is to provide a mutex that:
- Uses futures for acquiring locks asynchronously
- Allows locks to be acquired in priority order
Examples::
lock = PriorityLock()
# manual lock / wait / release
req = lock.acquire()
req.wait() # wait for lock to be acquired
# .. do stuff while lock is acquired
req.release()
# context manager
with lock.acquire() as req:
req.wait()
# .. do stuff while lock is acquired
"""
def __init__(self, name=None):
self.name = name
self.req_count = Counter()
self.lock_queue = queue.PriorityQueue()
self.unlock_event = Event()
self.unlock_event.set()
self.lock_thread = Thread(target=self._lock_loop)
self.lock_thread.daemon = True
self.lock_thread.start()
def acquire(self, priority=0, name=None):
"""Return a Future that completes when the lock is acquired.
Higher priority values will be locked first.
"""
fut = PriorityLockRequest(self, name=name)
# print("request lock:", fut)
self.lock_queue.put((-priority, next(self.req_count), fut))
return fut
def _release_lock(self, fut):
with fut._acq_lock:
# print("release request:", fut)
if fut.released:
return
fut._released = True
if fut.acquired:
# print("release lock:", fut)
fut._acquired = False
self.unlock_event.set()
else:
fut._taskDone(interrupted=True)
def _lock_loop(self):
while True:
# wait for lock to become available
self.unlock_event.wait()
# get next lock request
while True:
_, _, fut = self.lock_queue.get()
with fut._acq_lock:
if fut._released:
# future has already been released; don't assign lock
continue
# assign lock to this request
# print("assign lock:", fut)
fut._acquired = True
fut._taskDone()
self.unlock_event.clear()
break
def __repr__(self):
return "<%s %s 0x%x>" % (self.__class__.__name__, self.name, id(self))
class PriorityLockRequest(Future):
def __init__(self, mutex, name):
Future.__init__(self)
self.mutex = weakref.ref(mutex)
self.name = name
self._acq_lock = Lock()
self._wait_event = Event()
self._acquired = False
self._released = False
@property
def acquired(self):
"""If True, then this request currently has the lock acquired and prevents other requests
from acquiring the lock.
"""
return self._acquired
@property
def released(self):
"""If True, then this request has released its lock (if any) and can never acquire the lock again.
"""
return self._released
def _wait(self, timeout):
self._wait_event.wait(timeout=timeout)
def percentDone(self):
return 100 if (self.acquired or self.released) else 0
def release(self):
"""Release this lock request.
If the lock is currently acquired, then it is released and another queued request may
acquire the lock in turn. If the lock is not already acquired, then this request is simply
cancelled and will never acquire a lock.
"""
mutex = self.mutex()
if mutex is None:
return
mutex._release_lock(self)
def _taskDone(self, *args, **kwds):
self._wait_event.set()
Future._taskDone(self, *args, **kwds)
def __enter__(self):
return self
def __exit__(self, *args):
self.release()
def __repr__(self):
return "<%s %s 0x%x>" % (self.__class__.__name__, self.name, id(self))
class Counter(object):
"""Just a thread-safe counter, returns the next integer every time next() is called.
"""
def __init__(self):
self.value = 0
self.lock = Lock()
def __iter__(self):
return self
def __next__(self): # for py3
with self.lock:
self.value += 1
return self.value - 1
def next(self): # for py2
return self.__next__()
| 3.203125 | 3 |
bin/print_busco_config.py | ewels/nf-core-neutronstar | 4 | 12792561 | #!/usr/bin/env python
from __future__ import print_function
import os
print(
"""[busco]
out_path = {0}
tmp_path = {0}/tmp
[tblastn]
# path to tblastn
path = /usr/bin/
[makeblastdb]
# path to makeblastdb
path = /usr/bin/
[augustus]
# path to augustus
path = /opt/augustus/bin/
[etraining]
# path to augustus etraining
path = /opt/augustus/bin/
# path to augustus perl scripts, redeclare it for each new script
[gff2gbSmallDNA.pl]
path = /usr/bin/
[new_species.pl]
path = /usr/bin/
[optimize_augustus.pl]
path = /usr/bin/
[hmmsearch]
# path to HMMsearch executable
path = /usr/local/bin/
[Rscript]
# path to Rscript, if you wish to use the plot tool
path = /usr/bin/""".format(os.environ['PWD'])
)
| 1.695313 | 2 |
logs/models.py | Ishayahu/MJCC-tasks | 1 | 12792562 | # -*- coding:utf-8 -*-
# coding=<utf8>
from django.db import models
# Models for logging user actions with assets
class Logging(models.Model):
user = models.CharField(max_length=140)
request = models.TextField(blank = True, null = True)
goal = models.TextField(blank = True, null = True)
done = models.BooleanField(default=False)
datetime = models.DateTimeField()
def __unicode__(self):
        return str(self.id)+';'.join((str(self.datetime),self.user,self.goal,str(self.done)))
| 2 | 2 |
directory_components/forms/forms.py | MichaelWalker/directory-components | 2 | 12792563 | from directory_constants.choices import COUNTRY_CHOICES
from django import forms
from django.conf import settings
from django.forms import Select
from django.template.loader import render_to_string
from django.utils import translation
from directory_components.forms import fields
from directory_components import helpers
__all__ = [
'CountryForm',
'DirectoryComponentsFormMixin',
'Form',
'get_country_form_initial_data',
'get_language_form_initial_data',
'LanguageForm',
]
BLANK_COUNTRY_CHOICE = [("", "Select a country")]
COUNTRIES = BLANK_COUNTRY_CHOICE + COUNTRY_CHOICES
class DirectoryComponentsFormMixin:
use_required_attribute = False
error_css_class = 'form-group-error'
def __str__(self):
return render_to_string('directory_components/form_widgets/form.html', {'form': self})
class Form(DirectoryComponentsFormMixin, forms.Form):
pass
class CountryForm(Form):
country = fields.ChoiceField(
label='Country',
widget=Select(attrs={'id': 'great-header-country-select'}),
choices=COUNTRIES
)
def get_country_form_initial_data(request):
return {
'country': helpers.get_user_country(request).upper() or None
}
class LanguageForm(forms.Form):
lang = fields.ChoiceField(
widget=Select(attrs={'id': 'great-header-language-select'}),
choices=[] # set by __init__
)
def __init__(self, language_choices=settings.LANGUAGES, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['lang'].choices = language_choices
def is_language_available(self, language_code):
language_codes = [code for code, _ in self.fields['lang'].choices]
return language_code in language_codes
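    # Illustrative usage (assumed choices, not from the original code):
    #   LanguageForm(language_choices=[('en', 'English'), ('fr', 'French')]).is_language_available('de') -> False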
def get_language_form_initial_data():
return {
'lang': translation.get_language()
}
| 2.171875 | 2 |
Tools/ecl_ekf/batch_process_logdata_ekf.py | WeRobotics/Firmware | 6 | 12792564 | <filename>Tools/ecl_ekf/batch_process_logdata_ekf.py<gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import os, glob
"""
Runs process_logdata_ekf.py on the .ulg files in the supplied directory. ulog files are skipped from the analysis, if a
corresponding .pdf file already exists (unless the overwrite flag was set).
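Example invocation (illustrative path):
    python batch_process_logdata_ekf.py /path/to/ulog_dir --overwrite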
"""
parser = argparse.ArgumentParser(description='Analyse the estimator_status and ekf2_innovation message data for the'
' .ulg files in the specified directory')
parser.add_argument("directory_path")
parser.add_argument('-o', '--overwrite', action='store_true',
help='Whether to overwrite an already analysed file. If a file with .pdf extension exists for a .ulg'
                         ' file, the log file will be skipped from analysis unless this flag has been set.')
def is_valid_directory(parser, arg):
if os.path.isdir(arg):
# Directory exists so return the directory
return arg
else:
parser.error('The directory {} does not exist'.format(arg))
args = parser.parse_args()
ulog_directory = args.directory_path
print("\n"+"analysing the .ulg files in "+ulog_directory)
# get all the ulog files found in the specified directory
ulog_files = glob.glob(os.path.join(ulog_directory, '*.ulg'))
# remove the files already analysed unless the overwrite flag was specified. A ulog file is considered to be analysed if
# a corresponding .pdf file exists.
if not args.overwrite:
print("skipping already analysed ulg files.")
ulog_files = [ulog_file for ulog_file in ulog_files if not os.path.exists('{}.pdf'.format(ulog_file))]
# analyse all ulog files
for ulog_file in ulog_files:
print("\n"+"loading "+ulog_file +" for analysis")
os.system("python process_logdata_ekf.py '{}'".format(ulog_file))
| 2.734375 | 3 |
bm/apis/v1/APIsServices.py | kapousa/BrontoMind2 | 0 | 12792565 | import json
import numpy
from bm.controllers.prediction.ModelController import predict_values_from_model
from bm.db_helper.AttributesHelper import get_features, get_model_name, get_labels
class NpEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, (numpy.int_, numpy.intc, numpy.intp, numpy.int8,
numpy.int16, numpy.int32, numpy.int64, numpy.uint8,
numpy.uint16, numpy.uint32, numpy.uint64)):
return int(obj)
elif isinstance(obj, (numpy.float_, numpy.float16, numpy.float32,
numpy.float64)):
return float(obj)
elif isinstance(obj, (numpy.ndarray,)): # add this line
return obj.tolist() # add this line
return json.JSONEncoder.default(self, obj)
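# Illustrative example (not part of the original module):
#   json.dumps({'count': numpy.int64(3), 'score': numpy.float32(0.5)}, cls=NpEncoder) -> '{"count": 3, "score": 0.5}'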
def predictvalues(content):
model_name = get_model_name()
features_list = get_features()
lables_list = get_labels()
testing_values = []
for i in features_list:
feature_value = str(content[i])
final_feature_value = feature_value # float(feature_value) if feature_value.isnumeric() else feature_value
testing_values.append(final_feature_value)
predicted_value = predict_values_from_model(model_name, testing_values)
# Create predicted values json object
predicted_values_json = {}
for j in range(len(predicted_value)):
for i in range(len(lables_list)):
bb = predicted_value[j][i]
predicted_values_json[lables_list[i]] = predicted_value[j][i]
# NpEncoder = NpEncoder(json.JSONEncoder)
json_data = json.dumps(predicted_values_json, cls=NpEncoder)
return json_data
def getplotiamge(content):
return 0
def getmodelfeatures():
features_list = get_features()
features_json = {}
j = 0
for i in features_list:
yy = str(i)
features_json[i] = i
j += 1
# NpEncoder = NpEncoder(json.JSONEncoder)
json_data = json.dumps(features_json, cls=NpEncoder)
return json_data
def getmodellabels():
labels_list = get_labels()
labelss_json = {}
j = 0
for i in labels_list:
yy = str(i)
labelss_json[i] = i
j += 1
# NpEncoder = NpEncoder(json.JSONEncoder)
json_data = json.dumps(labelss_json, cls=NpEncoder)
return json_data
def getmodelprofile(contents):
return 0
def nomodelfound():
no_model_found = {'no_model':'No Model found' }
json_data = json.dumps(no_model_found, cls=NpEncoder)
    return json_data
| 2.703125 | 3 |
manganelo/errors.py | nixonjoshua98/manganelo | 22 | 12792566 |
class ManganeloError(BaseException):
...
class NotFound(ManganeloError):
...
| 1.617188 | 2 |
prepare.py | NateLol/BAM_A_lightweight_but_efficient_Balanced_attention_mechanism_for_super_resolution | 33 | 12792567 | <gh_stars>10-100
import argparse
import glob
import h5py
import numpy as np
import PIL.Image as pil_image
from torchvision.transforms import transforms
def train(args):
h5_file = h5py.File(args.output_path, 'w')
lr_group = h5_file.create_group('lr')
hr_group = h5_file.create_group('hr')
image_list = sorted(glob.glob('{}/*'.format(args.images_dir)))
patch_idx = 0
for i, image_path in enumerate(image_list):
hr = pil_image.open(image_path).convert('RGB')
for hr in transforms.FiveCrop(size=(hr.height // 2, hr.width // 2))(hr):
hr = hr.resize(((hr.width // args.scale) * args.scale, (hr.height // args.scale) * args.scale), resample=pil_image.BICUBIC)
lr = hr.resize((hr.width // args.scale, hr.height // args.scale), resample=pil_image.BICUBIC)
hr = np.array(hr)
lr = np.array(lr)
lr_group.create_dataset(str(patch_idx), data=lr)
hr_group.create_dataset(str(patch_idx), data=hr)
patch_idx += 1
print(i, patch_idx, image_path)
h5_file.close()
def eval(args):
h5_file = h5py.File(args.output_path, 'w')
lr_group = h5_file.create_group('lr')
hr_group = h5_file.create_group('hr')
for i, image_path in enumerate(sorted(glob.glob('{}/*'.format(args.images_dir)))):
hr = pil_image.open(image_path).convert('RGB')
hr_width = (hr.width // args.scale) * args.scale
hr_height = (hr.height // args.scale) * args.scale
hr = hr.resize((hr_width, hr_height), resample=pil_image.BICUBIC)
lr = hr.resize((hr.width // args.scale, hr_height // args.scale), resample=pil_image.BICUBIC)
hr = np.array(hr)
lr = np.array(lr)
lr_group.create_dataset(str(i), data=lr)
hr_group.create_dataset(str(i), data=hr)
print(i)
h5_file.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--images-dir', type=str, required=False,default="../../DIV2K/DIV2K_train_HR/HR") #../../classical_SR_datasets/Set5/Set5 #../../DIV2K/DIV2K_train_HR/HR
parser.add_argument('--output-path', type=str, required=False,default="./h5file_DIV2K_train_HR_x8_train")
parser.add_argument('--scale', type=int, default=8)
parser.add_argument('--eval', action='store_true',default=False)
args = parser.parse_args()
if not args.eval:
train(args)
else:
eval(args)
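# Example invocations (illustrative; the defaults above point at local DIV2K paths):
#   python prepare.py --images-dir <HR_image_dir> --output-path ./train_x8.h5 --scale 8
#   python prepare.py --images-dir <eval_image_dir> --output-path ./eval_x8.h5 --scale 8 --eval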
| 2.390625 | 2 |
trackeddy/utils/field_generator.py | navidcy/trackeddy | 36 | 12792568 | import numpy as np
import random as rnd
import pdb
def dist(loc1,loc2):
return np.sqrt((loc1[0]-loc2[0])**2 + (loc2[1]-loc1[1])**2)
#### BUG WHEN LEN(x) != LEN(y)
class Generate_field():
def __init__(self,a,b,n,x,y,opt=''):
self.xlen=len(x)
self.ylen=len(y)
self.a = a*rnd.uniform(0.7, 1.3)
self.b = b*rnd.uniform(0.7, 1.3)
self.x = x
self.y = y
self.n = n
self.opt = opt
if type(self.n) != list or type(self.n) != tuple:
self.eddies = {'eddy_n%s' % ii:{'loc':[[rnd.randint(0,self.xlen-1),\
rnd.randint(0,self.ylen-1)]],'grow':True,\
'radius':[self.a,self.b],'angle':rnd.uniform(0, 2*np.pi),\
'amp':rnd.choice([-1,1])*rnd.uniform(0.7, 1.3)} for ii in range(self.n)}
else:
raise ValueError("No right input.")
def go_right(self,indexs,step):
return [0,step]
def go_upright(self,indexs,step):
return [step,step]
def go_up(self,indexs,step):
return [step,0]
def go_upleft(self,indexs,step):
return [step,-step]
def go_left(self,indexs,step):
return [0,-step]
def go_downleft(self,indexs,step):
return [-step,-step]
def go_down(self,indexs,step):
return [-step,0]
def go_downright(self,indexs,step):
return [-step,step]
def twoD_Gaussian(self, coords, sigma_x, sigma_y, theta, slopex=0, slopey=0, offset=0):
'''
*************** twoD_Gaussian *******************
        Build a 2D Gaussian.
        Notes:
            Remember to do g.ravel().reshape(len(x),len(y)) for plotting purposes.
        Args:
            coords [x, y, amplitude, xo, yo] (list|array): Grid coordinates, amplitude, and center of the Gaussian.
            sigma_x, sigma_y (float): Deviation.
            theta (float): Orientation.
            offset (float): Gaussian offset.
Returns:
g.ravel() (list|array) - Gaussian surface in a list.
Usage:
Check scan_eddym function.
'''
x=coords[0]
y=coords[1]
amplitude = coords[2]
xo = float(coords[3])
yo = float(coords[4])
xo = float(xo)
yo = float(yo)
if sigma_y or sigma_x != 0:
a = (np.cos(theta)**2)/(2*sigma_x**2) + (np.sin(theta)**2)/(2*sigma_y**2)
b = -(np.sin(2*theta))/(4*sigma_x**2) + (np.sin(2*theta))/(4*sigma_y**2)
c = (np.sin(theta)**2)/(2*sigma_x**2) + (np.cos(theta)**2)/(2*sigma_y**2)
g = amplitude*np.exp( - (a*((x-xo)**2) + 2*b*(x-xo)*(y-yo) + c*((y-yo)**2)))
else:
g = (x-xo)*0 + (y-yo)*0
return g.ravel()
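    # Illustrative usage (assumed grid values, not part of the original code):
    #   field = Generate_field(1.0, 1.0, 5, x, y)
    #   X, Y = np.meshgrid(field.x, field.y)
    #   g = field.twoD_Gaussian((X, Y, 1.0, 0.0, 0.0), sigma_x=1.0, sigma_y=1.0, theta=0.0).reshape(X.shape)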
def checkposition(self,away_val=5,loc=False):
if loc == True:
eddies_loc=[[rnd.randint(0,self.xlen-1),rnd.randint(0,self.ylen-1)] for key,item in self.eddies.items()]
else:
eddies_loc=[item['loc'][-1] for key,item in self.eddies.items()]
for key1,item1 in self.eddies.items():
xc1=item1['loc'][0][0]
yc1=item1['loc'][0][1]
distance=np.array([dist([self.x[xc1],self.y[yc1]],[self.x[ii],self.y[jj]]) for ii,jj in eddies_loc])
distance[distance==0]=away_val*self.a
checker = ((distance < away_val*self.a).any() or (distance < away_val*self.b).any() ) or loc==True
count = 0
while checker or count >= 10000:
newx=rnd.randint(0,self.xlen-1)
newy=rnd.randint(0,self.ylen-1)
self.eddies[key1]['loc']=[[newx, newy]]
eddies_loc=[item['loc'][-1] for key,item in self.eddies.items()]
#pdb.set_trace()
xc1=newx
yc1=newy
distance=np.array([dist([self.x[xc1],self.y[yc1]],[self.x[ii],self.y[jj]]) for ii,jj in eddies_loc])
numzeros = [ii for ii in distance if ii == 0]
if len(numzeros) <= 1:
distance[distance==0]=np.inf
else:
distance[distance==0] = away_val*self.a
checker = ((distance < away_val*self.a).any() or (distance < away_val*self.b).any() )
count = count + 1
if loc == True:
return self.eddies
def make_random_walk(self,indexs, steps):
move_dict = {
1: self.go_up,
2: self.go_right,
3: self.go_left,
4: self.go_down,
5: self.go_downleft,
6: self.go_downright,
7: self.go_upleft,
8: self.go_upright,
}
#for _ in range(steps):
for ii in indexs:
move_in_a_direction = move_dict[rnd.randint(1, 8)]
movcood=move_in_a_direction(ii,steps)
return indexs[0]+movcood[0],indexs[1]+movcood[1]
def assemble_field(self, N,margin=50):
data=np.zeros((N,self.xlen+2*margin,self.ylen+2*margin))
for t in range(N):
#pdb.set_trace()
if self.opt == 'no_interaction' or self.opt == 'Nint':
self.eddies=self.checkposition(away_val=5,loc=True)
else:
pass
for keys, item in self.eddies.items():
gauss=self.twoD_Gaussian(self.pass_args(keys,margin),item['radius'][0], item['radius'][1], item['angle']).reshape(np.shape(data[0,:,:]))
data[t,:,:]=data[t,:,:]+gauss
return data
def reconstruct_field(self):
data=np.zeros((self.xlen,self.ylen))
for keys, item in self.eddies.items():
gauss=self.twoD_Gaussian(self.pass_args(keys),item['radius'][0], item['radius'][1], item['angle']).reshape(np.shape(data))
data=data+gauss
return data
def pass_args(self,key,margin=50):
self.x = np.linspace(min(self.x),max(self.x),self.xlen+2*margin)
self.y = np.linspace(min(self.y),max(self.y),self.ylen+2*margin)
X,Y=np.meshgrid(self.x,self.y)
if self.opt == 'interaction' or self.opt == 'int':
xloc=rnd.randint(0,self.xlen-1)+margin
yloc=rnd.randint(0,self.ylen-1)+margin
eddy_parms=(X,Y,self.eddies[key]['amp'],self.x[xloc],self.y[yloc])
else:
eddy_parms=(X,Y,self.eddies[key]['amp'],self.x[self.eddies[key]['loc'][0][0]+margin],self.y[self.eddies[key]['loc'][0][1]+margin])
return eddy_parms
| 3.015625 | 3 |
examples/scheduling.py | arnimarj/crochet | 152 | 12792569 | <filename>examples/scheduling.py
#!/usr/bin/python
"""
An example of scheduling time-based events in the background.
Download the latest EUR/USD exchange rate from Yahoo every 30 seconds in the
background; the rendered Flask web page can use the latest value without
having to do the request itself.
Note this is example is for demonstration purposes only, and is not actually
used in the real world. You should not do this in a real application without
reading Yahoo's terms-of-service and following them.
"""
from __future__ import print_function
from flask import Flask
from twisted.internet.task import LoopingCall
from twisted.web.client import getPage
from twisted.python import log
from crochet import wait_for, run_in_reactor, setup
setup()
# Twisted code:
class _ExchangeRate(object):
"""Download an exchange rate from Yahoo Finance using Twisted."""
def __init__(self, name):
self._value = None
self._name = name
# External API:
def latest_value(self):
"""Return the latest exchange rate value.
May be None if no value is available.
"""
return self._value
def start(self):
"""Start the background process."""
self._lc = LoopingCall(self._download)
# Run immediately, and then every 30 seconds:
self._lc.start(30, now=True)
def _download(self):
"""Download the page."""
print("Downloading!")
def parse(result):
print("Got %r back from Yahoo." % (result,))
values = result.strip().split(",")
self._value = float(values[1])
d = getPage(
"http://download.finance.yahoo.com/d/quotes.csv?e=.csv&f=c4l1&s=%s=X"
% (self._name,))
d.addCallback(parse)
d.addErrback(log.err)
return d
# Blocking wrapper:
class ExchangeRate(object):
"""Blocking API for downloading exchange rate."""
def __init__(self, name):
self._exchange = _ExchangeRate(name)
@run_in_reactor
def start(self):
self._exchange.start()
@wait_for(timeout=1)
def latest_value(self):
"""Return the latest exchange rate value.
May be None if no value is available.
"""
return self._exchange.latest_value()
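    # Note (descriptive): once start() has been called (see __main__ below), latest_value()
    # can be called from any non-reactor thread and blocks for at most 1 second,
    # per the wait_for(timeout=1) decorator above.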
EURUSD = ExchangeRate("EURUSD")
app = Flask(__name__)
@app.route('/')
def index():
rate = EURUSD.latest_value()
if rate is None:
rate = "unavailable, please refresh the page"
return "Current EUR/USD exchange rate is %s." % (rate,)
if __name__ == '__main__':
import sys, logging
logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
EURUSD.start()
app.run()
| 3.15625 | 3 |
camera-opencv/01-color_space/crop.py | josemarin7/Python-OpenCV-Recognition-via-Camera | 0 | 12792570 | <reponame>josemarin7/Python-OpenCV-Recognition-via-Camera<gh_stars>0
import cv2
import numpy as np
img = cv2.imread("lena256rgb.jpg")
cv2.imshow("Normal", img)
cv2.waitKey(0)
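# NumPy slicing on an OpenCV image is img[y1:y2, x1:x2]: rows (height) first, then columns (width).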
face = img[95:195, 100:180]
cv2.imshow("Face", face)
cv2.waitKey(0)
body = img[20:, 35:210]
cv2.imshow("Body", body)
cv2.waitKey(0)
cv2.destroyAllWindows()
| 2.984375 | 3 |
utils/transforms/augmenter.py | meet-minimalist/Learn-pytorch-in-one-example | 0 | 12792571 | <reponame>meet-minimalist/Learn-pytorch-in-one-example<filename>utils/transforms/augmenter.py
import imgaug.augmenters as iaa
class Augmenter(object):
def __init__(self):
self.seq_aug = iaa.SomeOf((1, 2), [
iaa.OneOf([
#iaa.Dropout(p=(0.1, 0.2)),
iaa.CoarseDropout(0.05, size_percent=0.1, per_channel=0.5),
iaa.SaltAndPepper(0.05),
iaa.CoarseSaltAndPepper(0.03, size_percent=(0.1, 0.2))
]),
iaa.OneOf([
iaa.GaussianBlur(sigma=(0.5, 1.0)),
iaa.MedianBlur(k=(3, 5)),
iaa.MotionBlur(k=5, angle=[-45, 45])
]),
iaa.OneOf([
iaa.MultiplyAndAddToBrightness(mul=(0.5, 1.5), add=(-30, 30)),
iaa.Grayscale(alpha=(0.5, 1.0)),
iaa.AddToHueAndSaturation((-50, 50))
]),
iaa.OneOf([
iaa.Fliplr(0.5),
iaa.Affine(scale=(0.8, 1.2)),
iaa.Affine(translate_percent={'x': (-0.2, 0.2), 'y': (-0.2, 0.2)}),
iaa.Affine(rotate=(-30, 30)),
iaa.Affine(shear={'x': (-15, 15), 'y': (-15, 15)})
])
], random_order=True)
def __call__(self, sample):
image, label = sample['image'], sample['label']
image = self.seq_aug(image=image)
return {'image':image, 'label':label}
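# Illustrative usage (assumed uint8 HxWxC image; not part of the original module):
#   import numpy as np
#   sample = {'image': np.zeros((64, 64, 3), dtype=np.uint8), 'label': 0}
#   augmented = Augmenter()(sample)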
| 2.546875 | 3 |
mmdet/models/cls_heads/__init__.py | atoaiari/mmdetection | 0 | 12792572 | # Copyright (c) OpenMMLab. All rights reserved.
from .cls_head import ClsHead
from .linear_head import LinearClsHead
from .orientation_head import OrientationHead
from .resnet_orientation_head import ResNetOrientationHead
__all__ = [
'ClsHead', 'LinearClsHead', 'OrientationHead', 'ResNetOrientationHead'
]
| 0.988281 | 1
niddk_covid_sicr/data.py | nih-niddk-mbs/covid-sicr | 4 | 12792573 | """Functions for getting data needed to fit the models."""
import bs4
from datetime import datetime
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import requests
from tqdm import tqdm
from typing import Union
from urllib.error import HTTPError
import urllib.request, json
import os
from datetime import timedelta, date
import pandas as pd
pd.options.mode.chained_assignment = None # default='warn'
JHU_FILTER_DEFAULTS = {'confirmed': 5, 'recovered': 1, 'deaths': 0}
COVIDTRACKER_FILTER_DEFAULTS = {'cum_cases': 5, 'cum_recover': 1, 'cum_deaths': 0}
US_STATE_ABBREV = {
'Alabama': 'US_AL',
'Alaska': 'US_AK',
'American Samoa': 'US_AS',
'Arizona': 'US_AZ',
'Arkansas': 'US_AR',
'California': 'US_CA',
'Colorado': 'US_CO',
'Connecticut': 'US_CT',
'Delaware': 'US_DE',
'District of Columbia': 'US_DC',
'Florida': 'US_FL',
'Georgia': 'US_GA',
'Guam': 'US_GU',
'Hawaii': 'US_HI',
'Idaho': 'US_ID',
'Illinois': 'US_IL',
'Indiana': 'US_IN',
'Iowa': 'US_IA',
'Kansas': 'US_KS',
'Kentucky': 'US_KY',
'Louisiana': 'US_LA',
'Maine': 'US_ME',
'Maryland': 'US_MD',
'Massachusetts': 'US_MA',
'Michigan': 'US_MI',
'Minnesota': 'US_MN',
'Mississippi': 'US_MS',
'Missouri': 'US_MO',
'Montana': 'US_MT',
'Nebraska': 'US_NE',
'Nevada': 'US_NV',
'New Hampshire': 'US_NH',
'New Jersey': 'US_NJ',
'New Mexico': 'US_NM',
'New York': 'US_NY',
'North Carolina': 'US_NC',
'North Dakota': 'US_ND',
'Northern Mariana Islands':'US_MP',
'Ohio': 'US_OH',
'Oklahoma': 'US_OK',
'Oregon': 'US_OR',
'Pennsylvania': 'US_PA',
'Puerto Rico': 'US_PR',
'Rhode Island': 'US_RI',
'South Carolina': 'US_SC',
'South Dakota': 'US_SD',
'Tennessee': 'US_TN',
'Texas': 'US_TX',
'Utah': 'US_UT',
'Vermont': 'US_VT',
'Virgin Islands': 'US_VI',
'Virginia': 'US_VA',
'Washington': 'US_WA',
'West Virginia': 'US_WV',
'Wisconsin': 'US_WI',
'Wyoming': 'US_WY'
}
def get_jhu(data_path: str, filter_: Union[dict, bool] = True) -> None:
"""Gets data from Johns Hopkins CSSEGIS (countries only).
https://coronavirus.jhu.edu/map.html
https://github.com/CSSEGISandData/COVID-19
Args:
data_path (str): Full path to data directory.
Returns:
None
"""
# Where JHU stores their data
url_template = ("https://raw.githubusercontent.com/CSSEGISandData/"
"COVID-19/master/csse_covid_19_data/"
"csse_covid_19_time_series/time_series_covid19_%s_%s.csv")
# Scrape the data
dfs = {}
for region in ['global', 'US']:
dfs[region] = {}
for kind in ['confirmed', 'deaths', 'recovered']:
url = url_template % (kind, region) # Create the full data URL
try:
df = pd.read_csv(url) # Download the data into a dataframe
except HTTPError:
print("Could not download data for %s, %s" % (kind, region))
else:
if region == 'global':
has_no_province = df['Province/State'].isnull()
# Whole countries only; use country name as index
df1 = df[has_no_province].set_index('Country/Region')
more_dfs = []
for country in ['China', 'Canada', 'Australia']:
if country == 'Canada' and kind in 'recovered':
continue
is_c = df['Country/Region'] == country
df2 = df[is_c].sum(axis=0, skipna=False).to_frame().T
df2['Country/Region'] = country
df2 = df2.set_index('Country/Region')
more_dfs.append(df2)
df = pd.concat([df1] + more_dfs)
elif region == 'US':
# Use state name as index
# for k, v in US_STATE_ABBREV.items(): # get US state abbrev
# if not US_STATE_ABBREV[k].startswith('US_'):
# US_STATE_ABBREV[k] = 'US_' + v # Add 'US_' to abbrev
df.replace(US_STATE_ABBREV, inplace=True)
df = df.set_index('Province_State')
df = df.groupby('Province_State').sum() # combine counties to create state level data
df = df[[x for x in df if any(year in x for year in ['20', '21'])]] # Use only data columns
# 20 or 21 signifies 2020 or 2021
dfs[region][kind] = df # Add to dictionary of dataframes
# Generate a list of countries that have "good" data,
# according to these criteria:
good_countries = get_countries(dfs['global'], filter_=filter_)
# For each "good" country,
# reformat and save that data in its own .csv file.
source = dfs['global']
for country in tqdm(good_countries, desc='Countries'): # For each country
if country in ['Diamond Princess', 'Grand Princess', 'MS Zaandam', 'Samoa',
'Vanuatu', 'Marshall Islands', 'US', 'Micronesia','Kiribati']:
print("Skipping {}".format(country))
continue
# If we have data in the downloaded JHU files for that country
if country in source['confirmed'].index:
df = pd.DataFrame(columns=['dates2', 'cum_cases', 'cum_deaths',
'cum_recover', 'new_cases',
'new_deaths', 'new_recover',
'new_uninfected'])
df['dates2'] = source['confirmed'].columns
df['dates2'] = df['dates2'].apply(fix_jhu_dates)
df['cum_cases'] = source['confirmed'].loc[country].values
df['cum_deaths'] = source['deaths'].loc[country].values
df['cum_recover'] = source['recovered'].loc[country].values
df[['new_cases', 'new_deaths', 'new_recover']] = \
df[['cum_cases', 'cum_deaths', 'cum_recover']].diff()
df['new_uninfected'] = df['new_recover'] + df['new_deaths']
try:
population = get_population_count(data_path, country)
df['population'] = population
except:
pass
# Fill NaN with 0 and convert to int
dfs[country] = df.set_index('dates2').fillna(0).astype(int)
dfs[country].to_csv(data_path / ('covidtimeseries_%s.csv' % country))
else:
print("No data for %s" % country)
source = dfs['US']
states = source['confirmed'].index.tolist()
us_recovery_data = covid_tracking_recovery(data_path)
for state in tqdm(states, desc='US States'): # For each country
if state in ['Diamond Princess', 'Grand Princess', 'MS Zaandam', 'US_AS']:
print("Skipping {}".format(state))
continue
# If we have data in the downloaded JHU files for that country
if state in source['confirmed'].index:
df = pd.DataFrame(columns=['dates2', 'cum_cases', 'cum_deaths',
'new_cases','new_deaths','new_uninfected'])
df['dates2'] = source['confirmed'].columns
df['dates2'] = df['dates2'].apply(fix_jhu_dates)
df['cum_cases'] = source['confirmed'].loc[state].values
df['cum_deaths'] = source['deaths'].loc[state].values
df[['new_cases', 'new_deaths']] = df[['cum_cases', 'cum_deaths']].diff()
# add recovery data
df.set_index('dates2', inplace=True)
df = df.merge(us_recovery_data[state], on='dates2', how='left')
df['tmp_new_recover'] = df['new_recover'].fillna(0).astype(int) # create temp new recover for
df['new_uninfected'] = df['tmp_new_recover'] + df['new_deaths'] # new uninfected calculation
df = df.fillna(-1).astype(int)
df = df.drop(['tmp_new_recover'], axis=1)
try:
population = get_population_count(data_path, state)
df['population'] = population
except:
pass
dfs[state] = df
dfs[state].to_csv(data_path /
('covidtimeseries_%s.csv' % state))
else:
print("No data for %s" % state)
def fix_jhu_dates(x):
y = datetime.strptime(x, '%m/%d/%y')
return datetime.strftime(y, '%m/%d/%y')
def fix_ct_dates(x):
return datetime.strptime(str(x), '%Y%m%d')
def get_countries(d: pd.DataFrame, filter_: Union[dict, bool] = True):
"""Get a list of countries from a global dataframe optionally passing
a quality check
Args:
d (pd.DataFrame): Data from JHU tracker (e.g. df['global]).
filter (bool, optional): Whether to filter by quality criteria.
"""
good = set(d['confirmed'].index)
if filter_ and not isinstance(filter_, dict):
filter_ = JHU_FILTER_DEFAULTS
if filter_:
for key, minimum in filter_.items():
enough = d[key].index[d[key].max(axis=1) >= minimum].tolist()
good = good.intersection(enough)
bad = set(d['confirmed'].index).difference(good)
# print("JHU data acceptable for %s" % ','.join(good))
# print("JHU data not acceptable for %s" % ','.join(bad))
return good
def get_population_count(data_path:str, roi):
""" Check if we have population count for roi and
add to timeseries df if we do.
Args:
data_path (str): Full path to data directory.
roi (str): Region.
Returns:
population (int): Population count for ROI (if exists).
"""
try: # open population file
df_pop = pd.read_csv(data_path / 'population_estimates.csv')
except:
print("Missing population_estimates.csv in data-path")
try:
population = df_pop.query('roi == "{}"'.format(roi))['population'].values
except:
print("{} population estimate not found in population_estimates.csv".format(args.roi))
return int(population)
def covid_tracking_recovery(data_path: str):
"""Gets archived US recovery data from The COVID Tracking Project.
https://covidtracking.com
Args:
data_path (str): Full path to data directory.
Returns:
ctp_dfs (dict): Dictionary containing US States (keys) and dataframes
containing dates, recovery data (values).
"""
archived_data = data_path / 'covid-tracking-project-recovery.csv'
df_raw = pd.read_csv(archived_data)
states = df_raw['state'].unique()
ctp_dfs = {}
for state in states: # For each country
source = df_raw[df_raw['state'] == state] # Only the given state
df = pd.DataFrame(columns=['dates2','cum_recover','new_recover'])
df['dates2'] = source['date'].apply(fix_ct_dates) # Convert date format
# first check if roi reports recovery data as recovered
if source['recovered'].isnull().all() == False:
df['cum_recover'] = source['recovered'].values
# check if roi reports recovery data as hospitalizedDischarged
elif source['hospitalizedDischarged'].isnull().all() == False:
df['cum_recover'] = source['hospitalizedDischarged'].values
else:
df['cum_recover'] = np.NaN
df.sort_values(by=['dates2'], inplace=True) # sort by datetime obj before converting to string
df['dates2'] = pd.to_datetime(df['dates2']).dt.strftime('%m/%d/%y') # convert dates to string
df = df.set_index('dates2') # Convert to int
df['new_recover'] = df['cum_recover'].diff()
ctp_dfs['US_'+state] = df
return ctp_dfs
def get_canada(data_path: str, filter_: Union[dict, bool] = True,
fixes: bool = False) -> None:
""" Gets data from Canada's Open Covid group for Canadian Provinces.
https://opencovid.ca/
"""
dfs = [] # we will append dfs for cases, deaths, recovered here
# URL for API call to get Province-level timeseries data starting on Jan 22 2020
url_template = 'https://api.opencovid.ca/timeseries?stat=%s&loc=prov&date=01-22-2020'
for kind in ['cases', 'mortality', 'recovered']:
url_path = url_template % kind # Create the full data URL
with urllib.request.urlopen(url_path) as url:
data = json.loads(url.read().decode())
source = pd.json_normalize(data[kind])
if kind == 'cases':
source.drop('cases', axis=1, inplace=True) # removing this column so
# we can index into date on all 3 dfs at same position
source.rename(columns={source.columns[1]: "date" }, inplace=True)
dfs.append(source)
cases = dfs[0]
deaths = dfs[1]
recovered = dfs[2]
# combine dfs
df_rawtemp = cases.merge(recovered, on=['date', 'province'], how='outer')
df_raw = df_rawtemp.merge(deaths, on=['date', 'province'], how='outer')
df_raw.fillna(0, inplace=True)
provinces = ['Alberta', 'BC', 'Manitoba', 'New Brunswick', 'NL',
'Nova Scotia', 'Nunavut', 'NWT', 'Ontario', 'PEI', 'Quebec',
'Saskatchewan', 'Yukon']
# Export timeseries data for each province
for province in tqdm(provinces, desc='Canadian Provinces'):
source = df_raw[df_raw['province'] == province] # Only the given province
df = pd.DataFrame(columns=['dates2','cum_cases', 'cum_deaths',
'cum_recover', 'new_cases',
'new_deaths', 'new_recover',
'new_uninfected'])
df['dates2'] = source['date'].apply(fix_canada_dates) # Convert date format
df['cum_cases'] = source['cumulative_cases'].values
df['cum_deaths'] = source['cumulative_deaths'].values
df['cum_recover'] = source['cumulative_recovered'].values
df[['new_cases', 'new_deaths', 'new_recover']] = \
df[['cum_cases', 'cum_deaths', 'cum_recover']].diff()
df['new_uninfected'] = df['new_recover'] + df['new_deaths']
try:
population = get_population_count(data_path, 'CA_' + province)
df['population'] = population
except:
pass
df.sort_values(by=['dates2'], inplace=True) # sort by datetime obj before converting to string
df['dates2'] = pd.to_datetime(df['dates2']).dt.strftime('%m/%d/%y') # convert dates to string
df = df.set_index('dates2').fillna(0).astype(int) # Fill NaN with 0 and convert to int
df.to_csv(data_path / ('covidtimeseries_CA_%s.csv' % province))
def fix_canada_dates(x):
return datetime.strptime(x, '%d-%m-%Y')
def get_brazil(data_path: str, filter_: Union[dict, bool] = True,
fixes: bool = False) -> None:
""" Get state-level data for Brazil.
https://github.com/wcota/covid19br (<NAME>)
"""
url = "https://raw.githubusercontent.com/wcota/covid19br/master/cases-brazil-states.csv"
try:
df_raw = pd.read_csv(url)
except HTTPError:
print("Could not download state-level data for Brazil")
state_code = {'AC':'Acre', 'AL':'Alagoas', 'AM':'Amazonas', 'AP':'Amapa',
'BA':'Bahia','CE':'Ceara', 'DF':'Distrito Federal',
'ES':'Espirito Santo', 'GO':'Goias', 'MA':'Maranhao',
'MG':'Minas Gerais', 'MS':'Mato Grosso do Sul', 'MT':'Mato Grosso',
'PA':'Para', 'PB':'Paraiba', 'PE':'Pernambuco', 'PI':'Piaui',
'PR':'Parana', 'RJ':'Rio de Janeiro', 'RN':'Rio Grande do Norte',
'RO':'Rondonia', 'RR':'Roraima', 'RS':'Rio Grande do Sul',
'SC':'Santa Catarina', 'SE':'Sergipe', 'SP':'Sao Paulo', 'TO':'Tocantins'}
for state in tqdm(state_code, desc='Brazilian States'):
source = df_raw[df_raw['state'] == state] # Only the given province
df = pd.DataFrame(columns=['dates2','cum_cases', 'cum_deaths',
'cum_recover', 'new_cases',
'new_deaths', 'new_recover',
'new_uninfected'])
df['dates2'] = source['date']
df['cum_cases'] = source['totalCases'].values
df['cum_deaths'] = source['deaths'].values
df['cum_recover'] = source['recovered'].values
df['new_cases'] = source['newCases'].values
df['new_deaths'] = source['newDeaths'].values
df['new_recover'] = df['cum_recover'].diff()
df['new_uninfected'] = df['new_recover'] + df['new_deaths']
try:
roi = 'BR_' + state_code[state]
population = get_population_count(data_path, roi)
df['population'] = population
except:
print("Could not add population data for {}".format(state))
pass
df.sort_values(by=['dates2'], inplace=True) # sort by datetime obj before converting to string
df['dates2'] = pd.to_datetime(df['dates2']).dt.strftime('%m/%d/%y') # convert dates to string
df = df.set_index('dates2').fillna(0).astype(int) # Fill NaN with 0 and convert to int
df.to_csv(data_path / ('covidtimeseries_BR_%s.csv' % state_code[state]))
def get_owid_tests(data_path: str, filter_: Union[dict, bool] = True,
fixes: bool = False) -> None:
""" Get testing data from Our World In Data
https://github.com/owid/covid-19-data
Add columns cum_tests and new_tests to csvs in data_path. """
url = 'https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/testing/covid-testing-all-observations.csv'
src = pd.read_csv(url)
roi_codes = pd.read_csv(data_path / 'country_iso_codes.csv')
roi_codes_dict = pd.Series(roi_codes.Country.values,index=roi_codes['Alpha-3 code']).to_dict()
# trim down source dataframe
src_trim = pd.DataFrame(columns=['dates2','Alpha-3 code','cum_tests'])
src_trim['dates2'] = src['Date'].apply(fix_owid_dates).values # fix dates
src_trim['Alpha-3 code'] = src['ISO code'].values
src_trim['cum_tests'] = src['Cumulative total'].fillna(-1).astype(int).values
src_trim.set_index('dates2',inplace=True, drop=True)
src_rois = src_trim['Alpha-3 code'].unique()
unavailable_testing_data = [] # for appending rois that don't have testing data
for roi in roi_codes_dict:
if roi not in src_rois:
unavailable_testing_data.append(roi)
continue
if roi_codes_dict[roi] in ["US", "Marshall Islands", "Micronesia", "Samoa", "Vanuatu"]: # skipping because bad data
continue
try:
timeseries_path = data_path / ('covidtimeseries_%s.csv' % roi_codes_dict[roi])
df_timeseries = pd.read_csv(timeseries_path, index_col='dates2')
except FileNotFoundError as fnf_error:
print(fnf_error, 'Could not add OWID data.')
pass
for i in df_timeseries.columns: # Check if OWID testng data already included
if 'tests' in i:
df_timeseries.drop([i], axis=1, inplace=True) # drop so we can add new
src_roi = src_trim[src_trim['Alpha-3 code'] == roi] # filter rows that match roi
df_combined = df_timeseries.merge(src_roi[['cum_tests']], how='left', on='dates2')
df_combined['new_tests'] = df_combined['cum_tests'].diff()
df_combined.loc[df_combined['new_tests'] < 0, 'new_tests'] = -1 # Handle cases where
# cumulative counts decrease and new_tests becomes a large negative number
df_combined[['cum_tests', 'new_tests']] = df_combined[['cum_tests', 'new_tests']].fillna(-1).astype(int).values
df_combined = df_combined.loc[:, ~df_combined.columns.str.contains('^Unnamed')]
df_combined.to_csv(timeseries_path) # overwrite timeseries CSV
print("OWID global test results missing for: ")
for roi in roi_codes_dict:
if roi in unavailable_testing_data:
print(roi_codes_dict[roi], end=" ")
print("")
def get_owid_global_vaccines(data_path: str, filter_: Union[dict, bool] = True,
fixes: bool = False) -> None:
""" Get global vaccines data from Our World In Data
https://github.com/owid/covid-19-data
Add columns to global csvs in data_path. """
url = 'https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/vaccinations/vaccinations.csv'
src = pd.read_csv(url)
src_trim = pd.DataFrame(columns=['dates2', 'Alpha-3 code', 'cum_vaccinations', 'daily_vaccinations',
'cum_people_vaccinated', 'cum_people_fully_vaccinated'])
src_trim['dates2'] = src['date'].apply(fix_owid_dates).values # fix dates
src_trim['Alpha-3 code'] = src['iso_code'].values
src_trim['cum_vaccinations'] = src['total_vaccinations'].values
src_trim['daily_vaccinations'] = src['daily_vaccinations'].values
src_trim['cum_people_vaccinated'] = src['people_vaccinated'].values
src_trim['cum_people_fully_vaccinated'] = src['people_fully_vaccinated'].values
roi_codes = pd.read_csv(data_path / 'country_iso_codes.csv')
roi_codes_dict = pd.Series(roi_codes.Country.values,index=roi_codes['Alpha-3 code']).to_dict()
# trim down source dataframe
src_trim.set_index('dates2',inplace=True, drop=True)
src_rois = src_trim['Alpha-3 code'].unique()
unavailable_testing_data = [] # for appending rois that don't have testing data
for roi in roi_codes_dict:
if roi not in src_rois:
unavailable_testing_data.append(roi)
continue
if roi_codes_dict[roi] in ["US", "Marshall Islands", "Micronesia", "Samoa", "Vanuatu"]: # skipping because no data
continue
try:
timeseries_path = data_path / ('covidtimeseries_%s.csv' % roi_codes_dict[roi])
df_timeseries = pd.read_csv(timeseries_path, index_col='dates2')
except FileNotFoundError as fnf_error:
print(fnf_error, 'Could not add OWID global vaccines data.')
pass
for i in df_timeseries.columns: # Check if OWID testing data already included
if 'vaccin' in i:
df_timeseries.drop([i], axis=1, inplace=True) # drop so we can add new
src_roi = src_trim[src_trim['Alpha-3 code'] == roi] # filter rows that match roi
df_combined = df_timeseries.merge(src_roi[['cum_vaccinations', 'daily_vaccinations', 'cum_people_vaccinated',
'cum_people_fully_vaccinated']], how='left', on='dates2')
cum_vacc_columns = ['vaccinations', 'people_vaccinated', 'people_fully_vaccinated']
df = dummy_cumulative_new_counts(roi_codes_dict[roi], df_combined, cum_vacc_columns)
df = df.loc[:, ~df.columns.str.contains('^Unnamed')]
df.to_csv(timeseries_path) # overwrite timeseries CSV
print("OWID global vaccine results missing for: ")
for roi in roi_codes_dict:
if roi in unavailable_testing_data:
print(roi_codes_dict[roi], end=" ")
print("")
def dummy_cumulative_new_counts(roi, df, columns: list):
""" There are cases where cum counts go missing and new counts get missed.
New counts spike when cumulative counts go to -1 for missing data and
the difference is taken between a new cumulative count and -1.
We don't want it to spike, and we don't want to miss new counts before the gap.
So create a dummy dataframe with forward filled cumulative counts and
perform new cases calculation, then merge those new cases back into dataframe.
Args:
roi (str): Region we are working with; used for print statements.
df (pd.DataFrame): DataFrame containing counts but not new counts.
columns (list): List of columns (without cum_ prefix) so create new counts for.
Returns:
df_fixed (pd.DataFrame): DataFrame containing cumulative and now new counts. """
dfs = []
df_tmp = df.copy()
df_tmp.reset_index(inplace=True)
for col in columns:
cum_col = 'cum_' + col
dummy_cum_col = 'dummy_' + cum_col
new_col = 'new_' + col
try:
start = df_tmp[df_tmp[cum_col] > 0].index.values[0]
df_ffill = df_tmp.iloc[start:]
df_ffill.set_index('dates2', drop=True, inplace=True)
df_ffill[dummy_cum_col] = df_ffill[cum_col].ffill().astype(int).values
df_ffill[new_col] = df_ffill[dummy_cum_col].diff().astype('Int64')
# If cumulative counts are missing, set new counts to -1 so they don't become 0.
df_ffill.loc[df_ffill[cum_col].isnull(), new_col] = -1
        except Exception:
            print(f'No {cum_col} data to add for {roi}.')
            # No usable cumulative data: build an all-missing column so the
            # concat below still lines up with the other columns.
            df_ffill = df_tmp.set_index('dates2', drop=True)
            df_ffill[new_col] = -1
df_ffill = df_ffill[~df_ffill.index.duplicated()] # fix duplication issue
dfs.append(df_ffill[new_col])
df_new = pd.concat(dfs, axis=1)
df_new = df_new.fillna(-1).astype(int)
df_fixed = df.join(df_new)
df_fixed = df_fixed.fillna(-1).astype(int)
return df_fixed
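# Illustrative call (not part of the pipeline): cumulative vaccination counts
# with a one-day reporting gap. The gap is forward-filled only to derive daily
# counts, and the gap day itself gets new_vaccinations = -1 instead of a
# spurious spike. Dates and numbers here are made-up placeholder values.
def _dummy_counts_example():
    demo = pd.DataFrame({'cum_vaccinations': [0, 100, np.nan, 250]},
                        index=pd.Index(['01/01/21', '01/02/21', '01/03/21', '01/04/21'],
                                       name='dates2'))
    return dummy_cumulative_new_counts('DEMO', demo, ['vaccinations'])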
def get_owid_us_vaccines(data_path: str, filter_: Union[dict, bool] = True,
fixes: bool = False) -> None:
""" Get US vaccines data from Our World In Data
https://github.com/owid/covid-19-data
Add columns to US csvs in data_path. """
url = 'https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/vaccinations/us_state_vaccinations.csv'
src = pd.read_csv(url)
    src_trim = pd.DataFrame(columns=['dates2', 'region', 'cum_vaccinations', 'daily_vaccinations',
                                     'cum_people_vaccinated', 'cum_people_fully_vaccinated'])
src_trim['dates2'] = src['date'].apply(fix_owid_dates).values # fix dates
src_trim['region'] = src['location'].values
src_trim['cum_vaccinations'] = src['total_vaccinations'].values
src_trim['daily_vaccinations'] = src['daily_vaccinations'].values
src_trim['cum_people_vaccinated'] = src['people_vaccinated'].values
src_trim['cum_people_fully_vaccinated'] = src['people_fully_vaccinated'].values
src_trim.set_index('dates2', inplace=True, drop=True)
src_trim.replace("New York State", "New York", inplace=True) # fix NY name
src_rois = src_trim['region'].unique()
for roi in src_rois:
if roi in US_STATE_ABBREV:
try:
timeseries_path = data_path / ('covidtimeseries_%s.csv' % US_STATE_ABBREV[roi])
df_timeseries = pd.read_csv(timeseries_path, index_col='dates2')
except FileNotFoundError as fnf_error:
print(fnf_error, 'Could not add OWID vaccinations data.')
pass
for i in df_timeseries.columns: # Check if OWID vaccines data already included
if 'vaccin' in i:
df_timeseries.drop([i], axis=1, inplace=True) # drop so we can add new
src_roi = src_trim[src_trim['region'] == roi] # filter rows that match roi
df_combined = df_timeseries.merge(src_roi[['cum_vaccinations', 'daily_vaccinations', 'cum_people_vaccinated',
'cum_people_fully_vaccinated']], how='left', on='dates2')
cum_vacc_columns = ['vaccinations', 'people_vaccinated', 'people_fully_vaccinated']
df = dummy_cumulative_new_counts(US_STATE_ABBREV[roi], df_combined, cum_vacc_columns)
df = df.loc[:, ~df.columns.str.contains('^Unnamed')]
df.to_csv(timeseries_path) # overwrite timeseries CSV
def fix_owid_dates(x):
y = datetime.strptime(x, '%Y-%m-%d')
return datetime.strftime(y, '%m/%d/%y')
def get_jhu_us_states_tests(data_path: str, filter_: Union[dict, bool] = False) -> None:
""" Scrape JHU for US State level test results. Data is stored as a collection of
CSVs per date containing states and test results.
Args:
data_path (str): Full path to data directory.
Returns:
None
"""
url_template = "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_daily_reports_us/%s.csv"
# generate a list of dates for scraping
start_dt = date(2020, 4, 12) # When JHU starts reporting
end_dt = date.today()
dates = []
delta = end_dt - start_dt
delta = delta.days
for dt in daterange(start_dt, end_dt):
dates.append(dt.strftime("%m-%d-%Y"))
# cumulative tests are named 'People_Tested' for first 200 ish days
# then cumulative tests are named 'Total_Test_Results' after 200 ish days
dfs = []
for i in tqdm(dates, desc=f'Scraping {delta} days of data across all states'):
url = url_template % i
try:
df = pd.read_csv(url)
df_trim = pd.DataFrame(columns=['Province_State', 'cum_tests', 'dates2'])
df_trim['Province_State'] = df['Province_State'].values
df_trim['dates2'] = fix_jhu_testing_dates(i)
# handle cases where column is people_tested and then switches to Total_Test_Results
if 'People_Tested' in df.columns:
df_trim['cum_tests'] = df['People_Tested'].fillna(-1).astype(int).values
dfs.append(df_trim)
if 'Total_Test_Results' in df.columns:
df_trim['cum_tests'] = df['Total_Test_Results'].fillna(-1).astype(int).values
dfs.append(df_trim)
except HTTPError:
print("Could not download tests data for %s" % i)
df_combined = pd.concat(dfs)
df_combined.sort_values(by='Province_State', inplace=True)
df_combined['Date'] = pd.to_datetime(df_combined['dates2'])
rois = df_combined['Province_State'].unique()
sorted_dfs = []
for roi in rois:
df_roi = df_combined[df_combined['Province_State'] == roi]
df_roi = df_roi.sort_values(by="Date")
df_roi['new_tests'] = df_roi['cum_tests'].diff().fillna(-1).astype(int)
sorted_dfs.append(df_roi)
df_tests = pd.concat(sorted_dfs)
df_tests.reset_index(inplace=True, drop=True)
df_tests.replace(US_STATE_ABBREV, inplace=True)
df_tests.rename(columns={'Province_State': 'roi'}, inplace=True)
# now open csvs in data_path that match rois and merge on csv to add cum_test and new_tests
rois = df_tests.roi.unique().tolist()
to_remove = ['Diamond Princess', 'Grand Princess', 'Recovered']
for i in to_remove:
if i in rois:
rois.remove(i)
for roi in rois:
csv_path = data_path / f'covidtimeseries_{roi}.csv'
try:
df_timeseries = pd.read_csv(csv_path)
except:
print(f"{csv_path} not found in data path.")
try:
for i in df_timeseries.columns: # Check if testng data already included
if 'tests' in i:
df_timeseries.drop([i], axis=1, inplace=True) # drop so we can add new
df_roi_tests = df_tests[df_tests['roi'] == roi] # filter down to roi
df_result = df_timeseries.merge(df_roi_tests, on='dates2', how='left')
df_result.fillna(-1, inplace=True)
df_result.loc[df_result['new_tests'] < 0, 'new_tests'] = -1 # Handle cases where
# cumulative counts decrease and new_tests becomes a large negative number
df_result['new_tests'] = df_result['new_tests'].astype(int)
df_result[['cum_tests', 'new_tests']] = df_result[['cum_tests', 'new_tests']].astype(int)
df_result_trim = df_result[['dates2', 'cum_cases', 'new_cases',
'cum_deaths', 'new_deaths', 'cum_recover',
'new_recover', 'new_uninfected', 'cum_tests',
'new_tests', 'population']].copy()
df_result_trim = df_result_trim.loc[:, ~df_result_trim.columns.str.contains('^Unnamed')]
df_result_trim.to_csv(csv_path) # overwrite timeseries CSV
except:
print(f'Could not get tests data for {roi}.')
def daterange(date1, date2):
for n in range(int ((date2 - date1).days)+1):
yield date1 + timedelta(n)
def fix_jhu_testing_dates(x):
y = datetime.strptime(x, '%m-%d-%Y')
return datetime.strftime(y, '%m/%d/%y')
def fix_negatives(data_path: str, plot: bool = False) -> None:
"""Fix negative values in daily data.
The purpose of this script is to fix spurious negative values in new daily
numbers. For example, the cumulative total of cases should not go from N
to a value less than N on a subsequent day. This script fixes this by
nulling such data and applying a monotonic spline interpolation in between
valid days of data. This only affects a small number of regions. It
overwrites the original .csv files produced by the functions above.
Args:
data_path (str): Full path to data directory.
plot (bool): Whether to plot the changes.
Returns:
None
"""
csvs = [x for x in data_path.iterdir() if 'covidtimeseries' in str(x)]
for csv in tqdm(csvs, desc="Regions"):
roi = str(csv).split('.')[0].split('_')[-1]
df = pd.read_csv(csv)
# Exclude final day because it is often a partial count.
df = df.iloc[:-1]
df = fix_neg(df, roi, plot=plot)
df.to_csv(data_path / (csv.name.split('.')[0]+'.csv'))
def fix_neg(df: pd.DataFrame, roi: str,
columns: list = ['cases', 'deaths', 'recover'],
plot: bool = False) -> pd.DataFrame:
"""Used by `fix_negatives` to fix negatives values for a single region.
This function uses monotonic spline interpolation to make sure that
cumulative counts are non-decreasing.
Args:
df (pd.DataFrame): DataFrame containing data for one region.
roi (str): One region, e.g 'US_MI' or 'Greece'.
columns (list, optional): Columns to make non-decreasing.
Defaults to ['cases', 'deaths', 'recover'].
Returns:
        pd.DataFrame: The region's data with the new_* columns recomputed so
            that daily counts are non-negative.
"""
for c in columns:
cum = 'cum_%s' % c
new = 'new_%s' % c
before = df[cum].copy()
non_zeros = df[df[new] > 0].index
has_negs = before.diff().min() < 0
if len(non_zeros) and has_negs:
first_non_zero = non_zeros[0]
maxx = df.loc[first_non_zero, cum].max()
# Find the bad entries and null the corresponding
# cumulative column, which are:
# 1) Cumulative columns which are zero after previously
# being non-zero
bad = df.loc[first_non_zero:, cum] == 0
df.loc[bad[bad].index, cum] = None
# 2) New daily columns which are negative
bad = df.loc[first_non_zero:, new] < 0
df.loc[bad[bad].index, cum] = None
# Protect against 0 null final value which screws up interpolator
if np.isnan(df.loc[df.index[-1], cum]):
df.loc[df.index[-1], cum] = maxx
# Then run a loop which:
while True:
# Interpolates the cumulative column nulls to have
# monotonic growth
after = df[cum].interpolate('pchip')
diff = after.diff()
if diff.min() < 0:
# If there are still negative first-differences at this
# point, increase the corresponding cumulative values by 1.
neg_index = diff[diff < 0].index
df.loc[neg_index, cum] += 1
else:
break
# Then repeat
if plot:
plt.figure()
plt.plot(df.index, before, label='raw')
plt.plot(df.index, after, label='fixed')
r = np.corrcoef(before, after)[0, 1]
plt.title("%s %s Raw vs Fixed R=%.5g" % (roi, c, r))
plt.legend()
else:
after = before
# Make sure the first differences are now all non-negative
assert after.diff().min() >= 0
# Replace the values
df[new] = df[cum].diff().fillna(0).astype(int).values
return df
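# Illustrative check of fix_neg (not part of the pipeline): a cumulative series
# that dips from 5 back to 4 has the dip nulled and the daily new_* column
# recomputed so it contains no negatives. The numbers are made up, and the
# 'pchip' interpolation used inside fix_neg requires scipy to be installed.
def _fix_neg_example():
    demo = pd.DataFrame({'cum_cases': [0, 2, 5, 4, 7]})
    demo['new_cases'] = demo['cum_cases'].diff().fillna(0).astype(int)
    fixed = fix_neg(demo, 'DEMO', columns=['cases'])
    assert fixed['new_cases'].min() >= 0  # the spurious negative on day 4 is gone
    return fixed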
def negify_missing(data_path: str) -> None:
"""Fix negative values in daily data.
The purpose of this script is to fix spurious negative values in new daily
numbers. For example, the cumulative total of cases should not go from N
to a value less than N on a subsequent day. This script fixes this by
nulling such data and applying a monotonic spline interpolation in between
valid days of data. This only affects a small number of regions. It
overwrites the original .csv files produced by the functions above.
Args:
data_path (str): Full path to data directory.
plot (bool): Whether to plot the changes.
Returns:
None
"""
csvs = [x for x in data_path.iterdir() if 'covidtimeseries' in str(x)]
for csv in tqdm(csvs, desc="Regions"):
roi = str(csv).split('.')[0].split('_')[-1]
df = pd.read_csv(csv)
for kind in ['cases', 'deaths', 'recover']:
if df['cum_%s' % kind].sum() == 0:
print("Negifying 'new_%s' for %s" % (kind, roi))
df['new_%s' % kind] = -1
out = data_path / (csv.name.split('.')[0]+'.csv')
df.to_csv(out)
def remove_old_rois(data_path: str):
"""Delete time-series files for regions no longer tracked, such as:
Diamond Princess, MS Zaandam, Samoa, Vanuatu, Marshall Islands,
US, US_AS (American Somoa)"""
csvs = [x for x in data_path.iterdir() if 'covidtimeseries' in str(x)]
rois_to_remove = ['Diamond Princess', 'Grand Princess', 'MS Zaandam', 'Samoa', 'Vanuatu',
'Marshall Islands', 'US', 'US_AS', 'Micronesia', 'Kiribati', 'Palau']
for csv in csvs:
roi = str(csv).split('.')[0].split('_', 1)[-1]
if roi in rois_to_remove:
try:
if os.path.exists(csv):
print("Removing {} from data_path".format(roi))
os.remove(csv)
except:
print("could not remove {}. Check that path is correct.".format(csv))
| 2.21875 | 2 |
jetson/od/objcet_detection.py | lonerlin/SelfDrivingCVCar | 2 | 12792574 | <gh_stars>1-10
import jetson.inference
import jetson.utils
from multiprocessing import Process, Pipe
import time
class ObjectDetection(Process):
"""
    Object-detection process class; this is the worker that actually runs recognition.
"""
def __init__(self, conn1, conn2, stop_process, width=320, height=240, frequency=10, device="/dev/video0", network="ssd-mobilenet-v2", threshold=0.5, display_window=True):
"""
        Initialize the detection process.
        :param conn1: pipe endpoint used by this process to send results
        :param conn2: the other pipe endpoint (closed in the child)
        :param stop_process: shared stop flag
        :param width: camera frame width
        :param height: camera frame height
        :param frequency: detection frequency (detections per second)
        :param device: camera device file
        :param network: detection model to use
        :param threshold: confidence threshold for accepting a detection (typically 0.5)
        :param display_window: whether to show the monitoring window
"""
super(ObjectDetection, self).__init__()
self.device = device
self.network = network
self.frequency = frequency
self.threshold = threshold
self.conn1 = conn1
self.conn2 = conn2
self.width = width
self.height = height
self.display_window = display_window
self.interval = time.perf_counter()
self.stop = stop_process
def run(self):
"""
        Entry point of the child process: close the unused pipe end and start detecting.
"""
self.conn2.close()
self.camera_detect()
def camera_detect(self):
"""
        Run detection and send results back through the pipe. If the refresh interval has not yet elapsed, an empty list is sent instead (to keep the pipe from backing up; a cleaner approach probably exists).
"""
net = jetson.inference.detectNet(self.network, threshold=self.threshold)
camera = jetson.utils.gstCamera(self.width, self.height, self.device) # using V4L2
display = jetson.utils.glDisplay()
while display.IsOpen() and self.stop.value == 0:
img, width, height = camera.CaptureRGBA()
if time.perf_counter() - self.interval >= 1/self.frequency:
self.interval = time.perf_counter()
detections = net.Detect(img, width, height)
if self.display_window:
display.RenderOnce(img, width, height)
if len(detections) > 0:
detections_list = []
for d in detections:
detections_list.append(
[d.ClassID, d.Confidence, d.Left, d.Right, d.Top, d.Bottom, d.Area, d.Center])
self.conn1.send(detections_list)
else:
self.conn1.send([])
else:
self.conn1.send([])
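# Illustrative parent-side usage (an assumption, not from the original project):
# run the detector in a child process via start() and read detection lists from
# the parent end of the pipe; an empty list means "no fresh result yet".
def _parent_process_demo(num_frames=100):
    from multiprocessing import Value
    parent_conn, child_conn = Pipe()
    stop_flag = Value('i', 0)
    detector = ObjectDetection(child_conn, parent_conn, stop_flag)
    detector.start()  # executes run() -> camera_detect() in the child process
    try:
        for _ in range(num_frames):
            detections = parent_conn.recv()
            if detections:
                print(detections)
    finally:
        stop_flag.value = 1  # ask the child loop to exit
        detector.join()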
if __name__ == '__main__':
    from multiprocessing import Value  # shared stop flag for the detection loop
    parent_conn, child_conn = Pipe()
    stop_flag = Value('i', 0)
    od = ObjectDetection(child_conn, parent_conn, stop_flag)
    od.camera_detect()
| 2.21875 | 2 |
data/hook_dataloader.py | jireh-father/InsightFace_Pytorch | 0 | 12792575 | import torch.multiprocessing as multiprocessing
import threading
from torch.utils.data import _utils
import torch
import random
import sys
from torch._six import queue
import os
from torch.utils.data._utils import collate, signal_handling, MP_STATUS_CHECK_INTERVAL, \
ExceptionWrapper, IS_WINDOWS
if IS_WINDOWS:
import ctypes
from ctypes.wintypes import DWORD, BOOL, HANDLE
# On Windows, the parent ID of the worker process remains unchanged when the manager process
# is gone, and the only way to check it through OS is to let the worker have a process handle
# of the manager and ask if the process status has changed.
class ManagerWatchdog(object):
def __init__(self):
self.manager_pid = os.getppid()
self.kernel32 = ctypes.WinDLL('kernel32', use_last_error=True)
self.kernel32.OpenProcess.argtypes = (DWORD, BOOL, DWORD)
self.kernel32.OpenProcess.restype = HANDLE
self.kernel32.WaitForSingleObject.argtypes = (HANDLE, DWORD)
self.kernel32.WaitForSingleObject.restype = DWORD
# Value obtained from https://msdn.microsoft.com/en-us/library/ms684880.aspx
SYNCHRONIZE = 0x00100000
self.manager_handle = self.kernel32.OpenProcess(SYNCHRONIZE, 0, self.manager_pid)
if not self.manager_handle:
raise ctypes.WinError(ctypes.get_last_error())
self.manager_dead = False
def is_alive(self):
if not self.manager_dead:
# Value obtained from https://msdn.microsoft.com/en-us/library/windows/desktop/ms687032.aspx
self.manager_dead = self.kernel32.WaitForSingleObject(self.manager_handle, 0) == 0
return not self.manager_dead
else:
class ManagerWatchdog(object):
def __init__(self):
self.manager_pid = os.getppid()
self.manager_dead = False
def is_alive(self):
if not self.manager_dead:
self.manager_dead = os.getppid() != self.manager_pid
return not self.manager_dead
def _worker_loop(dataset, index_queue, data_queue, done_event, collate_fn, seed, init_fn, worker_id):
# See NOTE [ Data Loader Multiprocessing Shutdown Logic ] for details on the
# logic of this function.
try:
collate._use_shared_memory = True
# Intialize C side signal handlers for SIGBUS and SIGSEGV. Python signal
# module's handlers are executed after Python returns from C low-level
# handlers, likely when the same fatal signal had already happened
# again.
# https://docs.python.org/3/library/signal.html#execution-of-python-signal-handlers
signal_handling._set_worker_signal_handlers()
torch.set_num_threads(1)
random.seed(seed)
torch.manual_seed(seed)
data_queue.cancel_join_thread()
if init_fn is not None:
init_fn(worker_id)
watchdog = ManagerWatchdog()
while watchdog.is_alive():
try:
r = index_queue.get(timeout=MP_STATUS_CHECK_INTERVAL)
except queue.Empty:
continue
if r is None:
# Received the final signal
assert done_event.is_set()
return
elif done_event.is_set():
# Done event is set. But I haven't received the final signal
# (None) yet. I will keep continuing until get it, and skip the
# processing steps.
continue
idx, batch_indices = r
try:
dataset.__before_hook__()
samples = collate_fn([dataset[i] for i in batch_indices])
dataset.__after_hook__()
except Exception:
# It is important that we don't store exc_info in a variable,
# see NOTE [ Python Traceback Reference Cycle Problem ]
data_queue.put((idx, ExceptionWrapper(sys.exc_info())))
else:
data_queue.put((idx, samples))
del samples
except KeyboardInterrupt:
# Main process will raise KeyboardInterrupt anyways.
pass
# Iterator for HookDataLoader: mirrors torch's _DataLoaderIter, except the worker loop above calls the dataset's __before_hook__/__after_hook__ around each batch.
class HookDataloderIter(object):
def __init__(self, loader):
self.dataset = loader.dataset
self.collate_fn = loader.collate_fn
self.batch_sampler = loader.batch_sampler
self.num_workers = loader.num_workers
self.pin_memory = loader.pin_memory and torch.cuda.is_available()
self.timeout = loader.timeout
self.sample_iter = iter(self.batch_sampler)
base_seed = torch.LongTensor(1).random_().item()
if self.num_workers > 0:
self.worker_init_fn = loader.worker_init_fn
self.worker_queue_idx = 0
self.worker_result_queue = multiprocessing.Queue()
self.batches_outstanding = 0
self.worker_pids_set = False
self.shutdown = False
self.send_idx = 0
self.rcvd_idx = 0
self.reorder_dict = {}
self.done_event = multiprocessing.Event()
self.index_queues = []
self.workers = []
for i in range(self.num_workers):
index_queue = multiprocessing.Queue()
index_queue.cancel_join_thread()
w = multiprocessing.Process(
target=_worker_loop,
args=(self.dataset, index_queue,
self.worker_result_queue, self.done_event,
self.collate_fn, base_seed + i,
self.worker_init_fn, i))
w.daemon = True
# NB: Process.start() actually take some time as it needs to
# start a process and pass the arguments over via a pipe.
# Therefore, we only add a worker to self.workers list after
# it started, so that we do not call .join() if program dies
# before it starts, and __del__ tries to join but will get:
# AssertionError: can only join a started process.
w.start()
self.index_queues.append(index_queue)
self.workers.append(w)
if self.pin_memory:
self.data_queue = queue.Queue()
pin_memory_thread = threading.Thread(
target=_utils.pin_memory._pin_memory_loop,
args=(self.worker_result_queue, self.data_queue,
torch.cuda.current_device(), self.done_event))
pin_memory_thread.daemon = True
pin_memory_thread.start()
# Similar to workers (see comment above), we only register
# pin_memory_thread once it is started.
self.pin_memory_thread = pin_memory_thread
else:
self.data_queue = self.worker_result_queue
_utils.signal_handling._set_worker_pids(id(self), tuple(w.pid for w in self.workers))
_utils.signal_handling._set_SIGCHLD_handler()
self.worker_pids_set = True
# prime the prefetch loop
for _ in range(2 * self.num_workers):
self._put_indices()
def __len__(self):
return len(self.batch_sampler)
def _try_get_batch(self, timeout=_utils.MP_STATUS_CHECK_INTERVAL):
# Tries to fetch data from `data_queue` for a given timeout. This can
# also be used as inner loop of fetching without timeout, with the
# sender status as the loop condition.
#
# This raises a `RuntimeError` if any worker died expectedly. This error
# can come from either the SIGCHLD handler in `_utils/signal_handling.py`
# (only for non-Windows platforms), or the manual check below on errors
# and timeouts.
#
# Returns a 2-tuple:
# (bool: whether successfully get data, any: data if successful else None)
try:
data = self.data_queue.get(timeout=timeout)
return (True, data)
except Exception as e:
# At timeout and error, we manually check whether any worker has
# failed. Note that this is the only mechanism for Windows to detect
# worker failures.
if not all(w.is_alive() for w in self.workers):
pids_str = ', '.join(str(w.pid) for w in self.workers if not w.is_alive())
raise RuntimeError('DataLoader worker (pid(s) {}) exited unexpectedly'.format(pids_str))
if isinstance(e, queue.Empty):
return (False, None)
raise
def _get_batch(self):
# Fetches data from `self.data_queue`.
#
# We check workers' status every `MP_STATUS_CHECK_INTERVAL` seconds,
# which we achieve by running `self._try_get_batch(timeout=MP_STATUS_CHECK_INTERVAL)`
# in a loop. This is the only mechanism to detect worker failures for
# Windows. For other platforms, a SIGCHLD handler is also used for
# worker failure detection.
#
# If `pin_memory=True`, we also need check if `pin_memory_thread` had
# died at timeouts.
if self.timeout > 0:
success, data = self._try_get_batch(self.timeout)
if success:
return data
else:
raise RuntimeError('DataLoader timed out after {} seconds'.format(self.timeout))
elif self.pin_memory:
while self.pin_memory_thread.is_alive():
success, data = self._try_get_batch()
if success:
return data
else:
# while condition is false, i.e., pin_memory_thread died.
raise RuntimeError('Pin memory thread exited unexpectedly')
# In this case, `self.data_queue` is a `queue.Queue`,. But we don't
# need to call `.task_done()` because we don't use `.join()`.
else:
while True:
success, data = self._try_get_batch()
if success:
return data
def __next__(self):
if self.num_workers == 0: # same-process loading
indices = next(self.sample_iter) # may raise StopIteration
batch = self.collate_fn([self.dataset[i] for i in indices])
if self.pin_memory:
batch = _utils.pin_memory.pin_memory_batch(batch)
return batch
# check if the next sample has already been generated
if self.rcvd_idx in self.reorder_dict:
batch = self.reorder_dict.pop(self.rcvd_idx)
return self._process_next_batch(batch)
if self.batches_outstanding == 0:
self._shutdown_workers()
raise StopIteration
while True:
assert (not self.shutdown and self.batches_outstanding > 0)
idx, batch = self._get_batch()
self.batches_outstanding -= 1
if idx != self.rcvd_idx:
# store out-of-order samples
self.reorder_dict[idx] = batch
continue
return self._process_next_batch(batch)
next = __next__ # Python 2 compatibility
def __iter__(self):
return self
def _put_indices(self):
assert self.batches_outstanding < 2 * self.num_workers
indices = next(self.sample_iter, None)
if indices is None:
return
self.index_queues[self.worker_queue_idx].put((self.send_idx, indices))
self.worker_queue_idx = (self.worker_queue_idx + 1) % self.num_workers
self.batches_outstanding += 1
self.send_idx += 1
def _process_next_batch(self, batch):
self.rcvd_idx += 1
self._put_indices()
if isinstance(batch, _utils.ExceptionWrapper):
# make multiline KeyError msg readable by working around
# a python bug https://bugs.python.org/issue2651
if batch.exc_type == KeyError and "\n" in batch.exc_msg:
raise Exception("KeyError:" + batch.exc_msg)
else:
raise batch.exc_type(batch.exc_msg)
return batch
def __getstate__(self):
# TODO: add limited pickling support for sharing an iterator
# across multiple threads for HOGWILD.
# Probably the best way to do this is by moving the sample pushing
# to a separate thread and then just sharing the data queue
# but signalling the end is tricky without a non-blocking API
raise NotImplementedError("_DataLoaderIter cannot be pickled")
def _shutdown_workers(self):
# See NOTE [ Data Loader Multiprocessing Shutdown Logic ] for details on
# the logic of this function.
python_exit_status = _utils.python_exit_status
if python_exit_status is True or python_exit_status is None:
# See (2) of the note. If Python is shutting down, do no-op.
return
# Normal exit when last reference is gone / iterator is depleted.
# See (1) and the second half of the note.
if not self.shutdown:
self.shutdown = True
try:
self.done_event.set()
# Exit `pin_memory_thread` first because exiting workers may leave
# corrupted data in `worker_result_queue` which `pin_memory_thread`
# reads from.
if hasattr(self, 'pin_memory_thread'):
# Use hasattr in case error happens before we set the attribute.
# First time do `worker_result_queue.put` in this process.
# `cancel_join_thread` in case that `pin_memory_thread` exited.
self.worker_result_queue.cancel_join_thread()
self.worker_result_queue.put(None)
self.pin_memory_thread.join()
# Indicate that no more data will be put on this queue by the
# current process. This **must** be called after
# `pin_memory_thread` is joined because that thread shares the
# same pipe handles with this loader thread. If the handle is
# closed, Py3 will error in this case, but Py2 will just time
# out even if there is data in the queue.
self.worker_result_queue.close()
# Exit workers now.
for q in self.index_queues:
q.put(None)
# Indicate that no more data will be put on this queue by the
# current process.
q.close()
for w in self.workers:
w.join()
finally:
# Even though all this function does is putting into queues that
# we have called `cancel_join_thread` on, weird things can
# happen when a worker is killed by a signal, e.g., hanging in
# `Event.set()`. So we need to guard this with SIGCHLD handler,
# and remove pids from the C side data structure only at the
# end.
#
# FIXME: Unfortunately, for Windows, we are missing a worker
# error detection mechanism here in this function, as it
# doesn't provide a SIGCHLD handler.
if self.worker_pids_set:
_utils.signal_handling._remove_worker_pids(id(self))
self.worker_pids_set = False
def __del__(self):
if self.num_workers > 0:
self._shutdown_workers()
class HookDataLoader(torch.utils.data.DataLoader):
def __init__(self, dataset, batch_size=1, shuffle=False, sampler=None,
batch_sampler=None, num_workers=0, collate_fn=torch.utils.data.dataloader.default_collate,
pin_memory=False, drop_last=False, timeout=0,
worker_init_fn=None):
torch.utils.data.DataLoader.__init__(self, dataset=dataset, batch_size=batch_size, shuffle=shuffle,
sampler=sampler,
batch_sampler=batch_sampler, num_workers=num_workers,
collate_fn=collate_fn,
pin_memory=pin_memory, drop_last=drop_last, timeout=timeout,
worker_init_fn=worker_init_fn)
def __iter__(self):
return HookDataloderIter(self)
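# The HookDataset mix-in below defines the two hooks that _worker_loop() calls
# around every batch: __before_hook__() just before a worker collates a batch
# and __after_hook__() right after. An illustrative subclass (an assumption,
# not taken from the original repo) might look like:
#
#     class MyDataset(HookDataset, torch.utils.data.Dataset):
#         def __before_hook__(self):
#             self._handle = open_shard()   # hypothetical per-batch setup
#         def __after_hook__(self):
#             self._handle.close()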
class HookDataset(object):
def __before_hook__(self):
pass
def __after_hook__(self):
        pass
| 2.046875 | 2
python/athena/communicator/mpi_nccl_comm.py | sj1104/Het | 2 | 12792576 | from ctypes import *
import ctypes
from athena import ndarray
from athena.stream import *
import numpy as np
from enum import Enum
import os
def _load_nccl_lib():
"""Load libary in build/lib."""
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
lib_path = os.path.join(curr_path, '../../../build/lib/')
path_to_so_file = os.path.join(lib_path, "lib_mpi_nccl_runtime_api.so")
lib = CDLL(path_to_so_file, RTLD_GLOBAL)
return lib
lib_mpi_nccl = _load_nccl_lib()
# lib_mpi_nccl = CDLL("./lib_mpi_nccl_runtime_api.so", RTLD_GLOBAL)
class ncclDataType_t(Enum):
ncclInt8 = 0
ncclChar = 0
ncclUint8 = 1
ncclInt32 = 2
ncclInt = 2
ncclUint32 = 3
ncclInt64 = 4
ncclUint64 = 5
ncclFloat16 = 6
ncclHalf = 6
ncclFloat32 = 7
ncclFloat = 7
ncclFloat64 = 8
ncclDouble = 8
ncclNumTypes = 9
class ncclRedOp_t(Enum):
ncclSum = 0
ncclProd = 1
ncclMax = 2
ncclMin = 3
ncclNumOps = 4
class ncclUniqueId(Structure):
_fields_=[("internal", (c_int8 * 128))]
class MPI_NCCL_Communicator():
def __init__(self, stream = None):
'''
mpicomm: the MPI communicator, to use in MPI_Bcast, MPI_Reduce, MPI_Scatter, etc
ncclcomm: the NCCL communicator, to use in ncclAllReduce ...
nRanks: the total number of MPI threads
        myRank: the rank of this process among all MPI threads
        localRank: the rank among the MPI processes on this node (used to select the GPU)
ncclId: ncclGetUniqueId should be called once when creating a communicator
and the Id should be distributed to all ranks in the communicator before calling ncclCommInitRank.
stream: the stream for NCCL communication
'''
self.mpicomm = c_int64(0)
self.ncclcomm = c_int64(0)
self.nRanks = c_int32(0)
self.myRank = c_int32(0)
self.localRank = c_int32(-1)
self.ncclId = ncclUniqueId()
self.device_id = c_int(0)
self.MPI_Init()
self.MPIGetComm()
self.MPI_Comm_rank()
self.MPI_Comm_size()
self.getLocalRank()
self.device_id.value = self.localRank.value
if stream == None:
self.stream = create_stream_handle(ndarray.gpu(self.device_id.value))
else:
self.stream = stream
def MPI_Init(self):
lib_mpi_nccl.MPIInit()
def MPI_Finalize(self):
lib_mpi_nccl.MPIFinalize()
def MPIGetComm(self):
lib_mpi_nccl.MPIGetComm(ctypes.byref(self.mpicomm))
def MPI_Comm_rank(self):
lib_mpi_nccl.getMPICommRank(ctypes.byref(self.mpicomm), ctypes.byref(self.myRank))
def MPI_Comm_size(self):
lib_mpi_nccl.getMPICommSize(ctypes.byref(self.mpicomm), ctypes.byref(self.nRanks))
def getLocalRank(self):
lib_mpi_nccl.getLocalRank(ctypes.byref(self.mpicomm), self.nRanks, self.myRank, ctypes.byref(self.localRank))
def ncclGetUniqueId(self):
lib_mpi_nccl.getNcclUniqueId(ctypes.byref(self.ncclId), self.mpicomm, self.localRank)
def dlarrayNcclAllReduce(self, dlarray, datatype, reduceop, executor_stream = None):
lib_mpi_nccl.dlarrayAllReduce(dlarray.handle, c_int(datatype.value), c_int(reduceop.value), self.ncclcomm, executor_stream.handle if executor_stream else self.stream.handle)
def dlarrayBroadcast(self, dlarray, datatype, root, executor_stream = None):
lib_mpi_nccl.dlarrayBroadcast(dlarray.handle, c_int(datatype.value), c_int(root), self.ncclcomm, executor_stream.handle if executor_stream else self.stream.handle)
def dlarrayAllGather(self, input_arr, output_arr, datatype, executor_stream = None):
lib_mpi_nccl.dlarrayAllGather(input_arr.handle, output_arr.handle, c_int(datatype.value), self.ncclcomm, executor_stream.handle if executor_stream else self.stream.handle)
def dlarraySend(self, arr, datatype, target, executor_stream = None):
lib_mpi_nccl.dlarraySend(arr.handle, c_int(datatype.value), c_int(target), self.ncclcomm, executor_stream.handle if executor_stream else self.stream.handle)
def dlarrayRecv(self, arr, datatype, src, executor_stream = None):
lib_mpi_nccl.dlarrayRecv(arr.handle, c_int(datatype.value), c_int(src), self.ncclcomm, executor_stream.handle if executor_stream else self.stream.handle)
def ncclCommInitRank(self):
'''
        To use a partial AllReduce, change the arguments here:
        self.nRanks is the number of processes taking part in ncclAllReduce;
        self.myRank is the rank among these processes and must lie in [0, self.nRanks - 1].
'''
lib_mpi_nccl.initNcclCommRank(ctypes.byref(self.ncclcomm), self.nRanks, ctypes.byref(self.ncclId), self.myRank, self.localRank)
def ncclCommDestroy(self):
lib_mpi_nccl.commDestroyNccl(ctypes.byref(self.ncclcomm))
def ncclSetDevice(self, device_id):
self.device_id.value = device_id
lib_mpi_nccl.setDevice(self.device_id.value)
def ncclInit(self):
self.ncclSetDevice(self.device_id.value)
self.ncclGetUniqueId()
self.ncclCommInitRank()
def ncclFinish(self):
self.MPI_Finalize()
def mpi_nccl_communicator():
'''
'''
return MPI_NCCL_Communicator()
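# Point-to-point sketch (illustrative; assumes at least two ranks were launched
# with mpirun): rank 0 sends a small GPU buffer to rank 1 using the
# dlarraySend/dlarrayRecv wrappers defined above.
def _p2p_example(comm):
    buf = ndarray.array(np.arange(8, dtype=np.float32), ctx=ndarray.gpu(comm.device_id.value))
    if comm.myRank.value == 0:
        comm.dlarraySend(buf, ncclDataType_t.ncclFloat32, 1)
    elif comm.myRank.value == 1:
        comm.dlarrayRecv(buf, ncclDataType_t.ncclFloat32, 0)
        print("rank 1 received:", buf.asnumpy())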
# NCCL_DEBUG=INFO mpirun --allow-run-as-root -np 4 python mpi_nccl_comm.py
if __name__ == "__main__":
t = mpi_nccl_communicator()
t.ncclInit()
arr = np.ones(16)*t.localRank.value
print("before: = ", arr)
arr = ndarray.array(arr, ctx = ndarray.gpu(t.device_id.value))
output_arr = np.zeros(16 * t.nRanks.value)
output_arr = ndarray.array(output_arr, ctx = ndarray.gpu(t.device_id.value))
t.dlarrayNcclAllReduce(arr, ncclDataType_t.ncclFloat32, ncclRedOp_t.ncclSum)
# t.dlarrayBroadcast(arr, ncclDataType_t.ncclFloat32, 0)
# t.dlarrayAllGather(arr, output_arr, ncclDataType_t.ncclFloat32)
print("after: = ", arr.asnumpy())
t.ncclFinish()
| 2.03125 | 2 |
game/migrations/0001_initial.py | m4tx/hackthespace | 8 | 12792577 | # Generated by Django 2.1.7 on 2019-03-27 13:01
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Player',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('session_id', models.CharField(db_index=True, max_length=20, unique=True)),
('email', models.EmailField(blank=True, max_length=100)),
],
),
migrations.CreateModel(
name='SolvedHiddenPuzzle',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('puzzle', models.CharField(choices=[('rot13', 'rot13'), ('sky', 'sky'), ('image', 'image'), ('terminal', 'terminal'), ('redirect', 'redirect'), ('login', 'login'), ('pages', 'pages'), ('audio_spectrum', 'audio_spectrum'), ('keypad', 'keypad'), ('vigenere', 'vigenere'), ('stego_mix', 'stego_mix'), ('reverse', 'reverse'), ('finish', 'finish')], max_length=40)),
('timestamp', models.DateTimeField()),
('player', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='game.Player')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='SolvedPuzzle',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('puzzle', models.CharField(choices=[('rot13', 'rot13'), ('sky', 'sky'), ('image', 'image'), ('terminal', 'terminal'), ('redirect', 'redirect'), ('login', 'login'), ('pages', 'pages'), ('audio_spectrum', 'audio_spectrum'), ('keypad', 'keypad'), ('vigenere', 'vigenere'), ('stego_mix', 'stego_mix'), ('reverse', 'reverse'), ('finish', 'finish')], max_length=40)),
('timestamp', models.DateTimeField()),
('player', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='game.Player')),
],
options={
'abstract': False,
},
),
migrations.AlterIndexTogether(
name='solvedpuzzle',
index_together={('player', 'puzzle')},
),
migrations.AlterIndexTogether(
name='solvedhiddenpuzzle',
index_together={('player', 'puzzle')},
),
]
| 1.835938 | 2 |
auth.py | Emberwalker/Arke | 2 | 12792578 | <reponame>Emberwalker/Arke
from flask import Blueprint, render_template, abort, request, redirect, url_for
from flask.ext.login import LoginManager, UserMixin, login_required, current_user, login_user, logout_user
from werkzeug.security import generate_password_hash, check_password_hash
from db import User, get_user, has_superuser, write_to_db, delete_from_db, get_all
from settings import SECRET_KEY
auth = Blueprint('auth', __name__, template_folder='templates')
# Auth Helpers/Flask-Login
auth_sys = LoginManager()
def setup_auth(app):
auth_sys.init_app(app)
# Use the UserMixin from Flask-Login to make this easy.
class FLUser(UserMixin):
def __init__(self, user):
self.user = user
def get_id(self):
return self.user.username
# Redirect missing attributes to the User object
def __getattr__(self, name):
return getattr(self.user, name)
@auth_sys.user_loader
def load_user(uname):
user = get_user(uname)
if user:
return FLUser(user)
return None
@auth_sys.unauthorized_handler
def unauthorized():
return redirect('/login')
class UserExists(ValueError):
pass
class NoPermissionError(Exception):
pass
def create_user(name, password, sudo=False):
if not name or not password or len(name) < 3 or len(password) < 4 or name.isdigit(): # Disallow unames that are numbers to avoid confusing the ID catcher
raise ValueError()
if get_user(name.lower()):
raise UserExists()
u = User(username=name.lower(), password=generate_password_hash(password))
u.is_superuser = sudo
write_to_db(u)
def check_password(user, password):
return check_password_hash(user.password, password)
# Flask Routing
@auth.route('/logout')
@login_required
def logout():
logout_user()
return render_template('auth/logout.html')
@auth.route('/login', methods=['GET', 'POST'])
def login():
if current_user.is_authenticated():
return redirect('/')
if not has_superuser():
return redirect('/firstrun')
errs = None
if request.method == 'POST':
try:
uname = request.form['username'].lower()
user = get_user(uname)
assert user
assert check_password(user, request.form['password'])
remember = False
if 'remember' in request.form:
remember = True
            login_user(FLUser(user), remember=remember)
return redirect('/')
except Exception as ex:
errs = ["Incorrect username/password."]
return render_template('auth/login.html', errors=errs)
@auth.route('/firstrun', methods=['GET', 'POST'])
def firstrun():
if has_superuser():
return redirect('/login')
errs = None
if request.method == 'POST':
try:
assert request.form['password'] == request.form['password-confirm']
uname = request.form['username'].lower()
create_user(uname, request.form['password'], sudo=True)
return render_template('auth/setup_complete.html')
except Exception as ex:
errs = ["Invalid credentials. Mismatching passwords?"]
return render_template('auth/firstrun.html', errors=errs)
ERR_USER_EXISTS = "User already exists; perhaps you wanted <a href=\"/manage-accounts\">account management</a>?"
@auth.route('/create-user', methods=['GET', 'POST'])
@login_required
def create_user_page():
if not current_user.is_superuser:
return redirect('/')
errs = None
info = None
if request.method == 'POST':
try:
assert request.form['password'] == request.form['password-confirm']
uname = request.form['username']
admin = True if 'superuser' in request.form else False
create_user(uname, request.form['password'], sudo=admin)
info = "User '{}' created.".format(uname)
except UserExists:
errs = [ERR_USER_EXISTS]
except Exception as ex:
errs = ["User creation failed; mismatching passwords?"]
return render_template('auth/create_user.html', errors=errs, info=info)
@auth.route('/manage-accounts')
@login_required
def manage_accounts():
if not current_user.is_superuser:
return redirect('/')
info = request.args.get('info', None)
errs = []
return render_template('auth/manage.html', users=get_all(User), errors=errs, info=info)
@auth.route('/users/destroy', methods=['GET', 'POST'])
@login_required
def destroy_user():
if not current_user.is_superuser:
return redirect('/')
uid = request.args.get('uid', None)
if not uid:
return redirect(url_for('auth.manage_accounts'))
user = get_user(uid)
errs = None
if not user:
return redirect(url_for('auth.manage_accounts'))
if request.method == 'POST':
# Actually destroy
uname = user.username
try:
delete_from_db(user)
return redirect(url_for('auth.manage_accounts', info="User {} deleted.".format(uname)))
except Exception as ex:
errs = [str(ex)]
return render_template('auth/destroy.html', user=user, errors=errs)
@auth.route('/users/change-password', methods=['GET', 'POST'])
@login_required
def change_password():
uid = request.args.get('uid', None)
user = None
if uid and current_user.is_superuser:
user = get_user(uid)
else:
user = current_user
errs = None
info = None
if request.method == 'POST':
try:
uname = request.form['username']
pw1 = request.form['password']
pw2 = request.form['password-confirm']
assert pw1 == pw2
if not uname == current_user.username and not current_user.is_superuser:
raise NoPermissionError()
u = get_user(uname)
if len(pw1) < 4:
raise ValueError()
u.password = generate_password_hash(pw1)
write_to_db(u)
info = "Password changed for '{}'".format(uname)
except NoPermissionError:
errs = ["Permission denied."]
except Exception as ex:
print(ex)
errs = ["Password change failed; mismatching passwords?"]
return render_template('auth/change_password.html', user=user, errors=errs, info=info)
@auth.route('/users/promote', methods=['GET', 'POST'])
@login_required
def promote_user():
if not current_user.is_superuser:
return redirect('/')
uid = request.args.get('uid', None)
if not uid:
return redirect(url_for('auth.manage_accounts'))
user = get_user(uid)
if not user:
return redirect(url_for('auth.manage_accounts'))
if request.method == 'POST':
user.is_superuser = True
uname = user.username
write_to_db(user)
return redirect(url_for('auth.manage_accounts', info="{} promoted.".format(uname)))
return render_template('auth/promote.html', user=user)
@auth.route('/users/demote', methods=['GET', 'POST'])
@login_required
def demote_user():
if not current_user.is_superuser:
return redirect('/')
uid = request.args.get('uid', None)
if not uid:
return redirect(url_for('auth.manage_accounts'))
user = get_user(uid)
if not user:
return redirect(url_for('auth.manage_accounts'))
if request.method == 'POST':
user.is_superuser = False
uname = user.username
write_to_db(user)
return redirect(url_for('auth.manage_accounts', info="{} demoted.".format(uname)))
return render_template('auth/demote.html', user=user)
| 2.484375 | 2 |
examples/steps/custom_extract_local.py | hillsdale18/ProjectX | 3 | 12792579 | """
ETL step wrapper for creating an S3 node for input from local files
"""
from dataduct.steps import ExtractLocalStep
import logging
logger = logging.getLogger(__name__)
class CustomExtractLocalStep(ExtractLocalStep):
"""CustomExtractLocal Step class that helps get data from a local file
"""
def __init__(self, **kwargs):
"""Constructor for the CustomExtractLocal class
Args:
**kwargs(optional): Keyword arguments directly passed to base class
"""
logger.info('Using the Custom Extract Local Step')
super(CustomExtractLocalStep, self).__init__(**kwargs)
| 2.171875 | 2 |
paclair/docker/docker_image.py | jpthiery/paclair | 0 | 12792580 | <gh_stars>0
# -*- coding: utf-8 -*-
"""
Docker_image module
"""
import hashlib
from paclair.logged_object import LoggedObject
class DockerImage(LoggedObject):
"""
A Docker Image
"""
def __init__(self, name, registry, repository="", tag='latest'):
"""
Constructor
:param name: docker image's name (ex: ubuntu, centos, ...)
:param repository: repository's name
:param tag: image's tag
:param registry: Docker registry
"""
super(DockerImage, self).__init__()
self.name = name
self.logger.debug("INITCLASS:NAMEIMAGE:{name}".format(name=self.name))
self.tag = tag
self.logger.debug("INITCLASS:TAG:{tag}".format(tag=self.tag))
self.registry = registry
self.repository = repository
self._manifest = None
self._sha = None
self.logger.debug("INITCLASS:REPOSITORY:{repository}".format(repository=self.repository))
@property
def token(self):
"""
Token for this image
:return:
"""
return self.registry.get_token(self)
@property
def sha(self):
"""
Sha256 of the layers list (used for clair layer_name)
:return: sha256
"""
if self._sha is None:
m = hashlib.sha256(''.join(self.get_layers()).encode('utf-8'))
self._sha = m.hexdigest()
return self._sha
@property
def short_sha(self):
"""
Sha short version (12 characters)
"""
return self.sha[:12]
@property
def manifest(self):
"""
Get manifest
:returns dict:
"""
if self._manifest is None:
self._manifest = self.registry.get_manifest(self)
return self._manifest
def get_layers(self):
"""
Get ordered layers
:returns list:
"""
manifest = self.manifest
layers = []
# Check Version
if manifest["schemaVersion"] == 2:
fs_layers = manifest['layers']
digest_field = 'digest'
else:
fs_layers = manifest.get('fsLayers', [])[::-1]
digest_field = 'blobSum'
# List layers
for fs_layer in fs_layers:
if fs_layer[digest_field] not in layers:
layers.append(fs_layer[digest_field])
return layers
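# Minimal usage sketch (illustrative; `registry` stands for any object exposing the
# get_token()/get_manifest() interface this class relies on):
#
#   image = DockerImage("library/ubuntu", registry, tag="20.04")
#   digests = image.get_layers()        # ordered blob digests, schema v1 or v2
#   clair_layer_name = image.short_sha  # 12-char sha256 over the digest list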
| 2.3125 | 2 |
hyperadmin/clients/common.py | zbyte64/django-hyperadmin | 25 | 12792581 | <reponame>zbyte64/django-hyperadmin<filename>hyperadmin/clients/common.py
try:
from django.conf.urls import patterns, url
except ImportError:
from django.conf.urls.defaults import patterns, url
from django.views.generic import TemplateView
from django.core.urlresolvers import reverse
from hyperadmin.apirequests import InternalAPIRequest
import logging
class Client(object):
default_namespace = 'hyper-client'
default_app_name = 'client'
def __init__(self, api_endpoint, name=None, app_name=None):
self.api_endpoint = api_endpoint
self.name = name or self.default_namespace
self.app_name = app_name or self.default_app_name
def get_logger(self):
return logging.getLogger(__name__)
def get_urls(self):
pass
def urls(self):
return self, self.app_name, self.name
urls = property(urls)
@property
def urlpatterns(self):
return self.get_urls()
def reverse(self, name, *args, **kwargs):
return reverse('%s:%s' % (self.name, name), args=args, kwargs=kwargs, current_app=self.app_name)
class SimpleTemplateClientView(TemplateView):
client = None
def get_context_data(self, **kwargs):
context = super(SimpleTemplateClientView, self).get_context_data(**kwargs)
context.update(self.client.get_context_data())
return context
class SimpleTemplateClient(Client):
template_name = None
template_view = SimpleTemplateClientView
def get_media(self):
pass #TODO
def get_context_data(self):
api_endpoint = self.api_endpoint
api_request = InternalAPIRequest(site=api_endpoint)
api_endpoint = api_endpoint.fork(api_request=api_request)
api_url = api_endpoint.get_url()
return {'media':self.get_media(),
'api_endpoint':api_url,
'client':self,}
def get_urls(self):
urlpatterns = patterns('',
url(r'^$',
self.template_view.as_view(template_name=self.template_name, client=self),
name='index'),
)
return urlpatterns
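# Minimal subclass sketch (hypothetical template name and URL wiring):
#
#   class DashboardClient(SimpleTemplateClient):
#       template_name = 'hyperadmin/dashboard.html'
#
#   client = DashboardClient(api_endpoint=site_endpoint, name='dashboard')
#   urlpatterns = patterns('', url(r'^dashboard/', include(client.urls)))
#
# get_context_data() forks the endpoint with an InternalAPIRequest, so the template
# receives a resolvable `api_endpoint` URL alongside the client instance itself.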
| 2.015625 | 2 |
qodex/ui/new_shelf.py | grapesmoker/qodex | 0 | 12792582 | # -*- coding: utf-8 -*-
################################################################################
## Form generated from reading UI file 'new_shelf.ui'
##
## Created by: Qt User Interface Compiler version 6.1.0
##
## WARNING! All changes made in this file will be lost when recompiling UI file!
################################################################################
from PySide6.QtCore import *
from PySide6.QtGui import *
from PySide6.QtWidgets import *
class Ui_NewShelfDialog(object):
def setupUi(self, NewShelfDialog):
if not NewShelfDialog.objectName():
NewShelfDialog.setObjectName(u"NewShelfDialog")
NewShelfDialog.resize(400, 300)
self.buttonBox = QDialogButtonBox(NewShelfDialog)
self.buttonBox.setObjectName(u"buttonBox")
self.buttonBox.setGeometry(QRect(30, 240, 341, 32))
self.buttonBox.setOrientation(Qt.Horizontal)
self.buttonBox.setStandardButtons(QDialogButtonBox.Cancel|QDialogButtonBox.Ok)
self.widget = QWidget(NewShelfDialog)
self.widget.setObjectName(u"widget")
self.widget.setGeometry(QRect(50, 10, 324, 225))
self.gridLayout = QGridLayout(self.widget)
self.gridLayout.setObjectName(u"gridLayout")
self.gridLayout.setContentsMargins(0, 0, 0, 0)
self.label = QLabel(self.widget)
self.label.setObjectName(u"label")
self.gridLayout.addWidget(self.label, 0, 0, 1, 1)
self.shelf_name = QLineEdit(self.widget)
self.shelf_name.setObjectName(u"shelf_name")
self.gridLayout.addWidget(self.shelf_name, 0, 1, 1, 1)
self.label_2 = QLabel(self.widget)
self.label_2.setObjectName(u"label_2")
self.gridLayout.addWidget(self.label_2, 1, 0, 1, 1)
self.shelf_description = QTextEdit(self.widget)
self.shelf_description.setObjectName(u"shelf_description")
self.gridLayout.addWidget(self.shelf_description, 1, 1, 1, 1)
self.retranslateUi(NewShelfDialog)
self.buttonBox.accepted.connect(NewShelfDialog.accept)
self.buttonBox.rejected.connect(NewShelfDialog.reject)
QMetaObject.connectSlotsByName(NewShelfDialog)
# setupUi
def retranslateUi(self, NewShelfDialog):
NewShelfDialog.setWindowTitle(QCoreApplication.translate("NewShelfDialog", u"New Shelf", None))
self.label.setText(QCoreApplication.translate("NewShelfDialog", u"Name", None))
self.label_2.setText(QCoreApplication.translate("NewShelfDialog", u"Description", None))
# retranslateUi
| 2.046875 | 2 |
crusher/utils.py | JudahRockLuberto/crusher | 2 | 12792583 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Useful tools."""
import os
import glob
import pickle
from astropy.io import fits
__all__ = [
'read_from_pickle',
'save_to_pickle',
'save_to_fits',
'linux_or_mac',
'clean_after_ellipse',
]
def read_from_pickle(name):
"""Read the data from Pickle file."""
return pickle.load(open(name, "rb"))
def save_to_pickle(obj, name):
"""Save an object to a cPickle/Pickle format binary file."""
output = open(name, 'wb')
pickle.dump(obj, output, protocol=2)
output.close()
return
def save_to_fits(data, fits_file, wcs=None, header=None, overwrite=True):
"""Save a NDarray to FITS file.
Parameters
----------
data : ndarray
Data to be saved in FITS file.
fits_file : str
Name of the FITS file.
wcs : astropy.wcs object, optional
World coordinate system information. Default: None
header : str, optional
Header information. Default: None
overwrite : bool, optional
Overwrite existing file or not. Default: True
"""
if wcs is not None:
wcs_header = wcs.to_header()
data_hdu = fits.PrimaryHDU(data, header=wcs_header)
else:
data_hdu = fits.PrimaryHDU(data)
if header is not None:
if 'SIMPLE' in header and 'BITPIX' in header:
data_hdu.header = header
else:
data_hdu.header.extend(header)
if os.path.islink(fits_file):
os.unlink(fits_file)
data_hdu.writeto(fits_file, overwrite=overwrite)
return
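# Usage sketch for save_to_fits (illustrative file name and data):
#
#   import numpy as np
#   img = np.random.rand(64, 64)
#   save_to_fits(img, 'cutout.fits', wcs=None, header=None, overwrite=True)
#
# When a WCS object is given, its to_header() output seeds the primary HDU header
# before any extra `header` cards are appended.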
def linux_or_mac():
"""Check the current platform.
Parameters
----------
Return
------
platform : str
"linux" or "macosx".
"""
from sys import platform
if platform == "linux" or platform == "linux2":
return "linux"
elif platform == "darwin":
return "macosx"
else:
raise TypeError("# Sorry, only support Linux and MacOSX for now!")
def clean_after_ellipse(folder, prefix, remove_bin=False):
"""Clean all the unecessary files after ellipse run.
Parameters
----------
folder : str
Directory that keeps all the output files.
prefix : str
Prefix of the file.
remove_bin : bool, optional
Remove the output binary table or not. Default: False
"""
_ = [os.remove(par) for par in glob.glob("{}/{}*.par".format(folder, prefix))]
_ = [os.remove(pkl) for pkl in glob.glob("{}/{}*.pkl".format(folder, prefix))]
_ = [os.remove(img) for img in glob.glob("{}/{}*.fits".format(folder, prefix))]
_ = [os.remove(tab) for tab in glob.glob("{}/{}*.tab".format(folder, prefix))]
if remove_bin:
_ = [os.remove(bin) for bin in glob.glob("{}/{}*.bin".format(folder, prefix))]
| 2.625 | 3 |
update-attempt-ids.py | inducer/courseflow | 284 | 12792584 | <filename>update-attempt-ids.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import django
django.setup()
from course.models import GradeChange
for gchange in GradeChange.objects.all():
if gchange.flow_session is not None:
gchange.attempt_id = "flow-session-%d" % gchange.flow_session.id
gchange.save()
| 1.929688 | 2 |
ibdp_classes/__main__.py | ram6ler/ibdp_classes | 0 | 12792585 | import sys
from .ibdp_classes import Pseudocode
def run():
def help():
print(
"""
To use:
python -m ibdp_classes [options] filename
Options:
-md Output markdown.
-py Output intermediate Python code.
"""
)
exit(0)
if len(sys.argv) < 2:
help()
md = "-md" in sys.argv
py = "-py" in sys.argv
file_name = sys.argv[-1]
try:
with open(file_name) as f:
lines = [line for line in f]
except:
help()
code = "".join(lines)
pc = Pseudocode(code)
if md:
print("```")
print(pc)
print("```\n")
if md and py:
print("```python")
if py:
print(pc.python)
if md and py:
print("```\n")
if py and not md:
print("\n\n")
if md:
print("Output:\n\n```")
print(pc())
if md:
print("```")
if __name__ == "__main__":
run()
| 3.328125 | 3 |
floor.py | devcusn/civil-engineering | 0 | 12792586 | <filename>floor.py
"""
cordinate = [
[1,2,1,2,1],
[2,0,2,0,2],
[1,2,1,2,1]
]
before use this class,you must to create column and beam by using
column and beam class
"""
class floor():
def __init__(self,column,columnNumber,beam,beamNumber,cordinate):
self.columnNumber = columnNumber
self.beamNumber = beamNumber
self.column = column
self.beam = beam
self.totalVoiceColumn = self.column.volume * self.columnNumber
self.totalVoiceBeam = self.beam.volume * self.beamNumber
self.totalVoice = self.totalVoiceColumn + self.totalVoiceBeam
self.cordinate = cordinate
def showAll(self):
        print('everything')
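# Illustrative usage (hypothetical column/beam objects; this class only assumes
# they expose a `volume` attribute):
#
#   c = column(...)   # created with the column class
#   b = beam(...)     # created with the beam class
#   first_floor = floor(c, columnNumber=8, beam=b, beamNumber=12,
#                       cordinate=[[1, 2, 1], [2, 0, 2], [1, 2, 1]])
#   print(first_floor.totalVoice)   # combined volume of all columns and beams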
| 3.53125 | 4 |
webGrover.py | meghu2791/evaluate_neuralFakenewsDetectors | 1 | 12792587 | news = "Online disinformation, or fake news intended to deceive, has emerged as a major societal problem. Currently, fake news articles are written by humans, but recently-introduced AI technology based on Neural Networks might enable adversaries to generate fake news. Our goal is to reliably detect this “neural fake news” so that its harm can be minimized."
from selenium import webdriver
from seleniumrequests import Chrome
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
import time
import json
import argparse
import req
#initialization
human_data = []
machine_data = []
driver = webdriver.Firefox()
#command-line argument parsing
parser = argparse.ArgumentParser()
parser.add_argument('--model', type=str)
parser.add_argument('--file_name', type=str)
parser.add_argument('--save_human_file', type=str)
parser.add_argument('--save_machine_file', type=str)
args = parser.parse_args()
model = args.model
file_name = args.file_name
save_human_file = args.save_human_file
save_machine_file = args.save_machine_file
store_human_data = []
store_machine_data = []
#check_now = human_data
#driver.find_element_by_class_name("ant-input.sc-htpNat.sc-ksYbfQ.iuRnVj").clear()
#driver.find_element_by_class_name("ant-input.sc-htpNat.sc-ksYbfQ.iuRnVj").send_keys("Online disinformation, or fake news intended to deceive, has emerged as a major societal problem. Currently, fake news articles are written by humans, but recently-introduced AI technology based on Neural Networks might enable adversaries to generate fake news. Our goal is to reliably detect this “neural fake news” so that its harm can be minimized.")
#ans = driver.find_element_by_css_selector("button.ant-btn.sc-bwzfXH.sc-jDwBTQ.kNoRcT.ant-btn-default").submit()
#element = WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.CLASS_NAME, "sc-kvZOFW.bpFYHv")))
def detectGrover(news, driver, store_human_data, store_machine_data):
#for news in check_now:
driver.find_element_by_css_selector("textarea.ant-input.sc-dxgOiQ.sc-kTUwUJ.gEHnFy").clear()
driver.find_element_by_css_selector("textarea.ant-input.sc-dxgOiQ.sc-kTUwUJ.gEHnFy").send_keys(news.get('article'))
ans = driver.find_element_by_css_selector("button.ant-btn.sc-bdVaJa.sc-jbKcbu.iUrOzv").submit()
#ant-btn sc-bdVaJa sc-jbKcbu iUrOzv
try:
element = WebDriverWait(driver, 30).until(EC.presence_of_element_located((By.CSS_SELECTOR, "div.sc-dfVpRl.eIhhqn")))
if element:
print(element.text.split())
if (news['label'] not in element.text.split()) and ((news['label'] + ".") not in element.text.split()[-1]):
print(news['article'], element.text.split(), news['label'])
else:
if news['label'] == 'human':
store_human_data.append(news)
else:
store_machine_data.append(news)
except:
ans = driver.find_element_by_css_selector("button.ant-btn.sc-bdVaJa.sc-jbKcbu.iUrOzv").submit()
try:
element = WebDriverWait(driver, 30).until(
EC.presence_of_element_located((By.CSS_SELECTOR, "div.sc-dfVpRl.eIhhqn")))
if element:
if (news['label'] not in element.text.split()) and (
(news['label'] + ".") not in element.text.split()[-1]):
print(news['article'], element.text.split(), news['label'])
else:
if news['label'] == 'human':
store_human_data.append(news)
else:
store_machine_data.append(news)
except:
print("Unresponsive!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
def detectGPT2(news, driver, store_human_data, store_machine_data):
if 'article' in news.keys():
#print(news.keys())
driver.find_element_by_id("textbox").clear()
driver.find_element_by_id("textbox").send_keys(news['article'])
temp = driver.find_element_by_id("real-percentage")
time.sleep(5)
temp = driver.find_element_by_id("real-percentage").text.split('%')
if float(temp[0]) > 50:
label = 'human'
else:
label = 'machine'
#if label not in news['label']:
# print(news['article'], label, news['label'])
#else:
if label == 'human':
store_human_data.append(news)
else:
store_machine_data.append(news)
#driver.close()
def detectFakeBox(news, store_human_data, store_machine_data):
maxtry = 10
res = 0
label = ""
try:
while maxtry > 0:
res = req.sendRequest(news.get('article'))
maxtry = maxtry - 1
except:
print("Internet Error!Sleep 3 sec!", res, maxtry)
time.sleep(3)
if res:
if res["content_decision"] == 'impartial' or ((res['content_decision'] == 'bias') and (res['content_score'] < 0.5)):
label = 'human'
else:
label = 'machine'
if label == news['label']:
if label == 'human':
store_human_data.append(news)
else:
store_machine_data.append(news)
#model load
if model == 'groverAI':
driver.get("https://grover.allenai.org/detect")
#detectGrover(human_data, driver)
elif model == 'gpt2':
driver.get("https://huggingface.co/openai-detector")
#detectGPT2(human_data, driver)
elif model == 'fakebox':
req.init()
else:
print("Not supported as yet! TODO:CTRL, FakeBox")
#temporary
i = 0
count = 0
#input read
human_file = open(save_human_file, "a+")
machine_file = open(save_machine_file, "a+")
with open(file_name) as json_file:
while True:
line = json_file.readline()
if len(line)!=0 and (model == 'groverAI'):
#print(line)
detectGrover(json.loads(line), driver, store_human_data, store_machine_data)
count +=1
elif len(line)!=0 and (model == 'gpt2'):
len_human = len(store_human_data)
len_machine = len(store_machine_data)
detectGPT2(json.loads(line), driver, store_human_data, store_machine_data)
if len_human < len(store_human_data):
human_file.write(str(json.dumps(store_human_data[-1]))+'\n')
elif len_machine < len(store_machine_data):
machine_file.write(str(json.dumps(store_machine_data[-1]))+'\n')
elif len(line)!=0 and (model == 'fakebox'):
len_human = len(store_human_data)
len_machine = len(store_machine_data)
detectFakeBox(json.loads(line), store_human_data, store_machine_data)
if len_human < len(store_human_data):
human_file.write(str(json.dumps(store_human_data[-1]))+'\n')
elif len_machine < len(store_machine_data):
machine_file.write(str(json.dumps(store_machine_data[-1]))+'\n')
else:
break
json_file.close()
driver.close()
human_file.close()
machine_file.close()
'''
with open(save_human_file, "w") as json_file:
for each in store_human_data:
json_file.write(str(json.dumps(each))+'\n')
with open(save_machine_file, "w") as json_file:
for each in store_machine_data:
json_file.write(str(json.dumps(each))+'\n')
json_file.close()
''' | 3.046875 | 3 |
config/urls.py | shirishgoyal/scholars | 0 | 12792588 | from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from rest_framework.routers import SimpleRouter
from django.views import defaults as default_views
from scholars.courses.viewsets import CourseViewSet, SlideViewSet, CategoryViewSet, LanguageViewSet, TimezoneViewSet, \
SlideReviewViewSet
from scholars.users.viewsets import UserViewSet
router = SimpleRouter()
router.register(r'users', UserViewSet)
router.register(r'timezones', TimezoneViewSet, base_name='timezones')
router.register(r'languages', LanguageViewSet, base_name='languages')
router.register(r'categories', CategoryViewSet)
router.register(r'courses', CourseViewSet)
router.register(r'slides', SlideViewSet)
router.register(r'reviews', SlideReviewViewSet)
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'^api/', include('scholars.authentication.urls')),
url(r'^api/auth/', include('rest_framework_social_oauth2.urls')),
url(r'^api/', include(router.urls)),
# the 'api-root' from django rest-frameworks default router
# http://www.django-rest-framework.org/api-guide/routers/#defaultrouter
# url(r'^$', RedirectView.as_view(url=reverse_lazy('api-root'), permanent=False)),
url(r'^$', TemplateView.as_view(template_name='index.html')),
]
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
# This allows the error pages to be debugged during development, just visit
# these url in browser to see how these error pages look like.
urlpatterns += [
url(r'^400/$', default_views.bad_request, kwargs={'exception': Exception('Bad Request!')}),
url(r'^403/$', default_views.permission_denied, kwargs={'exception': Exception('Permission Denied')}),
url(r'^404/$', default_views.page_not_found, kwargs={'exception': Exception('Page not Found')}),
url(r'^500/$', default_views.server_error),
]
if 'debug_toolbar' in settings.INSTALLED_APPS:
import debug_toolbar
urlpatterns = [
url(r'^__debug__/', include(debug_toolbar.urls)),
] + urlpatterns
| 1.789063 | 2 |
PythonExample/tmp/12_spider.py | RobinTang/machine-learning | 1 | 12792589 | <reponame>RobinTang/machine-learning<gh_stars>1-10
# -*- coding: utf-8 -*-
# Filename: my_crawl.py
# Function: a small crawler for rental listings
# Author:<EMAIL>
# Weibo: OceanBase晓楚
# WeChat: hustos
from bs4 import BeautifulSoup
import re
import sys
import urllib
import time
import random
import time
reload(sys)
sys.setdefaultencoding("GBK")
# Other boards are supported; just uncomment the one you want below
# Second-hand housing
# board = 'OurHouse'
# Second-hand market (main board)
# board = 'SecondMarket'
# Rental housing
board = 'Career_Upgrade'
keywords = []
matched = []
final = []
# for kw in open('/home/wwwroot/rent/keywords.txt').readlines():
# keywords.append(kw.strip())
# print keywords[0]
#soup = BeautifulSoup(open('pg2.html'), "html5lib")
for page in range(1, 10):
url = 'http://m.newsmth.net/board/%s?p=%s' % (board, page)
data = urllib.urlopen(url).read()
# print data
soup = BeautifulSoup(data, "html5lib", from_encoding="utf8")
for a in soup.find_all(href=re.compile("\/article\/" + board)):
item = a.encode('utf-8')
print item
for kw in keywords:
if item.find(kw) >= 0:
matched.append(item)
time.sleep(5 + 10 * random.random())
for item in matched:
if item not in final:
final.append(item)
html = "<html><head><meta charset='UTF-8' /><title>租房</title><base href='http://m.newsmth.net/' /></head><body>"
html += "<br/>".join(final)
html += "<p>last update at %s </p><p><a href='http://m.newsmth.net/board/%s'>水木社区</a></p>" % (time.strftime('%Y-%m-%d %X', time.localtime()), board)
html += "</body></html>"
# output = open('/home/wwwroot/rent/index.html', 'w')
# output.write(html)
# output.close()
# notify,爬完后通知用户
# notifyUrl = "http://m.xxx.cn/rent"
# data = urllib.urlopen(notifyUrl).read() | 2.953125 | 3 |
explore/viz/kde.py | idc9/explore | 0 | 12792590 | import numpy as np
import warnings
from scipy import stats
from six import string_types
import matplotlib.pyplot as plt
from scipy.integrate import trapz
from explore.utils import Proportions
try:
import statsmodels.nonparametric.api as smnp
_has_statsmodels = True
except ImportError:
_has_statsmodels = False
def _univariate_kde(data, shade=False, vertical=False, kernel='gau',
bw="scott", gridsize=100, cut=3,
clip=None, legend=True, ax=None, cumulative=False,
**kwargs):
"""
Computes the KDE of univariate data.
shade : bool, optional
If True, shade in the area under the KDE curve (or draw with filled
contours when data is bivariate).
vertical : bool, optional
If True, density is on x-axis.
kernel : {'gau' | 'cos' | 'biw' | 'epa' | 'tri' | 'triw' }, optional
Code for shape of kernel to fit with. Bivariate KDE can only use
gaussian kernel.
bw : {'scott' | 'silverman' | scalar | pair of scalars }, optional
Name of reference method to determine kernel size, scalar factor,
or scalar for each dimension of the bivariate plot. Note that the
underlying computational libraries have different interperetations
for this parameter: ``statsmodels`` uses it directly, but ``scipy``
treats it as a scaling factor for the standard deviation of the
data.
gridsize : int, optional
Number of discrete points in the evaluation grid.
cut : scalar, optional
Draw the estimate to cut * bw from the extreme data points.
clip : pair of scalars, or pair of pair of scalars, optional
Lower and upper bounds for datapoints used to fit KDE. Can provide
a pair of (low, high) bounds for bivariate plots.
legend : bool, optional
If True, add a legend or label the axes when possible.
cumulative : bool, optional
If True, draw the cumulative distribution estimated by the kde.
ax : matplotlib axes, optional
Axes to plot on, otherwise uses current axes.
kwargs : key, value pairings
Other keyword arguments are passed to ``plt.plot()`` or
``plt.contour{f}`` depending on whether a univariate or bivariate
plot is being drawn.
Output
------
x: array-like, (n_grid_points, )
The grid of values where the kde is evaluated.
y: array-like, (n_grid_points, )
The values of the KDE.
"""
# Sort out the clipping
if clip is None:
clip = (-np.inf, np.inf)
# Calculate the KDE
if np.nan_to_num(data.var()) == 0:
# Don't try to compute KDE on singular data
msg = "Data must have variance to compute a kernel density estimate."
warnings.warn(msg, UserWarning)
x, y = np.array([]), np.array([])
elif _has_statsmodels:
# Prefer using statsmodels for kernel flexibility
x, y = _statsmodels_univariate_kde(data, kernel, bw,
gridsize, cut, clip,
cumulative=cumulative)
else:
# Fall back to scipy if missing statsmodels
if kernel != "gau":
kernel = "gau"
msg = "Kernel other than `gau` requires statsmodels."
warnings.warn(msg, UserWarning)
if cumulative:
raise ImportError("Cumulative distributions are currently "
"only implemented in statsmodels. "
"Please install statsmodels.")
x, y = _scipy_univariate_kde(data, bw, gridsize, cut, clip)
# Make sure the density is nonnegative
y = np.amax(np.c_[np.zeros_like(y), y], axis=1)
return x, y
def _statsmodels_univariate_kde(data, kernel, bw, gridsize, cut, clip,
cumulative=False):
"""Compute a univariate kernel density estimate using statsmodels."""
fft = kernel == "gau"
kde = smnp.KDEUnivariate(data)
kde.fit(kernel, bw, fft, gridsize=gridsize, cut=cut, clip=clip)
if cumulative:
grid, y = kde.support, kde.cdf
else:
grid, y = kde.support, kde.density
return grid, y
def _scipy_univariate_kde(data, bw, gridsize, cut, clip):
"""Compute a univariate kernel density estimate using scipy."""
try:
kde = stats.gaussian_kde(data, bw_method=bw)
except TypeError:
kde = stats.gaussian_kde(data)
if bw != "scott": # scipy default
msg = ("Ignoring bandwidth choice, "
"please upgrade scipy to use a different bandwidth.")
warnings.warn(msg, UserWarning)
if isinstance(bw, string_types):
bw = "scotts" if bw == "scott" else bw
bw = getattr(kde, "%s_factor" % bw)() * np.std(data)
grid = _kde_support(data, bw, gridsize, cut, clip)
y = kde(grid)
return grid, y
def _kde_support(data, bw, gridsize='default', cut=3, clip=None):
"""Establish support for a kernel density estimate."""
support_min = max(data.min() - bw * cut, clip[0])
support_max = min(data.max() + bw * cut, clip[1])
return np.linspace(support_min, support_max, gridsize)
def get_class_kdes(values, classes, ensure_norm=True, **kde_kws):
"""
KDEs for values with associated classes. Computes the KDE of each class
then weights each KDE by the number of points in each class. Also
compute the overall KDE.
Output
------
cl_kdes, overall_kde
cl_kdes: dict
KDE for each class. Keys are class labels.
overall_kde: dict
Overall KDE (i.e. ignoring class labels)
"""
# TODO: do we really need ensure_norm
overall_grid, overall_y = _univariate_kde(values, **kde_kws)
if ensure_norm:
overall_y = norm_kde(grid=overall_grid, y=overall_y)
overall_kde = {'grid': overall_grid, 'y': overall_y}
cl_props = Proportions(classes)
cl_kdes = {}
for cl in np.unique(classes):
cl_mask = classes == cl
cl_values = values[cl_mask]
cl_grid, cl_y = _univariate_kde(cl_values, **kde_kws)
if ensure_norm:
cl_y = norm_kde(grid=cl_grid, y=cl_y)
# weight area under KDE by number of samples
cl_y *= cl_props[cl]
cl_kdes[cl] = {'grid': cl_grid,
'y': cl_y}
return cl_kdes, overall_kde
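# Sketch of how the class-conditional KDEs compose (illustrative data):
#
#   values = np.random.randn(200)
#   classes = np.repeat(['a', 'b'], 100)
#   cl_kdes, overall = get_class_kdes(values, classes, gridsize=200)
#
# Each cl_kdes[cl]['y'] integrates to that class's proportion of the samples, so
# the per-class curves stack up to (approximately) the overall density.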
def norm_kde(grid, y):
tot = trapz(y=y, x=grid)
return y / tot
def _univariate_kdeplot(x, y, shade=True, vertical=False,
legend=True, ax=None, **kwargs):
"""Plot a univariate kernel density estimate on one of the axes."""
if ax is None:
ax = plt.gca()
# Make sure the density is nonnegative
y = np.amax(np.c_[np.zeros_like(y), y], axis=1)
# Flip the data if the plot should be on the y axis
if vertical:
x, y = y, x
# Check if a label was specified in the call
label = kwargs.pop("label", None)
# Otherwise check if the data object has a name
if label is None and hasattr(x, "name"):
label = x.name
# Decide if we're going to add a legend
legend = label is not None and legend
label = "_nolegend_" if label is None else label
# Use the active color cycle to find the plot color
facecolor = kwargs.pop("facecolor", None)
line, = ax.plot(x, y, **kwargs)
color = line.get_color()
line.remove()
kwargs.pop("color", None)
facecolor = color if facecolor is None else facecolor
# Draw the KDE plot and, optionally, shade
ax.plot(x, y, color=color, label=label, **kwargs)
shade_kws = dict(
facecolor=facecolor,
alpha=kwargs.get("alpha", 0.25),
clip_on=kwargs.get("clip_on", True),
zorder=kwargs.get("zorder", 1),
)
if shade:
if vertical:
ax.fill_betweenx(y, 0, x, **shade_kws)
else:
ax.fill_between(x, 0, y, **shade_kws)
# Set the density axis minimum to 0
if vertical:
ax.set_xlim(0, auto=None)
else:
ax.set_ylim(0, auto=None)
# Draw the legend here
handles, labels = ax.get_legend_handles_labels()
if legend and handles:
ax.legend(loc="best")
return ax
def _univariate_conditional_kdeplot(values, classes,
cl_labels=None,
cl_palette=None,
include_overall=True,
shade=True,
vertical=False,
legend=True,
ax=None,
kde_kws={},
kde_plt_kws={}):
cl_kdes, overall_kde = get_class_kdes(values, classes, **kde_kws)
# in case 'overall' is one of the classes
if 'overall' in np.unique(classes):
overall_name = ''.join(np.unique(classes))
else:
overall_name = 'overall'
cl_kdes[overall_name] = overall_kde
# plot the KDE for each class
for cl in cl_kdes.keys():
_kwargs = kde_plt_kws.copy()
_kwargs['shade'] = shade
x = cl_kdes[cl]['grid']
y = cl_kdes[cl]['y']
if cl_palette is not None and cl in cl_palette:
_kwargs['color'] = cl_palette[cl]
if cl_labels is not None and cl in cl_labels:
_kwargs['label'] = cl_labels[cl]
else:
_kwargs['label'] = cl
if cl == overall_name:
if not include_overall:
continue
_kwargs['ls'] = '--'
# _kwargs['alpha'] = .2
_kwargs['zorder'] = 1
_kwargs['label'] = None # 'overall'
_kwargs['color'] = 'gray'
_kwargs['shade'] = False
_univariate_kdeplot(x=x, y=y,
vertical=vertical,
legend=legend, ax=ax, **_kwargs)
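# Plotting sketch (illustrative palette and data): per-class filled KDEs plus the
# dashed grey overall curve drawn by _univariate_conditional_kdeplot.
#
#   fig, ax = plt.subplots()
#   _univariate_conditional_kdeplot(values, classes,
#                                   cl_palette={'a': 'C0', 'b': 'C1'},
#                                   include_overall=True, ax=ax)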
| 2.390625 | 2 |
basics/np_vs_torch_comparison.py | s-mostafa-a/a | 5 | 12792591 | <reponame>s-mostafa-a/a
import numpy as np
import torch
import time
t1 = time.time()
for i in range(1000000):
x_tensor = torch.empty(5, 3)
t2 = time.time()
for i in range(1000000):
x_ndarr = np.empty((5, 3))
t3 = time.time()
print('making empty array comparison:')
delta1 = t2 - t1
delta2 = t3 - t2  # was t3 - t1, which also counted the torch loop in numpy's total
print(f'torch: {delta1} sec')
print(f'numpy: {delta2} sec')
print(f'''winner: {'torch' if delta1 < delta2 else 'numpy'}''')
# my computer's outputs (macbook pro without cuda):
# making empty array comparison:
# torch: 2.2384519577026367 sec
# numpy: 2.758033275604248 sec
# winner: torch
t4 = time.time()
for i in range(1000000):
x_tensor = torch.zeros(5, 3)
t5 = time.time()
for i in range(1000000):
x_ndarr = np.zeros((5, 3))
t6 = time.time()
print('making zeros array comparison:')
delta3 = t5 - t4
delta4 = t6 - t5
print(f'torch: {delta3} sec')
print(f'numpy: {delta4} sec')
print(f'''winner: {'torch' if delta3 < delta4 else 'numpy'}''')
# my computer's outputs (macbook pro without cuda):
# making zeros array comparison:
# torch: 3.497465133666992 sec
# numpy: 0.5160698890686035 sec
# winner: numpy
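# A sturdier timing sketch using timeit (added for illustration; the numbers above
# are from the original loops):
import timeit
empty_torch = timeit.timeit(lambda: torch.empty(5, 3), number=100000)
empty_numpy = timeit.timeit(lambda: np.empty((5, 3)), number=100000)
print(f'timeit empty -> torch: {empty_torch:.3f} sec, numpy: {empty_numpy:.3f} sec')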
| 2.4375 | 2 |
python/day14/extended_polymerization.py | aesdeef/advent-of-code-2021 | 2 | 12792592 | from collections import Counter
INPUT_FILE = "../../input/14.txt"
Ruleset = dict[str, str]
def parse_input() -> tuple[str, Ruleset]:
"""
Parses the input and returns the polymer template and the pair insertion rules
"""
with open(INPUT_FILE) as f:
template, _, *rules = f.read().splitlines()
ruleset = dict(rule.split(" -> ") for rule in rules)
return (template, ruleset)
def step(ruleset: Ruleset, pair_counter: Counter[str]) -> Counter[str]:
"""
Applies a single step to the given pair_counter
"""
new_pair_counter: Counter[str] = Counter()
for pair, count in pair_counter.items():
inserted = ruleset[pair]
first, second = pair
new_pair_counter[first + inserted] += count
new_pair_counter[inserted + second] += count
return new_pair_counter
def calculate_answer(template: str, pair_counter: Counter[str]) -> int:
"""
Calculates how many times each letter occurs by adding the counts of pairs
where the given letter comes first and 1 for the last letter of the original
template (which does not change), then subtracts the lowest count from the
highest count and returns the answer
"""
letter_counter = Counter(template[-1])
for pair, count in pair_counter.items():
first_letter, _ = pair
letter_counter[first_letter] += count
return max(letter_counter.values()) - min(letter_counter.values())
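# Worked micro-example (illustrative): for template "NNCB" the initial pair counter
# is {"NN": 1, "NC": 1, "CB": 1}. With a rule NN -> C, one step replaces the "NN"
# count by "NC" and "CN"; calculate_answer then tallies the first letter of every
# pair and adds one for the template's unchanging last character ("B").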
def solve(template: str, ruleset: Ruleset) -> tuple[int, int]:
"""
Calculates the required answers given the original template and the pair
insertion rules
"""
pairs = ("".join(pair) for pair in zip(template, template[1:]))
pair_counter = Counter(pairs)
for _ in range(10):
pair_counter = step(ruleset, pair_counter)
part1 = calculate_answer(template, pair_counter)
for _ in range(30):
pair_counter = step(ruleset, pair_counter)
part2 = calculate_answer(template, pair_counter)
return (part1, part2)
if __name__ == "__main__":
template, ruleset = parse_input()
part1, part2 = solve(template, ruleset)
print(part1)
print(part2)
| 3.828125 | 4 |
backend/auth/security.py | restato/bunnybook | 131 | 12792593 | from typing import Optional, Dict
import jwt
import sentry_sdk
from fastapi import HTTPException
from starlette import status
from starlette.requests import Request
from auth.models import Role
from auth.models import User
from config import cfg
def get_user(request: Request) -> User:
"""
Protect route from anonymous access, requiring and returning current
authenticated user.
:param request: web request
:return: current user, otherwise raise an HTTPException (status=401)
"""
return _check_and_extract_user(request)
def get_admin(request: Request) -> User:
"""
Allow access only to an 'admin' account, returning current
authenticated admin account data.
:param request: web request
:return: current admin user, otherwise raise an HTTPException (status=401)
"""
user = _check_and_extract_user(request)
if user.role != Role.ADMIN:
raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED)
return user
def get_optional_user(request: Request) -> Optional[User]:
"""
Return authenticated user or None if session is anonymous.
:param request: web request
:return: current user or None for anonymous sessions
"""
try:
return _check_and_extract_user(request)
except HTTPException:
if request.headers.get("Authorization"):
raise
def extract_user_from_token(access_token: str, verify_exp: bool = True) -> User:
"""
Extract User object from jwt token, with optional expiration check.
:param access_token: encoded access token string
:param verify_exp: whether to perform verification or not
:return: User object stored inside the jwt
"""
return User(**jwt.decode(
access_token,
key=cfg.jwt_secret,
algorithms=[cfg.jwt_algorithm],
options={"verify_exp": verify_exp})["user"])
def decode_jwt_refresh_token(
encoded_refresh_token: str,
verify_exp: bool = True) -> Dict:
"""
Decode an encoded refresh token, with optional expiration check.
:param encoded_refresh_token: encoded refresh token string
:param verify_exp: whether to perform verification or not
:return: decoded jwt refresh token as dictionary
"""
return jwt.decode(
encoded_refresh_token,
key=cfg.jwt_secret,
algorithms=[cfg.jwt_algorithm],
options={"verify_exp": verify_exp})
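# Illustrative round-trip (hypothetical payload; key/algorithm come from cfg):
#
#   token = jwt.encode({"user": user_payload, "exp": expiry},
#                      key=cfg.jwt_secret, algorithm=cfg.jwt_algorithm)
#   user = extract_user_from_token(token)            # raises ExpiredSignatureError when stale
#   claims = decode_jwt_refresh_token(refresh, verify_exp=False)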
def _check_and_extract_user(request: Request) -> User:
authorization_header = request.headers.get("Authorization")
if not authorization_header:
raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED)
try:
access_token = authorization_header.replace("Bearer ", "")
        user = extract_user_from_token(access_token)
if cfg.sentry_dsn:
sentry_sdk.set_user({
"id": user.id,
"username": user.username,
"email": user.email,
"ip_address": request.client.host
})
return user
except jwt.exceptions.ExpiredSignatureError:
raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED)
| 2.28125 | 2 |
built-in/ACL_TensorFlow/Official/cv/SSD_Resnet50_FPN_for_ACL/scripts/image_to_imgConf.py | Ascend/modelzoo | 12 | 12792594 | <gh_stars>10-100
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import argparse
import random
import os
from PIL import Image
import PIL
parser = argparse.ArgumentParser(description="SSD_mobilenet test single image test procedure.")
parser.add_argument("--save_conf_path", type=str, default="./img_info",
help="The path of the result.json.")
parser.add_argument("--intput_file_path", type=str, default="./acl/data",
help="The path of inference input bin file.")
args = parser.parse_args()
def get_reasoning_data(image_path):
img_files = []
exts = ['jpg', 'png', 'jpeg', 'JPG','JPEG']
for parent, dirnames, filenames in os.walk(os.path.join(image_path)):
for filename in filenames:
for ext in exts:
if filename.endswith(ext):
img_files.append(os.path.join(parent, filename))
break
print('Find {} images'.format(len(img_files)))
return img_files
def main():
img_info_path = args.save_conf_path
img_path = args.intput_file_path
img_list = get_reasoning_data(img_path)
f = open(img_info_path, "w+")
i = 0
for img_fn in img_list:
try:
img_name = img_fn.split("/")[-1].split(".")[0]
img_src = Image.open(img_fn)
            im_width, im_height = img_src.size
f.write(str(i) + " " + img_name + " " + str(im_width) + " " + str(im_height) )
f.write('\n')
except:
print("Error reading image {}!".format(im_fn))
continue
i = i + 1
f.close()
if __name__ == '__main__':
main()
| 2.265625 | 2 |
src/commands/get_ast.py | PranjalPansuriya/JavaScriptEnhancements | 690 | 12792595 | import sublime, sublime_plugin
import os
from ..libs import util
from ..libs import NodeJS
from ..libs import javaScriptEnhancements
from ..libs.global_vars import *
class JavascriptEnhancementsGetAstCommand(sublime_plugin.TextCommand):
def run(self, edit, **args):
view = self.view
flow_cli = "flow"
is_from_bin = True
chdir = ""
use_node = True
bin_path = ""
node = NodeJS(check_local=True)
result = node.execute_check_output(
flow_cli,
[
'ast',
'--from', 'sublime_text',
'--pretty'
],
is_from_bin=is_from_bin,
use_fp_temp=True,
fp_temp_contents=view.substr(sublime.Region(0, view.size())),
is_output_json=False,
chdir=chdir,
bin_path=bin_path,
use_node=use_node
)
print(result[1])
def is_enabled(self, **args) :
view = self.view
if not util.selection_in_js_scope(view) or not DEVELOPER_MODE :
return False
return True
def is_visible(self, **args) :
view = self.view
if not util.selection_in_js_scope(view) or not DEVELOPER_MODE :
return False
return True | 2.0625 | 2 |
setup.py | NTBEL/diffusion-fit | 0 | 12792596 | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="diffusionfit",
version="0.7.0",
python_requires=">=3.9",
install_requires=[
"numpy",
"scipy",
"scikit-image",
"matplotlib",
"seaborn",
"pandas",
"numba",
"streamlit",
"plotly",
],
author="<NAME>",
author_email="<EMAIL>",
description="Python package for extract estimates of dye/peptide diffusion coefficients and loss rates from a time-sequence of fluorescence images.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/NTBEL/diffusion-fit",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"Operating System :: OS Independent",
],
)
| 1.546875 | 2 |
cmepy/lazy_dict.py | hegland/cmepy | 6 | 12792597 | """
Dictionary with lazy evaluation on access, via a supplied update function
"""
import itertools
class LazyDict(dict):
"""
A dictionary type that lazily updates values when they are accessed.
All the usual dictionary methods work as expected, with automatic lazy
updates occuring behind the scenes whenever values are read from the
dictionary.
The optional ``items`` argument, if specified, is a mapping instance used
to initialise the items in the :class:`LazyDict`.
The ``update_value`` argument required by the :class:`LazyDict` constructor
must be a function of the form:
update_value(k, existing_value, member) -> updated_value
This function is called whenever an item with the key ``k`` is read
from the :class:`LazyDict`. The second argument ``existing_value``, is
the value corresponding to the key ``k`` stored in the :class:`LazyDict`,
or ``None``, if the key ``k`` is not contained in the :class:`LazyDict`.
The third argument ``member`` is a boolean value indicating if there is
an existing value stored under the key ``k``.
This function is used as follows by the :class:`LazyDict`. Suppose that the
value ``v`` has been stored in a :class:`LazyDict` object ``lazy_dict``
under the key ``k``, that is, ``lazy_dict[k] = v``. Then subsequently
accessing this value in the usual manner::
v_updated = lazy_dict[k]
is equivalent to the following two statements::
lazy_dict[k] = update_value(k, v, (k in lazy_dict))
v_updated = update_value(k, v, (k in lazy_dict))
Observe how the value stored in the :class:`LazyDict` under the key ``k``
is first updated, using the provided function,
with the updated value then being the one returned.
"""
def __init__(self, update_value, items = None):
"""
Returns a LazyDict using the specified ``update_value`` function
and optional initial dictionary arguments.
"""
self.update_value = update_value
if items is None:
dict.__init__(self)
else:
            dict.__init__(self, items)
def __getitem__(self, key):
member = dict.__contains__(self, key)
if member:
existing_value = dict.__getitem__(self, key)
else:
existing_value = None
# ensure measurement is up to date
updated_value = self.update_value(key, existing_value, member)
self[key] = updated_value
return updated_value
def copy(self):
return LazyDict(self.update_value, dict.copy(self))
def itervalues(self):
return itertools.imap((lambda k : self[k]), dict.iterkeys(self))
def iteritems(self):
return itertools.imap((lambda k : (k, self[k])), dict.iterkeys(self))
def pop(self, *args):
n_args = len(args)
if n_args < 1:
raise TypeError('pop expected at least 1 argument, got %d' % n_args)
if n_args > 2:
raise TypeError('pop expected at most 2 arguments, got %d' % n_args)
k = args[0]
if k in self:
value = self[k]
del self[k]
return value
else:
if n_args == 2:
return args[1]
else:
raise KeyError(str(k))
def popitem(self):
key, value = dict.popitem(self)
self[key] = value
updated_value = self[key]
del self[key]
return key, updated_value
def setdefault(self, k, x=None):
if k in self:
return self[k]
else:
self[k] = x
return x
def get(self, k, x=None):
if k in self:
return self[k]
else:
return x
def values(self):
return list(self.itervalues())
def items(self):
return list(self.iteritems())
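if __name__ == '__main__':
    # Minimal demonstration sketch: stored values are doubled on every read.
    def _double_on_read(key, existing_value, member):
        return existing_value * 2 if member else 0

    d = LazyDict(_double_on_read)
    d['x'] = 3
    print(d['x'])   # 6  -- the stored value is updated first, then returned
    print(d['x'])   # 12 -- updated again on the next access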
| 4.28125 | 4 |
pyvisdk/enums/entity_reference_entity_type.py | Infinidat/pyvisdk | 0 | 12792598 |
########################################
# Automatically generated, do not edit.
########################################
from pyvisdk.thirdparty import Enum
EntityReferenceEntityType = Enum(
'cluster',
'datacenter',
'datastore',
'host',
'nasMount',
'resourcePool',
'scsiAdapter',
'scsiPath',
'scsiTarget',
'scsiVolume',
'storagePod',
'vm',
'vmFile',
)
| 1.507813 | 2 |
AutoLookdev_Rman_v01_0/RenderMan_AutoLookDev.py | mteillet/AutoLookDev_Rman | 0 | 12792599 | <filename>AutoLookdev_Rman_v01_0/RenderMan_AutoLookDev.py<gh_stars>0
import maya.cmds as cmds
import os
from shutil import copyfile
from functools import partial
#### Copyright 2019 DUBOIX <NAME> and <NAME> - Licensed under the Apache License, Version 2.0 (the "License");
#### you may not use this file except in compliance with the License. You may obtain a copy of the License at:
#### http://www.apache.org/licenses/LICENSE-2.0
def main():
# Kill existing window if it is already displayed on the screen
if cmds.window("winID", exists = True):
cmds.deleteUI("winID")
# Window
windowWidth = 400
window = cmds.window("winID", title="Renderman_Auto_Lookdev", iconName='RmanLookDev', widthHeight=(windowWidth, 400) )
#### Header ####
cmds.rowColumnLayout( numberOfColumns=1, columnWidth=[(1, windowWidth)])
cmds.text( label = "Renderman Auto Lookdev", font = "boldLabelFont", backgroundColor = [0.290, 0.705, 0.909], enableBackground = True, height = 80)
cmds.separator()
# Layout
cmds.rowColumnLayout( numberOfColumns=1 )
cmds.separator( height = 40 )
# AutoLookdev
cmds.button(label = "AutoLookdev", command = 'lookdevAuto()')
# Layout
cmds.separator( height = 40 )
# Set Global Scale
# Hidden slider, serves only to define varible
scaleSlider = cmds.floatSliderGrp(label = "Lookdev Global Scale", min=0.001, max=100, value=1, precision = 3, field = True, dragCommand ='placeholder', changeCommand = 'placeholder')
# Slider calling the function as the variable is now defined
cmds.floatSliderGrp(scaleSlider, edit = True, dragCommand = partial(updateSize, scaleSlider), changeCommand = partial(updateSize, scaleSlider))
# Force update if the value is entered as string
# Layout
cmds.separator( height = 40 )
# Set background Type
bgStyle = cmds.optionMenu( label="Background Type", changeCommand="placeholder")
cmds.menuItem( label="Plane" )
cmds.menuItem( label="Cyclo" )
cmds.optionMenu(bgStyle, edit = True, changeCommand = partial(updateBgType, bgStyle))
# Set Cyclo Type
cycloStyle = cmds.optionMenu(label = "Cyclo Type", changeCommand = 'placeholder')
cmds.menuItem(label = "Constant Color")
cmds.menuItem(label = "Checker")
cmds.optionMenu(cycloStyle, edit = True, changeCommand = partial(updateCycloType, cycloStyle))
# Set Background Color
colorStyle = cmds.floatSliderGrp(label = "Background Value", min = 0, max = 1, value = 0.02, precision = 3, field = True, dragCommand = 'placeholder', changeCommand = 'placeholder')
cmds.floatSliderGrp(colorStyle, edit = True, dragCommand = partial(udateBgValue, colorStyle), changeCommand = partial(udateBgValue, colorStyle))
# Layout
cmds.separator( height = 40 )
# Set number of Shading balls
shadingBalls = cmds.optionMenu(label = "Shader Balls", changeCommand = 'placeholder')
cmds.menuItem(label = "Full")
cmds.menuItem(label = "Minimal")
cmds.optionMenu(shadingBalls, edit = True, changeCommand = partial(shadingBallType, shadingBalls))
# Set the toggle for the colorchecker
colorCheck = cmds.optionMenu(label ="Color Checker", changeCommand = 'placeholder')
cmds.menuItem(label = "On")
cmds.menuItem(label = "Off")
cmds.optionMenu(colorCheck, edit = True, changeCommand = partial(colorCheckerToggle, colorCheck))
# Layout
cmds.separator( height = 40 )
# Set the camera focal length and compensates for it
cameraFocal = cmds.optionMenu(label = "Camera focal length", changeCommand = 'placeholder')
cmds.menuItem(label = "classic - 50 mm")
cmds.menuItem(label = "telelens - 85mm")
cmds.menuItem(label = "widelens - 28mm")
cmds.optionMenu(cameraFocal, edit = True, changeCommand = partial(setCamFocal, cameraFocal))
    tweakHeight = cmds.floatSliderGrp(label="Tweak Camera Height", min = -50, max = 50, value = 0, step = 0.5, field = True, dragCommand = 'placeholder', changeCommand = 'placeholder')
cmds.floatSliderGrp(tweakHeight, edit = True, dragCommand = partial(camHeight, tweakHeight), changeCommand = partial(camHeight, tweakHeight))
tweakDepth = cmds.floatSliderGrp(label="Tweak Camera Dolly", min = -100, max = 100, value = 0, step = 0.5, field = True, dragCommand = 'placeholder', changeCommand = 'placeholder' )
cmds.floatSliderGrp(tweakDepth, edit = True, dragCommand = partial(camDolly, tweakDepth), changeCommand = partial(camDolly, tweakDepth))
# Layout
cmds.separator( height = 40 )
# Change HDRI
changeHDR = cmds.button(label = "Change HDR", command = "changeHDRdef()")
# Reset
# Show window - Need update
cmds.showWindow( window )
#### Auto Lookdev set up definitions ####
def lookdevAuto():
# Get srcIMG folder in project and check if AutoLookdev folder is present
project = getProjectPath()
srcIMG = project + "sourceimages/"
srcIMGlookDev = srcIMG + "RmanAutoLookdev"
# Get the script Path folder before copying the HDRI
scriptFolder = getScriptPath()
# Create the folder if needed
checkFolderExists(srcIMGlookDev)
# Copy the HDRI and ColorChecker if needed
srcIMGhdrTex = srcIMGlookDev + "/DefaultHDR.hdr.tex"
srcIMGhdrHdr = srcIMGlookDev + "/DefaultHDR.hdr"
srcIMGcolorCheckerTex = srcIMGlookDev + "/DefaultColorChecker.png.tex"
srcIMGcolorCheckerPng = srcIMGlookDev + "/DefaultColorChecker.png"
scriptHdrTex = scriptFolder + "DefaultHDR.hdr.tex"
scriptHdrHdr = scriptFolder + "DefaultHDR.hdr"
scriptColorCheckerTex = scriptFolder + "DefaultColorChecker.png.tex"
scriptColorCheckerPng = scriptFolder + "DefaultColorChecker.png"
checkHdrExists(scriptHdrTex, scriptHdrHdr, srcIMGhdrTex, srcIMGhdrHdr, scriptColorCheckerTex, scriptColorCheckerPng, srcIMGcolorCheckerTex, srcIMGcolorCheckerPng)
# Check if the Lookdev scene exists and copies it if not
scriptScene = scriptFolder + "Lookdev_Scene_v01.ma"
projectScene = srcIMGlookDev + "/Lookdev_Scene_v01.ma"
checkSceneExists(scriptScene, projectScene)
# Import Lookdev as reference if it does not exist in the scene
importLookdev(projectScene)
# Set the shadow output in the Beauty Alpha
setRmanShadow()
# Get the string for the maya project path
def getProjectPath():
realProject = cmds.workspace( q=True, rootDirectory=True )
return(realProject)
# Get the string for the script directory
def getScriptPath():
scriptPath = os.path.expanduser('~') + "/maya/2019/scripts/AutoLookdev_Rman_v01_0/"
return(scriptPath)
# Check if the RmanAutoLookdev folder exists in the project
def checkFolderExists(srcIMGlookDev):
if not os.path.exists(srcIMGlookDev):
os.makedirs(srcIMGlookDev)
else:
print("RmanAutoLookdev folder already exists")
# Check if the HDRI & ColorChecker exists in the RmanAutoLookdev folder
def checkHdrExists(scriptHdrTex, scriptHdrHdr, srcIMGhdrTex, srcIMGhdrHdr, scriptColorCheckerTex, scriptColorCheckerPng, srcIMGcolorCheckerTex, srcIMGcolorCheckerPng):
# Check and copy HDR
if not os.path.exists(srcIMGhdrTex):
copyfile(scriptHdrTex, srcIMGhdrTex)
copyfile(scriptHdrHdr, srcIMGhdrHdr)
else:
print('default HDR is already in the project RmanAutoLookdev folder')
# Check and copy ColorChecker
if not os.path.exists(srcIMGcolorCheckerTex):
copyfile(scriptColorCheckerTex, srcIMGcolorCheckerTex)
copyfile(scriptColorCheckerPng, srcIMGcolorCheckerPng)
else:
print('default Color Checker is already in the project RmanAutoLookdev folder')
def checkSceneExists(scriptScene, projectScene):
# Check and copy HDR
if not os.path.exists(projectScene):
copyfile(scriptScene, projectScene)
else:
print('Lookdev scene is already in the project RmanAutoLookdev folder')
# Check and copy ColorChecker
# Importing the lookdev as reference
def importLookdev(projectScene):
print("Import Lookdev as reference")
cmds.file(projectScene, r=True, uns = False )
# Set the output of the shadows in the beauty alpha and deactivate the learn light from results
def setRmanShadow():
print("Set Renderman Shadow output in Beauty's Alpha")
cmds.setAttr("rmanGlobals.outputShadowAOV", 1)
cmds.setAttr("rmanGlobals.learnLightSelection", 0)
#### Update Lookdev Size ####
def updateSize(scaleSlider, *args):
globalScale = (cmds.floatSliderGrp(scaleSlider, q=True, v=True))
print(globalScale)
cmds.setAttr("Lookdev_Scene_v01_Rman_Lookdev_CTRL.Lookdev_GlobalScale", globalScale)
#### Update Background Type ####
def updateBgType(bgStyle, *args):
bgType = (cmds.optionMenu(bgStyle, q=True, v=True))
if bgType == "Plane":
print("Setting the Background to infinite")
cmds.setAttr("Lookdev_Scene_v01_Rman_Lookdev_CTRL.BackgroundType", 0)
else:
print("Setting the Background to Cyclo ")
cmds.setAttr("Lookdev_Scene_v01_Rman_Lookdev_CTRL.BackgroundType", 1)
#### Update Cyclo Type ####
def updateCycloType(cycloStyle, *args):
cycloType = (cmds.optionMenu(cycloStyle, q=True, v=True))
if cycloType == "Constant Color":
print("Setting the Cyclo to constant color")
cmds.setAttr("Lookdev_Scene_v01_Rman_Lookdev_CTRL.Lookdev_Cyclo_Type", 1)
else :
print("Setting the Cyclo to Grid texture")
cmds.setAttr("Lookdev_Scene_v01_Rman_Lookdev_CTRL.Lookdev_Cyclo_Type", 0)
#### Update Background Color ####
def udateBgValue(colorStyle, *args):
bgValue = (cmds.floatSliderGrp(colorStyle, q=True, v=True))
print("Setting the Background Value to "+ str(bgValue))
cmds.setAttr("Lookdev_Scene_v01_Rman_Lookdev_CTRL.Lookdev_Background_Color", bgValue)
def shadingBallType(shadingBalls, *args):
ballsType = (cmds.optionMenu(shadingBalls, q=True, v=True))
if ballsType == "Full":
print("Setting the Shader Balls to full type")
cmds.setAttr("Lookdev_Scene_v01_Rman_Lookdev_CTRL.Lookdev_ShaderBalls", 0)
else :
print("Setting the Shader Balls to minimal type")
cmds.setAttr("Lookdev_Scene_v01_Rman_Lookdev_CTRL.Lookdev_ShaderBalls", 1)
def colorCheckerToggle(colorCheck, *args):
colorCheckerState = (cmds.optionMenu(colorCheck, q=True, v=True))
if colorCheckerState == "On":
print("Showing the ColorChecker")
cmds.setAttr("Lookdev_Scene_v01_Rman_Lookdev_CTRL.Lookdev_ColorChecker", 0)
else:
print("Hiding the ColorChecker")
cmds.setAttr("Lookdev_Scene_v01_Rman_Lookdev_CTRL.Lookdev_ColorChecker", 1)
def setCamFocal(cameraFocal, *args):
camFocLength = (cmds.optionMenu(cameraFocal, q=True, v=True))
if camFocLength == "classic - 50 mm":
print("Setting camera focal length to 50mm")
cmds.setAttr("Lookdev_Scene_v01_Lookdev_CamShape.focalLength", 50)
cmds.setAttr("Lookdev_Scene_v01_Lookdev_Cam_focal_compensator.translateZ", 133.103)
else:
if camFocLength == "telelens - 85mm":
print("Setting camera focal length to 85mm")
cmds.setAttr("Lookdev_Scene_v01_Lookdev_CamShape.focalLength", 85)
cmds.setAttr("Lookdev_Scene_v01_Lookdev_Cam_focal_compensator.translateZ", 141.465)
else:
print("setting camera focal length to 28mm")
cmds.setAttr("Lookdev_Scene_v01_Lookdev_CamShape.focalLength", 28)
cmds.setAttr("Lookdev_Scene_v01_Lookdev_Cam_focal_compensator.translateZ", 127.913)
def camHeight(tweakHeight, *args):
changeHeight = (cmds.floatSliderGrp(tweakHeight, q=True, v=True))
cmds.setAttr("Lookdev_Scene_v01_Lookdev_CameraScale_LOC.translateY", changeHeight)
def camDolly(tweakDepth, *args):
changeDolly = (cmds.floatSliderGrp(tweakDepth, q=True, v=True))
cmds.setAttr("Lookdev_Scene_v01_Lookdev_CameraScale_LOC.translateZ", changeDolly)
def changeHDRdef():
# Get the new HDR path
project = getProjectPath()
srcIMG = project + "sourceimages/"
file = cmds.fileDialog2(fileFilter = "*.hdr", dialogStyle = 1, fileMode = 1, dir = srcIMG )
# Change the path to the new HDR
if __name__ == '__main__':
main() | 2.34375 | 2 |
pysirix/__init__.py | sirixdb/sirix-python-client | 7 | 12792600 | <reponame>sirixdb/sirix-python-client
import httpx
from pysirix.sirix import Sirix
from pysirix.database import Database
from pysirix.resource import Resource
from pysirix.json_store import JsonStoreSync, JsonStoreAsync
from pysirix.constants import Insert, DBType, TimeAxisShift
from pysirix.errors import SirixServerError
from pysirix.types import (
QueryResult,
Commit,
Revision,
InsertDiff,
ReplaceDiff,
UpdateDiff,
DeleteDiff,
Metadata,
MetaNode,
)
def sirix_sync(username: str, password: str, client: httpx.Client,) -> Sirix:
"""
:param username: the username registered with keycloak for this application.
:param password: the password registered with keycloak for this application.
:param client: an ``httpx.Client`` instance. You should instantiate the instance with
the ``base_url`` param as the url for the sirix database.
"""
s = Sirix(username=username, password=password, client=client,)
s.authenticate()
return s
async def sirix_async(
username: str, password: str, client: httpx.AsyncClient,
) -> Sirix:
"""
:param username: the username registered with keycloak for this application.
:param password: the password registered with keycloak for this application.
:param client: an ``httpx.AsyncClient`` instance. You should instantiate the instance with
the ``base_url`` param as the url for the sirix database.
"""
s = Sirix(username=username, password=password, client=client,)
await s.authenticate()
return s
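# Minimal usage sketch (not part of the package): the base_url and credentials
# below are placeholder assumptions for a locally running SirixDB server.
#
#   import httpx, pysirix
#   client = httpx.Client(base_url="https://localhost:9443")
#   sirix = pysirix.sirix_sync("admin", "admin", client)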
__all__ = [
"sirix_sync",
"sirix_async",
"Sirix",
"SirixServerError",
"Database",
"Resource",
"JsonStoreSync",
"JsonStoreAsync",
"Insert",
"DBType",
"QueryResult",
"Commit",
"Revision",
"InsertDiff",
"ReplaceDiff",
"UpdateDiff",
"DeleteDiff",
"Metadata",
"MetaNode",
"TimeAxisShift",
]
| 2.609375 | 3 |
app/utility/answer_forms.py | syth0le/tg_reminder_bot | 0 | 12792601 | <gh_stars>0
from typing import Union, Optional
from app.utility.schemas import TemporaryReminder, PermanentReminder, Bookmark
from app.utility.stickers import stickers_recognize
def answer_forms(answer: Optional[Union[TemporaryReminder, PermanentReminder, Bookmark]] = None,
adding: Optional[bool] = False,
element: Optional[object] = None,
position: Optional[int] = None) -> str:
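    """Build a one-line summary string for a reminder or bookmark.

    When ``adding`` is True the data comes from a raw ``element`` tuple plus its
    ``position`` in a list; otherwise it is read from the ``answer`` schema object.
    """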
if adding:
stick_done, stick_type = stickers_recognize(element[4], element[2])
answer_message = message_form(reminder_type=element[2],
stick_done=stick_done,
stick_type=stick_type,
element=element,
position=position)
else:
stick_done, stick_type = stickers_recognize(answer.is_done, answer.type)
if not answer.type == 'book':
answer_message = f'{stick_done} {stick_type} - {answer.title}:\n{answer.date}\n id:{answer.id}'
else:
answer_message = f'{stick_done} {stick_type} - {answer.title}:\n id:{answer.id}'
return answer_message
def message_form(reminder_type: str,
stick_done: str,
stick_type: str,
element: object,
position: Optional[int] = None,
) -> str:
if reminder_type == 'temp':
answer_message = f'{position}) {stick_done} {stick_type} - {element[1]}:\n{element[3]}\n'
elif reminder_type == 'perm':
answer_message = f'{position}) {stick_done} {stick_type} - {element[1]}:\n{element[3]}\n{element[5]}\n'
elif reminder_type == 'book':
answer_message = f'{position}) {stick_done} {stick_type} - {element[1]}\n'
else:
answer_message = f'{position}) {stick_done} {stick_type} - {element[1]}:\n{element[3]}\n'
return answer_message
| 2.453125 | 2 |
src/Bicho.py | victorlujan/Dise-odeSoftwarePatrones | 0 | 12792602 | class Bicho:
    def __init__(self):
        self.vida = 0
        self.modo = None
        self.ataque = 10
        self.posicion = None
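    # The methods below simply delegate to the current mode object (State pattern),
    # so the creature's behaviour depends on whichever mode is assigned to self.modo.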
def hablar(self):
self.modo.hablar()
def dormir(self):
self.modo.dormir()
def atacar(self):
self.modo.atacar()
def esPerezoso(self):
return self.modo.esPerezoso()
def esAgresivo(self):
return self.modo.esAgresivo()
def recorrer(self):
self.modo.printOn()
def actua(self):
self.modo.actua(self)
def mover(self):
self.modo.mover()
| 3.03125 | 3 |
scripts/ao3_work_ids.py | lyubadimitrova/ffpopularity | 0 | 12792603 | <filename>scripts/ao3_work_ids.py<gh_stars>0
# Retrieve fic ids from an AO3 search
# Will return in searched order
# Saves ids to a csv for later use e.g. to retrieve fic text
# Options:
# Only retrieve multichapter fics
# Modify search to include a list of tags
# (e.g. you want all fics tagged either "romance" or "fluff")
from bs4 import BeautifulSoup
import re
import time
import requests
import csv
import sys
import datetime
import argparse
page_empty = False
base_url = ""
url = ""
num_requested_fic = 0
num_recorded_fic = 0
csv_name = ""
multichap_only = ""
tags = []
# keep track of all processed ids to avoid repeats:
# this is separate from the temporary batch of ids
# that are written to the csv and then forgotten
seen_ids = []
#
# Ask the user for:
# a url of a works listed page
# e.g.
# https://archiveofourown.org/works?utf8=%E2%9C%93&work_search%5Bsort_column%5D=word_count&work_search%5Bother_tag_names%5D=&work_search%5Bquery%5D=&work_search%5Blanguage_id%5D=&work_search%5Bcomplete%5D=0&commit=Sort+and+Filter&tag_id=Harry+Potter+-+J*d*+K*d*+Rowling
# https://archiveofourown.org/tags/Harry%20Potter%20-%20J*d*%20K*d*%20Rowling/works?commit=Sort+and+Filter&page=2&utf8=%E2%9C%93&work_search%5Bcomplete%5D=0&work_search%5Blanguage_id%5D=&work_search%5Bother_tag_names%5D=&work_search%5Bquery%5D=&work_search%5Bsort_column%5D=word_count
# how many fics they want
# what to call the output csv
#
# If you would like to add additional search terms (that is, fics should contain at least one of them, but not necessarily all),
# specify these in the tag csv, one per row.
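# Example invocation (a sketch; the URL is shortened and the file names are placeholders):
#   python ao3_work_ids.py "https://archiveofourown.org/works?...&tag_id=..." --out_csv work_ids --num_to_retrieve 100 --header "my_username"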
def get_args():
global base_url
global url
global csv_name
global num_requested_fic
global multichap_only
global tags
parser = argparse.ArgumentParser(description='Scrape AO3 work IDs given a search URL')
parser.add_argument(
'url', metavar='URL',
help='a single URL pointing to an AO3 search page')
parser.add_argument(
'--out_csv', default='work_ids',
help='csv output file name')
parser.add_argument(
'--header', default='',
help='user http header')
parser.add_argument(
'--num_to_retrieve', default='a',
help='how many fic ids you want')
parser.add_argument(
'--multichapter_only', default='',
help='only retrieve ids for multichapter fics')
parser.add_argument(
'--tag_csv', default='',
help='provide an optional list of tags; the retrieved fics must have one or more such tags')
args = parser.parse_args()
url = args.url
csv_name = str(args.out_csv)
# defaults to all
    if str(args.num_to_retrieve) == 'a':
num_requested_fic = -1
else:
num_requested_fic = int(args.num_to_retrieve)
multichap_only = str(args.multichapter_only)
if multichap_only != "":
multichap_only = True
else:
multichap_only = False
tag_csv = str(args.tag_csv)
if (tag_csv):
with open(tag_csv, "r") as tags_f:
tags_reader = csv.reader(tags_f)
for row in tags_reader:
tags.append(row[0])
header_info = str(args.header)
return header_info
#
# navigate to a works listed page,
# then extract all work ids
#
def get_ids(header_info=''):
global page_empty
headers = {'user-agent' : header_info}
req = requests.get(url, headers=headers)
soup = BeautifulSoup(req.text, "lxml")
# some responsiveness in the "UI"
sys.stdout.write('.')
sys.stdout.flush()
works = soup.find_all(class_="work blurb group")
# see if we've gone too far and run out of fic:
    if len(works) == 0:
page_empty = True
# process list for new fic ids
ids = []
for tag in works:
if (multichap_only):
# FOR MULTICHAP ONLY
chaps = tag.find('dd', class_="chapters")
if (chaps.text != u"1/1"):
t = tag.get('id')
t = t[5:]
if not t in seen_ids:
ids.append(t)
seen_ids.append(t)
else:
t = tag.get('id')
t = t[5:]
if not t in seen_ids:
ids.append(t)
seen_ids.append(t)
return ids
#
# update the url to move to the next page
# note that if you go too far, ao3 won't error,
# but there will be no works listed
#
def update_url_to_next_page():
global url
key = "page="
start = url.find(key)
# there is already a page indicator in the url
    if start != -1:
# find where in the url the page indicator starts and ends
page_start_index = start + len(key)
page_end_index = url.find("&", page_start_index)
# if it's in the middle of the url
        if page_end_index != -1:
page = int(url[page_start_index:page_end_index]) + 1
url = url[:page_start_index] + str(page) + url[page_end_index:]
# if it's at the end of the url
else:
page = int(url[page_start_index:]) + 1
url = url[:page_start_index] + str(page)
# there is no page indicator, so we are on page 1
else:
# there are other modifiers
        if url.find("?") != -1:
url = url + "&page=2"
# there an no modifiers yet
else:
url = url + "?page=2"
# modify the base_url to include the new tag, and save to global url
def add_tag_to_url(tag):
global url
key = "&work_search%5Bother_tag_names%5D="
    if base_url.find(key) != -1:
start = base_url.find(key) + len(key)
new_url = base_url[:start] + tag + "%2C" + base_url[start:]
url = new_url
else:
url = base_url + "&work_search%5Bother_tag_names%5D=" + tag
#
# after every page, write the gathered ids
# to the csv, so a crash doesn't lose everything.
# include the url where it was found,
# so an interrupted search can be restarted
#
def write_ids_to_csv(ids):
global num_recorded_fic
with open(csv_name + ".csv", 'a') as csvfile:
wr = csv.writer(csvfile, delimiter=',')
for id in ids:
if (not_finished()):
wr.writerow([id, url])
num_recorded_fic = num_recorded_fic + 1
else:
break
#
# if you want everything, you're not done
# otherwise compare recorded against requested.
# recorded doesn't update until it's actually written to the csv.
# If you've gone too far and there are no more fic, end.
#
def not_finished():
if (page_empty):
return False
if (num_requested_fic == -1):
return True
else:
if (num_recorded_fic < num_requested_fic):
return True
else:
return False
#
# include a text file with the starting url,
# and the number of requested fics
#
def make_readme():
with open(csv_name + "_readme.txt", "w") as text_file:
text_file.write("url: " + url + "\n" + "num_requested_fic: " + str(num_requested_fic) + "\n" + "retreived on: " + str(datetime.datetime.now()))
# reset flags to run again
# note: do not reset seen_ids
def reset():
global page_empty
global num_recorded_fic
page_empty = False
num_recorded_fic = 0
def process_for_ids(header_info=''):
while(not_finished()):
# 5 second delay between requests as per AO3's terms of service
time.sleep(5)
ids = get_ids(header_info)
write_ids_to_csv(ids)
update_url_to_next_page()
def main():
header_info = get_args()
make_readme()
print ("processing...\n")
if (len(tags)):
for t in tags:
print ("Getting tag: ", t)
reset()
add_tag_to_url(t)
process_for_ids(header_info)
else:
process_for_ids(header_info)
print ("That's all, folks.")
main()
| 2.796875 | 3 |
NewGame/room.py | LivOriona/EscapeBoat-le-secret-du-Python-perdu | 0 | 12792604 | from exit import Exit
class Room:  # Room object
    def __init__(self, id, name, description):  # reminder: self is the object built from the Room blueprint
self.id = id
self.name = name
self.description = description
        self.pickables = {}  # pickable items start as an empty dict filled later; they are not parameters of __init__
self.inspectables = []
self.exits = {}
self.characters = []
    def addPickable(self, itemId, nbOfThisItem):  # method to add an item to a room
        numberOfThisItem = self.pickables.get(itemId, 0)  # get() returns the value stored under the key itemId,
        # or 0 if there is none
        numberOfThisItem += nbOfThisItem  # add nbOfThisItem more of this item to the count
        self.pickables[itemId] = numberOfThisItem  # store the count in this room's pickables (e.g. the 'kitchen') under the key itemId
def addInspectable(self, inspectableItem):
self.inspectables.append(inspectableItem)
def addCharacter(self, charactersName):
self.characters.append(charactersName)
    def addExit(self, destination, exitName):  # method to add an exit (one side only)
        newExit = Exit(self, destination)  # self here is this Room, so it refers to a single Room (e.g. the 'kitchen')
        # Create a new Exit object (like `new Exit()` in JS), passing the arguments its constructor expects,
        # and keep the new exit in newExit
        self.exits[exitName] = newExit  # store newExit in this object's exits dict (e.g. the 'kitchen'), under the key exitName
    def addDoubleExit(self, destination, exitName1, exitName2):  # method to add the same exit on both sides
        self.addExit(destination, exitName1)  # run addExit() on this room (e.g. the 'kitchen')
        destination.addExit(self, exitName2)  # destination is also a Room: add an exit from the destination back to the current room
@staticmethod
def addDoubleExitBuilder(source, destination, exitName1, exitName2):
def hiddenDoubleExit(game, source=source, destination=destination, exitName1=exitName1, exitName2=exitName2):
source.addDoubleExit(destination, exitName1, exitName2)
return hiddenDoubleExit
def __repr__(self):
return "Room("+self.name+")"
| 3.203125 | 3 |
region.py | Chandramouli-Das/Lending-Club-Case-Study | 0 | 12792605 | def finding_regions(state):
north_east = ['CT', 'NY', 'PA', 'NJ', 'RI','MA', 'MD', 'VT', 'NH', 'ME']
south_west = ['AZ', 'TX', 'NM', 'OK']
south_east = ['GA', 'NC', 'VA', 'FL', 'KY', 'SC', 'LA', 'AL', 'WV', 'DC', 'AR', 'DE', 'MS', 'TN' ]
west = ['CA', 'OR', 'UT','WA', 'CO', 'NV', 'AK', 'MT', 'HI', 'WY', 'ID']
mid_west = ['IL', 'MO', 'MN', 'OH', 'WI', 'KS', 'MI', 'SD', 'IA', 'NE', 'IN', 'ND']
if state in west:
return 'west'
elif state in south_west:
return 'south_west'
elif state in south_east:
return 'south_east'
elif state in mid_west:
return 'mid_west'
elif state in north_east:
return 'north_east'
| 3.234375 | 3 |
UsefulTools/Classify/scripts/vis.py | CharlesPikachu/CharlesFace | 13 | 12792606 | <filename>UsefulTools/Classify/scripts/vis.py<gh_stars>10-100
# Draw network structure
from graphviz import Digraph
import torch
from torch.autograd import Variable
from torchviz import make_dot
from nets.ResNets import ResNet
model = ResNet(resnet='resnet18', num_classes=10575, embeddings_num=128, img_size=224, is_fc=True, is_AvgPool=False)
x = Variable(torch.randn(1, 3, 224, 224), requires_grad=True)
y = model(x)
params_dict = dict(model.named_parameters())
params_dict['x'] = x
g = make_dot(y, params=params_dict)
g.view()
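# Note: make_dot builds a graphviz Digraph of the autograd graph for y; g.view()
# renders it and opens the result, which requires the Graphviz binaries to be installed.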
| 2.40625 | 2 |
doodle/views/admin/user.py | keakon/Doodle | 38 | 12792607 | <reponame>keakon/Doodle<gh_stars>10-100
# -*- coding: utf-8 -*-
from tornado.web import HTTPError
from doodle.core.models.comment import Comment
from doodle.core.models.user import User
from ..base_handler import AdminHandler
class BanUserHandler(AdminHandler):
def post(self, comment_id):
comment = Comment.get_by_id(comment_id)
        if not comment or not comment.user_id:
raise HTTPError(404)
user = User.get_by_id(comment.user_id)
if not user:
raise HTTPError(404)
if not user.banned:
user.banned = True
user.save(relative=False, transactional=False)
| 2.3125 | 2 |
uttut/toolkits/tests/test_partition_by_entities.py | Yoctol/uttut | 2 | 12792608 | from unittest import TestCase
from uttut.elements import Datum, Entity, Intent
from ..partition_by_entities import partition_by_entities
class PartitionByEntitiesTestCase(TestCase):
def setUp(self):
self.utterance = '我想訂明天從紐約飛到新加坡的機票'
self.entities = [
Entity(label=0, value='明天', start=3, end=5, replacements=['下禮拜二']),
Entity(label=1, value='紐約', start=6, end=8),
Entity(label=2, value='新加坡', start=10, end=13, replacements=['斯堪地那維亞', 'KIX']),
]
self.intents = [
Intent(label=0),
]
self.datum = Datum(
utterance=self.utterance,
intents=self.intents,
entities=self.entities,
)
self.datum_wo_entity = Datum(
utterance='薄餡亂入',
intents=[Intent(label=0)],
)
def test_partition_by_entities(self):
actual_parts, entity_names = partition_by_entities(self.datum, False)
expected_parts = [
['我想訂'],
['下禮拜二'],
['從'],
['紐約'],
['飛到'],
['斯堪地那維亞', 'KIX'],
['的機票'],
]
for exp_part, act_part in zip(expected_parts, actual_parts):
self.assertEqual(set(exp_part), set(act_part))
self.assertEqual(
entity_names,
[None, 0, None, 1, None, 2, None],
)
def test_partition_by_entities_include_orig(self):
actual_parts, entity_names = partition_by_entities(self.datum, True)
expected_parts = [
['我想訂'],
['明天', '下禮拜二'],
['從'],
['紐約'],
['飛到'],
['新加坡', '斯堪地那維亞', 'KIX'],
['的機票'],
]
for exp_part, act_part in zip(expected_parts, actual_parts):
self.assertEqual(set(exp_part), set(act_part))
self.assertEqual(
entity_names,
[None, 0, None, 1, None, 2, None],
)
def test_datum_wo_entity(self):
        # include origin
output = partition_by_entities(self.datum_wo_entity, True)
self.assertEqual(([['薄餡亂入']], [None]), output)
        # do not include origin
output = partition_by_entities(self.datum_wo_entity, False)
self.assertEqual(([['薄餡亂入']], [None]), output)
| 2.78125 | 3 |
lib/sunrise_sunset.py | leepenney/ha-pen | 0 | 12792609 | <filename>lib/sunrise_sunset.py
#from astral import Astral
import astral, pytz, os, sys, json
import datetime, time
from time import mktime
from datetime import timedelta
def sunset_for_today():
today = datetime.datetime.now()
offset = int((mktime(time.localtime())-mktime(time.gmtime()))/60/60)
long_lat = get_long_lat()
this_location = astral.Location()
this_location.longitude = long_lat['longitude']
this_location.latitude = long_lat['latitude']
this_location.solar_depression = 'civil'
return this_location.sunset(date=today) + timedelta(hours=offset)
def sunrise_for_today():
today = datetime.datetime.now()
offset = int((mktime(time.localtime())-mktime(time.gmtime()))/60/60)
long_lat = get_long_lat()
this_location = astral.Location()
this_location.longitude = long_lat['longitude']
this_location.latitude = long_lat['latitude']
this_location.solar_depression = 'civil'
return this_location.sunrise(date=today) + timedelta(hours=offset)
def get_long_lat():
#script_path = os.path.dirname(__file__)
#os.chdir(script_path)
settings_file_path = os.path.join(sys.path[0],'..','config/settings.json')
#settings_json = open('../config/settings.json')
settings_json = open(settings_file_path)
settings = json.load(settings_json)
settings_json.close()
return {'longitude': settings[0]['longitude'],
'latitude': settings[0]['latitude']}
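# settings.json is assumed to look roughly like this (only the keys used above are shown):
#   [{"longitude": -0.1278, "latitude": 51.5074}]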
def update_timers():
script_path = os.path.dirname(__file__)
os.chdir(script_path)
timers_file_path = '../config/timers.json'
timers_json = open(timers_file_path)
timers_list = json.load(timers_json)
timers_json.close()
long_lat = get_long_lat()
for timer in timers_list:
if timer['sunset']:
#sunset = sunset_for_today(long_lat['longitude'],long_lat['latitude'])
sunset = sunset_for_today()
if timer['sunset'] == 'on':
timer['on'] = sunset.strftime('%H:%M')
else:
timer['off'] = sunset.strftime('%H:%M')
if timer['sunrise']:
#sunrise = sunrise_for_today(long_lat['longitude'],long_lat['latitude'])
sunrise = sunrise_for_today()
if timer['sunrise'] == 'off':
timer['off'] = sunrise.strftime('%H:%M')
else:
timer['on'] = sunrise.strftime('%H:%M')
with open(timers_file_path, 'w') as json_file:
json.dump(timers_list, json_file, indent=4)
if __name__ == '__main__':
update_timers()
| 2.953125 | 3 |
exopy/measurement/monitors/api.py | jerjohste/exopy | 16 | 12792610 | <filename>exopy/measurement/monitors/api.py
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright 2015-2018 by Exopy Authors, see AUTHORS for more details.
#
# Distributed under the terms of the BSD license.
#
# The full license is in the file LICENCE, distributed with this software.
# -----------------------------------------------------------------------------
"""A measurement monitor is used to follow the measurement progress.
It can simply display some database values or request the plotting of some
data.
"""
from .base_monitor import BaseMonitor, BaseMonitorItem, Monitor
__all__ = ['BaseMonitor', 'BaseMonitorItem', 'Monitor']
| 1.578125 | 2 |
ProposalDistribution.py | GrzegorzMika/Markov-Chains-Monte-Carlo | 0 | 12792611 | from abc import ABCMeta, abstractmethod
import numpy as np
class ProposalDistribution(metaclass=ABCMeta):
@abstractmethod
def __init__(self):
...
@abstractmethod
def sample(self, x: np.ndarray) -> np.ndarray:
...
@abstractmethod
def pdf(self, x: np.ndarray, cond: np.ndarray) -> np.ndarray:
...
class Normal(ProposalDistribution):
__slots__ = ['mean', 'std']
def __init__(self, mean: float, spread: float):
super().__init__()
self.mean = mean
self.std = spread
assert self.std > 0, "Wrong specification of distribution!"
def sample(self, x):
return x + np.random.normal(self.mean, self.std, x.shape)
def pdf(self, x, cond):
return 1 / (np.sqrt(2 * np.pi) * self.std) * np.exp(-(x - self.mean - cond) ** 2 / (2 * self.std ** 2))
class Uniform(ProposalDistribution):
__slots__ = ['spread']
def __init__(self, spread: float):
super().__init__()
self.spread = spread
assert self.spread > 0, "Wrong specification of distribution!"
def sample(self, x):
return x + np.random.uniform(low=-self.spread / 2, high=self.spread / 2, size=x.shape)
def pdf(self, x, cond):
return np.array(1 / self.spread)
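# Rough usage sketch (not part of the module): one Metropolis-Hastings proposal step,
# where `log_target` is a hypothetical log-density supplied by the caller.
#
#   proposal = Normal(mean=0.0, spread=0.5)
#   x = np.zeros(2)
#   candidate = proposal.sample(x)
#   # accept/reject using log_target(candidate) - log_target(x), plus the
#   # proposal correction computed from proposal.pdf(...)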
| 3.203125 | 3 |
annie_pieces_2.20/plotting_utilities.py | annierak/odor_tracking_sim | 0 | 12792612 | import matplotlib.pyplot as plt  # needed for plt.Rectangle and plt.plot below
def rect(x,y,w,h,c,ax):
polygon = plt.Rectangle((x,y),w,h,color=c)
ax.add_patch(polygon)
def dist_fill(X,Y, cmap,ax):
plt.plot(X,Y,lw=0)
dx = X[1]-X[0]
N = float(X.size)
for n, (x,y) in enumerate(zip(X,Y)):
color = cmap[n,:]
rect(x,0,dx,y,color,ax)
| 3.234375 | 3 |
tests/test_topbottominjector.py | minddistrict/fanstatic | 6 | 12792613 | <reponame>minddistrict/fanstatic<gh_stars>1-10
import pytest
from fanstatic import Library, Resource, init_needed, ConfigurationError
from fanstatic.injector import TopBottomInjector
def test_bundle_resources_bottomsafe():
foo = Library('foo', '')
a = Resource(foo, 'a.css')
b = Resource(foo, 'b.css', bottom=True)
needed = init_needed(resources=[a, b])
injector = TopBottomInjector({'bundle': True})
top, bottom = injector.group(needed)
assert len(top) == 1
assert len(bottom) == 0
injector = TopBottomInjector({'bundle': False, 'bottom': True})
top, bottom = injector.group(needed)
assert len(top) == 1
assert len(bottom) == 1
def test_top_bottom_insert():
foo = Library('foo', '')
x1 = Resource(foo, 'a.js')
x2 = Resource(foo, 'b.css')
y1 = Resource(foo, 'c.js', depends=[x1, x2])
html = b"<html><head>start of head</head><body>rest of body</body></html>"
needed = init_needed(resources=[y1])
injector = TopBottomInjector(dict(bottom=True, force_bottom=True))
assert injector(html, needed) == b'''\
<html><head>start of head<link rel="stylesheet" type="text/css" href="/fanstatic/foo/b.css" /></head><body>rest of body<script type="text/javascript" src="/fanstatic/foo/a.js"></script>
<script type="text/javascript" src="/fanstatic/foo/c.js"></script></body></html>'''
def test_html_bottom_safe_used_with_minified():
foo = Library('foo', '')
a = Resource(foo, 'a.js', minified='a-minified.js', bottom=True)
needed = init_needed(resources=[a])
injector = TopBottomInjector(dict(bottom=True, minified=True))
with pytest.raises(ConfigurationError):
TopBottomInjector(dict(debug=True, minified=True))
top, bottom = injector.group(needed)
assert len(top) == 0
assert len(bottom) == 1
assert bottom.resources[0].relpath == 'a-minified.js'
def test_html_bottom_safe():
foo = Library('foo', '')
x1 = Resource(foo, 'a.js')
x2 = Resource(foo, 'b.css')
y1 = Resource(foo, 'c.js', depends=[x1, x2])
y2 = Resource(foo, 'y2.js', bottom=True)
needed = init_needed(resources=[y1, y2])
injector = TopBottomInjector({})
top, bottom = injector.group(needed)
assert len(top) == 4
assert len(bottom) == 0
injector = TopBottomInjector(dict(bottom=True))
top, bottom = injector.group(needed)
assert len(top) == 3
assert len(bottom) == 1
# The bottom resource is y2.
assert bottom.resources[0] == y2
injector = TopBottomInjector(dict(bottom=True, force_bottom=True))
top, bottom = injector.group(needed)
assert len(top) == 1
assert len(bottom) == 3
top, bottom = injector.group(needed)
assert len(top) == 1
assert top.resources[0] == x2
assert len(bottom) == 3
def test_html_top_bottom_force_bottom():
foo = Library('foo', '')
x1 = Resource(foo, 'a.js')
x2 = Resource(foo, 'b.css')
y1 = Resource(foo, 'c.js', depends=[x1, x2])
needed = init_needed(resources=[y1])
injector = TopBottomInjector(dict(bottom=True, force_bottom=True))
top, bottom = injector.group(needed)
assert top.resources == [x2]
assert bottom.resources == [x1, y1]
def test_html_top_bottom_set_bottom():
foo = Library('foo', '')
x1 = Resource(foo, 'a.js')
x2 = Resource(foo, 'b.css')
y1 = Resource(foo, 'c.js', depends=[x1, x2])
needed = init_needed(resources=[y1])
injector = TopBottomInjector(dict(bottom=True))
top, bottom = injector.group(needed)
assert len(top) == 3
assert len(bottom) == 0
def test_html_insert_head_with_attributes():
# ticket 72: .need() broken when <head> tag has attributes
foo = Library('foo', '')
x1 = Resource(foo, 'a.js')
needed = init_needed(resources=[x1])
injector = TopBottomInjector({})
html = b'<html><head profile="http://example.org">something</head></html>'
assert injector(html, needed) == b'''\
<html><head profile="http://example.org">something<script type="text/javascript" src="/fanstatic/foo/a.js"></script></head></html>'''
def test_html_insert():
foo = Library('foo', '')
x1 = Resource(foo, 'a.js')
x2 = Resource(foo, 'b.css')
y1 = Resource(foo, 'c.js', depends=[x1, x2])
needed = init_needed(resources=[y1])
injector = TopBottomInjector({})
html = b"<html><head>something more</head></html>"
assert injector(html, needed) == b'''\
<html><head>something more<link rel="stylesheet" type="text/css" href="/fanstatic/foo/b.css" />
<script type="text/javascript" src="/fanstatic/foo/a.js"></script>
<script type="text/javascript" src="/fanstatic/foo/c.js"></script></head></html>'''
| 2.21875 | 2 |
enzian_descriptions.py | Sockeye-Project/decl-power-seq | 0 | 12792614 | <gh_stars>0
from sequence_generation import Node, Input, Output, Constraint, Wire, PowerState, Stateful_Node, intersect, State_Space_Error, unite_dict, state_union, SET, empty_intersection
import math
from functools import partial
import z3
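# Device descriptions for the Enzian board's power tree: regulators and monitors
# (INA226, MAX15301, MAX20751, IR, ISL, ...), clock parts, the FPGA and the ThunderX
# CPU, each modelled with the Node/Stateful_Node, Input/Output and Constraint
# primitives imported above so that power sequences can be generated from them.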
class INA226(Node):
BUS = Input([{0, 1}], "bus")
VS = Input([(0, 6000)], "power")
VBUS = Input([(0, 40000)], "monitor", lambda node, name: node.ina_monitor(name))
def __init__(self, name, bus_addr, device):
self.device = device
super(INA226, self).__init__(name, bus_addr, INA226)
self.configured = False
def ina_monitor(self, wire_name):
def fun(value, states, node=self, wire=wire_name):
if states[node.VS.name][0][0] > 2700 and states[node.VS.name][0][0] < 5500:
commands = node.configure()
commands.append("wait_for_voltage('%s', v_min=%.3f, v_max=%.3f, device='%s', monitor='VOLTAGE')" % (
wire,
0.00095 * list(value[0])[0],
0.00105 * list(value[0])[0],
node.device
))
return (True, "\n".join(commands))
else:
return (False, "wait_for_voltage('%s', v_min=%.3f, v_max=%.3f, device='%s', monitor='VOLTAGE')" % (
wire,
0.00095 * list(value[0])[0],
0.00105 * list(value[0])[0],
node.device
))
return fun
def configure(self):
if self.configured:
return []
else:
self.configured = True
return [
"init_device('%s', False)" % (self.device)
]
class MAX15301(Node):
implicit_off = {"EN": [{0}], "V_PWR": [(0, 4400)]}
implicit_on = {"EN": [{1}], "V_PWR": [(5500, 14000)]}
BUS = Input([{0, 1}], "bus")
EN = Input([{0, 1}], "logical")
V_PWR = Input([(0, 14000)], "power")
V_OUT = lambda default : Output([(0, 5250)],
[Constraint([], {"EN": [{1}], "V_PWR": [(5500, 14000)]}, partial(Constraint.implicit, "V_OUT", MAX15301.implicit_on), state_update = Constraint.default_state),
Constraint([(600, 5250)], {"EN": [{1}], "V_PWR": [(5500, 14000)], "BUS" : [{1}]}, {}, dependency_update=(Constraint.is_default, [partial(Constraint.explicit, "V_OUT", {"V_PWR", "BUS"}, {"EN"}, after_set = {"EN"}), partial(Constraint.explicit, "V_OUT", {"V_PWR", "BUS", "EN"}, set())])),
Constraint([(0, 0)], {"EN": [{0}], "V_PWR": [(0, 14000)]}, partial(Constraint.implicit, "V_OUT", MAX15301.implicit_off))
], "power", Wire.voltage_set)
def __init__(self, name, bus_addr, default, device):
self.device = device
self.default = default
self.is_default = False
self.current = [(default, default)]
self.V_OUT = MAX15301.V_OUT(default)
super(MAX15301, self).__init__(name, bus_addr, MAX15301)
def bus_req(self):
return {self.V_PWR.name: [(5500, 14000)]}
def bus_req_off(self):
return {self.V_PWR.name: [(0, 4400)]}
def update(self, states):
try:
intersect(states[self.V_OUT.name], [(600, 5250)])
self.current = states[self.V_OUT.name]
self.is_default = True
return
except State_Space_Error:
self.is_default = False
try:
intersect(states[self.V_PWR.name], [(0, 4400)])
self.current = [(self.default, self.default)]
return
except State_Space_Error:
pass
class NCP(Node):
implicit_on = {"VRI" : [(868, 3600)], "VCC" : [(2375, 5500)]}
implicit_off = {"VRI" : [(0, 868)], "VCC" : [(0, 2374)]}
VCC = Input([(0, 6000)], "power")
VRI = Input([(0, 6000)], "power") #reference input
VREF = Output([(0, 6000)],
[Constraint([(435, 1800)], {"VRI" : [(868, 3600)], "VCC" : [(2375, 5500)]}, partial(Constraint.implicit, "VREF", "implicit_on"), complex_constraints= [(lambda x1, x2: z3.Or(x1 * 2 == x2, x1 * 2 == x2 + 1), ([("VREF", 0), ("VRI", 0)]))]),
Constraint([(0, 0)], {"VRI" : [(0, 868)], "VCC" : [(2375, 5500)]}, partial(Constraint.implicit, "VREF", "implicit_off")),
Constraint([(0, 0)], { "VRI" : [(0, 3600)], "VCC" : [(0, 2374)]}, partial(Constraint.implicit, "VREF", "implicit_off"))], "power")
def __init__(self, name, bus_addr):
super(NCP, self).__init__(name, bus_addr, NCP)
class MAX8869(Node):
implicit_on = lambda _, thresh: {"V_IN" : [(max(thresh + 500, 2700), 5500)], "SHDN" : [{1}]}
implicit_off = lambda _, thresh: {"V_IN" : [(0, max(thresh + 499, 2699))], "SHDN" : [{0}]}
V_IN = Input([(0, 6000)], "power")
SHDN = Input([{0, 1}], "logical")
V_OUT = lambda _, default, thresh: Output([(0, thresh)],
[Constraint([(default, default)], {"V_IN" : [(max(thresh + 500, 2700), 5500)], "SHDN" : [{1}]}, partial(Constraint.implicit, "V_OUT", "implicit_on")),
Constraint([(0, 0)], {"V_IN" : [(0, 5500)], "SHDN" : [{0}]}, partial(Constraint.implicit, "V_OUT", "implicit_off")),
], "power")
def __init__(self, name, bus_addr, voltage):
self.implicit_on = self.implicit_on(int(voltage * 1.01))
self.implicit_off = self.implicit_off(int(voltage * 1.01))
self.V_OUT = self.V_OUT(int(voltage), int(voltage * 1.01))
super(MAX8869, self).__init__(name, bus_addr, MAX8869)
class MAX15053(Node):
implicit_on = lambda _, threshold: {"V_IN" : [(max(int(threshold * 1.06), 2700), 5500)], "V_EN" : [{1}]}
implicit_off = lambda _, threshold: {"V_IN" : [(0, (max(int(threshold * 1.06) - 1, 2699)))], "V_EN" : [{0}]}
V_IN = Input([(0, 6000)], "power")
V_EN = Input([{0, 1}], "logical")
V_OUT = lambda _, default, threshold: Output([(0, default)],
[Constraint([(default, default)], {"V_IN" : [(max(int(threshold * 1.06), 2700), 5500)], "V_EN" : [{1}]}, partial(Constraint.implicit, "V_OUT", "implicit_on")),
Constraint([(0, 0)], {"V_IN" : [(0, 5500)], "V_EN" : [{0}]}, partial(Constraint.implicit, "V_OUT", "implicit_off")),
], "power")
def __init__(self, name, bus_addr, voltage):
self.implicit_on = self.implicit_on(int(voltage * 1.01))
self.implicit_off = self.implicit_off(int(voltage * 1.01))
self.V_OUT = self.V_OUT(voltage, int(voltage * 1.01))
super(MAX15053, self).__init__(name, bus_addr, MAX15053)
def binary_multidimensional(decimal):
binary = bin(decimal)[2:] #remove 0b prefix
multidim = list({0} for i in range(8 - len(binary)))
for i in binary:
multidim.append({int(i)})
return multidim
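# e.g. binary_multidimensional(5) -> [{0}, {0}, {0}, {0}, {0}, {1}, {0}, {1}]
# (an 8-entry list of singleton sets, most significant bit first)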
def isl_outputs():
outputs = []
for i in range(0, 177):
voltage_min = math.floor(1600 - i * 6.25)
voltage_max = math.ceil(1600 - i * 6.25)
outputs.append(
Constraint(
[(voltage_min, voltage_max)], \
{"VID" : binary_multidimensional(i + 2), "VCC" : [(4750, 5250)], "EN_PWR" : [{1}], "EN_VTT" : [(870, 14000)]}, {}, \
dependency_update= (Constraint.is_default, \
[partial(Constraint.implicit, "VOUT", {"VID" : binary_multidimensional(i + 2)}, after_set={"EN_PWR"}, before_complete= {"VCC", "EN_VTT", "EN_PWR"}),
partial(Constraint.implicit, "VOUT", {"VID" : binary_multidimensional(i + 2)}, before_complete= {"VCC", "EN_VTT", "EN_PWR"})])))
outputs.append(Constraint([(0, 0)], {"VID": [{0, 1}, {0, 1}, {0, 1}, {0, 1}, {0, 1}, {0, 1}, {0, 1}, {0, 1}], "VCC" : [(0, 5250)], "EN_PWR" : [{0}], "EN_VTT" : [(0, 14000)]}, partial(Constraint.implicit, "VOUT", "implicit_off")))
return outputs
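# Each VID code (i + 2) maps to an output of 1600 mV - i * 6.25 mV, so the table
# above covers roughly 1600 mV (i = 0) down to 500 mV (i = 176).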
class ISL(Node):
implicit_off = {"VCC" : [(0, 4300)], "EN_PWR" : [{0, 1}], "EN_VTT" : [(0, 830)]}
VCC = Input([(0, 6000)], "power")
EN_PWR = Input([{0, 1}], "logical")
EN_VTT = Input([(0, 12000)], "power")
VID = Input([{0, 1}, {0, 1}, {0, 1}, {0, 1}, {0, 1}, {0, 1}, {0, 1}, {0, 1}], "logical")
VOUT = Output([(0, 1600)], isl_outputs(), "power")
def __init__(self, name, bus_addr):
self.is_default = False
super(ISL, self).__init__(name, bus_addr, ISL)
def update(self, states):
try:
intersect(states[self.VOUT.name], [(500, 1600)])
self.is_default = True
except State_Space_Error:
self.is_default = False
class IR(Node):
implicit_off = lambda _, thresh : {"VCC" : [(0, 2500)], "VIN" : [(0, thresh-1)], "EN" : [{0}]}
implicit_off_2 = lambda _, thresh : {"VCC" : [(0, 2500)], "VIN" : [(0, thresh-1)], "EN_2" : [{0}]}
device = "ir3581"
bus = "power"
BUS = Input([{0, 1}], "bus")
#loop 1 and loop 2 will have different addresses...
VCC = Input([(0, 4000)], "power")
EN = Input([{0, 1}], "logical")
EN_2 = Input([{0, 1}], "logical")
VIN = Input([(0, 13200)], "power")
VOUT = lambda _, thresh : Output([(0, 3040)],
[Constraint([(500, 3040)], {"VCC" : [(2900, 3630)], "VIN" : [(thresh, 13200)], "EN" : [{1}], "BUS": [{1}]}, {}, dependency_update = (Constraint.is_default, [partial(Constraint.explicit, "VOUT", {"VCC", "VIN", "BUS"}, {"EN"}, after_set = {"EN"}), partial(Constraint.explicit, "VOUT", {"VCC", "VIN", "BUS", "EN"}, set())])),
Constraint([(0, 0)], {"VCC" : [(0, 3630)], "VIN" : [(0, 13200)], "EN" : [{0}]}, partial(Constraint.implicit, "VOUT", "implicit_off")),
], "power", Wire.ir_set)
VOUT_2 = lambda _, thresh : Output([(0, 3040)],
[Constraint([(500, 3040)], {"VCC" : [(2900, 3630)], "VIN" : [(thresh, 13200)], "EN_2" : [{1}], "BUS" : [{1}]}, {}, dependency_update = (Constraint.is_default, [partial(Constraint.explicit, "VOUT_2", {"VCC", "VIN", "BUS"}, {"EN_2"}, after_set = {"EN_2"}), partial(Constraint.explicit, "VOUT_2", {"VCC", "VIN", "BUS", "EN_2"}, set())])),
Constraint([(0, 0)], {"VCC" : [(0, 3630)], "VIN" : [(0, 13200)], "EN_2" : [{0}]}, partial(Constraint.implicit, "VOUT_2", "implicit_off_2")),
], "power", Wire.ir_set)
def __init__(self, name, bus_addr, threshold, device, loop1, loop2, l1_addr, l2_addr):
self.configured = False
self.is_default = False
self.threshold = threshold
self.device = device
self.loop1 = loop1
self.loop2 = loop2
self.l1_addr = l1_addr
self.l2_addr = l2_addr
self.implicit_off = self.implicit_off(threshold)
self.implicit_off_2 = self.implicit_off_2(threshold)
self.VOUT = self.VOUT(threshold)
self.VOUT_2 = self.VOUT_2(threshold)
super(IR, self).__init__(name, bus_addr, IR)
def bus_req(self):
return {self.VIN.name : [(self.threshold, 13200)], self.VCC.name : [(2900, 3630)]}
def bus_req_off(self):
return {self.VIN.name : [(0, self.threshold-1)], self.VCC.name : [(0, 2500)]}
def update(self, states):
try:
intersect(states[self.VOUT.name], [(500, 3040)])
self.is_default = True
except State_Space_Error:
self.is_default = False
try:
intersect(states[self.VIN.name], [(self.threshold, 13200)])
except State_Space_Error:
self.configured = False
return
try:
intersect(states[self.VCC.name], [(2900, 3630)])
except State_Space_Error:
self.configured = False
return
def configure(self):
if self.configured:
return []
else:
self.configured = True
return [
"init_device('%s', False)" % self.device,
"init_device('%s', False)" % self.loop1,
"init_device('%s', False)" % self.loop2
]
class FPGA(Stateful_Node):
CLK = Input([(0, 0), (3300, 3300), (0, 50)], "clock")
CLK_OK = Input([{0, 1}], "logical")
VCCO_2V5_DDR24 = Input([(0, 3400)], "power")
VCCO_2V5_DDR13 = Input([(0, 3400)], "power")
VCCO_VCC_DDR24 = Input([(0, 3400)], "power")
VCCO_VTT_DDR13 = Input([(0, 2000)], "power") #replace VREF
VCCO_VTT_DDR24 = Input([(0, 2000)], "power")
VCCO_VCC_DDR13 = Input([(0, 3400)], "power")
VADJ_1V8 = Input([(0, 2000)], "power") #not found in fpga boot sequ; filled in like VCCO_VCC_DDR voltages
MGTVCCAUX_L = Input([(0, 1900)], "power")
MGTVCCAUX_R = Input([(0, 1900)], "power")
VCCO_1V8 = Input([(0, 2000)], "power") #this is sys_1v8....
VCCINT = Input([(0, 1000)], "power")
MGTAVCC = Input([(0, 1000)], "power")
MGTAVTT = Input([(0, 1300)], "power")
VCCINT_IO = Input([(0, 1000)], "power")
VCCAUX = Input([(0, 2000)], "power")
states = (lambda clk, ok, vcc, io, aux, vcco, vadj, vcc_2v5_ddr13, vcc_2v5_ddr24, vcc_ddr13, vcc_ddr24, vtt_ddr13, vtt_ddr24, vtt, mgtaux_l, mgtaux_r, mgtavcc: {
"POWERED_DOWN" :
PowerState({
clk: [(0, 0), (3300, 3300), (0, 0)],
vcc : [(0, 0)],
io : [(0, 0)],
aux : [(0, 0)],
vcco : [(0, 0)],
vadj : [(0, 0)],
vcc_2v5_ddr13 : [(0, 0)],
vcc_2v5_ddr24 : [(0, 0)],
vcc_ddr13 : [(0, 0)],
vcc_ddr24 : [(0, 0)],
vtt_ddr13 : [(0, 0)],
vtt_ddr24 : [(0, 0)],
vtt : [(0, 0)],
mgtaux_l : [(0, 0)],
mgtaux_r : [(0, 0)],
mgtavcc : [(0, 0)],
ok : [{0}]
}, {
"POWERED_ON" : [
({mgtaux_l : [(0, 0)], mgtaux_r : [(0, 0)]}, ""),
({vtt : [(0, 0)]}, ""),
({mgtavcc : [(0, 0)]}, ""),
({vcco: [(0, 0)], vcc_2v5_ddr13 : [(0, 0)], vcc_2v5_ddr24 : [(0, 0)], vcc_ddr13 : [(0, 0)], vcc_ddr24 : [(0, 0)], vtt_ddr13 : [(0, 0)], vtt_ddr24 : [(0, 0)], vadj: [(0, 0)]}, ""),
({aux : [(0, 0)]}, ""),
({io : [(0, 0)]}, ""),
({vcc : [(0, 0)]}, ""),
],
"POWERED_DOWN" : []
}),
"POWERED_ON" :
PowerState({
clk: [(0, 0), (3300, 3300), (50, 50)],
vcc : [(873, 927)],
io : [(873, 927)],
aux : [(1746, 1854)],
vcco : [(1746, 1854)],
vadj : [(1746, 1854)],
vcc_2v5_ddr13 : [(2400, 2600)],
vcc_2v5_ddr24 : [(2400, 2600)],
vcc_ddr13 : [(1140, 3400)],
vcc_ddr24 : [(1140, 3400)],
vtt_ddr13 : [(550, 1700)],
vtt_ddr24 : [(550, 1700)],
vtt : [(1164, 1236)],
mgtaux_l : [(1746, 1854)],
mgtaux_r : [(1746, 1854)],
mgtavcc : [(873, 927)],
ok : [{1}]
}, {
"POWERED_DOWN" : [
({clk: [(0, 0), (3300, 3300), (50, 50)]}, ""),
({vcc : [(873, 927)]}, ""),
({io : [(873, 927)]}, ""),
({aux : [(1746, 1854)]}, ""),
({vcco : [(1746, 1854)], vcc_2v5_ddr13 : [(2400, 2600)], vcc_2v5_ddr24 : [(2400, 2600)], vcc_ddr13 : [(1140, 3400)], vcc_ddr24 : [(1140, 3400)], vtt_ddr13 : [(550, 1700)], vtt_ddr24 : [(550, 1700)], vadj: [(1746, 1845)]}, ""),
({mgtavcc : [(873, 927)]}, ""),
({vtt : [(1164, 1236)]}, ""),
({mgtaux_l : [(1746, 1854)], mgtaux_r : [(1746, 1854)]}, ""),
({ok: [{1}]}, "")
],
"POWERED_ON" : []}
)}, ["CLK", "CLK_OK", "VCCINT", "VCCINT_IO", "VCCAUX", "VCCO_1V8", "VADJ_1V8", "VCCO_2V5_DDR13", "VCCO_2V5_DDR24", "VCCO_VCC_DDR13", "VCCO_VCC_DDR24", "VCCO_VTT_DDR13", "VCCO_VTT_DDR24", "MGTAVTT", "MGTVCCAUX_L", "MGTVCCAUX_R", "MGTAVCC"])
def __init__(self, name, bus_addr):
super(FPGA, self).__init__(name, bus_addr, "POWERED_DOWN", FPGA)
#EVAL 3 version of the FPGA, comments indicate changes
class FPGA_EVAL3(Stateful_Node):
CLK = Input([(0, 0), (3300, 3300), (0, 50)], "clock")
CLK_OK = Input([{0, 1}], "logical")
VCCO_2V5_DDR24 = Input([(0, 3400)], "power")
VCCO_2V5_DDR13 = Input([(0, 3400)], "power")
VCCO_VCC_DDR24 = Input([(0, 3400)], "power")
VCCO_VTT_DDR13 = Input([(0, 2000)], "power") #replace VREF
VCCO_VTT_DDR24 = Input([(0, 2000)], "power")
VCCO_VCC_DDR13 = Input([(0, 3400)], "power")
VADJ_1V8 = Input([(0, 2000)], "power") #not found in fpga boot sequ; filled in like VCCO_VCC_DDR voltages
MGTVCCAUX_L = Input([(0, 1900)], "power")
MGTVCCAUX_R = Input([(0, 1900)], "power")
VCCO_1V8 = Input([(0, 2000)], "power") #this is sys_1v8....
VCCINT = Input([(0, 1000)], "power")
MGTAVCC = Input([(0, 1000)], "power")
MGTAVTT = Input([(0, 1300)], "power")
VCCINT_IO = Input([(0, 1000)], "power")
VCCAUX = Input([(0, 2000)], "power")
states = (lambda clk, ok, vcc, io, aux, vcco, vadj, vcc_2v5_ddr13, vcc_2v5_ddr24, vcc_ddr13, vcc_ddr24, vtt_ddr13, vtt_ddr24, vtt, mgtaux_l, mgtaux_r, mgtavcc: {
"POWERED_DOWN" :
PowerState({
clk: [(0, 0), (3300, 3300), (0, 0)],
vcc : [(0, 0)],
io : [(0, 0)],
aux : [(0, 0)],
vcco : [(0, 0)],
vadj : [(0, 0)],
vcc_2v5_ddr13 : [(0, 0)],
vcc_2v5_ddr24 : [(0, 0)],
vcc_ddr13 : [(0, 0)],
vcc_ddr24 : [(0, 0)],
vtt_ddr13 : [(0, 0)],
vtt_ddr24 : [(0, 0)],
vtt : [(0, 0)],
mgtaux_l : [(0, 0)],
mgtaux_r : [(0, 0)],
mgtavcc : [(0, 0)],
ok : [{0}]
}, {
"POWERED_ON" : [
({mgtaux_l : [(0, 0)], mgtaux_r : [(0, 0)]}, ""),
({vtt : [(0, 0)]}, ""),
({mgtavcc : [(0, 0)]}, ""),
({vcco: [(0, 0)], vcc_2v5_ddr13 : [(0, 0)], vcc_2v5_ddr24 : [(0, 0)], vcc_ddr13 : [(0, 0)], vcc_ddr24 : [(0, 0)], vtt_ddr13 : [(0, 0)], vtt_ddr24 : [(0, 0)], vadj: [(0, 0)]}, ""),
({aux : [(0, 0)]}, ""),
({io : [(0, 0)]}, ""),
({vcc : [(0, 0)]}, ""),
],
"POWERED_DOWN" : []
}),
"POWERED_ON" :
PowerState({
clk: [(0, 0), (3300, 3300), (50, 50)],
vcc : [(873, 927)],
io : [(873, 927)],
aux : [(1746, 1854)],
vcco : [(1746, 1854)],
vadj : [(1746, 1854)],
vcc_2v5_ddr13 : [(2400, 2600)],
vcc_2v5_ddr24 : [(2400, 2600)],
##### REGULAR VALUES: #######
#vcc_ddr13 : [(1140, 3400)],
#vcc_ddr24 : [(1140, 3400)],
##### VALUES FOR EVAL 3: #####
vcc_ddr13 : [(1200, 1200)],
vcc_ddr24 : [(1200, 1200)],
#############################
vtt_ddr13 : [(550, 1700)],
vtt_ddr24 : [(550, 1700)],
vtt : [(1164, 1236)],
mgtaux_l : [(1746, 1854)],
mgtaux_r : [(1746, 1854)],
mgtavcc : [(873, 927)],
ok : [{1}]
}, {
"POWERED_DOWN" : [
({clk: [(0, 0), (3300, 3300), (50, 50)]}, ""),
({vcc : [(873, 927)]}, ""),
({io : [(873, 927)]}, ""),
({aux : [(1746, 1854)]}, ""),
##### REGULAR TRANSITION STEP ######
#({vcco : [(1746, 1854)], vcc_2v5_ddr13 : [(2400, 2600)], vcc_2v5_ddr24 : [(2400, 2600)], vcc_ddr13 : [(1140, 3400)], vcc_ddr24 : [(1140, 3400)], vtt_ddr13 : [(550, 1700)], vtt_ddr24 : [(550, 1700)], vadj: [(1746, 1845)]}, ""),
##### FOR EVAL 3: ###################
({vcco : [(1746, 1854)], vcc_2v5_ddr13 : [(2400, 2600)], vcc_2v5_ddr24 : [(2400, 2600)], vcc_ddr13 : [(1200, 1200)], vcc_ddr24 : [(1200, 1200)], vtt_ddr13 : [(550, 1700)], vtt_ddr24 : [(550, 1700)], vadj: [(1746, 1845)]}, ""),
#####################################
({mgtavcc : [(873, 927)]}, ""),
({vtt : [(1164, 1236)]}, ""),
({mgtaux_l : [(1746, 1854)], mgtaux_r : [(1746, 1854)]}, ""),
({ok: [{1}]}, "")
],
"POWERED_ON" : []}
)}, ["CLK", "CLK_OK", "VCCINT", "VCCINT_IO", "VCCAUX", "VCCO_1V8", "VADJ_1V8", "VCCO_2V5_DDR13", "VCCO_2V5_DDR24", "VCCO_VCC_DDR13", "VCCO_VCC_DDR24", "VCCO_VTT_DDR13", "VCCO_VTT_DDR24", "MGTAVTT", "MGTVCCAUX_L", "MGTVCCAUX_R", "MGTAVCC"])
def __init__(self, name, bus_addr):
super(FPGA_EVAL3, self).__init__(name, bus_addr, "POWERED_DOWN", FPGA_EVAL3)
class ThunderX(Stateful_Node):
PLL_DC_OK = Input([{0, 1}], "logical")
CHIP_RESET_L = Input([{0, 1}], "logical")
PLL_REF_CLK = Input([(0, 0), (3300, 3300), (0, 50)], "clock")
VDD = Input([(0, 1210)], "power")
VDD_09 = Input([(0, 945)], "power")
VDD_15 = Input([(0, 1650)], "power")
VDD_DDR13 = Input([(0, 1650)], "power")
VDD_2V5_DDR13 = Input([(0, 3300)], "power")
#actually, the CPU names its ddr bank voltage DDR02 (not 24),
#but I adjusted it to match the schematics, so I know which supplies to connect.. :')
VDD_DDR24 = Input([(0, 1650)], "power")
VDD_2V5_DDR24 = Input([(0, 3300)], "power")
VTT_DDR24 = Input([(0, 1400)], "power")
VTT_DDR13 = Input([(0, 1400)], "power")
VDD_IO33 = Input([(0, 3600)], "power")
states = (lambda ok, rst, clk, vdd, vdd09, vdd15, ddr24, ddr24_2v5, ddr13, ddr13_2v5, vttddr24, vttddr13, io33 : { #pll_vdd, pll_ddr2, pll_ddr13, sys_pll_ddr : {
"POWERED_DOWN" :
PowerState({
ok : [{0}],
rst : [{0}],
clk : [(0, 0), (3300, 3300), (0, 0)],
vdd : [(0, 0)],
vdd09 : [(0, 0)],
vdd15 : [(0, 0)],
ddr24 : [(0, 0)],
ddr24_2v5 : [(0, 0)],
ddr13 : [(0, 0)],
ddr13_2v5 : [(0, 0)],
vttddr24 : [(0, 0)],
vttddr13 : [(0, 0)],
io33 : [(0, 0)]
}, {
"POWERED_ON" : [
({ok : [{0}]}, "")
],
"POWERED_DOWN" : []
}
),
"POWERED_ON" :
PowerState({
ok : [{1}],
rst : [{1}],
clk : [(0, 0), (3300, 3300), (50, 50)],
vdd : [(940, 980)],
vdd09 : [(870, 930)],
vdd15 : [(1450, 1550)],
ddr13 : [(1425, 1575)],
ddr24: [(1425, 1575)],
ddr24_2v5 : [(2400, 2600)], #not imposed by the cpu, but the connected DIMM SPD needs 2.5 V
ddr13_2v5 : [(2400, 2600)],
vttddr24 : [(700, 800)],
vttddr13 : [(700, 800)],
io33 : [(3140, 3460)],
}, {
"POWERED_DOWN": [
({clk: [(0, 0), (3300, 3300), (50, 50)], io33 : [(3140, 3460)]}, "wait for %s to stabilize" %(io33)),
({vdd : [(940, 980)]}, ""),
({vdd09 : [(870, 930)], vdd15 : [(1450, 1550)]}, ""),
({ddr24 : [(1425, 1575)], ddr24_2v5 : [(2400, 2600)], ddr13 : [(1425, 1575)], ddr13_2v5 : [(2400, 2600)], vttddr24 : [(700, 800)], vttddr13 : [(700, 800)]}, "%s should have stabilized by now" %vdd),
({ok : [{1}]}, "must have written pll_mul and sys_pll_mul beforehand"),
({rst : [{1}]}, "")
],
"POWERED_ON" : [] })
}, ["PLL_DC_OK", "CHIP_RESET_L", "PLL_REF_CLK", "VDD", "VDD_09", "VDD_15", "VDD_DDR24", "VDD_2V5_DDR24", "VDD_DDR13", "VDD_2V5_DDR13", "VTT_DDR24", "VTT_DDR13", "VDD_IO33"]) #"VDD_IO25", "VDD_IO33"])
def __init__(self, name, bus_addr):
super(ThunderX, self).__init__(name, bus_addr, "POWERED_DOWN", ThunderX)
#EVAL 3 version of the THUNDERX: Comments indicate changes
class ThunderX_EVAL3(Stateful_Node):
PLL_DC_OK = Input([{0, 1}], "logical")
CHIP_RESET_L = Input([{0, 1}], "logical")
PLL_REF_CLK = Input([(0, 0), (3300, 3300), (0, 50)], "clock")
VDD = Input([(0, 1210)], "power")
VDD_09 = Input([(0, 945)], "power")
VDD_15 = Input([(0, 1650)], "power")
VDD_DDR13 = Input([(0, 1650)], "power")
VDD_2V5_DDR13 = Input([(0, 3300)], "power")
#actually, the CPU names its ddr bank voltage DDR02 (not 24),
#but I adjusted it to match the schematics, so I know which supplies to connect.. :')
VDD_DDR24 = Input([(0, 1650)], "power")
VDD_2V5_DDR24 = Input([(0, 3300)], "power")
VTT_DDR24 = Input([(0, 1400)], "power")
VTT_DDR13 = Input([(0, 1400)], "power")
VDD_IO33 = Input([(0, 3600)], "power")
states = (lambda ok, rst, clk, vdd, vdd09, vdd15, ddr24, ddr24_2v5, ddr13, ddr13_2v5, vttddr24, vttddr13, io33 : { #pll_vdd, pll_ddr2, pll_ddr13, sys_pll_ddr : {
"POWERED_DOWN" :
PowerState({
ok : [{0}],
rst : [{0}],
clk : [(0, 0), (3300, 3300), (0, 0)],
vdd : [(0, 0)],
vdd09 : [(0, 0)],
vdd15 : [(0, 0)],
ddr24 : [(0, 0)],
ddr24_2v5 : [(0, 0)],
ddr13 : [(0, 0)],
ddr13_2v5 : [(0, 0)],
vttddr24 : [(0, 0)],
vttddr13 : [(0, 0)],
io33 : [(0, 0)]
}, {
"POWERED_ON" : [
({ok : [{0}]}, "")
],
"POWERED_DOWN" : []
}
),
"POWERED_ON" :
PowerState({
ok : [{1}],
rst : [{1}],
clk : [(0, 0), (3300, 3300), (50, 50)],
vdd : [(940, 980)],
vdd09 : [(870, 930)],
vdd15 : [(1450, 1550)],
####### REGULAR VALUES #########
#ddr13 : [(1425, 1575)],
#ddr24: [(1425, 1575)],
####### FOR EVAL 3 ###########
ddr24: [(1200, 1200)],
ddr13: [(1200, 1200)],
#################################
ddr24_2v5 : [(2400, 2600)], #not imposed by the cpu, but the connected DIMM SPD needs 2.5 V
ddr13_2v5 : [(2400, 2600)],
vttddr24 : [(570, 630)],
vttddr13 : [(570, 630)],
io33 : [(3140, 3460)],
}, {
"POWERED_DOWN": [
({clk: [(0, 0), (3300, 3300), (50, 50)], io33 : [(3140, 3460)]}, "wait for %s to stabilize" %(io33)),
({vdd : [(940, 980)]}, ""),
({vdd09 : [(870, 930)], vdd15 : [(1450, 1550)]}, ""),
#### REGULAR TRANSITION STEP #########
#({ddr24 : [(1425, 1575)], ddr24_2v5 : [(2400, 2600)], ddr13 : [(1425, 1575)], ddr13_2v5 : [(2400, 2600)], vttddr24 : [(700, 800)], vttddr13 : [(700, 800)]}, "%s should have stabilized by now" %vdd),
#### FOR EVAL 3 ######################
({ddr24 : [(1200, 1200)], ddr24_2v5 : [(2400, 2600)], ddr13 : [(1200, 1200)], ddr13_2v5 : [(2400, 2600)], vttddr24 : [(570, 630)], vttddr13 : [(570, 630)]}, "%s should have stabilized by now" %vdd),
######################################
({ok : [{1}]}, "must have written pll_mul and sys_pll_mul beforehand"),
({rst : [{1}]}, "")
],
"POWERED_ON" : [] })
}, ["PLL_DC_OK", "CHIP_RESET_L", "PLL_REF_CLK", "VDD", "VDD_09", "VDD_15", "VDD_DDR24", "VDD_2V5_DDR24", "VDD_DDR13", "VDD_2V5_DDR13", "VTT_DDR24", "VTT_DDR13", "VDD_IO33"]) #"VDD_IO25", "VDD_IO33"])
def __init__(self, name, bus_addr):
super(ThunderX_EVAL3, self).__init__(name, bus_addr, "POWERED_DOWN", ThunderX_EVAL3)
class Bus(Node):
BUS = Output([{0, 1}],
[
Constraint([{1}], lambda node, inputs: node.construct_req(inputs), lambda node, name, inputs: node.construct_dependency(name, node.construct_req(inputs))),
Constraint([{0}], lambda node, inputs: {}, lambda node, name, inputs: node.construct_dependency(name, node.construct_req_off(inputs)))
], "bus")
def __init__(self, name, bus_addr):
super(Bus, self).__init__(name, bus_addr, Bus)
def construct_req(self, inputs):
req = {}
for node, _ in inputs:
unite_dict(req, node.bus_req())
print(req)
return req
def construct_req_off(self, inputs):
req = {}
for node, _ in inputs:
node_req = node.bus_req_off()
for wire, state in node_req.items():
if not wire in req:
req[wire] = state
else:
req[wire] = state_union(state, req[wire])
print(req)
return req
def construct_dependency(self, name, req):
return (SET.Implicit, [set(), set(), set(), set()], lambda states, req = req: {name : set(filter(lambda x: not empty_intersection(x, req, states), req.keys()))})
class MAX20751(Node):
implicit_on = {"VR_ON": [{1}], "VDD33":[(2970, 3630)], "VDDH":[(8500, 14000)]}
implicit_off = {"VR_ON": [{0}], "VDD33": [0, 2800], "VDDH": [(0, 8499)]}
VDD33 = Input([(0, 4000)], "power")
BUS = Input([{0, 1}], "bus")
VDDH = Input([(0, 23000)], "power") #slave input power
VR_ON = Input([{0, 1}], "logical")
V_OUT = lambda default : Output([(0, 1520)],
[Constraint([], {"VR_ON": [{1}], "VDD33":[(2970, 3630)], "VDDH":[(8500, 14000)]}, partial(Constraint.implicit, "V_OUT", MAX20751.implicit_on), state_update= Constraint.default_state),
Constraint([(500, 1520)], {"VR_ON": [{1}], "VDD33":[(2970, 3630)], "VDDH":[(8500, 14000)], "BUS" : [{1}]}, {}, dependency_update = (Constraint.is_default, [partial(Constraint.explicit, "V_OUT", {"VDD33", "VDDH", "BUS"}, {"VR_ON"}, after_set = {"VR_ON"}), partial(Constraint.explicit, "V_OUT", {"VDD33", "VDDH", "BUS", "VR_ON"}, set())])),
Constraint([(0, 0)], {"VR_ON": [{0}], "VDD33":[(0, 3630)], "VDDH":[(0, 14000)]}, partial(Constraint.implicit, "V_OUT", MAX20751.implicit_off)),
], "power", Wire.voltage_set)
def __init__(self, name, bus_addr, default, device):
self.device = device
self.default = default
self.is_default = False
self.current = [(default, default)]
self.V_OUT = MAX20751.V_OUT(default)
super(MAX20751, self).__init__(name, bus_addr, MAX20751)
def bus_req(self):
return {}
def bus_req_off(self):
return {}
def update(self, states):
try:
intersect(states[self.V_OUT.name], [(500, 1520)])
self.current = states[self.V_OUT.name]
self.is_default = True
return
except State_Space_Error:
self.is_default = False
try:
intersect(states[self.VDD33.name], [(0, 2800)])
self.current = [(self.default, self.default)]
return
except State_Space_Error:
pass
try:
intersect(states[self.VDDH.name], [(8500, 14000)])
self.current = [(self.default, self.default)]
return
except State_Space_Error:
pass
class Oscillator(Node):
VDD = Input([(0, 3600)], "power")
CLK = Output([(0, 0), (3300, 3300), (0, 50)], [
Constraint([(0, 0), (3300, 3300), (50, 50)], {"VDD": [(2600, 3600)]}, partial(Constraint.implicit, "CLK", {"VDD": [(2600, 3600)]})),
Constraint([(0, 0), (3300, 3300), (0, 0)], {"VDD": [(0, 2599)]}, partial(Constraint.implicit, "CLK", {"VDD": [(0, 2599)]}))], "clock", Wire.clock_config)
def __init__(self, name, bus_addr):
super(Oscillator, self).__init__(name, bus_addr, Oscillator)
class SI5395(Node):
implicit_on = {"VDD": [(2600, 3600)], "CLK_IN": [(0, 0), (3300, 3300), (50, 50)]}
implicit_off = {"VDD": [(0, 2599)], "CLK_IN": [(0, 0), (3300, 3300), (0, 0)]}
VDD = Input([(0, 3600)], "power")
CLK_IN = Input([(0, 0), (3300, 3300), (0, 50)], "clock")
CLK = Output([(0, 0), (3300, 3300), (0, 50)], [
Constraint([(0, 0), (3300, 3300), (50, 50)], {"VDD": [(2600, 3600)], "CLK_IN": [(0, 0), (3300, 3300), (50, 50)]}, partial(Constraint.explicit, "CLK", {"VDD", "CLK_IN"}, set())),
Constraint([(0, 0), (3300, 3300), (0, 0)], {"VDD": [(2600, 3600)], "CLK_IN": [(0, 0), (3300, 3300), (50, 50)]}, partial(Constraint.implicit, "CLK", "implicit_on"), dependency_update= (Constraint.is_configured, [partial(Constraint.implicit, "CLK", "implicit_on"), partial(Constraint.explicit, "CLK", {}, set())])),
Constraint([(0, 0), (3300, 3300), (0, 0)], {"VDD": [(0, 2599)]}, partial(Constraint.implicit, "CLK", "implicit_off")),
Constraint([(0, 0), (3300, 3300), (0, 0)], {"CLK_IN": [(0, 0), (3300, 3300), (0, 0)]}, partial(Constraint.implicit, "CLK", "implicit_off"))], "clock", Wire.clock_config)
def __init__(self, name, bus_addr, device):
self.device = device
self.configured = False
super(SI5395, self).__init__(name, bus_addr, SI5395)
def update(self, states):
try:
intersect(states[self.VDD.name], [(2600, 3600)])
except State_Space_Error:
self.configured = False
def configure(self):
if self.configured:
return []
else:
self.configured = True
return [
"init_device('%s', False)" % (self.device)
]
class Clock(Node):
CLK = Output([(0, 3300), (0, 60)], [([(0, 3300), (0, 60)], {}, [], lambda node: node.indep("CLK"))], "clock", Wire.clock_config)
def __init__(self, name, bus_addr):
super(Clock, self).__init__(name, bus_addr, Clock)
class PowerConsumer(Node):
node_string = "<V_IN> V_IN"
V_IN = Input([(0, 12000)], "power")
def __init__(self, name, bus_addr):
super(PowerConsumer, self).__init__(name, bus_addr, PowerConsumer)
class CPU2(Stateful_Node):
VDD = Input([(0, 2600)], "power")
EN1 = Input([{0, 1}], "logical")
EN2 = Input([{0, 1}], "logical")
states = (lambda vdd, en1, en2: {
"POWERED_DOWN" : PowerState({vdd : [(0, 0)], en1 : [{0}], en2 : [{0}]}, {
"POWERED_ON": [
({en1 : [{0}]}, "")
],
"POWERED_DOWN": []}),
"POWERED_ON" : PowerState({vdd : [(2300, 2600)], en1 : [{1}], en2 : [{0}]}, {
"POWERED_DOWN": [
({vdd: [(2300, 2400)]}, "wait until " + vdd + " stabilized"),
({en1 : [{1}]}, ""),
({en2 : [{1}], vdd: [(2000, 2600)]}, "")
],
"POWERED_ON": []})
}, ["VDD", "EN1", "EN2"])
def __init__(self, name, bus_addr):
super(CPU2, self).__init__(name, bus_addr, "POWERED_DOWN", CPU2)
class BMC(Node):
B_CLOCK_FLOL = Output([{0, 1}], [Constraint([{0, 1}], {}, partial(Constraint.explicit, "B_CLOCK_FLOL", set(), set()))], "logical", Wire.fpga_clk_ok)
B_PSUP_ON = Output([{0, 1}], [Constraint([{0, 1}], {}, partial(Constraint.explicit, "B_PSUP_ON", set(), set()))], "logical", Wire.gpio_set)
C_RESET_N = Output([{0, 1}], [Constraint([{0, 1}], {}, partial(Constraint.explicit, "C_RESET_N", set(), set()))], "logical", Wire.gpio_set)
C_PLL_DC_OK = Output([{0, 1}], [Constraint([{0, 1}], {}, partial(Constraint.explicit, "C_PLL_DC_OK", set(), set()))], "logical", Wire.cpu_clk_ok)
B_FDV_1V8 = Output([{0, 1}, {0, 1}, {0, 1}, {0, 1}, {0, 1}, {0, 1}, {0, 1}, {0, 1}], [
Constraint([{0, 1}, {0, 1}, {0, 1}, {0, 1}, {0, 1}, {0, 1}, {0, 1}, {0, 1}], {}, partial(Constraint.explicit, "B_FDV_1V8", set(), set()))], "logical", Wire.vid_set)
B_CDV_1V8 = Output([{0, 1}, {0, 1}, {0, 1}, {0, 1}, {0, 1}, {0, 1}, {0, 1}, {0, 1}], [
Constraint([{0, 1}, {0, 1}, {0, 1}, {0, 1}, {0, 1}, {0, 1}, {0, 1}, {0, 1}], {}, partial(Constraint.explicit, "B_CDV_1V8", set(), set()))], "logical", Wire.vid_set)
def __init__(self, name, bus_addr):
super(BMC, self).__init__(name, bus_addr, BMC)
self.configured = False
def configure(self):
if self.configured:
return []
else:
self.configured = True
return [
"init_device('isl6334d_ddr_v', False)"
]
class CPU_3(Stateful_Node):
VDD33 = Input([(0, 4000)], "power")
VDD = Input([(0, 2500)], "power")
EN1 = Input([{0, 1}], "logical")
EN2 = Input([{0, 1}], "logical")
states = (lambda vdd33, vdd, en1, en2: {
"POWERED_DOWN" : PowerState({vdd33: [(0, 0)], vdd : [(0, 0)], en1 : [{0}], en2 : [{1}]}, {
"POWERED_ON": [
({en1 : [{0}]}, "")
],
"POWERED_DOWN": []}),
"POWERED_ON" : PowerState({vdd33: [(3000, 4000)], vdd : [(2000, 2500)], en1 : [{1}], en2 : [{0}]}, {
"POWERED_DOWN": [
({en2 : [{1}]}, ""),
({vdd33: [(3000, 4000)], vdd: [(2000, 2500)]}, "wait until " + vdd + " stabilized"),
({en1 : [{1}]}, ""),
({vdd: [(2000, 2200)]}, "")
],
"POWERED_ON": []})
}, ["VDD33", "VDD", "EN1", "EN2"])
def __init__(self, name, bus_addr):
super(CPU_3, self).__init__(name, bus_addr, "POWERED_DOWN", CPU_3)
class PSU(Node):
EN = Input([{0, 1}], "logical")
OUT = Output([(0, 12000)], [
Constraint([(12000, 12000)], {"EN" : [{1}]}, partial(Constraint.implicit, "OUT", {"EN": [{1}]})),
Constraint([(0, 0)], {"EN": [{0}]}, partial(Constraint.implicit, "OUT", {"EN": [{0}]}))], "power")
def __init__(self, name, bus_addr):
super(PSU, self).__init__(name, bus_addr, PSU)
class Main_PSU(Node):
EN = Input([{0, 1}], "logical")
V33_PSU = Output([(0, 3300)], [
Constraint([(3300, 3300)], {"EN": [{1}]}, partial(Constraint.implicit, "V33_PSU", {"EN": [{1}]})),
Constraint([(0, 0)], {"EN": [{0}]}, partial(Constraint.implicit, "V33_PSU", {"EN": [{0}]}))], "power")
V12_PSU = Output([(0, 12000)], [
Constraint([(12000, 12000)], {"EN" : [{1}]}, partial(Constraint.implicit, "V12_PSU", {"EN" : [{1}]})),
Constraint([(0, 0)], {"EN": [{0}]}, partial(Constraint.implicit, "V12_PSU", {"EN": [{0}]}))], "power")
V5SB_PSU = Output([(5000, 5000)], [Constraint([(5000, 5000)], {}, partial(Constraint.implicit, "V5SB_PSU", {}))], "power")
BMC_VCC_3V3 = Output([(3300, 3300)], [Constraint([(3300, 3300)], {}, partial(Constraint.implicit, "BMC_VCC_3V3", {}))], "power")
V5_PSU = Output([(0, 5000)], [
Constraint([(5000, 5000)], {"EN": [{1}]}, partial(Constraint.implicit, "V5_PSU", {"EN" : [{1}]})),
Constraint([(0, 0)], {"EN": [{0}]}, partial(Constraint.implicit, "V5_PSU", {"EN": [{0}]}))], "power")
def __init__(self, name, bus_addr):
super(Main_PSU, self).__init__(name, bus_addr, Main_PSU)
class PowerSupply(Node):
OUT0 = Output([(0, 12000)], [([(0, 12000)], {}, [], lambda node: node.indep("OUT0"))], "power")
OUT1 = Output([(0, 12000)], [([(0, 12000)], {}, [], lambda node: node.indep("OUT1"))], "power")
OUT2 = Output([(0, 12000)], [([(0, 12000)], {}, [], lambda node: node.indep("OUT2"))], "power")
def __init__(self, name, bus_addr):
super(PowerSupply, self).__init__(name, bus_addr, PowerSupply)
class ISPPAC(Node):
implicit_off = {"VCC": [(0, 2600)], "VCC_IN": [(0, 2000)]}
VMON1_ATT = Input([(0, 13900)], "monitor", lambda node, name: node.isppac_monitor("VMON1_ATT", name, 0.4125))
VMON2_ATT = Input([(0, 5734)], "monitor", lambda node, name: node.isppac_monitor("VMON2_ATT", name))
VMON3_ATT = Input([(0, 5734)], "monitor", lambda node, name: node.isppac_monitor("VMON3_ATT", name))
VMON4 = Input([(0, 5734)], "monitor", lambda node, name: node.isppac_monitor("VMON4", name))
VMON4_ATT = Input([(0, 5734)], "monitor", lambda node, name: node.isppac_monitor("VMON4_ATT", name))
VMON5 = Input([(0, 5734)], "monitor", lambda node, name: node.isppac_monitor("VMON5", name))
VMON5_ATT = Input([(0, 5734)], "monitor", lambda node, name: node.isppac_monitor("VMON5_ATT", name))
VMON6 = Input([(0, 5734)], "monitor", lambda node, name: node.isppac_monitor("VMON6", name))
VMON7 = Input([(0, 5734)], "monitor", lambda node, name: node.isppac_monitor("VMON7", name))
VMON7_ATT = Input([(0, 5734)], "monitor", lambda node, name: node.isppac_monitor("VMON7_ATT", name))
VMON8 = Input([(0, 5734)], "monitor", lambda node, name: node.isppac_monitor("VMON8", name))
VMON8_ATT = Input([(0, 5734)], "monitor", lambda node, name: node.isppac_monitor("VMON8_ATT", name))
VMON9 = Input([(0, 5734)], "monitor", lambda node, name: node.isppac_monitor("VMON9", name))
VMON10 = Input([(0, 5734)], "monitor", lambda node, name: node.isppac_monitor("VMON10", name))
VMON11 = Input([(0, 5734)], "monitor", lambda node, name: node.isppac_monitor("VMON11", name))
VMON11_ATT = Input([(0, 5734)], "monitor", lambda node, name: node.isppac_monitor("VMON11_ATT", name))
VMON12 = Input([(0, 5734)], "monitor", lambda node, name: node.isppac_monitor("VMON12", name))
VCC_IN = Input([(0, 6000)], "power", lambda node, name: node.isppac_monitor("VCCINP", name))
VCC = Input([(0, 4500)], "power", lambda node, name: node.isppac_monitor("VCCA", name))
def __init__(self, name, bus_addr, device):
self.device = device
for i in range(0, 20):
self.generate_output(i)
super(ISPPAC, self).__init__(name, bus_addr, ISPPAC)
self.configured = False
def generate_output(self, number):
name = "OUT" + str(number)
output = Output([{0, 1}],
[Constraint([{1, 0}], {"VCC": [(2800, 3960)], "VCC_IN": [(2250, 5500)]}, partial(Constraint.explicit, name, {"VCC", "VCC_IN"}, set())),
Constraint([{0}], {"VCC": [(0, 2600)], "VCC_IN": [(0, 6000)]}, partial(Constraint.implicit, name, "implicit_off")),
Constraint([{0}], {"VCC": [(0, 4500)], "VCC_IN": [(0, 2000)]}, partial(Constraint.implicit, name, "implicit_off"))], "logical", Wire.pin_set)
setattr(self, name, output)
def isppac_monitor(self, pinname, wire_name, multiplier = 1):
def fun(value, _, node=self, wire_name=wire_name, pinname=pinname, multiplier=multiplier):
commands = node.configure()
if list(value[0])[0] == 0:
commands.append("wait_for_voltage('%s', v_min=0, v_max=0.08, device='%s', monitor='%s')" % (
wire_name,
node.device,
pinname
))
else:
commands.append("wait_for_voltage('%s', v_min=%.3f, v_max=%.3f, device='%s', monitor='%s')" % (
wire_name,
0.00095 * list(value[0])[0] * multiplier,
0.00105 * list(value[0])[0] * multiplier,
node.device,
pinname
))
return (True, "\n".join(commands))
return fun
def configure(self):
if self.configured:
return []
else:
self.configured = True
return [
"init_device('%s', False)" % (self.device)
]
#EVAL 3 version of the Enzian nodes, only changes classes of ThunderX and FPGA to EVAL 3 versions
enzian_nodes_EVAL3 = [
("power_bus", 0x0, Bus, []),
("psu_cpu0", 0x0, PSU, []),
("psu_cpu1", 0x0, PSU, []),
("main_psu", 0x0, Main_PSU, []),
("U20", 0x60, ISPPAC, ["pac_cpu"]), #cpu ISPPAC
("U35", 0x61, ISPPAC, ["pac_fpga"]), #fpga ISPPAC
("U44", 0x40, INA226, ["ina226_ddr_fpga_24"]),
("U48", 0x41, INA226, ["ina226_ddr_fpga_13"]),
("U27", 0x44, INA226, ["ina226_ddr_cpu_13"]),
("U31", 0x45, INA226, ["ina226_ddr_cpu_24"]),
#TODO: add real names of MAX15301s
("IC12", 0x11, MAX15301, [1800, "max15301_vcc1v8_fpga"]), #VCCIV8_FPGA p 92
("IC10", 0x1B, MAX15301, [3300, "max15301_util_3v3"]), #UTIL_3V3 p.90
("IC4", 0x10, MAX15301, [1500, "max15301_15_vdd_oct"]), #1V5_VDD_OCT p.70
("IC11", 0x12, MAX15301, [1800, "max15301_vadj_1v8"]), #VADJ_1V8 p.91
("IC13", 0x15, MAX15301, [900, "max15301_vccintio_bram_fpga"]), #VCCINTIO_FPGA p 99
("U34", 0x0, IR, [4500, "ir3581", "ir3581_loop_vdd_core", "ir3581_loop_0v9_vdd_oct", 0x60, 0x62]), #VDD_CORE, VDD_OCT p 77
("U26", 0x0, ISL, []), #VDD_DDRCPU13 p 73
("U30", 0x0, ISL, []), #VDD_DDRCPU24 p 75
("U37", 0x72, MAX20751, [900, "max20751_mgtavcc_fpga"]), #MGTACC_FPGA p 85
("U41", 0x73, MAX20751, [1200,"max20751_mgtavtt_fpga"]), #MGTAVTT_FPGA p 87
("U51", 0x70, MAX20751, [900, "max20751_vccint_fpga"]), #VCCINT_FPGA p 97
("U43", 0x0, ISL, []), #VDD_DDRFPGA13 p 93
("U47", 0x0, ISL, []), #DD_DDRFPGA24 p 95
("IC5", 0x0, MAX8869, [1800]), #MGTVCCAUX_L, p 88
("IC6", 0x0, MAX8869, [1800]), #MGTVCCAUX_R, p 88
("IC7", 0x0, MAX15053, [1800]), #SYS_1V8, p 89
("IC8", 0x0, MAX15053, [2500]), #SYS_2V5_13
("IC9", 0x0, MAX15053, [2500]), #SYS_2V5_24
("IC15", 0x0, MAX15053, [2500]), #2V5_CPU13 p 71
("IC16", 0x0, MAX15053, [2500]), #2V5_CPU24 p 71
("fpga", 0x0, FPGA_EVAL3, []),
("cpu", 0x0, ThunderX_EVAL3, []),
("bmc", 0x0, BMC, []),
("U24", 0x0, NCP, []),
("U25", 0x0, NCP, []),
("U39", 0x0, NCP, []),
("U40", 0x0, NCP, []),
("U57", 0x0, SI5395, ["clk_main"]),
("U11", 0x0, SI5395, ["clk_cpu"]),
("U16", 0x0, SI5395, ["clk_fpga"]),
("oscillator", 0x0, Oscillator, []),
]
enzian_nodes = [
("power_bus", 0x0, Bus, []),
("psu_cpu0", 0x0, PSU, []),
("psu_cpu1", 0x0, PSU, []),
("main_psu", 0x0, Main_PSU, []),
("U20", 0x60, ISPPAC, ["pac_cpu"]), #cpu ISPPAC
("U35", 0x61, ISPPAC, ["pac_fpga"]), #fpga ISPPAC
("U44", 0x40, INA226, ["ina226_ddr_fpga_24"]),
("U48", 0x41, INA226, ["ina226_ddr_fpga_13"]),
("U27", 0x44, INA226, ["ina226_ddr_cpu_13"]),
("U31", 0x45, INA226, ["ina226_ddr_cpu_24"]),
#TODO: add real names of MAX15301s
("IC12", 0x11, MAX15301, [1800, "max15301_vcc1v8_fpga"]), #VCCIV8_FPGA p 92
("IC10", 0x1B, MAX15301, [3300, "max15301_util_3v3"]), #UTIL_3V3 p.90
("IC4", 0x10, MAX15301, [1500, "max15301_15_vdd_oct"]), #1V5_VDD_OCT p.70
("IC11", 0x12, MAX15301, [1800, "max15301_vadj_1v8"]), #VADJ_1V8 p.91
("IC13", 0x15, MAX15301, [900, "max15301_vccintio_bram_fpga"]), #VCCINTIO_FPGA p 99
("U34", 0x0, IR, [4500, "ir3581", "ir3581_loop_vdd_core", "ir3581_loop_0v9_vdd_oct", 0x60, 0x62]), #VDD_CORE, VDD_OCT p 77
("U26", 0x0, ISL, []), #VDD_DDRCPU13 p 73
("U30", 0x0, ISL, []), #VDD_DDRCPU24 p 75
("U37", 0x72, MAX20751, [900, "max20751_mgtavcc_fpga"]), #MGTACC_FPGA p 85
("U41", 0x73, MAX20751, [1200,"max20751_mgtavtt_fpga"]), #MGTAVTT_FPGA p 87
("U51", 0x70, MAX20751, [900, "max20751_vccint_fpga"]), #VCCINT_FPGA p 97
("U43", 0x0, ISL, []), #VDD_DDRFPGA13 p 93
("U47", 0x0, ISL, []), #DD_DDRFPGA24 p 95
("IC5", 0x0, MAX8869, [1800]), #MGTVCCAUX_L, p 88
("IC6", 0x0, MAX8869, [1800]), #MGTVCCAUX_R, p 88
("IC7", 0x0, MAX15053, [1800]), #SYS_1V8, p 89
("IC8", 0x0, MAX15053, [2500]), #SYS_2V5_13
("IC9", 0x0, MAX15053, [2500]), #SYS_2V5_24
("IC15", 0x0, MAX15053, [2500]), #2V5_CPU13 p 71
("IC16", 0x0, MAX15053, [2500]), #2V5_CPU24 p 71
("fpga", 0x0, FPGA, []),
("cpu", 0x0, ThunderX, []),
("bmc", 0x0, BMC, []),
("U24", 0x0, NCP, []),
("U25", 0x0, NCP, []),
("U39", 0x0, NCP, []),
("U40", 0x0, NCP, []),
("U57", 0x0, SI5395, ["clk_main"]),
("U11", 0x0, SI5395, ["clk_cpu"]),
("U16", 0x0, SI5395, ["clk_fpga"]),
("oscillator", 0x0, Oscillator, []),
]
enzian_wires = [
("b_psup_on", "bmc", "B_PSUP_ON", {("psu_cpu0", "EN"), ("psu_cpu1", "EN"), ("main_psu", "EN")}),
("3v3_psup", "main_psu", "V33_PSU", {("U20", "VMON3_ATT"), ("U27", "VS"), ("U31", "VS"), ("IC15", "V_IN"), ("IC16", "V_IN"), ("cpu", "VDD_IO33"), ("U34", "VCC"), ("U57", "VDD"), ("U11", "VDD"), ("U16", "VDD"), ("oscillator", "VDD")}),
#("12v_psup", "main_psu", "V12_PSU", {}), #add bmc
("5v_psup", "main_psu", "V5_PSU", {("U35", "VMON2_ATT"), ("U20", "VMON2_ATT"), ("U26", "VCC"), ("U30", "VCC"), ("U43", "VCC"), ("U47", "VCC")}),
("5vsb_psup", "main_psu", "V5SB_PSU", {("U20", "VCC_IN"), ("U35", "VCC_IN")}),
("bmc_vcc_3v3", "main_psu", "BMC_VCC_3V3", {("U20", "VCC"), ("U35", "VCC")}),
#add 5vsb if added bmc
("12v_cpu0_psup", "psu_cpu0", "OUT", {("U20", "VMON1_ATT"), ("IC4", "V_PWR"), ("U26", "EN_VTT"), ("U30", "EN_VTT"), ("U34", "VIN")}),
("12v_cpu1_psup", "psu_cpu1", "OUT", {("U35", "VMON1_ATT"), ("U37", "VDDH"), ("U41", "VDDH"), ("IC10", "V_PWR"), ("IC11", "V_PWR"), ("IC12", "V_PWR"), ("U43", "EN_VTT"), ("U47", "EN_VTT"), ("U51", "VDDH"), ("IC13", "V_PWR")}),
("en_vcc1v8_fpga", "U35", "OUT15", {("IC12", "EN")}),
("vcc1v8_fpga", "IC12", "V_OUT", {("U35", "VMON11_ATT"), ("fpga", "VCCAUX")}),
("en_util33", "U35", "OUT6", {("IC10", "EN")}),
("util33", "IC10", "V_OUT", {("U35", "VMON3_ATT"), ("U44", "VS"), ("U48", "VS"), ("U37", "VDD33"), ("U41", "VDD33"), ("IC5", "V_IN"), ("IC6", "V_IN"), ("IC7", "V_IN"), ("IC8", "V_IN"), ("IC9", "V_IN"), ("U51", "VDD33")}),
("en_mgtavtt_fpga", "U35", "OUT14", {("U41", "VR_ON")}),
("mgtavtt_fpga", "U41", "V_OUT", {("fpga", "MGTAVTT")}),
("en_mgtavcc_fpga", "U35", "OUT10", {("U37", "VR_ON")}),
("mgtavcc_fpga", "U37", "V_OUT", {("U35", "VMON7"), ("fpga", "MGTAVCC")}),
("en_vccint_fpga", "U35", "OUT9", {("U51", "VR_ON")}),
("vccint_fpga", "U51", "V_OUT", {("U35", "VMON6"), ("fpga", "VCCINT")}),
("en_sys_1v8", "U35", "OUT16", {("IC7", "V_EN")}),
("sys_1v8", "IC7", "V_OUT", {("U35", "VMON12"), ("fpga", "VCCO_1V8")}), #where to connect at fpga? additional vcco thingy?
("en_sys_2v5_13", "U35", "OUT7", { ("IC8", "V_EN")}),
("sys_2v5_13", "IC8", "V_OUT", {("U35", "VMON4_ATT"), ("U39", "VCC"), ("fpga", "VCCO_2V5_DDR13")}), #add NCP nodes
("vtt_ddrfpga13", "U39", "VREF", {("fpga", "VCCO_VTT_DDR13")}),
("en_sys_2v5_24", "U35", "OUT8", { ("IC9", "V_EN")}),
("sys_2v5_24", "IC9", "V_OUT", {("U35", "VMON5_ATT"), ("U40", "VCC"), ("fpga", "VCCO_2V5_DDR24")}), #add NCP nodes
("vtt_ddrfpga24", "U40", "VREF", {("fpga", "VCCO_VTT_DDR24")}),
("clk_sig", "oscillator", "CLK", {("U57", "CLK_IN")}),
("clk_main", "U57", "CLK", {("U11", "CLK_IN"), ("U16", "CLK_IN")}),
("clock_flol", "bmc", "B_CLOCK_FLOL", {("fpga", "CLK_OK")}),
("fpga_clk", "U16", "CLK", {("fpga", "CLK")}),
("en_mgtvccaux_l", "U35", "OUT11", {("IC5", "SHDN")}),
("en_mgtvccaux_r", "U35", "OUT12", {("IC6", "SHDN")}),
("mgtvccaux_l", "IC5", "V_OUT", {("U35", "VMON8"), ("fpga", "MGTVCCAUX_L")}),
("mgtvccaux_r", "IC6", "V_OUT", {("U35", "VMON9"), ("fpga", "MGTVCCAUX_R")}),
("en_vadj_1v8_fpga", "U35", "OUT17", {("IC11", "EN")}),
("vadj_1v8_fpga", "IC11", "V_OUT", {("fpga", "VADJ_1V8")}),
("en_vccintio_bram_fpga", "U35", "OUT13", {("IC13", "EN")}),
("vccintio_bram_fpga", "IC13", "V_OUT", {("U35", "VMON10"), ("fpga", "VCCINT_IO")}),
("en_vdd_ddrfpga13", "U35", "OUT18", {("U43", "EN_PWR")}),
("en_vdd_ddrfpga24", "U35", "OUT19", {("U47", "EN_PWR")}),
("vdd_ddrfpga13", "U43", "VOUT", {("U48", "VBUS"), ("fpga", "VCCO_VCC_DDR13"), ("U39", "VRI")}), #vcco
("vdd_ddrfpga24", "U47", "VOUT", {("U44", "VBUS"), ("fpga", "VCCO_VCC_DDR24"), ("U40", "VRI")}),
("b_cdv_1v8", "bmc", "B_CDV_1V8", {("U26", "VID"), ("U30", "VID")}),
("b_fdv_1v8", "bmc", "B_FDV_1V8", {("U43", "VID"), ("U47", "VID")}),
("c_reset_n", "bmc", "C_RESET_N", {("cpu", "CHIP_RESET_L")}),
("pll_dc_ok", "bmc", "C_PLL_DC_OK", {("cpu", "PLL_DC_OK")}),
("en_vdd_ddrcpu13", "U20", "OUT11", {("U26", "EN_PWR")}),
("en_vdd_ddrcpu24", "U20", "OUT12", {("U30", "EN_PWR")}),
("vdd_ddrcpu13", "U26", "VOUT", {("U20", "VMON9"), ("U27", "VBUS"), ("cpu", "VDD_DDR13"), ("U24", "VRI")}),
("vdd_ddrcpu24", "U30", "VOUT", {("U20", "VMON10"), ("U31", "VBUS"), ("cpu", "VDD_DDR24"), ("U25", "VRI")}),
("vdd_core_en", "U20", "OUT6", {("U34", "EN")}),
("vdd_core", "U34", "VOUT", {("U20", "VMON4"), ("cpu", "VDD")}),
("vdd_oct_en_l2", "U20", "OUT7", {("U34", "EN_2")}),
("0v9_vdd_oct", "U34", "VOUT_2", {("U20", "VMON5"), ("cpu", "VDD_09")}),
("en_1v5_vdd_oct", "U20", "OUT8", {("IC4", "EN")}),
("1v5_vdd_oct", "IC4", "V_OUT", {("U20", "VMON6"), ("cpu", "VDD_15")}),
("en_2v5_cpu13", "U20", "OUT9", {("IC15", "V_EN")}), #to NCP
("2v5_cpu13", "IC15", "V_OUT", {("U20", "VMON7_ATT"), ("U24", "VCC"), ("cpu", "VDD_2V5_DDR13")}),
("vtt_ddrcpu13", "U24", "VREF", {("U20", "VMON11"), ("cpu", "VTT_DDR13")}),
("en_2v5_cpu24", "U20", "OUT10", {("IC16", "V_EN")}), #to NCP
("2v5_cpu24", "IC16", "V_OUT", {("U20", "VMON8_ATT"), ("U25", "VCC"), ("cpu", "VDD_2V5_DDR24")}),
("vtt_ddrcpu24", "U25", "VREF", {("U20", "VMON12"), ("cpu", "VTT_DDR24")}),
("pll_ref_clk", "U11", "CLK", {("cpu", "PLL_REF_CLK")}),
("bus", "power_bus", "BUS", {("IC10", "BUS"), ("IC12", "BUS"), ("IC13", "BUS"), ("IC4", "BUS"), ("IC11", "BUS"), ("U34", "BUS"), ("U37", "BUS"), ("U41", "BUS"), ("U51", "BUS")}),
]
| 2.453125 | 2 |
textrank4zh/TextRank4Sentence.py | Mumuerr/textrank_test | 2 | 12792615 | <reponame>Mumuerr/textrank_test
#-*- encoding:utf-8 -*-
'''
Created on Dec 1, 2014
@author: letian
'''
import networkx as nx
from Segmentation import Segmentation
import numpy as np
import math
class TextRank4Sentence(object):
def __init__(self, stop_words_file = None, delimiters='?!;?!。;…\n'):
'''
        `stop_words_file`: defaults to None, in which case the internal stop-word list is empty; it can be set to a file path (string), and stop words will then be loaded from that file.
        `delimiters`: defaults to `'?!;?!。;…\n'`; used to split the text into sentences.
        self.sentences: list of sentences.
        self.words_no_filter: two-level list obtained by tokenizing each sentence in self.sentences.
        self.words_no_stop_words: two-level list obtained by removing stop words from words_no_filter.
        self.words_all_filters: two-level list obtained by keeping only words with the allowed part-of-speech tags from words_no_stop_words.
'''
self.seg = Segmentation(stop_words_file=stop_words_file, delimiters=delimiters)
self.sentences = None
        self.words_no_filter = None # two-level (2-D) list
self.words_no_stop_words = None
self.words_all_filters = None
self.graph = None
self.key_sentences = None
def train(self, text, lower = False, speech_tag_filter=True,
source = 'no_stop_words', sim_func = 'standard'):
'''
        `text`: the text content, a string.
        `lower`: whether to convert the text to lower case. Defaults to False.
        `speech_tag_filter`: if True, the internal part-of-speech list is used to filter words and build words_all_filters.
                            If False, words_all_filters is the same as words_no_stop_words.
        `source`: which of words_no_filter, words_no_stop_words, words_all_filters is used to compute sentence similarity.
                  Valid values are `'no_filter', 'no_stop_words', 'all_filters'`; the signature default is `'no_stop_words'`.
        `sim_func`: the function used to compute sentence similarity. Currently only one function is available, corresponding to the default value `standard`.
'''
self.key_sentences = []
(self.sentences, self.words_no_filter, self.words_no_stop_words, self.words_all_filters) = self.seg.segment(text=text,
lower=lower,
speech_tag_filter=speech_tag_filter);
# -
# print self.sentences
if source == 'no_filter':
source = self.words_no_filter
elif source == 'all_filters':
source = self.words_all_filters
else:
source = self.words_no_stop_words
sim_func = self._get_similarity_standard
sentences_num = len(source)
self.graph = np.zeros((sentences_num, sentences_num))
for x in xrange(sentences_num):
for y in xrange(x, sentences_num):
similarity = sim_func(source[x], source[y])
self.graph[x, y] = similarity
self.graph[y, x] = similarity
# for x in xrange(sentences_num):
# row_sum = np.sum(self.graph[x, :])
# if row_sum > 0:
# self.graph[x, :] = self.graph[x, :] / row_sum
# print self.graph
nx_graph = nx.from_numpy_matrix(self.graph)
scores = nx.pagerank(nx_graph) # this is a dict
sorted_scores = sorted(scores.items(), key = lambda item: item[1], reverse=True)
# print sorted_scores
for index, _ in sorted_scores:
self.key_sentences.append(self.sentences[index])
# print '\n'.join(self.key_sentences)
def _get_similarity_standard(self, word_list1, word_list2):
'''
        Default function for computing the similarity of two sentences:
        the number of words the sentences have in common, divided by
        log(len(word_list1)) + log(len(word_list2)).
        word_list1, word_list2: the two sentences, each given as a list of words.
'''
vector1, vector2 =self._gen_vectors(word_list1, word_list2)
# print vector1, vector2
vector3 = [vector1[x]*vector2[x] for x in xrange(len(vector1))]
vector4 = [1 for num in vector3 if num > 0.]
co_occur_num = sum(vector4)
# print co_occur_num
if co_occur_num == 0.:
return 0.
        denominator = math.log(float(len(word_list1))) + math.log(float(len(word_list2))) # denominator
if denominator == 0.:
return 0.
return co_occur_num / denominator
def _gen_vectors(self, word_list1, word_list2):
'''
        Convert the two sentences into two vectors of equal size; these vectors
        can then be used to compute the similarity of the two sentences.
        word_list1, word_list2: the two sentences, each given as a list of words.
'''
words = list(set(word_list1 + word_list2))
vector1 = [float(word_list1.count(word)) for word in words]
vector2 = [float(word_list2.count(word)) for word in words]
return vector1, vector2
def get_key_sentences(self, num = 6, sentence_min_len = 6):
'''
        Get the `num` most important sentences whose length is at least
        sentence_min_len, to be used for building the summary.
        Returns a list.
'''
result = []
count = 0
for sentence in self.key_sentences:
if count >= num:
break
if len(sentence) >= sentence_min_len:
result.append(sentence)
count += 1
return result
if __name__ == '__main__':
import codecs
# text = codecs.open('../text/03.txt', 'r', 'utf-8').read()
text = "这间酒店位于北京东三环,里面摆放很多雕塑,文艺气息十足。答谢宴于晚上8点开始。"
tr4s = TextRank4Sentence(stop_words_file='../stopword.data')
tr4s.train(text=text, speech_tag_filter=True, lower=True, source = 'all_filters')
print '\n'.join(tr4s.get_key_sentences(num=1))
print '\n'.join(tr4s.sentences)
for wl in tr4s.words_no_filter:
print '[', ', \''.join(wl), ']'
print
for wl in tr4s.words_no_stop_words:
print '[', ', \''.join(wl), ']'
print
for wl in tr4s.words_all_filters:
print '[', ', \''.join(wl), ']' | 2.65625 | 3 |
systemcheck/systems/ABAP/models/action_abap_validate_redundant_password_hashes_model.py | team-fasel/SystemCheck | 2 | 12792616 | <filename>systemcheck/systems/ABAP/models/action_abap_validate_redundant_password_hashes_model.py
from systemcheck.systems.ABAP.models import AbapSpoolParams_BAPIPRIPAR_Mixin, ActionAbapIsNotClientSpecificMixin
from systemcheck import models
from systemcheck.models.meta import generic_repr
from systemcheck.checks.models import Check
pluginName='ActionAbapValidateRedundantPasswordHashes'
@generic_repr
class ActionAbapValidateRedundantPasswordHashes(Check, AbapSpoolParams_BAPIPRIPAR_Mixin, ActionAbapIsNotClientSpecificMixin):
__tablename__ = pluginName
id = models.meta.Column(models.meta.Integer, models.meta.ForeignKey('checks_metadata.id'), primary_key=True)
SAP_USER_NAME = models.meta.Column(models.meta.String(12),
qt_label='Step User',
qt_description='Background User Name for Authorization Check',
nullable=True)
__mapper_args__ = {
'polymorphic_identity':pluginName,
}
__qtmap__ = [Check.name, Check.description, Check.failcriteria, Check.criticality,
SAP_USER_NAME,
AbapSpoolParams_BAPIPRIPAR_Mixin.PDEST,
AbapSpoolParams_BAPIPRIPAR_Mixin.PRBIG,
AbapSpoolParams_BAPIPRIPAR_Mixin.PRSAP] | 1.96875 | 2 |
ComRISB/pyglib/pyglib/estructure/misc/fermi.py | comscope/comsuite | 18 | 12792617 | from __future__ import print_function
import numpy as np
try:
from builtins import range, zip
except:
pass
def fermi_dirac(e_fermi, delta, energy):
"""
Return fermi-dirac distribution weight.
"""
x = (energy - e_fermi)/delta
if x < -200:
f = 1.
elif x > 200:
f = 0.
else:
f = 1./(np.exp(x) + 1)
return f
def num_electron_diff(e_fermi, delta, e_skn, w_k, nb_k, num_elec):
ne = 0
for e_kn in e_skn:
for e_n, w, nb in zip(e_kn, w_k, nb_k):
f = [fermi_dirac(e_fermi, delta, e) for e in e_n[:nb]]
ne += np.sum(f)*w
return ne - num_elec
| 2.8125 | 3 |
test/legacy/test_responses.py | mitsuhiko/nameko | 3 | 12792618 | from mock import Mock
from nameko.legacy import responses
def test_iter_rpcresponses():
response_list = [
Mock(payload={'id': 1, 'failure': False, 'ending': False}),
Mock(payload={'id': 2, 'failure': False, 'ending': False}),
Mock(payload={'id': 3, 'failure': False, 'ending': True}),
]
iter_ = responses.iter_rpcresponses(response_list)
ret = responses.last(iter_)
# should be the message preceeding the `ending`
assert ret.payload['id'] == 2
def test_iter_rpcresponses_ending_only():
response_list = [
Mock(payload={'id': 3, 'failure': False, 'ending': True}),
]
iter_ = responses.iter_rpcresponses(response_list)
# should not include the ending message
assert list(iter_) == []
| 2.671875 | 3 |
functions/load_nag.py | daviddoret/pyxag | 1 | 12792619 | from . import *
def load_nag(nag, path):
"""Load a NAG from a file"""
with open(path, 'r') as nag_file:
nag_json = nag_file.read()
nag = convert_json_to_nag(nag_json)
return nag
| 3.21875 | 3 |
app/notebook/views/notebook.py | Lontras-e-Preguicas/cnotes-server | 1 | 12792620 | <filename>app/notebook/views/notebook.py<gh_stars>1-10
from drf_yasg.openapi import Parameter
from drf_yasg.utils import swagger_auto_schema
from rest_framework import authentication, permissions, viewsets, mixins
from rest_framework.decorators import action
from rest_framework.response import Response
from core.models import Notebook, Member
from notebook.serializers.folder import FolderSerializer
from notebook.serializers.member import MemberSerializer
from notebook.serializers.notebook import NotebookSerializer
from notebook.serializers.search import SearchResult, SearchResultSerializer
class NotebookRolePermission(permissions.BasePermission):
def has_object_permission(self, request, view, obj: Notebook):
if request.method in permissions.SAFE_METHODS:
return True
if view.action == 'destroy':
if request.user != obj.owner:
return False
else:
membership = obj.members.get(user=request.user)
if membership.role != Member.Roles.ADMIN:
return False
return True
class NotebookViewSet(viewsets.GenericViewSet, mixins.CreateModelMixin, mixins.ListModelMixin,
mixins.RetrieveModelMixin, mixins.DestroyModelMixin, mixins.UpdateModelMixin):
serializer_class = NotebookSerializer
authentication_classes = (authentication.TokenAuthentication,)
permission_classes = (permissions.IsAuthenticated, NotebookRolePermission)
queryset = Notebook.objects.all()
def get_queryset(self):
if self.request.user.is_anonymous:
return self.queryset
return Notebook.objects.filter(member__user=self.request.user,
member__is_active=True)
@swagger_auto_schema(
responses={200: MemberSerializer(many=True)}
)
@action(detail=True, methods=['get'])
def members(self, request, pk=None):
instance: Notebook = self.get_object()
serializer = MemberSerializer(instance.members.filter(is_active=True), many=True)
return Response(serializer.data)
@swagger_auto_schema(
responses={200: FolderSerializer()}
)
@action(detail=True, methods=['get'])
def root(self, request, pk=None):
instance: Notebook = self.get_object()
serializer = FolderSerializer(instance.root_folder)
return Response(serializer.data)
@swagger_auto_schema(
manual_parameters=[Parameter('q', 'query', required=True, type='string',
                                     description='search _query_ (can be replaced by the `query` parameter)')],
responses={200: SearchResultSerializer()}
)
@action(detail=True, methods=['get'])
def search(self, request, pk=None):
instance: Notebook = self.get_object()
query = request.query_params.get('q', None) or request.query_params.get('query', '')
serializer = SearchResultSerializer(SearchResult(instance, query))
return Response(serializer.data)
| 1.9375 | 2 |
cogv3/moderation/Kick.py | XFazze/discordbot | 2 | 12792621 | import discord
from ..admin.managecommands import perms
import json
from discord.utils import get
from pymongo import MongoClient, collation
from discord.ext import commands, tasks
import time
import os
import pymongo as pm
import asyncio
import random
import datetime
import copy
class Kick(commands.Cog):
def __init__(self, bot):
self.bot = bot
# Kick
@commands.command(pass_context=True)
@commands.has_permissions(kick_members=True)
@commands.bot_has_permissions(kick_members=True)
@commands.check(perms)
async def kick(self, ctx, member:discord.Member, *reason):
# Sets default reason if not specified
if not reason:
reason = "Reason was not specified"
        # Kicks the member if the author has a higher role than the subject.
if member is None:
await ctx.reply("Please mention someone to kick")
else:
if ctx.author.top_role.position > member.top_role.position:
reason = ' '.join(map(str, reason))
await ctx.reply(f'{member} was kicked with reason "{reason}"')
await ctx.guild.kick(member, reason=reason)
else:
await ctx.reply("The person you are trying to kick is more powerful than you")
def setup(bot):
bot.add_cog(Kick(bot)) | 2.5 | 2 |
getting_started/hash.py | AoEiuV020/LearningPython | 0 | 12792622 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Counterintuitive: `is` and `hash` have nothing to do with each other.
"""
assert hash(1.00 + 0.01) == hash(1.01)
assert id(1.00 + 0.01) != id(1.01)
assert 1.00 + 0.01 == 1.01
assert not 1.00 + 0.01 is 1.01
assert type(hash('s')) == int
| 3.046875 | 3 |
src/adobe/pdfservices/operation/internal/api/dto/request/platform/cpf_content_analyzer_req.py | hvntravel/pdfservices-python-sdk | 2 | 12792623 | <filename>src/adobe/pdfservices/operation/internal/api/dto/request/platform/cpf_content_analyzer_req.py
# Copyright 2021 Adobe. All rights reserved.
# This file is licensed to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may obtain a copy
# of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS
# OF ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
import json
from adobe.pdfservices.operation.internal.api.dto.request.platform.engine import Engine
from adobe.pdfservices.operation.internal.api.dto.request.platform.inputs import Inputs
from adobe.pdfservices.operation.internal.api.dto.request.platform.outputs import Outputs
from adobe.pdfservices.operation.internal.util.json_hint_encoder import JSONHintEncoder
class CPFContentAnalyzerRequests:
json_hint = {
'engine' : 'cpf:engine',
'inputs' : 'cpf:inputs',
'outputs' : 'cpf:outputs'
}
def __init__(self, service_id, inputs: Inputs, outputs : Outputs):
self.engine = Engine(service_id)
self.inputs = inputs
self.outputs = outputs
def to_json(self):
return json.dumps(self, cls=JSONHintEncoder, indent=4, sort_keys=True) | 2.140625 | 2 |
tests/test-cases/basic/ssa_case-5.py | SMAT-Lab/Scalpel | 102 | 12792624 | # Imports
import os
import random
from collections import Counter, defaultdict
from itertools import combinations
from nltk.tag import StanfordNERTagger
from nltk.tokenize import word_tokenize
from nltk import pos_tag
from nltk.chunk import conlltags2tree
from nltk.tree import Tree
import pandas as pd
from htrc_features import FeatureReader
import geocoder
import folium
from pprint import pprint
from tqdm import tqdm
# Set environment variable
# Geonames requires a username to access the API but we do not want to expose personal info in code
#
# Run this locally by adding USERNAME to environment variables, e.g. to .env, as follows:
# > export USERNAME=<insert username here>
USERNAME = os.getenv('USERNAME')
# Setup Stanford NER Tagger
# Ignore deprecation warning for now; we'll deal with it when the time comes!
st = StanfordNERTagger('/usr/local/share/stanford-ner/classifiers/english.all.3class.distsim.crf.ser.gz',
'/usr/local/share/stanford-ner/stanford-ner.jar',
encoding='utf-8')
# Functions for putting together with inside-outside-beginning (IOB) logic
# Cf. https://stackoverflow.com/a/30666949
#
# For more information on IOB tagging, see https://en.wikipedia.org/wiki/Inside–outside–beginning_(tagging)
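# The helper functions referenced above are not included here; the sketch
# below reconstructs them along the lines of the linked Stack Overflow answer
# (assumed implementation, with names chosen to match the stanfordNE2tree call
# further down).
def stanfordNE2BIO(tagged_sent):
    # Convert Stanford NER (token, tag) pairs into BIO-tagged pairs.
    bio_tagged_sent = []
    prev_tag = "O"
    for token, tag in tagged_sent:
        if tag == "O":  # outside any named entity
            bio_tagged_sent.append((token, tag))
        elif prev_tag == "O" or prev_tag != tag:  # beginning of a named entity
            bio_tagged_sent.append((token, "B-" + tag))
        else:  # inside the same named entity
            bio_tagged_sent.append((token, "I-" + tag))
        prev_tag = tag
    return bio_tagged_sent

def stanfordNE2tree(ne_tagged_sent):
    # Combine the BIO entity tags with POS tags and build an nltk chunk tree.
    bio_tagged_sent = stanfordNE2BIO(ne_tagged_sent)
    sent_tokens, sent_ne_tags = zip(*bio_tagged_sent)
    sent_pos_tags = [pos for _, pos in pos_tag(list(sent_tokens))]
    sent_conlltags = [(token, pos, ne) for token, pos, ne
                      in zip(sent_tokens, sent_pos_tags, sent_ne_tags)]
    return conlltags2tree(sent_conlltags)
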
# Sample HathiTrust ID
# This is the HTID for...
# "Ancient Corinth: A guide to the excavations," <NAME>, <NAME>, and <NAME>
htid = "wu.89079728994"
# Get HTEF data for this ID; specifically tokenlist
fr = FeatureReader(ids=[htid])
for vol in fr:
tokens = vol.tokenlist()
# Create pandas dataframe with relevant data
temp = tokens.index.values.tolist()
counts = pd.DataFrame.from_records(temp, columns=['page', 'section', 'token', 'pos'])
counts['count'] = tokens['count'].tolist()
counts[:10]
# Reconstruct text using tokens and counts
text_data = list(zip(counts['token'].tolist(), counts['count'].tolist()))
# Loop through and multiply words by counts
text_list = []
for w, c in text_data:
for i in range(0, c):
text_list.append(w)
random.shuffle(text_list) # Necessary?
text_reconstruction = " ".join(text_list)
#page_words_extended = page_words+page_ner
tokens = word_tokenize(text_reconstruction)
tagged_tokens = st.tag(tokens)
tagged_tokens = [item for item in tagged_tokens if item[0] != '']
ne_tree = stanfordNE2tree(tagged_tokens)
ne_in_sent = []
for subtree in ne_tree:
if type(subtree) == Tree: # If subtree is a noun chunk, i.e. NE != "O"
ne_label = subtree.label()
ne_string = " ".join([token for token, pos in subtree.leaves()])
ne_in_sent.append((ne_string, ne_label))
locations = [tag[0].title() for tag in ne_in_sent if tag[1] == 'LOCATION']
print(locations)
most_common_locations = Counter(locations).most_common(10)
pprint(most_common_locations)
# Organize some data for map info
places_list = [name for name, _ in most_common_locations][:3] # Limit to top three
most_common_locations = dict(most_common_locations) # Turn mcl into dictionary
# Retrieve json from geonames API (for fun this time using geocoder)
geocoder_results = []
for place in places_list:
results = geocoder.geonames(place, maxRows=5, key=USERNAME)
jsons = []
for result in results:
jsons.append(result.json)
geocoder_results.append(jsons)
# Create a list of 'country' from the geonames json results
countries = []
for results in geocoder_results:
for item in results:
if 'country' in item.keys():
countries.append(item['country'])
# Determine which country appears most often
top_country = sorted(Counter(countries))[0]
print(top_country)
# Iterate over geocoder_results and keep the first lat/long that matches the top country
coordinates = []
for i, results in enumerate(geocoder_results):
for item in results:
if item['country'] == top_country:
coordinates.append((float(item['lat']), float(item['lng'])))
break # Only get the first item for now
print(places_list)
print(coordinates)
# Set up Folium and populate with weighted coordinates
basemap = folium.Map(location=[37.97945, 23.71622], zoom_start=8, tiles='cartodbpositron', width=960, height=512)
for i, c in enumerate(coordinates):
folium.CircleMarker([c[0], c[1]], radius=most_common_locations[places_list[i]]*.25, color='#3186cc',
fill=True, fill_opacity=0.5, fill_color='#3186cc',
popup='{} ({}, {}) appears {} times in book.'.format(places_list[i], c[0], c[1], most_common_locations[places_list[i]])).add_to(basemap)
print('Map of relevant locations in Broneer et al.\'s "Ancient Corinth: A guide to the excavations," weighted by frequency.')
basemap
page = 87
test = counts[counts['page'] == page]['token'].tolist()
print(test)
print(len(test))
from nltk.corpus import stopwords
stops = set(stopwords.words('english'))
pns_list = []
for i in range(1, max(counts['page'])+1):
tokens = counts[counts['page'] == i]['token'].tolist()
tokens = [token for token in tokens if token.lower() not in stops and len(token) > 2]
pns = [token for token in tokens if token[0].isupper()]
combs = [f'{x} {y}' for x, y in combinations(pns, 2)]
pns_list.extend(combs)
| 2.4375 | 2 |
virtcam/webcam.py | badmonkey/virtual-camera | 0 | 12792625 | import cv2
import virtcam.debug as debug
from virtcam.base import Frame, FrameSource, Image, Mask, StreamConfig, immutable
class Webcam(FrameSource):
def __init__(self):
super().__init__()
self.current_id = -1
self.camera = cv2.VideoCapture("/dev/video0", cv2.CAP_V4L2)
c1, c2, c3, c4 = "M", "J", "P", "G"
codec = cv2.VideoWriter_fourcc(c1, c2, c3, c4)
self.camera.set(cv2.CAP_PROP_FOURCC, codec)
camConfig = StreamConfig(
int(self.camera.get(cv2.CAP_PROP_FRAME_WIDTH)),
int(self.camera.get(cv2.CAP_PROP_FRAME_HEIGHT)),
int(self.camera.get(cv2.CAP_PROP_FPS)),
)
self._init_config(camConfig)
self.frame = Frame(self.config, Image(self.config.width, self.config.height), self.fullmask)
debug.config("Webcam:init:config", camConfig)
def grab(self) -> bool:
return True
def next(self, frame_id: int) -> Frame:
if not self.frame or self.current_id != frame_id:
grabbed = False
while not grabbed:
grabbed, image = self.camera.read()
self.frame = Frame(self.config, immutable(image), self.fullmask)
self.current_id = frame_id
# debug.frame(f"Webcam:next[{frame_id}]", self.frame)
return self.frame
| 2.421875 | 2 |
Toolkits/VCS/repology__repology-api/repology/packageproc.py | roscopecoltran/SniperKit-Core | 0 | 12792626 | <filename>Toolkits/VCS/repology__repology-api/repology/packageproc.py
# Copyright (C) 2016-2017 <NAME> <<EMAIL>>
#
# This file is part of repology
#
# repology is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# repology is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with repology. If not, see <http://www.gnu.org/licenses/>.
import sys
from functools import cmp_to_key
from repology.package import *
from repology.version import VersionCompare
def PackagesMerge(packages):
aggregated = {}
# aggregate by subrepo/name/version
# this is just to make merging faster, as packages
# with same subrepo/name/version may or may not merge
for package in packages:
key = (package.subrepo, package.name, package.version)
aggregated.setdefault(key, []).append(package)
outpkgs = []
for packages in aggregated.values():
while packages:
nextpackages = []
merged = packages[0]
for package in packages[1:]:
if not merged.TryMerge(package):
nextpackages.append(package)
outpkgs.append(merged)
packages = nextpackages
return outpkgs
def PackagesetCheckFilters(packages, *filters):
for filt in filters:
if not filt.Check(packages):
return False
return True
def FillPackagesetVersions(packages):
versions = set()
families = set()
for package in packages:
if not package.ignoreversion:
versions.add(package.version)
families.add(package.family)
bestversion = None
for version in versions:
if bestversion is None or VersionCompare(version, bestversion) > 0:
bestversion = version
for package in packages:
result = VersionCompare(package.version, bestversion) if bestversion is not None else 1
if result > 0:
package.versionclass = PackageVersionClass.ignored
elif result == 0:
# XXX: if len(families) == 1 -> PackageVersionClass.unique
package.versionclass = PackageVersionClass.newest
else:
package.versionclass = PackageVersionClass.outdated
def PackagesetToSummaries(packages):
summary = {}
state_by_repo = {}
families = set()
for package in packages:
families.add(package.family)
if package.repo not in state_by_repo:
state_by_repo[package.repo] = {
'has_outdated': False,
'bestpackage': None,
'count': 0
}
if package.versionclass == PackageVersionClass.outdated:
            state_by_repo[package.repo]['has_outdated'] = True
if state_by_repo[package.repo]['bestpackage'] is None or VersionCompare(package.version, state_by_repo[package.repo]['bestpackage'].version) > 0:
state_by_repo[package.repo]['bestpackage'] = package
state_by_repo[package.repo]['count'] += 1
for repo, state in state_by_repo.items():
resulting_class = None
# XXX: lonely ignored package is currently lonely; should it be ignored instead?
if state['bestpackage'].versionclass == PackageVersionClass.outdated:
resulting_class = RepositoryVersionClass.outdated
elif len(families) == 1:
resulting_class = RepositoryVersionClass.lonely
elif state['bestpackage'].versionclass == PackageVersionClass.newest:
if state['has_outdated']:
resulting_class = RepositoryVersionClass.mixed
else:
resulting_class = RepositoryVersionClass.newest
elif state['bestpackage'].versionclass == PackageVersionClass.ignored:
resulting_class = RepositoryVersionClass.ignored
summary[repo] = {
'version': state['bestpackage'].version,
'bestpackage': state['bestpackage'],
'versionclass': resulting_class,
'numpackages': state['count']
}
return summary
def PackagesetSortByVersions(packages):
def packages_version_cmp_reverse(p1, p2):
return VersionCompare(p2.version, p1.version)
return sorted(packages, key=cmp_to_key(packages_version_cmp_reverse))
def PackagesetToFamilies(packages):
return set([package.family for package in packages])
def PackagesetAggregateByVersions(packages):
versions = {}
for package in packages:
key = (package.version, package.versionclass)
if key not in versions:
versions[key] = []
versions[key].append(package)
def key_cmp_reverse(v1, v2):
return VersionCompare(v2[0], v1[0])
return [
{
'version': key[0],
'versionclass': key[1],
'packages': versions[key]
} for key in sorted(versions.keys(), key=cmp_to_key(key_cmp_reverse))
]
| 1.671875 | 2 |
test/afunc.py | jamesljlster/cnn | 1 | 12792627 | import numpy as np
import math
def softmax(src):
# Get size of input vector
rows, cols = src.shape
# Checking
if rows > 1:
raise Exception("Input rows > 1")
# Find softmax
expVec = np.exp(src)
return expVec / np.sum(expVec)
def softmax_derivative(src):
# Get size of input vector
rows, cols = src.shape
# Checking
if rows > 1:
raise Exception("Input rows > 1")
# Find softmax derivative
tmpVec = softmax(src)
retMat = np.zeros((cols, cols))
for i in range(cols):
for j in range(cols):
retMat[i, j] = tmpVec[0, i] * (float((i == j)) - tmpVec[0, j])
return retMat
def relu(src):
# Get size of input vector
rows, cols = src.shape
# Checking
if rows > 1:
raise Exception("Input rows > 1")
# Find relu
retVec = np.zeros((1, cols))
for i in range(cols):
retVec[0, i] = max(src[0, i], 0.0)
return retVec
def relu_derivative(src):
# Get size of input vector
rows, cols = src.shape
# Checking
if rows > 1:
raise Exception("Input rows > 1")
# Find relu derivative
retMat = np.zeros((cols, cols))
for i in range(cols):
if src[0, i] < 0.0:
retMat[i, i] = 0
else:
retMat[i, i] = 1
return retMat
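
if __name__ == '__main__':
    # Quick self-check (illustrative addition, not part of the original file):
    # compare the analytic softmax Jacobian against a central finite-difference
    # estimate; the printed maximum absolute difference should be tiny (~1e-10).
    rng = np.random.RandomState(0)
    x = rng.randn(1, 5)
    eps = 1e-6
    num_jac = np.zeros((5, 5))
    for j in range(5):
        xp = x.copy(); xp[0, j] += eps
        xm = x.copy(); xm[0, j] -= eps
        num_jac[:, j] = (softmax(xp) - softmax(xm))[0] / (2 * eps)
    print(np.max(np.abs(num_jac - softmax_derivative(x))))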
| 3.265625 | 3 |
implementation-contributed/v8/wasm-js/testcfg.py | katemihalikova/test262 | 1,849 | 12792628 | <reponame>katemihalikova/test262<gh_stars>1000+
# Copyright 2018 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import re
from testrunner.local import testsuite
from testrunner.objects import testcase
ANY_JS = ".any.js"
WPT_ROOT = "/wasm/jsapi/"
META_SCRIPT_REGEXP = re.compile(r"META:\s*script=(.*)")
class TestSuite(testsuite.TestSuite):
def __init__(self, *args, **kwargs):
super(TestSuite, self).__init__(*args, **kwargs)
self.testroot = os.path.join(self.root, "data", "test", "js-api")
self.mjsunit_js = os.path.join(os.path.dirname(self.root), "mjsunit",
"mjsunit.js")
def ListTests(self):
tests = []
for dirname, dirs, files in os.walk(self.testroot):
for dotted in [x for x in dirs if x.startswith(".")]:
dirs.remove(dotted)
dirs.sort()
files.sort()
for filename in files:
if (filename.endswith(ANY_JS)):
fullpath = os.path.join(dirname, filename)
relpath = fullpath[len(self.testroot) + 1 : -len(ANY_JS)]
testname = relpath.replace(os.path.sep, "/")
test = self._create_test(testname)
tests.append(test)
return tests
def _test_class(self):
return TestCase
class TestCase(testcase.D8TestCase):
def _get_files_params(self):
files = [os.path.join(self.suite.mjsunit_js),
os.path.join(self.suite.root, "testharness.js")]
source = self.get_source()
for script in META_SCRIPT_REGEXP.findall(source):
if script.startswith(WPT_ROOT):
# Matched an absolute path, strip the root and replace it with our
# local root.
script = os.path.join(self.suite.testroot, script[len(WPT_ROOT):])
elif not script.startswith("/"):
# Matched a relative path, prepend this test's directory.
thisdir = os.path.dirname(self._get_source_path())
script = os.path.join(thisdir, script)
else:
raise Exception("Unexpected absolute path for script: \"%s\"" % script);
files.append(script)
files.extend([
self._get_source_path(),
os.path.join(self.suite.root, "testharness-after.js")
])
return files
def _get_source_path(self):
# All tests are named `path/name.any.js`
return os.path.join(self.suite.testroot, self.path + ANY_JS)
def GetSuite(*args, **kwargs):
return TestSuite(*args, **kwargs)
| 2.015625 | 2 |
test/test_multi_args.py | Planet-AI-GmbH/paiargparse | 3 | 12792629 | <filename>test/test_multi_args.py
import unittest
from paiargparse import PAIArgumentParser, RequiredArgumentError
from test.dataclasse_setup import Level1b, Level2, Level1, Level2a
class TestPAIParser(unittest.TestCase):
def test_three_dc(self):
parser = PAIArgumentParser()
parser.add_root_argument("arg0", Level1b)
parser.add_root_argument("arg1", Level2)
parser.add_root_argument("arg2", Level2a)
parser.add_root_argument("arg3", Level1)
args = parser.parse_args(args=["--arg0.p1", "0", "--arg2.p1a", "0.5"])
self.assertIsInstance(args.arg0, Level1b)
self.assertIsInstance(args.arg1, Level2)
self.assertIsInstance(args.arg2, Level2a)
self.assertIsInstance(args.arg3, Level1)
def test_three_dc_required(self):
parser = PAIArgumentParser()
parser.add_root_argument("arg0", Level1b)
parser.add_root_argument("arg1", Level2)
parser.add_root_argument("arg2", Level2a)
parser.add_root_argument("arg3", Level1)
with self.assertRaises(RequiredArgumentError):
parser.parse_args(args=["--arg3.p1", "0", "--arg2.p1a", "0.5"])
if __name__ == "__main__":
unittest.main()
| 3.09375 | 3 |
fdm-devito-notebooks/C_softeng2/src-softeng2/wave1D_oo.py | devitocodes/devito_book | 7 | 12792630 | # -*- coding: utf-8 -*-
"""
Class implementation for solving of the wave equation
u_tt = (c**2*u_x)_x + f(x,t) with t in [0,T] and x in (0,L).
We have u=U_0 or du/dn=0 on x=0, and u=u_L or du/dn=0 on x = L.
For simplicity, we use a constant c here and compare with a
known exact solution.
"""
import time, glob, shutil, os
import numpy as np
class Parameters(object):
def __init__(self):
"""
Subclasses must initialize self.prm with
parameters and default values, self.type with
the corresponding types, and self.help with
the corresponding descriptions of parameters.
self.type and self.help are optional, but
self.prms must be complete and contain all parameters.
"""
pass
def ok(self):
"""Check if attr. prm, type, and help are defined."""
if hasattr(self, 'prm') and \
isinstance(self.prm, dict) and \
hasattr(self, 'type') and \
isinstance(self.type, dict) and \
hasattr(self, 'help') and \
isinstance(self.help, dict):
return True
else:
raise ValueError(
'The constructor in class %s does not '\
'initialize the\ndictionaries '\
'self.prm, self.type, self.help!' %
self.__class__.__name__)
def _illegal_parameter(self, name):
"""Raise exception about illegal parameter name."""
raise ValueError(
'parameter "%s" is not registered.\nLegal '\
'parameters are\n%s' %
(name, ' '.join(list(self.prm.keys()))))
def set(self, **parameters):
"""Set one or more parameters."""
for name in parameters:
if name in self.prm:
self.prm[name] = parameters[name]
else:
self._illegal_parameter(name)
def get(self, name):
"""Get one or more parameter values."""
if isinstance(name, (list,tuple)): # get many?
for n in name:
if n not in self.prm:
self._illegal_parameter(name)
return [self.prm[n] for n in name]
else:
if name not in self.prm:
self._illegal_parameter(name)
return self.prm[name]
def __getitem__(self, name):
"""Allow obj[name] indexing to look up a parameter."""
return self.get(name)
def __setitem__(self, name, value):
"""
Allow obj[name] = value syntax to assign a parameter's value.
"""
return self.set(name=value)
def define_command_line_options(self, parser=None):
self.ok()
if parser is None:
import argparse
parser = argparse.ArgumentParser()
for name in self.prm:
tp = self.type[name] if name in self.type else str
help = self.help[name] if name in self.help else None
parser.add_argument(
'--' + name, default=self.get(name), metavar=name,
type=tp, help=help)
return parser
def init_from_command_line(self, args):
for name in self.prm:
self.prm[name] = getattr(args, name)
class Problem(Parameters):
"""
Physical parameters for the wave equation
u_tt = (c**2*u_x)_x + f(x,t) with t in [0,T] and
x in (0,L). The problem definition is implied by
the method of manufactured solution, choosing
u(x,t)=x(L-x)(1+t/2) as our solution. This solution
should be exactly reproduced when c is const.
"""
def __init__(self):
self.prm = dict(L=2.5, c=1.5, T=18)
self.type = dict(L=float, c=float, T=float)
self.help = dict(L='1D domain',
c='coefficient (wave velocity) in PDE',
T='end time of simulation')
def u_exact(self, x, t):
L = self['L']
return x*(L-x)*(1+0.5*t)
def I(self, x):
return self.u_exact(x, 0)
def V(self, x):
return 0.5*self.u_exact(x, 0)
def f(self, x, t):
c = self['c']
return 2*(1+0.5*t)*c**2
def U_0(self, t):
return self.u_exact(0, t)
U_L = None
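
def _verify_manufactured_solution():
    """
    Illustrative helper (not part of the original module): symbolically check
    that Problem.f matches u_tt - (c**2*u_x)_x for the manufactured solution
    u(x,t) = x*(L-x)*(1 + t/2). Assumes sympy is available.
    """
    import sympy as sym
    x, t, L, c = sym.symbols('x t L c')
    u = x*(L - x)*(1 + t/2)
    residual = sym.diff(u, t, 2) - sym.diff(c**2*sym.diff(u, x), x)
    # residual simplifies to 2*(1 + t/2)*c**2, i.e. exactly Problem.f(x, t)
    return sym.simplify(residual)
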
class Solver(Parameters):
"""
Numerical parameters for solving the wave equation
u_tt = (c**2*u_x)_x + f(x,t) with t in [0,T] and
x in (0,L). The problem definition is implied by
the method of manufactured solution, choosing
u(x,t)=x(L-x)(1+t/2) as our solution. This solution
should be exactly reproduced, provided c is const.
We simulate in [0, L/2] and apply a symmetry condition
at the end x=L/2.
"""
def __init__(self, problem):
self.problem = problem
self.prm = dict(C = 0.75, Nx=3, stability_safety_factor=1.0)
self.type = dict(C=float, Nx=int, stability_safety_factor=float)
self.help = dict(C='Courant number',
Nx='No of spatial mesh points',
stability_safety_factor='stability factor')
from UniformFDMesh import Mesh, Function
# introduce some local help variables to ease reading
L_end = self.problem['L']
dx = (L_end/2)/float(self['Nx'])
t_interval = self.problem['T']
dt = dx*self['stability_safety_factor']*self['C']/ \
float(self.problem['c'])
self.m = Mesh(L=[0,L_end/2],
d=[dx],
Nt = int(round(t_interval/float(dt))),
T=t_interval)
# The mesh function f will, after solving, contain
# the solution for the whole domain and all time steps.
self.f = Function(self.m, num_comp=1, space_only=False)
def solve(self, user_action=None, version='scalar'):
# ...use local variables to ease reading
L, c, T = self.problem['L c T'.split()]
L = L/2 # compute with half the domain only (symmetry)
C, Nx, stability_safety_factor = self[
'C Nx stability_safety_factor'.split()]
dx = self.m.d[0]
I = self.problem.I
V = self.problem.V
f = self.problem.f
U_0 = self.problem.U_0
U_L = self.problem.U_L
Nt = self.m.Nt
t = np.linspace(0, T, Nt+1) # Mesh points in time
x = np.linspace(0, L, Nx+1) # Mesh points in space
# Make sure dx and dt are compatible with x and t
dx = x[1] - x[0]
dt = t[1] - t[0]
# Treat c(x) as array
if isinstance(c, (float,int)):
c = np.zeros(x.shape) + c
elif callable(c):
# Call c(x) and fill array c
c_ = np.zeros(x.shape)
for i in range(Nx+1):
c_[i] = c(x[i])
c = c_
q = c**2
C2 = (dt/dx)**2; dt2 = dt*dt # Help variables in the scheme
# Wrap user-given f, I, V, U_0, U_L if None or 0
if f is None or f == 0:
f = (lambda x, t: 0) if version == 'scalar' else \
lambda x, t: np.zeros(x.shape)
if I is None or I == 0:
I = (lambda x: 0) if version == 'scalar' else \
lambda x: np.zeros(x.shape)
if V is None or V == 0:
V = (lambda x: 0) if version == 'scalar' else \
lambda x: np.zeros(x.shape)
if U_0 is not None:
if isinstance(U_0, (float,int)) and U_0 == 0:
U_0 = lambda t: 0
if U_L is not None:
if isinstance(U_L, (float,int)) and U_L == 0:
U_L = lambda t: 0
# Make hash of all input data
import hashlib, inspect
data = inspect.getsource(I) + '_' + inspect.getsource(V) + \
'_' + inspect.getsource(f) + '_' + str(c) + '_' + \
('None' if U_0 is None else inspect.getsource(U_0)) + \
('None' if U_L is None else inspect.getsource(U_L)) + \
'_' + str(L) + str(dt) + '_' + str(C) + '_' + str(T) + \
'_' + str(stability_safety_factor)
hashed_input = hashlib.sha1(data).hexdigest()
if os.path.isfile('.' + hashed_input + '_archive.npz'):
# Simulation is already run
return -1, hashed_input
# use local variables to make code closer to mathematical
# notation in computational scheme
u_1 = self.f.u[0,:]
u = self.f.u[1,:]
import time; t0 = time.clock() # CPU time measurement
Ix = range(0, Nx+1)
It = range(0, Nt+1)
# Load initial condition into u_1
for i in range(0,Nx+1):
u_1[i] = I(x[i])
if user_action is not None:
user_action(u_1, x, t, 0)
# Special formula for the first step
for i in Ix[1:-1]:
u[i] = u_1[i] + dt*V(x[i]) + \
0.5*C2*(0.5*(q[i] + q[i+1])*(u_1[i+1] - u_1[i]) - \
0.5*(q[i] + q[i-1])*(u_1[i] - u_1[i-1])) + \
0.5*dt2*f(x[i], t[0])
i = Ix[0]
if U_0 is None:
# Set boundary values (x=0: i-1 -> i+1 since u[i-1]=u[i+1]
# when du/dn = 0, on x=L: i+1 -> i-1 since u[i+1]=u[i-1])
ip1 = i+1
im1 = ip1 # i-1 -> i+1
u[i] = u_1[i] + dt*V(x[i]) + \
0.5*C2*(0.5*(q[i] + q[ip1])*(u_1[ip1] - u_1[i]) - \
0.5*(q[i] + q[im1])*(u_1[i] - u_1[im1])) + \
0.5*dt2*f(x[i], t[0])
else:
u[i] = U_0(dt)
i = Ix[-1]
if U_L is None:
im1 = i-1
ip1 = im1 # i+1 -> i-1
u[i] = u_1[i] + dt*V(x[i]) + \
0.5*C2*(0.5*(q[i] + q[ip1])*(u_1[ip1] - u_1[i]) - \
0.5*(q[i] + q[im1])*(u_1[i] - u_1[im1])) + \
0.5*dt2*f(x[i], t[0])
else:
u[i] = U_L(dt)
if user_action is not None:
user_action(u, x, t, 1)
for n in It[1:-1]:
# u corresponds to u^{n+1} in the mathematical scheme
u_2 = self.f.u[n-1,:]
u_1 = self.f.u[n,:]
u = self.f.u[n+1,:]
# Update all inner points
if version == 'scalar':
for i in Ix[1:-1]:
u[i] = - u_2[i] + 2*u_1[i] + \
C2*(0.5*(q[i] + q[i+1])*(u_1[i+1] - u_1[i]) - \
0.5*(q[i] + q[i-1])*(u_1[i] - u_1[i-1])) + \
dt2*f(x[i], t[n])
elif version == 'vectorized':
u[1:-1] = - u_2[1:-1] + 2*u_1[1:-1] + \
C2*(0.5*(q[1:-1] + q[2:])*(u_1[2:] - u_1[1:-1]) -
0.5*(q[1:-1] + q[:-2])*(u_1[1:-1] - u_1[:-2])) + \
dt2*f(x[1:-1], t[n])
else:
raise ValueError('version=%s' % version)
# Insert boundary conditions
i = Ix[0]
if U_0 is None:
# Set boundary values
# x=0: i-1 -> i+1 since u[i-1]=u[i+1] when du/dn=0
# x=L: i+1 -> i-1 since u[i+1]=u[i-1] when du/dn=0
ip1 = i+1
im1 = ip1
u[i] = - u_2[i] + 2*u_1[i] + \
C2*(0.5*(q[i] + q[ip1])*(u_1[ip1] - u_1[i]) - \
0.5*(q[i] + q[im1])*(u_1[i] - u_1[im1])) + \
dt2*f(x[i], t[n])
else:
u[i] = U_0(t[n+1])
i = Ix[-1]
if U_L is None:
im1 = i-1
ip1 = im1
u[i] = - u_2[i] + 2*u_1[i] + \
C2*(0.5*(q[i] + q[ip1])*(u_1[ip1] - u_1[i]) - \
0.5*(q[i] + q[im1])*(u_1[i] - u_1[im1])) + \
dt2*f(x[i], t[n])
else:
u[i] = U_L(t[n+1])
if user_action is not None:
if user_action(u, x, t, n+1):
break
cpu_time = time.clock() - t0
return cpu_time, hashed_input
def assert_no_error(self):
"""Run through mesh and check error"""
Nx = self['Nx']
Nt = self.m.Nt
L, T = self.problem['L T'.split()]
L = L/2 # only half the domain used (symmetry)
x = np.linspace(0, L, Nx+1) # Mesh points in space
t = np.linspace(0, T, Nt+1) # Mesh points in time
for n in range(len(t)):
u_e = self.problem.u_exact(x, t[n])
diff = np.abs(self.f.u[n,:] - u_e).max()
print 'diff:', diff
tol = 1E-13
assert diff < tol
def test_quadratic_with_classes():
"""
Check the scalar and vectorized versions for a quadratic
u(x,t)=x(L-x)(1+t/2) that is exactly reproduced,
provided c(x) is constant. We simulate in [0, L/2] and
apply a symmetry condition at the end x=L/2.
"""
problem = Problem()
solver = Solver(problem)
# Read input from the command line
parser = problem.define_command_line_options()
parser = solver. define_command_line_options(parser)
args = parser.parse_args()
problem.init_from_command_line(args)
solver. init_from_command_line(args)
print parser.parse_args() # parameters ok?
solver.solve()
print 'Check error.........................'
solver.assert_no_error()
if __name__ == '__main__':
test_quadratic_with_classes()
| 3.453125 | 3 |
python/readFetiData.py | ostravaTokyo/hfls | 0 | 12792631 | import numpy as np
from scipy import sparse
import scipy.sparse.linalg as spla
import pylab as plt
from scipy.linalg import block_diag
#
#
nSub = 2
def load_matrix_basic(pathToFile,makeSparse,makeSymmetric, offset):
    # Reads a matrix from a text file: the first line is a header and is
    # skipped, the following line holds the dimensions (n, m, ...), and every
    # remaining line is a triplet "row col value" whose indices are shifted by
    # `offset` (use 1 for one-based files). The upper triangle is mirrored when
    # makeSymmetric is set; the result is a sparse COO matrix (makeSparse),
    # a dense array, or a flat vector when m == 1.
    f0 = open(pathToFile).readlines()
firstLine = f0.pop(0) #removes the first line
tmp = np.zeros((len(f0),3), dtype = float)
for i in range(len(f0)):
line = f0[i]
k = line.split()
tmp[i,0] = float(k[0])
tmp[i,1] = float(k[1])
tmp[i,2] = float(k[2])
if (tmp.shape[0]==1):
tmp = []
else:
n = np.int32(tmp[0,0])
m = np.int32(tmp[0,1])
I = tmp[1::,0]-offset;
J = tmp[1::,1]-offset;
V = tmp[1::,2]
#
# print str0,i,j
        if (makeSymmetric):
            logInd = J != I;
            # keep the original row indices so the mirrored (col, row) pairs
            # are built from consistent values
            I0 = I
            I = np.concatenate((I,J[logInd]))
            J = np.concatenate((J,I0[logInd]))
            V = np.concatenate((V,V[logInd]))
if (makeSparse):
tmp = sparse.csc_matrix((V,(I,J)),shape=(n,m)).tocoo()
else:
if (m==1):
tmp = V
else:
tmp = sparse.csc_matrix((V,(I,J)),shape=(n,m)).toarray()
return tmp
def load_matrix(path,str0,i,j,makeSparse,makeSymmetric,offset):
pathToFile = path+'/'+str(i)+'/'+str0+str(j)+'.txt' #
tmp = load_matrix_basic(pathToFile,makeSparse,makeSymmetric,offset)
return tmp
path0 = "../data"
if 1:
K = []
K_reg = []
Fc = []
R = []
Rf = []
Bc = []
Bf = []
BcT_dense = []
Gc = []
# Gf = []
Gf_p = []
Gc = []
Fc_p = []
rhs = []
xx = []
Kplus_f_test = []
KplusBcT_p = []
Bc_nonzRow = []
KplusBcT = []
BcKplus_tmp = []
# BcK_dense = []
K_UT = []
# x_out = []
# x_out_p = []
# Lumped = []
# Lumped = []
for i in range(nSub):
K.append(load_matrix(path0,"dump_K_","",str(i),False,True,1))
K_UT.append(load_matrix(path0,"dump_K_","",str(i),False,False,1))
K_reg.append(load_matrix(path0,"dump_K_reg_","",str(i),False,True,1))
Fc.append(load_matrix(path0,"dump_Fc_","",str(i),False,False,1))
R.append(load_matrix(path0,"dump_R_","",str(i),False,False,1))
Rf.append(load_matrix(path0,"dump_Rf_","",str(i),False,False,1))
Bc.append(load_matrix(path0,"dump_Bc_","",str(i),False,False,1))
Bf.append(load_matrix(path0,"dump_Bf_","",str(i),False,False,1))
Gf_p.append(np.dot(Bf[i],Rf[i]))
# Lumped.append(load_matrix(path0,"dump_Lumped_","",str(i),False,False,1))
BcT_dense.append(load_matrix(path0,"dump_BcT_dense_","",str(i),False,False,1))
Gc.append(load_matrix(path0,"dump_Gc_","",str(i),False,False,1))
# Gf.append(load_matrix(path0,"dump_Gf_","",str(i),False,False,1))
indBc = np.abs(Bc[i]).sum(axis=1)>0
Bc_nonzRow.append( Bc[i][indBc,:])
# Fc.append( np.dot(Bc_nonzRow[i], np.linalg.solve(K_reg[i],Bc_nonzRow[i].T)))
# Lumped.append( np.dot(Bc_nonzRow[i], np.dot(K[i],Bc_nonzRow[i].T)))
rhs.append(load_matrix(path0,"dump_rhs_","",str(i),False,False,1))
# xx.append(load_matrix(path0,"dump_xxTest_","",str(i),False,False,1))
# Kplus_f_test.append(load_matrix(path0,"dump_Kplus_f_test_","",str(i),False,False,1))
# KplusBcT_p = BcKplus_List[i]
# BcK_dense.append(load_matrix(path0,"dump_BcK_dense_","",str(i),False,False,1))
# BcK_dense.append(np.dot(K[i],Bc_nonzRow[i].T).T)
Gc.append(np.dot(Bc[i], R[i]))
KplusBcT.append(load_matrix(path0,"dump_KplusBcT_","",str(i),False,False,1))
KplusBcT_p.append(np.linalg.solve(K_reg[i],Bc_nonzRow[i].T))
# BcKplus_tmp.append(np.linalg.solve(K_reg[i],Bc[i].T).T)
# x_out.append(load_matrix(path0,"dump_x_out_","",str(i),False,False,1))
Fc_p.append(np.dot(Bc_nonzRow[i],KplusBcT_p[i]))
# iK_K = np.linalg.solve(K_reg[i],K[i])
# K_iK_K = np.dot(K[i],iK_K)
# del_ = np.linalg.norm(K_iK_K - K[i] ) / np.linalg.norm(K[i])
# print(del_)
#
tmp_g = np.dot(Bc[i],np.linalg.solve(K_reg[i], rhs[i]))
tmp_e = -np.dot(R[i].T,rhs[i])
if (i == 0):
g_p = tmp_g
e_p = tmp_e;
else:
g_p += tmp_g;
e_p = np.concatenate((e_p,tmp_e))
print(' ...%d '%(i))
# gc_p = np.concatenate((g_p,e_p))
# gc_p = np.concatenate((gc_p,np.zeros(6)))
Gc_clust = load_matrix(path0,"dump_Gc_clust_","",str(0),False,False,1)
Ac_clust = load_matrix(path0,"dump_Ac_clust_","",str(0),False,True,1)
Fc_clust = load_matrix(path0,"dump_Fc_clust_","",str(0),False,True,1)
ker_GcTGc = load_matrix(path0,"dump_kerGc_","",str(0),False,False,1)
# gc = load_matrix(path0,"dump_gc_","",str(0),False,False,1)
# lam_alpha = load_matrix(path0,"dump_lam_alpha_","",str(0),False,False,1)
# lam_alpha_p = np.linalg.solve(Ac_clust, gc)
# nLam = Bc[0].shape[0]
# lam_p = lam_alpha_p[0:nLam]
## alpha_p = lam_alpha[nLam:]
# for i in range(nSub):
# print (" ! %d " % (i))
# x10 = np.linalg.solve(K_reg[i],rhs[i])
# x11 = np.linalg.solve(K_reg[i],np.dot(Bc[i].T,lam_p))
#
# print alpha_p[(6*i):(6*(i+1))]
# x2 = np.dot(R[i],alpha_p[(6*i):(6*(i+1))])
#
# x_out_p.append(x10 - x11 + x2)
# print( "||x_out - x_out_p || = %e " % np.linalg.norm(x_out[i] - x_out_p[i]))
Ac_clust_python = np.hstack((Fc_clust,Gc_clust))
Z = np.zeros((Gc_clust.shape[1],Ac_clust_python.shape[1]))
print ( Z.shape)
Ac_clust_python = np.vstack((Ac_clust_python,Z))
Gf_clust = load_matrix(path0,"dump_Gf_clust_","",str(0),False,False,1)
# test = load_matrix(path0,"dump_testXYZ_","",str(0),False,False,1)
# KpOnes= load_matrix(path0,"dump_KplusONES_","",str(0),False,False,1)
#K_regD = K_reg[0]
#frhs = rhs[0]
#xxD = xx[0]
#RD = R[0]
#for i in range(1,nSub):
# K_regD = block_diag(K_regD,K_reg[i]);
# RD = block_diag(RD,R[i]);
# frhs = np.concatenate((frhs,rhs[i]))
# xxD = np.concatenate((xxD,xx[i]))
#
for i in range(nSub - 1):
if (i == 0):
Bc_g = np.hstack((Bc[0],Bc[1]))
else:
Bc_g = np.hstack((Bc_g,Bc[i+1]))
for i in range(nSub - 1):
if (i == 0):
Bf_g = np.hstack((Bf[0],Bf[1]))
else:
Bf_g = np.hstack((Bf_g,Bf[i+1]))
for i in range(nSub - 1):
if (i == 0):
Gf_g = Gf_p[0]+ Gf_p[1]
else:
Gf_g += Gf_p[i+1]
weigth = np.loadtxt(path0+'/dump_weigth.txt')
#Fc__ = np.dot(Bc_g,np.linalg.solve(K_regD,Bc_g.T))
#
#
#gc__ = np.dot(Bc_g,np.linalg.solve(K_regD,frhs))
#ec__ = - np.dot(RD.T,frhs)
#
#gc__ = np.concatenate((gc__,ec__))
#H = ker_GcTGc
#AA0 = np.hstack((Fc__,Gc_clust))
#AB1 =
#
#
#ZZ1 = np.zeros((Gc_clust.shape[0], H.shape[1]))
#AA1 = np.vstack((ZZ1,H))
#AA01 = np.hstack((AA0,AA1))
#A0 = np.hstack((K_regD,Bc_g.T))
#
#nB = Bc_g.shape[0]
#Bc_Z = np.hstack((Bc_g,np.zeros((nB,nB))))
#
#crhs = np.zeros(nB);
#
#A = np.vstack((A0,Bc_Z))
#
#b = np.concatenate((frhs,crhs))
#
#x = np.linalg.solve(A,b)
#
#xxD = np.concatenate((xxD,crhs))
#Bc_g = np.hstack((Bc_g,Bc[2]))
#Bc_g = np.hstack((Bc_g,Bc[2]))
#BcT_dense = load_matrix(path0,"dump_BcT_dense_","",str(0),True,True,1)
#Fc_clust = load_matrix(path0,"dump_Fc_clust_","",str(0),True,True,1)
#Ac_clust = load_matrix(path0,"dump_Ac_clust_","",str(0),True,True,1)
#GcTGc = load_matrix(path0,"dump_GcTGc_clust_","",str(0),False,True,1)
#GfTGf = load_matrix(path0,"dump_GfTGf_","",str(0),False,False,1)
#iGfTGf = load_matrix(path0,"dump_iGfTGf_","",str(0),False,False,1)
#ker_Ac = load_matrix(path0,"dump_ker_Ac_","",str(0),False,False,1)
##KpBcT0 = load_matrix(path0,"dump_KplusBcT_","",str(0),False,False,1)
##KpBcT1 = load_matrix(path0,"dump_KplusBcT_","",str(1),False,False,1)
#
#
#dFc_eig = load_matrix(path0,"dump_Fc_clust_","",str(444),False,False,1)
##dFc_svd = load_matrix(path0,"dump_Fc_clust_","",str(555),False,False,1)
#dAc_eig = load_matrix(path0,"dump_Ac_clust_","",str(444),False,False,1)
##dAc_svd = load_matrix(path0,"dump_Ac_clust_","",str(555),False,False,1)
#
#
#GfTGf_ = np.zeros((GfTGf.shape[0],GfTGf.shape[0]))
#
#
#
#
#
#
#for d in range(nSub):
# GfTGf_ += np.dot(Gf[d].T,Gf[d])
#
#
#
#
#if False:
# plt.subplot(1,3,1)
# if GcTGc.shape[0] < 100:
# markersize_ = 3
# else:
# markersize_ = 0.7
# plt.spy(GcTGc, markersize=markersize_)
# plt.xlabel("nnz = %d" % (GcTGc.nonzero()[0].shape[0]))
# plt.subplot(1,3,2)
# if Fc_clust.shape[0] < 100:
# markersize = 3
# else:
# markersize = 0.7
# plt.spy(Fc_clust, markersize=markersize_)
# plt.xlabel("nnz = %d" % (Fc_clust.nonzero()[0].shape[0]))
# plt.subplot(1,3,3)
# if Ac_clust.shape[0] < 100:
# markersize_ = 3
# else:
# markersize_ = 0.7
# plt.spy(Ac_clust, markersize=markersize_)
# plt.xlabel("nnz = %d" % (Ac_clust.nonzero()[0].shape[0]))
# plt.show()
#
##Bc_from_Rt = []
##for i in range(1,14):
## Bc_from_Rt.append( load_matrix(path0,"dump_Bc_from_Rt_","",str(i),False,False,1) )
##
#
## Gc_ = load_matrix(path0,"dump_Gc_i_","",str(0),False,False,1)
#
#
#
#
##BcT_dense = load_matrix(path0,"dump_BcT_dense_","",str(0),True,True,1)
#
#
#K_test= []
#Kplus_K_test = []
#K_Kplus_K_test = []
#K_reg_test = []
#K_reg_SF = []
#x_test = []
#
#
#for i in range(4):
#
# K_test.append(load_matrix(path0,"dump_K_dense_","",str(i),False,True,1))
# K_reg_test.append(load_matrix(path0,"dump_K_reg_","",str(i),False,True,1))
# K_reg_SF.append(load_matrix(path0,"dump_K_reg_SF_","",str(i),False,True,1))
# Kplus_K_test.append(load_matrix(path0,"dump_Kplus_K_","",str(i),False,False,1))
# K_Kplus_K_test.append(load_matrix(path0,"dump_K_Kplus_K_","",str(i),False,False,1))
#
# #KKpK = np.dot(K_test[i], np.linalg.solve(K_reg_test[i],K_test[i]))
# KKpK = np.dot(K[i], np.linalg.solve(K_reg[i],K[i]))
# print "norm = %3.8e \n" % np.linalg.norm(KKpK - K[i])
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
##plt.spy(Fc_clust,markersize = .8);plt.show()
#
##Gc_ = load_matrix(path0,"dump_Gc_i_","",str(0),True,True,1)
#
#
#
##r = sparse.csgraph.reverse_cuthill_mckee(Ac_clust.tocsr(), symmetric_mode=True)
##Ac_clust = Ac_clust.toarray()
###
##P,L,U= scipy.linalg.lu(Ac_clust)
##nnz0 = L.nonzero()[0].shape[0] + U.nonzero()[0].shape[0]
##
##
###
###
##AcR = Ac_clust[np.ix_(r,r)]
##PR,LR,UR = scipy.linalg.lu(AcR)
##nnzR = LR.nonzero()[0].shape[0] + UR.nonzero()[0].shape[0]
###
###
##plt.subplot(2,2,1)
##plt.spy(L,markersize=0.1);
##plt.subplot(2,2,2)
##plt.spy(U,markersize=0.1);
##plt.subplot(2,2,3)
##plt.spy(LR,markersize=0.1);
##plt.subplot(2,2,4)
##plt.spy(UR,markersize=0.1);
#
##print ("nnz = %d, nnz(reordered) = %d ") % (nnz0, nnzR)
#
#
##plt.show()
#
##ker_Ac = load_matrix(path0,"dump_ker_Ac_","",str(0),False,True,1)
##ker_GcTGc = load_matrix(path0,"dump_ker_GcTGc_","",str(0),False,True,1)
##R0 = load_matrix(path0,"dump_R_","",str(0),False,True,1)
#
##Gc_H = np.dot(GcTGc.toarray(),ker_GcTGc)
#
##r = sparse.csgraph.reverse_cuthill_mckee(Ac_clust.tocsr(), symmetric_mode=True)
##Ac = Ac_clust.toarray()[np.ix_(r,r)]
##plt.subplot(1,2,1)
##plt.spy(Ac_clust ,markersize = 2.0)
##plt.subplot(1,2,2)
##plt.spy(Ac,markersize = 0.125)
#
#
#
##Fc_python_List = []
#
##if 0:
## Fc_clust_python = np.zeros((Bct_list[i].shape[0], Bct_list[i].shape[0]))
## for i in range(nSub):
## Bc = Bct_list[i].toarray()
## indBc = np.abs(Bc).sum(axis=1)>0
## Bc_red = Bc[indBc,:]
## BcKplus = BcKplus_List[i]
##
## Bf = Bf_List[i].toarray()
## indBf = np.abs(Bf).sum(axis=1)>0
## Bf_red = Bf[indBf,:]
##
## Rc = RList[i].toarray()
##
##
##
## if (i == 0):
## Gf_clust_python = np.dot(Bf,Rc)
## Gc_clust_python = np.dot(Bc,Rc)
## else:
## Gf_clust_python = np.hstack((Gf_clust_python,np.dot(Bf,Rc)))
## Gc_clust_python = np.hstack((Gc_clust_python,np.dot(Bc,Rc)))
## indBcKplus = np.abs(BcKplus).sum(axis=1)>0
## BcKplus = BcKplus[indBcKplus,:]
## BcKplus_python = np.linalg.solve(K_reg_List[i],Bc_red.T)
## BcKplus_ = np.linalg.solve(K_reg_List[i],Bc.T)
## Fc_i = np.dot(Bc_red,BcKplus_python)
## Fc_clust_python += np.dot(Bc,BcKplus_)
## Fc_python_List.append(Fc_i)
##
## for ii in range(nSub):
##
## ttt = Gc_List[ii][np.abs(Gc_List[ii]).sum(axis=1)>0,:] - GcList[ii]
## print np.linalg.norm(ttt)
##
##
## for ii in range(nSub):
## ddd0 = np.linalg.norm(Fc_python_List[ii] - Fc_List[ii])
## ddd1 = np.linalg.norm(Fc_python_List[ii])
## print "|Fc_python - Fc_myAp|/|Fc_python|",ddd0 / ddd1
##
##
## Fc_clust = load_matrix(path0,"dump_Fc_clust_","",0,False,True,1)
## Gc_clust = load_matrix(path0,"dump_Gc_clust_","",0,False,False,1)
## Gf_clust = load_matrix(path0,"dump_Gf_clust_","",0,False,False,1)
## Ac_clust = load_matrix(path0,"dump_Ac_clust_","",0,False,True,1)
## Ac_clust_python = np.hstack((Fc_clust_python,Gc_clust_python))
##
## Z = np.zeros((Gc_clust_python.shape[1],Ac_clust.shape[1]))
## print ( Z.shape)
## Ac_clust_python = np.vstack((Ac_clust_python,Z))
##
##
## ddd0 = np.linalg.norm(Fc_clust - Fc_clust_python)
## ddd1 = np.linalg.norm(Fc_clust)
## print "|Fc_clust_python - Fc_clust_myAp|/|Fc_clust_python|",ddd0 / ddd1
##
## ddd0 = np.linalg.norm(Gc_clust - Gc_clust_python)
## ddd1 = np.linalg.norm(Gc_clust)
## print "|Gc_clust_python - Gc_clust_myAp|/|Gc_clust_python|",ddd0 / ddd1
##
## ddd0 = np.linalg.norm(Gf_clust - Gf_clust_python)
## ddd1 = np.linalg.norm(Gf_clust)
## print "|Gf_clust_python - Gf_clust_myAp|/|Gf_clust_python|",ddd0 / ddd1
##
## ddd0 = np.linalg.norm(Ac_clust - Ac_clust_python)
## ddd1 = np.linalg.norm(Ac_clust)
## print "|Ac_clust_python - Ac_clust_myAp|/|Ac_clust_python|",ddd0 / ddd1
##
##K = []
#
#
#
##plt.subplot(1,2,1)
##plt.spy(Gf_clust_python,markersize=1)
##plt.subplot(1,2,2)
##plt.spy(Gf_clust,markersize=1)
##plt.show()
| 2.625 | 3 |
grackle/cooling_cell_plot.py | diamondjems016/galaxy_analysis | 1 | 12792632 | from galaxy_analysis.plot.plot_styles import *
import matplotlib.pyplot as plt
import os, sys
import numpy as np
from mpl_toolkits.axes_grid1 import make_axes_locatable
def bins_from_centers(x):
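    # Convert an array of bin centers into bin edges by stepping half of the local
    # spacing outward on each side (the end spacings are copied from their neighbours).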
xnew = np.zeros(len(x) + 1)
dx = np.zeros(len(x) + 1)
dx[1:-1] = x[1:] - x[:-1]
dx[0] = dx[1]
dx[-1] = dx[-2]
xnew[:-1] = x - 0.5*dx[:-1]
xnew[-1] = x[-1] + 0.5*dx[-1]
return xnew
def plot_2d_histogram(datafile = 'all_runs_d_12.20.dat'):
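    # Data file columns: k27 (H- photodetachment scale factor), LW scale factor,
    # f_H2, temperature. A logarithmically spaced mesh is built from the k27 and LW
    # columns and two pcolormesh maps are written out: fH2.png (log f_H2 with
    # contours) and T.png (log temperature).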
ylabel = r'log(H$^{-}$ Photodetachment Scale Factor)'
xlabel = "log(LW Scale Factor)"
data = np.genfromtxt(datafile) # names = True)
k27 = data[:,0]
LW = data[:,1]
k27_centers = np.linspace(np.log10(np.min(k27)), np.log10(np.max(k27)),
int(np.sqrt(np.size(k27) )))
k27_vals = bins_from_centers(k27_centers)
LW_centers = np.linspace(np.log10(np.min(LW)), np.log10(np.max(LW)),
int(np.sqrt(np.size(LW))))
LW_vals = bins_from_centers(LW_centers)
k27_mesh, LW_mesh = np.meshgrid(LW_vals, k27_vals)
k27_center_mesh, LW_center_mesh = np.meshgrid(LW_centers, k27_centers)
#f_H2[data['k27'] == 1.58489319] = 100.0 # flag to figure out orientation
f_H2 = data[:,2]
z_mesh = f_H2.reshape( int(np.sqrt(np.size(k27))), int(np.sqrt(np.size(LW))))
#z_mesh = z[:-1,:-1]
fig, ax = plt.subplots()
fig.set_size_inches(8,8)
img1 = ax.pcolormesh(10.0**(LW_mesh),
10.0**(k27_mesh),
np.log10(z_mesh.T), cmap = 'magma',
vmin = -9,
vmax = -2.8)
ax.semilogx()
ax.semilogy()
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
divider = make_axes_locatable(ax)
cax1 = divider.append_axes('right', size = '5%', pad = 0.05)
fig.colorbar(img1, cax=cax1, label = r'log(f$_{\rm H_2}$)')
ax.contour( 10.**(LW_center_mesh), 10.0**(k27_center_mesh), np.log10(z_mesh.T),
levels = [-8,-7,-6,-5,-4,-3], colors = 'black',
linewidths = 3, linestyles = '-.')
ax.scatter( [1,1,100,100], [1,100,1,100], s = 250, marker = "*", color = "white")
plt.minorticks_on()
plt.tight_layout(h_pad = 0, w_pad = 0.05)
fig.savefig("fH2.png")
plt.close()
    temp = data[:,3]
    z_mesh = temp.reshape( int(np.sqrt(np.size(k27))), int(np.sqrt(np.size(LW))))
#z_mesh = z[:-1,:-1]
fig, ax = plt.subplots()
fig.set_size_inches(8,8)
img1 = ax.pcolormesh(10.0**(LW_mesh),
10.0**(k27_mesh),
np.log10(z_mesh.T), cmap = 'RdYlBu_r',
vmin = np.min(np.log10(z_mesh)),
vmax = np.max(np.log10(z_mesh)))
ax.semilogx()
ax.semilogy()
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
divider = make_axes_locatable(ax)
cax1 = divider.append_axes('right', size = '5%', pad = 0.05)
fig.colorbar(img1, cax=cax1, label = r'log(Temperature [K])')
plt.minorticks_on()
plt.tight_layout(h_pad = 0, w_pad = 0.05)
fig.savefig("T.png")
plt.close()
return
if __name__ == "__main__":
plot_2d_histogram( datafile = str(sys.argv[1]))
| 2.34375 | 2 |
pybamm/models/submodels/thermal/x_lumped/__init__.py | jedgedrudd/PyBaMM | 1 | 12792633 | <gh_stars>1-10
from .base_x_lumped import BaseModel
from .x_lumped_no_current_collectors import NoCurrentCollector
from .x_lumped_0D_current_collectors import CurrentCollector0D
from .x_lumped_1D_current_collectors import CurrentCollector1D
from .x_lumped_2D_current_collectors import CurrentCollector2D
| 1.023438 | 1 |
dickserv/modules/weather.py | acidvegas/dickserv | 14 | 12792634 | #!/usr/bin/env python
# DickServ IRC Bot - Developed by acidvegas in Python (https://acid.vegas/dickserv)
# weather.py
import httplib
import config
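# NOTE: `httplib` here is presumably the bot's own HTTP helper (the stdlib module has no
# get_json); config.api.wunderground_api_key must hold a valid Wunderground API key.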
def lookup(zip_code):
api = httplib.get_json('http://api.wunderground.com/api/{0}/conditions/q/{1}.json'.format(config.api.wunderground_api_key, zip_code))
if 'error' not in api:
city = api['current_observation']['display_location']['city']
state = api['current_observation']['display_location']['state']
country = api['current_observation']['display_location']['country']
weather = api['current_observation']['weather']
temp = api['current_observation']['temp_f']
return 'The weather for {0}, {1}, {2} is {3} at {4} F'.format(city, state, country, weather, temp)
else:
return False
| 2.6875 | 3 |
part-3/1-dictionaries/exercise-3.py | boconlonton/python-deep-dive | 0 | 12792635 | """
- You have text data spread across multiple servers. Each server is able to analyze this data and return a dictionary
that contains words and their frequency.
- Your job is to combine this data to create a single dictionary that contains all the words and their combined
frequencies from all these data sources
- Bonus points if you can make your dictionary sorted by frequency
"""
from itertools import chain
def combined_data(*dictionary):
d = dict()
for dict_temp in dictionary:
for key in dict_temp:
d[key] = d.setdefault(key, 0) + dict_temp[key]
return {
k: v
for k, v in sorted(d.items(), key=lambda x: x[1], reverse=True)
}
def combined_data1(*dictionary):
d = dict()
for dict_temp in dictionary:
for key in dict_temp:
d[key] = d.setdefault(key, 0) + dict_temp[key]
return dict(sorted(d.items(), key=lambda x: x[1], reverse=True))
d1 = {
'python': 10,
'java': 3,
'c#': 8,
'javascript': 15
}
d2 = {
'java': 10,
'c++': 10,
'c#': 4,
'go': 9,
'python': 6
}
d3 = {
'erlang': 5,
'haskell': 2,
'python': 1,
'pascal': 1
}
d = combined_data(d1, d2, d3)
print(d)
d1 = combined_data1(d1, d2, d3)
print(d1)
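# Both calls should print the same merged, frequency-sorted result:
# {'python': 17, 'javascript': 15, 'java': 13, 'c#': 12, 'c++': 10, 'go': 9, 'erlang': 5, 'haskell': 2, 'pascal': 1}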
| 3.953125 | 4 |
codebuild/create-projects.py | jtojnar/aws-crt-cpp | 53 | 12792636 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0.
import argparse
import boto3
# Parse required options
parser = argparse.ArgumentParser(description='Creates all required AWS CodeBuild projects for a repo')
parser.add_argument('project', type=str, help='The name of the repo to create the projects for')
parser.add_argument('--github-account', type=str, dest='github_account', default='awslabs', help='The GitHub account that owns the repo')
parser.add_argument('--profile', type=str, default='default', help='The profile in ~/.aws/credentials to use when creating the jobs')
args = parser.parse_args()
# The template for the arguments to be passed to create_project
CREATE_PARAM_TEMPLATE = {
'name': '{project}-{build}',
'source': {
'type': 'GITHUB',
'location': 'https://github.com/{account}/{project}.git',
'gitCloneDepth': 1,
'buildspec': 'codebuild/{build}.yml',
'auth': {
'type': 'OAUTH',
},
'reportBuildStatus': True,
},
'artifacts': {
'type': 'NO_ARTIFACTS',
},
'environment': None,
'serviceRole': 'arn:aws:iam::123124136734:role/CodeBuildServiceRole',
'badgeEnabled': False,
}
# The common environment objects to feed to CodeBuild
ENVIRONMENTS = {
'linux': {
'type': 'LINUX_CONTAINER',
'image': 'aws/codebuild/ubuntu-base:14.04',
'computeType': 'BUILD_GENERAL1_SMALL',
'environmentVariables': [],
'privilegedMode': False,
},
'windows-2017': {
'type': 'WINDOWS_CONTAINER',
'image': '123124136734.dkr.ecr.us-east-1.amazonaws.com/codebulid-windows-vs-2017:latest',
'computeType': 'BUILD_GENERAL1_MEDIUM',
'environmentVariables': [],
'privilegedMode': False,
},
'windows-2015': {
'type': 'WINDOWS_CONTAINER',
'image': '123124136734.dkr.ecr.us-east-1.amazonaws.com/codebulid-windows-vs-2015:latest',
'computeType': 'BUILD_GENERAL1_MEDIUM',
'environmentVariables': [],
'privilegedMode': False,
},
}
# The list of all of our build configs paired with their environments
BUILD_CONFIGS = [
{
'build': 'linux-clang3-x64',
'env': 'linux'
},
{
'build': 'linux-clang6-x64',
'env': 'linux',
'privileged': True
},
{
'build': 'linux-gcc-4x-x64',
'env': 'linux'
},
{
'build': 'linux-gcc-4x-x86',
'env': 'linux'
},
{
'build': 'linux-gcc-5x-x64',
'env': 'linux'
},
{
'build': 'linux-gcc-6x-x64',
'env': 'linux'
},
{
'build': 'linux-gcc-7x-x64',
'env': 'linux'
},
{
'build': 'windows-msvc-2017',
'env': 'windows-2017'
},
{
'build': 'windows-msvc-2015',
'env': 'windows-2015'
},
{
'build': 'windows-msvc-2015-x86',
'env': 'windows-2015'
},
]
# Fully populate the BUILDS list with all final build objects
BUILDS = {}
for config in BUILD_CONFIGS:
build_name = config['build']
build = dict(CREATE_PARAM_TEMPLATE)
env = dict(ENVIRONMENTS[config['env']])
if 'privileged' in config:
env['privilegedMode'] = config['privileged']
build['environment'] = env
sub_params = {
'project': args.project,
'build': build_name,
'account': args.github_account,
}
# Replace all templates with the values above
def do_replace(obj):
if isinstance(obj, dict):
for key, value in obj.items():
obj[key] = do_replace(value)
return obj
elif isinstance(obj, str):
return obj.format(**sub_params)
else:
return obj
do_replace(build)
BUILDS['{}-{}'.format(args.project, build_name)] = build
# Connect to codebuild
session = boto3.Session(profile_name=args.profile, region_name='us-east-1')
codebuild = session.client('codebuild')
# Find out which projects already exist and should be updated, and which must be created
all_project_names = list(BUILDS.keys())
existing_projects = codebuild.batch_get_projects(names=all_project_names)
new_projects = existing_projects['projectsNotFound']
existing_projects = [project['name'] for project in existing_projects['projects']]
# Actually create the projects
for build_name, build in BUILDS.items():
if build_name in new_projects:
print('{}: Creating'.format(build_name))
codebuild.create_project(**build)
codebuild.create_webhook(projectName=build_name)
elif build_name in existing_projects:
print('{}: Updating'.format(build_name))
codebuild.update_project(**build)
else:
assert False
| 2.375 | 2 |
gen_models/train_attentive_vae.py | yifan-you-37/rl_swiss | 56 | 12792637 | # Imports ---------------------------------------------------------------------
# Python
import argparse
import joblib
import yaml
import os.path as osp
from collections import defaultdict
import joblib
import os
# PyTorch
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch import autograd
from torch.optim import Adam
# NumPy
import numpy as np
from numpy import array
from numpy.random import choice, randint
# Model Building
from gen_models.attentive_vae import AttentiveVAE
import rlkit.torch.pytorch_util as ptu
# Data
from observations import multi_mnist
from torch.utils.data import DataLoader, TensorDataset
# Logging
from rlkit.core import logger
from rlkit.launchers.launcher_util import setup_logger, set_seed
from rlkit.core.vistools import generate_gif, save_pytorch_tensor_as_img
import sys
def experiment(exp_specs):
ptu.set_gpu_mode(exp_specs['use_gpu'])
# Set up logging ----------------------------------------------------------
exp_id = exp_specs['exp_id']
exp_prefix = exp_specs['exp_name']
seed = exp_specs['seed']
set_seed(seed)
setup_logger(exp_prefix=exp_prefix, exp_id=exp_id, variant=exp_specs)
# Prep the data -----------------------------------------------------------
path = 'junk_vis/debug_att_vae_shallower_48_64_dim_0p1_kl_stronger_seg_conv'
(X_train, Y_train), (X_test, Y_test) = multi_mnist(path, max_digits=2, canvas_size=48, seed=42, use_max=False)
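    # Each label in Y_* is an array of digit labels; its length (0, 1 or 2 digits on
    # the canvas) is encoded below as a 2-dim presence indicator.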
convert_dict = {0: [0.,0.], 1: [1.,0.], 2: [1.,1.]}
Num_train = np.array([convert_dict[a.shape[0]] for a in Y_train])
Num_test = np.array([convert_dict[a.shape[0]] for a in Y_test])
X_train = X_train[:,None,...]
X_test = X_test[:,None,...]
X_train, X_test = torch.FloatTensor(X_train)/255.0, torch.FloatTensor(X_test)/255.0
mask_train, mask_test = torch.FloatTensor(Num_train), torch.FloatTensor(Num_test)
    train_ds = TensorDataset(X_train, mask_train)
    val_ds = TensorDataset(X_test, mask_test)
# Model Definition --------------------------------------------------------
model = AttentiveVAE(
[1, 48, 48],
exp_specs['vae_specs']['z_dim'],
exp_specs['vae_specs']['x_encoder_specs'],
exp_specs['vae_specs']['z_seg_conv_specs'],
exp_specs['vae_specs']['z_seg_fc_specs'],
exp_specs['vae_specs']['z_obj_conv_specs'],
exp_specs['vae_specs']['z_obj_fc_specs'],
exp_specs['vae_specs']['z_seg_recon_fc_specs'],
exp_specs['vae_specs']['z_seg_recon_upconv_specs'],
exp_specs['vae_specs']['z_obj_recon_fc_specs'],
exp_specs['vae_specs']['z_obj_recon_upconv_specs'],
exp_specs['vae_specs']['recon_upconv_part_specs']
)
if ptu.gpu_enabled():
model.cuda()
# Optimizer ---------------------------------------------------------------
model_optim = Adam(model.parameters(), lr=float(exp_specs['model_lr']), weight_decay=float(exp_specs['model_wd']))
# -------------------------------------------------------------------------
global_iter = 0
for epoch in range(exp_specs['epochs']):
train_loader = DataLoader(train_ds, batch_size=exp_specs['batch_size'], shuffle=True, num_workers=4, pin_memory=False, drop_last=True)
for iter_num, img_batch in enumerate(train_loader):
img_batch, num_batch = img_batch[0], img_batch[1]
if ptu.gpu_enabled(): img_batch = img_batch.cuda()
what_means, what_log_covs, where_means, where_log_covs, masks, recon_mean, recon_log_cov = model(img_batch, num_batch)
elbo, KL = model.compute_ELBO(
what_means + where_means,
what_log_covs + where_log_covs,
recon_mean,
recon_log_cov,
img_batch,
average_over_batch=True
)
loss = -1. * elbo
loss = loss + 1. * sum([m.mean() for m in masks])
            model_optim.zero_grad()
            loss.backward()
            model_optim.step()
if global_iter % exp_specs['freq_val'] == 0:
with torch.no_grad():
print('\nValidating Iter %d...' % global_iter)
model.eval()
idxs = np.random.choice(int(X_test.size(0)), size=exp_specs['batch_size'], replace=False)
img_batch, num_batch = X_test[idxs], Num_test[idxs]
if ptu.gpu_enabled(): img_batch = img_batch.cuda()
what_means, what_log_covs, where_means, where_log_covs, masks, recon_mean, recon_log_cov = model(img_batch, num_batch)
elbo, KL = model.compute_ELBO(
what_means + where_means,
what_log_covs + where_log_covs,
recon_mean,
recon_log_cov,
img_batch,
average_over_batch=True
)
mse = ((recon_mean - img_batch)**2).mean()
print('ELBO:\t%.4f' % elbo)
print('MSE:\t%.4f' % mse)
print('KL:\t%.4f' % KL)
for i in range(1):
save_pytorch_tensor_as_img(img_batch[i].data.cpu(), os.path.join(path, '%d_%d_img.png'%(global_iter, i)))
save_pytorch_tensor_as_img(recon_mean[i].data.cpu(), os.path.join(path, '%d_%d_recon.png'%(global_iter, i)))
save_pytorch_tensor_as_img(masks[0][i].data.cpu(), os.path.join(path, '%d_%d_mask_0.png'%(global_iter, i)))
# save_pytorch_tensor_as_img(masks[1][i].data.cpu(), os.path.join(path, '%d_%d_mask_1.png'%(global_iter, i)))
model.train()
global_iter += 1
if __name__ == '__main__':
# Arguments
parser = argparse.ArgumentParser()
parser.add_argument('-e', '--experiment', help='experiment specification file')
args = parser.parse_args()
with open(args.experiment, 'r') as spec_file:
spec_string = spec_file.read()
exp_specs = yaml.load(spec_string)
experiment(exp_specs)
| 1.90625 | 2 |
minder_utils/util/decorators/file_func.py | alexcapstick/minder_utils | 0 | 12792638 | <filename>minder_utils/util/decorators/file_func.py
from functools import wraps
from minder_utils.util.util import save_mkdir, save_file, load_file
from minder_utils.util.util import reformat_path
class load_save:
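    """
    Decorator that caches the wrapped function's return value on disk under
    save_path/save_name (the function's __name__ by default). Later calls load the
    cached file instead of re-running the function, unless refresh=True forces
    recomputation.
    """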
def __init__(self, save_path, save_name=None, verbose=True, refresh=False):
self.save_path = reformat_path(save_path)
self.file_name = save_name
self.verbose = verbose
self.refresh = refresh
def __call__(self, func):
self.file_name = func.__name__ if self.file_name is None else self.file_name
@wraps(func)
def wrapped_function(*args, **kwargs):
if self.refresh:
self.print_func(func, 'start to refresh the data')
data = func(*args, **kwargs)
save_file(data, self.save_path, self.file_name)
else:
try:
data = load_file(self.save_path, self.file_name)
self.print_func(func, 'loading processed data')
except FileNotFoundError:
save_mkdir(self.save_path)
self.print_func(func, 'processing the data')
data = func(*args, **kwargs)
save_file(data, self.save_path, self.file_name)
return data
return wrapped_function
def print_func(self, func, message):
if self.verbose:
print(str(func.__name__).ljust(20, ' '), message) | 2.5 | 2 |
causal_dcgan/models.py | karhankaan/CausalGAN | 0 | 12792639 | <gh_stars>0
import tensorflow as tf
import numpy as np
slim = tf.contrib.slim
import math
from causal_dcgan.ops import lrelu,linear,conv_cond_concat,batch_norm,add_minibatch_features
from causal_dcgan.ops import conv2d,deconv2d
def conv_out_size_same(size, stride):
return int(math.ceil(float(size) / float(stride)))
def GeneratorCNN( z, config, reuse=None):
'''
maps z to a 64x64 images with values in [-1,1]
uses batch normalization internally
'''
#trying to get around batch_size like this:
batch_size=tf.shape(z)[0]
#batch_size=tf.placeholder_with_default(64,[],'bs')
with tf.variable_scope("generator",reuse=reuse) as vs:
g_bn0 = batch_norm(name='g_bn0')
g_bn1 = batch_norm(name='g_bn1')
g_bn2 = batch_norm(name='g_bn2')
g_bn3 = batch_norm(name='g_bn3')
s_h, s_w = config.gf_dim, config.gf_dim#64,64
s_h2, s_w2 = conv_out_size_same(s_h, 2), conv_out_size_same(s_w, 2)
s_h4, s_w4 = conv_out_size_same(s_h2, 2), conv_out_size_same(s_w2, 2)
s_h8, s_w8 = conv_out_size_same(s_h4, 2), conv_out_size_same(s_w4, 2)
s_h16, s_w16 = conv_out_size_same(s_h8, 2), conv_out_size_same(s_w8, 2)
# project `z` and reshape
z_, self_h0_w, self_h0_b = linear(
z, config.gf_dim*8*s_h16*s_w16, 'g_h0_lin', with_w=True)
self_h0 = tf.reshape(
z_, [-1, s_h16, s_w16, config.gf_dim * 8])
h0 = tf.nn.relu(g_bn0(self_h0))
h1, h1_w, h1_b = deconv2d(
h0, [batch_size, s_h8, s_w8, config.gf_dim*4], name='g_h1', with_w=True)
h1 = tf.nn.relu(g_bn1(h1))
h2, h2_w, h2_b = deconv2d(
h1, [batch_size, s_h4, s_w4, config.gf_dim*2], name='g_h2', with_w=True)
h2 = tf.nn.relu(g_bn2(h2))
h3, h3_w, h3_b = deconv2d(
h2, [batch_size, s_h2, s_w2, config.gf_dim*1], name='g_h3', with_w=True)
h3 = tf.nn.relu(g_bn3(h3))
h4, h4_w, h4_b = deconv2d(
h3, [batch_size, s_h, s_w, config.c_dim], name='g_h4', with_w=True)
out=tf.nn.tanh(h4)
variables = tf.contrib.framework.get_variables(vs)
return out, variables
def DiscriminatorCNN(image, config, reuse=None):
'''
Discriminator for GAN model.
image : batch_size x 64x64x3 image
config : see causal_dcgan/config.py
reuse : pass True if not calling for first time
returns: probabilities(real)
: logits(real)
: first layer activation used to estimate z from
: variables list
'''
with tf.variable_scope("discriminator",reuse=reuse) as vs:
d_bn1 = batch_norm(name='d_bn1')
d_bn2 = batch_norm(name='d_bn2')
d_bn3 = batch_norm(name='d_bn3')
if not config.stab_proj:
h0 = lrelu(conv2d(image, config.df_dim, name='d_h0_conv'))#16,32,32,64
else:#method to restrict disc from winning
#I think this is equivalent to just not letting disc optimize first layer
#and also removing nonlinearity
#k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02,
#paper used 8x8 kernel, but I'm using 5x5 because it is more similar to my achitecture
#n_projs=config.df_dim#64 instead of 32 in paper
n_projs=config.n_stab_proj#64 instead of 32 in paper
print("WARNING:STAB_PROJ active, using ",n_projs," projections")
w_proj = tf.get_variable('w_proj', [5, 5, image.get_shape()[-1],n_projs],
initializer=tf.truncated_normal_initializer(stddev=0.02),trainable=False)
conv = tf.nn.conv2d(image, w_proj, strides=[1, 2, 2, 1], padding='SAME')
b_proj = tf.get_variable('b_proj', [n_projs],#does nothing
initializer=tf.constant_initializer(0.0),trainable=False)
h0=tf.nn.bias_add(conv,b_proj)
h1_ = lrelu(d_bn1(conv2d(h0, config.df_dim*2, name='d_h1_conv')))#16,16,16,128
h1 = add_minibatch_features(h1_, config.df_dim)
h2 = lrelu(d_bn2(conv2d(h1, config.df_dim*4, name='d_h2_conv')))#16,16,16,248
h3 = lrelu(d_bn3(conv2d(h2, config.df_dim*8, name='d_h3_conv')))
#print('h3shape: ',h3.get_shape().as_list())
#print('8df_dim:',config.df_dim*8)
#dim3=tf.reduce_prod(tf.shape(h3)[1:])
dim3=np.prod(h3.get_shape().as_list()[1:])
h3_flat=tf.reshape(h3, [-1,dim3])
h4 = linear(h3_flat, 1, 'd_h3_lin')
prob=tf.nn.sigmoid(h4)
variables = tf.contrib.framework.get_variables(vs,collection=tf.GraphKeys.TRAINABLE_VARIABLES)
return prob, h4, h1_, variables
def discriminator_labeler(image, output_dim, config, reuse=None):
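    '''
    Labeler network: a small convnet that predicts `output_dim` independent
    sigmoid label probabilities for an input image and returns the probabilities,
    their logits and the scope variables.
    '''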
batch_size=tf.shape(image)[0]
with tf.variable_scope("disc_labeler",reuse=reuse) as vs:
dl_bn1 = batch_norm(name='dl_bn1')
dl_bn2 = batch_norm(name='dl_bn2')
dl_bn3 = batch_norm(name='dl_bn3')
h0 = lrelu(conv2d(image, config.df_dim, name='dl_h0_conv'))#16,32,32,64
h1 = lrelu(dl_bn1(conv2d(h0, config.df_dim*2, name='dl_h1_conv')))#16,16,16,128
h2 = lrelu(dl_bn2(conv2d(h1, config.df_dim*4, name='dl_h2_conv')))#16,16,16,248
h3 = lrelu(dl_bn3(conv2d(h2, config.df_dim*8, name='dl_h3_conv')))
dim3=np.prod(h3.get_shape().as_list()[1:])
h3_flat=tf.reshape(h3, [-1,dim3])
D_labels_logits = linear(h3_flat, output_dim, 'dl_h3_Label')
D_labels = tf.nn.sigmoid(D_labels_logits)
variables = tf.contrib.framework.get_variables(vs)
return D_labels, D_labels_logits, variables
def discriminator_gen_labeler(image, output_dim, config, reuse=None):
batch_size=tf.shape(image)[0]
with tf.variable_scope("disc_gen_labeler",reuse=reuse) as vs:
dl_bn1 = batch_norm(name='dl_bn1')
dl_bn2 = batch_norm(name='dl_bn2')
dl_bn3 = batch_norm(name='dl_bn3')
h0 = lrelu(conv2d(image, config.df_dim, name='dgl_h0_conv'))#16,32,32,64
h1 = lrelu(dl_bn1(conv2d(h0, config.df_dim*2, name='dgl_h1_conv')))#16,16,16,128
h2 = lrelu(dl_bn2(conv2d(h1, config.df_dim*4, name='dgl_h2_conv')))#16,16,16,248
h3 = lrelu(dl_bn3(conv2d(h2, config.df_dim*8, name='dgl_h3_conv')))
dim3=np.prod(h3.get_shape().as_list()[1:])
h3_flat=tf.reshape(h3, [-1,dim3])
D_labels_logits = linear(h3_flat, output_dim, 'dgl_h3_Label')
D_labels = tf.nn.sigmoid(D_labels_logits)
variables = tf.contrib.framework.get_variables(vs)
return D_labels, D_labels_logits,variables
def discriminator_on_z(image, config, reuse=None):
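    '''
    Maps an image to a config.z_dim vector squashed to (-1, 1) with tanh,
    i.e. an estimate of the latent code z for that image.
    '''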
batch_size=tf.shape(image)[0]
with tf.variable_scope("disc_z_labeler",reuse=reuse) as vs:
dl_bn1 = batch_norm(name='dl_bn1')
dl_bn2 = batch_norm(name='dl_bn2')
dl_bn3 = batch_norm(name='dl_bn3')
h0 = lrelu(conv2d(image, config.df_dim, name='dzl_h0_conv'))#16,32,32,64
h1 = lrelu(dl_bn1(conv2d(h0, config.df_dim*2, name='dzl_h1_conv')))#16,16,16,128
h2 = lrelu(dl_bn2(conv2d(h1, config.df_dim*4, name='dzl_h2_conv')))#16,16,16,248
h3 = lrelu(dl_bn3(conv2d(h2, config.df_dim*8, name='dzl_h3_conv')))
dim3=np.prod(h3.get_shape().as_list()[1:])
h3_flat=tf.reshape(h3, [-1,dim3])
D_labels_logits = linear(h3_flat, config.z_dim, 'dzl_h3_Label')
D_labels = tf.nn.tanh(D_labels_logits)
variables = tf.contrib.framework.get_variables(vs)
return D_labels,variables
| 2.046875 | 2 |
IBMi-lib-repo.py | barrettotte/IBMi-Lib-Repo | 4 | 12792640 | import sys, os, traceback
from os import path
import utils as utils
from IBMi import IBMi
CONFIG_PATH="./config.json"
def get_configuration(name):
for cfg in utils.read_file_json(CONFIG_PATH):
if cfg["name"] == name: return cfg
return None
def append_configuration(new_data):
if not path.exists(CONFIG_PATH):
utils.log("{} not found. Creating new config file...".format(CONFIG_PATH))
utils.write_file_json(CONFIG_PATH, [])
elif get_configuration(new_data["name"]):
utils.log("ERROR: Configuration already exists by this name.")
exit(1)
utils.write_file_json(CONFIG_PATH, utils.read_file_json(CONFIG_PATH) + [new_data])
return new_data
def new_configuration():
utils.log("Creating new configuration...")
return {
"name": utils.required_input(" Enter name for this configuration: "),
"host": utils.required_input(" Enter IBMi host: "),
"library": utils.required_input(" Enter library to export: "),
"output": utils.required_input(" Enter output directory path: "),
"formatting": utils.bool_input(" Inject additional formatting into source?", is_req=True),
}
def get_credentials(config_name, host):
utils.log("Fetching credentials for configuration '{}'...".format(config_name))
return { 'user': utils.required_input(" Enter user for host '{}': ".format(host)), 'pw': utils.required_pass(" Enter password: ") }
def new_library(args):
export_library(args + [append_configuration(new_configuration())["name"]])
def export_library(args):
config = get_configuration(args[1])
lib = config["library"]
if "--creds" in args:
creds_idx = args.index("--creds")
if creds_idx+2 > len(args):
print("Not enough arguments for credentials flag. --creds <user> <password>")
exit(1)
creds = {'user': args[creds_idx+1], 'pw': args[creds_idx+2]}
else:
print("Credentials not provided. --creds <user> <password>\nPrompting for credentials...")
creds = get_credentials(config["name"], config["host"])
ibmi = IBMi(out_path=config["output"])
ibmi.connect(config["host"])
try:
ibmi.login(creds)
lib_data = ibmi.get_library_data(lib)
ibmi.write_file(lib_data, '{}/lib_data'.format(lib), ext='json')
ibmi.generate_repo(lib_data)
except Exception as e:
utils.log("Exception occurred. Please yell at the programmer ; {}".format(e))
traceback.print_exc()
def print_help(args):
print("\n".join([
"IBMi-lib-repo HELP:",
" [-e <library name>] [--creds <user> <password>] --> Re-export an existing library",
" [-h] --> Display help information",
" [-n] [--creds <user> <password>] --> Setup a new library"
]))
def get_commands():
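    # (flag, extra-argument count, handler) triples; note that process_args currently
    # only uses the flag and the handler.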
return [("-e", 1, export_library), ("-h", 0, print_help), ("-n", 0, new_library)]
def process_args(args):
if len(args) == 0:
print("Not enough arguments passed.")
return False
for cmd in get_commands():
if args[0] == cmd[0]:
cmd[2](args)
return True
print("Invalid argument: '{}' Not found.".format(args[0]))
return False
def main():
utils.log("Program started.")
if not process_args(sys.argv[1:]): exit(1)
if __name__ == "__main__": main() | 2.484375 | 2 |
src/squirrel/orders/widgets.py | c3loc/squirrel | 1 | 12792641 | """
This is extracted from the unmaintained https://github.com/jazzband/django-floppyforms to provide a datalist widget.
It has not been cleaned up yet.
"""
from django import forms
from django.template import Context, loader
from django.utils import formats
from django.utils.encoding import force_text
class DictContext(dict):
pass
REQUIRED_CONTEXT_ATTRIBTUES = (
"_form_config",
"_form_render",
)
def flatten_context(context):
if isinstance(context, Context):
flat = {}
for d in context.dicts:
flat.update(d)
return flat
else:
return context
def flatten_contexts(*contexts):
"""Takes a list of context instances and returns a new dict that
combines all of them."""
new_context = DictContext()
for context in contexts:
if context is not None:
new_context.update(flatten_context(context))
for attr in REQUIRED_CONTEXT_ATTRIBTUES:
if hasattr(context, attr):
setattr(new_context, attr, getattr(context, attr))
return new_context
class Widget(forms.Widget):
is_required = False
def render(self, name, value, attrs=None, renderer=None):
"""
Returns this Widget rendered as HTML, as a Unicode string.
The 'value' given is not guaranteed to be valid input, so subclass
implementations should program defensively.
"""
raise NotImplementedError("subclasses of Widget must provide a render() method")
def build_attrs(self, extra_attrs=None, **kwargs):
"""
Backported from Django 1.10
Helper function for building an attribute dictionary.
"""
attrs = dict(self.attrs, **kwargs)
if extra_attrs:
attrs.update(extra_attrs)
return attrs
# Backported from Django 1.7
@property
def is_hidden(self):
return self.input_type == "hidden" if hasattr(self, "input_type") else False
# Backported from Django 1.9
if not hasattr(forms.Widget, "format_value"):
def format_value(self, value):
return self._format_value(value)
class Input(Widget):
template_name = "widgets/input.html"
input_type = None
datalist = None
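    # `datalist`, when given, is passed through to the template context so the
    # rendered <input> can be paired with a <datalist> of suggested values.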
def __init__(self, *args, **kwargs):
datalist = kwargs.pop("datalist", None)
if datalist is not None:
self.datalist = datalist
template_name = kwargs.pop("template_name", None)
if template_name is not None:
self.template_name = template_name
super(Input, self).__init__(*args, **kwargs)
# This attribute is used to inject a surrounding context in the
# floppyforms templatetags, when rendered inside a complete form.
self.context_instance = None
def get_context_data(self):
return {}
def format_value(self, value):
if self.is_localized:
value = formats.localize_input(value)
return force_text(value)
def get_context(self, name, value, attrs=None):
context = {
"widget": self,
"type": self.input_type,
"name": name,
"hidden": self.is_hidden,
"required": self.is_required,
"True": True,
}
# True is injected in the context to allow stricter comparisons
# for widget attrs. See #25.
if self.is_hidden:
context["hidden"] = True
if value is None:
value = ""
if value != "":
# Only add the value if it is non-empty
context["value"] = self.format_value(value)
context.update(self.get_context_data())
context["attrs"] = self.build_attrs(attrs)
for key, attr in context["attrs"].items():
if attr == 1:
# 1 == True so 'key="1"' will show up only as 'key'
# Casting to a string so that it doesn't equal to True
# See #25.
if not isinstance(attr, bool):
context["attrs"][key] = str(attr)
if self.datalist is not None:
context["datalist"] = self.datalist
return context
def render(self, name, value, attrs=None, **kwargs):
template_name = kwargs.pop("template_name", None)
if template_name is None:
template_name = self.template_name
context = self.get_context(name, value, attrs=attrs or {})
context = flatten_contexts(self.context_instance, context)
return loader.render_to_string(template_name, context)
class TextInput(Input):
template_name = "widgets/text.html"
input_type = "text"
def __init__(self, *args, **kwargs):
if kwargs.get("attrs", None) is not None:
self.input_type = kwargs["attrs"].pop("type", self.input_type)
super(TextInput, self).__init__(*args, **kwargs)
| 2.421875 | 2 |
site_creator/network.py | danielbostock/meraki_site_creator | 0 | 12792642 | <gh_stars>0
# Start Import Area
import meraki, os
# End Import Area
class orgNetwork:
def __init__(self, site_name, serials):
self.site_name = site_name
self.organization_id = ''
#self.timeZone = 'Australia/Brisbane' #Optional - If not assigned Meraki will do their default which is LA time.
#self.tags = ['TEST'] #Optional
self.productTypes = ['appliance', 'switch', 'wireless'] #When one or more product type is entered, the network is changed to combined
api_key = os.environ.get('meraki_api_key')
self.dashboard = meraki.DashboardAPI(api_key)
self.serials = serials
self.networkId = ''
self.templateId =''
    def create(self):
        # timeZone/tags are optional (commented out in __init__); only pass them if set,
        # otherwise Meraki applies its own defaults.
        optional = {k: getattr(self, k) for k in ('timeZone', 'tags') if hasattr(self, k)}
        new_network = self.dashboard.organizations.createOrganizationNetwork(
            organizationId=self.organization_id, name=self.site_name,
            productTypes=self.productTypes, **optional)
        self.networkId = new_network['id']
        claim = self.dashboard.networks.claimNetworkDevices(networkId=self.networkId, serials=self.serials)
        bind = self.dashboard.networks.bindNetwork(networkId=self.networkId, configTemplateId=self.templateId)
xpath_utils/models/xpath_condition_contains.py | kkristof200/py_xpath_utils | 0 | 12792643 | <gh_stars>0
# ------------------------------------------------------ Imports ----------------------------------------------------- #
# System
from typing import Optional
# Local
from .xpath_condition import XPathCondition
from .enums import XPathConditionType
# -------------------------------------------------------------------------------------------------------------------- #
# ------------------------------------------- class: XPathConditionContains ------------------------------------------ #
class XPathConditionContains(XPathCondition):
# --------------------------------------------------- Init --------------------------------------------------- #
def __init__(
self,
name: Optional[str] = None,
value: Optional[any] = None,
**kwargs
):
super().__init__(
name=name,
value=value,
condition_type=XPathConditionType.CONTAINS,
**kwargs
)
# -------------------------------------------------------------------------------------------------------------------- # | 2.15625 | 2 |
backend/tradingbot/migrations/0003_auto_20220108_1759.py | webclinic017/WallStreetBots | 4 | 12792644 | <gh_stars>1-10
# Generated by Django 3.2.8 on 2022-01-08 22:59
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tradingbot', '0002_alter_stockinstance_user'),
]
operations = [
migrations.RemoveField(
model_name='order',
name='price',
),
migrations.AddField(
model_name='order',
name='filled_avg_price',
field=models.DecimalField(blank=True, decimal_places=2, help_text='filled average price', max_digits=8, null=True),
),
migrations.AddField(
model_name='order',
name='filled_quantity',
field=models.DecimalField(decimal_places=2, default=0, help_text='filled quantity', max_digits=8),
),
migrations.AddField(
model_name='order',
name='filled_timestamp',
field=models.DateTimeField(blank=True, help_text='order filled timestamp', null=True),
),
migrations.AddField(
model_name='order',
name='limit_price',
field=models.DecimalField(blank=True, decimal_places=2, help_text='limit price', max_digits=8, null=True),
),
migrations.AddField(
model_name='order',
name='status',
field=models.CharField(choices=[('A', 'Accepted'), ('F', 'Filled')], default='A', help_text='order status', max_length=1),
preserve_default=False,
),
migrations.AddField(
model_name='order',
name='stop_price',
field=models.DecimalField(blank=True, decimal_places=2, help_text='stop price', max_digits=8, null=True),
),
migrations.AlterField(
model_name='order',
name='timestamp',
field=models.DateTimeField(auto_now_add=True, help_text='order submission timestamp'),
),
]
| 1.75 | 2 |
w1data/commands.py | swork/w1-datalogger | 0 | 12792645 | <reponame>swork/w1-datalogger
#! /usr/bin/env python
import argparse, configparser, sys, os
from . import common, metadata, observations, rollup, w1datapoint
import logging
logger = logging.getLogger(__name__)
debug_done = False
class LocalArgumentParser(argparse.ArgumentParser):
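    """
    ArgumentParser pre-loaded with the shared --debug/--config/--rollup-location/
    --raw-location options; _resolve() fills any unset locations from environment
    variables and the config file.
    """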
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.add_argument('--debug', '-d', nargs='?', const='all')
self.add_argument('--config', '-c', default=os.path.expanduser("~/.w1.conf"))
self.add_argument('--rollup-location', default=None)
self.add_argument('--raw-location', default=None)
def _resolve(self, a):
"""
get basic stuff from config file, from env vars, and from command line
"""
# Set defaults here, unless a bad idea
raw_location = None
rollup_location = None
try:
config_string = open(a.config, 'r').read()
except IOError:
config_string = None
c = configparser.ConfigParser()
success = False
if config_string:
try:
c.read_string(config_string, source=a.config)
success = True
except configparser.MissingSectionHeaderError:
config_string = "[global]\n" + config_string
if not success:
c.read_string(config_string, source=a.config)
success = True
if success:
if not a.raw_location: # command-line option takes precedence
a.raw_location = os.path.expanduser(
os.environ.get('raw_location',
c['global'].get('raw_location', raw_location)))
if not a.rollup_location:
a.rollup_location = os.path.expanduser(
os.environ.get('rollup_location',
c['global'].get('rollup_location', rollup_location)))
return a
    def parse_args_permissive(self, *args, **kwargs):
        # parse_known_args always returns (namespace, unknown_args)
        a, _unknown = super().parse_known_args(*args, **kwargs)
        return self._resolve(a)
def parse_args(self, *args, **kwargs):
a = super().parse_args(*args, **kwargs)
return self._resolve(a)
def rollup_command():
"""
Maintain a collection of monthly rollup JSON blobs from raw observation JSON blobs
"""
direct_name = "w1rollup"
_, applied_name = os.path.split(sys.argv[0])
p = LocalArgumentParser()
if applied_name != direct_name:
p.add_argument('rollup_command')
a = p.parse_args()
do_debug(a)
if a.rollup_location is None or a.raw_location is None:
logger.error("Need dirs for raw and rollup data, see --help")
sys.exit(64) # EX_USAGE
return rollup.do_rollup(
os.path.expanduser(a.rollup_location),
os.path.expanduser(a.raw_location))
def testcli_command():
"""
Confidence the CLI is doing the needful
"""
p = LocalArgumentParser()
p.add_argument('testcli_command')
p.add_argument('--option', action='store_true')
a = p.parse_args()
do_debug(a)
if a.testcli_command != 'testcli':
raise RuntimeError("something's goofy with CLI logic")
print(repr(a))
def do_debug(a):
global debug_done
if not debug_done:
logging.basicConfig(format="%(levelname)s:%(filename)s:%(lineno)d:%(message)s")
if a.debug:
modules = set(a.debug.split(','))
if 'commands' in modules or 'all' in modules:
logger.setLevel(logging.DEBUG)
if 'common' in modules or 'all' in modules:
common.logger.setLevel(logging.DEBUG)
if 'metadata' in modules or 'all' in modules:
metadata.logger.setLevel(logging.DEBUG)
if 'observations' in modules or 'all' in modules:
observations.logger.setLevel(logging.DEBUG)
if 'rollup' in modules or 'all' in modules:
rollup.logger.setLevel(logging.DEBUG)
if 'w1datapoint' in modules or 'all' in modules:
w1datapoint.logger.setLevel(logging.DEBUG)
debug_done = True
def main():
"""
CLI operation
"""
p = LocalArgumentParser()
p.add_argument('command')
a = p.parse_args_permissive()
do_debug(a)
if a.command == 'rollup':
return rollup_command()
if a.command == 'testcli':
return testcli_command()
    sys.stderr.write("Unknown command {}\n".format(a.command))
return 64 # EX_USAGE in BSD sysexits, just to pick a standard
if __name__ == '__main__':
sys.exit(main())
| 2.09375 | 2 |
scripts/test_create_dataloaders.py | qlero/charcoal_recognition | 0 | 12792646 | <gh_stars>0
"""
Tests the instantiation of PyTorch dataloaders over the
cleaned images from the Laboratório Visão Robótica e Imagem
charcoal dataset
"""
# Imports library
import os, sys
# used to declare ../charcoal as a folder that can be imported
currentdir = os.path.dirname(os.path.realpath(__file__))
parentdir = os.path.dirname(currentdir)
sys.path.append(parentdir)
from charcoal.generator_dataloaders import generate_dataloaders
# Main scripts
if __name__ == "__main__":
# Declares the target path for the dataset
ufpr_dataset_path = "dataset_ufpr/cleaned/"
print("Imports training, validation, and test dataloaders")
classes, datasets, dataloaders = generate_dataloaders(ufpr_dataset_path)
print("DONE")
print("\nImported Classes")
print(", ".join(classes))
print("\nImported datasets")
print(datasets)
print("\nImported dataloaders")
print(dataloaders) | 2.171875 | 2 |
dcbase/tests/unit/__init__.py | tctimmeh/dc-django-base | 0 | 12792647 | from dcbase.tests.unit.unitTestCase import UnitTestCase
| 1.09375 | 1 |
password_security/models/__init__.py | juazisco/gestion_rifa | 0 | 12792648 | # Copyright 2015 LasLabs Inc.
# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl.html).
from . import res_users # noqa
from . import res_company # noqa
from . import res_users_pass_history # noqa
from . import res_config_settings # noqa
| 0.9375 | 1 |
sdk/ml/azure-ai-ml/azure/ai/ml/_schema/compute/aml_compute_node_info.py | dubiety/azure-sdk-for-python | 1 | 12792649 | # ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
from marshmallow import fields
from azure.ai.ml._schema.core.schema_meta import PatchedSchemaMeta
class AmlComputeNodeInfoSchema(metaclass=PatchedSchemaMeta):
node_id = fields.Str()
private_ip_address = fields.Str()
public_ip_address = fields.Str()
port = fields.Str()
node_state = fields.Str()
current_job_name = fields.Str()
| 1.601563 | 2 |
gym-maze/gym_maze/__init__.py | martinzimmermann/RBL-test-programs | 0 | 12792650 | <gh_stars>0
from gym.envs.registration import register
register(
id='maze-v0',
entry_point='gym_maze.envs:MazeEnvSample5x5',
max_episode_steps=2000,
)
register(
id='maze-sample-5x5-v0',
entry_point='gym_maze.envs:MazeEnvSample5x5',
max_episode_steps=10000000000000,
)
register(
id='maze-empty-5x5-v0',
entry_point='gym_maze.envs:MazeEnvEmpty5x5',
max_episode_steps=10000000000000,
)
register(
id='maze-warehouse-5x5-v0',
entry_point='gym_maze.envs:MazeEnvWarehouse5x5',
max_episode_steps=10000000000000,
)
register(
id='maze-warehouse-8x8-v0',
entry_point='gym_maze.envs:MazeEnvWarehouse8x8',
max_episode_steps=10000000000000,
)
register(
id='maze-warehouse-11x11-v0',
entry_point='gym_maze.envs:MazeEnvWarehouse11x11',
max_episode_steps=10000000000000,
)
register(
id='maze-warehouse-14x14-v0',
entry_point='gym_maze.envs:MazeEnvWarehouse14x14',
max_episode_steps=10000000000000,
)
register(
id='maze-warehouse-17x17-v0',
entry_point='gym_maze.envs:MazeEnvWarehouse17x17',
max_episode_steps=10000000000000,
)
register(
id='maze-warehouse-20x20-v0',
entry_point='gym_maze.envs:MazeEnvWarehouse20x20',
max_episode_steps=10000000000000,
)
register(
id='maze-random-5x5-v0',
entry_point='gym_maze.envs:MazeEnvRandom5x5',
max_episode_steps=10000000000000,
nondeterministic=True,
)
register(
id='maze-random-8x8-v0',
entry_point='gym_maze.envs:MazeEnvRandom8x8',
max_episode_steps=10000000000000,
nondeterministic=True,
)
register(
id='maze-random-11x11-v0',
entry_point='gym_maze.envs:MazeEnvRandom11x11',
max_episode_steps=10000000000000,
nondeterministic=True,
)
register(
id='maze-random-14x14-v0',
entry_point='gym_maze.envs:MazeEnvRandom14x14',
max_episode_steps=10000000000000,
nondeterministic=True,
)
register(
id='maze-random-17x17-v0',
entry_point='gym_maze.envs:MazeEnvRandom17x17',
max_episode_steps=10000000000000,
nondeterministic=True,
)
register(
    id='maze-random-20x20-v0',
entry_point='gym_maze.envs:MazeEnvRandom20x20',
max_episode_steps=10000000000000,
nondeterministic=True,
)
register(
id='maze-sample-10x10-v0',
entry_point='gym_maze.envs:MazeEnvSample10x10',
max_episode_steps=10000000000000,
)
register(
id='maze-random-10x10-v0',
entry_point='gym_maze.envs:MazeEnvRandom10x10',
max_episode_steps=10000000000000,
nondeterministic=True,
)
register(
id='maze-sample-3x3-v0',
entry_point='gym_maze.envs:MazeEnvSample3x3',
max_episode_steps=10000000000000,
)
register(
id='maze-random-3x3-v0',
entry_point='gym_maze.envs:MazeEnvRandom3x3',
max_episode_steps=10000000000000,
nondeterministic=True,
)
register(
id='maze-random-25x25-v0',
entry_point='gym_maze.envs:MazeEnvRandom25x25',
max_episode_steps=10000000000000,
)
register(
id='maze-random-50x50-v0',
entry_point='gym_maze.envs:MazeEnvRandom50x50',
max_episode_steps=10000000000000,
)
register(
id='maze-random-60x60-v0',
entry_point='gym_maze.envs:MazeEnvRandom60x60',
max_episode_steps=10000000000000,
)
register(
id='maze-random-75x75-v0',
entry_point='gym_maze.envs:MazeEnvRandom75x75',
max_episode_steps=10000000000000,
)
register(
id='maze-sample-100x100-v0',
entry_point='gym_maze.envs:MazeEnvSample100x100',
max_episode_steps=10000000000000,
)
register(
id='maze-random-100x100-v0',
entry_point='gym_maze.envs:MazeEnvRandom100x100',
max_episode_steps=10000000000000,
nondeterministic=True,
)
register(
id='maze-random-10x10-plus-v0',
entry_point='gym_maze.envs:MazeEnvRandom10x10Plus',
max_episode_steps=10000000000000,
nondeterministic=True,
)
register(
id='maze-random-20x20-plus-v0',
entry_point='gym_maze.envs:MazeEnvRandom20x20Plus',
max_episode_steps=10000000000000,
nondeterministic=True,
)
register(
id='maze-random-30x30-plus-v0',
entry_point='gym_maze.envs:MazeEnvRandom30x30Plus',
max_episode_steps=10000000000000,
nondeterministic=True,
)
| 1.445313 | 1 |