Dataset row schema (column: type, min .. max; ⌀ = nullable):
hexsha: string (40 .. 40) | size: int64 (5 .. 2.06M) | ext: string (10 classes) | lang: string (1 class: Python)
max_stars_repo_path: string (3 .. 248) | max_stars_repo_name: string (5 .. 125) | max_stars_repo_head_hexsha: string (40 .. 78) | max_stars_repo_licenses: list (1 .. 10) | max_stars_count: int64 (1 .. 191k, nullable) | max_stars_repo_stars_event_min_datetime / _max_datetime: string (24, nullable)
max_issues_repo_path: string (3 .. 248) | max_issues_repo_name: string (5 .. 125) | max_issues_repo_head_hexsha: string (40 .. 78) | max_issues_repo_licenses: list (1 .. 10) | max_issues_count: int64 (1 .. 67k, nullable) | max_issues_repo_issues_event_min_datetime / _max_datetime: string (24, nullable)
max_forks_repo_path: string (3 .. 248) | max_forks_repo_name: string (5 .. 125) | max_forks_repo_head_hexsha: string (40 .. 78) | max_forks_repo_licenses: list (1 .. 10) | max_forks_count: int64 (1 .. 105k, nullable) | max_forks_repo_forks_event_min_datetime / _max_datetime: string (24, nullable)
content: string (5 .. 2.06M) | avg_line_length: float64 (1 .. 1.02M) | max_line_length: int64 (3 .. 1.03M) | alphanum_fraction: float64 (0 .. 1)
count_classes: int64 (0 .. 1.6M) | score_classes: float64 (0 .. 1) | count_generators: int64 (0 .. 651k) | score_generators: float64 (0 .. 1) | count_decorators: int64 (0 .. 990k) | score_decorators: float64 (0 .. 1) | count_async_functions: int64 (0 .. 235k) | score_async_functions: float64 (0 .. 1) | count_documentation: int64 (0 .. 1.04M) | score_documentation: float64 (0 .. 1)
Each row below follows this schema; the content field holds the Python source.
1c72ce0a57a6b20d9f3b0b840d03685a73126b0e | 22,727 | py | Python | duels/duels.py | ridinginstyle00/redcogs | 216869935f322f7e5927740da22fa36f728c48db | ["MIT"] | stars: 8 (2016-08-23 .. 2021-07-24) | issues: 1 (2018-04-25) | forks: 8 (2016-07-26 .. 2019-08-03)
import discord
from discord.ext import commands
from .utils import checks
from .utils.dataIO import dataIO
from __main__ import send_cmd_help
from __main__ import settings
from datetime import datetime
from random import choice
from random import sample
from copy import deepcopy
from collections import namedtuple, defaultdict
import os
import logging
import aiohttp
import asyncio
import time
from time import sleep
client = discord.Client()
class Duels:
def __init__(self, bot):
global globvar
self.bot = bot
self.duelist = dataIO.load_json("data/duels/duelist.json")
self.nuels = "duels"
self.counter = "Number:"
self.setter = "Max:"
self.wlt = dataIO.load_json("data/duels/account.json")
self.timer_board = dataIO.load_json("data/duels/timer.json")
@commands.group(name="duels", pass_context=True)
async def _duels(self, ctx):
"""Duel with another player!!"""
if ctx.invoked_subcommand is None:
await send_cmd_help(ctx)
@commands.command(name="tjoin", pass_context=True)
@checks.admin_or_permissions(manage_server=True)
async def tjoin(self, ctx):
"""Add server to timer list"""
author = ctx.message.author
server = author.server
if server.id not in self.timer_board:
self.timer_board[server.id] = {"time": 0}
dataIO.save_json("data/duels/timer.json", self.timer_board)
await self.bot.say("**{}** has been added to the timer board!".format(server.name))
else:
await self.bot.say("**{}** has already been added to the timer_board!".format(server.name))
@commands.command(name="duel", pass_context=True, no_pm=True)
async def _duel(self, ctx, user: discord.Member=None, otheruser : discord.Member=None):
"""Duel another player"""
author = ctx.message.author
server = author.server
if not user or not otheruser:
await self.bot.reply("Please mention two users that you want to see a duel of!")
elif user.id == otheruser.id:
await self.bot.reply("Silly, you can't see a duel of someone against themselves!")
else:
if server.id in self.timer_board:
if self.timer_board[server.id]["time"] == 0:
self.timer_board[server.id]["time"] += 1
dataIO.save_json("data/duels/timer.json", self.timer_board)
nick_player1 = user.name
nick_player2 = otheruser.name
action = self.duelist[self.nuels]
action_damage1, action_damage2, action_damage3, action_damage4 = self.action_damage()
action_chosen1, action_chosen2, action_chosen3, action_chosen4 = sample(action,4)
hp_player1 = 100
hp_player2 = 100
player1_id = user.id
player2_id = otheruser.id
await self.bot.say("**{}** dueled **{}**!!\n\nPlease wait for the duel to start! Both players will begin with **{}** health!".format(user.mention, otheruser.mention, hp_player1))
await asyncio.sleep(1)
await self.bot.say("**{}** `{}` **{}** and took off **{}** health!".format(nick_player1, action_chosen1, nick_player2, action_damage1))
hp_player2 = hp_player2 - action_damage1
await asyncio.sleep(1)
await self.bot.say("**{}** `{}` **{}** and took off **{}** health!".format(nick_player2, action_chosen2, nick_player1, action_damage2))
hp_player1 = hp_player1 - action_damage2
await asyncio.sleep(1)
await self.bot.say("**{}** `{}` **{}** and took off **{}** health!".format(nick_player1, action_chosen3, nick_player2, action_damage3))
hp_player2 = hp_player2 - action_damage3
await asyncio.sleep(1)
await self.bot.say("**{}** `{}` **{}** and took off **{}** health!".format(nick_player2, action_chosen2, nick_player1, action_damage4))
hp_player1 = hp_player1 - action_damage4
if hp_player1 > hp_player2:
winning_player = nick_player1
losing_player = nick_player2
remaining_hp = hp_player1
await asyncio.sleep(1)
await self.bot.say("After 4 rounds of bloody combat, the winner is **{}** with **{}** health!".format(winning_player, remaining_hp))
if player1_id not in self.wlt:
self.wlt[player1_id] = {"name": winning_player, "Wins": 0, "Losses": 0, "Ties": 0}
dataIO.save_json("data/duels/account.json", self.wlt)
await self.bot.say("{} has not yet entered the duel tournament!".format(winning_player))
await asyncio.sleep(.5)
await self.bot.say("{} has joined the duel tournament, currently changing settings!".format(winning_player))
await self.bot.say("{} gained +1 WIN!!".format(winning_player))
self.wlt[player1_id]["Wins"] += 1
dataIO.save_json("data/duels/account.json", self.wlt)
else:
await self.bot.say("{} gained +1 WIN!!".format(winning_player))
self.wlt[player1_id]["Wins"] += 1
dataIO.save_json("data/duels/account.json", self.wlt)
if player2_id not in self.wlt:
self.wlt[player2_id] = {"name": losing_player, "Wins": 0, "Losses": 0, "Ties": 0}
dataIO.save_json("data/duels/account.json", self.wlt)
await self.bot.say("{} has not yet entered the duel tournament!".format(losing_player))
await asyncio.sleep(.5)
await self.bot.say("{} has joined the duel tournament, currently changing settings!".format(losing_player))
await self.bot.say("{} gained +1 LOSE!!".format(losing_player))
self.wlt[player2_id]["Losses"] += 1
dataIO.save_json("data/duels/account.json", self.wlt)
else:
await self.bot.say("{} gained +1 LOSE!!".format(losing_player))
self.wlt[player2_id]["Losses"] += 1
dataIO.save_json("data/duels/account.json", self.wlt)
elif hp_player1 == hp_player2:
remaining_hp = hp_player1
await asyncio.sleep(1)
await self.bot.say("After 4 rounds of bloody combat, the winner is **no one because it's a draw** with both players still having **{}** health!".format(remaining_hp))
if player1_id not in self.wlt:
self.wlt[player1_id] = {"name": nick_player1, "Wins": 0, "Losses": 0, "Ties": 0}
dataIO.save_json("data/duels/account.json", self.wlt)
await self.bot.say("{} has not yet entered the duel tournament!".format(nick_player1))
await asyncio.sleep(.5)
await self.bot.say("{} has joined the duel tournament, currently changing settings!".format(nick_player1))
await self.bot.say("{} gained +1 TIE!!".format(nick_player1))
self.wlt[player1_id]["Ties"] += 1
dataIO.save_json("data/duels/account.json", self.wlt)
else:
await self.bot.say("{} gained +1 TIE!!".format(nick_player1))
self.wlt[player1_id]["Ties"] += 1
dataIO.save_json("data/duels/account.json", self.wlt)
if player2_id not in self.wlt:
self.wlt[player2_id] = {"name": nick_player2, "Wins": 0, "Losses": 0, "Ties": 0}
dataIO.save_json("data/duels/account.json", self.wlt)
await self.bot.say("{} has not yet entered the duel tournament!".format(nick_player2))
await asyncio.sleep(.5)
await self.bot.say("{} has joined the duel tournament, currently changing settings!".format(nick_player2))
await self.bot.say("{} gained +1 TIE!!".format(nick_player2))
self.wlt[player2_id]["Ties"] += 1
dataIO.save_json("data/duels/account.json", self.wlt)
else:
await self.bot.say("{} gained +1 TIE!!".format(nick_player2))
self.wlt[player2_id]["Ties"] += 1
dataIO.save_json("data/duels/account.json", self.wlt)
else:
winning_player = nick_player2
losing_player = nick_player1
remaining_hp = hp_player2
await asyncio.sleep(1)
await self.bot.say("After 4 rounds of bloody combat, the winner is **{}** with **{}** health!".format(winning_player, remaining_hp))
if player2_id not in self.wlt:
self.wlt[player2_id] = {"name": winning_player, "Wins": 0, "Losses": 0, "Ties": 0}
dataIO.save_json("data/duels/account.json", self.wlt)
await self.bot.say("{} has not yet entered the duel tournament!".format(winning_player))
await asyncio.sleep(.5)
await self.bot.say("{} has joined the duel tournament, currently changing settings!".format(winning_player))
await self.bot.say("{} gained +1 WIN!!".format(winning_player))
self.wlt[player2_id]["Wins"] += 1
dataIO.save_json("data/duels/account.json", self.wlt)
else:
await self.bot.say("{} gained +1 WIN!!".format(winning_player))
self.wlt[player2_id]["Wins"] += 1
dataIO.save_json("data/duels/account.json", self.wlt)
if player1_id not in self.wlt:
self.wlt[player1_id] = {"name": losing_player, "Wins": 0, "Losses": 0, "Ties": 0}
dataIO.save_json("data/duels/account.json", self.wlt)
await self.bot.say("{} has not yet entered the duel tournament!".format(losing_player))
await asyncio.sleep(.5)
await self.bot.say("{} has joined the duel tournament, currently changing settings!".format(losing_player))
await self.bot.say("{} gained +1 LOSE!!".format(losing_player))
self.wlt[player1_id]["Losses"] += 1
dataIO.save_json("data/duels/account.json", self.wlt)
else:
await self.bot.say("{} gained +1 LOSE!!".format(losing_player))
self.wlt[player1_id]["Losses"] += 1
dataIO.save_json("data/duels/account.json", self.wlt)
self.timer_board[server.id]["time"] -= 1
dataIO.save_json("data/duels/timer.json", self.timer_board)
else:
await self.bot.say("**A duel is already running!\nPlease wait for the current one to finish!**")
else:
await self.bot.say("Please do {}tjoin to be added to the timer board!".format(ctx.prefix))
@_duels.command(pass_context=True, no_pm=True)
@checks.admin_or_permissions(manage_server=True)
async def add (self, ctx, *, Duel : str):
"""Adds a duel to the list"""
if self.nuels not in self.duelist:
self.duelist[self.nuels] = ["Super Falcon Punched",
"shot",
"kidnapped",
"called 'The Spanker' on",
"ran over",
"Super Falcon Kicked",
"One Punched",
"used One Punch Man on",
"Kamehameha'd",
"Final Flashed",
"Instant Transmission Kamehameha'd",
"Omega Blastered",
"Rick Roll'd",
"Kaioken X4 Kamehameha'd",
"Spirit Bombed",
"hacked",
"Perfect Kamehameha'd",
"used Destructo Disc on",
"used Destructo Disc X2 on",
"used Destructo Disc Chain on",
"Big Bang Kamehameha'd",
"Big Bang Attacked",
"Galick Gunned",
"used Chuck Norris on",
"used Dragon Fist on",
"Final Kamehameha'd",
"Air striked",
"concrete donkey'd",
"super banana bombed",
"Holy Hand Grenaded"]
self.duelist[self.setter] = 100
await self.bot.say("Setter hasn't been added yet. Setter has been auto set to: **{}**".format(self.duelist[self.setter]))
dataIO.save_json("data/duels/duelist.json", self.duelist)
if Duel in self.duelist[self.nuels]:
await self.bot.say("Uh oh. It seems `{}` has already been added to the list.".format(Duel))
else:
if self.counter not in self.duelist:
self.duelist[self.counter] = 0
if self.setter not in self.duelist:
self.duelist[self.setter] = 100
dataIO.save_json("data/duels/duelist.json", self.duelist)
await self.bot.say("Setter hasn't been added yet. Setter has been auto set to: **{}**".format(self.duelist[self.setter]))
if self.duelist[self.counter] < self.duelist[self.setter]:
self.duelist[self.nuels].append(Duel)
self.duelist[self.counter] += 1
dataIO.save_json("data/duels/duelist.json", self.duelist)
await self.bot.say("`{}` has been added to the duel list!".format(Duel))
else:
await self.bot.say("The maximum amount of duel actions has been added (**{}**). Please contact someone with the `Manage Server` permission to change this.".format(self.duelist[self.setter]))
@_duels.command(name="set", pass_context=True, no_pm=True)
@checks.admin_or_permissions(manage_server=True)
async def _set(self, ctx, setter : int=None):
"""Sets the maximum amount of duels that can be added"""
if not setter:
if self.setter not in self.duelist:
self.duelist[self.setter] = 100
await self.bot.say("Setter is currently set to: **{}**".format(self.duelist[self.setter]))
else:
if self.setter not in self.duelist:
self.duelist[self.setter] = 100
await self.bot.say("Setter hasn't been added yet. Setter has been auto set to: **{}**".format(self.duelist[self.setter]))
self.duelist[self.setter] = setter
dataIO.save_json("data/duels/duelist.json", self.duelist)
await self.bot.say("The Duel List Setter has been set to allow a maximum of **{}** items.".format(setter))
#Save function here that isn't added yet
else:
self.duelist[self.setter] = setter
dataIO.save_json("data/duels/duelist.json", self.duelist)
await self.bot.say("The Duel List Setter has been set to allow a maximum of **{}** items.".format(setter))
#Save function here that isn't added yet
if not setter:
await self.bot.say("Setter is currently set to: **{}**".format(self.duelist[self.setter]))
@_duels.command(pass_context=True, no_pm=True)
async def join(self, ctx, user: discord.Member=None):
"""Join tournament"""
user = ctx.message.author
if user.id not in self.wlt:
self.wlt[user.id] = {"name": user.name, "Wins": 0, "Losses": 0, "Ties": 0}
dataIO.save_json("data/duels/account.json", self.wlt)
await self.bot.say("{} has joined the tournament!".format(user.mention))
else:
await self.bot.say("{} has already joined the tournament".format(user.mention))
@_duels.command(name="stats", pass_context=True)
async def _stats(self, ctx, user : discord.Member=None):
"""Show rank and XP of users.
Defaults to yours."""
if not user:
user = ctx.message.author
if self.check_joined(user.id):
await self.bot.say("{}'s stats: **Wins: {} | Losses: {} | Ties: {} **".format(user.name, self.get_wins(user.id),
self.get_losses(user.id),
self.get_ties(user.id)))
else:
await self.bot.say("{}, you are not yet in the tournament!".format(user.mention))
else:
if self.check_joined(user.id):
await self.bot.say("{}'s stats: **Wins: {} | Losses: {} | Ties: {} **".format(user.name, self.get_wins(user.id),
self.get_losses(user.id),
self.get_ties(user.id)))
else:
await self.bot.say("This user has not joined the tournament")
@_duels.command(pass_context=True, no_pm=True)
async def show (self, ctx):
"""Shows list of available duels"""
if self.nuels not in self.duelist:
self.duelist[self.setter] = 100
self.duelist[self.counter] = 30
self.duelist[self.nuels] = ["Super Falcon Punched",
"shot",
"kidnapped",
"called 'The Spanker' on",
"ran over",
"Super Falcon Kicked",
"One Punched",
"used One Punch Man on",
"Kamehameha'd",
"Final Flashed",
"Instant Transmission Kamehameha'd",
"Omega Blastered",
"Rick Roll'd",
"Kaioken X4 Kamehameha'd",
"Spirit Bombed",
"hacked",
"Perfect Kamehameha'd",
"used Destructo Disc on",
"used Destructo Disc X2 on",
"used Destructo Disc Chain on",
"Big Bang Kamehameha'd",
"Big Bang Attacked",
"Galick Gunned",
"used Chuck Norris on",
"used Dragon Fist on",
"Final Kamehameha'd",
"Air striked",
"concrete donkey'd",
"super banana bombed",
"Holy Hand Grenaded"]
dataIO.save_json("data/duels/duelist.json", self.duelist)
await self.bot.say(" \n\n\n\n\nThe 30 duels are preset duels that are added automatically on first run. (Code looks like crap right now though :wink:)".format(ctx.prefix))
strbuffer = self.duel_show().split("\n")
mess = ""
if self.duelist[self.counter] == self.duelist[self.setter]:
await self.bot.say("**{}** out of **{}** spaces used! **MAXED OUT!!**".format(len(self.duelist[self.nuels]), self.duelist[self.setter]))
else:
await self.bot.say("**{}** out of **{}** spaces used!".format(len(self.duelist[self.nuels]), self.duelist[self.setter]))
for line in strbuffer:
if len(mess) + len(line) + 1 < 300:
mess += "\n" + line
else:
await self.bot.say(mess)
mess = ""
if mess != "":
await self.bot.say(mess)
else:
strbuffer = self.duel_show().split("\n")
mess = ""
if self.duelist[self.counter] == self.duelist[self.setter]:
await self.bot.say("**{}** out of **{}** spaces used! **MAXED OUT!!**".format(len(self.duelist[self.nuels]), self.duelist[self.setter]))
else:
await self.bot.say("**{}** out of **{}** spaces used!".format(len(self.duelist[self.nuels]), self.duelist[self.setter]))
for line in strbuffer:
if len(mess) + len(line) + 1 < 300:
mess += "\n" + line
else:
await self.bot.say(mess)
mess = ""
if mess != "":
await self.bot.say(mess)
@_duels.command(pass_context=True, no_pm=True)
async def remove (self, ctx, Duel : str):
"""removes a duel from the list"""
try:
            # list.remove() returns None and raises ValueError when absent,
            # so there is nothing to compare against ValueError here.
            self.duelist[self.nuels].remove(Duel)
            dataIO.save_json("data/duels/duelist.json", self.duelist)
            await self.bot.say("{} has been successfully removed from the duel list!".format(Duel))
except ValueError:
await self.bot.say("I can't remove what hasn't been added to the list to begin with.")
@_duels.command(pass_context=True, no_pm=True)
async def reset (self, ctx):
"""For when you have waaay too many duels"""
if len(self.duelist[self.nuels]) > 0:
self.duelist[self.counter] = 0
self.duelist[self.nuels] = []
dataIO.save_json("data/duels/duelist.json", self.duelist)
dataIO.save_json("data/duels/duelist.json", self.duelist)
await self.bot.say("Duel list has been reset")
else:
await self.bot.say("I can't delete a list that's already empty!")
@_duels.command(pass_context=True)
async def timerreset(self, ctx):
"""Reset the duel timer, only use if the system hangs or breaks!"""
author = ctx.message.author
server = author.server
if server.id in self.timer_board:
if self.timer_board[server.id]["time"] == 0:
await self.bot.say("There isn't a timer right now (no duel running).")
else:
self.timer_board[server.id]["time"] = 0
await self.bot.say("Timer has been reset!")
else:
await self.bot.say("Please do {}tjoin to be added to the timer board!".format(ctx.prefix))
#This cog was made by Axaios and Ridinginstyle00. And any code taken from others we also credit them here, whether we know their name or not.
def duel_show (self):
ret = "```--------```"
for num, duels in enumerate(self.duelist[self.nuels]):
ret += str(num + 1) + ") `" + duels + "`\n"
ret += "```--------```"
return ret
def action_choose (self):
action = choice(sample(self.duelist[self.nuels],1))
return action
def multiple_action_choose (self):
action1 = self.action_choose()
action2 = self.action_choose()
action3 = self.action_choose()
action4 = self.action_choose()
return action1, action2, action3, action4
def action_damage (self):
action_chosen1, action_chosen2, action_chosen3, action_chosen4 = self.multiple_action_choose()
action_damage1 = self.duelist[self.nuels].index(action_chosen1)
action_damage2 = self.duelist[self.nuels].index(action_chosen2)
action_damage3 = self.duelist[self.nuels].index(action_chosen3)
action_damage4 = self.duelist[self.nuels].index(action_chosen4)
return action_damage1, action_damage2, action_damage3, action_damage4
def check_joined(self, id):
if id in self.wlt:
return True
else:
return False
def get_wins(self, id):
if self.check_joined(id):
return self.wlt[id]["Wins"]
def get_losses(self, id):
if self.check_joined(id):
return self.wlt[id]["Losses"]
def get_ties(self, id):
if self.check_joined(id):
return self.wlt[id]["Ties"]
def display_time(self, seconds, granularity=2): # What would I ever do without stackoverflow?
intervals = ( # Source: http://stackoverflow.com/a/24542445
('weeks', 604800), # 60 * 60 * 24 * 7
('days', 86400), # 60 * 60 * 24
('hours', 3600), # 60 * 60
('minutes', 60),
('seconds', 1),
)
result = []
for name, count in intervals:
value = seconds // count
if value:
seconds -= value * count
if value == 1:
name = name.rstrip('s')
result.append("{} {}".format(value, name))
return ', '.join(result[:granularity])
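    # Illustrative examples (not part of the original cog):
    #   self.display_time(7380) -> "2 hours, 3 minutes"
    #   self.display_time(90, granularity=1) -> "1 minute"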
def check_folders():
if not os.path.exists("data/duels"):
print("Creating data/duels folder...")
os.mkdir("data/duels")
def check_files():
fp = "data/duels/duelist.json"
if not dataIO.is_valid_json(fp):
print("Creating duelist.json...")
dataIO.save_json(fp, {})
acc = "data/duels/account.json"
if not dataIO.is_valid_json(acc):
print("creating account.json...")
dataIO.save_json(acc, {})
fp = "data/duels/timer.json"
if not dataIO.is_valid_json(fp):
print("Creating timer.json...")
dataIO.save_json(fp, {})
def setup(bot):
global logger
check_folders()
check_files()
n = Duels(bot)
logger = logging.getLogger("red.duels")
if logger.level == 0: # Prevents the logger from being loaded again in case of module reload
logger.setLevel(logging.INFO)
handler = logging.FileHandler(filename='data/duels/duels.log', encoding='utf-8', mode='a')
handler.setFormatter(logging.Formatter('%(asctime)s %(message)s', datefmt="[%d/%m/%Y %H:%M]"))
logger.addHandler(handler)
    bot.add_cog(n)
1c741e6bc69fc8671df5a15c26f40ce7a3bf09f3 | 2,839 | py | Python | paranuara/citizens/models/citizens.py | SPLAYER-HD/Paranuara | 5a42f23d761e16e3b486ba04d9185551614f06a5 | ["MIT"] | stars: null | issues: 4 (2021-06-08 .. 2022-03-12) | forks: null (forks repo listed as SPLAYER-HD/RestServiceDjango)
"""Citizens model."""
# Django
from django.db import models
from django.contrib.auth.models import AbstractUser
from django.core.validators import RegexValidator
# models
from paranuara.companies.models import Company
# PostgreSQL fields
from django.contrib.postgres.fields import JSONField
# Utilities
from paranuara.utils.models import ParanuaraModel
class Citizen(ParanuaraModel, AbstractUser):
"""Citizen model.
Extend from Django's Abstract User, change the username field
to email and add some extra fields.
"""
index = models.IntegerField(
unique=True,
default=-1
)
favorite_food = models.ManyToManyField(
'foods.Food',
related_name='favorite_food'
)
has_died = models.BooleanField(
'died',
default=False,
help_text=(
'Help easily distinguish citizens died or alive. '
)
)
balance = models.DecimalField(
max_digits=15,
decimal_places=2,
default=None
)
picture = models.ImageField(
'profile picture',
upload_to='paranuara/citizens/pictures/',
blank=True,
null=True
)
age = models.IntegerField(
default=-1
)
eyeColor = models.CharField(
max_length=50,
blank=False
)
gender = models.CharField(
max_length=6,
blank=True
)
email = models.EmailField(
'email address',
unique=True,
error_messages={
'unique': 'A user with that email already exists.'
}
)
phone_regex = RegexValidator(
regex=r'\+?1?\d{9,15}$',
message="Phone number must be entered in the format: +999999999. Up to 15 digits allowed."
)
phone = models.CharField(
validators=[phone_regex],
max_length=20,
blank=True
)
address = models.CharField(
max_length=100,
blank=True
)
company = models.ForeignKey(
Company,
related_name='employees_company',
on_delete=models.SET_NULL,
null=True
)
about = models.CharField(
max_length=1000,
blank=True,
null=True
)
greeting = models.CharField(
max_length=1000,
blank=True,
null=True
)
tags = JSONField(
default=None,
blank=True,
null=True
)
REQUIRED_FIELDS = ['has_died', 'eyeColor', 'index']
def get_relations(self):
        return Relationship.objects.filter(from_people=self)
class Relationship(models.Model):
"""Class to represent many to many relation between Ctizens"""
from_people = models.ForeignKey(Citizen, related_name='from_people', on_delete=models.CASCADE)
to_people = models.ForeignKey(Citizen, related_name='to_people', on_delete=models.CASCADE)
1c75ba48ae7018192a5f6740f29aabe6961aa8fd | 103 | py | Python | tests/utils.py | niwibe/cobrascript | 4c6a193d8745771e5fb0e277394f83e47cc7ede8 | ["BSD-3-Clause"] | stars: 1 (2015-05-03) | issues: null | forks: null
# -*- coding: utf-8 -*-
from cobra.utils import normalize
def norm(data):
return normalize(data)
1c76dc2dd43f773b5f6265abe296c42c73bdff7c | 597 | py | Python | rgislackbot/dispatcher/dispatchconfig.py | raginggeek/RGISlackBot | 9fddf78b37f494eb6605da890a28f41427f37f03 | ["MIT"] | stars: null | issues: 11 (2019-03-08 .. 2019-03-15) | forks: null
class DispatchConfig:
def __init__(self, raw_config):
self.registered_commands = {}
for package in raw_config["handlers"]:
for command in package["commands"]:
self.registered_commands[command] = {
"class": package["class"],
"fullpath": ".".join([package["package"], package["module"], package["class"]])
}
def get_handler_by_command(self, command):
if command in self.registered_commands:
return self.registered_commands[command]
else:
return None
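
# Illustrative usage sketch (assumed config shape, not part of the original module):
# the constructor walks raw_config["handlers"], so a minimal config could look like
#
#   example_config = {
#       "handlers": [
#           {"package": "plugins", "module": "greetings", "class": "HelloHandler",
#            "commands": ["hello", "hi"]},
#       ]
#   }
#   dispatch = DispatchConfig(example_config)
#   dispatch.get_handler_by_command("hello")
#   # -> {"class": "HelloHandler", "fullpath": "plugins.greetings.HelloHandler"}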
1c770de3012ff3f97ad6bf07fd17d96b765a28e4 | 2,442 | py | Python | chess/rules.py | DevStrikerTech/Chess-Engine | f0d2e0fc48b820325b1826e4379bf0520c8d3b52 | ["MIT"] | stars: 18 (2021-01-26 .. 2021-01-27) | issues: null | forks: 9 (2021-01-26) (issues/forks repo listed as KingCobra2018/Chess-Engine)
import pygame
from chess.board import Board
from .variable_declaration import black_piece, white_piece, position_piece, board_square_size
class Rules:
def __init__(self, window):
self._init()
self.window = window
def update(self):
self.chess_board.draw_pieces(self.window)
self.draw_valid_moves(self.logical_moves)
pygame.display.update()
def _init(self):
self.current_piece = None
self.chess_board = Board()
self.turn_taken = black_piece
self.logical_moves = {}
def winner(self):
return self.chess_board.winner()
def reset(self):
self._init()
def select(self, board_row, board_column):
if self.current_piece:
result = self._move(board_row, board_column)
if not result:
self.current_piece = None
self.select(board_row, board_column)
piece = self.chess_board.get_pieces(board_row, board_column)
if piece != 0 and piece.piece_color == self.turn_taken:
self.current_piece = piece
self.logical_moves = self.chess_board.get_logical_moves(piece)
return True
return False
def _move(self, board_row, board_column):
piece = self.chess_board.get_pieces(board_row, board_column)
if self.current_piece and piece == 0 and (board_row, board_column) in self.logical_moves:
self.chess_board.move_pieces(self.current_piece, board_row, board_column)
skipped = self.logical_moves[(board_row, board_column)]
if skipped:
self.chess_board.remove(skipped)
self.change_turn()
else:
return False
return True
def draw_valid_moves(self, moves):
for move in moves:
row, col = move
pygame.draw.circle(self.window, position_piece,
(col * board_square_size + board_square_size // 2,
row * board_square_size + board_square_size // 2), 15)
def change_turn(self):
self.logical_moves = {}
if self.turn_taken == black_piece:
self.turn_taken = white_piece
else:
self.turn_taken = black_piece
def get_board(self):
return self.chess_board
def algorithm_move(self, chess_board):
self.chess_board = chess_board
self.change_turn()
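
# Illustrative usage sketch (assumptions, not part of the original module): a pygame
# game loop would construct Rules with a window surface and feed it board coordinates:
#   window = pygame.display.set_mode((800, 800))
#   rules = Rules(window)
#   rules.select(5, 0)   # select a piece belonging to the side whose turn it is
#   rules.select(4, 1)   # then a destination drawn from rules.logical_moves
#   rules.update()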
1c77f1e65b1460f3b0a09bd95f3c03183aa1bcf6 | 1,542 | py | Python | kivygames/games/noughtsandcrosses/__init__.py | jonathanjameswatson/kivygames | 7636580956562af0814c973f94afede926cfa4b9 | ["MIT"] | stars: null | issues: null | forks: null
import numpy as np
from kivygames.games import Game
import kivygames.games.noughtsandcrosses.c as c
class CellOccupiedError(Exception):
pass
class NoughtsAndCrosses(Game):
minPlayers = 2
maxPlayers = 2
hasAI = True
gridShape = (3, 3)
def __init__(self):
Game.__init__(self)
self.grid = np.zeros(self.gridShape, dtype="u1")
self.player = 1
def isEmpty(self, position):
return self.grid[position] == 0
async def turn(self):
await self.sendOutput("Player", self.player)
while True:
position = await self.getInput("Position", tuple, self.player)
if self.isEmpty(position):
break
await self.sendOutput("Error", "That space is already full.")
await self.sendOutput("Error", "")
self.grid[position] = self.player
await self.sendOutput("Grid", self.grid)
if c.hasPlayerWon(self.grid, self.player):
await self.sendOutput("End", f"Player {self.player} wins.")
return True
if np.count_nonzero(self.grid) == 9:
await self.sendOutput("End", f"It's a draw!")
return True
self.player = 3 - self.player
return False
def getAIInput(self, name):
if name == "Position":
return c.minimax(self.player, self.player, True, self.grid)[1]
async def game(self):
while True:
ended = await self.turn()
if ended:
break
await self.end()
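
# Illustrative note (assumption about intended use, not part of the original module):
# the Game framework drives game() as a coroutine, feeding "Position" inputs as
# (row, col) tuples; self.grid stores 0 for empty squares and 1/2 for the players,
# e.g. np.array([[1, 2, 1], [2, 1, 2], [1, 0, 0]], dtype="u1") after seven moves.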
1c7894b14ef779955e6bd0f109d8986f10e8fa84 | 1,206 | py | Python | 03-Decouvrez-POO/download_agents.py | gruiick/openclassrooms-py | add4b28eab8b311dea7c1d3915a22061f54326a9 | ["BSD-2-Clause"] | stars: null | issues: null | forks: null
#! /usr/bin/env python
import argparse
import json
import time
import urllib.error
import urllib.request
def main():
parser = argparse.ArgumentParser(description="Download agents from pplapi.com")
parser.add_argument("-c", "--count", type=int, default=10, help="Number of agents to download.")
parser.add_argument("-d", "--dest", help="Destination file. If absent, will print to stdout")
args = parser.parse_args()
agents = []
while len(agents) < args.count:
if agents:
# Wait one second between every request
time.sleep(1)
request_count = min(args.count - len(agents), 500)
try:
response = urllib.request.urlopen("http://pplapi.com/batch/{}/sample.json".format(request_count))
agents += json.loads(response.read().decode("utf8"))
except urllib.error.HTTPError:
print("Too may requests, sleeping 10s ({} agents)".format(len(agents)))
time.sleep(10)
result = json.dumps(agents, indent=2, sort_keys=True)
if args.dest:
with open(args.dest, 'w') as out_f:
out_f.write(result)
else:
print(result)
if __name__ == "__main__":
main()
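
# Example invocation (illustrative):
#   python download_agents.py --count 25 --dest agents.json
# Without --dest the JSON document is printed to stdout instead.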
1c78aef6937bac0c47b2a7aeef06915d8ec4cebe | 3,681 | py | Python | Commands/images.py | Mariobob/Proton | 7c5eab0251266ca1da83591d396b357bab692399 | ["MIT"] | stars: null | issues: null | forks: null
import functools
import re
import asyncio
from io import BytesIO
from discord.ext import commands
import discord
from Utils import canvas
import random
class Images:
"""
Contains commands for manipulation of images.
"""
def __init__(self, bot):
self.bot = bot
self.imageClient = canvas.Client(bot)
@commands.command(name="illegal")
async def illegal(self, ctx, *, args=None):
"""Ask US President Donald Trump to make something illegal."""
if args is None:
await ctx.send("Please provide something to make it illegal.")
return
if len(args) > 10 or len(args) < 1:
await ctx.send("You can make only 1 to 10 lettered things illegal.")
return
elif not bool(re.match('^[a-zA-Z0-9]+$', args)):
await ctx.send("Oops! Only alphanumeric characters are allowed.")
return
payload = {"task": "gif", "word": args.upper()}
async with ctx.message.channel.typing():
message = await ctx.send(f"Convincing US President Donald Trump to make `{args}` illegal.")
async with self.bot.session.post("https://is-now-illegal.firebaseio.com/queue/tasks.json", json=payload) as resp:
pass
await asyncio.sleep(5)
url = f"https://storage.googleapis.com/is-now-illegal.appspot.com/gifs/{args.upper()}.gif"
async with self.bot.session.get(url) as resp:
image = await resp.read()
await ctx.send(file=discord.File(BytesIO(image), "illegal.gif"))
await message.delete()
@commands.command(name="beautiful")
async def beautiful(self, ctx, user: discord.Member = None):
"""This... this is beautiful!"""
member = user or ctx.author
async with ctx.typing():
avatar = await self.imageClient.getAvatar(user=member, size=128)
func = functools.partial(self.imageClient.beautify, avatar)
image = await self.bot.loop.run_in_executor(None, func)
await ctx.send(file=discord.File(fp=image, filename="beautiful.png"))
@commands.command(name="delet")
async def delet(self, ctx, user: discord.Member = None):
"""Delet this garbage!"""
member = user or ctx.author
async with ctx.typing():
avatar = await self.imageClient.getAvatar(user=member, size=128)
func = functools.partial(self.imageClient.deletify, avatar, f"{member.name}#{member.discriminator}")
image = await self.bot.loop.run_in_executor(None, func)
await ctx.send(file=discord.File(fp=image, filename="delet.png"))
@commands.command(name="robot")
async def robot(self, ctx, *, args=None):
"""See a unique robot image from any text."""
if args is None:
args = ctx.author.name
randomInt = random.randrange(1, 3)
async with ctx.typing():
image = await self.imageClient.getRobotImage(args, randomInt)
file = discord.File(fp=image, filename=f"{args}.png")
await ctx.send(file=file)
@commands.command(name="thuglife")
async def thuglife(self, ctx, user: discord.Member = None):
"""Thug Life....."""
member = user or ctx.author
async with ctx.typing():
avatar = await self.imageClient.getAvatar(user=member, size=512)
func = functools.partial(self.imageClient.thugLife, avatar)
image = await self.bot.loop.run_in_executor(None, func)
await ctx.send(file=discord.File(fp=image, filename="thuglife.png"))
def setup(bot):
    bot.add_cog(Images(bot))
1c79747bf1ea18f4c3b8be4f42301cb16c8ee8f3 | 223 | py | Python | labJS/conf.py | lpomfrey/django-labjs | f35346ec7f3b87ae24b2d7a01c06001ceb4173bc | ["MIT"] | stars: null | issues: null | forks: null
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from appconf import AppConf
from django.conf import settings # noqa
class LabjsConf(AppConf):
ENABLED = not settings.DEBUG
DEBUG_TOGGLE = 'labjs'
1c7b1135efb3bd7f94a1f1a7d47294ebfd74cbde | 10,416 | py | Python | tests/test_nanoevents_vector.py | danbarto/coffea | 2b28e28f602f8b81a1449ee85578187a7f52b602 | ["BSD-3-Clause"] | stars: null | issues: null | forks: null
import awkward as ak
from coffea.nanoevents.methods import vector
import pytest
ATOL = 1e-8
def record_arrays_equal(a, b):
return (ak.fields(a) == ak.fields(b)) and all(ak.all(a[f] == b[f]) for f in ak.fields(a))
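# For example (illustrative), two record arrays with the same fields and values compare equal:
#   record_arrays_equal(ak.zip({"x": [1, 2]}), ak.zip({"x": [1, 2]})) -> True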
def test_two_vector():
a = ak.zip(
{
"x": [[1, 2], [], [3], [4]],
"y": [[5, 6], [], [7], [8]]
},
with_name="TwoVector",
highlevel=False
)
a = ak.Array(a, behavior=vector.behavior)
b = ak.zip(
{
"x": [[11, 12], [], [13], [14]],
"y": [[15, 16], [], [17], [18]]
},
with_name="TwoVector",
highlevel=False
)
b = ak.Array(b, behavior=vector.behavior)
assert record_arrays_equal(- a, ak.zip(
{
"x": [[-1, -2], [], [-3], [-4]],
"y": [[-5, -6], [], [-7], [-8]]
}
))
assert record_arrays_equal(a + b, ak.zip(
{
"x": [[12, 14], [], [16], [18]],
"y": [[20, 22], [], [24], [26]]
}
))
assert record_arrays_equal(a - b, ak.zip(
{
"x": [[-10, -10], [], [-10], [-10]],
"y": [[-10, -10], [], [-10], [-10]]
}
))
assert record_arrays_equal(a * 2, ak.zip(
{
"x": [[2, 4], [], [6], [8]],
"y": [[10, 12], [], [14], [16]]
}
))
assert record_arrays_equal(a / 2, ak.zip(
{
"x": [[0.5, 1], [], [1.5], [2]],
"y": [[2.5, 3], [], [3.5], [4]]
}
))
    assert ak.all(a.dot(b) == ak.Array([[86, 120], [], [158], [200]]))
    assert ak.all(b.dot(a) == ak.Array([[86, 120], [], [158], [200]]))
assert ak.all(abs(a.unit.r - 1) < ATOL)
assert ak.all(abs(a.unit.phi - a.phi) < ATOL)
def test_polar_two_vector():
a = ak.zip(
{
"r": [[1, 2], [], [3], [4]],
"phi": [[0.3, 0.4], [], [0.5], [0.6]],
},
with_name="PolarTwoVector",
highlevel=False
)
a = ak.Array(a, behavior=vector.behavior)
assert record_arrays_equal(a * 2, ak.zip(
{
"r": [[2, 4], [], [6], [8]],
"phi": [[0.3, 0.4], [], [0.5], [0.6]]
}
))
assert ak.all((a * (-2)).r == [[2, 4], [], [6], [8]])
assert ak.all((a * (-2)).phi - ak.Array([
[-2.8415926535, -2.7415926535],
[],
[-2.6415926535],
[-2.5415926535]
]) < ATOL)
assert record_arrays_equal(a / 2, ak.zip(
{
"r": [[0.5, 1], [], [1.5], [2]],
"phi": [[0.3, 0.4], [], [0.5], [0.6]]
}
))
assert ak.all(abs((-a).x + a.x) < ATOL)
assert ak.all(abs((-a).y + a.y) < ATOL)
assert record_arrays_equal(a * (-1), -a)
assert ak.all(a.unit.phi == a.phi)
def test_three_vector():
a = ak.zip(
{
"x": [[1, 2], [], [3], [4]],
"y": [[5, 6], [], [7], [8]],
"z": [[9, 10], [], [11], [12]]
},
with_name="ThreeVector",
highlevel=False
)
a = ak.Array(a, behavior=vector.behavior)
b = ak.zip(
{
"x": [[4, 1], [], [10], [11]],
"y": [[17, 7], [], [11], [6]],
"z": [[9, 11], [], [5], [16]]
},
with_name="ThreeVector",
highlevel=False
)
b = ak.Array(b, behavior=vector.behavior)
assert record_arrays_equal(- a, ak.zip(
{
"x": [[-1, -2], [], [-3], [-4]],
"y": [[-5, -6], [], [-7], [-8]],
"z": [[-9, -10], [], [-11], [-12]]
}
))
assert record_arrays_equal(a + b, ak.zip(
{
"x": [[5, 3], [], [13], [15]],
"y": [[22, 13], [], [18], [14]],
"z": [[18, 21], [], [16], [28]]
}
))
assert record_arrays_equal(a - b, ak.zip(
{
"x": [[-3, 1], [], [-7], [-7]],
"y": [[-12, -1], [], [-4], [2]],
"z": [[0, -1], [], [6], [-4]]
}
))
assert record_arrays_equal(a * 2, ak.zip(
{
"x": [[2, 4], [], [6], [8]],
"y": [[10, 12], [], [14], [16]],
"z": [[18, 20], [], [22], [24]]
}
))
assert record_arrays_equal(a / 2, ak.zip(
{
"x": [[0.5, 1], [], [1.5], [2]],
"y": [[2.5, 3], [], [3.5], [4]],
"z": [[4.5, 5], [], [5.5], [6]]
}
))
assert ak.all(a.dot(b) == ak.Array([[170, 154], [], [162], [284]]))
assert ak.all(b.dot(a) == ak.Array([[170, 154], [], [162], [284]]))
assert record_arrays_equal(a.cross(b), ak.zip(
{
"x": [[-108, -4], [], [-86], [56]],
"y": [[27, -12], [], [95], [68]],
"z": [[-3, 8], [], [-37], [-64]]
}
))
assert record_arrays_equal(b.cross(a), ak.zip(
{
"x": [[108, 4], [], [86], [-56]],
"y": [[-27, 12], [], [-95], [-68]],
"z": [[3, -8], [], [37], [64]]
}
))
assert ak.all(abs(a.unit.rho - 1) < ATOL)
assert ak.all(abs(a.unit.phi - a.phi) < ATOL)
def test_spherical_three_vector():
a = ak.zip(
{
"rho": [[1.0, 2.0], [], [3.0], [4.0]],
"theta": [[1.2, 0.7], [], [1.8], [1.9]],
"phi": [[0.3, 0.4], [], [0.5], [0.6]],
},
with_name="SphericalThreeVector",
highlevel=False
)
a = ak.Array(a, behavior=vector.behavior)
assert ak.all(abs((-a).x + a.x) < ATOL)
assert ak.all(abs((-a).y + a.y) < ATOL)
assert ak.all(abs((-a).z + a.z) < ATOL)
assert record_arrays_equal(a * (-1), -a)
def test_lorentz_vector():
a = ak.zip(
{
"x": [[1, 2], [], [3], [4]],
"y": [[5, 6], [], [7], [8]],
"z": [[9, 10], [], [11], [12]],
"t": [[50, 51], [], [52], [53]]
},
with_name="LorentzVector",
highlevel=False
)
a = ak.Array(a, behavior=vector.behavior)
b = ak.zip(
{
"x": [[4, 1], [], [10], [11]],
"y": [[17, 7], [], [11], [6]],
"z": [[9, 11], [], [5], [16]],
"t": [[60, 61], [], [62], [63]]
},
with_name="LorentzVector",
highlevel=False
)
b = ak.Array(b, behavior=vector.behavior)
assert record_arrays_equal(- a, ak.zip(
{
"x": [[-1, -2], [], [-3], [-4]],
"y": [[-5, -6], [], [-7], [-8]],
"z": [[-9, -10], [], [-11], [-12]],
"t": [[-50, -51], [], [-52], [-53]]
}
))
assert record_arrays_equal(a + b, ak.zip(
{
"x": [[5, 3], [], [13], [15]],
"y": [[22, 13], [], [18], [14]],
"z": [[18, 21], [], [16], [28]],
"t": [[110, 112], [], [114], [116]]
}
))
assert record_arrays_equal(a - b, ak.zip(
{
"x": [[-3, 1], [], [-7], [-7]],
"y": [[-12, -1], [], [-4], [2]],
"z": [[0, -1], [], [6], [-4]],
"t": [[-10, -10], [], [-10], [-10]]
}
))
assert record_arrays_equal(a * 2, ak.zip(
{
"x": [[2, 4], [], [6], [8]],
"y": [[10, 12], [], [14], [16]],
"z": [[18, 20], [], [22], [24]],
"t": [[100, 102], [], [104], [106]]
}
))
assert record_arrays_equal(a / 2, ak.zip(
{
"x": [[0.5, 1], [], [1.5], [2]],
"y": [[2.5, 3], [], [3.5], [4]],
"z": [[4.5, 5], [], [5.5], [6]],
"t": [[25, 25.5], [], [26], [26.5]]
}
))
assert record_arrays_equal(a.pvec, ak.zip(
{
"x": [[1, 2], [], [3], [4]],
"y": [[5, 6], [], [7], [8]],
"z": [[9, 10], [], [11], [12]],
}
))
boosted = a.boost(-a.boostvec)
assert ak.all(abs(boosted.x) < ATOL)
assert ak.all(abs(boosted.y) < ATOL)
assert ak.all(abs(boosted.z) < ATOL)
def test_pt_eta_phi_m_lorentz_vector():
a = ak.zip(
{
"pt": [[1, 2], [], [3], [4]],
"eta": [[1.2, 1.4], [], [1.6], [3.4]],
"phi": [[0.3, 0.4], [], [0.5], [0.6]],
"mass": [[0.5, 0.9], [], [1.3], [4.5]]
},
with_name="PtEtaPhiMLorentzVector",
highlevel=False
)
a = ak.Array(a, behavior=vector.behavior)
assert ak.all((a * (-2)).pt == ak.Array([[2, 4], [], [6], [8]]))
assert ak.all((a * (-2)).theta - ak.Array([
[2.556488570968, 2.65804615357],
[],
[2.74315571762],
[3.07487087733]
]) < ATOL)
assert ak.all((a * (-2)).phi - ak.Array([
[-2.8415926535, -2.7415926535],
[],
[-2.6415926535],
[-2.5415926535]
]) < ATOL)
assert record_arrays_equal(a / 2, ak.zip(
{
"pt": [[0.5, 1], [], [1.5], [2]],
"eta": [[1.2, 1.4], [], [1.6], [3.4]],
"phi": [[0.3, 0.4], [], [0.5], [0.6]],
"mass": [[0.25, 0.45], [], [0.65], [2.25]]
}
))
assert record_arrays_equal(a * (-1), -a)
boosted = a.boost(-a.boostvec)
assert ak.all(abs(boosted.x) < ATOL)
assert ak.all(abs(boosted.y) < ATOL)
assert ak.all(abs(boosted.z) < ATOL)
def test_pt_eta_phi_e_lorentz_vector():
a = ak.zip(
{
"pt": [[1, 2], [], [3], [4]],
"eta": [[1.2, 1.4], [], [1.6], [3.4]],
"phi": [[0.3, 0.4], [], [0.5], [0.6]],
"energy": [[50, 51], [], [52], [60]]
},
with_name="PtEtaPhiELorentzVector",
highlevel=False
)
a = ak.Array(a, behavior=vector.behavior)
assert ak.all((a * (-2)).pt == ak.Array([[2, 4], [], [6], [8]]))
assert ak.all((a * (-2)).theta - ak.Array([
[2.556488570968, 2.65804615357],
[],
[2.74315571762],
[3.07487087733]
]) < ATOL)
assert ak.all((a * (-2)).phi - ak.Array([
[-2.8415926535, -2.7415926535],
[],
[-2.6415926535],
[-2.5415926535]
]) < ATOL)
assert record_arrays_equal(a / 2, ak.zip(
{
"pt": [[0.5, 1], [], [1.5], [2]],
"eta": [[1.2, 1.4], [], [1.6], [3.4]],
"phi": [[0.3, 0.4], [], [0.5], [0.6]],
"energy": [[25, 25.5], [], [26], [30]]
}
))
assert record_arrays_equal(a * (-1), -a)
boosted = a.boost(-a.boostvec)
assert ak.all(abs(boosted.x) < ATOL)
assert ak.all(abs(boosted.y) < ATOL)
assert ak.all(abs(boosted.z) < ATOL)
1c7b1f4e4b7bfbf72b788463867c6a1ec1a46c6d | 900 | py | Python | testing/python/telBuggyScript2.py | sys-bio/rrplugins | 03af6ea70d73462ad88103f1e446dc0c5f3f971c | ["Apache-2.0"] | stars: null | issues: 8 (2015-12-02 .. 2021-08-20) | forks: 3 (2015-01-27 .. 2015-07-13) (forks repo listed as sys-bio/telPlugins)
import roadrunner
import teplugins as tel
i = 0
#for i in range(100):
try:
noisePlugin = tel.Plugin ("tel_add_noise")
print noisePlugin.listOfProperties()
# Create a roadrunner instance
rr = roadrunner.RoadRunner()
rr.load("sbml_test_0001.xml")
# Generate data
data = rr.simulate(0, 10, 511) # Want 512 points
# Get the dataseries from roadrunner
d = tel.getDataSeries (data)
# Assign the dataseries to the plugin inputdata
noisePlugin.InputData = d
# Set parameter for the 'size' of the noise
noisePlugin.Sigma = 3.e-6
# Add the noise
noisePlugin.execute()
# Get the data to plot
noisePlugin.InputData.plot()
# tel.show()
d.writeDataSeries ("testData2.dat")
d.readDataSeries ("testData2.dat")
print "done"
print i
except Exception as e:
print 'Problem: ' + `e`
1c7b885a3c4fad049ff2d1a6a859aa95838e0630 | 2,954 | py | Python | encyclopaedia/labels.py | tcyrus/renpy-encyclopaedia | 900517b34ab7b870f6ee03057f898fb5eb61313c | ["MIT"] | stars: null | issues: null | forks: null
from renpy import store
class Labels(store.object):
"""Controls how the labels that display Encyclopaedia data appear.
Attributes:
percentage_label (str): Placed next to the percentage unlocked number
page_label (str): Placed before the entry page displayed
page_separator_label (str): Placed in-between the
current page number and the total page number
sort_number_label (str): Label for Number Sorting
sort_alphabetical_label (str): Label for Alphabetical sorting
sort_reverse_alphabetical_label (str): Label for Reverse Alphabetical
sorting
sort_subject_label (str): Label for Subject sorting
sort_unread_label (str): Label for Unread sorting
unread_entry_label (str): Default for the tag next to unread entries
locked_entry_label (str): Default for a "Locked Entry" button
"""
def __init__(self, encyclopaedia):
self.encyclopaedia = encyclopaedia
self.percentage_label = '%'
self.page_label = 'Page'
self.page_separator_label = '/'
self.sort_number_label = "Number"
self.sort_alphabetical_label = "A to Z"
self.sort_reverse_alphabetical_label = "Z to A"
self.sort_subject_label = "Subject"
self.sort_unread_label = "Unread"
self.unread_entry_label = "New!"
self.locked_entry_label = "???"
@property
def percentage_unlocked(self):
"""Percentage representation of the amount of the encyclopaedia
that's unlocked. ie: '50%'.
Returns:
str
"""
percentage_unlocked = int(self.encyclopaedia.percentage_unlocked)
return "{}{}".format(percentage_unlocked, self.percentage_label)
@property
def entry_current_page(self):
"""The sub-page of an entry that is being viewed.
Returns:
str
"""
try:
total_pages = self.encyclopaedia.active.pages
except AttributeError:
raise AttributeError(
"Cannot display Entry's current page when no entry is open."
)
label = "{0} {1} {2} {3}".format(
self.page_label,
self.encyclopaedia.sub_current_position,
self.page_separator_label,
total_pages
)
return label
@property
def sorting_mode(self):
"""Label for the encyclopaedia's current sorting mode.
Returns:
str
"""
enc = self.encyclopaedia
sorting_strings = {
enc.SORT_NUMBER: self.sort_number_label,
enc.SORT_ALPHABETICAL: self.sort_alphabetical_label,
enc.SORT_REVERSE_ALPHABETICAL: self.sort_reverse_alphabetical_label, # NOQA: E501
enc.SORT_SUBJECT: self.sort_subject_label,
enc.SORT_UNREAD: self.sort_unread_label
}
return sorting_strings[enc.sorting_mode]
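
# Illustrative example (assumed encyclopaedia state, not part of the original module):
# with enc.percentage_unlocked == 50.0 the percentage_unlocked property renders "50%",
# and while viewing sub-page 2 of a 3-page entry entry_current_page renders "Page 2 / 3".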
1c7bed607992f89cbbe011d8fbb3d755bb77d244 | 1,816 | py | Python | ncservice/ncDeviceOps/threaded/get_configs.py | cunningr/yanccm | 2d8f891d704672f4d3a15472c7a13edf7832d53d | ["MIT"] | stars: null | issues: null | forks: null
import logging
from ncservice.ncDeviceOps.nc_device_ops import NcDeviceOps
from ncservice.ncDeviceOps.task_report import TaskReport
from ncservice.ncDeviceOps.threaded.base_thread_class import BaseThreadClass
logger = logging.getLogger('main.{}'.format(__name__))
extra = {'signature': '---SIGNATURE-NOT-SET---'}
class GetConfigs(BaseThreadClass):
def __init__(self, service):
super().__init__()
self.service = service
self.results = TaskReport(service)
def get_configs(self):
logger.debug('Requesting thread queue for _th_read_configs', extra=extra)
enclosure_queue = self.create_thread_queue(
self._th_read_configs
)
for device in self.service:
enclosure_queue.put(device)
enclosure_queue.join()
return self.results
def _th_read_configs(self, tid, queue):
while True:
target_device = queue.get()
device_name = target_device['device']
host = target_device['host']
port = target_device.get('ncport', 830)
session = NcDeviceOps(host, port=port, tid=tid)
current_config = session.nc_get_configs()
if current_config is not None:
self.results.set_device_config_data('original_running_configs', device_name, current_config)
self.results.set_device_config_data('current_running_configs', device_name, current_config)
self.results.set_service_result(device_name, 'SUCCESS')
else:
logger.error('TID-{}: Unable to retrieve config for device: {}'
.format(tid, device_name), extra=extra)
queue.task_done()
continue
session.close_session()
queue.task_done()
1c7de0aa67e4761191bbc1f1c380a31439d54c36 | 258 | py | Python | CodeHS/Looping/DoubleForLoop.py | Kev-in123/ICS2O7 | 425c59975d4ce6aa0937fd8715b51d04487e4fa9 | ["MIT"] | stars: 2 (2021-08-10 .. 2021-09-26) | issues: null | forks: null
"""
This program visualizes nested for loops by printing number 0 through 3
and then 0 through 3 for the nested loop.
"""
for i in range(4):
print("Outer for loop: " + str(i))
for j in range(4):
print(" Inner for loop: " + str(j)) | 28.666667 | 72 | 0.612403 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 164 | 0.635659 |
1c7e58a1470bb1ce0a1146ec377bf0292d1e20e6 | 4,657 | py | Python | saleor/graphql/channel/tests/test_base_channel_listing.py | fairhopeweb/saleor | 9ac6c22652d46ba65a5b894da5f1ba5bec48c019 | [
"CC-BY-4.0"
]
| 15,337 | 2015-01-12T02:11:52.000Z | 2021-10-05T19:19:29.000Z | saleor/graphql/channel/tests/test_base_channel_listing.py | fairhopeweb/saleor | 9ac6c22652d46ba65a5b894da5f1ba5bec48c019 | [
"CC-BY-4.0"
]
| 7,486 | 2015-02-11T10:52:13.000Z | 2021-10-06T09:37:15.000Z | saleor/graphql/channel/tests/test_base_channel_listing.py | aminziadna/saleor | 2e78fb5bcf8b83a6278af02551a104cfa555a1fb | [
"CC-BY-4.0"
]
| 5,864 | 2015-01-16T14:52:54.000Z | 2021-10-05T23:01:15.000Z | from collections import defaultdict
import graphene
import pytest
from django.core.exceptions import ValidationError
from ....shipping.error_codes import ShippingErrorCode
from ..mutations import BaseChannelListingMutation
def test_validate_duplicated_channel_ids(channel_PLN, channel_USD):
# given
channel_id = graphene.Node.to_global_id("Channel", channel_USD.id)
second_channel_id = graphene.Node.to_global_id("Channel", channel_PLN.id)
errors = defaultdict(list)
# when
result = BaseChannelListingMutation.validate_duplicated_channel_ids(
[channel_id],
[second_channel_id],
errors,
ShippingErrorCode.DUPLICATED_INPUT_ITEM.value,
)
# then
assert result is None
assert errors["input"] == []
def test_validate_duplicated_channel_ids_with_duplicates(channel_PLN):
# given
channel_id = graphene.Node.to_global_id("Channel", channel_PLN.id)
second_channel_id = graphene.Node.to_global_id("Channel", channel_PLN.id)
error_code = ShippingErrorCode.DUPLICATED_INPUT_ITEM.value
errors = defaultdict(list)
# when
result = BaseChannelListingMutation.validate_duplicated_channel_ids(
[channel_id], [second_channel_id], errors, error_code
)
# then
assert result is None
assert errors["input"][0].code == error_code
def test_validate_duplicated_channel_values(channel_PLN, channel_USD):
# given
channel_id = graphene.Node.to_global_id("Channel", channel_PLN.id)
second_channel_id = graphene.Node.to_global_id("Channel", channel_USD.id)
error_code = ShippingErrorCode.DUPLICATED_INPUT_ITEM.value
errors = defaultdict(list)
field = "add_channels"
# when
result = BaseChannelListingMutation.validate_duplicated_channel_values(
[channel_id, second_channel_id], field, errors, error_code
)
# then
assert result is None
assert errors[field] == []
def test_validate_duplicated_channel_values_with_duplicates(channel_PLN):
# given
channel_id = graphene.Node.to_global_id("Channel", channel_PLN.id)
second_channel_id = graphene.Node.to_global_id("Channel", channel_PLN.id)
error_code = ShippingErrorCode.DUPLICATED_INPUT_ITEM.value
errors = defaultdict(list)
field = "add_channels"
# when
result = BaseChannelListingMutation.validate_duplicated_channel_values(
[channel_id, second_channel_id], field, errors, error_code
)
# then
assert result is None
assert errors[field][0].code == error_code
def test_clean_channels_add_channels(channel_PLN):
# given
channel_id = graphene.Node.to_global_id("Channel", channel_PLN.id)
error_code = ShippingErrorCode.DUPLICATED_INPUT_ITEM.value
errors = defaultdict(list)
# when
result = BaseChannelListingMutation.clean_channels(
None, {"add_channels": [{"channel_id": channel_id}]}, errors, error_code
)
# then
assert result == {
"add_channels": [{"channel_id": channel_id, "channel": channel_PLN}],
"remove_channels": [],
}
assert errors["input"] == []
def test_clean_channels_remove_channels(channel_PLN):
# given
channel_id = graphene.Node.to_global_id("Channel", channel_PLN.id)
error_code = ShippingErrorCode.DUPLICATED_INPUT_ITEM.value
errors = defaultdict(list)
# when
result = BaseChannelListingMutation.clean_channels(
None, {"remove_channels": [channel_id]}, errors, error_code
)
# then
assert result == {"add_channels": [], "remove_channels": [str(channel_PLN.id)]}
assert errors["input"] == []
def test_test_clean_channels_with_errors(channel_PLN):
# given
channel_id = graphene.Node.to_global_id("Channel", channel_PLN.id)
error_code = ShippingErrorCode.DUPLICATED_INPUT_ITEM.value
errors = defaultdict(list)
# when
result = BaseChannelListingMutation.clean_channels(
None, {"remove_channels": [channel_id, channel_id]}, errors, error_code
)
# then
assert result == {}
assert errors["remove_channels"][0].code == error_code
def test_test_clean_channels_invalid_object_type(channel_PLN):
# given
channel_id = graphene.Node.to_global_id("Product", channel_PLN.id)
error_code = ShippingErrorCode.GRAPHQL_ERROR.value
errors = defaultdict(list)
# when
with pytest.raises(ValidationError) as error:
BaseChannelListingMutation.clean_channels(
None, {"remove_channels": [channel_id]}, errors, error_code
)
# then
assert (
error.value.error_dict["remove_channels"][0].message
== f"Must receive Channel id: {channel_id}."
)
| 30.84106 | 83 | 0.721065 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 551 | 0.118317 |
1c7ea7eccdeaa85272171df846b591a0afd65d34 | 9,843 | py | Python | francoralite/apps/francoralite_front/tools.py | Francoralite/francoralite | f8c5eeffe6d395c7e4222a9f5a4a7a01841b503c | [
"BSD-3-Clause"
]
| 2 | 2021-07-26T08:29:26.000Z | 2021-07-26T08:29:27.000Z | francoralite/apps/francoralite_front/tools.py | lluc/telemeta-integration | c2fb116471235674eae597abac84a7113e0f7c82 | [
"BSD-3-Clause"
]
| 167 | 2018-10-20T14:34:46.000Z | 2021-06-01T10:40:55.000Z | francoralite/apps/francoralite_front/tools.py | Francoralite/francoralite | f8c5eeffe6d395c7e4222a9f5a4a7a01841b503c | [
"BSD-3-Clause"
]
| 1 | 2021-06-06T12:16:49.000Z | 2021-06-06T12:16:49.000Z | # -*- coding: utf-8 -*-
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Authors: Luc LEGER / Coopérative ARTEFACTS <[email protected]>
import requests
from django.conf import settings
from django.contrib import messages
from django.core.exceptions import PermissionDenied
from django.http import HttpResponseRedirect, Http404
from django.utils.translation import gettext as _
from requests.exceptions import RequestException
from rest_framework import status
from francoralite.apps.francoralite_front.errors import APPLICATION_ERRORS
from .views.related import (
write_fond_related,
write_mission_related,
write_collection_related,
write_item_related)
HTTP_ERRORS = {
status.HTTP_400_BAD_REQUEST: APPLICATION_ERRORS['HTTP_API_400'],
status.HTTP_401_UNAUTHORIZED: APPLICATION_ERRORS['HTTP_API_401'],
status.HTTP_403_FORBIDDEN: APPLICATION_ERRORS['HTTP_API_403'],
status.HTTP_404_NOT_FOUND: APPLICATION_ERRORS['HTTP_API_404'],
status.HTTP_409_CONFLICT: APPLICATION_ERRORS['HTTP_API_409'],
}
PROBLEM_NAMES = [
"legal_rights",
"recording_context",
"location_gis",
]
class UserMessageError(RequestException): pass
def get_token_header(request):
"""
Return the Authorization header built from the OIDC access token stored in the session, or an empty dict if there is none.
"""
auth_token = request.session.get('oidc_access_token')
if auth_token:
return {'Authorization': 'Bearer ' + auth_token}
else:
return {}
def check_status_code(status_code, allowed_codes=(status.HTTP_200_OK,)):
"""
Raise the appropriate exception if the HTTP status code reports an error or is not in `allowed_codes`.
"""
if status_code == status.HTTP_403_FORBIDDEN:
raise PermissionDenied(_('Accès interdit.'))
if status_code == status.HTTP_404_NOT_FOUND:
raise Http404(_('Cette fiche n’existe pas.'))
if status_code == status.HTTP_409_CONFLICT:
raise UserMessageError(_('Une fiche avec ce code existe déjà.'))
if status.HTTP_400_BAD_REQUEST <= status_code < status.HTTP_500_INTERNAL_SERVER_ERROR:
raise RequestException()
if status_code not in allowed_codes:
raise Exception(HTTP_ERRORS[status_code])
def handle_message_from_exception(request, exception):
"""
Register a Django error message for the given exception on the request.
"""
if isinstance(exception, UserMessageError):
messages.add_message(request, messages.ERROR, exception)
elif exception is not None:
messages.add_message(request, messages.ERROR,
_('Une erreur indéterminée est survenue.'))
def request_api(endpoint):
"""
Send a GET request to the API endpoint and return the decoded JSON response.
"""
response = requests.get(settings.FRONT_HOST_URL + endpoint)
check_status_code(response.status_code)
return response.json()
def post(entity, form_entity, request, *args, **kwargs):
"""
Validate the form, create the entity through the API, then redirect.
"""
form = form_entity(request.POST, request.FILES)
entity_api = entity
entity_url = entity
# Processing the problem names entities
if entity in PROBLEM_NAMES:
entity_api = entity.replace('_', '')
# Processing URL for Fond entity
if entity == 'fond':
entity_url = 'institution/' + kwargs['id_institution'] \
+ '/' + entity
# Processing URL for Mission entity
if entity == 'mission':
entity_url = 'institution/' + kwargs['id_institution'] \
+ '/fond/' + kwargs['id_fond']\
+ '/' + entity
# Processing URL for Collection entity
if entity == 'collection':
entity_url = 'institution/' + kwargs['id_institution'] \
+ '/fond/' + kwargs['id_fond']\
+ '/mission/' + kwargs['id_mission'] \
+ '/' + entity
# Processing URL for Item entity
if entity == 'item':
entity_url = 'institution/' + kwargs['id_institution'] \
+ '/fond/' + kwargs['id_fond']\
+ '/mission/' + kwargs['id_mission'] \
+ '/collection/' + kwargs['id_collection'] \
+ '/' + entity
# Problem with old Telemeta fields/entities
if form.is_valid():
if entity == 'item':
# Concatenate domains
form.cleaned_data['domain'] = ''.join(form.cleaned_data['domain'])
# Remove the 'file' entry: keeping it causes bugs downstream
del form.cleaned_data['file']
try:
post_api(settings.FRONT_HOST_URL + '/api/' + entity_api,
data=form.cleaned_data,
request=request,
entity=entity)
if entity == 'fond':
return HttpResponseRedirect(
'/institution/' +
str(form.cleaned_data['institution']))
# Previous page ( not an edit page ... )
if len(request.session["referers"]) > 1:
try:
for referer in request.session["referers"]:
if 'add' not in referer.split('/'):
return HttpResponseRedirect(referer)
except Exception:
return HttpResponseRedirect('/' + entity)
return HttpResponseRedirect('/' + entity)
except RequestException as e:
handle_message_from_exception(request, e)
return HttpResponseRedirect('/' + entity_url + '/add')
return HttpResponseRedirect('/' + entity_url + '/add')
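# Example of the nested redirect URL (illustrative values, not real data):
# an "item" form posted with kwargs id_institution='1', id_fond='2',
# id_mission='3', id_collection='4' falls back to
# /institution/1/fond/2/mission/3/collection/4/item/add when validation fails.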
def post_api(endpoint, data, request, entity):
"""
POST the data (and files) to the API endpoint, write the related records and return the created entity as JSON.
"""
headers = get_token_header(request=request)
response = requests.post(
endpoint,
data=data,
files=request.FILES,
headers=headers,
)
check_status_code(response.status_code,
allowed_codes=(status.HTTP_200_OK, status.HTTP_201_CREATED))
entity_json = response.json()
if entity == "fond":
write_fond_related(entity_json, request, headers)
if entity == "mission":
write_mission_related(entity_json, request, headers)
if entity == "collection":
write_collection_related(entity_json, request, headers)
if entity == "item":
write_item_related(entity_json, request, headers)
return entity_json
def patch(entity, form_entity, request, *args, **kwargs):
"""
Validate the form, update the existing entity through the API, then redirect.
"""
form = form_entity(request.POST)
if entity == 'item':
form.fields['file'].required = False
id = kwargs.get('id')
entity_api = entity
if entity in PROBLEM_NAMES:
entity_api = entity.replace('_', '')
if form.is_valid():
if entity == "collection":
form.cleaned_data['recorded_from_year'] = \
form.data['recorded_from_year']
form.cleaned_data['recorded_to_year'] = \
form.data['recorded_to_year']
if form.cleaned_data['year_published'] is None:
form.cleaned_data['year_published'] = ''
if entity == "item":
# Concatenate domains
form.cleaned_data['domain'] = ''.join(form.cleaned_data['domain'])
try:
response = patch_api(
settings.FRONT_HOST_URL + '/api/' + entity_api + '/' + str(id),
data=form.cleaned_data,
request=request,
entity=entity
)
if(response.status_code != status.HTTP_200_OK):
return HttpResponseRedirect('/' + entity + '/edit/' +
str(id))
# Previous page ( not an edit page ... )
if len(request.session["referers"]) > 1:
for referer in request.session["referers"]:
if 'edit' not in referer.split('/'):
return HttpResponseRedirect(referer)
return HttpResponseRedirect('/' + entity)
except RequestException as e:
handle_message_from_exception(request, e)
return HttpResponseRedirect('/' + entity + '/edit/' + str(id))
return HttpResponseRedirect('/' + entity + '/edit/' + str(id))
def patch_api(endpoint, data, request, entity):
"""
PATCH the data to the API endpoint, update the related records and return the response.
"""
response = requests.patch(
endpoint,
data=data,
headers=get_token_header(request=request),
)
check_status_code(response.status_code)
entity_json = response.json()
if entity == "fond":
write_fond_related(
entity_json,
request,
headers=get_token_header(request=request),
)
if entity == "mission":
write_mission_related(
entity_json,
request,
headers=get_token_header(request=request),
)
if entity == "collection":
write_collection_related(
entity_json,
request,
headers=get_token_header(request=request),
)
if entity == "item":
write_item_related(
entity_json,
request,
headers=get_token_header(request=request),
)
return response
def delete(entity, request, *args, **kwargs):
"""
Delete the entity through the API and redirect to the referring page.
"""
id = kwargs.get('id')
entity_api = entity
if entity in PROBLEM_NAMES:
entity_api = entity.replace('_', '')
try:
delete_api(
settings.FRONT_HOST_URL + '/api/' + entity_api + '/' + str(id),
request=request,
)
return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
except RequestException as e:
handle_message_from_exception(request, e)
return HttpResponseRedirect('/' + entity)
def delete_api(endpoint, request):
"""
Send a DELETE request to the API endpoint and return the response.
"""
response = requests.delete(
endpoint,
headers=get_token_header(request=request),
)
check_status_code(response.status_code)
return response
| 30.286154 | 90 | 0.607843 | 46 | 0.004665 | 0 | 0 | 0 | 0 | 0 | 0 | 2,035 | 0.206369 |
1c7f5af1d319f74fdb488cde790b4cffce3502aa | 5,173 | py | Python | python2.7/site-packages/twisted/internet/iocpreactor/client.py | 84KaliPleXon3/sslstrip-hsts-openwrt | f875ded48078a3ed84bffef1e69dcbeaf2e77ae3 | [
"MIT"
]
| 4 | 2020-10-31T19:52:05.000Z | 2021-09-22T11:39:27.000Z | python2.7/site-packages/twisted/internet/iocpreactor/client.py | 84KaliPleXon3/sslstrip-hsts-openwrt | f875ded48078a3ed84bffef1e69dcbeaf2e77ae3 | [
"MIT"
]
| null | null | null | python2.7/site-packages/twisted/internet/iocpreactor/client.py | 84KaliPleXon3/sslstrip-hsts-openwrt | f875ded48078a3ed84bffef1e69dcbeaf2e77ae3 | [
"MIT"
]
| 2 | 2020-02-27T08:28:35.000Z | 2020-09-13T12:39:26.000Z | # Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
import socket
from twisted.persisted import styles
from twisted.internet.base import BaseConnector
from twisted.internet import defer, interfaces, error
from twisted.python import failure
from abstract import ConnectedSocket
from ops import ConnectExOp
from util import StateEventMachineType
from zope.interface import implements
class ClientSocket(ConnectedSocket):
def __init__(self, sock, protocol, sf):
ConnectedSocket.__init__(self, sock, protocol, sf)
self.repstr = '<%s to %s at %x>' % (self.__class__, self.sf.addr, id(self))
self.logstr = protocol.__class__.__name__+",client"
self.startReading()
class _SubConnector:
state = "connecting"
socket = None
def __init__(self, sf):
self.sf = sf
def startConnecting(self):
d = defer.maybeDeferred(self.sf.resolveAddress)
d.addCallback(self._cbResolveDone)
d.addErrback(self._ebResolveErr)
def _cbResolveDone(self, addr):
if self.state == "dead":
return
try:
skt = socket.socket(*self.sf.sockinfo)
except socket.error, se:
raise error.ConnectBindError(se[0], se[1])
try:
if self.sf.bindAddress is None:
self.sf.bindAddress = ("", 0) # necessary for ConnectEx
skt.bind(self.sf.bindAddress)
except socket.error, se:
raise error.ConnectBindError(se[0], se[1])
self.socket = skt
op = ConnectExOp(self)
op.initiateOp(self.socket, addr)
def _ebResolveErr(self, fail):
if self.state == "dead":
return
self.sf.connectionFailed(fail)
def connectDone(self):
if self.state == "dead":
return
self.sf.connectionSuccess()
def connectErr(self, err):
if self.state == "dead":
return
self.sf.connectionFailed(err)
class SocketConnector(styles.Ephemeral, object):
__metaclass__ = StateEventMachineType
implements(interfaces.IConnector)
transport_class = ClientSocket
events = ["stopConnecting", "disconnect", "connect"]
sockinfo = None
factoryStarted = False
timeoutID = None
def __init__(self, addr, factory, timeout, bindAddress):
from twisted.internet import reactor
self.state = "disconnected"
self.addr = addr
self.factory = factory
self.timeout = timeout
self.bindAddress = bindAddress
self.reactor = reactor
self.prepareAddress()
def handle_connecting_stopConnecting(self):
self.connectionFailed(failure.Failure(error.UserError()))
def handle_disconnected_stopConnecting(self):
raise error.NotConnectingError
handle_connected_stopConnecting = handle_disconnected_stopConnecting
handle_connecting_disconnect = handle_connecting_stopConnecting
def handle_connected_disconnect(self):
self.transport.loseConnection()
def handle_disconnected_disconnect(self):
pass
def handle_connecting_connect(self):
raise RuntimeError, "can't connect in this state"
handle_connected_connect = handle_connecting_connect
def handle_disconnected_connect(self):
self.state = "connecting"
if not self.factoryStarted:
self.factory.doStart()
self.factoryStarted = True
if self.timeout is not None:
self.timeoutID = self.reactor.callLater(self.timeout, self.connectionFailed, failure.Failure(error.TimeoutError()))
self.sub = _SubConnector(self)
self.sub.startConnecting()
self.factory.startedConnecting(self)
def prepareAddress(self):
raise NotImplementedError
def resolveAddress(self):
raise NotImplementedError
def connectionLost(self, reason):
self.state = "disconnected"
self.factory.clientConnectionLost(self, reason)
if self.state == "disconnected":
# factory hasn't called our connect() method
self.factory.doStop()
self.factoryStarted = 0
def connectionFailed(self, reason):
if self.sub.socket:
self.sub.socket.close()
self.sub.state = "dead"
del self.sub
self.state = "disconnected"
self.cancelTimeout()
self.factory.clientConnectionFailed(self, reason)
if self.state == "disconnected":
# factory hasn't called our connect() method
self.factory.doStop()
self.factoryStarted = 0
def cancelTimeout(self):
if self.timeoutID:
try:
self.timeoutID.cancel()
except ValueError:
pass
del self.timeoutID
def connectionSuccess(self):
socket = self.sub.socket
self.sub.state = "dead"
del self.sub
self.state = "connected"
self.cancelTimeout()
p = self.factory.buildProtocol(self.buildAddress(socket.getpeername()))
self.transport = self.transport_class(socket, p, self)
p.makeConnection(self.transport)
| 30.429412 | 127 | 0.64972 | 4,749 | 0.918036 | 0 | 0 | 0 | 0 | 0 | 0 | 429 | 0.082931 |
1c7f78b673d9e154cc86707fcd75f178c99f6089 | 2,678 | py | Python | pypika/tests/dialects/test_mssql.py | uhrm/pypika | b390aa33c980704555d75d27ade5bfa4d1d4bae7 | [
"Apache-2.0"
]
| null | null | null | pypika/tests/dialects/test_mssql.py | uhrm/pypika | b390aa33c980704555d75d27ade5bfa4d1d4bae7 | [
"Apache-2.0"
]
| null | null | null | pypika/tests/dialects/test_mssql.py | uhrm/pypika | b390aa33c980704555d75d27ade5bfa4d1d4bae7 | [
"Apache-2.0"
]
| null | null | null | import unittest
from pypika import Table
from pypika.analytics import Count
from pypika.dialects import MSSQLQuery
from pypika.utils import QueryException
class SelectTests(unittest.TestCase):
def test_normal_select(self):
q = MSSQLQuery.from_("abc").select("def")
self.assertEqual('SELECT "def" FROM "abc"', str(q))
def test_distinct_select(self):
q = MSSQLQuery.from_("abc").select("def").distinct()
self.assertEqual('SELECT DISTINCT "def" FROM "abc"', str(q))
def test_top_distinct_select(self):
q = MSSQLQuery.from_("abc").select("def").top(10).distinct()
self.assertEqual('SELECT DISTINCT TOP (10) "def" FROM "abc"', str(q))
def test_top_select(self):
q = MSSQLQuery.from_("abc").select("def").top(10)
self.assertEqual('SELECT TOP (10) "def" FROM "abc"', str(q))
def test_top_select_non_int(self):
with self.assertRaisesRegex(QueryException, "TOP value must be an integer"):
MSSQLQuery.from_("abc").select("def").top("a")
def test_limit(self):
q = MSSQLQuery.from_("abc").select("def").orderby("def").limit(10)
self.assertEqual('SELECT "def" FROM "abc" ORDER BY "def" OFFSET 0 ROWS FETCH NEXT 10 ROWS ONLY', str(q))
def test_fetch_next(self):
q = MSSQLQuery.from_("abc").select("def").orderby("def").fetch_next(10)
self.assertEqual('SELECT "def" FROM "abc" ORDER BY "def" OFFSET 0 ROWS FETCH NEXT 10 ROWS ONLY', str(q))
def test_offset(self):
q = MSSQLQuery.from_("abc").select("def").orderby("def").offset(10)
self.assertEqual('SELECT "def" FROM "abc" ORDER BY "def" OFFSET 10 ROWS', str(q))
def test_fetch_next_with_offset(self):
q = MSSQLQuery.from_("abc").select("def").orderby("def").fetch_next(10).offset(10)
self.assertEqual('SELECT "def" FROM "abc" ORDER BY "def" OFFSET 10 ROWS FETCH NEXT 10 ROWS ONLY', str(q))
def test_groupby_alias_False_does_not_group_by_alias_with_standard_query(self):
t = Table('table1')
col = t.abc.as_('a')
q = MSSQLQuery.from_(t).select(col, Count('*')).groupby(col)
self.assertEqual('SELECT "abc" "a",COUNT(\'*\') FROM "table1" GROUP BY "abc"', str(q))
def test_groupby_alias_False_does_not_group_by_alias_when_subqueries_are_present(self):
t = Table('table1')
subquery = MSSQLQuery.from_(t).select(t.abc)
col = subquery.abc.as_('a')
q = MSSQLQuery.from_(subquery).select(col, Count('*')).groupby(col)
self.assertEqual(
'SELECT "sq0"."abc" "a",COUNT(\'*\') FROM (SELECT "abc" FROM "table1") "sq0" GROUP BY "sq0"."abc"', str(q)
)
| 38.257143 | 118 | 0.647872 | 2,519 | 0.940627 | 0 | 0 | 0 | 0 | 0 | 0 | 755 | 0.281927 |
1c7fbcb14ea301bda84e83c0a6cddb4f13bae6fe | 14,860 | py | Python | Postprocessing/Hardt/Hardt.py | maliha93/Fairness-Analysis-Code | acf13c6e7993704fc627249fe4ada44d8b616264 | [
"MIT"
]
| null | null | null | Postprocessing/Hardt/Hardt.py | maliha93/Fairness-Analysis-Code | acf13c6e7993704fc627249fe4ada44d8b616264 | [
"MIT"
]
| null | null | null | Postprocessing/Hardt/Hardt.py | maliha93/Fairness-Analysis-Code | acf13c6e7993704fc627249fe4ada44d8b616264 | [
"MIT"
]
| null | null | null | import cvxpy as cvx
import numpy as np
from collections import namedtuple
from metric import metric, cd
import pandas as pd
import sys
from helper import make_dataset
class Model(namedtuple('Model', 'pred label')):
def logits(self):
raw_logits = np.clip(np.log(self.pred / (1 - self.pred)), -100, 100)
return raw_logits
def num_samples(self):
return len(self.pred)
def base_rate(self):
"""
Percentage of samples belonging to the positive class
"""
return np.mean(self.label)
def accuracy(self):
return self.accuracies().mean()
def precision(self):
return (self.label[self.pred.round() == 1]).mean()
def recall(self):
return (self.pred[self.label == 1].round()).mean()
def tpr(self):
"""
True positive rate
"""
return np.mean(np.logical_and(self.pred.round() == 1, self.label == 1))
def fpr(self):
"""
False positive rate
"""
return np.mean(np.logical_and(self.pred.round() == 1, self.label == 0))
def tnr(self):
"""
True negative rate
"""
return np.mean(np.logical_and(self.pred.round() == 0, self.label == 0))
def fnr(self):
"""
False negative rate
"""
return np.mean(np.logical_and(self.pred.round() == 0, self.label == 1))
def fn_cost(self):
"""
Generalized false negative cost
"""
return 1 - self.pred[self.label == 1].mean()
def fp_cost(self):
"""
Generalized false positive cost
"""
return self.pred[self.label == 0].mean()
def accuracies(self):
return self.pred.round() == self.label
def eq_odds(self, othr, mix_rates=None):
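# Compute (or reuse) the four mixing rates (sp2p, sn2p, op2p, on2p) and
# randomly flip the corresponding fraction of positive/negative predictions
# in each group, so that the two returned models satisfy equalized odds.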
has_mix_rates = not (mix_rates is None)
if not has_mix_rates:
mix_rates = self.eq_odds_optimal_mix_rates(othr)
sp2p, sn2p, op2p, on2p = tuple(mix_rates)
self_fair_pred = self.pred.copy()
self_pp_indices, = np.nonzero(self.pred.round())
self_pn_indices, = np.nonzero(1 - self.pred.round())
np.random.shuffle(self_pp_indices)
np.random.shuffle(self_pn_indices)
n2p_indices = self_pn_indices[:int(len(self_pn_indices) * sn2p)]
self_fair_pred[n2p_indices] = 1 - self_fair_pred[n2p_indices]
p2n_indices = self_pp_indices[:int(len(self_pp_indices) * (1 - sp2p))]
self_fair_pred[p2n_indices] = 1 - self_fair_pred[p2n_indices]
othr_fair_pred = othr.pred.copy()
othr_pp_indices, = np.nonzero(othr.pred.round())
othr_pn_indices, = np.nonzero(1 - othr.pred.round())
np.random.shuffle(othr_pp_indices)
np.random.shuffle(othr_pn_indices)
n2p_indices = othr_pn_indices[:int(len(othr_pn_indices) * on2p)]
othr_fair_pred[n2p_indices] = 1 - othr_fair_pred[n2p_indices]
p2n_indices = othr_pp_indices[:int(len(othr_pp_indices) * (1 - op2p))]
othr_fair_pred[p2n_indices] = 1 - othr_fair_pred[p2n_indices]
fair_self = Model(self_fair_pred, self.label)
fair_othr = Model(othr_fair_pred, othr.label)
if not has_mix_rates:
return fair_self, fair_othr, mix_rates
else:
return fair_self, fair_othr
def eq_odds_optimal_mix_rates(self, othr):
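# Solve a small linear program with cvxpy: minimise the summed error rates of
# both groups subject to equal generalised false-positive and false-negative
# rates across the groups. The variables are the flip probabilities
# p2p/p2n/n2p/n2n for each group; only (sp2p, sn2p, op2p, on2p) are returned.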
sbr = float(self.base_rate())
obr = float(othr.base_rate())
sp2p = cvx.Variable(1)
sp2n = cvx.Variable(1)
sn2p = cvx.Variable(1)
sn2n = cvx.Variable(1)
op2p = cvx.Variable(1)
op2n = cvx.Variable(1)
on2p = cvx.Variable(1)
on2n = cvx.Variable(1)
sfpr = self.fpr() * sp2p + self.tnr() * sn2p
sfnr = self.fnr() * sn2n + self.tpr() * sp2n
ofpr = othr.fpr() * op2p + othr.tnr() * on2p
ofnr = othr.fnr() * on2n + othr.tpr() * op2n
error = sfpr + sfnr + ofpr + ofnr
sflip = 1 - self.pred
sconst = self.pred
oflip = 1 - othr.pred
oconst = othr.pred
sm_tn = np.logical_and(self.pred.round() == 0, self.label == 0)
sm_fn = np.logical_and(self.pred.round() == 0, self.label == 1)
sm_tp = np.logical_and(self.pred.round() == 1, self.label == 1)
sm_fp = np.logical_and(self.pred.round() == 1, self.label == 0)
om_tn = np.logical_and(othr.pred.round() == 0, othr.label == 0)
om_fn = np.logical_and(othr.pred.round() == 0, othr.label == 1)
om_tp = np.logical_and(othr.pred.round() == 1, othr.label == 1)
om_fp = np.logical_and(othr.pred.round() == 1, othr.label == 0)
spn_given_p = (sn2p * (sflip * sm_fn).mean() + sn2n * (sconst * sm_fn).mean()) / sbr + \
(sp2p * (sconst * sm_tp).mean() + sp2n * (sflip * sm_tp).mean()) / sbr
spp_given_n = (sp2n * (sflip * sm_fp).mean() + sp2p * (sconst * sm_fp).mean()) / (1 - sbr) + \
(sn2p * (sflip * sm_tn).mean() + sn2n * (sconst * sm_tn).mean()) / (1 - sbr)
opn_given_p = (on2p * (oflip * om_fn).mean() + on2n * (oconst * om_fn).mean()) / obr + \
(op2p * (oconst * om_tp).mean() + op2n * (oflip * om_tp).mean()) / obr
opp_given_n = (op2n * (oflip * om_fp).mean() + op2p * (oconst * om_fp).mean()) / (1 - obr) + \
(on2p * (oflip * om_tn).mean() + on2n * (oconst * om_tn).mean()) / (1 - obr)
constraints = [
sp2p == 1 - sp2n,
sn2p == 1 - sn2n,
op2p == 1 - op2n,
on2p == 1 - on2n,
sp2p <= 1,
sp2p >= 0,
sn2p <= 1,
sn2p >= 0,
op2p <= 1,
op2p >= 0,
on2p <= 1,
on2p >= 0,
spp_given_n == opp_given_n,
spn_given_p == opn_given_p,
]
prob = cvx.Problem(cvx.Minimize(error), constraints)
prob.solve()
res = np.array([sp2p.value, sn2p.value, op2p.value, on2p.value])
return res
def __repr__(self):
return '\n'.join([
'Accuracy:\t%.3f' % self.accuracy(),
'F.P. cost:\t%.3f' % self.fp_cost(),
'F.N. cost:\t%.3f' % self.fn_cost(),
'T.P. rate:\t%.3f' % self.tpr(),
'T.N. rate:\t%.3f' % self.tnr(),
'Precision:\t%.3f' % self.precision(),
'Recall:\t\t%.3f' % self.recall(),
'Base rate:\t%.3f' % self.base_rate(),
'Avg. score:\t%.3f' % self.pred.mean(),
])
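# Minimal usage sketch (hypothetical arrays, not tied to a specific dataset):
# >>> grp0 = Model(pred=np.array([.9, .2, .6, .4]), label=np.array([1, 0, 1, 0]))
# >>> grp1 = Model(pred=np.array([.4, .8, .1, .7]), label=np.array([0, 1, 0, 1]))
# >>> fair0, fair1, mix_rates = Model.eq_odds(grp0, grp1)
# The mix_rates found on validation data are then re-applied to the test-set
# models, as done in Adult/Compas/German below.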
def Adult(f="data/adult_post.csv"):
data_filename = f
test_and_val_data = pd.read_csv(data_filename)
order = np.arange(len(test_and_val_data))
leng = order.shape[0]
val_indices = order[0:int(leng*0.7)]
test_indices = order[int(leng*0.7):]
val_data = test_and_val_data.iloc[val_indices]
test_data = test_and_val_data.iloc[test_indices]
# Create model objects - one for each group, validation and test
group_0_val_data = val_data[val_data['group'] == 0]
group_1_val_data = val_data[val_data['group'] == 1]
group_0_test_data = test_data[test_data['group'] == 0]
group_1_test_data = test_data[test_data['group'] == 1]
group_0_val_model = Model(group_0_val_data['prediction'].to_numpy(), group_0_val_data['label'].to_numpy())
group_1_val_model = Model(group_1_val_data['prediction'].to_numpy(), group_1_val_data['label'].to_numpy())
group_0_test_model = Model(group_0_test_data['prediction'].to_numpy(), group_0_test_data['label'].to_numpy())
group_1_test_model = Model(group_1_test_data['prediction'].to_numpy(), group_1_test_data['label'].to_numpy())
# Find mixing rates for equalized odds models
_, _, mix_rates = Model.eq_odds(group_0_val_model, group_1_val_model)
# Apply the mixing rates to the test models
eq_odds_group_0_test_model, eq_odds_group_1_test_model = Model.eq_odds(group_0_test_model,
group_1_test_model,
mix_rates)
cd_eq_odds_group_0_test_model, cd_eq_odds_group_1_test_model = Model.eq_odds(group_1_test_model,
group_0_test_model,
mix_rates)
metric(eq_odds_group_0_test_model.label, eq_odds_group_0_test_model.pred,
eq_odds_group_1_test_model.label, eq_odds_group_1_test_model.pred)
y_cd = cd(eq_odds_group_0_test_model.pred.round(), eq_odds_group_1_test_model.pred.round(),\
cd_eq_odds_group_0_test_model.pred.round(),cd_eq_odds_group_1_test_model.pred.round())
group_0_test_data['pred'] = eq_odds_group_0_test_model.pred.round()
group_1_test_data['pred'] = eq_odds_group_1_test_model.pred.round()
df = group_0_test_data.append(group_1_test_data)
df = df.drop(['group', 'prediction', 'label'], axis=1).sample(frac=1)
df.to_csv("results_Hardt/adult_test_repaired.csv", index=False)
np.savetxt("results_Hardt/adult_test_repaired_cd.csv", y_cd, delimiter=",")
def Compas(f="data/compas_post.csv", f1='', f2=''):
data_filename = f
test_and_val_data = pd.read_csv(data_filename)
# Randomly split the data into two sets - one for computing the fairness constants
order = np.arange(len(test_and_val_data))
leng = order.shape[0]
val_indices = order[0:int(leng*0.7)]
test_indices = order[int(leng*0.7):]
val_data = test_and_val_data.iloc[val_indices]
test_data = test_and_val_data.iloc[test_indices]
# Create model objects - one for each group, validation and test
group_0_val_data = val_data[val_data['group'] == 0]
group_1_val_data = val_data[val_data['group'] == 1]
group_0_test_data = test_data[test_data['group'] == 0]
group_1_test_data = test_data[test_data['group'] == 1]
group_0_val_model = Model(group_0_val_data['prediction'].to_numpy(), group_0_val_data['label'].to_numpy())
group_1_val_model = Model(group_1_val_data['prediction'].to_numpy(), group_1_val_data['label'].to_numpy())
group_0_test_model = Model(group_0_test_data['prediction'].to_numpy(), group_0_test_data['label'].to_numpy())
group_1_test_model = Model(group_1_test_data['prediction'].to_numpy(), group_1_test_data['label'].to_numpy())
# Find mixing rates for equalized odds models
_, _, mix_rates = Model.eq_odds(group_0_val_model, group_1_val_model)
# Apply the mixing rates to the test models
eq_odds_group_0_test_model, eq_odds_group_1_test_model = Model.eq_odds(group_0_test_model,
group_1_test_model,
mix_rates)
cd_eq_odds_group_0_test_model, cd_eq_odds_group_1_test_model = Model.eq_odds(group_1_test_model,
group_0_test_model,
mix_rates)
metric(eq_odds_group_0_test_model.label, eq_odds_group_0_test_model.pred,
eq_odds_group_1_test_model.label, eq_odds_group_1_test_model.pred)
y_cd = cd(eq_odds_group_0_test_model.pred.round(), eq_odds_group_1_test_model.pred.round(),\
cd_eq_odds_group_0_test_model.pred.round(),cd_eq_odds_group_1_test_model.pred.round())
group_0_test_data['pred'] = eq_odds_group_0_test_model.pred.round()
group_1_test_data['pred'] = eq_odds_group_1_test_model.pred.round()
df = group_0_test_data.append(group_1_test_data)
df = df.drop(['group', 'prediction', 'label'], axis=1).sample(frac=1)
df.to_csv(f1+"results_Hardt/compas_test_repaired"+f2+".csv", index=False)
np.savetxt(f1+"results_Hardt/compas_test_repaired"+f2+"_cd.csv", y_cd, delimiter=",")
def German(f="data/german_post.csv"):
data_filename = f
test_and_val_data = pd.read_csv(data_filename)
# Randomly split the data into two sets - one for computing the fairness constants
order = np.arange(len(test_and_val_data))
leng = order.shape[0]
val_indices = order[0:int(leng*0.7)]
test_indices = order[int(leng*0.7):]
val_data = test_and_val_data.iloc[val_indices]
test_data = test_and_val_data.iloc[test_indices]
# Create model objects - one for each group, validation and test
group_0_val_data = val_data[val_data['group'] == 0]
group_1_val_data = val_data[val_data['group'] == 1]
group_0_test_data = test_data[test_data['group'] == 0]
group_1_test_data = test_data[test_data['group'] == 1]
group_0_val_model = Model(group_0_val_data['prediction'].to_numpy(), group_0_val_data['label'].to_numpy())
group_1_val_model = Model(group_1_val_data['prediction'].to_numpy(), group_1_val_data['label'].to_numpy())
group_0_test_model = Model(group_0_test_data['prediction'].to_numpy(), group_0_test_data['label'].to_numpy())
group_1_test_model = Model(group_1_test_data['prediction'].to_numpy(), group_1_test_data['label'].to_numpy())
# Find mixing rates for equalized odds models
_, _, mix_rates = Model.eq_odds(group_0_val_model, group_1_val_model)
# Apply the mixing rates to the test models
eq_odds_group_0_test_model, eq_odds_group_1_test_model = Model.eq_odds(group_0_test_model,
group_1_test_model,
mix_rates)
cd_eq_odds_group_0_test_model, cd_eq_odds_group_1_test_model = Model.eq_odds(group_1_test_model,
group_0_test_model,
mix_rates)
metric(eq_odds_group_0_test_model.label, eq_odds_group_0_test_model.pred,
eq_odds_group_1_test_model.label, eq_odds_group_1_test_model.pred)
y_cd = cd(eq_odds_group_0_test_model.pred.round(), eq_odds_group_1_test_model.pred.round(),\
cd_eq_odds_group_0_test_model.pred.round(),cd_eq_odds_group_1_test_model.pred.round())
group_0_test_data['pred'] = eq_odds_group_0_test_model.pred.round()
group_1_test_data['pred'] = eq_odds_group_1_test_model.pred.round()
df = group_0_test_data.append(group_1_test_data)
df = df.drop(['group', 'prediction', 'label'], axis=1).sample(frac=1)
df.to_csv("results_Hardt/german_test_repaired.csv", index=False)
np.savetxt("results_Hardt/german_test_repaired_cd.csv", y_cd, delimiter=",")
def Hardt(dataset):
make_dataset(dataset)
if dataset == 'adult':
Adult()
elif dataset == 'compas':
Compas()
elif dataset == 'german':
German()
| 43.323615 | 113 | 0.611844 | 6,337 | 0.426447 | 0 | 0 | 0 | 0 | 0 | 0 | 1,939 | 0.130485 |
1c802e1801de4019c3b100aff72c042e2ff702ed | 1,632 | py | Python | tests/test_exceptions.py | nesnahnoj/py3-textract | 61290fb44c964cf78ce64593fdf0076143dbcd91 | [
"MIT"
]
| 2 | 2015-03-03T12:40:17.000Z | 2015-03-03T13:05:14.000Z | tests/test_exceptions.py | anderser/textract | 8f7b32cadabcd13ad1eab1a56b9aa151901d0453 | [
"MIT"
]
| null | null | null | tests/test_exceptions.py | anderser/textract | 8f7b32cadabcd13ad1eab1a56b9aa151901d0453 | [
"MIT"
]
| null | null | null | import unittest
import os
import subprocess
import base
class ExceptionTestCase(base.GenericUtilities, unittest.TestCase):
"""This class contains a bunch of tests to make sure that textract
fails in expected ways.
"""
def test_unsupported_extension_cli(self):
"""Make sure unsupported extension exits with non-zero status"""
filename = self.get_temp_filename(extension="extension")
command = "textract %(filename)s 2> /dev/null" % locals()
self.assertEqual(1, subprocess.call(command, shell=True))
os.remove(filename)
def test_unsupported_extension_python(self):
"""Make sure unsupported extension raises the correct error"""
filename = self.get_temp_filename(extension="extension")
import textract
from textract.exceptions import ExtensionNotSupported
with self.assertRaises(ExtensionNotSupported):
textract.process(filename)
os.remove(filename)
def test_missing_filename_cli(self):
"""Make sure missing files exits with non-zero status"""
filename = self.get_temp_filename()
os.remove(filename)
command = "textract %(filename)s 2> /dev/null" % locals()
self.assertEqual(1, subprocess.call(command, shell=True))
def test_missing_filename_python(self):
"""Make sure missing files raise the correct error"""
filename = self.get_temp_filename()
os.remove(filename)
import textract
from textract.exceptions import MissingFileError
with self.assertRaises(MissingFileError):
textract.process(filename)
| 37.090909 | 72 | 0.692402 | 1,572 | 0.963235 | 0 | 0 | 0 | 0 | 0 | 0 | 431 | 0.264093 |
1c81071b5834983f0325a721292427a8ce6ce5f8 | 1,998 | py | Python | dloud_ads/circular_queue.py | dataloudlabs/dloud-ads | d0ad3f169c2384292db4097e00ba7858f37a8198 | [
"MIT"
]
| null | null | null | dloud_ads/circular_queue.py | dataloudlabs/dloud-ads | d0ad3f169c2384292db4097e00ba7858f37a8198 | [
"MIT"
]
| null | null | null | dloud_ads/circular_queue.py | dataloudlabs/dloud-ads | d0ad3f169c2384292db4097e00ba7858f37a8198 | [
"MIT"
]
| null | null | null | """Queue implementation using circularly linked list for storage."""
class CircularQueue:
"""Queue implementation using circularly linked list for storage."""
class _Node:
"""Lightweight, nonpublic class for storing a singly linked node."""
__slots__ = '_element', '_next'
def __init__(self, element, next_element):
self._element = element
self._next = next_element
def __init__(self):
"""Create an empty queue."""
self._tail = None
self._size = 0
def __len__(self):
"""Return the number of elements in the queue."""
return self._size
def is_empty(self):
"""Return True if the queue is empty."""
return self._size == 0
def first(self):
"""Return (but do not remove) the element at the front of the queue.
Raise ValueError exception if the queue is empty.
"""
if self.is_empty():
raise ValueError('Queue is empty')
head = self._tail._next
return head._element
def dequeue(self):
"""Remove and return the first element of the queue (i.e., FIFO).
Raise ValueError exception if the queue is empty.
"""
if self.is_empty():
raise ValueError('Queue is empty')
oldhead = self._tail._next
if self._size == 1:
self._tail = None
else:
self._tail._next = oldhead._next
self._size -= 1
return oldhead._element
def enqueue(self, element):
"""Add an element to the back of queue."""
newest = self._Node(element, None)
if self.is_empty():
newest._next = newest
else:
newest._next = self._tail._next
self._tail._next = newest
self._tail = newest
self._size += 1
def rotate(self):
"""Rotate front element to the back of the queue."""
if self._size > 0:
self._tail = self._tail._next
| 29.382353 | 76 | 0.578579 | 1,927 | 0.964464 | 0 | 0 | 0 | 0 | 0 | 0 | 739 | 0.36987 |
1c828f50eb6739af4655f73774016c25d4ee4ac9 | 1,389 | py | Python | suplemon/helpers.py | johnmbaughman/suplemon | fdde20f2181c280236d40f89b89b9bbe5843440e | [
"MIT"
]
| null | null | null | suplemon/helpers.py | johnmbaughman/suplemon | fdde20f2181c280236d40f89b89b9bbe5843440e | [
"MIT"
]
| null | null | null | suplemon/helpers.py | johnmbaughman/suplemon | fdde20f2181c280236d40f89b89b9bbe5843440e | [
"MIT"
]
| null | null | null | # -*- encoding: utf-8
"""
Various helper constants and functions.
"""
import os
import re
import sys
import time
import traceback
def curr_time():
"""Current time in %H:%M"""
return time.strftime("%H:%M")
def curr_time_sec():
"""Current time in %H:%M:%S"""
return time.strftime("%H:%M:%S")
def multisplit(data, delimiters):
pattern = "|".join(map(re.escape, delimiters))
return re.split(pattern, data)
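# Example:
# >>> multisplit("a,b;c", [",", ";"])
# ['a', 'b', 'c']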
def get_error_info():
"""Return info about last error."""
msg = "{0}\n{1}".format(str(traceback.format_exc()), str(sys.exc_info()))
return msg
def get_string_between(start, stop, s):
"""Search string for a substring between two delimeters. False if not found."""
i1 = s.find(start)
if i1 == -1:
return False
s = s[i1 + len(start):]
i2 = s.find(stop)
if i2 == -1:
return False
s = s[:i2]
return s
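# Examples:
# >>> get_string_between("[", "]", "key[value]rest")
# 'value'
# >>> get_string_between("<", ">", "no delimiters here")
# False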
def whitespace(line):
"""Return index of first non whitespace character on a line."""
i = 0
for char in line:
if char != " ":
break
i += 1
return i
def parse_path(path):
"""Parse a relative path and return full directory and filename as a tuple."""
if path[:2] == "~" + os.sep:
p = os.path.expanduser("~")
path = os.path.join(p+os.sep, path[2:])
ab = os.path.abspath(path)
parts = os.path.split(ab)
return parts
| 21.369231 | 83 | 0.592513 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 419 | 0.301656 |
1c850ddd900887b33d213aba43297d734592063b | 31,713 | py | Python | geofem/emg3d/meshes.py | iisadoramacedo/geofem-master | cc5cf4ae660480dd4dc3d805310f7207fb28230e | [
"MIT"
]
| null | null | null | geofem/emg3d/meshes.py | iisadoramacedo/geofem-master | cc5cf4ae660480dd4dc3d805310f7207fb28230e | [
"MIT"
]
| 1 | 2020-10-29T11:42:21.000Z | 2020-10-29T11:42:21.000Z | build/lib/geofem/emg3d/meshes.py | iisadoramacedo/geofem-master | cc5cf4ae660480dd4dc3d805310f7207fb28230e | [
"MIT"
]
| 1 | 2020-07-09T18:15:10.000Z | 2020-07-09T18:15:10.000Z | """
:mod:`meshes` -- Discretization
===============================
Everything related to meshes appropriate for the multigrid solver.
"""
# Copyright 2018-2020 The emg3d Developers.
#
# This file is part of emg3d.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import numpy as np
from copy import deepcopy
from scipy import optimize
__all__ = ['TensorMesh', 'get_hx_h0', 'get_cell_numbers', 'get_stretched_h',
'get_domain', 'get_hx']
class TensorMesh:
"""Rudimentary mesh for multigrid calculation.
The tensor-mesh :class:`discretize.TensorMesh` is a powerful tool,
including sophisticated mesh-generation possibilities in 1D, 2D, and 3D,
plotting routines, and much more. However, in the multigrid solver we have
to generate a mesh at each level, many times over and over again, and we
only need a very limited set of attributes. This tensor-mesh class provides
all required attributes. All attributes here are the same as their
counterparts in :class:`discretize.TensorMesh` (both in name and value).
.. warning::
This is a slimmed-down version of :class:`discretize.TensorMesh`, meant
principally for internal use by the multigrid modeller. It is highly
recommended to use :class:`discretize.TensorMesh` to create the input
meshes instead of this class. There are no input-checks carried out
here, and there is only one accepted input format for `h` and `x0`.
Parameters
----------
h : list of three ndarrays
Cell widths in [x, y, z] directions.
x0 : ndarray of dimension (3, )
Origin (x, y, z).
"""
def __init__(self, h, x0):
"""Initialize the mesh."""
self.x0 = x0
# Width of cells.
self.hx = h[0]
self.hy = h[1]
self.hz = h[2]
# Cell related properties.
self.nCx = int(self.hx.size)
self.nCy = int(self.hy.size)
self.nCz = int(self.hz.size)
self.vnC = np.array([self.hx.size, self.hy.size, self.hz.size])
self.nC = int(self.vnC.prod())
self.vectorCCx = np.r_[0, self.hx[:-1].cumsum()]+self.hx*0.5+self.x0[0]
self.vectorCCy = np.r_[0, self.hy[:-1].cumsum()]+self.hy*0.5+self.x0[1]
self.vectorCCz = np.r_[0, self.hz[:-1].cumsum()]+self.hz*0.5+self.x0[2]
# Node related properties.
self.nNx = self.nCx + 1
self.nNy = self.nCy + 1
self.nNz = self.nCz + 1
self.vnN = np.array([self.nNx, self.nNy, self.nNz], dtype=int)
self.nN = int(self.vnN.prod())
self.vectorNx = np.r_[0., self.hx.cumsum()] + self.x0[0]
self.vectorNy = np.r_[0., self.hy.cumsum()] + self.x0[1]
self.vectorNz = np.r_[0., self.hz.cumsum()] + self.x0[2]
# Edge related properties.
self.vnEx = np.array([self.nCx, self.nNy, self.nNz], dtype=int)
self.vnEy = np.array([self.nNx, self.nCy, self.nNz], dtype=int)
self.vnEz = np.array([self.nNx, self.nNy, self.nCz], dtype=int)
self.nEx = int(self.vnEx.prod())
self.nEy = int(self.vnEy.prod())
self.nEz = int(self.vnEz.prod())
self.vnE = np.array([self.nEx, self.nEy, self.nEz], dtype=int)
self.nE = int(self.vnE.sum())
def __repr__(self):
"""Simple representation."""
return (f"TensorMesh: {self.nCx} x {self.nCy} x {self.nCz} "
f"({self.nC:,})")
def copy(self):
"""Return a copy of the TensorMesh."""
return TensorMesh.from_dict(self.to_dict(True))
def to_dict(self, copy=False):
"""Store the necessary information of the TensorMesh in a dict."""
out = {'hx': self.hx, 'hy': self.hy, 'hz': self.hz, 'x0': self.x0,
'__class__': self.__class__.__name__}
if copy:
return deepcopy(out)
else:
return out
@classmethod
def from_dict(cls, inp):
"""Convert dictionary into :class:`TensorMesh` instance.
Parameters
----------
inp : dict
Dictionary as obtained from :func:`TensorMesh.to_dict`.
The dictionary needs the keys `hx`, `hy`, `hz`, and `x0`.
Returns
-------
obj : :class:`TensorMesh` instance
"""
try:
return cls(h=[inp['hx'], inp['hy'], inp['hz']], x0=inp['x0'])
except KeyError as e:
print(f"* ERROR :: Variable {e} missing in `inp`.")
raise
@property
def vol(self):
"""Construct cell volumes of the 3D model as 1D array."""
if getattr(self, '_vol', None) is None:
self._vol = (self.hx[None, None, :]*self.hy[None, :, None] *
self.hz[:, None, None]).ravel()
return self._vol
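# Minimal usage sketch (illustrative numbers):
# >>> h = np.ones(8) * 100.  # eight 100 m cells per axis
# >>> mesh = TensorMesh([h, h, h], x0=np.array([-400., -400., -400.]))
# >>> mesh.nC
# 512
# >>> float(mesh.vol.sum())  # total volume of the 800 m cube
# 512000000.0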
def get_hx_h0(freq, res, domain, fixed=0., possible_nx=None, min_width=None,
pps=3, alpha=None, max_domain=100000., raise_error=True, verb=1,
return_info=False):
r"""Return cell widths and origin for given parameters.
Returns cell widths for the provided frequency, resistivity, domain extent,
and other parameters using a flexible amount of cells. See input parameters
for more details. A maximum of three hard/fixed boundaries can be provided
(one of which is the grid center).
The minimum cell width is calculated through :math:`\delta/\rm{pps}`, where
the skin depth is given by :math:`\delta = 503.3 \sqrt{\rho/f}`, and the
parameter `pps` stands for 'points-per-skindepth'. The minimum cell width
can be restricted with the parameter `min_width`.
The actual calculation domain adds a buffer zone around the (survey)
domain. The thickness of the buffer is six times the skin depth. The field
is basically zero after two wavelengths. A wavelength is
:math:`2\pi\delta`, hence roughly 6 times the skin depth. Taking a factor 6
gives therefore almost two wavelengths, as the field travels to the
boundary and back. The actual buffer thickness can be steered with the
`res` parameter.
One has to take into account that the air is very resistive, which has to
be considered not just in the vertical direction, but also in the
horizontal directions, as the airwave will bounce back from the sides
otherwise. In the marine case this issue reduces with increasing water
depth.
See Also
--------
get_stretched_h : Get `hx` for a fixed number `nx` and within a fixed
domain.
Parameters
----------
freq : float
Frequency (Hz) to calculate the skin depth. The skin depth is a concept
defined in the frequency domain. If a negative frequency is provided,
it is assumed that the calculation is carried out in the Laplace
domain. To calculate the skin depth, the value of `freq` is then
multiplied by :math:`-2\pi`, to simulate the closest
frequency-equivalent.
res : float or list
Resistivity (Ohm m) to calculate the skin depth. The skin depth is
used to calculate the minimum cell width and the boundary thicknesses.
Up to three resistivities can be provided:
- float: Same resistivity for everything;
- [min_width, boundaries];
- [min_width, left boundary, right boundary].
domain : list
Contains the survey-domain limits [min, max]. The actual calculation
domain consists of this domain plus a buffer zone around it, which
depends on frequency and resistivity.
fixed : list, optional
Fixed boundaries, one, two, or maximum three values. The grid is
centered around the first value. Hence it is the center location with
the smallest cell. Two more fixed boundaries can be added, at most one
on each side of the first one.
Default is 0.
possible_nx : list, optional
List of possible numbers of cells. See :func:`get_cell_numbers`.
Default is ``get_cell_numbers(500, 5, 3)``, which corresponds to
[16, 24, 32, 40, 48, 64, 80, 96, 128, 160, 192, 256, 320, 384].
min_width : float, list or None, optional
Minimum cell width restriction:
- None : No restriction;
- float : Fixed to this value, ignoring skin depth and `pps`.
- list [min, max] : Lower and upper bounds.
Default is None.
pps : int, optional
Points per skindepth; minimum cell width is calculated via
`dmin = skindepth/pps`.
Default = 3.
alpha : list, optional
Maximum alpha and step size to find a good alpha. The first value is
the maximum alpha of the survey domain, the second value is the maximum
alpha for the buffer zone, and the third value is the step size.
Default = [1, 1.5, .01], hence no stretching within the survey domain
and a maximum stretching of 1.5 in the buffer zone; step size is 0.01.
max_domain : float, optional
Maximum calculation domain from fixed[0] (usually source position).
Default is 100,000.
raise_error : bool, optional
If True, an error is raised if no suitable grid is found. Otherwise it
just prints a message and returns None's.
Default is True.
verb : int, optional
Verbosity, 0 or 1.
Default = 1.
return_info : bool
If True, a dictionary is returned with some grid info (min and max
cell width and alpha).
Returns
-------
hx : ndarray
Cell widths of mesh.
x0 : float
Origin of the mesh.
info : dict
Dictionary with mesh info; only if ``return_info=True``.
Keys:
- `dmin`: Minimum cell width;
- `dmax`: Maximum cell width;
- `amin`: Minimum alpha;
- `amax`: Maximum alpha.
"""
# Get variables with default lists:
if alpha is None:
alpha = [1, 1.5, 0.01]
if possible_nx is None:
possible_nx = get_cell_numbers(500, 5, 3)
# Cast resistivity value(s).
res = np.array(res, ndmin=1)
if res.size == 1:
res_arr = np.array([res[0], res[0], res[0]])
elif res.size == 2:
res_arr = np.array([res[0], res[1], res[1]])
else:
res_arr = np.array([res[0], res[1], res[2]])
# Cast and check fixed.
fixed = np.array(fixed, ndmin=1)
if fixed.size > 2:
# Check length.
if fixed.size > 3:
print("\n* ERROR :: Maximum three fixed boundaries permitted.\n"
f" Provided: {fixed.size}.")
raise ValueError("Wrong input for fixed")
# Sort second and third, so it doesn't matter how it was provided.
fixed = np.array([fixed[0], max(fixed[1:]), min(fixed[1:])])
# Check side.
if np.sign(np.diff(fixed[:2])) == np.sign(np.diff(fixed[::2])):
print("\n* ERROR :: 2nd and 3rd fixed boundaries have to be "
"left and right of the first one.\n "
f"Provided: [{fixed[0]}, {fixed[1]}, {fixed[2]}]")
raise ValueError("Wrong input for fixed")
# Calculate skin depth.
skind = 503.3*np.sqrt(res_arr/abs(freq))
if freq < 0: # For Laplace-domain calculations.
skind /= np.sqrt(2*np.pi)
# Minimum cell width.
dmin = skind[0]/pps
if min_width is not None: # Respect user input.
min_width = np.array(min_width, ndmin=1)
if min_width.size == 1:
dmin = min_width
else:
dmin = np.clip(dmin, *min_width)
# Survey domain; contains all sources and receivers.
domain = np.array(domain, dtype=float)
# Calculation domain; big enough to avoid boundary effects.
# To avoid boundary effects we want the signal to travel two wavelengths
# from the source to the boundary and back to the receiver.
# => 2*pi*sd ~ 6.3*sd = one wavelength => signal is ~ 0.2 %.
# After two wavelengths we can safely assume it is zero.
#
# The air does not follow the concept of skin depth, as it is a wave rather
# than diffusion. For this is the factor `max_domain`, which restricts
# the domain in each direction to this value from the center.
# (a) Source to edges of domain.
dist_in_domain = abs(domain - fixed[0])
# (b) Two wavelengths.
two_lambda = skind[1:]*4*np.pi
# (c) Required buffer, additional to domain.
dist_buff = np.max([np.zeros(2), (two_lambda - dist_in_domain)/2], axis=0)
# (d) Add buffer to domain.
calc_domain = np.array([domain[0]-dist_buff[0], domain[1]+dist_buff[1]])
# (e) Restrict total domain to max_domain.
calc_domain[0] = max(calc_domain[0], fixed[0]-max_domain)
calc_domain[1] = min(calc_domain[1], fixed[0]+max_domain)
# Initiate flag if terminated.
finished = False
# Initiate alpha variables for survey and calculation domains.
sa, ca = 1.0, 1.0
# Loop over possible cell numbers from small to big.
for nx in np.unique(possible_nx):
# Loop over possible alphas for domain.
for sa in np.arange(1.0, alpha[0]+alpha[2]/2, alpha[2]):
# Get current stretched grid cell sizes.
thxl = dmin*sa**np.arange(nx) # Left of origin.
thxr = dmin*sa**np.arange(nx) # Right of origin.
# 0. Adjust stretching for fixed boundaries.
if fixed.size > 1: # Move mesh to first fixed boundary.
t_nx = np.r_[fixed[0], fixed[0]+np.cumsum(thxr)]
ii = np.argmin(abs(t_nx-fixed[1]))
thxr *= abs(fixed[1]-fixed[0])/np.sum(thxr[:ii])
if fixed.size > 2: # Move mesh to second fixed boundary.
t_nx = np.r_[fixed[0], fixed[0]-np.cumsum(thxl)]
ii = np.argmin(abs(t_nx-fixed[2]))
thxl *= abs(fixed[2]-fixed[0])/np.sum(thxl[:ii])
# 1. Fill from center to left domain.
nl = np.sum((fixed[0]-np.cumsum(thxl)) > domain[0])+1
# 2. Fill from center to right domain.
nr = np.sum((fixed[0]+np.cumsum(thxr)) < domain[1])+1
# 3. Get remaining number of cells and check termination criteria.
nsdc = nl+nr # Number of domain cells.
nx_remain = nx-nsdc
# Not good, try next.
if nx_remain <= 0:
continue
# Create the current hx-array.
hx = np.r_[thxl[:nl][::-1], thxr[:nr]]
hxo = np.r_[thxl[:nl][::-1], thxr[:nr]]
# Get actual domain:
asurv_domain = [fixed[0]-np.sum(thxl[:nl]),
fixed[0]+np.sum(thxr[:nr])]
x0 = float(fixed[0]-np.sum(thxl[:nl]))
# Get actual stretching (differs in case of fixed layers).
sa_adj = np.max([hx[1:]/hx[:-1], hx[:-1]/hx[1:]])
# Loop over possible alphas for calc_domain.
for ca in np.arange(sa, alpha[1]+alpha[2]/2, alpha[2]):
# 4. Fill to left calc_domain.
thxl = hx[0]*ca**np.arange(1, nx_remain+1)
nl = np.sum((asurv_domain[0]-np.cumsum(thxl)) >
calc_domain[0])+1
# 5. Fill to right calc_domain.
thxr = hx[-1]*ca**np.arange(1, nx_remain+1)
nr = np.sum((asurv_domain[1]+np.cumsum(thxr)) <
calc_domain[1])+1
# 6. Get remaining number of cells and check termination
# criteria.
ncdc = nl+nr # Number of calc_domain cells.
nx_remain2 = nx-nsdc-ncdc
if nx_remain2 < 0: # Not good, try next.
continue
# Create hx-array.
nl += int(np.floor(nx_remain2/2)) # If uneven, add one cell
nr += int(np.ceil(nx_remain2/2)) # more on the right.
hx = np.r_[thxl[:nl][::-1], hx, thxr[:nr]]
# Calculate origin.
x0 = float(asurv_domain[0]-np.sum(thxl[:nl]))
# Mark it as finished and break out of the loop.
finished = True
break
if finished:
break
if finished:
break
# Check finished and print info about found grid.
if not finished:
# Throw message if no solution was found.
print("\n* ERROR :: No suitable grid found; relax your criteria.\n")
if raise_error:
raise ArithmeticError("No grid found!")
else:
hx, x0 = None, None
elif verb > 0:
print(f" Skin depth ", end="")
if res.size == 1:
print(f" [m] : {skind[0]:.0f}")
elif res.size == 2:
print(f"(m/l-r) [m] : {skind[0]:.0f} / {skind[1]:.0f}")
else:
print(f"(m/l/r) [m] : {skind[0]:.0f} / {skind[1]:.0f} / "
f"{skind[2]:.0f}")
print(f" Survey domain [m] : {domain[0]:.0f} - "
f"{domain[1]:.0f}")
print(f" Calculation domain [m] : {calc_domain[0]:.0f} - "
f"{calc_domain[1]:.0f}")
print(f" Final extent [m] : {x0:.0f} - "
f"{x0+np.sum(hx):.0f}")
extstr = f" Min/max cell width [m] : {min(hx):.0f} / "
alstr = f" Alpha survey"
nrstr = " Number of cells "
if not np.isclose(sa, sa_adj):
sastr = f"{sa:.3f} ({sa_adj:.3f})"
else:
sastr = f"{sa:.3f}"
print(extstr+f"{max(hxo):.0f} / {max(hx):.0f}")
print(alstr+f"/calc : {sastr} / {ca:.3f}")
print(nrstr+f"(s/c/r) : {nx} ({nsdc}/{ncdc}/{nx_remain2})")
print()
if return_info:
if not fixed.size > 1:
sa_adj = sa
info = {'dmin': dmin,
'dmax': np.nanmax(hx),
'amin': np.nanmin([ca, sa, sa_adj]),
'amax': np.nanmax([ca, sa, sa_adj])}
return hx, x0, info
else:
return hx, x0
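# Minimal usage sketch (illustrative inputs; the resulting grid depends on the
# criteria documented above):
# >>> hx, x0 = get_hx_h0(freq=1.0, res=[0.3, 1.0], domain=[-2000, 2000], verb=0)
# >>> x0 + hx.sum() >= 2000  # the grid covers at least the survey domain
# True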
def get_cell_numbers(max_nr, max_prime=5, min_div=3):
r"""Returns 'good' cell numbers for the multigrid method.
'Good' cell numbers are numbers which can be divided by 2 as many times as
possible. At the end there will be a low prime number.
The function adds all numbers :math:`p 2^n \leq M` for :math:`p={2, 3, ...,
p_\text{max}}` and :math:`n={n_\text{min}, n_\text{min}+1, ..., \infty}`;
:math:`M, p_\text{max}, n_\text{min}` correspond to `max_nr`, `max_prime`,
and `min_div`, respectively.
Parameters
----------
max_nr : int
Maximum number of cells.
max_prime : int
Highest permitted prime number p for p*2^n. {2, 3, 5, 7} are good upper
limits in order to avoid too big lowest grids in the multigrid method.
Default is 5.
min_div : int
Minimum times the number can be divided by two.
Default is 3.
Returns
-------
numbers : array
Array containing all possible cell numbers from lowest to highest.
"""
# Primes till 20.
primes = np.array([2, 3, 5, 7, 11, 13, 17, 19])
# Sanity check; 19 is already ridiculously high.
if max_prime > primes[-1]:
print(f"* ERROR :: Highest prime is {max_prime}, "
"please use a value < 20.")
raise ValueError("Highest prime too high")
# Restrict to max_prime.
primes = primes[primes <= max_prime]
# Get possible values.
# Currently restricted to prime*2**30 (for prime=2 => 1,073,741,824 cells).
numbers = primes[:, None]*2**np.arange(min_div, 30)
# Get unique values.
numbers = np.unique(numbers)
# Restrict to max_nr and return.
return numbers[numbers <= max_nr]
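# Worked example (this is the default list quoted in the get_hx_h0 docstring):
# >>> get_cell_numbers(500, 5, 3)
# array([ 16,  24,  32,  40,  48,  64,  80,  96, 128, 160, 192, 256, 320, 384])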
def get_stretched_h(min_width, domain, nx, x0=0, x1=None, resp_domain=False):
"""Return cell widths for a stretched grid within the domain.
Returns `nx` cell widths within `domain`, where the minimum cell width is
`min_width`. The cells are not stretched within `x0` and `x1`, and outside
uses a power-law stretching. The actual stretching factor and the number of
cells left and right of `x0` and `x1` are find in a minimization process.
The domain is not completely respected. The starting point of the domain
is, but the endpoint of the domain might slightly shift (this is more
likely the case for small `nx`, for big `nx` the shift should be small).
The new endpoint can be obtained with ``domain[0]+np.sum(hx)``. If you want
the domain to be respected absolutely, set ``resp_domain=True``. However,
be aware that this will introduce one stretch-factor which is different
from the other stretch factors, to accommodate the restriction. This
one-off factor is between the left- and right-side of `x0`, or, if `x1` is
provided, just after `x1`.
See Also
--------
get_hx_h0 : Get `hx` and `x0` for a flexible number of `nx` with
given bounds.
Parameters
----------
min_width : float
Minimum cell width. If x1 is provided, the actual minimum cell width
might be smaller than min_width.
domain : list
[start, end] of model domain.
nx : int
Number of cells.
x0 : float
Center of the grid. `x0` is restricted to `domain`.
Default is 0.
x1 : float
If provided, then no stretching is applied between `x0` and `x1`. The
non-stretched part starts at `x0` and stops at the first possible
location at or after `x1`. `x1` is restricted to `domain`. This will adjust
`min_width` so that an integer number of cells fits within `x0` and `x1`.
resp_domain : bool
If False (default), then the domain-end might shift slightly to assure
that the same stretching factor is applied throughout. If set to True,
however, the domain is respected absolutely. This will introduce one
stretch-factor which is different from the other stretch factors, to
accommodate the restriction. This one-off factor is between the left-
and right-side of `x0`, or, if `x1` is provided, just after `x1`.
Returns
-------
hx : ndarray
Cell widths of mesh.
"""
# Cast to arrays
domain = np.array(domain, dtype=float)
x0 = np.array(x0, dtype=float)
x0 = np.clip(x0, *domain) # Restrict to model domain
min_width = np.array(min_width, dtype=float)
if x1 is not None:
x1 = np.array(x1, dtype=float)
x1 = np.clip(x1, *domain) # Restrict to model domain
# If x1 is provided (a part is not stretched)
if x1 is not None:
# Store original values
xlim_orig = domain.copy()
nx_orig = int(nx)
x0_orig = x0.copy()
h_min_orig = min_width.copy()
# Get number of non-stretched cells
n_nos = int(np.ceil((x1-x0)/min_width))
# Re-calculate min_width to fit with x0-x1-limits:
min_width = (x1-x0)/n_nos
# Subtract one cell, because the standard scheme provides one
# min_width-cell.
n_nos -= 1
# Reset x0, because the first min_width comes from normal scheme
x0 += min_width
# Reset xmax for normal scheme
domain[1] -= n_nos*min_width
# Reset nx for normal scheme
nx -= n_nos
# If there are not enough points reset to standard procedure. The limit
# of five is arbitrary. However, nx should be much bigger than five
# anyways, otherwise stretched grid doesn't make sense.
if nx <= 5:
print("Warning :: Not enough points for non-stretched part,"
"ignoring therefore `x1`.")
domain = xlim_orig
nx = nx_orig
x0 = x0_orig
x1 = None
min_width = h_min_orig
# Get stretching factor (a = 1+alpha).
if min_width == 0 or min_width > np.diff(domain)/nx:
# If min_width is bigger than the domain-extent divided by nx, no
# stretching is required at all.
alpha = 0
else:
# Wrap _get_dx into a minimization function to call with fsolve.
def find_alpha(alpha, min_width, args):
"""Find alpha such that min(hx) = min_width."""
return min(get_hx(alpha, *args))/min_width-1
# Search for best alpha, must be at least 0
args = (domain, nx, x0)
alpha = max(0, optimize.fsolve(find_alpha, 0.02, (min_width, args)))
# With alpha get actual cell spacing with `resp_domain` to respect the
# users decision.
hx = get_hx(alpha, domain, nx, x0, resp_domain)
# Add the non-stretched center if x1 is provided
if x1 is not None:
hx = np.r_[hx[: np.argmin(hx)], np.ones(n_nos)*min_width,
hx[np.argmin(hx):]]
# Print warning min_width could not be respected.
if abs(hx.min() - min_width) > 0.1:
print(f"Warning :: Minimum cell width ({np.round(hx.min(), 2)} m) is "
"below `min_width`, because `nx` is too big for `domain`.")
return hx
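# Hypothetical usage sketch of the stretched-grid helper defined above. The
# function name `get_stretched_h` is an assumption (its `def` line lies above
# this excerpt); the values are illustrative only:
#
#     hx = get_stretched_h(min_width=10., domain=[-500, 500], nx=64, x0=0.)
#     # min(hx) is close to 10, the cells grow towards both domain ends, and
#     # sum(hx) is close to the 1000 m domain width.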
def get_domain(x0=0, freq=1, res=0.3, limits=None, min_width=None,
fact_min=0.2, fact_neg=5, fact_pos=None):
r"""Get domain extent and minimum cell width as a function of skin depth.
Returns the extent of the calculation domain and the minimum cell width as
a multiple of the skin depth, with possible user restrictions on minimum
calculation domain and range of possible minimum cell widths.
.. math::
\delta &= 503.3 \sqrt{\frac{\rho}{f}} , \\
x_\text{start} &= x_0-k_\text{neg}\delta , \\
x_\text{end} &= x_0+k_\text{pos}\delta , \\
h_\text{min} &= k_\text{min} \delta .
Parameters
----------
x0 : float
Center of the calculation domain. Normally the source location.
Default is 0.
freq : float
Frequency (Hz) to calculate the skin depth. The skin depth is a concept
defined in the frequency domain. If a negative frequency is provided,
it is assumed that the calculation is carried out in the Laplace
domain. To calculate the skin depth, the value of `freq` is then
multiplied by :math:`-2\pi`, to simulate the closest
frequency-equivalent.
Default is 1 Hz.
res : float, optional
Resistivity (Ohm m) to calculate skin depth.
Default is 0.3 Ohm m (sea water).
limits : None or list
[start, end] of model domain. This extent represents the minimum extent
of the domain. The domain is therefore only adjusted if it has to reach
outside of [start, end].
Default is None.
min_width : None, float, or list of two floats
Minimum cell width is calculated as a function of skin depth:
fact_min*sd. If `min_width` is a float, this is used. If a list of
        two values [min, max] is provided, it is used to restrain
min_width. Default is None.
fact_min, fact_neg, fact_pos : floats
The skin depth is multiplied with these factors to estimate:
- Minimum cell width (`fact_min`, default 0.2)
- Domain-start (`fact_neg`, default 5), and
- Domain-end (`fact_pos`, defaults to `fact_neg`).
Returns
-------
h_min : float
Minimum cell width.
domain : list
Start- and end-points of calculation domain.
"""
# Set fact_pos to fact_neg if not provided.
if fact_pos is None:
fact_pos = fact_neg
# Calculate the skin depth.
skind = 503.3*np.sqrt(res/abs(freq))
if freq < 0: # For Laplace-domain calculations.
skind /= np.sqrt(2*np.pi)
# Estimate minimum cell width.
h_min = fact_min*skind
if min_width is not None: # Respect user input.
if np.array(min_width).size == 1:
h_min = min_width
else:
h_min = np.clip(h_min, *min_width)
# Estimate calculation domain.
domain = [x0-fact_neg*skind, x0+fact_pos*skind]
if limits is not None: # Respect user input.
domain = [min(limits[0], domain[0]), max(limits[1], domain[1])]
return h_min, domain
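# Worked example (illustrative) for `get_domain` above: with the defaults
# res=0.3 Ohm.m and freq=1 Hz the skin depth is 503.3*sqrt(0.3/1) ~ 276 m,
# so h_min ~ 0.2*276 ~ 55 m and domain ~ [-5*276, +5*276] ~ [-1378, 1378] m:
#
#     h_min, domain = get_domain(x0=0, freq=1, res=0.3)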
def get_hx(alpha, domain, nx, x0, resp_domain=True):
r"""Return cell widths for given input.
Find the number of cells left and right of `x0`, `nl` and `nr`
respectively, for the provided alpha. For this, we solve
.. math:: \frac{x_\text{max}-x_0}{x_0-x_\text{min}} =
\frac{a^{nr}-1}{a^{nl}-1}
where :math:`a = 1+\alpha`.
Parameters
----------
alpha : float
Stretching factor `a` is given by ``a=1+alpha``.
domain : list
[start, end] of model domain.
nx : int
Number of cells.
x0 : float
Center of the grid. `x0` is restricted to `domain`.
resp_domain : bool
        If False, then the domain-end might shift slightly to assure that the
        same stretching factor is applied throughout. If set to True (the
        default), the domain is respected absolutely. This will introduce one
stretch-factor which is different from the other stretch factors, to
accommodate the restriction. This one-off factor is between the left-
and right-side of `x0`, or, if `x1` is provided, just after `x1`.
Returns
-------
hx : ndarray
Cell widths of mesh.
"""
if alpha <= 0.: # If alpha <= 0: equal spacing (no stretching at all)
hx = np.ones(nx)*np.diff(np.squeeze(domain))/nx
else: # Get stretched hx
a = alpha+1
# Get hx depending if x0 is on the domain boundary or not.
if np.isclose(x0, domain[0]) or np.isclose(x0, domain[1]):
            # Get all a's
alr = np.diff(domain)*alpha/(a**nx-1)*a**np.arange(nx)
if x0 == domain[1]:
alr = alr[::-1]
# Calculate differences
hx = alr*np.diff(domain)/sum(alr)
else:
# Find number of elements left and right by solving:
# (xmax-x0)/(x0-xmin) = a**nr-1/(a**nl-1)
nr = np.arange(2, nx+1)
er = (domain[1]-x0)/(x0-domain[0]) - (a**nr[::-1]-1)/(a**nr-1)
nl = np.argmin(abs(np.floor(er)))+1
nr = nx-nl
# Get all a's
al = a**np.arange(nl-1, -1, -1)
ar = a**np.arange(1, nr+1)
# Calculate differences
if resp_domain:
# This version honours domain[0] and domain[1], but to achieve
# this it introduces one stretch-factor which is different from
# all the others between al to ar.
hx = np.r_[al*(x0-domain[0])/sum(al),
ar*(domain[1]-x0)/sum(ar)]
else:
# This version moves domain[1], but each stretch-factor is
# exactly the same.
fact = (x0-domain[0])/sum(al) # Take distance from al.
hx = np.r_[al, ar]*fact
# Note: this hx is equivalent as providing the following h
# to TensorMesh:
# h = [(min_width, nl-1, -a), (min_width, n_nos+1),
# (min_width, nr, a)]
return hx
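# Hypothetical usage sketch for `get_hx` above (values are illustrative):
#
#     hx_equal = get_hx(alpha=0.0, domain=[0, 100], nx=10, x0=50)
#     # -> ten equal 10-m cells, since alpha <= 0 means no stretching.
#     hx_stretch = get_hx(alpha=0.05, domain=[0, 100], nx=10, x0=50)
#     # -> cells grow away from x0=50; the widths sum to 100, because
#     #    resp_domain defaults to True here.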
| 35.833898 | 79 | 0.590578 | 4,305 | 0.135749 | 0 | 0 | 889 | 0.028033 | 0 | 0 | 20,101 | 0.633841 |
1c855f3c4b60017317473eca05c6f77584434cbc | 2,464 | py | Python | QUICK_START/NODE_SQUEEZESEG_CLUSTER/src/script/squeezeseg/utils/clock.py | Hqss/DINK | 5fecaa65e2f9da48eb8ac38ef709aa555fca8766 | [
"BSD-3-Clause"
]
| 189 | 2019-01-16T03:05:23.000Z | 2020-09-14T14:54:16.000Z | QUICK_START/NODE_SQUEEZESEG_CLUSTER/src/script/squeezeseg/utils/clock.py | jtpils/DINK | 5f6b3eaba279126f79ae6607f965311002d7451c | [
"BSD-3-Clause"
]
| 3 | 2019-02-11T06:20:15.000Z | 2020-04-05T07:03:53.000Z | QUICK_START/NODE_SQUEEZESEG_CLUSTER/src/script/squeezeseg/utils/clock.py | jtpils/DINK | 5f6b3eaba279126f79ae6607f965311002d7451c | [
"BSD-3-Clause"
]
| 25 | 2019-01-16T03:05:24.000Z | 2020-04-04T21:07:53.000Z | #! /usr/bin/python2
# -*- coding: utf-8 -*-
"""
Clock utility to measure running time, following SegMatch.
"""
# BSD 3-Clause License
#
# Copyright (c) 2019, FPAI
# Copyright (c) 2019, SeriouslyHAO
# Copyright (c) 2019, xcj2019
# Copyright (c) 2019, Leonfirst
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import datetime
class Clock(object):
def __init__(self):
self.kSecondsToMiliseconds = 1000.0
self.kMicrosecondsToMiliseconds = 0.001
self.start()
def start(self):
self.real_time_start_ = datetime.datetime.now()
def takeTime(self):
seconds = (datetime.datetime.now() - self.real_time_start_).seconds
useconds = (datetime.datetime.now() - self.real_time_start_).microseconds
self.real_time_ms_ = (seconds*self.kSecondsToMiliseconds + useconds*self.kMicrosecondsToMiliseconds) + 0.5
def getRealTime(self):
return self.real_time_ms_
def takeRealTime(self):
self.takeTime()
return self.getRealTime()
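# Hypothetical usage sketch (not part of the original file):
#
#     clock = Clock()
#     # ... run the code to be timed ...
#     elapsed_ms = clock.takeRealTime()  # wall-clock time in milliseconds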
| 39.741935 | 114 | 0.745536 | 683 | 0.277192 | 0 | 0 | 0 | 0 | 0 | 0 | 1,727 | 0.700893 |
1c8628f13d65ff3439c3fbd013fa30e504e0ca89 | 918 | py | Python | office365/sharepoint/view_collection.py | andebor/Office365-REST-Python-Client | ffd0ab4cf742b2e5ae7d8c44e937495aece41e07 | [
"MIT"
]
| null | null | null | office365/sharepoint/view_collection.py | andebor/Office365-REST-Python-Client | ffd0ab4cf742b2e5ae7d8c44e937495aece41e07 | [
"MIT"
]
| null | null | null | office365/sharepoint/view_collection.py | andebor/Office365-REST-Python-Client | ffd0ab4cf742b2e5ae7d8c44e937495aece41e07 | [
"MIT"
]
| null | null | null | from office365.runtime.client_object_collection import ClientObjectCollection
from office365.runtime.resource_path_service_operation import ResourcePathServiceOperation
from office365.sharepoint.view import View
class ViewCollection(ClientObjectCollection):
"""Represents a collection of View resources."""
def __init__(self, context, resource_path=None):
super(ViewCollection, self).__init__(context, View, resource_path)
def get_by_title(self, view_title):
"""Gets the list view with the specified title."""
return View(self.context,
ResourcePathServiceOperation(self.context, self.resource_path, "GetByTitle", [view_title]))
def get_by_id(self, view_id):
"""Gets the list view with the specified ID."""
return View(self.context,
ResourcePathServiceOperation(self.context, self.resource_path, "GetById", [view_id]))
| 45.9 | 111 | 0.734205 | 703 | 0.765795 | 0 | 0 | 0 | 0 | 0 | 0 | 166 | 0.180828 |
1c88139e81ccf155fe77c897a8674f07ab2d5797 | 1,461 | py | Python | common-scrapers/common_src/scrapers/second_extinction.py | mrPaintMan/blog-scraper | 9b1ff3d398bd23d799d86c9a62ec76a6950555cc | [
"MIT"
]
| null | null | null | common-scrapers/common_src/scrapers/second_extinction.py | mrPaintMan/blog-scraper | 9b1ff3d398bd23d799d86c9a62ec76a6950555cc | [
"MIT"
]
| null | null | null | common-scrapers/common_src/scrapers/second_extinction.py | mrPaintMan/blog-scraper | 9b1ff3d398bd23d799d86c9a62ec76a6950555cc | [
"MIT"
]
| 1 | 2020-03-11T14:49:00.000Z | 2020-03-11T14:49:00.000Z | from common_src.lib.model.post import Post
from common_src.lib.model.source import Source
from common_src.scrapers.abstract_scraper import make_soup, remove_dups, now
SOURCE_CODE = "second_extinction"
WEBSITE = "https://www.secondextinctiongame.com/news"
ALT_IMAGE = 'https://www.secondextinctiongame.com/static/242486b363d867dc483deb6d7038dde1/d8255/se_screenshot_5.jpg'
FILENAME = "../resources/data/second_extinction.txt"
def get_source():
name = "Second Extinction"
description = 'Second Extinction is a first person shooter game where earth has been invaded by mutated dinosaurs.'
profile_image = 'https://www.secondextinctiongame.com/static/logo-0d52f8575a251eff8ebd6e2d6bd6c51b.png'
return Source(SOURCE_CODE, name, description, profile_image, ALT_IMAGE, None)
def scrape():
soup = make_soup(WEBSITE)
base_site = "https://www.secondextinctiongame.com"
data = []
for post in soup.findAll("article", {"class": "cgYILD"}):
date = post.find("time").text.replace("-", "") + "0000"
title = post.find("h3").text.strip()
link = base_site + post.find("a").get("href")
alt_image = ALT_IMAGE
image = base_site + post.find("picture").find("img").get("src").replace(" ", "%20")
data.append(Post(None, date, title, link, image, alt_image, SOURCE_CODE, None))
if len(data) % 25 == 0:
print(now() + f"Processed {len(data)} posts")
return remove_dups(data)
| 39.486486 | 119 | 0.699521 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 563 | 0.385352 |
1c88d1e1834d792edf9c14b13846bd1ee7d80360 | 3,860 | py | Python | systems/ILSVRC12/AlexNet/alexnet.py | mdatres/quantlab | 09fb24ede78f49768f829afe0fac2ac291b8fd4f | [
"Apache-2.0"
]
| 7 | 2021-07-01T17:02:50.000Z | 2022-03-29T10:54:41.000Z | systems/ILSVRC12/AlexNet/alexnet.py | mdatres/quantlab | 09fb24ede78f49768f829afe0fac2ac291b8fd4f | [
"Apache-2.0"
]
| null | null | null | systems/ILSVRC12/AlexNet/alexnet.py | mdatres/quantlab | 09fb24ede78f49768f829afe0fac2ac291b8fd4f | [
"Apache-2.0"
]
| 2 | 2021-07-10T20:57:06.000Z | 2022-01-02T10:10:25.000Z | #
# alexnet.py
#
# Author(s):
# Matteo Spallanzani <[email protected]>
#
# Copyright (c) 2020-2021 ETH Zurich.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import torch
import torch.nn as nn
class AlexNet(nn.Module):
def __init__(self, use_bn: bool, num_classes: int = 1000, seed : int = -1) -> None:
super(AlexNet, self).__init__()
self.features = self._make_features(use_bn)
self.avgpool = nn.AdaptiveAvgPool2d((6, 6))
self.classifier = self._make_classifier(num_classes)
self._initialize_weights(seed)
def _make_features(self, use_bn: bool) -> nn.Sequential:
modules = []
# conv 1
modules += [nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2, bias=not use_bn)]
modules += [nn.BatchNorm2d(64)] if use_bn else []
modules += [nn.ReLU(inplace=True)]
# max pool
modules += [nn.MaxPool2d(kernel_size=3, stride=2)]
# conv 2
modules += [nn.Conv2d(64, 192, kernel_size=5, padding=2, bias=not use_bn)]
modules += [nn.BatchNorm2d(192)] if use_bn else []
modules += [nn.ReLU(inplace=True)]
# max pool
modules += [nn.MaxPool2d(kernel_size=3, stride=2)]
# conv 3
modules += [nn.Conv2d(192, 384, kernel_size=3, padding=1, bias=not use_bn)]
modules += [nn.BatchNorm2d(384)] if use_bn else []
modules += [nn.ReLU(inplace=True)]
# conv 4
modules += [nn.Conv2d(384, 256, kernel_size=3, padding=1, bias=not use_bn)]
modules += [nn.BatchNorm2d(256)] if use_bn else []
modules += [nn.ReLU(inplace=True)]
# conv 5
modules += [nn.Conv2d(256, 256, kernel_size=3, padding=1, bias=not use_bn)]
modules += [nn.BatchNorm2d(256)] if use_bn else []
modules += [nn.ReLU(inplace=True)]
# max pool
modules += [nn.MaxPool2d(kernel_size=3, stride=2)]
return nn.Sequential(*modules)
def _make_classifier(self, num_classes: int) -> nn.Sequential:
modules = []
# dropout
modules += [nn.Dropout()]
# linear 1
modules += [nn.Linear(256 * 6 * 6, 4096)]
modules += [nn.ReLU(inplace=True)]
# dropout
modules += [nn.Dropout()]
# linear 2
modules += [nn.Linear(4096, 4096)]
modules += [nn.ReLU(inplace=True)]
# linear 3
modules += [nn.Linear(4096, num_classes)]
return nn.Sequential(*modules)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.features(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
def _initialize_weights(self, seed: int = -1):
if seed >= 0:
torch.manual_seed(seed)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
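# Hypothetical usage sketch (not part of the original file): a single forward
# pass through the AlexNet defined above, assuming standard 224x224 inputs.
#
#     model = AlexNet(use_bn=True, num_classes=1000)
#     logits = model(torch.randn(1, 3, 224, 224))  # -> shape (1, 1000)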
| 31.900826 | 91 | 0.591192 | 3,152 | 0.81658 | 0 | 0 | 0 | 0 | 0 | 0 | 781 | 0.202332 |
1c8a214cb9301b78671cb8aa70f1cebef2a6167b | 448 | py | Python | e/mail-relay/web/apps/core/migrations/0012_auto_20151105_1442.py | zhouli121018/nodejsgm | 0ccbc8acf61badc812f684dd39253d55c99f08eb | [
"MIT"
]
| null | null | null | e/mail-relay/web/apps/core/migrations/0012_auto_20151105_1442.py | zhouli121018/nodejsgm | 0ccbc8acf61badc812f684dd39253d55c99f08eb | [
"MIT"
]
| 18 | 2020-06-05T18:17:40.000Z | 2022-03-11T23:25:21.000Z | e/mail-relay/web/apps/core/migrations/0012_auto_20151105_1442.py | zhouli121018/nodejsgm | 0ccbc8acf61badc812f684dd39253d55c99f08eb | [
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0011_customersetting'),
]
operations = [
migrations.AlterField(
model_name='customersetting',
name='bounce',
field=models.BooleanField(default=True, verbose_name='\u5f00\u542f\u9000\u4fe1'),
),
]
| 22.4 | 93 | 0.625 | 339 | 0.756696 | 0 | 0 | 0 | 0 | 0 | 0 | 102 | 0.227679 |
1c8bf817623bc83ae0e3cfb38c83d93d7647579a | 1,068 | py | Python | madlib/main.py | FredericIV/PythonPractice | 36b3a321eb8fefc38befe83b15a7596418250756 | [
"CC0-1.0"
]
| null | null | null | madlib/main.py | FredericIV/PythonPractice | 36b3a321eb8fefc38befe83b15a7596418250756 | [
"CC0-1.0"
]
| null | null | null | madlib/main.py | FredericIV/PythonPractice | 36b3a321eb8fefc38befe83b15a7596418250756 | [
"CC0-1.0"
]
| null | null | null | #!/bin/python3
# Libraries
import sys
import array
import textwrap
# Variable Declaration
madlib_selection = "example.txt"
madlib_array = array.array('i')
copy_state = False
user_filler = ""
new_madlib = []
if len(sys.argv) != 1:
print(len(sys.argv))
if sys.argv[1] == "-":
print("This program takes the path to a madlib as an argument. Showing default now.")
        ## TODO: Add input validation, i.e. make sure the input is actually text.
else:
## TODO: Add pipe as input option.
madlib_selection = sys.argv[1]
with open(madlib_selection, 'r') as madlib:
read_madlib = madlib.read()
for i in range(read_madlib.count("#")//2):
first = read_madlib.index("#")
second = read_madlib.index("#", first+1)
replacement = input("Please give me " + read_madlib[first+1:second] + ":")
new_madlib = read_madlib[0:first] + replacement + read_madlib[second+1:]
read_madlib = new_madlib
print("\n\n\n")
print(textwrap.fill(read_madlib, drop_whitespace=False, replace_whitespace=False))
| 31.411765 | 93 | 0.659176 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 292 | 0.273408 |
1c8d061b0e5a02933d936632c10e61f84e6418bb | 2,558 | py | Python | src/tests/control/test_devices.py | bsod85/pretix | d86b3a217352f7ad24008685393f9af18fcf6e6c | [
"ECL-2.0",
"Apache-2.0"
]
| null | null | null | src/tests/control/test_devices.py | bsod85/pretix | d86b3a217352f7ad24008685393f9af18fcf6e6c | [
"ECL-2.0",
"Apache-2.0"
]
| null | null | null | src/tests/control/test_devices.py | bsod85/pretix | d86b3a217352f7ad24008685393f9af18fcf6e6c | [
"ECL-2.0",
"Apache-2.0"
]
| null | null | null | import pytest
from django.utils.timezone import now
from pretix.base.models import Device, Event, Organizer, Team, User
from pretix.base.models.devices import generate_api_token
@pytest.fixture
def organizer():
return Organizer.objects.create(name='Dummy', slug='dummy')
@pytest.fixture
def event(organizer):
event = Event.objects.create(
organizer=organizer, name='Dummy', slug='dummy',
date_from=now()
)
return event
@pytest.fixture
def device(organizer):
return organizer.devices.create(name='Cashdesk')
@pytest.fixture
def admin_user(admin_team):
u = User.objects.create_user('[email protected]', 'dummy')
admin_team.members.add(u)
return u
@pytest.fixture
def admin_team(organizer):
return Team.objects.create(organizer=organizer, can_change_organizer_settings=True, name='Admin team')
@pytest.mark.django_db
def test_list_of_devices(event, admin_user, client, device):
client.login(email='[email protected]', password='dummy')
resp = client.get('/control/organizer/dummy/devices')
assert 'Cashdesk' in resp.rendered_content
@pytest.mark.django_db
def test_create_device(event, admin_user, admin_team, client):
client.login(email='[email protected]', password='dummy')
resp = client.post('/control/organizer/dummy/device/add', {
'name': 'Foo',
'limit_events': str(event.pk),
}, follow=True)
d = Device.objects.last()
assert d.name == 'Foo'
assert not d.all_events
assert list(d.limit_events.all()) == [event]
assert d.initialization_token in resp.content.decode()
@pytest.mark.django_db
def test_update_device(event, admin_user, admin_team, device, client):
client.login(email='[email protected]', password='dummy')
client.post('/control/organizer/dummy/device/{}/edit'.format(device.pk), {
'name': 'Cashdesk 2',
'limit_events': str(event.pk),
}, follow=True)
device.refresh_from_db()
assert device.name == 'Cashdesk 2'
assert not device.all_events
assert list(device.limit_events.all()) == [event]
@pytest.mark.django_db
def test_revoke_device(event, admin_user, admin_team, device, client):
client.login(email='[email protected]', password='dummy')
device.api_token = generate_api_token()
device.initialized = now()
device.save()
client.get('/control/organizer/dummy/device/{}/revoke'.format(device.pk))
client.post('/control/organizer/dummy/device/{}/revoke'.format(device.pk), {}, follow=True)
device.refresh_from_db()
assert device.revoked
| 30.452381 | 106 | 0.713057 | 0 | 0 | 0 | 0 | 2,352 | 0.919468 | 0 | 0 | 462 | 0.18061 |
1c8e802ab7e5ab17bb7b662f2406ded9d3de6507 | 11,773 | py | Python | mcp/augmentation/album.py | j20232/moco_image_pipeline | 997ae76e795548e75f95e862284c1fc0a3c7541a | [
"BSD-3-Clause"
]
| 5 | 2020-03-18T14:36:12.000Z | 2022-01-26T09:36:11.000Z | mcp/augmentation/album.py | j20232/moco_image_pipeline | 997ae76e795548e75f95e862284c1fc0a3c7541a | [
"BSD-3-Clause"
]
| null | null | null | mcp/augmentation/album.py | j20232/moco_image_pipeline | 997ae76e795548e75f95e862284c1fc0a3c7541a | [
"BSD-3-Clause"
]
| null | null | null | import numpy as np
from PIL import Image, ImageOps, ImageEnhance
import albumentations as A
# ndarray: H x W x C
def apply_aug(aug, image):
return aug(image=image)["image"]
# ----------------------------------- Blur -------------------------------------------
class RandomBlur():
def __init__(self, prob, blur_limit=9):
self.prob = np.clip(prob, 0.0, 1.0)
self.blur_limit = blur_limit
def __call__(self, img):
if np.random.uniform() < self.prob:
r = np.random.uniform()
if r < 0.4:
img = apply_aug(A.Blur(blur_limit=self.blur_limit, always_apply=True), img)
elif r < 0.6:
img = apply_aug(A.GaussianBlur(blur_limit=self.blur_limit, always_apply=True), img)
else:
img = apply_aug(A.MotionBlur(blur_limit=self.blur_limit, always_apply=True), img)
return img
# ----------------------------------- Noise -------------------------------------------
class GaussNoise():
def __init__(self, prob, var_limit=(0.0, 0.07)):
self.prob = np.clip(prob, 0.0, 1.0)
self.var_limit = var_limit
def __call__(self, img):
return apply_aug(A.GaussNoise(var_limit=self.var_limit, p=self.prob), img)
class MultiplicativeNoise():
def __init__(self, prob, var_limit=(0.6, 1.1)):
self.prob = np.clip(prob, 0.0, 1.0)
self.var_limit = var_limit
def __call__(self, img):
return apply_aug(A.MultiplicativeNoise(multiplier=self.var_limit, p=self.prob), img)
# ---------------------------------- Distortion ---------------------------------------
class GridDistortion():
def __init__(self, prob, num_steps=10, distort_limit=0.7):
self.prob = np.clip(prob, 0.0, 1.0)
self.num_steps = num_steps
self.distort_limit = distort_limit
def __call__(self, img):
return apply_aug(A.GridDistortion(p=self.prob, num_steps=self.num_steps,
distort_limit=self.distort_limit), img)
class ElasticTransform():
def __init__(self, prob, sigma=40, alpha=1, alpha_affine=15):
self.prob = np.clip(prob, 0.0, 1.0)
self.sigma = sigma
self.alpha = alpha
self.alpha_affine = alpha_affine
def __call__(self, img):
return apply_aug(A.ElasticTransform(p=self.prob, sigma=self.sigma,
alpha=self.alpha, alpha_affine=self.alpha_affine), img)
class ShiftScaleRotate():
def __init__(self, prob, shift_limit=0.0625, scale_limit=0.2, rotate_limit=20):
self.prob = prob
self.shift_limit = shift_limit
self.scale_limit = scale_limit
self.rotate_limit = rotate_limit
def __call__(self, img):
return apply_aug(A.ShiftScaleRotate(p=self.prob, shift_limit=self.shift_limit,
scale_limit=self.scale_limit,
rotate_limit=self.rotate_limit), img)
# ----------------------------------- Histogram ----------------------------------------
class HueSaturationValue():
def __init__(self, prob, hue_shift_limit=20, sat_shift_limit=40, val_shift_limit=100):
self.prob = np.clip(prob, 0.0, 1.0)
self.hue_shift_limit = hue_shift_limit
self.sat_shift_limit = sat_shift_limit
self.val_shift_limit = val_shift_limit
def __call__(self, img):
out = img if img.dtype == "uint8" else (img * 255).astype(np.uint8)
out = apply_aug(A.HueSaturationValue(p=self.prob, hue_shift_limit=self.hue_shift_limit,
sat_shift_limit=self.sat_shift_limit,
val_shift_limit=self.val_shift_limit), out)
return out if img.dtype == "uint8" else (out / 255).astype(np.float64)
class RandomBrightnessContrast():
def __init__(self, prob, brightness_limit=2.0, contrast_limit=0.6):
self.prob = np.clip(prob, 0.0, 1.0)
self.brightness_limit = brightness_limit
self.contrast_limit = contrast_limit
def __call__(self, img):
return apply_aug(A.RandomBrightnessContrast(p=self.prob,
brightness_limit=self.brightness_limit,
contrast_limit=self.contrast_limit,
brightness_by_max=False,
), img)
class RandomCLAHE():
def __init__(self, prob, clip_limit=40.0, tile_grid_size=(16, 16)):
self.prob = np.clip(prob, 0.0, 1.0)
self.clip_limit = clip_limit
self.tile_grid_size = tile_grid_size
def __call__(self, img):
out = img if img.dtype == "uint8" else (img * 255).astype(np.uint8)
out = apply_aug(A.CLAHE(p=self.prob, clip_limit=self.clip_limit,
tile_grid_size=self.tile_grid_size), out)
return out if img.dtype == "uint8" else (out / 255).astype(np.float64)
# ------------------------------------- Removal ------------------------------------------
class CoarseDropout():
def __init__(self, prob, max_holes=10, max_height=12, max_width=12):
self.prob = np.clip(prob, 0.0, 1.0)
self.max_holes = max_holes
self.max_height = max_height
self.max_width = max_width
def __call__(self, img):
return apply_aug(A.CoarseDropout(p=self.prob, max_holes=self.max_holes,
max_height=self.max_height, max_width=self.max_width,
fill_value=np.median(img)), img)
# ------------------------------------------- Augmix -------------------------------------------
# Reference: https://www.kaggle.com/haqishen/augmix-based-on-albumentations
def int_parameter(level, maxval):
"""Helper function to scale `val` between 0 and maxval .
Args:
level: Level of the operation that will be between [0, `PARAMETER_MAX`].
maxval: Maximum value that the operation can have. This will be scaled to
level/PARAMETER_MAX.
Returns:
An int that results from scaling `maxval` according to `level`.
"""
return int(level * maxval / 10)
def float_parameter(level, maxval):
"""Helper function to scale `val` between 0 and maxval.
Args:
level: Level of the operation that will be between [0, `PARAMETER_MAX`].
maxval: Maximum value that the operation can have. This will be scaled to
level/PARAMETER_MAX.
Returns:
A float that results from scaling `maxval` according to `level`.
"""
return float(level) * maxval / 10.
def sample_level(n):
return np.random.uniform(low=0.1, high=n)
def autocontrast(pil_img, _):
return ImageOps.autocontrast(pil_img)
def equalize(pil_img, _):
return ImageOps.equalize(pil_img)
def posterize(pil_img, level):
level = int_parameter(sample_level(level), 4)
return ImageOps.posterize(pil_img, 4 - level)
def rotate(pil_img, level):
degrees = int_parameter(sample_level(level), 30)
if np.random.uniform() > 0.5:
degrees = -degrees
return pil_img.rotate(degrees, resample=Image.BILINEAR)
def solarize(pil_img, level):
level = int_parameter(sample_level(level), 256)
return ImageOps.solarize(pil_img, 256 - level)
def shear_x(pil_img, level):
level = float_parameter(sample_level(level), 0.3)
if np.random.uniform() > 0.5:
level = -level
return pil_img.transform(pil_img.size,
Image.AFFINE, (1, level, 0, 0, 1, 0),
resample=Image.BILINEAR)
def shear_y(pil_img, level):
level = float_parameter(sample_level(level), 0.3)
if np.random.uniform() > 0.5:
level = -level
return pil_img.transform(pil_img.size,
Image.AFFINE, (1, 0, 0, level, 1, 0),
resample=Image.BILINEAR)
def translate_x(pil_img, level):
level = int_parameter(sample_level(level), pil_img.size[0] / 3)
if np.random.random() > 0.5:
level = -level
return pil_img.transform(pil_img.size,
Image.AFFINE, (1, 0, level, 0, 1, 0),
resample=Image.BILINEAR)
def translate_y(pil_img, level):
level = int_parameter(sample_level(level), pil_img.size[0] / 3)
if np.random.random() > 0.5:
level = -level
return pil_img.transform(pil_img.size,
Image.AFFINE, (1, 0, 0, 0, 1, level),
resample=Image.BILINEAR)
# operation that overlaps with ImageNet-C's test set
def color(pil_img, level):
level = float_parameter(sample_level(level), 1.8) + 0.1
return ImageEnhance.Color(pil_img).enhance(level)
# operation that overlaps with ImageNet-C's test set
def contrast(pil_img, level):
level = float_parameter(sample_level(level), 1.8) + 0.1
return ImageEnhance.Contrast(pil_img).enhance(level)
# operation that overlaps with ImageNet-C's test set
def brightness(pil_img, level):
level = float_parameter(sample_level(level), 1.8) + 0.1
return ImageEnhance.Brightness(pil_img).enhance(level)
# operation that overlaps with ImageNet-C's test set
def sharpness(pil_img, level):
level = float_parameter(sample_level(level), 1.8) + 0.1
return ImageEnhance.Sharpness(pil_img).enhance(level)
def normalize(image):
"""Normalize input image channel-wise to zero mean and unit variance."""
return image - 127
def apply_op(image, op, severity):
# image = np.clip(image, 0, 255)
pil_img = Image.fromarray(image) # Convert to PIL.Image
pil_img = op(pil_img, severity)
return np.asarray(pil_img)
def augment_and_mix(image, severity=3, width=3, depth=-1, alpha=1.):
"""Perform AugMix augmentations and compute mixture.
Args:
image: Raw input image as float32 np.ndarray of shape (h, w, c)
        severity: Severity of underlying augmentation operators (between 1 and 10).
width: Width of augmentation chain
depth: Depth of augmentation chain. -1 enables stochastic depth uniformly
from [1, 3]
alpha: Probability coefficient for Beta and Dirichlet distributions.
Returns:
mixed: Augmented and mixed image.
"""
augmentations = [
autocontrast, equalize, posterize, rotate, solarize, shear_x, shear_y,
translate_x, translate_y
]
ws = np.float32(np.random.dirichlet([alpha] * width))
m = np.float32(np.random.beta(alpha, alpha))
mix = np.zeros_like(image).astype(np.float32)
for i in range(width):
image_aug = image.copy()
depth = depth if depth > 0 else np.random.randint(1, 4)
for _ in range(depth):
op = np.random.choice(augmentations)
image_aug = apply_op(image_aug, op, severity)
# Preprocessing commutes since all coefficients are convex
mix += ws[i] * image_aug
# mix += ws[i] * normalize(image_aug)
mixed = (1 - m) * image + m * mix
# mixed = (1 - m) * normalize(image) + m * mix
return mixed
class RandomAugMix():
def __init__(self, prob=0.1, severity=2, width=3, depth=2, alpha=1.):
self.prob = prob
self.severity = severity
self.width = width
self.depth = depth
self.alpha = alpha
def __call__(self, img):
if np.random.uniform() > self.prob:
return img
tmp = (img * 255).astype(np.uint8) if img.dtype != "uint8" else img
out = augment_and_mix(tmp, self.severity, self.width, self.depth, self.alpha)
if type(img) is np.ndarray:
if img.dtype != "uint8":
out = (out / 255).astype(np.float64)
return out
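# Hypothetical usage sketch (not part of the original file): applying the
# AugMix wrapper above to an H x W x C image; prob=1.0 forces augmentation.
#
#     augmenter = RandomAugMix(prob=1.0, severity=2, width=3, depth=2)
#     augmented = augmenter(image)  # same shape as `image`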
| 35.460843 | 99 | 0.599507 | 5,665 | 0.481186 | 0 | 0 | 0 | 0 | 0 | 0 | 2,303 | 0.195617 |
1c8ea0dcd3e4b0f8ab68d4a876a677661904e6f8 | 2,959 | py | Python | website/util/sanitize.py | bdyetton/prettychart | e8b33a7dfdc8c33d15969586be7f68172795f76d | [
"Apache-2.0"
]
| null | null | null | website/util/sanitize.py | bdyetton/prettychart | e8b33a7dfdc8c33d15969586be7f68172795f76d | [
"Apache-2.0"
]
| null | null | null | website/util/sanitize.py | bdyetton/prettychart | e8b33a7dfdc8c33d15969586be7f68172795f76d | [
"Apache-2.0"
]
| null | null | null | # -*- coding: utf-8 -*-
import bleach
import json
def strip_html(unclean):
"""Sanitize a string, removing (as opposed to escaping) HTML tags
:param unclean: A string to be stripped of HTML tags
:return: stripped string
:rtype: str
"""
return bleach.clean(unclean, strip=True, tags=[], attributes=[], styles=[])
def clean_tag(data):
"""Format as a valid Tag
:param data: A string to be cleaned
:return: cleaned string
:rtype: str
"""
# TODO: make this a method of Tag?
return escape_html(data).replace('"', '"').replace("'", ''')
def is_iterable_but_not_string(obj):
"""Return True if ``obj`` is an iterable object that isn't a string."""
return (hasattr(obj, '__iter__') and not hasattr(obj, 'strip'))
def escape_html(data):
"""Escape HTML characters in data.
:param data: A string, dict, or list to clean of HTML characters
:return: A cleaned object
:rtype: str or list or dict
"""
if isinstance(data, dict):
return {
key: escape_html(value)
for (key, value) in data.iteritems()
}
if is_iterable_but_not_string(data):
return [
escape_html(value)
for value in data
]
if isinstance(data, basestring):
return bleach.clean(data)
return data
def assert_clean(data):
"""Ensure that data is cleaned
:raise: AssertionError
"""
def _ensure_clean(value):
if value != bleach.clean(value):
raise ValueError
return escape_html(data)
# TODO: Remove safe_unescape_html when mako html safe comes in
def safe_unescape_html(value):
"""
Return data without html escape characters.
:param value: A string, dict, or list
:return: A string or list or dict without html escape characters
"""
safe_characters = {
'&': '&',
'<': '<',
'>': '>',
}
if isinstance(value, dict):
return {
key: safe_unescape_html(value)
for (key, value) in value.iteritems()
}
if is_iterable_but_not_string(value):
return [
safe_unescape_html(each)
for each in value
]
if isinstance(value, basestring):
for escape_sequence, character in safe_characters.items():
value = value.replace(escape_sequence, character)
return value
return value
def safe_json(value):
"""
Dump a string to JSON in a manner that can be used for JS strings in mako templates.
Providing additional forward-slash escaping to prevent injection of closing markup in strings. See:
http://benalpert.com/2012/08/03/preventing-xss-json.html
:param value: A string to be converted
:return: A JSON-formatted string that explicitly escapes forward slashes when needed
"""
return json.dumps(value).replace('</', '<\\/') # Fix injection of closing markup in strings
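# Hypothetical usage sketch (not part of the original file):
#
#     strip_html('<b>bold</b>')          # -> 'bold' (tags removed)
#     escape_html('<script>x</script>')  # disallowed tags are HTML-escaped
#     safe_json('</script>')             # forward slashes escaped for safe JS embedding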
| 26.185841 | 103 | 0.627239 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,417 | 0.478878 |
1c8eddd2bd80bb485d60b7d54110b5642d861af4 | 16,525 | py | Python | mainTrain.py | PolarizedLightFieldMicroscopy/LFMNet2 | c9b064d7625e018ef54b8dd8a0e53801c4565397 | [
"Apache-2.0"
]
| null | null | null | mainTrain.py | PolarizedLightFieldMicroscopy/LFMNet2 | c9b064d7625e018ef54b8dd8a0e53801c4565397 | [
"Apache-2.0"
]
| null | null | null | mainTrain.py | PolarizedLightFieldMicroscopy/LFMNet2 | c9b064d7625e018ef54b8dd8a0e53801c4565397 | [
"Apache-2.0"
]
| null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter
from torch.utils import data
from torch import optim
import torchvision.models as models
from torch.autograd import Variable
import torchvision as tv
import random
import math
import time
from datetime import datetime
import os
import argparse
import subprocess
from util.LFUtil import *
import numpy as np
from networks.LFMNet import LFMNet
def main(args=None):
# # Arguments
# parser = argparse.ArgumentParser()
# # Number of epochs
# parser.add_argument('--epochs', type=int, default=1000)
# # Validate every n percentage of the data
# parser.add_argument('--valEvery', type=float, default=0.25)
# # Image indices to use for training and validation
# parser.add_argument('--imagesToUse', nargs='+', type=int, default=list(range(0,5,1)))
# # List of GPUs to use: 0 1 2 for example
# parser.add_argument('--GPUs', nargs='+', type=int, default=None)
# # Batch size
# parser.add_argument('--batchSize', type=int, default=128)
# # Perentage of the data to use for validation, from 0 to 1
# parser.add_argument('--validationSplit', type=float, default=0.1)
# # Bias initialization value
# parser.add_argument('--biasVal', type=float, default=0.1)
# # Learning rate
# parser.add_argument('--learningRate', type=float, default=0.001)
# # Use bias flag
# parser.add_argument('--useBias', type=str2bool, default=True)
# # Use skip connections flag
# parser.add_argument('--useSkipCon', type=str2bool, default=False)
# # User selected random seed
# parser.add_argument('--randomSeed', type=int, default=None)
# # fov of input or neighboarhood around lenslet to reconstruct
# parser.add_argument('--fovInput', type=int, default=9)
# # nT number of lenslets to reconstruct simultaneously use at training time
# parser.add_argument('--neighShape', type=int, default=3)
# # Flag to use shallow or large U-net
# parser.add_argument('--useShallowUnet', type=str2bool, default=True)
# # Lower threshold of GT stacks, to get rid of autofluorescence
# parser.add_argument('--ths', type=float, default=0.03)
# # Path to dataset
# parser.add_argument('--datasetPath', nargs='?', default="BrainLFMConfocalDataset/Brain_40x_64Depths_362imgs.h5")
# # Path to directory where models and tensorboard logs are stored
# parser.add_argument('--outputPath', nargs='?', default="runs/")
# # Prefix for current output folder
# parser.add_argument('--outputPrefix', nargs='?', default="")
# # Path to model in case of continuing a training
# parser.add_argument('--checkpointPath', nargs='?', default=None)
# args = parser.parse_args()
nImgs = len(args.imagesToUse)
# Setup multithreading
num_workers = getThreads()
if num_workers!=0:
torch.set_num_threads(num_workers)
if not torch.cuda.is_available():
print("GPU initialization error")
exit(-1)
if torch.cuda.is_available():
print ("Cuda is available")
device_id = torch.cuda.current_device()
gpu_properties = torch.cuda.get_device_properties(device_id)
print("Found %d GPUs available. Using GPU %d (%s) of compute capability %d.%d with "
"%.1fGb total memory.\n" %
(torch.cuda.device_count(),
device_id,
gpu_properties.name,
gpu_properties.major,
gpu_properties.minor,
gpu_properties.total_memory / 1e9))
# Select GPUs to use
args.GPUs = list(range(torch.cuda.device_count())) if args.GPUs is None else args.GPUs
print('Using GPUs: ' + str(args.GPUs))
device_ids = args.GPUs
# Set common random seed
if args.randomSeed is not None:
np.random.seed(args.randomSeed)
torch.manual_seed(args.randomSeed)
# Load checkpoint if provided
if args.checkpointPath is not None:
checkpointPath = args.checkpointPath
checkpoint = torch.load(checkpointPath)
# overwrite args
args = checkpoint['args']
args.checkpointPath = checkpointPath
# set Device to use
device = torch.device("cuda:"+str(device_ids[0]) if torch.cuda.is_available() else "cpu")
# Create unique label
today = datetime.now()
# Get commit number
# label = subprocess.check_output(["git", "describe", "--always"]).strip()
#specific to MBL lab workstation
label = subprocess.check_output(["C:/Program Files/git/bin/git", "describe", "--always"]).strip()
comment = today.strftime('%Y_%m_%d__%H%M%S') + "_"+ str(args.useBias) +"B_"+str(args.biasVal)+"bias_" + str(nImgs) + \
"I_"+ str(args.batchSize)+"BS_"+str(args.useSkipCon)+"Sk_" + str(args.fovInput) + "FOV_" + str(args.neighShape) + "nT_" \
+ str(args.ths) + "ths_" + str(label.decode("utf-8") ) + "_commit__" + args.outputPrefix
# Create output folder
save_folder = args.outputPath + "/" + comment
# If asked to continue a training, save in the same folder
if args.checkpointPath is not None:
save_folder = os.path.split(args.checkpointPath)[0]
print(save_folder)
# Create summary writer to log stuff
writer = SummaryWriter(log_dir=save_folder)
writer.add_text('Description',comment,0)
writer.flush()
# Load dataset
all_data = Dataset(args.datasetPath, args.randomSeed, \
fov=args.fovInput, neighShape=args.neighShape, img_indices=args.imagesToUse, get_full_imgs=False, center_region=None)
# Split validation and testing
train_size = int((1 - args.validationSplit) * len(all_data))
test_size = len(all_data) - train_size
train_dataset, test_dataset = torch.utils.data.random_split(all_data, [train_size, test_size])
# Create data loaders
train_dataset = data.DataLoader(train_dataset, batch_size=args.batchSize,
shuffle=True, num_workers=num_workers, pin_memory=True)
test_dataset = data.DataLoader(test_dataset, batch_size=args.batchSize,
shuffle=True, num_workers=num_workers, pin_memory=True)
validate_every = np.round(len(train_dataset)*args.valEvery)
# Get Dataset information
nDepths = all_data.get_n_depths()
volShape, LFshape = all_data.__shape__()
LFshape = LFshape[0:4]
lateralTile = int(math.sqrt(nDepths))
# Find normalization values
maxInputTrain, maxVolumeTrain = all_data.get_max()
maxInputTest, maxVolumeTest = all_data.get_max()
# Create network
net = LFMNet(nDepths, args.useBias, args.useSkipCon, LFshape, LFfov=args.fovInput, use_small_unet=args.useShallowUnet).to(device)
optimizer = optim.Adam(net.parameters(), lr=args.learningRate)
lossFunction = nn.L1Loss()
# Create SSIM criteria
ssim = SSIM()
ssim.eval()
# Init bias and weights if needed
if args.useBias:
def bias_init(m):
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv3d):
if m.bias is not None:
nn.init.constant_(m.bias.data, args.biasVal)
nn.init.kaiming_normal_(m.weight)
if isinstance(m, nn.ConvTranspose2d):
nn.init.constant_(m.bias.data, args.biasVal)
nn.init.kaiming_normal_(m.weight)
net.apply(bias_init)
# Load network from checkpoint
if args.checkpointPath is not None:
net.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
epochStart = checkpoint['epoch']
epochs = args.epochs + epochStart
train_loss = checkpoint['loss']
# Start distributed data parallel, as it's faster than DataParallel
if torch.cuda.device_count() > 1:
print("Let's use", torch.cuda.device_count(), "GPUs!")
os.environ['MASTER_ADDR'] = 'localhost'
os.environ['MASTER_PORT'] = '1234'+str(device_ids[0])
torch.distributed.init_process_group(backend="nccl", rank=0, world_size=1)
# Move network to distributed data parallel
net = nn.parallel.DistributedDataParallel(net, device_ids=args.GPUs, output_device=args.GPUs[0]).to(device)
# timers
start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)
global_it_counter = 0
# define indices to grab for tensorboard visualization
indices_to_show = torch.randperm(test_size)[0:8]
# Init arrays to store losses
train_losses, test_losses = [], []
test_loss = 0
epochStart = 0
# Start training
for epoch in range(epochStart, args.epochs):
net.train()
torch.set_grad_enabled(True)
torch.cuda.empty_cache()
train_loss = 0
print('Training')
global_it_counter = 0
for nBatch,(inputs,labels) in enumerate(train_dataset):
# compute current iteration
curr_it = epoch*len(train_dataset) + nBatch
# start timer
start.record()
print('ep: ' + str(epoch) + ' ' + str(nBatch+1) + '/' + str(len(train_dataset)) + ' currIt: ' + str(curr_it))
optimizer.zero_grad()
# load data to gpu and normalize from 0 to 1
inputGPU = inputs.float().to(device) / maxInputTest
outputsGT = labels.float().to(device) / maxVolumeTrain
# Threshold GT to get rid of autofluorescence
if args.ths!=0:
outputsGT = imadjust(outputsGT, args.ths,outputsGT.max(), outputsGT.min(), outputsGT.max())
# Predict
outputsVol = net(inputGPU)
loss = lossFunction(outputsGT,outputsVol)
loss.backward()
train_loss += loss.item() / nDepths
optimizer.step()
global_it_counter += inputs.shape[0]
# Record training time
end.record()
torch.cuda.synchronize()
end_time = start.elapsed_time(end)
# Compute time per sample
elapsed_time = end_time/inputs.shape[0]
# Check if validation is required
if nBatch%validate_every==0:
print(comment)
# Write training images to tensorboard
lastBatchSize = min(outputsGT.shape[0],4)
gridOut2 = torch.cat((outputsGT[0:lastBatchSize, :, :, :, :].sum(2).cpu().data.detach(), outputsVol[0:lastBatchSize, :, :, :, :].sum(2).cpu().data.detach()), dim=0)
gridOut2 = tv.utils.make_grid(gridOut2, normalize=True, scale_each=False)
# Select some images in the batch for showing
indices_to_display = torch.randperm(inputGPU.shape[0])[0:4]
outputsGT = F.interpolate(outputsGT[indices_to_display, :, :, :, :],[LFshape[0]*2,LFshape[1]*2,volShape[2]])
outputsVol = F.interpolate(outputsVol[indices_to_display, :, :, :, :],[LFshape[0]*2,LFshape[1]*2,volShape[2]])
inputGPU = inputGPU[indices_to_display,:,:,:,:,:]
currPred = convert3Dto2DTiles(outputsVol, [lateralTile, lateralTile])
currGT = convert3Dto2DTiles(outputsGT, [lateralTile, lateralTile])
inputGrid = LF2Spatial(inputGPU, inputGPU.shape[2:])
gridPred = tv.utils.make_grid(currPred,normalize=True, scale_each=False)
gridGT = tv.utils.make_grid(currGT,normalize=True, scale_each=False)
gridInput = tv.utils.make_grid(inputGrid,normalize=True, scale_each=False)
gt = outputsGT[0,:,:,:,:].sum(3).repeat(3,1,1)
gt /= gt.max()
# Write to tensorboard
writer.add_image('z_proj_train',gt,curr_it)
writer.add_image('images_train_YZ_projection', gridOut2, curr_it)
writer.add_image('outputRGB_train', gridPred, curr_it)
writer.add_image('outputRGB_train_GT', gridGT, curr_it)
writer.add_image('input_train', gridInput, curr_it)
writer.add_scalar('Loss/train', train_loss/global_it_counter, curr_it)
writer.add_scalar('times/train', elapsed_time, curr_it)
# Restart
train_loss = 0.0
global_it_counter = 0
print('Validating')
net.eval()
with torch.no_grad():
avg_psnr = 0
avg_ssim = 0
test_loss = 0
start.record()
for nBatch,(inputs,labels) in enumerate(test_dataset):
inputGPU = inputs.float().to(device) / maxInputTest
outputsGT = labels.float().to(device) / maxVolumeTrain
# Threshold GT to get rid of autofluorescence
outputsGT = imadjust(outputsGT,args.ths,outputsGT.max(), outputsGT.min(), outputsGT.max())
outputsVol = net(inputGPU)
loss = lossFunction(outputsGT,outputsVol)
test_loss += loss.item() / nDepths
# Compute PSNR
lossMSE = nn.functional.mse_loss(outputsVol.to(device).detach(), outputsGT.to(device).detach())
avg_psnr += 10 * math.log10(1 / lossMSE.item())
# Compute ssim
avg_ssim += ssim(outputsVol[:,0,:,:,:].permute(0,3,1,2).contiguous().detach().to(device), outputsGT[:,0,:,:,:].permute(0,3,1,2).contiguous().detach().to(device)).sum()
end.record()
torch.cuda.synchronize()
lastBatchSize = min(outputsGT.shape[0],4)
gridOut2 = torch.cat((outputsGT[0:lastBatchSize, :, :, :, :].sum(2).cpu().data.detach(), outputsVol[0:lastBatchSize, :, :, :, :].sum(2).cpu().data.detach()), dim=0)
gridOut2 = tv.utils.make_grid(gridOut2, normalize=True, scale_each=False)
# process some for showing
indices_to_display = torch.randperm(inputGPU.shape[0])[0:lastBatchSize]
outputsGT = F.interpolate(outputsGT[indices_to_display, :, :, :, :],[LFshape[0]*2,LFshape[1]*2,volShape[2]])
outputsVol = F.interpolate(outputsVol[indices_to_display, :, :, :, :],[LFshape[0]*2,LFshape[1]*2,volShape[2]])
inputGPU = inputGPU[indices_to_display,:,:,:,:,:]
currPred = convert3Dto2DTiles(outputsVol, [lateralTile, lateralTile])
currGT = convert3Dto2DTiles(outputsGT, [lateralTile, lateralTile])
inputGrid = LF2Spatial(inputGPU, inputGPU.shape[2:])
gridPred = tv.utils.make_grid(currPred,normalize=True, scale_each=False)
gridGT = tv.utils.make_grid(currGT,normalize=True, scale_each=False)
gridInput = tv.utils.make_grid(inputGrid,normalize=True, scale_each=False)
# Write to tensorboard
writer.add_image('images_val_YZ_projection', gridOut2, curr_it)
writer.add_image('outputRGB_test', gridPred, curr_it)
writer.add_image('outputRGB_test_GT', gridGT, curr_it)
writer.add_image('input_test', gridInput, curr_it)
writer.add_scalar('Loss/test', test_loss/len(test_dataset), curr_it)
writer.add_scalar('Loss/psnr_val', avg_psnr/len(test_dataset), curr_it)
writer.add_scalar('Loss/ssim_val', avg_ssim/len(test_dataset), curr_it)
writer.add_scalar('LearningRate', args.learningRate, curr_it)
writer.add_scalar('times/val', start.elapsed_time(end)/test_size, curr_it)
net.train()
if epoch%2==0:
torch.save({
'epoch': epoch,
'args' : args,
'model_state_dict': net.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
'loss': train_loss,
'dataset_path': args.datasetPath},
save_folder + '/model_'+str(epoch))
print(f"Epoch {epoch + 1}/{args.epochs}.. "
f"Train loss: {train_loss / len(train_dataset):.7f}.. "
f"Test loss: {test_loss / len(test_dataset):.7f}.. ")
if __name__ == '__main__':
main() | 47.34957 | 191 | 0.616884 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,368 | 0.264327 |
1c8fed7e472142a2a42ee1131ff8f6b28599bc16 | 1,295 | py | Python | tools/utils.py | valsworthen/toxic-comment-classification | 12ceb4d78410a14fba05e43f6f424cec52e6665d | [
"MIT"
]
| 10 | 2018-03-26T05:46:39.000Z | 2020-04-30T08:03:18.000Z | tools/utils.py | valsworthen/toxic_comment_classification | 12ceb4d78410a14fba05e43f6f424cec52e6665d | [
"MIT"
]
| null | null | null | tools/utils.py | valsworthen/toxic_comment_classification | 12ceb4d78410a14fba05e43f6f424cec52e6665d | [
"MIT"
]
| null | null | null | """Utilities"""
import pandas as pd
import numpy as np
from attrdict import AttrDict
import yaml
def average_predictions(cv_predictions, n_splits, num_samples = 153164, num_labels = 6):
"""Average k-fold predictions stored in a dict"""
preds = np.zeros((num_samples, num_labels))
for preds_i in cv_predictions:
preds += preds_i
preds /= n_splits
return preds
def geom_average_predictions(cv_predictions, n_splits, num_samples = 153164, num_labels = 6):
"""Average k-fold predictions stored in a dict"""
preds = np.ones((num_samples, num_labels))
for preds_i in cv_predictions:
preds *= preds_i
preds = preds **(1/n_splits)
return preds
def create_submission(preds, filename):
labels = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']
subm = pd.read_csv('input/sample_submission.csv')
submid = pd.DataFrame({'id': subm["id"]})
submission = pd.concat([submid, pd.DataFrame(preds, columns = labels)], axis=1)
submission.to_csv(filename, index=False)
def format_time(sec):
m, s = divmod(sec, 60)
h, m = divmod(m, 60)
return "{:.0f}h {:.0f}min {:.0f}s".format(h, m, s)
def read_yaml(filepath):
with open(filepath) as f:
config = yaml.load(f)
return AttrDict(config)
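# Hypothetical usage sketch (not part of the original file): ensembling the
# k-fold predictions and writing a submission file.
#
#     preds = average_predictions(cv_predictions, n_splits=10)
#     create_submission(preds, 'submission.csv')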
| 33.205128 | 93 | 0.67722 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 238 | 0.183784 |
1c9014f0cf5d96c8108b4c96a94c876f92838ff8 | 530 | py | Python | dags/exercise1.py | mikef-nl/airflow-training-skeleton | 85a0e9103772be012a41ee0daa9f67ba401bfddc | [
"Apache-2.0"
]
| null | null | null | dags/exercise1.py | mikef-nl/airflow-training-skeleton | 85a0e9103772be012a41ee0daa9f67ba401bfddc | [
"Apache-2.0"
]
| null | null | null | dags/exercise1.py | mikef-nl/airflow-training-skeleton | 85a0e9103772be012a41ee0daa9f67ba401bfddc | [
"Apache-2.0"
]
| null | null | null | import airflow
from airflow.models import DAG
from airflow.operators.dummy_operator import DummyOperator
args = {
'owner': 'Mike',
'start_date': airflow.utils.dates.days_ago(2),
}
dag = DAG(
dag_id='exercise1',
default_args=args,
schedule_interval=None
)
t1 = DummyOperator(task_id='task1', dag=dag)
t2 = DummyOperator(task_id='task2', dag=dag)
t3 = DummyOperator(task_id='task3', dag=dag)
t4 = DummyOperator(task_id='task4', dag=dag)
t5 = DummyOperator(task_id='task5', dag=dag)
t1 >> t2 >> [t3,t4] >> t5
| 23.043478 | 58 | 0.70566 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 71 | 0.133962 |
1c90552cf52e653e519bda73228f741afee1058c | 3,148 | py | Python | pyhelp/scripts/produce_meteo_maps.py | jnsebgosselin/help | f0194a96ba7e1474fe1864d79447ee20cee949ec | [
"MIT"
]
| 12 | 2019-03-11T12:38:35.000Z | 2021-06-26T03:40:18.000Z | pyhelp/scripts/produce_meteo_maps.py | jnsebgosselin/help | f0194a96ba7e1474fe1864d79447ee20cee949ec | [
"MIT"
]
| 23 | 2018-11-22T15:16:12.000Z | 2022-03-25T12:55:33.000Z | pyhelp/scripts/produce_meteo_maps.py | jnsebgosselin/help | f0194a96ba7e1474fe1864d79447ee20cee949ec | [
"MIT"
]
| 2 | 2019-04-18T17:47:00.000Z | 2021-08-31T04:45:30.000Z | # -*- coding: utf-8 -*-
"""
Created on Tue Feb 27 10:54:25 2018
@author: jsgosselin
"""
# ---- Standard Library Imports
from itertools import product
import os.path as osp
import os
# ---- Third Party Imports
import netCDF4
from geopandas import GeoDataFrame
import pandas as pd
from shapely.geometry import Point, Polygon
import numpy as np
dirpath_netcdf = "D:/MeteoGrilleDaily"
# %% Get lat/lon from the netCDF
filename = osp.join(dirpath_netcdf, 'GCQ_v2_2000.nc')
netcdf_dset = netCDF4.Dataset(filename, 'r+')
lat = np.array(netcdf_dset['lat'])
lon = np.array(netcdf_dset['lon'])
netcdf_dset.close()
# %% Read the weather data from the InfoClimat grid
stack_precip = []
stack_tasmax = []
stack_tasmin = []
nyear = 0
for year in range(2000, 2015):
print("\rProcessing year %d" % year, end=' ')
filename = osp.join(dirpath_netcdf, 'GCQ_v2_%d.nc' % year)
netcdf_dset = netCDF4.Dataset(filename, 'r+')
stack_precip.append(np.array(netcdf_dset['pr']))
stack_tasmax.append(np.array(netcdf_dset['tasmax']))
stack_tasmin.append(np.array(netcdf_dset['tasmin']))
netcdf_dset.close()
nyear += 1
print('')
daily_precip = np.vstack(stack_precip)
daily_tasmax = np.vstack(stack_tasmax)
daily_tasmin = np.vstack(stack_tasmin)
daily_tasavg = (daily_tasmax + daily_tasmin) / 2
yearly_avg_precip = np.sum(daily_precip, axis=0) / nyear
yearly_avg_tasavg = np.average(daily_tasavg, axis=0)
yearly_avg_tasmax = np.average(daily_tasmax, axis=0)
yearly_avg_tasmin = np.average(daily_tasmin, axis=0)
# %% Create a grid
Np = len(lat) * len(lon)
geometry = []
arr_yearly_avg_precip = np.zeros(Np)
arr_avg_yearly_tasavg = np.zeros(Np)
arr_avg_yearly_tasmax = np.zeros(Np)
arr_avg_yearly_tasmin = np.zeros(Np)
i = 0
dx = dy = 0.1/2
for j, k in product(range(len(lat)), range(len(lon))):
print("\rProcessing cell %d of %d" % (i, Np), end=' ')
point = Point((lon[k], lat[j]))
# polygon = Polygon([(lon[k]-dx, lat[j]-dy),
# (lon[k]-dx, lat[j]+dy),
# (lon[k]+dx, lat[j]+dy),
# (lon[k]+dx, lat[j]-dy)])
geometry.append(point)
arr_yearly_avg_precip[i] = yearly_avg_precip[j, k]
arr_avg_yearly_tasavg[i] = yearly_avg_tasavg[j, k]
arr_avg_yearly_tasmax[i] = yearly_avg_tasmax[j, k]
arr_avg_yearly_tasmin[i] = yearly_avg_tasmin[j, k]
i += 1
print("\rProcessing cell %d of %d" % (i, Np))
# %%
print('\rFormating the data in a shapefile...', end=' ')
df = pd.DataFrame(data={'precip': arr_yearly_avg_precip,
'tasavg': arr_avg_yearly_tasavg,
'tasmax': arr_avg_yearly_tasmax,
'tasmin': arr_avg_yearly_tasmin})
crs = "+proj=longlat +ellps=GRS80 +datum=NAD83 +towgs84=0,0,0,0,0,0,0 +no_defs"
gdf = GeoDataFrame(df, crs=crs, geometry=geometry)
print('\rFormating the data in a shapefile... done')
print('\rSaving to Shapefile...', end=' ')
path_shp_out = ("D:/MeteoGrilleDaily/grid_yearly_meteo/grid_yearly_meteo.shp")
if not osp.exists(path_shp_out):
os.makedirs(path_shp_out)
gdf.to_file(path_shp_out)
print('\rSaving to Shapefile... done', end=' ')
| 29.420561 | 79 | 0.67249 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 917 | 0.291296 |
1c90b62f02619f835bc7d89b23d75b9ecf0b6be0 | 1,803 | py | Python | platform/core/tests/test_activitylogs/test_service.py | hackerwins/polyaxon | ff56a098283ca872abfbaae6ba8abba479ffa394 | [
"Apache-2.0"
]
| null | null | null | platform/core/tests/test_activitylogs/test_service.py | hackerwins/polyaxon | ff56a098283ca872abfbaae6ba8abba479ffa394 | [
"Apache-2.0"
]
| null | null | null | platform/core/tests/test_activitylogs/test_service.py | hackerwins/polyaxon | ff56a098283ca872abfbaae6ba8abba479ffa394 | [
"Apache-2.0"
]
| null | null | null | # pylint:disable=ungrouped-imports
import uuid
import pytest
import activitylogs
from db.models.activitylogs import ActivityLog
from events.registry.experiment import EXPERIMENT_DELETED_TRIGGERED
from events.registry.user import USER_ACTIVATED
from factories.factory_experiments import ExperimentFactory
from factories.factory_users import UserFactory
from tests.base.case import BaseTest
@pytest.mark.activitylogs_mark
class ActivityLogsTest(BaseTest):
def setUp(self):
super().setUp()
self.experiment = ExperimentFactory()
self.admin = UserFactory(is_staff=True, is_superuser=True)
self.user = UserFactory()
def test_record_creates_activities(self):
assert ActivityLog.objects.count() == 0
activitylogs.record(ref_id=uuid.uuid4(),
event_type=USER_ACTIVATED,
instance=self.user,
actor_id=self.admin.id,
actor_name=self.admin.username)
assert ActivityLog.objects.count() == 1
activity = ActivityLog.objects.last()
assert activity.event_type == USER_ACTIVATED
assert activity.content_object == self.user
assert activity.actor == self.admin
activitylogs.record(ref_id=uuid.uuid4(),
event_type=EXPERIMENT_DELETED_TRIGGERED,
instance=self.experiment,
actor_id=self.admin.id,
actor_name=self.admin.username)
assert ActivityLog.objects.count() == 2
activity = ActivityLog.objects.last()
assert activity.event_type == EXPERIMENT_DELETED_TRIGGERED
assert activity.content_object == self.experiment
assert activity.actor == self.admin
| 36.795918 | 68 | 0.658902 | 1,377 | 0.763727 | 0 | 0 | 1,408 | 0.780921 | 0 | 0 | 34 | 0.018857 |
1c9164529ab22e884811a536f6b8ba91eb8bbe19 | 420 | py | Python | tests/framework/test_ingress.py | praus/shapy | 7fa5512d9015b4921870f212495280fbb0675164 | [
"MIT"
]
| 54 | 2015-01-14T10:14:11.000Z | 2022-02-25T17:12:10.000Z | tests/framework/test_ingress.py | praus/shapy | 7fa5512d9015b4921870f212495280fbb0675164 | [
"MIT"
]
| 1 | 2015-12-29T07:37:17.000Z | 2015-12-30T06:17:41.000Z | tests/framework/test_ingress.py | praus/shapy | 7fa5512d9015b4921870f212495280fbb0675164 | [
"MIT"
]
| 14 | 2015-02-10T15:29:48.000Z | 2021-09-22T03:01:13.000Z | import unittest
from shapy.framework.tcelements import *
from shapy.framework.executor import run
from tests import TCTestCase
class TestIngress(TCTestCase):
def setUp(self):
self.interface = Interface('lo')
def test_ingress_filter(self):
q = IngressQdisc()
q.add(RedirectFilter('dst 127.0.0.3', 'eth0'))
self.interface.add_ingress(q)
self.interface.set_shaping()
| 24.705882 | 54 | 0.688095 | 289 | 0.688095 | 0 | 0 | 0 | 0 | 0 | 0 | 25 | 0.059524 |
1c91c9a08ce7e29a5358fe242bc8b960fc941c8f | 1,844 | py | Python | software/hippietrap/gradient.py | mayhem/led-chandelier | 899caa8d81e6aac6e954f78b4f5b4ab101bf5257 | [
"MIT"
]
| 2 | 2018-09-20T08:36:11.000Z | 2019-08-25T20:06:11.000Z | software/hippietrap/gradient.py | mayhem/led-chandelier | 899caa8d81e6aac6e954f78b4f5b4ab101bf5257 | [
"MIT"
]
| null | null | null | software/hippietrap/gradient.py | mayhem/led-chandelier | 899caa8d81e6aac6e954f78b4f5b4ab101bf5257 | [
"MIT"
]
| 1 | 2020-12-12T18:21:18.000Z | 2020-12-12T18:21:18.000Z | from colorsys import hsv_to_rgb
from math import fabs, fmod
import os
from hippietrap.color import Color
class Gradient(object):
def __init__(self, palette, num_leds = 1):
        # palettes are in format [ (.345, (128, 0, 128)) ]
self._validate_palette(palette)
self.palette = palette
self.num_leds = num_leds
self.led_scale = 1.0
self.led_offset = 0.0
def _validate_palette(self, palette):
if len(palette) < 2:
raise ValueError("Palette must have at least two points.")
if palette[0][0] > 0.0:
raise ValueError("First point in palette must be less than or equal to 0.0")
if palette[-1][0] < 1.0:
raise ValueError("Last point in palette must be greater than or equal to 1.0")
def set_scale(self, scale):
self.led_scale = scale
def set_offset(self, offset):
self.led_offset = offset
def get_color(self, offset):
if offset < 0.0 or offset > 1.0:
raise IndexError("Invalid offset.")
for index in range(len(self.palette)):
# skip the first item
if index == 0:
continue
if self.palette[index][0] >= offset:
section_begin_offset = self.palette[index-1][0]
section_end_offset = self.palette[index][0]
percent = (offset - section_begin_offset) / (section_end_offset - section_begin_offset)
new_color = []
for color in range(3):
new_color.append(int(self.palette[index-1][1][color] +
((self.palette[index][1][color] - self.palette[index-1][1][color]) * percent)))
return Color(min(new_color[0], 255), min(new_color[1], 255), min(new_color[2], 255))
assert False
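# --- Illustrative usage sketch (editor's addition, not part of the original
# module). The three-point palette below is an assumed example; only the
# Gradient API defined above is relied upon.
if __name__ == "__main__":
    demo_palette = [(0.0, (0, 0, 0)),        # black at offset 0.0
                    (0.5, (255, 128, 0)),    # orange half way
                    (1.0, (255, 255, 255))]  # white at offset 1.0
    demo_gradient = Gradient(demo_palette)
    # get_color() linearly interpolates between the two palette points that
    # bracket the requested offset.
    for demo_offset in (0.0, 0.25, 0.5, 0.75, 1.0):
        print(demo_offset, demo_gradient.get_color(demo_offset))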
| 29.741935 | 107 | 0.58026 | 1,737 | 0.941974 | 0 | 0 | 0 | 0 | 0 | 0 | 246 | 0.133406 |
1c95c503cb53578803e2dbe2dd22ba875018dd47 | 817 | py | Python | stix_shifter_modules/elastic/entry_point.py | 6un9-h0-Dan/stix-shifter | f99feee8c247b9fc1d79f6db623c301b49685b63 | [
"Apache-2.0"
]
| 1 | 2020-04-06T21:28:19.000Z | 2020-04-06T21:28:19.000Z | stix_shifter_modules/elastic/entry_point.py | 6un9-h0-Dan/stix-shifter | f99feee8c247b9fc1d79f6db623c301b49685b63 | [
"Apache-2.0"
]
| null | null | null | stix_shifter_modules/elastic/entry_point.py | 6un9-h0-Dan/stix-shifter | f99feee8c247b9fc1d79f6db623c301b49685b63 | [
"Apache-2.0"
]
| null | null | null | from stix_shifter_utils.utils.entry_point_base import EntryPointBase
from stix_shifter_utils.modules.cim.stix_translation.cim_data_mapper import CimDataMapper
from stix_shifter_utils.modules.car.stix_translation.car_data_mapper import CarDataMapper
from .stix_translation.stix_to_elastic import StixToElastic
class EntryPoint(EntryPointBase):
def __init__(self, connection={}, configuration={}, options={}):
super().__init__(options)
self.add_dialect('default', query_translator=StixToElastic(), data_mapper=CarDataMapper(options), default=True)
self.add_dialect('cim', query_translator=StixToElastic(), data_mapper=CimDataMapper(options), default_include=False)
self.add_dialect('car', query_translator=StixToElastic(), data_mapper=CarDataMapper(options), default_include=False) | 74.272727 | 124 | 0.812729 | 507 | 0.620563 | 0 | 0 | 0 | 0 | 0 | 0 | 19 | 0.023256 |
1c962e345da89a5eb411a0b3f49cfb775dfe43b5 | 1,850 | py | Python | src/http_pick/pickergui.py | thomaspcole/http-pick | c470869878483241672c2928fd85458ab30555c4 | [
"MIT"
]
| null | null | null | src/http_pick/pickergui.py | thomaspcole/http-pick | c470869878483241672c2928fd85458ab30555c4 | [
"MIT"
]
| null | null | null | src/http_pick/pickergui.py | thomaspcole/http-pick | c470869878483241672c2928fd85458ab30555c4 | [
"MIT"
]
| null | null | null | from PyQt5.QtWidgets import (QMainWindow, QToolButton, QWidget, QHBoxLayout)
from PyQt5.QtGui import QIcon
from PyQt5 import QtCore
from math import floor
import sys
class MainWindow(QMainWindow):
def __init__(self, browsers, iconsize=72, displayappname=False, x=0, y=0, callback=lambda v: print(v)):
super().__init__()
self.setFocus()
self.centralwidget = QWidget()
self.setAttribute(QtCore.Qt.WA_TranslucentBackground)
self.setWindowFlags(QtCore.Qt.FramelessWindowHint)
self.setCentralWidget(self.centralwidget)
self.lay = QHBoxLayout(self.centralwidget)
self.lay.setContentsMargins(0,0,0,0)
self.lay.setSpacing(0)
xOffset = floor((iconsize*len(browsers))/2)
yOffset = floor(iconsize*1.25)
self.move(x-xOffset,y-yOffset)
for b in browsers:
self.btn = QToolButton(self)
if '/' in b: #'Normal' launch path
path = b
appname = path.split('/')
            elif '.' in b: #Flatpak ref
                path = b
                appname = path.split('.')
            else: #fallback so path/appname are always defined
                path = b
                appname = [b]
self.btn.setIcon(QIcon.fromTheme(appname[-1]))
self.btn.setIconSize(QtCore.QSize(iconsize,iconsize))
self.btn.setStyleSheet("QToolButton {background-color: transparent; border: 0px; color: white;}")
if(displayappname):
self.btn.setToolButtonStyle(QtCore.Qt.ToolButtonStyle.ToolButtonTextUnderIcon)
self.btn.setText(appname[-1].capitalize())
self.btn.clicked.connect(lambda v, path=path : callback(path))
self.lay.addWidget(self.btn)
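    # Illustrative note (editor's addition): callers are expected to construct
    # the window with something like
    #     MainWindow(['/usr/bin/firefox', 'org.mozilla.firefox'], x=100, y=100,
    #                callback=some_launch_fn)
    # where each entry is either a filesystem path or a Flatpak ref; these
    # example values are assumptions, not defaults shipped with http-pick.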
def on_focusChanged(self):
if(self.isActiveWindow() == False):
quit() | 40.217391 | 148 | 0.591892 | 1,683 | 0.90973 | 0 | 0 | 0 | 0 | 0 | 0 | 118 | 0.063784 |
1c967254ce0d2a6e7d37a5e738a1749e4d64b857 | 6,324 | py | Python | genetic_pwdcrack.py | robotenique/AI-programming | 41a690963b452165342cfd3caa81bfad13d1cc76 | [
"Unlicense"
]
| 3 | 2018-04-05T16:38:48.000Z | 2020-11-15T21:24:57.000Z | genetic_pwdcrack.py | robotenique/AI-programming | 41a690963b452165342cfd3caa81bfad13d1cc76 | [
"Unlicense"
]
| null | null | null | genetic_pwdcrack.py | robotenique/AI-programming | 41a690963b452165342cfd3caa81bfad13d1cc76 | [
"Unlicense"
]
| null | null | null | """
Crack a password using a genetic algorithm!
"""
import random as rnd
def main():
"""
This file implements a genetic algorithm to solve the problem of
cracking a given password, by creating 'generations' of different
    words, selecting the best, breeding them, applying a simple crossover
(randomized) and a mutation chance.
"""
#variables dict: Define the problem constants
genetic_variables = {
'password' : "verylongwordpass",
'size_population' : 100,
'best_sample' : 20,
'lucky_few' : 20,
'number_of_child' : 5,
'number_of_generations' : 10000, #Overkill >:D
'chance_of_mutation' : .5
}
prob = genetic_variables
#program
if (prob['best_sample'] + prob['lucky_few'])/2*prob['number_of_child'] != prob['size_population']:
print ("population size not stable")
return
last_gen, _ = genetic_algorithm(**genetic_variables)
print("Last generation: \n\n")
print(last_gen)
def genetic_algorithm(**kwargs):
"""
Execute the genetic algorithm.
This algorithm takes a dict as an argument.
It will iterate based on the variable 'number_of_generations', and return
    the last generation and the history of populations.
"""
# Unpack the values from the dict
password = kwargs['password']
size_population = kwargs['size_population']
best_sample = kwargs['best_sample']
lucky_few = kwargs['lucky_few']
number_of_child = kwargs['number_of_child']
number_of_generations = kwargs['number_of_generations']
chance_of_mutation = kwargs['chance_of_mutation']
hist = []
# The genetic algorithm
curr_pop = initial_pop(size_population, password)
hist = curr_pop
last_found = -1
for _ in range (number_of_generations):
curr_pop = next_gen(curr_pop, password, best_sample, lucky_few, number_of_child, chance_of_mutation)
hist.append(curr_pop)
if check_solution(curr_pop, password):
last_found = _
break
if last_found != -1:
print(f"Found a solution in the {last_found} generation!!")
else:
print("No solution found! D':")
return curr_pop, hist
def next_gen(curr_pop, password, best_sample, lucky_few, number_of_child, chance_of_mutation):
"""
-> This is the main task of the Genetic Algorithm <-
Given the current population, apply the following steps:
- Compute the fitness of each individual in the population
- Select the best ones (and some lucky guys)
- Make them reproduce
- Mutate the children
- Return this new population
"""
pop_sorted = compute_perf_pop(curr_pop, password)
next_breeders = select_from_population(pop_sorted, best_sample, lucky_few)
next_pop = create_children(next_breeders, number_of_child)
next_gen = mutate_pop(next_pop, chance_of_mutation)
return next_gen
def initial_pop(size, password):
"""
Generate a population consisting of random words, each with the same
length as the password, and the population has the size specified.
"""
return [word_generate(len(password)) for _ in range(size)]
def fitness (password, test_word):
"""
The fitness function:
    fitness(test_word) = (# of correct chars) * 100 / (total number of chars)
fitness(test_word) = 0 if # of correct chars = 0
fitness(test_word) = 100 if # of correct chars = total number of chars
"""
if (len(test_word) != len(password)):
print("Incompatible password...")
return
else:
score = (1 if password[i] == test_word[i] else 0 for i in range(len(password)))
return sum(score)*100/len(password)
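# Worked example for fitness() (editor's addition, not in the original file):
# fitness("cat", "cut") -> 'c' and 't' match, so 2 * 100 / 3 ~= 66.7, while
# fitness("cat", "cat") -> 100.0 and fitness("cat", "dog") -> 0.0.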
def compute_perf_pop(population, password):
"""
Return the population, sorted by the fitness from each individual
"""
populationPerf = {ind:fitness(password, ind) for ind in population}
# Sort by fitness, reversed (best ones in the beginning of the list)
return sorted(populationPerf.items(), key= lambda it: it[1], reverse=True)
def select_from_population(pop_sorted, best_sample, lucky_few):
"""
Create the next breeders, with 'best_sample' individuals which have the
top fitness value from the population, and 'lucky_few' individuals which
are randomly selected.
"""
next_gen = []
for i in range(best_sample):
next_gen.append(pop_sorted[i][0])
# Simple lucky few: randomly select some elements from the population
for i in range(lucky_few):
next_gen.append(rnd.choice(pop_sorted)[0])
rnd.shuffle(next_gen)
return next_gen
def create_children(breeders, nof_childs):
"""
Create the next population of individuals, by breeding two by two
"""
next_pop = []
mid_pos = len(breeders)//2 # len(breeders) must be an even number
for ind_1, ind_2 in zip(breeders[:mid_pos], breeders[mid_pos:]):
for _ in range(nof_childs):
next_pop.append(create_child(ind_1, ind_2))
return next_pop
def mutate_pop(population, chance):
"""
    Given a mutation chance, apply the mutation layer of the genetic
    algorithm: each individual in the population is mutated with the
    probability specified.
"""
for i in range(len(population)):
if rnd.random() < chance:
population[i] = mutate_word(population[i])
return population
def mutate_word(word):
"""
Mutate a letter(gene) from the word, then return it
"""
pos = int(rnd.random()*len(word))
word = word[:pos] + chr(97 + int(26*rnd.random())) + word[pos + 1:]
return word
def create_child(ind_1, ind_2):
"""
For each letter of the child, get a random gene from ind_1 or ind_2
in the i-th position.
"""
temp = [ind_1[i] if rnd.random() < 0.5 else ind_2[i] for i in range(len(ind_1))]
return "".join(temp)
def word_generate(length):
"""
Generate a string with random lowercase letters, with length = length!
"""
# Generate a random letter from alphabet, lowercase, and add to result
return "".join((chr(97 + rnd.randint(0, 26)) for _ in range(length)))
def check_solution(population, password):
"""
Check if the population found a solution to the problem
"""
return any(ind == password for ind in population)
if __name__ == '__main__':
main()
| 34 | 108 | 0.669355 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,956 | 0.467426 |
1c9707dc1574081d46ce438a0fbd3d659ca252fc | 7,985 | py | Python | openverse_catalog/dags/providers/provider_api_scripts/science_museum.py | yavik-kapadia/openverse-catalog | 853766f2176a96450f456a9fd6675e134c0866e1 | [
"MIT"
]
| 25 | 2021-05-06T20:53:45.000Z | 2022-03-30T23:18:50.000Z | openverse_catalog/dags/providers/provider_api_scripts/science_museum.py | yavik-kapadia/openverse-catalog | 853766f2176a96450f456a9fd6675e134c0866e1 | [
"MIT"
]
| 272 | 2021-05-17T05:53:00.000Z | 2022-03-31T23:57:20.000Z | openverse_catalog/dags/providers/provider_api_scripts/science_museum.py | yavik-kapadia/openverse-catalog | 853766f2176a96450f456a9fd6675e134c0866e1 | [
"MIT"
]
| 13 | 2021-06-12T07:09:06.000Z | 2022-03-29T17:39:13.000Z | import logging
from common.licenses import get_license_info
from common.loader import provider_details as prov
from common.requester import DelayedRequester
from common.storage.image import ImageStore
logging.basicConfig(
format="%(asctime)s - %(name)s - %(levelname)s: %(message)s", level=logging.INFO
)
logger = logging.getLogger(__name__)
LIMIT = 100
DELAY = 5.0
RETRIES = 3
PROVIDER = prov.SCIENCE_DEFAULT_PROVIDER
ENDPOINT = "https://collection.sciencemuseumgroup.org.uk/search/"
delay_request = DelayedRequester(delay=DELAY)
image_store = ImageStore(provider=PROVIDER)
HEADERS = {"Accept": "application/json"}
DEFAULT_QUERY_PARAMS = {
"has_image": 1,
"image_license": "CC",
"page[size]": LIMIT,
"page[number]": 0,
"date[from]": 0,
"date[to]": 1500,
}
YEAR_RANGE = [
(0, 1500),
(1500, 1750),
(1750, 1825),
(1825, 1850),
(1850, 1875),
(1875, 1900),
(1900, 1915),
(1915, 1940),
(1940, 1965),
(1965, 1990),
(1990, 2020),
]
# global variable to keep track of records pulled
RECORD_IDS = []
def main():
logger.info("Begin: Science Museum script")
for year_range in YEAR_RANGE:
logger.info(f"Running for years {year_range}")
from_year, to_year = year_range
image_count = _page_records(from_year=from_year, to_year=to_year)
logger.info(f"Images pulled till now {image_count}")
image_count = image_store.commit()
logger.info(f"Total images pulled {image_count}")
def _page_records(from_year, to_year):
image_count = 0
page_number = 0
condition = True
while condition:
query_param = _get_query_param(
page_number=page_number, from_year=from_year, to_year=to_year
)
batch_data = _get_batch_objects(query_param=query_param)
if type(batch_data) == list:
if len(batch_data) > 0:
image_count = _handle_object_data(batch_data)
page_number += 1
else:
condition = False
else:
condition = False
return image_count
def _get_query_param(
page_number=0, from_year=0, to_year=1500, default_query_param=None
):
if default_query_param is None:
default_query_param = DEFAULT_QUERY_PARAMS
query_param = default_query_param.copy()
query_param["page[number]"] = page_number
query_param["date[from]"] = from_year
query_param["date[to]"] = to_year
return query_param
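# Illustrative example (editor's addition): _get_query_param(page_number=2,
# from_year=1500, to_year=1750) returns a copy of DEFAULT_QUERY_PARAMS with
# "page[number]" set to 2, "date[from]" to 1500 and "date[to]" to 1750; this
# is the dict that _page_records() hands to _get_batch_objects() for each page.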
def _get_batch_objects(
endpoint=ENDPOINT, headers=None, retries=RETRIES, query_param=None
):
if headers is None:
headers = HEADERS.copy()
data = None
for retry in range(retries):
response = delay_request.get(endpoint, query_param, headers=headers)
try:
response_json = response.json()
if "data" in response_json.keys():
data = response_json.get("data")
break
except Exception as e:
logger.error(f"Failed to due to {e}")
return data
def _handle_object_data(batch_data):
image_count = 0
for obj_ in batch_data:
id_ = obj_.get("id")
if id_ in RECORD_IDS:
continue
RECORD_IDS.append(id_)
foreign_landing_url = obj_.get("links", {}).get("self")
if foreign_landing_url is None:
continue
obj_attributes = obj_.get("attributes")
if obj_attributes is None:
continue
title = obj_attributes.get("summary_title")
creator = _get_creator_info(obj_attributes)
metadata = _get_metadata(obj_attributes)
multimedia = obj_attributes.get("multimedia")
if multimedia is None:
continue
for image_data in multimedia:
foreign_id = image_data.get("admin", {}).get("uid")
if foreign_id is None:
continue
processed = image_data.get("processed")
source = image_data.get("source")
image_url, height, width = _get_image_info(processed)
if image_url is None:
continue
license_version = _get_license_version(source)
if license_version is None:
continue
license_, version = license_version.lower().split(" ")
license_ = license_.replace("cc-", "")
license_info = get_license_info(license_=license_, license_version=version)
thumbnail_url = _get_thumbnail_url(processed)
image_count = image_store.add_item(
foreign_identifier=foreign_id,
foreign_landing_url=foreign_landing_url,
image_url=image_url,
height=height,
width=width,
license_info=license_info,
thumbnail_url=thumbnail_url,
creator=creator,
title=title,
meta_data=metadata,
)
return image_count
def _get_creator_info(obj_attr):
creator_info = None
life_cycle = obj_attr.get("lifecycle")
if life_cycle:
creation = life_cycle.get("creation")
if type(creation) == list:
maker = creation[0].get("maker")
if type(maker) == list:
creator_info = maker[0].get("summary_title")
return creator_info
def _get_image_info(processed):
if processed.get("large"):
image = processed.get("large").get("location")
measurements = processed.get("large").get("measurements")
elif processed.get("medium"):
image = processed.get("medium").get("location")
measurements = processed.get("medium").get("measurements")
else:
image = None
measurements = None
image = check_url(image)
height, width = _get_dimensions(measurements)
return image, height, width
def _get_thumbnail_url(processed):
if processed.get("large_thumbnail"):
image = processed.get("large_thumbnail").get("location")
elif processed.get("medium_thumbnail"):
image = processed.get("medium_thumbnail").get("location")
elif processed.get("small_thumbnail"):
image = processed.get("small_thumbnail").get("location")
else:
image = None
thumbnail_url = check_url(image)
return thumbnail_url
def check_url(image_url):
base_url = "https://coimages.sciencemuseumgroup.org.uk/images/"
if image_url:
if "http" in image_url:
checked_url = image_url
else:
checked_url = base_url + image_url
else:
checked_url = None
return checked_url
def _get_dimensions(measurements):
height_width = {}
if measurements:
dimensions = measurements.get("dimensions")
if dimensions:
for dim in dimensions:
height_width[dim.get("dimension")] = dim.get("value")
return height_width.get("height"), height_width.get("width")
def _get_license_version(source):
license_version = None
if source:
legal = source.get("legal")
if legal:
rights = legal.get("rights")
if type(rights) == list:
license_version = rights[0].get("usage_terms")
return license_version
def _get_metadata(obj_attr):
metadata = {}
identifier = obj_attr.get("identifier")
if type(identifier) == list:
metadata["accession number"] = identifier[0].get("value")
name = obj_attr.get("name")
if type(name) == list:
metadata["name"] = name[0].get("value")
category = obj_attr.get("categories")
if type(category) == list:
metadata["category"] = category[0].get("value")
creditline = obj_attr.get("legal")
if type(creditline) == dict:
metadata["creditline"] = creditline.get("credit_line")
description = obj_attr.get("description")
if type(description) == list:
metadata["description"] = description[0].get("value")
return metadata
if __name__ == "__main__":
main()
| 30.830116 | 87 | 0.628053 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,128 | 0.141265 |
1c97af3344054a3843093ee257c735adccd419f3 | 1,089 | py | Python | digitaltape.py | heerdyes/tapegame | d6e0c6f81fe9c7c85a54edbd037be318ff7ed391 | [
"Artistic-2.0"
]
| null | null | null | digitaltape.py | heerdyes/tapegame | d6e0c6f81fe9c7c85a54edbd037be318ff7ed391 | [
"Artistic-2.0"
]
| null | null | null | digitaltape.py | heerdyes/tapegame | d6e0c6f81fe9c7c85a54edbd037be318ff7ed391 | [
"Artistic-2.0"
]
| null | null | null | # tape variables
TS_MAX=1000
# the digital tape model
class DTape:
def __init__(self,size,alphabet,noopidx=0):
if size>TS_MAX:
self.size=TS_MAX
else:
self.size=size
if len(alphabet)==0:
raise Exception('alphabet has zero symbols')
self.alphabet=alphabet
self.data=[self.alphabet[noopidx] for x in range(self.size)]
class DTapeMC:
def __init__(self,dtape,cmdmap,noopsym):
self.tape=dtape
self.thead=0
self.cmdmap=cmdmap
self.noopsym=noopsym
self.jmpctr=1
def process_cell(self):
if self.thead>=len(self.tape.data) or self.thead<0:
print('[TAPEBOUND_EXCEEDED] machine head @[%d] is beyond tape'%self.thead)
return
datum=self.tape.data[self.thead]
print('evaluating: %s'%datum)
if datum==self.noopsym:
print('noop')
else:
            eval(self.cmdmap[datum])
self.thead+=self.jmpctr
class DTapeComputer:
    def __init__(self,dtapemc,casetteimg):
        self.tapemc=dtapemc
        self.casetteimg=casetteimg
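# --- Illustrative usage sketch (editor's addition, not part of the original
# file). The alphabet, command map and tape size are assumptions made up for
# the example; cmdmap maps a symbol to a Python expression for eval().
if __name__=='__main__':
    tape=DTape(10,['.','p'],noopidx=0)   # '.' is the no-op symbol
    tape.data[3]='p'                     # place one command on the tape
    democmds={'p':"print('hello from the tape')"}
    mc=DTapeMC(tape,democmds,'.')
    for _ in range(len(tape.data)):      # step the head across the whole tape
        mc.process_cell()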
| 26.560976 | 86 | 0.602388 | 1,027 | 0.943067 | 0 | 0 | 0 | 0 | 0 | 0 | 145 | 0.13315 |
1c980836374b3fb5fedf0a12599c8c546395b546 | 422 | py | Python | webhook-cdk/lambda/vars.py | ncalteen/github-webhook-lambda-example | 414daf1a70343abf207ff37dc4a9d65d6892197d | [
"MIT"
]
| null | null | null | webhook-cdk/lambda/vars.py | ncalteen/github-webhook-lambda-example | 414daf1a70343abf207ff37dc4a9d65d6892197d | [
"MIT"
]
| null | null | null | webhook-cdk/lambda/vars.py | ncalteen/github-webhook-lambda-example | 414daf1a70343abf207ff37dc4a9d65d6892197d | [
"MIT"
]
| 1 | 2022-03-29T14:42:25.000Z | 2022-03-29T14:42:25.000Z | import json
# Output must be returned in the format mentioned below:
# https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-lambda-proxy-integrations.html#api-gateway-simple-proxy-for-lambda-output-format
lambda_response = {
"isBase64Encoded": False,
"statusCode": 200,
"headers": {
"Content-Type": "application/json",
},
"body": json.dumps({
"Status": "OK"
})
}
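# Illustrative sketch (editor's addition): a minimal Lambda handler that uses
# the canned response above. The handler name is an assumption for the
# example, not part of the original project.
def example_handler(event, context):
    # API Gateway proxy integration only needs the response shape defined in
    # lambda_response; the body is already JSON-encoded by json.dumps above.
    return lambda_response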
| 26.375 | 152 | 0.668246 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 296 | 0.701422 |
1c988d19204c6f421dff8e8f0c696fe6f0e5ec4f | 3,737 | py | Python | gym_unblockme/envs/unblockme_render.py | fedingo/gym-unblockme | a4dd20a7608122e09862d681259111e2634f3d4b | [
"MIT"
]
| 3 | 2019-02-12T15:53:17.000Z | 2019-07-03T12:00:32.000Z | gym_unblockme/envs/unblockme_render.py | fedingo/gym-unblockme | a4dd20a7608122e09862d681259111e2634f3d4b | [
"MIT"
]
| null | null | null | gym_unblockme/envs/unblockme_render.py | fedingo/gym-unblockme | a4dd20a7608122e09862d681259111e2634f3d4b | [
"MIT"
]
| null | null | null | import pygame
import time
import numpy as np
import sys
gray = (150, 150, 150)
white = (255, 255, 255)
black = (0, 0, 0, )
red_block = (255, 0, 0)
red_border = (76, 0, 19)
block_color = (255, 128, 0)
border_color = (165,42,42)
screen = None
SIDE = 50
BORDER = 5
MARGIN = 5
LINE = 1
h_switch = True
def __draw_horizontal_block(x,y):
global screen, h_switch
pygame.draw.rect(screen, border_color, pygame.Rect(MARGIN + y*SIDE,MARGIN + x*SIDE, SIDE, SIDE))
pygame.draw.rect(screen, block_color, pygame.Rect(MARGIN + y*SIDE + h_switch*BORDER, MARGIN + x*SIDE + BORDER,
SIDE - BORDER, SIDE - 2*BORDER))
h_switch = not h_switch
def __draw_red_block(x,y):
global screen, h_switch
pygame.draw.rect(screen, red_border, pygame.Rect(MARGIN + y*SIDE,MARGIN + x*SIDE, SIDE, SIDE))
pygame.draw.rect(screen, red_block, pygame.Rect(MARGIN + y*SIDE + h_switch*BORDER, MARGIN + x*SIDE + BORDER,
SIDE - BORDER, SIDE - 2*BORDER))
h_switch = not h_switch
def __draw_vertical_block(x,y):
global screen
pygame.draw.rect(screen, border_color, pygame.Rect(MARGIN + y*SIDE, MARGIN + x*SIDE, SIDE, 2*SIDE))
pygame.draw.rect(screen, block_color, pygame.Rect(MARGIN + y*SIDE + BORDER, MARGIN + x*SIDE + BORDER,
SIDE - 2*BORDER, 2*SIDE - 2*BORDER))
## Render function for the unblockme_class
def render_unblockme(game_object):
matrix = game_object.internal_state
k, h, _ = game_object.shape
global screen
if screen is None:
pygame.init()
screen = pygame.display.set_mode((2*MARGIN+k*SIDE, 2*MARGIN+h*SIDE))
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.display.quit()
pygame.quit()
sys.exit(0)
screen.fill(black)
# first we draw the background
for x in range(0,k):
for y in range(0,h):
cell = matrix[x,y,:]
selected_block = np.where(cell == 1)[0]
if len(selected_block) != 0:
#draw the exit on the outer border
if selected_block[0] == 0:
if y == 0:
pygame.draw.rect(screen, white, pygame.Rect(y*SIDE,x*SIDE+MARGIN, SIDE+MARGIN, SIDE))
else:
pygame.draw.rect(screen, white, pygame.Rect(y*SIDE+MARGIN,x*SIDE+MARGIN, SIDE+MARGIN, SIDE))
# Draw the background with the grid pattern
pygame.draw.rect(screen, gray , pygame.Rect(MARGIN + y*SIDE,MARGIN + x*SIDE, SIDE, SIDE))
pygame.draw.rect(screen, white, pygame.Rect(MARGIN + y*SIDE + LINE,MARGIN + x*SIDE + LINE,
SIDE - 2*LINE, SIDE - 2*LINE))
# then we draw the blocks in the grid
for x in range(0,k):
for y in range(0,h):
cell = matrix[x,y,1:]
selected_block = np.where(cell == 1)[0]
if len(selected_block) != 0:
if selected_block[-1] == 1:
__draw_horizontal_block(x,y)
elif selected_block[-1] == 2:
if (x == 0 or not (matrix[x-1,y,1:] == cell).all() ) and \
(x != k-1 and (matrix[x+1,y,1:] == cell).all() ):
__draw_vertical_block(x,y)
elif selected_block[-1] == 0:
__draw_red_block(x,y)
pygame.display.update()
time.sleep(0.1)
if __name__ == "__main__":
from unblockme_class import *
matrix, goal = get_example()
game = unblock_me(matrix, goal)
render_unblockme(game) | 36.637255 | 116 | 0.557399 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 196 | 0.052448 |
1c98d508fd84565e1b07d0b60db1b387344d3b53 | 2,852 | py | Python | scaffolds/__init__.py | chhsiao1981/frontend_template | fcd68c47d9ba3b04c8eb59c684c93baa20a688aa | [
"MIT"
]
| null | null | null | scaffolds/__init__.py | chhsiao1981/frontend_template | fcd68c47d9ba3b04c8eb59c684c93baa20a688aa | [
"MIT"
]
| null | null | null | scaffolds/__init__.py | chhsiao1981/frontend_template | fcd68c47d9ba3b04c8eb59c684c93baa20a688aa | [
"MIT"
]
| null | null | null | # API
from pyramid.scaffolds import PyramidTemplate
import os
import re
import logging
def _camelcase_to_upper_camel_case(the_str):
if not the_str:
return ''
return the_str[0].upper() + the_str[1:]
def _upper_camelcase_to_camelcase(the_str):
if not the_str:
return ''
return the_str[0].lower() + the_str[1:]
def _camelcase_to_constant(the_str):
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', the_str)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).upper()
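# Examples of the helpers above (editor's addition, not in the original file):
#   _camelcase_to_upper_camel_case('myModule') -> 'MyModule'
#   _upper_camelcase_to_camelcase('MyModule')  -> 'myModule'
#   _camelcase_to_constant('myHttpModule')     -> 'MY_HTTP_MODULE'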
class MyTemplate(PyramidTemplate):
def pre(self, command, output_dir, vars):
the_args = command.args
module_name = '' if not isinstance(the_args, list) or len(the_args) < 2 else the_args[1]
logging.warning('command: %s output_dir: %s vars: %s args: %s module_name: %s', command, output_dir, vars, command.args, module_name)
self._setup_module(vars, module_name)
return PyramidTemplate.pre(self, command, output_dir, vars)
def _setup_module(self, vars, full_module_name):
full_module_path = full_module_name.replace('.', os.path.sep)
module_name = os.path.basename(full_module_path)
class_name = _camelcase_to_upper_camel_case(module_name)
constant_name = _camelcase_to_constant(module_name)
sub_pkg_dir = os.path.dirname(full_module_path)
sub_pkg_name = sub_pkg_dir.replace(os.path.sep, '.')
test_name = '' if not module_name else 'test' + class_name
sub_pkg_dir_list = [] if not sub_pkg_dir else sub_pkg_dir.split(os.path.sep)
test_dir_list = ['test_' + each_pkg for each_pkg in sub_pkg_dir_list]
test_dir = os.path.sep.join(test_dir_list)
pkg_name = vars['package']
if sub_pkg_name:
pkg_name += '.' + sub_pkg_name
project_name = vars['project']
vars['module_name'] = module_name
vars['class_name'] = class_name
vars['sub_pkg_name'] = sub_pkg_name
vars['sub_pkg_dir'] = sub_pkg_dir
vars['constant_name'] = constant_name
vars['test_name'] = test_name
vars['test_dir'] = test_dir
vars['pkg_name'] = pkg_name
vars['project_name'] = project_name
class ComponentProjectTemplate(MyTemplate):
_template_dir = 'component'
summary = 'component'
class ContainerProjectTemplate(MyTemplate):
_template_dir = 'container'
summary = 'container'
class SubContainerProjectTemplate(MyTemplate):
_template_dir = 'subcontainer'
summary = 'subcontainer'
class ModuleProjectTemplate(MyTemplate):
_template_dir = 'module'
summary = 'module'
class InitStarterProjectTemplate(MyTemplate):
_template_dir = 'init_starter'
summary = 'including store / middleware / utils'
class InitDevProjectTemplate(MyTemplate):
_template_dir = 'init_dev'
summary = 'starting project'
| 29.102041 | 141 | 0.678822 | 2,331 | 0.817321 | 0 | 0 | 0 | 0 | 0 | 0 | 448 | 0.157083 |
1c990cbd7a7616bc0cdb891dffbb562850c5ab57 | 21,364 | py | Python | phy/cluster/tests/test_supervisor.py | mikailweston/phy | d774cb989152a4b7344ac9b70c79c204a5036763 | [
"BSD-3-Clause"
]
| null | null | null | phy/cluster/tests/test_supervisor.py | mikailweston/phy | d774cb989152a4b7344ac9b70c79c204a5036763 | [
"BSD-3-Clause"
]
| null | null | null | phy/cluster/tests/test_supervisor.py | mikailweston/phy | d774cb989152a4b7344ac9b70c79c204a5036763 | [
"BSD-3-Clause"
]
| null | null | null | # -*- coding: utf-8 -*-
"""Test GUI component."""
#------------------------------------------------------------------------------
# Imports
#------------------------------------------------------------------------------
#from contextlib import contextmanager
from pytest import yield_fixture, fixture, raises
import numpy as np
from numpy.testing import assert_array_equal as ae
from .. import supervisor as _supervisor
from ..supervisor import (Supervisor,
TaskLogger,
ClusterView,
SimilarityView,
ActionCreator,
)
from phy.gui import GUI
from phy.gui.widgets import Barrier
from phy.gui.qt import qInstallMessageHandler
from phy.gui.tests.test_widgets import _assert, _wait_until_table_ready
from phy.utils.context import Context
from phylib.utils import connect, Bunch, emit
def handler(msg_type, msg_log_context, msg_string):
pass
qInstallMessageHandler(handler)
#------------------------------------------------------------------------------
# Fixtures
#------------------------------------------------------------------------------
@yield_fixture
def gui(tempdir, qtbot):
# NOTE: mock patch show box exec_
_supervisor._show_box = lambda _: _
gui = GUI(position=(200, 100), size=(500, 500), config_dir=tempdir)
gui.set_default_actions()
gui.show()
qtbot.waitForWindowShown(gui)
yield gui
qtbot.wait(5)
gui.close()
del gui
qtbot.wait(5)
@fixture
def supervisor(qtbot, gui, cluster_ids, cluster_groups, cluster_labels,
similarity, tempdir):
spike_clusters = np.repeat(cluster_ids, 2)
s = Supervisor(
spike_clusters,
cluster_groups=cluster_groups,
cluster_labels=cluster_labels,
similarity=similarity,
context=Context(tempdir),
sort=('id', 'desc'),
)
s.attach(gui)
b = Barrier()
connect(b('cluster_view'), event='ready', sender=s.cluster_view)
connect(b('similarity_view'), event='ready', sender=s.similarity_view)
b.wait()
return s
#------------------------------------------------------------------------------
# Test tasks
#------------------------------------------------------------------------------
@fixture
def tl():
class MockClusterView(object):
_selected = [0]
def select(self, cl, callback=None, **kwargs):
self._selected = cl
callback({'selected': cl, 'next': cl[-1] + 1})
def next(self, callback=None):
callback({'selected': [self._selected[-1] + 1], 'next': self._selected[-1] + 2})
def previous(self, callback=None): # pragma: no cover
callback({'selected': [self._selected[-1] - 1], 'next': self._selected[-1]})
class MockSimilarityView(MockClusterView):
pass
class MockSupervisor(object):
def merge(self, cluster_ids, to, callback=None):
callback(Bunch(deleted=cluster_ids, added=[to]))
def split(self, old_cluster_ids, new_cluster_ids, callback=None):
callback(Bunch(deleted=old_cluster_ids, added=new_cluster_ids))
def move(self, which, group, callback=None):
callback(Bunch(metadata_changed=which, metadata_value=group))
def undo(self, callback=None):
callback(Bunch())
def redo(self, callback=None):
callback(Bunch())
out = TaskLogger(MockClusterView(), MockSimilarityView(), MockSupervisor())
return out
def test_task_1(tl):
assert tl.last_state(None) is None
def test_task_2(tl):
tl.enqueue(tl.cluster_view, 'select', [0])
tl.process()
assert tl.last_state() == ([0], 1, None, None)
def test_task_3(tl):
tl.enqueue(tl.cluster_view, 'select', [0])
tl.enqueue(tl.similarity_view, 'select', [100])
tl.process()
assert tl.last_state() == ([0], 1, [100], 101)
def test_task_merge(tl):
tl.enqueue(tl.cluster_view, 'select', [0])
tl.enqueue(tl.similarity_view, 'select', [100])
tl.enqueue(tl.supervisor, 'merge', [0, 100], 1000)
tl.process()
assert tl.last_state() == ([1000], 1001, None, None)
tl.enqueue(tl.supervisor, 'undo')
tl.process()
assert tl.last_state() == ([0], 1, [100], 101)
tl.enqueue(tl.supervisor, 'redo')
tl.process()
assert tl.last_state() == ([1000], 1001, None, None)
def test_task_split(tl):
tl.enqueue(tl.cluster_view, 'select', [0])
tl.enqueue(tl.similarity_view, 'select', [100])
tl.enqueue(tl.supervisor, 'split', [0, 100], [1000, 1001])
tl.process()
assert tl.last_state() == ([1000, 1001], 1002, None, None)
def test_task_move_1(tl):
tl.enqueue(tl.cluster_view, 'select', [0])
tl.enqueue(tl.supervisor, 'move', [0], 'good')
tl.process()
assert tl.last_state() == ([1], 2, None, None)
def test_task_move_best(tl):
tl.enqueue(tl.cluster_view, 'select', [0])
tl.enqueue(tl.similarity_view, 'select', [100])
tl.enqueue(tl.supervisor, 'move', 'best', 'good')
tl.process()
assert tl.last_state() == ([1], 2, None, None)
def test_task_move_similar(tl):
tl.enqueue(tl.cluster_view, 'select', [0])
tl.enqueue(tl.similarity_view, 'select', [100])
tl.enqueue(tl.supervisor, 'move', 'similar', 'good')
tl.process()
assert tl.last_state() == ([0], 1, [101], 102)
def test_task_move_all(tl):
tl.enqueue(tl.cluster_view, 'select', [0])
tl.enqueue(tl.similarity_view, 'select', [100])
tl.enqueue(tl.supervisor, 'move', 'all', 'good')
tl.process()
assert tl.last_state() == ([1], 2, [101], 102)
#------------------------------------------------------------------------------
# Test cluster and similarity views
#------------------------------------------------------------------------------
@fixture
def data():
_data = [{"id": i,
"n_spikes": 100 - 10 * i,
"group": {2: 'noise', 3: 'noise', 5: 'mua', 8: 'good'}.get(i, None),
"is_masked": i in (2, 3, 5),
} for i in range(10)]
return _data
def test_cluster_view_1(qtbot, gui, data):
cv = ClusterView(gui, data=data)
_wait_until_table_ready(qtbot, cv)
cv.sort_by('n_spikes', 'asc')
cv.select([1])
qtbot.wait(10)
assert cv.state == {'current_sort': ('n_spikes', 'asc'), 'selected': [1]}
cv.set_state({'current_sort': ('id', 'desc'), 'selected': [2]})
assert cv.state == {'current_sort': ('id', 'desc'), 'selected': [2]}
def test_similarity_view_1(qtbot, gui, data):
sv = SimilarityView(gui, data=data)
_wait_until_table_ready(qtbot, sv)
@connect(sender=sv)
def on_request_similar_clusters(sender, cluster_id):
return [{'id': id} for id in (100 + cluster_id, 110 + cluster_id, 102 + cluster_id)]
sv.reset([5])
_assert(sv.get_ids, [105, 115, 107])
def test_cluster_view_extra_columns(qtbot, gui, data):
for cl in data:
cl['my_metrics'] = cl['id'] * 1000
cv = ClusterView(gui, data=data, columns=['id', 'n_spikes', 'my_metrics'])
_wait_until_table_ready(qtbot, cv)
#------------------------------------------------------------------------------
# Test ActionCreator
#------------------------------------------------------------------------------
def test_action_creator_1(qtbot, gui):
ac = ActionCreator()
ac.attach(gui)
gui.show()
#------------------------------------------------------------------------------
# Test GUI component
#------------------------------------------------------------------------------
def _select(supervisor, cluster_ids, similar=None):
supervisor.task_logger.enqueue(supervisor.cluster_view, 'select', cluster_ids)
if similar is not None:
supervisor.task_logger.enqueue(supervisor.similarity_view, 'select', similar)
supervisor.task_logger.process()
supervisor.block()
supervisor.task_logger.show_history()
assert supervisor.task_logger.last_state()[0] == cluster_ids
assert supervisor.task_logger.last_state()[2] == similar
def _assert_selected(supervisor, sel):
assert supervisor.selected == sel
def test_select(qtbot, supervisor):
_select(supervisor, [30], [20])
_assert_selected(supervisor, [30, 20])
def test_supervisor_busy(qtbot, supervisor):
_select(supervisor, [30], [20])
o = object()
emit('is_busy', o, True)
assert supervisor._is_busy
# The action fails while the supervisor is busy.
with raises(RuntimeError):
emit('action', supervisor.action_creator, 'merge')
emit('is_busy', o, False)
assert not supervisor._is_busy
# The action succeeds because the supervisor is no longer busy.
emit('action', supervisor.action_creator, 'merge')
supervisor.block()
assert not supervisor._is_busy
def test_supervisor_cluster_metrics(
qtbot, gui, cluster_ids, cluster_groups, similarity, tempdir):
spike_clusters = np.repeat(cluster_ids, 2)
def my_metrics(cluster_id):
return cluster_id ** 2
cluster_metrics = {'my_metrics': my_metrics}
mc = Supervisor(spike_clusters,
cluster_groups=cluster_groups,
cluster_metrics=cluster_metrics,
similarity=similarity,
context=Context(tempdir),
)
mc.attach(gui)
b = Barrier()
connect(b('cluster_view'), event='ready', sender=mc.cluster_view)
connect(b('similarity_view'), event='ready', sender=mc.similarity_view)
b.wait()
assert 'my_metrics' in mc.columns
def test_supervisor_select_1(qtbot, supervisor):
# WARNING: always use actions in tests, because this doesn't call
# the supervisor method directly, but raises an event, enqueue the task,
# and call TaskLogger.process() which handles the cascade of callbacks.
supervisor.select_actions.select([0])
supervisor.block()
_assert_selected(supervisor, [0])
supervisor.task_logger.show_history()
def test_supervisor_color(qtbot, supervisor):
supervisor.view_actions.colormap_linear()
supervisor.view_actions.color_field_n_spikes()
supervisor.view_actions.toggle_categorical_colormap(False)
supervisor.view_actions.toggle_logarithmic_colormap(True)
def test_supervisor_select_2(qtbot, supervisor):
supervisor.select_actions.next_best()
supervisor.block()
_assert_selected(supervisor, [30])
def test_supervisor_select_order(qtbot, supervisor):
_select(supervisor, [1, 0])
_assert_selected(supervisor, [1, 0])
_select(supervisor, [0, 1])
_assert_selected(supervisor, [0, 1])
def test_supervisor_edge_cases(supervisor):
# Empty selection at first.
ae(supervisor.clustering.cluster_ids, [0, 1, 2, 10, 11, 20, 30])
_select(supervisor, [0])
supervisor.undo()
supervisor.block()
supervisor.redo()
supervisor.block()
# Merge.
supervisor.merge()
supervisor.block()
_assert_selected(supervisor, [0])
supervisor.merge([])
supervisor.block()
_assert_selected(supervisor, [0])
supervisor.merge([10])
supervisor.block()
_assert_selected(supervisor, [0])
# Split.
supervisor.split([])
supervisor.block()
_assert_selected(supervisor, [0])
# Move.
supervisor.move('ignored', [])
supervisor.block()
supervisor.save()
def test_supervisor_save(qtbot, gui, supervisor):
emit('request_save', gui)
def test_supervisor_skip(qtbot, gui, supervisor):
# yield [0, 1, 2, 10, 11, 20, 30]
# # i, g, N, i, g, N, N
expected = [30, 20, 11, 2, 1]
for clu in expected:
supervisor.select_actions.next_best()
supervisor.block()
_assert_selected(supervisor, [clu])
def test_supervisor_sort(qtbot, supervisor):
supervisor.sort('id', 'desc')
qtbot.wait(50)
assert supervisor.state.cluster_view.current_sort == ('id', 'desc')
supervisor.select_actions.sort_by_n_spikes()
qtbot.wait(50)
assert supervisor.state.cluster_view.current_sort == ('n_spikes', 'desc')
def test_supervisor_filter(qtbot, supervisor):
supervisor.filter('5 <= id && id <= 20')
qtbot.wait(50)
_cl = []
supervisor.cluster_view.get_ids(lambda cluster_ids: _cl.extend(cluster_ids))
qtbot.wait(50)
assert _cl == [20, 11, 10]
def test_supervisor_merge_1(qtbot, supervisor):
_select(supervisor, [30], [20])
_assert_selected(supervisor, [30, 20])
supervisor.actions.merge()
supervisor.block()
_assert_selected(supervisor, [31])
supervisor.actions.undo()
supervisor.block()
_assert_selected(supervisor, [30, 20])
supervisor.actions.redo()
supervisor.block()
supervisor.task_logger.show_history()
_assert_selected(supervisor, [31])
assert supervisor.is_dirty()
def test_supervisor_merge_event(qtbot, supervisor):
_select(supervisor, [30], [20])
_l = []
@connect(sender=supervisor)
def on_select(sender, cluster_ids):
_l.append(cluster_ids)
supervisor.actions.merge()
supervisor.block()
# After a merge, there should be only one select event.
assert len(_l) == 1
def test_supervisor_merge_move(qtbot, supervisor):
"""Check that merge then move selects the next cluster in the original
cluster view, not the updated cluster view."""
_select(supervisor, [20, 11], [])
_assert_selected(supervisor, [20, 11])
supervisor.actions.merge()
supervisor.block()
_assert_selected(supervisor, [31])
supervisor.actions.move('good', 'all')
supervisor.block()
_assert_selected(supervisor, [30])
supervisor.actions.move('good', 'all')
supervisor.block()
_assert_selected(supervisor, [2])
def test_supervisor_split_0(qtbot, supervisor):
_select(supervisor, [1, 2])
_assert_selected(supervisor, [1, 2])
supervisor.actions.split([1, 2])
supervisor.block()
_assert_selected(supervisor, [31, 32, 33])
supervisor.actions.undo()
supervisor.block()
_assert_selected(supervisor, [1, 2])
supervisor.actions.redo()
supervisor.block()
_assert_selected(supervisor, [31, 32, 33])
def test_supervisor_split_1(supervisor):
supervisor.select_actions.select([1, 2])
supervisor.block()
@connect(sender=supervisor)
def on_request_split(sender):
return [1, 2]
supervisor.actions.split()
supervisor.block()
_assert_selected(supervisor, [31, 32, 33])
def test_supervisor_split_2(gui, similarity):
spike_clusters = np.array([0, 0, 1])
supervisor = Supervisor(spike_clusters,
similarity=similarity,
)
supervisor.attach(gui)
b = Barrier()
connect(b('cluster_view'), event='ready', sender=supervisor.cluster_view)
connect(b('similarity_view'), event='ready', sender=supervisor.similarity_view)
b.wait()
supervisor.actions.split([0])
supervisor.block()
_assert_selected(supervisor, [2, 3])
def test_supervisor_state(tempdir, qtbot, gui, supervisor):
supervisor.select(1)
cv = supervisor.cluster_view
assert supervisor.state.cluster_view.current_sort == ('id', 'desc')
assert supervisor.state.cluster_view.selected == [1]
cv.sort_by('id')
assert supervisor.state.cluster_view.current_sort == ('id', 'asc')
cv.set_state({'current_sort': ('n_spikes', 'desc')})
assert supervisor.state.cluster_view.current_sort == ('n_spikes', 'desc')
cv.sort_by('id', 'desc')
assert supervisor.all_cluster_ids == [30, 20, 11, 10, 2, 1, 0]
def test_supervisor_label(supervisor):
_select(supervisor, [20])
supervisor.label("my_field", 3.14)
supervisor.block()
supervisor.label("my_field", 1.23, cluster_ids=30)
supervisor.block()
assert 'my_field' in supervisor.fields
assert supervisor.get_labels('my_field')[20] == 3.14
assert supervisor.get_labels('my_field')[30] == 1.23
def test_supervisor_label_cluster_1(supervisor):
_select(supervisor, [20, 30])
supervisor.label("my_field", 3.14)
supervisor.block()
# Same value for the old clusters.
l = supervisor.get_labels('my_field')
assert l[20] == l[30] == 3.14
up = supervisor.merge()
supervisor.block()
assert supervisor.get_labels('my_field')[up.added[0]] == 3.14
def test_supervisor_label_cluster_2(supervisor):
_select(supervisor, [20])
supervisor.label("my_field", 3.14)
supervisor.block()
# One of the parents.
l = supervisor.get_labels('my_field')
assert l[20] == 3.14
assert l[30] is None
up = supervisor.merge([20, 30])
supervisor.block()
assert supervisor.get_labels('my_field')[up.added[0]] == 3.14
def test_supervisor_label_cluster_3(supervisor):
# Conflict: largest cluster wins.
_select(supervisor, [20, 30])
supervisor.label("my_field", 3.14)
supervisor.block()
# Create merged cluster from 20 and 30.
up = supervisor.merge()
new = up.added[0]
supervisor.block()
    # It got the label of its parents.
assert supervisor.get_labels('my_field')[new] == 3.14
# Now, we label a smaller cluster.
supervisor.label("my_field", 2.718, cluster_ids=[10])
# We merge the large and small cluster together.
up = supervisor.merge(up.added + [10])
supervisor.block()
# The new cluster should have the value of the first, merged big cluster, i.e. 3.14.
assert supervisor.get_labels('my_field')[up.added[0]] == 3.14
def test_supervisor_move_1(supervisor):
_select(supervisor, [20])
_assert_selected(supervisor, [20])
assert not supervisor.move('', '')
supervisor.actions.move('noise', 'all')
supervisor.block()
_assert_selected(supervisor, [11])
supervisor.actions.undo()
supervisor.block()
_assert_selected(supervisor, [20])
supervisor.actions.redo()
supervisor.block()
_assert_selected(supervisor, [11])
def test_supervisor_move_2(supervisor):
_select(supervisor, [20], [10])
_assert_selected(supervisor, [20, 10])
supervisor.actions.move('noise', 10)
supervisor.block()
_assert_selected(supervisor, [20, 2])
supervisor.actions.undo()
supervisor.block()
_assert_selected(supervisor, [20, 10])
supervisor.actions.redo()
supervisor.block()
_assert_selected(supervisor, [20, 2])
def test_supervisor_move_3(qtbot, supervisor):
supervisor.select_actions.next()
supervisor.block()
_assert_selected(supervisor, [30])
supervisor.actions.move_best_to_noise()
supervisor.block()
_assert_selected(supervisor, [20])
supervisor.actions.move_best_to_mua()
supervisor.block()
_assert_selected(supervisor, [11])
supervisor.actions.move_best_to_good()
supervisor.block()
_assert_selected(supervisor, [2])
supervisor.cluster_meta.get('group', 30) == 'noise'
supervisor.cluster_meta.get('group', 20) == 'mua'
supervisor.cluster_meta.get('group', 11) == 'good'
def test_supervisor_move_4(supervisor):
_select(supervisor, [30], [20])
_assert_selected(supervisor, [30, 20])
supervisor.actions.move_similar_to_noise()
supervisor.block()
_assert_selected(supervisor, [30, 11])
supervisor.actions.move_similar_to_mua()
supervisor.block()
_assert_selected(supervisor, [30, 2])
supervisor.actions.move_similar_to_good()
supervisor.block()
_assert_selected(supervisor, [30, 1])
supervisor.cluster_meta.get('group', 20) == 'noise'
supervisor.cluster_meta.get('group', 11) == 'mua'
supervisor.cluster_meta.get('group', 2) == 'good'
def test_supervisor_move_5(supervisor):
_select(supervisor, [30], [20])
_assert_selected(supervisor, [30, 20])
supervisor.actions.move_all_to_noise()
supervisor.block()
_assert_selected(supervisor, [11, 2])
supervisor.select_actions.next()
supervisor.block()
_assert_selected(supervisor, [11, 1])
supervisor.actions.move_all_to_mua()
supervisor.block()
_assert_selected(supervisor, [2])
supervisor.actions.move_all_to_good()
supervisor.block()
_assert_selected(supervisor, [])
supervisor.cluster_meta.get('group', 30) == 'noise'
supervisor.cluster_meta.get('group', 20) == 'noise'
supervisor.cluster_meta.get('group', 11) == 'mua'
supervisor.cluster_meta.get('group', 10) == 'mua'
supervisor.cluster_meta.get('group', 2) == 'good'
supervisor.cluster_meta.get('group', 1) == 'good'
def test_supervisor_reset(qtbot, supervisor):
supervisor.select_actions.select([10, 11])
supervisor.select_actions.reset_wizard()
supervisor.block()
_assert_selected(supervisor, [30])
supervisor.select_actions.next()
supervisor.block()
_assert_selected(supervisor, [30, 20])
supervisor.select_actions.next()
supervisor.block()
_assert_selected(supervisor, [30, 11])
supervisor.select_actions.previous()
supervisor.block()
_assert_selected(supervisor, [30, 20])
def test_supervisor_nav(qtbot, supervisor):
supervisor.select_actions.reset_wizard()
supervisor.block()
_assert_selected(supervisor, [30])
supervisor.select_actions.next_best()
supervisor.block()
_assert_selected(supervisor, [20])
supervisor.select_actions.previous_best()
supervisor.block()
_assert_selected(supervisor, [30])
| 27.460154 | 92 | 0.637521 | 1,108 | 0.051863 | 332 | 0.01554 | 2,792 | 0.130687 | 0 | 0 | 3,349 | 0.156759 |
1c9997692782ea5187e69a11b0059d2cc2e4c11c | 2,733 | py | Python | Source/CommandManager.py | SOBotics/Botpy | 8e3eb48fcc2a46fd60f4d49832941fa1b71bc223 | [
"WTFPL"
]
| 5 | 2017-09-19T10:19:33.000Z | 2020-10-11T13:29:43.000Z | Source/CommandManager.py | SOBotics/Botpy | 8e3eb48fcc2a46fd60f4d49832941fa1b71bc223 | [
"WTFPL"
]
| 33 | 2018-05-14T09:05:06.000Z | 2020-04-20T08:48:59.000Z | Source/CommandManager.py | SOBotics/Botpy | 8e3eb48fcc2a46fd60f4d49832941fa1b71bc223 | [
"WTFPL"
]
| 1 | 2017-09-27T10:40:34.000Z | 2017-09-27T10:40:34.000Z | #
# CommandManager.py
# Botpy
#
# Created by Ashish Ahuja on 4th September 2017.
#
#
import threading
import chatexchange as ce
class CommandManager:
def __init__(self, commands):
self.commands = commands
self.running_commands = []
def run_command(self, command):
if command.privileges() == 0:
command_thread = threading.Thread(target=command.run)
self.running_commands.append([command, command_thread])
command_thread.start()
return
if command.message.room.is_user_privileged(command.message.user.id, command.privileges()):
command_thread = threading.Thread(target=command.run)
self.running_commands.append([command, command_thread])
command_thread.start()
return
command.reply("You do not have sufficient privileges to run this command.")
def handle_command(self, message):
try:
message_content = message.content.split()
del message_content[0]
except AttributeError:
return
for command in self.commands:
command_usage = command.usage()
usage_index = -1
for usage in command_usage:
usage_index += 1
usage_components = usage.split()
args = []
match = True
last_index = min(len(usage_components), len(message_content))
for i in range(last_index):
content_component = message_content[i]
usage_component = usage_components[i]
if usage_component == '*':
args.append(content_component)
elif usage_component == '...':
#Everything else is arguments
temp_index = i
while temp_index < len(message_content):
args.append(message_content[temp_index])
temp_index += 1
elif content_component != usage_component:
match = False
min_count = len(usage_components) - 1 \
if usage_components[-1] == '...' else len(usage_components)
if len(message_content) < min_count:
match = False
if match:
self.run_command(command(self, message, args, usage_index))
return
def cleanup_finished_commands(self):
        # iterate over a copy so items can be removed from the list safely
        for command, command_thread in list(self.running_commands):
if not command_thread.isAlive():
self.running_commands.remove([command, command_thread])
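    # Illustrative note (editor's addition): for a command whose usage() is
    # ["say * ..."], a message whose content splits into
    # ['@Botpy', 'say', 'hello', 'to', 'everyone'] is matched by
    # handle_command() with args == ['hello', 'to', 'everyone'] ('*' captures
    # one word, '...' captures everything that follows); the command name and
    # message are made up for the example.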
| 35.038462 | 98 | 0.555799 | 2,602 | 0.952067 | 0 | 0 | 0 | 0 | 0 | 0 | 180 | 0.065862 |
1c9a0955e72ca504725f135176d44e72aae8607c | 1,237 | py | Python | tests/periodicities/gen_makefile.py | jmabry/pyaf | afbc15a851a2445a7824bf255af612dc429265af | [
"BSD-3-Clause"
]
| 377 | 2016-10-13T20:52:44.000Z | 2022-03-29T18:04:14.000Z | tests/periodicities/gen_makefile.py | jmabry/pyaf | afbc15a851a2445a7824bf255af612dc429265af | [
"BSD-3-Clause"
]
| 160 | 2016-10-13T16:11:53.000Z | 2022-03-28T04:21:34.000Z | tests/periodicities/gen_makefile.py | jmabry/pyaf | afbc15a851a2445a7824bf255af612dc429265af | [
"BSD-3-Clause"
]
| 63 | 2017-03-09T14:51:18.000Z | 2022-03-27T20:52:57.000Z | import os
import glob
subdirs = glob.glob("tests/periodicities/*");
subdirs = ['tests/periodicities/Month',
'tests/periodicities/Minute',
'tests/periodicities/Week',
'tests/periodicities/Business_Hour',
'tests/periodicities/Business_Day',
'tests/periodicities/Second',
'tests/periodicities/Semi_Month',
'tests/periodicities/Hour',
'tests/periodicities/Day']
#print(subdirs)
print("PYTHON=python3\n\n");
lAllTarget = "";
for subdir1 in sorted(subdirs):
lBase = os.path.basename(subdir1);
test_target = "";
for filename in sorted(glob.glob(subdir1 + "/*.py")):
bn = os.path.basename(filename);
logfile = bn.replace("/" , "_");
logfile = "logs/periodicities_" + logfile.replace(".py" , ".log");
print("#PROCESSING FILE : " , filename, bn , logfile);
print(bn , " : " , "\n\t", "-$(PYTHON) " , filename , " > " , logfile , " 2>&1");
test_target = bn + " " + test_target;
lAllTarget = lAllTarget + " " + lBase;
print("\n\n", lBase , ": ", test_target, "\n" , "\n");
print("\n# ********************************************** \n");
print("all: " , lAllTarget , "\n\t\n");
| 32.552632 | 89 | 0.5481 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 519 | 0.419563 |
1c9a500d9b0aad2641a02cde2360c7ed9e056cc6 | 3,494 | py | Python | test/test_util_registry.py | SimulatedANeal/carpedm | 22bd5d28cfff50d7462e2a8e1b8dc1675e2a4c89 | [
"MIT"
]
| 2 | 2020-09-30T04:59:06.000Z | 2021-03-30T20:42:44.000Z | test/test_util_registry.py | SimulatedANeal/carpedm | 22bd5d28cfff50d7462e2a8e1b8dc1675e2a4c89 | [
"MIT"
]
| null | null | null | test/test_util_registry.py | SimulatedANeal/carpedm | 22bd5d28cfff50d7462e2a8e1b8dc1675e2a4c89 | [
"MIT"
]
| 1 | 2018-05-25T07:15:16.000Z | 2018-05-25T07:15:16.000Z | #
# Copyright (C) 2018 Neal Digre.
#
# This software may be modified and distributed under the terms
# of the MIT license. See the LICENSE file for details.
#
#
# Portions of this module are copied or lightly modified from the
# Tensor2Tensor registry_test module, so here is their license:
#
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for utils.registry
References:
Slight modification of `Tensor2Tensor registry_test`_.
.. _Tensor2Tensor registry_test: https://github.com/tensorflow/
tensor2tensor/blob/master/tensor2tensor/utils/registry_test.py
"""
import unittest
from carpedm.util import registry
from carpedm.models.generic import Model
from carpedm.models.baseline import SingleCharBaseline
class ModelRegistryTest(unittest.TestCase):
def setUp(self):
registry._reset()
def test_model_registration(self):
@registry.register_model
class MyModel1(Model):
pass
model = registry.model("my_model1")
self.assertTrue(model is MyModel1)
def test_named_registration(self):
@registry.register_model("model2")
class MyModel1(Model):
pass
model = registry.model("model2")
self.assertTrue(model is MyModel1)
def test_request_unprovided_model(self):
with self.assertRaisesRegex(LookupError, "never registered"):
_ = registry.model("not_provided")
def test_duplicate_registration(self):
@registry.register_model
def m1():
pass
with self.assertRaisesRegex(LookupError, "already registered"):
@registry.register_model("m1")
def m2():
pass
def test_list_models(self):
@registry.register_model
def m1():
pass
@registry.register_model
def m2():
pass
self.assertSetEqual({"m1", "m2"}, set(registry.list_models()))
def test_snake_case(self):
convert = registry._convert_camel_to_snake
self.assertEqual("typical_camel_case", convert("TypicalCamelCase"))
self.assertEqual("numbers_fuse2gether", convert("NumbersFuse2gether"))
self.assertEqual("numbers_fuse2_gether", convert("NumbersFuse2Gether"))
self.assertEqual("lstm_seq2_seq", convert("LSTMSeq2Seq"))
self.assertEqual("starts_lower", convert("startsLower"))
self.assertEqual("starts_lower_caps", convert("startsLowerCAPS"))
self.assertEqual("caps_fuse_together", convert("CapsFUSETogether"))
self.assertEqual("startscap", convert("Startscap"))
self.assertEqual("s_tartscap", convert("STartscap"))
class ModelProvidedTest(unittest.TestCase):
def setUp(self):
from carpedm import models
def test_access_provided_model(self):
model = registry.model("single_char_baseline")
self.assertTrue(model is SingleCharBaseline)
if __name__ == '__main__':
unittest.main()
| 29.361345 | 79 | 0.697195 | 2,169 | 0.620778 | 0 | 0 | 404 | 0.115627 | 0 | 0 | 1,514 | 0.433314 |
1c9a93508b958132a0c4b43ee8f248a5003366fb | 383 | py | Python | pay-api/migrations/versions/8f7565cf50c1_.py | stevenc987/sbc-pay | 04f02f362f88a30c082b0643583b8d0ebff6063f | [
"Apache-2.0"
]
| null | null | null | pay-api/migrations/versions/8f7565cf50c1_.py | stevenc987/sbc-pay | 04f02f362f88a30c082b0643583b8d0ebff6063f | [
"Apache-2.0"
]
| null | null | null | pay-api/migrations/versions/8f7565cf50c1_.py | stevenc987/sbc-pay | 04f02f362f88a30c082b0643583b8d0ebff6063f | [
"Apache-2.0"
]
| null | null | null | """empty message
Revision ID: 8f7565cf50c1
Revises: 872760122cc9, 8652bf9c03ff
Create Date: 2020-10-02 11:11:49.823678
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '8f7565cf50c1'
down_revision = ('872760122cc9', '8652bf9c03ff')
branch_labels = None
depends_on = None
def upgrade():
pass
def downgrade():
pass
| 15.32 | 48 | 0.741514 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 206 | 0.537859 |
1c9adb0e11b484554e5e9a324f68f256e624b588 | 13,217 | py | Python | iotronic/wamp/agent.py | smartmeio/stack4things-openstack-iotronic | 3e5782eb1fb33b7c3c8c9362e24d30241153c371 | [
"Apache-2.0"
]
| 1 | 2021-11-04T09:43:49.000Z | 2021-11-04T09:43:49.000Z | iotronic/wamp/agent.py | smartmeio/stack4things-openstack-iotronic | 3e5782eb1fb33b7c3c8c9362e24d30241153c371 | [
"Apache-2.0"
]
| null | null | null | iotronic/wamp/agent.py | smartmeio/stack4things-openstack-iotronic | 3e5782eb1fb33b7c3c8c9362e24d30241153c371 | [
"Apache-2.0"
]
| null | null | null | # Copyright 2017 MDSLAB - University of Messina
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import asyncio
import json
import subprocess
import time
import txaio
from iotronic.common import exception
from iotronic.common.i18n import _
from iotronic.common.i18n import _LI
from iotronic.common.i18n import _LW
from iotronic.db import api as dbapi
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_messaging.rpc import dispatcher
import importlib
from threading import Thread
import ssl
import os
import signal
from autobahn.asyncio.component import Component
LOG = logging.getLogger(__name__)
service_opts = [
cfg.StrOpt('notification_level',
choices=[('debug', _('"debug" level')),
('info', _('"info" level')),
('warning', _('"warning" level')),
('error', _('"error" level')),
('critical', _('"critical" level'))],
help=_('Specifies the minimum level for which to send '
'notifications. If not set, no notifications will '
'be sent. The default is for this option to be unset.')),
]
wamp_opts = [
cfg.StrOpt('wamp_transport_url',
default='ws://localhost:8181/',
help=('URL of wamp broker')),
cfg.StrOpt('wamp_realm',
default='s4t',
help=('realm broker')),
cfg.BoolOpt('register_agent',
default=False,
help=('Flag for marking this agent as a registration agent')),
cfg.BoolOpt('skip_cert_verify',
default=False,
help=(
'Flag for skipping the verification of the server cert '
                    '(for self-signed certificates)')),
cfg.IntOpt('autoPingInterval',
default=2,
help=('autoPingInterval parameter for wamp')),
    cfg.IntOpt('autoPingTimeout',
               default=2,
               help=('autoPingTimeout parameter for wamp')),
cfg.BoolOpt('service_allow_list',
default=False,
help='Enable service allow list checks.'),
cfg.StrOpt('service_allow_list_path',
default="(/var/lib/wstun/allowlist)",
help='Path of allowlist.json file.'),
]
proxy_opts = [
cfg.StrOpt('proxy',
choices=[('nginx', _('nginx proxy')), ],
help=_('Proxy for webservices')),
]
CONF = cfg.CONF
cfg.CONF.register_opts(service_opts)
cfg.CONF.register_opts(proxy_opts)
CONF.register_opts(wamp_opts, 'wamp')
txaio.start_logging(level="info")
wamp_session_caller = None
AGENT_HOST = None
LOOP = None
connected = False
async def wamp_request(kwarg):
# for previous LR version (to be removed asap)
if 'req' in kwarg:
LOG.debug("calling: " + kwarg['wamp_rpc_call'] +
" with request id: " + kwarg['req']['uuid'])
d = await wamp_session_caller.call(kwarg['wamp_rpc_call'],
kwarg['req'],
*kwarg['data'])
else:
LOG.debug("calling: " + kwarg['wamp_rpc_call'])
d = await wamp_session_caller.call(kwarg['wamp_rpc_call'],
*kwarg['data'])
return d
# OSLO ENDPOINT
class WampEndpoint(object):
def s4t_invoke_wamp(self, ctx, **kwarg):
LOG.debug("CONDUCTOR sent me: " + kwarg['wamp_rpc_call'])
r = asyncio.run_coroutine_threadsafe(wamp_request(kwarg), LOOP)
return r.result()
def read_allowlist():
try:
with open(CONF.wamp.service_allow_list_path, "r") as allow_file:
allow_list_str = allow_file.read()
allow_list = json.loads(allow_list_str)
#LOG.debug(allow_list)
return allow_list
except Exception as err:
LOG.error(err)
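# Illustrative sketch of the allowlist file read above; the exact contents are
# deployment-specific, but addin_allowlist()/remove_from_allowlist() below read
# and write a JSON list of {"client", "port"} entries like:
#
#   [
#       {"client": "<device-uuid>", "port": "80"},
#       {"client": "<device-uuid>", "port": "1880"}
#   ]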
class AgentEndpoint(object):
# used for testing
def echo(self, ctx, text):
LOG.debug("ECHO of " + text)
return text
def create_tap_interface(self, ctx, port_uuid, tcp_port):
time.sleep(12)
LOG.debug('Creating tap interface on the wamp agent host')
p = subprocess.Popen('socat -d -d TCP:localhost:' + tcp_port +
',reuseaddr,forever,interval=10 TUN,tun-type=tap,'
'tun-name=tap' + port_uuid[0:14] +
',up ', shell=True, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
return 1
def addin_allowlist(self, ctx, device, port):
try:
allow_list = read_allowlist()
            new_node = {}
            new_node['client'] = device
            new_node['port'] = str(port)
if new_node in allow_list:
LOG.warning("This device already exposes this port!")
else:
allow_list.append(new_node)
with open(CONF.wamp.service_allow_list_path, "r+") as allow_file:
allow_file.seek(0)
allow_file.write("%s" % json.dumps(allow_list))
allow_file.truncate()
read_allowlist()
LOG.debug("Added device/service port in allow list.")
except Exception as err:
print(err)
def remove_from_allowlist(self, ctx, device, port):
try:
allow_list = read_allowlist()
            new_node = {}
            new_node['client'] = device
            new_node['port'] = str(port)
if new_node in allow_list:
allow_list.remove(new_node)
with open(CONF.wamp.service_allow_list_path, "r+") as allow_file:
allow_file.seek(0)
allow_file.write("%s" % json.dumps(allow_list))
allow_file.truncate()
LOG.debug("Removed device/service port from allow list.")
except Exception as err:
print(err)
class RPCServer(Thread):
def __init__(self):
# AMQP CONFIG
proxy = importlib.import_module("iotronic.wamp.proxies." + CONF.proxy)
endpoints = [
WampEndpoint(),
AgentEndpoint(),
proxy.ProxyManager()
]
Thread.__init__(self)
transport = oslo_messaging.get_transport(CONF)
target = oslo_messaging.Target(topic='s4t',
server=AGENT_HOST)
access_policy = dispatcher.DefaultRPCAccessPolicy
self.server = oslo_messaging.get_rpc_server(
transport, target,
endpoints, executor='threading',
access_policy=access_policy)
def run(self):
LOG.info("Starting AMQP server... ")
self.server.start()
def stop(self):
LOG.info("Stopping AMQP server... ")
self.server.stop()
LOG.info("AMQP server stopped. ")
class WampManager(object):
def __init__(self):
LOG.debug("wamp url: %s wamp realm: %s",
CONF.wamp.wamp_transport_url, CONF.wamp.wamp_realm)
self.loop = asyncio.get_event_loop()
global LOOP
LOOP = self.loop
wamp_transport = CONF.wamp.wamp_transport_url
wurl_list = wamp_transport.split(':')
is_wss = False
if wurl_list[0] == "wss":
is_wss = True
whost = wurl_list[1].replace('/', '')
wport = int(wurl_list[2].replace('/', ''))
if is_wss and CONF.wamp.skip_cert_verify:
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
wamp_transport = [
{
"url": CONF.wamp.wamp_transport_url,
"serializers": ["json"],
"endpoint": {
"type": "tcp",
"host": whost,
"port": wport,
"tls": ctx
},
},
]
comp = Component(
transports=wamp_transport,
realm=CONF.wamp.wamp_realm
)
self.comp = comp
@comp.on_join
async def onJoin(session, details):
global connected
connected = True
global wamp_session_caller, AGENT_HOST
wamp_session_caller = session
import iotronic.wamp.functions as fun
session.subscribe(fun.board_on_leave,
'wamp.session.on_leave')
session.subscribe(fun.board_on_join,
'wamp.session.on_join')
try:
if CONF.wamp.register_agent:
session.register(fun.registration,
u'stack4things.register')
LOG.info("I have been set as registration agent")
session.register(fun.connection,
AGENT_HOST + u'.stack4things.connection')
session.register(fun.echo,
AGENT_HOST + u'.stack4things.echo')
session.register(fun.alive,
AGENT_HOST + u'.stack4things.alive')
session.register(fun.wamp_alive,
AGENT_HOST + u'.stack4things.wamp_alive')
session.register(fun.notify_result,
AGENT_HOST + u'.stack4things.notify_result')
LOG.debug("procedure registered")
except Exception as e:
LOG.error("could not register procedure: {0}".format(e))
LOG.info("WAMP session ready.")
session_l = await session.call(u'wamp.session.list')
session_l.remove(details.session)
fun.update_sessions(session_l, AGENT_HOST)
@comp.on_leave
async def onLeave(session, details):
LOG.warning('WAMP Session Left: ' + str(details))
@comp.on_disconnect
async def onDisconnect(session, was_clean):
LOG.warning('WAMP Transport Left: ' + str(was_clean))
global connected
connected = False
if not connected:
comp.start(self.loop)
def start(self):
LOG.info("Starting WAMP server...")
self.comp.start(self.loop)
self.loop.run_forever()
def stop(self):
LOG.info("Stopping WAMP server...")
# Canceling pending tasks and stopping the loop
asyncio.gather(*asyncio.Task.all_tasks()).cancel()
# Stopping the loop
self.loop.stop()
LOG.info("WAMP server stopped.")
class WampAgent(object):
def __init__(self, host):
signal.signal(signal.SIGINT, self.stop_handler)
logging.register_options(CONF)
CONF(project='iotronic')
logging.setup(CONF, "iotronic-wamp-agent")
if CONF.debug:
txaio.start_logging(level="debug")
# to be removed asap
self.host = host
self.dbapi = dbapi.get_instance()
try:
wpa = self.dbapi.register_wampagent(
{'hostname': self.host, 'wsurl': CONF.wamp.wamp_transport_url})
except exception.WampAgentAlreadyRegistered:
LOG.warn(_LW("A wampagent with hostname %(hostname)s "
"was previously registered. Updating registration"),
{'hostname': self.host})
wpa = self.dbapi.register_wampagent(
{'hostname': self.host, 'wsurl': CONF.wamp.wamp_transport_url},
update_existing=True)
self.wampagent = wpa
self.wampagent.ragent = CONF.wamp.register_agent
self.wampagent.save()
global AGENT_HOST
AGENT_HOST = self.host
self.r = RPCServer()
self.w = WampManager()
self.r.start()
self.w.start()
def del_host(self, deregister=True):
if deregister:
try:
self.dbapi.unregister_wampagent(self.host)
LOG.info(_LI('Successfully stopped wampagent with hostname '
'%(hostname)s.'),
{'hostname': self.host})
except exception.WampAgentNotFound:
pass
else:
LOG.info(_LI('Not deregistering wampagent with hostname '
'%(hostname)s.'),
{'hostname': self.host})
def stop_handler(self, signum, frame):
self.w.stop()
self.r.stop()
self.del_host()
os._exit(0)
| 31.544153 | 81 | 0.557464 | 8,981 | 0.679504 | 0 | 0 | 2,082 | 0.157524 | 2,623 | 0.198457 | 3,183 | 0.240826 |
1c9ae3c6c99371fef5bc7aaa5ea9deed848c23c0 | 1,406 | py | Python | src/export_to_poseviz.py | anibali/metro-pose3d | dd0c8a82ae271ce69441d216d615428e5ab1d5d1 | [
"MIT"
]
| 52 | 2020-03-10T05:18:02.000Z | 2021-12-23T04:03:38.000Z | src/export_to_poseviz.py | anibali/metro-pose3d | dd0c8a82ae271ce69441d216d615428e5ab1d5d1 | [
"MIT"
]
| 2 | 2020-03-30T08:08:06.000Z | 2020-03-31T04:26:10.000Z | src/export_to_poseviz.py | anibali/metro-pose3d | dd0c8a82ae271ce69441d216d615428e5ab1d5d1 | [
"MIT"
]
| 7 | 2020-04-02T09:02:00.000Z | 2020-12-12T07:11:07.000Z | #!/usr/bin/env python3
import argparse
import logging
import sys
import numpy as np
import util
def main():
flags = initialize()
logging.debug(f'Loading from {flags.in_path}')
a = np.load(flags.in_path, allow_pickle=True)
all_results_3d = {}
for image_path, coords3d_pred in zip(a['image_path'], a['coords3d_pred_world']):
image_path = image_path.decode('utf8')
all_results_3d.setdefault(
image_path, []).append(coords3d_pred.tolist())
logging.info(f'Writing to file {flags.out_path}')
util.dump_json(all_results_3d, flags.out_path)
def initialize():
parser = argparse.ArgumentParser()
parser.add_argument('--in-path', type=str, required=True)
parser.add_argument('--out-path', type=str, default=None)
parser.add_argument('--loglevel', type=str, default='info')
flags = parser.parse_args()
if flags.out_path is None:
flags.out_path = flags.in_path.replace('.npz', '.json')
loglevel = dict(error=40, warning=30, info=20, debug=10)[flags.loglevel]
simple_formatter = logging.Formatter('{asctime}-{levelname:^1.1} -- {message}', style='{')
print_handler = logging.StreamHandler(sys.stdout)
print_handler.setLevel(loglevel)
print_handler.setFormatter(simple_formatter)
logging.basicConfig(level=loglevel, handlers=[print_handler])
return flags
if __name__ == '__main__':
main()
| 29.914894 | 94 | 0.69559 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 235 | 0.167141 |
1c9b5d5805105a181cbbe52dc9cadbd70001e7f9 | 1,606 | py | Python | xcube/core/gen2/local/helpers.py | bcdev/xcube | 9d275ef3baef8fbcea5c1fbbfb84c3d0164aecd3 | [
"MIT"
]
| 97 | 2018-06-26T13:02:55.000Z | 2022-03-26T21:03:13.000Z | xcube/core/gen2/local/helpers.py | bcdev/xcube | 9d275ef3baef8fbcea5c1fbbfb84c3d0164aecd3 | [
"MIT"
]
| 524 | 2018-11-09T12:00:08.000Z | 2022-03-31T17:00:13.000Z | xcube/core/gen2/local/helpers.py | bcdev/xcube | 9d275ef3baef8fbcea5c1fbbfb84c3d0164aecd3 | [
"MIT"
]
| 15 | 2019-07-09T08:46:03.000Z | 2022-02-07T18:47:34.000Z | # The MIT License (MIT)
# Copyright (c) 2021 by the xcube development team and contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import numpy as np
import xarray as xr
def is_empty_cube(cube: xr.Dataset) -> bool:
return len(cube.data_vars) == 0
def strip_cube(cube: xr.Dataset) -> xr.Dataset:
drop_vars = [k for k, v in cube.data_vars.items()
if len(v.shape) < 3
or np.product(v.shape) == 0
or v.shape[-2] < 2
or v.shape[-1] < 2]
if drop_vars:
return cube.drop_vars(drop_vars)
return cube
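# Minimal usage sketch (illustrative only; the store name below is a placeholder):
#
#   import xarray as xr
#   cube = xr.open_zarr("some_cube.zarr")
#   if not is_empty_cube(cube):
#       cube = strip_cube(cube)  # drop empty/degenerate non-spatial variables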
| 41.179487 | 81 | 0.71731 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,129 | 0.702989 |
98bad417651d09071c208452c18aa4573b275f66 | 1,683 | py | Python | core/log.py | dl-stuff/dl9 | 1cbe98afc53a1de9d413797fb130946acc4b6ba4 | [
"MIT"
]
| null | null | null | core/log.py | dl-stuff/dl9 | 1cbe98afc53a1de9d413797fb130946acc4b6ba4 | [
"MIT"
]
| null | null | null | core/log.py | dl-stuff/dl9 | 1cbe98afc53a1de9d413797fb130946acc4b6ba4 | [
"MIT"
]
| null | null | null | """Simulation logs"""
from __future__ import annotations # default once 3.10
import sys
from enum import Enum
from typing import Type, TYPE_CHECKING
if TYPE_CHECKING:
from core.timeline import Timeline
class LogKind(Enum):
def __str__(self) -> str:
return self.name
DEBUG = 0
SIM = 1
class LogData:
pass
class Logger:
__slots__ = ["_timeline", "_entries", "_data"]
PRINT_ASAP = True
def __init__(self, timeline: Timeline):
self._timeline = timeline
self.reset()
def reset(self):
self._entries = []
self._data = LogData()
def __call__(self, fmt: str, kind: LogKind, *args, **kwargs) -> None:
entry = LogEntry(self._timeline.now, fmt, kind, *args, **kwargs)
if self.PRINT_ASAP:
print(entry.fmt(), flush=True)
entry.process(self._data)
self._entries.append(entry)
def write(self, output=sys.stdout):
for entry in self:
output.write(entry.fmt())
output.write("\n")
class LogEntry:
"""1 row in the log"""
__slots__ = ["_timestamp", "_kind", "_fmt", "_args", "_kwargs"]
def __init__(self, timestamp: float, fmt: str, kind: LogKind, *args, **kwargs) -> None:
self._timestamp = timestamp
self._fmt = "{ts:>8.3f}{kind:>6}| " + fmt
self._kind = kind
self._args = args
self._kwargs = kwargs
def fmt(self) -> str:
"""Format this line of log"""
return self._fmt.format(ts=self._timestamp, kind=self._kind, *self._args, **self._kwargs)
def process(self, data: LogData) -> None:
"""Does any kind of updates to log data"""
pass
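# Rough usage sketch (illustrative; assumes a Timeline object exposing a `now` attribute):
#
#   from core.timeline import Timeline
#   timeline = Timeline()
#   log = Logger(timeline)
#   log("{} hits {} for {} damage", LogKind.SIM, "adventurer", "enemy", 100)
#   log.write()  # writes every formatted entry to stdout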
| 25.119403 | 97 | 0.60309 | 1,463 | 0.869281 | 0 | 0 | 0 | 0 | 0 | 0 | 229 | 0.136067 |
98bba1bb3ab4288928a1669d17e724ae9a0d33c2 | 3,760 | py | Python | zipline/__init__.py | chalant/pluto | e7bfd35a2c1fc0e0753bd2f840b0a4385b5124fc | [
"Apache-2.0"
]
| null | null | null | zipline/__init__.py | chalant/pluto | e7bfd35a2c1fc0e0753bd2f840b0a4385b5124fc | [
"Apache-2.0"
]
| null | null | null | zipline/__init__.py | chalant/pluto | e7bfd35a2c1fc0e0753bd2f840b0a4385b5124fc | [
"Apache-2.0"
]
| null | null | null | # #
# # Copyright 2015 Quantopian, Inc.
# #
# # Licensed under the Apache License, Version 2.0 (the "License");
# # you may not use this file except in compliance with the License.
# # You may obtain a copy of the License at
# #
# # http://www.apache.org/licenses/LICENSE-2.0
# #
# # Unless required by applicable law or agreed to in writing, software
# # distributed under the License is distributed on an "AS IS" BASIS,
# # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# # See the License for the specific language governing permissions and
# # limitations under the License.
# from distutils.version import StrictVersion
# import os
# import numpy as np
#
# # This is *not* a place to dump arbitrary classes/modules for convenience,
# # it is a place to expose the public interfaces.
# from trading_calendars import get_calendar
#
# from . import data
# from . import finance
# from . import gens
# from . import utils
# from .utils.numpy_utils import numpy_version
# from .utils.pandas_utils import new_pandas
# from .utils.run_algo import run_algorithm
# from ._version import get_versions
#
# # These need to happen after the other imports.
# from . algorithm import TradingAlgorithm
# from . import api
# from zipline import extensions as ext
# from zipline.finance.blotter import Blotter
#
# # PERF: Fire a warning if calendars were instantiated during zipline import.
# # Having calendars doesn't break anything per-se, but it makes zipline imports
# # noticeably slower, which becomes particularly noticeable in the Zipline CLI.
# from trading_calendars.calendar_utils import global_calendar_dispatcher
# if global_calendar_dispatcher._calendars:
# import warnings
# warnings.warn(
# "Found TradingCalendar instances after zipline import.\n"
# "Zipline startup will be much slower until this is fixed!",
# )
# del warnings
# del global_calendar_dispatcher
#
#
# __version__ = get_versions()['version']
# del get_versions
#
# extension_args = ext.Namespace()
#
#
# def load_ipython_extension(ipython):
# from .__main__ import zipline_magic
# ipython.register_magic_function(zipline_magic, 'line_cell', 'zipline')
#
#
# if os.name == 'nt':
# # we need to be able to write to our temp directoy on windows so we
# # create a subdir in %TMP% that has write access and use that as %TMP%
# def _():
# import atexit
# import tempfile
#
# tempfile.tempdir = tempdir = tempfile.mkdtemp()
#
# @atexit.register
# def cleanup_tempdir():
# import shutil
# shutil.rmtree(tempdir)
# _()
# del _
#
# __all__ = [
# 'Blotter',
# 'TradingAlgorithm',
# 'api',
# 'data',
# 'finance',
# 'get_calendar',
# 'gens',
# 'run_algorithm',
# 'utils',
# 'extension_args'
# ]
#
#
# def setup(self,
# np=np,
# numpy_version=numpy_version,
# StrictVersion=StrictVersion,
# new_pandas=new_pandas):
# """Lives in zipline.__init__ for doctests."""
#
# if numpy_version >= StrictVersion('1.14'):
# self.old_opts = np.get_printoptions()
# np.set_printoptions(legacy='1.13')
# else:
# self.old_opts = None
#
# if new_pandas:
# self.old_err = np.geterr()
# # old pandas has numpy compat that sets this
# np.seterr(all='ignore')
# else:
# self.old_err = None
#
#
# def teardown(self, np=np):
# """Lives in zipline.__init__ for doctests."""
#
# if self.old_err is not None:
# np.seterr(**self.old_err)
#
# if self.old_opts is not None:
# np.set_printoptions(**self.old_opts)
#
#
# del os
# del np
# del numpy_version
# del StrictVersion
# del new_pandas
| 29.147287 | 80 | 0.668617 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,632 | 0.965957 |
98bbff443455dac48b3d58e95d525a9389d58413 | 7,896 | py | Python | smarts/core/utils/traffic_history_service.py | c-h-a-r-l-i-e/SMARTS | 6cb8ffda74e235c25d63b74313623b2e03e402f7 | [
"MIT"
]
| null | null | null | smarts/core/utils/traffic_history_service.py | c-h-a-r-l-i-e/SMARTS | 6cb8ffda74e235c25d63b74313623b2e03e402f7 | [
"MIT"
]
| null | null | null | smarts/core/utils/traffic_history_service.py | c-h-a-r-l-i-e/SMARTS | 6cb8ffda74e235c25d63b74313623b2e03e402f7 | [
"MIT"
]
| null | null | null | # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import logging
import os
import pickle
from dataclasses import dataclass
from multiprocessing import Pipe, Process, Queue
import ijson
import smarts.core.scenario as scenario
@dataclass
class RequestHistoryRange:
start_index: int
batch_count: int
class Traffic_history_service:
"""responsible for dynamically fetching traffic history json to reduce
memory use of traffic history data
"""
class QueueDone:
pass
def __init__(self, history_file_path):
self._history_file_path = history_file_path
self._all_timesteps = set()
self._current_traffic_history = {}
self._prev_batch_history = {}
# return if traffic history is not used
if history_file_path is None:
return
self._log = logging.getLogger(self.__class__.__name__)
send_data_conn, receive_data_conn = Pipe()
self._receive_data_conn = receive_data_conn
self._request_queue = Queue()
self._fetch_history_proc = Process(
target=self._fetch_history,
args=(
send_data_conn,
self._request_queue,
self._history_file_path,
),
)
self._fetch_history_proc.daemon = True
self._fetch_history_proc.start()
self._range_start = 0
self._batch_size = 300
# initialize
with open(self._history_file_path, "rb") as f:
for index, (t, vehicles_state) in enumerate(
ijson.kvitems(f, "", use_float=True)
):
self._all_timesteps.add(t)
if (
self._range_start <= index
and index < self._range_start + self._batch_size
):
self._current_traffic_history[t] = vehicles_state
self._range_start += self._batch_size
# prepares the next batch
self._prepare_next_batch()
self._receive_data_conn.recv()
def teardown(self):
if self.is_in_use:
self._request_queue.put(Traffic_history_service.QueueDone())
self._request_queue.close()
self._request_queue = None
self._fetch_history_proc.join(timeout=3)
if self._fetch_history_proc.is_alive():
self._log.warning("fetch history process still alive after teardown")
self._fetch_history_proc = None
self._history_file_path = None
def __del__(self):
self.teardown()
@property
def is_in_use(self):
return self._history_file_path is not None
def _fetch_history(self, send_data_conn, request_queue, history_file_path):
"""prepare 1 batch ahead, when received request, immediately return the previously
prepared batch and prepares the next batch.
"""
return_batch = {}
while True:
historyRange = request_queue.get()
if type(historyRange) is Traffic_history_service.QueueDone:
break
assert isinstance(historyRange, RequestHistoryRange)
send_data_conn.send(return_batch)
return_batch = {}
with open(history_file_path, "rb") as f:
for index, (t, vehicles_state) in enumerate(
ijson.kvitems(f, "", use_float=True)
):
if (
historyRange.start_index <= index
and index < historyRange.start_index + historyRange.batch_count
):
return_batch[t] = vehicles_state
send_data_conn.close()
@property
def all_timesteps(self):
return self._all_timesteps
@property
def history_file_path(self):
return self._history_file_path
@property
def traffic_history(self):
return {**self._current_traffic_history, **self._prev_batch_history}
def _prepare_next_batch(self):
self._request_queue.put(
RequestHistoryRange(
start_index=self._range_start,
batch_count=self._batch_size,
)
)
self._range_start += self._batch_size
def fetch_history_at_timestep(self, timestep: str):
if timestep not in self._all_timesteps:
return {}
elif timestep in self.traffic_history:
return self.traffic_history[timestep]
# ask child process to prepare the next batch:
self._prepare_next_batch()
self._prev_batch_history = self._current_traffic_history
# receives the previous batch child process prepared
self._current_traffic_history = self._receive_data_conn.recv()
if timestep in self._current_traffic_history:
return self._current_traffic_history[timestep]
# no history exists at requested timestamp
return {}
@staticmethod
def apply_map_location_offset(position, map_offset):
return [pos + map_offset[i] for i, pos in enumerate(position[:2])]
@staticmethod
def fetch_agent_missions(
history_file_path: str, scenario_root_path: str, mapLocationOffset
):
assert os.path.isdir(scenario_root_path)
history_mission_filepath = os.path.join(
scenario_root_path, "history_mission.pkl"
)
if not os.path.exists(history_mission_filepath):
history_mission = {}
else:
with open(history_mission_filepath, "rb") as f:
history_mission = pickle.load(f)
if history_file_path in history_mission:
return history_mission[history_file_path]
vehicle_missions = {}
with open(history_file_path, "rb") as f:
for t, vehicles_state in ijson.kvitems(f, "", use_float=True):
for vehicle_id in vehicles_state:
if vehicle_id in vehicle_missions:
continue
vehicle_missions[vehicle_id] = scenario.Mission(
start=scenario.Start(
Traffic_history_service.apply_map_location_offset(
vehicles_state[vehicle_id]["position"],
mapLocationOffset,
),
scenario.Heading(vehicles_state[vehicle_id]["heading"]),
),
goal=scenario.EndlessGoal(),
start_time=float(t),
)
history_mission[history_file_path] = vehicle_missions
# update cached history_mission_file
with open(history_mission_filepath, "wb") as f:
pickle.dump(history_mission, f)
return vehicle_missions
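# Rough usage sketch (illustrative; the history file path and timestep key are placeholders):
#
#   service = Traffic_history_service("traffic_history.json")
#   vehicles = service.fetch_history_at_timestep("0.1")
#   ...
#   service.teardown()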
| 37.245283 | 90 | 0.630699 | 6,572 | 0.83232 | 0 | 0 | 2,299 | 0.29116 | 0 | 0 | 1,742 | 0.220618 |
98bd00128cc88c306abd43b1058840e641926a91 | 195 | py | Python | src/basics/sql_lite/update_data.py | FoxNeo/MyPythonProjects | 3499ef0853f0087f6f143e1633b0a88a3d7b9818 | [
"MIT"
]
| null | null | null | src/basics/sql_lite/update_data.py | FoxNeo/MyPythonProjects | 3499ef0853f0087f6f143e1633b0a88a3d7b9818 | [
"MIT"
]
| null | null | null | src/basics/sql_lite/update_data.py | FoxNeo/MyPythonProjects | 3499ef0853f0087f6f143e1633b0a88a3d7b9818 | [
"MIT"
]
| null | null | null | import sqlite3
connect = sqlite3.connect("production.db")
cursor = connect.cursor()
cursor.execute("UPDATE PERSON SET edad = 19 WHERE nombre = 'Conker'")
connect.commit()
connect.close()
| 24.375 | 70 | 0.723077 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 68 | 0.348718 |
98bd3099195cf49ba522ba023294ea3a974ffe7f | 1,599 | py | Python | calvin/runtime/south/plugins/media/defaultimpl/image.py | josrolgil/exjobbCalvin | 976459eaa50246586360c049b9880d753623d574 | [
"Apache-2.0"
]
| 1 | 2016-05-10T22:36:31.000Z | 2016-05-10T22:36:31.000Z | calvin/runtime/south/plugins/media/defaultimpl/image.py | josrolgil/exjobbCalvin | 976459eaa50246586360c049b9880d753623d574 | [
"Apache-2.0"
]
| null | null | null | calvin/runtime/south/plugins/media/defaultimpl/image.py | josrolgil/exjobbCalvin | 976459eaa50246586360c049b9880d753623d574 | [
"Apache-2.0"
]
| null | null | null | import pygame
from StringIO import StringIO
import cv2
import os
import numpy
class Image(object):
"""
Image object
"""
def __init__(self):
self.display = None
def show_image(self, image, width, height):
"""
Show image
"""
size = (width, height)
self.display = pygame.display.set_mode(size, 0)
self.snapshot = pygame.surface.Surface(size, 0, self.display)
img = pygame.image.load(StringIO(image))
self.display.blit(img, (0, 0))
pygame.display.flip()
def detect_face(self, image):
linux_prefix = "/usr/share/opencv"
mac_prefix = "/usr/local/share/OpenCV"
suffix = "/haarcascades/haarcascade_frontalface_default.xml"
linux_path = linux_prefix + suffix
mac_path = mac_prefix + suffix
        if os.path.exists(linux_path):
            cpath = linux_path
        elif os.path.exists(mac_path):
            cpath = mac_path
        else:
            raise Exception("No Haarcascade found")
classifier = cv2.CascadeClassifier(cpath)
jpg = numpy.fromstring(image, numpy.int8)
image = cv2.imdecode(jpg, 1)
faces = classifier.detectMultiScale(image)
        if len(faces) > 0:
            for (x, y, w, h) in faces:
                if w < 120:
# Too small to be a nearby face
continue
return True
return False
def close(self):
"""
Close display
"""
if not self.display is None:
pygame.display.quit()
| 26.65 | 69 | 0.56035 | 1,518 | 0.949343 | 0 | 0 | 0 | 0 | 0 | 0 | 247 | 0.154472 |
98bd3fd17ab9f4b238b6d43814353c33f950c7b3 | 3,340 | py | Python | durin/models.py | mlodic/django-rest-durin | b31a7257fb9765a4928c08bb1e68e80f48159229 | [
"MIT"
]
| null | null | null | durin/models.py | mlodic/django-rest-durin | b31a7257fb9765a4928c08bb1e68e80f48159229 | [
"MIT"
]
| null | null | null | durin/models.py | mlodic/django-rest-durin | b31a7257fb9765a4928c08bb1e68e80f48159229 | [
"MIT"
]
| null | null | null | import binascii
from os import urandom
import humanize
from django.conf import settings
from django.db import models
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from durin.settings import durin_settings
from durin.signals import token_renewed
User = settings.AUTH_USER_MODEL
def _create_token_string() -> str:
return binascii.hexlify(
urandom(int(durin_settings.TOKEN_CHARACTER_LENGTH / 2))
).decode()
class Client(models.Model):
name = models.CharField(
max_length=64,
null=False,
blank=False,
db_index=True,
unique=True,
help_text=_("A unique identification name for the client."),
)
token_ttl = models.DurationField(
null=False,
default=durin_settings.DEFAULT_TOKEN_TTL,
help_text=_(
"""
Token Time To Live (TTL) in timedelta. Format: <em>DAYS HH:MM:SS</em>.
"""
),
)
def __str__(self):
td = humanize.naturaldelta(self.token_ttl)
return "({0}, {1})".format(self.name, td)
class AuthTokenManager(models.Manager):
def create(self, user, client, delta_ttl=None):
token = _create_token_string()
if delta_ttl is not None:
expiry = timezone.now() + delta_ttl
else:
expiry = timezone.now() + client.token_ttl
instance = super(AuthTokenManager, self).create(
token=token, user=user, client=client, expiry=expiry
)
return instance
class AuthToken(models.Model):
class Meta:
constraints = [
models.UniqueConstraint(
fields=["user", "client"], name="unique token for user per client"
)
]
objects = AuthTokenManager()
token = models.CharField(
max_length=durin_settings.TOKEN_CHARACTER_LENGTH,
null=False,
blank=False,
db_index=True,
unique=True,
help_text=_("Token is auto-generated on save."),
)
user = models.ForeignKey(
User,
null=False,
blank=False,
related_name="auth_token_set",
on_delete=models.CASCADE,
)
client = models.ForeignKey(
Client,
null=False,
blank=False,
related_name="auth_token_set",
on_delete=models.CASCADE,
)
created = models.DateTimeField(auto_now_add=True)
expiry = models.DateTimeField(null=False)
def renew_token(self, renewed_by):
new_expiry = timezone.now() + self.client.token_ttl
self.expiry = new_expiry
self.save(update_fields=("expiry",))
token_renewed.send(
sender=renewed_by,
username=self.user.get_username(),
token_id=self.pk,
expiry=new_expiry,
)
return new_expiry
@property
def expires_in(self) -> str:
if self.expiry:
td = self.expiry - self.created
return humanize.naturaldelta(td)
else:
return "N/A"
@property
def has_expired(self) -> bool:
return timezone.now() > self.expiry
def __repr__(self) -> str:
return "({0}, {1}/{2})".format(
self.token, self.user.get_username(), self.client.name
)
def __str__(self) -> str:
return self.token
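# Illustrative usage sketch (not part of the library; `user`, `client` and the
# sender passed to renew_token are assumed to be existing instances):
#
#   token = AuthToken.objects.create(user=user, client=client)
#   token.expires_in                     # humanized TTL, e.g. "a day"
#   token.renew_token(renewed_by=sender) # pushes expiry forward by client.token_ttl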
| 26.299213 | 82 | 0.606886 | 2,862 | 0.856886 | 0 | 0 | 282 | 0.084431 | 0 | 0 | 303 | 0.090719 |
98be793b0386d37224cb563ae56376daaaeb6f10 | 507 | py | Python | linter.py | CudaText-addons/cuda_lint_htmltidy | afcf1dbfaa2dfc2d63e1ded4781d5f3e4b40a21c | [
"MIT"
]
| null | null | null | linter.py | CudaText-addons/cuda_lint_htmltidy | afcf1dbfaa2dfc2d63e1ded4781d5f3e4b40a21c | [
"MIT"
]
| null | null | null | linter.py | CudaText-addons/cuda_lint_htmltidy | afcf1dbfaa2dfc2d63e1ded4781d5f3e4b40a21c | [
"MIT"
]
| null | null | null | # Copyright (c) 2013 Aparajita Fishman
# Change for CudaLint: Alexey T.
# License: MIT
import os
from cuda_lint import Linter, util
if os.name == 'nt':
_exe = os.path.join(os.path.dirname(__file__), 'tidy_win32', 'tidy')
else:
_exe = 'tidy'
class HtmlTidy(Linter):
syntax = ('HTML', 'HTML_')
cmd = (_exe, '-errors', '-quiet', '-utf8')
regex = r'^line (?P<line>\d+) column (?P<col>\d+) - (?:(?P<error>Error)|(?P<warning>Warning)): (?P<message>.+)'
error_stream = util.STREAM_STDERR
| 26.684211 | 115 | 0.627219 | 256 | 0.504931 | 0 | 0 | 0 | 0 | 0 | 0 | 252 | 0.497041 |
98bf3939045052dd4fba91a19ad1fdf6be1101a5 | 640 | py | Python | PP4E-Examples-1.4/Examples/PP4E/Dstruct/Basic/inter2.py | AngelLiang/PP4E | 3a7f63b366e1e4700b4d2524884696999a87ba9d | [
"MIT"
]
| null | null | null | PP4E-Examples-1.4/Examples/PP4E/Dstruct/Basic/inter2.py | AngelLiang/PP4E | 3a7f63b366e1e4700b4d2524884696999a87ba9d | [
"MIT"
]
| null | null | null | PP4E-Examples-1.4/Examples/PP4E/Dstruct/Basic/inter2.py | AngelLiang/PP4E | 3a7f63b366e1e4700b4d2524884696999a87ba9d | [
"MIT"
]
| null | null | null | "set operations for multiple sequences"
def intersect(*args):
res = []
for x in args[0]: # scan the first list
for other in args[1:]: # for all other arguments
if x not in other: break # this item in each one?
else:
res.append(x) # add common items to the end
return res
def union(*args):
res = []
for seq in args: # for all sequence-arguments
for x in seq: # for all nodes in argument
if not x in res:
res.append(x) # add new items to result
return res
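# Quick examples of the two operations above:
#
#   intersect([1, 2, 3], [2, 3, 4], [3, 4, 5])   # -> [3]
#   union([1, 2], [2, 3], [3, 4])                # -> [1, 2, 3, 4]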
| 33.684211 | 68 | 0.504688 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 218 | 0.340625 |
98bf61f5f3abef89b085be204210156d6a5477f5 | 3,006 | py | Python | airtech_api/utils/error_messages/serialization_errors.py | chidioguejiofor/airtech-api | 45d77da0cc4230dd3cb7ab4cbb5168a9239850f5 | [
"MIT"
]
| 1 | 2019-04-04T12:27:55.000Z | 2019-04-04T12:27:55.000Z | airtech_api/utils/error_messages/serialization_errors.py | chidioguejiofor/airtech-api | 45d77da0cc4230dd3cb7ab4cbb5168a9239850f5 | [
"MIT"
]
| 34 | 2019-03-26T11:18:17.000Z | 2022-02-10T08:12:36.000Z | airtech_api/utils/error_messages/serialization_errors.py | chidioguejiofor/airtech-api | 45d77da0cc4230dd3cb7ab4cbb5168a9239850f5 | [
"MIT"
]
| null | null | null | msg_dict = {
'resource_not_found':
'The resource you specified was not found',
'invalid_gender':
"The gender you specified is invalid!!",
'many_invalid_fields':
'Some errors occured while validating some fields. Please check and try again',
'unique':
'The {} you inputted already exists',
'user_not_found':
'The user with that username/email and password combination was not found',
'email_not_found':
'A user with email `{}` does not exist',
'user_already_verified':
'The user with that email has already been verified',
'invalid_flight_type':
'Flight type must be either international or local',
'invalid_flight_schedule':
'Flight schedule must be at least 12 hours before it is created',
'resource_id_not_found':
'The {} with that id was not found',
'user_book_flight_twice':
'You had previously booked for this Flight and thus cannot do it again',
'flight_booking_expired':
'You cannot book for a flight less than 24 hours before the flight',
'flight_schedule_expired':
'The schedule of this flight has already passed and thus you cannot book it',
'missing_field':
'You forgot to include this field',
'value_not_a_file':
'The value you inputted is not a file',
'not_an_image':
'The file you uploaded is not a valid image',
'image_too_large':
'Image must not be more than 2MB',
'payment_link_error':
'An error occurred while creating payment link',
'booking_already_paid':
'You have already paid for this flight',
'booking_expired':
'Your booking has expired, thus you cannot pay for this ticket',
'invalid_url':
'The `{}` field must be a valid URL with protocols `http` or `https`',
"invalid_url_field":
'This field must be a valid URL with protocols `http` or `https`',
'paystack_threw_error':
"There was an unexpected error while processing request. "
"Please raise this as an issue in at "
"https://github.com/chidioguejiofor/airtech-api/issues",
'empty_request':
'You did not specify any `{}` data in your request',
'paid_booking_cannot_be_deleted':
'You cannot delete this Booking because you have already paid for it',
'cannot_delete_expired_booking':
'You cannot delete an expired booking',
'cannot_delete_flight_with_bookings':
'You cannot delete this flight because users have started booking it',
'cannot_delete_flight_that_has_flown':
    'You cannot delete this flight because the scheduled date has already passed',
'cannot_update_flight_field_with_bookings':
'You cannot update the `{}` of this flight because it has already been booked',
'cannot_update_field':
'You cannot update a {} {}',
'regular_user_only':
'This endpoint is for only regular users',
'profile_not_updated':
'You need to update your profile picture before you can do this',
'only_alpha_and_numbers':
    'This field can contain only letters and numbers'
}
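# Example of how the templated messages are typically filled in (illustrative):
#
#   msg_dict['unique'].format('email')
#   # -> 'The email you inputted already exists'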
| 42.338028 | 83 | 0.706254 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,586 | 0.860279 |
98c098366590ca27e9adc66a721c84a992752bc9 | 109 | py | Python | blog/be/server/serialization/__init__.py | kamko/lnu_ht19_4ME310_final_project | ccb5d3c659cde0dac49c1bd6c3d46c46e73a111e | [
"MIT"
]
| null | null | null | blog/be/server/serialization/__init__.py | kamko/lnu_ht19_4ME310_final_project | ccb5d3c659cde0dac49c1bd6c3d46c46e73a111e | [
"MIT"
]
| 2 | 2020-06-07T19:02:54.000Z | 2020-06-07T19:03:02.000Z | blog/be/server/serialization/__init__.py | kamko/lnu_ht19_4ME310_final_project | ccb5d3c659cde0dac49c1bd6c3d46c46e73a111e | [
"MIT"
]
| null | null | null | from .marshmallow import ma
from .schemas import ArticleSchema
__all__ = [
'ma',
'ArticleSchema'
]
| 12.111111 | 34 | 0.688073 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 19 | 0.174312 |
98c0a5642acac614148ed6f1d7bcaa9979233d3b | 8,950 | py | Python | scripts/fast_queue.py | ourresearch/openalex-guts | f6c3e9992361e4bb1dbe76fbfb644c80f081319a | [
"MIT"
]
| 48 | 2021-11-20T08:17:53.000Z | 2022-03-19T13:57:15.000Z | scripts/fast_queue.py | ourresearch/openalex-guts | f6c3e9992361e4bb1dbe76fbfb644c80f081319a | [
"MIT"
]
| null | null | null | scripts/fast_queue.py | ourresearch/openalex-guts | f6c3e9992361e4bb1dbe76fbfb644c80f081319a | [
"MIT"
]
| 2 | 2022-01-04T16:28:48.000Z | 2022-02-05T21:25:01.000Z | import argparse
from time import sleep, time
from collections import defaultdict
from sqlalchemy import orm, text, insert, delete
from sqlalchemy.orm import selectinload
import models
from app import db
from app import logger
from scripts.queue import JsonWorks, JsonAuthors, JsonConcepts, JsonInstitutions, JsonVenues
from util import elapsed
def run(**kwargs):
entity_type = kwargs.get("entity")
method_name = kwargs.get("method")
if entity_type == "work" and method_name == "add_everything":
queue_table = "queue.work_add_everything"
elif method_name == "store":
queue_table = f"queue.{entity_type.lower()}_store"
else:
queue_table = f"queue.{method_name.lower()}"
if single_id := kwargs.get('id'):
if objects := get_objects(entity_type, [single_id]):
logger.info(f'found object {objects[0]}')
store_objects(objects)
db.session.commit()
else:
logger.warn(f'found no object with id {single_id}')
else:
objects_updated = 0
limit = kwargs.get('limit')
chunk = kwargs.get('chunk')
total_count = 0
while limit is None or objects_updated < limit:
loop_start = time()
if object_ids := fetch_queue_chunk_ids(queue_table, chunk):
objects = get_objects(entity_type, object_ids)
for obj in objects:
method_start_time = time()
total_count += 1
print(f"*** #{total_count} starting {obj}.{method_name}() method")
method_to_run = getattr(obj, method_name)
method_to_run()
print(f">>> finished {obj}.{method_name}(). took {elapsed(method_start_time, 4)} seconds")
# print(1/0)
logger.info('committing')
start_time = time()
if method_name == "store":
store_json_objects(objects)
else:
db.session.commit() # fail loudly for now
logger.info(f'commit took {elapsed(start_time, 4)}s')
finish_object_ids(queue_table, object_ids)
objects_updated += len(objects)
logger.info(f'processed chunk of {chunk} objects in {elapsed(loop_start, 2)} seconds')
else:
logger.info('nothing ready in the queue, waiting 5 seconds...')
sleep(5)
def store_json_objects(objects):
delete_dict_all_objects = defaultdict(list)
insert_dict_all_objects = defaultdict(list)
for count, obj in enumerate(objects):
obj.delete_dict = defaultdict(list)
for row in obj.insert_dicts:
for table_name, insert_dict in row.items():
insert_dict_all_objects[table_name] += [insert_dict]
obj.delete_dict[table_name] += [insert_dict["id"]]
for table_name, ids in obj.delete_dict.items():
delete_dict_all_objects[table_name] += ids
start_time = time()
for table_name, delete_ids in delete_dict_all_objects.items():
my_table = globals()[table_name]
db.session.remove()
db.session.execute(delete(my_table).where(my_table.id.in_(delete_ids)))
db.session.commit()
print("delete done")
for table_name, all_insert_strings in insert_dict_all_objects.items():
my_table = globals()[table_name]
db.session.remove()
db.session.execute(insert(my_table).values(all_insert_strings))
db.session.commit()
print("insert and commit took {} seconds".format(elapsed(start_time, 2)))
def fetch_queue_chunk_ids(queue_table, chunk_size):
text_query = f"""
with chunk as (
select id
from {queue_table}
where started is null
order by
finished asc nulls first,
rand
limit :chunk
for update skip locked
)
update {queue_table}
set started = now()
from chunk
where {queue_table}.id = chunk.id
returning chunk.id;
"""
logger.info(f'getting {chunk_size} ids from the queue')
start_time = time()
ids = [
row[0] for row in
db.engine.execute(text(text_query).bindparams(chunk=chunk_size).execution_options(autocommit=True)).all()
]
logger.info(f'got {len(ids)} ids from the queue in {elapsed(start_time, 4)}s')
logger.info(f'got these ids: {ids}')
return ids
def finish_object_ids(queue_table, object_ids):
# logger.info(f'finishing queue chunk')
start_time = time()
query_text = f'''
update {queue_table}
set finished = now(), started=null
where id = any(:ids)
'''
db.session.execute(text(query_text).bindparams(ids=object_ids))
db.session.commit()
# logger.info(f'finished saving finish_objects in {elapsed(start_time, 4)}s')
def get_objects(entity_type, object_ids):
logger.info(f'getting {len(object_ids)} objects')
start_time = time()
if entity_type == "work":
objects = db.session.query(models.Work).options(
selectinload(models.Work.records).selectinload(models.Record.journals).raiseload('*'),
selectinload(models.Work.records).raiseload('*'),
selectinload(models.Work.locations),
selectinload(models.Work.journal).raiseload('*'),
selectinload(models.Work.references).raiseload('*'),
selectinload(models.Work.references_unmatched).raiseload('*'),
selectinload(models.Work.mesh),
selectinload(models.Work.counts_by_year).raiseload('*'),
selectinload(models.Work.abstract),
selectinload(models.Work.extra_ids).raiseload('*'),
selectinload(models.Work.related_works).raiseload('*'),
selectinload(models.Work.affiliations).selectinload(models.Affiliation.author).selectinload(models.Author.orcids).raiseload('*'),
selectinload(models.Work.affiliations).selectinload(models.Affiliation.author).raiseload('*'),
selectinload(models.Work.affiliations).selectinload(models.Affiliation.institution).selectinload(models.Institution.ror).raiseload('*'),
selectinload(models.Work.affiliations).selectinload(models.Affiliation.institution).raiseload('*'),
selectinload(models.Work.concepts).selectinload(models.WorkConcept.concept).raiseload('*'),
selectinload(models.Work.concepts_full).raiseload('*'),
orm.Load(models.Work).raiseload('*')
).filter(models.Work.paper_id.in_(object_ids)).all()
elif entity_type == "author":
objects = db.session.query(models.Author).options(
selectinload(models.Author.counts_by_year_papers),
selectinload(models.Author.counts_by_year_citations),
selectinload(models.Author.alternative_names),
selectinload(models.Author.author_concepts),
selectinload(models.Author.orcids).selectinload(models.AuthorOrcid.orcid_data),
selectinload(models.Author.last_known_institution).selectinload(models.Institution.ror).raiseload('*'),
selectinload(models.Author.last_known_institution).raiseload('*'),
orm.Load(models.Author).raiseload('*')
).filter(models.Author.author_id.in_(object_ids)).all()
elif entity_type == "venue":
objects = db.session.query(models.Venue).options(
selectinload(models.Venue.counts_by_year_papers),
selectinload(models.Venue.counts_by_year_citations),
orm.Load(models.Venue).raiseload('*')
).filter(models.Venue.journal_id.in_(object_ids)).all()
elif entity_type == "institution":
objects = db.session.query(models.Institution).filter(models.Institution.affiliation_id.in_(object_ids)).all()
elif entity_type == "concept":
objects = db.session.query(models.Concept).filter(models.Concept.field_of_study_id.in_(object_ids)).all()
logger.info(f'got {len(objects)} objects in {elapsed(start_time, 4)}s')
return objects
# python -m scripts.fast_queue --entity=work --method=add_everything --limit=3
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Run fast queue.")
parser.add_argument('--entity', type=str, help="the entity type to run")
parser.add_argument('--method', type=str, help="the method to run")
parser.add_argument('--id', nargs="?", type=str, help="id of the one thing you want to update (case sensitive)")
parser.add_argument('--limit', "-l", nargs="?", type=int, help="how many objects to work on")
parser.add_argument(
'--chunk', "-ch", nargs="?", default=100, type=int, help="how many objects to take off the queue at once"
)
parsed_args = parser.parse_args()
run(**vars(parsed_args))
| 42.216981 | 149 | 0.639777 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,981 | 0.221341 |
98c14c64fb91ce8b039d5c03cf8ab0036d83b74c | 3,810 | py | Python | cogs/memes.py | Code-Cecilia/botman-rewrite | 9d8baeebf267c62df975d2f209e85589b81934af | [
"MIT"
]
| 2 | 2022-02-21T14:10:15.000Z | 2022-02-21T14:10:50.000Z | cogs/memes.py | Code-Cecilia/botman-rewrite | 9d8baeebf267c62df975d2f209e85589b81934af | [
"MIT"
]
| null | null | null | cogs/memes.py | Code-Cecilia/botman-rewrite | 9d8baeebf267c62df975d2f209e85589b81934af | [
"MIT"
]
| null | null | null | import json
import discord
from discord.ext import commands
from assets import internet_funcs
from assets.list_funcs import chunks
class Memes(commands.Cog, description="Memes from https://imgflip.com/"):
def __init__(self, bot):
self.bot = bot
with open("config.json") as configFile:
config = json.load(configFile)
self.username = config.get("imgflip_username")
self.password = config.get("imgflip_password")
self.memetemps = {}
@commands.Cog.listener()
async def on_ready(self):
result = json.loads(await internet_funcs.get_response("https://api.imgflip.com/get_memes"))
if result["success"] is not True:
return
result = result["data"]["memes"]
for k in result:
self.memetemps[k["id"]] = {"name": k["name"], "box_count": k["box_count"]}
@commands.command(name="memetemplates", aliases=["memetemps"])
async def meme_temps(self, ctx):
"""Fetches top 100 meme templates from imgflip.com"""
# TODO: pagination for meme templates
result = list(self.memetemps.items())
if not result:
await self.on_ready()
result = list(self.memetemps.items())
n = 0
split_entries = list(chunks(result, 25))
for entry in split_entries:
embed = discord.Embed(title="Meme Templates", color=0x00ff00)
for meme in entry:
n += 1
meme_id = meme[0]
meme_name = meme[1]["name"]
embed.add_field(name=f"{n}. {meme_name}", value=f"ID: `{meme_id}`", inline=False)
try:
await ctx.author.send(embed=embed)
except discord.Forbidden:
await ctx.send("I can't DM you! Please enable DMs and try again.")
return
@commands.command(name="memegen", aliases=["memegenerator"])
async def meme_gen(self, ctx, meme_id, *text):
"""Generates a meme from imgflip. For template IDs, see the `memetemplates` command"""
text = list(text)
if self.memetemps == {}:
await self.on_ready()
if len(text) > 20:
text = text[:20]
if not str(meme_id).isnumeric():
found = False
for k, v in self.memetemps.items():
if str(meme_id).lower() == str(v["name"]).lower():
                    meme_id = k  # keep the original dict key so the later lookup in self.memetemps matches
found = True
break
if not found:
return await ctx.send("Meme not found. Please check the ID and try again.")
# clean up the number of boxes to send
if meme_id in self.memetemps.keys():
if len(text) > self.memetemps[meme_id]["box_count"]:
text = text[:int(self.memetemps[meme_id]["box_count"])]
if len(text) < self.memetemps[meme_id]["box_count"]:
text += [""] * int(self.memetemps[meme_id]["box_count"] - len(text))
# ready the text boxes
boxes_dict = {}
for box_count in range(len(text)):
boxes_dict[f"boxes[{box_count}][text]"] = text[box_count]
boxes_dict[f"boxes[{box_count}][color]"] = "#000000"
boxes_dict[f"boxes[{box_count}][outline_color]"] = "#FFFFFF"
# send the request
payload = {"template_id": meme_id, "username": self.username, "password": self.password}
payload.update(boxes_dict)
result = json.loads(await internet_funcs.post("https://api.imgflip.com/caption_image", data=payload))
if result["success"] is not True:
await ctx.send("An error occurred:" + " " + "**" + result["error_message"] + "**")
return
await ctx.send(result["data"]["url"])
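# Shape of the imgflip caption_image payload assembled above (illustrative values only):
#
#   {
#       "template_id": "<meme id>",
#       "username": "<imgflip username>", "password": "<imgflip password>",
#       "boxes[0][text]": "top text", "boxes[0][color]": "#000000", "boxes[0][outline_color]": "#FFFFFF",
#       "boxes[1][text]": "bottom text", ...
#   }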
def setup(bot):
bot.add_cog(Memes(bot))
| 38.877551 | 109 | 0.574803 | 3,628 | 0.952231 | 0 | 0 | 3,255 | 0.854331 | 3,094 | 0.812073 | 941 | 0.246982 |
98c1adf25f25e2b996bb3df26aef461028014a20 | 55 | py | Python | examples/forest_fire/run.py | fire-suppression-abm/mesa | 8498eea3e5d4a739aee3b003107a0e7de59c5026 | [
"Apache-2.0"
]
| 1,704 | 2015-02-01T17:59:44.000Z | 2022-03-30T13:25:47.000Z | examples/forest_fire/run.py | fire-suppression-abm/mesa | 8498eea3e5d4a739aee3b003107a0e7de59c5026 | [
"Apache-2.0"
]
| 1,048 | 2015-01-12T01:16:05.000Z | 2022-03-31T11:44:33.000Z | examples/forest_fire/run.py | fire-suppression-abm/mesa | 8498eea3e5d4a739aee3b003107a0e7de59c5026 | [
"Apache-2.0"
]
| 831 | 2015-03-04T13:41:25.000Z | 2022-03-30T14:33:17.000Z | from forest_fire.server import server
server.launch()
| 13.75 | 37 | 0.818182 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
98c1fbeb0d5441c90960a350fd079ea801185651 | 2,298 | py | Python | scripts/collect_timelines1.py | tedhchen/twitter_timeline_tools | bc21e8c7c4e976409281e2697e1ec75044648eb8 | [
"MIT"
]
| null | null | null | scripts/collect_timelines1.py | tedhchen/twitter_timeline_tools | bc21e8c7c4e976409281e2697e1ec75044648eb8 | [
"MIT"
]
| null | null | null | scripts/collect_timelines1.py | tedhchen/twitter_timeline_tools | bc21e8c7c4e976409281e2697e1ec75044648eb8 | [
"MIT"
]
| null | null | null | # Prep
import json, configparser, pickle, csv, logging, os
import pandas as pd
from tweepy import AppAuthHandler, API, Cursor
# Reading in configuation
params = configparser.ConfigParser()
params.read('config.ini')
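# Assumed config.ini layout (inferred from the keys accessed in twitter_auth below;
# adjust section/option names to match your own file):
#
#   [keys]
#   key = <twitter app key>
#   secret = <twitter app secret>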
# Functions
# Takes config file and returns authenticated api object
def twitter_auth(config):
    auth = AppAuthHandler(config['keys']['key'], config['keys']['secret'])
api = API(auth, wait_on_rate_limit = True, wait_on_rate_limit_notify = True)
return api
# Get relevant user ids
def get_ids(path, subset = None):
df = pd.read_csv(path, header = 0, dtype = {'user': 'object', 'subset': 'object'})
    if subset is not None:
        df = df[df['subset'] == subset]
    return list(df.user)
# takes user ids, and writes out a txt file with each user's status JSONs
def get_timelines(users, api, outfolder):
i = 0
for user in users:
timeline = []
try:
for status in Cursor(api.user_timeline, user_id = user, include_rts = True, exclude_replies = False, count = 200, tweet_mode = 'extended').items():
timeline.append(status)
timeline = [json.dumps(line._json) for line in timeline]
filename = 'timeline_' + user + '.txt'
with open(os.path.join(outfolder, filename), 'a', encoding = 'utf-8', newline = '') as outfile:
for line in timeline:
outfile.write(line + '\n')
except Exception as e:
logging.exception("Exception occurred when working with user id: " + user + '.')
i += 1
if i % 100 == 0:
print('Finished ' + str(i) + ' users.')
return None
def retry_missed_users(log, api, outfolder):
missed = []
with open(log, 'r') as infile:
for line in infile:
if 'Exception occurred when working with user id:' in line:
missed.append(line[79:-2])
get_timelines(missed, api, outfolder)
# Running script
# Placeholder run-time parameters (assumptions; set these to match your environment):
logfile = 'collect_timelines.log'   # log file path
path = 'users.csv'                  # csv with columns: user, subset
subset = None                       # optional subset label; None keeps all users
outpath = 'timelines'               # folder where timeline txt files are written
# Setting up logger
logging.basicConfig(filename = logfile, filemode = 'a', format = '(%(asctime)s) %(levelname)s: %(message)s', level = logging.INFO)
# Authenticating api
api = twitter_auth(params)
# Get users from pre-parsed data
# csv file with:
# user, subset
# ..., ...
# subset is just a way to subset users from the csv file
# if subset == None, then no subsetting is performed
users = get_ids(path, subset)
# Getting timelines
get_timelines(users, api, outpath)
# Double checking errors
retry_missed_users(logfile, api, outpath)
| 31.479452 | 150 | 0.700174 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 757 | 0.329417 |
98c28b91f69f483e365aff3baa53ce90ba90427d | 937 | py | Python | aquarius/app/auth_util.py | oceanprotocol/provider-backend | f9e36e3d6b880de548c6b92c38d10d76daf369ba | [
"Apache-2.0"
]
| null | null | null | aquarius/app/auth_util.py | oceanprotocol/provider-backend | f9e36e3d6b880de548c6b92c38d10d76daf369ba | [
"Apache-2.0"
]
| 1 | 2018-08-15T09:57:01.000Z | 2018-08-15T09:57:01.000Z | aquarius/app/auth_util.py | oceanprotocol/provider-backend | f9e36e3d6b880de548c6b92c38d10d76daf369ba | [
"Apache-2.0"
]
| null | null | null | #
# Copyright 2021 Ocean Protocol Foundation
# SPDX-License-Identifier: Apache-2.0
#
from eth_utils import is_address
from web3 import Web3
def sanitize_addresses(addresses):
return [Web3.toChecksumAddress(a) for a in addresses if is_address(a)]
def compare_eth_addresses(address, checker, logger):
"""
Compare two addresses and return TRUE if there is a match
:param str address: Address
:param str checker: Address to compare with
:param logger: instance of logging
:return: boolean
"""
logger.debug("compare_eth_addresses address: %s" % address)
logger.debug("compare_eth_addresses checker: %s" % checker)
if not is_address(address):
logger.debug("Address is not web3 valid")
return False
if not is_address(checker):
logger.debug("Checker is not web3 valid")
return False
return Web3.toChecksumAddress(address) == Web3.toChecksumAddress(checker)
| 31.233333 | 77 | 0.71825 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 418 | 0.446105 |
98c37885dc6ddaa81fc5bc670f8b3da95afaa94e | 673 | py | Python | oscar/lib/python2.7/site-packages/phonenumbers/data/region_DJ.py | sainjusajan/django-oscar | 466e8edc807be689b0a28c9e525c8323cc48b8e1 | [
"BSD-3-Clause"
]
| null | null | null | oscar/lib/python2.7/site-packages/phonenumbers/data/region_DJ.py | sainjusajan/django-oscar | 466e8edc807be689b0a28c9e525c8323cc48b8e1 | [
"BSD-3-Clause"
]
| null | null | null | oscar/lib/python2.7/site-packages/phonenumbers/data/region_DJ.py | sainjusajan/django-oscar | 466e8edc807be689b0a28c9e525c8323cc48b8e1 | [
"BSD-3-Clause"
]
| null | null | null | """Auto-generated file, do not edit by hand. DJ metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_DJ = PhoneMetadata(id='DJ', country_code=253, international_prefix='00',
general_desc=PhoneNumberDesc(national_number_pattern='[27]\\d{7}', possible_length=(8,)),
fixed_line=PhoneNumberDesc(national_number_pattern='2(?:1[2-5]|7[45])\\d{5}', example_number='21360003', possible_length=(8,)),
mobile=PhoneNumberDesc(national_number_pattern='77\\d{6}', example_number='77831001', possible_length=(8,)),
number_format=[NumberFormat(pattern='(\\d{2})(\\d{2})(\\d{2})(\\d{2})', format='\\1 \\2 \\3 \\4')])
| 74.777778 | 132 | 0.708767 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 185 | 0.274889 |
98c56839c6c8ff2db03c4e3a4f565347728ce5e0 | 4,541 | py | Python | llvm-7.0.0.src/utils/unicode-case-fold.py | sillywalk/grazz | a0adb1a90d41ff9006d8c1476546263f728b3c83 | ["Apache-2.0"] | 171 | 2018-09-17T13:15:12.000Z | 2022-03-18T03:47:04.000Z | llvm-7.0.0.src/utils/unicode-case-fold.py | sillywalk/grazz | a0adb1a90d41ff9006d8c1476546263f728b3c83 | ["Apache-2.0"] | 51 | 2019-10-23T11:55:08.000Z | 2021-12-21T06:32:11.000Z | llvm-7.0.0.src/utils/unicode-case-fold.py | sillywalk/grazz | a0adb1a90d41ff9006d8c1476546263f728b3c83 | ["Apache-2.0"] | 35 | 2018-09-18T07:46:53.000Z | 2022-03-27T07:59:48.000Z |
#!/usr/bin/env python
"""
Unicode case folding database conversion utility
Parses the database and generates a C++ function which implements the case
folding algorithm. The database entries are of the form:
<code>; <status>; <mapping>; # <name>
<status> can be one of four characters:
C - Common mappings
S - mappings for Simple case folding
F - mappings for Full case folding
T - special case for Turkish I characters
Right now this generates a function which implements simple case folding (C+S
entries).
"""
import sys
import re
import urllib2
# This variable will hold the body of the mappings function
body = ""
# Reads file line-by-line, extracts Common and Simple case fold mappings and
# returns a (from_char, to_char, from_name) tuple.
def mappings(f):
previous_from = -1
expr = re.compile(r'^(.*); [CS]; (.*); # (.*)')
for line in f:
m = expr.match(line)
if not m: continue
from_char = int(m.group(1), 16)
to_char = int(m.group(2), 16)
from_name = m.group(3)
if from_char <= previous_from:
raise Exception("Duplicate or unsorted characters in input")
yield from_char, to_char, from_name
previous_from = from_char
# Computes the shift (to_char - from_char) in a mapping.
def shift(mapping):
return mapping[1] - mapping[0]
# Computes the stride (from_char2 - from_char1) of two mappings.
def stride2(mapping1, mapping2):
return mapping2[0] - mapping1[0]
# Computes the stride of a list of mappings. The list should have at least two
# mappings. All mappings in the list are assumed to have the same stride.
def stride(block):
return stride2(block[0], block[1])
# b is a list of mappings. All the mappings are assumed to have the same
# shift and the stride between adjacent mappings (if any) is constant.
def dump_block(b):
global body
if len(b) == 1:
# Special case for handling blocks of length 1. We don't even need to
# emit the "if (C < X) return C" check below as all characters in this
# range will be caught by the "C < X" check emitted by the first
# non-trivial block.
body += " // {2}\n if (C == {0:#06x})\n return {1:#06x};\n".format(*b[0])
return
first = b[0][0]
last = first + stride(b) * (len(b)-1)
modulo = first % stride(b)
# All characters before this block map to themselves.
body += " if (C < {0:#06x})\n return C;\n".format(first)
body += " // {0} characters\n".format(len(b))
# Generic pattern: check upper bound (lower bound is checked by the "if"
# above) and modulo of C, return C+shift.
pattern = " if (C <= {0:#06x} && C % {1} == {2})\n return C + {3};\n"
if stride(b) == 2 and shift(b[0]) == 1 and modulo == 0:
# Special case:
# We can elide the modulo-check because the expression "C|1" will map
# the intervening characters to themselves.
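        # e.g. for a block 0x0100, 0x0102, ... with shift 1, "C | 1" maps each even
        # codepoint up by one and leaves the interleaved odd codepoints unchanged.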
pattern = " if (C <= {0:#06x})\n return C | 1;\n"
elif stride(b) == 1:
# Another special case: X % 1 is always zero, so don't emit the
# modulo-check.
pattern = " if (C <= {0:#06x})\n return C + {3};\n"
body += pattern.format(last, stride(b), modulo, shift(b[0]))
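# For the first block (ASCII A-Z: stride 1, shift +32) dump_block emits roughly:
#   if (C < 0x0041)
#     return C;
#   // 26 characters
#   if (C <= 0x005a)
#     return C + 32;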
current_block = []
f = urllib2.urlopen(sys.argv[1])
for m in mappings(f):
if len(current_block) == 0:
current_block.append(m)
continue
if shift(current_block[0]) != shift(m):
# Incompatible shift, start a new block.
dump_block(current_block)
current_block = [m]
continue
if len(current_block) == 1 or stride(current_block) == stride2(current_block[-1], m):
current_block.append(m)
continue
# Incompatible stride, start a new block.
dump_block(current_block)
current_block = [m]
f.close()
dump_block(current_block)
print '//===---------- Support/UnicodeCaseFold.cpp -------------------------------===//'
print '//'
print '// This file was generated by utils/unicode-case-fold.py from the Unicode'
print '// case folding database at'
print '// ', sys.argv[1]
print '//'
print '// To regenerate this file, run:'
print '// utils/unicode-case-fold.py \\'
print '// "{}" \\'.format(sys.argv[1])
print '// > lib/Support/UnicodeCaseFold.cpp'
print '//'
print '//===----------------------------------------------------------------------===//'
print ''
print '#include "llvm/Support/Unicode.h"'
print ''
print "int llvm::sys::unicode::foldCharSimple(int C) {"
print body
print " return C;"
print "}"
| 32.905797 | 89 | 0.618146 | 0 | 0 | 466 | 0.102621 | 0 | 0 | 0 | 0 | 2,647 | 0.582911 |
98c615953ef0bbcfd93b9c52b023ec8e35bea466 | 115,101 | py | Python | trade_remedies_caseworker/cases/views.py | uktrade/trade-remedies-caseworker | fece9fde3cb241d96cbc1aaf7188d976f8621600 | ["MIT"] | 1 | 2020-08-27T09:53:00.000Z | 2020-08-27T09:53:00.000Z | trade_remedies_caseworker/cases/views.py | uktrade/trade-remedies-caseworker | fece9fde3cb241d96cbc1aaf7188d976f8621600 | ["MIT"] | 7 | 2020-10-14T16:23:42.000Z | 2021-09-24T14:18:47.000Z | trade_remedies_caseworker/cases/views.py | uktrade/trade-remedies-caseworker | fece9fde3cb241d96cbc1aaf7188d976f8621600 | ["MIT"] | null | null | null |
import itertools
import json
import logging
import re
from django.views.generic import TemplateView
from django.http import HttpResponse
from django.views import View
from django.contrib.auth.mixins import LoginRequiredMixin, PermissionRequiredMixin
from django.views.decorators.csrf import csrf_exempt
from django.shortcuts import render, redirect
from django.urls import reverse
from django.utils import timezone
from django.utils.decorators import method_decorator
from django_chunk_upload_handlers.clam_av import VirusFoundInFileException
from core.base import GroupRequiredMixin
from core.utils import (
deep_index_items_by,
deep_index_items_by_exists,
get,
key_by,
index_users_by_group,
compact_list,
submission_contact,
public_login_url,
parse_notify_template,
parse_api_datetime,
pluck,
to_json,
from_json,
deep_update,
internal_redirect,
is_date,
notify_footer,
notify_contact_email,
)
from django_countries import countries
from django.conf import settings
from cases.submissions import SUBMISSION_TYPE_HELPERS, get_submission_deadline
from cases.utils import decorate_orgs
from core.constants import (
ALL_REGION_ALLOWED_TYPE_IDS,
SECURITY_GROUP_TRA_HEAD_OF_INVESTIGATION,
SECURITY_GROUP_TRA_LEAD_INVESTIGATOR,
SECURITY_GROUPS_TRA,
SECURITY_GROUP_TRA_ADMINISTRATOR,
SECURITY_GROUPS_TRA_ADMINS,
SECURITY_GROUP_ORGANISATION_OWNER,
SUBMISSION_TYPE_QUESTIONNAIRE,
SUBMISSION_TYPE_APPLICATION,
SUBMISSION_NOTICE_TYPE_INVITE,
SUBMISSION_NOTICE_TYPE_DEFICIENCY,
SUBMISSION_TYPE_THIRD_PARTY,
CASE_ROLE_AWAITING_APPROVAL,
CASE_ROLE_REJECTED,
CASE_ROLE_APPLICANT,
CASE_ROLE_PREPARING,
DIRECTION_TRA_TO_PUBLIC,
)
from trade_remedies_client.mixins import TradeRemediesAPIClientMixin
from trade_remedies_client.exceptions import APIException
logger = logging.getLogger(__name__)
org_fields = json.dumps(
{
"Organisation": {
"id": 0,
"has_non_draft_subs": 0,
"gov_body": 0,
"has_roi": 0,
}
}
)
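# org_fields (like the other *_fields blobs defined per view below) appears to be a
# JSON field-selection spec passed as the `fields` argument to API client calls,
# restricting which attributes each call returns.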
class CasesView(LoginRequiredMixin, TemplateView, TradeRemediesAPIClientMixin):
template_name = "cases/cases.html"
def get(self, request, *args, **kwargs):
list_mode = request.GET.get("tab", "my")
panel_layout = self.client(self.request.user).get_system_boolean("PRE_RELEASE_PANELS")
fields = {
"Case": {
"id": 0,
"user_case": 0,
"name": 0,
"reference": 0,
"created_at": 0,
"type": {
"name": 0,
"acronym": 0,
"colour": 0,
"reference": 0,
"applicant": {"organisation": 0, "name": 0, "id": 0},
},
"applicant": {
"organisation": {
"name": 0,
"id": 0,
}
},
"stage": {"name": 0},
"case_status": {"next_action": 0, "next_notice_due": 0},
}
}
if list_mode == "archived":
fields = deep_update(
fields,
{
"Case": {
"workflow_state": {
"MEASURE_EXPIRY": 0,
"DETERMINATION_ACTIVE_DATE": 0,
}
}
},
)
cases = self.client(request.user).get_cases(
archived=list_mode == "archived",
all_cases=list_mode == "all",
new_cases=list_mode == "new",
fields=json.dumps(fields),
)
tabs = {
"value": list_mode,
"tabList": [
{"label": "Your cases", "value": "my", "sr_text": "Show your cases"},
{"label": "Open cases", "value": "all", "sr_text": "Show open cases"},
{
"label": "New applications",
"value": "new",
"sr_text": "Show new applications",
},
{
"label": "Archived",
"value": "archived",
"sr_text": "Show archived cases",
},
],
}
template_name = self.template_name if panel_layout else "cases/cases_old.html"
body_class = "full-width kill-footer" if panel_layout else "full-width"
return render(
request,
template_name,
{
"body_classes": body_class,
"cases": cases,
"tabs": tabs,
},
)
class CaseBaseView(
LoginRequiredMixin,
GroupRequiredMixin,
PermissionRequiredMixin,
TemplateView,
TradeRemediesAPIClientMixin,
):
permission_required = []
groups_required = SECURITY_GROUPS_TRA
supress_nav_section = False
def dispatch(self, *args, **kwargs):
if self.request.user.is_authenticated:
self._client = self.client(self.request.user)
self.case_id = kwargs.get("case_id")
return super().dispatch(*args, **kwargs)
def get(self, request, *args, **kwargs):
self.kwargs = kwargs
self.organisation_id = kwargs.get("organisation_id")
self.request = request
self.user_token = request.user.token
case_fields = json.dumps(
{
"Case": {
"id": 0,
"name": 0,
"initiated_at": 0,
"decision_to_initiate,name": 0,
"reference": 0,
"sequence": 0,
"type": 0,
"archived_at": 0,
"archive_reason": {"name": 0},
"submission_count": 0,
"participant_count": 0,
"stage": {"name": 0},
"case_status": 0,
"organisation": {"id": 0, "name": 0},
}
}
)
self.case = self._client.get_case(self.case_id, fields=case_fields)
self.document_count = self._client.get_case_document_count(self.case_id)
self.start = int(request.GET.get("start", 0))
self.limit = int(request.GET.get("limit", 20))
content_id = self.kwargs.get("nav_section_id")
context = {
"case": self.case,
"case_id": self.case_id,
"document_count": self.document_count,
"content": self._client.get_case_content(self.case_id, content_id=content_id),
"tree": self._client.get_nav_section(self.case_id, selected_content=content_id),
"body_classes": "full-width",
"panel_layout": self._client.get_system_boolean("PRE_RELEASE_PANELS"),
"organisation_id": self.organisation_id,
"submission_group_name": "submission",
"alert": request.GET.get("alert"),
"user": request.user,
}
deep_update(context, self.add_page_data())
if context.get("redirect"):
return redirect(context.get("redirect"))
return render(request, self.template_name, context)
def add_page_data(self):
return {}
def get_documents(self, submission, all_versions=None):
result = self._client.get_submission_documents(
self.case_id, submission.get("id"), all_versions=all_versions
)
all_documents = result.get("documents", [])
deficiency_docs = result.get("deficiency_documents", [])
if all_versions:
# If this submission has an immediate ancestor, get the docs from that to mark status
docs_by_submission = deep_index_items_by(all_documents, "version")
this_version = int(submission.get("version"))
this_sub = docs_by_submission.get(str(this_version))
sub_docs = this_sub[0].get("documents")
# we have a list of the submissions that make up a family - id, version and documents.
if this_version > 1:
parent_sub = docs_by_submission.get(str(this_version - 1))
parent_docs = parent_sub and parent_sub[0].get("documents")
parent_doc_idx = {}
for parent_doc in parent_docs:
doc_type = get(parent_doc, "type/name") + "|" + get(parent_doc, "name")
parent_doc_idx[doc_type] = parent_doc
for document in sub_docs:
document["parent"] = parent_doc_idx.get(
get(document, "type/name") + "|" + get(document, "name")
)
else:
sub_docs = all_documents
submission_documents = deep_index_items_by(sub_docs, "type/key")
document_conf_index = deep_index_items_by(
submission_documents.get("respondent", []), "confidential"
)
confidential = document_conf_index.get("true", [])
confidential.sort(key=lambda cf: cf.get("name"))
non_conf = document_conf_index.get("", [])
doc_index = key_by(confidential, "id")
non_conf.sort(key=lambda nc: get(get(doc_index, str(nc.get("parent_id"))), "name"))
return {
"caseworker": submission_documents.get("caseworker", []),
"respondent": submission_documents.get("respondent", []),
"loa": submission_documents.get("loa", []),
"deficiency": deficiency_docs,
"confidential": confidential,
"nonconfidential": non_conf,
}
def has_permission(self):
"""
Override this method to customize the way permissions are checked.
"""
perms = self.get_permission_required()
return not perms or self.request.user.has_perms(perms)
class CaseAdminView(CaseBaseView):
groups_required = SECURITY_GROUPS_TRA
permission_required = ("case_admin",)
template_name = "cases/admin.html"
def add_page_data(self):
case_enums = self._client.get_all_case_enums()
case_users = self._client.get_case_users(self.case["id"])
context = {
"case_enums": case_enums,
"case": self.case,
"users": case_users,
"participants": self._client.get_case_participants(self.case_id),
}
return context
def post(self, request, case_id, *args, **kwargs):
action = request.POST.get("action")
case = self._client.get_case(case_id)
update_spec = {}
if action == "initiation_flag_toggle":
if case["initiated_at"]:
update_spec["initiated_at"] = ""
else:
update_spec["initiated_at"] = timezone.now()
elif action == "set_case_stage":
update_spec["ignore_flow"] = request.POST.get("ignore_flow") or "false"
update_spec["stage_id"] = request.POST.get("stage_id")
elif action == "set_name":
update_spec["name"] = request.POST.get("name")
elif action == "set_case_type":
update_spec["stage_id"] = ""
update_spec["type_id"] = request.POST.get("type_id")
elif action == "toggle_archived":
if case.get("archived_at"):
update_spec["archived_at"] = ""
else:
update_spec["archived_at"] = timezone.now()
update_spec["archive_reason_id"] = request.POST.get("archive_reason_id")
elif action == "reset_initiation_decision":
update_spec["reset_initiation_decision"] = True
if update_spec:
response = self._client.update_case(case_id, update_spec)
return redirect(f"/case/{case_id}/admin/")
class CaseMilestoneDatesView(CaseBaseView):
groups_required = SECURITY_GROUPS_TRA
permission_required = ("case_admin",)
template_name = "cases/milestone_dates.html"
def add_page_data(self):
case_enums = self._client.get_all_case_enums(self.case_id)
case_milestones = self._client.case_milestones(self.case["id"])
existing_keys = [cm["key"] for cm in case_milestones]
context = {
"milestone_types": case_enums.get("milestone_types"),
"available_review_types": case_enums.get("available_review_types"),
"milestones": case_milestones,
"existing_milestones": existing_keys,
}
return context
def post(self, request, case_id, milestone_key=None):
milestone_key = milestone_key or request.POST.get("milestone_key")
date = request.POST.get("date")
response = self._client.set_case_milestone(case_id, milestone_key, date)
return redirect(f"/case/{case_id}/milestones/")
class CaseView(CaseBaseView):
groups_required = SECURITY_GROUPS_TRA
permission_required = []
template_name = "cases/case.html"
extra_case_fields = json.dumps(
{
"Case": {
"applicant": {
"organisation": {
"id": 0,
"name": 0,
"primary_contact": {
"name": 0,
"email": 0,
"phone": 0,
"address": 0,
"post_code": 0,
"country": {"name": 0},
"has_user": 0,
"user": {"id": 0, "organisation": {"id": 0, "name": 0}},
},
}
},
"parent": {"id": 0, "name": 0, "reference": 0, "type": 0},
"workflow_state": {"LINKED_CASE_CONFIRM": 0},
"initiated_sequence": 0,
}
}
)
def add_page_data(self):
team = self._client.get_case_team_members(self.case_id)
team_by_group = index_users_by_group([member.get("user") for member in team])
group_order = [
SECURITY_GROUP_TRA_ADMINISTRATOR,
SECURITY_GROUP_TRA_HEAD_OF_INVESTIGATION,
SECURITY_GROUP_TRA_LEAD_INVESTIGATOR,
]
case_extras = self._client.get_case(self.case_id, fields=self.extra_case_fields)
return {
"audit": self._client.get_audit(
case_id=self.case_id, start=0, limit=20, milestone=True
),
"case_page": True,
"case": case_extras,
"team_groups": team_by_group,
"group_order": group_order,
"public_base_url": settings.PUBLIC_BASE_URL,
}
def post(self, request, case_id, *args, **kwargs):
self._client.set_case_data(case_id, {"name": request.POST.get("name")})
redirect = request.POST.get("redirect")
if redirect:
return internal_redirect(request.POST.get("redirect"), "/")
else:
return HttpResponse(json.dumps({"result": "ok"}), content_type="application/json")
class PartiesView(CaseBaseView):
groups_required = SECURITY_GROUPS_TRA
template_name = "cases/parties.html"
def add_page_data(self):
parties = []
roles = self._client.get_case_roles()
all_case_invites = self._client.get_contact_case_invitations(self.case_id)
all_participants = self._client.get_case_participants(self.case_id, fields=org_fields)
case_invites = deep_index_items_by(all_case_invites, "contact/id")
invited = set([])
accepted = set([])
for invite in all_case_invites:
org_id = invite.get("organisation", {}).get("id")
if invite.get("accepted_at"):
# note: accepted and invited are mutually exclusive
accepted.add(org_id)
else:
invited.add(org_id)
for role in roles:
_base = all_participants[role["key"]]
_base["key"] = role["key"]
_base["name"] = role["plural"]
if role["allow_cw_create"]:
_base["add_link"] = f"Add {role['name']}"
parties.append(_base)
return {
"party_types": parties,
"invites": case_invites,
"accepted_orgs": list(accepted),
"invited_orgs": list(invited),
"pre_release_invitations": self._client.get_system_boolean("PRE_RELEASE_INVITATIONS"),
"alert": self.request.GET.get("alert"),
}
class CaseTeamView(CaseBaseView):
permission_required = "can_assign_team"
template_name = "cases/team.html"
def add_page_data(self):
all_users = self._client.get_all_users(group_name="caseworker")
users_by_group = index_users_by_group(all_users)
team = self._client.get_case_team_members(self.case_id)
return {
"team": [member.get("user", {}).get("id") for member in team],
"tra_users": all_users,
"grouped_users": users_by_group,
"groups": SECURITY_GROUPS_TRA[1:],
"inactive_user_count": sum(user["active"] is False for user in all_users),
"singleton_groups": [
SECURITY_GROUP_TRA_HEAD_OF_INVESTIGATION,
SECURITY_GROUP_TRA_ADMINISTRATOR,
],
}
def post(self, request, case_id, *args, **kwargs):
user_ids = request.POST.getlist("user_id")
response = self._client.assign_case_team(case_id, user_ids)
return redirect(f"/case/{case_id}/")
class SubmissionsView(CaseBaseView):
groups_required = SECURITY_GROUPS_TRA
template_name = "cases/submissions.html"
show_global = False
sub_page = ""
def get_tab(self, role, party):
if not role.get("allow_cw_create"):
return role["key"]
return "sampled" if party.get("sampled") else "not_sampled"
def consolidate_submissions(
self, case, participants, submissions_by_party, counts, selected_tab
):
roles = []
single_role_return = None # for awaiting and rejected - only return that specific role
for role in self._client.get_case_roles():
role["participants"] = []
for party in participants.get(role["key"], {}).get("parties", []):
tab = self.get_tab(role, party)
submissions = submissions_by_party.get(party["id"], [])
submissions += submissions_by_party.get("", [])
if submissions:
counts[tab] = counts.get(tab, 0) + len(submissions)
if tab == selected_tab:
party["submissions"] = submissions
role["participants"].append(party)
if not party.get("gov_body"):
role["customer_parties"] = True
sort_key = (
"submissions/0/received_at"
if selected_tab == CASE_ROLE_AWAITING_APPROVAL
else "name"
)
role["participants"].sort(key=lambda pt: get(pt, sort_key) or "")
if role.get("key") == selected_tab:
single_role_return = role
if role.get("allow_cw_create"):
roles.append(role)
return [single_role_return] if single_role_return else roles
def get_name(self, participant):
return participant.get("name")
def flatten_participants(self, source):
participants = []
for role in source:
rec = source[role]
participants = participants + rec["parties"]
participants.sort(key=self.get_name)
return participants
def divide_submissions(self, submissions):
incoming = []
outgoing = []
draft = []
for submission in submissions:
if get(submission, "status/sent"):
outgoing.append(submission)
elif get(submission, "status/default") and get(submission, "type/direction") != 1:
draft.append(submission)
else:
if (
not get(submission, "status/draft")
or get(submission, "type/key") == "application"
): # customer draft should not be seen by investigators
incoming.append(submission)
return {
"incoming": sorted(incoming, key=lambda su: su.get("received_at") or "", reverse=True),
"outgoing": sorted(outgoing, key=lambda su: su.get("sent_at") or "", reverse=True),
"draft": sorted(draft, key=lambda su: su.get("created_at") or "", reverse=True),
}
def add_page_data(self):
tab = self.request.GET.get("tab", "sampled").lower()
all_submissions = self._client.get_submissions(self.case_id, show_global=True)
submissions_by_type = deep_index_items_by(all_submissions, "type/name")
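        # deep_index_items_by groups a list of dicts by a nested key path, so the call
        # above yields something like {"application": [...], "questionnaire": [...]}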
# Get submissions that have just been created by customer
# or are still in draft after creation
draft_submissions = deep_index_items_by(all_submissions, "status/default").get("true") or []
# Remove any that are back with the customer following deficiency
draft_first_version_submissions = (
deep_index_items_by(draft_submissions, "version").get("1") or []
)
# Exclude these drafts from our list
non_draft_submissions = [
sub for sub in all_submissions if sub not in draft_first_version_submissions
]
# draft applications are included to allow a heads up view
# to the caseworker before it's submitted
if submissions_by_type.get("application", [{}])[0].get("status", {}).get("default") is True:
submissions_by_type["application"][0]["tra_editable"] = True
non_draft_submissions += submissions_by_type["application"]
submissions_by_party = deep_index_items_by(non_draft_submissions, "organisation/id")
case_enums = self._client.get_all_case_enums()
invites = self._client.get_case_invite_submissions(self.case_id)
participants = self._client.get_case_participants(self.case_id, fields=org_fields)
flat_participants = self.flatten_participants(participants)
counts = {}
if self.sub_page:
self.template_name = f"cases/submissions_{self.sub_page}.html"
tab = self.request.GET.get("tab", "incoming").lower()
elif self._client.get_system_boolean("PRE_NEW_SUBMISSION_PAGE"):
self.template_name = "cases/submissions_new.html"
context = {
"raw_participants": participants,
"submissions": submissions_by_type,
"participants": flat_participants,
"counts": counts,
"all_roles": self.consolidate_submissions(
self.case,
participants=participants,
submissions_by_party=submissions_by_party,
counts=counts,
selected_tab=tab,
),
"submission_types": case_enums["case_worker_allowed_submission_types"],
"invites": invites,
"tab": tab,
"submission_groups": self.divide_submissions(all_submissions),
"all_submissions": all_submissions,
}
# TODO: Temp handling of application vs ex_officio ones
if not submissions_by_type.get("application") and submissions_by_type.get(
"ex officio application"
):
context["submissions"]["application"] = submissions_by_type["ex officio application"]
return context
class SubmissionView(CaseBaseView):
"""
View and modify submissions
"""
groups_required = SECURITY_GROUPS_TRA
template_name = "cases/submission.html"
extra_case_fields = json.dumps(
{
"Case": {
"applicant": 0,
"product": 0,
"sources": 0,
}
}
)
def add_page_data_old(self):
alert = self.request.GET.get("alert") # indicates the submission has just been created
documents = []
submission = {}
submission_id = self.kwargs.get("submission_id")
third_party_invite = False
if submission_id:
submission = self._client.get_submission(self.case_id, submission_id)
submission_type = submission["type"]
third_party_invite = submission_type["name"] == "Invite 3rd party"
self.organisation_id = submission["organisation"]["id"]
created_by_id = get(submission, "created_by/id")
if created_by_id:
full_user = self._client.get_user(created_by_id)
submission["created_by"]["organisation"] = get(full_user, "organisations/0")
submission_context = {}
if SUBMISSION_TYPE_HELPERS.get(submission_type["key"]):
submission_context = SUBMISSION_TYPE_HELPERS[submission_type["key"]](
submission, self.request.user
).get_context()
self.template_name = "cases/submission.html"
case_extras = self._client.get_case(self.case_id, fields=self.extra_case_fields)
context = {
"submission": submission,
"template_name": f"{submission_type['key']}",
"documents": self.get_documents(submission=submission, all_versions=True),
"alert": alert,
"case": case_extras,
"third_party_invite": third_party_invite,
**submission_context,
}
if (
not submission
or not submission.get("status")
or submission.get("status", {}).get("default")
):
context["mode"] = "form"
else:
context["mode"] = "view"
if self.organisation_id:
self.organisation = self._client.get_organisation(self.organisation_id)
context["organisation"] = self.organisation
context["organisation_id"] = str(self.organisation["id"])
return context
def get_all_participants(self, case_participants):
all_parties = []
roles = {}
for type_name, role_parties in case_participants.items():
parties = role_parties.get("parties")
if parties:
all_parties.extend(parties)
role = parties[0].get("role")
roles[role.get("key")] = role
return deep_index_items_by(all_parties, "sampled"), roles
def add_page_data(self):
case_enums = self._client.get_all_case_enums()
submission = {}
participants = self._client.get_case_participants(self.case_id, fields=org_fields)
parties, roles = self.get_all_participants(participants)
alert = self.request.GET.get("alert") # indicates the submission has just been created
virus = self.request.GET.get("virus")
upload_error = self.request.GET.get("upload_error")
return_data = {
"virus": virus,
"upload_error": upload_error,
}
submission_id = self.kwargs.get("submission_id")
if submission_id:
submission = self._client.get_submission(self.case_id, submission_id)
json_data = from_json(submission.get("deficiency_notice_params"))
_default = submission.get("status", {}).get("default")
if not _default or (
_default and submission["type"]["id"] == SUBMISSION_TYPE_APPLICATION
):
page_data = self.add_page_data_old()
return_data.update(page_data)
return return_data
self.organisation_id = submission["organisation"]["id"]
return_data.update(
{
"roles": roles,
"submission": submission,
"status": (submission.get("status") or {}).get("id"),
"alert": alert,
"documents": self.get_documents(submission=submission),
"role": submission.get("organisation_case_role") or {"name": "Public file"},
"participants": participants,
"all_participants": parties,
"json_data": json_data,
"selected_submission_type": submission.get("type", {}).get("key")
or "questionnaire",
}
)
else:
role = self.request.GET.get("for")
sampled = self.request.GET.get("sampled") == "sampled"
full_role = (
self._client.get_case_role(role)
if (role and role != "public")
else {"name": "Public file"}
)
case_enums = self._client.get_all_case_enums(direction=DIRECTION_TRA_TO_PUBLIC)
# Get all draft submissions of this type
all_submissions = self._client.get_submissions(self.case_id, show_global=True)
draft_submissions = (
deep_index_items_by(all_submissions, "status/default").get("true") or []
)
# draft_submissions_this_role = deep_index_items_by(draft_submissions,
# 'organisation_case_role/key').get('' if role == 'public' else role)
draft_submissions_this_role = deep_index_items_by(
draft_submissions, "organisation_id"
).get("")
return_data.update(
{
"submission": submission,
"submission_type_id": self.kwargs.get("submission_type_id")
or self.request.GET.get("submission_type_id"),
"submission_statuses": case_enums["submission_statuses"],
"statuses_by_type": case_enums["statuses_by_type"],
"selected_submission_type": self.request.GET.get("submission_type")
or "questionnaire",
"organisation_id": self.kwargs.get("organisation_id"),
"draft_submissions": draft_submissions_this_role,
"role": full_role,
}
)
if role == "public":
return_data.update(
{
"submission_types": case_enums["public_submission_types"],
"public": True,
"organisation_id": self.kwargs.get("organisation_id"),
}
)
else:
role_participants = participants.get(role, {}).get("parties", [])
filtered = list(
filter(
lambda party: party
if party.get("sampled") == sampled and not party.get("gov_body")
else None,
role_participants,
)
)
return_data.update(
{
"submission_types": case_enums["case_worker_allowed_submission_types"],
"participants": participants,
"roles": roles,
}
)
self.organisation_id = self.organisation_id or self.request.GET.get("organisation_id")
if self.organisation_id:
self.organisation = self._client.get_organisation(self.organisation_id)
return_data["organisation"] = self.organisation
return_data["organisation_id"] = str(self.organisation["id"])
# add errors from the url
errors = self.request.GET.get("errors")
if errors:
try:
return_data["errors"] = json.loads(errors)
except Exception as ex:
pass
# Set up template to use
template_name = (
submission["type"]["key"]
if submission
else (role if role == "public" else "questionnaire")
)
return_data.update({"template_name": template_name, "mode": "form"})
return return_data
def post( # noqa: C901
self,
request,
case_id,
submission_id=None,
organisation_id=None,
*args,
**kwargs,
):
"""
Update an existing submission
"""
return_data = {"submission_id": str(submission_id)}
contact_id = request.POST.get("contact_id")
btn_value = request.POST.get("btn-value")
review = request.POST.get("review")
name = request.POST.get("name")
due_at = request.POST.get("due_at")
response_window_yn = request.POST.get("response_window_yn")
time_window = request.POST.get("time_window")
meta_raw = request.POST.getlist("meta")
meta = [json.loads(block) for block in meta_raw]
file_details = deep_index_items_by(meta, "name")
file_details_by_id = deep_index_items_by(meta, "file/id")
organisation_id = organisation_id or request.POST.get("organisation_id")
send_to = request.POST.get("send_to")
submission = self._client.get_submission(case_id, submission_id)
organisation_id = submission.get("organisation", {}).get("id")
status_id = request.POST.get("submission_status_id")
if submission_id and btn_value == "discard":
delete_submission_response = self._client.delete_submission(
case_id=case_id, submission_id=submission_id
)
return HttpResponse(
json.dumps({"redirect_url": f"/case/{case_id}/submissions/"}),
content_type="application/json",
)
# check if the update is for name or notify contact
if (
submission["name"] != name
or not submission["contact"]
or submission.get("contact", {}).get("id") != contact_id
):
if name is not None and not name:
return_data.update({"errors": '{"name":"You must enter a name"}'})
if due_at and not is_date(due_at):
return_data.update({"errors": '{"due_date":"Invalid date"}'})
if not return_data.get("errors"):
self._client.update_submission(
case_id=case_id,
submission_id=submission_id,
name=name,
contact_id=contact_id, # TODO:not used
due_at=due_at,
time_window=time_window,
description=request.POST.get("description"),
url=request.POST.get("url"),
)
# API `update_submission` returns an incomplete submission
# (no documents) so we re-fetch the submission here.
submission = self._client.get_submission(case_id, submission_id)
return_data.update({"submission": submission})
if submission.get("id"):
for _file in request.FILES.getlist("files"):
try:
_file.readline() # Important, will raise VirusFoundInFileException if infected
original_file_name = _file.original_name
details = file_details.get(original_file_name.lower())[0]
confidential = details.get("confidential")
document_type = details.get("submission_document_type")
document = self._client.upload_document(
case_id=str(case_id),
submission_id=submission_id,
organisation_id=str(organisation_id),
data={
"name": "Questionnaire",
"confidential": confidential,
"submission_document_type": document_type,
"document_name": original_file_name,
"file_name": _file.name,
"file_size": _file.file_size,
},
)
except (VirusFoundInFileException, APIException) as e:
redirect_url = f"/case/{case_id}/submission/{submission_id}/?"
if isinstance(e, VirusFoundInFileException):
redirect_url += "virus=true"
else:
redirect_url += f"upload_error={e}"
logger.warning(f"File upload aborted: {e}")
return HttpResponse(
json.dumps({"redirect_url": redirect_url}),
content_type="application/json",
)
if case_files := request.POST.getlist("case_files"):
for case_file_id in case_files:
details = (file_details_by_id.get(case_file_id) or [])[0]
document = self._client.attach_document(
case_id=str(case_id),
submission_id=submission_id,
organisation_id=str(organisation_id),
data={"submission_document_type": details.get("submission_document_type")},
document_id=case_file_id,
)
submission_group_name = get(submission, "type/key")
if btn_value in ["send", "publish", "withdraw"]:
if btn_value in ["publish", "withdraw"]:
result = self._client.set_submission_state(
case_id,
submission_id,
"sent"
if (btn_value == "send" or submission_group_name == "public")
else "",
{"publish": "issue", "withdraw": "un-issue"}[btn_value],
)
result = self._client.update_submission(
case_id=case_id, submission_id=submission_id
)
return_data.update(
{
"redirect_url": f"/case/{case_id}/submission/{submission['id']}/?alert={btn_value}" # noqa: E301, E501
}
)
if btn_value == "sufficient":
# Set the submission to sufficient
result = self._client.set_submission_state(case_id, submission_id, btn_value)
return_data.update({"alert": "Submission approved"})
submission_type = submission["type"]
type_helpers = SUBMISSION_TYPE_HELPERS.get(submission_type["key"])
if type_helpers:
return_data.update(
type_helpers(submission, self.request.user).on_approve() or {}
)
# Update submission document approvals
self.update_submission_status(request.POST, submission)
# set any deficiency-notice parameters
updated = False
deficiency_notice_params = from_json(submission.get("deficiency_notice_params"))
send_to = request.POST.getlist("send_to")
if send_to:
deficiency_notice_params["send_to"] = send_to
updated = True
regex = r"^deficiency_notice_params_"
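        # any POST field named "deficiency_notice_params_<key>" is folded into the JSON
        # blob under <key>; the sentinel value "__remove" deletes that key instead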
for param_key in request.POST:
matches = re.split(regex, param_key)
if len(matches) > 1:
value = request.POST[param_key]
updated = updated or (deficiency_notice_params.get(matches[1]) != value)
if value == "__remove":
if get(deficiency_notice_params, matches[1]):
deficiency_notice_params.pop(matches[1])
else:
deficiency_notice_params[matches[1]] = value
if updated:
update_submission_response = self._client.update_submission(
case_id=case_id,
submission_id=submission_id,
deficiency_notice_params=to_json(deficiency_notice_params),
)
if btn_value == "save-exit":
return_data.update({"redirect_url": f"/case/{case_id}/submissions"})
if deficiency_notice_params:
return_data.update(
{"redirect_url": f"/case/{case_id}/submission/{submission_id}"}
)
return HttpResponse(json.dumps(return_data), content_type="application/json")
def update_submission_status(self, request_params, submission):
"""Update submission document statuses.
For each document in the submission review, examine response to
establish if it was marked sufficient/deficient. Call API to update
submission document status if it has changed.
:param (dict) request_params: request parameters
:param (dict) submission: submission
"""
submission_docs = {doc["id"]: doc for doc in submission.get("documents")}
for doc_id in request_params:
if doc_id in submission_docs:
current_status = submission_docs[doc_id]["sufficient"]
new_status = request_params[doc_id] == "yes"
if current_status != new_status:
self._client.set_submission_document_state(
case_id=submission["case"]["id"],
submission_id=submission.get("id"),
document_id=doc_id,
status="sufficient" if new_status else "deficient",
block_from_public_file=submission_docs.get("block_from_public_file"),
block_reason=submission_docs.get("block_reason"),
)
class SubmissionCreateView(SubmissionView):
groups_required = SECURITY_GROUPS_TRA
def post(self, request, case_id, *args, **kwargs):
btn_value = request.POST.get("btn-value")
send_to = request.POST.getlist("send_to")
organisation_id = request.POST.get("organisation_id")
submission_data = {
"submission_type": int(
request.POST.get("submission_type_id", SUBMISSION_TYPE_QUESTIONNAIRE)
),
"case_id": str(case_id),
"organisation_id": str(organisation_id) if organisation_id else None,
"contact_id": request.POST.getlist("contact_id"),
"public": request.POST.get("public"),
}
if send_to:
submission_data["deficiency_notice_params"] = to_json(
{"send_to": send_to, "case_role": request.POST.get("role_key")}
)
result = self._client.create_submission(**submission_data)
submission = result.get("submission", {}) if result else {}
return HttpResponse(
json.dumps(
{
"submission_id": submission.get("id"),
"redirect_url": f"/case/{case_id}/submission/{submission['id']}/",
}
),
content_type="application/json",
)
class SubmissionDocumentView(CaseBaseView):
groups_required = SECURITY_GROUPS_TRA
def post(self, request, case_id, submission_id, organisation_id=None, *args, **kwargs):
response = {}
document_list_json = request.POST.get("document_list")
if document_list_json:
document_list = json.loads(document_list_json)
for doc_id, doc_status in document_list.items():
logger.debug(f"update document state {doc_id}")
response = self._client.set_submission_document_state(
case_id=case_id,
submission_id=submission_id,
document_id=doc_id,
status=doc_status["status"],
block_from_public_file=doc_status["block_from_public_file"],
block_reason=doc_status["block_reason"],
)
return HttpResponse(json.dumps(response), content_type="application/json")
def delete(self, request, case_id, submission_id, document_id, *args, **kwargs):
response = self._client.detach_document(
case_id=case_id, submission_id=submission_id, document_id=document_id
)
return HttpResponse(json.dumps(response), content_type="application/json")
class SubmissionStatusView(CaseBaseView):
groups_required = SECURITY_GROUPS_TRA
def post(self, request, case_id, submission_id, *args, **kwargs):
stage_change_if_sufficient = request.POST.get("stage_change_if_sufficient")
stage_change_if_deficient = request.POST.get("stage_change_if_deficient")
submission = self._client.get_submission(case_id, submission_id)
status_id = request.POST.get("submission_status_id")
if submission.get("status", {}).get("id") != status_id:
status_response = self._client.set_submission_status(
case_id=case_id,
submission_id=submission_id,
status_id=status_id,
stage_change_if_sufficient=stage_change_if_sufficient,
stage_change_if_deficient=stage_change_if_deficient,
deficiency_documents=request.FILES.getlist("deficiency_document"),
issue=request.POST.get("issue"),
)
if status_response.get("submission"):
submission_id = status_response["submission"]["id"]
return redirect(f"/case/{case_id}/submission/{submission_id}/")
class SubmissionApprovalView(CaseBaseView):
groups_required = SECURITY_GROUPS_TRA
template_name = "cases/submission.html"
def add_page_data(self):
submission_id = self.kwargs.get("submission_id")
submission = self._client.get_submission(self.case_id, submission_id)
sub_documents = self._client.get_submission_documents(self.case_id, submission_id)
documents = sub_documents.get("documents", [])
submission.update(sub_documents)
case_enums = self._client.get_all_case_enums()
submission_type_id = submission["type"]["id"]
status_map = case_enums["submission_status_map"]
status_options = status_map.get(str(submission_type_id), {}).get("keys", [])
status_context = status_map.get(str(submission_type_id))
submission_documents = self.get_documents(submission=submission)
context = {
"template_name": submission["type"]["key"],
"mode": "approval",
"submission": submission,
"case_enums": case_enums,
"status_context": status_context,
"documents": submission_documents,
}
return context
class SubmissionDeficiencyView(CaseBaseView):
"""
Set the submission into a deficiency status and notify the party about it.
"""
groups_required = SECURITY_GROUPS_TRA
raise_exception = True
def get(self, request, case_id, submission_id, *args, **kwargs):
submission = self._client.get_submission(case_id, submission_id)
submission_type = submission["type"]
contact = submission_contact(submission)
contact_name = contact.get("name")
organisation_name = submission.get("organisation", {}).get("name") or (
contact.get("organisation") or {}
).get("name")
notification_template = self._client.get_notification_template(
"NOTIFY_SUBMISSION_DEFICIENCY"
)
template_name = f"cases/submissions/{submission_type['key']}/notify.html"
due_at = get_submission_deadline(submission, settings.FRIENDLY_DATE_FORMAT)
case_number = submission["case"]["reference"]
email = notify_contact_email(self._client, case_number)
footer = notify_footer(self._client, email)
values = {
"full_name": contact_name,
"case_name": submission["case"]["name"],
"case_number": case_number,
"company_name": organisation_name,
"deadline": due_at or "No deadline assigned",
"submission_type": submission.get("type", {}).get("name"),
"login_url": public_login_url(),
"footer": footer,
}
context = {
"form_action": f"/case/{case_id}/submission/{submission_id}/status/notify/",
"form_title": f"Deficiency Notice for {organisation_name}",
"cancel_redirect_url": f"/case/{case_id}/submission/{submission_id}/",
"editable_fields": { # leaving one as a future example
# 'full_name': {'title': 'Name'},
},
"notification_template": notification_template,
"submission": submission,
"case_id": str(case_id),
"contact": contact,
"values": values,
"parsed_template": parse_notify_template(notification_template["body"], values),
}
return render(request, template_name, context)
def post(self, request, case_id, submission_id, *args, **kwargs):
stage_change_if_sufficient = request.POST.get("stage_change_if_sufficient")
stage_change_if_deficient = request.POST.get("stage_change_if_deficient")
submission = self._client.get_submission(case_id, submission_id)
notify_keys = [
"full_name",
"case_name",
"case_number",
"company_name",
"deadline",
"submission_type",
"login_url",
]
notify_data = {key: request.POST.get(key) for key in notify_keys}
if request.POST.get("contact_id"):
notify_data["contact_id"] = request.POST["contact_id"]
case_enums = self._client.get_all_case_enums()
submission_type_id = submission["type"]["id"]
status_map = case_enums["submission_status_map"]
status_context = status_map.get(str(submission_type_id))
status_id = status_context.get("NO")
error = None
if status_id:
if submission.get("status", {}).get("id") != status_id:
status_response = self._client.set_submission_status(
case_id=case_id,
submission_id=submission_id,
status_id=status_id,
stage_change_if_sufficient=stage_change_if_sufficient,
stage_change_if_deficient=stage_change_if_deficient,
)
self._client.submission_notify(
case_id=case_id,
organisation_id=submission["organisation"]["id"],
submission_id=submission["id"],
values=notify_data,
notice_type=SUBMISSION_NOTICE_TYPE_DEFICIENCY,
)
# reset the submission id to redirect to the new clone if available
if status_response.get("submission"):
submission_id = status_response["submission"]["id"]
return HttpResponse(
json.dumps(
{
"redirect_url": f"/case/{case_id}/submission/{submission_id}/",
}
),
content_type="application/json",
)
# If there's no deficiency state for this submission type, return an error
return HttpResponse(
json.dumps(
{
"error": "No deficiency status for this submission type",
}
),
content_type="application/json",
)
class SubmissionVerifyBaseView(CaseBaseView):
groups_required = SECURITY_GROUPS_TRA
def get_submission_id(self, case_id=None, organisation_id=None):
submission_id = self.kwargs.get("submission_id")
if not submission_id:
# If this is called from the party page - there is no submission id
# so find from the org/case
submissions = self._client.get_submissions_public(
organisation_id=organisation_id,
case_id=case_id,
fields=json.dumps({"id": 0, "type": {"key": 0}}),
)
for submission in submissions:
if get(submission, "type/key") in ["interest", "application"]:
submission_id = submission.get("id")
break # we only want one reg-of-interest submission
return submission_id
def update_submission_json(self, case_id, submission, params):
regex = r"^deficiency_notice_params_"
deficiency_notice_params = submission.get("deficiency_notice_params") or {}
updated = False
response = None
for param_key in params:
matches = re.split(regex, param_key)
if len(matches) > 1:
value = params[param_key]
updated = updated or (deficiency_notice_params.get(matches[1]) != value)
deficiency_notice_params[matches[1]] = value
if updated:
response = self._client.update_submission(
case_id=case_id,
submission_id=get(submission, "id"),
deficiency_notice_params=to_json(deficiency_notice_params),
)
return response
class SubmissionVerifyViewTasks(SubmissionVerifyBaseView):
"""
    Used to verify a user's and an organisation's admission to a case
"""
template_name = "cases/verify/submission_verify_tasks.html"
submission_fields = json.dumps(
{
"Submission": {
"id": 0,
"deficiency_notice_params": 0,
"organisation": {
"id": 0,
"name": 0,
},
"contact": {
"name": 0,
"email": 0,
"user": {
"name": 0,
"email": 0,
"id": 0,
"organisation": {
"organisation": {
"id": 0,
"name": 0,
}
},
},
"organisation": {
"id": 0,
"name": 0,
},
},
"case": 0,
"type": 0,
"created_by": 0,
"organisation_case_role_outer": 0,
}
}
)
def get(self, request, case_id, organisation_id, **kwargs):
submission_id = self.get_submission_id(case_id=case_id, organisation_id=organisation_id)
if not submission_id:
return HttpResponse(
json.dumps(
{
"error": "You cannot verify this organisation "
"as they have not yet registered interest in this case.",
}
),
content_type="application/json",
)
submission = self._client.get_submission(
self.case_id, submission_id, fields=self.submission_fields
)
json_data = submission.get("deficiency_notice_params") or {}
organisation = submission.get("organisation")
caserole = self._client.get_organisation_case_role(
case_id=case_id, organisation_id=get(submission, "organisation/id")
)
org_matches = self._client.get_organisation_matches(organisation_id, with_details="none")
return render(
request,
self.template_name,
{
"submission": submission,
"organisation": organisation,
"caserole": caserole,
"org_matches": org_matches,
"page_data": {
"submission": submission,
"organisation": organisation,
},
},
)
class SubmisisonVerifyEditLoaView(SubmissionVerifyBaseView):
def get(self, request, case_id, organisation_id):
submission_id = self.get_submission_id(case_id=case_id, organisation_id=organisation_id)
submission = self._client.get_submission(case_id, submission_id)
organisation = self._client.get_organisation(
case_id=case_id, organisation_id=organisation_id
)
documents = self.get_documents(submission)
caserole = self._client.get_organisation_case_role(
case_id=self.case_id, organisation_id=organisation_id
)
org_contacts = self._client.get_organisation_contacts(
organisation_id, case_id, exclude_indirect=True
)
return render(
request,
"cases/verify/loa.html",
{
"auth_contacts": org_contacts,
"organisation": organisation,
"documents": documents,
"LOA": caserole.get("auth_contact"),
"submission": submission,
},
)
def post(self, request, case_id, organisation_id, *args, **kwargs):
submission_id = self.get_submission_id(case_id=case_id, organisation_id=organisation_id)
submission = self._client.get_submission(case_id, submission_id)
self.update_submission_json(case_id, submission, request.POST)
result = self._client.set_organisation_case_role_loa(
case_id,
organisation_id,
pluck(
request.POST,
["LOA_contact_id", "name", "email", "address", "org_name", "phone"],
),
)
return HttpResponse(json.dumps(result))
class SubmisisonVerifyOrganisation(SubmissionVerifyBaseView):
enable_merge = False
def get(self, request, case_id, organisation_id):
test_org_id = request.GET.get("org_id") or organisation_id
submission_id = self.get_submission_id(case_id=case_id, organisation_id=organisation_id)
submission = self._client.get_submission(case_id, submission_id)
organisation = self._client.get_organisation(case_id=case_id, organisation_id=test_org_id)
if self.enable_merge:
org_matches = self._client.get_organisation_matches(test_org_id, with_details=True)
else:
org_matches = self._client.get_organisation_matches(test_org_id, with_details=False)
org_matches.sort(
key=lambda m: 1 if m.get("id") == test_org_id else 0
) # put the actual match at the end
matches = decorate_orgs(org_matches, test_org_id, exclude_case_id=case_id)
for match in matches:
if str(match.get("id")) == str(organisation.get("id")):
organisation.update(match)
return render(
request,
"cases/verify/merge_org.html" if self.enable_merge else "cases/verify/verify_org.html",
{
"case_id": self.case_id,
"organisation": organisation,
"match_list": matches,
"representing": test_org_id != organisation_id,
"json_data": submission.get("deficiency_notice_params"),
},
)
def post(self, request, case_id, organisation_id, *args, **kwargs):
test_org_id = request.POST.get("org_id") or organisation_id
submission_id = self.get_submission_id(case_id=case_id, organisation_id=organisation_id)
submission = self._client.get_submission(case_id, submission_id)
verify = request.POST.get("deficiency_notice_params_org_verify")
if verify == "verified":
self._client.verify_caserole(
case_id=case_id, organisation_id=get(submission, "organisation/id")
)
elif verify == "rejected":
result = self._client.reject_organisation(case_id, organisation_id)
result = self.update_submission_json(case_id, submission, request.POST)
return HttpResponse(json.dumps({"result": True}))
class SubmissionVerifyAccept(SubmissionVerifyBaseView):
def get(self, request, case_id, organisation_id):
submission_id = self.get_submission_id(case_id=case_id, organisation_id=organisation_id)
submission = self._client.get_submission(case_id, submission_id)
organisation = self._client.get_organisation(
case_id=case_id, organisation_id=organisation_id
)
caserole = self._client.get_organisation_case_role(
case_id=self.case_id, organisation_id=organisation_id
)
roles = self._client.get_case_roles(
exclude=[
CASE_ROLE_APPLICANT,
CASE_ROLE_AWAITING_APPROVAL,
CASE_ROLE_REJECTED,
CASE_ROLE_PREPARING,
]
)
return render(
request,
"cases/verify/accept.html",
{
"submission": submission,
"organisation": organisation,
"roles": roles,
"caserole": caserole,
"role_name": get(caserole, "role/name"),
},
)
def post(self, request, case_id, organisation_id, *args, **kwargs):
role_key = request.POST.get("role_key")
result = {}
result = self._client.set_organisation_case_role(
case_id, organisation_id, role_key, pluck(request.POST, ["approve"])
)
return HttpResponse(json.dumps(result))
class SubmissionVerifyNotify(SubmissionVerifyBaseView):
def get(self, request, case_id, organisation_id):
caserole = self._client.get_organisation_case_role(
case_id=self.case_id, organisation_id=organisation_id
)
role_name = get(caserole, "role/name")
action = (
"reject" if get(caserole, "role/key") == "rejected" else "accept"
) # Todo: get this from the right place
submission_id = self.get_submission_id(case_id=case_id, organisation_id=organisation_id)
submission = self._client.get_submission(case_id, submission_id)
case = self._client.get_case(case_id)
contact = submission_contact(submission)
organisation = self._client.get_organisation(
case_id=case_id, organisation_id=organisation_id
)
notify_key = (
"NOTIFY_INTERESTED_PARTY_REQUEST_PERMITTED"
if action == "accept"
else "NOTIFY_INTERESTED_PARTY_REQUEST_DENIED"
)
try:
notification_template = self._client.get_notification_template(notify_key)
values = self._client.create_notify_context(
{
"full_name": contact.get("name"),
"case_name": case.get("name"),
"case_number": case.get("reference"),
"company_name": organisation["name"],
"login_url": public_login_url(),
"role": role_name,
}
)
parsed_template = parse_notify_template(notification_template["body"], values)
except Exception as ex:
parsed_template = ""
# contacts for the notification contact selector
contacts = organisation.get("contacts", [])
user = self._client.get_user(get(submission, "created_by/id"))
contacts.append(user.get("contact"))
return render(
request,
"cases/verify/notify.html",
{
"parsed_template": parsed_template,
},
)
def post(self, request, case_id, organisation_id, *args, **kwargs):
submission_id = self.get_submission_id(case_id=case_id, organisation_id=organisation_id)
self._client.approve_submission(submission_id=submission_id)
return HttpResponse(json.dumps({"result": True}))
class SubmissionNotifyView(CaseBaseView):
groups_required = SECURITY_GROUPS_TRA
raise_exception = True
def get(self, request, case_id, submission_id, *args, **kwargs):
case = self._client.get_case(case_id)
submission = self._client.get_submission(case_id, submission_id)
json_data = from_json(submission.get("deficiency_notice_params"))
contact = None
contact_name = None
send_to = json_data.get("send_to")
if not send_to:
contact = submission_contact(submission)
contact_name = contact and contact.get("name")
submission_type = submission["type"]
notify_sys_param_name = submission_type.get("notify_template") or "NOTIFY_QUESTIONNAIRE"
notification_template = self._client.get_notification_template(notify_sys_param_name)
template_name = f"cases/submissions/{submission_type['key']}/notify.html"
due_at = get_submission_deadline(submission, settings.FRIENDLY_DATE_FORMAT)
case_number = case["reference"]
email = notify_contact_email(self._client, case_number)
footer = notify_footer(self._client, email)
values = {
"full_name": contact_name,
"case_number": case_number,
"case_name": case["name"],
"investigation_type": case["type"]["name"],
"country": case["sources"][0]["country"] if case["sources"] else "N/A",
"company_name": submission["organisation"].get("name"),
"deadline": due_at or "No deadline assigned",
"login_url": public_login_url(),
"description": submission.get("description"),
"submission_request_name": submission.get("name"),
"notice_type": submission.get("type", {}).get("name"),
"notice_url": submission["url"],
"notice_of_initiation_url": submission["url"],
"footer": footer,
}
template_list = []
if send_to:
for case_role, participant_list in (
self._client.get_case_participants(case_id) or {}
).items():
for participant in participant_list.get("parties"):
if participant.get("id") in send_to:
contact = participant.get("primary_contact")
if contact:
local_values = {
"full_name": contact.get("name"),
"email": contact.get("email"),
"company_name": participant.get("name"),
}
values.update(local_values)
template_list.append(
{
"values": local_values,
"preview": parse_notify_template(
notification_template["body"], values
),
}
)
else:
            # template_list is a list, so append the single preview in the same
            # shape as the entries built in the multi-recipient branch above
            template_list.append(
                {
                    "values": {"full_name": contact_name, "email": contact.get("email")},
                    "preview": parse_notify_template(
                        notification_template["body"], values
                    ),
                }
            )
context = {
"form_action": f"/case/{case_id}/submission/{submission_id}/notify/",
"form_title": f"Invite {contact_name}",
"cancel_redirect_url": f"/case/{case_id}/submission/{submission_id}/",
"editable_fields": { # leaving one as an example
# 'full_name': {'title': 'Full Name', 'disabled': True},
},
"notification_template": notification_template,
"templates": template_list,
"submission": submission,
"case_id": str(case_id),
"contact": contact,
"values": values,
}
return render(request, template_name, context)
def post(self, request, case_id, submission_id, *args, **kwargs):
submission = self._client.get_submission(case_id, submission_id)
notify_keys = ["full_name", "product", "submission_request_name", "description"]
notify_data = {key: request.POST.get(key) for key in notify_keys if key in request.POST}
due_at = get_submission_deadline(submission, settings.FRIENDLY_DATE_FORMAT)
notify_data["deadline"] = due_at or "No deadline assigned"
if request.POST.get("multiple"):
return self.post_multiple(request, case_id, submission, context=notify_data)
self._client.submission_notify(
case_id=case_id,
organisation_id=submission["organisation"]["id"],
submission_id=submission["id"],
values=notify_data,
notice_type=SUBMISSION_NOTICE_TYPE_INVITE,
)
return HttpResponse(
json.dumps(
{
"redirect_url": f"/case/{case_id}/submission/{submission_id}/",
"error": None,
}
),
content_type="application/json",
)
def post_multiple(self, request, case_id, submission, context=None):
"""
        Called to handle a notify post to multiple recipients.
We must clone the submission for each target and send the notification
"""
case = self._client.get_case(case_id)
json_data = from_json(submission.get("deficiency_notice_params"))
send_to = json_data.get("send_to")
# We need to know which is the last party in the list
# so we can modify the existing sub rather than clone it.
party_counter = len(send_to)
for case_role, participant_list in (
self._client.get_case_participants(case_id) or {}
).items():
for participant in participant_list.get("parties"):
if participant.get("id") in send_to:
contact = participant.get("primary_contact")
party_counter -= 1
if contact: # don't try to send if there is no contact
data = {
"case_id": case_id,
"submission_id": submission["id"],
"organisation_id": participant.get("id"),
"contact_id": contact.get("id"),
}
if party_counter:
cloned_submission = self._client.clone_submission(**data)
else:
cloned_submission = self._client.update_submission(**data).get(
"submission"
)
context["full_name"] = contact.get("full_name")
self._client.submission_notify(
case_id=case_id,
organisation_id=participant.get("id"),
submission_id=cloned_submission["id"],
values=context or {},
notice_type=SUBMISSION_NOTICE_TYPE_INVITE,
)
return HttpResponse(
json.dumps(
{
"alert": f'Sent {len(send_to)} request{"" if len(send_to) < 2 else "s"}',
"redirect_url": f'/case/{case_id}/submission/{submission.get("id")}/'
if len(send_to) < 2
else f"/case/{case_id}/submissions/",
"error": None,
}
),
content_type="application/json",
)
class OrganisationDetailsView(LoginRequiredMixin, View, TradeRemediesAPIClientMixin):
def get(self, request, case_id, organisation_id, *args, **kwargs):
client = self.client(request.user)
item = request.GET.get("item")
template = request.GET.get("template")
result = {}
case_submissions = client.get_submissions(case_id)
idx_submissions = deep_index_items_by(case_submissions, "organisation/id")
org_id = str(organisation_id)
third_party_contacts = []
if item == "contacts":
contacts = client.get_organisation_contacts(org_id, case_id)
for contact in contacts:
case = get(contact, "cases/" + str(case_id)) or {}
contact["primary"] = case.get("primary")
all_case_invites = client.get_contact_case_invitations(case_id)
if org_id in idx_submissions:
org_submission_idx = deep_index_items_by(idx_submissions[org_id], "id")
third_party_contacts = self.get_third_party_contacts(
org_id, org_submission_idx, all_case_invites
)
# `contacts` may also contain on-boarded third-party contacts that
# have a user, so we need to prune these out.
third_party_contact_ids = set([i["id"] for i in third_party_contacts])
contacts = [
i
for i in itertools.filterfalse(
lambda x: x["id"] in third_party_contact_ids, contacts
)
]
result = {
"contacts": contacts,
"pre_release_invitations": client.get_system_boolean("PRE_RELEASE_INVITATIONS"),
"invites": deep_index_items_by(all_case_invites, "contact/id"),
"third_party_contacts": third_party_contacts,
"case_role_id": request.GET.get("caserole"),
}
elif item == "submissions":
result["submissions"] = idx_submissions.get(org_id, [])
elif item == "details":
result["party"] = client.get_organisation(organisation_id=organisation_id)
if template:
deep_update(
result,
{
"case_id": case_id,
"case": {"id": case_id},
"organisation": {"id": org_id},
},
)
return render(request, template, result)
return HttpResponse(json.dumps({"result": result}), content_type="application/json")
@staticmethod
def get_third_party_contacts(organisation_id, submissions, invites):
"""Get third party contacts.
Given an organisation, its submissions and all invitations for a case,
build a list of third party invite contacts. We include the invite submissions
yet to be approved but flag the contact with `submission_sufficient`
:param (str) organisation_id: Organisation ID.
:param (dict) submissions: The organisation's submissions keyed on id.
:param (list) invites: All invites for a case.
:returns (list): Contacts arising from 3rd party invite submissions.
"""
third_party_contacts = []
for invite in invites:
if invite["submission"]:
submission_id = invite["submission"]["id"]
full_submission = submissions.get(submission_id)
if not full_submission:
# Submission not at this org
continue
if full_submission[0]["type"]["id"] != SUBMISSION_TYPE_THIRD_PARTY:
# Not a third party submission
continue
inviting_organisation = full_submission[0]["organisation"]["id"]
if inviting_organisation == organisation_id:
submission_sufficient = full_submission[0]["status"]["sufficient"]
invite["contact"]["is_third_party"] = True
invite["contact"]["submission_id"] = submission_id
invite["contact"]["submission_sufficient"] = submission_sufficient
invite["contact"]["invited"] = invite["email_sent"]
third_party_contacts.append(invite["contact"])
return third_party_contacts
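    # Usage sketch (assumed data shapes, matching the call in get() above):
    #   third_party_contacts = OrganisationDetailsView.get_third_party_contacts(
    #       org_id, deep_index_items_by(idx_submissions[org_id], "id"), all_case_invites
    #   )
    # Each returned contact carries is_third_party, submission_id,
    # submission_sufficient and invited flags for use in the template.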
class CaseOrganisationView(CaseBaseView):
groups_required = SECURITY_GROUPS_TRA
template_name = "organisations/organisation_in_case.html"
def add_page_data(self):
organisation = self._client.get_organisation(organisation_id=self.organisation_id)
caserole = None
case_submissions = self._client.get_submissions_public(self.case_id, self.organisation_id)
idx_submissions = deep_index_items_by(case_submissions, "organisation/id")
submissions = idx_submissions.get(str(self.organisation_id), [])
roi_app_submission = next(
filter(lambda x: get(x, "type/key") in ["interest", "application"], submissions),
None,
)
cases = self._client.organisation_cases(self.organisation_id)
user_cases = self._client.organisation_user_cases(self.organisation_id)
cases_idx = deep_index_items_by_exists(cases, "archived_at")
for case in cases:
if get(case, "id") == str(self.case_id):
caserole = case
invites = self._client.get_contact_case_invitations(
self.case_id,
)
return {
"case": self.case,
"invites": invites,
"party": organisation,
"organisation": organisation,
"cases_idx": cases_idx,
"submissions": submissions,
"user_cases": user_cases,
"roi_app_submission": roi_app_submission,
"caserole": caserole,
}
class OrganisationMatchView(CaseBaseView):
groups_required = SECURITY_GROUPS_TRA
template_name = "cases/organisation_dedupe.html"
def add_page_data(self):
organisation = self._client.get_organisation(
organisation_id=self.organisation_id, case_id=self.case_id
)
org_matches = self._client.get_organisation_matches(self.organisation_id)
org_matches = decorate_orgs(org_matches, self.organisation_id)
return {
"case": self.case,
"organisation": organisation,
"org_matches": org_matches,
}
class FilesView(CaseBaseView):
"""
View all case documents
"""
groups_required = SECURITY_GROUPS_TRA
template_name = "cases/files.html"
def add_page_data(self):
tab = self.request.GET.get("tab", "respondent")
sort = self.request.GET.get("sort")
direction = self.request.GET.get("dir", "asc")
submission_id = self.request.GET.get("submission_id")
collapse_identical = self.request.GET.get("collapse_identical", "false") in (
"true",
"1",
"Y",
)
tabs = {
"tabList": [
{"label": "Respondent", "value": "respondent"},
{"label": "Investigator", "value": "investigator"},
],
"value": tab,
}
case_enums = self._client.get_all_case_enums(direction=DIRECTION_TRA_TO_PUBLIC)
case_files = self._client.get_case_documents(
case_id=self.case_id,
source=tab,
submission_id=submission_id,
order_by=sort,
order_dir=direction,
)
submission = None
if submission_id:
submission = self._client.get_submission(self.case_id, submission_id)
return {
"tabs": tabs,
"tab": tab,
"case_enums": case_enums,
"file_list": case_files,
"sort": sort,
"dir": direction,
"collapse_identical": collapse_identical,
"submission": submission,
"pre_document_search": self._client.get_system_boolean("PRE_DOCUMENT_SEARCH"),
}
def post(self, request, case_id, *args, **kwargs):
action = request.POST.get("action")
name = request.POST.get("name")
confirm = request.POST.get("confirm") == "true"
tab = request.POST.get("tab", "respondent")
document_ids = request.POST.getlist("document_id")
if document_ids:
if action == "issue" and confirm:
submission_type_id = request.POST.get("submission_type_id")
response = self._client.issue_documents_to_case(
case_id=case_id,
name=name,
document_ids=document_ids,
submission_type_id=submission_type_id,
)
elif action == "confidential":
response = self._client.toggle_documents_confidentiality(
case_id=case_id, document_ids=document_ids
)
return redirect(f"/case/{case_id}/files/?tab={tab}")
class FileBrowseView(View, TradeRemediesAPIClientMixin):
def get(self, request, case_id, *args, **kwargs):
_client = self.client(request.user)
case_files = _client.get_case_documents(case_id=case_id, source="investigator")
# Add application bundle documents
case_files.extend(_client.get_system_documents())
return HttpResponse(json.dumps(case_files), content_type="application/json")
class WorkflowEditor(CaseBaseView):
groups_required = SECURITY_GROUPS_TRA
permission_required = ("workflow_editor",)
template_name = "cases/workflow_editor.html"
def add_page_data(self):
case_workflow = self._client.get_case_workflow(self.case_id)
return {
"workflow": case_workflow.get("workflow"),
"state": case_workflow.get("state"),
}
def post(self, request, case_id, *args, **kwargs):
workflow = request.POST.get("workflow")
self._client.save_case_workflow(case_id, workflow)
return HttpResponse(json.dumps({"saved": 1}), content_type="application/json")
class ActionsView(CaseBaseView):
groups_required = SECURITY_GROUPS_TRA
template_name = "cases/actions.html"
def add_page_data(self):
permissions = {}
for permission_key in self.request.user.permissions:
permissions[permission_key] = 1
case_workflow = self._client.get_case_workflow(self.case_id)
return {
"workflow": case_workflow.get("workflow"),
"state": case_workflow.get("state"),
"permissions": permissions,
}
class StateView(CaseBaseView):
groups_required = SECURITY_GROUPS_TRA
template_name = "cases/action.html"
def post(self, request, case_id, state_key=None, *args, **kwargs):
value = request.POST.get(state_key)
state_map = self._client.set_case_workflow_state(case_id, [state_key], {state_key: value})
return HttpResponse(
json.dumps({"workflow_state": state_map}), content_type="application/json"
)
class ActionView(CaseBaseView):
groups_required = SECURITY_GROUPS_TRA
template_name = "cases/action.html"
def get_state_from_children(self, item):
        any_mode = item.get("required")  # this is a bodge and the logic is reversed
state = None
completed = False if any_mode else True
for child in item.get("children", []):
value = self.get_value(child.get("key"))
if value:
state = state or "in-progress"
if any_mode:
if value == "complete":
completed = True
else:
if value != "complete":
completed = False
return "complete" if state and completed else state
state_map = {}
def get_value(self, key):
return (self.state_map.get(key) or [""])[0]
def set_value(self, key, value):
arr = self.state_map.get(key) or [""]
arr[0] = value
self.state_map[key] = arr
def post(self, request, case_id, action_id=None, *args, **kwargs): # noqa: C901
values = {}
node_keys = []
action_key = request.POST.get("action-key")
btn_action = request.POST.get("btn_action")
complete = True
error = False
state = ""
wf = self._client.get_case_workflow(case_id)
workflow = wf.get("workflow")
self.state_map = wf.get("state")
index = key_by(workflow["root"], "key", "children")
action = index.get(action_key.lower(), {})
for task in action.get("children", []):
response_type = task.get("response_type", {}).get("name", "")
if response_type.lower() not in (
"notesection",
"timer",
"label",
): # notes don't count as in-progress
task_key = task.get("key")
old_val = self.get_value(task_key)
new_val = request.POST.get(task_key)
if old_val != new_val:
values[task_key] = new_val
node_keys.append(task_key)
if not new_val:
if task.get("required"):
complete = False
else:
if new_val != "na":
state = "in-progress"
if complete:
state = "complete"
if (self.get_value(action_key) or "") != state:
values[action_key] = state
node_keys.append(action_key)
self.set_value(action_key, state)
# ripple the state down the tree
loc_action = action
while loc_action.get("parent_key"):
loc_action = index.get(loc_action.get("parent_key"))
loc_key = loc_action.get("key")
loc_state = self.get_state_from_children(loc_action)
if (self.get_value(loc_key) or "") != loc_state:
values[loc_key] = loc_state
node_keys.append(loc_key)
self.set_value(loc_key, loc_state)
if any(values):
self.state_map = self._client.set_case_workflow_state(case_id, node_keys, values)
if error:
action_id = action.get("id")
return redirect(f"/case/{case_id}/action/{action_id}")
else:
return HttpResponse(
json.dumps({"workflow_state": self.state_map}),
content_type="application/json",
)
class NavSectionView(CaseBaseView):
groups_required = SECURITY_GROUPS_TRA
template_name = "cases/nav_section.html"
def post(self, request, case_id, *args, **kwargs):
content_id = kwargs.get("nav_section_id")
response = self._client.set_case_content(
case_id, content_id=content_id, content=request.POST
)
content_id = response.get("id")
return redirect(f"/case/{case_id}/section/{content_id}")
def add_page_data(self):
return {}
class AuditView(CaseBaseView):
groups_required = SECURITY_GROUPS_TRA
template_name = "cases/audit.html"
def add_page_data(self):
milestone = self.request.GET.get("milestone", "true") == "true"
limit = int(self.request.GET.get("limit", self.limit))
audit_data = self._client.get_audit(
case_id=self.case_id, start=self.start, limit=limit, milestone=milestone
)
url = reverse("case_audit", kwargs={"case_id": self.case_id})
prev_url = next_url = None
prev_page = max(0, self.start - limit)
milestone_flag = f"milestone={milestone}".lower()
if len(audit_data) >= limit:
next_page = max(0, self.start + limit)
next_url = f"{url}?{milestone_flag}&start={next_page}"
if next_page > limit:
prev_url = f"{url}?{milestone_flag}&start={prev_page}"
self.start = next_page
else:
self.start = prev_page + len(audit_data)
if prev_page:
prev_url = f"{url}?{milestone_flag}&start={prev_page}"
return {
"milestone": milestone,
"events": audit_data,
"next_url": next_url,
"prev_url": prev_url,
}
class CaseAuditExport(LoginRequiredMixin, View, TradeRemediesAPIClientMixin):
groups_required = SECURITY_GROUPS_TRA
def get(self, request, case_id, *args, **kwargs):
file = self.client(request.user).get_audit_export(case_id)
response = HttpResponse(file, content_type="application/vnd.ms-excel")
response["Content-Disposition"] = "attachment; filename=trade_remedies_export.xlsx"
return response
class NoteView(LoginRequiredMixin, View, TradeRemediesAPIClientMixin):
groups_required = SECURITY_GROUPS_TRA
def get(
self,
request,
case_id,
content_type=None,
model_id=None,
model_key=None,
*args,
**kwargs,
):
notes = self.client(request.user).get_notes(
case_id, content_type, model_id, model_key=model_key
)
return HttpResponse(json.dumps(notes), content_type="application/json")
def post(self, request, case_id, note_id=None, *args, **kwargs): # noqa: C901
entity_id = request.POST.get("model_id")
model_key = request.POST.get("model_key")
content_type = request.POST.get("content_type")
client = self.client(request.user)
content = request.POST.get("content")
if note_id is None:
result = client.create_note(
case_id=case_id,
content_type=content_type,
model_id=entity_id,
model_key=model_key,
note_text=content,
)
note_id = result.get("id")
else:
delete_list = request.POST.getlist("delete_list")
if delete_list:
for document_id in delete_list:
deleted = client.delete_note_document(case_id, note_id, document_id)
conf_list = request.POST.getlist("set_confidential")
if conf_list:
for document_id in conf_list:
result = client.update_note_document(
case_id, note_id, document_id, "confidential"
)
nonconf_list = request.POST.getlist("set_non-confidential")
if nonconf_list:
for document_id in nonconf_list:
result = client.update_note_document(
case_id, note_id, document_id, "non-confidential"
)
result = client.update_note(case_id, note_id, content)
file_meta = request.POST.getlist("file-meta")
files = request.FILES.getlist("files")
for idx, _file in enumerate(files):
try:
_file.readline() # Important, will raise VirusFoundInFileException if infected
except VirusFoundInFileException:
# Display a fake doc in the widget until
# a poll for success clears it
msg = "File upload aborted: malware detected in file!"
document = {
"name": msg,
"safe": False,
}
result["documents"].append(document)
else:
document = {
"document_name": _file.original_name,
"name": _file.name,
"size": _file.file_size,
}
result = client.add_note_document(
case_id=case_id,
note_id=note_id,
document=json.dumps(document),
confidentiality=file_meta[idx],
)
redirect_url = request.POST.get("redirect")
if redirect_url:
return internal_redirect(redirect_url, "/")
else:
# Return note json to be rendered at the client
return HttpResponse(json.dumps(result), content_type="application/json")
class PublicFileView(CaseBaseView):
groups_required = SECURITY_GROUPS_TRA
template_name = "cases/public_file.html"
def add_page_data(self):
tab = self.request.GET.get("tab", "all")
tabs = {
"tabList": [
{"label": "All", "value": "all"},
{"label": "Notices", "value": "tra"},
{"label": "Business", "value": "business"},
{"label": "Withdrawn", "value": "withdrawn"},
],
"value": tab,
}
case_submissions = self._client.get_submissions(self.case_id, show_global=True)
by_tra = deep_index_items_by_exists(case_submissions, "is_tra")
tra_by_published = deep_index_items_by_exists(by_tra.get("true"), "issued_at")
by_published = deep_index_items_by_exists(case_submissions, "issued_at")
if tab == "all":
submissions = by_published.get("true")
if tab == "tra":
submissions = deep_index_items_by(by_published.get("true"), "is_tra").get("true")
if tab == "business":
submissions = deep_index_items_by(by_published.get("true"), "is_tra").get("")
if tab == "withdrawn":
submissions = deep_index_items_by(by_published.get("false"), "is_tra").get("true")
return {
"tabs": tabs,
"submissions": submissions,
"public_base_url": settings.PUBLIC_BASE_URL,
}
class CaseFormView(LoginRequiredMixin, TemplateView, TradeRemediesAPIClientMixin):
groups_required = SECURITY_GROUPS_TRA
template_name = "cases/case_form.html"
def get_context(self, client, case_id=None):
if case_id:
case = client.get_case(case_id)
else:
case = {
"new": True,
"id": "",
"organisation": {"id": ""},
"type": {"id": "1"},
}
enums = client.get_all_case_enums()
gov_bodies = client.get_organisations(gov_body=True)
country_dict = {}
for country in countries:
country_dict[country[0]] = country[1]
context = {
"body_classes": "full-width",
"case": case,
"organisations": gov_bodies,
"country_dict": country_dict,
"organisation_name": case.get("organisation", {}).get("name") or "Secretary of State",
"contact_country": "GB",
"submission": {"type": {"id": 4}},
"tra_team_names": [
settings.ORGANISATION_NAME,
settings.ORGANISATION_INITIALISM + " Team 1",
settings.ORGANISATION_INITIALISM + " Team 2",
settings.ORGANISATION_INITIALISM + " Team 3",
],
}
context.update(enums)
# context['countries'] = countries[0]
return context
def get(self, request, case_id=None, *args, **kwargs):
client = self.client(request.user)
context = self.get_context(client, case_id)
return render(request, self.template_name, context)
def post(self, request, case_id=None, *args, **kwargs):
post_data = {
"id": case_id,
}
non_required_fields = [
"submission_status_id",
"case_name",
"organisation_name",
"organisation_id",
# 'organisation_address', 'organisation_post_code', 'companies_house_id',
# 'contact_name', 'contact_email', 'contact_phone', 'contact_address',
# 'contact_country',
]
error_lookup = {
"case_type_id": "Case type",
"product_name": "Product name",
"submission_type_id": "Submission type",
"sector_id": "Product sector",
"product_description": "Product description",
"export_country_code": "Export country",
"hs_code": "Product code",
}
required_fields = list(error_lookup.keys())
list_fields = ["export_country_code", "hs_code"]
case_fields = required_fields + non_required_fields
errors = {}
client = self.client(request.user)
if request.POST.get("case_type_id") in ALL_REGION_ALLOWED_TYPE_IDS:
required_fields.remove("export_country_code")
for field in case_fields:
post_data[field] = (
compact_list(request.POST.getlist(field))
if field in list_fields
else request.POST.get(field)
)
for field in required_fields:
if field in error_lookup and not post_data.get(field):
fieldname = error_lookup.get(field)
errors[field] = f"{fieldname} is required"
for i, code in enumerate(post_data.get("hs_code")):
if len(str(code)) not in (6, 7, 8, 9, 10): # temporary validation
errors["hs_code"] = "HS codes should be between 6 and 10 digits"
if not errors:
post_data["ex_oficio"] = True
result = client.submit_full_case_data(post_data)
return redirect("/cases/")
else:
context = self.get_context(client, case_id)
context["errors"] = errors
context.update(post_data)
return render(request, self.template_name, context)
class InviteContactView(CaseBaseView):
"""
Invite a contact to the case
"""
groups_required = SECURITY_GROUPS_TRA
template_name = "cases/invite.html"
raise_exception = True
def get_organisation_admin_user_contact(self, organisation_id):
contact = None
organisation = self._client.get_organisation(organisation_id)
admin_user = [
user
for user in organisation.get("users", [])
if user.get("security_group") == SECURITY_GROUP_ORGANISATION_OWNER
]
if admin_user:
user = self._client.get_user(admin_user[0]["user_id"])
contact = user.get("contact")
contact["organisation"] = organisation
return contact
def add_page_data(self):
contact = None
organisation = None
if self.kwargs.get("organisation_id"):
organisation = self._client.get_organisation(self.kwargs.get("organisation_id"))
if self.kwargs.get("contact_id"):
contact = self._client.get_contact(self.kwargs["contact_id"])
form_url = f"/case/{self.case['id']}/invite/{self.kwargs['contact_id']}/as/{self.kwargs['case_role_id']}/" # noqa: E501
if organisation:
form_url = f"{form_url}for/{organisation['id']}/"
elif self.kwargs.get("organisation_id"):
contact = self.get_organisation_admin_user_contact(self.kwargs["organisation_id"])
form_url = f"/case/{self.case['id']}/invite/organisation/{self.kwargs['organisation_id']}/as/{self.kwargs['case_role_id']}/" # noqa: E501
if not organisation:
organisation = contact["organisation"]
notification_template = self._client.get_notification_template(
"NOTIFY_INFORM_INTERESTED_PARTIES"
)
deep_update(
self.case,
self._client.get_case(
self.case_id,
fields=json.dumps(
{
"Case": {
"latest_notice_of_initiation_url": 0,
"registration_deadline": 0,
"product": 0,
}
}
),
),
)
case_number = self.case["reference"]
email = notify_contact_email(self._client, case_number)
footer = notify_footer(self._client, email)
values = {
"full_name": contact["name"],
"product": get(self.case, "product/name"),
"case_number": case_number,
"case_name": self.case["name"],
"notice_of_initiation_url": self.case.get("latest_notice_of_initiation_url"),
"company_name": organisation["name"],
"deadline": parse_api_datetime(
get(self.case, "registration_deadline"), settings.FRIENDLY_DATE_FORMAT
),
"footer": footer,
"guidance_url": self._client.get_system_parameters("LINK_HELP_BOX_GUIDANCE")["value"],
"email": email,
"login_url": f"{settings.PUBLIC_BASE_URL}",
}
context = {
"form_url": form_url,
"editable_fields": ["full_name", "product"],
"case": self.case,
"contact": contact,
"case_role_id": self.kwargs["case_role_id"],
"parsed_template": parse_notify_template(notification_template["body"], values),
"values": values,
"organisation": organisation,
"organisation_id": self.kwargs.get("organisation_id"),
}
return context
def post(
self,
request,
contact_id=None,
case_id=None,
case_role_id=None,
organisation_id=None,
*args,
**kwargs,
):
notify_keys = ["full_name", "product"]
notify_data = {key: request.POST.get(key) for key in notify_keys}
if organisation_id and contact_id:
notify_data["organisation_id"] = organisation_id
elif organisation_id and not contact_id:
contact = self.get_organisation_admin_user_contact(organisation_id)
contact_id = contact["id"]
response = self._client.invite_contact(case_id, contact_id, case_role_id, notify_data)
return HttpResponse(json.dumps(response), content_type="application/json")
class IssueFilesFormView(CaseBaseView):
"""
Issue files to case
"""
groups_required = SECURITY_GROUPS_TRA
template_name = "widgets/issue_files_form.html"
def add_page_data(self):
case_enums = self._client.get_all_case_enums()
return {
"case_enums": case_enums,
"case": self.case,
}
class CaseBundlesView(CaseBaseView):
"""
Assign documents to the case directly (not via submissions)
"""
groups_required = SECURITY_GROUPS_TRA
template_name = "cases/case_bundles.html"
def add_page_data(self):
list_mode = self.request.GET.get("tab", "live")
tabs = {
"value": list_mode,
"tabList": [
{"label": "Live", "value": "live", "sr_text": "Show live bundles"},
{"label": "Draft", "value": "draft", "sr_text": "Show draft bundles"},
],
}
case_bundles = self._client.get_case_submission_bundles(
case_id=self.case["id"],
status=list_mode.upper(),
)
return {
"bundles": case_bundles,
"error": self.kwargs.get("error"),
"tabs": tabs,
"status": list_mode,
}
@method_decorator(csrf_exempt, name="dispatch")
class CaseBundleView(CaseBaseView):
"""
View and edit a specific bundle full of documents
"""
groups_required = SECURITY_GROUPS_TRA
template_name = "cases/case_bundle_builder.html"
def add_page_data(self):
case_enums = self._client.get_all_case_enums()
bundle = None
bundle_id = self.kwargs.get("bundle_id")
virus = self.request.GET.get("virus")
upload_error = self.request.GET.get("upload_error")
return_data = {
"virus": virus,
"upload_error": upload_error,
}
if bundle_id:
bundle = self._client.get_case_submission_bundles(
case_id=self.case["id"], bundle_id=self.kwargs.get("bundle_id")
)
return_data.update(
{
"bundle": bundle,
"submission_types": case_enums["submission_types"],
}
)
return return_data
def post(self, request, case_id, bundle_id=None, *args, **kwargs): # noqa: C901
name = request.POST.get("name")
data = pluck(request.POST, ["name", "description"])
btn_value = request.POST.get("btn-value")
if btn_value == "send":
data["status"] = "LIVE"
# Upload documents
if bundle_id:
meta_raw = request.POST.getlist("meta")
meta = [json.loads(block) for block in meta_raw]
file_details = deep_index_items_by(meta, "name")
for _file in request.FILES.getlist("files"):
try:
_file.readline() # Important, will raise VirusFoundInFileException if infected
original_file_name = _file.original_name
details = file_details.get(original_file_name.lower())[0]
confidential = details.get("confidential")
document_type = details.get("submission_document_type")
document = self._client.upload_document(
case_id=str(case_id),
data={
"bundle_id": bundle_id,
"confidential": confidential,
"submission_document_type": document_type,
"document_name": original_file_name,
"file_name": _file.name,
"file_size": _file.file_size,
},
)
except (VirusFoundInFileException, APIException) as e:
redirect_url = f"/case/{case_id}/bundle/{bundle_id}/?"
msg = "File upload aborted: "
if isinstance(e, VirusFoundInFileException):
redirect_url += "virus=true"
else:
msg += f"{e}"
redirect_url += f"upload_error={msg}"
logger.warning(f"{msg}")
return HttpResponse(
json.dumps({"redirect_url": redirect_url}),
content_type="application/json",
)
# Attach existing documents to this bundle
if case_files := request.POST.getlist("case_files"):
file_details_by_id = deep_index_items_by(meta, "file/id")
for case_file_id in case_files:
details = (file_details_by_id.get(case_file_id) or [])[0]
document = self._client.attach_document(
case_id=str(case_id),
data={
"bundle_id": bundle_id,
"submission_document_type": details.get("submission_document_type"),
},
document_id=case_file_id,
)
else:
data = pluck(request.POST, ["name", "submission_type_id"])
data["case_id"] = case_id
# Anything else to send?
response = None
if data:
response = self._client.set_case_submission_bundle(bundle_id=bundle_id, data=data)
ret = {"result": "ok", "status": data.get("status")}
response_id = response and response.get("id")
if response_id:
ret["redirect_url"] = f"/case/{case_id}/bundle/{response_id}/"
return HttpResponse(json.dumps(ret), content_type="application/json")
def delete(self, request, case_id, document_id, *args, **kwargs):
response = self._client.delete_case_submission_bundle(case_id, document_id)
return redirect(f"/case/{case_id}/documents/")
class SubmissionInviteNotifyView(CaseBaseView):
"""
    Notify an invitee about an invitation to a case.
"""
groups_required = SECURITY_GROUPS_TRA
raise_exception = True
template_name = "cases/invite.html"
def add_page_data(self):
"""Add page data.
CaseBaseView override.
"""
case_id = self.kwargs.get("case_id")
submission_id = self.kwargs.get("submission_id")
contact_id = self.kwargs.get("contact_id")
case = self._client.get_case(case_id)
submission = self._client.get_submission(case_id, submission_id)
inviting_organisation = submission["organisation"]
invited_contact = self._client.get_contact(contact_id)
inviting_contact = submission.get("contact") or {}
notification_template = self._client.get_notification_template("NOTIFY_THIRD_PARTY_INVITE")
form_url = f"/case/{case_id}/submission/{submission_id}/invite/{contact_id}/notify/"
# Attempt to infer the invite URL
login_url = f"{settings.PUBLIC_BASE_URL}"
invites = self._client.get_invitations(case_id, submission_id)
for i in invites:
if i["contact"]["id"] == str(contact_id):
invite = self._client.get_invite_details(i["id"])
code = invite.get("code")
login_url = f"{login_url}/invitation/{code}/{case_id}/"
break
case_number = case["reference"]
email = notify_contact_email(self._client, case_number)
footer = notify_footer(self._client, email)
values = {
"full_name": invited_contact["name"],
"case_name": case["name"],
"invited_by_organisation": inviting_organisation["name"],
"invited_by_name": inviting_contact["name"],
"notice_of_initiation_url": self.case.get("latest_notice_of_initiation_url"),
"login_url": login_url,
"deadline": parse_api_datetime(
get(self.case, "registration_deadline"), settings.FRIENDLY_DATE_FORMAT
),
"footer": footer,
"email": email,
}
context = {
"form_url": form_url,
"notification_template": notification_template,
"submission": submission,
"case": case,
"contact": invited_contact,
"parsed_template": parse_notify_template(notification_template["body"], values),
"values": values,
}
return context
def post(self, request, case_id, submission_id, contact_id, *args, **kwargs):
notify_data = {
"case_id": case_id,
"submission_id": submission_id,
"contact_id": contact_id,
}
response = self._client.action_third_party_invite(
case_id=case_id,
submission_id=submission_id,
contact_id=contact_id,
params=notify_data,
)
return HttpResponse(json.dumps(response), content_type="application/json")
class UpdateParentView(CaseBaseView):
template_name = "cases/update_parent.html"
linked_case_confirm_key = "LINKED_CASE_CONFIRM"
cases_fields = json.dumps(
{
"Case": {
"name": 0,
"id": 0,
"reference": 0,
}
}
)
case_fields = json.dumps(
{"Case": {"parent": {"id": 0}, "workflow_state": {linked_case_confirm_key: 0}}}
)
def add_page_data(self):
cases = self._client.get_cases(archived=True, all_cases=False, fields=self.cases_fields)
case = self._client.get_case(self.case_id, fields=self.case_fields)
return {"case": case, "cases": cases}
def post(self, request, case_id, *args, **kwargs):
link_confirm = request.POST.get("link_confirm")
parent_id = request.POST.get("parent_id")
_client = self.client(request.user)
case = _client.get_case(case_id, fields=self.case_fields)
if get(case, "parent/id") != parent_id:
_client.set_case_data(case_id, {"parent_id": parent_id})
if (get(case, f"workflow_state/{self.linked_case_confirm_key}") or [0])[0] != link_confirm:
_client.set_case_workflow_state(
case_id, values={f"{self.linked_case_confirm_key}": link_confirm}
)
return HttpResponse(
json.dumps({"parent_id": parent_id, "link_confirm": link_confirm}),
content_type="application/json",
)
class NoticesView(
LoginRequiredMixin, GroupRequiredMixin, TemplateView, TradeRemediesAPIClientMixin
):
groups_required = SECURITY_GROUPS_TRA_ADMINS
template_name = "cases/notices.html"
def get(self, request):
client = self.client(request.user)
notices = client.get_notices()
return render(
request,
self.template_name,
{
"body_classes": "full-width",
"notices": notices,
},
)
class NoticeView(LoginRequiredMixin, GroupRequiredMixin, TemplateView, TradeRemediesAPIClientMixin):
groups_required = SECURITY_GROUPS_TRA_ADMINS
template_name = "cases/notice.html"
cases_fields = json.dumps({"Case": {"name": 0, "id": 0, "reference": 0}})
def get(self, request, notice_id=None):
client = self.client(request.user)
enums = client.get_all_case_enums()
case_types = enums.get("case_types", [])
cases = client.get_cases(archived=True, all_cases=False, fields=self.cases_fields)
notice = {}
if notice_id:
notice = client.get_notice(notice_id)
return render(
request,
self.template_name,
{
"body_classes": "full-width",
"notice": notice,
"cases": cases,
"case_types": case_types,
},
)
def post(self, request, notice_id=None):
client = self.client(request.user)
notice = client.create_update_notice(
name=request.POST.get("name"),
reference=request.POST.get("reference"),
terminated_at=request.POST.get("terminated_at"),
published_at=request.POST.get("published_at"),
case_type=request.POST.get("case_type_id"),
review_case=request.POST.get("review_case_id"),
notice_id=notice_id,
)
return redirect("/cases/notices/")
class DocumentSearchView(CaseBaseView):
template_name = "documents/documents.html"
def add_page_data(self):
query = self.request.GET.get("query")
conf_status = self.request.GET.get("confidential_status")
user_type = self.request.GET.get("user_type")
response = self._client.search_documents(
case_id=self.case_id,
query=query,
confidential_status=conf_status,
user_type=user_type,
)
return {
"body_classes": "full-width",
"documents": response.pop("results", []),
"query": query,
"conf_status": conf_status,
**response,
}
class CaseTeamJsonView(LoginRequiredMixin, View, TradeRemediesAPIClientMixin):
def get(self, request, case_id, **kwargs):
team = self.client(request.user).get_case_team_members(case_id)
return HttpResponse(json.dumps(team), content_type="application/json")
| 41.063503 | 150 | 0.574391 | 112,809 | 0.980087 | 0 | 0 | 6,470 | 0.056212 | 0 | 0 | 23,368 | 0.203022 |
98c73bdbed560b9a7619895e79b9cc268edc26d3 | 10,352 | py | Python | analyze-ir-signal.py | hnw/analyze-ir-signal | f810c0ced955ec70c5e4c0d3556efa0a6d0d3138 | [
"MIT"
]
| null | null | null | analyze-ir-signal.py | hnw/analyze-ir-signal | f810c0ced955ec70c5e4c0d3556efa0a6d0d3138 | [
"MIT"
]
| null | null | null | analyze-ir-signal.py | hnw/analyze-ir-signal | f810c0ced955ec70c5e4c0d3556efa0a6d0d3138 | [
"MIT"
]
| null | null | null | #!/usr/bin/env python3
import numpy as np
from sklearn.neighbors import KernelDensity
# Relative tolerance (in percent) for some comparisons on measured data.
TOLERANCE = 25
# Lower tolerance for comparison of measured data
LTOL = 100 - TOLERANCE
# Upper tolerance for comparison of measured data
UTOL = 100 + TOLERANCE
# Resolution of the raw input buffer data. Corresponds to 2 pulses of each 26.3 at 38 kHz.
MICROS_PER_TICK = 50
# Value is subtracted from all marks and added to all spaces before decoding, to compensate for the signal forming of different IR receiver modules.
MARK_EXCESS_MICROS = 20
NEC_ADDRESS_BITS = 16 # 16 bit address or 8 bit address and 8 bit inverted address
NEC_COMMAND_BITS = 16 # Command and inverted command
NEC_BITS = (NEC_ADDRESS_BITS + NEC_COMMAND_BITS)
NEC_UNIT = 560
NEC_HEADER_MARK = (16 * NEC_UNIT) # 9000
NEC_HEADER_SPACE = (8 * NEC_UNIT) # 4500
NEC_BIT_MARK = NEC_UNIT
NEC_ONE_SPACE = (3 * NEC_UNIT) # 1690
NEC_ZERO_SPACE = NEC_UNIT
PROTOCOL_IS_LSB_FIRST = False
PROTOCOL_IS_MSB_FIRST = True
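# NEC frame layout assumed by the decoder below: a 9 ms header mark and 4.5 ms
# header space, then 32 bits (address + inverted address, command + inverted
# command) sent LSB first, where every bit is a 560 us mark followed by a
# ~1690 us space for "1" or a 560 us space for "0", closed by one stop mark.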
def ticks_low(us):
return us * LTOL / 100
def ticks_high(us):
return us * UTOL / 100 + 1
def match_mark(us, match):
passed = ((us >= ticks_low(match + MARK_EXCESS_MICROS)) and (us <= ticks_high(match + MARK_EXCESS_MICROS)));
return passed
def match_space(us, match):
passed = ((us >= ticks_low(match - MARK_EXCESS_MICROS)) and (us <= ticks_high(match - MARK_EXCESS_MICROS)));
return passed
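# Worked example (using the constants above, an illustration only): with
# TOLERANCE=25 and MARK_EXCESS_MICROS=20, match_mark(us, 560) accepts measured
# marks of roughly 435-726 us (bounds of 580 us +/- 25%), while match_space
# compares against 540 us and accepts roughly 405-676 us.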
def decode_pulse_distance_data(data, nbits, bitmark, onespace, zerospace, msbfirst):
a = data[:len(data)-len(data)%2].reshape(-1,2)
decoded = 0
if msbfirst:
for b in a[:nbits]:
if (not match_mark(b[0], bitmark)):
print("Mark=%d is not %d" % (b[0], bitmark))
return False
if match_space(b[1], onespace):
decoded = (decoded << 1) | 1
elif match_space(b[1], zerospace):
decoded = (decoded << 1) | 0
else:
print("Space=%d is not %d or %d" % (b[1], onespace, zerospace))
return False
else:
mask = 1
i = 0
for b in a[:nbits]:
if i % 8 == 0:
mask = 1
decoded <<= 8
if (not match_mark(b[0], bitmark)):
print("Mark=%d is not %d" % (b[0], bitmark))
return False
if match_space(b[1], onespace):
decoded |= mask
elif match_space(b[1], zerospace):
pass
else:
print("Space=%d is not %d or %d" % (b[1], onespace, zerospace))
return False
mask <<= 1
i += 1
print("decoded = %04X" % decoded)
return decoded
#C8E880=bright?
#131780
data_onoff=[3016,1561,344,1186,343,1189,343,425,341,421,348,1185,348,425,341,424,346,419,345,1189,343,1187,342,1188,341,428,342,1184,347,425,344,439,328,423,351,415,351,414,348,428,342,1188,341,436,330,424,348,421,343,422,348,8272,3011,1563,343,1185,344,1183,346,422,346,422,349,1182,346,422,344,425,345,421,349,1185,342,1187,348,1184,342,422,348,1183,346,423,346,419,351,419,348,424,344,427,340,445,323,1190,342,442,325,423,345,422,347,419,348,8272,3014,1559,348,1201,326,1206,326,419,348,425,343,1183,347,419,349,424,343,427,340,1189,343,1186,343,1187,342,422,348,1184,344,436,330,422,351,423,344,424,341,422,348,438,329,1205,324,425,343,422,351,419,348,425,344]
#131720
data_30=[3015,1558,345,1183,346,1201,329,419,347,426,344,1185,347,419,345,444,325,442,328,1201,328,1204,325,1204,330,418,345,1186,343,422,348,422,348,425,342,445,322,1205,325,425,342,426,346,419,345,424,345,440,327,426,345,8262,3012,1562,341,1189,344,1186,341,424,345,424,343,1186,343,429,341,425,343,425,345,1185,344,1186,343,1187,346,425,341,1187,340,440,305,448,354,404,357,419,350,1185,341,426,341,440,329,419,348,426,320,448,345,421,345,8260,3013,1563,343,1187,352,1166,328,450,342,439,328,1208,323,425,343,421,347,422,349,1187,342,1186,352,1165,353,439,330,1204,300,453,341,424,342,428,318,462,333,1203,299,446,346,424,344,428,349,431,301,446,348,424,342]
#131750
data_50=[3020,1555,345,1188,344,1188,342,419,347,424,346,1183,345,419,351,424,342,441,326,1204,328,1201,326,1205,324,422,356,1176,348,419,344,422,350,425,342,1186,343,416,350,1185,345,423,347,419,347,421,348,442,331,418,348,8258,3016,1558,345,1184,346,1183,346,425,342,421,348,1188,343,424,344,417,349,422,348,1185,344,1184,348,1181,346,419,350,1187,342,424,345,424,343,422,350,1183,343,442,328,1201,327,419,349,423,346,424,343,421,349,435,331,8258,3018,1557,346,1184,348,1181,346,422,347,423,344,1186,346,419,354,415,355,407,350,1186,343,1190,343,1183,346,419,348,1203,326,419,350,419,348,424,344,1184,344,422,346,1187,343,417,350,421,348,419,347,442,325,425,346]
#131760
data_100=[3023,1556,346,1183,346,1183,346,419,349,422,348,1184,348,417,352,419,350,420,347,1183,348,1201,328,1185,346,422,345,1185,358,412,345,422,348,419,347,441,328,1187,345,1183,346,425,343,424,346,422,344,440,329,439,332,8267,3017,1558,346,1185,344,1185,344,423,346,423,347,1184,345,420,349,423,346,438,328,1190,342,1184,346,1180,355,416,349,1199,330,419,351,419,351,418,345,422,348,1186,345,1202,327,422,350,414,353,421,348,420,346,419,350,8268,3017,1560,343,1187,345,1202,330,414,352,417,356,1178,348,418,350,417,352,417,350,1184,347,1182,348,1185,344,438,331,1189,343,441,325,417,353,419,362,397,357,1184,346,1184,348,424,342,425,345,419,349,436,331,425,345]
# 070710
data_brighten=[3021,1557,348,1183,348,1183,345,1184,344,423,347,437,330,421,348,421,349,418,348,1184,348,1182,351,1197,329,421,348,421,348,417,350,422,347,423,347,1185,343,419,350,440,327,441,329,421,348,421,353,418,344,423,346,8252,3019,1559,347,1184,343,1184,345,1184,348,417,350,419,350,424,345,439,328,435,335,1201,328,1188,341,1186,347,438,328,441,330,420,347,419,349,423,345,1185,345,427,344,438,328,420,349,416,354,418,348,425,344,424,346,8252,3017,1576,329,1202,327,1186,349,1180,346,419,360,429,326,442,328,422,345,419,350,1184,346,1184,347,1203,330,418,345,422,348,422,351,435,331,437,330,1200,329,419,350,421,345,425,345,419,351,434,332,442,327,421,346,8255,3017,1558,345,1185,348,1199,331,1200,327,418,353,420,347,425,343,422,348,423,344,1189,342,1181,348,1184,345,424,346,436,331,437,332,421,349,420,346,1185,347,425,342,424,346,439,327,419,350,419,348,423,347,422,347,8251,3018,1561,342,1187,342,1185,347,1203,327,417,352,421,348,424,343,419,349,438,349,1155,354,1186,346,1186,344,422,347,419,347,436,333,420,349,424,343,1184,348,421,346,423,346,419,351,438,328,423,346,422,348,421,345]
# 070730
data_darken=[3018,1562,345,1189,372,1147,353,1184,348,423,347,420,346,423,346,424,345,422,347,1185,345,1183,346,1184,348,417,352,419,348,424,346,424,344,423,347,1185,344,1184,345,419,350,424,345,427,343,439,331,414,353,419,349,8261,3018,1558,351,1182,343,1190,350,1176,346,425,345,425,340,422,348,436,333,422,347,1201,328,1186,343,1190,343,424,341,443,327,421,348,417,351,419,352,1181,345,1186,348,415,353,421,345,424,346,419,347,428,341,441,329,8263,3018,1556,347,1188,342,1189,341,1186,346,423,347,440,336,398,361,421,346,428,341,1184,345,1187,342,1185,347,421,349,423,343,423,347,424,344,426,341,1186,343,1189,344,423,343,441,328,417,353,417,349,424,346,423,346]
# 131740
data_nightlight=[3020,1557,348,1185,350,1180,350,419,349,421,348,1185,346,419,348,441,327,442,331,1180,348,1184,348,1188,343,424,346,1186,343,419,350,424,345,424,346,419,350,421,350,1201,329,421,347,417,352,419,350,421,348,421,349,8260,3019,1557,351,1183,346,1184,348,422,347,419,347,1204,327,441,328,441,328,437,335,1184,344,1184,349,1185,346,438,331,1185,348,419,346,426,343,420,350,418,350,420,350,1183,352,418,347,417,354,423,344,422,347,417,354,8259,3019,1557,348,1189,343,1184,346,421,345,440,331,1184,346,422,345,424,345,438,331,1186,346,1186,344,1184,345,419,350,1184,347,421,348,417,353,419,347,424,345,424,346,1184,345,423,346,423,346,427,342,419,353,419,345]
data_hitachi=[8917,4558,525,590,525,1725,526,1728,527,590,524,1727,526,606,513,1726,521,592,527,1725,525,594,528,605,509,1727,526,588,531,1723,527,588,532,1736,511,1726,526,588,531,583,533,593,529,1720,529,1723,526,1730,523,588,530,588,528,1722,528,1727,535,1703,537,586,534,603,512,594,525,1728,522,39873]
def show_aeha(a):
a = a[:len(a)-len(a)%2].reshape(-1,2)
trailers = np.where(a>8000)[0] # Trailer >=8ms
if len(trailers) > 0:
a = a[:trailers[0]]
leaders = np.where(a>2800)[0] # T=350-500us, Leader=8T,4T
    if len(leaders) > 0 and leaders[0] == 0:
        print("Leader = ", a[0])
a = a[1:]
if len(a) % 8 != 0:
print("Warning: Data corrupted: bit length = ",len(a))
return
if len(a) < 24:
print("Warning: Data too short: bit length = ",len(a))
return
    a[a <= 500] = 0
    a[(a > 500) & (a <= 1500)] = 1
customer_code = 0
for b in a[7::-1]:
customer_code = customer_code << 1 | b[1]
for b in a[15:7:-1]:
customer_code = customer_code << 1 | b[1]
print("Customer Code = %04X" % customer_code)
parity = 0
for b in a[19:15:-1]:
parity = parity << 1 | b[1]
print("Parity = 0x%01X" % parity)
data0 = 0
for b in a[23:19:-1]:
data0 = data0 << 1 | b[1]
print("data0 = 0x%01X" % data0)
def decode_nec(rawbuf):
# Check we have the right amount of data (68). The +4 is for initial gap, start bit mark and space + stop bit mark.
if (len(rawbuf) != 2 * NEC_BITS + 4) and (len(rawbuf) != 4):
print("NEC: Data length=%d is not 68 lr 4" % len(rawbuf))
return False
# Check header "mark" this must be done for repeat and data
if (not match_mark(rawbuf[0], NEC_HEADER_MARK)):
print("NEC: Header mark length is wrong")
return False
# Check command header space
if (not match_space(rawbuf[1], NEC_HEADER_SPACE)):
print("NEC: Header space length is wrong")
return False
if (not decode_pulse_distance_data(rawbuf[2:], NEC_BITS, NEC_BIT_MARK, NEC_ONE_SPACE, NEC_ZERO_SPACE, PROTOCOL_IS_LSB_FIRST)):
return False
# Stop bit
    if (not match_mark(rawbuf[2 + (2 * NEC_BITS)], NEC_BIT_MARK)):
        print("NEC: Stop bit mark length is wrong")
        return False
    return True
#a = np.array(data_nightlight)
a = np.array(data_hitachi)
#show_aeha(a)
decode_nec(a)
| 57.511111 | 1,100 | 0.681897 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,293 | 0.124879 |
98c872b368191fe4e11021c3430aca414eab1a34 | 2,698 | py | Python | mmdet/models/emod_ops/ar_module.py | zhenglab/EMOD | 68bef744a99d0ec4eef8f3cc6b1f5ab3c0807d89 | [
"Apache-2.0"
]
| 2 | 2020-12-09T08:40:04.000Z | 2021-07-27T08:44:46.000Z | mmdet/models/emod_ops/ar_module.py | zhenglab/EMOD | 68bef744a99d0ec4eef8f3cc6b1f5ab3c0807d89 | [
"Apache-2.0"
]
| null | null | null | mmdet/models/emod_ops/ar_module.py | zhenglab/EMOD | 68bef744a99d0ec4eef8f3cc6b1f5ab3c0807d89 | [
"Apache-2.0"
]
| null | null | null | import torch
from torch import nn
from mmcv.cnn.utils import constant_init, kaiming_init
class SimAttention(nn.Module):
def __init__(self, in_channels):
super(SimAttention, self).__init__()
self.conv_attn = nn.Conv2d(in_channels, 1, kernel_size=1)
self.softmax = nn.Softmax(dim=2)
kaiming_init(self.conv_attn, mode='fan_in')
self.conv_attn.inited = True
def forward(self, x):
b, c, h, w = x.size()
x_in = x
x_in = x_in.view(b, c, h * w)
x_in = x_in.unsqueeze(1)
x_attn = self.conv_attn(x)
x_attn = x_attn.view(b, 1, h * w)
x_attn = self.softmax(x_attn)
x_attn = x_attn.unsqueeze(-1)
x_out = torch.matmul(x_in, x_attn)
x_out = x_out.view(b, c, 1, 1)
return x_out
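    # Shape sketch (assumed): for x of shape (B, C, H, W) the 1x1 conv yields a
    # (B, 1, H*W) attention map, softmax-normalised over the spatial axis, and
    # the matmul pools x into a (B, C, 1, 1) context vector.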
class SimRelation(nn.Module):
def __init__(self, in_channels, ratio, act=False):
super(SimRelation, self).__init__()
self.planes = int(in_channels * ratio)
self.act = act
self.mlp = nn.Sequential(
nn.Linear(in_features=in_channels, out_features=self.planes),
nn.LayerNorm([self.planes]),
nn.ReLU(inplace=True),
nn.Linear(in_features=self.planes, out_features=in_channels))
constant_init(self.mlp[-1], val=0)
if self.act:
self.activate = nn.Sigmoid()
def forward(self, x):
x_in = x
x_in = x_in.view(x.size(0), -1)
x_out = self.mlp(x_in)
if self.act:
x_out = self.activate(x_out)
x_out = x_out.view(x.size(0), x.size(1), 1, 1)
return x_out
class ARModule(nn.Module):
"""AR Module for EMOD."""
def __init__(self,
in_channels,
ratio,
fusion_type='add'):
super(ARModule, self).__init__()
assert fusion_type in ['add', 'mul'], 'fusion_type should be add or mul.'
self.fusion_type = fusion_type
# attention
self.sim_attention = SimAttention(in_channels)
# relation
if self.fusion_type == 'add':
self.sim_relation = SimRelation(in_channels, ratio, act=False)
else:
self.sim_relation = SimRelation(in_channels, ratio, act=True)
def forward(self, x):
x_attn = self.sim_attention(x)
out = x
if self.fusion_type == 'add':
x_rel = self.sim_relation(x_attn)
out = out + x_rel
else:
x_rel = self.sim_relation(x_attn)
out = out * x_rel
return out
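if __name__ == "__main__":
    # Minimal smoke test (an assumption for illustration, not part of the
    # original module): ARModule should preserve the input shape for both
    # fusion types.
    x = torch.randn(2, 16, 8, 8)
    for fusion in ("add", "mul"):
        module = ARModule(in_channels=16, ratio=0.25, fusion_type=fusion)
        assert module(x).shape == x.shape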
| 29.326087 | 81 | 0.546331 | 2,603 | 0.964789 | 0 | 0 | 0 | 0 | 0 | 0 | 114 | 0.042254 |
98c9bbdcbfc1d4a76b6ddc9df442f68e0236c7a7 | 519 | py | Python | prayer_times_v2.py | danish09/request_api | 67aac9079cb30fc0069a9273c8b4074122ea4d3b | [
"MIT"
]
| null | null | null | prayer_times_v2.py | danish09/request_api | 67aac9079cb30fc0069a9273c8b4074122ea4d3b | [
"MIT"
]
| null | null | null | prayer_times_v2.py | danish09/request_api | 67aac9079cb30fc0069a9273c8b4074122ea4d3b | [
"MIT"
]
| null | null | null | import json
import requests
from datetime import datetime
from playsound import playsound
tday=datetime.today().strftime('%Y-%m-%d')
right_now=datetime.today().strftime('%I-%M-%p')
response = requests.get("https://www.londonprayertimes.com/api/times/?format=json&key=0239f686-4423-408e-9a0c-7968a403d197&year=&month=")
data=response.json()
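# Assumed response shape: a flat JSON object mapping prayer names (and a few
# other keys such as the date) to "HH:MM" strings; the loop below compares
# those strings lexicographically, which only works when they are zero-padded.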
for key,value in data.items():
if value >= '03:30' and value < '06:00':
print('It is asr time')
#playsound('/home/danish/Downloads/adan.mp3') | 23.590909 | 137 | 0.693642 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 207 | 0.398844 |
98ca5c7bd9f6d4e14adea6a5004535831845ac15 | 6,763 | py | Python | pokemon/pokemon_tests/test_serializers.py | pessman/pokemon_utils | cbe06ebe323cb38a35846274d812bdbe8d0ae8ca | [
"MIT"
]
| 1 | 2019-03-11T04:12:50.000Z | 2019-03-11T04:12:50.000Z | pokemon/pokemon_tests/test_serializers.py | pessman/pokemon_utils | cbe06ebe323cb38a35846274d812bdbe8d0ae8ca | [
"MIT"
]
| null | null | null | pokemon/pokemon_tests/test_serializers.py | pessman/pokemon_utils | cbe06ebe323cb38a35846274d812bdbe8d0ae8ca | [
"MIT"
]
| 2 | 2019-03-13T03:17:29.000Z | 2019-04-04T20:06:50.000Z | import pytest
from django.test import TestCase
from rest_framework import serializers as drf_serializers
from pokemon import models, serializers
@pytest.mark.django_db
class StatsSerializer(TestCase):
"""
Test Module for StatsSerializer
"""
def setUp(self):
models.Nature.objects.create(
name="Adamant",
positive="attack",
negative="special_attack"
)
self.valid_base_stats = {
"hit_points": 108,
"attack": 130,
"defense": 95,
"special_attack": 80,
"special_defense": 85,
"speed": 102
}
self.valid_ivs = {
"hit_points": 24,
"attack": 12,
"defense": 30,
"special_attack": 16,
"special_defense": 23,
"speed": 5
}
self.invalid_ivs_high = {
"hit_points": 33,
"attack": 12,
"defense": 30,
"special_attack": 16,
"special_defense": 23,
"speed": 5
}
self.invalid_ivs_low = {
"hit_points": -1,
"attack": 12,
"defense": 30,
"special_attack": 16,
"special_defense": 23,
"speed": 5
}
self.valid_evs = {
"hit_points": 74,
"attack": 190,
"defense": 91,
"special_attack": 48,
"special_defense": 84,
"speed": 23
}
self.invalid_evs_high_individual = {
"hit_points": 0,
"attack": 300,
"defense": 0,
"special_attack": 0,
"special_defense": 0,
"speed": 0
}
self.invalid_evs_high_total = {
"hit_points": 74,
"attack": 190,
"defense": 91,
"special_attack": 48,
"special_defense": 84,
"speed": 100
}
self.invalid_evs_low_individual = {
"hit_points": 0,
"attack": -10,
"defense": 0,
"special_attack": 0,
"special_defense": 0,
"speed": 0
}
self.valid_level = 78
self.invalid_level_high = 110
self.invalid_level_low = 0
self.valid_nature = "adamant"
self.invalid_nature = "thisisntanature"
def test_stats_serializer(self):
serializer = serializers.StatsSerializer(data={
"base_stats": self.valid_base_stats,
"evs": self.valid_evs,
"ivs": self.valid_ivs,
"level": self.valid_level,
"nature": self.valid_nature
})
serializer.is_valid(raise_exception=True)
stats = serializer.get_stats()
self.assertEqual(stats["hit_points"], 289)
self.assertEqual(stats["attack"], 278)
self.assertEqual(stats["defense"], 193)
self.assertEqual(stats["special_attack"], 135)
self.assertEqual(stats["special_defense"], 171)
self.assertEqual(stats["speed"], 171)
def test_invalid_nature(self):
with pytest.raises(drf_serializers.ValidationError) as exc:
serializer = serializers.StatsSerializer(data={
"base_stats": self.valid_base_stats,
"evs": self.valid_evs,
"ivs": self.valid_ivs,
"level": self.valid_level,
"nature": self.invalid_nature
})
serializer.is_valid(raise_exception=True)
def test_invalid_level_high(self):
with pytest.raises(drf_serializers.ValidationError):
serializer = serializers.StatsSerializer(data={
"base_stats": self.valid_base_stats,
"evs": self.valid_evs,
"ivs": self.valid_ivs,
"level": self.invalid_level_high,
"nature": self.valid_nature
})
serializer.is_valid(raise_exception=True)
def test_invalid_level_low(self):
with pytest.raises(drf_serializers.ValidationError):
serializer = serializers.StatsSerializer(data={
"base_stats": self.valid_base_stats,
"evs": self.valid_evs,
"ivs": self.valid_ivs,
"level": self.invalid_level_low,
"nature": self.valid_nature
})
serializer.is_valid(raise_exception=True)
def test_invalid_ivs_low(self):
with pytest.raises(drf_serializers.ValidationError):
serializer = serializers.StatsSerializer(data={
"base_stats": self.valid_base_stats,
"evs": self.valid_evs,
"ivs": self.invalid_ivs_low,
"level": self.valid_level,
"nature": self.valid_nature
})
serializer.is_valid(raise_exception=True)
def test_invalid_ivs_high(self):
with pytest.raises(drf_serializers.ValidationError):
serializer = serializers.StatsSerializer(data={
"base_stats": self.valid_base_stats,
"evs": self.valid_evs,
"ivs": self.invalid_ivs_high,
"level": self.valid_level,
"nature": self.valid_nature
})
serializer.is_valid(raise_exception=True)
def test_invalid_evs_high_total(self):
with pytest.raises(drf_serializers.ValidationError):
serializer = serializers.StatsSerializer(data={
"base_stats": self.valid_base_stats,
"evs": self.invalid_evs_high_total,
"ivs": self.valid_ivs,
"level": self.valid_level,
"nature": self.valid_nature
})
serializer.is_valid(raise_exception=True)
def test_invalid_evs_high_individual(self):
with pytest.raises(drf_serializers.ValidationError):
serializer = serializers.StatsSerializer(data={
"base_stats": self.valid_base_stats,
"evs": self.invalid_evs_high_individual,
"ivs": self.valid_ivs,
"level": self.valid_level,
"nature": self.valid_nature
})
serializer.is_valid(raise_exception=True)
def test_invalid_evs_low_individual(self):
with pytest.raises(drf_serializers.ValidationError):
serializer = serializers.StatsSerializer(data={
"base_stats": self.valid_base_stats,
"evs": self.invalid_evs_low_individual,
"ivs": self.valid_ivs,
"level": self.valid_level,
"nature": self.valid_nature
})
serializer.is_valid(raise_exception=True)
| 32.990244 | 67 | 0.546799 | 6,591 | 0.974567 | 0 | 0 | 6,614 | 0.977968 | 0 | 0 | 1,061 | 0.156883 |
98ca9c54fc93a4a5630df7be404c28ca3e935a2c | 4,962 | py | Python | sqlpuzzle/_common/argsparser.py | Dundee/python-sqlpuzzle | 260524922a0645c9bf94a9779195f93ef2c78cba | [
"MIT"
]
| 8 | 2015-03-19T11:25:32.000Z | 2020-09-02T11:30:10.000Z | sqlpuzzle/_common/argsparser.py | Dundee/python-sqlpuzzle | 260524922a0645c9bf94a9779195f93ef2c78cba | [
"MIT"
]
| 7 | 2015-03-23T14:34:28.000Z | 2022-02-21T12:36:01.000Z | sqlpuzzle/_common/argsparser.py | Dundee/python-sqlpuzzle | 260524922a0645c9bf94a9779195f93ef2c78cba | [
"MIT"
]
| 4 | 2018-11-28T21:59:27.000Z | 2020-01-05T01:50:08.000Z | from sqlpuzzle.exceptions import InvalidArgumentException
__all__ = ('parse_args',)
# pylint: disable=dangerous-default-value,keyword-arg-before-vararg
def parse_args(options={}, *args, **kwds):
"""
Parser of arguments.
dict options {
int min_items: Min of required items to fold one tuple. (default: 1)
int max_items: Count of items in one tuple. Last `max_items-min_items`
items is by default set to None. (default: 1)
bool allow_dict: Flag allowing dictionary as first (and only one)
argument or dictinary as **kwds. (default: False)
bool allow_list: Flag allowing list as first (and only one) argument.
(default: False)
}
Examples:
calling with min_items=1, max_items=2, allow_dict=False:
arg1, arg2 => ((arg1, None), (arg2, None))
(arg1a, arg1b), arg2 => ((arg1a, arg1b), arg2, None))
arg1=val1 => FAIL
{key1: val1} => FAIL
calling with min_items=2, max_items=3, allow_dict=True:
arg1, arg2 => ((arg1, arg2, None),)
arg1, arg2, arg3 => ((arg1, arg2, arg3),)
(arg1a, arg1b, arg1c) => ((arg1a, arg1b, arg1c),)
arg1=val1, arg2=val2 => ((arg1, val1, None), (arg2, val2, None))
{key1: val1, key2: val2} => ((key1, val1, None), (key2, val2, None))
(arg1a, arg1b), arg2a, arg2b => FAIL
"""
parser_options = ParserOptions(options)
parser_input = ParserInput(args, kwds)
parser = Parser(parser_options, parser_input)
parser.parse()
return parser.output_data
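# Usage sketch, mirroring the docstring examples (assumed options):
#   parse_args({'min_items': 1, 'max_items': 2}, 'a', ('b', 'c'))
#   -> [('a', None), ('b', 'c')]
#   parse_args({'max_items': 2, 'allow_dict': True}, key='val')
#   -> [('key', 'val')]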
# pylint: disable=too-few-public-methods
class ParserOptions:
def __init__(self, options):
self.min_items = options.get('min_items', 1)
self.max_items = options.get('max_items', 1)
self.allow_dict = options.get('allow_dict', False)
self.allow_list = options.get('allow_list', False)
assert self.min_items <= self.max_items
assert not self.allow_dict or (self.allow_dict and self.max_items > 1)
class ParserInput:
def __init__(self, args, kwds):
self.args = args
self.kwds = kwds
@property
def list(self):
if self.is_list:
return self.args[0]
return []
@property
def dictionary_or_kwds(self):
if self.is_dictionary:
return self.args[0]
if self.is_kwds:
return self.kwds
return {}
@property
def is_list(self):
return len(self.args) == 1 and isinstance(self.args[0], (list, tuple))
@property
def is_dictionary(self):
return len(self.args) == 1 and isinstance(self.args[0], dict)
@property
def is_kwds(self):
return self.kwds != {}
@property
def is_args(self):
return len(self.args) > 0 and not isinstance(self.args[0], (list, tuple))
def count_of_args_is_in_interval(self, min_, max_):
return min_ <= len(self.args) <= max_
class Parser:
def __init__(self, options, input_data):
self.options = options
self.input_data = input_data
self.output_data = []
def parse(self):
if (
self.options.min_items > 1
and self.input_data.is_args
and self.input_data.count_of_args_is_in_interval(self.options.min_items, self.options.max_items)
):
self._parse_item(self.input_data.args)
elif self.options.allow_list and self.input_data.is_list:
self._parse_list(self.input_data.list)
elif not self.input_data.is_dictionary and self.input_data.args:
self._parse_list(self.input_data.args)
if self.input_data.is_dictionary or self.input_data.is_kwds:
if not self.options.allow_dict:
raise InvalidArgumentException('Dictionary or kwds is disabled.')
self._parse_dictionary(self.input_data.dictionary_or_kwds)
def _parse_dictionary(self, dictionary):
for item in sorted(dictionary.items()):
self._parse_item(item)
def _parse_list(self, list_):
for item in list_:
if isinstance(item, (list, tuple)):
self._parse_item(item)
elif self.options.min_items == 1:
self._parse_item((item,))
else:
raise InvalidArgumentException('Too few arguments.')
def _parse_item(self, item):
batch = self._create_batch(item)
self.output_data.append(batch)
def _create_batch(self, values):
if len(values) > self.options.max_items:
raise InvalidArgumentException('Too many arguments.')
return self._append_nones(tuple(values))
def _append_nones(self, tuple_with_values):
count_of_nones = self.options.max_items - len(tuple_with_values)
tuple_with_nones = (None,) * count_of_nones
return tuple_with_values + tuple_with_nones
| 33.755102 | 112 | 0.620314 | 3,284 | 0.66183 | 0 | 0 | 678 | 0.136638 | 0 | 0 | 1,477 | 0.297662 |
98cab2bad7becb5d77b33c01de7f7ffa0e4c8c44 | 16,809 | py | Python | reviewboard/webapi/tests/test_review_screenshot_comment.py | ParikhKadam/reviewboard | 7395902e4c181bcd1d633f61105012ffb1d18e1b | [
"MIT"
]
| 921 | 2015-01-01T15:26:28.000Z | 2022-03-29T11:30:38.000Z | reviewboard/webapi/tests/test_review_screenshot_comment.py | ParikhKadam/reviewboard | 7395902e4c181bcd1d633f61105012ffb1d18e1b | [
"MIT"
]
| 5 | 2015-03-17T18:57:47.000Z | 2020-10-02T13:24:31.000Z | reviewboard/webapi/tests/test_review_screenshot_comment.py | ParikhKadam/reviewboard | 7395902e4c181bcd1d633f61105012ffb1d18e1b | [
"MIT"
]
| 285 | 2015-01-12T06:24:36.000Z | 2022-03-29T11:03:50.000Z | from __future__ import unicode_literals
from django.contrib.auth.models import User
from djblets.webapi.errors import PERMISSION_DENIED
from reviewboard.reviews.models import ScreenshotComment
from reviewboard.webapi.resources import resources
from reviewboard.webapi.tests.base import BaseWebAPITestCase
from reviewboard.webapi.tests.mimetypes import (
screenshot_comment_item_mimetype,
screenshot_comment_list_mimetype)
from reviewboard.webapi.tests.mixins import (
BasicTestsMetaclass,
ReviewRequestChildItemMixin,
ReviewRequestChildListMixin)
from reviewboard.webapi.tests.mixins_comment import (
CommentItemMixin,
CommentListMixin)
from reviewboard.webapi.tests.urls import (
get_review_screenshot_comment_item_url,
get_review_screenshot_comment_list_url)
class BaseTestCase(BaseWebAPITestCase):
fixtures = ['test_users']
def _create_screenshot_review_with_issue(self, publish=False,
comment_text=None):
"""Sets up a review for a screenshot that includes an open issue.
If `publish` is True, the review is published. The review request is
always published.
Returns the response from posting the comment, the review object, and
the review request object.
"""
if not comment_text:
comment_text = 'Test screenshot comment with an opened issue'
review_request = self.create_review_request(publish=True,
submitter=self.user)
screenshot = self.create_screenshot(review_request)
review = self.create_review(review_request, user=self.user,
publish=publish)
comment = self.create_screenshot_comment(review, screenshot,
comment_text,
issue_opened=True)
return comment, review, review_request
class ResourceListTests(CommentListMixin, ReviewRequestChildListMixin,
BaseTestCase, metaclass=BasicTestsMetaclass):
"""Testing the ReviewScreenshotCommentResource list APIs."""
sample_api_url = 'review-requests/<id>/reviews/<id>/screenshot-comments/'
resource = resources.review_screenshot_comment
def setup_review_request_child_test(self, review_request):
self.create_screenshot(review_request)
review = self.create_review(review_request, user=self.user)
return (get_review_screenshot_comment_list_url(review),
screenshot_comment_list_mimetype)
def compare_item(self, item_rsp, comment):
self.assertEqual(item_rsp['id'], comment.pk)
self.assertEqual(item_rsp['text'], comment.text)
self.assertEqual(item_rsp['x'], comment.x)
self.assertEqual(item_rsp['y'], comment.y)
self.assertEqual(item_rsp['w'], comment.w)
self.assertEqual(item_rsp['h'], comment.h)
self.assertEqual(item_rsp['extra_data'], comment.extra_data)
if comment.rich_text:
self.assertEqual(item_rsp['text_type'], 'markdown')
else:
self.assertEqual(item_rsp['text_type'], 'plain')
#
# HTTP GET tests
#
def setup_basic_get_test(self, user, with_local_site, local_site_name,
populate_items):
review_request = self.create_review_request(
with_local_site=with_local_site,
submitter=user,
publish=True)
screenshot = self.create_screenshot(review_request)
review = self.create_review(review_request, user=user)
if populate_items:
items = [self.create_screenshot_comment(review, screenshot)]
else:
items = []
return (get_review_screenshot_comment_list_url(review,
local_site_name),
screenshot_comment_list_mimetype,
items)
#
# HTTP POST tests
#
def setup_basic_post_test(self, user, with_local_site, local_site_name,
post_valid_data):
review_request = self.create_review_request(
with_local_site=with_local_site,
submitter=user,
publish=True)
screenshot = self.create_screenshot(review_request)
review = self.create_review(review_request, user=user)
return (get_review_screenshot_comment_list_url(review,
local_site_name),
screenshot_comment_item_mimetype,
{
'screenshot_id': screenshot.pk,
'text': 'Test comment',
'x': 2,
'y': 2,
'w': 10,
'h': 10,
},
[review, screenshot])
def check_post_result(self, user, rsp, review, screenshot):
comment = \
ScreenshotComment.objects.get(pk=rsp['screenshot_comment']['id'])
self.compare_item(rsp['screenshot_comment'], comment)
def test_post_with_issue(self):
"""Testing the
POST review-requests/<id>/reviews/<id>/screenshot-comments/ API
with an issue
"""
comment_text = "Test screenshot comment with an opened issue"
comment, review, review_request = \
self._create_screenshot_review_with_issue(
publish=False, comment_text=comment_text)
rsp = self.api_get(
get_review_screenshot_comment_list_url(review),
expected_mimetype=screenshot_comment_list_mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertIn('screenshot_comments', rsp)
self.assertEqual(len(rsp['screenshot_comments']), 1)
self.assertEqual(rsp['screenshot_comments'][0]['text'], comment_text)
self.assertTrue(rsp['screenshot_comments'][0]['issue_opened'])
class ResourceItemTests(CommentItemMixin, ReviewRequestChildItemMixin,
BaseTestCase, metaclass=BasicTestsMetaclass):
"""Testing the ReviewScreenshotCommentResource item APIs."""
fixtures = ['test_users']
sample_api_url = \
'review-requests/<id>/reviews/<id>/screenshot-comments/<id>/'
resource = resources.review_screenshot_comment
def compare_item(self, item_rsp, comment):
self.assertEqual(item_rsp['id'], comment.pk)
self.assertEqual(item_rsp['text'], comment.text)
self.assertEqual(item_rsp['x'], comment.x)
self.assertEqual(item_rsp['y'], comment.y)
self.assertEqual(item_rsp['w'], comment.w)
self.assertEqual(item_rsp['h'], comment.h)
self.assertEqual(item_rsp['extra_data'], comment.extra_data)
if comment.rich_text:
self.assertEqual(item_rsp['text_type'], 'markdown')
else:
self.assertEqual(item_rsp['text_type'], 'plain')
def setup_review_request_child_test(self, review_request):
screenshot = self.create_screenshot(review_request)
review = self.create_review(review_request, user=self.user)
comment = self.create_screenshot_comment(review, screenshot)
return (get_review_screenshot_comment_item_url(review, comment.pk),
screenshot_comment_item_mimetype)
#
# HTTP DELETE tests
#
def setup_basic_delete_test(self, user, with_local_site, local_site_name):
review_request = self.create_review_request(
with_local_site=with_local_site,
submitter=user,
publish=True)
screenshot = self.create_screenshot(review_request)
review = self.create_review(review_request, user=user)
comment = self.create_screenshot_comment(review, screenshot)
return (get_review_screenshot_comment_item_url(review, comment.pk,
local_site_name),
[comment, review])
def check_delete_result(self, user, comment, review):
self.assertNotIn(comment, review.screenshot_comments.all())
def test_delete_with_does_not_exist_error(self):
"""Testing the
DELETE review-requests/<id>/reviews/<id>/screenshot-comments/<id>/ API
with Does Not Exist error
"""
review_request = self.create_review_request(publish=True)
self.create_screenshot(review_request)
review = self.create_review(review_request, user=self.user)
self.api_delete(get_review_screenshot_comment_item_url(review, 123),
expected_status=404)
#
# HTTP GET tests
#
def setup_basic_get_test(self, user, with_local_site, local_site_name):
review_request = self.create_review_request(
with_local_site=with_local_site,
submitter=user,
publish=True)
screenshot = self.create_screenshot(review_request)
review = self.create_review(review_request, user=user)
comment = self.create_screenshot_comment(review, screenshot)
return (get_review_screenshot_comment_item_url(review, comment.pk,
local_site_name),
screenshot_comment_item_mimetype,
comment)
#
# HTTP PUT tests
#
def setup_basic_put_test(self, user, with_local_site, local_site_name,
put_valid_data):
review_request = self.create_review_request(
with_local_site=with_local_site,
submitter=user,
publish=True)
screenshot = self.create_screenshot(review_request)
review = self.create_review(review_request, user=user)
comment = self.create_screenshot_comment(review, screenshot)
return (get_review_screenshot_comment_item_url(review, comment.pk,
local_site_name),
screenshot_comment_item_mimetype,
{'text': 'Test comment'},
comment,
[])
def check_put_result(self, user, item_rsp, comment, *args):
comment = ScreenshotComment.objects.get(pk=comment.pk)
self.assertEqual(item_rsp['text_type'], 'plain')
self.assertEqual(item_rsp['text'], 'Test comment')
self.compare_item(item_rsp, comment)
def test_put_with_issue(self):
"""Testing the
PUT review-requests/<id>/reviews/<id>/screenshot-comments/<id>/ API
with an issue, removing issue_opened
"""
comment, review, review_request = \
self._create_screenshot_review_with_issue()
rsp = self.api_put(
get_review_screenshot_comment_item_url(review, comment.pk),
{'issue_opened': False},
expected_mimetype=screenshot_comment_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertFalse(rsp['screenshot_comment']['issue_opened'])
def test_put_issue_status_before_publish(self):
"""Testing the
PUT review-requests/<id>/reviews/<id>/screenshot-comments/<id> API
with an issue, before review is published
"""
comment, review, review_request = \
self._create_screenshot_review_with_issue()
# The issue_status should not be able to be changed while the review is
# unpublished.
rsp = self.api_put(
get_review_screenshot_comment_item_url(review, comment.pk),
{'issue_status': 'resolved'},
expected_mimetype=screenshot_comment_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
# The issue_status should still be "open"
self.assertEqual(rsp['screenshot_comment']['issue_status'], 'open')
def test_put_issue_status_after_publish(self):
"""Testing the
PUT review-requests/<id>/reviews/<id>/screenshot-comments/<id>/ API
with an issue, after review is published
"""
comment, review, review_request = \
self._create_screenshot_review_with_issue(publish=True)
rsp = self.api_put(
get_review_screenshot_comment_item_url(review, comment.pk),
{'issue_status': 'resolved'},
expected_mimetype=screenshot_comment_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertEqual(rsp['screenshot_comment']['issue_status'], 'resolved')
def test_put_issue_status_by_issue_creator(self):
"""Testing the
PUT review-requests/<id>/reviews/<id>/screenshot-comments/<id>/ API
permissions for issue creator
"""
comment, review, review_request = \
self._create_screenshot_review_with_issue(publish=True)
# Change the owner of the review request so that it's not owned by
# self.user
review_request.submitter = User.objects.get(username='doc')
review_request.save()
# The review/comment (and therefore issue) is still owned by self.user,
# so we should be able to change the issue status.
rsp = self.api_put(
get_review_screenshot_comment_item_url(review, comment.pk),
{'issue_status': 'dropped'},
expected_mimetype=screenshot_comment_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertEqual(rsp['screenshot_comment']['issue_status'], 'dropped')
def test_put_issue_status_by_uninvolved_user(self):
"""Testing the
PUT review-requests/<id>/reviews/<id>/screenshot-comments/<id>/ API
permissions for an uninvolved user
"""
comment, review, review_request = \
self._create_screenshot_review_with_issue(publish=True)
# Change the owner of the review request and review so that they're not
# owned by self.user.
new_owner = User.objects.get(username='doc')
review_request.submitter = new_owner
review_request.save()
review.user = new_owner
review.save()
rsp = self.api_put(
get_review_screenshot_comment_item_url(review, comment.pk),
{'issue_status': 'dropped'},
expected_status=403)
self.assertEqual(rsp['stat'], 'fail')
self.assertEqual(rsp['err']['code'], PERMISSION_DENIED.code)
def test_put_deleted_screenshot_comment_issue_status(self):
"""Testing the
PUT review-requests/<id>/reviews/<id>/screenshot-comments/<id>
API with an issue and a deleted screenshot
"""
comment_text = "Test screenshot comment with an opened issue"
x, y, w, h = (2, 2, 10, 10)
review_request = self.create_review_request(publish=True,
submitter=self.user,
target_people=[self.user])
screenshot = self.create_screenshot(review_request)
review = self.create_review(review_request, user=self.user)
comment = self.create_screenshot_comment(review, screenshot,
comment_text, x, y, w, h,
issue_opened=True)
# First, let's ensure that the user that has created the comment
# cannot alter the issue_status while the review is unpublished.
rsp = self.api_put(
get_review_screenshot_comment_item_url(review, comment.pk),
{'issue_status': 'resolved'},
expected_mimetype=screenshot_comment_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
# The issue_status should still be "open"
self.assertEqual(rsp['screenshot_comment']['issue_status'], 'open')
# Next, let's publish the review, and try altering the issue_status.
# This should be allowed, since the review request was made by the
# current user.
review.public = True
review.save()
rsp = self.api_put(
rsp['screenshot_comment']['links']['self']['href'],
{'issue_status': 'resolved'},
expected_mimetype=screenshot_comment_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertEqual(rsp['screenshot_comment']['issue_status'], 'resolved')
# Delete the screenshot.
self._delete_screenshot(review_request, screenshot)
review_request.publish(review_request.submitter)
# Try altering the issue_status. This should be allowed.
rsp = self.api_put(
rsp['screenshot_comment']['links']['self']['href'],
{'issue_status': 'open'},
expected_mimetype=screenshot_comment_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertEqual(rsp['screenshot_comment']['issue_status'], 'open')
| 41.198529 | 79 | 0.636861 | 16,001 | 0.951931 | 0 | 0 | 0 | 0 | 0 | 0 | 3,794 | 0.225712 |
98caf2eb8158bde50b1d44dd5a0629d9a33340c7 | 1,163 | py | Python | qbapi/app.py | dimddev/qb | fbf9f4cac8aaf14243229e3193960da7114bb7ba | [
"BSD-3-Clause"
]
| null | null | null | qbapi/app.py | dimddev/qb | fbf9f4cac8aaf14243229e3193960da7114bb7ba | [
"BSD-3-Clause"
]
| null | null | null | qbapi/app.py | dimddev/qb | fbf9f4cac8aaf14243229e3193960da7114bb7ba | [
"BSD-3-Clause"
]
| null | null | null | """
Command line tool
"""
import asyncio
from qbapi.request import create_request
from qbapi.services.clients import Producer, Consumer
async def spider(user_data: tuple) -> None:
"""spider
:param user_data:
:type user_data: tuple
:rtype: None
"""
producer_queue = asyncio.Queue()
consumer_queue = asyncio.Queue()
max_workers = 0
for data in user_data:
await producer_queue.put(await create_request(data))
max_workers += 1
producer_tasks = []
consumer_tasks = []
for _ in range(max_workers):
producer_tasks.append(
asyncio.create_task(
Producer().process(producer_queue, consumer_queue)
)
)
consumer_tasks.append(
asyncio.create_task(
Consumer().process(consumer_queue)
)
)
await producer_queue.join()
await consumer_queue.join()
for i, task in enumerate(producer_tasks):
task.cancel()
consumer_tasks[i].cancel()
await asyncio.gather(*producer_tasks, return_exceptions=True)
await asyncio.gather(*consumer_tasks, return_exceptions=True)
| 21.943396 | 66 | 0.638005 | 0 | 0 | 0 | 0 | 0 | 0 | 1,023 | 0.879622 | 109 | 0.093723 |
98cd18a83142f071207fd03be7967e2e0520ebe6 | 9,063 | py | Python | test/test_literal.py | hrnciar/rdflib | d507fdac93be2ec3e35882e3efaa5e7c7349fa93 | [
"BSD-3-Clause"
]
| null | null | null | test/test_literal.py | hrnciar/rdflib | d507fdac93be2ec3e35882e3efaa5e7c7349fa93 | [
"BSD-3-Clause"
]
| null | null | null | test/test_literal.py | hrnciar/rdflib | d507fdac93be2ec3e35882e3efaa5e7c7349fa93 | [
"BSD-3-Clause"
]
| null | null | null | import unittest
import datetime
import rdflib # needed for eval(repr(...)) below
from rdflib.term import Literal, URIRef, _XSD_DOUBLE, bind, _XSD_BOOLEAN
from rdflib.namespace import XSD
def uformat(s):
return s.replace("u'", "'")
class TestLiteral(unittest.TestCase):
def setUp(self):
pass
def test_repr_apostrophe(self):
a = rdflib.Literal("'")
b = eval(repr(a))
self.assertEqual(a, b)
def test_repr_quote(self):
a = rdflib.Literal('"')
b = eval(repr(a))
self.assertEqual(a, b)
def test_backslash(self):
d = r"""
<rdf:RDF
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:foo="http://example.org/foo#">
<rdf:Description>
<foo:bar>a\b</foo:bar>
</rdf:Description>
</rdf:RDF>
"""
g = rdflib.Graph()
g.parse(data=d, format="xml")
a = rdflib.Literal("a\\b")
b = list(g.objects())[0]
self.assertEqual(a, b)
def test_literal_from_bool(self):
l = rdflib.Literal(True)
self.assertEqual(l.datatype, rdflib.XSD["boolean"])
class TestNew(unittest.TestCase):
def testCantPassLangAndDatatype(self):
self.assertRaises(
TypeError, Literal, "foo", lang="en", datatype=URIRef("http://example.com/")
)
def testFromOtherLiteral(self):
l = Literal(1)
l2 = Literal(l)
self.assertTrue(isinstance(l.value, int))
self.assertTrue(isinstance(l2.value, int))
# change datatype
l = Literal("1")
l2 = Literal(l, datatype=rdflib.XSD.integer)
self.assertTrue(isinstance(l2.value, int))
def testDatatypeGetsAutoURIRefConversion(self):
# drewp disapproves of this behavior, but it should be
# represented in the tests
x = Literal("foo", datatype="http://example.com/")
self.assertTrue(isinstance(x.datatype, URIRef))
x = Literal("foo", datatype=Literal("pennies"))
self.assertEqual(x.datatype, URIRef("pennies"))
class TestRepr(unittest.TestCase):
def testOmitsMissingDatatypeAndLang(self):
self.assertEqual(repr(Literal("foo")), uformat("rdflib.term.Literal(u'foo')"))
def testOmitsMissingDatatype(self):
self.assertEqual(
repr(Literal("foo", lang="en")),
uformat("rdflib.term.Literal(u'foo', lang='en')"),
)
def testOmitsMissingLang(self):
self.assertEqual(
repr(Literal("foo", datatype=URIRef("http://example.com/"))),
uformat(
"rdflib.term.Literal(u'foo', datatype=rdflib.term.URIRef(u'http://example.com/'))"
),
)
def testSubclassNameAppearsInRepr(self):
class MyLiteral(Literal):
pass
x = MyLiteral("foo")
self.assertEqual(repr(x), uformat("MyLiteral('foo')"))
class TestDoubleOutput(unittest.TestCase):
def testNoDanglingPoint(self):
"""confirms the fix for https://github.com/RDFLib/rdflib/issues/237"""
vv = Literal("0.88", datatype=_XSD_DOUBLE)
out = vv._literal_n3(use_plain=True)
self.assertTrue(out in ["8.8e-01", "0.88"], out)
class TestParseBoolean(unittest.TestCase):
"""confirms the fix for https://github.com/RDFLib/rdflib/issues/913"""
def testTrueBoolean(self):
test_value = Literal("tRue", datatype=_XSD_BOOLEAN)
self.assertTrue(test_value.value)
test_value = Literal("1", datatype=_XSD_BOOLEAN)
self.assertTrue(test_value.value)
def testFalseBoolean(self):
test_value = Literal("falsE", datatype=_XSD_BOOLEAN)
self.assertFalse(test_value.value)
test_value = Literal("0", datatype=_XSD_BOOLEAN)
self.assertFalse(test_value.value)
def testNonFalseBoolean(self):
test_value = Literal("abcd", datatype=_XSD_BOOLEAN)
self.assertRaises(DeprecationWarning)
self.assertFalse(test_value.value)
test_value = Literal("10", datatype=_XSD_BOOLEAN)
self.assertRaises(DeprecationWarning)
self.assertFalse(test_value.value)
class TestBindings(unittest.TestCase):
def testBinding(self):
class a:
def __init__(self, v):
self.v = v[3:-3]
def __str__(self):
return "<<<%s>>>" % self.v
dtA = rdflib.URIRef("urn:dt:a")
bind(dtA, a)
va = a("<<<2>>>")
la = Literal(va, normalize=True)
self.assertEqual(la.value, va)
self.assertEqual(la.datatype, dtA)
la2 = Literal("<<<2>>>", datatype=dtA)
self.assertTrue(isinstance(la2.value, a))
self.assertEqual(la2.value.v, va.v)
class b:
def __init__(self, v):
self.v = v[3:-3]
def __str__(self):
return "B%s" % self.v
dtB = rdflib.URIRef("urn:dt:b")
bind(dtB, b, None, lambda x: "<<<%s>>>" % x)
vb = b("<<<3>>>")
lb = Literal(vb, normalize=True)
self.assertEqual(lb.value, vb)
self.assertEqual(lb.datatype, dtB)
def testSpecificBinding(self):
def lexify(s):
return "--%s--" % s
def unlexify(s):
return s[2:-2]
datatype = rdflib.URIRef("urn:dt:mystring")
# Datatype-specific rule
bind(datatype, str, unlexify, lexify, datatype_specific=True)
s = "Hello"
normal_l = Literal(s)
self.assertEqual(str(normal_l), s)
self.assertEqual(normal_l.toPython(), s)
self.assertEqual(normal_l.datatype, None)
specific_l = Literal("--%s--" % s, datatype=datatype)
self.assertEqual(str(specific_l), lexify(s))
self.assertEqual(specific_l.toPython(), s)
self.assertEqual(specific_l.datatype, datatype)
class TestXsdLiterals(unittest.TestCase):
def test_make_literals(self):
"""
Tests literal construction.
"""
inputs = [
            # these literals do not get converted to python types
("ABCD", XSD.integer, None),
("ABCD", XSD.gYear, None),
("-10000", XSD.gYear, None),
("-1921-00", XSD.gYearMonth, None),
("1921-00", XSD.gMonthDay, None),
("1921-13", XSD.gMonthDay, None),
("-1921-00", XSD.gMonthDay, None),
("10", XSD.gDay, None),
("-1", XSD.gDay, None),
("0000", XSD.gYear, None),
("0000-00-00", XSD.date, None),
("NOT A VALID HEX STRING", XSD.hexBinary, None),
("NOT A VALID BASE64 STRING", XSD.base64Binary, None),
# these literals get converted to python types
("1921-05-01", XSD.date, datetime.date),
("1921-05-01T00:00:00", XSD.dateTime, datetime.datetime),
("1921-05", XSD.gYearMonth, datetime.date),
("0001-01", XSD.gYearMonth, datetime.date),
("0001-12", XSD.gYearMonth, datetime.date),
("2002-01", XSD.gYearMonth, datetime.date),
("9999-01", XSD.gYearMonth, datetime.date),
("9999-12", XSD.gYearMonth, datetime.date),
("1921", XSD.gYear, datetime.date),
("2000", XSD.gYear, datetime.date),
("0001", XSD.gYear, datetime.date),
("9999", XSD.gYear, datetime.date),
("1982", XSD.gYear, datetime.date),
("2002", XSD.gYear, datetime.date),
("1921-05-01T00:00:00+00:30", XSD.dateTime, datetime.datetime),
("1921-05-01T00:00:00-00:30", XSD.dateTime, datetime.datetime),
("abcdef0123", XSD.hexBinary, bytes),
("", XSD.hexBinary, bytes),
("UkRGTGli", XSD.base64Binary, bytes),
("", XSD.base64Binary, bytes),
]
self.check_make_literals(inputs)
@unittest.expectedFailure
def test_make_literals_ki(self):
"""
Known issues with literal construction.
"""
inputs = [
("1921-01Z", XSD.gYearMonth, datetime.date),
("1921Z", XSD.gYear, datetime.date),
("1921-00", XSD.gYearMonth, datetime.date),
("1921-05-01Z", XSD.date, datetime.date),
("1921-05-01+00:30", XSD.date, datetime.date),
("1921-05-01+00:30", XSD.date, datetime.date),
("1921-05-01+00:00", XSD.date, datetime.date),
("1921-05-01+00:00", XSD.date, datetime.date),
("1921-05-01T00:00:00Z", XSD.dateTime, datetime.datetime),
]
self.check_make_literals(inputs)
def check_make_literals(self, inputs):
for literal_pair in inputs:
(lexical, type, value_cls) = literal_pair
            with self.subTest(f"testing {literal_pair}"):
literal = Literal(lexical, datatype=type)
if value_cls is not None:
self.assertIsInstance(literal.value, value_cls)
else:
self.assertIsNone(literal.value)
self.assertEqual(lexical, f"{literal}")
if __name__ == "__main__":
unittest.main()
| 33.69145 | 98 | 0.580051 | 8,754 | 0.965905 | 0 | 0 | 727 | 0.080216 | 0 | 0 | 1,714 | 0.189121 |
98cd74ddbd3fcee3b46641490c05dc6a010713cd | 2,472 | py | Python | src/messages/text/ruling.py | rkulyn/telegram-dutch-taxbot | f6c2222e5f2b9f96d8e035e9d6f64c67da3a73e1 | [
"MIT"
]
| 2 | 2020-02-27T13:15:07.000Z | 2020-09-19T15:19:29.000Z | src/messages/text/ruling.py | rkulyn/telegram-dutch-taxbot | f6c2222e5f2b9f96d8e035e9d6f64c67da3a73e1 | [
"MIT"
]
| null | null | null | src/messages/text/ruling.py | rkulyn/telegram-dutch-taxbot | f6c2222e5f2b9f96d8e035e9d6f64c67da3a73e1 | [
"MIT"
]
| null | null | null | import telegram
from emoji import emojize
from .base import TextMessageBase
class RulingHelpTextMessage(TextMessageBase):
"""
Ruling help message.
Taken from:
https://www.iamexpat.nl/expat-info/taxation/30-percent-ruling/requirements
"""
def get_text(self):
message = emojize(
"<b>30% RULING INCOME REQUIREMENTS</b>\n\n"
"<a href=\"https://www.iamexpat.nl/expat-info/taxation/30-percent-ruling/requirements\">Go to Source</a>\n\n"
"<b>2019 salary requirements</b>\n\n"
":small_blue_diamond: Minimum taxable salary at 70%: <b>37743 EUR</b> \n"
":small_blue_diamond: Employee with master's degree: <b>28690 EUR</b> \n"
":small_blue_diamond: Scientific researchers: <b>No minimum</b> \n"
":small_blue_diamond: Medical training specialists: <b>No minimum</b> \n\n"
"<b>2018 salary requirements</b>\n\n"
":small_blue_diamond: Minimum taxable salary at 70%: <b>37296 EUR</b> \n"
":small_blue_diamond: Employee with master's degree: <b>28350 EUR</b> \n"
":small_blue_diamond: Scientific researchers: <b>No minimum</b> \n"
":small_blue_diamond: Medical training specialists: <b>No minimum</b> \n\n"
"<b>2017 salary requirements</b>\n\n"
":small_blue_diamond: Minimum taxable salary at 70%: <b>37000 EUR</b> \n"
":small_blue_diamond: Employee with master's degree: <b>28125 EUR</b> \n"
":small_blue_diamond: Scientific researchers: <b>No minimum</b> \n"
":small_blue_diamond: Medical training specialists: <b>No minimum</b> \n\n"
"<b>2016 salary requirements</b>\n\n"
":small_blue_diamond: Minimum taxable salary at 70%: <b>36889 EUR</b> \n"
":small_blue_diamond: Employee with master's degree: <b>28041 EUR</b> \n"
":small_blue_diamond: Scientific researchers: <b>No minimum</b> \n"
":small_blue_diamond: Medical training specialists: <b>No minimum</b> \n\n"
"Type /start to start calculation. \n"
"Type /help get more details. \n\n",
use_aliases=True
)
return message
def get_options(self):
"""
Disable link preview.
Add HTML tags render support.
"""
return {
"disable_web_page_preview": True,
"parse_mode": telegram.ParseMode.HTML,
}
| 44.142857 | 121 | 0.607605 | 2,392 | 0.967638 | 0 | 0 | 0 | 0 | 0 | 0 | 1,780 | 0.720065 |
98cecf3619ad0f5f809b91b86260d60284ee57d7 | 14,312 | py | Python | extras/20190910/code/dummy_11a/resnet18_unet_softmax_01/train.py | pyaf/severstal-steel-defect-detection | 68a0df4164e84803b6cba78597a079d3736b4e00 | [
"MIT"
]
| null | null | null | extras/20190910/code/dummy_11a/resnet18_unet_softmax_01/train.py | pyaf/severstal-steel-defect-detection | 68a0df4164e84803b6cba78597a079d3736b4e00 | [
"MIT"
]
| null | null | null | extras/20190910/code/dummy_11a/resnet18_unet_softmax_01/train.py | pyaf/severstal-steel-defect-detection | 68a0df4164e84803b6cba78597a079d3736b4e00 | [
"MIT"
]
| null | null | null | import os
os.environ['CUDA_VISIBLE_DEVICES']='0'
from common import *
from dataset import *
from model import *
def valid_augment(image, mask, infor):
return image, mask, infor
def train_augment(image, mask, infor):
u=np.random.choice(3)
if u==0:
pass
elif u==1:
image, mask = do_random_crop_rescale(image, mask, 1600-(256-224), 224)
elif u==2:
image, mask = do_random_crop_rotate_rescale(image, mask, 1600-(256-224), 224)
if np.random.rand()>0.5:
image = do_random_log_contast(image)
if np.random.rand()>0.5:
image, mask = do_flip_lr(image, mask)
if np.random.rand()>0.5:
image, mask = do_flip_ud(image, mask)
if np.random.rand()>0.5:
image, mask = do_noise(image, mask)
return image, mask, infor
def null_collate(batch):
batch_size = len(batch)
input = []
truth_mask = []
truth_label = []
infor = []
for b in range(batch_size):
input.append(batch[b][0])
#truth_mask.append(batch[b][1])
infor.append(batch[b][2])
mask = batch[b][1]
label = (mask.reshape(4,-1).sum(1)>0).astype(np.int32)
num_class,H,W = mask.shape
mask = mask.transpose(1,2,0)*[1,2,3,4]
mask = mask.reshape(-1,4)
mask = mask.max(-1).reshape(1,H,W)
truth_mask.append(mask)
truth_label.append(label)
input = np.stack(input)
input = image_to_input(input, IMAGE_RGB_MEAN,IMAGE_RGB_STD)
input = torch.from_numpy(input).float()
truth_mask = np.stack(truth_mask)
truth_mask = torch.from_numpy(truth_mask).long()
truth_label = np.array(truth_label)
truth_label = torch.from_numpy(truth_label).float()
return input, truth_mask, truth_label, infor
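# Added shape note (illustrative, assuming image_to_input produces channel-first
# arrays): for a batch of B samples, null_collate returns input of shape
# (B, 3, H, W), truth_mask of shape (B, 1, H, W) holding class ids 0-4
# (0 = background), and truth_label of shape (B, 4) with per-class presence flags.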
#------------------------------------
def do_valid(net, valid_loader, out_dir=None):
#out_dir=None
valid_num = np.zeros(11, np.float32)
valid_loss = np.zeros(11, np.float32)
for t, (input, truth_mask, truth_label, infor) in enumerate(valid_loader):
#if b==5: break
net.eval()
input = input.cuda()
truth_mask = truth_mask.cuda()
truth_label = truth_label.cuda()
with torch.no_grad():
logit = data_parallel(net, input) #net(input)
loss = criterion(logit, truth_mask)
tn,tp, num_neg,num_pos = metric_hit(logit, truth_mask)
dn,dp, num_neg,num_pos = metric_dice(logit, truth_mask, threshold=0.5, sum_threshold=100)
#zz=0
#---
batch_size = len(infor)
l = np.array([ loss.item(), tn,*tp, dn,*dp ])
n = np.array([ batch_size, num_neg,*num_pos, num_neg,*num_pos ])
valid_loss += l*n
valid_num += n
# debug-----------------------------
if out_dir is not None:
probability = torch.softmax(logit,1)
image = input_to_image(input, IMAGE_RGB_MEAN,IMAGE_RGB_STD)
probability = one_hot_encode_predict(probability)
truth_mask = one_hot_encode_truth(truth_mask)
probability_mask = probability.data.cpu().numpy()
truth_label = truth_label.data.cpu().numpy()
truth_mask = truth_mask.data.cpu().numpy()
for b in range(0, batch_size, 4):
image_id = infor[b].image_id[:-4]
result = draw_predict_result(image[b], truth_mask[b], truth_label[b], probability_mask[b], stack='vertical')
draw_shadow_text(result,'%05d %s.jpg'%(valid_num[0]-batch_size+b, image_id),(5,24),1,[255,255,255],2)
image_show('result',result,resize=1)
cv2.imwrite(out_dir +'/valid/%s.png'%(infor[b].image_id[:-4]), result)
cv2.waitKey(1)
pass
# debug-----------------------------
#print(valid_loss)
print('\r %8d /%8d'%(valid_num[0], len(valid_loader.dataset)),end='',flush=True)
pass #-- end of one data loader --
assert(valid_num[0] == len(valid_loader.dataset))
valid_loss = valid_loss/valid_num
return valid_loss
def run_train():
out_dir = \
'/root/share/project/kaggle/2019/steel/result1/resnet18-seg-full-softmax-foldb1-1-4balance'
initial_checkpoint = \
'/root/share/project/kaggle/2019/steel/result1/resnet18-seg-full-softmax-foldb1-1-4balance/checkpoint/00114000_model.pth'
schduler = NullScheduler(lr=0.001)
batch_size = 8 #8
iter_accum = 4
loss_weight = None#[5,5,2,5] #
train_sampler = FourBalanceClassSampler #RandomSampler
## setup -----------------------------------------------------------------------------
for f in ['checkpoint','train','valid','backup'] : os.makedirs(out_dir +'/'+f, exist_ok=True)
backup_project_as_zip(PROJECT_PATH, out_dir +'/backup/code.train.%s.zip'%IDENTIFIER)
log = Logger()
log.open(out_dir+'/log.train.txt',mode='a')
log.write('\n--- [START %s] %s\n\n' % (IDENTIFIER, '-' * 64))
log.write('\t%s\n' % COMMON_STRING)
log.write('\n')
log.write('\tSEED = %u\n' % SEED)
log.write('\tPROJECT_PATH = %s\n' % PROJECT_PATH)
log.write('\t__file__ = %s\n' % __file__)
log.write('\tout_dir = %s\n' % out_dir)
log.write('\n')
## dataset ----------------------------------------
log.write('** dataset setting **\n')
train_dataset = SteelDataset(
mode = 'train',
csv = ['train.csv',],
split = ['train_b1_11568.npy',],
augment = train_augment,
)
train_loader = DataLoader(
train_dataset,
sampler = train_sampler(train_dataset),
batch_size = batch_size,
drop_last = True,
num_workers = 8,
pin_memory = True,
collate_fn = null_collate
)
valid_dataset = SteelDataset(
mode = 'train',
csv = ['train.csv',],
split = ['valid_b1_1000.npy',],
augment = valid_augment,
)
valid_loader = DataLoader(
valid_dataset,
sampler = SequentialSampler(valid_dataset),
batch_size = 4,
drop_last = False,
num_workers = 8,
pin_memory = True,
collate_fn = null_collate
)
assert(len(train_dataset)>=batch_size)
log.write('batch_size = %d\n'%(batch_size))
log.write('train_dataset : \n%s\n'%(train_dataset))
log.write('valid_dataset : \n%s\n'%(valid_dataset))
log.write('\n')
## net ----------------------------------------
log.write('** net setting **\n')
net = Net().cuda()
log.write('\tinitial_checkpoint = %s\n' % initial_checkpoint)
if initial_checkpoint is not None:
state_dict = torch.load(initial_checkpoint, map_location=lambda storage, loc: storage)
        ##for k in ['logit.weight','logit.bias']: state_dict.pop(k, None) #transfer sigmoid feature to softmax network
##net.load_state_dict(state_dict,strict=False)
net.load_state_dict(state_dict,strict=False)
else:
net.load_pretrain(skip=['logit'], is_print=False)
log.write('%s\n'%(type(net)))
log.write('\tloss_weight = %s\n' % str(loss_weight))
log.write('\ttrain_loader.sampler = %s\n' % str(train_loader.sampler))
log.write('\n')
## optimiser ----------------------------------
# if 0: ##freeze
# for p in net.encoder1.parameters(): p.requires_grad = False
# pass
#net.set_mode('train',is_freeze_bn=True)
#-----------------------------------------------
#optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, net.parameters()),lr=schduler(0))
#optimizer = torch.optim.RMSprop(net.parameters(), lr =0.0005, alpha = 0.95)
optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, net.parameters()), lr=schduler(0), momentum=0.9, weight_decay=0.0001)
num_iters = 3000*1000
iter_smooth = 50
iter_log = 500
iter_valid = 1500
iter_save = [0, num_iters-1]\
+ list(range(0, num_iters, 1500))#1*1000
start_iter = 0
start_epoch= 0
rate = 0
if initial_checkpoint is not None:
initial_optimizer = initial_checkpoint.replace('_model.pth','_optimizer.pth')
if os.path.exists(initial_optimizer):
checkpoint = torch.load(initial_optimizer)
start_iter = checkpoint['iter' ]
start_epoch = checkpoint['epoch']
#optimizer.load_state_dict(checkpoint['optimizer'])
pass
log.write('optimizer\n %s\n'%(optimizer))
log.write('schduler\n %s\n'%(schduler))
log.write('\n')
## start training here! ##############################################
log.write('** start training here! **\n')
log.write(' batch_size=%d, iter_accum=%d\n'%(batch_size,iter_accum))
log.write(' experiment = %s\n' % __file__.split('/')[-2])
log.write(' |-------------------------------- VALID-----------------------------|---------- TRAIN/BATCH ------------------------------\n')
log.write('rate iter epoch | loss hit_neg,pos1,2,3,4 dice_neg,pos1,2,3,4 | loss hit_neg,pos1,2,3,4 | time \n')
log.write('------------------------------------------------------------------------------------------------------------------------------------------------\n')
#0.00000 0.0* 0.0 | 0.690 0.50 [0.00,1.00,0.00,1.00] 0.44 [0.00,0.02,0.00,0.15] | 0.000 0.00 [0.00,0.00,0.00,0.00] | 0 hr 00 min
train_loss = np.zeros(20,np.float32)
valid_loss = np.zeros(20,np.float32)
batch_loss = np.zeros(20,np.float32)
iter = 0
i = 0
start = timer()
while iter<num_iters:
sum_train_loss = np.zeros(20,np.float32)
sum = np.zeros(20,np.float32)
optimizer.zero_grad()
for t, (input, truth_mask, truth_label, infor) in enumerate(train_loader):
batch_size = len(infor)
iter = i + start_iter
epoch = (iter-start_iter)*batch_size/len(train_dataset) + start_epoch
#if 0:
if (iter % iter_valid==0):
valid_loss = do_valid(net, valid_loader, out_dir) #
#pass
if (iter % iter_log==0):
print('\r',end='',flush=True)
asterisk = '*' if iter in iter_save else ' '
log.write('%0.5f %5.1f%s %5.1f | %5.3f %4.2f [%4.2f,%4.2f,%4.2f,%4.2f] %4.2f [%4.2f,%4.2f,%4.2f,%4.2f] | %5.3f %4.2f [%4.2f,%4.2f,%4.2f,%4.2f] | %s' % (\
rate, iter/1000, asterisk, epoch,
*valid_loss[:11],
*train_loss[:6],
time_to_str((timer() - start),'min'))
)
log.write('\n')
#if 0:
if iter in iter_save:
torch.save(net.state_dict(),out_dir +'/checkpoint/%08d_model.pth'%(iter))
torch.save({
#'optimizer': optimizer.state_dict(),
'iter' : iter,
'epoch' : epoch,
}, out_dir +'/checkpoint/%08d_optimizer.pth'%(iter))
pass
# learning rate schduler -------------
lr = schduler(iter)
if lr<0 : break
adjust_learning_rate(optimizer, lr)
rate = get_learning_rate(optimizer)
# one iteration update -------------
#net.set_mode('train',is_freeze_bn=True)
net.train()
input = input.cuda()
truth_label = truth_label.cuda()
truth_mask = truth_mask.cuda()
logit = data_parallel(net,input) #net(input)
loss = criterion(logit, truth_mask, loss_weight)
tn,tp, num_neg,num_pos = metric_hit(logit, truth_mask)
(loss/iter_accum).backward()
if (iter % iter_accum)==0:
optimizer.step()
optimizer.zero_grad()
# print statistics ------------
l = np.array([ loss.item(), tn,*tp ])
n = np.array([ batch_size, num_neg,*num_pos ])
batch_loss[:6] = l
sum_train_loss[:6] += l*n
sum[:6] += n
if iter%iter_smooth == 0:
train_loss = sum_train_loss/(sum+1e-12)
sum_train_loss[...] = 0
sum[...] = 0
print('\r',end='',flush=True)
asterisk = ' '
print('%0.5f %5.1f%s %5.1f | %5.3f %4.2f [%4.2f,%4.2f,%4.2f,%4.2f] %4.2f [%4.2f,%4.2f,%4.2f,%4.2f] | %5.3f %4.2f [%4.2f,%4.2f,%4.2f,%4.2f] | %s' % (\
rate, iter/1000, asterisk, epoch,
*valid_loss[:11],
*batch_loss[:6],
time_to_str((timer() - start),'min'))
, end='',flush=True)
i=i+1
# debug-----------------------------
if 1:
for di in range(3):
if (iter+di)%1000==0:
probability = torch.softmax(logit,1)
image = input_to_image(input, IMAGE_RGB_MEAN,IMAGE_RGB_STD)
probability = one_hot_encode_predict(probability)
truth_mask = one_hot_encode_truth(truth_mask)
probability_mask = probability.data.cpu().numpy()
truth_label = truth_label.data.cpu().numpy()
truth_mask = truth_mask.data.cpu().numpy()
for b in range(batch_size):
result = draw_predict_result(image[b], truth_mask[b], truth_label[b], probability_mask[b], stack='vertical')
image_show('result',result,resize=1)
cv2.imwrite(out_dir +'/train/%05d.png'%(di*100+b), result)
cv2.waitKey(1)
pass
pass #-- end of one data loader --
pass #-- end of all iterations --
log.write('\n')
# main #################################################################
if __name__ == '__main__':
print( '%s: calling main function ... ' % os.path.basename(__file__))
run_train()
| 33.995249 | 180 | 0.523896 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,571 | 0.249511 |
98d09bdc81e45d8b676af2c3e285dd5d038ee1da | 1,283 | py | Python | city_coord_download.py | Yuchen971/Chinese-city-level-geojson | 51f8d3d336f3e335b15bbf37882a9f248f0e6461 | [
"MIT"
]
| null | null | null | city_coord_download.py | Yuchen971/Chinese-city-level-geojson | 51f8d3d336f3e335b15bbf37882a9f248f0e6461 | [
"MIT"
]
| null | null | null | city_coord_download.py | Yuchen971/Chinese-city-level-geojson | 51f8d3d336f3e335b15bbf37882a9f248f0e6461 | [
"MIT"
]
| null | null | null | import requests
import os
def get_json(save_dir, adcode):
    # Fetch the boundary outline of the current area
base_url = 'https://geo.datav.aliyun.com/areas/bound/' + str(adcode) + '.json'
full_url = 'https://geo.datav.aliyun.com/areas/bound/' + str(adcode) + '_full.json'
base_r = requests.get(base_url)
if base_r.status_code == 200:
cur_obj_name = base_r.json()['features'][0]['properties']['name']
print(cur_obj_name)
cur_file_dir = os.path.join(save_dir, cur_obj_name)
if not os.path.exists(cur_file_dir):
os.mkdir(cur_file_dir)
base_json_file = os.path.join(cur_file_dir, str(adcode) + '.json')
with open(base_json_file, 'w') as file:
file.write(base_r.text)
    # Fetch the boundary outlines of the current area's sub-areas
full_r = requests.get(full_url)
if full_r.status_code == 200 and 'cur_obj_name' in vars():
full_json_file = os.path.join(cur_file_dir, str(adcode) + '_full.json')
with open(full_json_file, 'w') as file:
file.write(full_r.text)
for item in full_r.json()['features']:
chadcode = item['properties']['adcode']
if chadcode == adcode:
pass
else:
get_json(cur_file_dir, chadcode)
get_json('/Users/yuchenli/Downloads/city_geojson-master', 100000) | 44.241379 | 87 | 0.626656 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 310 | 0.234671 |
98d0ac63dc3b2801dd2a6ec85e229c55affc71b1 | 703 | py | Python | myproject/core/clusterAnalysis.py | xiaoxiansheng19/data_analysis | 9e05aada3f545472500e04225d8537b7f0f90a85 | [
"MIT"
]
| null | null | null | myproject/core/clusterAnalysis.py | xiaoxiansheng19/data_analysis | 9e05aada3f545472500e04225d8537b7f0f90a85 | [
"MIT"
]
| null | null | null | myproject/core/clusterAnalysis.py | xiaoxiansheng19/data_analysis | 9e05aada3f545472500e04225d8537b7f0f90a85 | [
"MIT"
]
| null | null | null | # from sklearn.cluster import DBSCAN,KMeans
#
#
# def run(data,radius=300):
# res={}
#     # Default parameters: epsilon=0.001, min_samples=200
# epsilon = radius / 100000
# # epsilon = 0.003
# min_samples = 100
# db = DBSCAN(eps=epsilon, min_samples=min_samples)
#     # eps is the maximum distance between two samples for them to be considered part of the same cluster
#     # min_samples is the minimum number of samples a cluster must contain; with fewer than this, no cluster is formed
# y_pred = db.fit_predict(data)
# # print(y_pred)
# # df_user_info['label'] = y_pred
#     n_clusters_ = len(set(y_pred)) - (1 if -1 in y_pred else 0)  # get the number of clusters
# if n_clusters_<1:
# model = KMeans(n_clusters=1, random_state=0)
# model.fit(data)
# centroid = model.cluster_centers_
# res['point']= | 33.47619 | 76 | 0.627312 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 813 | 0.97599 |
98d0b391f82dbbbda80cf6f637cf8415548b806e | 1,881 | py | Python | verticapy/tests/vDataFrame/test_vDF_create.py | sitingren/VerticaPy | aa18f4f1277e264005de2d1a8646c28acd1ba137 | [
"Apache-2.0"
]
| null | null | null | verticapy/tests/vDataFrame/test_vDF_create.py | sitingren/VerticaPy | aa18f4f1277e264005de2d1a8646c28acd1ba137 | [
"Apache-2.0"
]
| null | null | null | verticapy/tests/vDataFrame/test_vDF_create.py | sitingren/VerticaPy | aa18f4f1277e264005de2d1a8646c28acd1ba137 | [
"Apache-2.0"
]
| null | null | null | # (c) Copyright [2018-2021] Micro Focus or one of its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest, warnings
from verticapy import vDataFrame, drop_table
from verticapy import set_option
set_option("print_info", False)
@pytest.fixture(scope="module")
def titanic_vd(base):
from verticapy.learn.datasets import load_titanic
titanic = load_titanic(cursor=base.cursor)
yield titanic
with warnings.catch_warnings(record=True) as w:
drop_table(name="public.titanic", cursor=base.cursor)
class TestvDFCreate:
def test_creating_vDF_using_input_relation(self, base, titanic_vd):
tvdf = vDataFrame(input_relation="public.titanic", cursor=base.cursor)
assert tvdf["pclass"].count() == 1234
def test_creating_vDF_using_input_relation_schema(self, base, titanic_vd):
tvdf = vDataFrame(input_relation="titanic", schema="public", cursor=base.cursor)
assert tvdf["pclass"].count() == 1234
def test_creating_vDF_using_input_relation_vcolumns(self, base, titanic_vd):
tvdf = vDataFrame(
input_relation="public.titanic",
usecols=["age", "survived"],
cursor=base.cursor,
)
assert tvdf["survived"].count() == 1234
@pytest.mark.skip(reason="test not implemented")
def test_creating_vDF_using_input_relation_dsn(self):
pass
| 34.2 | 88 | 0.725678 | 845 | 0.449229 | 255 | 0.135566 | 406 | 0.215843 | 0 | 0 | 742 | 0.394471 |
98d33c72cdff1bb8b3302772a68873ef14217bfa | 353 | py | Python | Solutions/beta/beta_is_it_an_isogram.py | citrok25/Codewars-1 | dc641c5079e2e8b5955eb027fd15427e5bdb2e26 | [
"MIT"
]
| 46 | 2017-08-24T09:27:57.000Z | 2022-02-25T02:24:33.000Z | Solutions/beta/beta_is_it_an_isogram.py | abbhishek971/Codewars | 9e761811db724da1e8aae44594df42b4ee879a16 | [
"MIT"
]
| null | null | null | Solutions/beta/beta_is_it_an_isogram.py | abbhishek971/Codewars | 9e761811db724da1e8aae44594df42b4ee879a16 | [
"MIT"
]
| 35 | 2017-08-01T22:09:48.000Z | 2022-02-18T17:21:37.000Z | import re
from collections import Counter
def is_isogram(word):
if not isinstance(word, str) or word == '': return False
word = {j for i,j in Counter(
re.sub('[^a-z]', '', word.lower())
).most_common()
}
return len(word) == 1
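# Added illustrative note: the set above collects the distinct letter frequencies,
# so this implementation treats a word as an isogram only when every letter occurs
# the same number of times (case-insensitive, non-letters ignored).
#   is_isogram("Dermatoglyphics")  # True  - each letter appears exactly once
#   is_isogram("moOse")            # False - 'o' appears twice, the others once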
| 32.090909 | 67 | 0.430595 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 0.033994 |
98d39e717fc52a479b273f0813ba804a39854ac0 | 1,011 | py | Python | p23_Merge_k_Sorted_Lists.py | bzhou26/leetcode_sol | 82506521e2cc412f96cd1dfc3c8c3ab635f67f73 | [
"MIT"
]
| null | null | null | p23_Merge_k_Sorted_Lists.py | bzhou26/leetcode_sol | 82506521e2cc412f96cd1dfc3c8c3ab635f67f73 | [
"MIT"
]
| null | null | null | p23_Merge_k_Sorted_Lists.py | bzhou26/leetcode_sol | 82506521e2cc412f96cd1dfc3c8c3ab635f67f73 | [
"MIT"
]
| null | null | null | '''
- Leetcode problem: 23
- Difficulty: Hard
- Brief problem description:
Merge k sorted linked lists and return it as one sorted list. Analyze and describe its complexity.
Example:
Input:
[
1->4->5,
1->3->4,
2->6
]
Output: 1->1->2->3->4->4->5->6
- Solution Summary:
- Used Resources:
--- Bo Zhou
'''
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
def mergeKLists(self, lists: List[ListNode]) -> ListNode:
pq = []
for l in lists:
if l:
heapq.heappush(pq, (l.val, id(l), l))
newNode = ListNode()
result = newNode
while pq:
minVal, i, minNode = heapq.heappop(pq)
newNode.next = minNode
nextNode = minNode.next
newNode = minNode
if nextNode:
heapq.heappush(pq, (nextNode.val, id(nextNode), nextNode))
return result.next
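# Added complexity note: with N total nodes across k lists, each node is pushed
# to and popped from the heap exactly once and the heap never holds more than k
# entries, so the merge runs in O(N log k) time with O(k) extra space; id(node)
# in the pushed tuple only breaks ties between equal values.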
| 20.22 | 98 | 0.565776 | 541 | 0.535114 | 0 | 0 | 0 | 0 | 0 | 0 | 461 | 0.455984 |
98d3b83719bd7419b5fbeccf9de154866fb50efc | 117 | py | Python | flocx_ui/content/flocx/views.py | whitel/flocx-ui | 3887882f1a7a650850bda9d7627cf6ebcc6c32e8 | [
"Apache-2.0"
]
| null | null | null | flocx_ui/content/flocx/views.py | whitel/flocx-ui | 3887882f1a7a650850bda9d7627cf6ebcc6c32e8 | [
"Apache-2.0"
]
| null | null | null | flocx_ui/content/flocx/views.py | whitel/flocx-ui | 3887882f1a7a650850bda9d7627cf6ebcc6c32e8 | [
"Apache-2.0"
]
| null | null | null | from django.views import generic
class IndexView(generic.TemplateView):
template_name = 'project/flocx/index.html' | 29.25 | 44 | 0.811966 | 83 | 0.709402 | 0 | 0 | 0 | 0 | 0 | 0 | 26 | 0.222222 |
98d56156be74bebcd376e40f41b92a8ab49e898e | 5,833 | py | Python | wificontrol/utils/networkstranslate.py | patrislav1/pywificontrol | 1edf9cdb95158804033dba8fcb860e5214ded10f | [
"BSD-3-Clause"
]
| 1 | 2019-02-12T14:08:08.000Z | 2019-02-12T14:08:08.000Z | wificontrol/utils/networkstranslate.py | patrislav1/pywificontrol | 1edf9cdb95158804033dba8fcb860e5214ded10f | [
"BSD-3-Clause"
]
| null | null | null | wificontrol/utils/networkstranslate.py | patrislav1/pywificontrol | 1edf9cdb95158804033dba8fcb860e5214ded10f | [
"BSD-3-Clause"
]
| 2 | 2018-12-05T15:55:22.000Z | 2019-01-28T03:44:21.000Z | # Written by Ivan Sapozhkov and Denis Chagin <[email protected]>
#
# Copyright (c) 2016, Emlid Limited
# All rights reserved.
#
# Redistribution and use in source and binary forms,
# with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
def create_security(proto, key_mgmt, group):
if not proto:
return 'open'
if not key_mgmt:
if "wep" in group:
return 'wep'
else:
return None
else:
if "wpa-psk" in key_mgmt:
if proto == "WPA":
return "wpapsk"
elif proto == "RSN":
return "wpa2psk"
else:
return None
elif "wpa-eap" in key_mgmt:
return 'wpaeap'
else:
return None
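# Added illustrative mapping, traced from the branches above:
#   create_security(None, None, '')              -> 'open'
#   create_security('WPA', None, 'wep104 wep40') -> 'wep'
#   create_security('RSN', 'wpa-psk', 'ccmp')    -> 'wpa2psk'
#   create_security('WPA', 'wpa-eap', 'ccmp')    -> 'wpaeap'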
def convert_to_wpas_network(network):
return dict(WpasNetworkConverter(network))
def convert_to_wificontrol_network(network, current_network):
wifinetwork = dict(WifiControlNetworkConverter(network))
try:
if wifinetwork['ssid'] == current_network['ssid']:
wifinetwork.update(current_network)
wifinetwork["connected"] = True
except TypeError:
pass
finally:
return wifinetwork
class WpasNetworkConverter(object):
def __init__(self, network_dict):
def rawUtf8(s):
return "{}".format(s.encode('utf-8'))[2:-1]
self.security = network_dict.get('security')
self.name = rawUtf8(network_dict.get('ssid', ''))
self.password = rawUtf8(network_dict.get('password', ''))
self.identity = rawUtf8(network_dict.get('identity', ''))
def __iter__(self):
if (self.security == 'open'):
yield "ssid", "{}".format(self.name)
yield "key_mgmt", "NONE"
elif (self.security == 'wep'):
yield "ssid", "{}".format(self.name)
yield "key_mgmt", "NONE"
yield "group", "WEP104 WEP40"
yield "wep_key0", "{}".format(self.password)
elif (self.security == 'wpapsk'):
yield "ssid", "{}".format(self.name)
yield "key_mgmt", "WPA-PSK"
yield "pairwise", "CCMP TKIP"
yield "group", "CCMP TKIP"
yield "eap", "TTLS PEAP TLS"
yield "psk", "{}".format(self.password)
elif (self.security == 'wpa2psk'):
yield "ssid", "{}".format(self.name)
yield "proto", "RSN"
yield "key_mgmt", "WPA-PSK"
yield "pairwise", "CCMP TKIP"
yield "group", "CCMP TKIP"
yield "eap", "TTLS PEAP TLS"
yield "psk", "{}".format(self.password)
elif (self.security == 'wpaeap'):
yield "ssid", "{}".format(self.name)
yield "key_mgmt", "WPA-EAP"
yield "pairwise", "CCMP TKIP"
yield "group", "CCMP TKIP"
yield "eap", "TTLS PEAP TLS"
yield "identity", "{}".format(self.identity)
yield "password", "{}".format(self.password)
yield "phase1", "peaplable=0"
else:
yield "ssid", "{}".format(self.name)
yield "psk", "{}".format(self.password)
class WifiControlNetworkConverter(object):
def __init__(self, network_dict):
self.name = network_dict.get('ssid')
self.key_mgmt = network_dict.get('key_mgmt')
self.proto = network_dict.get('proto')
self.group = network_dict.get('group')
def __iter__(self):
if (self.key_mgmt == 'NONE'):
if not self.group:
yield "ssid", self.name
yield "security", "Open"
else:
yield "ssid", self.name
yield "security", "WEP"
elif (self.key_mgmt == 'WPA-PSK'):
if not self.proto:
yield "ssid", self.name
yield "security", "WPA-PSK"
else:
yield "ssid", self.name
yield "security", "WPA2-PSK"
elif (self.key_mgmt == 'WPA-EAP'):
yield "ssid", self.name
yield "security", "WPA-EAP"
else:
yield "ssid", self.name
yield "security", "NONE"
yield "connected", False
if __name__ == '__main__':
network = {'ssid': "MySSID", 'password': "NewPassword", 'security': "wpaeap", 'identity': "[email protected]"}
conv = convert_to_wpas_network(network)
reconv = convert_to_wificontrol_network(conv)
print(conv, reconv)
| 36.006173 | 113 | 0.603806 | 2,987 | 0.512086 | 2,305 | 0.395165 | 0 | 0 | 0 | 0 | 2,509 | 0.430139 |
98d57c6c79fbcfbe80f6e85abd3550ed59d42da1 | 22,613 | py | Python | src/LaminariaCore.py | MrKelpy/IFXG | 695865a8140fdf258a643ee29d6439a59037bc99 | [
"MIT"
]
| null | null | null | src/LaminariaCore.py | MrKelpy/IFXG | 695865a8140fdf258a643ee29d6439a59037bc99 | [
"MIT"
]
| null | null | null | src/LaminariaCore.py | MrKelpy/IFXG | 695865a8140fdf258a643ee29d6439a59037bc99 | [
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*-
"""
This module is distributed as part of the Laminaria Core (Python Version).
Get the Source Code in GitHub:
https://github.com/MrKelpy/LaminariaCore
The LaminariaCore is Open Source and distributed under the
MIT License
"""
# Built-in Imports
import datetime
import random
import asyncio
import os
# Third Party Imports
import screeninfo
from discord.ext import commands
import discord
from fpdf import FPDF
# Local Application Imports
###############################################################################
### DATE & TIME ###
###############################################################################
def twochars(arg):
"""
    Pads a single-character string to two characters in the (0X) format, useful for date formatting.
:param arg: The string
:return: String
"""
if len(arg) == 1:
return f"0{arg}"
return arg
def get_formatted_date(date: datetime, include_seconds: bool = False):
"""
Returns a given date in the handy DD/MM/YY - HH:MM:SS format.
:param date: The date to be formatted -> datetime.datetime
:param include_seconds: If set to True, include seconds in the format.
:return: String
"""
date_string = f"{twochars(str(date.day))}/{twochars(str(date.month))}/{twochars(str(date.year))} - " \
f"{twochars(str(date.hour))}:{twochars(str(date.minute))}"
if include_seconds:
date_string += f":{twochars(str(date.second))}"
return date_string
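# Added example (illustrative): datetime.datetime(2021, 3, 5, 9, 7, 3) formats as
# "05/03/2021 - 09:07", or as "05/03/2021 - 09:07:03" when include_seconds=True.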
def get_formatted_date_now(include_seconds: bool = False, formatting: int = 1):
"""
Returns the current date in the handy DD/MM/YY - HH:MM:SS format (default) or in the specified one.
:param formatting: Format type -> int
:param include_seconds: If set to True, include seconds in the format.
:return: String
"""
now = datetime.datetime.now()
if formatting == 1:
date_string = f"{twochars(str(now.day))}/{twochars(str(now.month))}/{twochars(str(now.year))} - " \
f"{twochars(str(now.hour))}:{twochars(str(now.minute))}"
elif formatting == 2:
date_string = f"{twochars(str(now.day))}.{twochars(str(now.month))}.{twochars(str(now.year))}_" \
f"{twochars(str(now.hour))}.{twochars(str(now.minute))}"
else:
date_string = f"{twochars(str(now.day))}/{twochars(str(now.month))}/{twochars(str(now.year))} - " \
f"{twochars(str(now.hour))}:{twochars(str(now.minute))}"
if include_seconds:
date_string += f":{twochars(str(now.second))}"
return date_string
def time_until_midnight():
"""
Get seconds left until midnight
"""
tomorrow = datetime.date.today() + datetime.timedelta(days=1)
timedelta_until_midnight = datetime.datetime.combine(tomorrow, datetime.time.min) - datetime.datetime.now()
return timedelta_until_midnight.seconds
###############################################################################
### GENERAL ###
###############################################################################
def get_absolute_screen_coords(relx, rely):
"""
Returns absolute screen coordinates based off the given relative
coordinates. For instance, in a 1920x720 screen, the x50, y50 input would be
x960, y360.
:param relx: Relative X Coordinate
:param rely: Relative Y Coordinate
:return: Absolute Coordinates
"""
monitor = screeninfo.get_monitors()[0]
x = (relx*monitor.width)/100
y = (rely*monitor.height)/100
return x, y
def get_relative_screen_coords(x, y):
"""
    Returns relative screen coordinates based off the given absolute
    coordinates. The relative coordinates are percentage-based values calculated
    relative to the monitor specs and the given coords.
:param x: Absolute X
:param y: Absolute Y
:return:
"""
monitor = screeninfo.get_monitors()[0]
relx = (x*100)/monitor.width
rely = (y*100)/monitor.height
return relx, rely
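# Added worked example (illustrative, assuming a single 1920x1080 primary monitor):
#   get_absolute_screen_coords(50, 50)    # -> (960.0, 540.0)
#   get_relative_screen_coords(960, 540)  # -> (50.0, 50.0)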
###############################################################################
### PLACEHOLDERS ###
###############################################################################
async def small_ipsum():
"""
    Returns the small version of the lorem ipsum placeholder
:return:
"""
return "Lorem ipsum dolor sit amet."
async def big_ipsum():
"""
Returns a bigger version of the lorem ipsum text than the small_ipsum function does.
:return:
"""
return "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt " \
"ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco " \
"laboris nisi ut aliquip ex ea commodo consequat."
###############################################################################
### DISCORD.PY ###
###############################################################################
async def hasrole(role: discord.Role, user: discord.Member, add: bool = False):
"""
Checks if a user has a certain role.
:param role: The role to be checked for. -> discord.Role
:param user: The user. -> discord.Member
:param add: If set to True, adds the role to the user, will always return True.
:return: True, if user has the role. False otherwise.
"""
    for r in user.roles:
        if r == role:
            return True
    # Only add the role after every existing role has been checked
    if add is True:
        await user.add_roles(role)
        return True
    return False
async def getrolenamed(role: str, guild: discord.Guild, create: bool = False, exact: bool = True):
"""
Returns a role inside a Guild based on a given name.
:param role: The role to be gathered. -> str
:param guild: The guild to retrieve the role from. -> discord.Guild
:param create: If set to True, creates the role. (If non existant!)
:param exact: If set to True, matches the role exactly
:return: discord.Role, None if not found.
"""
    for r in guild.roles:
        if exact and r.name == role:
            return r
        elif not exact and role in r.name:
            return r
    # Only create the role once every existing role has been checked
    if create is True:
        colours = [discord.Colour.red(), discord.Colour.dark_teal(), discord.Colour.teal(), discord.Colour.gold(),
                   discord.Colour.blurple(), discord.Colour.purple(), discord.Colour.green(),
                   discord.Colour.greyple(),
                   discord.Colour.orange(), discord.Colour.light_grey()]
        return_role = await guild.create_role(name=role, colour=random.choice(colours))
        return return_role
    return None
async def get_textchannel_by_name(channel: str, guild: discord.Guild,
delete: bool = False, create: bool = False, category: str = None, exact: bool = True):
"""
Returns a text channel based on a given name.
:param channel: The channel to be gathered. -> str
:param guild: The guild to retrieve the channel from. -> discord.Guild
:param delete: If set to True, deletes the role. (If found!)
:param create: If set to True, creates the role. (If not found!)
:param category: The category to create the channel into. (If create is True!)
:param exact: If set to True, the channelname needs to match the channel at 100%. Else, no.
:return: discord.TextChannel, None if not found.
"""
for text_channel in guild.text_channels:
if exact:
if text_channel.name == channel.lower():
if delete is True:
await text_channel.delete()
continue
return text_channel
else:
if channel.lower() in text_channel.name:
if delete is True:
await text_channel.delete()
continue
return text_channel
if create is True:
text_channel = await guild.create_text_channel(channel, category=category)
return text_channel
return None
async def get_category_by_name(category_name: str, guild: discord.Guild, delete: bool = False, create: bool = False,
exact: bool = True):
"""
Returns a category based on a given name.
:param exact: If set to True, matches the name exactly as it is.*
:param category_name: The category to be gathered. -> str
:param guild: The guild to retrieve the category from. -> discord.Guild
:param delete: If set to True, deletes the category. (If found!)
:param create: If set to True, creates the category. (If not found!)
:return: discord.Category, None if not found.
"""
for category in guild.categories:
if exact and category.name == category_name:
if delete is True:
await category.delete()
continue
return category
elif not exact and category_name in category.name:
if delete is True:
await category.delete()
continue
return category
if create is True:
category = await guild.create_category(category_name)
return category
return None
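# Illustrative usage sketch for the helpers above. The guild/member objects are assumed
# to be supplied by the calling command; this function is not part of the original module.
async def _example_guild_setup(guild: discord.Guild, member: discord.Member):
    role = await getrolenamed("Verified", guild, create=True)
    await hasrole(role, member, add=True)
    category = await get_category_by_name("Community", guild, create=True)
    await get_textchannel_by_name("welcome", guild, create=True, category=category)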
async def twochars_async(arg):
"""
Formats a string of two characters into the format of (0X), useful for date formatting.
:param arg: The string
:return: String
"""
if len(arg) == 1:
return f"0{arg}"
return arg
async def as_get_formatted_date_now(include_seconds: bool = False, formatting: int = 1):
"""
Returns the current date in the handy DD/MM/YY - HH:MM:SS format (default) or in the specified one.
:param formatting: Format type -> int
:param include_seconds: If set to True, include seconds in the format.
:return: String
"""
now = datetime.datetime.now()
if formatting == 1:
date_string = f"{await twochars(str(now.day))}/{await twochars(str(now.month))}/{await twochars(str(now.year))} - " \
f"{await twochars(str(now.hour))}:{await twochars(str(now.minute))}"
elif formatting == 2:
date_string = f"{await twochars(str(now.day))}.{await twochars(str(now.month))}.{await twochars(str(now.year))}_" \
f"{await twochars(str(now.hour))}.{await twochars(str(now.minute))}"
else:
date_string = f"{await twochars(str(now.day))}/{await twochars(str(now.month))}/{await twochars(str(now.year))} - " \
f"{await twochars(str(now.hour))}:{await twochars(str(now.minute))}"
if include_seconds:
date_string += f":{await twochars(str(now.second))}"
return date_string
async def get_formatted_date_async(date: datetime, include_seconds: bool = False):
"""
Returns a given date in the handy DD/MM/YY - HH:MM:SS format.
:param date: The date to be formatted -> datetime.datetime
:param include_seconds: If set to True, include seconds in the format.
:return: String
"""
date_string = f"{await twochars(str(date.day))}/{await twochars(str(date.month))}/{await twochars(str(date.year))} - " \
f"{await twochars(str(date.hour))}:{await twochars(str(date.minute))}"
if include_seconds:
date_string += f":{await twochars(str(date.second))}"
return date_string
async def send_loading(channel: discord.TextChannel, colour=discord.Colour.red()):
"""
Sends a loading embed to a specified channel.
:param channel: The channel for the message to be sent to. -> discord.TextChannel
:param colour: The embed colour. -> discord.Colour
:return: discord.Embed
"""
loading_embed = discord.Embed(
title='Loading...',
colour=colour
)
loading = await channel.send(embed=loading_embed)
return loading
async def get_textchannel_chatlog(text_channel: discord.TextChannel, limit: int = None):
"""
Returns a TextChannel chatlog
:param text_channel: The text channel for the data to be gathered from
:param limit: An integer to limit the amount of messages retrieved.
:return: String
"""
all_messages = await text_channel.history(limit=limit).flatten()
all_messages.reverse()
# Parses out and legibilises the messages into a chatlog
chatlog = ""
for message in all_messages:
if message.embeds:
content = message.embeds[0].title
elif message.attachments:
content = f"FILE(s) :{[file.filename for file in message.attachments]}"
else:
content = message.content
content = content.split("```")
content = '\n'.join(content)
chatlog += f"[{await get_formatted_date_async(message.created_at, include_seconds=True)}] [- MSG ID: {message.id}]" \
f" [- AUTHOR ID: {message.author.id}] <{message.author}> {content}\n"
return chatlog
async def get_textchannel_firstmessage(text_channel: discord.TextChannel):
"""
Returns the first message on a TextChannel
:param text_channel: The textchannel to retrieve the message from. -> discord.TextChannel
:return: discord.Message
"""
all_messages = await text_channel.history(limit=None).flatten()
all_messages.reverse()
return all_messages[0]
async def get_member_object(member_id: int, guild: discord.Guild):
"""
Returns a discord.Member object of a member from a given ID
:param member_id: The member ID. -> int
:param guild: The guild to retrieve the member from. -> discord.Guild
:return: discord.Member, None if not found.
"""
for member in guild.members:
if int(member.id) == int(member_id):
return member
return None
async def show_help_menu(ctx, bot: commands.Bot, colour=discord.Colour.red(), reverse=False):
"""
Standard help menu used between bots created by Alex, with loads of quirks to make the UI more appealing.
The help menu is completely computer-generated.
Description management:
    > Leaving the description of a command without text will result in it not being shown in the UI
    > Writing |String| at the beginning of a command description will have it sorted into a category
    (Replace "String" with the category name)
    > Categories are sorted alphabetically, as are the bot_commands.
> Not specifying a category will result in the command being thrown into a "General" category
:param reverse:
:param ctx: discord context.
:param bot: discord BOT instance.
:param colour: Help menu embed colour
:return: discord.Embed
"""
help_menu_base = discord.Embed(
title=f"{bot.user.name}'s Help Menu - ",
description=f"Prefix: `{ctx.prefix}`",
colour=colour
)
dev = await bot.fetch_user(740969223681212507)
commands_dictionary = dict()
embed_list = list()
for command in bot.commands:
# Iterates through all the registered bot_commands
if not command.description:
# Skips over the command if no description is provided
continue
category_name = "General"
if command.description.startswith("|") and command.description.count(
"|") == 2 and not command.description.endswith("||"):
# Parses out the category of a command if a match is detected
category_name = command.description.split("|")[1].strip().title()
command.description = command.description.split("|")[2].strip()
params = ""
alias_list = "No aliases found"
for param in command.clean_params:
# Parses out the command parameters for usage in the command info
params += f" <{param}> "
if command.aliases:
# If any command aliases exist, parse them out for usage in the command info
alias_list = ""
for alias in command.aliases:
alias_list += f"|{ctx.prefix}{alias}| "
# Build the dict update
try:
_ = commands_dictionary[category_name]
commands_dictionary[category_name].append([command.name, command.description, alias_list, params])
except KeyError:
command_registration = {category_name: [[command.name, command.description, alias_list, params]]}
commands_dictionary.update(command_registration)
for category in sorted(commands_dictionary):
# Loads in the categories with their bot_commands to the help menu
# Loads in the embed for the category
category_embed = help_menu_base.copy()
category_embed.title += f"{category} Commands"
for command in sorted(commands_dictionary[category]):
# Gets the command info
name = command[0]
description = command[1]
aliases = command[2]
params = command[3]
category_embed.add_field(name=name.title(), value=f"{description}\n`USAGE: {ctx.prefix}{name}{params}`\n"
f"`ALIASES: {aliases}`", inline=False)
category_embed.timestamp = datetime.datetime.now()
category_embed.set_footer(text=f"Developed by {dev}")
category_embed.set_thumbnail(url=bot.user.avatar_url)
embed_list.append(category_embed)
if reverse:
embed_list = reversed(embed_list)
for embed in embed_list:
# Sends all the embeds in the list
await ctx.send(embed=embed)
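# Illustrative sketch of the description convention show_help_menu() parses
# (the command and category names are made up for the example):
#
#   @bot.command(description="|Moderation| Kicks a member from the guild.")
#   async def kick(ctx, member: discord.Member):
#       ...
#
# A command whose description is left empty is skipped by the menu entirely.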
async def convert_txt_to_pdf(path: str):
"""
Converts a .txt file to a .pdf file
:param path: The path for the file. -> str
:return:
"""
pdf = FPDF()
pdf.add_page()
pdf.set_font("Arial", size=15)
output_path = str(os.path.splitext(path)[0]) + ".pdf"
with open(path, 'r') as txtfile:
lines = txtfile.readlines()
for line in lines:
if line == '\n':
pdf.cell(200, 10, txt='\n', ln=1, align="L")
continue
if line[0] == "|" and line[2] == "|":
pdf.cell(200, 10, txt=line[3:].strip(), ln=1, align=line[1])
continue
pdf.cell(200, 10, txt=line.strip(), ln=1, align="L")
pdf.output(output_path)
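# Illustrative sketch of the alignment convention convert_txt_to_pdf() understands:
# a line starting with "|C|" or "|R|" is centred / right-aligned, anything else is
# left-aligned (the file name below is a placeholder):
#
#   |C| Centered heading
#   |R| Right-aligned footer
#   Plain lines default to left alignment.
#
#   await convert_txt_to_pdf("report.txt")   # writes report.pdf alongside it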
async def load_missing_perms_embed(colour=discord.Colour.red()):
"""
Quickly loads a missing permissions embed
:param colour: The embed colour
:return: discord.Embed
"""
embed = discord.Embed(
title="Missing permissions!",
description="Sorry, you can't use this command.",
colour=colour
)
embed.timestamp = datetime.datetime.now()
return embed
async def interactive_dialog(**kwargs):
"""
Creates an "interactive dialog" as i name it; An embed that uses the wait_for() function together to facilitate the
creation of dialogs.
:param kwargs: expects ctx, channel, check, title, body and optionally emojis, colour.
> PILLAR ARGUMENTS are arguments that are mandatory; Vital for the function to be used.
> OPTIONAL ARGUMENTS are... optional arguments. What did you expect?
> "Ctx" is the command context. (PILLAR ARGUMENT)
> "Check" is the type of event to happen, aswell as the wait_for check to perform on the response. (PILLAR ARGUMENT)
> "Title" is the dialog embed title. (PILLAR ARGUMENT)
> "Body" is the dialog embed description. (PILLAR ARGUMENT)
> "Channel" is the place where to send the dialog to. (OPTIONAL ARGUMENT)
> "Emojis" is a list with a list of reactions, (UTF-8 Symbols) to add into the dialog. (OPTIONAL ARGUMENT)
> "Colour" is the dialog embed colour. Defaults to discord.Colours.red() (OPTIONAL ARGUMENT)
> "Picture" is the dialog image, the big picture at the bottom of the embed. (OPTIONAL ARGUMENT)
> "Thumbnail" is the dialog embed thumbnail, the small picture that gets placed on the top right side of the embed. (OPTIONAL ARGUMENT)
> "Footer" is the dialog footer, the small text at the bottom of the embed. (OPTIONAL ARGUMENT)
:return: The user's response.
"""
# Performs a kwargs check to raise errors if any of the pillar arguments are missing
if "ctx" not in kwargs: raise TypeError("Missing CTX argument in interactive dialog.")
if "check" not in kwargs: raise TypeError("Missing CHECK argument in interactive dialog.")
if "title" not in kwargs: raise TypeError("Missing TITLE argument in interactive dialog.")
if "body" not in kwargs: raise TypeError("Missing BODY argument in interactive dialog.")
# Performs a kwargs check to default the arguments if any of the optional arguments are missing
if "channel" not in kwargs: kwargs["channel"] = kwargs["ctx"].author
if "emojis" not in kwargs: kwargs["emojis"] = None
if "colour" not in kwargs: kwargs["colour"] = discord.Colour.red()
if "picture" not in kwargs: kwargs["picture"] = None
if "thumbnail" not in kwargs: kwargs["thumbnail"] = None
if "footer" not in kwargs: kwargs["footer"] = None
# Loads the dialog embed
dialog_embed = discord.Embed(
title=kwargs["title"],
description=kwargs["body"],
colour=kwargs["colour"]
)
dialog_embed.timestamp = datetime.datetime.now()
dialog_embed.set_thumbnail(url=kwargs["thumbnail"])
dialog_embed.set_image(url=kwargs["picture"])
dialog_embed.set_footer(text=kwargs["footer"])
# Sends the embed to the desired channel
dialog_message = await kwargs["channel"].send(embed=dialog_embed)
    # Starts the event type checks, and their proper handles
    if kwargs["check"][0] == "message":
        try:
            msg = await kwargs["ctx"].bot.wait_for("message", check=kwargs["check"][1], timeout=120.0)
return msg
except asyncio.TimeoutError:
# Returns an empty response if a timeout occurs.
return
if kwargs["check"][0] == "reaction":
if kwargs["emojis"] is not None:
# Adds the reactions to a message, if the emojis kwarg is not missing.
for emoji in kwargs["emojis"]:
await dialog_message.add_reaction(emoji)
try:
            reaction, user = await kwargs["ctx"].bot.wait_for("reaction_add", check=kwargs["check"][1], timeout=120.0)
return reaction, user
except asyncio.TimeoutError:
# Returns an empty response if a timeout occurs.
return
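# Illustrative usage sketch for interactive_dialog() (the check lambda, title and
# body are examples only; "ctx" is the invoking command's context):
#
#   response = await interactive_dialog(
#       ctx=ctx,
#       channel=ctx.channel,
#       check=("message", lambda m: m.author == ctx.author),
#       title="Confirm",
#       body="Type 'yes' to continue.",
#   )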
| 35.388106 | 139 | 0.618007 | 0 | 0 | 0 | 0 | 0 | 0 | 17,958 | 0.794145 | 12,040 | 0.532437 |
98d580323bddffeab7acfa9058f08c58277e005a | 1,280 | py | Python | examples/api/default_value.py | clamdad/atom | 45d8a2e696002914dd5b71c150edbe54e9ba1e59 | [
"BSD-3-Clause-Clear"
]
| 222 | 2015-01-01T10:16:41.000Z | 2022-03-15T21:28:08.000Z | examples/api/default_value.py | clamdad/atom | 45d8a2e696002914dd5b71c150edbe54e9ba1e59 | [
"BSD-3-Clause-Clear"
]
| 100 | 2015-01-28T16:26:04.000Z | 2022-03-29T07:17:44.000Z | examples/api/default_value.py | clamdad/atom | 45d8a2e696002914dd5b71c150edbe54e9ba1e59 | [
"BSD-3-Clause-Clear"
]
| 45 | 2015-01-05T14:08:13.000Z | 2022-01-31T14:17:20.000Z | # --------------------------------------------------------------------------------------
# Copyright (c) 2013-2021, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# --------------------------------------------------------------------------------------
""" Demonstrate all the ways to initialize a value
1. Pass the value directly
2. Assign the default value explicitly
3. Provide the value during initialization of the object
4. Provide factory callable that returns a value
5. Use a _default_* static method
"""
import sys
from atom.api import Atom, Int, Str
def get_mother():
return "Maude " + get_last_name()
def get_last_name():
"""Return a last name based on the system byteorder."""
return sys.byteorder.capitalize()
class Person(Atom):
"""A simple class representing a person object."""
first_name = Str("Bob")
age = Int(default=40)
address = Str()
mother = Str(factory=get_mother)
last_name = Str()
def _default_last_name(self):
return get_last_name()
if __name__ == "__main__":
bob = Person(address="101 Main")
print((bob.first_name, bob.last_name, bob.age))
print(bob.mother)
| 24.615385 | 88 | 0.604688 | 278 | 0.217188 | 0 | 0 | 0 | 0 | 0 | 0 | 762 | 0.595313 |
98d5c4c121e4fde76563c5c0ac59d5c2ef8f0cbc | 23,139 | py | Python | linux-distro/package/nuxleus/Source/Vendor/Microsoft/IronPython-2.0.1/Lib/Kamaelia/Codec/YUV4MPEG.py | mdavid/nuxleus | 653f1310d8bf08eaa5a7e3326c2349e56a6abdc2 | [
"BSD-3-Clause"
]
| 1 | 2017-03-28T06:41:51.000Z | 2017-03-28T06:41:51.000Z | linux-distro/package/nuxleus/Source/Vendor/Microsoft/IronPython-2.0.1/Lib/Kamaelia/Codec/YUV4MPEG.py | mdavid/nuxleus | 653f1310d8bf08eaa5a7e3326c2349e56a6abdc2 | [
"BSD-3-Clause"
]
| null | null | null | linux-distro/package/nuxleus/Source/Vendor/Microsoft/IronPython-2.0.1/Lib/Kamaelia/Codec/YUV4MPEG.py | mdavid/nuxleus | 653f1310d8bf08eaa5a7e3326c2349e56a6abdc2 | [
"BSD-3-Clause"
]
| 1 | 2016-12-13T21:08:58.000Z | 2016-12-13T21:08:58.000Z | #!/usr/bin/env python
#
# Copyright (C) 2007 British Broadcasting Corporation and Kamaelia Contributors(1)
# All Rights Reserved.
#
# You may only modify and redistribute this under the terms of any of the
# following licenses(2): Mozilla Public License, V1.1, GNU General
# Public License, V2.0, GNU Lesser General Public License, V2.1
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://kamaelia.sourceforge.net/AUTHORS - please extend this file,
# not this notice.
# (2) Reproduced in the COPYING file, and at:
# http://kamaelia.sourceforge.net/COPYING
# Under section 3.5 of the MPL, we are using this text since we deem the MPL
# notice inappropriate for this file. As per MPL/GPL/LGPL removal of this
# notice is prohibited.
#
# Please contact us via: [email protected]
# to discuss alternative licensing.
# -------------------------------------------------------------------------
#
"""\
=============================================
Parsing and Creation of YUV4MPEG format files
=============================================
YUV4MPEGToFrame parses YUV4MPEG format data sent to its "inbox" inbox and sends
video fram data structures to its "outbox" outbox.
FrameToYUV4MPEG does the reverse - taking frame data structures sent to its
"inbox" inbox and outputting YUV4MPEG format data to its "outbox" outbox."
The YUV4MPEG file format is supported by many tools, such as mjpegtools,
mplayer/mencoder, and ffmpeg.
Example Usage
-------------
Playback a YUV4MPEG format file::
Pipeline( RateControlledFileReader("video.yuv4mpeg",readmode="bytes", ...),
YUV4MPEGToFrame(),
VideoOverlay()
).run()
Decode a dirac encoded video file to a YUV4MPEG format file::
Pipeline( RateControlledFileReader("video.dirac",readmode="bytes", ...),
DiracDecoder(),
FrameToYUV4MPEG(),
SimpleFileWriter("output.yuv4mpeg")
).run()
YUV4MPEGToFrame Behaviour
-------------------------
Send binary data as strings containing YUV4MPEG format data to the "inbox" inbox
and frame data structures will be sent out of the "outbox" outbox as soon as
they are parsed.
See below for a description of the uncompressed frame data structure format.
This component supports sending data out of its outbox to a size limited inbox.
If the size limited inbox is full, this component will pause until it is able
to send out the data. Data will not be consumed from the inbox if this component
is waiting to send to the outbox.
If a producerFinished message is received on the "control" inbox, this component
will complete parsing any data pending in its inbox, and finish sending any
resulting data to its outbox. It will then send the producerFinished message on
out of its "signal" outbox and terminate.
If a shutdownMicroprocess message is received on the "control" inbox, this
component will immediately send it on out of its "signal" outbox and immediately
terminate. It will not complete processing, or sending on any pending data.
FrameToYUV4MPEG Behaviour
-------------------------
Send frame data structures to the "inbox" inbox of this component. YUV4MPEG
format binary string data will be sent out of the "outbox" outbox.
See below for a description of the uncompressed frame data structure format.
The header data for the YUV4MPEG file is determined from the first frame.
All frames sent to this component must therefore be in the same pixel format and
size, otherwise the output data will not be valid YUV4MPEG.
This component supports sending data out of its outbox to a size limited inbox.
If the size limited inbox is full, this component will pause until it is able
to send out the data. Data will not be consumed from the inbox if this component
is waiting to send to the outbox.
If a producerFinished message is received on the "control" inbox, this component
will complete parsing any data pending in its inbox, and finish sending any
resulting data to its outbox. It will then send the producerFinished message on
out of its "signal" outbox and terminate.
If a shutdownMicroprocess message is received on the "control" inbox, this
component will immediately send it on out of its "signal" outbox and immediately
terminate. It will not complete processing, or sending on any pending data.
=========================
UNCOMPRESSED FRAME FORMAT
=========================
A frame is a dictionary data structure. It must, at minimum contain the first 3
("yuv", "size" and "pixformat")::
{
"yuv" : (y_data, u_data, v_data) # a tuple of strings
"size" : (width, height) # in pixels
"pixformat" : pixelformat # format of raw video data
"frame_rate" : fps # frames per second
"interlaced" : 0 or not 0 # non-zero if the frame is two interlaced fields
"topfieldfirst" : 0 or not 0 # non-zero the first field comes first in the data
"pixel_aspect" : fraction # aspect ratio of pixels
"sequence_meta" : metadata # string containing extended metadata
# (no whitespace or control characters)
}
All other fields are optional when providing frames to FrameToYUV4MPEG.
YUV4MPEGToFrame only guarantees to fill in the YUV data itself. All other fields
will be filled in if the relevant header data is detected in the file.
The pixel formats recognised (and therefore supported) are::
"YUV420_planar"
"YUV411_planar"
"YUV422_planar"
"YUV444_planar"
"YUV4444_planar"
"Y_planar"
"""
from Axon.Component import component
#from Axon.Ipc import WaitComplete
from Axon.Ipc import shutdownMicroprocess, producerFinished
from Axon.AxonExceptions import noSpaceInBox
import re
from Kamaelia.Support.Data.Rationals import rational
class YUV4MPEGToFrame(component):
"""\
YUV4MPEGToFrame() -> new YUV4MPEGToFrame component.
Parses YUV4MPEG format binarydata, sent as strings to its "inbox" inbox
and outputs uncompressed video frame data structures to its "outbox" outbox.
"""
def __init__(self):
"""x.__init__(...) initializes x; see x.__class__.__doc__ for signature"""
super(YUV4MPEGToFrame,self).__init__()
self.remainder = ""
self.shutdownMsg = None
def checkShutdown(self):
"""\
Collects any new shutdown messages arriving at the "control" inbox, and
returns "NOW" if immediate shutdown is required, or "WHENEVER" if the
component can shutdown when it has finished processing pending data.
"""
while self.dataReady("control"):
newMsg = self.recv("control")
if isinstance(newMsg, shutdownMicroprocess):
self.shutdownMsg = newMsg
elif self.shutdownMsg is None and isinstance(newMsg, producerFinished):
self.shutdownMsg = newMsg
if isinstance(self.shutdownMsg, shutdownMicroprocess):
return "NOW"
elif self.shutdownMsg is not None:
return "WHENEVER"
else:
return None
def readline(self):
"""\
Generator.
Read up to the next newline char from the stream of chunks of binary
string data arriving at the "inbox" inbox.
Any excess data is placed into self.remainder ready for the next call
to self.readline or self.readbytes.
Data is only read from the inbox when required. It is not preemptively
fetched.
The read data is placed into self.bytesread
If a shutdown is detected, self.bytesread is set to "" and this
generator immediately returns.
"""
bytes = []
newdata = self.remainder
index = newdata.find("\x0a")
while index==-1:
bytes.append(newdata)
while not self.dataReady("inbox"):
if self.checkShutdown():
self.bytesread=""
return
self.pause()
yield 1
newdata = self.recv("inbox")
index = newdata.find("\x0a")
tail = newdata[:index+1]
self.remainder = newdata[index+1:]
bytes.append(tail)
self.bytesread = "".join(bytes)
return
def readbytes(self,size):
"""\
Generator.
Read the specified number of bytes from the stream of chunks of binary
string data arriving at the "inbox" inbox.
Any excess data is placed into self.remainder ready for the next call
to self.readline or self.readbytes.
Data is only read from the inbox when required. It is not preemptively
fetched.
The read data is placed into self.bytesread
If a shutdown is detected, self.bytesread is set to "" and this
generator immediately returns.
"""
buf = [self.remainder]
bufsize = len(self.remainder)
while bufsize < size:
if self.dataReady("inbox"):
newdata = self.recv("inbox")
buf.append(newdata)
bufsize += len(newdata)
shutdown = self.checkShutdown()
if shutdown == "NOW" or (shutdown and not self.dataReady("inbox") and bufsize<size):
self.bytesread=""
return
if bufsize<size and not self.anyReady():
self.pause()
yield 1
excess = bufsize-size
if excess:
wanted = buf[:-1]
tail, self.remainder = buf[-1][:-excess], buf[-1][-excess:]
wanted.append(tail)
else:
wanted = buf
self.remainder = ""
self.bytesread = "".join(wanted)
return
def safesend(self, data, boxname):
"""\
Generator.
Sends data out of the named outbox. If the destination is full
(noSpaceInBox exception) then it waits until there is space and retries
until it succeeds.
If a shutdownMicroprocess message is received, returns early.
"""
while 1:
try:
self.send(data, boxname)
return
except noSpaceInBox:
if self.checkShutdown() == "NOW":
return
self.pause()
yield 1
def main(self):
"""Main loop"""
# parse header
for _ in self.readline(): yield _
if self.checkShutdown() == "NOW" or (self.checkShutdown() and self.bytesread==""):
self.send(self.shutdownMsg,"signal")
return
line = self.bytesread
m = re.match("^YUV4MPEG2((?: .\S*)*)\n$", line)
assert(m)
fields = m.groups()[0]
seq_params = parse_seq_tags(fields)
yield 1
while 1:
for _ in self.readline(): yield _
line = self.bytesread
if self.checkShutdown() == "NOW" or (self.checkShutdown() and self.bytesread==""):
break
m = re.match("^FRAME((?: .\S*)*)\n$", line)
assert(m)
fields = m.groups()[0]
frame_params = parse_frame_tags(fields)
ysize = seq_params["size"][0] * seq_params["size"][1]
csize = seq_params["chroma_size"][0] * seq_params["chroma_size"][1]
for _ in self.readbytes(ysize): yield _
if self.checkShutdown() == "NOW" or (self.checkShutdown() and self.bytesread==""):
break
y = self.bytesread
for _ in self.readbytes(csize): yield _
if self.checkShutdown() == "NOW" or (self.checkShutdown() and self.bytesread==""):
break
u = self.bytesread
for _ in self.readbytes(csize): yield _
if self.checkShutdown() == "NOW" or (self.checkShutdown() and self.bytesread==""):
break
v = self.bytesread
frame = { "yuv" : (y,u,v) }
frame.update(seq_params)
frame.update(frame_params)
for _ in self.safesend(frame,"outbox"): yield _
if self.checkShutdown() == "NOW" or (self.checkShutdown() and not self.dataReady("inbox")):
break
yield 1
if self.shutdownMsg:
self.send(self.shutdownMsg, "signal")
else:
self.send(producerFinished(), "signal")
def parse_seq_tags(fields):
"""Parses YUV4MPEG header tags"""
params = {}
tags = {}
while fields:
m = re.match("^ (.)(\S*)(.*)$", fields)
(tag,value,fields) = m.groups()
tags[tag] = value
if "W" in tags and "H" in tags:
params['size'] = (int(tags["W"]), int(tags["H"]))
    else:
        # A YUV4MPEG header must specify the image size
        raise ValueError("YUV4MPEG header is missing the W (width) and H (height) tags")
if "C" in tags:
C = tags["C"]
if C == "420jpeg": # 4:2:0 with JPEG/MPEG-1 siting (default)
params['pixformat'] = "YUV420_planar"
params['chroma_size'] = (params['size'][0]/2, params['size'][1]/2)
elif C == "420mpeg2": # 4:2:0 with MPEG-2 siting
params['pixformat'] = "YUV420_planar"
params['chroma_size'] = (params['size'][0]/2, params['size'][1]/2)
elif C == "420paldv": # 4:2:0 with PAL-DV siting
params['pixformat'] = "YUV420_planar"
params['chroma_size'] = (params['size'][0]/2, params['size'][1]/2)
elif C == "411": # 4:1:1, cosited
params['pixformat'] = "YUV411_planar"
params['chroma_size'] = (params['size'][0]/4, params['size'][1])
elif C == "422": # 4:2:2, cosited
params['pixformat'] = "YUV422_planar"
params['chroma_size'] = (params['size'][0]/2, params['size'][1])
elif C == "444": # 4:4:4 (no subsampling)
params['pixformat'] = "YUV444_planar"
params['chroma_size'] = (params['size'][0], params['size'][1])
elif C == "444alpha": # 4:4:4 with an alpha channel
params['pixformat'] = "YUV4444_planar"
params['chroma_size'] = (params['size'][0], params['size'][1])
elif C == "mono": # luma (Y') plane only
params['pixformat'] = "Y_planar"
params['chroma_size'] = (0,0)
else:
params['pixformat'] = "YUV420_planar"
params['chroma_size'] = (params['size'][0]/2, params['size'][1]/2)
if "I" in tags:
I = tags["I"]
if I == "?": # unknown (default)
pass
elif I == "p": # progressive/none
params["interlaced"] = False
elif I == "t": # top-field-first
params["interlaced"] = True
params["topfieldfirst"] = True
elif I == "b": # bottom-field-first
params["interlaced"] = True
params["topfieldfirst"] = False
elif I == "m": # mixed-mode: refer to 'I' tag in frame header
pass
if "F" in tags:
m = re.match("^(\d+):(\d+)$",tags["F"])
num, denom = float(m.groups()[0]), float(m.groups()[1])
if denom > 0:
params["frame_rate"] = num/denom
if "A" in tags:
m = re.match("^(\d+):(\d+)$",tags["A"])
num, denom = float(m.groups()[0]), float(m.groups()[1])
if denom > 0:
params["pixel_aspect"] = num/denom
if "X" in tags:
params["sequence_meta"] = tags["X"]
return params
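# Example (illustrative): parse_seq_tags(" W720 H576 F25:1 Ip") would return
# {'size': (720, 576), 'pixformat': 'YUV420_planar', 'chroma_size': (360, 288),
#  'interlaced': False, 'frame_rate': 25.0}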
def parse_frame_tags(fields):
"""\
Parses YUV4MPEG frame tags.
"""
params = {}
tags = {}
while fields:
m = re.match("^ (.)(\S*)(.*)$", fields)
(tag,value,fields) = m.groups()
tags[tag] = value
if "I" in tags:
x,y,z = tags["I"][0], tags["I"][1], tags["I"][2]
if x == "t": # top-field-first
params["interlaced"] = True
params["topfieldfirst"] = True
elif x == "T": # top-field-first and repeat
params["interlaced"] = True
params["topfieldfirst"] = True
elif x == "b": # bottom-field-first
params["interlaced"] = True
params["topfieldfirst"] = False
elif x == "B": # bottom-field-first and repeat
params["interlaced"] = True
params["topfieldfirst"] = False
elif x == "1": # single progressive frame
params["interlaced"] = False
elif x == "2": # double progressive frame (repeat)
params["interlaced"] = False
elif x == "3": # triple progressive frame (repeat)
params["interlaced"] = False
if y == "p": # fields sampled at same time
params["interlaced"] = False
elif y == "i": # fields sampled at different times
params["interlaced"] = True
if z == "p": # progressive (subsampling over whole frame)
pass
elif z == "i": # interlaced (each field subsampled independently)
pass
elif z == "?": # unknown (allowed only for non-4:2:0 subsampling)
pass
if "X" in tags:
params["meta"] = tags["X"]
return params
class FrameToYUV4MPEG(component):
"""\
FrameToYUV4MPEG() -> new FrameToYUV4MPEG component.
Parses uncompressed video frame data structures sent to its "inbox" inbox
and writes YUV4MPEG format binary data as strings to its "outbox" outbox.
"""
def checkShutdown(self):
"""\
Collects any new shutdown messages arriving at the "control" inbox, and
ensures self.shutdownMsg contains the highest priority one encountered
so far.
"""
while self.dataReady("control"):
msg = self.recv("control")
if isinstance(msg, producerFinished) and not isinstance(self.shutdownMsg,shutdownMicroprocess):
self.shutdownMsg = msg
elif isinstance(msg, shutdownMicroprocess):
self.shutdownMsg = msg
def canShutdown(self):
"""\
Returns true if the component should terminate when it has finished
processing any pending data.
"""
return isinstance(self.shutdownMsg, (producerFinished, shutdownMicroprocess))
def mustShutdown(self):
"""Returns true if the component should terminate immediately."""
return isinstance(self.shutdownMsg, shutdownMicroprocess)
def sendoutbox(self,data):
"""\
Generator.
Sends data out of the "outbox" outbox. If the destination is full
(noSpaceInBox exception) then it waits until there is space. It keeps
retrying until it succeeds.
If the component is ordered to immediately terminate then "STOP" is
raised as an exception.
"""
while 1:
try:
self.send(data,"outbox")
return
except noSpaceInBox:
self.checkShutdown()
if self.mustShutdown():
raise "STOP"
self.pause()
yield 1
self.checkShutdown()
if self.mustShutdown():
raise "STOP"
def main(self):
"""Main loop"""
self.shutdownMsg = None
try:
while not self.dataReady("inbox"):
self.checkShutdown()
if self.canShutdown():
raise "STOP"
self.pause()
yield 1
frame = self.recv("inbox")
for _ in self.write_header(frame):
yield _
for _ in self.write_frame(frame):
yield _
while 1:
while self.dataReady("inbox"):
frame = self.recv("inbox")
for _ in self.write_frame(frame):
yield _
self.checkShutdown()
if self.canShutdown():
raise "STOP"
self.pause()
yield 1
except "STOP":
self.send(self.shutdownMsg,"signal")
def write_header(self, frame):
"""\
Generator.
Sends the YUV4MPEG format header to the "outbox" outbox, based on
attributes of the supplied frame data structure.
"""
format = "YUV4MPEG2 W%d H%d" % tuple(frame['size'])
if frame['pixformat']=="YUV420_planar":
format += " C420mpeg2"
elif frame['pixformat']=="YUV411_planar":
format += " C411"
elif frame['pixformat']=="YUV422_planar":
format += " C422"
elif frame['pixformat']=="YUV444_planar":
format += " C444"
elif frame['pixformat']=="YUV4444_planar":
format += " C444alpha"
elif frame['pixformat']=="Y_planar":
format += " Cmono"
interlace = frame.get("interlaced",False)
topfieldfirst = frame.get("topfieldfirst",False)
if interlace and topfieldfirst:
format += " It"
elif interlace and not topfieldfirst:
format += " Ib"
elif not interlace:
format += " Ip"
rate = frame.get("frame_rate", 0)
if rate > 0:
num,denom = rational(rate)
format += " F%d:%d" % (num,denom)
rate = frame.get("pixel_aspect", 0)
if rate > 0:
num,denom = rational(rate)
format += " A%d:%d" % (num,denom)
if "sequence_meta" in frame:
format += " X"+frame['sequence_meta']
format += "\x0a"
for _ in self.sendoutbox(format):
yield _
def write_frame(self, frame):
"""\
Generator.
Writes out YUV4MPEG format frame marker and data.
"""
for _ in self.sendoutbox("FRAME\x0a"):
yield _
for component in frame['yuv']:
for _ in self.sendoutbox(component):
yield _
__kamaelia_components__ = ( YUV4MPEGToFrame, FrameToYUV4MPEG, )
if __name__ == "__main__":
from Kamaelia.Chassis.Pipeline import Pipeline
from Kamaelia.File.Reading import RateControlledFileReader
from Kamaelia.UI.Pygame.VideoOverlay import VideoOverlay
Pipeline( RateControlledFileReader("/data/stream.yuv",readmode="bytes",rate=25*(608256+128)),
YUV4MPEGToFrame(),
FrameToYUV4MPEG(),
YUV4MPEGToFrame(),
VideoOverlay(),
).run()
| 35.543779 | 107 | 0.563075 | 11,852 | 0.512209 | 9,202 | 0.397684 | 0 | 0 | 0 | 0 | 11,552 | 0.499244 |
98d7520f9994f6836e73faaf42f63009eee0dc64 | 697 | py | Python | project/cli/event.py | DanielGrams/gsevp | e94034f7b64de76f38754b56455e83092378261f | [
"MIT"
]
| 1 | 2021-06-01T14:49:18.000Z | 2021-06-01T14:49:18.000Z | project/cli/event.py | DanielGrams/gsevp | e94034f7b64de76f38754b56455e83092378261f | [
"MIT"
]
| 286 | 2020-12-04T14:13:00.000Z | 2022-03-09T19:05:16.000Z | project/cli/event.py | DanielGrams/gsevpt | a92f71694388e227e65ed1b24446246ee688d00e | [
"MIT"
]
| null | null | null | import click
from flask.cli import AppGroup
from project import app, db
from project.dateutils import berlin_tz
from project.services.event import (
get_recurring_events,
update_event_dates_with_recurrence_rule,
)
event_cli = AppGroup("event")
@event_cli.command("update-recurring-dates")
def update_recurring_dates():
    # Setting the timezone is necessary for the CLI command
db.session.execute("SET timezone TO :val;", {"val": berlin_tz.zone})
events = get_recurring_events()
for event in events:
update_event_dates_with_recurrence_rule(event)
db.session.commit()
click.echo(f"{len(events)} event(s) were updated.")
app.cli.add_command(event_cli)
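# Illustrative invocation via the Flask CLI (assuming the project's standard entry point):
#   $ flask event update-recurring-dates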
| 24.034483 | 72 | 0.746055 | 0 | 0 | 0 | 0 | 407 | 0.583931 | 0 | 0 | 150 | 0.215208 |
98d900684301053ffd4e6344e16abaa1c0d10ed9 | 3,647 | py | Python | test/functional/examples/test_examples.py | ymn1k/testplan | b1bde8495c449d75a74a7fe4e7c6501b0476f833 | [
"Apache-2.0"
]
| null | null | null | test/functional/examples/test_examples.py | ymn1k/testplan | b1bde8495c449d75a74a7fe4e7c6501b0476f833 | [
"Apache-2.0"
]
| null | null | null | test/functional/examples/test_examples.py | ymn1k/testplan | b1bde8495c449d75a74a7fe4e7c6501b0476f833 | [
"Apache-2.0"
]
| 1 | 2019-09-11T09:13:18.000Z | 2019-09-11T09:13:18.000Z | import os
import re
import sys
import subprocess
import pytest
from testplan.common.utils.path import change_directory
import platform
ON_WINDOWS = platform.system() == 'Windows'
KNOWN_EXCEPTIONS = [
"TclError: Can't find a usable init\.tcl in the following directories:", # Matplotlib module improperly installed. Will skip Data Science example.
"ImportError: lib.*\.so\..+: cannot open shared object file: No such file or directory", # Matplotlib module improperly installed. Will skip Data Science example.
"ImportError: No module named sklearn.*", # Missing module sklearn. Will skip Data Science example.
"ImportError: No module named Tkinter", # Missing module Tkinter. Will skip Data Science example.
"ImportError: No module named _tkinter.*", # Missing module Tkinter. Will skip Data Science example.
"RuntimeError: Download pyfixmsg library .*", # Missing module pyfixmsg. Will skip FIX example.
"No spec file set\. You should download .*", # Missing FIX spec file. Will skip FIX example.
"AttributeError: 'module' object has no attribute 'poll'",
"RuntimeError: You need to compile test binary first." # Need to compile cpp binary first. Will skip GTest example.
]
SKIP_ON_WINDOWS = [
os.path.join('Cpp', 'GTest', 'test_plan.py'),
]
ROOT_DIR_CONTENTS = [
"setup.py",
"requirements.txt",
"README.rst",
"LICENSE.md"
]
def _depth_from_repo_root():
cwd = os.getcwd()
depth = []
while True:
contents = os.listdir(cwd)
if all([entry in contents for entry in ROOT_DIR_CONTENTS]):
return depth
parent_dir = os.path.dirname(cwd)
if os.path.realpath(cwd) == os.path.realpath(parent_dir):
raise RuntimeError('Could not find repo directory')
depth.append(os.pardir)
cwd = parent_dir
def _relative_dir(directory):
path_args = _depth_from_repo_root() + [directory]
return os.path.join(*path_args)
def _param_formatter(param):
if 'examples' in param:
return repr(param.rsplit('examples')[1])
return repr(param)
@pytest.mark.parametrize(
'root,filename',
[
(os.path.abspath(root), filename)
for root, _, files in os.walk(
_relative_dir(os.path.join('testplan', 'examples')))
for filename in files
if 'test_plan' in filename
],
ids=_param_formatter,
)
def test_example(root, filename):
file_path = os.path.join(root, filename)
if ON_WINDOWS and any(
[file_path.endswith(skip_name) for skip_name in SKIP_ON_WINDOWS]
):
pytest.skip()
with change_directory(root), open(filename) as file_obj:
file_obj.readline()
second_line = file_obj.readline()
try:
subprocess.check_output(
[sys.executable, filename],
stderr=subprocess.STDOUT
)
except subprocess.CalledProcessError as e:
out = e.output.decode()
for exception in KNOWN_EXCEPTIONS:
if re.search(exception, out):
pytest.xfail()
assert 'Exception in test_plan definition' not in out, \
'Exception raised in test_plan definition.'
assert 'Traceback (most recent call last):' not in out, \
'Exception raised during test:\n{}'.format(out)
assert \
('# This plan contains tests that demonstrate failures '
'as well.') == second_line.strip(), \
"Expected \'{}\' example to pass, it failed.\n{}".format(
file_path,
out
)
| 34.733333 | 166 | 0.636413 | 0 | 0 | 0 | 0 | 1,562 | 0.428297 | 0 | 0 | 1,395 | 0.382506 |
98db431598c035c6864fd313e00c493666f532f6 | 1,223 | py | Python | peco/template/template.py | Tikubonn/peco | c77fc163ad31d3c271d299747914ce4ef3386987 | [
"MIT"
]
| null | null | null | peco/template/template.py | Tikubonn/peco | c77fc163ad31d3c271d299747914ce4ef3386987 | [
"MIT"
]
| null | null | null | peco/template/template.py | Tikubonn/peco | c77fc163ad31d3c271d299747914ce4ef3386987 | [
"MIT"
]
| null | null | null |
from io import StringIO
class Template:
"""
  this holds the information produced by parsing source code.
you can get rendered text with .render() and .render_string()
"""
def __init__(self, sentencenode, scope):
self.sentencenode = sentencenode
self.scope = scope
def render(self, stream, **parameters):
"""
render template to stream with parameters.
Parameters
----------
stream: io.TextIOBase
this file-like object used to output.
parameters:
this used to rendering.
"""
with self.scope:
for name, value in parameters.items():
self.scope.set_value(name, value)
self.sentencenode.write(stream)
def render_string(self, **parameters):
"""
render template with parameters then return rendered text.
Parameters
----------
parameters:
this used to rendering.
Returns
-------
rendered: str
this is rendered string.
"""
with StringIO() as stream:
self.render(stream, **parameters)
rendered = stream.getvalue()
return rendered
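# Illustrative usage sketch (assumes `template` is a Template instance produced by
# peco's parser; the parameter name is made up):
#
#   text = template.render_string(title="Hello")
#   with open("out.html", "w") as stream:
#       template.render(stream, title="Hello")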
| 23.519231 | 66 | 0.555192 | 1,195 | 0.977105 | 0 | 0 | 0 | 0 | 0 | 0 | 631 | 0.515944 |
98dc08bcdfcddaf7d2d055024948658ae151bf17 | 2,342 | py | Python | mtp_api/apps/credit/tests/test_views/test_credit_list/test_security_credit_list/test_credit_list_with_blank_string_filters.py | ministryofjustice/mtp-api | b1c34c29e4aa9f48598cb060abe1368ae7686e0b | [
"MIT"
]
| 5 | 2016-01-05T12:21:35.000Z | 2020-10-28T17:06:02.000Z | mtp_api/apps/credit/tests/test_views/test_credit_list/test_security_credit_list/test_credit_list_with_blank_string_filters.py | ministryofjustice/mtp-api | b1c34c29e4aa9f48598cb060abe1368ae7686e0b | [
"MIT"
]
| 209 | 2015-06-12T09:39:41.000Z | 2022-03-21T16:01:19.000Z | mtp_api/apps/credit/tests/test_views/test_credit_list/test_security_credit_list/test_credit_list_with_blank_string_filters.py | ministryofjustice/mtp-api | b1c34c29e4aa9f48598cb060abe1368ae7686e0b | [
"MIT"
]
| 1 | 2021-04-11T06:19:23.000Z | 2021-04-11T06:19:23.000Z | from core import getattr_path
from rest_framework import status
from credit.tests.test_views.test_credit_list.test_security_credit_list import SecurityCreditListTestCase
class CreditListWithBlankStringFiltersTestCase(SecurityCreditListTestCase):
def assertAllResponsesHaveBlankField(self, filters, blank_fields, expected_filter): # noqa: N802
expected_results = list(filter(expected_filter, self._get_managed_prison_credits()))
url = self._get_url(**filters)
response = self.client.get(
url, format='json',
HTTP_AUTHORIZATION=self.get_http_authorization_for_user(self._get_authorised_user())
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
results = []
for result in response.data.get('results', []):
results.append(result['id'])
for blank_field in blank_fields:
self.assertIn(result[blank_field], ['', None])
self.assertListEqual(
sorted(results),
sorted(expected_result.id for expected_result in expected_results)
)
def test_blank_sender_name(self):
self.assertAllResponsesHaveBlankField(
{
'sender_name__isblank': 'True'
},
['sender_name'],
lambda credit: getattr_path(credit, 'transaction.sender_name', None) == ''
)
def test_blank_sender_sort_code(self):
self.assertAllResponsesHaveBlankField(
{
'sender_sort_code__isblank': 'True'
},
['sender_sort_code'],
lambda credit: getattr_path(credit, 'transaction.sender_sort_code', None) == ''
)
def test_blank_sender_account_number(self):
self.assertAllResponsesHaveBlankField(
{
'sender_account_number__isblank': 'True'
},
['sender_account_number'],
lambda credit: getattr_path(credit, 'transaction.sender_account_number', None) == ''
)
def test_blank_sender_roll_number(self):
self.assertAllResponsesHaveBlankField(
{
'sender_roll_number__isblank': 'True'
},
['sender_roll_number'],
lambda credit: getattr_path(credit, 'transaction.sender_roll_number', None) == ''
)
| 36.59375 | 105 | 0.634927 | 2,168 | 0.925705 | 0 | 0 | 0 | 0 | 0 | 0 | 371 | 0.158412 |
98dc59660d9259931f06beb23b9db7e987e199a4 | 3,800 | py | Python | vipermonkey/core/filetype.py | lap1nou/ViperMonkey | 631d242f43108226bb25ed91e773a274012dc8c2 | [
"Unlicense"
]
| 874 | 2016-09-29T08:19:00.000Z | 2022-03-28T03:34:16.000Z | vipermonkey/core/filetype.py | Mercury-180/ViperMonkey | 1045dadcf7bebedc126ca36d25475e413196d053 | [
"Unlicense"
]
| 94 | 2016-09-30T17:03:36.000Z | 2022-03-01T17:25:26.000Z | vipermonkey/core/filetype.py | Mercury-180/ViperMonkey | 1045dadcf7bebedc126ca36d25475e413196d053 | [
"Unlicense"
]
| 186 | 2016-09-29T10:59:37.000Z | 2022-03-26T10:20:38.000Z | """
Check for Office file types
ViperMonkey is a specialized engine to parse, analyze and interpret Microsoft
VBA macros (Visual Basic for Applications), mainly for malware analysis.
Author: Philippe Lagadec - http://www.decalage.info
License: BSD, see source code or documentation
Project Repository:
https://github.com/decalage2/ViperMonkey
"""
# === LICENSE ==================================================================
# ViperMonkey is copyright (c) 2015-2016 Philippe Lagadec (http://www.decalage.info)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Office magic numbers.
magic_nums = {
"office97" : "D0 CF 11 E0 A1 B1 1A E1", # Office 97
"office2007" : "50 4B 3 4", # Office 2007+ (PKZip)
}
# PE magic number.
pe_magic_num = "4D 5A"
def get_1st_8_bytes(fname, is_data):
info = None
is_data = (is_data or (len(fname) > 200))
if (not is_data):
try:
tmp = open(fname, 'rb')
tmp.close()
except:
is_data = True
if (not is_data):
with open(fname, 'rb') as f:
info = f.read(8)
else:
info = fname[:9]
curr_magic = ""
for b in info:
curr_magic += hex(ord(b)).replace("0x", "").upper() + " "
return curr_magic
def is_pe_file(fname, is_data):
"""
Check to see if the given file is a PE executable.
return - True if it is a PE file, False if not.
"""
# Read the 1st 8 bytes of the file.
curr_magic = get_1st_8_bytes(fname, is_data)
    # See if we have the known magic #.
return (curr_magic.startswith(pe_magic_num))
def is_office_file(fname, is_data):
"""
Check to see if the given file is a MS Office file format.
return - True if it is an Office file, False if not.
"""
# Read the 1st 8 bytes of the file.
curr_magic = get_1st_8_bytes(fname, is_data)
# See if we have 1 of the known magic #s.
for typ in magic_nums.keys():
magic = magic_nums[typ]
if (curr_magic.startswith(magic)):
return True
return False
def is_office97_file(fname, is_data):
# Read the 1st 8 bytes of the file.
curr_magic = get_1st_8_bytes(fname, is_data)
# See if we have the Office97 magic #.
return (curr_magic.startswith(magic_nums["office97"]))
def is_office2007_file(fname, is_data):
# Read the 1st 8 bytes of the file.
curr_magic = get_1st_8_bytes(fname, is_data)
# See if we have the Office 2007 magic #.
return (curr_magic.startswith(magic_nums["office2007"]))
| 33.043478 | 84 | 0.678158 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,518 | 0.662632 |
98dec69515aeffc54b77de9f6161248b53aa1b30 | 2,699 | py | Python | packs/kubernetes/tests/test_third_party_resource.py | userlocalhost2000/st2contrib | 1a5f759e76401743ed9023d298a3d767e3885db1 | [
"Apache-2.0"
]
| 164 | 2015-01-17T16:08:33.000Z | 2021-08-03T02:34:07.000Z | packs/kubernetes/tests/test_third_party_resource.py | userlocalhost2000/st2contrib | 1a5f759e76401743ed9023d298a3d767e3885db1 | [
"Apache-2.0"
]
| 442 | 2015-01-01T11:19:01.000Z | 2017-09-06T23:26:17.000Z | packs/kubernetes/tests/test_third_party_resource.py | userlocalhost2000/st2contrib | 1a5f759e76401743ed9023d298a3d767e3885db1 | [
"Apache-2.0"
]
| 202 | 2015-01-13T00:37:40.000Z | 2020-11-07T11:30:10.000Z | from st2tests.base import BaseSensorTestCase
from third_party_resource import ThirdPartyResource
class ThirdPartyResourceTestCase(BaseSensorTestCase):
sensor_cls = ThirdPartyResource
def test_k8s_object_to_st2_trigger_bad_object(self):
k8s_obj = {
'type': 'kanye',
'object': {
'kind': 'president',
'metadata': {
'name': 'west',
'namespace': 'westashians'
# uid missing
# label missing
}
}
}
sensor = self.get_sensor_instance()
self.assertRaises(KeyError, sensor._k8s_object_to_st2_trigger, k8s_obj)
def test_k8s_object_to_st2_trigger(self):
k8s_obj = {
'type': 'kanye',
'object': {
'kind': 'president',
'metadata': {
'name': 'west',
'namespace': 'westashians',
'uid': 'coinye',
'labels': ['rapper', 'train wrecker']
}
}
}
sensor = self.get_sensor_instance()
payload = sensor._k8s_object_to_st2_trigger(k8s_obj)
self.assertTrue('resource' in payload)
self.assertEqual(payload['resource'], k8s_obj['type'])
self.assertTrue('object_kind' in payload)
self.assertEqual(payload['object_kind'], k8s_obj['object']['kind'])
self.assertTrue('name' in payload)
self.assertEqual(payload['name'], k8s_obj['object']['metadata']['name'])
self.assertTrue('labels' in payload)
self.assertListEqual(payload['labels'], k8s_obj['object']['metadata']['labels'])
self.assertTrue('namespace' in payload)
self.assertEqual(payload['namespace'], k8s_obj['object']['metadata']['namespace'])
self.assertTrue('uid' in payload)
self.assertEqual(payload['uid'], k8s_obj['object']['metadata']['uid'])
def test_get_trigger_payload_from_line(self):
line = '{"object": {"kind": "president", ' + \
'"metadata": {"labels": ["rapper", "train wrecker"], ' + \
'"namespace": "westashians", ' + \
'"name": "west", "uid": "coinye"}}, "type": "kanye"}'
sensor = self.get_sensor_instance()
payload = sensor._get_trigger_payload_from_line(line)
self.assertTrue(payload is not None)
self.assertTrue('resource' in payload)
self.assertTrue('object_kind' in payload)
self.assertTrue('name' in payload)
self.assertTrue('labels' in payload)
self.assertTrue('namespace' in payload)
self.assertTrue('uid' in payload)
| 40.283582 | 90 | 0.567247 | 2,598 | 0.962579 | 0 | 0 | 0 | 0 | 0 | 0 | 693 | 0.256762 |
98df6d63c240e8262eac8f0396a8b8f0ecd76ac8 | 10,728 | py | Python | PrometheusScrapper/scrapper.py | masterchef/webscraper | f47220e941980e2a6dda593d74696062784062e1 | [
"MIT"
]
| null | null | null | PrometheusScrapper/scrapper.py | masterchef/webscraper | f47220e941980e2a6dda593d74696062784062e1 | [
"MIT"
]
| null | null | null | PrometheusScrapper/scrapper.py | masterchef/webscraper | f47220e941980e2a6dda593d74696062784062e1 | [
"MIT"
]
| null | null | null | import datetime
import getpass
import logging
import os
import pathlib
import platform
import re
import smtplib
import sys
from contextlib import contextmanager
from email.message import EmailMessage
from functools import wraps
import azure.functions as func
import click
import gspread
import pandas as pd
from apscheduler.schedulers.background import BlockingScheduler
from oauth2client.service_account import ServiceAccountCredentials
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
log.addHandler(handler)
@contextmanager
def get_driver(*args, **kwargs):
options = Options()
options.headless = True
options.add_argument("--window-size=1920,1200")
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument('--disable-crash-reporter')
options.add_argument('--disable-logging')
options.add_argument('--log-level=3')
if platform.system() == 'Linux':
DRIVER_PATH = 'chromedriver'
elif platform.system() == "Darwin":
DRIVER_PATH = (pathlib.Path(__file__).parent.parent /
'chromedriver').resolve()
else:
log.error('Unsupported OS')
exit(0)
driver = webdriver.Chrome(options=options, executable_path=DRIVER_PATH)
yield driver
driver.close()
driver.quit()
def get_browser(func):
@wraps(func)
def wrapper(*args, **kwargs):
with get_driver() as d:
kwargs['driver'] = d
return func(*args, **kwargs)
return wrapper
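# Illustrative sketch of the @get_browser pattern used by the scraping functions
# below (the URL and function name are placeholders):
#
#   @get_browser
#   def page_title(url, driver):
#       driver.get(url)
#       return driver.title
#
#   title = page_title("https://example.com")   # `driver` is injected by the decorator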
@click.group()
@click.option('--email', is_flag=True, help='A flag for sending email with results.')
@click.option('--email_to', help='CSV of email addresses to send notification to.')
@click.option('--username', help='SMTP account username.')
@click.option('--gsheet', is_flag=True, help='A flag for updating google sheet with results')
@click.option('--doc_key', help='Google Doc Key to update')
@click.pass_context
def cli(ctx, email, email_to, username, gsheet, doc_key):
ctx.ensure_object(dict)
if email and (not username or not email_to):
log.error('Please provide email sending parameters')
exit(0)
elif email:
password = getpass.getpass(
"Please enter your google account password for sending email:\n")
ctx.obj['password'] = password
if gsheet and not doc_key:
log.error('Please provide a gsheet doc key')
exit(0)
pass
@cli.command('schedule')
@click.option('--hour', default='*/1', help='Cron hour expression')
@click.pass_context
def schedule(ctx, hour):
email = ctx.parent.params['email']
username = ctx.parent.params['username']
email_to = ctx.parent.params['email_to']
password = ctx.obj.get('password', None)
gsheet = ctx.parent.params['gsheet']
doc_key = ctx.parent.params['doc_key']
schedule = BlockingScheduler()
schedule.add_job(run, kwargs={"email": email, "gsheet": gsheet, "doc_key": doc_key,
"username": username, "email_to": email_to, "password": password}, trigger='cron', hour=hour)
try:
schedule.start()
except (KeyboardInterrupt, SystemExit):
schedule.shutdown()
@cli.command('run')
@click.pass_context
def once(ctx):
email = ctx.parent.params['email']
gsheet = ctx.parent.params['gsheet']
username = ctx.parent.params['username']
email_to = ctx.parent.params['email_to']
password = ctx.obj.get('password', None)
doc_key = ctx.parent.params['doc_key']
run(email, username, email_to, password, gsheet, doc_key)
def run(email, username, email_to, password, gsheet, doc_key):
log.info('In run')
content = []
for link in os.environ["searchLinks"].split():
content += get_prometheus_apartments(link)
formatted_content = format_email(content)
if gsheet:
log.info('Updating gsheet')
update_historical_data(doc_key, content)
formatted_content += f'For historical data click the link below:\nhttps://docs.google.com/spreadsheets/d/1XZocxmyQ91e1exBvwDAaSR8Rhavy9WPnwLSz0Z5SKsM/edit?usp=sharing'
if email:
log.info('Sending email')
send_email(username, password, email_to, formatted_content)
log.info(content)
@get_browser
def get_prometheus_apartments(url, driver):
    driver.get(url)
    content = []
    log.info(f'Getting apartments: {url}')
    try:
        anchors = driver.find_elements_by_xpath(
            "//div[@id='results-cards']/div/a[@class='card-wrapper']")
    except Exception as e:
        log.exception(f'{e}')
        return content
    links = [a.get_attribute('href') for a in anchors]
    apartments = []
    for apt in links:
        name = apt.strip('/').split('/')[-1]
        apartments.append({'name': name, 'url': f'{apt}lease'})
    # Scrape each apartment sequentially (a multiprocessing Pool version is
    # kept below, commented out, for reference).
    for apt in apartments:
        results = get_availability(apt)
        if results:
            content.append(results)
    # with Pool() as pool:
    #     results = [pool.apply_async(get_availability, args=(apt,)) for apt in apartments]
    #     for result in results:
    #         data = result.get()
    #         if data:
    #             content.append(data)
    return content
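

# Flattens the scraped results into rows of
# [date, complex, plan, specs, price, availability], stripping '$' and ','
# so the sheet stores plain numbers, then pushes the rows to the Google Sheet.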
def update_historical_data(doc_key, content):
    date = datetime.datetime.today().strftime('%Y-%m-%d')
    all_content = []
    for apt in content:
        complex = apt['meta']['name']
        data = apt['data']
        for row in data:
            cleaned_values = [f'{date}', f'{complex}'] + \
                [value.replace('$', '').replace(',', '') for value in row]
            all_content.append(cleaned_values)
    update_gdoc(doc_key, all_content)
def format_email(content):
    result = ''
    for apt in content:
        complex = apt['meta']['name']
        data = apt['data']
        if complex != 'mansion-grove':
            continue
        result += f'------------ {complex} ----------------\n'
        total_available = sum(int(row[-1]) for row in data)
        result += '\n'.join(', '.join(row) for row in data)
        result += f'\nTotal Available: {total_available}\n'
    return result
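

# Drives the embedded 'rp-leasing-widget' iframe: waits for it to become
# available, clicks the Start button, then reads each floorplan tile's name,
# specs, price range, and unit count. Returns {'meta': ..., 'data': [...]} on
# success, or an empty list when the page fails to load or parse.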
@get_browser
def get_availability(data, driver):
    """
    Returns apartment availability information
    """
    url = data['url']
    content = []
    log.info(f'Processing {url}')
    driver.get(url)
    delay = 60  # seconds
    try:
        WebDriverWait(driver, delay).until(
            EC.frame_to_be_available_and_switch_to_it('rp-leasing-widget'))
        WebDriverWait(driver, delay).until(EC.presence_of_element_located(
            (By.XPATH, "//button[contains(@class, 'primary')][contains(text(), 'Start')]")))
    except TimeoutException:
        log.info(f'Page did not load: {url}')
        return content
    try:
        driver.find_element_by_xpath(
            "//button[contains(@class, 'primary')][contains(text(), 'Start')]").click()
        WebDriverWait(driver, delay).until(
            EC.presence_of_element_located((By.XPATH, "//div[contains(@class, 'floorplan-tile')]/div/span[contains(@class, 'name')]")))
        # Collect floorplan names, specs, price ranges, and unit availability
        names = [n.text for n in driver.find_elements_by_xpath(
            "//div[contains(@class, 'floorplan-tile')]/div/span[contains(@class, 'name')]")]
        specs = [n.text for n in driver.find_elements_by_xpath(
            "//div[contains(@class, 'floorplan-tile')]/div/span[contains(@class, 'specs')]")]
        prices = [n.text for n in driver.find_elements_by_xpath(
            "//div[contains(@class, 'floorplan-tile')]/div/span[contains(@class, 'range')]")]
        availability = [n.text for n in driver.find_elements_by_xpath(
            "//div[contains(@class, 'floorplan-tile')]/div[@class='tile-buttons']/button")]
    except Exception:
        log.exception(f'Unable to parse {url}')
        return content
    for i in range(len(names)):
        match = re.match(
            r'\((\d+)\).*', availability[i]) if len(availability) > i else None
        units = int(match.groups()[0]) if match else 0
        match = re.match(
            r'(\$\d*)( \- \$\d*\*)*', prices[i].split(' - ')[0].replace(',', '').replace('From ', '')) if len(prices) > i else None
        min_price = match.groups()[0] if match else '$0'
        content.append((names[i], specs[i], min_price, str(units)))
    return {'meta': data, 'data': content}
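

# Sends the plain-text summary through Gmail's SMTP-over-SSL endpoint.
# Note: Google accounts typically require an app password (or OAuth) for SMTP
# logins rather than the regular account password.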
def send_email(username, password, to, content):
    if not content:
        log.info('Nothing to send')
        return
    msg = EmailMessage()
    msg.set_content(content)
    msg['Subject'] = 'Apartment availability'
    msg['From'] = username
    msg['To'] = to
    # Send the message via Gmail's SMTP server over SSL.
    s = smtplib.SMTP_SSL('smtp.gmail.com', 465)
    s.login(username, password)
    s.send_message(msg)
    s.quit()
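

# Appends the new rows to the sheet's existing contents and collapses
# duplicates by (Date, Complex, Plan, Specs) via groupby().min() before
# rewriting the sheet. Note: DataFrame.append is deprecated and removed in
# pandas >= 2.0; on newer pandas this would need
# pd.concat([existing, new]) instead.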
def update_gdoc(doc_key, cells):
    scope = [
        "https://spreadsheets.google.com/feeds",
        "https://www.googleapis.com/auth/drive",
    ]
    CREDENTIALS_PATH = pathlib.Path(__file__).parent.parent / 'credentials.json'
    credentials = ServiceAccountCredentials.from_json_keyfile_name(
        CREDENTIALS_PATH.resolve(), scope,
    )
    docs = gspread.authorize(credentials)
    sheet = docs.open_by_key(doc_key).sheet1
    new = pd.DataFrame(cells)
    new.columns = ['Date', 'Complex', 'Plan', 'Specs', 'Price', 'Availability']
    existing = pd.DataFrame(sheet.get_all_values()[1:])
    if existing.size:
        existing.columns = ['Date', 'Complex',
                            'Plan', 'Specs', 'Price', 'Availability']
    updated = existing.append(new)
    updated = updated.groupby(['Date', 'Complex', 'Plan', 'Specs']).min()
    updated.reset_index(inplace=True)
    sheet.update([updated.columns.values.tolist()] +
                 updated.values.tolist(), value_input_option='USER_ENTERED')
if __name__ == '__main__':
    cli()
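

# Azure Functions timer-trigger entry point (assumes `import azure.functions
# as func` earlier in the module). Configuration comes from application
# settings; note that os.environ values are strings, so SendEmail/UpdateGSheet
# are truthy whenever they are set, even to "False".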
def azurefunc(PrometheusScrapper: func.TimerRequest) -> None:
    email = os.environ["SendEmail"]
    email_to = os.environ["EmailTo"]
    username = os.environ["GmailUsername"]
    password = os.environ["GmailPassword"]
    gsheet = os.environ["UpdateGSheet"]
    doc_key = os.environ["GSheetKey"]
    run(email, username, email_to, password, gsheet, doc_key)
| 34.495177 | 175 | 0.646905 | 0 | 0 | 788 | 0.073453 | 6,201 | 0.57802 | 0 | 0 | 2,828 | 0.263609 |